#' Dimension Reduction Methods for Multivariate Time Series.
#'
#' BigVAR implements the HLAG and VARX-L frameworks which allow for the estimation of vector autoregressions and vector autoregressions with exogenous variables using structured convex penalties. This package originated as a 2014 Google "Summer of Code" Project. The development version of this package is hosted on github: \url{https://github.com/wbnicholson/BigVAR}.
#'
#' @author Will Nicholson \email{wbn8@@cornell.edu}
#' @docType package
#' @useDynLib BigVAR
#' @name BigVAR
#' @details To use the facilities of this package, start with a \eqn{T \times (k+m)} multivariate time series (in which T denotes the length of the series, k the number of endogenous or "modeled" series, and m the number of exogenous or "unmodeled" series) and run \code{\link{constructModel}} to create an object of class \code{\link{BigVAR}}. \code{\link{cv.BigVAR}} creates an object of class \code{\link{BigVAR.results}}, which chooses an optimal penalty parameter by minimizing h-step ahead forecast error over a specified cross-validation period on a grid of values, and compares the result against AIC, BIC, unconditional mean, and random walk benchmarks. There are plot functions for both BigVAR (\code{\link{plot.BigVAR}}) and BigVAR.results (\code{\link{plot}}) as well as a predict function for BigVAR.results (\code{\link{predict}}).
#' @seealso \code{\link{constructModel}}, \code{\link{cv.BigVAR}}, \code{\link{BigVAR.results}}, \code{\link{plot}}, \code{\link{predict}}
#' @examples
#' # Fit a Basic VAR-L(3,4) on simulated data
#' data(Y)
#' T1=floor(nrow(Y)/3)
#' T2=floor(2*nrow(Y)/3)
#' m1=constructModel(Y,p=4,struct="Basic",gran=c(50,10),verbose=FALSE,T1=T1,T2=T2,IC=FALSE)
#' plot(m1)
#' results=cv.BigVAR(m1)
#' plot(results)
#' predict(results,n.ahead=1)
#' @references
#' Lutkepohl "New Introduction to Multivariate Time Series",
#' Banbura, Marta, Domenico Giannone, and Lucrezia Reichlin. 'Large Bayesian vector auto regressions.' Journal of Applied Econometrics 25.1 (2010): 71-92.
#' Breheny P, Huang J (2011). “Coordinate descent algorithms for nonconvex penalized regression, with applications to biological feature selection.” Annals of Applied Statistics, 5(1), 232–253.
#' Nicholson, William, I. Wilms, J. Bien, and D. S. Matteson. High dimensional forecasting via interpretable vector autoregression. Journal of Machine Learning Research, 21(166):1–52, 2020.
#' William B. Nicholson, David S. Matteson, Jacob Bien,VARX-L: Structured regularization for large vector autoregressions with exogenous variables, International Journal of Forecasting, Volume 33, Issue 3, 2017, Pages 627-651,
#' William B Nicholson, David S. Matteson, and Jacob Bien (2016), 'BigVAR: Tools for Modeling Sparse High-Dimensional Multivariate Time Series' arxiv:1702.07094
#' @import Rcpp
#' @import methods
#' @import stats
#' @importFrom utils head
#' @importFrom utils setTxtProgressBar
#' @importFrom utils txtProgressBar
#' @importFrom utils tail
NULL
# File: /scratch/gouwar.j/cran-all/cranData/BigVAR/R/BigVAR.R
.MCPFit <- function(B, Z, Y, lambda, eps, p, MN, k, k1, s, m, C, group, gamma = 3, YMean, ZMean) {
tk <- 1/max(Mod(eigen(Z %*% t(Z), only.values = TRUE)$values))
B1 <- abind::adrop(B[, 2:dim(B)[2], 1, drop = F], 3)
nc <- apply(B, 3, ncol)[1]
BINI <- B[, 2:nc, , drop = F]
    mcp <- group == "MCP"
beta <- gamloopMCP(BINI, Y, Z, as.matrix(lambda), eps, as.matrix(YMean), as.matrix(ZMean), gamma = gamma, mcp)
if (MN) {
beta <- adjust_mn_var(beta, C)
}
return(beta)
}
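# A minimal R sketch (an assumption about the C++ routine, not the package's
# implementation) of the proximal operator gamloopMCP applies coordinate-wise:
# firm thresholding for MCP (cf. Breheny and Huang, 2011), which reduces to
# soft thresholding as gamma grows. Assumes unit step size and gamma > 1.
.mcp_prox_sketch <- function(z, lambda, gamma = 3) {
    # soft threshold, then rescale inside the concave region of the penalty
    soft <- sign(z) * pmax(abs(z) - lambda, 0)
    ifelse(abs(z) <= gamma * lambda, soft/(1 - 1/gamma), z)
}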
# Sparse Own/Other (VAR)
.SparseGroupLassoVAROO <- function(beta, groups, compgroups, Y, Z, lambda, alpha, INIactive, eps, q1a, p, MN, dual = FALSE,
C, YMean, ZMean) {
k <- ncol(Y)
Y <- t(Y)
m <- 0
ZZ <- kronecker(t(Z), diag(k))
M1f <- list()
M2f <- list()
jj <- .lfunction3(p, k)
eigs <- c()
q1 <- list()
# get step size from inverse of max eigenvalue via power method
for (j in seq_len(length(jj))) {
M1f[[j]] <- ZZ[, jj[[j]]]
M2f[[j]] <- crossprod(ZZ[, jj[[j]]])
gg1 <- powermethod(M2f[[j]], q1a[[j]])
eigs[j] <- gg1$lambda
q1[[j]] <- gg1$q1
}
jj <- .lfunction3cpp(p, k)
jjfull <- jj
jjcomp <- .lfunctioncomp(p, k)
dims <- dim(beta)
beta <- array(beta[, 2:dim(beta)[2], ], dim = c(dims[1], dims[2] - 1, dims[3]))
if (!dual) {
BB <- GamLoopSGLOO(beta, INIactive, lambda, alpha, Y, ZZ, jj, jj, jjcomp, eps, YMean, ZMean, k, p * k, M2f, eigs,
m)
} else {
BB <- GamLoopSGLOODP(beta, INIactive, lambda, alpha, Y, ZZ, jj, jj, jjcomp, eps, YMean, ZMean, k, p * k, M2f, eigs,
m)
}
BB$q1 <- q1
if (MN) {
BB$beta <- adjust_mn_var(BB$beta, C)
}
return(BB)
}
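# A minimal sketch of the power iteration assumed to underlie the C++
# powermethod() routine used above: iterate q <- Mq/||Mq|| on a symmetric
# positive semi-definite M to recover its dominant eigenvalue (whose inverse
# serves as the gradient step size) along with the corresponding eigenvector,
# which is recycled as a warm start on subsequent calls.
.power_iter_sketch <- function(M, q0, tol = 1e-08, max_iter = 1000L) {
    q <- q0/sqrt(sum(q0^2))
    lambda_old <- 0
    lambda_new <- 0
    for (i in seq_len(max_iter)) {
        z <- as.vector(M %*% q)
        lambda_new <- sqrt(sum(z^2))  # eigenvalue estimate via the 2-norm of Mq
        q <- z/lambda_new
        if (abs(lambda_new - lambda_old) < tol)
            break
        lambda_old <- lambda_new
    }
    list(lambda = lambda_new, q1 = q)
}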
# Sparse Lag (VAR)
.SparseGroupLassoVAR <- function(beta, groups, compgroups, Y, Z, lambda, alpha, INIactive, eps, q1a, p, MN, C, YMean, ZMean) {
k <- ncol(Y)
Y <- t(Y)
M1f <- list()
M2f <- list()
eigs <- c()
q1 <- list()
jj <- .groupfun(p, k)
# get step size from inverse of max eigenvalue via power method
for (j in seq_len(length(jj))) {
M1f[[j]] <- Z[jj[[j]], ]
M2f[[j]] <- Z[jj[[j]], ] %*% t(Z[jj[[j]], ])
gg1 <- powermethod(M2f[[j]], q1a[[j]])
eigs[j] <- gg1$lambda
q1[[j]] <- gg1$q1
}
jj <- .groupfuncpp(p, k)
jjfull <- jj
jjcomp <- .groupfuncomp(p, k)
dims <- dim(beta)
beta <- array(beta[, 2:dim(beta)[2], ], dim = c(dims[1], dims[2] - 1, dims[3]))
BB <- GamLoopSGL(beta, INIactive, lambda, alpha, Y, Z, jj, jjfull, jjcomp, eps, YMean, as.matrix(ZMean), k, p * k, M1f,
M2f, eigs)
BB$q1 <- q1
if (MN) {
BB$beta <- adjust_mn_var(BB$beta, C)
}
return(BB)
}
# Sparse Lag (VAR) Dual Search
.SparseGroupLassoVARDual <- function(beta, groups, compgroups, Y, Z, lambda, alpha, INIactive, eps, q1a, p, MN, C, YMean,
ZMean) {
k <- ncol(Y)
Y <- t(Y)
M1f <- list()
M2f <- list()
eigs <- c()
q1 <- list()
jj <- .groupfun(p, k)
# get step size from inverse of max eigenvalue via power method
for (j in seq_len(length(jj))) {
M1f[[j]] <- Z[jj[[j]], ]
M2f[[j]] <- Z[jj[[j]], ] %*% t(Z[jj[[j]], ])
gg1 <- powermethod(M2f[[j]], q1a[[j]])
eigs[j] <- gg1$lambda
q1[[j]] <- gg1$q1
}
jj <- .groupfuncpp(p, k)
jjfull <- jj
jjcomp <- .groupfuncomp(p, k)
ngp <- length(alpha) * length(lambda)
dims <- dim(beta)
beta <- array(beta[, 2:dim(beta)[2], ], dim = c(dims[1], dims[2] - 1, dims[3]))
BB <- GamLoopSGLDP(beta, INIactive, lambda, alpha, Y, Z, jj, jjfull, jjcomp, eps, YMean, ZMean, k, p * k, M1f, M2f, eigs)
BB$q1 <- q1
if (MN) {
BB$beta <- adjust_mn_var(BB$beta, C)
}
return(BB)
}
# Lag Group (VAR/VARX-L)
.GroupLassoVAR1 <- function(beta, groups, compgroups, Y, Z, lambda, INIactive, eps, p, MN, k, k1, s, C, YMean, ZMean) {
if (!is.matrix(Y)) {
Y <- matrix(Y, ncol = 1)
}
k1 <- ncol(Y)
m <- k - k1
Y <- t(Y)
fullgroups <- groups
Eigsys <- Eigencomp(Z, groups, length(groups), k1)
M2 <- Eigsys$M3
eigvals <- Eigsys$eigval
eigvecs <- Eigsys$eigvec
beta <- beta[, 2:dim(beta)[2], , drop = F]
BB <- GamLoopGL2(beta, INIactive, lambda, Y, Z, groups, fullgroups, compgroups, eps, YMean, ZMean, k1, p * k1 + m * s,
M2, eigvals, eigvecs)
if (MN) {
BB$beta <- adjust_mn_var(BB$beta, C)
}
return(BB)
}
# Group Lasso Own/Other (VARX-L). TODO: this largely parallels the VAR
# version and could likely be merged with it (as with the other VARX variants).
.GroupLassoOOX <- function(beta, groups, compgroups, Y, Z, lambda, INIactive, eps, p, MN, k, k1, s, C, YMean, ZMean) {
m <- k - k1
Y <- t(Y)
ZZ <- kronecker(t(Z), diag(k1))
Eigsys <- EigencompOO(ZZ, groups, length(groups), k1)
M2 <- Eigsys$M3
eigvals <- Eigsys$eigval
eigvecs <- Eigsys$eigvec
groups_full <- groups
beta <- array(beta[, 2:ncol(as.matrix(beta[, , 1])), ], dim = c(k1, (k1) * p + m * s, length(lambda)))
BB <- GamLoopGLOO(beta, INIactive, lambda, Y, ZZ, groups, groups_full, compgroups, eps, YMean, ZMean, k1, p * (k1) +
m * s, M2, eigvals, eigvecs, k1)
if (MN) {
BB$beta <- adjust_mn_var(BB$beta, C)
}
return(BB)
}
# Own/Other Group VAR-L
.GroupLassoOO <- function(beta, groups, compgroups, Y, Z, lambda, INIactive, eps, p, MN, C, YMean, ZMean) {
if (!is.matrix(Y)) {
Y <- matrix(Y, ncol = 1)
}
k <- ncol(Y)
Y <- t(Y)
fullgroups <- groups
ZZ <- kronecker(t(Z), diag(k))
Eigsys <- EigencompOO(ZZ, groups, length(groups), k)
M2 <- Eigsys$M3
eigvals <- Eigsys$eigval
eigvecs <- Eigsys$eigvec
beta <- array(beta[, 2:ncol(as.matrix(beta[, , 1])), ], dim = c(k, k * p, length(lambda)))
BB <- GamLoopGLOO(beta, INIactive, lambda, Y, ZZ, groups, fullgroups, compgroups, eps, YMean, ZMean, k, p * k, M2, eigvals,
eigvecs, k)
if (MN) {
BB$beta <- adjust_mn_var(BB$beta, C)
}
return(BB)
}
# Sparse Lag VARX-L
.SparseGroupLassoVARX <- function(beta, groups, compgroups, Y, Z, lambda, alpha, INIactive, eps, starting_eigvals, p, MN,
k, s, k1, C, YMean, ZMean) {
m <- k - k1
Y <- t(Y)
M1f <- list()
M2f <- list()
eigs <- c()
q1 <- list()
grps_lg <- lapply(groups, function(x) {
x + 1
})
for (j in seq_len(length(grps_lg))) {
M2f[[j]] <- Z[grps_lg[[j]], ] %*% t(Z[grps_lg[[j]], ])
if (j <= p) {
gg1 <- powermethod(M2f[[j]], starting_eigvals[[j]])
eigs[j] <- gg1$lambda
q1[[j]] <- gg1$q1
} else {
M2f[[j]] <- as.vector(Z[grps_lg[[j]], ]) %*% as.vector(t(Z[grps_lg[[j]], ]))
eigs[j] <- M2f[[j]]
}
}
groups_full <- groups
beta <- array(beta[, 2:ncol(as.matrix(beta[, , 1])), ], dim = c(k1, (k1) * p + (s * m), length(lambda)))
BB <- GamLoopSGLX(beta, INIactive, lambda, alpha, Y, Z, groups, groups_full, compgroups, eps, YMean, ZMean, k1, (k1) *
p + (s * m), M2f, eigs, k1)
BB$q1 <- q1
if (MN) {
BB$beta <- adjust_mn_var(BB$beta, C)
}
return(BB)
}
.SparseGroupLassoVARXDual <- function(beta, groups, compgroups, Y, Z, lambda, alpha, INIactive, eps, starting_eigvals, p,
MN, k, s, k1, C, YMean, ZMean) {
m <- k - k1
Y <- t(Y)
M1f <- list()
M2f <- list()
eigs <- c()
q1 <- list()
jj <- lapply(groups, function(x) {
x + 1
})
for (j in seq_len(length(jj))) {
M2f[[j]] <- Z[jj[[j]], ] %*% t(Z[jj[[j]], ])
if (j <= p) {
gg1 <- powermethod(M2f[[j]], starting_eigvals[[j]])
eigs[j] <- gg1$lambda
q1[[j]] <- gg1$q1
} else {
M2f[[j]] <- as.vector(Z[jj[[j]], ]) %*% as.vector(t(Z[jj[[j]], ]))
eigs[j] <- M2f[[j]]
}
}
beta <- array(beta[, 2:ncol(as.matrix(beta[, , 1])), ], dim = c(k1, (k1) * p + (s * m), nrow(lambda) * length(alpha)))
BB <- GamLoopSGLXDP(beta, INIactive, lambda, alpha, Y, Z, groups, groups, compgroups, eps, YMean, ZMean, k1, (k1) * p +
(s * m), M2f, eigs, k1)
BB$q1 <- q1
if (MN) {
BB$beta <- adjust_mn_var(BB$beta, C)
}
return(BB)
}
# Sparse Own/Other (VARX)
.SparseGroupLassoVAROOX <- function(beta, groups, compgroups, Y, Z, lambda, alpha, INIactive, eps, p, MN, k1, s, k, dual = FALSE,
C, YMean, ZMean) {
m <- k - k1
Y <- t(Y)
M1f <- list()
M2f <- list()
eigs <- c()
q1 <- list()
    # shift group indices from C++ (zero-based) to R (one-based)
rgroups <- lapply(groups, function(x) {
x + 1
})
ZZ <- kronecker(t(Z), diag(k1))
for (j in seq_len(length(rgroups))) {
M2f[[j]] <- crossprod(ZZ[, rgroups[[j]]])
eigs[j] <- max(Mod(eigen(M2f[[j]], only.values = TRUE)$values))
}
fullgroups <- groups
if (!dual) {
gran2 <- length(lambda)
} else {
gran2 <- nrow(lambda) * ncol(lambda)
}
beta <- array(beta[, 2:ncol(as.matrix(beta[, , 1])), ], dim = c(k1, k1 * p + m * s, gran2))
if (dual) {
BB <- GamLoopSGLOODP(beta, INIactive, lambda, alpha, Y, ZZ, groups, groups, compgroups, eps, YMean, ZMean, k1, p *
k1 + m * s, M2f, eigs, m)
} else {
BB <- GamLoopSGLOO(beta, INIactive, lambda, alpha, Y, ZZ, groups, fullgroups, compgroups, eps, YMean, ZMean, k1,
p * k1 + m * s, M2f, eigs, m)
}
if (MN) {
BB$beta <- adjust_mn_var(BB$beta, C)
}
return(BB)
}
# Elementwise HLAG
.HLAGElemAlg <- function(beta, Y, Z, lambda, eps, p, MN, C, YMean, ZMean, separate_lambdas = FALSE) {
k <- ncol(Y)
betafin <- beta
tk <- 1/max(Mod(eigen(Z %*% t(Z))$values))
lambda <- as.matrix(lambda)
betaini <- array(beta[, 2:dim(beta)[2], ], dim = c(k, k * p, nrow(lambda)))
betafin <- gamloopElem(betaini, Y, Z, lambda, eps, YMean, ZMean, as.matrix(betaini[, , 1]), k, p, separate_lambdas)
if (MN) {
betafin <- adjust_mn_var(betafin, C)
}
return(betafin)
}
.lassoVARFistX <- function(B, Z, Y, lambda, eps, p, MN, k, k1, s, m, C, YMean, ZMean, separate_lambdas = FALSE) {
if (!is.matrix(Y)) {
Y <- matrix(Y, ncol = 1)
}
tk <- 1/max(Mod(eigen(Z %*% t(Z), only.values = TRUE)$values))
B1 <- abind::adrop(B[, 2:dim(B)[2], 1, drop = F], 3)
nc <- apply(B, 3, ncol)[1]
BINI <- B[, 2:nc, , drop = F]
beta <- gamloopFista(BINI, Y, Z, as.matrix(lambda), eps, as.matrix(YMean), as.matrix(ZMean), B1, k, p, tk, k1, s, separate_lambdas)
if (MN) {
beta <- adjust_mn_var(beta, C)
}
return(beta)
}
# general basic/elastic net
.lassoVARFistXEN <- function(B, Z, Y, lambda, alpha, eps, p, MN, k, k1, s, m, C, YMean, ZMean, separate_lambdas = FALSE) {
if (!is.matrix(Y)) {
Y <- matrix(Y, ncol = 1)
}
tk <- 1/max(Mod(eigen(Z %*% t(Z), only.values = TRUE)$values))
BFOO1 <- abind::adrop(B[, 2:dim(B)[2], 1, drop = F], 3)
nc <- apply(B, 3, ncol)[1]
BFOO <- B[, 2:nc, , drop = F]
beta <- gamloopFistaEN(BFOO, Y, Z, as.matrix(lambda), as.matrix(alpha), eps, as.matrix(YMean), as.matrix(ZMean), BFOO1,
k, p, tk, k1, s, separate_lambdas)
if (MN) {
beta <- adjust_mn_var(beta, C)
}
return(beta)
}
# Componentwise HLAG
.HLAGCAlg <- function(beta, Y, Z, lambda, eps, p, MN, C, YMean, ZMean, separate_lambdas = FALSE) {
if (!is.matrix(Y)) {
Y <- matrix(Y, ncol = 1)
}
k <- ncol(Y)
betafin <- beta
tk <- 1/max(Mod(eigen(Z %*% t(Z))$values))
lambda <- as.matrix(lambda)
if (separate_lambdas) {
betaini <- array(beta[, 2:ncol(as.matrix(beta[, , 1])), ], dim = c(k, k * p, nrow(lambda)))
} else {
betaini <- array(beta[, 2:ncol(as.matrix(beta[, , 1])), ], dim = c(k, k * p, length(lambda)))
}
betafin <- gamloopHLAG(betaini, Y, Z, lambda, eps, YMean, ZMean, as.matrix(betaini[, , 1]), k, p, separate_lambdas)
if (MN) {
betafin <- adjust_mn_var(betafin, C)
}
return(betafin)
}
# Endogenous First VARX-L
.EFVARX <- function(beta, Y, Z, lambda, eps, MN, k1, s, m, p, C, YMean, ZMean) {
betafin <- beta
tk <- 1/max(Mod(eigen(Z %*% t(Z))$values))
if (k1 == 1) {
betaini <- array(beta[, 2:(k1 * p + m * s + 1), ], dim = c(1, k1 * p + m * s, dim(beta)[3]))
} else {
betaini <- beta[, 2:dim(beta)[2], ]
}
for (i in seq_len(length(lambda))) {
if (dim(beta)[3] > 1) {
betaF <- fistaX(Y, Z, matrix(betaini[, , i], nrow = k1), p, k1, lambda[i], eps, tk, m, s)
} else {
betaF <- fistaX(Y, Z, matrix(betaini, nrow = k1), p, k1, lambda[i], eps, tk, m, s)
}
nu <- YMean - betaF %*% ZMean
betafin[, , i] <- cbind(nu, betaF)
}
if (MN) {
betafin <- adjust_mn_var(betafin, C)
}
return(betafin)
}
# HLAG Own/Other
.HLAGOOAlg <- function(beta, Y, Z, lambda, eps, p, MN, C, YMean, ZMean, separate_lambdas = FALSE) {
k <- ncol(Y)
betafin <- beta
    # group weights: sqrt of group size (1 for own lags, k - 1 for other lags)
    weights <- sqrt(c(rep(c(1, k - 1), length = 2 * p)))
groups <- list()
for (i in 1:k) {
groups[[i]] <- .vecoovarscpp(p, k, i)
}
lambda <- as.matrix(lambda)
betaini <- array(beta[, 2:ncol(as.matrix(beta[, , 1])), ], dim = c(k, k * p, nrow(lambda)))
betafin <- gamloopOO(betaini, Y, Z, lambda, eps, YMean, ZMean, as.matrix(betaini[, , 1]), k, p, weights, groups, separate_lambdas)
if (MN) {
betafin <- adjust_mn_var(betafin, C)
}
return(betafin)
}
# indexing for EFX: column indices of lag i's endogenous and exogenous blocks
vxsubs <- function(i, k, m, p, s) {
    vv <- c(((i - 1) * k + 1):((i - 1) * k + k), ((i - 1) * m + k * p + 1):((i - 1) * m + k * p + m))
    vv
}
# proximal operator for the Endogenous-First penalty: lags up to s penalize
# the endogenous and exogenous blocks jointly; beyond s only the endogenous
# block remains
prox2 <- function(v, lambda, k, p, m, s) {
    for (i in 1:p) {
        if (i <= s) {
            # endogenous and exogenous coefficients for lag i
            vv <- vxsubs(i, k, m, p, s)
            F1 <- 0
        }
        if (i > s) {
            # endogenous coefficients only
            vv <- ((i - 1) * k + 1):((i - 1) * k + k)
            F1 <- 1
        }
        v2 <- proxvx2(v[vv], p, lambda, m, k, F1)
        v[vv] <- v2
    }
    v
}
# row-wise FISTA updates for the Endogenous-First VARX-L
fistaX <- function(Y, Z, beta, p, k1, lambda, eps, tk, m, s) {
    for (i in 1:k1) {
        phiOLD <- beta[i, ]
        phiOLDOLD <- beta[i, ]
        j <- 1
        thresh <- 10 * eps
        while (thresh > eps) {
            # Nesterov momentum step followed by a proximal gradient update
            v <- matrix(phiOLD + ((j - 2)/(j + 1)) * (phiOLD - phiOLDOLD), nrow = 1)
            phiR <- prox2(v + tk * as.vector((Y[, i] - v %*% Z) %*% t(Z)), tk * lambda, k1, p, m, s)
            thresh <- max(abs(phiR - v))
            phiOLDOLD <- phiOLD
            phiOLD <- phiR
            j <- j + 1
        }
        beta[i, ] <- phiR
    }
    beta
}
# Lag weighted lasso: VAR only
.lassoVARTL <- function(B, Z, Y, lambda, eps, p, MN, alpha, C, YMean, ZMean) {
if (!is.matrix(Y)) {
Y <- matrix(Y, ncol = 1)
}
k <- ncol(Y)
tk <- 1/max(Mod(eigen(Z %*% t(Z), only.values = TRUE)$values))
BFOO1 <- as.matrix(B[, 2:ncol(B[, , 1]), 1])
BFOO <- array(B[, 2:ncol(as.matrix(B[, , 1])), ], dim = c(k, k * p, length(lambda) * length(alpha)))
p2 <- 1:p
gran2 <- length(lambda)
for (i in seq_len(length(alpha))) {
W <- rep(p2^(-alpha[i]), each = k)
ZADJ <- diag(W) %*% Z
B[, , (1 + (i - 1) * gran2):(i * length(lambda))] <- gamloopFista(array(BFOO[, , (1 + (i - 1) * gran2):(i * length(lambda))],
dim = c(k, k * p, length(lambda))), Y, ZADJ, as.matrix(lambda), eps, as.matrix(YMean), as.matrix(ZMean), BFOO1,
k, p, tk, k, p)
for (j in (1 + (i - 1) * gran2):(i * length(lambda))) {
B[, 2:(k * p + 1), j] <- B[, 2:(k * p + 1), j] %*% diag(W)
}
}
if (MN) {
B <- adjust_mn_var(B, C)
}
return(B)
}
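# Worked illustration of the taper weights above: with alpha = 0.5, p = 3
# lags, and k = 2 series, W = rep((1:3)^(-0.5), each = 2). Scaling the rows
# of Z by W before the lasso fit and unweighting the coefficients afterwards
# is equivalent to penalizing lag-l coefficients by lambda * l^alpha, so
# more distant lags are shrunk more aggressively.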
# File: /scratch/gouwar.j/cran-all/cranData/BigVAR/R/BigVARAlgorithms.R
# BigVAR fit used in rolling cv, out of sample forecast evaluation and
# BigVAR.est
.BigVAR.fit <- function(group, beta, trainZ, trainY, lambda, tol, p, m = 0, k1, k,
s = 0, s1 = 0, MN = FALSE, C, intercept = TRUE, separate_lambdas, dual, activeset = NULL,
starting_eigvals = NULL, groups = NULL, compgroups = NULL, VARX = FALSE, alpha = NULL,
palpha=NULL, gamma=3) {
if (is.null(s)) {
s <- 0
}
if (is.null(s1)) {
s1 <- 0
}
if (is.null(m)) {
m <- 0
}
pre_proc <- pre_process(trainY, trainZ, C, MN, intercept)
if (separate_lambdas) {
if (is.vector(lambda)) {
lambda <- matrix(lambda, nrow = 1)
}
gran2 <- nrow(lambda)
}
trainY <- pre_proc$Y
trainZ <- pre_proc$Z
YMean <- pre_proc$YMean
ZMean <- pre_proc$ZMean
C <- pre_proc$C
if (group == "Basic") {
beta <- .lassoVARFistX(beta, trainZ, trainY, lambda, tol, p, MN, k,
k1, s + s1, m, C, YMean, ZMean, separate_lambdas)
}
    if (group == "MCP" | group == "SCAD") {
        beta <- .MCPFit(beta, trainZ, trainY, lambda, tol, p, MN, k, k1, s, m, C, group, gamma, YMean, ZMean)
    }
if (group == "BasicEN") {
beta <- .lassoVARFistXEN(beta, trainZ, trainY, lambda, alpha, tol, p, MN, k,
k1, s + s1, m, C, YMean, ZMean, separate_lambdas)
}
if (group == "Lag") {
GG <- .GroupLassoVAR1(beta, groups, compgroups, trainY, trainZ, lambda, activeset,
tol, p, MN, k, k1, s + s1, C, YMean, ZMean)
beta <- GG$beta
activeset <- GG$active
}
if (group == "SparseLag") {
if (VARX) {
if (!dual) {
GG <- .SparseGroupLassoVARX(beta, groups, compgroups, trainY, trainZ,
lambda, alpha, INIactive = activeset, tol, starting_eigvals, p, MN,
k, s + s1, k1, C, YMean, ZMean)
} else {
GG <- .SparseGroupLassoVARXDual(beta, groups, compgroups, trainY,
trainZ, lambda, alpha, INIactive = activeset, tol, starting_eigvals,
p, MN, k, s + s1, k1, C, YMean, ZMean)
}
} else {
if (!dual) {
GG <- .SparseGroupLassoVAR(beta, groups, compgroups, trainY, trainZ,
lambda, alpha, INIactive = activeset, tol, starting_eigvals, p, MN,
C, YMean, ZMean)
} else {
GG <- .SparseGroupLassoVARDual(beta, groups, compgroups, trainY,
trainZ, lambda, alpha, INIactive = activeset, tol, starting_eigvals,
p, MN, C, YMean, ZMean)
}
}
beta <- GG$beta
activeset <- GG$active
starting_eigvals <- GG$q1
}
if (group == "OwnOther") {
if (VARX) {
GG <- .GroupLassoOOX(beta, groups, compgroups, trainY, trainZ, lambda,
activeset, tol, p, MN, k, k1, s + s1, C, YMean, ZMean)
} else {
GG <- .GroupLassoOO(beta, groups, compgroups, trainY, trainZ, lambda, activeset,
tol, p, MN, C, YMean, ZMean)
}
beta <- GG$beta
activeset <- GG$active
}
if (group == "SparseOO") {
if (VARX) {
GG <- .SparseGroupLassoVAROOX(beta, groups, compgroups, trainY, trainZ,
lambda, alpha, INIactive = activeset, tol, p, MN, k1, s + s1, k, dual,
C, YMean, ZMean)
} else {
GG <- .SparseGroupLassoVAROO(beta, groups, compgroups, trainY, trainZ,
lambda, alpha, INIactive = activeset, tol, starting_eigvals, p, MN,
dual, C, YMean, ZMean)
starting_eigvals <- GG$q1
}
beta <- GG$beta
activeset <- GG$active
}
if (group == "Tapered") {
beta <- .lassoVARTL(beta, trainZ, trainY, lambda, tol, p, MN, palpha, C, YMean,
ZMean)
}
if (group == "EFX") {
beta <- .EFVARX(beta, trainY, trainZ, lambda, tol, MN, k1, s, m, p, C, YMean,
ZMean)
}
if (group == "HLAGC") {
beta <- .HLAGCAlg(beta, trainY, trainZ, lambda, tol, p, MN, C, YMean, ZMean,
separate_lambdas)
}
if (group == "HLAGOO") {
beta <- .HLAGOOAlg(beta, trainY, trainZ, lambda, tol, p, MN, C, YMean, ZMean,
separate_lambdas)
}
if (group == "HLAGELEM") {
beta <- .HLAGElemAlg(beta, trainY, trainZ, lambda, tol, p, MN, C, YMean, ZMean,
separate_lambdas)
}
if (group == "BGR") {
trainZ <- rbind(1, trainZ)
beta <- BGRGridSearch(trainY, trainZ, p, lambda, as.numeric(MN))
}
if (!exists("activeset")) {
activeset <- NULL
}
if (!exists("starting_eigvals")) {
starting_eigvals <- NULL
}
return(list(beta = beta, activeset = activeset, starting_eigvals = starting_eigvals))
}
#' Simple function to fit BigVAR model with fixed penalty parameter
#' @param Y \eqn{T \times k} multivariate time series or, in the VARX case, a \eqn{T \times (k+m)} matrix of endogenous and exogenous series
#' @param p Predetermined maximal lag order (for modeled series)
#' @param struct The choice of penalty structure (see details).
#' @param lambda vector or matrix of penalty parameters.
#' @param intercept True or False: option to fit an intercept
#' @param RVAR True or False: option to refit based upon the support selected using the Relaxed-VAR procedure
#' @param refit_fraction fraction of least squares refit to incorporate (default 1)
#' @param MN Minnesota Prior Indicator
#' @param VARX List containing VARX model specifications.
#' @param alpha grid of candidate parameters for the alpha in the Sparse Lag and Sparse Own/Other VARX-L
#' @param C vector of coefficients to shrink toward a random walk (if \code{MN} is \code{TRUE})
#' @param tf transfer function indicator (i.e. a VARX model in which p=0 and s>0) (default \code{FALSE})
#' @param tol optimization tolerance (default 1e-4)
#' @param separate_lambdas indicator for separate penalty parameters for each time series (default \code{FALSE})
#' @param beta optional \eqn{k\times (k\times p + m\times s +1)} coefficient matrix to use as a 'warm start' (default \code{NULL})
#' @param gamma additional parameter for SCAD/MCP penalty (default 3)
#'
#' @details The choices for 'struct' are as follows
#' \itemize{
#' \item{ 'Basic' (Basic VARX-L)}
#' \item{ 'BasicEN' (Basic Elastic Net VARX-L)}
#' \item{ 'Lag' (Lag Group VARX-L)}
#' \item{ 'SparseLag' (Lag Sparse Group VARX-L)}
#' \item{ 'OwnOther' (Own/Other Group VARX-L) }
#' \item{ 'SparseOO' (Own/Other Sparse Group VARX-L) }
#' \item{ 'EFX' (Endogenous First VARX-L)}
#' \item{ 'HLAGC' (Componentwise HLAG) }
#' \item{ 'HLAGOO' (Own/Other HLAG) }
#' \item{ 'HLAGELEM' (Elementwise HLAG)}
#' \item{ 'Tapered' (Lag weighted Lasso VAR)}
#' \item{ 'BGR' (Bayesian Ridge Regression (cf. Banbura et al))}
#' \item{ 'MCP' (Minimax Concave Penalty (cf. Breheny and Huang))}
#' \item{ 'SCAD' (Smoothly Clipped Absolute Deviation (cf. Breheny and Huang))}
#' }
#'
#' VARX specifications consist of a list with entry k denoting the series that are to be modeled and entry s to denote the maximal lag order for exogenous series.
#'
#' The argument alpha is ignored unless the structure choice is 'BasicEN,' 'SparseLag,' or 'SparseOO.' By default 'alpha' is set to \code{NULL} and will be initialized as 1/(k+1) in \code{cv.BigVAR} and \code{BigVAR.est}. Any user supplied values must be between 0 and 1.
#' @note The specifications 'Basic,' 'BasicEN,' 'Lag,' 'SparseLag,' 'SparseOO,' 'OwnOther,' 'MCP,' and 'SCAD' can accommodate both VAR and VARX models. EFX only applies to VARX models. 'HLAGC,' 'HLAGOO,' 'HLAGELEM,' and 'Tapered' can only be used with VAR models. Our implementation of the SCAD and MCP penalties is heavily influenced by the implementation in \code{ncvreg}.
#'
#' @seealso \code{\link{cv.BigVAR}},\code{\link{BigVAR.est}},\code{\link{constructModel}}
#'
#' @references
#' Banbura, Marta, Domenico Giannone, and Lucrezia Reichlin. 'Large Bayesian vector auto regressions.' Journal of Applied Econometrics 25.1 (2010): 71-92.
#' Breheny P, Huang J (2011). “Coordinate descent algorithms for nonconvex penalized regression, with applications to biological feature selection.” Annals of Applied Statistics, 5(1), 232–253.
#' Nicholson, William, I. Wilms, J. Bien, and D. S. Matteson. High dimensional forecasting via interpretable vector autoregression. Journal of Machine Learning Research, 21(166):1-52, 2020.
#' William B. Nicholson, David S. Matteson, and Jacob Bien. VARX-L: Structured regularization for large vector autoregressions with exogenous variables. International Journal of Forecasting, Volume 33, Issue 3, 2017, Pages 627-651.
#' William B. Nicholson, David S. Matteson, and Jacob Bien (2016), 'BigVAR: Tools for Modeling Sparse High-Dimensional Multivariate Time Series', arXiv:1702.07094.
#' @examples
#' # VARX Example
#' # Fit a Basic VARX-L with k=2, m=1, s=2, p=4, lambda=1e-2
#' VARX=list()
#' VARX$k=2 # indicates that the first two series are modeled
#' VARX$s=2 # sets 2 as the maximal lag order for exogenous series
#' data(Y)
#' BigVAR.fit(Y,p=4,'Basic',lambda=1e-2,VARX=VARX)
#' @export
BigVAR.fit <- function(Y, p, struct, lambda, alpha = NULL, VARX = list(), separate_lambdas = F,
MN = F, C = as.double(NULL), intercept = TRUE, tf = F, tol = 1e-04, RVAR = F, refit_fraction=1,
beta = NULL, gamma=3) {
if (!is.matrix(Y)) {
stop("Y needs to be a matrix")
}
if (is.null(lambda)) {
stop("Must include penalty parameter")
}
if (is.null(alpha)) {
alpha <- 1/(ncol(Y) + 1)
}
dual <- FALSE
k <- ncol(Y)
# runs constructModel just to check for errors
temp.bv <- constructModel(Y, p, struct = struct, gran = c(lambda), ownlambdas = TRUE,
VARX = VARX, cv = "None", model.controls=list(MN=MN, RVAR=RVAR, C=C, intercept = intercept,gamma=gamma))
gran2 <- length(lambda)
group <- struct
groups <- 0
if (!is.matrix(Y)) {
Y <- matrix(Y, ncol = 1)
}
    s <- ifelse(length(VARX) != 0, VARX$s, 0)
T <- nrow(Y) - max(p, s)
if (length(VARX) != 0) {
k1 <- VARX$k
s <- VARX$s
if(exists('contemp',where=VARX)){
if(!is.logical(VARX$contemp)){
stop("contemp must be logical")
}
if(VARX$contemp){
contemp <- TRUE
s1 <- 1
}else{
contemp <- FALSE
s1 <- 0
}
}else{
contemp <- FALSE
s1 <- 0
}
VARX <- TRUE
m <- k - k1
Y1 <- matrix(Y[, 1:k1], ncol = k1)
X <- matrix(Y[, (ncol(Y) - m + 1):ncol(Y)], ncol = m)
if (!tf) {
trainZ <- VARXCons(Y1, X, k1, p, m, s, contemp = contemp)
} else {
trainZ <- VARXCons(matrix(0, ncol = 1, nrow = nrow(X)), matrix(X, ncol = m),
k = 0, p = 0, m = m, s = s, contemp = contemp, oos = FALSE)
}
trainZ <- trainZ[2:nrow(trainZ), ]
trainY <- matrix(Y[(max(c(p, s)) + 1):nrow(Y), 1:k1], ncol = k1)
grps <- create_group_indexes(group, p, k, gran2 * length(alpha), VARX, k1,
s + s1)
groups <- grps$groups
compgroups <- grps$compgroups
activeset <- grps$activeset
starting_eigvals <- grps$starting_eigvals
if (group == "BGR") {
Grid <- seq(1, 5, by = 0.025)
grid <- Grid * sqrt(k * p)
MSFE <- matrix(0, nrow = 1, ncol = length(grid))
}
# Initial Coefficient Matrix
beta1 <- array(0, dim = c(k1, k1 * p + (k - k1) * (s + s1) + 1, gran2 * length(alpha)))
        # reuse a user-supplied warm start only if its dimensions match
        if (is.null(beta) || !identical(dim(beta), dim(beta1))) {
            beta <- beta1
        }
} else {
# No VARX
        VARX <- FALSE
        s <- 0
        s1 <- 0
        m <- 0
        Z1 <- VARXCons(Y, matrix(0, nrow = nrow(Y)), k, p, 0, 0)
        trainZ <- Z1[2:nrow(Z1), , drop = F]
        trainY <- matrix(Y[(p + 1):nrow(Y), ], ncol = k)
        k1 <- k
grps <- create_group_indexes(group, p, k, gran2)
        groups <- grps$groups
compgroups <- grps$compgroups
activeset <- grps$activeset
starting_eigvals <- grps$starting_eigvals
beta <- array(0, dim = c(k, k * p + 1, gran2 * length(alpha)))
}
if (group == "Tapered")
{
palpha <- seq(0, 1, length = 10)
palpha <- rev(palpha)
gran2 <- length(lambda) * length(palpha)
beta <- array(0, dim = c(k, k * p + 1, gran2))
}
if (group == "BGR") {
trainZ <- rbind(1, trainZ)
beta <- BGRGridSearch(trainY, trainZ, p, lambda, as.numeric(MN))
} else {
        temp <- .BigVAR.fit(group, beta, trainZ, trainY, lambda, tol, p, m, k1, k,
            s, s1, MN, C, intercept, separate_lambdas, dual, activeset, starting_eigvals,
            groups, compgroups, VARX, alpha, palpha, gamma)
beta <- temp$beta
}
    # optionally refit via relaxed least squares on the selected support
    if (RVAR) {
        for (i in seq_len(dim(beta)[3])) {
            beta_rls <- RelaxedLS(cbind(t(trainZ), trainY), beta[, , i])
            beta[, , i] <- (1 - refit_fraction) * beta[, , i] + refit_fraction * beta_rls
        }
    }
return(beta)
}
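# Hedged usage sketch for the warm-start argument 'beta': refit on extended
# data, seeding the optimizer with a previous coefficient array of matching
# dimensions (assumes the simulated dataset Y shipped with the package).
# data(Y)
# lams <- c(0.1, 0.01)
# b0 <- BigVAR.fit(Y[1:80, ], p = 4, struct = "Basic", lambda = lams)
# b1 <- BigVAR.fit(Y, p = 4, struct = "Basic", lambda = lams, beta = b0)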
# File: /scratch/gouwar.j/cran-all/cranData/BigVAR/R/BigVARFitFun.R
# Ensures that the created BigVAR object is valid
check.BigVAR <- function(object) {
errors <- character()
VARX <- object@VARX
Y <- object@Data
    if (any(is.na(Y))) {
        msg <- c("Remove NA values before running constructModel")
        errors <- c(errors, msg)
    }
    if (!is.matrix(Y)) {
        msg <- c("Y must be coercible to a matrix")
        errors <- c(errors, msg)
    }
    if (dim(Y)[2] > dim(Y)[1] & length(VARX) == 0) {
        msg <- paste("k is", ncol(Y), "which is greater than T, is Y formatted correctly (k x T)?")
        errors <- c(errors, msg)
    }
if (object@lagmax < 0) {
msg <- c("Maximal lag order must be at least 0")
errors <- c(errors, msg)
}
    if (object@lagmax == 0 & !object@Structure %in% c("Basic", "BasicEN")) {
        msg <- c("Only Basic and Basic Elastic Net VARX-L support a transfer function")
        errors <- c(errors, msg)
    }
structures <- c("Basic", "Lag", "SparseLag", "OwnOther", "SparseOO", "HLAGC", "HLAGOO", "HLAGELEM", "Tapered", "EFX",
"BGR", "BasicEN", "MCP", "SCAD")
cond1 <- object@Structure %in% structures
    if (cond1 == FALSE) {
        msg <- paste("struct must be one of", toString(structures))
        errors <- c(errors, msg)
    }
    if (object@horizon < 1) {
        msg <- paste("Forecast Horizon is", object@horizon, "; must be at least 1")
        errors <- c(errors, msg)
    }
    if (object@delta < 0) {
        msg <- paste("Huber Delta is", object@delta, "; must be positive")
        errors <- c(errors, msg)
    }
    if (object@gamma < 0) {
        msg <- paste("Gamma is", object@gamma, "; must be positive")
        errors <- c(errors, msg)
    }
    if (object@refit_fraction < 0 | object@refit_fraction > 1) {
        msg <- paste("Refit fraction is", object@refit_fraction, "; must be between 0 and 1")
        errors <- c(errors, msg)
    }
if (!(object@crossval %in% c("Rolling", "LOO", "None"))) {
msg <- c("Cross-Validation type must be one of Rolling, LOO or None")
errors <- c(errors, msg)
}
    if (length(object@Granularity) != 2 & object@ownlambdas == FALSE) {
        msg <- c("Granularity must have two parameters")
        errors <- c(errors, msg)
    }
}
if (any(object@Granularity <= 0)) {
msg <- c("Granularity parameters must be positive")
errors <- c(errors, msg)
}
structure2 <- c("Basic", "Lag", "HLAGC", "BasicEN", "MCP", "SCAD")
cond2 <- object@Structure %in% structure2
k1 <- 0
if (length(VARX) != 0) {
k1 <- VARX$k
if (k1 > ncol(Y)) {
msg <- c("k is greater than the number of columns in Y")
errors <- c(errors, msg)
}
    }
m <- ncol(Y) - k1
if (object@tf & object@lagmax != 0) {
msg <- c("p must be 0 if fitting a transfer function")
errors <- c(errors, msg)
}
nseries <- ncol(Y) - ifelse(m < ncol(Y), m, 0)
if (nseries == 1 & cond2 == FALSE) {
msg <- c("Univariate support is only available for Basic VARX-L, Lag Group VARX-L, SCAD, MCP and Componentwise HLAG")
errors <- c(errors, msg)
}
if (length(VARX) == 0 & object@Structure == "EFX") {
msg <- c("EFX is only supported in the VARX framework")
errors <- c(errors, msg)
}
    if (!object@ownlambdas & object@Granularity[2] == 1 & object@separate_lambdas) {
        msg <- c("separate lambda estimation requires more than one candidate penalty parameter")
        errors <- c(errors, msg)
    }
if (is.list(VARX) & length(VARX) > 0 & !(exists("k", where = VARX) & exists("s", where = VARX))) {
msg <- c("VARX Specifications entered incorrectly")
errors <- c(errors, msg)
}
if (object@Structure == "EFX" & !is.null(VARX$contemp)) {
if (VARX$contemp) {
msg <- c("EFX does not support contemporaneous dependence")
errors <- c(errors, msg)
}
}
structs <- c("HLAGC", "HLAGOO", "HLAGELEM")
if (length(VARX) != 0 & object@Structure %in% structs) {
msg <- c("EFX is the only nested model supported in the VARX framework")
errors <- c(errors, msg)
}
if (object@T1 > nrow(Y) | object@T2 > nrow(Y) | object@T2 < object@T1) {
msg <- c("Training dates exceed series length")
errors <- c(errors, msg)
}
if (any(object@alpha < 0) || any(object@alpha > 1)) {
msg <- c("alpha must be between zero and 1")
errors <- c(errors, msg)
}
if (object@recursive & length(VARX) > 0) {
msg <- c("recursive forecasts can only be used with VAR models")
errors <- c(errors, msg)
}
if (length(errors) == 0)
TRUE else errors
}
#' BigVAR Object Class
#'
#' An object class to be used with cv.BigVAR
#'
#' @slot Data a \eqn{T \times k} multivariate time series
#' @slot model_data processed time series and lag matrix
#' @slot lagmax Maximal lag order for modeled series
#' @slot intercept Indicator as to whether an intercept should be included
#' @slot Structure Penalty Structure
#' @slot Relaxed Indicator for relaxed VAR
#' @slot Granularity Granularity of penalty grid
#' @slot horizon Desired Forecast Horizon
#' @slot crossval Cross-Validation Procedure
#' @slot Minnesota Minnesota Prior Indicator
#' @slot verbose Indicator for Verbose output
#' @slot dates dates extracted from an xts object
#' @slot ic Indicator for including AIC and BIC benchmarks
#' @slot VARX VARX Model Specifications
#' @slot VARXI VARX Indicator
#' @slot T1 Index of the time series at which to start cross validation
#' @slot T2 Index of the time series at which to start forecast evaluation
#' @slot ONESE Indicator for 'One Standard Error Heuristic'
#' @slot ownlambdas Indicator for user-supplied lambdas
#' @slot tf Indicator for transfer function
#' @slot alpha Grid of candidate alpha values (applies only to Sparse VARX-L and Elastic Net models)
#' @slot recursive Indicator as to whether recursive multi-step forecasts are used (applies only to multiple horizon VAR models)
#' @slot constvec vector indicating variables to shrink toward a random walk instead of toward zero (valid only if Minnesota is \code{TRUE})
#' @slot tol optimization tolerance
#' @slot window.size size of rolling window. If set to NULL an expanding window will be used.
#' @slot separate_lambdas indicator to use separate penalty parameter for each time series (default \code{FALSE})
#' @slot loss Loss function to select penalty parameter (one of 'L1','L2','Huber').
#' @slot delta delta parameter for Huber loss (default 2.5)
#' @slot gamma gamma parameter for SCAD or MCP penalty (default 3)
#' @slot rolling_oos True or False: indicator to update the penalty parameter over the evaluation period (default \code{False})
#' @slot linear indicator for linearly decrementing penalty grid (FALSE is log-linear).
#' @slot refit_fraction fraction of least squares refit to incorporate (default is 1).
#' @details To construct an object of class BigVAR, use the function \code{\link{constructModel}}
#' @seealso \code{\link{constructModel}}
#' @export
setClass(Class = "BigVAR", representation(Data = "matrix", model_data = "list", lagmax = "numeric", Structure = "character",
Relaxed = "logical", Granularity = "numeric", intercept = "logical", Minnesota = "logical", horizon = "numeric", verbose = "logical",
crossval = "character", ic = "logical", VARX = "list", T1 = "numeric", T2 = "numeric", ONESE = "logical", ownlambdas = "logical",
tf = "logical", alpha = "numeric", recursive = "logical", dates = "character", constvec = "numeric", tol = "numeric",
window.size = "numeric", separate_lambdas = "logical", loss = "character", delta = "numeric", gamma = "numeric", rolling_oos = "logical",
VARXI = "logical", linear = "logical", refit_fraction = "numeric"), validity = check.BigVAR)
#' Construct an object of class BigVAR
#' @param Y \eqn{T \times k} multivariate time series or Y \eqn{T \times (k+m)} endogenous and exogenous series, respectively.
#' @param p Predetermined maximal lag order (for modeled series).
#' @param struct The choice of penalty structure (see details).
#' @param gran vector of penalty parameter specifications.
#' @param h Desired forecast horizon.
#' @param cv Cross-validation approach, either 'Rolling' for rolling cross-validation or 'LOO' for leave-one-out cross-validation. 'None' for use with BigVAR.fit.
#' @param verbose Verbose output while estimating.
#' @param IC True or False: whether to include AIC and BIC benchmarks.
#' @param VARX List containing VARX model specifications.
#' @param T1 Index of the time series at which to start cross validation.
#' @param T2 Index of the time series at which to start forecast evaluation.
#' @param ONESE True or False: whether to use the 'One Standard Error Heuristic.'
#' @param ownlambdas True or False: Indicator for user-supplied penalty parameters.
#' @param recursive True or False: Indicator as to whether iterative multi-step predictions are desired in the VAR context if the forecast horizon is greater than 1.
#' @param loss Loss function to select penalty parameter (one of 'L1','L2','Huber')
#' @param dates optional vector of dates corresponding to \eqn{Y}.
#' @param separate_lambdas indicator for separate penalty parameters for each time series (default \code{FALSE}).
#' @param window.size size of rolling window. If set to 0 an expanding window will be used.
#' @param linear indicator for linearly decrementing penalty grid (FALSE is log-linear; default \code{FALSE}).
#' @param rolling_oos True or False: indicator to update the penalty parameter over the evaluation period (default \code{False})
#' @param model.controls named list of control parameters for BigVAR model estimation (see details).
#' @details The choices for 'struct' are as follows
#' \itemize{
#' \item{ 'Basic' (Basic VARX-L)}
#' \item{ 'BasicEN' (Elastic Net VARX-L)}
#' \item{ 'Lag' (Lag Group VARX-L)}
#' \item{ 'SparseLag' (Lag Sparse Group VARX-L)}
#' \item{ 'OwnOther' (Own/Other Group VARX-L) }
#' \item{ 'SparseOO' (Own/Other Sparse Group VARX-L) }
#' \item{ 'EFX' (Endogenous First VARX-L)}
#' \item{ 'HLAGC' (Componentwise HLAG) }
#' \item{ 'HLAGOO' (Own/Other HLAG) }
#' \item{ 'HLAGELEM' (Elementwise HLAG)}
#' \item{ 'Tapered' (Lag weighted Lasso VAR)}
#' \item{ 'BGR' (Bayesian Ridge Regression (cf. Banbura et al))}
#' \item{ 'MCP' (Minimax Concave Penalty (cf. Breheny and Huang))}
#' \item{ 'SCAD' (Smoothly Clipped Absolute Deviation Penalty (cf. Breheny and Huang))}
#' }
#'
#' The first number in the vector 'gran' specifies how deep to construct the penalty grid and the second specifies how many penalty parameters to use. For example, \code{gran=c(50,10)} requests a grid of 10 penalty parameters in which the smallest value is 1/50th of the largest (the smallest value that sets all coefficients to zero). If ownlambdas is set to TRUE, gran should contain the user-supplied penalty parameters.
#'
#' VARX specifications consist of a named list with entry k denoting the series that are to be modeled and entry s to denote the maximal lag order for exogenous series.
#'
#' As the capabilities of BigVAR have expanded, we have decided to consolidate parameters in the list model.controls. These parameters include:
#' \itemize{
#' \item{ 'alpha:' grid of candidate parameters for the alpha in the Basic Elastic Net, Sparse Lag, Sparse Own/Other VARX-L.}
#' \item{ 'C:' vector of coefficients to shrink toward a random walk (if \code{MN} is \code{TRUE}).}
#' \item{ 'delta:' parameter for Huber loss (default 2.5)}
#' \item{ 'intercept:' option to fit an intercept, default \code{TRUE}}
#' \item{ 'loss:' Loss function to select penalty parameter (one of 'L1','L2','Huber')}
#' \item{ 'MN:' Minnesota Prior Indicator, default \code{FALSE}}
#' \item{ 'RVAR:' option to refit based upon the support selected using the Relaxed-VAR procedure (default FALSE).}
#' \item{ 'refit_fraction:' If RVAR is \code{TRUE}, proportional tradeoff between least squares fit and penalized fit (default 1).}
#' \item{ 'tol:' optimization tolerance (default 1e-4)}
#' }
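#'
#' For example, \code{model.controls=list(MN=TRUE, alpha=0.4)} requests a Minnesota prior together with (for the elastic net and sparse group structures) a single candidate alpha of 0.4.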
#'
#' The argument alpha is ignored unless the structure choice is 'BasicEN,' 'SparseLag,' or 'SparseOO.' By default 'alpha' is set to \code{NULL} and will be initialized as 1/(k+1) in \code{cv.BigVAR} and \code{BigVAR.est}. Any user supplied values must be between 0 and 1.
#' @note The specifications 'Basic,' 'BasicEN,' 'Lag,' 'SparseLag,' 'SparseOO,' 'OwnOther,' 'MCP,' and 'SCAD' can accommodate both VAR and VARX models. EFX only applies to VARX models. 'HLAGC,' 'HLAGOO,' 'HLAGELEM,' and 'Tapered' can only be used with VAR models. Our implementation of the SCAD and MCP penalties is heavily influenced by the package \code{ncvreg}.
#'
#' @seealso \code{\link{cv.BigVAR}},\code{\link{BigVAR.est}}
#'
#' @references
#' Banbura, Marta, Domenico Giannone, and Lucrezia Reichlin. 'Large Bayesian vector auto regressions.' Journal of Applied Econometrics 25.1 (2010): 71-92.
#' Breheny P, Huang J (2011). “Coordinate descent algorithms for nonconvex penalized regression, with applications to biological feature selection.” Annals of Applied Statistics, 5(1), 232–253.
#' Nicholson, William, I. Wilms, J. Bien, and D. S. Matteson. High dimensional forecasting via interpretable vector autoregression. Journal of Machine Learning Research, 21(166):1–52, 2020.
#' William B. Nicholson, David S. Matteson, Jacob Bien,VARX-L: Structured regularization for large vector autoregressions with exogenous variables, International Journal of Forecasting, Volume 33, Issue 3, 2017, Pages 627-651,
#' William B Nicholson, David S. Matteson, and Jacob Bien (2016), 'BigVAR: Tools for Modeling Sparse High-Dimensional Multivariate Time Series' arxiv:1702.07094
#' @examples
#' # VARX Example
#' # Create a Basic VARX-L with k=2, m=1, s=2, p=4
#' VARX=list()
#' VARX$k=2 # indicates that the first two series are modeled
#' VARX$s=2 # sets 2 as the maximal lag order for exogenous series
#' data(Y)
#' T1=floor(nrow(Y)/3)
#' T2=floor(2*nrow(Y)/3)
#' Model1=constructModel(Y,p=4,struct='Basic',gran=c(50,10),verbose=FALSE,VARX=VARX,T1=T1,T2=T2)
#' @export
constructModel <- function(Y, p, struct, gran, h = 1, cv = "Rolling", verbose = TRUE, IC = TRUE, VARX = list(), T1 = floor(nrow(Y)/3),
T2 = floor(2 * nrow(Y)/3), ONESE = FALSE, ownlambdas = FALSE, recursive = FALSE, dates = as.character(NULL), window.size = 0,
separate_lambdas = FALSE, linear = FALSE, loss = "L2", rolling_oos = FALSE, model.controls = list()) {
if (exists("RVAR", where = model.controls)) {
RVAR <- model.controls$RVAR
} else {
RVAR <- FALSE
}
if (exists("refit_fraction", where = model.controls)) {
refit_fraction <- model.controls$refit_fraction
} else {
refit_fraction <- 1
}
if (exists("alpha", where = model.controls)) {
alpha = model.controls$alpha
} else {
alpha <- as.double(NULL)
}
if (exists("tol", where = model.controls)) {
tol <- model.controls$tol
} else {
tol <- 1e-04
}
if (exists("MN", where = model.controls)) {
MN <- model.controls$MN
} else {
MN <- FALSE
}
if (exists("C", where = model.controls)) {
C <- model.controls$C
} else {
C <- as.double(NULL)
}
if (exists("delta", where = model.controls)) {
delta <- model.controls$delta
} else {
delta <- 2.5
}
if (exists("gamma", where = model.controls)) {
gamma <- model.controls$gamma
} else {
gamma <- 3
}
if (exists("intercept", where = model.controls)) {
intercept <- model.controls$intercept
} else {
intercept <- TRUE
}
# remove alpha if it's not used
if (length(alpha) != 0 & !struct %in% c("BasicEN", "SparseLag", "SparseOO")) {
alpha <- as.double(NULL)
}
if (any(is.na(Y))) {
stop("Remove NA values before running constructModel")
}
if (dim(Y)[2] > dim(Y)[1] & length(VARX) == 0) {
warning("k is greater than T, is Y formatted correctly (k x T)?")
}
if (p < 0) {
stop("Maximal lag order must be at least 0")
}
    if (p == 0 & !struct %in% c("Basic", "BasicEN")) {
        stop("Only Basic and Basic Elastic Net VARX-L support a transfer function")
    }
oldnames <- c("None", "Diag", "SparseDiag")
if (struct %in% oldnames)
stop("Naming Convention for these structures has changed. Use Basic, OwnOther, and SparseOO.")
structures <- c("Basic", "Lag", "SparseLag", "OwnOther", "SparseOO", "HLAGC", "HLAGOO", "HLAGELEM", "Tapered", "EFX",
"BGR", "BasicEN", "MCP", "SCAD")
if (struct == "BasicEN" & length(alpha) > 1 & separate_lambdas) {
stop("Cannot perform separate lambdas per series and range of alphas simultaneously")
}
cond1 <- struct %in% structures
if (!loss %in% c("L1", "L2", "Huber")) {
stop("loss must be one of L1,L2,Huber")
}
    if (!cond1) {
        stop(paste("struct must be one of", toString(structures)))
    }
if (h < 1) {
stop("Forecast Horizon must be at least 1")
}
if (cv != "None" & T1 - 2 * h - p < 0) {
stop("Forecast Horizon too long; increase T1 or decrease h ")
}
if (!(cv %in% c("Rolling", "LOO", "None"))) {
stop("Cross-Validation type must be one of Rolling, LOO or None")
}
if (length(gran) != 2 & ownlambdas == FALSE) {
stop("Granularity must have two parameters")
}
if (any(gran <= 0)) {
stop("Granularity parameters must be positive")
}
if (tol < 0 | tol > 0.1) {
stop("Tolerance must be positive")
}
if (window.size > nrow(Y) | window.size < 0) {
stop("window size must be shorter than the series length")
}
if (delta < 0) {
stop("huber delta must be positive")
}
bss <- c("Basic", "HLAGC", "HLAGOO", "HLAGELEM", "BasicEN", "SCAD", "MCP")
    if (separate_lambdas & !struct %in% bss) {
stop(paste("separate lambda estimation only available for ", toString(bss)))
}
start_ind <- (T1 - p - h + 1)
if (cv == "Rolling" & start_ind < 5) {
stop("too few rows for rolling validation, try running BigVAR.fit")
}
if (MN & intercept) {
intercept <- FALSE
}
structure2 <- c("Basic", "Lag", "HLAGC", "BasicEN", "MCP", "SCAD")
cond2 <- struct %in% structure2
if (length(VARX) != 0) {
k <- VARX$k
if (k > ncol(Y)) {
stop("k is greater than the number of columns in Y")
}
} else {
k <- ncol(Y)
}
m <- ncol(Y) - k
nseries <- ncol(Y) - ifelse(m < ncol(Y), m, 0)
if (p == 0) {
tf <- TRUE
} else {
tf <- FALSE
}
    if (nseries == 1 & cond2 == FALSE) {
        stop("Univariate support is only available for Basic, Elastic Net, Lag Group, Componentwise HLAG, MCP, and SCAD")
    }
if (length(VARX) == 0 & struct == "EFX") {
stop("EFX is only supported in the VARX framework")
}
if (struct == "EFX" & !is.null(VARX$contemp)) {
if (VARX$contemp) {
stop("EFX does not support contemporaneous dependence")
}
}
structs <- c("HLAGC", "HLAGOO", "HLAGELEM")
if (length(VARX) != 0 & struct %in% structs) {
stop("EFX is the only nested model supported in the VARX framework")
}
if (length(VARX) != 0 & struct == "BGR") {
stop("BGR is only available in the VAR framework")
}
if (length(VARX) != 0 & struct == "Tapered") {
stop("Lag-Weighted Lasso only available in VAR framework")
}
if (T1 > nrow(Y) | T2 > nrow(Y) | T2 < T1) {
stop("Training dates exceed series length")
}
if (is.list(VARX) & length(VARX) > 0 & !(exists("k", where = VARX) & exists("s", where = VARX))) {
stop("VARX Specifications entered incorrectly")
}
if (!is.null(alpha)) {
if (any(alpha < 0) || any(alpha > 1)) {
stop("alpha must be between 0 and 1")
}
}
if (length(C) != 0) {
if (length(C) != k) {
stop("C must have length k")
}
if (!all(C %in% c(0, 1))) {
stop("Values of C must be either 0 or 1")
}
} else {
C <- rep(1, k)
}
if (length(dates) != 0) {
ind <- dates
} else {
ind <- as.character(NULL)
}
model_data <- VARXConsModel(Y, p, VARX, tf)
var_data <- list(trainY = model_data$trainY, trainZ = model_data$trainZ)
VARXI <- model_data$VARXI
if (VARXI) {
VARX$s1 <- model_data$s1
VARX$contemp <- model_data$contemp
}
if (VARXI & window.size > 0) {
window.size <- window.size + 1
}
if (ncol(model_data$trainY) == 1 & separate_lambdas) {
separate_lambdas <- FALSE
}
(BV1 <- new("BigVAR", Data = Y, model_data = var_data, lagmax = p, Structure = struct, Relaxed = RVAR, Granularity = gran,
Minnesota = MN, verbose = verbose, horizon = h, crossval = cv, ic = IC, VARX = VARX, VARXI = VARXI, T1 = T1, T2 = T2,
ONESE = ONESE, ownlambdas = ownlambdas, tf = tf, alpha = alpha, recursive = recursive, dates = ind, constvec = C,
intercept = intercept, tol = tol, window.size = window.size, separate_lambdas = separate_lambdas, loss = loss, delta = delta,
gamma = gamma, rolling_oos = rolling_oos, linear = linear, refit_fraction = refit_fraction))
return(BV1)
}
# show-default method to show an object when its name is printed in the console.
#' Default show method for an object of class BigVAR
#'
#' @param object \code{BigVAR} object created from \code{constructModel}
#' @return Displays the following information about the BigVAR object:
#' \itemize{
#' \item{Prints the first 5 rows of \code{Y}}
#' \item{ Penalty Structure}
#' \item{ Relaxed Least Squares Indicator}
#' \item{Maximum lag order}
#' \item{ VARX Specifications (if applicable)}
#' \item{Start, end of cross validation period}
#' }
#' @seealso \code{\link{constructModel}}
#' @name show.BigVAR
#' @aliases show,BigVAR-method
#' @docType methods
#' @rdname show-methods
#' @export
setMethod("show", "BigVAR", function(object) {
T1P <- ifelse(length(object@dates) != 0, object@dates[object@T1], object@T1)
T2P <- ifelse(length(object@dates) != 0, object@dates[object@T2], object@T2)
nrowShow <- min(5, nrow(object@Data))
cat("*** BIGVAR MODEL *** \n")
cat("Data (First 5 Observations):\n")
print(formatC(object@Data[1:nrowShow, ]), quote = FALSE)
cat("Structure\n")
print(object@Structure)
cat("Forecast Horizon \n")
print(object@horizon)
cat("Relaxed VAR \n")
print(object@Relaxed)
cat("Minnesota Prior \n")
print(object@Minnesota)
cat("Maximum Lag Order \n")
print(object@lagmax)
if (length(object@VARX) != 0) {
cat("VARX Specs \n")
print(object@VARX)
}
cat("Start of Cross Validation Period \n")
print(T1P)
cat("End of Cross Validation Period \n")
print(T2P)
})
#' Plot a BigVAR object
#'
#' @param x BigVAR object created from \code{constructModel}
#' @param y NULL
#' @param ... additional plot arguments
#' @return NA, side effect is graph
#' @details Uses plot.zoo to plot each individual series of \code{Y} on a single plot
#' @name plot.BigVAR
#' @import methods
#' @seealso \code{\link{constructModel}}
#' @aliases plot,BigVAR-method
#' @docType methods
#' @method plot method
#' @rdname plot.BigVAR-methods
#' @export
#' @importFrom zoo plot.zoo
#' @importFrom zoo as.zoo
#' @importFrom zoo zoo
#' @importFrom zoo as.yearqtr
#' @importFrom zoo index
#' @importFrom graphics abline
#' @importFrom graphics legend
setMethod(f = "plot", signature = "BigVAR", definition = function(x, y = NULL, ...) {
T1P <- ifelse(length(x@dates) != 0, x@dates[x@T1], x@T1)
T2P <- ifelse(length(x@dates) != 0, x@dates[x@T2], x@T2)
g <- ncol(x@Data)
names <- ifelse(rep(!is.null(colnames(x@Data)), ncol(x@Data)), colnames(x@Data), as.character(1:g))
if (length(x@dates) != 0) {
dates <- as.yearqtr(x@dates)
} else {
dates <- seq_len(nrow(x@Data))
}
Yzoo <- zoo(as.matrix(x@Data), order.by = dates)
plot.zoo(Yzoo, plot.type = "single", col = 1:g)
legend("topright", names, lty = 1, col = 1:g)
abline(v = index(Yzoo[as.yearqtr(T1P)]))
abline(v = index(Yzoo[as.yearqtr(T2P)]))
})
#' Cross Validation for BigVAR
#'
#' @usage cv.BigVAR(object)
#' @param object BigVAR object created from \code{ConstructModel}
#' @details The main function of the BigVAR package. Performs cross validation to select penalty parameters over a training sample (as the minimizer of in-sample MSFE), then evaluates them over a test set. Compares against sample mean, random walk, AIC, and BIC benchmarks. Creates an object of class \code{BigVAR.results}
#' @return An object of class \code{BigVAR.results}.
#' @seealso \code{\link{constructModel}}, \code{\link{BigVAR.results}},\code{\link{BigVAR.est}}
#' @name cv.BigVAR
#' @aliases cv.BigVAR,BigVAR-method
#' @docType methods
#' @rdname cv.BigVAR-methods
#' @examples
#' data(Y)
#' # Fit a Basic VARX-L with rolling cross validation
#' Model1=constructModel(Y,p=4,struct='Basic',gran=c(50,10), verbose=FALSE)
#' results=cv.BigVAR(Model1)
#' @importFrom abind adrop
#' @importFrom abind abind
#' @export
setGeneric(name = "cv.BigVAR", def = function(object) {
standardGeneric("cv.BigVAR")
})
# Cross-validation and evaluation function
setMethod(f = "cv.BigVAR", signature = "BigVAR", definition = function(object) {
p <- object@lagmax
Y <- object@Data
k <- ncol(Y)
alpha <- object@alpha
gamma <- object@gamma
RVAR <- object@Relaxed
refit_fraction <- object@refit_fraction
group <- object@Structure
cvtype <- object@crossval
if(cvtype=='None'){
stop("set cv to rolling or LOO to run cv.BigVAR")
}
intercept <- object@intercept
recursive <- object@recursive
VARX <- object@VARX
VARXI <- object@VARXI
tol <- object@tol
    window.size <- object@window.size
verbose <- object@verbose
loss <- object@loss
delta <- object@delta
linear <- object@linear
rolling_oos <- object@rolling_oos
if (length(alpha) == 0) {
if (length(VARX) > 0) {
alpha <- 1/(VARX$k + 1)
} else {
alpha <- 1/(k + 1)
}
}
C <- object@constvec
if (length(alpha) > 1 & group %in% c("SparseLag", "SparseOO", "BasicEN")) {
dual <- TRUE
} else {
dual <- FALSE
}
MN <- object@Minnesota
h <- object@horizon
jj <- 0
separate_lambdas <- object@separate_lambdas
if (!is.matrix(Y)) {
Y <- matrix(Y, ncol = 1)
}
if (object@crossval == "Rolling") {
T1 <- object@T1
T1 <- floor(T1)
} else {
T1 <- p + h + 1
}
T2 <- object@T2
T2 <- floor(T2)
ONESE <- object@ONESE
if (object@ownlambdas) {
lambda <- object@Granularity
gran2 <- length(lambda)
if (gran2 == 1) {
ONESE <- FALSE
}
} else if (object@Granularity[2] == 1) {
warning("only one penalty parameter; more efficient to run BigVAR.est instead of cv.BigVAR")
}
if (!object@ownlambdas) {
gran2 <- object@Granularity[2]
gran1 <- object@Granularity[1]
}
trainZ <- object@model_data$trainZ
trainY <- object@model_data$trainY
if (VARXI) {
k1 <- object@VARX$k
s <- object@VARX$s
contemp <- object@VARX$contemp
        s1 <- object@VARX$s1
m <- ncol(Y) - ncol(trainY)
beta <- array(0, dim = c(k1, k1 * p + (k - k1) * (s + s1) + 1, gran2 * length(alpha)))
} else {
k1 <- 0
s <- 0
contemp <- FALSE
s1 <- 0
m <- 0
beta <- array(0, dim = c(k, k * p + 1, gran2 * length(alpha)))
}
if (object@crossval == "Rolling") {
T1 <- T1 - max(p, s)
T2 <- T2 - max(p, s)
}
grps <- create_group_indexes(group, p, k, gran2 * length(alpha), VARXI, k1, s + s1)
groups <- grps$groups
compgroups <- grps$compgroups
activeset <- grps$activeset
starting_eigvals <- grps$starting_eigvals
if (object@ownlambdas) {
lambda <- object@Granularity
} else {
gran2 <- object@Granularity[2]
gran1 <- object@Granularity[1]
lambda <- create_lambda_grid(trainY[1:T2, , drop = FALSE], trainZ[, 1:T2], lapply(groups, function(x) {
x + 1
}), gran1, gran2, group, p, k1, s + s1, m, k, MN, alpha, C, intercept, tol, VARXI, separate_lambdas, dual, gamma,
linear, verbose)
}
h <- object@horizon
ZFull <- list()
if (!is.matrix(trainZ)) {
trainZ <- matrix(trainZ, ncol = 1)
}
if (!is.matrix(trainY)) {
trainY <- matrix(trainY, ncol = 1)
}
ZFull$Z <- trainZ
ZFull$Y <- trainY
T3 <- nrow(trainY)
if (group == "Tapered") {
palpha <- seq(0, 1, length = 10)
palpha <- rev(palpha)
gran2 <- length(lambda) * length(palpha)
beta <- array(0, dim = c(k, k * p + 1, gran2))
} else {
palpha <- NULL
}
if (!is.matrix(ZFull$Y)) {
ZFull$Y <- matrix(ZFull$Y, ncol = 1)
}
if (!dual) {
if (separate_lambdas) {
if (!VARXI) {
MSFE <- array(0, dim = c(T2 - T1 + 1, gran2, k))
} else {
MSFE <- array(0, dim = c(T2 - T1 + 1, gran2, k1))
}
} else {
MSFE <- matrix(0, nrow = T2 - T1 + 1, ncol = gran2)
lambda <- as.matrix(lambda)
}
} else {
nalpha <- length(alpha)
MSFE <- matrix(0, nrow = T2 - T1 + 1, ncol = gran2 * nalpha)
}
if (verbose) {
pb <- txtProgressBar(min = T1, max = T2, style = 3)
cat("Validation Stage:", group)
}
YT <- Y[1:T2, , drop = FALSE]
betaWS <- beta
for (v in (T1 - h + 1):T2) {
if (v + h - 1 > T2) {
break
}
if (cvtype == "Rolling") {
if (h > 1 & !recursive) {
if (window.size != 0) {
ws1 <- max(c(v - window.size - h, 1))
                    index_y <- (ws1 + h - 1):(v - 1)
                    index_z <- ws1:(v - h)
trainY <- ZFull$Y[index_y, , drop = FALSE]
trainZ <- ZFull$Z[, index_z,drop=FALSE]
} else {
index_y <- (h):(v - 1)
index_z <- 1:(v - h)
trainY <- ZFull$Y[index_y, , drop = FALSE]
trainZ <- ZFull$Z[, index_z, drop = F]
}
} else {
if (window.size != 0) {
ws1 <- max(c(v - window.size, 1))
trainY <- ZFull$Y[(ws1):(v - 1), , drop = FALSE]
trainZ <- ZFull$Z[, (ws1):(v - 1), drop = FALSE]
} else {
trainY <- ZFull$Y[(1):(v - 1), , drop = FALSE]
trainZ <- ZFull$Z[, (1):(v - 1), drop = FALSE]
}
}
} else {
if (VARXI) {
YT2 <- YT[-v, , drop = FALSE]
Y1 <- YT2[, 1:k1, drop = FALSE]
X <- YT2[, (ncol(YT2) - m + 1):ncol(YT2), drop = FALSE]
trainZ <- VARXCons(Y1, X, k1, p, m, s, contemp = contemp)
trainZ <- trainZ[2:nrow(trainZ), , drop = FALSE]
trainY <- YT2[(max(c(p, s)) + 1):nrow(YT2), 1:k1, drop = FALSE]
} else {
YT2 <- YT[-v, , drop = FALSE]
Z1 <- VARXCons(YT2, matrix(0, nrow = nrow(YT2)), k, p, 0, 0)
trainZ <- Z1[2:nrow(Z1), ]
trainY <- YT2[(p + 1):nrow(YT2), , drop = FALSE]
}
}
temp <- .BigVAR.fit(group, betaWS, trainZ, trainY, lambda, tol, p, m, k1, k, s, s1, MN, C, intercept, separate_lambdas,
dual, activeset, starting_eigvals, groups, compgroups, VARXI, alpha, palpha, gamma)
beta <- temp$beta
betaWS <- temp$beta
if (MN) {
for (i in 1:dim(betaWS)[3]) {
submat <- adrop(betaWS[1:dim(betaWS)[1], 1:dim(betaWS)[1], i, drop = F], drop = 3)
diag(submat) <- diag(submat) - C
betaWS[1:dim(betaWS)[1], 1:dim(betaWS)[1], i] <- submat
}
}
activeset <- temp$activeset
q1a <- temp$q1a
eZ <- c(1, ZFull$Z[, v])
msfe_index <- v - (T1 - h)
temp_results <- refine_and_forecast(beta, as.matrix(eZ), trainZ, trainY, ZFull$Y[v + h - 1, , drop = F], lambda = lambda,
h = h, recursive = recursive, MN = MN, RVAR = RVAR, refit_fraction = refit_fraction, separate_lambdas = separate_lambdas,
inds = NULL, loss = loss, delta = delta, k = k, p = p, k1 = k1, s = s, oos = FALSE)
if (separate_lambdas) {
MSFE[msfe_index, , ] <- temp_results$MSFE
} else {
MSFE[msfe_index, ] <- temp_results$MSFE
}
if (verbose) {
setTxtProgressBar(pb, v)
}
}
tapered <- group == "Tapered"
optimal_indices <- find_optimal_lambda(MSFE, lambda, palpha, alpha, ONESE, dual, separate_lambdas, tapered)
optind <- optimal_indices$optind
lambdaopt <- optimal_indices$lambdaopt
alphaopt <- optimal_indices$alphaopt
int_results <- new("BigVAR.intermediate", ZFull = ZFull, InSampMSFE = MSFE, InSampSD = apply(MSFE, 2, sd), index = optind,
OptimalLambda = lambdaopt, dual = dual, contemp = contemp, LambdaGrid = as.matrix(lambda), object, T1 = T2, T2 = T3,
alpha = alphaopt)
OOSEval <- BigVAR.Eval(int_results)
MSFEOOSAgg <- na.omit(OOSEval$MSFE)
betaPred <- OOSEval$betaPred
betaArray <- OOSEval$betaArray
lambda_evolve <- OOSEval$lambda_evolve
preds <- OOSEval$predictions
Y <- object@Data
if (VARXI) {
if (contemp) {
OOS <- FALSE
} else {
OOS <- TRUE
}
if (!object@tf) {
Zvals <- VARXCons(matrix(Y[, 1:k1], ncol = k1), matrix(Y[, (ncol(Y) - m + 1):ncol(Y)], ncol = m), k1, p, m, s,
oos = OOS, contemp = contemp)
} else {
Zvals <- VARXCons(matrix(0, ncol = 1, nrow = nrow(Y)), matrix(Y[, (ncol(Y) - m + 1):ncol(Y)], ncol = m), 0, 0,
m, s, oos = FALSE, contemp = contemp)
}
} else {
m <- 0
s <- 0
k1 <- k
Zvals <- VARXCons(matrix(Y[, 1:k1], ncol = k1), matrix(0, nrow = nrow(Y)), k1, p, m, s, oos = TRUE)
}
Zvals <- matrix(Zvals[, ncol(Zvals)], ncol = 1)
if (ncol(Y) == 1 | k1 == 1) {
betaPred <- matrix(betaPred, nrow = 1)
}
lagmatrix <- rbind(rep(1, ncol(ZFull$Z)), ZFull$Z)
fitted <- t(betaPred %*% lagmatrix)
resids <- ((ZFull$Y) - fitted)
MSFEOOS <- mean(na.omit(MSFEOOSAgg))
seoos <- sd(na.omit(MSFEOOSAgg))/sqrt(length(na.omit(MSFEOOSAgg)))
if (!VARXI) {
k1 <- k
}
meanbench <- .evalMean(ZFull$Y[, 1:k1], T2, T3, h = h, loss = loss, delta = delta)
RWbench <- .evalRW(ZFull$Y[, 1:k1], T2, T3, h = h, loss = loss, delta = delta)
if (object@ic == FALSE | object@tf) {
AICbench <- list()
AICbench$Mean <- as.double(NA)
AICbench$SD <- as.double(NA)
AICbench$preds <- as.matrix(NA)
AICbench$pvec <- as.double(NA)
AICbench$svec <- as.double(NA)
BICbench <- list()
BICbench$Mean <- as.double(NA)
BICbench$SD <- as.double(NA)
BICbench$preds <- as.matrix(NA)
BICbench$pvec <- as.double(NA)
BICbench$svec <- as.double(NA)
} else {
if (!VARXI) {
X <- matrix(0, nrow = nrow(Y), ncol = k)
AICbench1 <- VARXForecastEval(matrix(ZFull$Y, ncol = k), X, p, 0, T2, T3, "AIC", h, loss = loss, delta = delta)
AICbench <- list()
AICbench$Mean <- mean(AICbench1$MSFE)
AICbench$SD <- sd(AICbench1$MSFE)/sqrt(length(AICbench1$MSFE))
AICbench$preds <- AICbench1$pred
AICbench$pvec <- AICbench1$p
AICbench$svec <- AICbench1$s
BICbench1 <- VARXForecastEval(matrix(ZFull$Y, ncol = k), X, p, 0, T2, T3, "BIC", h, loss = loss, delta = delta)
BICbench <- list()
BICbench$Mean <- mean(BICbench1$MSFE)
BICbench$SD <- sd(BICbench1$MSFE)/sqrt(length(BICbench1$MSFE))
BICbench$preds <- BICbench1$pred
BICbench$pvec <- BICbench1$p
BICbench$svec <- BICbench1$s
} else {
offset <- max(c(p, s))
X <- matrix(Y[(offset + 1):nrow(Y), (k1 + 1):ncol(Y)], ncol = m)
AICbench1 <- VARXForecastEval(matrix(ZFull$Y[, 1:k1], ncol = k1), as.matrix(X), p, s, T2, T3, "AIC", h = h, loss = loss,
delta = delta)
AICbench <- list()
AICbench$Mean <- mean(AICbench1$MSFE)
AICbench$SD <- sd(AICbench1$MSFE)/sqrt(length(AICbench1$MSFE))
AICbench$preds <- AICbench1$pred
AICbench$pvec <- AICbench1$p
AICbench$svec <- AICbench1$s
BICbench1 <- VARXForecastEval(matrix(ZFull$Y[, 1:k1], ncol = k1), X, p, s, T2, T3, "BIC", h = h, loss = loss,
delta = delta)
BICbench <- list()
BICbench$Mean <- mean(BICbench1$MSFE)
BICbench$SD <- sd(BICbench1$MSFE)/sqrt(length(BICbench1$MSFE))
BICbench$preds <- BICbench1$pred
BICbench$pvec <- BICbench1$p
BICbench$svec <- BICbench1$s
}
}
if (!VARXI) {
contemp <- FALSE
}
    if (separate_lambdas) {
        tmean <- t(apply(MSFE, 3, colMeans))
        isMSFE <- MSFE
    } else {
        isMSFE <- array(MSFE, dim = c(nrow(MSFE), ncol(MSFE), 1))
    }
sparse_count <- function(x) {
x_ss <- x[, 2:ncol(x)]
sc <- length(x_ss[x_ss != 0])/length(x_ss)
sc
}
sc <- mean(apply(betaArray, 3, sparse_count))
results <- new("BigVAR.results", InSampMSFE = isMSFE, InSampSD = apply(MSFE, 2, sd)/sqrt(nrow(MSFE)), LambdaGrid = as.matrix(lambda),
index = optind, OptimalLambda = lambdaopt, OOSMSFE = as.matrix(MSFEOOSAgg), seoosmsfe = seoos, MeanMSFE = meanbench$Mean,
AICMSFE = AICbench$Mean, AICpvec = AICbench$pvec, AICsvec = AICbench$svec, AICPreds = AICbench$preds, BICpvec = BICbench$pvec,
BICsvec = BICbench$svec, BICPreds = BICbench$preds, RWMSFE = RWbench$Mean, RWPreds = RWbench$preds, MeanSD = meanbench$SD,
MeanPreds = meanbench$preds, AICSD = AICbench$SD, BICMSFE = BICbench$Mean, BICSD = BICbench$SD, RWSD = RWbench$SD,
sparse_count = sc, betaPred = betaPred, Zvals = Zvals, resids = resids, VARXI = VARXI, preds = preds, alpha = alphaopt,
fitted = fitted, lagmatrix = lagmatrix, betaArray = betaArray, dual = dual, contemp = contemp, lambda_evolve_path = lambda_evolve,
object)
return(results)
})
#' BigVAR.intermediate
#'
#' This class contains the in-sample results for cv.BigVAR.
#'
#' It inherits the class BigVAR and contains the results from rolling validation.
#' @field ZFull List containing full lag matrix and time series
#' @field InSampMSFE In-sample MSFE from optimal value of lambda
#' @field LambdaGrid Grid of candidate lambda values
#' @field index Index order of optimal lambda value
#' @field OptimalLambda Value of lambda that minimizes MSFE
#' @field Data a \eqn{T \times k} or \eqn{T \times (k + m)} multivariate time series
#' @field lagmax Maximal lag order
#' @field Structure Penalty structure
#' @field Relaxed Indicator for relaxed VAR
#' @field Granularity Granularity of penalty grid
#' @field horizon Desired forecast horizon
#' @field crossval Cross-Validation procedure
#' @field alpha additional penalty parameter for Sparse Lag Group or Sparse Own/Other methods. Will contain either the heuristic choice of \eqn{1/(k+1)} or the value selected by cross validation if the argument \code{dual} is set to \code{TRUE}
#' @field Minnesota Minnesota Prior Indicator
#' @field verbose verbose indicator
#' @field dual indicator as to whether dual cross validation was conducted
#' @field contemp indicator if contemporaneous exogenous predictors are used
#'
#' @note One can also access any object of class BigVAR from BigVAR.intermediate
#' @name BigVAR.intermediate
#' @rdname BigVAR.intermediate
#' @aliases BigVAR.intermediate-class
#' @exportClass BigVAR.intermediate
#' @author Will Nicholson
#' @export
setClass("BigVAR.intermediate", representation(ZFull = "list", InSampMSFE = "array", InSampSD = "numeric", LambdaGrid = "matrix",
index = "numeric", OptimalLambda = "numeric", dual = "logical", contemp = "logical"), contains = "BigVAR")
setGeneric(name = "BigVAR.Eval", def = function(object) {
standardGeneric("BigVAR.Eval")
})
setGeneric(name = "BigVAR.Eval", def = function(object) {
standardGeneric("BigVAR.Eval")
})
setMethod(f = "BigVAR.Eval", signature = "BigVAR.intermediate", definition = function(object) {
VARXI <- object@VARXI
VARX <- object@VARX
p <- object@lagmax
gamma <- object@gamma
tol <- object@tol
T2 <- object@T1
T3 <- object@T2
MN <- object@Minnesota
h <- object@horizon
loss <- object@loss
delta <- object@delta
rolling_oos <- object@rolling_oos
if (rolling_oos) {
lambda <- object@LambdaGrid
optind <- object@index
} else {
lambda <- object@OptimalLambda
optind <- 1
}
gran2 <- nrow(as.matrix(lambda))
lambdaopt <- object@OptimalLambda
delta <- object@delta
verbose <- object@verbose
window.size <- [email protected]
recursive <- object@recursive
group <- object@Structure
relaxed <- object@Relaxed
alpha <- object@alpha
ZFull <- object@ZFull
RVAR <- object@Relaxed
refit_fraction <- object@refit_fraction
intercept <- object@intercept
Y <- ZFull$Y
k <- ncol(object@Data)
n_oos_obs <- length((T2 + 1):T3)
separate_lambdas <- object@separate_lambdas
if (rolling_oos) {
MSFE <- object@InSampMSFE
if (separate_lambdas) {
MSFE_oos <- matrix(NA, nrow = n_oos_obs, ncol = ncol(Y))
lambda_evolve <- matrix(NA, nrow = n_oos_obs, ncol = ncol(Y))
} else {
MSFE_oos <- matrix(NA, nrow = n_oos_obs, ncol = 1)
lambda_evolve <- matrix(NA, nrow = n_oos_obs, ncol = 1)
}
} else {
MSFE_oos <- rep(NA, n_oos_obs)
lambda_evolve <- matrix(lambda, nrow = n_oos_obs, ncol = ncol(object@LambdaGrid))
gran2 <- 1
}
ONESE <- object@ONESE
if (VARXI) {
k1 <- object@VARX$k
s <- object@VARX$s
contemp <- object@VARX$contemp
s1 <- object@VARX$contemp
m <- k - k1
beta <- array(0, dim = c(k1, k1 * p + (k - k1) * (s + s1) + 1, gran2 * length(alpha)))
} else {
k1 <- 0
s <- 0
contemp <- FALSE
s1 <- 0
m <- 0
beta <- array(0, dim = c(k, k * p + 1, gran2 * length(alpha)))
}
C <- object@constvec
s1 <- 0
if (exists("contemp", where = VARX)) {
if (VARX$contemp) {
contemp <- TRUE
s1 <- 1
} else {
contemp <- FALSE
s1 <- 0
}
} else {
contemp <- FALSE
s1 <- 0
}
if (group == "Tapered") {
palpha <- object@alpha
gran2 <- length(lambda) * length(palpha)
beta <- array(0, dim = c(k, k * p + 1, gran2))
tapered <- TRUE
} else {
palpha <- NULL
tapered <- FALSE
}
preds <- matrix(NA, nrow = n_oos_obs, ncol = ncol(Y))
if (verbose) {
print("Evaluation Stage")
pb <- txtProgressBar(min = T2 - h + 2, max = T3 - h, style = 3)
}
betaArray <- array(0, dim = c(dim(beta)[1], dim(beta)[2], n_oos_obs))
grps <- create_group_indexes(group, p, k, gran2 * length(alpha), VARXI, k1, s + s1)
groups <- grps$groups
compgroups <- grps$compgroups
activeset <- grps$activeset
starting_eigvals <- grps$starting_eigvals
betaWS <- beta
for (v in (T2 - h + 2):T3) {
if (h > 1 & !recursive) {
if (window.size != 0) {
ws1 <- max(c(v - window.size - h, 1))
            index_y <- (ws1 + h - 1):(v - 1)
            index_z <- ws1:(v - h)
            trainY <- ZFull$Y[index_y, , drop = FALSE]
            trainZ <- ZFull$Z[, index_z, drop = FALSE]
} else {
index_y <- (h):(v - 1)
index_z <- 1:(v - h)
trainY <- ZFull$Y[index_y, , drop = FALSE]
trainZ <- ZFull$Z[, index_z, drop = F]
}
} else {
if (window.size != 0) {
ws1 <- max(c(v - window.size, 1))
trainY <- ZFull$Y[(ws1):(v - 1), , drop = FALSE]
trainZ <- ZFull$Z[, (ws1):(v - 1), drop = FALSE]
} else {
trainY <- ZFull$Y[(1):(v - 1), , drop = FALSE]
trainZ <- ZFull$Z[, (1):(v - 1), drop = FALSE]
}
}
if (v + h - 1 > T3) {
break
}
dual <- FALSE
temp <- .BigVAR.fit(group, betaWS, trainZ, trainY, lambda, tol, p, m, k1, k, s, s1, MN, C, intercept, separate_lambdas,
dual, activeset, starting_eigvals, groups, compgroups, VARXI, alpha, palpha, gamma)
eZ <- c(1, ZFull$Z[, v])
beta <- temp$beta
betaWS <- temp$beta
if (MN) {
for (i in 1:dim(betaWS)[3]) {
submat <- adrop(betaWS[1:dim(betaWS)[1], 1:dim(betaWS)[1], i, drop = F], drop = 3)
diag(submat) <- diag(submat) - C
betaWS[1:dim(betaWS)[1], 1:dim(betaWS)[1], i] <- submat
}
}
activeset <- temp$activeset
msfe_index <- v - (T2 - h) - 1
temp_results <- refine_and_forecast(beta, as.matrix(eZ), trainZ, trainY, ZFull$Y[v + h - 1, , drop = FALSE], lambda = lambda,
h = h, recursive = recursive, MN = MN, RVAR = RVAR, refit_fraction = refit_fraction, separate_lambdas = separate_lambdas,
C = C, inds = NULL, loss = loss, delta = delta, k = k, p = p, k1 = k1, s = s, oos = !rolling_oos)
if (!rolling_oos) {
if (separate_lambdas) {
MSFE_oos[msfe_index] <- temp_results$MSFE[optind]
for (col in seq_len(ncol(temp_results$MSFE))) {
preds[msfe_index, col] <- temp_results$preds[col, optind[col]]
betaArray[col, , msfe_index] <- temp$beta[col, , optind[col]]
}
} else {
MSFE_oos[msfe_index] <- temp_results$MSFE
preds[msfe_index, ] <- temp_results$preds[, optind]
betaArray[, , msfe_index] <- temp_results$betaArray
}
} else {
if (!separate_lambdas) {
MSFE_temp <- temp_results$MSFE[optind]
MSFE_oos[msfe_index, ] <- MSFE_temp
MSFE <- rbind(MSFE, temp_results$MSFE)
preds[msfe_index, ] <- temp_results$preds[, optind]
betaArray[, , msfe_index] <- temp_results$betaArray
} else {
opts <- c()
for (col in seq_len(ncol(temp_results$MSFE))) {
opts[col] <- temp_results$MSFE[optind[col], col]
betaArray[col, , msfe_index] <- temp$beta[col, , optind[col]]
preds[msfe_index, col] <- temp_results$preds[col, optind[col]]
}
MSFE_oos[msfe_index, ] <- opts
MSFE <- abind(MSFE, temp_results$MSFE, along = 1)
}
opt_lambdas <- find_optimal_lambda(MSFE, lambda, palpha, alpha, ONESE, dual, separate_lambdas, tapered)
optind <- opt_lambdas$optind
lambdaopt <- opt_lambdas$lambdaopt
lambda_evolve[msfe_index, ] <- lambdaopt
}
}
    temp <- .BigVAR.fit(group, beta, ZFull$Z, ZFull$Y, lambda, tol, p, m, k1, k, s, s1, MN, C, intercept, separate_lambdas,
        dual, activeset, starting_eigvals, groups, compgroups, VARXI, alpha, palpha, gamma)
betas_full <- temp$beta
if (separate_lambdas) {
betaPred <- matrix(0, nrow = ncol(Y), ncol = dim(betas_full)[2])
for (col in seq_len(ncol(Y))) betaPred[col, ] <- betas_full[col, , optind[col]]
} else {
betaPred <- as.matrix(betas_full[, , optind])
}
return(list(MSFE = MSFE_oos, betaPred = betaPred, predictions = preds, betaArray = betaArray, lambda_evolve = lambda_evolve))
})
#' BigVAR Estimation
#' @description
#' Fit a BigVAR object with a structured penalty (VARX-L or HLAG).
#' @usage BigVAR.est(object)
#' @param object BigVAR object created from \code{constructModel}
#' @details Fits HLAG or VARX-L model on a BigVAR object. Does not perform cross-validation. This method allows the user to construct their own penalty parameter selection procedure.
#' @return An array of \eqn{k \times (kp+1) \times n} or \eqn{k \times (kp+ms+1) \times n} coefficient matrices (including the intercept column); one for each of the n values of lambda.
#' @seealso \code{\link{constructModel}}, \code{\link{BigVAR.results}},\code{\link{cv.BigVAR}}
#' @name BigVAR.est
#' @aliases BigVAR.est,BigVAR-method
#' @docType methods
#' @rdname BigVAR.est-methods
#' @examples
#' data(Y)
#' Y=Y[1:100,]
#' #construct a Basic VAR-L
#' Model1=constructModel(Y,p=4,struct='Basic',gran=c(50,10))
#' BigVAR.est(Model1)
#' @export
setGeneric(name = "BigVAR.est", def = function(object) {
standardGeneric("BigVAR.est")
})
setMethod(f = "BigVAR.est", signature = "BigVAR", definition = function(object) {
p <- object@lagmax
s1 <- 0
Y <- object@Data
k <- ncol(Y)
alpha <- object@alpha
gamma <- object@gamma
RVAR <- object@Relaxed
refit_fraction <- object@refit_fraction
group <- object@Structure
intercept <- object@intercept
VARX <- object@VARX
tol <- object@tol
loss <- object@loss
linear <- object@linear
delta <- object@delta
if (length(alpha) == 0) {
if (length(VARX) > 0) {
alpha <- 1/(VARX$k + 1)
} else {
alpha <- 1/(k + 1)
}
}
C <- object@constvec
if (length(alpha) > 1 & group %in% c("SparseLag", "SparseOO", "BasicEN")) {
dual <- TRUE
} else {
dual <- FALSE
}
MN <- object@Minnesota
h <- object@horizon
separate_lambdas <- object@separate_lambdas
if (!is.matrix(Y)) {
Y <- matrix(Y, ncol = 1)
}
s <- ifelse(length(object@VARX) != 0, object@VARX$s, 0)
if (object@ownlambdas) {
lambda <- object@Granularity
gran2 <- length(lambda)
if (gran2 == 1) {
ONESE <- FALSE
}
} else {
gran2 <- object@Granularity[2]
gran1 <- object@Granularity[1]
}
trainZ <- object@model_data$trainZ
trainY <- object@model_data$trainY
VARXI <- object@VARXI
if (VARXI) {
k1 <- object@VARX$k
s <- object@VARX$s
contemp <- object@VARX$contemp
s1 <- object@VARX$contemp
m <- ncol(Y) - ncol(trainY)
beta <- array(0, dim = c(k1, k1 * p + (k - k1) * (s + s1) + 1, gran2 * length(alpha)))
} else {
k1 <- 0
s <- 0
contemp <- FALSE
s1 <- 0
m <- 0
beta <- array(0, dim = c(k, k * p + 1, gran2 * length(alpha)))
}
grps <- create_group_indexes(group, p, k, gran2 * length(alpha), VARXI, k1, s + s1)
groups <- grps$groups
compgroups <- grps$compgroups
activeset <- grps$activeset
starting_eigvals <- grps$starting_eigvals
if (object@ownlambdas) {
lambda <- object@Granularity
} else {
gran2 <- object@Granularity[2]
gran1 <- object@Granularity[1]
lambda <- create_lambda_grid(trainY, trainZ, lapply(groups, function(x) {
x + 1
}), gran1, gran2, group, p, k1, s + s1, m, k, MN, alpha, C, intercept, tol, VARXI, separate_lambdas, dual, gamma,
linear, verbose = FALSE)
}
h <- object@horizon
ZFull <- list()
if (!is.matrix(trainZ)) {
trainZ <- matrix(trainZ, ncol = 1)
}
if (!is.matrix(trainY)) {
trainY <- matrix(trainY, ncol = 1)
}
ZFull$Z <- trainZ
ZFull$Y <- trainY
T <- nrow(trainY)
if (group == "Tapered") {
palpha <- seq(0, 1, length = 10)
palpha <- rev(palpha)
gran2 <- length(lambda) * length(palpha)
beta <- array(0, dim = c(k, k * p + 1, gran2))
} else {
palpha <- NULL
}
temp <- .BigVAR.fit(group, beta, trainZ, trainY, lambda, tol, p, m, k1, k, s, s1, MN, C, intercept, separate_lambdas,
dual, activeset, starting_eigvals, groups, compgroups, VARXI, alpha, palpha, gamma)
beta <- temp$beta
if (RVAR) {
beta_rls <- RelaxedLS(cbind(t(trainZ), trainY), beta)
beta <- (1 - refit_fraction) * beta + refit_fraction * beta_rls
}
activeset <- temp$activeset
q1a <- temp$q1a
return(list(B = beta, lambdas = lambda))
})
## BigVAR.results inherits class BigVAR and contains the results from cv.BigVAR
#' BigVAR.results
#'
#' This class contains the results from cv.BigVAR.
#'
#' It inherits the class BigVAR, but contains substantially more information.
#'
#' @field InSampMSFE In-sample MSFE from optimal value of lambda
#' @field LambdaGrid Grid of candidate lambda values
#' @field index Rank of optimal lambda value
#' @field OptimalLambda Value of lambda that minimizes MSFE
#' @field OOSMSFE Average Out of sample MSFE of BigVAR model with optimal lambda
#' @field seoosmsfe Standard error of out of sample MSFE of BigVAR model with optimal lambda
#' @field MeanMSFE Average out of sample MSFE of unconditional mean forecast
#' @field MeanSD Standard error of out of sample MSFE of unconditional mean forecast
#' @field MeanPreds predictions from the unconditional mean model
#' @field RWMSFE Average out of sample MSFE of random walk forecast
#' @field RWPreds Predictions from random walk model
#' @field RWSD Standard error of out of sample MSFE of random walk forecast
#' @field AICMSFE Average out of sample MSFE of AIC forecast
#' @field AICSD Standard error of out of sample MSFE of AIC forecast
#' @field AICPreds Predictions from AIC VAR/VARX model
#' @field AICpvec Lag orders selected from AIC VAR model
#' @field AICsvec Lag orders selected from AIC VARX model
#' @field BICMSFE Average out of sample MSFE of BIC forecast
#' @field BICSD Standard error of out of sample MSFE of BIC forecast
#' @field BICPreds Predictions from BIC VAR/VARX model
#' @field BICpvec Lag orders selected from BIC VAR model
#' @field BICsvec Lag orders selected from BIC VARX model
#' @field betaPred The final estimated \eqn{k\times kp+ms+1} coefficient matrix, to be used for prediction
#' @field Zvals The final lagged values of \code{Y}, to be used for prediction
#' @field fitted fitted values obtained from betaPred
#' @field resids residuals obtained from betaPred
#' @field Data a \eqn{T \times k} or \eqn{T \times (k + m)} multivariate time series
#' @field lagmax Maximal lag order
#' @field Structure Penalty structure
#' @field Relaxed Indicator for relaxed VAR
#' @field Granularity Granularity of penalty grid
#' @field horizon Desired forecast horizon
#' @field crossval Cross-Validation procedure
#' @field alpha additional penalty parameter for Sparse Lag Group or Sparse Own/Other methods. Will contain either the heuristic choice of \eqn{1/(k+1)} or the value selected by cross validation if the argument \code{dual} is set to \code{TRUE}
#' @field VARXI VARX Indicator
#' @field Minnesota Minnesota Prior Indicator
#' @field verbose verbose indicator
#' @field dual indicator as to whether dual cross validation was conducted
#' @field contemp indicator if contemporaneous exogenous predictors are used
#' @field lagmatrix matrix of lagged values used to compute residuals (of which Zvals is the final column)
#' @field betaArray array of VAR/VARX coefficients from out of sample forecasts
#' @field sparse_count average fraction of active coefficients in validation period
#' @field lambda_evolve_path evolution of lambda over evaluation period
#'
#' @note One can also access any object of class BigVAR from BigVAR.results
#' @name BigVAR.results
#' @rdname BigVAR.results
#' @aliases BigVAR.results-class
#' @exportClass BigVAR.results
#' @author Will Nicholson
#' @export
setClass("BigVAR.results", representation(InSampMSFE = "array", InSampSD = "numeric", LambdaGrid = "matrix", index = "numeric",
OptimalLambda = "numeric", OOSMSFE = "matrix", seoosmsfe = "numeric", MeanMSFE = "numeric", AICMSFE = "numeric", AICPreds = "matrix",
BICMSFE = "numeric", BICpvec = "numeric", BICsvec = "numeric", AICpvec = "numeric", AICsvec = "numeric", BICSD = "numeric",
BICPreds = "matrix", RWMSFE = "numeric", RWPreds = "matrix", MeanSD = "numeric", MeanPreds = "matrix", AICSD = "numeric",
RWSD = "numeric", betaPred = "matrix", Zvals = "matrix", VARXI = "logical", resids = "matrix", preds = "matrix", dual = "logical",
contemp = "logical", fitted = "matrix", lagmatrix = "matrix", betaArray = "array", sparse_count = "numeric", lambda_evolve_path = "matrix"),
contains = "BigVAR")
#' Plot an object of class BigVAR.results
#'
#' @param x BigVAR.results object created from \code{cv.BigVAR}
#' @param y NULL
#' @param ... additional arguments
#' @details Plots the in sample MSFE of all values of lambda with the optimal value highlighted.
#' @name plot
#' @import methods
#' @aliases plot,BigVAR.results-method
#' @aliases plot-methods
#' @docType methods
#' @method plot method
#' @rdname BigVAR.results-plot-methods
#' @importFrom graphics abline
#' @importFrom graphics par
#' @export
setMethod(f = "plot", signature = "BigVAR.results", definition = function(x, y = NULL, ...) {
if (!x@separate_lambdas) {
plot(x@LambdaGrid, colMeans(x@InSampMSFE[, , 1]), type = "o", xlab = "Value of Lambda", ylab = "MSFE", log = "x")
abline(v = x@OptimalLambda, col = "green")
} else {
k <- ncol(x@Data)
par(mfrow = c(k, 1))
for (i in 1:k) {
plot(x@LambdaGrid[, i], colMeans(x@InSampMSFE[, , i]), type = "o", xlab = "Value of Lambda", ylab = "MSFE", log = "x")
abline(v = x@OptimalLambda[i], col = "green")
}
}
})
#' Default show method for an object of class BigVAR.results
#'
#' @param object BigVAR.results object created from \code{cv.BigVAR}
#' @details prints forecast results and additional diagnostic information as well as comparisons with mean, random walk, AIC, and BIC benchmarks
#' @seealso \code{\link{cv.BigVAR}},\code{\link{BigVAR.results}}
#' @name show
#' @aliases show,BigVAR.results-method
#' @docType methods
#' @method show method
#' @rdname show-methods-BigVAR.results
#' @export
setMethod("show", "BigVAR.results", function(object) {
cat("*** BIGVAR MODEL Results *** \n")
cat("Structure\n")
print(object@Structure)
if (object@Relaxed == TRUE) {
cat("Relaxed VAR \n")
print(object@Relaxed)
}
cat("Loss \n")
print(object@loss)
cat("Forecast Horizon \n")
print(object@horizon)
cat("Minnesota VAR\n")
print(object@Minnesota)
if (object@VARXI) {
cat("VARX Specs \n")
print(object@VARX)
}
cat("Maximum Lag Order \n")
print(object@lagmax)
cat("Optimal Lambda \n")
print(signif(object@OptimalLambda, digits = 4))
if (object@dual) {
cat("Optimal Alpha \n")
print(signif(object@alpha, digits = 2))
}
cat("Grid Depth \n")
print(object@Granularity[1])
cat("Index of Optimal Lambda \n")
print(object@index)
cat("Fraction of active coefficients \n")
print(signif(object@sparse_count, digits = 4))
if (!object@separate_lambdas) {
cat("In-Sample Loss\n")
print(signif(mean(object@InSampMSFE[, object@index, ]), digits = 3))
} else {
cat("In-Sample Loss\n")
print(signif(apply(object@InSampMSFE[, object@index, ], 2, mean), digits = 3))
}
cat("BigVAR Out of Sample Loss\n")
print(signif(mean(object@OOSMSFE), digits = 3))
cat("*** Benchmark Results *** \n")
cat("Conditional Mean Out of Sample Loss\n")
print(signif(object@MeanMSFE, digits = 3))
cat("AIC Out of Sample Loss\n")
print(signif(object@AICMSFE, digits = 3))
cat("BIC Out of Sample Loss\n")
print(signif(object@BICMSFE, digits = 3))
cat("RW Out of Sample Loss\n")
print(signif(object@RWMSFE, digits = 3))
})
#' Forecast using a BigVAR.results object
#'
#' @usage predict(object,...)
#' @param object BigVAR.results object from \code{cv.BigVAR}
#' @param ... additional arguments affecting the predictions produced (e.g. \code{n.ahead}, \code{confint})
#' @details Provides \code{n.ahead} step forecasts using the model produced by cv.BigVAR. If \code{confint} is set to \code{TRUE}, a 95 percent confidence interval will also be returned.
#' @seealso \code{\link{cv.BigVAR}}
#' @name predict
#' @aliases predict,BigVAR.results-method
#' @docType methods
#' @method predict method
#' @rdname predict-methods-BigVAR.results
#' @examples
#' data(Y)
#' Y=Y[1:100,]
#' Model1=constructModel(Y,p=4,struct='Basic',gran=c(50,10),verbose=FALSE)
#' results=cv.BigVAR(Model1)
#' predict(results,n.ahead=1)
#' @export
setMethod("predict", "BigVAR.results", function(object, n.ahead = 1, newxreg = NULL, predict_all = FALSE, confint = FALSE,
...) {
MN <- object@Minnesota
eZ <- object@Zvals
betaPred <- object@betaPred
Y <- object@Data
k <- object@VARX$k
m <- ncol(object@Data) - k
p <- object@lagmax
s <- object@VARX$s
VARX <- object@VARXI
contemp <- object@contemp
s1 <- 0
if (confint) {
YZ <- object@model_data
ci = create_sigma_u(YZ$trainY, YZ$trainZ, betaPred, n.ahead)
}
fcst <- matrix(betaPred %*% eZ, ncol = 1)
fcst_full <- fcst
if (n.ahead == 1) {
if (confint) {
lower <- fcst + ci[, 1]
upper <- fcst + ci[, 2]
fcst <- as.data.frame(cbind(fcst, lower, upper))
names(fcst) <- c("forecast", "lower", "upper")
}
return(fcst)
} else {
if (!VARX) {
fcst <- predictMS(matrix(fcst, nrow = 1), Y[(nrow(Y) - p + 1):nrow(Y), ], n.ahead - 1, betaPred, p, MN, predict_all = predict_all)
if (predict_all) {
row.names(fcst) <- paste0("T+", 1:n.ahead)
} else {
fcst <- t(fcst)
}
} else {
if (is.null(newxreg)) {
stop("Need new data for multi-step VARX forecasts. Re-run with new data in newxreg")
} else {
if (nrow(newxreg) < n.ahead - 1) {
stop(paste("Need at least ", n.ahead - 1, "rows of new data"))
}
C <- max(p, s)
if (contemp) {
C <- C + 3
}
fcst <- matrix(predictMSX(matrix(fcst, nrow = 1), as.matrix(Y[(nrow(Y) - C + 1):nrow(Y), 1:(k)]), n.ahead -
1, betaPred, p, newxreg, matrix(Y[(nrow(Y) - C + 1):nrow(Y), (ncol(Y) - m + 1):ncol(Y)], ncol = m), m,
s, 1, MN, contemp), ncol = 1)
}
}
}
if (confint) {
lower <- fcst + ci[, 1]
upper <- fcst + ci[, 2]
fcst <- as.data.frame(cbind(fcst, lower, upper))
names(fcst) <- c("forecast", "lower", "upper")
}
return(fcst)
})
#' Sparsity Plot of a BigVAR.results object
#'
#' @param object BigVAR.results object
#' @return NA, side effect is graph
#' @details Uses \code{levelplot} from the \code{lattice} package to plot the magnitude of each coefficient in the final coefficient matrix estimated by \code{cv.BigVAR}.
#' @name SparsityPlot.BigVAR.results
#' @aliases SparsityPlot.BigVAR.results,BigVAR.results-method
#' @seealso \code{\link{cv.BigVAR}}, \code{\link{BigVAR.results}}
#' @docType methods
#' @rdname SparsityPlot.BigVAR.results-methods
#' @examples
#' data(Y)
#' Y <- Y[1:100,]
#' Model1 <- constructModel(Y,p=4,struct='Basic',gran=c(50,10),verbose=FALSE)
#' SparsityPlot.BigVAR.results(cv.BigVAR(Model1))
#' @export
#' @importFrom lattice levelplot
#' @importFrom lattice panel.abline
#' @importFrom lattice panel.levelplot
#' @importFrom grDevices colorRampPalette
setGeneric(name = "SparsityPlot.BigVAR.results", def = function(object) {
standardGeneric("SparsityPlot.BigVAR.results")
})
setMethod(f = "SparsityPlot.BigVAR.results", signature = "BigVAR.results", definition = function(object) {
B <- object@betaPred
if (nrow(B) == 1) {
B <- matrix(B[, 2:ncol(B)], nrow = 1)
} else {
B <- B[, 2:ncol(B)]
}
k <- nrow(B)
p <- object@lagmax
s1 <- 0
    if (length(object@VARX) != 0) {
s <- object@VARX$s
m <- ncol(object@Data) - object@VARX$k
contemp <- object@VARX$contemp
if (!is.null(contemp)) {
if (contemp) {
s1 <- 1
}
} else {
s1 <- 0
}
} else {
m <- 0
s <- 0
}
s <- s + s1
text <- c()
for (i in 1:p) {
text1 <- as.expression(bquote(bold(Phi)^(.(i))))
text <- append(text, text1)
}
if (m > 0) {
for (i in (p + 1):(p + s + 1)) {
text1 <- as.expression(bquote(bold(beta)^(.(i - p - s1))))
text <- append(text, text1)
}
}
f <- function(m) t(m)[, nrow(m):1]
rgb.palette <- colorRampPalette(c("white", "blue"), space = "Lab")
at <- seq(k/2 + 0.5, p * (k) + 0.5, by = k)
if (m > 0) {
at2 <- seq(p * k + m/2 + 0.5, p * k + s * m + 0.5, by = m)
} else {
at2 <- c()
}
at <- c(at, at2)
se2 <- seq(1.75, by = k, length = k)
L2 <- levelplot(as.matrix(f(abs(B))), col.regions = rgb.palette, colorkey = NULL, xlab = NULL, ylab = NULL, main = list(label = "Sparsity Pattern Generated by BigVAR",
cex = 1), panel = function(...) {
panel.levelplot(...)
panel.abline(a = NULL, b = 1, h = seq(1.5, m * s + p * k + 0.5, by = 1), v = seq(1.5, by = 1, length = p * k + m *
s), lwd = 0.5)
bl1 <- seq(k + 0.5, p * k + 0.5, by = k)
b23 <- seq(p * k + 0.5, p * k + 0.5 + s * m, by = m)
b1 <- c(bl1, b23)
panel.abline(a = NULL, b = 1, v = p * k + 0.5, lwd = 3)
panel.abline(a = NULL, b = 1, v = b1, lwd = 2)
}, scales = list(x = list(alternating = 1, labels = text, cex = 1, at = at, tck = c(0, 0)), y = list(alternating = 0,
tck = c(0, 0))))
return(L2)
})
#' Default coef method for BigVAR.results; returns the final coefficient matrix from the evaluation period
#'
#' @param object BigVAR.results object created from \code{cv.BigVAR}
#' @details displays formatted coefficient matrix
#' @name coef
#' @import methods
#' @aliases coef,BigVAR.results-method
#' @aliases coef-methods
#' @docType methods
#' @method coef method
#' @rdname BigVAR.results-coef-methods
#' @export
setMethod(f = "coef", signature = "BigVAR.results", definition = function(object) {
B <- data.frame(object@betaPred)
k <- nrow(B)
p <- object@lagmax
Y <- object@Data
intercept <- object@intercept
if (!intercept) {
B <- B[, 2:ncol(B)]
}
row.names(B) <- paste0("Y", 1:k)
if (!object@VARXI) {
bnames <- c(outer(X = paste0("Y", 1:k), Y = paste0("L", 1:p), paste0))
if (intercept) {
bnames <- c("intercept", bnames)
}
names(B) <- bnames
} else {
if (p > 0) {
bnames <- c(outer(X = paste0("Y", 1:k), Y = paste0("L", 1:p), paste0))
} else {
bnames <- NULL
}
m <- ncol(Y) - k
s <- object@VARX$s
if (!is.null(object@VARX$contemp)) {
bnamesX <- c(outer(X = paste0("X", 1:m), Y = paste0("L", 1:s), paste0))
} else {
bnamesX <- c(outer(X = paste0("X", 1:m), Y = paste0("L", 0:s), paste0))
}
bnames <- c(bnames, bnamesX)
if (intercept) {
bnames <- c("intercept", bnames)
}
names(B) <- bnames
}
return(B)
})
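# Illustration of the naming convention used by coef() above: for a VAR
# with k = 2 series, p = 2 lags, and an intercept, the returned columns are
# 'intercept', 'Y1L1', 'Y2L1', 'Y1L2', 'Y2L2' (series index, then lag).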
### Support functions for the BigVAR package. These are mostly utility functions that will not be seen by the user.
#' Construct a VAR or VARX lag matrix
#'
#' @param Y a \eqn{T \times k} matrix of endogenous (modeled) series
#' @param X a \eqn{T \times m} matrix of exogenous (unmodeled) series (default NULL)
#' @param p Endogenous Lag order
#' @param s exogenous lag order (default zero)
#' @param oos indicator as to whether the data should be constructed for out of sample prediction (i.e., last available entries of Y as final lags; default FALSE)
#' @param contemp indicator as to whether to use contemporaneous exogenous predictors (for example, if exogenous series become available before endogenous series; default FALSE)
#' @return list with two entries:
#' \itemize{
#' \item{'Z'}{\eqn{(kp+ms+1)\times (T-max(p,s))} VARX lag matrix}
#' \item{'Y'}{adjusted \eqn{k\times (T-max(p,s))} endogenous series}
#' }
#' @details This function is not required unless you wish to design your own cross validation routine.
#' @references
#' See page 15 of Lutkepohl, 'A New Introduction to Multiple Time Series Analysis'
#' @seealso \code{\link{MultVarSim}}
#' @examples
#' data(Y)
#' # construct VAR lag matrix with p=4
#' ZZ<-VARXLagCons(Y,X=NULL,p=4,s=0)
#' @export
VARXLagCons <- function(Y, X = NULL, p, s = 0, oos = FALSE, contemp = FALSE) {
if (is.null(X)) {
X <- matrix(0, nrow = nrow(Y))
}
if (nrow(Y) != nrow(X)) {
stop("Y and X must have same dimensions")
}
if (s == 0 & !contemp) {
m <- 0
} else {
m <- ncol(X)
}
if (p < 0 | m < 0) {
stop("lag orders must be positive")
}
k <- ifelse(is.null(Y), 0, ncol(Y))
XX <- VARXCons(Y, X, k, p, m, s, oos, contemp)
Y <- t(Y[(max(c(p, s)) + 1):nrow(Y), ])
return(list(Z = XX, Y = Y))
}
# VARX Construction for constructModel
VARXConsModel <- function(Y, p, VARX, tf) {
k <- ncol(Y)
if (length(VARX) != 0) {
VARXI <- TRUE
k1 <- VARX$k
s <- VARX$s
        if (exists("contemp", where = VARX)) {
            if (!is.logical(VARX$contemp)) {
                stop("contemp must be logical")
            }
            if (VARX$contemp) {
                contemp <- TRUE
                s1 <- 1
            } else {
                contemp <- FALSE
                s1 <- 0
            }
        } else {
            contemp <- FALSE
            s1 <- 0
        }
m <- k - k1
Y1 <- matrix(Y[, 1:k1, drop = F], ncol = k1)
X <- matrix(Y[, (ncol(Y) - m + 1):ncol(Y), drop = F], ncol = m)
if (!tf) {
trainZ <- VARXCons(Y1, X, k1, p, m, s, contemp = contemp)
} else {
trainZ <- VARXCons(matrix(0, ncol = 1, nrow = nrow(X)), matrix(X, ncol = m), k = 0, p = 0, m = m, s = s, contemp = contemp,
oos = FALSE)
}
trainZ <- trainZ[2:nrow(trainZ), , drop = F]
trainY <- matrix(Y[(max(c(p, s)) + 1):nrow(Y), 1:k1, drop = F], ncol = k1)
} else {
# VAR setting
VARXI <- FALSE
contemp <- FALSE
s1 <- 0
Z1 <- VARXCons(Y, matrix(0, nrow = nrow(Y)), k, p, 0, 0)
trainZ <- Z1[2:nrow(Z1), , drop = FALSE]
trainY <- matrix(Y[(p + 1):nrow(Y), , drop = FALSE], ncol = k)
}
return(list(trainY = trainY, trainZ = trainZ, s1 = s1, contemp = contemp, VARXI = VARXI))
}
.check_is_matrix <- function(x) {
!is.null(attr(x, "dim"))
}
.huber_loss <- function(r, delta) {
l <- ifelse(abs(r) < delta, 1/2 * abs(r)^2, delta * (abs(r) - 1/2 * delta))
return(l)
}
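# Illustrative check (values assumed, not part of the original source): the
# Huber loss is quadratic below delta and linear beyond it, e.g.
# .huber_loss(c(0.5, 5), delta = 2.5)
# returns c(0.125, 9.375) since 0.5^2/2 = 0.125 and 2.5 * (5 - 1.25) = 9.375.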
.calc.loss <- function(x, univ = FALSE, loss, delta) {
if (loss == "L1") {
l <- sum(abs(x))
} else if (loss == "L2") {
if (univ) {
l <- x^2
} else {
l <- norm2(x)^2
}
} else if (loss == "Huber") {
l <- .huber_loss(x, delta)
}
return(sum(l))
}
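# Illustration (assuming norm2 computes the Euclidean norm):
# .calc.loss(c(1, -2), univ = FALSE, loss = "L2", delta = 2.5) returns
# norm2(c(1, -2))^2 = 5, while loss = "L1" returns sum(abs(c(1, -2))) = 3.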
# mean benchmark
.evalMean <- function(Y, T1, T2, h = 1, loss = "L2", delta = 2.5) {
ypredF <- NULL
if (!is.matrix(Y)) {
Y <- matrix(Y, ncol = 1)
}
MSFE <- c()
k <- ncol(Y)
for (u in (T1 - h + 2):T2) {
if (h + u - 1 > T2) {
break
}
trainY1 <- Y[1:(u - 1), ]
if (k > 1) {
ypred <- colMeans(trainY1)
ypredF <- rbind(ypredF, ypred)
} else {
ypred <- mean(trainY1)
ypredF <- c(ypredF, ypred)
}
uhat <- matrix(Y[u + h - 1, ] - ypred, ncol = k)
MSFE <- c(MSFE, .calc.loss(uhat, univ = FALSE, loss, delta))
}
ypredF <- unname(ypredF)
return(list(Mean = mean(na.omit(MSFE)), SD = sd(na.omit(MSFE))/sqrt(length(na.omit(MSFE))), preds = as.matrix(ypredF)))
}
# random walk benchmark
.evalRW <- function(Y, T1, T2, h = 1, loss = "L2", delta = 2.5) {
if (!is.matrix(Y)) {
Y <- matrix(Y, ncol = 1)
}
ypredF <- NULL
MSFE <- c()
k <- ncol(Y)
for (u in (T1 - h + 2):T2) {
if (h + u - 1 > T2) {
break
}
trainY1 <- Y[u - 1, ]
ypredF <- rbind(ypredF, trainY1)
uhat <- matrix(Y[u + h - 1, ] - trainY1, ncol = k)
MSFE <- c(MSFE, .calc.loss(uhat, univ = FALSE, loss, delta))
}
ypredF <- unname(ypredF)
return(list(Mean = mean(na.omit(MSFE)), SD = sd(na.omit(MSFE))/sqrt(length(na.omit(MSFE))), preds = as.matrix(ypredF)))
}
# Construct Lambda Grid:
.LambdaGrid <- function(gran1, gran2, groups, Y, Z, group, p, k1, s, m, k, MN, alpha, C, intercept, tol, VARX = FALSE, separate_lambdas = FALSE,
verbose = FALSE, gamma = 3, linear) {
nseries <- ifelse(VARX == TRUE, k1, k)
if (group == "Lag") {
mat <- list()
for (i in seq_len(length(groups))) {
if (k > 1) {
mat[[i]] <- norm2(Z[groups[[i]], ] %*% Y)
} else {
mat[[i]] <- norm2(t(Y) %*% Z[groups[[i]], ])
}
}
lambdastart <- max(unlist(mat))
}
if (group == "Basic" | group == "BasicEN" | group == "Tapered") {
if (!separate_lambdas) {
if (group == "Basic" | group == "Tapered") {
lambdastart <- max(abs(t(Y) %*% t(Z)))
} else {
lambdastart <- max(abs(t(Y) %*% t(Z)))/max(c(alpha, 0.01))
}
} else {
lambdastart <- c()
for (i in 1:nseries) {
if (group == "Basic" | group == "Tapered") {
lambdastart[i] <- max(abs(t(Y[, i, drop = F]) %*% t(Z)))
} else {
lambdastart[i] <- max(abs(t(Y[, i, drop = F]) %*% t(Z)))/max(c(alpha, 0.01))
}
}
}
}
if (group == "MCP" | group == "SCAD") {
if (!separate_lambdas) {
lambdastart <- max(abs(crossprod(t(Z), Y)))/nrow(Y)
} else {
lambdastart <- c()
for (i in 1:nseries) {
lambdastart[i] <- max(abs(crossprod(t(Z), Y[, i])))/nrow(Y)
}
}
}
if (group == "SparseLag") {
mat <- list()
if (alpha > 0) {
for (i in seq_len(length(groups))) {
if (k > 1) {
mat[[i]] <- norm2(Z[groups[[i]], ] %*% Y * (1/(alpha)))
} else {
mat[[i]] <- norm2(t(Y) %*% Z[groups[[i]], ])
}
}
lambdastart <- max(unlist(mat))
} else {
lambdastart <- max(t(Y) %*% t(Z))
}
}
if (group == "OwnOther") {
mat <- list()
ZZ <- kronecker(t(Z), diag(nseries))
for (i in seq_len(length(groups))) {
mat[[i]] <- norm(as.vector(t(Y)) %*% ZZ[, groups[[i]]]/sqrt(length(groups[[i]])), "F")
}
lambdastart <- max(unlist(mat))
}
if (group == "SparseOO") {
mat <- list()
ZZ <- kronecker(t(Z), diag(nseries))
if (alpha > 0) {
for (i in seq_len(length(groups))) {
mat[[i]] <- norm(1/(alpha) * as.vector(t(Y)) %*% ZZ[, groups[[i]]]/sqrt(length(groups[[i]])), "F")
}
lambdastart <- max(unlist(mat))
} else {
lambdastart <- max(t(Y) %*% t(Z))
}
}
if (group == "EFX") {
gmax <- c()
for (i in 1:nseries) {
gmax[i] <- norm2(Z %*% Y[, i])/sqrt(k * p)
}
lambdastart <- max(gmax)
}
if (group == "HLAGC" | group == "HLAGOO" | group == "HLAGELEM") {
gmax <- c()
for (i in 1:nseries) {
gmax[i] <- norm2(Z %*% Y[, i])
}
if (!separate_lambdas) {
lambdastart <- max(gmax)
} else {
lambdastart <- gmax
}
}
if (VARX) {
beta <- array(0, dim = c(k1, k1 * p + s * m + 1, 1))
} else if (group == "Tapered") {
beta <- array(0, dim = c(k, k * p + 1, 1))
} else {
beta <- array(0, dim = c(k, k * p + 1, 1))
}
if (!separate_lambdas) {
lambdastart <- LGSearch(lambdastart, Y, Z, beta, group, k1, p, s, m, groups, k, MN, alpha, C, intercept, tol, VARX,
gamma)
if (!linear) {
lambda <- exp(seq(from = log(lambdastart), to = log(lambdastart/gran1), length = gran2))
} else {
lambda <- seq(from = lambdastart, to = lambdastart/gran1, length = gran2)
}
} else {
lambda <- matrix(NA, nrow = c(gran2), ncol = ncol(Y))
for (i in seq_len(ncol(lambda))) {
lambdastart[i] <- LGSearch(lambdastart[i], Y, Z, beta, group, k1, p, s, m, groups, k, MN, alpha, C, intercept,
tol, VARX, gamma)
lambdastart[i] <- ifelse(lambdastart[i] == 0, 1e-04, lambdastart[i])
if (verbose & i%%20 == 0) {
print(sprintf("determined lambda grid for series %s", i))
}
if (!linear) {
lambda[, i] <- exp(seq(from = log(lambdastart[i]), to = log(lambdastart[i]/gran1), length = gran2))
} else {
lambda[, i] <- seq(from = lambdastart[i], to = lambdastart[i]/gran1, length = gran2)
}
}
}
return(lambda)
}
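# Illustration of the grid produced above: with a starting value of 10,
# depth gran1 = 50, and gran2 = 10 candidates, the default (non-linear)
# spacing is log-linear from lambdastart down to lambdastart/gran1:
# exp(seq(from = log(10), to = log(10/50), length = 10))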
#' Converts a VAR coefficient matrix of order p to multiple companion form
#'
#' @param B a \eqn{k \times kp} coefficient matrix
#' @param p Lag order
#' @param k Number of Series
#' @return Returns a \eqn{kp \times kp} coefficient matrix representing all coefficient matrices contained in Ai as a VAR(1).
#' @references See page 15 of Lutkepohl, 'A New Introduction to Multiple Time Series Analysis'
#' @seealso \code{\link{MultVarSim}}
#' @examples
#' k=3;p=6
#' B=matrix(0,nrow=k,ncol=p*k)
#' A1<- matrix(c(.4,-.02,.01,-.02,.3,.02,.01,.04,.3),ncol=3,nrow=3)
#' A2 <- matrix(c(.2,0,0,0,.3,0,0,0,.13),ncol=3,nrow=3)
#' B[,1:k]=A1
#' B[,(4*k+1):(5*k)]=A2
#' A <- VarptoVar1MC(B,p,k)
#' @export
VarptoVar1MC <- function(B, p, k) {
Fp <- matrix(0, nrow = k * p, ncol = k * p)
Fp[1:k, ] <- B
Fp[-(1:k), 1:(k * (p - 1))] <- diag(k * (p - 1))
# We require that the coefficient matrix generates a stationary VAR
if (max(Mod(eigen(Fp)$values)) > 1) {
warning("Coefficient Matrix is not stationary")
}
return(Fp)
}
#' Simulate a VAR
#'
#' @param k Number of Series
#' @param A1 Either a \eqn{k \times k} coefficient matrix or a \eqn{kp \times kp} matrix created using \code{\link{VarptoVar1MC}}.
#' @param p Maximum Lag Order
#' @param Sigma Residual Covariance Matrix of dimension \eqn{k\times k}
#' @param T Number of simulations
#' @return Returns a \eqn{T \times k} of realizations from a VAR.
#' @references Lutkepohl, 'A New Introduction to Multiple Time Series Analysis'
#' @seealso \code{\link{VarptoVar1MC}}
#' @examples
#' k=3;p=6
#' B=matrix(0,nrow=k,ncol=p*k)
#' A1<- matrix(c(.4,-.02,.01,-.02,.3,.02,.01,.04,.3),ncol=3,nrow=3)
#' A2 <- matrix(c(.2,0,0,0,.3,0,0,0,.13),ncol=3,nrow=3)
#' B[,1:k]=A1
#' B[,(4*k+1):(5*k)]=A2
#' A <- VarptoVar1MC(B,p,k)
#' Y <-MultVarSim(k,A,p,.1*diag(k),100)
#' @export
#' @importFrom MASS mvrnorm
MultVarSim <- function(k, A1, p, Sigma, T) {
if (max(Mod(eigen(A1)$values)) > 1) {
stop("Error: Generator Matrix is not stationary")
}
# add 500 observations for initialization purposes
Y <- matrix(0, nrow = T + 500 + p, ncol = k)
YY <- as.vector(Y)
for (i in seq(from = (k * p + 1), to = (nrow(Y) * k - 1), by = k)) {
u <- as.vector(c(mvrnorm(1, rep(0, k), Sigma), rep(0, k * p - k)))
YY[(i + k):(i - k * p + 1 + k)] <- A1 %*% YY[(i):(i - k * p + 1)] + as.matrix(u, ncol = 1)
}
YY <- YY[YY != 0]
Y <- matrix(YY, ncol = k, byrow = TRUE)
Y <- Y[, c(ncol(Y):1)]
Y <- Y[501:nrow(Y), ]
return(Y)
}
# function to create subsets for lag group VARX-L
.groupfun <- function(p, k) {
jjj <- list()
jjj[[1]] <- 1:k
if (p > 1) {
for (i in 2:p) {
jjj[[i]] <- jjj[[i - 1]] + k
}
}
return(jjj)
}
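# Illustration: .groupfun(p = 2, k = 3) returns list(1:3, 4:6), i.e. one
# group of lag-matrix rows per lag; .groupfuncpp below shifts these to
# start at zero for use within Rcpp.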
# C++ groupings to account for 0 indexing
.groupfuncpp <- function(p, k) {
jjj <- list()
jjj[[1]] <- 0:(k - 1)
if (p > 1) {
for (i in 2:p) {
jjj[[i]] <- jjj[[i - 1]] + k
}
}
return(jjj)
}
# subsetting complement of groups in rcpp
.groupfuncomp <- function(p, k) {
ownoth <- .groupfuncpp(p, k)
kk2 <- list()
pmax <- max(unlist(ownoth))
to <- 0:(pmax)
for (i in seq_len(length(ownoth))) {
kk2[[i]] <- to[is.na(pmatch(to, ownoth[[i]]))]
}
return(kk2)
}
# Group indexing for own/other VARX-L
.lfunction2 <- function(p, k) {
kk <- list()
kk[[1]] <- 1:(k^2)
if (p > 1) {
for (i in 2:p) {
kk[[i]] <- 1:(k^2) + tail(kk[[i - 1]], 1)
}
}
return(kk)
}
.lfunction2cpp <- function(p, k) {
kk <- list()
kk[[1]] <- 0:(k^2 - 1)
    if (p > 1) {
        for (i in 2:p) {
            kk[[i]] <- 0:(k^2 - 1) + tail(kk[[i - 1]], 1) + 1
        }
    }
return(kk)
}
.lfunction3 <- function(p, k) {
kk <- .lfunction2(p, k)
oo <- list()
pp <- list()
for (i in seq_len(length(kk))) {
j <- 0
oo[[i]] <- kk[[i]][(seq(1, length(kk[[1]]), k + 1) + (j * k^2))]
pp[[i]] <- kk[[i]][-(seq(1, length(kk[[1]]), k + 1) + (j * k^2))]
j <- j + 1
}
ownoth <- c(oo, pp)
return(ownoth)
}
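# Illustration: .lfunction3(p = 1, k = 2) splits the k^2 = 4 vectorized
# lag-1 coefficients into 'own' (diagonal) indices c(1, 4) and 'other'
# (off-diagonal) indices c(2, 3), returning list(c(1, 4), c(2, 3)).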
.lfunction3cpp <- function(p, k) {
kk <- .lfunction2cpp(p, k)
oo <- list()
pp <- list()
for (i in seq_len(length(kk))) {
j <- 0
oo[[i]] <- kk[[i]][(seq(1, length(kk[[1]]), k + 1) + (j * k^2))]
pp[[i]] <- kk[[i]][-(seq(1, length(kk[[1]]), k + 1) + (j * k^2))]
j <- j + 1
}
ownoth <- c(oo, pp)
return(ownoth)
}
.lfunctioncomp <- function(p, k) {
ownoth <- .lfunction3cpp(p, k)
kk2 <- list()
pmax <- max(unlist(ownoth))
to <- 0:(pmax)
for (i in seq_len(length(ownoth))) {
kk2[[i]] <- to[is.na(pmatch(to, ownoth[[i]]))]
}
return(kk2)
}
# This function should work for arbitrary groups
.lfunction <- function(groups, p) {
H <- as.vector(do.call("cbind", groups))
kk <- list()
kk[[1]] <- H
if (p > 1) {
for (i in 2:p) {
kk[[i]] <- as.vector(do.call("cbind", groups)) + tail(kk[[i - 1]], 1)
}
}
return(kk)
}
# Indexing for HLAG
.vsubs <- function(p, k) {
vi <- list()
for (i in p:1) {
g <- max(k * i - k, 0)
vi[[i]] <- g:(k * (p) - 1)
}
return(vi)
}
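# Illustration: .vsubs(p = 2, k = 2) returns list(0:3, 2:3); element i is
# the zero-indexed set of coefficients at lags >= i, giving the nested
# structure required by the hierarchical (HLAG) penalty.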
# indexing for HLAG OO
.oofun <- function(p, k) {
kk <- .lfunction2(p, k)
oo <- list()
pp <- list()
for (i in seq_len(length(kk))) {
j <- 0
oo[[i]] <- kk[[i]][(seq(1, length(kk[[1]]), k + 1) + (j * k^2))]
pp[[i]] <- kk[[i]][-(seq(1, length(kk[[1]]), k + 1) + (j * k^2))]
j <- j + 1
}
ownoth <- list()
jj <- .lfunction3(p, k)
for (i in seq_len(length(jj))) {
if (i == 1)
{
ownoth[[i]] <- jj[[i]]
oo[[1]] <- NULL
}
if (i == length(jj)) {
ownoth[[i]] <- tail(jj, 1)
pp[[1]] <- NULL
}
if (i != 1 & i%%2 != 0) {
ownoth[[i]] <- head(oo, 1)
oo[[1]] <- NULL
}
if (i != length(jj) & i%%2 == 0) {
ownoth[[i]] <- head(pp, 1)
pp[[1]] <- NULL
}
}
return(rev(ownoth))
}
.oocumfun <- function(p, k) {
kk <- rev(.oofun(p, k))
oogroups <- list()
oogroups[[1]] <- unlist(kk)
for (i in 2:length(kk)) {
oogroups[[i]] <- unlist(kk[-(1:(i - 1))])
}
return(oogroups)
}
# indexing function for Own/Other HLAG
.vecoovars <- function(p, k, k1) {
vv <- list()
vv[[1]] <- 1:(p * k)
vv[[2]] <- vv[[1]][-k1]
q1 <- 1
if (p > 1) {
for (i in 3:(2 * p)) {
if (i%%2 != 0) {
vv[[i]] <- (q1 * k + 1):(k * p)
q1 <- q1 + 1
} else {
vv[[i]] <- vv[[i - 1]][-k1]
}
}
}
return(vv)
}
# indexing to start at zero for use within rcpp
.vecoovarscpp <- function(p, k, k1) {
vv <- list()
vv[[1]] <- 0:(p * k - 1)
vv[[2]] <- vv[[1]][-(k1)]
q1 <- 1
if (p > 1) {
for (i in 3:(2 * p)) {
if (i%%2 != 0) {
vv[[i]] <- (q1 * k):(k * p - 1)
q1 <- q1 + 1
} else {
vv[[i]] <- vv[[i - 1]][-(k1)]
}
}
}
return(vv)
}
# VARX Lag Group function
groupfunVARX <- function(p, k, k1, s) {
jj <- list()
m <- k - k1
jj <- .groupfuncpp(p, k1)
kp <- k1 * p + m * s - 1
jj2 <- list()
startjj <- max(unlist(jj)) + 1
for (i in seq(startjj, kp, by = 1)) {
jj[[i]] <- i
}
jj[sapply(jj, is.null)] <- NULL
return(jj)
}
groupfunVARXcomp <- function(p, k, k1, s) {
ownoth <- groupfunVARX(p, k, k1, s)
kk2 <- list()
pmax <- max(unlist(ownoth))
to <- 0:(pmax)
for (i in seq_len(length(ownoth))) {
kk2[[i]] <- to[is.na(pmatch(to, ownoth[[i]]))]
}
return(kk2)
}
diaggroupfunVARX <- function(p, k, k1, s) {
m <- k - k1
jj <- list()
jj <- .lfunction3cpp(p, k1)
kp <- k1 * (p * k1 + s * m) - 1
jj2 <- list()
startjj <- max(unlist(jj)) + 1
for (i in seq(startjj, kp, by = k1)) {
jj[[i]] <- i:(i + k1 - 1)
}
jj[sapply(jj, is.null)] <- NULL
return(jj)
}
diaggroupfunVARXcomp <- function(p, k, k1, s) {
ownoth <- diaggroupfunVARX(p, k, k1, s)
kk2 <- list()
pmax <- max(unlist(ownoth))
to <- 0:(pmax)
for (i in seq_len(length(ownoth))) {
kk2[[i]] <- to[is.na(pmatch(to, ownoth[[i]]))]
}
return(kk2)
}
diaggroupfunVARXL <- function(p, k, k1) {
jj <- list()
jj <- .lfunction3cpp(p, k1)
kp <- k1 * p * k - 1
jj2 <- list()
startjj <- max(unlist(jj)) + 1
for (i in seq(startjj, kp, by = 1)) {
jj[[i]] <- i
}
jj[sapply(jj, is.null)] <- NULL
return(jj)
}
diaggroupfunVARXcompL <- function(p, k, k1) {
ownoth <- diaggroupfunVARXL(p, k, k1)
kk2 <- list()
pmax <- max(unlist(ownoth))
to <- 0:(pmax)
for (i in seq_len(length(ownoth))) {
kk2[[i]] <- to[is.na(pmatch(to, ownoth[[i]]))]
}
return(kk2)
}
# iterative procedure to find a tighter bound for lambda starting value via binary search
LGSearch <- function(gstart, Y, Z, BOLD, group, k1, p, s, m, gs, k, MN, alpha, C, intercept, tol, VARX, gamma) {
s1 <- 0
palpha <- NULL
tk <- 1/max(Mod(eigen(Z %*% t(Z))$values))
lambdah <- gstart
lambdal <- 0
activeset <- list(rep(rep(list(0), length(gs))))
gran2 <- 1
grps <- create_group_indexes(group, p, k, gran2 * length(alpha), VARX, k1, s)
groups <- grps$groups
compgroups <- grps$compgroups
activeset <- grps$activeset
starting_eigvals <- grps$starting_eigvals
nseries <- nrow(BOLD)
while (max(abs(lambdah - lambdal)) > 10 * tol) {
lambda <- (lambdah + lambdal)/2
dual <- FALSE
separate_lambdas <- FALSE
temp <- .BigVAR.fit(group, BOLD, Z, Y, lambda, tol, p, m, k1, k, s, s1, MN, C, intercept, separate_lambdas, dual,
activeset, starting_eigvals, groups, compgroups, VARX, alpha, palpha, gamma)
# remove intercept from consideration
if (group == "Tapered") {
param <- adrop(BOLD[, -1, , drop = F], drop = 3)
} else {
BOLD <- temp$beta
param <- adrop(BOLD[, -1, , drop = F], drop = 3)
}
activeset <- temp$activeset
q1a <- temp$q1a
if (MN) {
submat <- param[1:nseries, 1:nseries, drop = F]
diag(submat) <- ifelse(C == 0, diag(param[1:nseries, 1:nseries, drop = F]), diag(param[1:nseries, 1:nseries,
drop = F]) - C)
param[1:nseries, 1:nseries] <- submat
BOLD[, -1, ] <- param
}
if (max(abs(param)) < tol) {
lambdah <- lambda
} else {
lambdal <- lambda
}
}
lambdah
}
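# A minimal sketch of the bisection invariant used in LGSearch above (the
# hypothetical fit_is_zero() stands in for running .BigVAR.fit and testing
# whether all non-intercept coefficients fall below tol):
# while (lambda_high - lambda_low > 10 * tol) {
#     lambda <- (lambda_high + lambda_low) / 2
#     if (fit_is_zero(lambda)) lambda_high <- lambda else lambda_low <- lambda
# }
# The returned lambda_high approximates the smallest penalty that zeros out
# the entire coefficient matrix.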
#' Evaluate forecasts from a VAR or VARX with lag orders selected by AIC/BIC
#'
#' @param Y a \eqn{T \times k} multivariate time series
#' @param X a \eqn{T \times m} multivariate time series of unmodeled exogenous variables
#' @param p maximum lag order for endogenous series
#' @param s maximum lag order for exogenous series
#' @param T1 start of forecast evaluation period.
#' @param T2 end of forecast evaluation period
#' @param IC specifies whether to select lag order according to 'AIC' or 'BIC'
#' @param h desired forecast horizon
#' @param loss loss function (default 'L2', one of 'L1','L2','Huber')
#' @param delta delta for Huber loss function (default 2.5)
#' @param iterated indicator as to whether to use iterated or direct multistep forecasts (if applicable, VAR context only)
#' @return Returns the one-step ahead MSFE as well as the forecasts over the evaluation period and lag order selected.
#' @details This function evaluates the one-step ahead forecasts of a VAR or VARX fit by least squares over an evaluation period. At every point in time, lag orders for the endogenous and exogenous series are selected according to AIC or BIC. This function is run automatically when \code{\link{cv.BigVAR}} is called unless \code{IC} is set to \code{FALSE} in \code{\link{constructModel}}.
#' @references Neumaier, Arnold, and Tapio Schneider. 'Estimation of parameters and eigenmodes of multivariate autoregressive models.' ACM Transactions on Mathematical Software (TOMS) 27.1 (2001): 27-57.
#' @seealso \code{\link{VARXFit}},\code{\link{constructModel}}, \code{\link{cv.BigVAR}}
#' @examples
#' data(Y)
#'
#' # Evaluate the performance of a VAR with lags selected by BIC.
#' p <- 4
#' T1 <- floor(nrow(Y)/3)
#' T2 <- floor(2*nrow(Y)/3)
#' # Matrix of zeros for X
#' X <- matrix(0,nrow=nrow(Y),ncol=ncol(Y))
#' BICMSFE <- VARXForecastEval(Y,X,p,0,T1,T2,'BIC',1)
#'
#' @export
VARXForecastEval <- function(Y, X, p, s, T1, T2, IC, h, iterated = FALSE, loss = "L2", delta = 2.5) {
if (T1 > nrow(Y) | T2 > nrow(Y) | T2 < T1) {
stop("Training dates exceed series length")
}
if (!IC %in% c("AIC", "BIC")) {
stop("IC must either be AIC or BIC")
}
MSFE <- c()
predF <- NULL
pvec <- NULL
svec <- NULL
k <- ncol(Y)
m <- ifelse(s != 0, ncol(X), 0)
for (i in (T1 - h + 2):T2) {
if (h + i - 1 > T2) {
break
}
testY <- as.matrix(Y[1:(i - 1), ])
testX <- as.matrix(X[1:(i - 1), ])
if (!iterated) {
hd <- h
} else {
hd <- 1
}
if (IC == "BIC") {
popt <- ICX(testY, testX, k, p, s, m, "BIC", h = hd)
}
if (IC == "AIC") {
popt <- ICX(testY, testX, k, p, s, m, "AIC", h = hd)
}
B1 <- popt$B
if (popt$p == 0 & popt$s == 0) {
eZ <- matrix(rep(1, 1), ncol = 1)
pred <- B1 %*% eZ
} else {
C <- max(popt$p, popt$s)
            ## possible memory leak in VARX lag matrix construction in Eigen if maxlag is 1;
            ## to be on the safe side, we perform it in R
            eZ <- VARXCons(as.matrix(Y[(i - C):(i), ]), as.matrix(X[(i - C):(i), ]), k, popt$p, m, popt$s)
pred <- B1 %*% eZ
# iterated multistep forecasts (if VAR and horizon greater than 1)
if (h > 1 & s == 0 & iterated) {
pred <- predictMS(matrix(pred, nrow = 1), Y, h - 1, B1, C, FALSE)
}
}
predF <- rbind(predF, t(pred))
MSFEi <- .calc.loss(Y[i + h - 1, ] - pred, univ = FALSE, loss, delta)
MSFE <- c(MSFE, MSFEi)
svec <- c(svec, popt$s)
pvec <- c(pvec, popt$p)
}
return(list(MSFE = MSFE, pred = as.matrix(predF), p = pvec, s = svec))
}
#' Fit a VAR or VARX model by least squares
#'
#' @param Y a \eqn{T \times k} multivariate time series
#' @param p maximum lag order
#' @param IC Information criterion indicator, if set to \code{NULL}, it will fit a least squares VAR(X) of orders p and s. Otherwise, if set to 'AIC' or 'BIC' it returns the model with lag orders that minimize the given IC.
#' @param VARX a list of VARX specifications (as in \code{\link{constructModel}} (or NULL )
#' @return Returns a list with six entries:
#' \itemize{
#' \item{'Bhat'}{Estimated \eqn{k\times (kp+ms)} coefficient matrix}
#' \item{'SigmaU'}{Estimated \eqn{k\times k} residual covariance matrix}
#' \item{'phat'}{Selected lag order for VAR component}
#' \item{'shat'}{Selected lag order for VARX component}
#' \item{'Y'}{multivariate time series retained for prediction purposes}
#' \item{'k'}{number of endogenous (modeled) time series}
#' }
#' @details This function uses a modified form of the least squares technique proposed by Neumaier and Schneider (2001). It fits a least squares VAR or VARX via a QR decomposition that does not require explicit matrix inversion. This results in improved computational performance as well as numerical stability over the conventional least squares approach.
#' @references Neumaier, Arnold, and Tapio Schneider. 'Estimation of parameters and eigenmodes of multivariate autoregressive models.' ACM Transactions on Mathematical Software (TOMS) 27.1 (2001): 27-57.
#' @seealso \code{\link{constructModel}}, \code{\link{cv.BigVAR}},\code{\link{BigVAR.fit}}
#' @examples
#' data(Y)
#' # fit a VAR_3(3)
#' mod <- VARXFit(Y,3,NULL,NULL)
#' # fit a VAR_3 with p= 6 and lag selected according to AIC
#' modAIC <- VARXFit(Y,6,'AIC',NULL)
#' # Fit a VARX_{2,1} with p=6, s=4 and lags selected by BIC
#' modXBIC <- VARXFit(Y,6,'BIC',list(k=1,s=4))
#'
#' @export
VARXFit <- function(Y, p, IC, VARX = NULL) {
if (!is.null(VARX)) {
if (is.list(VARX) & !(exists("k", where = VARX) & exists("s", where = VARX))) {
stop("VARX Specifications entered incorrectly")
}
}
if (is.list(VARX) & (length(VARX) != 0)) {
k1 <- VARX$k
s <- VARX$s
Y1 <- matrix(Y[, 1:k1], ncol = k1)
m <- ncol(Y) - k1
X <- matrix(Y[, (k1 + 1):ncol(Y)], ncol = m)
if (exists("contemp", where = VARX)) {
contemp <- VARX$contemp
} else {
contemp <- FALSE
}
Z <- VARXCons(Y1, X, k1, p, m, s, contemp = contemp)
offset <- max(p, s) + 1
YT <- matrix(Y1[offset:nrow(Y), ], ncol = k1)
X <- matrix(X[offset:nrow(X), ], ncol = m)
} else {
k <- ncol(Y)
k1 <- k
s <- 0
m <- 0
offset <- p + 1
X <- matrix(0, nrow = nrow(Y))
Z <- VARXCons(Y, X, k, p, m, s)
YT <- matrix(Y[(offset):nrow(Y), ], ncol = ncol(Y))
}
if (is.null(IC)) {
Res <- ARFitVARXR(cbind(t(Z), YT), k1, p, m, s)
shat <- s
phat <- p
} else {
if (!IC %in% c("AIC", "BIC")) {
stop("IC must either be AIC,BIC, or set to NULL")
}
Res <- ICX(YT, X, k1, p, s, m, IC)
shat <- Res$s
phat <- Res$p
}
if (is.null(VARX)) {
k <- ncol(Y)
} else {
k <- VARX$k
}
list(Bhat = Res$B, SigmaU = Res$SigmaU, phat = phat, shat = shat, Y = Y, k = k)
}
#' One-step ahead predictions for VARX models
#'
#' @param VARXRes the results from \code{\link{VARXFit}}
#' @return Returns a vector consisting of the out-of-sample forecasts for the provided \code{\link{VARXFit}} model.
#' @seealso \code{\link{VARXFit}}
#' @examples
#' data(Y)
#' # fit a VAR_3(3)
#' mod <- VARXFit(Y,3,NULL,NULL)
#' pred <-PredictVARX(mod)
#'
#' @export
PredictVARX <- function(VARXRes) {
B <- VARXRes$Bhat
Y <- VARXRes$Y
k <- VARXRes$k
m <- ncol(Y) - k
if (k < ncol(Y)) {
Z <- VARXCons(Y[, 1:k, drop = FALSE], Y[, (k + 1):ncol(Y), drop = FALSE], k, VARXRes$phat, m, VARXRes$shat, oos = TRUE)
} else {
Z <- VARXCons(Y[, 1:k, drop = FALSE], matrix(0, nrow = nrow(Y)), k, VARXRes$phat, m, VARXRes$shat, oos = TRUE)
}
return(as.numeric(tail(t(B %*% Z), 1)))
}
# Recursive multi-step predictions
predictMS <- function(pred, Y, n.ahead, B, p, MN = FALSE, predict_all = FALSE, n.ahead_full = n.ahead) {
# Augment Y with predictions, create lag matrix (no intercept if MN)
Y <- rbind(Y, pred)
Z <- VARXCons(Y, matrix(0, nrow = nrow(Y), ncol = 1), ncol(Y), p, 0, 0, oos = TRUE)
if (MN) {
Z <- Z[2:nrow(Z), , drop = F]
}
Z <- Z[, ncol(Z), drop = F]
pred <- matrix(B %*% Z, ncol = ncol(Y), nrow = 1)
if (n.ahead == 1) {
if (predict_all) {
return(rbind(Y[((nrow(Y) - n.ahead_full) + 1):nrow(Y), ], pred))
} else {
return(pred)
}
}
predictMS(pred, Y, n.ahead - 1, B, p, MN, predict_all, n.ahead_full = n.ahead)
}
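# Usage sketch (illustrative, not executed): given a one-step forecast pred1
# (a 1 x k row vector) computed from coefficient matrix B and lag matrix Z,
#   pred1 <- matrix(B %*% Z[, ncol(Z), drop = FALSE], nrow = 1)
#   pred3 <- predictMS(pred1, Y, n.ahead = 2, B, p)
# iterates the VAR two further steps and returns the 3-step-ahead forecast.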
# Multi-step VARX with new data.
predictMSX <- function(pred, Y, n.ahead, B, p, newxreg, X, m, s, cumulative, MN, contemp = FALSE) {
Y <- rbind(Y, pred)
X <- rbind(X, matrix(newxreg[cumulative, ], ncol = m))
if (nrow(Y) != nrow(X)) {
stop("error, dimension issue")
}
if (!contemp) {
Z <- VARXCons(as.matrix(Y), X, ncol(Y), p, m, s, oos = TRUE)
} else {
Z <- VARXCons(as.matrix(Y), as.matrix(X), ncol(Y), p, m, s, oos = FALSE, contemp = TRUE)
}
Z <- Z[, ncol(Z), drop = F]
if (MN) {
Z <- as.matrix(Z[2:nrow(Z), drop = F])
pred <- matrix(B[, 2:ncol(B), drop = F] %*% Z, ncol = ncol(Y), nrow = 1)
} else {
pred <- matrix(B %*% Z, ncol = ncol(Y), nrow = 1)
}
if (n.ahead == 1) {
return(pred)
}
    predictMSX(pred, Y, n.ahead - 1, B, p, newxreg, X, m, s, cumulative + 1, MN, contemp)
}
# Find optimal (row, column) indices from a flattened 2-d grid search
findind <- function(opt, lambda1, lambda2) {
if (opt < length(lambda2)) {
lambda1ind <- 1
} else {
lambda1ind <- ceiling(opt/length(lambda2))
}
if (lambda1ind == 1) {
jind <- opt
} else {
jind <- opt - (length(lambda2)) * (lambda1ind - 1)
}
return(c(lambda1ind, jind))
}
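# Worked example (illustrative): with length(lambda1) = 10 and length(lambda2) = 5,
# the flattened index opt = 23 maps to lambda1 index ceiling(23/5) = 5 and
# lambda2 index 23 - 5 * (5 - 1) = 3, so findind(23, lambda1, lambda2) = c(5, 3).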
# Bayesian VAR with MN prior
BVARLitterman <- function(Y, Z, p, tau, mu, H, iRW) {
T <- nrow(Y)
k <- ncol(Y)
# prior covariance based on univariate AR models
sigmas <- c()
for (i in 1:k) {
Z1 <- VARXCons(Y[, i, drop = F], matrix(0, nrow = nrow(Y), ncol = 1), 1, p, 0, 0)
K <- cbind(t(Z1), Y[(p + 1):nrow(Y), i])
sigmas[i] <- sqrt(ARFitVARXR(K, 1, p, 0, 0)$SigmaU)
}
MMO <- colMeans(Y)
# create prior random walk dummy
Yrw1 <- diag(sigmas * iRW)
Yrw2 <- matrix(0, nrow = k * (p - 1), ncol = k)
Yrw <- tau * (rbind(Yrw1, Yrw2))
Zrw <- tau * cbind(kronecker(diag(1:p), diag(sigmas)), matrix(0, nrow = k * p, ncol = 1))
# create dummy for intercept
epsilon <- 1e-05
Ycs <- 1e-05 * matrix(0, nrow = 1, ncol = k)
Zcs <- epsilon * cbind(matrix(0, ncol = k * p, nrow = 1), 1)
# dummy on the sums of coefficients
Ylr <- mu * diag(MMO * iRW)
Zlr1 <- kronecker(matrix(1, nrow = 1, ncol = p), diag(MMO) * iRW)
Zlr <- mu * (cbind(Zlr1, matrix(0, nrow = k, ncol = 1)))
# Dummy for residual covariance matrix
Ycv <- diag(sigmas)
Zcv <- matrix(0, nrow = k, ncol = k * p + 1)
Yprior <- rbind(Yrw, Ylr, Ycv, Ycs)
Zprior <- rbind(Zrw, Zlr, Zcv, Zcs)
Tstar <- nrow(Yprior)
Z <- t(Z)
Z <- cbind(Z[, 2:ncol(Z)], 1)
ZZinv <- solve(t(Zprior) %*% Zprior + t(Z) %*% Z)
ZY <- t(Zprior) %*% Yprior + t(Z) %*% Y
beta <- ZZinv %*% ZY
return(t(beta))
}
# grid search for BGR
BGRGridSearch <- function(Y, Z, p, grid, RWIND) {
preds <- list()
for (i in seq_len(length(grid))) {
pi <- grid[i]
mu <- pi * 0.1 # used in BGR paper
preds[[i]] <- BVARLitterman(Y, Z, p, pi, mu, -1, RWIND)
}
preds <- array(unlist(preds), dim = c(nrow(preds[[1]]), ncol(preds[[1]]), length(preds)))
return(preds)
}
# process MN prior
MN_prior <- function(Y, Z, C1) {
C <- matrix(0, nrow = ncol(Y), ncol = nrow(Z))
diag(C) <- C1
Y <- t(Y)
Y <- Y - C %*% Z
Y <- t(Y)
return(list(Y = Y, C = C))
}
# pre-process data (subtract intercept, adjust for shrinking toward constants)
pre_process <- function(Y, Z, C1, MN, intercept) {
    k <- ncol(Y)
    C <- NULL  # ensure C is defined even when MN = FALSE
if (MN) {
YC <- MN_prior(Y, Z, C1)
Y <- YC$Y
C <- YC$C
}
Y <- t(Y)
if (intercept) {
YMean <- c(apply(Y, 1, mean))
ZMean <- c(apply(Z, 1, mean))
if (k > 1) {
Y <- Y - YMean %*% t(c(rep(1, ncol(Y))))
} else {
Y <- Y - mean(Y)
}
Z <- Z - ZMean %*% t(c(rep(1, ncol(Z))))
} else {
YMean <- rep(0, nrow(Y))
ZMean <- rep(0, nrow(Z))
}
Y <- t(Y)
return(list(Y = Y, Z = Z, YMean = YMean, ZMean = ZMean, C = C))
}
# create indices for group structures (note: for VARX, s must include any contemporaneous term, i.e. s + s1)
create_group_indexes <- function(group, p, k, gran2, VARX = FALSE, k1 = NULL, s = NULL) {
starting_eigvals <- NULL
groups <- NULL
compgroups <- NULL
activeset <- NULL
if (VARX) {
if (group == "Lag") {
groups <- groupfunVARX(p, k, k1, s)
compgroups <- groupfunVARXcomp(p, k, k1, s)
activeset <- rep(list(rep(rep(list(0), length(groups)))), gran2)
} else if (group == "SparseLag") {
groups <- groupfunVARX(p, k, k1, s)
compgroups <- groupfunVARXcomp(p, k, k1, s)
starting_eigvals <- list()
for (i in 1:(p + s)) {
starting_eigvals[[i]] <- matrix(runif(k1, -1, 1), ncol = 1)
}
activeset <- rep(list(rep(rep(list(0), length(groups)))), gran2)
} else if (group == "OwnOther") {
groups <- diaggroupfunVARX(p, k, k1, s)
compgroups <- diaggroupfunVARXcomp(p, k, k1, s)
activeset <- rep(list(rep(rep(list(0), length(groups)))), gran2)
} else if (group == "SparseOO") {
groups <- diaggroupfunVARX(p, k, k1, s)
compgroups <- diaggroupfunVARXcomp(p, k, k1, s)
activeset <- rep(list(rep(rep(list(0), length(groups)))), gran2)
}
} else {
if (group == "Lag") {
groups <- .groupfuncpp(p, k)
compgroups <- .groupfuncomp(p, k)
activeset <- rep(list(rep(rep(list(0), length(groups)))), gran2)
} else if (group == "SparseLag") {
groups <- .groupfuncpp(p, k)
compgroups <- .groupfuncomp(p, k)
starting_eigvals <- list()
for (i in 1:p) {
starting_eigvals[[i]] <- matrix(runif(k, -1, 1), ncol = 1)
}
activeset <- rep(list(rep(rep(list(0), length(groups)))), gran2)
} else if (group == "OwnOther") {
groups <- .lfunction3cpp(p, k)
compgroups <- .lfunctioncomp(p, k)
activeset <- rep(list(rep(rep(list(0), length(groups)))), gran2)
} else if (group == "SparseOO") {
groups <- .lfunction3cpp(p, k)
compgroups <- .lfunctioncomp(p, k)
activeset <- rep(list(rep(rep(list(0), length(groups)))), gran2)
starting_eigvals <- list()
for (i in 1:(2 * p)) {
starting_eigvals[[i]] <- matrix(runif(length(groups[[i]]), -1, 1), ncol = 1)
}
}
}
return(list(groups = groups, compgroups = compgroups, starting_eigvals = starting_eigvals, activeset = activeset))
}
create_lambda_grid <- function(trainY, trainZ, groups, gran1, gran2, group, p, k1, s, m, k, MN, alpha, C, intercept, tol,
VARX, separate_lambdas, dual, gamma, linear, verbose) {
# Constructs penalty grid if both alpha and lambda are selected
if (dual) {
lambda <- matrix(0, nrow = gran2, ncol = length(alpha))
for (i in 1:length(alpha)) {
lambda[, i] <- .LambdaGrid(gran1, gran2, groups, trainY, trainZ, group, p, k1, s, m, k, MN, alpha[i], C, intercept,
tol, VARX = VARX, linear = linear)
}
} else {
# Penalty parameter grid for just lambda
if (group != "BGR") {
lambda <- .LambdaGrid(gran1, gran2, groups, trainY, trainZ, group, p, k1, s, m, k, MN, alpha, C, intercept, tol,
VARX = VARX, separate_lambdas, verbose, linear = linear)
} else {
# special handling for BGR
lambda <- seq(1, 5, length = gran2)
lambda <- lambda * sqrt(k * p)
}
}
return(lambda)
}
# runs at each iteration of penalty parameter selection and validation stage
refine_and_forecast <- function(betaArray, eZ, trainZ, trainY, testY, lambda, h, recursive, MN, RVAR, refit_fraction, separate_lambdas,
C, inds, loss, delta, k, p, k1, s, oos = FALSE) {
if (separate_lambdas) {
MSFE_temp <- matrix(0, nrow = dim(betaArray)[3], ncol = ncol(trainY))
} else {
MSFE_temp <- c()
}
if (MN) {
eZ <- eZ[2:nrow(eZ), , drop = FALSE]
}
nlambdas <- dim(betaArray)[3]
preds <- matrix(0, nrow = ncol(trainY), ncol = nlambdas)
for (i in 1:nlambdas) {
beta <- adrop(betaArray[, , i, drop = F], drop = 3)
if (RVAR) {
beta_rls <- RelaxedLS(cbind(t(trainZ), trainY), beta)
beta <- (1 - refit_fraction) * beta + refit_fraction * beta_rls
}
if (!is.null(inds)) {
if (i == inds) {
beta_return <- beta
}
} else {
            # if we don't need the beta matrix (i.e. the validation stage), just return the last value
beta_return <- beta
}
if (MN) {
beta <- beta[, 2:ncol(beta)]
}
if (h > 1 & recursive) {
ptemp <- beta %*% eZ
pred <- matrix(ptemp, nrow = 1)
pred <- predictMS(pred, trainY, h - 1, beta, p, MN)
pred <- matrix(pred, ncol = 1)
} else {
pred <- beta %*% eZ
}
if (separate_lambdas) {
if (!oos) {
for (j in seq_len(ncol(testY))) {
MSFE_temp[i, j] <- .calc.loss(testY[, j] - pred[j, ], univ = TRUE, loss, delta)
}
} else {
MSFE_temp[i] <- .calc.loss(t(testY) - beta %*% eZ, univ = FALSE, loss, delta)
}
} else {
MSFE_temp[i] <- .calc.loss(t(testY) - beta %*% eZ, univ = FALSE, loss, delta)
}
preds[, i] <- pred
}
    # just return the optimal prediction in the validation stage
return(list(MSFE = MSFE_temp, betaArray = beta_return, preds = preds))
}
# determines optimal lambda from a grid of values
find_optimal_lambda <- function(MSFE, lambda, palpha, alpha, ONESE, dual, separate_lambdas, tapered) {
if (tapered) {
indopt <- which.min(colMeans(MSFE))
if (indopt < length(lambda)) {
alphaind <- 1
alphaopt <- palpha[1]
} else {
alphaopt <- palpha[floor(indopt/length(lambda))]
alphaind <- floor(indopt/length(lambda))
}
if (alphaind == 1) {
lambdaopt <- lambda[indopt]
} else if (indopt%%length(lambda) == 0) {
lambdaopt <- lambda[length(lambda)]
} else {
lambdaind <- indopt - length(lambda) * alphaind
lambdaopt <- lambda[lambdaind]
}
palpha <- alphaopt
optind <- indopt
} else {
# one standard error correction
if (ONESE & !dual & !separate_lambdas) {
MSFE2 <- MSFE
G2 <- colMeans(na.omit(MSFE2))
G3 <- sd(na.omit(MSFE2))/sqrt(nrow(na.omit(MSFE2)))
optind <- min(which(G2 < (min(G2) + G3)))
lambdaopt <- lambda[optind]
} else {
if (!tapered & !dual) {
# in rare cases in which MSFE is equal, the smaller penalty parameter is chosen. This prevents
# extremely sparse solutions
if (separate_lambdas) {
if (ONESE) {
MSFES <- t(apply(MSFE, 3, colMeans))
sds <- t(apply(MSFE, 3, function(x) sd(na.omit(x))/sqrt(nrow(na.omit(x)))))
lambdaopt <- c()
optinds <- c()
for (i in 1:nrow(MSFES)) {
optinds[i] <- min(which(MSFES[i, ] < sds[i] + min(MSFES[i, ])))
lambdaopt[i] <- lambda[optinds[i], i, drop = F]
}
optind = optinds
} else {
MSFES <- t(apply(MSFE, 3, colMeans))
optinds <- apply(MSFES, 1, which.min)
lambdaopt <- c()
for (i in 1:nrow(MSFES)) {
lambdaopt[i] <- lambda[optinds[i], i]
}
optind = optinds
}
} else {
optind <- max(which(colMeans(na.omit(MSFE)) == min(colMeans(na.omit(MSFE)))))
lambdaopt <- lambda[optind]
}
} else if (dual) {
if (!ONESE) {
optind <- max(which(colMeans(na.omit(MSFE)) == min(colMeans(na.omit(MSFE)))))
inds <- findind(optind, lambda[, 1], alpha)
} else {
G2 <- colMeans(na.omit(MSFE))
G3 <- sd(na.omit(MSFE))/sqrt(nrow(na.omit(MSFE)))
optind <- min(which(G2 < (min(G2) + G3)))
inds <- findind(optind, lambda[, 1], alpha)
}
lambdaopt <- lambda[inds[1], inds[2]]
lambda <- lambda[, inds[2]]
alphaopt <- alpha[inds[2]]
optind <- inds
}
}
if (!dual) {
alphaopt <- alpha
}
}
if (!exists("alphaopt")) {
alphaopt <- NULL
}
return(list(optind = optind, lambdaopt = lambdaopt, alphaopt = alphaopt))
}
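# construct approximate 95% prediction interval bounds via a normal
# approximation based on the residual variance of each series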
create_sigma_u <- function(Y, Z, B, h) {
trainY <- Y[(h):(nrow(Y) - 1), , drop = F]
trainZ <- Z[, 1:(ncol(Z) - h)]
Sigma = 1/nrow(trainY) * sqrt(diag(tcrossprod(t(Y) - B %*% rbind(1, Z))))
cis <- c(qnorm(0.025), qnorm(0.975))
lower <- cis[1] * Sigma
upper <- cis[2] * Sigma
return(cbind(lower, upper))
}
# add C back to coefficient matrix in MN setting
adjust_mn_var <- function(beta, C) {
for (i in 1:(dim(beta)[3])) beta[, 2:dim(beta)[2], i] <- beta[, 2:dim(beta)[2], i] + C
return(beta)
}
# ---- end of R/BigVARSupportFunctions.R ----
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
VARXCons <- function(Y1, X1, k, p, m, s, oos = FALSE, contemp = FALSE) {
.Call('_BigVAR_VARXCons', PACKAGE = 'BigVAR', Y1, X1, k, p, m, s, oos, contemp)
}
ARFitVARXR <- function(K21, k, p, m, s) {
.Call('_BigVAR_ARFitVARXR', PACKAGE = 'BigVAR', K21, k, p, m, s)
}
ICX <- function(Y1, X1, k, pmax, smax, m, pen, h = 1L) {
.Call('_BigVAR_ICX', PACKAGE = 'BigVAR', Y1, X1, k, pmax, smax, m, pen, h)
}
ST1a <- function(z, gam) {
.Call('_BigVAR_ST1a', PACKAGE = 'BigVAR', z, gam)
}
ST3a <- function(z, gam) {
.Call('_BigVAR_ST3a', PACKAGE = 'BigVAR', z, gam)
}
gamloopFista <- function(beta_, Y, Z, gammgrid, eps, YMean2, ZMean2, B1, k, p, tk, k1, s, sep_lambda = FALSE) {
.Call('_BigVAR_gamloopFista', PACKAGE = 'BigVAR', beta_, Y, Z, gammgrid, eps, YMean2, ZMean2, B1, k, p, tk, k1, s, sep_lambda)
}
gamloopFistaEN <- function(beta_, Y, Z, gammgrid, alpha, eps, YMean2, ZMean2, B1, k, p, tk, k1, s, sep_lambda = FALSE) {
.Call('_BigVAR_gamloopFistaEN', PACKAGE = 'BigVAR', beta_, Y, Z, gammgrid, alpha, eps, YMean2, ZMean2, B1, k, p, tk, k1, s, sep_lambda)
}
Eigencomp <- function(Z1, groups, n1, k1) {
.Call('_BigVAR_Eigencomp', PACKAGE = 'BigVAR', Z1, groups, n1, k1)
}
EigencompOO <- function(ZZ1, groups, n1, k) {
.Call('_BigVAR_EigencompOO', PACKAGE = 'BigVAR', ZZ1, groups, n1, k)
}
GamLoopGL2 <- function(beta_, Activeset, gamm, Y1, Z1, jj, jjfull, jjcomp, eps, YMean2, ZMean2, k, pk, M2f_, eigvalF_, eigvecF_) {
.Call('_BigVAR_GamLoopGL2', PACKAGE = 'BigVAR', beta_, Activeset, gamm, Y1, Z1, jj, jjfull, jjcomp, eps, YMean2, ZMean2, k, pk, M2f_, eigvalF_, eigvecF_)
}
GamLoopGLOO <- function(beta_, Activeset, gamm, Y, Z, jj, jjfull, jjcomp, eps, YMean2, ZMean2, k, pk, M2f_, eigvalF_, eigvecF_, k1) {
.Call('_BigVAR_GamLoopGLOO', PACKAGE = 'BigVAR', beta_, Activeset, gamm, Y, Z, jj, jjfull, jjcomp, eps, YMean2, ZMean2, k, pk, M2f_, eigvalF_, eigvecF_, k1)
}
GamLoopSGLOO <- function(beta_, Activeset_, gamm, alpha, Y, Z, jj_, jjfull_, jjcomp_, eps, YMean2, ZMean2, k1, pk, M2f_, eigs_, m) {
.Call('_BigVAR_GamLoopSGLOO', PACKAGE = 'BigVAR', beta_, Activeset_, gamm, alpha, Y, Z, jj_, jjfull_, jjcomp_, eps, YMean2, ZMean2, k1, pk, M2f_, eigs_, m)
}
GamLoopSGLOODP <- function(beta_, Activeset_, gamm, alpha, Y, Z, jj_, jjfull_, jjcomp_, eps, YMean2, ZMean2, k1, pk, M2f_, eigs_, m) {
.Call('_BigVAR_GamLoopSGLOODP', PACKAGE = 'BigVAR', beta_, Activeset_, gamm, alpha, Y, Z, jj_, jjfull_, jjcomp_, eps, YMean2, ZMean2, k1, pk, M2f_, eigs_, m)
}
Fistapar <- function(Y, Z, phi, L, lambda, eps, tk, k, sep_lambda = FALSE) {
.Call('_BigVAR_Fistapar', PACKAGE = 'BigVAR', Y, Z, phi, L, lambda, eps, tk, k, sep_lambda)
}
gamloopHLAG <- function(beta_, Y, Z, gammgrid, eps, YMean2, ZMean2, B1, k, p, sep_lambda = FALSE) {
.Call('_BigVAR_gamloopHLAG', PACKAGE = 'BigVAR', beta_, Y, Z, gammgrid, eps, YMean2, ZMean2, B1, k, p, sep_lambda)
}
gamloopOO <- function(beta_, Y, Z, gammgrid, eps, YMean2, ZMean2, B1, k, p, w, groups_, sep_lambda = FALSE) {
.Call('_BigVAR_gamloopOO', PACKAGE = 'BigVAR', beta_, Y, Z, gammgrid, eps, YMean2, ZMean2, B1, k, p, w, groups_, sep_lambda)
}
FistaElem <- function(Y, Z, phi, p, k, lambda, eps, tk, sep_lambda = FALSE) {
.Call('_BigVAR_FistaElem', PACKAGE = 'BigVAR', Y, Z, phi, p, k, lambda, eps, tk, sep_lambda)
}
gamloopElem <- function(beta_, Y, Z, gammgrid, eps, YMean2, ZMean2, B1, k, p, sep_lambda = FALSE) {
.Call('_BigVAR_gamloopElem', PACKAGE = 'BigVAR', beta_, Y, Z, gammgrid, eps, YMean2, ZMean2, B1, k, p, sep_lambda)
}
powermethod <- function(A, x1) {
.Call('_BigVAR_powermethod', PACKAGE = 'BigVAR', A, x1)
}
norm2 <- function(x) {
.Call('_BigVAR_norm2', PACKAGE = 'BigVAR', x)
}
RelaxedLS <- function(K, B2) {
.Call('_BigVAR_RelaxedLS', PACKAGE = 'BigVAR', K, B2)
}
GamLoopSGLX <- function(beta_, Activeset, gamm, alpha, Y1, Z1, jj, jjfull, jjcomp, eps, YMean2, ZMean2, k, pk, M2f_, eigs, k1) {
.Call('_BigVAR_GamLoopSGLX', PACKAGE = 'BigVAR', beta_, Activeset, gamm, alpha, Y1, Z1, jj, jjfull, jjcomp, eps, YMean2, ZMean2, k, pk, M2f_, eigs, k1)
}
proxvx2 <- function(v2, L, lambda, m, k, F1) {
.Call('_BigVAR_proxvx2', PACKAGE = 'BigVAR', v2, L, lambda, m, k, F1)
}
GamLoopSGL <- function(beta_, Activeset, gamm, alpha, Y1, Z1, jj, jjfull, jjcomp, eps, YMean2, ZMean2, k, pk, M1f_, M2f_, eigs_) {
.Call('_BigVAR_GamLoopSGL', PACKAGE = 'BigVAR', beta_, Activeset, gamm, alpha, Y1, Z1, jj, jjfull, jjcomp, eps, YMean2, ZMean2, k, pk, M1f_, M2f_, eigs_)
}
GamLoopSGLDP <- function(beta_, Activeset, gamm, alpha, Y1, Z1, jj, jjfull, jjcomp, eps, YMean2, ZMean2, k, pk, M1f_, M2f_, eigs_) {
.Call('_BigVAR_GamLoopSGLDP', PACKAGE = 'BigVAR', beta_, Activeset, gamm, alpha, Y1, Z1, jj, jjfull, jjcomp, eps, YMean2, ZMean2, k, pk, M1f_, M2f_, eigs_)
}
GamLoopSGLXDP <- function(beta_, Activeset, gamm, alpha, Y1, Z1, jj, jjfull, jjcomp, eps, YMean2, ZMean2, k, pk, M2f_, eigs, k1) {
.Call('_BigVAR_GamLoopSGLXDP', PACKAGE = 'BigVAR', beta_, Activeset, gamm, alpha, Y1, Z1, jj, jjfull, jjcomp, eps, YMean2, ZMean2, k, pk, M2f_, eigs, k1)
}
mcp_loop <- function(Y, Z, B, lambda, tol, gamma, mcp = TRUE) {
.Call('_BigVAR_mcp_loop', PACKAGE = 'BigVAR', Y, Z, B, lambda, tol, gamma, mcp)
}
gamloopMCP <- function(beta_, Y, Z, lambda, eps, YMean2, ZMean2, gamma, mcp) {
.Call('_BigVAR_gamloopMCP', PACKAGE = 'BigVAR', beta_, Y, Z, lambda, eps, YMean2, ZMean2, gamma, mcp)
}
# ---- end of R/RcppExports.R ----
#' Realization of a simulated multivariate time series
#'
#' @name Y
#' @title Simulated Multivariate Time Series
#' @docType data
#' @details \eqn{100 \times 3} multivariate time series distributed according to the generator matrix \link{A}.
#' @author Will Nicholson
NULL
#' Coefficient matrix for a stationary simulated multivariate time series
#'
#' @name A
#' @title Generator for Simulated Multivariate Time Series
#' @docType data
#' @details Example generator matrix adapted from Table 3.2 of Gredenhoff and Karlsson (1999)
#' @references Gredenhoff, Mikael, and Sune Karlsson. "Lag-length selection in VAR-models using equal and unequal lag-length procedures." Computational Statistics 14.2 (1999): 171-187.
#' @author Will Nicholson
NULL
# ---- end of R/data.R ----
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- eval=FALSE--------------------------------------------------------------
# devtools::install_github("wbnicholson/BigVAR/BigVAR")
## -----------------------------------------------------------------------------
library(BigVAR)
data(Y)
## -----------------------------------------------------------------------------
# 3 x 7 coefficient matrix
B = BigVAR.fit(Y,struct='Basic',p=2,lambda=1)[,,1]
# construct 7 x 99 lag matrix of Y
Z = VARXLagCons(Y,p=2,oos=TRUE)$Z
# obtain out of sample forecasts
yhat = B%*%Z[,ncol(Z),drop=F]
## ---- echo=FALSE--------------------------------------------------------------
suppressMessages(library(BigVAR,quietly=TRUE,warn.conflicts=FALSE))
#onorm=c()
library(lattice)
#Oracle=diag(3)
#SigmaU=diag(3)
SparsityPlot <- function (B, p, k,s,m, title = NULL)
{
text <- c()
for (i in 1:p) {
text1 <- as.expression(bquote(bold(Phi)^(.(i))))
text <- append(text, text1)
}
## text <- c()
if(s>0){
for (i in (p+1):(p+s+1)) {
text1 <- as.expression(bquote(bold(beta)^(.(i-p))))
text <- append(text, text1)
}
}
f <- function(m) t(m)[, nrow(m):1]
rgb.palette <- colorRampPalette(c("white", "grey" ),space = "Lab")
## rgb.palette <- colorRampPalette(c("white", "blue"), space = "Lab")
at <- seq(k/2 + 0.5, p * (k)+ 0.5, by = k)
at2 <- seq(p*k+s/2+.5,p*k+s*m+.5,by=s)
at <- c(at,at2)
se2 = seq(1.75, by = k, length = k)
L2 <- levelplot(f(abs(B)), col.regions = rgb.palette, colorkey = NULL,
xlab = NULL, ylab = NULL, main = list(label = title,
cex = 1), panel = function(...) {
panel.levelplot(...)
panel.abline(a = NULL, b = 1, h = seq(1.5, m*s+p* k +
0.5, by = 1), v = seq(1.5, by = 1, length = p *
k+m*s))
bl1 <- seq(k + 0.5, p *
k + 0.5, by = k)
bl2 <- seq(p*k + 0.5, p *
k + 0.5+s*m, by = m)
b1 <- c(bl1,bl2)
panel.abline(a = NULL, b = 1, v = p*k+.5, lwd = 7)
panel.abline(a = NULL, b = 1, v = b1, lwd = 3)
}, scales = list(x = list(alternating = 1, labels = text,
cex = 1, at = at, tck = c(0, 0)), y = list(alternating = 0,
tck = c(0, 0))))
return(L2)
}
B1 <- matrix(rep(1,57)*rbinom(57,1,.6),nrow=3,ncol=19)
B2 <-matrix(0,nrow=3,ncol=19)
B2[,1:3] <- 1
B2[,10:12] <- 1
B2[,16] <- 1
B2[,19] <- 1
B3 <-matrix(0,nrow=3,ncol=19)
diag(B3[,1:3])<- 1
B3[,10:12] <- 1
diag(B3[,10:12])<- 0
B3[,16] <- 1
B3[,19] <- 1
B4 <-matrix(0,nrow=3,ncol=19)
B4[,1:3] <- rep(1,9)*rbinom(9,1,.6)
B4[,10:12] <- rep(1,9)*rbinom(9,1,.4)
B4[,18] <- c(1,0,1)
p=5;k=3
Lasso <- SparsityPlot(B1,p,k,m=2,s=2,title="Basic VARX-L")
Group <- SparsityPlot(B2,p,k,m=2,s=2,title="Lag VARX-L")
OO <- SparsityPlot(B3,p,k,m=2,s=2,title="Own/Other VARX-L")
Sparse <- SparsityPlot(B4,p,k,m=2,s=2,title="Sparse Lag VARX-L")
## ----echo=FALSE---------------------------------------------------------------
library(gridExtra,quietly=TRUE)
grid.arrange(Lasso,Group,OO,Sparse,ncol=2)
## ----echo=FALSE---------------------------------------------------------------
k=3;p=5
HLAGC <- matrix(0,nrow=k,ncol=k*p)
HLAGC[1,] <- 1
HLAGC[2,1:6] <- 1
HLAGC[3,1:12] <- 1
HLAGOO <- matrix(0,nrow=k,ncol=k*p)
HLAGOO[1,1:13] <- 1
HLAGOO[2,1:6] <- 1
HLAGOO[3,c(1:9,12)] <- 1
HLAGELEM <- matrix(0,nrow=k,ncol=k*p)
HLAGELEM[1,c(1:10,12,13)] <- 1
HLAGELEM[2,c(1,3,4,6,7,9,10,12,13,15)] <- 1
HLAGELEM[3,c(1:8,10:11,13:14)] <- 1
SparsityPlot <-
function (B, p, k,s,m, title = NULL)
{
text <- c()
for (i in 1:p) {
text1 <- as.expression(bquote(bold('\U03A6')^(.(i))))
text <- append(text, text1)
}
## text <- c()
if(m>0){
for (i in (p+1):(p+s+1)) {
text1 <- as.expression(bquote(bold('\U03B2')^(.(i-p))))
text <- append(text, text1)
}
}
f <- function(m) t(m)[, nrow(m):1]
rgb.palette <- colorRampPalette(c("white", "grey" ),space = "Lab")
## rgb.palette <- colorRampPalette(c("white", "blue"), space = "Lab")
at <- seq(k/2 + 0.5, p * (k)+ 0.5, by = k)
if(m>0){
at2 <- seq(p*k+m/2+.5,p*k+s*m+.5,by=m)}else{at2=c()}
at <- c(at,at2)
se2 = seq(1.75, by = k, length = k)
L2 <- levelplot(f(abs(B)), col.regions = rgb.palette, colorkey = NULL,
xlab = NULL, ylab = NULL, main = list(label = title,
cex = 1), panel = function(...) {
panel.levelplot(...)
panel.abline(a = NULL, b = 1, h = seq(1.5, m*s+p* k +
0.5, by = 1), v = seq(1.5, by = 1, length = p *
k+m*s))
bl1 <- seq(k + 0.5, p *
k + 0.5, by = k)
if(m>0){
bl2 <- seq(p*k + 0.5, p *
k + 0.5+s*m, by = m)}else(bl2=c())
b1 <- c(bl1,bl2)
panel.abline(a = NULL, b = 1, v = p*k+.5, lwd = 3)
panel.abline(a = NULL, b = 1, v = b1, lwd = 3)
}, scales = list(x = list(alternating = 1, labels = text,
cex = 1, at = at, tck = c(0, 0)), y = list(alternating = 0,
tck = c(0, 0))))
return(L2)
}
set.seed(1986)
B5 <-matrix(0,nrow=3,ncol=15)
B5[,1:3] <- rep(1,9)*rbinom(9,1,.85)
B5[,4:6] <- rep(1,9)*rbinom(9,1,.65)
B5[,7:9] <- rep(1,9)*rbinom(9,1,.45)
B5[,10:12] <- rep(1,9)*rbinom(9,1,.25)
B5[,13:15] <- rep(1,9)*rbinom(9,1,.05)
HV4 <- SparsityPlot(B5,5,k,0,0,title='Lag-Weighted Lasso')
HVC=SparsityPlot(HLAGC,p,k,0,0,title="Componentwise HLAG")
HVOO=SparsityPlot(HLAGOO,p,k,0,0,title="Own/Other HLAG")
HVELEM=SparsityPlot(HLAGELEM,p,k,0,0,title="Elementwise HLAG")
grid.arrange(HVC,HVOO,HVELEM,HV4,ncol=2)
## -----------------------------------------------------------------------------
data(Y)
# Create a Basic VAR-L (Lasso Penalty) with maximum lag order p=4, 10 grid points with lambda optimized according to rolling validation of 1-step ahead MSFE
mod1<-constructModel(Y,p=4,"Basic",gran=c(150,10),h=1,cv="Rolling",verbose=FALSE,IC=TRUE,model.controls=list(intercept=TRUE))
## -----------------------------------------------------------------------------
results=cv.BigVAR(mod1)
results
## -----------------------------------------------------------------------------
str(results)
## -----------------------------------------------------------------------------
plot(results)
## -----------------------------------------------------------------------------
mod2<-constructModel(Y,p=4,"Basic",gran=c(5,10),h=1,cv="Rolling",verbose=FALSE,IC=FALSE)
res2=cv.BigVAR(mod2)
plot(res2)
## -----------------------------------------------------------------------------
mod3<-constructModel(Y,p=4,"Basic",gran=c(500,10),h=1,cv="Rolling",verbose=FALSE,IC=FALSE)
res3=cv.BigVAR(mod3)
plot(res3)
## -----------------------------------------------------------------------------
SparsityPlot.BigVAR.results(results)
## -----------------------------------------------------------------------------
predict(results,n.ahead=1)
## -----------------------------------------------------------------------------
predict(results,n.ahead=1, confint=TRUE)
## -----------------------------------------------------------------------------
coef(results)
## ----echo=TRUE----------------------------------------------------------------
data(Y) # simulated multivariate time series
# coefficient matrix used to generate Y
data(Generator)
# note that coefficients with a darker shade are larger in magnitude
SparsityPlot(A[1:3,],p=4,3,s=0,m=0,title="Sparsity Structure of Generator Matrix")
## -----------------------------------------------------------------------------
# fit a Basic VARX-L with k=2,m=1,s=2,p=4,lambda=.01
VARX=list(k=2,s=2)
#returns k x (kp+ms+1) coefficient matrix
model=BigVAR.fit(Y,p,"Basic",lambda=1e-2,VARX=VARX,intercept=TRUE)
model
## -----------------------------------------------------------------------------
# N-fold cross validation for VAR
# Y: data
# nfolds: number of cross validation folds
# struct: penalty structure
# p: lag order
# nlambdas: number of lambdas:
# gran1: depth of lambda grid
# seed: set to make it reproducible
NFoldcv <- function(Y,nfolds,struct,p,nlambdas,gran1,seed)
{
MSFE <- matrix(0,nrow=nrow(Y),ncol=10)
A <- constructModel(Y,p,struct=struct,gran=c(gran1,nlambdas),verbose=F)
# construct lag matrix
Z1 <- VARXLagCons(Y,X=NULL,s=0,p=p,0,0)
trainZ <- Z1$Z[2:nrow(Z1$Z),]
trainY <- matrix(Y[(p+1):nrow(Y),],ncol=ncol(Y))
set.seed(seed)
inds <- sample(nrow(trainY))
B <- BigVAR.est(A)
lambda.grid <- B$lambda
folds <- cut(inds,breaks=nfolds,labels=FALSE)
MSFE <- matrix(0,nrow=nfolds,ncol=nlambdas)
for(i in 1:nfolds){
test <- trainY[which(folds==i),]
train <- trainY[which(folds!=i),]
testZ <-t(t(trainZ)[which(folds!=i),])
B=BigVAR.fit(train,p=p,lambda=lambda.grid,struct='Basic')
#iterate over lambdas
for(j in 1:nlambdas){
MSFETemp <- c()
for(k in 1:nrow(test)) {
tempZ <- testZ[,k,drop=FALSE]
bhat <- matrix(B[,2:dim(B)[2],j],nrow=ncol(Y),ncol=(p*ncol(Y)))
preds <- B[,1,j]+bhat%*%tempZ
MSFETemp <- c(MSFETemp,sum(abs(test[k,]-preds))^2)
}
MSFE[i,j] <- mean(MSFETemp)
}
}
return(list(MSFE=MSFE,lambdas=lambda.grid))
}
# 10 fold cv
MSFEs<-NFoldcv(Y,nfolds=10,"Basic",p=5,nlambdas=10,gran1=50,seed=2000)
# choose smaller lambda in case of ties (prevents extremely sparse solutions)
opt=MSFEs$lambda[max(which(colMeans(MSFEs$MSFE)==min(colMeans(MSFEs$MSFE))))]
opt
## -----------------------------------------------------------------------------
data(Y)
p <- 4
T1 <- floor(nrow(Y)/3)
T2 <- floor(2*nrow(Y)/3)
#Matrix of zeros for X
X <- matrix(0,nrow=nrow(Y),ncol=ncol(Y))
BICMSFE <- VARXForecastEval(Y,X,p,0,T1,T2,"BIC",1)
## -----------------------------------------------------------------------------
mod <- VARXFit(Y,3,NULL,NULL)
pred <-PredictVARX(mod)
pred
## -----------------------------------------------------------------------------
library(MASS)
k=3;p=6
B=matrix(0,nrow=k,ncol=p*k)
A1<- matrix(c(.4,-.07,.08,-.06,-.7,.07,-.08,.07,-.4),ncol=3,nrow=3)
A2 <- matrix(c(-.6,0,0,0,-.4,0,0,0,.5),ncol=3,nrow=3)
B[,1:k]=A1
B[,(5*k+1):(6*k)]=A2
A <- VarptoVar1MC(B,p,k)
set.seed(2000)
Y <-MultVarSim(k,A,p,.005*diag(k),500)
SparsityPlot(B,p,k,0,0, title='Sparsity Plot of VAR Coefficient Matrix')
## ----cache=TRUE---------------------------------------------------------------
library(MCS)
# train on first 250 observations
YTrain=Y[1:250,]
Loss <- c()
T1=1*floor(nrow(YTrain)/3);T2=2*floor(nrow(YTrain)/3)
p=8
structures<-c("Basic","BasicEN","Lag","SparseLag","OwnOther","HLAGC","HLAGOO","HLAGELEM","MCP","SCAD")
for(i in structures){
# construct BigVAR object; we will perform a dual grid search for the sparse lag and sparse own/other models
if(i%in%c("SparseLag","SparseOO")){
alpha=seq(0,1,length=10)
}else{
alpha=0
}
A<- constructModel(YTrain,p=p,struct=i,gran=c(100,10),T1=T1,T2=T2,verbose=FALSE,model.controls=list(intercept=FALSE,alpha=alpha))
# perform rolling cv
res<- cv.BigVAR(A)
# save out of sample loss for each structure
Loss <- cbind(Loss,res@OOSMSFE)
}
# construct AIC and BIC benchmarks
BIC <- VARXForecastEval(YTrain,matrix(0,nrow=nrow(YTrain)),p,0,T2,nrow(YTrain),"BIC",1)$MSFE
AIC <- VARXForecastEval(YTrain,matrix(0,nrow=nrow(YTrain)),p,0,T2,nrow(YTrain),"AIC",1)$MSFE
Loss <- as.data.frame(Loss)
names(Loss) <- structures
Loss <- cbind(Loss,BIC,AIC)
names(Loss)[(ncol(Loss)-1):ncol(Loss)] <- c("BIC","AIC")
names(Loss) <- paste0(names(Loss),"L")
mcs.test <- MCSprocedure(as.matrix(Loss),verbose=FALSE)
mcs.test
## -----------------------------------------------------------------------------
suppressMessages(library(expm))
# Phi k x kp coefficient matrix
# sigma kxk residual covariance matrix
# n number of time steps to run IRF
# p lag order
# k number of series
# Y0: k dimensional vector reflecting initialization of the IRF
generateIRF <- function(Phi,Sigma,n,k,p,Y0)
{
if(p>1){
A <-VarptoVar1MC(Phi,p,k)
}else{
A <- Phi
}
J <- matrix(0,nrow=k,ncol=k*p)
diag(J) <- 1
P <- t(chol(Sigma))
IRF <- matrix(0,nrow=k,ncol=n+1)
for(i in 0:n)
{
phi1 <- J%*%(A%^%i)%*%t(J)
theta20 <- phi1%*%P
IRF[,i+1] <- (theta20%*%Y0)
}
return(IRF)
}
## ---- echo=TRUE,eval=TRUE,cache=TRUE------------------------------------------
require(quantmod)
require(zoo)
# get GDP, Federal Funds Rate, CPI from FRED
#Gross Domestic Product (Relative to 2000)
getSymbols('GDP',src='FRED',type='xts')
GDP<- aggregate(GDP,as.yearqtr,mean)
GDP <- GDP/mean(GDP["2000"])*100
# Transformation Code: First Difference of Logged Variables
GDP <- diff(log(GDP))
index(GDP) <- as.yearqtr(index(GDP))
# Federal Funds Rate
getSymbols('FEDFUNDS',src='FRED',type='xts')
FFR <- aggregate(FEDFUNDS,as.yearqtr,mean)
# Transformation Code: First Difference
FFR <- diff(FFR)
# CPI ALL URBAN CONSUMERS, relative to 1983
getSymbols('CPIAUCSL',src='FRED',type='xts')
CPI <- aggregate(CPIAUCSL,as.yearqtr,mean)
CPI <- CPI/mean(CPI['1983'])*100
# Transformation code: difference of logged variables
CPI <- diff(log(CPI))
# Seasonally Adjusted M1
getSymbols('M1SL',src='FRED',type='xts')
M1<- aggregate(M1SL,as.yearqtr,mean)
# Transformation code, difference of logged variables
M1 <- diff(log(M1))
# combine series
Y <- cbind(CPI,FFR,GDP,M1)
names(Y) <- c("CPI","FFR","GDP","M1")
Y <- na.omit(Y)
k=ncol(Y)
T <- nrow(Y)
# start/end of rolling validation
T1 <- which(index(Y)=="1985 Q1")
T2 <- which(index(Y)=="2005 Q1")
#Demean
Y <- Y - (c(rep(1, nrow(Y))))%*%t(c(apply(Y[1:T1,], 2, mean)))
#Standarize Variance
for (i in 1:k) {
Y[, i] <- Y[, i]/apply(Y[1:T1,], 2, sd)[i]
}
library(expm)
# Fit an Elementwise HLAG model
Model1=constructModel(as.matrix(Y),p=4,struct="HLAGELEM",gran=c(25,10),verbose=FALSE,VARX=list(),T1=T1,T2=T2)
Model1Results=cv.BigVAR(Model1)
# generate IRF for 10 quarters following a 1 percent increase in the federal funds rate
IRFS <- generateIRF(Phi=Model1Results@betaPred[,2:ncol(Model1Results@betaPred)],Sigma=cov(Model1Results@resids),n=10,k=ncol(Y),p=4,Y0=c(0,.01,0,0))
## ----functions, include=FALSE, echo=FALSE-------------------------------------
# A function for captioning and referencing images
fig <- local({
i <- 0
ref <- list()
list(
cap=function(refName, text) {
i <<- i + 1
ref[[refName]] <<- i
paste("Figure ", i, ": ", text, sep="")
},
ref=function(refName) {
ref[[refName]]
})
})
## ----irf_plot,fig.cap='\\label{fig:irf_plot} Generated Impulse Reponses', echo=FALSE----
# Plot IRFs
par(mfrow=c(2,2))
for(i in 1:4)
{
plot(IRFS[i,],type='l',main=names(Y)[i],ylab="",xlab="")
}
# ---- end of inst/doc/BigVAR.R ----
---
title: "BigVAR: Tools for Modeling Sparse Vector Autoregressions with Exogenous Variables"
author:
- Will Nicholson
- Jacob Bien
- David Matteson
- Ines Wilms
date: "`r format(Sys.time(), '%B %d, %Y')`"
bibliography: additional_material/bigvar_references.bib
link-citations: true
output:
html_document:
toc: true
toc_depth: 3
fig_caption: yes
vignette: >
%\VignetteIndexEntry{BigVAR: Tools for Modeling Sparse Vector Autoregressions with Exogenous Variables}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
TeX: { equationNumbers: { autoNumber: "all" } }
});
</script>
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
# Overview
${\tt BigVAR}$ is the companion R package to the papers "**VARX-L: Structured Regularization for Large Vector Autoregressions with Exogenous Variables**" (Joint with [David Matteson](https://davidsmatteson.com/) and [Jacob Bien](http://faculty.marshall.usc.edu/Jacob-Bien/)) and "**High Dimensional Forecasting via Interpretable Vector Autoregression (HLag)**" (Joint with [Ines Wilms](https://sites.google.com/view/iwilms), David Matteson, and Jacob Bien).
${\tt BigVAR}$ allows for the simultaneous estimation and forecasting of high-dimensional time series by applying structured penalties to the standard vector autoregression (VAR) and vector autoregression with exogenous variables (VARX) frameworks. This is useful in many applications which make use of time-dependent data, such as macroeconomics, finance, and internet traffic, as the conventional VAR and VARX are heavily overparameterized. In addition, as stated in @ghysels, VARs with "large enough" lag order can adequately approximate VARMA models.
Our package adapts solution methods from the regularization literature to a multivariate time series setting, allowing for the computationally efficient estimation of high-dimensional VAR and VARX models.
We also allow for least squares refitting based on the nonzero support selected by our procedures as well as the ability to incorporate mild non-stationarity by shrinking toward a vector random walk. For more information on these extensions, we refer you to our papers @Nicholson and @Nicholson2.
This vignette presents a brief formal overview of our notation, the models contained in ${\tt BigVAR}$, and the functionality of the package. For an interactive tutorial see [the shiny app](http://bigvar.ddns.net:3838/BigVAR/). Any questions or feature requests with regard to ${\tt BigVAR}$ can be addressed to wbn8@cornell.edu. If you have basic questions about VAR or multivariate time series in general, we recommend consulting @lutk.
## Roadmap
[Notation and Methodology](#notation) provides an overview of the VARX model as well as the ${\tt BigVAR}$ framework. Our penalty structures are described in [VARX-L](#varxl) and [HLAG](#hlag). Empirical penalty parameter selection procedures are discussed in [Penalty Parameter Selection](#penparam) and [N-fold cross validation](#nfold). Package specific syntax is detailed in [BigVAR Details](#package_specifics). Finally, example applications and extensions of ${\tt BigVAR}$ are provided in [Selecting a Structure](#structure) and [Impulse Response Functions](#irf).
## Installation {#install}
The stable version of ${\tt BigVAR}$ is available on [cran](https://CRAN.R-project.org/package=BigVAR). The developmental release can be installed from github using the following command:
```{r, eval=FALSE}
install_github("wbnicholson/BigVAR/BigVAR")
```
## Quick Start
In this section, we provide a basic overview of the capabilities of ${\tt BigVAR}$. Further sections will provide elaboration as to the full functionality of ${\tt BigVAR}$.
$\mathbf{Y}$, a simulated multivariate time series of dimension $100\times 3$ is included with ${\tt BigVAR}$ and is used throughout this vignette (details as to its construction are provided in [Example Data](#data)). It can be accessed by calling:
```{r}
library(BigVAR)
data(Y)
```
In order to forecast $\hat{y}_{t+1}$ using a vector autoregression with a lasso penalty $\lambda=1$ and maximum lag order of 2, one can simply run
```{r}
# 3 x 7 coefficient matrix
B = BigVAR.fit(Y,struct='Basic',p=2,lambda=1)[,,1]
# construct 7 x 99 lag matrix of Y
Z = VARXLagCons(Y,p=2,oos=TRUE)$Z
# obtain out of sample forecasts
yhat = B%*%Z[,ncol(Z),drop=F]
```
Some potential use cases of ${\tt BigVAR.fit}$ are elaborated upon in [Extensions](#extensions). More sophisticated analysis requires the construction of an object of class ${\tt BigVAR}$ as described in [constructModel](#constructModel).
## Notation and Methodology {#notation}
Let $\{ \mathbf{y_t}\}_{t = 1}^T$ denote a $k$ dimensional vector time series and $\{\mathbf{x}_t\}_{t=1}^{T}$ denote an $m$-dimensional unmodeled \emph{exogenous} series. A vector autoregression with exogenous variables of order (p,s) , VARX$_{k,m}$($p,s$), can be expressed as
$$
\begin{align}
\label{VAR1}
\mathbf{y}_t=\mathbf{\nu}+\sum_{\ell=1}^p\mathbf{\Phi}^{(\ell)}\mathbf{y}_{t-\ell}+\sum_{j=1}^s \mathbf{\beta}^{(j)}\mathbf{x}_{t-j}+\mathbf{u}_t \; \text{ for } \;t=1,\ldots,T,
\end{align}
$$
in which $\mathbf{\nu}$ denotes a $k\times 1$ intercept vector, each $\mathbf{\Phi}^{(\ell)}$ represents a $k\times k$ endogenous (modeled) coefficient matrix, each $\mathbf{\beta}^{(j)}$ represents a $k\times m$ exogenous (unmodeled) coefficient matrix, and $\mathbf{u}_t\stackrel{\text{wn}}{\sim}(\mathbf{0},\mathbf{\Sigma}_u)$. Note that the VAR is a special case of Equation $\ref{VAR1}$ in which the second summation ($\sum_{j=1}^s \mathbf{\beta}^{(j)}\mathbf{x}_{t-j}$) is not included.
```{r, echo=FALSE}
suppressMessages(library(BigVAR,quietly=TRUE,warn.conflicts=FALSE))
#onorm=c()
library(lattice)
#Oracle=diag(3)
#SigmaU=diag(3)
SparsityPlot <- function (B, p, k,s,m, title = NULL)
{
text <- c()
for (i in 1:p) {
text1 <- as.expression(bquote(bold(Phi)^(.(i))))
text <- append(text, text1)
}
## text <- c()
if(s>0){
for (i in (p+1):(p+s+1)) {
text1 <- as.expression(bquote(bold(beta)^(.(i-p))))
text <- append(text, text1)
}
}
f <- function(m) t(m)[, nrow(m):1]
rgb.palette <- colorRampPalette(c("white", "grey" ),space = "Lab")
## rgb.palette <- colorRampPalette(c("white", "blue"), space = "Lab")
at <- seq(k/2 + 0.5, p * (k)+ 0.5, by = k)
at2 <- seq(p*k+s/2+.5,p*k+s*m+.5,by=s)
at <- c(at,at2)
se2 = seq(1.75, by = k, length = k)
L2 <- levelplot(f(abs(B)), col.regions = rgb.palette, colorkey = NULL,
xlab = NULL, ylab = NULL, main = list(label = title,
cex = 1), panel = function(...) {
panel.levelplot(...)
panel.abline(a = NULL, b = 1, h = seq(1.5, m*s+p* k +
0.5, by = 1), v = seq(1.5, by = 1, length = p *
k+m*s))
bl1 <- seq(k + 0.5, p *
k + 0.5, by = k)
bl2 <- seq(p*k + 0.5, p *
k + 0.5+s*m, by = m)
b1 <- c(bl1,bl2)
panel.abline(a = NULL, b = 1, v = p*k+.5, lwd = 7)
panel.abline(a = NULL, b = 1, v = b1, lwd = 3)
}, scales = list(x = list(alternating = 1, labels = text,
cex = 1, at = at, tck = c(0, 0)), y = list(alternating = 0,
tck = c(0, 0))))
return(L2)
}
B1 <- matrix(rep(1,57)*rbinom(57,1,.6),nrow=3,ncol=19)
B2 <-matrix(0,nrow=3,ncol=19)
B2[,1:3] <- 1
B2[,10:12] <- 1
B2[,16] <- 1
B2[,19] <- 1
B3 <-matrix(0,nrow=3,ncol=19)
diag(B3[,1:3])<- 1
B3[,10:12] <- 1
diag(B3[,10:12])<- 0
B3[,16] <- 1
B3[,19] <- 1
B4 <-matrix(0,nrow=3,ncol=19)
B4[,1:3] <- rep(1,9)*rbinom(9,1,.6)
B4[,10:12] <- rep(1,9)*rbinom(9,1,.4)
B4[,18] <- c(1,0,1)
p=5;k=3
Lasso <- SparsityPlot(B1,p,k,m=2,s=2,title="Basic VARX-L")
Group <- SparsityPlot(B2,p,k,m=2,s=2,title="Lag VARX-L")
OO <- SparsityPlot(B3,p,k,m=2,s=2,title="Own/Other VARX-L")
Sparse <- SparsityPlot(B4,p,k,m=2,s=2,title="Sparse Lag VARX-L")
```
### Example Sparsity Patterns
```{r,echo=FALSE}
library(gridExtra,quietly=TRUE)
grid.arrange(Lasso,Group,OO,Sparse,ncol=2)
```
1. The Basic VARX-L will zero individual elements in $[\mathbf{\Phi},\mathbf{\beta}]$
2. The Lag VARX-L assigns a group lasso penalty by lag coefficient matrix $\mathbf{\Phi}$. Each column in the exogenous coefficient matrices $\mathbf{\beta}$ is assigned to its own group.
3. The Own/Other VARX-L assigns separate group lasso penalties to _own_ _lags_ $(\text{diag}(\mathbf{\Phi}^{(\ell)}))$ and _other_ _lags_ $(\mathbf{\Phi}^{(\ell)^{-}})$. It applies the same penalty as the Lag VARX-L to the exogenous structure.
4. The Sparse Lag and Sparse Own/Other VARX-L extend the aforementioned structures to allow for within-group sparsity.
We recently added an _elastic net_ penalty (BasicEN) which combines the Basic VARX-L with a ridge penalty.
### VARX-L {#varxl}
$\tt BigVAR$ can be used to apply the following penalties to the VARX (Equation $\ref{VAR1}$):
|Model Structure|BigVAR Name| Penalty | Solution Algorithm |
|---------------|:------------------:|-------------------:|-------------------:|
| Basic |$\tt{Basic}$| $\lambda\|[\mathbf{\Phi},\mathbf{\beta}]\|_1$ | Coordinate Descent|
| Basic-Elastic Net |$\tt{BasicEN}$| $\lambda\big(\alpha\|[\mathbf{\Phi},\mathbf{\beta}]\|_1+(1-\alpha)\|[\mathbf{\Phi},\mathbf{\beta}]\|_2^2\big)$| Coordinate Descent|
| Lag VARX-L| $\tt{Lag}$| $\lambda\big(\sum_{\ell=1}^p\|\mathbf{\Phi}_\ell\|_F+\sqrt{k}\sum_{j=1}^s\sum_{i=1}^m\|\beta_{\cdot,i}^{(j)}\|_F \big)$ |Block Coordinate Descent |
| Own/Other VARX-L|$\tt{OwnOther}$|$\lambda(\rho_1\sum_{\ell=1}^p \|\text{diag}(\mathbf{\Phi}^{(\ell)})\|_F+\gamma_1\sum_{\ell=1}^p\|\mathbf{\Phi}^{(\ell^{-})}\|_F+\sqrt{k}\sum_{j=1}^s\sum_{i=1}^m\|\beta_{\cdot,i}^{(j)}\|_F)$|Block Coordinate Descent |
|Sparse Lag VARX-L|$\tt{SparseLag}$|$\lambda\big[(1-\alpha)\sqrt{k^2}\sum_{\ell=1}^p\|\mathbf{\Phi}^{(\ell)}\|_F+\alpha\|\mathbf{\Phi}\|_1+(1-\alpha)\sqrt{k}\sum_{j=1}^s\sum_{i=1}^m\|\beta_{\cdot,i}^{(j)}\|_F+\alpha\|\mathbf{\beta}\|_1\big]$|Generalized Gradient Descent|
|Own/Other Sparse VARX-L|$\tt{SparseOwnOther}$|$\lambda(1-\alpha)[\rho_1\sum_{\ell=1}^p \|\text{diag}(\mathbf{\Phi}^{(\ell)})\|_F+\gamma_1\sum_{\ell=1}^p\|\mathbf{\Phi}^{(\ell)^{-}}\|_F]+\alpha\lambda\|\mathbf{\Phi}\|{_1}$|Generalized Gradient Descent|
$\lambda>0$ is a penalty parameter that can be selected via a validation procedure or chosen by the user; larger values of $\lambda$ encourage a greater degree of sparsity. $0\leq \alpha\leq 1$ is an additional penalty parameter set by default to $\frac{1}{k+1}$ to control within-group sparsity in the sparse group setting or the trade-off between the ridge and lasso penalty in the elastic net setting. We allow for $\alpha$ to be estimated empirically with the option ${\tt dual=TRUE}$ in the function ${\tt constructModel}$. $\rho_1=\sqrt{k}$ and $\gamma_1=\sqrt{k(k-1)}$ are fixed weights accounting for the cardinality of each group.
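For instance, supplying a vector of candidate $\alpha$ values triggers a dual grid search over both $\lambda$ and $\alpha$, as in the following minimal sketch (which uses ${\tt constructModel}$, detailed in [BigVAR Details](#package_specifics); the grid settings are illustrative):
```{r, eval=FALSE}
# sketch: dual grid search over lambda and alpha for the Sparse Lag structure
mod_dual <- constructModel(Y, p = 4, struct = "SparseLag", gran = c(50, 10),
                           verbose = FALSE,
                           model.controls = list(alpha = seq(0, 1, length = 5)))
res_dual <- cv.BigVAR(mod_dual)
```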
### HLAG {#hlag}
```{r,echo=FALSE}
k=3;p=5
HLAGC <- matrix(0,nrow=k,ncol=k*p)
HLAGC[1,] <- 1
HLAGC[2,1:6] <- 1
HLAGC[3,1:12] <- 1
HLAGOO <- matrix(0,nrow=k,ncol=k*p)
HLAGOO[1,1:13] <- 1
HLAGOO[2,1:6] <- 1
HLAGOO[3,c(1:9,12)] <- 1
HLAGELEM <- matrix(0,nrow=k,ncol=k*p)
HLAGELEM[1,c(1:10,12,13)] <- 1
HLAGELEM[2,c(1,3,4,6,7,9,10,12,13,15)] <- 1
HLAGELEM[3,c(1:8,10:11,13:14)] <- 1
SparsityPlot <-
function (B, p, k,s,m, title = NULL)
{
text <- c()
for (i in 1:p) {
text1 <- as.expression(bquote(bold('\U03A6')^(.(i))))
text <- append(text, text1)
}
## text <- c()
if(m>0){
for (i in (p+1):(p+s+1)) {
text1 <- as.expression(bquote(bold('\U03B2')^(.(i-p))))
text <- append(text, text1)
}
}
f <- function(m) t(m)[, nrow(m):1]
rgb.palette <- colorRampPalette(c("white", "grey" ),space = "Lab")
## rgb.palette <- colorRampPalette(c("white", "blue"), space = "Lab")
at <- seq(k/2 + 0.5, p * (k)+ 0.5, by = k)
if(m>0){
at2 <- seq(p*k+m/2+.5,p*k+s*m+.5,by=m)}else{at2=c()}
at <- c(at,at2)
se2 = seq(1.75, by = k, length = k)
L2 <- levelplot(f(abs(B)), col.regions = rgb.palette, colorkey = NULL,
xlab = NULL, ylab = NULL, main = list(label = title,
cex = 1), panel = function(...) {
panel.levelplot(...)
panel.abline(a = NULL, b = 1, h = seq(1.5, m*s+p* k +
0.5, by = 1), v = seq(1.5, by = 1, length = p *
k+m*s))
bl1 <- seq(k + 0.5, p *
k + 0.5, by = k)
if(m>0){
bl2 <- seq(p*k + 0.5, p *
k + 0.5+s*m, by = m)}else(bl2=c())
b1 <- c(bl1,bl2)
panel.abline(a = NULL, b = 1, v = p*k+.5, lwd = 3)
panel.abline(a = NULL, b = 1, v = b1, lwd = 3)
}, scales = list(x = list(alternating = 1, labels = text,
cex = 1, at = at, tck = c(0, 0)), y = list(alternating = 0,
tck = c(0, 0))))
return(L2)
}
set.seed(1986)
B5 <-matrix(0,nrow=3,ncol=15)
B5[,1:3] <- rep(1,9)*rbinom(9,1,.85)
B5[,4:6] <- rep(1,9)*rbinom(9,1,.65)
B5[,7:9] <- rep(1,9)*rbinom(9,1,.45)
B5[,10:12] <- rep(1,9)*rbinom(9,1,.25)
B5[,13:15] <- rep(1,9)*rbinom(9,1,.05)
HV4 <- SparsityPlot(B5,5,k,0,0,title='Lag-Weighted Lasso')
HVC=SparsityPlot(HLAGC,p,k,0,0,title="Componentwise HLAG")
HVOO=SparsityPlot(HLAGOO,p,k,0,0,title="Own/Other HLAG")
HVELEM=SparsityPlot(HLAGELEM,p,k,0,0,title="Elementwise HLAG")
grid.arrange(HVC,HVOO,HVELEM,HV4,ncol=2)
```
|Model Structure|BigVAR Name| Penalty |
|---------------|:------------------:|-------------------:|
| Componentwise| $\tt{HLAGC}$ | $\sum_{i=1}^k\sum_{\ell=1}^p\|\mathbf{\Phi}_i^{(\ell:p)}\|_2.$ |
| Own/Other | $\tt{HLAGOO}$ | $\sum_{i=1}^k\sum_{\ell=1}^p\left[\|\mathbf{\Phi}_{i}^{(\ell:p)}\|_2+\|(\mathbf{\Phi}_{i,-i}^{(\ell)}, \mathbf{\Phi}_{i}^{([\ell+1]:p)})\|_2\right]$. |
|Elementwise|$\tt{HLAGELEM}$| $\sum_{i=1}^k\sum_{j=1}^k\sum_{\ell=1}^p\|\mathbf{\Phi}_{ij}^{(\ell:p)}\|_2$. |
|Lag-weighted Lasso|$\tt{Tapered}$| $\sum_{\ell=1}^p\ell^{\gamma}\|\mathbf{\Phi}^{(\ell)}\|_1$.|
We additionally incorporate several VAR-specific penalties that directly address lag order selection. In addition to returning sparse solutions, our $\text{HLAG}_k(p)$ procedures induce regularization toward models with low maximum lag order. To allow for greater flexibility, instead of imposing a single, universal lag order (as information criterion minimization based approaches tend to do), we allow it to vary across marginal models (i.e. the rows of the coefficient matrix $\mathbf{\Phi}=[\mathbf{\Phi}^{(1)},\dots,\mathbf{\Phi}^{(p)}]$).
$\tt{BigVAR}$ includes three HLAG models as well as the _Lag-weighted Lasso_, which incorporates a lasso penalty that increases geometrically as the lag order increases. This penalty does not directly address lag order, but it encourages a greater degree of sparsity at more distant lags (as controlled by the additional penalty parameter $\gamma \in (0,1)$).
The componentwise HLAG embeds a conventional lag order selection penalty into the hierarchical group lasso; the maximum lag order can vary across series, but within a series all components have the same maximum lag. The *Own/Other* HLAG adds another layer of lag order: within a lag, a series' *own* lags are prioritized over *other* lags. Finally, the Elementwise HLAG allows for the most flexibility, allowing each series in each marginal model to have its own maximum lag order, resulting in $k^2$ possible lag orders.
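As a quick sketch, each HLAG variant can be fit and compared using the same workflow described in [BigVAR Details](#package_specifics) (grid settings are illustrative):
```{r, eval=FALSE}
# sketch: fit and cross-validate each HLAG variant on the included data
for (s in c("HLAGC", "HLAGOO", "HLAGELEM")) {
  mod_hlag <- constructModel(Y, p = 4, struct = s, gran = c(50, 10), verbose = FALSE)
  print(cv.BigVAR(mod_hlag))
}
```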
#### Additional Penalties
In addition to the HLAG and VARX-L frameworks we also include two non-convex penalties: Smoothly Clipped Absolute Deviation (SCAD) and Minimax Concave Penalty (MCP). These penalties serve to obviate the bias of lasso-type penalties, which tend to "over-regularize", by decreasing the amount of penalization as the magnitude of the coefficient decreases. Though these penalties are not convex, the coordinate descent algorithm developed by @breheny fits seamlessly into the ${\tt BigVAR}$ framework. Along with SCAD and MCP, we also incorporate the Bayesian VAR developed by @bgr into the BigVAR framework under the name ${\tt BGR}$.
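A minimal sketch of both options follows (the value of $\gamma$ is the package default; grid settings are illustrative):
```{r, eval=FALSE}
# sketch: non-convex MCP penalty; gamma controls the degree of concavity
mod_mcp <- constructModel(Y, p = 4, struct = "MCP", gran = c(50, 10), verbose = FALSE,
                          model.controls = list(gamma = 3))
# the Bayesian VAR of Banbura et al. is requested via struct = "BGR"
mod_bgr <- constructModel(Y, p = 4, struct = "BGR", gran = c(50, 10), verbose = FALSE)
```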
<!-- |Model Structure|BigVAR Name| Penalty | -->
<!-- |---------------|:------------------:|-------------------:| -->
<!-- | Smoothly Clipped Absolute Deviation| $\tt{SCAD}$ | $$ | -->
<!-- | Minimax Concave Penalty | $\tt{MCP}$ | $\sum_{i=1}^k\sum_{\ell=1}^p\left[\|\mathbf{\Phi}_{i}^{(\ell:p)}\|_2+\|(\mathbf{\Phi}_{i,-i}^{(\ell)}, \mathbf{\Phi}_{i}^{([\ell+1]:p)})\|_2\right]$. | -->
# BigVAR Details {#package_specifics}
Our package allows for the straightforward estimation of the aforementioned VARX-L and HLAG procedures.
## Input Arguments {#constructModel}
The end-user completely specifies their model by constructing an object of class ${\tt BigVAR}$.
To construct an object of class ${\tt BigVAR}$, simply run the command ${\tt constructModel}$:
```{r}
data(Y)
# Create a Basic VAR-L (Lasso Penalty) with maximum lag order p=4, 10 grid points with lambda optimized according to rolling validation of 1-step ahead MSFE
mod1<-constructModel(Y,p=4,"Basic",gran=c(150,10),h=1,cv="Rolling",verbose=FALSE,IC=TRUE,model.controls=list(intercept=TRUE))
```
The first four arguments below are required; the rest only need to be specified if expanded or non-standard functionality is requested.
The input arguments are:
1. ${\tt Y}$: Multivariate time series of the form $T\times k$
2. ${\tt p}$: Maximal lag order
3. ${\tt struct}$: Structure of Penalty. Choices are
1. ${\tt Basic}$: Lasso Penalty
2. ${\tt BasicEN}$: Lasso with Elastic Net
3. ${\tt Lag}$: Lag Penalty
4. ${\tt OwnOther}$: Own/Other Penalty
5. ${\tt SparseLag}$: Sparse Lag Penalty
6. ${\tt SparseOwnOther}$: Sparse Own/Other Lag Penalty
7. ${\tt SCAD}$: Smoothly Clipped Absolute Deviation
8. ${\tt MCP}$: Minimax Concave Penalty
9. ${\tt EFX}$: Endogenous-First VARX
10. ${\tt HLAGC}$: Componentwise HLAG
11. ${\tt HLAGOO}$: Own/Other HLAG
12. ${\tt HLAGELEM}$: Elementwise HLAG
13. ${\tt Tapered}$: Lasso with Lag Penalty
14. ${\tt BGR}$: Bayesian VAR as detailed in @bgr.
The first 8 can be applied to VAR and VARX models, EFX can only be applied to VARX models, and the remaining 5 are only applicable to VAR models.
4. ${\tt gran}$: two options for the grid of penalty parameters $\lambda$.
The first option controls the depth of the lambda grid (a good default option is 50). The second option controls the number of grid values (a good default is 10). If your grid does not go deep enough, your forecast results may be suboptimal, but if it is too deep, the routine may take a substantial amount of time to run. The index of the optimal penalty parameter is monitored by ${\tt cv.BigVAR}$. If it is on the border of the grid, it is recommended to re-run the function with a larger granularity parameter. If you set the option ${\tt ownlambdas}$ to ${\tt TRUE}$, ${\tt gran}$ is instead used to supply a user-defined grid of lambdas (see the sketch following this list). For more details on the granularity parameter, see [Diagnostics](#diag).
5. ${\tt h}$: Forecast horizon in which to optimize (default 1).
6. ${\tt verbose}$: Logical, if ${\tt TRUE}$, will display a progress bar for the validation and evaluation procedures.
7. ${\tt IC}:$ Logical, if ${\tt TRUE}$, will return AIC and BIC VAR(X) benchmarks.
8. ${\tt VARX}:$ VARX characteristics in a list form. The list must contain two arguments:
1. k: number of modeled series.
2. s: maximum lag order for unmodeled series.
3. contemp: (Optional) Indicator for contemporaneous unmodeled series.
9. ${\tt window.size}$: Size of window for rolling cv. (Default is 0; resulting in an expanding window).
10. ${\tt T1}$: Start of the rolling cv period (default $\lfloor \frac{T}{3} \rfloor$)
11. ${\tt T2}$: End of rolling cv period (default $\lfloor \frac{2T}{3} \rfloor$)
12. ${\tt cv}:$ Type of validation (as described in [Penalty Parameter Selection](#penparam)) used to select the penalty parameter (options are "rolling" (default) and "LOO," a pseudo "leave one out" cv procedure that respects time dependence).
13. ${\tt ownlambdas}:$ Logical, Indicator as to whether the user supplied a penalty grid in slot ${\tt gran}$ (default ${\tt FALSE}$).
14. ${\tt recursive}$: Whether recursive (iterated) or direct forecasts are used for multi-step VAR predictions (default FALSE, indicating direct forecasts are used). Note that only direct forecasts are available for VARX models. For more information on the distinction consult @marcellino.
15. ${\tt \verb|separate_lambdas|}$: Logical, indicator to use separate penalty parameters for each series. This option is only valid for the structures ${\tt Basic, BasicEN,HLAG,HLAGOO,HLAGELEM, SCAD, MCP}$.
16. ${\tt \verb|model.controls|}$ As the capabilities of BigVAR have expanded, we have decided to consolidate parameters into the list model.controls. These parameters include:
1. ${\tt tol}$: Optimizer tolerance (default $0.0001$).
2. ${\tt intercept}$: Logical, indicator as to whether an intercept should be fit (default ${\tt TRUE}$). (Note that the intercept is fit separately and not subject to regularization).
3. ${\tt MN}$: Logical, option to select a pseudo "Minnesota Prior" (shrinks series toward a known constant matrix ; useful for mildly non-stationary data)
4. ${\tt C}$: vector of coefficients to shrink toward (only used if ${\tt MN}$ is ${\tt TRUE}$, default is $\mathbf{I}_k$, corresponding to a vector random walk).
5. ${\tt delta}$: Delta for [huber loss](https://en.wikipedia.org/wiki/Huber_loss) function (default 2.5).
6. ${\tt gamma}$: Gamma parameter for ${\tt SCAD}$ or ${\tt MCP}$ (default 3).
7. ${\tt RVAR}$: Logical, option to use our relaxed VAR(X) procedure to re-fit the nonzero support selected by a model via least squares (default ${\tt FALSE}$).
8. ${\tt alpha}$: Numeric or vector; user defined $0\leq \alpha \leq 1$ denoting the trade-off between $L_1$ and $L_2$ penalties (in the Sparse Lag and Sparse Own/Other structures) or the trade-off between the lasso and ridge penalties in the elastic net structure. Defaults to $\frac{1}{k+1}$ if not specified.
17. ${\tt linear}$: Logical, indicator for linear lambda grid (default ${\tt TRUE}$; ${\tt FALSE}$ constructs a log-linear grid).
18. ${\tt \verb|rolling_oos|}$: Logical, option to re-determine the optimal penalty parameter following each iteration of rolling cross validation (see [Rolling Extension](#rolling_oos)), default ${\tt FALSE}$.
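For example, to supply your own penalty grid rather than having ${\tt BigVAR}$ construct one, set ${\tt ownlambdas=TRUE}$ and pass the grid through ${\tt gran}$ (a minimal sketch; the grid values are illustrative):
```{r, eval=FALSE}
# sketch: user-defined lambda grid via ownlambdas
mod_user <- constructModel(Y, p = 4, struct = "Basic",
                           gran = c(5, 1, 0.5, 0.1, 0.05),
                           ownlambdas = TRUE, verbose = FALSE)
```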
Once we construct the model object, we can run ${\tt \verb|cv.BigVAR(mod1)|}$, which selects the optimal penalty parameter via a validation procedure, evaluates its forecast accuracy, and compares it against conditional mean, random walk, AIC, and BIC VAR(X) benchmarks over an out-of-sample period ${\tt T_2}$ through ${\tt T}$.
```{r}
results=cv.BigVAR(mod1)
results
```
## Penalty Parameter Selection {#penparam}
In order to account for time-dependence, penalty parameter selection is conducted in a rolling manner. Define time indices $T_1=\left \lfloor \frac{T}{3} \right\rfloor$, and $T_2=\left\lfloor \frac{2T}{3} \right\rfloor$
The training period $T_1+1$ through $T_2$ is used to select $\lambda$,
$T_2+1$ through $T$ is for evaluation of forecast accuracy in a rolling manner. The process is visualized in the following figure

Define $\hat{\mathbf{y}}_{t+1}^{\lambda}$ as the one-step ahead forecast based on $\mathbf{y}_1,\dots\mathbf{y}_t$. We choose $\lambda$ based on minimizing one-step ahead mean square forecast error (MSFE) over the training period: MSFE$(\lambda)=\frac{1}{(T_2-T_1-1)}\sum_{t=T_1}^{T_2-1} \|\hat{\mathbf{y}}_{t+1}^{\lambda}-\mathbf{y}_{t+1}\|_F^2.$
Though we select $\lambda$ based on minimizing one-step ahead mean squared forecast error (MSFE) by default, this can be easily generalized to longer forecast horizons (by adjusting ${\tt h}$ in ${\tt constructModel}$) or alternative loss functions (by adjusting ${\tt loss}$ in the ${\tt model.controls}$ list within ${\tt constructModel}$).
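For example, the following sketch selects $\lambda$ based on 4-step-ahead forecasts under Huber loss (we assume here that ${\tt loss='Huber'}$ is the corresponding ${\tt model.controls}$ option, with ${\tt delta}$ as described above):
```{r, eval=FALSE}
# sketch: optimize a longer horizon under an alternative loss function
# (loss = "Huber" is assumed to be the relevant model.controls option)
mod_h4 <- constructModel(Y, p = 4, struct = "Basic", gran = c(50, 10), h = 4,
                         verbose = FALSE,
                         model.controls = list(loss = "Huber", delta = 2.5))
```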
### Rolling Extension {#rolling_oos}
By default, the selected penalty parameter is fixed throughout the forecast evaluation period. In certain applications, it may be more appropriate to allow for a greater degree of flexibility. Consequently, by setting ${\tt \verb|rolling_oos|}$ to ${\tt TRUE}$ in ${\tt constructModel}$ we allow for the penalty parameter to be re-evaluated using a rolling window following each iteration in the forecast evaluation period, as depicted in the following figure

### Leave One Out Validation {#LOO}
As an alternative to rolling validation, we also offer a pseudo "leave-one-out" selection approach that respects the intrinsic time ordering of the VARX model. This procedure iterates through the data in the same manner as rolling validation. However, at each iteration $t$, the row $\mathbf{y}_t$ is removed from consideration when constructing the VARX lag matrix and instead used as a test set. Every other row up to $T_2$ is used for training, as visualized in the following figure

This procedure is particularly attractive relative to rolling validation in scenarios with limited data.
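Leave-one-out selection is requested through the ${\tt cv}$ argument of ${\tt constructModel}$ (a minimal sketch):
```{r, eval=FALSE}
# sketch: pseudo leave-one-out validation in place of rolling validation
mod_loo <- constructModel(Y, p = 4, struct = "Basic", gran = c(50, 10),
                          cv = "LOO", verbose = FALSE)
res_loo <- cv.BigVAR(mod_loo)
```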
## Diagnostics {#diag}
Generally, the only potential post-hoc diagnostic procedures are adjusting the depth/size of the penalty grid as well as the maximum lag order. We suggest setting the maximum lag order based on the frequency of the data (e.g. 4 for quarterly, 12 for monthly, etc).
The method ${\tt \verb|cv.BigVAR|}$ method returns an object of class ${\tt \verb|BigVAR.results|}$. This object inherits the properties of class ${\tt BigVAR}$ and contains both in and out-of-sample diagnostic information. For information on all fields, consult the package manual.
```{r}
str(results)
```
${\tt BigVAR.results}$ also has a plot method, to show a comparison of in-sample MSFE over the grid of $\lambda$ values.
```{r}
plot(results)
```
Generally, you want this graph to have a parabolic shape with the optimal value in one of the middle indices. In this scenario, since the slope of the line is very flat, it is likely that increasing the depth of the grid (i.e. the first parameter of ${\tt gran}$ in $\tt{constructModel}$) would not substantially improve forecasts. It is not recommended to make the depth too large as it substantially increases computation time.
```{r}
mod2<-constructModel(Y,p=4,"Basic",gran=c(5,10),h=1,cv="Rolling",verbose=FALSE,IC=FALSE)
res2=cv.BigVAR(mod2)
plot(res2)
```
However, since the slope of the line in this case is quite steep, it is likely that forecasts will be improved by increasing the depth.
```{r}
mod3<-constructModel(Y,p=4,"Basic",gran=c(500,10),h=1,cv="Rolling",verbose=FALSE,IC=FALSE)
res3=cv.BigVAR(mod3)
plot(res3)
```
As evidenced above, this plot does not always take on a parabolic shape. On occasion, when the grid is very deep, it will start to level off. In this scenario, it is best to decrease the depth of the grid.
<!-- #### Re-running evaluation period -->
<!-- After performing rolling or leave-one-out validation, an object of class ${\tt \verb|BigVAR.intermediate|}$ is produced which contains the grid of candidate lambdas as well as their in-sample MSFE. Though ${\tt \verb|BigVAR.intermediate|}$ is not intended to be user-facing, it can be modified to re-run out of sample forecast evaluation with different parameters. -->
## Prediction
We can also view the sparsity pattern of the final estimated coefficient matrix with ${\tt \verb|SparsityPlot.BigVAR.results|}$
```{r}
SparsityPlot.BigVAR.results(results)
```
Finally, out of sample predictions can be computed with ${\tt predict}$
```{r}
predict(results,n.ahead=1)
```
95 percent confidence intervals can be returned with the option ${\tt confint=TRUE}$
```{r}
predict(results,n.ahead=1, confint=TRUE)
```
## Coefficients
A formatted dataframe of the coefficient matrix of the final iteration of forecast evaluation can be obtained via the ${\tt coef}$ method
```{r}
coef(results)
```
## Example Data {#data}
${\tt Y}$, the sparse multivariate time series included with ${\tt BigVAR}$, was generated using the matrix $\mathbf{A}$, included with ${\tt BigVAR}$ as ${\tt Generator}$. The sparsity structure of $\mathbf{A}$ is visualized in the following plot
```{r,echo=TRUE}
data(Y) # simulated multivariate time series
# coefficient matrix used to generate Y
data(Generator)
# note that coefficients with a darker shade are larger in magnitude
SparsityPlot(A[1:3,],p=4,3,s=0,m=0,title="Sparsity Structure of Generator Matrix")
```
In order to generate multivariate VAR data, we transform the $k\times kp$ coefficient matrix to its *multiple companion form* (i.e. converting it to a $kp\times kp$ matrix representing a VAR of lag order 1). For details, consult page 15 of @lutk.
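As a sketch (not run, and assuming ${\tt Generator}$ has been loaded as in the chunk above), this conversion can be carried out with ${\tt VarptoVar1MC}$; a spectral radius below one indicates a stationary process.
```{r, eval=FALSE}
# reconstruct the kp x kp companion matrix from the k x kp generator block
A_comp <- VarptoVar1MC(A[1:3,], 4, 3)
# stationarity requires a spectral radius strictly less than one
max(Mod(eigen(A_comp)$values))
```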
# Extensions {#extensions}
## Fit with fixed, known $\lambda$ {#BigVAR.fit}
In certain scenarios, it may be overly cumbersome to construct a $\tt{BigVAR}$ object and perform rolling validation or lambda grid construction (for example, out-of-sample testing once an "optimal" penalty parameter has been selected). As an alternative, we include the function $\tt{\verb|BigVAR.fit|}$ which will fit a ${\tt BigVAR}$ model with a fixed penalty parameter without requiring the construction of a ${\tt BigVAR}$ object.
```{r}
# fit a Basic VARX-L with k=2, m=1, s=2, p=4, lambda=.01
VARX=list(k=2,s=2)
# returns a k x (kp+ms+1) x 1 coefficient array (intercept in the first column)
model=BigVAR.fit(Y,p=4,"Basic",lambda=1e-2,VARX=VARX,intercept=TRUE)
model
```
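A one-step ahead forecast can then be formed by pairing the fitted coefficients with the corresponding VARX lag matrix. The following sketch (not run) assumes, per the ${\tt VARX}$ list above, that the first two columns of ${\tt Y}$ are endogenous and the third is exogenous:
```{r, eval=FALSE}
# lag matrix for the VARX specification above (first row holds the intercept)
Z <- VARXLagCons(Y[,1:2], X=Y[,3,drop=FALSE], p=4, s=2, oos=TRUE)$Z
# one-step ahead forecast from the most recent lag vector
yhat <- model[,,1] %*% Z[,ncol(Z),drop=FALSE]
```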
## N-fold cross validation {#nfold}
If, instead of rolling or "leave-one-out" validation, you wish to use a custom procedure to set the penalty parameters, you can do so using repeated calls to ${\tt \verb|BigVAR.fit|}$. As an example, we provide an N-fold cross validation function (which does not respect time dependence).
```{r}
# N-fold cross validation for VAR
# Y: data
# nfolds: number of cross validation folds
# struct: penalty structure
# p: lag order
# nlambdas: number of lambdas
# gran1: depth of lambda grid
# seed: set to make it reproducible
NFoldcv <- function(Y,nfolds,struct,p,nlambdas,gran1,seed)
{
  A <- constructModel(Y,p,struct=struct,gran=c(gran1,nlambdas),verbose=F)
  # construct lag matrix; drop the first row of Z, which holds the intercept
  Z1 <- VARXLagCons(Y,X=NULL,s=0,p=p,0,0)
  trainZ <- Z1$Z[2:nrow(Z1$Z),]
  trainY <- matrix(Y[(p+1):nrow(Y),],ncol=ncol(Y))
  set.seed(seed)
  inds <- sample(nrow(trainY))
  # use BigVAR.est to construct the grid of candidate lambdas
  B <- BigVAR.est(A)
  lambda.grid <- B$lambda
  folds <- cut(inds,breaks=nfolds,labels=FALSE)
  MSFE <- matrix(0,nrow=nfolds,ncol=nlambdas)
  for(i in 1:nfolds){
    # held-out responses and their corresponding lag vectors
    test <- trainY[which(folds==i),]
    train <- trainY[which(folds!=i),]
    testZ <- t(t(trainZ)[which(folds==i),])
    B=BigVAR.fit(train,p=p,lambda=lambda.grid,struct=struct)
    # iterate over lambdas
    for(j in 1:nlambdas){
      MSFETemp <- c()
      for(k in 1:nrow(test)) {
        tempZ <- testZ[,k,drop=FALSE]
        bhat <- matrix(B[,2:dim(B)[2],j],nrow=ncol(Y),ncol=(p*ncol(Y)))
        preds <- B[,1,j]+bhat%*%tempZ
        # squared forecast error for this held-out observation
        MSFETemp <- c(MSFETemp,sum((test[k,]-preds)^2))
      }
      MSFE[i,j] <- mean(MSFETemp)
    }
  }
  return(list(MSFE=MSFE,lambdas=lambda.grid))
}
# 10 fold cv
MSFEs<-NFoldcv(Y,nfolds=10,"Basic",p=5,nlambdas=10,gran1=50,seed=2000)
# choose smaller lambda in case of ties (prevents extremely sparse solutions)
opt=MSFEs$lambdas[max(which(colMeans(MSFEs$MSFE)==min(colMeans(MSFEs$MSFE))))]
opt
```
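The selected penalty parameter can then be passed back to ${\tt \verb|BigVAR.fit|}$ to fit a final model on the full sample:
```{r, eval=FALSE}
# refit on the full sample using the penalty parameter chosen by N-fold cv
final_fit <- BigVAR.fit(Y, p=5, struct="Basic", lambda=opt)[,,1]
```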
## Information Criterion Benchmarks
We have noticed a relative dearth of packages that allow for the estimation and forecasting of VAR and VARX models via least squares with lag order selected according to an information criterion. By default, $\tt{cv.BigVAR}$ returns least squares AIC and BIC benchmarks as forecast comparison.
$\tt{VARXFit}$ will fit a VAR or VARX model with pre-specified maximum lag orders, and the function $\tt{VARXForecastEval}$ will evaluate the forecasting performance of VAR and VARX models, with lag orders selected by AIC or BIC, over a user-specified time horizon.
The arguments to $\tt{VARXForecastEval}$ are detailed below:
1. ${\tt Y}$: $T\times k$ matrix of endogenous (modeled) series.
2. ${\tt X}$: $T\times m$ matrix of exogenous (unmodeled) series.
3. ${\tt p}$: Maximum lag order for endogenous series.
4. ${\tt s}$: Maximum lag order for exogenous series.
5. ${\tt T1}$: Integer, Start of forecast evaluation period.
6. ${\tt T2}$: Integer, End of forecast evaluation period.
7. ${\tt IC}$: Information criterion used ("AIC" or "BIC")
8. ${\tt h}$: Forecast horizon.
9. ${\tt Iterated}$: Logical, indicator to use "iterated" multi-step forecasts (default $\tt{FALSE}$, indicating that direct forecasts are used).
Note that one may encounter scenarios in which the number of least squares VARX parameters exceeds the number of available observations: $(k^2p+kms) > (k+m)T$. For example, with $k=20$, $p=12$, and $m=0$, least squares requires $k^2p=4800$ coefficient estimates, which exceeds $kT$ whenever $T<240$. Our algorithm will terminate lag order selection as soon as the problem becomes ill-posed. In the event that the problem is ill-conditioned at $p=1$, the algorithm will always return a lag order of zero.
An example usage of $\tt{VARXForecastEval}$ is below.
```{r}
data(Y)
p <- 4
T1 <- floor(nrow(Y)/3)
T2 <- floor(2*nrow(Y)/3)
#Matrix of zeros for X
X <- matrix(0,nrow=nrow(Y),ncol=ncol(Y))
BICMSFE <- VARXForecastEval(Y,X,p,0,T1,T2,"BIC",1)
```
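The returned object contains, among other elements, the sequence of rolling out-of-sample MSFEs (the ${\tt \$MSFE}$ element, also used in [Selecting a Structure](#structure) below); for example:
```{r, eval=FALSE}
# average out-of-sample MSFE of the BIC-selected VAR
mean(BICMSFE$MSFE)
```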
In addition, one-step ahead predictions from VARX models can be computed using the function $\tt{PredictVARX}$.
```{r}
mod <- VARXFit(Y,3,NULL,NULL)
pred <-PredictVARX(mod)
pred
```
## Selecting a Structure {#structure}
The choice of structured penalty is not always clear at the outset of the forecasting problem. Since our methods are all computationally manageable across most dimensions, one approach that we recommend is to use a subset of the data to fit models with all applicable structured penalties and find the set of "superior models" using the $\tt{MCSprocedure}$ function from the package $\tt{MCS}$. For more information about the package and procedure, consult @MCSPackage and the original paper @hansen.
We will start by simulating a $\text{VAR}_3(6)$
```{r}
library(MASS)
k=3;p=6
B=matrix(0,nrow=k,ncol=p*k)
A1<- matrix(c(.4,-.07,.08,-.06,-.7,.07,-.08,.07,-.4),ncol=3,nrow=3)
A2 <- matrix(c(-.6,0,0,0,-.4,0,0,0,.5),ncol=3,nrow=3)
B[,1:k]=A1
B[,(5*k+1):(6*k)]=A2
A <- VarptoVar1MC(B,p,k)
set.seed(2000)
Y <-MultVarSim(k,A,p,.005*diag(k),500)
SparsityPlot(B,p,k,0,0, title='Sparsity Plot of VAR Coefficient Matrix')
```
The first lag coefficient matrix and the _own_ lags in the sixth coefficient matrix are the only nonzero entries. This suggests that structures incorporating an _Own/Other_ or _Lag_ type penalty will achieve the best forecast performance. There is no within-group sparsity, so we should not expect the Sparse counterparts to be in the set of superior models.
```{r,cache=TRUE}
library(MCS)
# train on first 250 observations
YTrain=Y[1:250,]
Loss <- c()
T1=1*floor(nrow(YTrain)/3);T2=2*floor(nrow(YTrain)/3)
p=8
structures<-c("Basic","BasicEN","Lag","SparseLag","OwnOther","HLAGC","HLAGOO","HLAGELEM","MCP","SCAD")
for(i in structures){
# construct BigVAR object; we will perform a dual grid search for the sparse lag and sparse own/other models
if(i%in%c("SparseLag","SparseOO")){
alpha=seq(0,1,length=10)
}else{
alpha=0
}
A<- constructModel(YTrain,p=p,struct=i,gran=c(100,10),T1=T1,T2=T2,verbose=FALSE,model.controls=list(intercept=FALSE,alpha=alpha))
# perform rolling cv
res<- cv.BigVAR(A)
# save out of sample loss for each structure
Loss <- cbind(Loss,res@OOSMSFE)
}
# construct AIC and BIC benchmarks
BIC <- VARXForecastEval(YTrain,matrix(0,nrow=nrow(YTrain)),p,0,T2,nrow(YTrain),"BIC",1)$MSFE
AIC <- VARXForecastEval(YTrain,matrix(0,nrow=nrow(YTrain)),p,0,T2,nrow(YTrain),"AIC",1)$MSFE
Loss <- as.data.frame(Loss)
names(Loss) <- structures
Loss <- cbind(Loss,BIC,AIC)
names(Loss)[(ncol(Loss)-1):ncol(Loss)] <- c("BIC","AIC")
names(Loss) <- paste0(names(Loss),"L")
mcs.test <- MCSprocedure(as.matrix(Loss),verbose=FALSE)
mcs.test
```
As expected, we find that the set of superior models contains only the Own/Other VAR-L and Lag VAR-L.
## Impulse Response Functions {#irf}
(Note: this section is adapted from @nicholson_bigvar.)
Though ${\tt BigVAR}$ is primarily designed to forecast high-dimensional time series, it can also be of use in analyzing the joint dynamics of a group of interrelated time series. In order to conduct policy analysis, many macroeconomists make use of VARs to examine the impact of shocks to certain variables on the entire system (holding all other variables fixed). This is known as impulse response analysis.
For example, a macroeconomist may wish to analyze the impact of a 100 basis point increase in the Federal Funds Rate on all included series over the next 8 quarters. To do so, we can utilize the function ${\tt generateIRF}$, which converts the last estimated ${\tt BigVAR}$ coefficient matrix to fundamental form.
We use the following function to generate an impulse response function:
```{r}
suppressMessages(library(expm))
# Phi: k x kp coefficient matrix
# Sigma: k x k residual covariance matrix
# n: number of time steps over which to run the IRF
# k: number of series
# p: lag order
# Y0: k-dimensional vector reflecting the initialization of the IRF
generateIRF <- function(Phi,Sigma,n,k,p,Y0)
{
if(p>1){
A <-VarptoVar1MC(Phi,p,k)
}else{
A <- Phi
}
J <- matrix(0,nrow=k,ncol=k*p)
diag(J) <- 1
P <- t(chol(Sigma))
IRF <- matrix(0,nrow=k,ncol=n+1)
for(i in 0:n)
{
phi1 <- J%*%(A%^%i)%*%t(J)
theta20 <- phi1%*%P
IRF[,i+1] <- (theta20%*%Y0)
}
return(IRF)
}
```
```{r, echo=TRUE,eval=TRUE,cache=TRUE}
require(quantmod)
require(zoo)
# get GDP, Federal Funds Rate, CPI from FRED
#Gross Domestic Product (Relative to 2000)
getSymbols('GDP',src='FRED',type='xts')
GDP<- aggregate(GDP,as.yearqtr,mean)
GDP <- GDP/mean(GDP["2000"])*100
# Transformation Code: First Difference of Logged Variables
GDP <- diff(log(GDP))
index(GDP) <- as.yearqtr(index(GDP))
# Federal Funds Rate
getSymbols('FEDFUNDS',src='FRED',type='xts')
FFR <- aggregate(FEDFUNDS,as.yearqtr,mean)
# Transformation Code: First Difference
FFR <- diff(FFR)
# CPI ALL URBAN CONSUMERS, relative to 1983
getSymbols('CPIAUCSL',src='FRED',type='xts')
CPI <- aggregate(CPIAUCSL,as.yearqtr,mean)
CPI <- CPI/mean(CPI['1983'])*100
# Transformation code: difference of logged variables
CPI <- diff(log(CPI))
# Seasonally Adjusted M1
getSymbols('M1SL',src='FRED',type='xts')
M1<- aggregate(M1SL,as.yearqtr,mean)
# Transformation code, difference of logged variables
M1 <- diff(log(M1))
# combine series
Y <- cbind(CPI,FFR,GDP,M1)
names(Y) <- c("CPI","FFR","GDP","M1")
Y <- na.omit(Y)
k=ncol(Y)
T <- nrow(Y)
# start/end of rolling validation
T1 <- which(index(Y)=="1985 Q1")
T2 <- which(index(Y)=="2005 Q1")
#Demean
Y <- Y - (c(rep(1, nrow(Y))))%*%t(c(apply(Y[1:T1,], 2, mean)))
#Standardize variance
for (i in 1:k) {
Y[, i] <- Y[, i]/apply(Y[1:T1,], 2, sd)[i]
}
library(expm)
# Fit an Elementwise HLAG model
Model1=constructModel(as.matrix(Y),p=4,struct="HLAGELEM",gran=c(25,10),verbose=FALSE,VARX=list(),T1=T1,T2=T2)
Model1Results=cv.BigVAR(Model1)
# generate IRF for 10 quarters following a 1 percent increase in the federal funds rate
IRFS <- generateIRF(Phi=Model1Results@betaPred[,2:ncol(Model1Results@betaPred)],Sigma=cov(Model1Results@resids),n=10,k=ncol(Y),p=4,Y0=c(0,.01,0,0))
```
```{r functions, include=FALSE, echo=FALSE}
# A function for captioning and referencing images
fig <- local({
i <- 0
ref <- list()
list(
cap=function(refName, text) {
i <<- i + 1
ref[[refName]] <<- i
paste("Figure ", i, ": ", text, sep="")
},
ref=function(refName) {
ref[[refName]]
})
})
```
The impulse responses generated from this "shock" are depicted below.
```{r irf_plot,fig.cap='\\label{fig:irf_plot} Generated Impulse Responses', echo=FALSE}
# Plot IRFs
par(mfrow=c(2,2))
for(i in 1:4)
{
plot(IRFS[i,],type='l',main=names(Y)[i],ylab="",xlab="")
}
```
# References
|
/scratch/gouwar.j/cran-all/cranData/BigVAR/inst/doc/BigVAR.Rmd
|
# Copyright (C) Kevin R. Coombes, 2007-2012
## bimIndex.R
# Compute the bimodal index of each row (e.g. gene) of a data matrix by fitting
# a two-component Gaussian mixture. Requires 'Mclust' from the mclust package.
# Rows containing NA values are skipped and left as NA in the result.
bimodalIndex <- function(dataset, verbose=TRUE) {
  bim <- matrix(NA, nrow=nrow(dataset), ncol=6)
  if (verbose) cat("1 ")
  for (i in 1:nrow(dataset)) {
    # progress indicator: a dot every 100 rows, a counter every 1000 rows
    if (verbose && 0 == i%%100) cat(".")
    if (verbose && 0 == i%%1000) cat(paste("\n", 1 + i/1000, ' ', sep=''))
    x <- as.vector(as.matrix(dataset[i, ]))
    if (any(is.na(x))) next
    # two-component, equal-variance Gaussian mixture fit
    mc <- Mclust(x, G = 2, modelNames = "E", verbose = FALSE)
    sigma <- sqrt(mc$parameters$variance$sigmasq)
    # standardized distance between the two component means
    delta <- abs(diff(mc$parameters$mean))/sigma
    # pi <- max(mc$parameters$pro)
    pi <- mc$parameters$pro[1]
    # bimodal index: delta weighted by the balance of the mixing proportions
    bi <- delta * sqrt(pi*(1-pi))
    bim[i,] <- c(mc$parameters$mean, sigma=sigma, delta=delta, pi=pi, bim=bi)
  }
  if(verbose) cat("\n")
  dimnames(bim) <- list(rownames(dataset),
                        c("mu1", "mu2", "sigma", "delta", "pi", "BI"))
  bim <- as.data.frame(bim)
  bim
}
|
/scratch/gouwar.j/cran-all/cranData/BimodalIndex/R/cd09-bimIndex.R
|
### R code from vignette source 'bim.Rnw'
###################################################
### code chunk number 1: bim.Rnw:28-31
###################################################
options(width=88)
options(SweaveHooks = list(fig = function() par(bg='white')))
#if (!file.exists("Figures")) dir.create("Figures")
###################################################
### code chunk number 2: simdata
###################################################
set.seed(564684)
nSamples <- 60
nGenes <- 3000
dataset <- matrix(rnorm(nSamples*nGenes), ncol=nSamples, nrow=nGenes)
dimnames(dataset) <- list(paste("G", 1:nGenes, sep=''),
paste("S", 1:nSamples, sep=''))
###################################################
### code chunk number 3: shift
###################################################
dataset[1:100, 1:30] <- dataset[1:100, 1:30] + 3
###################################################
### code chunk number 4: lib
###################################################
library(BimodalIndex)
###################################################
### code chunk number 5: bim
###################################################
bim <- bimodalIndex(dataset)
summary(bim)
###################################################
### code chunk number 6: bim.Rnw:74-76
###################################################
getOption("SweaveHooks")[["fig"]]()
plot(bim$BI, col=rep(c("red", "black"), times=c(100, 2900)),
xlab="Gene", ylab="Bimodal Index")
###################################################
### code chunk number 7: extra
###################################################
summary(bim$BI[101:3000])
cutoffs <- quantile(bim$BI[101:3000], probs=c(0.90, 0.95, 0.99))
cutoffs
###################################################
### code chunk number 8: sensitivity
###################################################
sapply(cutoffs, function(x) sum(bim$BI[1:100] > x))
###################################################
### code chunk number 9: getwd
###################################################
getwd()
###################################################
### code chunk number 10: si
###################################################
sessionInfo()
|
/scratch/gouwar.j/cran-all/cranData/BimodalIndex/inst/doc/bim.R
|
#'Conditional power computation using asymptotic test.
#'
#' Compute conditional power of single-arm group sequential design with binary endpoint based on asymptotic test, given the interim
#' result.
#'
#'Conditional power quantifies the conditional probability of crossing the upper bound given the interim result \eqn{z_i},
#'\eqn{1\le i<K}. Having inherited sample sizes and boundaries from \code{\link{asymdesign}} or \code{\link{asymprob}},
#'given the interim statistic at \eqn{i}th analysis \eqn{z_i}, the conditional power is defined as
#'
#'\eqn{\alpha _{i,K}(p|z_i)=P_{p}(Z_K\ge u_K, Z_{K-1}>l_{K-1}, \ldots, Z_{i+1}>l_{i+1}|Z_i=z_i)}
#'
#'With asymptotic test, the test
#'statistic at analysis \eqn{k} is
#'\eqn{Z_k=\hat{\theta}_k\sqrt{n_k/p/(1-p)}=(\sum_{s=1}^{n_k}X_s/n_k-p_0)\sqrt{n_k/p/(1-p)}},
#'which follows the normal distribution \eqn{N(\theta \sqrt{n_k/p/(1-p)},1)}
#'with \eqn{\theta=p-p_0}. In practice, \eqn{p} in \eqn{Z_k} can be substituted
#'with the sample response rate \eqn{\sum_{s=1}^{n_k}X_s/n_k}.
#'
#'The increment statistic \eqn{Z_k\sqrt{n_k/p/(1-p)}-Z_{k-1}\sqrt{n_{k-1}/p/(1-p)}} also follows a normal distribution independently
#'of \eqn{Z_{1}, \ldots, Z_{k-1}}. Then the conditional power can be easily obtained using a procedure similar
#'to that for unconditional boundary crossing probabilities.
#'
#' @param d An object of the class asymdesign or asymprob.
#' @param p_1 A scalar or vector representing response rate or probability of success under the alternative hypothesis. The
#' value(s) should be within (p_0,1).
#' @param i Index of the analysis at which the interim statistic is given. Should be an integer that ranges from 1 to K-1. i will be
#' rounded to the nearest whole number if it is not an integer.
#' @param z_i The interim statistic at analysis i.
#'
#' @return A list with the elements as follows:
#' \itemize{
#' \item{K: As in d.}
#' \item{n.I: As in d.}
#' \item{u_K: As in d.}
#' \item{lowerbounds: As in d.}
#' \item{i: i used in computation.}
#' \item{z_i: As input.}
#' \item{cp: A matrix of conditional powers under different response rates.}
#' \item{p_1: As input.}
#' \item{p_0: As input.}
#' }
#'
#'
#'@section Reference: \itemize{
#' \item{Alan Genz et al. (2018). mvtnorm: Multivariate Normal and t Distributions. R package version 1.0-11.}}
#'
#'@seealso \code{\link{asymprob}}, \code{\link{asymdesign}},
#' \code{\link{exactcp}}.
#'
#' @export
#'
#' @examples
#' I=c(0.2,0.4,0.6,0.8,0.99)
#'beta=0.2
#'betaspend=c(0.1,0.2,0.3,0.3,0.2)
#'alpha=0.05
#'p_0=0.3
#'p_1=0.5
#'K=4.6
#'tol=1e-6
#'tt1=asymdesign(I,beta,betaspend,alpha,p_0,p_1,K,tol)
#'tt2=asymprob(p_1=c(0.4,0.5,0.6,0.7,0.8,0.9),d=tt1)
#' asymcp(tt1,p_1=c(0.4,0.5,0.6,0.7,0.8,0.9),1,2)
#' asymcp(tt2,p_1=c(0.4,0.5,0.6,0.7,0.8,0.9),3,2.2)
asymcp<-function(d,p_1,i,z_i){
##check validity of inputs
if(!(methods::is(d,"asymdesign")|methods::is(d,"asymprob")))
stop("asymcp must be called with class of either asymdesign or asymprob.")
p_0=d$p_0
i=round(i)
if((min(p_1)<=p_0)|(max(p_1)>=1))
stop('Please input p_1 that lies between p_0 and 1 (not including p_0 and 1).')
  if((i>=d$K)|(i<1)) ##use d$K here since K is only redefined below
    stop('i is less than 1 or more than K-1.')
  if(abs(z_i)>=100)
    stop('invalid interim statistic: too large or too small.')
  K=d$K-i ##the number of analyses after the ith analysis.
n.I=(d$n.I)[i:(i+K)] ##sample sizes for i,i+1,...,K analysis
lowerbounds=(d$lowerbounds)[(i+1):(i+K)] ##lower bounds for i+1,i+2,...K analysis.
cp=matrix(0,1+length(p_1),1) ##to store conditional power under different p
#conditional power under H0
infor=n.I/p_0/(1-p_0) ##sequence of Fisher information under H0, for i, i+1,...K analysis.
lowerbounds1=lowerbounds*sqrt(infor[-1])-z_i*sqrt(infor[1]) ##new lower bounds under H0,l_m^*
sigma=matrix(0,K,K) #the covariance matrix of multivariate normal distribution, K here is essentially K-i.
for(j1 in 1:K){
for(j2 in 1:K){
sigma[j1,j2]=infor[(min(j1,j2)+1)] ##I_k, k is the smaller between j1 and j2.
}
}
sigma=sigma-infor[1] ##the covariance matrix of the joint normal dist. of Z_{i,i+1},Z_{i,i+2},...,Z_{i,K}
cp[1,]=mvtnorm::pmvnorm(lower=lowerbounds1,upper=rep(Inf,K),mean=rep(0,K),sigma=sigma)[1] ##cp under H0
##conditional power under H1.
for(s in 1:length(p_1)){
p1=p_1[s]
infor=n.I/p1/(1-p1) ##sequence of Fisher information under H1, for i, i+1,...K analysis.
lowerbounds1=lowerbounds*sqrt(infor[-1])-z_i*sqrt(infor[1]) ##new lower bounds under H1,l_m^*
sigma=matrix(0,K,K) #the covariance matrix of multivariate normal distribution, K here is essentially K-i.
for(j1 in 1:K){
for(j2 in 1:K){
sigma[j1,j2]=infor[(min(j1,j2)+1)] ##I_k, k is the smaller between j1 and j2.
}
}
sigma=sigma-infor[1] ##the covariance matrix of the joint normal dist. of Z_{i,i+1},Z_{i,i+2},...,Z_{i,K}
mean1=(p1-p_0)*(infor[-1]-infor[1])
    cp[(s+1),]=mvtnorm::pmvnorm(lower=lowerbounds1,upper=rep(Inf,K),mean=mean1,sigma=sigma)[1] ##cp under H1
}
cp=cbind(c(p_0,p_1),cp)
colnames(cp)=c('p','cp')
return(list(K=d$K,n.I=d$n.I,u_K=d$u_K,lowerbounds=d$lowerbounds,i=i,z_i=z_i,cp=cp,p_1=p_1,p_0=p_0))
}
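# Hedged sketch (illustrative toy values, not from the package): with i = K-1
# only one analysis remains, so the conditional power computed above reduces to
# a single univariate normal tail probability. All numbers below are assumed.
p_0 <- 0.3; p_1 <- 0.5; z_i <- 2; u_K <- 1.65
n_i <- 30; n_K <- 40 # hypothetical cumulative sample sizes at analyses K-1 and K
I_i <- n_i/(p_1*(1 - p_1)); I_K <- n_K/(p_1*(1 - p_1)) # Fisher information under p_1
l_star <- u_K*sqrt(I_K) - z_i*sqrt(I_i) # transformed last bound, l_K^*
m1 <- (p_1 - p_0)*(I_K - I_i) # mean of the increment under p = p_1
1 - stats::pnorm(l_star, mean = m1, sd = sqrt(I_K - I_i)) # conditional power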
|
/scratch/gouwar.j/cran-all/cranData/BinGSD/R/asymcp.r
|
#'Boundary and sample size computation using asymptotic test.
#'
#'Calculate boundaries and sample sizes of single-arm group sequential design
#'with binary endpoint based on asymptotic test.
#'
#'Suppose \eqn{X_{1}, X_{2}, \ldots} are binary outcomes following Bernoulli
#'distribution \eqn{b(1,p)}, in which 1 stands for the case that the subject
#'responds to the treatment and 0 otherwise. Consider a group sequential test
#'with \eqn{K} planned analyses, where the null and alternative hypotheses are
#'\eqn{H_0: p=p_0} and \eqn{H_1: p=p_1} respectively. Note that generally
#'\eqn{p_1} is greater than \eqn{p_0}. For \eqn{k<K}, the trial stops if and
#'only if the test statistic \eqn{Z_k} crosses the futility boundary, that is,
#'\eqn{Z_k<=l_k}. The lower bound for the last analysis \eqn{l_K} is set to be
#'equal to the last and only upper bound \eqn{u_K} to make a decision. At the
#'last analysis, the null hypothesis will be rejected if \eqn{Z_K>=u_K}.
#'
#'The computation of lower bounds except for the last one is implemented with
#'\eqn{u_K} fixed, thus the derived lower bounds are non-binding. Furthermore,
#'the overall type I error will not be inflated if the trial continues after
#'crossing any of the interim lower bounds, which is convenient for the purpose
#'of monitoring. Let the sequence of sample sizes required at each analysis be
#'\eqn{n_{1}, n_{2}, \ldots, n_{K}}. For binomial endpoint, the Fisher
#'information equals \eqn{n_k/p/(1-p)} which is proportional to \eqn{n_k}.
#'Accordingly, the information fraction available at each analysis is equivalent
#'to \eqn{n_k/n_K}.
#'
#'For a \eqn{p_0} not close to 1 or 0, with a large sample size, the test
#'statistic at analysis \eqn{k} is
#'\eqn{Z_k=\hat{\theta}_k\sqrt{n_k/p/(1-p)}=(\sum_{s=1}^{n_k}X_s/n_k-p_0)\sqrt{n_k/p/(1-p)}},
#'which follows the normal distribution \eqn{N(\theta \sqrt{n_k/p/(1-p)},1)}
#'with \eqn{\theta=p-p_0}. In practice, \eqn{p} in \eqn{Z_k} can be substituted
#'with the sample response rate \eqn{\sum_{s=1}^{n_k}X_s/n_k}.
#'
#'Under the null hypothesis, \eqn{\theta=0} and \eqn{Z_k} follows a standard
#'normal distribution. During the calculation, the only upper bound \eqn{u_K} is
#'firstly derived under \eqn{H_0}, without given \eqn{n_K}. Thus, there is no
#'need to adjust \eqn{u_K} for different levels of \eqn{n_K}. Following East,
#'given \eqn{u_K}, compute the maximum sample size \eqn{n_K} under \eqn{H_1}.
#'The rest sample sizes can be obtained by multiplying information fractions
#'and \eqn{n_K}. The lower boundaries for the first \eqn{K-1} analyses are
#'sequentially determined by a search method. The whole searching procedure
#'stops when the overall type II error does not exceed the desired level or the
#'number of iterations exceeds 30. Otherwise, the sample sizes are increased until
#'the type II error meets the user's requirement.
#'
#'The multiple integrals of multivariate normal density functions are conducted with
#'\code{\link[mvtnorm]{pmvnorm}} in R package mvtnorm. Through a few transformations of the integral variables,
#'\code{\link[mvtnorm]{pmvnorm}} turns the multiple integral to the product of several
#'univariate integrals, which greatly reduces the computational burden of sequentially searching for
#'appropriate boundaries.
#'
#'@param I The information fractions at each analysis. For binary endpoints, the
#' information fraction for analysis k is equal to n_k/n_K, where n_k is the
#' sample size available at analysis k and n_K is the sample size available at
#' the last analysis or the maximum sample size. Should be a positive
#' increasing vector of length K or K-1. If I has K elements among which the
#' last one is not 1, then I will be standardized so that the last information
#' fraction is 1. If I has K-1 elements, the last element in I must be less
#' than 1.
#'@param beta The desired overall type II error level. Should be a scalar within
#' the interval (0,0.5]. Default value is 0.3, that is, power=0.7.
#'@param betaspend The proportions of beta spent at each analysis. Should be a
#' vector of length K with all elements belong to [0,1]. If the sum of all
#' elements in betaspend is not equal to 1, betaspend will be standardized.
#'@param alpha The desired overall type I error level. Should be a scalar within
#' the interval (0,0.3]. Default is 0.05.
#'@param p_0 The response rate or the probability of success under null
#' hypothesis. Should be a scalar within (0,1).
#'@param p_1 The response rate or the probability of success under alternative
#' hypothesis. Should be a scalar within (p_0,1).
#'@param K The maximum number of analyses, including the interim and the final.
#' Should be an integer within (1,20]. K will be rounded to its nearest whole
#' number if it is not an integer.
#'@param tol The tolerance level which is essentially the maximum acceptable difference between
#' the desired type II error spending and the actual type II error spending, when
#' computing the boundaries using asymptotic test. Should be a positive scalar no
#' more than 0.01. The default value is 1e-6.
#'
#'@return An object of the class asymdesign. This class contains:
#'\itemize{
#' \item{I: I used in computation.}
#' \item{beta: As input.}
#' \item{betaspend: The desired type II error spent at each analysis used in computation.}
#' \item{alpha: As input.}
#' \item{p_0: As input.}
#' \item{p_1: As input.}
#' \item{K: K used in computation.}
#' \item{tol: As input.}
#' \item{n.I: A vector of length
#' K which contains sample sizes required at each analysis to achieve desired
#' type I and type II error requirements. n.I equals sample size for the last
#' analysis times the vector of information fractions.}
#' \item{u_K: The upper boundary for the last analysis.}
#' \item{lowerbounds: A vector of length K
#' which contains lower boundaries for each analysis. Note that the lower
#' boundaries are non-binding.}
#' \item{problow: Probabilities of crossing the
#' lower bounds under \eqn{H_1} or the actual type II error at each analysis.}
#' \item{probhi: Probability of crossing the last upper bound under \eqn{H_0} or the
#' actual type I error.} \item{power: power of the group sequential test with
#' the value equals 1-sum(problow).} }
#'
#'
#'@section Reference: \itemize{ \item{Cytel Inc. East Version 6.4.1 Manual.
#' 2017.}
#' \item{Alan Genz et al. (2018). mvtnorm: Multivariate Normal and t Distributions. R package version 1.0-11.}}
#'
#'@seealso \code{\link{asymprob}}, \code{\link{asymcp}},
#' \code{\link{exactdesign}}.
#'
#'@export
#'
#' @examples
#' I=c(0.2,0.4,0.6,0.8,0.99)
#'beta=0.2
#'betaspend=c(0.1,0.2,0.3,0.3,0.2)
#'alpha=0.05
#'p_0=0.3
#'p_1=0.5
#'K=4.6
#'tol=1e-6
#'tt1=asymdesign(I,beta,betaspend,alpha,p_0,p_1,K,tol)
#'
asymdesign<-function(I,beta=0.3,betaspend,alpha=0.05,p_0,p_1,K,tol=1e-6){#I is timing/information fractions
##-------check the validity of all inputs-----
temp1=check.asymdesign(I,beta,betaspend,alpha,p_0,p_1,K,tol)
I=temp1$I
betaspend=temp1$betaspend
K=temp1$K
lowerbounds=rep(0,K) ##the vector for storing the lower bounds for each analysis.
betaspend=betaspend*beta ##the vector of type II error spent at each analysis
##-------obtain the upper bound u_K and the initial value sample sizes n.I and l_1----
u_K=stats::qnorm(1-alpha) #the upper bound for the last analysis.
n_K=ceiling(p_1*(1-p_1)*(((u_K-stats::qnorm(beta))/(p_1-p_0))^2)) ##n_K for initialization.
n.I=ceiling(n_K*I) ##the vector of sample sizes for all stages.
lowerbounds[1]=stats::qnorm(betaspend[1])+(p_1-p_0)*sqrt(n.I[1]/p_1/(1-p_1)) ##lower boundary for the first analysis
  problow=betaspend ##the vector for storing actual type II error achieved at each analysis under H1.
  flag=0 ##flag records whether a lower bound hits u_K; initialized here so that designs with K=2 are handled.
  ##for design with more than 2 stages.
  if(K>2){
for(k in 2:(K-1)){
temp1=bound1(k,lowerbounds[1:(k-1)],u_K,n.I[1:k],p_1,p_0,betaspend[k],tol) ##get the lower bound for kth analysis.
flag=temp1$flag
if(flag){##if l_k=u_K, then make the rest of lower bounds be u_K.
lowerbounds[k:K]=u_K
break
}
lowerbounds[k]=temp1$l_k
problow[k]=temp1$error
}
}
##make the last lower bound=u_K and calculate actual power
lowerbounds[K]=u_K
if(!flag){## so only the type II error at the last analysis needs to calculate
mean1=(p_1-p_0)*sqrt(n.I/p_1/(1-p_1)) ##the mean vector of the multivariate normal distribution.
lowerlimits=c(lowerbounds[1:(K-1)],-Inf) ##the lower limits for computing the boundary crossing probability.
sigma=matrix(0,K,K) #the covariance matrix of multivariate normal distribution.
for(i in 1:K){
for(j in 1:K){
sigma[i,j]=mean1[min(i,j)]/mean1[max(i,j)]
}
}
problow[K]=mvtnorm::pmvnorm(lower=lowerlimits,upper=c(rep(Inf,(K-1)),u_K),mean=mean1,sigma=sigma)[1]
  }else{##call asymprob1 to compute the boundary crossing probabilities.
problow=asymprob1(n.I,lowerbounds,p_0,p_1,K)
}
##check if the design satisfies the power constraint. if not ,increase the sample size.
t=0
##if the actual power less than desired, increase sample size.
while((beta<sum(problow))&(t<=30)){
n_K=n_K+1 ##increase sample size
n.I=ceiling(n_K*I) ##the vector of sample sizes for all stages.
problow=asymprob1(n.I,lowerbounds,p_0,p_1,K)
t=t+1
}
if(beta<sum(problow)) ##the last while iteration is stopped due to t>30 but not convergence
stop('cannot converge with the current tol.')
  probhi=asymprob2(n.I,lowerbounds,K) ##the value is the probabilities of crossing the lower bounds under H0.
  probhi=1-sum(probhi) ##the type I error equals 1-sum(lower bound crossing probabilities under H0)
x=list(I=I,beta=beta,betaspend=betaspend,alpha=alpha,p_0=p_0,p_1=p_1,K=K,tol=tol,n.I=n.I,u_K=u_K,lowerbounds=lowerbounds,
problow=problow,probhi=probhi,power=1-sum(problow))
class(x)="asymdesign"
return(x)
}
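# Hedged illustration (assumed values): the initialization above is the usual
# fixed-sample asymptotic formula n = p_1(1-p_1)*((z_{1-alpha}-z_{beta})/(p_1-p_0))^2.
alpha <- 0.05; beta <- 0.2; p_0 <- 0.3; p_1 <- 0.5
u_K <- stats::qnorm(1 - alpha)
ceiling(p_1*(1 - p_1)*(((u_K - stats::qnorm(beta))/(p_1 - p_0))^2)) # starting n_K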
|
/scratch/gouwar.j/cran-all/cranData/BinGSD/R/asymdesign.R
|
#'Boundary crossing probabilities computation using asymptotic test.
#'
#'Calculate boundary crossing probabilities of single-arm group sequential
#'design with binary endpoint based on asymptotic test.
#'
#'This function calculates probabilities of crossing the upper or the lower
#'boundaries under null hypothesis and a set of alternative hypotheses. With \code{K=0}
#'(as default), d must be an object of class asymdesign. Meanwhile, other
#'arguments except for \code{p_1} will be inherited from \code{d} and the input values will be
#'ignored. With \code{K!=0}, the probabilities are derived from the input arguments. In
#'this circumstance, all arguments except for \code{d} are required.
#'
#'The computation is based on the single-arm group sequential asymptotic test
#'described in \code{\link{asymdesign}}. Therefore, for the output matrix of
#'upper bound crossing probabilities, the values for the first K-1 analyses are
#'zero since there is only one upper bound for the last analysis.
#'
#'@param K The maximum number of analyses, including the interim and the final.
#' Should be an integer within (1,20]. K will be rounded to its nearest whole
#' number if it is not an integer. The default is 0.
#'@param p_0 The response rate or the probability of success under null
#' hypothesis. Should be a scalar within (0,1).
#'@param p_1 A scalar or vector representing response rate or probability of
#' success under the alternative hypothesis. The value(s) should be within
#' (p_0,1). It is a mandatory input.
#'@param n.I A vector of length K which contains sample sizes required at each
#' analysis. Should be a positive and increasing sequence.
#'@param u_K The upper boundary for the last analysis.
#'@param lowerbounds Non-decreasing lower boundaries for each analysis. With
#' length K, the last lower bound must be identical to u_K. With length K-1,
#' the last element must be no greater than u_K and u_K will be automatically
#' added into the sequence.
#'@param d An object of the class asymdesign.
#'
#'@return An object of the class asymprob. This class contains: \itemize{
#' \item{p_0: As input with \code{d=NULL} or as in \code{d}.} \item{p_1: As input.} \item{K:
#' K used in computation.} \item{n.I: As input with \code{d=NULL} or as in \code{d}.}
#' \item{u_K: As input with \code{d=NULL} or as in \code{d}.} \item{lowerbounds: lowerbounds
#' used in computation.} \item{problow: Probabilities of crossing the lower
#' bounds at each analysis.} \item{probhi: Probability of
#' crossing the upper bounds at each analysis.} }
#'
#'
#'@section Reference: \itemize{ \item{Alan Genz et al. (2018). mvtnorm:
#' Multivariate Normal and t Distributions. R package version 1.0-11.}}
#'
#'@seealso \code{\link{asymdesign}}, \code{\link{asymcp}}, \code{\link{exactprob}}.
#'
#'@export
#'
#' @examples
#' I=c(0.2,0.4,0.6,0.8,0.99)
#'beta=0.2
#'betaspend=c(0.1,0.2,0.3,0.3,0.2)
#'alpha=0.05
#'p_0=0.3
#'p_1=0.5
#'K=4.6
#'tol=1e-6
#'tt1=asymdesign(I,beta,betaspend,alpha,p_0,p_1,K,tol)
#'asymprob(p_1=c(0.4,0.5,0.6,0.7,0.8,0.9),d=tt1)
#'asymprob(K=5,p_0=0.4,p_1=c(0.5,0.6,0.7,0.8),n.I=c(15,20,25,30,35),u_K=1.65,
#'lowerbounds=c(-1.2,-0.5,0.2,0.8,1.65))
asymprob<-function(K=0,p_0,p_1,n.I,u_K,lowerbounds,d=NULL){
##check validity of inputs
  if(K==0){##K=0 (the default) means all parameters except p_1 are inherited from d; check if d belongs to class "asymdesign"
if(!methods::is(d,"asymdesign"))
stop('d is not an object of class asymdesign')
##if d is asymdesign class, then adopt the parameters except for p_1
p_0=d$p_0
if((min(p_1)<=p_0)|(max(p_1)>=1))
stop('Please input p_1 that lies between p_0 and 1 (not including p_0 and 1).')
K=d$K
n.I=d$n.I
u_K=d$u_K
lowerbounds=d$lowerbounds
}else{##in this case, we need to check the validity of inputs and adopt the inputs by the user.
temp1=check.prob(K,p_0,p_1,n.I,u_K,lowerbounds)
K=temp1$K
lowerbounds=temp1$lowerbounds
n.I=temp1$n.I
}
probhi=matrix(0,1+length(p_1),K)
problow=probhi
##compute boundary crossing probabilities under H0 and put them in the first row.
problow[1,]=asymprob2(n.I,lowerbounds,K)
probhi[1,K]=1-sum(problow[1,])
##compute boundary crossing probabilities under H1
for(i in 1:length(p_1)){
problow[(i+1),]=asymprob1(n.I,lowerbounds,p_0,p_1[i],K)
}
  probhi[2:(1+length(p_1)),K]=1-rowSums(problow[2:(1+length(p_1)),,drop=FALSE]) ##drop=FALSE keeps the matrix structure when length(p_1)==1
problow=cbind(c(p_0,p_1),problow,(1-probhi[,K]))
probhi=cbind(c(p_0,p_1),probhi)
colnames(problow)=c('p',1:K,'Total')
colnames(probhi)=c('p',1:K)
x=list(p_0=p_0,p_1=p_1,K=K,n.I=n.I,u_K=u_K,lowerbounds=lowerbounds,problow=problow,probhi=probhi)
class(x)='asymprob'
return(x)
}
|
/scratch/gouwar.j/cran-all/cranData/BinGSD/R/asymprob.r
|
#compute the lower boundary crossing probabilities given the design, under H1.
#asymprob1(n.I,lowerbounds,p_0,p_1,K)
asymprob1<-function(n.I,lowerbounds,p_0,p_1,K){
mean1=(p_1-p_0)*sqrt(n.I/p_1/(1-p_1)) ##the mean vector of the multivariate normal distribution.
sigma=matrix(0,K,K) #the covariance matrix of multivariate normal distribution.
for(i in 1:K){
for(j in 1:K){
sigma[i,j]=mean1[min(i,j)]/mean1[max(i,j)]
}
}
problow=rep(0,K)
problow[1]=stats::pnorm(lowerbounds[1]-mean1[1]) ##Z_1 follows a normal distribution.
  ##note the last (K-k) integration limits do not influence the result since they are -Inf and Inf.
for(k in 2:K){
upperlimits=c(rep(Inf,(k-1)),lowerbounds[k],rep(Inf,(K-k)))
lowerlimits=c(lowerbounds[1:(k-1)],rep(-Inf,(K-k+1)))
problow[k]=mvtnorm::pmvnorm(lower=lowerlimits,upper=upperlimits,mean=mean1,sigma=sigma)[1]
}
return(problow)
}
|
/scratch/gouwar.j/cran-all/cranData/BinGSD/R/asymprob1.r
|
#compute the lower boundary crossing probabilities given the design, under H0.
#asymprob2(n.I,lowerbounds,K)
asymprob2<-function(n.I,lowerbounds,K){
sigma=matrix(0,K,K) #the covariance matrix of multivariate normal distribution.
for(i in 1:K){
for(j in 1:K){
sigma[i,j]=sqrt(n.I[min(i,j)]/n.I[max(i,j)])
}
}
problow=rep(0,K)
problow[1]=stats::pnorm(lowerbounds[1]) ##Z_1 follows a standard normal distribution.
  ##note the last (K-k) integration limits do not influence the result since they are -Inf and Inf.
for(k in 2:K){
upperlimits=c(rep(Inf,k-1),lowerbounds[k],rep(Inf,(K-k)))
lowerlimits=c(lowerbounds[1:(k-1)],rep(-Inf,(K-k+1)))
problow[k]=mvtnorm::pmvnorm(lower=lowerlimits,upper=upperlimits,mean=rep(0,K),sigma=sigma)[1]
}
return(problow)
}
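# Hedged Monte Carlo cross-check (toy design, assumed values): simulate Z_k with
# the covariance used above and compare the empirical probabilities of first
# crossing the lower bound at each analysis with asymprob2().
set.seed(1)
n.I <- c(15, 30, 45); lowerbounds <- c(-0.5, 0.3, 1.65); K <- 3; nsim <- 1e5
m <- c(n.I[1], diff(n.I)) # sample size increments
S <- matrix(stats::rnorm(nsim*K), nsim, K) # independent standard normal draws
Z <- t(apply(sweep(S, 2, sqrt(m), "*"), 1, cumsum)) # partial sums, Var = n_k
Z <- sweep(Z, 2, sqrt(n.I), "/") # Cov(Z_j, Z_k) = sqrt(n_j/n_k), as in sigma above
first <- apply(Z <= matrix(lowerbounds, nsim, K, byrow = TRUE), 1,
               function(z) which(z)[1]) # analysis of first crossing (NA if none)
round(table(factor(first, levels = 1:K))/nsim, 4) # empirical probabilities
asymprob2(n.I, lowerbounds, K) # analytical counterpart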
|
/scratch/gouwar.j/cran-all/cranData/BinGSD/R/asymprob2.r
|
#compute the lower bound for the kth analysis given:
#(1)lowerbounds: lower bounds before kth analysis, l_1,l_2,...,l_{k-1}.
#(2)n.I: sample sizes before and at kth analysis, n_1,n_2,...,n_k.
#(3)betak: desired type II error spent at kth analysis.
#(4)k: the index of the analysis at which the lower bound is computed.
#(5)p_1,p_0,the last upper bound u_K.
#NOTE:the computation of lower bounds are taken under H1!!
#bound1(k,lowerbounds[1:(k-1)],u_K,n.I[1:k],p_1,p_0,betaspend[k],tol)
bound1<-function(k,lowerbounds,u_K,n.I,p_1,p_0,betak,tol){
mean1=(p_1-p_0)*sqrt(n.I/p_1/(1-p_1)) ##the mean vector of the multivariate normal distribution for analysis 1,...,k under H1.
lowerlimits=c(lowerbounds,-Inf) ##the lower limits for computing the boundary crossing probability.
sigma=matrix(0,k,k) #the covariance matrix of multivariate normal distribution.
for(i in 1:k){
for(j in 1:k){
sigma[i,j]=mean1[min(i,j)]/mean1[max(i,j)]
}
}
uppermin=c(rep(Inf,(k-1)),lowerbounds[(k-1)]) ##use the lower bound at (k-1)th analysis as an initial lower searching boundary.
uppermax=c(rep(Inf,(k-1)),u_K) ##use the last upper bound as the initial upper searching boundary.
errormin=mvtnorm::pmvnorm(lower=lowerlimits,upper=uppermin,mean=mean1,sigma=sigma)[1]##the type II error given l_k=l_{k-1}.
errormax=mvtnorm::pmvnorm(lower=lowerlimits,upper=uppermax,mean=mean1,sigma=sigma)[1]##the type II error given l_k=u_K.
if(((errormin+tol)>=betak)&(betak>=errormin)){ ##if l_k=l_{k-1} achieves the type II error requirement.
return(list(l_k=lowerbounds[(k-1)],error=errormin,flag=0)) ##flag==0 means everything is fine.
}
if(((errormax+tol)>=betak)&(betak>=errormax)){ ##if l_k=u_K achieves the type II error requirement.
return(list(l_k=u_K,error=errormax,flag=1)) ##flag==1 means lower bounds after kth analysis should be set to be u_K.
}
##update the lower and upper searching bounds, uses bisection twice to narrow the searching interval
boundmax=u_K
boundmin=lowerbounds[(k-1)]
for(i in 1:2){
boundnew=(boundmin+boundmax)/2
uppernew=c(rep(Inf,(k-1)),boundnew)
errornew=mvtnorm::pmvnorm(lower=lowerlimits,upper=uppernew,mean=mean1,sigma=sigma)[1]##the type II error given boundnew.
if(((errornew+tol)>=betak)&(betak>=errornew)){ ##if l_k=boundnew achieves the type II error requirement.
return(list(l_k=boundnew,error=errornew,flag=0))
}
if(errornew>betak){##in this case,boundnew should be the new upper searching bound.
boundmax=boundnew
errormax=errornew
}else{
boundmin=boundnew
errormin=errornew
}
}
##boundmin and boundmax did not meet the error requirement, so try with weighted average of boundmin and boundmax.
#the weights are chosen so that if (errormax-betak)>(betak-errormin),then the new bound would be closer to boundmin.
boundnew=(boundmin*(errormax-betak)+boundmax*(betak-errormin))/(errormax-errormin)
uppernew=c(rep(Inf,(k-1)),boundnew)
errornew=mvtnorm::pmvnorm(lower=lowerlimits,upper=uppernew,mean=mean1,sigma=sigma)[1]##the type II error given boundnew.
##if none of the initial value of boundnew, u_K and l_{k-1} satisfies the type II error constraint.
t=0
while((((errornew+tol)<betak)|(betak<errornew))&(t<=30)){
if(betak>errornew){ ##in this case, errornew is too small, l_k should lie between boundnew and boundmax
boundmin=boundnew
errormin=errornew
}else{##in this case,errornew is too large,l_k should lie between boundmin and boundnew
boundmax=boundnew
errormax=errornew
}
boundnew=(boundmin*(errormax-betak)+boundmax*(betak-errormin))/(errormax-errormin)
uppernew=c(rep(Inf,(k-1)),boundnew)
errornew=mvtnorm::pmvnorm(lower=lowerlimits,upper=uppernew,mean=mean1,sigma=sigma)[1]
t=t+1
}
  if(((errornew+tol)<betak)|(betak<errornew)) ##the last while iteration stopped because t>30, not because of convergence
stop('cannot converge with the current tol.')
return(list(l_k=boundnew,error=errornew,flag=0))
}
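# Side sketch (hedged, generic): the weighted-average update above is the
# classical false-position (regula falsi) step for a monotone function f.
# falsepos() below is a hypothetical standalone helper, not a package function.
falsepos <- function(f, lo, hi, target, tol = 1e-6, maxit = 30) {
  flo <- f(lo); fhi <- f(hi) # assumes flo < target < fhi
  for (t in seq_len(maxit)) {
    new <- (lo*(fhi - target) + hi*(target - flo))/(fhi - flo) # weighted average
    fnew <- f(new)
    if (abs(fnew - target) <= tol) return(new)
    if (fnew < target) { lo <- new; flo <- fnew } else { hi <- new; fhi <- fnew }
  }
  stop('cannot converge with the current tol.')
}
falsepos(stats::pnorm, -3, 3, target = 0.975) # close to qnorm(0.975) = 1.96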
|
/scratch/gouwar.j/cran-all/cranData/BinGSD/R/bound1.r
|
##calculate lower bound for kth analysis given sample sizes and l_1,...l_{k-1} using binomial distribution
#(1)lowerbounds: lower bounds before kth analysis, l_1,l_2,...,l_{k-1}.
#(2)n.I: sample sizes before and at kth analysis, n_1,n_2,...,n_k.
#(3)betak: desired type II error spent at kth analysis.
#(4)k: the index of the analysis at which the lower bound is computed.
#(5)p_1,p_0,the last upper bound u_K.
#NOTE:the computation of lower bounds are taken under H1!!
#bound2(k,lowerbounds[1:(k-1)],u_K,n.I[1:k],p_1,betaspend[k],K)
bound2<-function(k,lowerbounds,u_K,n.I,p_1,betak,K){
errormin=(exactprob1(n.I,c(lowerbounds,lowerbounds[(k-1)]+1),p_1,k,K)$plo)[k]##the type II error given l_k=l_{k-1}+1.
errormax=(exactprob1(n.I,c(lowerbounds,u_K),p_1,k,K)$plo)[k]##the type II error given l_k=u_K.
if(errormin>betak)
stop(paste0('cannot find a lower bound for ',k,'th analysis.'))
if(errormax<=betak){
if(k<(K-1))
stop(paste0('cannot find a lower bound for ',k,'th analysis.'))
return(list(l_k=u_K,error=errormax))
}
  ##until now, errormin<=betak, errormax>betak and l_{k-1}+1<u_K
  if((u_K-lowerbounds[(k-1)])==2){ ##then l_k=l_{k-1}+1=u_K-1 is the largest integer that satisfies the error requirement.
return(list(l_k=lowerbounds[(k-1)]+1,error=errormin))
}
##until now, (l_{k-1}+1)<(u_K-1)
##update the lower and upper searching bounds until the lower searching bound+1=the upper.
#uses bisection only to narrow the searching interval since the solution is integer. it converges fast.
boundmax=u_K
boundmin=lowerbounds[(k-1)]+1
while(boundmax>(boundmin+1)){ ##iterate until boundmax=boundmin+1. Since errormax>betak and errormin<=betak, the final errormin is the solution.
boundnew=floor((boundmin+boundmax)/2)
errornew=(exactprob1(n.I,c(lowerbounds,boundnew),p_1,k,K)$plo)[k]##the type II error given boundnew.
if(errornew>betak){##in this case,boundnew should be the new upper searching bound.
boundmax=boundnew
errormax=errornew
}else{
boundmin=boundnew
errormin=errornew
}
}
##since errormin<=betak,errormax>betak,boundmin=boundmax-1, boundmin is the largest integer that makes type II error<=betak
return(list(l_k=boundmin,error=errormin))
}
|
/scratch/gouwar.j/cran-all/cranData/BinGSD/R/bound2.r
|
##check validity of parameters
##I: an increasing vector of length K whose last element should be 1 (otherwise I is standardized),
##or of length K-1 with the last element less than 1.
##beta: a value in the interval (0,0.5]. alpha: a scalar in (0,0.3].
##betaspend: a vector of length K with all elements in [0,1] that sum to 1; otherwise the actual
##betaspend is betaspend/sum(betaspend). p_0 and p_1: two scalars with p_1 greater than p_0.
check.asymdesign<-function(I,beta,betaspend,alpha,p_0,p_1,K,tol){
##--round up K and check if K is no less than 1 and no more than 20----
K=round(K)
if((K<=1)|(K>20))
stop('Please input a K that lies between 1 and 20 (not including 1).')
##--check the sequence of information fractions I--
if(min(I)<=0)
stop('Element(s) of I should be positive.')
temp1=length(I)
temp2=I-c(0, I[1:(temp1-1)]) #temp2 is c(I[1],I[2]-I[1],I[3]-I[2],...)
if(min(temp2)<=0)
stop('I should be an increasing vector.')
if(temp1==K){##if length of I is equal to K,check its values.
if(I[K]!=1){
warning('I will be standardized so that the last element is 1.')
I=I/I[K]
}
}else{
if(temp1!=(K-1))
stop('length of I is neither K nor K-1.')
if(I[(K-1)]>=1)
stop('Please input I with I[K-1] less than 1.')
I=c(I,1) ##so that I if of length K.
}
##--check beta----
if(length(beta)>1)
stop('beta is not a scalar.')
if((beta<=0)|(beta>0.5))
stop('Please input a beta that lies between 0 and 0.5 (not including 0).')
##--check betaspend----
if(length(betaspend)!=K)
stop('betaspend is not of length K.')
if((min(betaspend)<0)|(max(betaspend)>1))
stop('Element(s) in betaspend is less than 0 or greater than 1.')
if(sum(betaspend)!=1){
warning('betaspend will be standardized so that the total is 1.')
betaspend=betaspend/sum(betaspend)
}
##--check alpha--
if(length(alpha)>1)
stop('alpha is not a scalar.')
if((alpha<=0)|(alpha>0.3))
stop('Please input an alpha that lies between 0 and 0.3 (not including 0).')
##--check p_0
if(length(p_0)>1)
stop('p_0 is not a scalar.')
if((p_0<=0)|(p_0>=1))
stop('Please input a p_0 that lies between 0 and 1 (not including 0 and 1).')
##--check p_1--
if(length(p_1)>1)
stop('p_1 is not a scalar.')
if((p_1<=p_0)|(p_1>=1))
stop('Please input a p_1 that lies between p_0 and 1 (not including p_0 and 1).')
##check tol--
if(length(tol)>1)
stop('tol is not a scalar.')
if(tol>0.01)
stop('tol is greater than 0.01.')
if(tol<=0)
stop('tol is 0 or negative.')
return(list(I=I,betaspend=betaspend,K=K))
}
|
/scratch/gouwar.j/cran-all/cranData/BinGSD/R/check.asymdesign.r
|
##check validity of inputs
check.prob<-function(K,p_0,p_1,n.I,u_K,lowerbounds){
  ##--round K to the nearest integer and check that K is more than 1 and no more than 20----
K=round(K)
if((K<=1)|(K>20))
stop('Please input a K that lies between 1 and 20 (not including 1).')
##--check validity of p_1 and p_0
if(length(p_0)>1)
stop('p_0 is not a scalar.')
if((p_0<=0)|(p_0>=1))
stop('Please input a p_0 that lies between 0 and 1 (not including 0 and 1).')
if((min(p_1)<=p_0)|(max(p_1)>=1))
stop('Please input p_1 that lies between p_0 and 1 (not including p_0 and 1).')
##--check the sequence of sample sizes--
n.I=round(n.I)
if(min(n.I)<=0)
stop('element(s) of n.I should be positive.')
temp1=length(n.I)
temp2=n.I-c(0, n.I[1:(temp1-1)]) #temp2 is c(n_1,n_2-n_1,...,n_K-n_{K-1})
if(min(temp2)<=0)
stop('n.I should be an increasing vector.')
if(temp1!=K)##if length of n.I is not equal to K.
stop('length of n.I is not K.')
##check lowerbounds
temp1=length(lowerbounds)
temp2=lowerbounds[-1]-lowerbounds[1:(temp1-1)] #temp2 is c(l_2-l_1,l_3-l_2,...,l_K-l_{K-1}).
if(min(temp2)<0)
stop('lowerbounds should be a non-decreasing vector.')
if(temp1!=K){##if length of lowerbounds is not equal to K,check its values.
if(temp1!=(K-1))
stop('length of lowerbounds is neither K nor K-1.')
if(lowerbounds[temp1]>u_K)
      stop('the lower bound for analysis K-1 is greater than u_K.')
lowerbounds=c(lowerbounds,u_K)
}
if(u_K!=lowerbounds[K])
stop('u_K must equal the last lower bound.')
return(list(K=K,lowerbounds=lowerbounds,n.I=n.I))
}
|
/scratch/gouwar.j/cran-all/cranData/BinGSD/R/check.prob.r
|
#'Conditional power computation using exact test.
#'
#'Compute conditional power of single-arm group sequential design with binary
#'endpoint based on binomial distribution.
#'
#'Conditional power quantifies the conditional probability of crossing the upper bound given the interim result \eqn{z_i},
#'\eqn{1\le i<K}. Having inherited sample sizes and boundaries from \code{\link{exactdesign}} or \code{\link{exactprob}},
#'given the interim statistic at \eqn{i}th analysis \eqn{z_i}, the conditional power is defined as
#'
#'\eqn{\alpha _{i,K}(p|z_i)=P_{p}(Z_K\ge u_K, Z_{K-1}>l_{K-1}, \ldots, Z_{i+1}>l_{i+1}|Z_i=z_i)}
#'
#'With exact test, the test statistic at analysis \eqn{k} is \eqn{Z_k=\sum_{s=1}^{n_k}X_s}
#'which follows binomial distribution \eqn{b(n_k,p)}. Actually, \eqn{Z_k} is the total
#'number of responses up to the kth analysis.
#'
#'The increment statistic \eqn{Z_k-Z_{k-1}} also follows a binomial distribution \eqn{b(n_k-n_{k-1},p)} independently
#'of \eqn{Z_{1}, \ldots, Z_{k-1}}. Then the conditional power can be easily obtained using the same procedure
#'for deriving unconditional boundary crossing probabilities.
#'
#'Note that \eqn{Z_{1}, \ldots, Z_{K}} is a non-decreasing sequence, thus the conditional power is 1 when the interim statistic
#' \eqn{z_i>=u_K}.
#'
#'@param d An object of the class exactdesign or exactprob.
#'@param p_1 A scalar or vector representing response rate or probability of
#' success under the alternative hypothesis. The value(s) should be within
#' (p_0,1).
#'@param i Index of the analysis at which the interim statistic is given. Should
#' be an integer that ranges from 1 to K-1. i will be rounded to the nearest whole
#' number if it is not an integer.
#'@param z_i The interim statistic at analysis i.
#'
#'@return A list with the elements as follows: \itemize{ \item{K: As in d.}
#' \item{n.I: As in d.} \item{u_K: As in d.} \item{lowerbounds: As in d.}
#' \item{i: i used in computation.} \item{z_i: As input.} \item{cp: A matrix of
#' conditional powers under different response rates.} \item{p_1: As input.}
#' \item{p_0: As input.} }
#'
#'
#'@section Reference: \itemize{ \item{Christopher Jennison, Bruce W. Turnbull. Group Sequential Methods with
#' Applications to Clinical Trials. Chapman and Hall/CRC, Boca Raton, FL, 2000.} }
#'
#'@seealso \code{\link{exactprob}}, \code{\link{asymcp}},
#' \code{\link{exactdesign}}.
#'
#'@export
#'
#' @examples
#'I=c(0.2,0.4,0.6,0.8,0.99)
#'beta=0.2
#'betaspend=c(0.1,0.2,0.3,0.3,0.2)
#'alpha=0.05
#'p_0=0.3
#'p_1=0.5
#'K=4.6
#'tol=1e-6
#'tt1=asymdesign(I,beta,betaspend,alpha,p_0,p_1,K,tol)
#'tt2=exactdesign(tt1)
#'tt3=exactprob(p_1=c(0.4,0.5,0.6,0.7,0.8,0.9),d=tt2)
#'exactcp(tt2,p_1=c(0.4,0.5,0.6,0.7,0.8,0.9),1,2)
#'exactcp(tt3,p_1=c(0.4,0.5,0.6,0.7,0.8,0.9),3,19)
exactcp<-function(d,p_1,i,z_i){
##check validity of inputs
if(!(methods::is(d,"exactdesign")|methods::is(d,"exactprob")))
stop("exactcp must be called with class of either exactdesign or exactprob.")
p_0=d$p_0
i=round(i)
z_i=round(z_i)
if((min(p_1)<=p_0)|(max(p_1)>=1))
stop('Please input p_1 that lies between p_0 and 1 (not including p_0 and 1).')
  if((i>=d$K)|(i<1)) ##use d$K here since K is only redefined below
    stop('i is less than 1 or more than K-1.')
  if((z_i>=1000)|(z_i<=0))
    stop('invalid interim statistic: too large or too small.')
  K=d$K-i ##the number of analyses after the ith analysis.
n.I=(d$n.I)[i:(i+K)] ##sample sizes for i,i+1,...,K analysis
lowerbounds=(d$lowerbounds)[(i+1):(i+K)] ##lower bounds for i+1,i+2,...K analysis.
lowerbounds=lowerbounds-z_i ##new lower bounds for Z_{i,j}
n.I=n.I[-1]-n.I[1]
cp=matrix(0,1+length(p_1),1) ##to store conditional power under different p
  ##if z_i is equal to or greater than u_K, cp is 1.
if(lowerbounds[K]<=0){
cp[,1]=1
return(list(K=d$K,n.I=d$n.I,u_K=d$u_K,lowerbounds=d$lowerbounds,i=i,z_i=z_i,cp=cp,p_1=p_1,p_0=p_0))
}
  ##if z_i is greater than one or more of the lower bounds.
index1=which(lowerbounds<(-1))
if(length(index1)>0){
    lowerbounds[1:max(index1)]=-1 #set these lower bounds to -1 so that the trial continues even if no new responses occur.
}
#conditional power under H0
cp[1,]=exactprob1(n.I,lowerbounds,p_0,K,K)$phi ##cp under H0
##conditional power under H1.
for(s in 1:length(p_1)){
    cp[(s+1),]=exactprob1(n.I,lowerbounds,p_1[s],K,K)$phi ##cp under H1
}
cp=cbind(c(p_0,p_1),cp)
colnames(cp)=c('p','cp')
return(list(K=d$K,n.I=d$n.I,u_K=d$u_K,lowerbounds=d$lowerbounds,i=i,z_i=z_i,cp=cp,p_1=p_1,p_0=p_0))
}
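# Hedged cross-check (assumed toy values, not from the package): for i = K-1
# the conditional power is a single binomial tail, P(B(n_K - n_i, p) >= u_K - z_i).
p <- 0.5; n_i <- 30; n_K <- 40; z_i <- 12; u_K <- 17
1 - stats::pbinom(u_K - z_i - 1, n_K - n_i, p) # conditional power under p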
|
/scratch/gouwar.j/cran-all/cranData/BinGSD/R/exactcp.r
|
#' Compute sample size and boundaries using exact binomial distribution
#'
#' Compute sample size and boundaries of single-arm group sequential design with binary endpoint using exact binomial distribution
#'
#'Suppose \eqn{X_{1}, X_{2}, \ldots} are binary outcomes following Bernoulli
#'distribution \eqn{b(1,p)}, in which 1 stands for the case that the subject
#'responds to the treatment and 0 otherwise. Consider a group sequential test
#'with \eqn{K} planned analyses, where the null and alternative hypotheses are
#'\eqn{H_0: p=p_0} and \eqn{H_1: p=p_1} respectively. Note that generally
#'\eqn{p_1} is greater than \eqn{p_0}. For \eqn{k<K}, the trial stops if and
#'only if the test statistic \eqn{Z_k} crosses the futility boundary, that is,
#'\eqn{Z_k<=l_k}. The lower bound for the last analysis \eqn{l_K} is set to be
#'equal to the last and only upper bound \eqn{u_K} to make a decision. At the
#'last analysis, the null hypothesis will be rejected if \eqn{Z_K>=u_K}.
#'
#'The computation of lower bounds except for the last one is implemented with
#'\eqn{u_K} fixed, thus the derived lower bounds are non-binding. Furthermore,
#'the overall type I error will not be inflated if the trial continues after
#'crossing any of the interim lower bounds, which is convenient for the purpose
#'of monitoring. Let the sequence of sample sizes required at each analysis be
#'\eqn{n_{1}, n_{2}, \ldots, n_{K}}. For binomial endpoint, the Fisher
#'information equals \eqn{n_k/p/(1-p)} which is proportional to \eqn{n_k}.
#'Accordingly, the information fraction available at each analysis is equivalent
#'to \eqn{n_k/n_K}.
#'
#'With exact test, the test statistic at analysis \eqn{k} is \eqn{Z_k=\sum_{s=1}^{n_k}X_s}
#'which follows binomial distribution \eqn{b(n_k,p)}. Actually, \eqn{Z_k} is the total
#'number of responses up to the kth analysis.
#'
#'Under the null hypothesis, \eqn{Z_k} follows a binomial distribution \eqn{b(n_k,p_0)}.
#'While under the alternative hypothesis, \eqn{Z_k} follows \eqn{b(n_k,p_1)}.
#'Simultaneously finding proper \eqn{n_K} and \eqn{u_K} may involve massive computation.
#'In fact, the sample sizes obtained from the asymptotic test ought to be close to those from the exact test.
#'Thus, we adopt \eqn{n_K} from the asymptotic test as the starting value. The starting value of \eqn{u_K} is
#'computed given this \eqn{n_K}. Then \eqn{u_K} and \eqn{n_K} are updated iteratively until both error
#'requirements are met.
#'
#'Like \code{\link{asymdesign}}, the lower boundaries for the first \eqn{K-1} analyses are
#'sequentially determined by a search method. However, if the actual overall type II error exceeds the desired level,
#'not only sample sizes but also all the boundaries are updated, since the binomial distribution under \eqn{H_0}
#'involves with sample size.
#'
#'Due to the discreteness of binomial distribution, in exact test, the type I and
#'type II error actually spent at each analysis may not approximate the designated
#'amount. With the only one upper bound, the whole type I error is spent at the final analysis.
#' From some simulation studies, though not presented here, we found that carrying over
#'unused type II error has minor influence on the resulting boundaries and sample sizes.
#'However, in an attempt to reduce the false positive rate, we decided to recycle the unspent
#' amount of desired type II error. Thus, the elements of betaspend in an exactdesign object may be greater than
#' the amount pre-specified by the user.
#'
#' @param d An object of the class asymdesign.
#'
#' @return An object of the class exactdesign. This class contains:
#' \itemize{
#' \item{I: I used in computation, as in d.}
#' \item{beta: The desired overall type II error level, as in d.}
#' \item{betaspend: The desired type II error spent at each analysis used in computation, as in d.}
#' \item{alpha: The desired overall type I error level, as in d.}
#' \item{p_0: The response rate or the probability of success under null hypothesis, as in d.}
#' \item{p_1: The response rate or the probability of success under alternative hypothesis, as in d.}
#' \item{K: K used in computation, as in d.}
#' \item{n.I: A vector of length K which contains sample sizes required at each analysis to achieve desired type I and type
#' II error requirements. n.I equals sample size for the last analysis times the vector of information fractions.}
#' \item{u_K: The upper boundary for the last analysis.}
#' \item{lowerbounds: A vector of length K which contains lower boundaries for each analysis. Note that the lower
#' boundaries are non-binding.}
#' \item{problow: Probabilities of crossing the lower bounds under \eqn{H_1} or the actual type II error at each analysis.}
#' \item{probhi: Probability of crossing the last upper bound under \eqn{H_0} or the actual type I error.}
#' \item{power: power of the group sequential test with the value equals 1-sum(problow).}
#' }
#'
#'
#'@section Reference: \itemize{ \item{Christopher Jennison, Bruce W. Turnbull. Group Sequential Methods with
#' Applications to Clinical Trials. Chapman and Hall/CRC, Boca Raton, FL, 2000.} }
#'
#'@seealso \code{\link{exactprob}}, \code{\link{exactcp}},
#' \code{\link{asymdesign}}.
#'
#' @export
#'
#' @examples
#' I=c(0.2,0.4,0.6,0.8,0.99)
#' beta=0.2
#'betaspend=c(0.1,0.2,0.3,0.3,0.2)
#'alpha=0.05
#'p_0=0.3
#'p_1=0.5
#'K=4.6
#'tol=1e-6
#'tt1=asymdesign(I,beta,betaspend,alpha,p_0,p_1,K,tol)
#'tt2=exactdesign(tt1)
exactdesign<-function(d){
if(!methods::is(d,"asymdesign"))
stop('d is not an object of class asymdesign')
I=d$I
beta=d$beta
betaspend=d$betaspend ##the desired type II error spent at each analysis
alpha=d$alpha
p_0=d$p_0
p_1=d$p_1
K=d$K
n_K=d$n.I[K] ##adopt the maximum sample size output by asymdesign as starting value.
s=0
lowerbounds=rep(0,K) ##to store lower bounds
problow=rep(0,K) ##to store actual lower bound crossing probabilities under H1
while(s<=20){
##get starting value of u_K and check if the maximum sample size satisfies the two types of error requirements
u_K=stats::qbinom(1-alpha,n_K,p_0)+1 ##starting value of u_K under H0
temp1=stats::pbinom(u_K-1,n_K,p_1)
t=0
while((temp1>beta)&(t<=20)){ ##Step 0 of Algorithm 2
n_K=n_K+1
u_K=stats::qbinom(1-alpha,n_K,p_0)+1
temp1=stats::pbinom(u_K-1,n_K,p_1)
t=t+1
}
if(temp1>beta)
stop('cannot converge with the current setting of inputs. You may consider a larger beta or alpha.')
n.I=ceiling(n_K*I) ##the vector of sample sizes for all stages.
temp1=c(stats::qbinom(betaspend[1],n.I[1],p_1)-1,stats::qbinom(betaspend[1],n.I[1],p_1))
temp2=stats::pbinom(temp1,n.I[1],p_1)
lowerbounds[1]=max(temp1[temp2<=betaspend[1]]) ##the first lower bound.
if(lowerbounds[1]>=u_K)
stop('the first lower bound is larger than or equal to the last upper bound.')
problow[1]=max(temp2[temp2<=betaspend[1]]) ##the actual type II error for first analysis
betaspend[2]=betaspend[2]+betaspend[1]-problow[1] ##to carry over the unspent type II error
##for design with more than 2 stages.
if(K>2){
for(k in 2:(K-1)){
temp1=bound2(k,lowerbounds[1:(k-1)],u_K,n.I[1:k],p_1,betaspend[k],K) ##get the lower bound for kth analysis.
lowerbounds[k]=temp1$l_k
problow[k]=temp1$error
betaspend[(k+1)]=betaspend[(k+1)]+betaspend[k]-problow[k] ##carryover the unspent type II error.
}
}
##make the last lower bound=u_K and calculate actual power
lowerbounds[K]=u_K
problow[K]=(exactprob1(n.I,lowerbounds,p_1,K,K)$plo)[K]
##check if the design satisfies the power constraint. if not ,increase the sample size.
if(beta>=sum(problow))
break
##if the actual power less than desired, increase the maximum sample size by 1.
n_K=n_K+1 ##increase sample size.
s=s+1
}
if(beta<sum(problow)) ##the last while iteration is stopped due to s>20 but not convergence
    stop('cannot converge within the allowed number of iterations.')
probhi=exactprob1(n.I,lowerbounds,p_0,K,K)$phi ##the value is the probability of crossing the upper bound under H0.
x=list(I=I,beta=beta,betaspend=d$betaspend,alpha=alpha,p_0=p_0,p_1=p_1,K=K,n.I=n.I,u_K=u_K,lowerbounds=lowerbounds,
problow=problow,probhi=probhi,power=1-sum(problow))
class(x)="exactdesign"
return(x)
}
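# Hedged illustration (assumed values): u_K = qbinom(1-alpha, n_K, p_0) + 1 is
# the smallest integer u with P(Bin(n_K, p_0) >= u) <= alpha, i.e. the exact
# one-sided critical value used as the starting point above.
alpha <- 0.05; p_0 <- 0.3; n_K <- 50
u_K <- stats::qbinom(1 - alpha, n_K, p_0) + 1
c(1 - stats::pbinom(u_K - 1, n_K, p_0),  # <= alpha at u_K
  1 - stats::pbinom(u_K - 2, n_K, p_0))  # > alpha one step lower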
|
/scratch/gouwar.j/cran-all/cranData/BinGSD/R/exactdesign.r
|
#'Boundary crossing probabilities computation using exact test.
#'
#'Calculate boundary crossing probabilities of single-arm group sequential design with binary endpoint using binomial distribution
#'
#'This function is similar to \code{\link{asymprob}} except that the former uses binomial distribution and the latter
#'uses the normal asymptotic distribution. With \code{K=0}
#'(as default), \code{d} must be an object of class exactdesign. Meanwhile, other
#'arguments except for \code{p_1} will be inherited from \code{d} and the input values will be
#'ignored. With \code{K!=0}, the probabilities are derived from the input arguments. In
#'this circumstance, all the arguments except for \code{d} are required.
#'
#'The computation is based on the single-arm group sequential exact test
#'described in \code{\link{exactdesign}}. Therefore, for the output matrix of
#'upper bound crossing probabilities, the values for the first K-1 analyses are
#'zero since there is only one upper bound for the last analysis.
#'
#' @param K The maximum number of analyses, including the interim and the final. Should be an integer within (1,20]. K will be
#' rounded to the nearest whole number if it is not an integer. The default is 0.
#' @param p_0 The response rate or the probability of success under null hypothesis. Should be a scalar within (0,1).
#' @param p_1 A scalar or vector representing response rate or probability of success under the alternative hypothesis. The
#' value(s) should be within (p_0,1). It is a mandatory input.
#' @param n.I A vector of length K which contains sample sizes required at each analysis. Should be a positive and increasing
#' sequence.
#' @param u_K The upper boundary for the last analysis.
#' @param lowerbounds Non-decreasing lower boundaries for each analysis, in which each element is no less than -1 (no lower bound). With length K,
#' the last lower bound must be identical to u_K. With length K-1, the last element must be no greater than u_K and u_K will
#' be automatically added into the sequence. Note the lower bound must be less than the corresponding sample size.
#' @param d An object of the class exactdesign.
#'
#' @return An object of the class exactprob. This class contains:
#' \itemize{
#' \item{p_0: As input with \code{d=NULL} or as in \code{d}.}
#' \item{p_1: As input.}
#' \item{K: K used in computation.}
#' \item{n.I: As input with \code{d=NULL} or as in \code{d}.}
#' \item{u_K: As input with \code{d=NULL} or as in \code{d}.}
#' \item{lowerbounds: lowerbounds used in computation.}
#' \item{problow: Probabilities of crossing the lower bounds at each analysis.}
#' \item{probhi: Probability of crossing the upper bounds at each analysis.}
#' }
#'
#'@note The calculation of boundary crossing probabilities here borrowed strength from the
#'source code of function \code{gsBinomialExact} in package gsDesign and we really appreciate
#'their work.
#'
#'@section Reference: \itemize{ \item{Christopher Jennison, Bruce W. Turnbull. Group Sequential Methods with
#' Applications to Clinical Trials. Chapman and Hall/CRC, Boca Raton, FL, 2000.}
#' \item{Keaven M. Anderson, Dan (Jennifer) Sun, Zhongxin (John) Zhang. gsDesign: An R
#' Package for Designing Group Sequential Clinical Trials. R package version 3.0-1. }}
#'
#'@seealso \code{\link{exactdesign}}, \code{\link{exactcp}}, \code{\link{asymprob}}.
#'
#' @export
#'
#' @examples
#' I=c(0.2,0.4,0.6,0.8,0.99)
#' beta=0.2
#'betaspend=c(0.1,0.2,0.3,0.3,0.2)
#'alpha=0.05
#'p_0=0.3
#'p_1=0.5
#'K=4.6
#'tol=1e-6
#'tt1=asymdesign(I,beta,betaspend,alpha,p_0,p_1,K,tol)
#'tt2=exactdesign(tt1)
#'tt3=exactprob(p_1=c(0.4,0.5,0.6,0.7,0.8,0.9),d=tt2)
#'tt3=exactprob(K=5,p_0=0.4,p_1=c(0.5,0.6,0.7,0.8),n.I=c(15,20,25,30,35),u_K=15,
#'lowerbounds=c(3,5,10,12,15))
exactprob<-function(K=0,p_0,p_1,n.I,u_K,lowerbounds,d=NULL){
##check validity of inputs
  if(K==0){##K=0 (the default) means all parameters except p_1 are inherited from d; check if d belongs to class "exactdesign"
if(!methods::is(d,"exactdesign"))
stop('d is not an object of class exactdesign')
##if d is exactdesign class, then adopt the parameters except for p_1
p_0=d$p_0
if((min(p_1)<=p_0)|(max(p_1)>=1))
stop('Please input p_1 that lies between p_0 and 1 (not including p_0 and 1).')
K=d$K
n.I=d$n.I
u_K=d$u_K
lowerbounds=d$lowerbounds
}else{##in this case, we need to check the validity of inputs and adopt the inputs by the user.
if(min(lowerbounds)<(-1))
stop('Lowerbounds must be no less than -1.')
u_K=round(u_K)
temp1=check.prob(K,p_0,p_1,n.I,u_K,round(lowerbounds))
K=temp1$K
lowerbounds=temp1$lowerbounds
n.I=temp1$n.I
temp2=n.I-lowerbounds
if(min(temp2)<=0)
stop('lower bound must be less than the corresponding sample size.')
}
probhi=matrix(0,1+length(p_1),K)
problow=probhi
##compute boundary crossing probabilities under H0 and put them in the first row.
temp1=exactprob1(n.I,lowerbounds,p_0,K,K)
problow[1,]=temp1$plo
probhi[1,K]=temp1$phi
##compute boundary crossing probabilities under H1
for(i in 1:length(p_1)){
temp1=exactprob1(n.I,lowerbounds,p_1[i],K,K)
problow[(i+1),]=temp1$plo
probhi[(i+1),K]=temp1$phi
}
problow=cbind(c(p_0,p_1),problow,rowSums(problow))
probhi=cbind(c(p_0,p_1),probhi)
colnames(problow)=c('p',1:K,'Total')
colnames(probhi)=c('p',1:K)
x=list(p_0=p_0,p_1=p_1,K=K,n.I=n.I,u_K=u_K,lowerbounds=lowerbounds,problow=problow,probhi=probhi)
class(x)='exactprob'
return(x)
}
|
/scratch/gouwar.j/cran-all/cranData/BinGSD/R/exactprob.r
|
#compute the lower boundary crossing probabilities given the design, under H0 or H1. using binomial distribution.
##this function borrows the idea of gsBinomialExact of package gsDesign.
##exactprob1(n.I,c(lowerbounds,lowerbounds[(k-1)]+1),p_1,k,K)
##n.I is the sample size for the first k analysis. lowerbounds are for the first k analysis.
##this function can be checked against the results of gsBinomialExact with b=c(n.I[1:(k-1)]+1,l_k) and a=c(lowerbounds[1:(k-1)],l_k-1)
exactprob1<-function(n.I,lowerbounds,p,k,K){
m=c(n.I[1],diff(n.I)) ##m is the increment of sample size at each analysis,m=c(n_1,n_2-n_1)
plo=rep(0,k) ###store the probabilities of crossing the lower boundaries defined in (12.6)
phi=NULL ##when k=K, phi would be the upper boundary crossing probability
c.mat=matrix(0,ncol=k,nrow=n.I[k]+1) ### c.mat is the recursive function defined in (12.5)
  c.mat[,1]=stats::dbinom(0:n.I[k],m[1],p) ##the probabilities of 0:n.I[k] responses occurring at the first analysis
plo[1]=sum(c.mat[(0:n.I[k])<=lowerbounds[1],1])
for(i in 2:k){
    no.stop=((lowerbounds[(i-1)]+1):n.I[(i-1)]) #the numbers of responses that fall into the continuation interval before the ith analysis
no.stop.mat=matrix(no.stop,byrow=T,nrow=n.I[k]+1,ncol=length(no.stop)) #j in (12.5) with each col stands for a different j.
succ.mat=matrix(0:n.I[k],byrow=F,ncol=length(no.stop),nrow=n.I[k]+1)#y in (12.5) with each row stands for a different y.
bin.mat=matrix(stats::dbinom(succ.mat-no.stop.mat,m[i],p),byrow=F,ncol=length(no.stop),nrow=n.I[k]+1)#B_{m_i}(y-j,p) in (12.5)
c.mat[,i]=bin.mat%*%c.mat[no.stop+1,(i-1)] ##c_i(y,p)=sum{c_{i-1}(j,p)*B_{m_i}(y-j,p)} for each y.
plo[i]=sum(c.mat[(0:n.I[k])<=lowerbounds[i],i]) ##r_k^l in (12.6)
if(i==K){
plo[i]=sum(c.mat[(0:n.I[k])<lowerbounds[i],i]) #for the last analysis, lower bound crossing prob is P(z<l_K)
phi=sum(c.mat[(0:n.I[k])>=lowerbounds[i],i]) #for the last analysis, upper bound crossing prob is P(z>=l_K)
}
}
return(list(plo=plo,phi=phi))
}
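# Hedged hand check for a two-stage design (assumed values): the recursion above
# should reproduce the direct convolution P(Z_1 > l_1, Z_2 < l_2).
n.I <- c(10, 20); l <- c(2, 7); p <- 0.4
direct <- sum(sapply((l[1] + 1):n.I[1], function(j)
  stats::dbinom(j, n.I[1], p)*stats::pbinom(l[2] - 1 - j, n.I[2] - n.I[1], p)))
direct
exactprob1(n.I, l, p, 2, 2)$plo[2] # should match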
|
/scratch/gouwar.j/cran-all/cranData/BinGSD/R/exactprob1.r
|
#' Example input data containing a consolidated binary matrix with groups
#' @docType data
#'
#' @usage data(BinMatInput_ordination)
#'
#' @format A dataframe with columns for loci, and rows of replicate pairs. Grouping information is in the second column.
#' @examples data(BinMatInput_ordination)
#' mat = BinMatInput_ordination
#' group.names(mat)
#' scree(mat)
#' shepard(mat)
#' clrs = c("red", "green", "black")
#' nmds(mat, colours = clrs, labs = TRUE)
"BinMatInput_ordination"
|
/scratch/gouwar.j/cran-all/cranData/BinMat/R/BinMatInput_ordination.R
|
#' Example input data containing a binary matrix comprising replicate pairs
#'
#' @docType data
#'
#' @usage data(BinMatInput_reps)
#'
#' @format A dataframe with columns for loci, and rows of replicate pairs.
#' @examples data(BinMatInput_reps)
#' mat = BinMatInput_reps
#' check.data(mat)
#' cons = consolidate(mat)
#' pks = peaks.consolidated(cons)
#' err = errors(cons)
#' rem = peak.remove(cons, 4)
#' clust = upgma(cons)
"BinMatInput_reps"
|
/scratch/gouwar.j/cran-all/cranData/BinMat/R/BinMatInput_reps.R
|
#' Example input file of Bunias orientalis AFLP data, taken from Tewes et al. (2017). This dataset has already been consolidated, and can be used
#' as input for the generation of an nMDS plot. The paper can be found here: <https://besjournals.onlinelibrary.wiley.com/doi/full/10.1111/1365-2745.12869>
#' @docType data
#'
#' @usage data(bunias_orientalis)
#'
#' @format A dataframe with columns for loci, and rows of replicate pairs. Grouping information is in the second column.
#' @examples
#' bunias = bunias_orientalis
#' group.names(bunias)
#' nmds(bunias, labs = FALSE, include_ellipse = TRUE, legend_pos = "right")
#'
#'
"bunias_orientalis"
|
/scratch/gouwar.j/cran-all/cranData/BinMat/R/bunias_orientalis.R
|
#' @title Checks binary matrix for unwanted characters.
#'
#' @description Checks for unwanted values (other than 1, 0, and ?).
#'
#' @param x A CSV file containing replicate pairs of binary data.
#'
#' @return Index positions where unwanted values occur (row, column).
#'
#' @examples data(BinMatInput_reps)
#' mat = BinMatInput_reps
#' check.data(mat)
#'
#' @export
check.data = function(x){
row.names(x) <- x[[1]]
x[,1] <- NULL
  # check on the raw values: converting to numeric first would turn every
  # non-numeric entry (including "?") into NA, and the comparison would miss it
  answer = which(x != 0 & x != 1 & x != "?", arr.ind = TRUE)
if(length(answer) > 0) print(answer)
else {writeLines("None found.")}
}
|
/scratch/gouwar.j/cran-all/cranData/BinMat/R/check_data.R
|
#' @title Consolidates replicate pairs in a binary matrix.
#'
#' @description Reads in a binary matrix comprising replicate pairs and consolidates each pair into a consensus read. For each replicate pair at each locus, 1 & 1 -> 1 (shared presence), 0 & 0 -> 0 (shared absence), 0 & 1 -> ? (ambiguity).
#'
#' @param x A CSV file containing replicate pairs of binary data. See the example input file "BinMatInput_reps".
#'
#' @return Consolidated binary matrix.
#' @examples data(BinMatInput_reps)
#' mat = BinMatInput_reps
#' cons = consolidate(mat)
#'
#' @export
consolidate = function(x){
if(length(unique(x[,1]))!=length(x[,1])) {
n = as.character(x[,1])
dups = which(duplicated(n))
stop(c("These sample names are duplicated in your dataset: ", c(n[dups]), ". Please ensure that all sample names are unique."))
}
else if ((nrow(x) %% 2) != 0) {
stop(c("There is an odd number of samples in this dataset.\nIf you have a set of replicate pairs, there should be an even number. \nPlease correct this and re-upload your file."))
}
else {
row.names(x) <- x[[1]]
x[,1] <- NULL
x[,] <- sapply(x[,], as.numeric)
odd = x[seq(1, nrow(x), by = 2),]
even = x[seq(2, nrow(x), by = 2),]
new = odd+even
    new[,] <- lapply(new[,], gsub, pattern = "1", replacement = "?", fixed = TRUE) # sum 0+1 = 1 -> ? (ambiguity); must run before the next line
    new[,] <- lapply(new[,], gsub, pattern = "2", replacement = "1", fixed = TRUE) # sum 1+1 = 2 -> 1 (shared presence); 0+0 = 0 stays 0
nams = row.names(x)
samplenames = vector()
for(i in 1:(length(nams)/2))
{ #divide by two, because the new matrix is half the size (due to the rep pairs being combined)
samplenames[i] = paste(nams[i*2-1], nams[i*2], sep = '+')
}
row.names(new) = samplenames
colnames(new) = colnames(x)
return(data.frame(new))
}
}
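# Hedged toy illustration (assumed sample names): one replicate pair covering
# all three consolidation rules, 1 & 1 -> 1, 0 & 0 -> 0, 0 & 1 -> ?.
toy <- data.frame(sample = c("A_rep1", "A_rep2"),
                  L1 = c(1, 1), L2 = c(0, 0), L3 = c(0, 1))
consolidate(toy) # expected row "A_rep1+A_rep2": 1, 0, ?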
|
/scratch/gouwar.j/cran-all/cranData/BinMat/R/consolidate.R
|
#' @title Calculates Jaccard and Euclidean error rates.
#'
#' @description Calculates the Jaccard and Euclidean error rates for the dataset. Jaccard's error does not take shared absences of bands as being biologically meaningful. JE = (f10 + f01)/(f10 + f01 + f11) and EE = (f10 + f01)/(f10 + f01 + f11 + f00). At each locus, f01 and f10 indicates a case where a 0 was present in one replicate, and a 1 in the other. f11 indicates the shared presence of a band in both replicates, and f00 indicates a shared absence. For example, if a replicate pair comprises Rep1 = 00101 and Rep2 = 01100, JE = (1+1)/(1+1+1) = 2/3 = 0.67, EE = (1+1)/(1+1+1+2) = 2/5 = 0.4.
#'
#' @param x Consolidated binary matrix.
#'
#' @return JE (Jaccard Error), EE (Euclidean Error), and standard deviations.
#'
#' @examples data(BinMatInput_reps)
#' mat = BinMatInput_reps
#' cons = consolidate(mat)
#' errors(cons)
#'
#' @export
errors = function(x){
mismatch_err = matrix(nrow=nrow(x), ncol = 1)
jacc_err = matrix(nrow=nrow(x), ncol = 1)
for(i in 1:nrow(x)) {
# find the number of 1s, Os and question marks
ones = length(which(x[i,] == 1))
zeroes = length(which(x[i,] == 0))
questions = length(which(x[i,] == "?"))
sum_bands = ones + questions
mismatch_err[i,] = (questions/(questions + ones + zeroes))
jacc_err[i,] = (questions/(questions + ones))
}
error_table = data.frame("Errors" = matrix(ncol = 2, nrow = 4))
error_table[1,1] = "Average Euclidean Error:"
error_table[1,2] = round(base::mean(mismatch_err[,1]),4)
error_table[2,1] = "Euclidean error St. dev:"
error_table[2,2] = round(stats::sd(mismatch_err[,1]),4)
error_table[3,1] = "Average Jaccard:"
error_table[3,2] = round(base::mean(jacc_err[,1]),4)
error_table[4,1] = "Jaccard error St.dev:"
error_table[4,2] = round(stats::sd(jacc_err[,1]),4)
colnames(error_table) = c("Metric", "Value")
return(error_table)
}
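# Hedged mini-check of the worked example in the documentation above:
# Rep1 = 00101 and Rep2 = 01100 consolidate to c("0","?","1","?","0").
cons <- c("0", "?", "1", "?", "0")
q <- sum(cons == "?"); one <- sum(cons == "1"); zero <- sum(cons == "0")
c(JE = q/(q + one), EE = q/(q + one + zero)) # 0.667 and 0.4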
|
/scratch/gouwar.j/cran-all/cranData/BinMat/R/errors.R
|
#' @title Outputs group names specified in the input file for the creation of an nMDS plot.
#'
#' @description Returns group names in the uploaded consolidated binary data. This will help in knowing which colours are assigned to which group name.
#'
#' @param x Consolidated binary matrix with grouping information in column 2.
#'
#' @return Group names (the levels of the grouping variable).
#'
#' @examples mat = BinMatInput_ordination
#' group.names(mat)
#'
#' @export
group.names = function(x){
grps = as.factor(x[,2])
print(levels(grps))
}
|
/scratch/gouwar.j/cran-all/cranData/BinMat/R/group_names.R
|
#' @title Creates a non-metric multidimensional scaling plot (nMDS).
#'
#' @description Creates an nMDS plot from a consolidated binary matrix with grouping information. Colours and shapes of plotted points need to be specified. For example, if there are two groups, then: clrs = c("red", "blue"), sh = c(16, 16). This assigns red to the first group name, and blue to the second. Both will have a pch shape of 16 (round dot). These two vectors are then passed to the function nmds() as: colours = clrs, shapes = sh.
#'
#' @param x Consolidated binary matrix with grouping information in the second column.
#' @param dist_meth Distance method. Set to "binary" by default. Other options are "euclidean", "maximum", "manhattan", "canberra", or "minkowski".
#' @param k_val Number of dimensions for the nMDS plot. Set to 2 by default.
#' @param pt_size Point size for symbols on the plot. Set to 1 by default.
#' @param colours Vector containing colours to be assigned to groups. This can be changed to the options available in the RColorBrewer palette set (e.g. "Set1"). See <http://applied-r.com/rcolorbrewer-palettes/> for more palette options.
#' Alternatively, the colours can be set manually using, for example, c("red", "green", "blue"), thereby setting a colour for each group
#' in your dataset. There are 28 default colours that will be set automatically to your groups.
#' @param labs Indicate whether labels should appear on the graph or not (TRUE or FALSE). Default = FALSE.
#' @param legend_pos Indicate the position of the legend. Default = "right", but other options are "left", "bottom", "top", or "none"
#' @param include_ellipse Indicate whether ellipses should be included around groups. Default = FALSE.
#' @param ellipse_type Select the type of ellipses to include around groups. Options are "convex", "confidence", "t", "norm", and "euclid". See the ggpubr::ggscatter() function documentation for more details.
#' @param dimension1 Indicate the first dimension to plot (1, 2, or 3) for the x axis. If k = 2, the first two dimensions will automatically be plotted. If k = 3, select between the three.
#' @param dimension2 Indicate the second dimension to plot (1, 2, or 3) for the y axis
#' @return nMDS plot.
#'
#' @examples mat = BinMatInput_ordination
#' group.names(mat)
#' clrs = c("red", "green", "black")
#' nmds(mat, colours = clrs, labs = TRUE, include_ellipse = TRUE)
#'
#' @export
nmds = function(x, dist_meth = "binary", k_val = 2, pt_size = 1,
colours = c("dodgerblue", "black", "red", "green3", "orange", "darkblue", "gold2",
"darkgreen", "darkred", "grey", "darkgrey", "magenta", "darkorchid",
"purple", "brown", "coral3", "turquoise", "deeppink", "lawngreen",
"deepskyblue", "tomato", "yellow", "yellowgreen",
"royalblue", "olivedrab", "midnightblue", "indianred1", "darkturquoise"),
#shapes = 16,
labs = FALSE,
legend_pos = "right",
include_ellipse = FALSE,
ellipse_type = "norm", dimension1 = 1,
dimension2 = 2){
if(k_val <= 0)
stop("Enter a positive k-value.")
row.names(x) <- x[[1]] # make the sample names rownames,
x[,1] <- NULL # and then remove the sample name column
# truncate long sample names to at most 50 characters
newnames =substring(row.names(x), 0, 50)
row.names(x) = newnames
x[x =="?"] <- NA
#x = as.data.frame(x)
d = stats::dist((x[,2:ncol(x)]), method = dist_meth, diag = TRUE, upper = T)
d = as.data.frame(as.matrix(d))
d2 = stats::as.dist(d)
d2 = d2 + 0.01 # adding 0.01 here to cover for cases where there are identical sequences, leading to zero distances. Zero distances give the error "Warning: Error in isoMDS: zero or negative distance between objects x and y"
isoplot = MASS::isoMDS(d2, k = k_val)
fac = as.factor(x[,1]) # groups
isoplot_df = suppressWarnings( as.data.frame(tibble::as_tibble( isoplot$points )) )
if(k_val == 2) colnames(isoplot_df) = c("Dimension 1", "Dimension 2")
if(k_val ==3 ) colnames(isoplot_df) = c("Dimension 1", "Dimension 2", "Dimension 3")
if(labs == TRUE) mds_labs = row.names(x)
if(labs == FALSE) mds_labs = ""
x_dimension = paste("Dimension", dimension1)
y_dimension = paste("Dimension", dimension2)
isoplot_df$groups = fac
nmds = #suppressWarnings(
ggpubr::ggscatter(isoplot_df,
x = x_dimension,
y = y_dimension,
label = mds_labs,
color = "groups",
palette = colours,
#shape = shapes_nmds[fac],
ellipse = include_ellipse,
ellipse.type = ellipse_type,
size = pt_size,
legend = legend_pos,
legend.title = "Groups",
show.legend.text = FALSE
) #) # end of suppress warnings
return(nmds)
}
|
/scratch/gouwar.j/cran-all/cranData/BinMat/R/nmds.R
|
#' Example input file of Nymphaea ISSR data, taken from Reid et al. (2021).
#' This dataset has already been consolidated, and can be used
#' as input for the generation of an nMDS plot. The paper can be found here: <https://www.sciencedirect.com/science/article/pii/S0304377021000218>
#' @docType data
#'
#' @usage data(nymphaea)
#'
#' @format A dataframe with columns for loci, and rows of replicate pairs. Grouping information is in the second column.
#' @examples nymph = nymphaea
#' group.names(nymph)
#' colrs = c("dodgerblue", "black", "red", "green3", "orange", "darkblue", "gold2", "darkgreen", "darkred", "grey", "darkgrey", "magenta", "darkorchid", "purple", "brown", "coral3", "turquoise", "deeppink", "lawngreen", "deepskyblue", "tomato")
#' nmds(nymph, labs = FALSE, include_ellipse = FALSE, colours = colrs, legend_pos = "right", pt_size = 2)
"nymphaea"
|
/scratch/gouwar.j/cran-all/cranData/BinMat/R/nymphaea.R
|
#' @title Removes samples with peaks equal to or less than a specified threshold value.
#'
#' @description Removes samples with a peak number less than a specified value.
#' @param x Binary matrix - consolidated or original.
#' @param thresh Peak threshold value for removal.
#'
#' @return Filtered dataset, and either the row name/s or row number/s of samples that were removed.
#' @examples mat = BinMatInput_ordination
#' new = peak.remove(mat, 4)
#'
#' @export
peak.remove = function(x, thresh) {
#row.names(x) <- x[[1]] # make the sample names rownames,
#x[,1] <- NULL # and then remove the sample name column
peak_record = matrix(data = NA, nrow(x), ncol = 1)
for(i in 1:nrow(x)){
count = length(which(x[i,2:ncol(x)]==1))
peak_record[i,]=count
}
if(thresh <= 0)
stop("Enter a threshold value greater than zero.")
if(thresh > max(peak_record))
stop(c("Do not remove samples with peaks less than ", max(peak_record)+1, ", as that the maximum peak number detected in your data set is ", max(peak_record), "."))
else {
x_tally = cbind(x, peak_record)
x_keep = subset(x_tally, peak_record >= thresh)
x_removed = subset(x_tally, peak_record < thresh)
x_keep$peak_record = NULL
x_removed$peak_record = NULL
names(x_keep)[1]="Sample"
#x_keep[is.na(x_keep)]<-"?"
# names(x_removed)[1]="Group"
writeLines(c("Number of samples removed: ", nrow(x_removed)))
if(nrow(x_removed) > 0)
writeLines(c("This was/these were: ", rownames(x_removed)))
return(x_keep)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BinMat/R/peakRemove.R
|
#' @title Calculates peak numbers for a consolidated data set (total, maximum, and minimum).
#'
#' @description Returns total, maximum, and minimum number of peaks in the binary matrix.
#'
#' @param x Binary matrix comprising replicate pairs.
#'
#' @return Peak information.
#'
#' @examples data(BinMatInput_reps)
#' mat = BinMatInput_reps
#' cons = consolidate(mat)
#' peaks.consolidated(cons)
#' @export
peaks.consolidated = function(x){
nr_peaks = matrix(nrow = nrow(x), ncol = 1)
for(i in 1:nrow(x)) {
total = 0
for(j in 1:ncol(x)) {if(x[i,j] == 1) total = total + 1}
nr_peaks[i,] = total
}
summary_table = data.frame("Summary" = matrix(ncol = 1, nrow = 5))
summary_table[1,1] = "Average no. peaks: "
summary_table[1,2] = round(base::mean(nr_peaks),4)
summary_table[2,1] = "sd: "
summary_table[2,2] = round(stats::sd(nr_peaks),4)
summary_table[3,1] = "Max. no. peaks: "
summary_table[3,2] = max(nr_peaks)
summary_table[4,1] = "Min. no. peaks: "
summary_table[4,2] = min(nr_peaks)
summary_table[5,1] = "No. loci: "
summary_table[5,2] = ncol(x)
colnames(summary_table) = c("Metric", "Value")
return(summary_table)
}
|
/scratch/gouwar.j/cran-all/cranData/BinMat/R/peaks_consolidated.R
|
#' @title Calculates peak numbers for the data set with all replicates (total, maximum, and minimum).
#'
#' @description Returns total, maximum, and minimum number of peaks in the binary matrix.
#'
#' @param x Binary matrix comprising replicate pairs.
#'
#' @return Peak information.
#'
#' @examples data(BinMatInput_reps)
#' mat = BinMatInput_reps
#' peaks.original(mat)
#'
#' @export
peaks.original = function(x){
row.names(x) <- x[[1]]
x[,1] <- NULL
nr_peaks = matrix(nrow = nrow(x), ncol = 1)
for(i in 1:nrow(x)) {
total = 0
for(j in 1:ncol(x)) {if(x[i,j] == 1) total = total + 1}
nr_peaks[i,] = total
}
summary_table = data.frame("Summary" = matrix(ncol = 2, nrow = 5))
summary_table[1,1] = "Average no. peaks: "
summary_table[1,2] = round(base::mean(nr_peaks),4)
summary_table[2,1] = "sd: "
summary_table[2,2] = round(stats::sd(nr_peaks),4)
summary_table[3,1] = "Max. no. peaks: "
summary_table[3,2] = max(nr_peaks)
summary_table[4,1] = "Min. no. peaks: "
summary_table[4,2] = min(nr_peaks)
summary_table[5,1] = "No. loci: "
summary_table[5,2] = ncol(x)
colnames(summary_table) = c("Metric", "Value")
return(summary_table)
}
|
/scratch/gouwar.j/cran-all/cranData/BinMat/R/peaks_replicates.R
|
#' @title Draws a scree plot.
#'
#' @description Creates a scree plot for the nMDS. This indicates the optimum number of dimensions to use to minimise the stress value. A red dotted line marks a stress value of 15% (0.15); values equal to or below this are considered acceptable.
#'
#' @param x Consolidated binary matrix with grouping information in column 2.
#' @param dimensions Number of dimensions to plot. Set to 4 by default.
#' @param dist_meth Distance method. Set to "binary" by default. Other options are "euclidean", "maximum", "manhattan", "canberra", or "minkowski".
#' @return Scree plot.
#' @examples mat = BinMatInput_ordination
#' scree(mat)
#'
#'
#' @export
scree = function(x, dimensions = 4, dist_meth = "binary") {
if(dimensions <=0)
stop("Enter a positive number for dimensions.")
row.names(x) <- x[[1]] # make the sample names rownames,
x[,1] <- NULL # and then remove the sample name column
# truncate long sample names to at most 50 characters
newnames =substring(row.names(x), 0, 50)
row.names(x) = newnames
x[x =="?"] <- NA
x = as.data.frame(x)
# x[,2:ncol(x)] starts calculating the distance from the second column to avoid including the grouping column information
d = stats::dist((x[,2:ncol(x)]), method = dist_meth, diag = TRUE, upper = T)
d = as.data.frame(as.matrix(d))
d2 = stats::as.dist(d)
d2 = d2 + 0.01 # adding 0.01 here to cover for cases where there are identical sequences, leading to zero distances. Zero distances give the error "Warning: Error in isoMDS: zero or negative distance between objects x and y"
scree.plot = function(d, k) {
stresses=MASS::isoMDS(d, k=k)$stress
for(i in rev(seq(k-1)))
stresses=append(stresses,MASS::isoMDS(d, k=i)$stress)
graphics::plot(seq(k),rev(stresses), type="b", xaxp=c(1,k, k-1), ylab="Stress (%)", xlab="Number of dimensions")
}
scree_plot = scree.plot(d2, k=dimensions)
graphics::abline(h = 15, col = "red", lty = 2)
return(scree_plot)
}
|
/scratch/gouwar.j/cran-all/cranData/BinMat/R/scree.R
|
#' @title Creates a shepard plot.
#'
#' @description Creates a Shepard plot for the nMDS. This indicates the 'goodness of fit' of the original distance matrix vs the ordination representation. A high R-squared value is favourable.
#'
#' @param x Consolidated binary matrix.
#' @param k_val Number of dimensions. Set to 2 by default.
#' @param dist_meth Distance method. Set to "binary" by default. Other options are "euclidean", "maximum", "manhattan", "canberra", or "minkowski".
#' @return Shepard plot.
#'
#' @examples mat = BinMatInput_ordination
#' shepard(mat)
#' @export
shepard = function(x, k_val = 2, dist_meth = "binary"){
if(k_val <= 0)
stop("Enter a positive k-value.")
row.names(x) <- x[[1]] # make the sample names rownames,
x[,1] <- NULL # and then remove the sample name column
# truncate long sample names to at most 50 characters
newnames =substring(row.names(x), 0, 50)
row.names(x) = newnames
x[x =="?"] <- NA
x = as.data.frame(x)
# x[,2:ncol(x)] starts calculating the distance from the second column to avoid including the grouping column information
d = stats::dist((x[,2:ncol(x)]), method = dist_meth, diag = TRUE, upper = T)
d = as.data.frame(as.matrix(d))
d2 = stats::as.dist(d)
d2 = d2 + 0.01 # adding 0.01 here to cover for cases where there are identical sequences, leading to zero distances. Zero distances give the error "Warning: Error in isoMDS: zero or negative distance between objects x and y"
isoplot = MASS::isoMDS(d2,k=k_val)
shep = MASS::Shepard(d2,isoplot$points, p=2)
summ = summary(stats::lm(shep$y~shep$x)) # R squared value
r_sq = round(summ$r.squared, digits = 2)
shep_plot = graphics::plot(shep, pch=16, xlab = "Original data distances", ylab = "Ordination distances", main = paste("R-squared =", r_sq))
graphics::abline(stats::lm(shep$y~shep$x), col = "blue", lty = 2,lwd = 2)
return(shep_plot)
}
|
/scratch/gouwar.j/cran-all/cranData/BinMat/R/shepard.R
|
#' @title Draws a hierarchical clustering tree (UPGMA).
#'
#' @description Creates a UPGMA hierarchical clustering tree, with a specified number of bootstrap repetitions.
#'
#' @param x Consolidated binary matrix.
#' @param bts Bootstrap replications. Set to 10 by default.
#' @param method Distance method. Set to 'binary' (=Jaccard distance) by default.
#' @param hclust Clustering method. Set to 'average' (=UPGMA) by default.
#' @param size Size of plot. Set to 0.55 by default.
#' @param lab_size Size of label text. Set to 0.55 by default.
#' @param fromFile Indicates whether the binary data used by the function has been consolidated by BinMat, or whether it comes from the user's own file. Set to FALSE by default (in the assumption that the data has been consolidated by BinMat, and that that object is being passed to the function).
#'
#'
#' @return UPGMA tree
#' @examples data(BinMatInput_reps)
#' mat = BinMatInput_reps
#' cons = consolidate(mat)
#' clust = upgma(cons)
#'
#' @export
upgma = function(x, bts = 10, size = 0.55, lab_size = 0.55, method = "binary", hclust="average", fromFile = FALSE){
if(bts <= 0)
stop("Enter a bootstrap repetition value > 0.")
if (fromFile == TRUE){
row.names(x) <- x[[1]]
x[,1] <- NULL
}
new_names_upgma = substring(row.names(x),0,50)
row.names(x) = new_names_upgma
x[x=="?"] = NA
x = as.data.frame(x)
result = pvclust::pvclust(t(x), method.dist = method, method.hclust = hclust, nboot = bts) # 'average' method is the UPGMA
dendro = graphics::plot(result, cex = size, print.num = F, print.pv = T, cex.pv = lab_size)
return(dendro)
}
|
/scratch/gouwar.j/cran-all/cranData/BinMat/R/upgma.R
|
## ----setup, fig.height = 5, fig.width = 5-------------------------------------
library(BinMat)
data1 = BinMatInput_reps
data2 = BinMatInput_ordination
# data1 contains all the replicate pairs that need to be consolidated into a consensus output
# data2 contains a consolidated binary matrix with grouping information in the second column
# Check the data for unwanted values
check.data(data1)
# Get information about peak numbers for all replicates
peaks.original(data1)
# Consolidate the replicate pairs in the matrix
cons = consolidate(data1)
# View the original matrix
data1
# View the consolidated output
cons
# Get the Jaccard and Euclidean error rates
errors(cons)
# Get information about the peak numbers in the consolidated matrix
peaks.consolidated(cons)
# Create a hierarchical clustering tree using the UPGMA method
clustTree = upgma(cons, size = 0.6)
# Find samples with peaks less than a specified threshold value, and return the new, filtered data set
filtered_data1 = peak.remove(cons, thresh = 6)
filtered_data1
# data2 contains an already-consolidated matrix with grouping information. This is used to create a scree, shepard, and an nMDS plot.
# # Find samples with peaks less than a specified threshold value, and return the new, filtered data set
filtered_data2 = peak.remove(data2, thresh = 7)
filtered_data2
# Get the names of the groups specified in the second column
group.names(data2)
# Create an object containing colours for each group
# Colours: Africa = red, Australia = blue, Europe = dark green
clrs = c("red", "blue", "darkgreen")
# Create a scree plot to check how the number of dimensions for an nMDS plot will affect the resulting stress values
scree(data2)
# Create a shepard plot showing the goodness of fit for the original data vs the ordination data
shepard(data2)
# Create an nMDS plot for the data. Default dimension is 2
nmds(data2, colours = clrs, labs = TRUE)
## ----fig.height = 5, fig.width = 5--------------------------------------------
bunias = bunias_orientalis
group.names(bunias)
nmds(bunias, labs = FALSE, include_ellipse = TRUE, legend_pos = "right")
## ----fig.height = 7, fig.width = 7--------------------------------------------
nymph = nymphaea
group.names(nymph)
colrs = c("dodgerblue", "black", "red", "green3", "orange", "darkblue", "gold2", "darkgreen", "darkred", "grey", "darkgrey", "magenta", "darkorchid", "purple", "brown", "coral3", "turquoise", "deeppink", "lawngreen", "deepskyblue", "tomato")
nmds(nymph, labs = FALSE, include_ellipse = FALSE, colours = colrs, legend_pos = "right", pt_size = 2)
|
/scratch/gouwar.j/cran-all/cranData/BinMat/inst/doc/BinMat.R
|
---
title: "BinMat: Processes Binary Data Obtained from Fragment Analysis"
author: "Clarke van Steenderen"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{BinMat}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---

<br/><br/>
*Department of Zoology and Entomology*
*The Centre for Biological Control* (https://www.ru.ac.za/centreforbiologicalcontrol/)
*Rhodes University, Grahamstown, Eastern Cape, South Africa*
*e-mail:* [email protected]
<br/><br/>
The idea behind this package was taken from my M.Sc. project <br/>
"*A genetic analysis of the species and intraspecific lineages of* Dactylopius *Costa (Hemiptera:Dactylopiidae)*" <br/>
See the publication in the [Biological Control journal](https://www.sciencedirect.com/science/article/pii/S1049964420306538), and the thesis on [Research Gate](https://www.researchgate.net/publication/339124038_A_genetic_analysis_of_the_species_and_intraspecific_lineages_of_Dactylopius_Costa_Hemiptera_Dactylopiidae). The GUI version of this package is available on the [R Shiny online server](https://clarkevansteenderen.shinyapps.io/BINMAT/), or it is accessible via GitHub by typing:
```{}
shiny::runGitHub("BinMat", "clarkevansteenderen")
```
into the console in R.
<br/>
---
## **OVERVIEW**
Processing and visualising trends in the binary data obtained from fragment analysis methods in molecular biology can be a time-consuming and cumbersome process, and typically entails complex workflows.
The BinMat package automates the analysis pipeline on one platform, and was written to process binary matrices derived from dominant marker genetic analyses.
The program consolidates replicate sample pairs in a dataset into consensus reads, produces summary statistics (peaks and error rates), and allows the user to visualise their data as non-metric multidimensional scaling (nMDS) plots and hierarchical clustering trees.
---
## **UPLOADING DATA**
**Data type 1**
Binary data containing replicate pairs that need to be consolidated should be in the following example format, uploaded as a CSV file (use the read.csv() function):
| | Locus 1 | Locus 2 | Locus 3 | Locus 4 | Locus 5 |
|---------------------- |:------------: | :------------: |:------------: |:------------: |:------------: |
| Sample A replicate 1 | 0 | 0 | 1 | 1 | 1 |
| Sample A replicate 2 | 0 | 0 | 1 | 1 | 1 |
| Sample B replicate 1 | 1 | 1 | 0 | 0 | 0 |
| Sample B replicate 2 | 0 | 1 | 0 | 0 | 1 |
Note that replicate pairs must be directly underneath each other, and that each sample needs to have a unique name.
The following conditions are applied to binary data replicates:
1. A 0 and a 1 produces a "?"
2. A 0 and a 0 produces a "0"
3. A 1 and a 1 produces a "1"
The consolidated output for the above example would thus be:
| | Locus 1 | Locus 2 | Locus 3 | Locus 4 | Locus 5 |
|---------------------- |:------------: | :------------: |:------------: |:------------: |:------------: |
| Sample A replicate 1 & 2 | 0 | 0 | 1 | 1 | 1 |
| Sample B replicate 1 & 2 | ? | 1 | 0 | 0 | ? |
---
**Data type 2**
Binary data that has already been consolidated must have grouping information in the second column, as shown in the example below. This should also be in CSV format. This matrix can be used to create an nMDS plot coloured by group.
| | Group | Locus 1 | Locus 2 | Locus 3 | Locus 4 | Locus 5 |
|-----------|:----------: |:------------: | :------------: |:------------: |:------------: |:------------: |
| Sample A | Africa | ? | 0 | 1 | 1 | 1 |
| Sample B | Asia | 0 | 0 | 1 | 1 | ? |
| Sample C | Europe | 1 | ? | 0 | 0 | 0 |
| Sample D | USA | ? | 1 | 0 | ? | 1 |
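Assuming the matrix is saved as a CSV file (the file name below is hypothetical), it can be read in with:
```{}
data2 = read.csv("my_consolidated_matrix.csv")
```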
---
## **FUNCTIONS**
| Function | Details |
|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| check.data() | Checks for unwanted values (other than 1, 0, and ?). |
| consolidate() | Reads in a binary matrix comprising replicate pairs and consolidates each pair into a consensus read. For each replicate pair at each locus, 1 & 1 -> 1 (shared presence), 0 & 0 -> 0 (shared absence), 0 & 1 -> ? (ambiguity). |
| errors() | Calculates the Jaccard and Euclidean error rates for the dataset. Jaccard's error does not take shared absences of bands as being biologically meaningful. JE = (f10 + f01)/(f10 + f01 + f11) and EE = (f10 + f01)/(f10 + f01 + f11 + f00). At each locus, f01 and f10 indicate a case where a 0 was present in one replicate, and a 1 in the other. f11 indicates the shared presence of a band in both replicates, and f00 indicates a shared absence. |
| group.names() | Returns group names in the uploaded consolidated binary data. This will help in knowing which colours are assigned to which group name. |
| nmds() | Creates an nMDS plot from a consolidated binary matrix with grouping information. Colours of plotted points need to be specified. |
| peak.remove() | Removes samples with a peak number less than a specified value. |
| peaks.consolidated() | Returns total, maximum, and minimum number of peaks in a consolidated binary matrix. |
| peaks.original() | Returns total, maximum, and minimum number of peaks in the binary matrix comprising replicates. |
| scree() | Creates a scree plot for the nMDS. This indicates the optimum number of dimensions to use to minimise the stress value. A red dotted line marks a stress value of 15% (0.15); values equal to or below this are considered acceptable. |
| shepard() | Creates a Shepard plot for the nMDS. This indicates the 'goodness of fit' of the original distance matrix vs the ordination representation. A high R-squared value is favourable. |
| upgma() | Creates a UPGMA hierarchical clustering tree, with a specified number of bootstrap repetitions. |
## **METHODS**
**Euclidean error = (f01 + f10) / (f01 + f10 + f00 + f11)**
The Euclidean error rate includes the shared absence of a band (f00). <br/><br/>
**Jaccard error = (f01 + f10) / (f01 + f10 + f11)**
The Jaccard error rate does not take shared absences of bands into account. The error rate will thus be inflated compared to the Euclidean. <br/><br/>
In the formulae above, 'f' refers to the frequency of each combination (i.e. 'f01 + f10' means the sum of all the occurrences of a zero and a one, and a one and a zero).
An error value is calculated for each replicate pair, and an average obtained representing the whole dataset.
The error rates for the example samples below would be:
<br/>
**Sample X rep 1: 0 1 1 0**
**Sample X rep 2: 1 0 1 0**
<br/>
Euclidean error = (1+1) / (1+1+1+1) = 2/4 = 0.5
Jaccard error = (1+1) / (1+1+1) = 2/3 = 0.67
<br/>
**Sample Y rep 1: 1 1 1 0**
**Sample Y rep 2: 1 1 0 0**
<br/>
Euclidean error = (1) / (1+1+2) = 1/4 = 0.25
Jaccard error = (1) / (1+2) = 1/3 = 0.33
---
Average Euclidean error = (0.5+0.25) / 2 = 0.38
Average Jaccard error = (0.67+0.33) / 2 = 0.5
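These worked values can be reproduced with the package itself. The chunk below is a minimal sketch: it retypes samples X and Y as replicate pairs (the object and column names are made up), consolidates them, and recovers the average Euclidean (0.375) and Jaccard (0.5) error rates:
```{r}
library(BinMat)
reps = data.frame(sample = c("X_rep1", "X_rep2", "Y_rep1", "Y_rep2"),
                  L1 = c(0, 1, 1, 1), L2 = c(1, 0, 1, 1),
                  L3 = c(1, 1, 1, 0), L4 = c(0, 0, 0, 0))
cons = consolidate(reps)
errors(cons)
```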
---
## **WORKED EXAMPLE**
There are four sample data sets embedded in the package: <br/>
1. BinMatInput_reps <br/>
2. BinMatInput_ordination <br/>
3. bunias_orientalis <br/>
4. nymphaea <br/>
The first two are small made-up datasets, and illustrate how the BinMat functions are implemented:
```{r setup, fig.height = 5, fig.width = 5}
library(BinMat)
data1 = BinMatInput_reps
data2 = BinMatInput_ordination
# data1 contains all the replicate pairs that need to be consolidated into a consensus output
# data2 contains a consolidated binary matrix with grouping information in the second column
# Check the data for unwanted values
check.data(data1)
# Get information about peak numbers for all replicates
peaks.original(data1)
# Consolidate the replicate pairs in the matrix
cons = consolidate(data1)
# View the original matrix
data1
# View the consolidated output
cons
# Get the Jaccard and Euclidean error rates
errors(cons)
# Get information about the peak numbers in the consolidated matrix
peaks.consolidated(cons)
# Create a hierarchical clustering tree using the UPGMA method
clustTree = upgma(cons, size = 0.6)
# Find samples with peaks less than a specified threshold value, and return the new, filtered data set
filtered_data1 = peak.remove(cons, thresh = 6)
filtered_data1
# data2 contains an already-consolidated matrix with grouping information. This is used to create a scree, shepard, and an nMDS plot.
# # Find samples with peaks less than a specified threshold value, and return the new, filtered data set
filtered_data2 = peak.remove(data2, thresh = 7)
filtered_data2
# Get the names of the groups specified in the second column
group.names(data2)
# Create an object containing colours for each group
# Colours: Africa = red, Australia = blue, Europe = dark green
clrs = c("red", "blue", "darkgreen")
# Create a scree plot to check how the number of dimensions for an nMDS plot will affect the resulting stress values
scree(data2)
# Create a shepard plot showing the goodness of fit for the original data vs the ordination data
shepard(data2)
# Create an nMDS plot for the data. Default dimension is 2
nmds(data2, colours = clrs, labs = TRUE)
```
## REAL-WORLD WORKED EXAMPLE
The **bunias_orientalis** and **nymphaea** datasets are real-world AFLP and ISSR files, respectively, and have already been consolidated by BinMat. These produce nMDS plots, as shown below:
### *Bunias orientalis*

```{r fig.height = 5, fig.width = 5}
bunias = bunias_orientalis
group.names(bunias)
nmds(bunias, labs = FALSE, include_ellipse = TRUE, legend_pos = "right")
```
### *Nymphaea*
{width=50%}
```{r fig.height = 7, fig.width = 7}
nymph = nymphaea
group.names(nymph)
colrs = c("dodgerblue", "black", "red", "green3", "orange", "darkblue", "gold2", "darkgreen", "darkred", "grey", "darkgrey", "magenta", "darkorchid", "purple", "brown", "coral3", "turquoise", "deeppink", "lawngreen", "deepskyblue", "tomato")
nmds(nymph, labs = FALSE, include_ellipse = FALSE, colours = colrs, legend_pos = "right", pt_size = 2)
```
|
/scratch/gouwar.j/cran-all/cranData/BinMat/inst/doc/BinMat.Rmd
|
Biserial.Corr.BN <-
function(n.BB, n.NN, prop.vec, corr.vec = NULL, corr.mat=NULL, coef.mat) {
validation.bin(n.BB, prop.vec)
if(is.null(corr.mat) && !is.null(corr.vec)) {
d=ceiling(uniroot(function(d) d^2-d-2*length(corr.vec), interval=c(0,1000))$root)
corr.mat=diag(1,d)
corr.mat[lower.tri(corr.mat)]=corr.vec
corr.mat=corr.mat+t(corr.mat)-diag(1,d)
}
correlation.bound.check(n.BB, n.NN, prop.vec, corr.vec=NULL, corr.mat, coef.mat)
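# cor.bn[j] = dnorm(qnorm(p_j))/sqrt(p_j*(1-p_j)) links the biserial and point-biserial
# correlations for binary variable j; (b + 3d) is the correlation between a standard
# normal and its Fleishman transform. Dividing the target correlation by both factors
# gives the required correlation on the underlying normal scale.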
cor.bn=apply(as.matrix(prop.vec,length(prop.vec),1),1,function(x) dnorm(qnorm(x))/sqrt(x*(1-x)))
bicor.mat=t(sapply(1:n.BB, function(ii) sapply(1:n.NN, function(i) (corr.mat[ii,i+n.BB]/cor.bn[ii])/(coef.mat[2,i]+3*coef.mat[4,i]))))
return(bicor.mat)
}
|
/scratch/gouwar.j/cran-all/cranData/BinNonNor/R/Biserial.Corr.BN.R
|
Int.Corr.NN <-
function(n.NN, corr.vec = NULL, corr.mat=NULL, coef.mat) {
if(is.null(corr.mat) && !is.null(corr.vec)) {
d=ceiling(uniroot(function(d) d^2-d-2*length(corr.vec), interval=c(0,1000))$root)
corr.mat=diag(1,d)
corr.mat[lower.tri(corr.mat)]=corr.vec
corr.mat=corr.mat+t(corr.mat)-diag(1,d)
}
n.BB<-ncol(corr.mat)-n.NN
cor.mat.NN<-as.matrix(corr.mat[(n.BB+1):(n.BB+n.NN),(n.BB+1):(n.BB+n.NN)],n.NN,n.NN)
correlation.bound.check(n.BB=0, n.NN, prop.vec=NULL, corr.vec=NULL, corr.mat=cor.mat.NN, coef.mat)
if(n.NN==1) {intcor.mat=1
} else
if(n.NN>1){
usigma.star.c<-diag(n.NN)
for ( ii in 2:n.NN) {
for ( i in 1:(ii-1)) {
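# Solve the Vale-Maurelli cubic for the intermediate normal correlation rho:
# corr(Y_i, Y_ii) = rho*(b_i*b_ii + 3*b_i*d_ii + 3*d_i*b_ii + 9*d_i*d_ii)
#                   + 2*rho^2*c_i*c_ii + 6*rho^3*d_i*d_ii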
mycorfunc<- function(rro) {
r <- rro*((coef.mat[2,i]*coef.mat[2,ii])+(3*coef.mat[2,i]*coef.mat[4,ii])+(3*coef.mat[4,i]*coef.mat[2,ii])+(9*coef.mat[4,i]*coef.mat[4,ii]))+
(rro^2)*(2*coef.mat[3,i]*coef.mat[3,ii])+ rro^3*(6*coef.mat[4,i]*coef.mat[4,ii])-cor.mat.NN[i,ii]
r
}#myfunc
p0 <-0
usigma.star.c[i,ii]=suppressWarnings(dfsane(par = p0, fn=mycorfunc, control=list(trace=FALSE)))$par
}#ii
}#i
intcor.mat<-usigma.star.c+t(usigma.star.c)
diag(intcor.mat)<-1
}#if
return(intcor.mat)
}
|
/scratch/gouwar.j/cran-all/cranData/BinNonNor/R/Int.Corr.NN.R
|
Tetra.Corr.BB <-
function(n.BB, prop.vec, corr.vec = NULL, corr.mat=NULL) {
validation.bin(n.BB, prop.vec)
if(is.null(corr.mat) && !is.null(corr.vec)) {
d=ceiling(uniroot(function(d) d^2-d-2*length(corr.vec), interval=c(0,1000))$root)
corr.mat=diag(1,d)
corr.mat[lower.tri(corr.mat)]=corr.vec
corr.mat=corr.mat+t(corr.mat)-diag(1,d)
}
n.NN<-ncol(corr.mat)-n.BB
cor.mat.BB<-as.matrix(corr.mat[1:n.BB,1:n.BB],n.BB,n.BB)
correlation.bound.check(n.BB, n.NN=0, prop.vec, corr.vec=NULL, corr.mat=cor.mat.BB, coef.mat=NULL)
q.vec=(1-prop.vec)
if(n.BB==1) {tetcor.mat=1
} else
if(n.BB>1) {
##get the pairwise correlations##
usigma.star.b<-diag(n.BB)
for ( i in 2:n.BB) {
for ( ii in 1:(i-1)) {
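# Solve for the tetrachoric correlation rho of the latent bivariate normal:
# Phi2(qnorm(p_ii), qnorm(p_i); rho) = delta*sqrt(p_i*q_ii*p_ii*q_i) + p_i*p_ii,
# where delta = cor.mat.BB[ii,i] is the target phi coefficient.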
mycorfuncb<- function(ro.mat) {
r<-integrate(function(z2) {sapply(z2, function(z2) {integrate(function(z1) ((2*pi*sqrt((1-ro.mat^2)))^-1)* exp(-(z1^2-2*ro.mat*z1*z2+z2^2)/(2*(1-ro.mat^2))), -Inf, qnorm(prop.vec[i]) )$value})}, -Inf, qnorm(prop.vec[ii]))$value-
cor.mat.BB[ii,i]*sqrt(prop.vec[i]*q.vec[ii]*prop.vec[ii]*q.vec[i])-(prop.vec[i]*prop.vec[ii])
r
}
p0 <-0
usigma.star.b[ii,i]=suppressWarnings(dfsane(par = p0, fn=mycorfuncb, control=list(trace=FALSE)))$par
}#ii
}#i
tetcor.mat<-usigma.star.b+t(usigma.star.b)
diag(tetcor.mat)<-1
}#if
return(tetcor.mat)
}
|
/scratch/gouwar.j/cran-all/cranData/BinNonNor/R/Tetra.Corr.BB.R
|
correlation.bound.check <-
function(n.BB, n.NN, prop.vec=NULL, corr.vec = NULL, corr.mat = NULL, coef.mat=NULL) {
validation.corr(n.BB, n.NN, corr.vec, corr.mat)
limitscor.mat=correlation.limits(n.BB,n.NN,prop.vec,coef.mat)
if(is.null(corr.mat) && !is.null(corr.vec)) {
d = n.BB + n.NN
corr.mat=diag(1,d)
corr.mat[lower.tri(corr.mat)]=corr.vec
corr.mat=corr.mat+t(corr.mat)-diag(1,d)
}
if((n.BB+n.NN)>1) {
errorCount= 0
for (i in 1:(n.BB+n.NN-1)) {
for (j in (i+1):(n.BB+n.NN)) {
if (i != j) {
if (corr.mat[i, j] > limitscor.mat[i, j] | corr.mat[j, i] < limitscor.mat[j, i]) {
cat("\n corr.mat[", i, ",", j, "] must be between ", round(limitscor.mat[j,i], 7), " and ", round(limitscor.mat[i,j], 7), "\n")
errorCount = errorCount + 1
cat("\n")
}
}
}
}
if (errorCount > 0) {
stop("Range violation occurred in the target correlation matrix! \n")
}
}
return(TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/BinNonNor/R/correlation.bound.check.R
|
correlation.limits <-
function(n.BB,n.NN, prop.vec=NULL,coef.mat=NULL) {
validation.bin(n.BB, prop.vec)
if (missing(n.NN) == TRUE && !is.null(coef.mat)) {
stop("Number of continuous variables is not specified !")
} else
if (n.NN > 0 && is.null(coef.mat)) {
stop("Coefficient matrix is not specified while n.NN > 0 !")
} else
if (!is.null(coef.mat)) {
if(n.NN == 0) {
stop("Coefficient matrix is specified while n.NN=0")
} else
if (n.NN > 0 && (ncol(coef.mat) != n.NN)) {
stop("Dimension of coefficient matrix does not match the number of continuous variables! \n")
}
} #if
if(!is.null(prop.vec)) {
q.vec=(1-prop.vec)
a=unlist(sapply(2:n.BB , function(i) sapply(1:(i-1), function(j) -sqrt((prop.vec[i]*prop.vec[j])/(q.vec[i]*q.vec[j])) )))
b=unlist(sapply(2:n.BB , function(i) sapply(1:(i-1), function(j) -sqrt((q.vec[i]*q.vec[j])/(prop.vec[i]*prop.vec[j])) )))
low.lim.b=apply(cbind(a,b),1,max)
c=unlist(sapply(2:n.BB , function(i) sapply(1:(i-1), function(j) sqrt((prop.vec[i]*q.vec[j])/(q.vec[i]*prop.vec[j])) )))
d=unlist(sapply(2:n.BB , function(i) sapply(1:(i-1), function(j) sqrt((q.vec[i]*prop.vec[j])/(prop.vec[i]*q.vec[j])) )))
upp.lim.b=apply(cbind(c,d),1,min)
sugcormat.b=diag(1,n.BB)
sugcormat.b[lower.tri(sugcormat.b)]=low.lim.b
sugcormat.b[upper.tri(sugcormat.b)]=upp.lim.b
} #ifbinary
if(!is.null(coef.mat)) {
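# Empirically approximate the attainable correlation range: sorting two simulated
# samples in the same order gives the maximum correlation (upper limit), while
# sorting one in reverse order gives the minimum (lower limit).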
samples = 1e+05
xmat=matrix(NA, nrow=samples, ncol=n.NN)
for (i in 1:n.NN){
x=as.vector(rnorm(samples))
xx=cbind(1,x,x^2,x^3)
xmat[,i]=xx%*%coef.mat[,i]
}
if(!is.null(prop.vec)){
xmat=cbind(sapply(1:length(prop.vec),function(i) rbinom(samples,1,prop.vec[i])),xmat)
}
sxmat=apply(xmat,2,sort)
upp.lim=cor(sxmat)[col(cor(sxmat)) > row(cor(sxmat))]
rsxmat=apply(sxmat,2,rev)
low.lim=cor(sxmat,rsxmat)[col(cor(sxmat,rsxmat)) < row(cor(sxmat,rsxmat))]
} #if
if(!is.null(prop.vec) && is.null(coef.mat) ) {
sugcormat= sugcormat.b
diag(sugcormat)=NA
} else
if( is.null(prop.vec) && !is.null(coef.mat) ) {
sugcormat=diag(1,n.NN)
sugcormat[lower.tri(sugcormat)]=low.lim
sugcormat[upper.tri(sugcormat)]=upp.lim
diag(sugcormat)=NA
} else
if( !is.null(prop.vec) && !is.null(coef.mat) ) {
sugcormat=diag(1,(n.BB+n.NN))
sugcormat[lower.tri(sugcormat)]=low.lim
sugcormat[upper.tri(sugcormat)]=upp.lim
sugcormat[1:n.BB,1:n.BB]=sugcormat.b
diag(sugcormat)=NA
}
limitscor.mat=sugcormat
return(limitscor.mat)
}
|
/scratch/gouwar.j/cran-all/cranData/BinNonNor/R/correlation.limits.R
|
fleishman.coef <-
function(n.NN, skewness.vec=NULL, kurtosis.vec=NULL){
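# Solve the Fleishman power-method system for Y = a + b*Z + c*Z^2 + d*Z^3, Z ~ N(0,1):
# a + c = 0 (zero mean); b^2 + 6bd + 2c^2 + 15d^2 = 1 (unit variance);
# 2c(b^2 + 24bd + 105d^2 + 2) = gamma1 (skewness); and the fourth equation
# matches gamma2, the excess kurtosis.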
fleishman.poly<- function(dd,gamma1,gamma2) {
r <- c(NA,length(dd))
r[1] <- dd[1]+dd[3]
r[2] <- (dd[2]^2)+(6*dd[2]*dd[4])+(2*(dd[3]^2))+(15*(dd[4]^2))-1
r[3] <- 2*dd[3]*((dd[2]^2)+(24*dd[2]*dd[4])+(105*(dd[4]^2))+2)-gamma1
r[4] <- 24*((dd[2]*dd[4])+(dd[3]^2)*(1+(dd[2]^2)+(28*dd[2]*dd[4]))+(dd[4]^2)*(12+(48*dd[2]*dd[4])+(141*(dd[3]^2))+(225*(dd[4]^2))))-gamma2
r
}#myfunc
scm=rbind(0,1,skewness.vec, kurtosis.vec)
coef.mat=matrix(0,4,n.NN)
for ( i in 1:n.NN){
p0 <- matrix(rnorm(25*4), 25, 4)
log <- capture.output({
ans <- multiStart(par=p0, fn=fleishman.poly, gamma1=scm[3,i], gamma2=scm[4,i], control=list(trace=FALSE), quiet=FALSE)
})
if(!any(ans$converged)) stop(cat("The algorithm did not converge for continuous variable",i,"!","\n"))
soln1 <- ans$par[which(ans$converged==TRUE),]
soln <- round(ans$par[which(ans$converged==TRUE),],5)
pats <- do.call(paste, c(as.data.frame(soln), sep = "\r"))
pats <- factor(pats, levels = unique(pats))
amat=cbind(unique(soln), Freq = as.vector(table(pats)))
index=which(amat[,"Freq"]==max(amat[,"Freq"]))
if(length(index)==1) {coef.mat[,i]<-round(soln1[which(apply(round(soln1,5), 1, function(x) all(x==amat[index,1:4])) )[1],],7)} else
{coef.mat[,i]<-round(soln1[which(apply(round(soln1,5), 1, function(x) all(x==amat[sample(index,1),1:4])) )[1],],7)
}
}#for
rownames(coef.mat)=c("a","b","c","d")
colnames(coef.mat)=paste("NN",seq(1:n.NN), sep="")
return(coef.mat)
}
|
/scratch/gouwar.j/cran-all/cranData/BinNonNor/R/fleishman.coef.R
|
###########################################################################################################################
###Simulates a sample of size n from a set of multivariate binary and nonnormal continuous variables.
###########################################################################################################################
gen.Bin.NonNor<-function(n, n.BB, n.NN, prop.vec=NULL, mean.vec=NULL, variance.vec=NULL, skewness.vec=NULL, kurtosis.vec=NULL, final.corr.mat, coef.mat=NULL){
if(missing(n)==TRUE) stop("n was not specified! \n")
if(missing(final.corr.mat)) stop("Final correlation matrix was not specified! \n")
validation.bin(n.BB, prop.vec)
validation.skewness.kurtosis(n.NN, skewness.vec, kurtosis.vec)
if(n.BB >0 && n.NN >0 && ncol(final.corr.mat) != length(prop.vec)+ length(skewness.vec)) {
stop("Dimension of final correlation matrix does not match the number of variables! \n")
} else
if(n.BB ==0 && n.NN >0 && ncol(final.corr.mat) != length(skewness.vec)) {
stop("Dimension of final correlation matrix does not match the number of continuous variables! \n")
} else
if(n.BB >0 && n.NN==0 && ncol(final.corr.mat) != length(prop.vec)) {
stop("Dimension of final correlation matrix does not match the number of binary variables! \n")
}#if
if(!is.null(prop.vec)&& is.null(skewness.vec)) {
myz<-rmvnorm(n, mean=rep(0,n.BB),final.corr.mat)
myb<-matrix(0,n,n.BB)
myb=matrix(sapply(1:n.BB, function(ii) sapply(1:n, function(i) if(1*myz[i,ii]>qnorm(1-prop.vec[ii])) myb[i,ii]=1 else myb[i,ii]=0 )),n,n.BB)
mydata=cbind(myb)
} else
if(is.null(prop.vec) && !is.null(skewness.vec)) {
myz<-rmvnorm(n, mean=rep(0,n.NN),final.corr.mat)
myy<-matrix(0,n,n.NN)
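# Apply the Fleishman polynomial to each normal column, then rescale to the
# target mean and variance.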
myy=matrix(sapply(1:n.NN, function(j) sapply(1:n,function(i) (coef.mat[1,j]+coef.mat[2,j]*myz[i,j]+coef.mat[3,j]*(myz[i,j]^2)+coef.mat[4,j]*myz[i,j]^3)*
sqrt(variance.vec[j])+(mean.vec[j]))),n,n.NN)
mydata=cbind(myy)
}else
if(!is.null(prop.vec) && !is.null(skewness.vec)) {
myz<-rmvnorm(n, mean=rep(0,(n.BB+n.NN)),final.corr.mat)
myb<-matrix(0,n,n.BB)
myb=matrix(sapply(1:n.BB, function(ii) sapply(1:n, function(i) if(1*myz[i,ii]>qnorm(1-prop.vec[ii])) myb[i,ii]=1 else myb[i,ii]=0)),n,n.BB)
myy<-matrix(0,n,n.NN)
myy=matrix(sapply(1:n.NN, function(j) sapply(1:n,function(i) (coef.mat[1,j]+coef.mat[2,j]*myz[i,j+n.BB]+coef.mat[3,j]*(myz[i,j+n.BB]^2)+coef.mat[4,j]*myz[i,j+n.BB]^3)*
sqrt(variance.vec[j])+(mean.vec[j]))),n,n.NN)
mydata=cbind(myb,myy)
colnames(mydata)<-NULL
}#if
return(mydata)
}
|
/scratch/gouwar.j/cran-all/cranData/BinNonNor/R/gen.Bin.NonNor.R
|
overall.corr.mat <-
function(n.BB, n.NN, prop.vec=NULL, corr.vec = NULL, corr.mat=NULL, coef.mat=NULL){
validation.bin(n.BB, prop.vec)
if(is.null(corr.mat) && !is.null(corr.vec)) {
d=ceiling(uniroot(function(d) d^2-d-2*length(corr.vec), interval=c(0,1000))$root)
corr.mat=diag(1,d)
corr.mat[lower.tri(corr.mat)]=corr.vec
corr.mat=corr.mat+t(corr.mat)-diag(1,d)
}
correlation.bound.check(n.BB, n.NN, prop.vec, corr.vec=NULL, corr.mat, coef.mat)
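# Assemble the intermediate (normal-scale) correlation matrix block by block:
# tetrachoric entries for binary-binary pairs, Vale-Maurelli solutions for
# continuous-continuous pairs, and biserial adjustments for binary-continuous pairs.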
if(!is.null(prop.vec) && is.null(coef.mat) ) {
final.corr.mat<-diag(1,n.BB)
final.corr.mat[1:n.BB,1:n.BB]=Tetra.Corr.BB(n.BB, prop.vec, corr.vec = NULL, corr.mat)
} else
if( is.null(prop.vec) && !is.null(coef.mat) ) {
final.corr.mat<-diag(1,n.NN)
final.corr.mat[1:n.NN,1:n.NN]=Int.Corr.NN(n.NN, corr.vec = NULL, corr.mat, coef.mat)
} else
if(!is.null(prop.vec) && !is.null(coef.mat) ) {
final.corr.mat<-diag(1,(n.BB+n.NN))
final.corr.mat[1:n.BB,1:n.BB]=Tetra.Corr.BB(n.BB, prop.vec, corr.vec = NULL, corr.mat)
final.corr.mat[(n.BB+1):(n.BB+n.NN),(n.BB+1):(n.BB+n.NN)]=Int.Corr.NN(n.NN, corr.vec = NULL, corr.mat, coef.mat)
final.corr.mat[1:n.BB,(n.BB+1):(n.BB+n.NN)]=Biserial.Corr.BN(n.BB, n.NN, prop.vec, corr.vec = NULL, corr.mat, coef.mat)
final.corr.mat[(n.BB+1):(n.BB+n.NN),1:n.BB]=t(Biserial.Corr.BN(n.BB, n.NN, prop.vec, corr.vec = NULL, corr.mat, coef.mat))
}
if(is.positive.definite(final.corr.mat)==FALSE) {
warning("Intermediate correlation matrix is not positive definite. Nearest positive definite matrix is used!")
final.corr.mat = as.matrix(nearPD(final.corr.mat, corr = TRUE, keepDiag =TRUE)$mat)
}
return(final.corr.mat)
}
|
/scratch/gouwar.j/cran-all/cranData/BinNonNor/R/overall.corr.mat.R
|
validation.bin <-
function(n.BB, prop.vec = NULL){
if (missing(n.BB) == TRUE && !is.null(prop.vec)) {
stop("Number of binary variables is not specified !")
}
if ((n.BB < 0) | (floor(n.BB) != n.BB)) {
stop("Number of binary variables must be a non-negative integer !")
} else
if (n.BB > 0 && is.null(prop.vec)) {
stop("Proportion vector is not specified while n.BB > 0 !")
} else
if (!is.null(prop.vec)) {
if(n.BB == 0) {
stop("Proportion vector is specified while n.BB=0")
} else
if (n.BB > 0 && (length(prop.vec) != n.BB)) {
stop("Proportion vector is misspecified, dimension is wrong!")
} else
if (n.BB > 0 && (length(prop.vec) == n.BB)) {
errorCount= 0
for (i in 1:n.BB){
if(prop.vec[i] <= 0 | prop.vec[i] >= 1) {
cat("\n Proportion for binary variable",i,"must be between '0' and '1!!","\n")
errorCount = errorCount + 1
cat("\n")
} #if
} #for
if (errorCount > 0) {
stop("Range violation occurred in the proportion vector!")
}#if
} #if
} #if
return(TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/BinNonNor/R/validation.bin.R
|
validation.corr <-
function (n.BB, n.NN, corr.vec = NULL, corr.mat = NULL) {
if (missing(n.BB) == TRUE) {
stop("Number of binary variables is not specified !")
} else
if (missing(n.NN) == TRUE) {
stop("Number of continuous variables is not specified !")
} else
if (!missing(n.BB) && !missing(n.NN)) {
if ((n.BB < 0) | (floor(n.BB) != n.BB)) {
stop("Number of binary variables must be a non-negative integer !")
} else
if ((n.NN < 0) | (floor(n.NN) != n.NN)) {
stop("Number of continuous variables must be a non-negative integer !")
} else
d = n.BB + n.NN
} #if
if (is.null(corr.mat) & is.null(corr.vec)) {
stop("You must specify full correlation matrix OR vector of elements below the diagonal")
} #if
if (!is.null(corr.mat) & !is.null(corr.vec)) {
corr.mat.from.corr.vec=diag(1,d)
corr.mat.from.corr.vec[lower.tri(corr.mat.from.corr.vec)]=corr.vec
corr.mat.from.corr.vec=corr.mat.from.corr.vec+t(corr.mat.from.corr.vec)-diag(1,d)
if (sum(dim(corr.mat.from.corr.vec) == dim(corr.mat)[1]) !=2) {
stop("corr.vec and corr.mat are non-conformable")
}#if
if (sum(corr.mat.from.corr.vec == corr.mat) != (d * d)) {
stop("Correlation matrix from corr.vec and corr.mat are not the same")
}#if
} #if
if (!is.null(corr.vec)) {
if (length(corr.vec) != (d * (d - 1)/2)) {
stop("Vector of correlations is misspecified, dimension is wrong!\n")
} #if
if ((min(corr.vec) <= -1) | (max(corr.vec) >= 1)) {
stop("Correlations must be between -1 and 1!\n")
} #if
corr.mat.from.corr.vec=diag(1,d)
corr.mat.from.corr.vec[lower.tri(corr.mat.from.corr.vec)]=corr.vec
corr.mat.from.corr.vec=corr.mat.from.corr.vec+t(corr.mat.from.corr.vec)-diag(1,d)
if (is.positive.definite(corr.mat.from.corr.vec) == FALSE) {
stop("Specified correlation matrix (from corr.vec) is not positive definite! \n")
} #if
}#if
if (!is.null(corr.mat)) {
if (dim(corr.mat)[1] != d | dim(corr.mat)[2] != d) {
stop("Correlation matrix dimension is wrong!\n")
}#if
if (is.positive.definite(corr.mat) == FALSE) {
stop("Specified correlation matrix is not positive definite! \n")
}#if
if (isSymmetric(corr.mat) == FALSE) {
stop("Specified correlation matrix is not symmetric! \n")
}#if
}#if
return(TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/BinNonNor/R/validation.corr.R
|
validation.skewness.kurtosis <-
function(n.NN, skewness.vec=NULL, kurtosis.vec=NULL){
if (missing(n.NN) == TRUE && !is.null(skewness.vec) && !is.null(kurtosis.vec)) {
stop("Number of continuous variables is not specified !")
} else
if ((n.NN < 0) | (floor(n.NN) != n.NN)) {
stop("Number of continuous variables must be a non-negative integer !")
} else
if (n.NN > 0 && is.null(skewness.vec) && is.null(kurtosis.vec) ) {
stop("Skewness and kurtosis vectors are not specified while n.NN > 0 !")
} else
if (n.NN > 0 && is.null(skewness.vec) && !is.null(kurtosis.vec) ) {
stop("Skewness vector is not specified while kurtosis.vector is specified and n.NN > 0 !")
} else
if (n.NN > 0 && !is.null(skewness.vec) && is.null(kurtosis.vec) ) {
stop("Kurtosis vector is not specified while skewness.vector is specified and n.NN > 0 !")
} else
if (!is.null(skewness.vec) && !is.null(kurtosis.vec)) {
if(n.NN == 0) {
stop("Skewness and kurtosis vectors are specified while n.NN=0 !")
} else
if (n.NN > 0) {
if( length(skewness.vec)!=length(kurtosis.vec)) {
stop("Lengths of skewness and kurtosis vectors differ!")
} else
if (length(skewness.vec)==length(kurtosis.vec)){
if( length(skewness.vec)!= n.NN) {
stop("Skewness and kurtosis vectors are misspecified, dimension is wrong!")
} else
if (length(skewness.vec)== n.NN) {
errorCount= 0
for (i in 1:n.NN){
if(kurtosis.vec[i] < (skewness.vec[i]^2-2)){
cat("\n Kurtosis of continuous variable",i,"must be greater than or equal to",(skewness.vec[i]^2-2),"given its skewness!","\n")
errorCount = errorCount + 1
} #if
} #for
} #if
if (errorCount > 0) {
stop("Range violation occurred in the kurtosis vector!")
}#if
}#if
} #if
} #if
return(TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/BinNonNor/R/validation.skewness.kurtosis.R
|
compute.sigma.star <-
function (no.bin, no.nor, prop.vec.bin = NULL, corr.vec = NULL,
corr.mat = NULL)
{
d = no.bin + no.nor
validation.corr(no.bin, no.nor, prop.vec.bin, corr.vec = corr.vec,
corr.mat = corr.mat)
if ((no.nor < 0) | (floor(no.nor) != no.nor)) {
warning("Number of normal variables \nmust be a non-negative integer!\n")
}
if (is.null(corr.mat)) {
corr.mat = lower.tri.to.corr.mat(corr.vec, d)
}
sigma = corr.mat
p = prop.vec.bin
q = 1 - p
if (no.bin != 0) {
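# Replace each phi (point) correlation between binary pairs with the tetrachoric
# correlation of the underlying bivariate normal (phi2tetra).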
sigmaBB = diag(no.bin)
for (i in 1:no.bin) {
for (j in 1:no.bin) {
if (i != j)
sigmaBB[i, j] = phi2tetra(sigma[i, j], c(p[i], p[j]))
}
}
}
if (no.bin > 0 & no.nor > 0) {
sigmaBN = sigma
for (i in (no.bin + 1):d) {
for (j in 1:no.bin) {
sigmaBN[i, j] = sigmaBN[i, j]/(dnorm(qnorm(p[j]))/sqrt(p[j] *
q[j]))
}
}
sigmaBN = sigmaBN[(no.bin + 1):d, 1:no.bin]
sigma_star = sigma
sigma_star[1:no.bin, 1:no.bin] = sigmaBB
sigma_star[(no.bin + 1):d, 1:no.bin] = sigmaBN
sigma_star[1:no.bin, (no.bin + 1):d] = t(sigmaBN)
}
if (no.bin > 0 & no.nor == 0) {
sigma_star = sigmaBB
}
if (no.bin == 0 & no.nor > 0) {
sigma_star = sigma
}
PD = TRUE
temp = NULL
eigenv = eigen(sigma_star)$value
if (is.positive.definite(sigma_star) == FALSE) {
temp = sigma_star
cat("sigma_star before using nearPD is\n")
print(temp)
sigma_star = as.matrix(nearPD(sigma_star, corr = TRUE,
keepDiag = TRUE)$mat)
sigma_star = (sigma_star + t(sigma_star))/2
warning("sigma_star is not positive definite.\nAlgorithm will be using the nearest positive definite matrix which is\n",
immediate. = TRUE)
print(sigma_star)
PD = FALSE
}
return(list(sigma_star = sigma_star, nonPD = temp, PD = PD,
eigenv = eigenv))
}
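# Illustrative usage sketch (hypothetical values; assumes the BinNor package
# and its dependencies are loaded). For one binary variable with proportion 0.4
# and one normal variable with target correlation 0.3, the binary-normal entry
# is divided by dnorm(qnorm(0.4)) / sqrt(0.4 * 0.6) (about 0.79), giving
# an intermediate correlation of roughly 0.38:
if (FALSE) {
  sig <- compute.sigma.star(no.bin = 1, no.nor = 1, prop.vec.bin = 0.4,
                            corr.vec = 0.3)
  sig$sigma_star  # intermediate correlation matrix for multivariate normal generation
  sig$PD          # TRUE if sigma_star was already positive definite
}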
|
/scratch/gouwar.j/cran-all/cranData/BinNor/R/compute.sigma.star.R
|
jointly.generate.binary.normal <-
function (no.rows, no.bin, no.nor, prop.vec.bin = NULL, mean.vec.nor = NULL,
var.nor = NULL, sigma_star = NULL, corr.vec = NULL, corr.mat = NULL,
continue.with.warning = TRUE)
{
if ((no.rows<1)|(floor(no.rows)!=no.rows)){stop("Number of rows must be an integer whose value is at least 1!\n")}
d = no.bin + no.nor
validation.bin(no.bin, prop.vec.bin)
validation.nor(no.nor, mean.vec.nor, var.nor)
if (is.null(sigma_star)) {
validation.corr(no.bin, no.nor, prop.vec.bin, corr.vec = corr.vec,
corr.mat = corr.mat)
sig_star = compute.sigma.star(no.bin, no.nor, prop.vec.bin,
corr.vec, corr.mat)
sigma_star = sig_star$sigma_star
if (sig_star$PD == FALSE & continue.with.warning == FALSE) {
stop("User has chosen to stop as the final correlation matrix is not positive definite")
}
}
else {
if (is.positive.definite(sigma_star) == FALSE) {
print(sigma_star)
if (continue.with.warning == TRUE) {
sigma_star = as.matrix(nearPD(sigma_star, corr = TRUE,
keepDiag = TRUE)$mat)
sigma_star = (sigma_star + t(sigma_star))/2
warning("sigma_star is not positive definite.\nAlgorithm will be using the nearest positive definite matrix!\nThe nearest positive definite matrix computed is:",
immediate. = TRUE)
}
else {
stop("The final correlation matrix is not positive definite")
}
}
}
data = rmvnorm(no.rows, mean = rep(0, d), sigma = sigma_star)
p = prop.vec.bin
q = 1 - p
if (no.bin > 0) {
for (i in 1:no.rows) {
for (j in 1:no.bin) {
if (data[i, j] <= qnorm(1 - p[j]))
data[i, j] = 0
else data[i, j] = 1
}
}
}
if (no.nor > 0) {
temp = 1
for (j in (no.bin + 1):d) {
data[, j] = mean.vec.nor[temp] + (data[, j] * sqrt(var.nor[temp]))
temp = temp + 1
}
}
return(data)
}
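# Illustrative usage sketch (made-up parameters; assumes BinNor and its
# dependency mvtnorm are loaded). Generates 1000 rows with one binary
# (p = 0.4) and one normal (mean 2, variance 4) variable, target correlation 0.3:
if (FALSE) {
  set.seed(123)
  dat <- jointly.generate.binary.normal(no.rows = 1000, no.bin = 1, no.nor = 1,
                                        prop.vec.bin = 0.4, mean.vec.nor = 2,
                                        var.nor = 4, corr.vec = 0.3)
  colMeans(dat)   # approximately c(0.4, 2)
  cor(dat)[1, 2]  # approximately 0.3
}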
|
/scratch/gouwar.j/cran-all/cranData/BinNor/R/jointly.generate.binary.normal.R
|
lower.tri.to.corr.mat <-
function(corr.vec=NULL,d){
if(length(corr.vec)!=(d*(d-1)/2)){
stop("Vector of correlations is misspecified, dimension is wrong!\n")}
corr.mat = diag(d)
corr.mat [ lower.tri(corr.mat,diag=FALSE)]<-corr.vec
corr.mat = corr.mat + t(corr.mat)
diag(corr.mat)=1
return(corr.mat)
}
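# Illustrative sketch: the lower-triangle vector is read column by column, so
# for d = 3 it is c(r21, r31, r32):
if (FALSE) {
  lower.tri.to.corr.mat(corr.vec = c(0.2, 0.3, 0.4), d = 3)
  #      [,1] [,2] [,3]
  # [1,]  1.0  0.2  0.3
  # [2,]  0.2  1.0  0.4
  # [3,]  0.3  0.4  1.0
}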
|
/scratch/gouwar.j/cran-all/cranData/BinNor/R/lower.tri.to.corr.mat.R
|
simulation <-
function (seed = NULL, nsim, no.rows, no.bin, no.nor, mean.vec.nor = NULL,
var.nor = NULL, prop.vec.bin = NULL, corr.vec = NULL, corr.mat = NULL,
continue.with.warning = TRUE)
{
d = no.bin + no.nor
if ((nsim < 1) | (floor(nsim) != nsim)) {
stop("Number of simulations must be an integer whose value is at least 1!\n")
}
if (is.null(seed)) {
seed = runif(1, 1e+06, 9e+06)
}
set.seed(seed)
d = no.bin + no.nor
if (is.null(corr.mat)) {
corr.mat = lower.tri.to.corr.mat(corr.vec, d)
}
sigma.star = compute.sigma.star(no.bin, no.nor, prop.vec.bin,
corr.vec, corr.mat)
if (sigma.star$PD == FALSE) {
if (continue.with.warning == TRUE) {
warning("sigma_star is not positive definite.\nAlgorithm used the nearest positive definite matrix!!!!",
immediate. = TRUE)
}
else {
stop("User has chosen to stop as the final correlation matrix is not positive definite")
}
}
emp.mean = matrix(0, nsim, d)
emp.corr = matrix(0, nsim, d^2)
emp.var = matrix(0, nsim, no.nor)
for (i in 1:nsim) {
print(c(i, date()))
mydata = jointly.generate.binary.normal(no.rows, no.bin,
no.nor, prop.vec.bin, mean.vec.nor, var.nor, sigma_star = sigma.star$sigma_star,
continue.with.warning = TRUE)
emp.mean[i, ] = apply(mydata, 2, mean)
if (no.nor > 0) {
emp.var[i, ] = apply(mydata[, (no.bin + 1):d], 2,
var)
}
emp.corr[i, ] = as.vector(cor(mydata))
}
emp.cormat.mean = matrix(apply(emp.corr, 2, mean), d, d)
if (sigma.star$PD == FALSE) {
cat("==============================:\n")
cat("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!:\n")
warning("sigma_star is not positive definite.\nAlgorithm used the nearest positive definite matrix!!!!",
immediate. = TRUE)
print(sigma.star$nonPD)
}
cat("==============================:\n")
cat("Desired correlation matrix:\n")
print(corr.mat)
cat("Averaged correlation matrix:\n")
print(round(emp.cormat.mean, 7))
cat("==============================:\n")
cat("Desired proportion and mean parameters: \n")
print(c(prop.vec.bin, mean.vec.nor))
cat("Averaged proportion and mean parameters: \n")
print(apply(emp.mean, 2, mean))
cat("==============================:\n")
cat("Target variance of normal variate(s): \n")
print(var.nor)
cat("Actual variance of normal variate(s): \n")
print(apply(emp.var, 2, mean))
}
|
/scratch/gouwar.j/cran-all/cranData/BinNor/R/simulation.R
|
validation.bin <-
function (no.bin, prop.vec.bin = NULL)
{
if ((no.bin < 0) | (floor(no.bin) != no.bin)) {
stop("Number of binary variables \nmust be a non-negative integer\n")
}
else if (!is.null(prop.vec.bin)) {
if (no.bin == 0) {
stop("Proportion vector is specified while no.bin=0")
}
else if ((min(prop.vec.bin) <= 0) | (max(prop.vec.bin) >=
1)) {
stop("Proportions for binary variables must be between 0 and 1!\n")
}
else if (length(prop.vec.bin) != no.bin) {
stop("Proportion vector is misspecified, dimension is wrong!\n")
}
}
else if (is.null(prop.vec.bin)) {
if (no.bin > 0) {
stop("Proportion vector is not specified while no.bin > 0")
}
}
cat("No problems are detected for the marginal specification of binary variables! \n")
}
|
/scratch/gouwar.j/cran-all/cranData/BinNor/R/validation.bin.R
|
validation.corr <-
function (no.bin, no.nor, prop.vec.bin = NULL, corr.vec = NULL,
corr.mat = NULL)
{
d = no.bin + no.nor
validation.bin(no.bin, prop.vec.bin)
if (is.null(corr.vec) & is.null(corr.mat)) {
stop("You must specify full correlation matrix OR vector of elements below the diagonal")
}
if (!is.null(corr.vec) & !is.null(corr.mat)) {
corr.mat.from.corr.vec = lower.tri.to.corr.mat(corr.vec,
d)
if (sum(dim(corr.mat.from.corr.vec) == dim(corr.mat)) !=
2) {
stop("corr.vec and corr.mat are non-conformable")
}
if (sum(corr.mat.from.corr.vec == corr.mat) != (d * d)) {
stop("Correlation matrix from corr.vec and corr.mat are not the same")
}
}
if (!is.null(corr.vec)) {
if (length(corr.vec) != (d * (d - 1)/2)) {
stop("Vector of correlations is misspecified, dimension is wrong!\n")
}
if ((min(corr.vec) <= -1) | (max(corr.vec) >= 1)) {
stop("Correlations must be between -1 and 1!\n")
}
corr.mat.from.corr.vec = lower.tri.to.corr.mat(corr.vec,
d)
if (is.positive.definite(corr.mat.from.corr.vec) == FALSE) {
stop("Specified correlation matrix (from corr.vec) is not positive definite! \n")
}
validation.range(no.bin, no.nor, prop.vec.bin, corr.mat.from.corr.vec)
}
if (!is.null(corr.mat)) {
if (dim(corr.mat)[1] != d | dim(corr.mat)[2] != d) {
stop("Correlation matrix dimension is wrong!\n")
}
if (is.positive.definite(corr.mat) == FALSE) {
stop("Specified correlation matrix is not positive definite! \n")
}
if (isSymmetric(corr.mat) == FALSE) {
stop("Specified correlation matrix is not symmetric! \n")
}
validation.range(no.bin, no.nor, prop.vec.bin, corr.mat)
}
cat("No specification problems are detected for the correlation structure! \n")
}
|
/scratch/gouwar.j/cran-all/cranData/BinNor/R/validation.corr.R
|
validation.nor <-
function (no.nor, mean.vec.nor = NULL, var.nor = NULL)
{
if ((no.nor < 0) | (floor(no.nor) != no.nor)) {
stop("Number of normal variables \nmust be an integer whose value or 0 !\n")
}
if (!is.null(mean.vec.nor) & no.nor == 0) {
stop("Mean vector for the normal part is specified while no.nor=0!\n")
}
if (!is.null(var.nor) & no.nor == 0) {
stop("Vector of variances for the normal part is specified while no.nor=0!\n")
}
if (is.null(mean.vec.nor) & no.nor > 0) {
stop("Mean vector for the normal part is not specified while no.nor>0!\n")
}
if (is.null(var.nor) & no.nor > 0) {
stop("Vector of variances for the normal part is not specified while no.nor>0!\n")
}
if (!is.null(mean.vec.nor) & !is.null(var.nor) & no.nor >
0) {
if (length(mean.vec.nor) != no.nor) {
stop("Mean vector for the normal part is misspecified, \ndimension is wrong!\n")
}
if (length(var.nor) != no.nor) {
stop("Vector of variances for the normal part is misspecified, \ndimension is wrong!\n")
}
if (min(var.nor) <= 0) {
stop("Variances must be positive!\n")
}
}
cat("No problems are detected for the marginal specification of normal variables! \n")
}
|
/scratch/gouwar.j/cran-all/cranData/BinNor/R/validation.nor.R
|
validation.range <-
function(no.bin, no.nor, prop.vec.bin=NULL, corr.mat){
d = no.bin+no.nor
sigma=corr.mat
p=prop.vec.bin ; q=1-p
#Create lower and upper bound matrices for correlations
L_sigma=diag(d) ; U_sigma=diag(d)
## Find lower and upper bounds for binary-binary combinations
#(STEP 2 in the algorithm)
if(no.bin>0){
for (i in 1:no.bin){
for (j in 1:no.bin){
if (i!=j) L_sigma[i,j]=L_sigma[j,i]=max(-sqrt((p[i]*p[j])/(q[i]*q[j])),
-sqrt((q[i]*q[j])/(p[i]*p[j])))
if (i!=j) U_sigma[i,j]=U_sigma[j,i]=min(sqrt((p[i]*q[j])/(q[i]*p[j])),
sqrt((q[i]*p[j])/(p[i]*q[j])))}}
## Find lower and upper bounds for binary-normal combinations
#(STEP 3 in the algorithm)
}
if(no.bin>0&no.nor>0){
for(i in (no.bin+1):d){
for(j in 1:no.bin){
L_sigma[i,j]=L_sigma[j,i]= -dnorm(qnorm(p[j]))/sqrt(p[j]*q[j])
U_sigma[i,j]=U_sigma[j,i]= dnorm(qnorm(p[j]))/sqrt(p[j]*q[j])}}
}
#Lower & upper bounds for normal-normal combinations
if(no.nor>0){
for (i in (no.bin+1):d){
for (j in (no.bin+1):d){
L_sigma[i,j]=L_sigma[j,i]=-1
U_sigma[i,j]=U_sigma[j,i]= 1}}
}
## Check if the correlations are in the feasible range
#(STEP 4 in the algorithm)
valid.state=TRUE
for (i in 1:d){for (j in 1:d){ if(j >=i){
if(sigma[i,j] < L_sigma[i,j] | sigma[i,j] > U_sigma[i,j]){
cat("Range violation! Corr[",i,",",j,"] must be between", round(L_sigma[i,j],3),
"and",round(U_sigma[i,j],3),"\n")
valid.state=FALSE}}}}
if(valid.state==TRUE) cat("All correlations are in feasible range! \n")
if(valid.state==FALSE) stop("All correlations must be in feasible range!")
}
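# Illustrative sketch of the STEP 2 bounds with made-up proportions: for two
# binary variables with p1 = 0.3 and p2 = 0.6, the feasible correlation range
# evaluates to about [-0.802, 0.535], so a target correlation of, e.g., 0.7
# would trigger a range violation:
if (FALSE) {
  p <- c(0.3, 0.6); q <- 1 - p
  max(-sqrt((p[1]*p[2])/(q[1]*q[2])), -sqrt((q[1]*q[2])/(p[1]*p[2])))  # about -0.802
  min(sqrt((p[1]*q[2])/(q[1]*p[2])), sqrt((q[1]*p[2])/(p[1]*q[2])))    # about 0.535
}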
|
/scratch/gouwar.j/cran-all/cranData/BinNor/R/validation.range.R
|
Fleishman.coef.NN <-
function(skew.vec, kurto.vec)
{
if (length(skew.vec) != length(kurto.vec)) {
stop("Lengths of the skewness vector and the kurtosis vector differ!\n")
}
coef <- matrix(NA, length(skew.vec), 4)
fleishman.poly <- function(p, r1, r2) {
b <- p[1]
c <- p[2]
d <- p[3]
r <- rep(NA, 3)
r[1] <- b^2+6*b*d+2*c^2+15*d^2-1
r[2] <- 2*c*(b^2+24*b*d+105*d^2+2)-r1
r[3] <- b*d+c^2*(1+b^2+28*b*d)+d^2*(12+48*b*d+141*c^2+225*d^2)-r2/24
r
}
for (i in 1:length(skew.vec)) {
r1 <- skew.vec[i]
r2 <- kurto.vec[i]
    if (r2 < r1^2-2) stop("The skewness (r1) and kurtosis (r2) parameters for the ", i, "-th continuous variable violate the inequality: r2 >= r1^2-2!\n")
p0 <- rep(0,3) # starting value
poly.coef <- BBsolve(par=p0, fn=fleishman.poly, r1=r1, r2=r2)
if (poly.coef$conv == 0) coef[i,] <- c(-poly.coef$par[2], poly.coef$par)
}
# Rounding the coefficients to 9 digits after the decimal
coef <- round(coef, 9)
colnames(coef) <- c("a", "b", "c", "d")
return(coef)
}
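# Illustrative sketch (assumes BBsolve from the BB package is available): for a
# standard normal target (skewness 0, kurtosis 0) the Fleishman polynomial
# a + b*Z + c*Z^2 + d*Z^3 reduces to the identity, so the solver should return
# coefficients close to (0, 1, 0, 0), up to the sign of b:
if (FALSE) {
  Fleishman.coef.NN(skew.vec = 0, kurto.vec = 0)
}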
|
/scratch/gouwar.j/cran-all/cranData/BinOrdNonNor/R/Fleishman.coef.NN.R
|
IntermediateNonNor <-
function(skew.vec, kurto.vec, cormat)
{
no.NN <- nrow(cormat)
if (validate.target.cormat.BinOrdNN(plist=NULL,skew.vec=skew.vec, kurto.vec=kurto.vec, no.bin=0, no.ord=0, no.NN=no.NN, CorrMat=cormat)){
intcor.mat <- diag(nrow(cormat))
coef <- Fleishman.coef.NN(skew.vec, kurto.vec)
for (i in 1:(no.NN-1)) {
for (j in (i+1):no.NN){
c1 <- -cormat[i,j]
c2 <- coef[i,2]*coef[j,2]+3*coef[i,2]*coef[j,4]+3*coef[i,4]*coef[j,2]+9*coef[i,4]*coef[j,4]
c3 <- 2*coef[i,3]*coef[j,3]
c4 <- 6*coef[i,4]*coef[j,4]
roots <- polyroot(c(c1,c2,c3,c4))
for (k in 1:3) {
if(abs(Im(roots[k]))<1e-6 & abs(Re(roots[k]))<=1) {
intcor.mat[i,j] <- intcor.mat[j,i] <- Re(roots[k])
}
}
}
}
return(intcor.mat)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BinOrdNonNor/R/IntermediateNonNor.R
|
IntermediateONN <-
function(plist, skew.vec, kurto.vec, ONNCorrMat)
{
cmat.corrected <- matrix(NA, nrow(ONNCorrMat), ncol(ONNCorrMat))
coef.est <- Fleishman.coef.NN(skew.vec, kurto.vec)
# Compute the Fleishman coefficients for standard normal variable X
coef.X <- Fleishman.coef.NN(0,0)
Cor_forONN <- function(pvec, ONN.cor, coef) {
X <- rnorm(1e+05, 0, 1)
XORD <- ordinalize(pvec, X)
c.hat <- cor(XORD, X)
rho.XY <- ONN.cor/c.hat
# Given X is standard normal and Y is non-normal, need to compute the intermediate
# correlation between the two standard normal variables X and Z
c1 <- -rho.XY
c2 <- coef.X[2]*coef[2]+3*coef.X[2]*coef[4]+3*coef.X[4]*coef[2]+9*coef.X[4]*coef[4]
c3 <- 2*coef.X[3]*coef[3]
c4 <- 6*coef.X[4]*coef[4]
roots <- polyroot(c(c1,c2,c3,c4))
for (k in 1:3) {
if(abs(Im(roots[k]))<1e-6 & abs(Re(roots[k]))<=1) {
r <- Re(roots[k])
}
}
return(r)
}
# Correlation between the jth binary/ordinal variable and the ith non-normal variable
for (j in 1:ncol(ONNCorrMat)) {
for (i in 1:nrow(ONNCorrMat)) {
cmat.corrected[i, j] = Cor_forONN(plist[[j]], ONNCorrMat[i,j], as.vector(coef.est[i,]))
}
}
return(cmat.corrected)
}
|
/scratch/gouwar.j/cran-all/cranData/BinOrdNonNor/R/IntermediateONN.R
|
Limit_forNN <- function(skew.vec, kurto.vec) {
.Deprecated("LimitforNN") #include a package argument, too
LimitforNN(skew.vec, kurto.vec)
}
LimitforNN <-
function(skew.vec, kurto.vec)
{
coef <- Fleishman.coef.NN(skew.vec, kurto.vec)
X <- rnorm(1e+05, 0, 1)
XX <- cbind(1,X,X^2,X^3)
U <- XX%*%coef[1,]
Y <- rnorm(1e+05, 0, 1)
YY <- cbind(1,Y,Y^2,Y^3)
V <- YY%*%coef[2,]
max <- cor(U[order(U)], V[order(V)])
min <- cor(U[order(U, decreasing = TRUE)], V[order(V)])
rm(U, V)
return(c(min, max))
}
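# Illustrative sketch (made-up skewness/kurtosis pairs): the bounds come from
# sorting one Fleishman-transformed sample against the other, so the returned
# values vary slightly between runs:
if (FALSE) {
  set.seed(1)
  LimitforNN(skew.vec = c(1, 0.5), kurto.vec = c(2, 1))  # c(min, max) feasible correlations
}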
|
/scratch/gouwar.j/cran-all/cranData/BinOrdNonNor/R/LimitforNN.R
|
Limit_forONN <- function(pvec1, skew1, kurto1) {
.Deprecated("LimitforONN") #include a package argument, too
LimitforONN(pvec1, skew1, kurto1)
}
LimitforONN <-
function (pvec1, skew1, kurto1)
{
validate.plist(list(pvec1), 1)
coef <- Fleishman.coef.NN(skew1, kurto1)
X <- rnorm(1e+05, 0, 1)
Z <- rnorm(1e+05, 0, 1)
ZZ <- cbind(1, Z, Z^2, Z^3)
Y <- ZZ%*%as.vector(coef)
XORD <- ordinalize(pvec1, X)
max <- cor(XORD[order(XORD)], Y[order(Y)])
min <- cor(XORD[order(XORD, decreasing = TRUE)], Y[order(Y)])
rm(X, Y)
return(c(min, max))
}
|
/scratch/gouwar.j/cran-all/cranData/BinOrdNonNor/R/LimitforONN.R
|
cmat.star.BinOrdNN <-
function(plist, skew.vec, kurto.vec, no.bin, no.ord, no.NN, CorrMat)
{
no.binord <- no.bin + no.ord
if (no.NN == 0) {
Sigma <- IntermediateOO(plist, CorrMat)
}
if (no.binord == 0) {
Sigma <- IntermediateNonNor(skew.vec, kurto.vec, CorrMat)
}
if (no.NN > 0 & no.binord > 0) {
if (validate.target.cormat.BinOrdNN(plist, skew.vec, kurto.vec, no.bin, no.ord, no.NN, CorrMat)) {
k <- length(plist)
tot <- nrow(CorrMat)
if (no.binord > 1) OO <- IntermediateOO(plist, CorrMat[1:k, 1:k])
else OO <- 1
ONN <- IntermediateONN(plist, skew.vec, kurto.vec, CorrMat[(k + 1):tot,1:k])
if (no.NN > 1) NN <- IntermediateNonNor(skew.vec, kurto.vec, CorrMat[(k + 1):tot, (k + 1):tot])
else NN <- 1
Sigma <- cbind(rbind(OO, ONN), rbind(t(ONN), NN))
if (!is.positive.definite(Sigma)) {
warning("Intermediate correlation matrix is not positive definite. A nearPD function is applied.")
Sigma <- as.matrix(nearPD(Sigma, corr = TRUE, keepDiag = TRUE)$mat)
}
Sigma <- (Sigma + t(Sigma))/2
}
}
return(Sigma)
}
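# Illustrative usage sketch (hypothetical specification; assumes the
# BinOrdNonNor package and its dependencies are loaded). plist holds cumulative
# probabilities: one binary (p = 0.4) and one four-category ordinal variable,
# plus two non-normal variables, all with target correlation 0.2:
if (FALSE) {
  plist <- list(0.4, c(0.2, 0.5, 0.8))
  CorrMat <- matrix(0.2, 4, 4); diag(CorrMat) <- 1
  Sigma <- cmat.star.BinOrdNN(plist, skew.vec = c(1, 0.5), kurto.vec = c(2, 1),
                              no.bin = 1, no.ord = 1, no.NN = 2,
                              CorrMat = CorrMat)
}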
|
/scratch/gouwar.j/cran-all/cranData/BinOrdNonNor/R/cmat.star.BinOrdNN.R
|
genBinOrdNN <-
function(n, plist, mean.vec, var.vec, skew.vec, kurto.vec, no.bin, no.ord, no.NN, cmat.star)
{
if (missing(n)){
stop("n was not specified! \n")
}
if (missing(cmat.star)) {
stop("The intermediate correlation matrix was not specified! \n")
}
if (no.bin > 0){
for (i in 1:no.bin){
if (length(plist[[i]])>1) {
warning("The probability vector for the ", i, "-th binary variable contains more than one value!\n")
}
}
}
if (no.ord > 0){
for (i in 1:no.ord){
if (length(plist[[no.bin+i]])<2) {
warning("The probability vector for the ", eval(no.bin+i), "-th ordinal variable contains only one value!\n")
}
}
}
no.binord <- no.bin + no.ord
if (ncol(cmat.star) != (no.binord + no.NN)){
stop("Dimension of intermediate correlation matrix cmat.star des not match the number of variables!\n")
}
if (no.binord > 0) {
if (length(plist) != no.binord) {
stop("Dimension of the probability vector does not match the number of binary and ordinal variables!\n")
}
}
if (no.NN > 0){
if (length(skew.vec) != no.NN) {
stop("Length of the skewness vector does not match the number of non-normal variables!\n")
}
if (length(kurto.vec) != no.NN) {
stop("Length of the kurtosis vector does not match the number of non-normal variables!\n")
}
if (length(mean.vec) != no.NN) {
stop("Length of the mean vector does not match the number of continuous variables!\n")
}
if (length(var.vec) != no.NN) {
stop("Length of the variance vector does not match the number of continuous variables!\n")
}
}
if (no.NN == 0) {
YY <- ordsample(n, plist, cmat.star, cormat = "continuous")
}
if (no.binord == 0) {
coef <- Fleishman.coef.NN(skew.vec,kurto.vec)
XX <- rmvnorm(n, rep(0, ncol(cmat.star)), cmat.star)
YY <- NULL
for (i in 1:no.NN){
X <- cbind(1,XX[,i], XX[,i]^2, XX[,i]^3)
Y <- X%*%coef[i,]*sqrt(var.vec[i-length(plist)])+mean.vec[i-length(plist)]
YY <- cbind(YY,Y)
}
}
if (no.NN > 0 & no.binord > 0) {
XX <- rmvnorm(n, rep(0, ncol(cmat.star)), cmat.star)
YY <- NULL
for (i in 1:no.binord) {
OO <- ordinalize(plist[[i]], XX[, i])
YY <- cbind(YY, OO)
rm(OO)
}
coef <- Fleishman.coef.NN(skew.vec,kurto.vec)
for (i in (no.binord + 1):(no.binord + no.NN)) {
X <- cbind(1,XX[,i], XX[,i]^2, XX[,i]^3)
Y <- X%*%coef[i-length(plist),]*sqrt(var.vec[i-length(plist)])+mean.vec[i-length(plist)]
YY <- cbind(YY,Y)
}
rm(XX)
}
colnames(YY) <- NULL
return(YY)
}
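# Illustrative usage sketch continuing the hypothetical specification used for
# cmat.star.BinOrdNN above (repeated here so the block is self-contained):
if (FALSE) {
  plist <- list(0.4, c(0.2, 0.5, 0.8))
  CorrMat <- matrix(0.2, 4, 4); diag(CorrMat) <- 1
  Sigma <- cmat.star.BinOrdNN(plist, skew.vec = c(1, 0.5), kurto.vec = c(2, 1),
                              no.bin = 1, no.ord = 1, no.NN = 2,
                              CorrMat = CorrMat)
  dat <- genBinOrdNN(n = 1000, plist, mean.vec = c(0, 0), var.vec = c(1, 1),
                     skew.vec = c(1, 0.5), kurto.vec = c(2, 1),
                     no.bin = 1, no.ord = 1, no.NN = 2, cmat.star = Sigma)
  dim(dat)  # 1000 rows, 4 columns: binary, ordinal, two non-normal
}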
|
/scratch/gouwar.j/cran-all/cranData/BinOrdNonNor/R/genBinOrdNN.R
|
valid.limits.BinOrdNN <-
function(plist, skew.vec, kurto.vec, no.bin, no.ord, no.NN)
{
no.binord <- no.bin + no.ord
if (no.binord>0) validate.plist(plist, no.binord)
minmat <- maxmat <- diag(length(plist) + no.NN)
for (r in 2:nrow(minmat)) {
for (c in 1:(r - 1)) {
if (r <= length(plist)) {
minmax <- LimitforOO(plist[[r]], plist[[c]])
}else if (r > length(plist) & c > length(plist)) {
minmax <- LimitforNN(skew.vec[c(r-no.binord,c-no.binord)],
kurto.vec[c(r-no.binord,c-no.binord)])
}else if (r > length(plist) & c <= length(plist)) {
minmax = LimitforONN(plist[[c]], skew.vec[r-no.binord], kurto.vec[r-no.binord])
}
minmat[r, c] <- minmax[1]
maxmat[r, c] <- minmax[2]
rm(minmax)
}
}
minmat <- minmat + t(minmat)
diag(minmat) <- 1
maxmat <- maxmat + t(maxmat)
diag(maxmat) <- 1
return(list(lower = minmat, upper = maxmat))
}
|
/scratch/gouwar.j/cran-all/cranData/BinOrdNonNor/R/valid.limits.BinOrdNN.R
|
validate.target.cormat.BinOrdNN <-
function (plist, skew.vec, kurto.vec, no.bin, no.ord, no.NN, CorrMat)
{
if (is.positive.definite(CorrMat) == FALSE) {
stop("Specified correlation matrix is not positive definite! \n")
}
if (isSymmetric(CorrMat) == FALSE) {
stop("Specified correlation matrix is not symmetric! \n")
}
if (ncol(CorrMat) != (no.bin + no.ord + no.NN)) {
stop("Dimension of correlation matrix does not match the number of variables!\n")
}
if (sum(diag(CorrMat)) != ncol(CorrMat)) {
stop("Diagonal elements of correlation matrix do not all equals to 1!\n")
}
Limits <- valid.limits.BinOrdNN(plist, skew.vec, kurto.vec, no.bin, no.ord, no.NN)
minmat <- Limits$lower
maxmat <- Limits$upper
rangemat <- (minmat <= CorrMat & CorrMat <= maxmat)
if (sum(!rangemat) > 0) {
d <- ncol(rangemat)
cat("Target matrix is not valid. The following values are invalid.\n")
for (i in 1:d) {
for (j in i:d) {
if (rangemat[i, j] == FALSE) {
cat("CorrMat[", i, ",", j, "] must be between", round(minmat[i, j], 3), "and", round(maxmat[i, j], 3), "\n")
}
}
}
stop("Range violation occurred in the target correlation matrix.\n")
}
return(TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/BinOrdNonNor/R/validate.target.cormat.BinOrdNN.R
|
BinSegBstrap <- function(y, bandwidth, nbandwidth = 30L, B = 500, alpha = 0.05,
kernel = c("epanechnikov", "gaussian", "rectangular",
"triangular", "biweight", "silverman")) {
if (!is.numeric(y) || any(!is.finite(y))) {
stop("observations 'y' must be a numeric vector containing only finite values")
}
n <- length(y)
if (missing(bandwidth)) {
if (!is.numeric(nbandwidth) || length(nbandwidth) != 1 || !is.finite(nbandwidth)) {
stop("'nbandwidth' must be a single positive integer")
}
if (!is.integer(nbandwidth)) {
nbandwidth <- as.integer(nbandwidth + 1e-6)
}
if (nbandwidth < 1L) {
stop("'nbandwidth' must be a single positive integer")
}
bandwidth <- exp(seq(log(10 / n), log(0.25), length.out = nbandwidth))
} else {
if (!is.numeric(bandwidth) || any(!is.finite(bandwidth)) || any(bandwidth < 1 / n) || any(bandwidth > 0.5)) {
stop("'bandwidth' must be a numeric vector containing only finite values between 1 / length(y) and 0.5")
}
}
if (!is.function(kernel)) {
kernel <- match.arg(kernel)
kernel <- switch(kernel,
rectangular = function(x) 1 / 2,
triangular = function(x) 1 - abs(x),
epanechnikov = function(x) 3 / 4 * (1 - x^2),
biweight = function(x) 5 / 16 * (1 - x^2)^2,
gaussian = function(x) dnorm(x, 0, 1),
silverman = function(x) exp(-abs(x) / sqrt(2)) * sin(abs(x) / sqrt(2) + pi / 4) / 2,
stop("unknown kernel")
)
}
if (!is.numeric(B) || length(B) != 1 || !is.finite(B)) {
stop("'B' must be a single positive integer")
}
if (!is.integer(B)) {
B <- as.integer(B + 1e-6)
}
if (B < 1L) {
stop("'B' must be a single positive integer")
}
if (!is.numeric(alpha) || length(alpha) != 1 || !is.finite(alpha) || alpha <= 0 || alpha >= 1) {
stop("alpha must be a probability, i.e., a single numeric between 0 and 1")
}
ret <- .BinSegBstrap(y = y, kernel = kernel, bandwidth = bandwidth, B = B, alpha = alpha)
cps <- c(1, ret, n + 1)
if (length(bandwidth) > 1) {
cv <- numeric(length(bandwidth))
for (i in seq_along(bandwidth)) {
b <- as.integer(n * bandwidth[i] + 1e-12)
for (k in 1:(length(cps) - 1)) {
cv[i] <- cv[i] + .CVtwosided(Y = y[cps[k]:(cps[k + 1] - 1)], K = kernel(+(1:b) / (n * bandwidth[i])))
}
}
bandwidth <- bandwidth[which.min(cv)]
}
est <- numeric(n)
b <- as.integer(n * bandwidth + 1e-12)
for (i in 1:(length(cps) - 1)) {
est[cps[i]:(cps[i + 1] - 1)] <- .kernelSmoothing(y[cps[i]:(cps[i + 1] - 1)], kernel((-b:b) / (n * bandwidth)))
}
list(est = est, cps = ret, bandwidth = bandwidth)
}
.BinSegBstrap <- function(y, kernel, bandwidth, B, alpha, n = length(y), s = 1, e = length(y)) {
if (e - s <= 2 * as.integer(min(bandwidth) * n + 1e-12)) {
return(integer(0))
} else {
ret <- .BstrapTest(y = y[s:e], kernel = kernel, bandwidth = bandwidth, B = B, alpha = alpha, n = n)
if (ret$outcome) {
loc <- ret$cp + s - 1L
cpLeft <- .BinSegBstrap(y = y, kernel = kernel, bandwidth = bandwidth, B = B, alpha = alpha,
n = n, s = s, e = loc - 1)
cpRight <- .BinSegBstrap(y = y, kernel = kernel, bandwidth = bandwidth, B = B, alpha = alpha,
n = n, s = loc, e = e)
} else {
return(integer(0))
}
}
c(cpLeft, loc, cpRight)
}
|
/scratch/gouwar.j/cran-all/cranData/BinSegBstrap/R/BinSegBstrap.R
|
BstrapTest <- function(y, bandwidth, nbandwidth = 30L, B = 500L, alpha = 0.05,
kernel = c("epanechnikov", "gaussian", "rectangular",
"triangular", "biweight", "silverman")) {
if (!is.numeric(y) || any(!is.finite(y))) {
stop("observations 'y' must be a numeric vector containing only finite values")
}
n <- length(y)
if (missing(bandwidth)) {
if (!is.numeric(nbandwidth) || length(nbandwidth) != 1 || !is.finite(nbandwidth)) {
stop("'nbandwidth' must be a single positive integer")
}
if (!is.integer(nbandwidth)) {
nbandwidth <- as.integer(nbandwidth + 1e-6)
}
if (nbandwidth < 1L) {
stop("'nbandwidth' must be a single positive integer")
}
bandwidth <- exp(seq(log(10 / n), log(0.25), length.out = nbandwidth))
} else {
if (!is.numeric(bandwidth) || any(!is.finite(bandwidth)) || any(bandwidth < 1 / n) || any(bandwidth > 0.5)) {
stop("'bandwidth' must be a numeric vector containing only finite values between 1 / length(y) and 0.5")
}
}
if (!is.function(kernel)) {
kernel <- match.arg(kernel)
kernel <- switch(kernel,
rectangular = function(x) 1 / 2,
triangular = function(x) 1 - abs(x),
epanechnikov = function(x) 3 / 4 * (1 - x^2),
biweight = function(x) 5 / 16 * (1 - x^2)^2,
gaussian = function(x) dnorm(x, 0, 1),
silverman = function(x) exp(-abs(x) / sqrt(2)) * sin(abs(x) / sqrt(2) + pi / 4) / 2,
stop("unknown kernel")
)
}
if (!is.numeric(B) || length(B) != 1 || !is.finite(B)) {
stop("'B' must be a single positive integer")
}
if (!is.integer(B)) {
B <- as.integer(B + 1e-6)
}
if (B < 1L) {
stop("'B' must be a single positive integer")
}
if (!is.numeric(alpha) || length(alpha) != 1 || !is.finite(alpha) || alpha <= 0 || alpha >= 1) {
stop("alpha must be a probability, i.e., a single numeric between 0 and 1")
}
.BstrapTest(y = y, bandwidth = bandwidth, kernel = kernel, B = B, alpha = alpha)
}
.BstrapTest <- function(y, bandwidth, kernel, B, alpha, n = length(y)) {
ret <- .estimateSingleCp(y = y, bandwidth = bandwidth, kernel = kernel, n = n)
names(ret)[1] <- "piecewiseSignal"
if (length(bandwidth) > 1) {
cv <- numeric(length(bandwidth))
for (i in seq_along(bandwidth)) {
b <- as.integer(n * bandwidth[i] + 1e-12)
cv[i] <- .CVtwosided(Y = y, K = kernel(+(1:b) / (n * bandwidth[i])))
}
ret$bandwidthSmooth <- bandwidth[which.min(cv)]
} else {
ret$bandwidthSmooth <- bandwidth
}
b <- as.integer(n * ret$bandwidthSmooth + 1e-12)
ret$smoothSignal <- .kernelSmoothing(y, kernel((-b:b) / (n * ret$bandwidthSmooth)))
etilde <- y - ret$piecewiseSignal
ehat <- etilde - mean(etilde)
Tstar <- numeric(B)
for (b in 1:B) {
eStar <- sample(ehat, length(y), replace = TRUE)
yStar <- ret$smoothSignal + eStar
Tstar[b] <- .estimateSingleCp(y = yStar, bandwidth = bandwidth, kernel = kernel, n = n)$size
}
ret$critVal <- quantile(abs(Tstar), 1 - alpha)
ret$pValue <- mean(abs(ret$size) <= abs(Tstar))
ret$outcome <- abs(ret$size) > as.numeric(ret$critVal)
ret
}
|
/scratch/gouwar.j/cran-all/cranData/BinSegBstrap/R/BstrapTest.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
.CVonesided <- function(Y, K) {
.Call(`_BinSegBstrap_CVonesided`, Y, K)
}
.CVtwosided <- function(Y, K) {
.Call(`_BinSegBstrap_CVtowsided`, Y, K)
}
DmaxVec <- function(Y, bint) {
.Call(`_BinSegBstrap_DmaxVec`, Y, bint)
}
.kernelSmoothing <- function(Y, K) {
.Call(`_BinSegBstrap_kernelSmoothing`, Y, K)
}
|
/scratch/gouwar.j/cran-all/cranData/BinSegBstrap/R/RcppExports.R
|
estimateSingleCp <- function(y, bandwidth, nbandwidth = 30L,
kernel = c("epanechnikov", "gaussian", "rectangular",
"triangular", "biweight", "silverman")) {
if (!is.numeric(y) || any(!is.finite(y))) {
stop("observations 'y' must be a numeric vector containing only finite values")
}
n <- length(y)
if (missing(bandwidth)) {
if (!is.numeric(nbandwidth) || length(nbandwidth) != 1 || !is.finite(nbandwidth)) {
stop("'nbandwidth' must be a single positive integer")
}
if (!is.integer(nbandwidth)) {
nbandwidth <- as.integer(nbandwidth + 1e-6)
}
if (nbandwidth < 1L) {
stop("'nbandwidth' must be a single positive integer")
}
bandwidth <- exp(seq(log(2 / n), log(0.25), length.out = nbandwidth))
} else {
if (!is.numeric(bandwidth) || any(!is.finite(bandwidth)) || any(bandwidth < 1 / n) || any(bandwidth > 0.5)) {
stop("'bandwidth' must be a numeric vector containing only finite values between 1 / length(y) and 0.5")
}
}
if (!is.function(kernel)) {
kernel <- match.arg(kernel)
kernel <- switch(kernel,
rectangular = function(x) 1 / 2,
triangular = function(x) 1 - abs(x),
epanechnikov = function(x) 3 / 4 * (1 - x^2),
biweight = function(x) 5 / 16 * (1 - x^2)^2,
gaussian = function(x) dnorm(x, 0, 1),
silverman = function(x) exp(-abs(x) / sqrt(2)) * sin(abs(x) / sqrt(2) + pi / 4) / 2,
stop("unknown kernel")
)
}
.estimateSingleCp(y = y, bandwidth = bandwidth, kernel = kernel)
}
.estimateSingleCp <- function(y, bandwidth, kernel, n = length(y)) {
if (length(bandwidth) > 1) {
cv <- numeric(length(bandwidth))
cp <- numeric(length(bandwidth))
for (i in seq_along(bandwidth)) {
b <- as.integer(n * bandwidth[i] + 1e-12)
if (length(y) < 2L * b) {
cv[i] <- Inf
cp[i] <- NA
} else {
cp[i] <- as.integer(which.max(DmaxVec(y, b)) + b + 1e-12)
cv[i] <- .CVonesided(Y = y[1:(cp[i] - 1)], K = kernel(+(1:b) / (n * bandwidth[i]))) +
.CVonesided(Y = rev(y[cp[i]:length(y)]), K = kernel(+(1:b) / (n * bandwidth[i])))
}
}
if (all(is.na(cp))) {
stop("all bandwidth values are too large")
}
indexBandwidth <- which.min(cv)
bandwidth <- bandwidth[indexBandwidth]
cp <- cp[indexBandwidth]
} else {
b <- as.integer(n * bandwidth + 1e-12)
if (length(y) < 2L * b) {
stop("bandwidth is too large")
}
cp <- as.integer(which.max(DmaxVec(y, b)) + b + 1e-12)
}
est <- numeric(length(y))
b <- as.integer(n * bandwidth + 1e-12)
est[1:(cp - 1)] <- .kernelSmoothing(y[1:(cp - 1)], kernel((-b:b) / (n * bandwidth)))
est[cp:length(y)] <- .kernelSmoothing(y[cp:length(y)], kernel((-b:b) / (n * bandwidth)))
size <- est[cp] - est[cp - 1]
list(est = est, cp = cp, size = size, bandwidth = bandwidth)
}
|
/scratch/gouwar.j/cran-all/cranData/BinSegBstrap/R/estimateSingleCp.R
|
## ----setup, echo = FALSE, message = FALSE-------------------------------------
library(knitr)
library(BinSegBstrap)
## ----estimateSingleCpFixedBandwidth, fig.cap = 'Observations (grey points), underlying signal (black line) and estimated signal (red line).'----
set.seed(1)
n <- 100
signal <- sin(2 * pi * 1:n / n)
signal[51:100] <- signal[51:100] + 5
y <- rnorm(n) + signal
# call of estimateSingleCp with fixed bandwidth 0.1
est <- estimateSingleCp(y = y, bandwidth = 0.1)
# estimated location
est$cp
# estimated jump size
est$size
# plot of observations, true and estimated signal
plot(y, pch = 16, col = "grey30")
lines(signal)
lines(est$est, col = "red")
## ----estimateSingleCp, fig.cap = 'Observations (grey points), underlying signal (black line) and estimated signal (red line).'----
set.seed(1)
n <- 100
signal <- sin(2 * pi * 1:n / n)
signal[51:100] <- signal[51:100] + 5
y <- rnorm(n) + signal
# call of estimateSingleCp with crossvalidated bandwidth
est <- estimateSingleCp(y = y)
# crossvalidated bandwidth
est$bandwidth
# estimated location
est$cp
# estimated jump size
est$size
# plot of observations, true and estimated signal
plot(y, pch = 16, col = "grey30")
lines(signal)
lines(est$est, col = "red")
## ----BstrapTest---------------------------------------------------------------
set.seed(1)
n <- 100
signal <- sin(2 * pi * 1:n / n)
signal[51:100] <- signal[51:100] + 5
y <- rnorm(n) + signal
test <- BstrapTest(y = y)
# whether the test rejected
test$outcome
# p-Value
test$pValue
## ----BinSegBstrap, fig.cap = 'Observations (grey points), underlying signal (black line) and estimated signal (red line).'----
set.seed(1)
n <- 200
signal <- sin(2 * pi * 1:n / n)
signal[51:100] <- signal[51:100] + 5
signal[151:200] <- signal[151:200] + 5
y <- rnorm(n) + signal
est <- BinSegBstrap(y = y)
# estimated change-points
est$cps
# plot of observations, true and estimated signal
plot(y, pch = 16, col = "grey30")
lines(signal)
lines(est$est, col = "red")
|
/scratch/gouwar.j/cran-all/cranData/BinSegBstrap/inst/doc/BinSegBstrap.R
|
TASC <- function(vect,
method=c("A","B"),
tau = 0.01,
numberOfSamples = 999,
sigma = seq(0.1, 20, by=.1),
na.rm=FALSE,
error = c("mean", "min")){
# Check type of input vector
if(!is.numeric(vect))
stop("The input vector must consist of numerical values!")
# Check for NA values
if (!na.rm && any(is.na(vect)))
stop("Cannot binarize in the presence of NA values!")
else
if (na.rm)
{
vect <- vect[!is.na(vect)]
}
if (any(!is.finite(vect)))
stop("Cannot binarize Inf values!")
# Check type of method
method <- match.arg(method, c("A","B"))
error <- match.arg(error, c("mean", "min"))
# Check type and value of tau
if(!is.numeric(tau))
stop("'tau' must be numeric!")
if(tau < 0 || tau > 1)
stop("'tau' has to be in [0,1]!")
# Check type and value of numberofSamples
if(!is.numeric(numberOfSamples))
stop("'numberOfSamples' must be numeric!")
if(numberOfSamples < 0)
stop("'numberOfSamples' has to be >= 0!")
# Check type of sigma
if(method == "B" && !is.numeric(sigma))
stop("'sigma' must consist of numerical values!")
# Verify input length
if(length(vect) < 3)
stop("The input vector must have at least 3 entries!")
# Check whether the input is constant
if(length(unique(vect)) == 1)
stop("The input vector is constant!")
#set.seed(proc.time()[3]*1000)
runif(1)
#calculate the results according to the <method> argument
if(method == "A"){
if(error == "min"){
res <- TASCA_C_min(vect, tau, numberOfSamples)
me <- "TASC A (min)"
}else{
res <- TASCA_C(vect, tau, numberOfSamples)
me <- "TASC A"
}
result <- new(
"TASCResult",
originalMeasurements = vect,
trinarizedMeasurements = res$binarized_vector,
threshold1 = res$threshold1,
threshold2 = res$threshold2,
p.value = res$p_value,
intermediateSteps = res$other_results$P_Mat[-1,,drop=FALSE],
intermediateHeights1 = res$other_results$H_Mat1[-1,,drop=FALSE],
intermediateHeights2 = res$other_results$H_Mat2[-1,,drop=FALSE],
intermediateStrongestSteps = res$other_results$v_vec[-1,,drop=FALSE],
method = me
)
}else if(method == "B"){
if(error == "min"){
res <- TASCB_C_min(vect, tau, numberOfSamples, sigma)
me <- "TASC B (min)"
}else{
res <- TASCB_C(vect, tau, numberOfSamples, sigma)
me <- "TASC B"
}
result <- new(
"TASCResult",
originalMeasurements = vect,
trinarizedMeasurements = res$binarized_vector,
threshold1 = res$threshold1,
threshold2 = res$threshold2,
p.value = res$p_value,
intermediateSteps = res$other_results$steps[-1,,drop=FALSE],
intermediateHeights1 = res$other_results$H_Mat1[-1,,drop=FALSE],
intermediateHeights2 = res$other_results$H_Mat2[-1,,drop=FALSE],
intermediateStrongestSteps = res$other_results$v_vec[-1,,drop=FALSE],
method = me
)
}else{
stop(sprintf("'method' has to be either \"A\" or \"B\"!", method))
}
return(result)
}
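# Illustrative usage sketch (made-up data; assumes the compiled C routines of
# the Binarize package are available). Trinarizes a vector with three plateaus
# using TASC A; the two thresholds should separate the three groups:
if (FALSE) {
  v <- c(0.1, 0.2, 0.15, 1.0, 1.1, 0.9, 2.3, 2.4, 2.5)
  res <- TASC(v, method = "A")
  res@trinarizedMeasurements       # values in {0, 1, 2}
  c(res@threshold1, res@threshold2)
}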
TASCA_C <- function(vect, tau, numberOfSamples){
#call the C-Function
result <- .Call("TASCA",
as.double(vect),
as.double(tau),
as.integer(numberOfSamples))
#all the matrices have to be transposed, due to different interpretation of the sequence of values
#between C and R (in C line by line, and in R column by column)
result$other_results$Cc <- t(result$other_results$Cc)
result$other_results$Ind <- t(result$other_results$Ind)
result$other_results$P_Mat <- t(result$other_results$P_Mat)
result$other_results$Q_Mat <- t(result$other_results$Q_Mat)
result$other_results$H_Mat1 <- t(result$other_results$H_Mat1)
result$other_results$H_Mat2 <- t(result$other_results$H_Mat2)
result$other_results$v_vec <- matrix(result$other_results$v_vec, ncol = 2, byrow = TRUE)
return(result);
}
TASCB_C <- function(vect, tau, numberOfSamples, sigma){
result <- .Call("TASCB",
as.double(vect),
as.double(tau),
as.integer(numberOfSamples),
as.double(sigma))
#all the matrices have to be transposed, due to different interpretation of the sequence of values
#between C and R (in C line by line, and in R column by column)
result$other_results$smoothed = t(result$other_results$smoothed)
result$other_results$zerocrossing = t(result$other_results$zerocrossing)
result$other_results$steps = t(result$other_results$steps)
result$other_results$H_Mat1 = t(result$other_results$H_Mat1)
result$other_results$H_Mat2 = t(result$other_results$H_Mat2)
result$other_results$smoothedX = t(result$other_results$smoothedX)
result$other_results$meanlist = t(result$other_results$meanlist)
result$other_results$v_vec <- matrix(result$other_results$v_vec, ncol = 2, byrow = TRUE)
return(result)
}
TASCA_C_min <- function(vect, tau, numberOfSamples){
#call the C-Function
result <- .Call("TASCA_min",
as.double(vect),
as.double(tau),
as.integer(numberOfSamples))
#all the matrices have to be transposed, due to different interpretation of the sequence of values
#between C and R (in C line by line, and in R column by column)
result$other_results$Cc <- t(result$other_results$Cc)
result$other_results$Ind <- t(result$other_results$Ind)
result$other_results$P_Mat <- t(result$other_results$P_Mat)
result$other_results$Q_Mat <- t(result$other_results$Q_Mat)
result$other_results$H_Mat1 <- t(result$other_results$H_Mat1)
result$other_results$H_Mat2 <- t(result$other_results$H_Mat2)
result$other_results$v_vec <- matrix(result$other_results$v_vec, ncol = 2, byrow = TRUE)
return(result);
}
TASCB_C_min <- function(vect, tau, numberOfSamples, sigma){
result <- .Call("TASCB_min",
as.double(vect),
as.double(tau),
as.integer(numberOfSamples),
as.double(sigma))
#all the matrices have to be transposed, due to different interpretation of the sequence of values
#between C and R (in C line by line, and in R column by column)
result$other_results$smoothed = t(result$other_results$smoothed)
result$other_results$zerocrossing = t(result$other_results$zerocrossing)
result$other_results$steps = t(result$other_results$steps)
result$other_results$H_Mat1 = t(result$other_results$H_Mat1)
result$other_results$H_Mat2 = t(result$other_results$H_Mat2)
result$other_results$smoothedX = t(result$other_results$smoothedX)
result$other_results$meanlist = t(result$other_results$meanlist)
result$other_results$v_vec <- matrix(result$other_results$v_vec, ncol = 2, byrow = TRUE)
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/Binarize/R/TASC.R
|
# Provides two methods to binarize a vector consisting of real values. The methods are scale-space-based. With <method>="A",
# the threshold for the binarization is calculated according to the BASC A algorithm, and with <method>="B" according to the
# BASC B algorithm. For details on how this is done, have a look at the vignette. The <tau> argument is a parameter for the bootstrap test
# indicating the quality the binarization should have. The resulting p-value shows how well the binarization fulfills this
# quality requirement. With <numberOfSamples> you can control the number of samples that are used for the bootstrap test.
# <sigma> is only used for the BASC B algorithm, so it is ignored if <method> equals "A". If <method>="B", then <sigma> should be a sequence
# of ascending numbers whose values are used as parameters for the Bessel function.
binarize.BASC <- function(vect, method=c("A","B"), tau = 0.01, numberOfSamples = 999, sigma = seq(0.1, 20, by=.1), na.rm=FALSE){
# Check type of input vector
if(!is.numeric(vect))
stop("The input vector must consist of numerical values!")
# Check for NA values
if (!na.rm && any(is.na(vect)))
stop("Cannot binarize in the presence of NA values!")
else
if (na.rm)
{
vect <- vect[!is.na(vect)]
}
if (any(!is.finite(vect)))
stop("Cannot binarize Inf values!")
# Check type of method
method <- match.arg(method, c("A","B"))
# Check type and value of tau
if(!is.numeric(tau))
stop("'tau' must be numeric!")
if(tau < 0 || tau > 1)
stop("'tau' has to be in [0,1]!")
# Check type and value of numberofSamples
if(!is.numeric(numberOfSamples))
stop("'numberOfSamples' must be numeric!")
if(numberOfSamples < 0)
stop("'numberOfSamples' has to be >= 0!")
# Check type of sigma
if(method == "B" && !is.numeric(sigma))
stop("'sigma' must consist of numerical values!")
# Verify input length
if(length(vect) < 3)
stop("The input vector must have at least 3 entries!")
# Check whether the input is constant
if(length(unique(vect)) == 1)
stop("The input vector is constant!")
#set.seed(proc.time()[3]*1000)
runif(1)
#calculate the results according to the <method> argument
if(method == "A"){
res <- binarizeBASCA_C(vect, tau, numberOfSamples)
result <- new(
"BASCResult",
originalMeasurements = vect,
binarizedMeasurements = res$binarized_vector,
threshold = res$threshold,
p.value = res$p_value,
intermediateSteps = res$other_results$P_Mat,
intermediateHeights = res$other_results$H_Mat,
intermediateStrongestSteps = res$other_results$v_vec,
method = "BASC A"
)
}
else if(method == "B"){
res <- binarizeBASCB_C(vect, tau, numberOfSamples, sigma)
result <- new(
"BASCResult",
originalMeasurements = vect,
binarizedMeasurements = res$binarized_vector,
threshold = res$threshold,
p.value = res$p_value,
intermediateSteps = res$other_results$steps,
intermediateHeights = res$other_results$H_Mat,
intermediateStrongestSteps = res$other_results$v_vec,
method = "BASC B"
)
}
else{
stop(sprintf("'method' has to be either \"A\" or \"B\"!", method))
}
return(result)
}
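# Illustrative usage sketch (made-up bimodal data; assumes the compiled C
# routines are available). BASC A should place the threshold in the gap
# between the two groups:
if (FALSE) {
  v <- c(0.1, 0.2, 0.3, 2.1, 2.2, 2.3)
  res <- binarize.BASC(v, method = "A")
  res@binarizedMeasurements  # 0 0 0 1 1 1
  res@threshold              # between 0.3 and 2.1
}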
#interface for the Call to the C-Function, which does all the calculations for binarization.BASC with method
#argument = "A"
binarizeBASCA_C <- function(vect, tau, numberOfSamples){
#call the C-Function
result <- .Call("binarizeBASCA",
as.double(vect),
as.double(tau),
as.integer(numberOfSamples))
#all the matrices have to be transposed, due to different interpretation of the sequence of values
#between C and R (in C line by line, and in R column by column)
result$other_results$Cc <- t(result$other_results$Cc)
result$other_results$Ind <- t(result$other_results$Ind)
result$other_results$P_Mat <- t(result$other_results$P_Mat)
result$other_results$Q_Mat <- t(result$other_results$Q_Mat)
result$other_results$H_Mat <- t(result$other_results$H_Mat)
return(result);
}
#interface for the Call to the C-Function, which does all the calculations for binarization.BASC with method
#argument = "B"
binarizeBASCB_C <- function(vect, tau, numberOfSamples, sigma){
result <- .Call("binarizeBASCB",
as.double(vect),
as.double(tau),
as.integer(numberOfSamples),
as.double(sigma))
#all the matrices have to be transposed, due to different interpretation of the sequence of values
#between C and R (in C line by line, and in R column by column)
result$other_results$smoothed = t(result$other_results$smoothed)
result$other_results$zerocrossing = t(result$other_results$zerocrossing)
result$other_results$steps = t(result$other_results$steps)
result$other_results$H_Mat = t(result$other_results$H_Mat)
result$other_results$smoothedX = t(result$other_results$smoothedX)
result$other_results$meanlist = t(result$other_results$meanlist)
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/Binarize/R/binarizeBASC.R
|
# A method which uses the k-Means algorithm to binarize a real-valued vector.
# <nstart> controls how many random sets should be chosen by the 'kmeans' method and <iter.max> is the
# maximum number of iterations that are allowed (see also the help for kmeans)
binarize.kMeans <- function(vect, nstart=1, iter.max=10, dip.test=TRUE, na.rm=FALSE){
#some checks of the arguments
if(!is.numeric(vect))
stop("The input vector must consist of numerical values!")
if (!na.rm && any(is.na(vect)))
stop("Cannot binarize in the presence of NA values!")
else
if (na.rm)
{
vect <- vect[!is.na(vect)]
}
if (any(!is.finite(vect)))
stop("Cannot binarize Inf values!")
if(!is.numeric(nstart))
stop("'nstart' must be numeric!")
if(nstart < 0)
stop("'nstart' must be >= 0!")
if(!is.numeric(iter.max))
stop("'iter.max' must be numeric!")
if(iter.max < 0)
stop("'iter.max' must be >= 0!")
if(length(vect) < 3)
stop("The input vector must have at least 3 entries!")
if(length(unique(vect))==1)
stop("The input vector is constant!")
if (dip.test)
{
p.value <- dip.test(vect)$p.value
}
else
p.value <- as.numeric(NA)
#start the standard kmeans method to do all the calulations
km_res <- kmeans(vect, 2, nstart = nstart, iter.max = iter.max)
#the center with greater value should get the binarized value 1, the other 0.
if(km_res$centers[1] > km_res$centers[2]){
binarizeddata <- abs(km_res$cluster - 2)
}
else{
binarizeddata <- km_res$cluster - 1
}
#calculate the threshold as mean of the calculated centers
#threshold <- min(km_res$centers) + dist(km_res$centers)[1] / 2
#threshold <- mean(km_res$centers)
threshold <- (max(vect[!as.logical(binarizeddata)]) + min(vect[as.logical(binarizeddata)])) / 2
#put all computed results into a 'BinarizationResult' object and return it
return(new("BinarizationResult",
originalMeasurements = vect,
binarizedMeasurements = as.integer(binarizeddata),
threshold = threshold,
p.value = p.value,
method = "k-Means"))
}
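# Illustrative usage sketch (same made-up data as for binarize.BASC): here the
# threshold is the midpoint of the gap between the two clusters:
if (FALSE) {
  v <- c(0.1, 0.2, 0.3, 2.1, 2.2, 2.3)
  res <- binarize.kMeans(v)
  res@binarizedMeasurements  # 0 0 0 1 1 1
  res@threshold              # (0.3 + 2.1) / 2 = 1.2
}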
|
/scratch/gouwar.j/cran-all/cranData/Binarize/R/binarizeKMeans.R
|
binarizeMatrix <- function(mat, method=c("BASCA","BASCB","kMeans"), adjustment="none", ...){
binFunc <- switch(match.arg(method, c("BASCA","BASCB","kMeans")),
"BASCA" = function(x, ...){
bin <- binarize.BASC(x, method="A", ...)
return(c(bin@binarizedMeasurements,
list(bin@threshold, [email protected])))
},
"BASCB" = function(x, ...){
bin <- binarize.BASC(x, method="B", ...)
return(c(bin@binarizedMeasurements,
list(bin@threshold, [email protected])))
},
"kMeans" = function(x, ...){
bin <- binarize.kMeans(x, ...)
return(c(bin@binarizedMeasurements,
list(bin@threshold, [email protected])))
})
bin <- do.call("rbind.data.frame", apply(mat, 1, binFunc, ...))
if(!is.null(colnames(mat))){
colnames(bin) <- c(colnames(mat), "threshold", "p.value")
}else{
colnames(bin) <- c(paste("V",seq_len(ncol(mat)),sep=""),
"threshold", "p.value")
}
bin[,"p.value"] <- p.adjust(bin[,"p.value"], method=adjustment)
return(bin)
}
trinarizeMatrix <- function(mat, method=c("TASCA","TASCB"), adjustment="none", ...){
triFunc <- switch(match.arg(method, c("TASCA","TASCB")),
"TASCA" = function(x, ...){
tri <- TASC(x, method="A", ...)
return(c(tri@trinarizedMeasurements,
list(tri@threshold1, tri@threshold2, [email protected])))
},
"TASCB" = function(x, ...){
tri <- TASC(x, method="B", ...)
return(c(tri@trinarizedMeasurements,
list(tri@threshold1, tri@threshold2, [email protected])))
})
tri <- do.call("rbind.data.frame", apply(mat, 1, triFunc, ...))
if(!is.null(colnames(mat))){
colnames(tri) <- c(colnames(mat), "threshold1", "threshold2", "p.value")
}else{
colnames(tri) <- c(paste("V",seq_len(ncol(mat)),sep=""),
"threshold1", "threshold2", "p.value")
}
tri[,"p.value"] <- p.adjust(tri[,"p.value"], method=adjustment)
return(tri)
}
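# Illustrative usage sketch: row-wise binarization of a made-up 2 x 6 matrix
# with Holm-adjusted p-values; each result row holds the binarized values plus
# the threshold and adjusted p-value:
if (FALSE) {
  mat <- rbind(c(0.1, 0.2, 0.3, 2.1, 2.2, 2.3),
               c(1.0, 1.1, 0.9, 3.0, 3.1, 2.9))
  binarizeMatrix(mat, method = "kMeans", adjustment = "holm")
}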
|
/scratch/gouwar.j/cran-all/cranData/Binarize/R/binarizeMatrix.R
|
#############################################Class BinarizationResult############################################
#This is the base class of all results of the binarization functions. It provides the basic methods show, print and
#a method called plotBinarization. It also checks all created object for validity.
setClass(
Class = "BinarizationResult",
representation = representation(
originalMeasurements = "numeric",
binarizedMeasurements = "integer",
threshold = "numeric",
method = "character",
p.value = "numeric"
),
validity = function(object){
#extract object slots
omeasure <- object@originalMeasurements
bmeasure <- object@binarizedMeasurements
thresh <- object@threshold
meth <- object@method
p.value <- [email protected]
#initialize the basic strings
valid_methods <- c(
"BASC A",
"BASC B",
"Scan Statistic",
"Edge Detector: First Edge",
"Edge Detector: Maximum Edge",
"k-Means"
)
for(i in seq(1, length(valid_methods))){
valid_methods_string <- ifelse(i==1, sprintf("\"%s\"", valid_methods[i]), sprintf("%s, \"%s\"", valid_methods_string, valid_methods[i]))
}
#initialize the critical error messages
critical_invalid_strings <- c(
"'originalMeasurements' isn't set!",
"'binarizedMeasurements' isn't set!",
"'threshold' isn't set!",
"'method' isn't set!",
"'p.value' isn't set!"
)
#check object for critical errors
critical_invalid <- c(
!length(omeasure),
!length(bmeasure),
!length(thresh),
!length(meth),
!length(p.value)
)
#if critical error occured return the corresponding error messages
if (sum(as.integer(critical_invalid))){
return(critical_invalid_strings[which(critical_invalid)])
}
#initialize the weak error messages
weak_invalid_strings <- c(
"Only zeros and ones are valid values for 'binarizedMeasurements'.",
sprintf("'method' must be element of {%s}, but it is \"%s\".", valid_methods_string, as.character(meth)),
"Length of original and binarized Measurements must be the same.",
sprintf("'threshold' must be within the borders of the original values, which is the interval [%f, %f], but it is %f.", min(omeasure), max(omeasure), thresh),
"'p.value' must be in range [0,1]."
)
#check object for weak errors
weak_invalid <- c(
length(which(bmeasure > 1)) || length(which(bmeasure < 0)),
length(which(valid_methods == meth)) < 1,
length(bmeasure) != length(omeasure),
thresh < min(omeasure) || thresh > max(omeasure),
(!is.na(p.value) && (p.value < 0 || p.value > 1))
)
#if weak error occured return the corresponding error messages
if (sum(as.integer(weak_invalid))){
return(weak_invalid_strings[which(weak_invalid)])
}
#object is valid
return(TRUE)
}
)
#This method prints the last three slots out to console (binarizedMeasurements is limited to 10 values). It is called
#when creating an object without an assignment or by only typing the name of a BinarizationResult-object at console.
setMethod(
f = "show",
signature = "BinarizationResult",
definition = function(object){
cat("Method: ", object@method, "\n",sep="")
if (length(object@binarizedMeasurements) <= 10)
cat("\nBinarized vector: [ ", paste(object@binarizedMeasurements, collapse=" "),
" ]\n",sep="")
else
cat("\nBinarized vector: [ ",paste(object@binarizedMeasurements[1:10], collapse=" "),
" ...]\n",sep="")
cat("\nThreshold: ", object@threshold, "\n", sep="")
if (!is.na([email protected]))
cat("\np value: ", [email protected], "\n", sep="")
}
)
#setGeneric(
# name = "plot",
# def = function(x, twoDimensional=FALSE, showLegend=TRUE, showThreshold=TRUE, ...){
# standardGeneric("plot")
# }
#)
setGeneric("plot", useAsDefault = plot)
#This Method plots the computed binarization in a one- or two-dimensional way.
setMethod(
f = "plot",
signature = c("BinarizationResult"),
definition = function(x, twoDimensional=FALSE, showLegend=TRUE, showThreshold=TRUE, ...)
{
if (twoDimensional)
plot(1:length(x@binarizedMeasurements), x, showLegend=showLegend, showThreshold=showThreshold, ...)
else
{
#extract the base values of x
vect_length <- length(x@originalMeasurements)
min_val <- min(x@originalMeasurements) #floor(min(c(x@originalMeasurements,0)))
max_val <- max(x@originalMeasurements) #ceiling(max(c(x@originalMeasurements,0)))
#get the ... argument into a list
args <- list(...)
#check for several standard graphic parameters and if they aren't set, set them to default values
if (is.null(args$ylab))
args$ylab <- ""
if (is.null(args$xlab))
args$xlab <- ""
if (is.null(args$lty))
args$lty <- 2
if (is.null(args$pch)){
args$pch <- x@binarizedMeasurements
}
else
if (length(args$pch) == 2)
{
pchs <- args$pch
args$pch <- rep(pchs[1], length(x@binarizedMeasurements))
args$pch[as.logical(x@binarizedMeasurements)] <- rep(pchs[2], sum(x@binarizedMeasurements))
}
col <- args$col
if (is.null(col))
{
col <- c("red","green","black")
}
if (length(col) == 2 || length(col) == 3)
{
args$col <- rep(col[1], length(x@binarizedMeasurements))
args$col[as.logical(x@binarizedMeasurements)] <- rep(col[2], sum(x@binarizedMeasurements))
if (length(col) == 2)
col <- c(col,"black")
}
if (is.null(args$type))
args$type <- "p"
if (is.null(args$yaxt))
args$yaxt="n"
#plotting the axes shouldn't be controlled by standard plot function
#this method does it later
#args$axes <- FALSE
#check for the limit standard graphic parameters and if they aren't set, set them to default values
if (is.null(args$xlim))
args$xlim <- c(min_val,max_val)
if (is.null(args$ylim))
args$ylim <- c(-0.1,0.1)
#set the point coordinates
args$x <- x@originalMeasurements
args$y <- rep(0,vect_length)
#plot them
do.call("plot", args)
#plot the threshold as line
if (as.logical(showThreshold))
{
par(new=TRUE)
largs <- list(...)
if (is.null(largs$lty))
largs$lty <- 2
if (length(col) == 3)
largs$col <- col[3]
else
largs$col <- "black"
do.call("abline", c(largs,v=x@threshold))
}
#if axes isn't set or TRUE plot the x-axis
#if (is.null(list(...)$axes) || as.logical(list(...)$axes) || list(...)$yaxt != "n")
#{
# if (is.null(args$lwd))
# {
# lwd <- 1
# }
# else
# {
# lwd <- args$lwd
# }
# at <- round(seq(min_val,max_val,by=(max_val-min_val)/5),1)
# axis(1, at=at, lwd=lwd, pos=-0.01)
# #axis(1, at=at, lwd=lwd, pos=-0.05)#c(min_val,-10))
# #axis(1, at=at, lwd=lwd, pos=-0.1)#c(min_val,-10))
#}
if (as.logical(showLegend))
{
if (is.null(args$lwd))
{
lwd <- 1
}
else{
lwd <- args$lwd
}
if (as.logical(showThreshold))
{
if (is.null(args$pch)){
pch <- c(0,1,NA)
}
else
if (length(args$pch) > 2)
{
pch <- c(15, 16, NA)
}
else{
pch <- c(unique(args$pch), NA)
}
names <- c("zeros", "ones", "threshold")
lty <- c(NA, NA, args$lty[1])
}
else
{
if (is.null(args$pch))
{
pch <- c(0,1)
}
else
if (length(args$pch) > 2)
{
pch <- c(15, 16)
}
else{
pch <- unique(args$pch)
}
names <- c("zeros", "ones")
lty <- c(NA, NA)
#if (is.null(args$col)){
# col <- "black"
#}
#else if (length(args$col) < 3){
# col <- args$col
#}
#else{
# col <- args$col[1:2]
#}
}
legend("topleft", names, pch=pch,
lty=lty, inset=c(0.05, 0.05), bty="n", cex=0.8, lwd=lwd, col=col)
}
}
}
)
setMethod(
f = "plot",
signature = c("numeric","BinarizationResult"),
definition = function(x, y, showLegend=TRUE, showThreshold=TRUE, ...)
{
#extract the base values of y
vect_length <- length(y@originalMeasurements)
min_val <- min(y@originalMeasurements)
max_val <- max(y@originalMeasurements)
#get the ... argument into a list
args <- list(...)
#check for several standard graphic parameters and if they aren't set, set them to default values
if (is.null(args$ylab))
args$ylab <- ""
if (is.null(args$xlab))
args$xlab <- ""
if (is.null(args$lty))
args$lty <- 2
if (is.null(args$cex.axis))
args$cex.axis <- par("cex.axis")
if (is.null(args$cex.lab))
args$cex.lab <- par("cex.lab")
if (is.null(args$pch))
{
args$pch <- y@binarizedMeasurements
}
else
if (length(args$pch) == 2)
{
pchs <- args$pch
args$pch <- rep(pchs[1], length(y@binarizedMeasurements))
args$pch[as.logical(y@binarizedMeasurements)] <- rep(pchs[2], sum(y@binarizedMeasurements))
}
col <- args$col
if (is.null(col)){
col <- c("red","green","black")
}
if (length(col) == 2 || length(col) == 3){
args$col <- rep(col[1], length(y@binarizedMeasurements))
args$col[as.logical(y@binarizedMeasurements)] <- rep(col[2], sum(y@binarizedMeasurements))
if (length(col) == 2)
col <- c(col,"black")
}
if (is.null(args$type))
args$type <- "p"
#plotting the axes shouldn't be controlled by standard plot function
#this method does it later
#args$axes <- FALSE
args$xaxt="n"
#maxx is the minimal value >= vect_length and dividable by vect_length DIV 5
#(for example: if vect_length = 11 => maxx is 12 and if vect_length = 19 => maxx is 21)
#maxx <- ifelse(vect_length%%5==0, vect_length, vect_length%/%5*6)
#while(maxx < vect_length)
# maxx <- maxx + vect_length%/%5
#check for the limit standard graphic arguments. if not set set them to default values
#if (is.null(args$xlim))
# args$xlim <- c(0, maxx)
#if (is.null(args$ylim))
# args$ylim <- c(min_val, max_val)
#plot the binarization
args$x <- x
#seq(along = x@originalMeasurements)
args$y <- y@originalMeasurements
do.call("plot", args)
#plot the threshold as line
if (as.logical(showThreshold))
{
par(new=TRUE)
largs <- list(...)
if (is.null(largs$lty))
largs$lty <- 2
if (length(col) == 3)
largs$col <- col[3]
else
largs$col <- "black"
do.call("abline", c(largs,h=y@threshold))
}
#if axes isn't set or TRUE plot the x and y axis according to maxx, min_val, max_val
if (is.null(list(...)$axes) || as.logical(list(...)$axes) || list(...)$xaxt != "n")
{
if (is.null(args$lwd))
{
lwd <- 1
}
else
{
lwd <- args$lwd
}
axis(1, at=x, lwd=lwd, cex.axis=args$cex.axis, cex.lab=args$cex.lab)
}
if (as.logical(showLegend))
{
if (is.null(args$lwd))
{
lwd <- 1
}
else{
lwd <- args$lwd
}
if (as.logical(showThreshold))
{
if (is.null(args$pch)){
pch <- c(0,1,NA)
}
else
if (length(args$pch) > 2)
{
pch <- c(15, 16, NA)
}
else{
pch <- c(unique(args$pch), NA)
}
names <- c("zeros", "ones", "threshold")
lty <- c(NA, NA, args$lty[1])
}
else
{
if (is.null(args$pch))
{
pch <- c(0,1)
}
else
if (length(args$pch) > 2)
{
pch <- c(15, 16)
}
else{
pch <- unique(args$pch)
}
names <- c("zeros", "ones")
lty <- c(NA, NA)
}
legend("topleft", names, pch=pch,
lty=lty, inset=c(0.05, 0.05), bty="n", cex=0.8, lwd=lwd, col=col)
}
}
)
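# Illustrative sketch: both plot signatures applied to a k-means binarization
# of made-up data:
if (FALSE) {
  res <- binarize.kMeans(c(0.1, 0.2, 0.3, 2.1, 2.2, 2.3))
  plot(res)                         # one-dimensional: values on a line plus vertical threshold
  plot(res, twoDimensional = TRUE)  # index vs. value with horizontal threshold
}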
#This method prints the last three slots out to console
setMethod(
f = "print",
signature = "BinarizationResult",
definition = function(x){
cat("Method: ", x@method, "\n", sep="")
cat("\nThreshold: ", x@threshold, "\n", sep="")
cat("\nBinarized vector: [ ", paste(x@binarizedMeasurements, collapse=" "),
" ]\n", sep="")
if (!is.na([email protected]))
cat("\np value: ",[email protected],"\n", sep="")
}
)
#############################################Class BASCResult##############################################
#This is the result class for the two BASC algorithms. It provides an additional method called plotStepFunctions and is
#derived from the BinarizationResult class.
setClass(
Class = "BASCResult",
representation = representation(
intermediateSteps = "matrix",
intermediateHeights = "matrix",
intermediateStrongestSteps = "integer"
),
contains = "BinarizationResult",
validity = function(object){
#extract relevant object slots
isteps <- object@intermediateSteps
iheights <- object@intermediateHeights
istrsteps <- object@intermediateStrongestSteps
omeasure <- object@originalMeasurements
#initialize the critical error messages
critical_invalid_strings <- c(
"'intermediateSteps' isn't set!",
"'intermediateHeights' isn't set!",
"'intermediateStrongestSteps' isn't set!"
)
#check object for critical errors
critical_invalid <- c(
!length(isteps),
!length(iheights),
!length(istrsteps)
)
#if a critical error occurred return the corresponding error messages
if (sum(as.integer(critical_invalid))){
return(critical_invalid_strings[which(critical_invalid)])
}
#initialize weak error messages
weak_invalid_strings <- c(
"'intermediateSteps' and 'intermediateHeights' must have the same dimensionality.",
"'intermediateStrongestSteps' must have the same length as the number of rows of 'intermediateSteps'.",
"The values of 'intermediateSteps' must be in range [0, #Measurements].",
"The values of 'intermediateStrongestSteps' must be in range [1, #Measurements]."
)
#check object for weak errors
weak_invalid <- c(
as.logical(sum(dim(isteps) != dim(iheights))),
length(istrsteps) != nrow(isteps),
(sum(isteps < 0) || sum(isteps > length(omeasure))),
(sum(istrsteps < 1) || sum(istrsteps > length(omeasure)))
)
#if a weak error occurred return the corresponding error messages
if (sum(as.integer(weak_invalid))){
return(weak_invalid_strings[which(weak_invalid)])
}
#object is valid
return(TRUE)
}
)
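#Usage sketch (hypothetical input values; binarize.BASC is defined elsewhere in
#this package and returns a BASCResult):
#   res <- binarize.BASC(c(0.1, 0.2, 0.15, 4.1, 4.3, 3.9), method = "A")
#   res@intermediateSteps           #jump positions of the optimal step functions
#   res@intermediateHeights         #corresponding jump heights
#   res@intermediateStrongestSteps  #index of the strongest step per step function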
setGeneric(
name = "plotStepFunctions",
def = function(x, showLegend=TRUE, connected=FALSE, withOriginal=TRUE, ...){
standardGeneric("plotStepFunctions")
}
)
#This method plots all the computed optimal step functions with n steps in one diagram. These step functions are
#produced by the two BASC algorithms and are used to determine the optimal jump point as well as to calculate the
#p-value.
setMethod(
f = "plotStepFunctions",
signature = "BASCResult",
definition = function(x, showLegend=TRUE, connected=FALSE, withOriginal=TRUE, ...){
#check the input BASCResult-Object
if (ncol(x@intermediateSteps) == 0 || nrow(x@intermediateSteps) == 0)
stop("intermediateSteps has no values to plot.")
if (ncol(x@intermediateHeights) == 0 || nrow(x@intermediateHeights) == 0)
stop("intermediateHeights has no values to plot.")
if (length(x@intermediateStrongestSteps) == 0)
stop("intermediateStrongestSteps has no values to plot.")
#get the value-count
vect_count <- length(x@originalMeasurements)
#steps is a matrix with all the jump indices computed by the C-function concatenated with
#1:vect_count which is used for plotting the original step-function
if (as.logical(withOriginal)){
steps <- matrix(nrow=nrow(x@intermediateSteps)+1, ncol = vect_count, data = rep(0,(nrow(x@intermediateSteps)+1) * vect_count))
steps[1:(nrow(steps)-1),1:ncol(x@intermediateSteps)] <- x@intermediateSteps
steps[nrow(steps),] <- seq(along=x@originalMeasurements)
}
else{
steps <- matrix(nrow=nrow(x@intermediateSteps), ncol = vect_count, data = rep(0,nrow(x@intermediateSteps) * vect_count))
steps[1:nrow(steps),1:ncol(x@intermediateSteps)] <- x@intermediateSteps
}
#heights is a matrix with all the jump heights computed by the C-function concatenated with
#the jump heights of the original step-function
if (as.logical(withOriginal)){
heights <- matrix(nrow=nrow(x@intermediateSteps)+1, ncol = vect_count, data = rep(0,(nrow(x@intermediateSteps)+1) * vect_count))
heights[1:(nrow(heights)-1),1:ncol(x@intermediateSteps)] <- x@intermediateHeights
heights[nrow(heights),] <- c(diff(sort(x@originalMeasurements)), 0)
}
else{
heights <- matrix(nrow=nrow(x@intermediateSteps), ncol = vect_count, data = rep(0,nrow(x@intermediateSteps) * vect_count))
heights[1:nrow(heights),1:ncol(x@intermediateSteps)] <- x@intermediateHeights
}
heights <- t(apply(heights,1,function(x)x/sum(x)))
#the maximal y-value is calculated. y starts at 1, all the individual jump-heights are added and
#between every single step-function there's 0.5 free space
maxy <- nrow(steps) * 0.5 + sum(heights) + 1
# #maxx is the minimal value >= vect_length and divisible by vect_length %/% 5
# #(for example: if vect_length = 11 => maxx is 12 and if vect_length = 19 => maxx is 21)
#maxx <- ifelse((vect_count%%5)==0, vect_count, (vect_count%/%5)*6)
#while(maxx < vect_count)
# maxx <- maxx + (vect_count%/%5)
maxx <- vect_count
#calculate the coordinates of the lines of the step-functions
lines <- sapply(
#loop over the rows of steps from last row to first row
rev(seq(along = steps[,1])),
function(i, st, he){
#calculate the base y-value of the current "line"
#it is calculated like maxy but only for the first i lines.
cury <- ifelse(i < nrow(he), sum(he[seq(i + 1, nrow(he)),]) + (nrow(he) - i + 1) * 0.5, 0.5)
#cury <- ifelse(i < nrow(he), sum(he[seq(i + 1, nrow(he)),]), 0.5)
#get the current steps and heights row
cur_steps <- st[i, st[i,] > 0]
cur_heights <- he[i,]
#count is the number of individual line segments of the current step function,
#excluding the last segment, which is always added directly before the return statement
count <- min(vect_count-1, length(cur_steps))
if (!as.logical(connected))
lines <- matrix(nrow=2, ncol=2+4*count)
else
lines <- matrix(nrow=2, ncol=2+2*count)
#construct the coordinates of the lines; the first and last x,y-pair will be added
#after the next block
lines[,seq(2, ncol(lines)-1)] <- matrix(
sapply(
seq(1, count),
function(j,s,h,base){
#the NAs are necessary because vertical lines directly at a step shouldn't be
#plotted
if (!as.logical(connected)){
result <- matrix(ncol=4, nrow=2, rep(NA,8))
result[1,c(1,4)] <- rep(s[j], 2)
result[2, 1] <- ifelse(j==1, base, base + sum(h[1:j-1]))
result[2, 4] <- base + sum(h[1:j])
}
else{
result <- matrix(ncol=2, nrow=2, rep(NA,4))
result[1,c(1,2)] <- rep(s[j], 2)
result[2, 1] <- ifelse(j==1, base, base + sum(h[1:j-1]))
result[2, 2] <- base + sum(h[1:j])
}
return(result)
},
cur_steps,
cur_heights,
cury
)
)
#set the first and the last coordinates pair and return all the coordinates for the
#current step-function
lines[, 1] <- c(0, cury)
lines[, ncol(lines)] <- c(vect_count, lines[2, ncol(lines) - 1])
return(lines)
},
steps,
heights
)
#calculate the coordinates for the lines of the respective strongest steps
if (as.logical(withOriginal)){
ncol <- 3 * (nrow(steps) - 1)
sequence <- rev(seq(1, nrow(steps) - 1))
}
else{
ncol <- 3 * nrow(steps)
sequence <- rev(seq(1, nrow(steps)))
}
strongestLines <- matrix(
nrow = 2,
ncol = ncol,
data = sapply(
sequence,
function(i, st, he, x, l){
#get the current values
cur_steps <- st[i, st[i,] > 0]
cur_heights <- he[i,]
cur_l <- l[[length(l) - i + 1]]
#get the coordinates from the current values
result <- matrix(nrow = 2, ncol = 3, data = rep(NA, 6))
result[1,c(2,3)] <- rep(x@intermediateStrongestSteps[i], 2)
result[2,2] <- cur_l[2, which(cur_l[1,] == x@intermediateStrongestSteps[i])[1]]
result[2,3] <- cur_l[2, which(cur_l[1,] == x@intermediateStrongestSteps[i])[2]]
return(result)
},
steps,
heights,
x,
lines
)
)
#insert NAs at the strongestStep positions, because these lines are plotted separately
if (as.logical(connected)){
for(i in seq(along=lines)){
l <- lines[[i]]
matched <- match(strongestLines[2,],l[2,])
matched <- matched[!is.na(matched)]
if (length(matched) > 0){
ind <- max(matched)
l <- matrix(nrow=2,data=c(l[,1:(ind-1)],NA,NA,l[,-(1:(ind-1))]))
lines[[i]] <- l
}
}
}
#put the additional arguments in args
args <- list(...)
#check several standard graphics parameters and set them to default values if they aren't
#set yet
if (is.null(args$xlim))
args$xlim <- c(0, maxx)
if (is.null(args$ylim))
args$ylim <- c(0, maxy*1.01)
if (is.null(args$pch))
args$pch <- c(1,20)
if (is.null(args$type))
args$type <- "o"
else
args$type <- args$type[1] #if args$type is a vector then take first element for standard-lines
#and the second element for strongest-step lines others will be ignored
if (is.null(args$cex))
args$cex <- c(1,1.2)
if (is.null(args$ylab))
args$ylab <- ""
if (is.null(args$xlab))
args$xlab <- ""
if (is.null(args$lty))
args$lty <- 1
else
args$lty <- args$lty[1] #same handling as args$type
if (!is.null(args$col))
args$col <- args$col[1] #same handling as args$type
#drawing axes will be handled later by this function and not by the standard plot function
args$axes <- FALSE
#plot the step functions
lapply(
lines,
function(l){
args$x <- l[1,]
args$y <- l[2,]
do.call("plot", args)
#par(new=TRUE) is necessary because the old lines shouldn't be deleted
par(new=TRUE)
}
)
#setup args for plotting strongest steps
args$x <- strongestLines[1,]
args$y <- strongestLines[2,]
if (is.null(list(...)$type))
args$type <- "l"
else if (length(list(...)$type) > 1)
args$type <- list(...)$type[2]
else
args$type <- list(...)$type
if (is.null(list(...)$lty))
args$lty <- 2
else if (length(list(...)$lty) > 1)
args$lty <- list(...)$lty[2]
else
args$lty <- list(...)$lty
if (!is.null(list(...)$col) & length(list(...)$col) > 1){
args$col <- list(...)$col[2]
}
#plot the strongest steps of the step functions
do.call("plot", args)
if (is.null(list(...)$lwd))
lwd <- 1
else
lwd <- list(...)$lwd
#if axes isn't set or is set to TRUE then plot the x-axis
if (is.null(list(...)$axes) || as.logical(list(...)$axes)){
axis(1, pos=0, at=seq(0,maxx,by=(vect_count%/%5)), lwd = lwd)
}
#if showLegend is TRUE plot a legend
if (as.logical(showLegend)){
#if lty wasn't set, take the default values for the line types; otherwise take the first
#two values (if possible) for the line types
if (is.null(list(...)$lty))
lty <- c(1,2)
else if (length(list(...)$lty) == 1)
lty <- list(...)$lty
else
lty <- list(...)$lty[c(1,2)]
if (is.null(list(...)$col))
col <- "black"
else if (length(list(...)$col) == 1)
col <- list(...)$col
else
col <- list(...)$col[1:2]
legend("topleft", c("steps","strongest steps"), lty=lty, col=col, inset=c(0.05,0), bty="n", cex=0.8, lwd=lwd)
}
}
)
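#Usage sketch (mirrors the package vignette; 'res' as in the sketch above):
#   plotStepFunctions(res, connected = TRUE)
#   plotStepFunctions(res, withOriginal = FALSE)  #omit the original step function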
|
/scratch/gouwar.j/cran-all/cranData/Binarize/R/result.R
|
#############################################Class TrinarizationResult############################################
#This is the base class of all results of the trinarization functions. It provides the basic methods show, print and
#plot. It also checks all created objects for validity.
setClass(
Class = "TrinarizationResult",
representation = representation(
originalMeasurements = "numeric",
trinarizedMeasurements = "integer",
threshold1 = "numeric",
threshold2 = "numeric",
method = "character",
p.value = "numeric"
),
validity = function(object){
#extract object slots
omeasure <- object@originalMeasurements
bmeasure <- object@trinarizedMeasurements
thresh1 <- object@threshold1
thresh2 <- object@threshold2
meth <- object@method
p.value <- [email protected]
#initialize the basic strings
valid_methods <- c(
"TASC A",
"TASC A (min)",
"TASC B",
"TASC B (min)"
)
for(i in seq(1, length(valid_methods))){
valid_methods_string <- ifelse(i==1, sprintf("\"%s\"", valid_methods[i]), sprintf("%s, \"%s\"", valid_methods_string, valid_methods[i]))
}
#initialize the critical error messages
critical_invalid_strings <- c(
"'originalMeasurements' isn't set!",
"'trinarizedMeasurements' isn't set!",
"'threshold1' isn't set!",
"'threshold2' isn't set!",
"'method' isn't set!",
"'p.value' isn't set!"
)
#check object for critical errors
critical_invalid <- c(
!length(omeasure),
!length(bmeasure),
!length(thresh1),
!length(thresh2),
!length(meth),
!length(p.value)
)
#if a critical error occurred return the corresponding error messages
if (sum(as.integer(critical_invalid))){
return(critical_invalid_strings[which(critical_invalid)])
}
#initialize the weak error messages
weak_invalid_strings <- c(
"Only zeros, ones and twos are valid values for 'trinarizedMeasurements'.",
sprintf("'method' must be element of {%s}, but it is \"%s\".", valid_methods_string, as.character(meth)),
"Length of original and trinarized Measurements must be the same.",
sprintf("'threshold1' and 'threshold2' must be within the borders of the original values, which is the interval [%f, %f], but they are %f and %f.", min(omeasure), max(omeasure), thresh1, thresh2),
"'p.value' must be in range [0,1]."
)
#check object for weak errors
weak_invalid <- c(
length(which(bmeasure > 2)) || length(which(bmeasure < 0)),
length(which(valid_methods == meth)) < 1,
length(bmeasure) != length(omeasure),
thresh1 < min(omeasure) || thresh1 > max(omeasure) || thresh2 < min(omeasure) || thresh2 > max(omeasure),
(!is.na(p.value) && (p.value < 0 || p.value > 1))
)
#if a weak error occurred return the corresponding error messages
if (sum(as.integer(weak_invalid))){
return(weak_invalid_strings[which(weak_invalid)])
}
#object is valid
return(TRUE)
}
)
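#Usage sketch (hypothetical input values; TASC is defined elsewhere in this
#package and returns an object derived from TrinarizationResult):
#   tri <- TASC(c(0.1, 0.2, 5.0, 5.2, 9.8, 10.1), method = "A")
#   tri@trinarizedMeasurements      #values coded as 0, 1, or 2
#   c(tri@threshold1, tri@threshold2)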
#This method prints the main slots to the console (trinarizedMeasurements is limited to 10 values). It is called
#when creating an object without an assignment or by typing the name of a TrinarizationResult object at the console.
setMethod(
f = "show",
signature = "TrinarizationResult",
definition = function(object){
cat("Method: ", object@method, "\n",sep="")
if(length(object@trinarizedMeasurements) <= 10){
cat("\nTrinarized vector: [ ", paste(object@trinarizedMeasurements, collapse=" "),
" ]\n",sep="")
}else{
cat("\nTrinarized vector: [ ",paste(object@trinarizedMeasurements[1:10], collapse=" "),
" ...]\n",sep="")
}
cat("\nThreshold1: ", object@threshold1, "\n", sep="")
cat("\nThreshold2: ", object@threshold2, "\n", sep="")
if(!is.na([email protected])){
cat("\np value: ", [email protected], "\n", sep="")
}
}
)
#This method prints the method name, thresholds, trinarized vector, and p-value (if available) to the console
setMethod(
f = "print",
signature = "TrinarizationResult",
definition = function(x){
cat("Method: ", x@method, "\n", sep="")
cat("\nThreshold1: ", x@threshold1, "\n", sep="")
cat("\nThreshold2: ", x@threshold2, "\n", sep="")
cat("\nTrinarized vector: [ ", paste(x@trinarizedMeasurements, collapse=" "),
" ]\n", sep="")
if (!is.na([email protected])){
cat("\np value: ",[email protected],"\n", sep="")
}
}
)
setGeneric("plot", useAsDefault = plot)
#This method plots the computed trinarization in a one- or two-dimensional way.
setMethod(
f = "plot",
signature = c("TrinarizationResult"),
definition = function(x, twoDimensional=FALSE, showLegend=TRUE, showThreshold=TRUE, ...)
{
if (twoDimensional){
plot(1:length(x@trinarizedMeasurements), x, showLegend=showLegend, showThreshold=showThreshold, ...)
}else{
#extract the base values of x
vect_length <- length(x@originalMeasurements)
min_val <- min(x@originalMeasurements) #floor(min(c(x@originalMeasurements,0)))
max_val <- max(x@originalMeasurements) #ceiling(max(c(x@originalMeasurements,0)))
#get the ... argument into a list
args <- list(...)
#check for several standard graphic parameters and if they aren't set, set them to default values
if(is.null(args$ylab)){
args$ylab <- ""
}
if(is.null(args$xlab)){
args$xlab <- ""
}
if(is.null(args$lty)){
args$lty <- 2
}
if(is.null(args$pch)){
args$pch <- x@trinarizedMeasurements
}else if(length(args$pch) == 3){
args$pch <- args$pch[x@trinarizedMeasurements+1]
}
col <- args$col
if(is.null(col)){
col <- c("red","green","blue","black")
}
if(length(col) < 3){
col <- rep(col, 3)[1:3]
}
if(length(col) == 3){
col <- c(col, "black")
}
if(length(col) == 4){
args$col <- col[x@trinarizedMeasurements+1]
}
if(is.null(args$type)){
args$type <- "p"
}
if(is.null(args$yaxt)){
args$yaxt="n"
}
#plotting the axes shouldn't be controlled by the standard plot function;
#this method does it later
#args$axes <- FALSE
#check for the limit standard graphic parameters and if they aren't set, set them to default values
if(is.null(args$xlim)){
args$xlim <- c(min_val,max_val)
}
if(is.null(args$ylim)){
args$ylim <- c(-0.1,0.1)
}
#set the point coordinates
args$x <- x@originalMeasurements
args$y <- rep(0,vect_length)
#plot them
do.call("plot", args)
#plot the threshold as line
if(as.logical(showThreshold)){
par(new=TRUE)
largs <- list(...)
if(is.null(largs$lty)){
largs$lty <- 2
}
largs$col <- col[4]
do.call("abline", c(largs,v=x@threshold1))
do.call("abline", c(largs,v=x@threshold2))
}
#if axes isn't set or TRUE plot the x-axis
#if (is.null(list(...)$axes) || as.logical(list(...)$axes) || list(...)$yaxt != "n"){
# if (is.null(args$lwd)){
# lwd <- 1
# }else{
# lwd <- args$lwd
# }
# at <- round(seq(min_val,max_val,by=(max_val-min_val)/5),1)
# axis(1, at=at, lwd=lwd, pos=-0.01)
# #axis(1, at=at, lwd=lwd, pos=-0.05)#c(min_val,-10))
# #axis(1, at=at, lwd=lwd, pos=-0.1)#c(min_val,-10))
#}
if(as.logical(showLegend)){
if(is.null(args$lwd)){
lwd <- 1
}else{
lwd <- args$lwd
}
if(as.logical(showThreshold)){
if(is.null(args$pch)){
pch <- c(0,1,2,NA)
}else if(length(args$pch) > 3){
pch <- c(15, 16, 17, NA)
}else{
pch <- c(unique(args$pch), NA)
}
names <- c("zero", "one", "two", "threshold")
lty <- c(NA, NA, NA, args$lty[1])
}else{
if(is.null(args$pch)){
pch <- c(0,1,2)
}else if(length(args$pch) > 3){
pch <- c(15, 16, 17)
}else{
pch <- unique(args$pch)
}
names <- c("zero", "one", "two")
lty <- c(NA, NA, NA)
#if(is.null(args$col)){
# col <- "black"
#}else if (length(args$col) < 3){
# col <- args$col
#}else{
# col <- args$col[1:2]
#}
}
legend("topleft", names, pch=pch,
lty=lty, inset=c(0.05, 0.05), bty="n", cex=0.8, lwd=lwd, col=col)
}
}
}
)
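#Usage sketch ('tri' as in the sketch above):
#   plot(tri)                        #one-dimensional: values on a line, thresholds vertical
#   plot(tri, twoDimensional = TRUE) #two-dimensional: index vs. value, thresholds horizontal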
setMethod(
f = "plot",
signature = c("numeric","TrinarizationResult"),
definition = function(x, y, showLegend=TRUE, showThreshold=TRUE, ...)
{
#extract the base values of y
vect_length <- length(y@originalMeasurements)
min_val <- min(y@originalMeasurements)
max_val <- max(y@originalMeasurements)
#get the ... argument into a list
args <- list(...)
#check for several standard graphic parameters and if they aren't set, set them to default values
if(is.null(args$ylab)){
args$ylab <- ""
}
if(is.null(args$xlab)){
args$xlab <- ""
}
if(is.null(args$lty))
args$lty <- 2
if(is.null(args$cex.axis))
args$cex.axis <- par("cex.axis")
if(is.null(args$cex.lab))
args$cex.lab <- par("cex.lab")
if(is.null(args$pch))
{
args$pch <- y@trinarizedMeasurements
}
else
if(length(args$pch) == 3)
{
args$pch <- args$pch[y@trinarizedMeasurements+1]
}
col <- args$col
if(is.null(col)){
col <- c("red","green","blue","black")
}
if(length(col) < 3){
col <- rep(col, 3)[1:3]
}
if(length(col) == 3){
col <- c(col, "black")
}
if(length(col) == 4){
args$col <- col[y@trinarizedMeasurements+1]
}
if(is.null(args$type))
args$type <- "p"
#plotting the axes shouldn't be controlled by standard plot function
#this method does it later
#args$axes <- FALSE
args$xaxt="n"
#maxx is the minimal value >= vect_length and divisible by vect_length %/% 5
#(for example: if vect_length = 11 => maxx is 12 and if vect_length = 19 => maxx is 21)
#maxx <- ifelse(vect_length%%5==0, vect_length, vect_length%/%5*6)
#while(maxx < vect_length)
# maxx <- maxx + vect_length%/%5
#check for the limit-related standard graphic arguments; if not set, set them to default values
#if (is.null(args$xlim))
# args$xlim <- c(0, maxx)
#if (is.null(args$ylim))
# args$ylim <- c(min_val, max_val)
#plot the binarization
args$x <- x
#seq(along = x@originalMeasurements)
args$y <- y@originalMeasurements
do.call("plot", args)
#plot the threshold as line
if(as.logical(showThreshold)){
par(new=TRUE)
largs <- list(...)
if (is.null(largs$lty))
largs$lty <- 2
largs$col <- col[4]
do.call("abline", c(largs,h=y@threshold1))
do.call("abline", c(largs,h=y@threshold2))
}
#if axes isn't set or is TRUE, plot the x-axis
if(is.null(list(...)$axes) || as.logical(list(...)$axes) || isTRUE(list(...)$xaxt != "n"))
{
if(is.null(args$lwd))
{
lwd <- 1
}
else
{
lwd <- args$lwd
}
axis(1, at=x, lwd=lwd, cex.axis=args$cex.axis, cex.lab=args$cex.lab)
}
if(as.logical(showLegend))
{
if(is.null(args$lwd))
{
lwd <- 1
}
else{
lwd <- args$lwd
}
if(as.logical(showThreshold))
{
if(is.null(args$pch)){
pch <- c(0,1,2,NA)
}
else
if(length(args$pch) > 3)
{
pch <- c(15, 16, 17, NA)
}
else{
pch <- c(unique(args$pch), NA)
}
names <- c("zero", "one", "two", "threshold")
lty <- c(NA, NA, NA, args$lty[1])
}
else
{
if(is.null(args$pch))
{
pch <- c(0,1,2)
}
else
if(length(args$pch) > 3)
{
pch <- c(15, 16, 17)
}
else{
pch <- unique(args$pch)
}
names <- c("zero", "one", "two")
lty <- c(NA, NA, NA)
}
legend("topleft", names, pch=pch,
lty=lty, inset=c(0.05, 0.05), bty="n", cex=0.8, lwd=lwd, col=col)
}
}
)
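#Usage sketch: the two-argument form allows custom x positions, e.g. measurement
#time points (hypothetical values; 'tri' as in the sketch above):
#   timepoints <- c(0, 1, 2, 4, 8, 16)
#   plot(timepoints, tri, showThreshold = TRUE)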
#############################################Class TASCResult##############################################
#This is the result class for the TASC algorithm. It provides an additional method called plotStepFunctions and is
#derived from the TrinarizationResult class.
setClass(
Class = "TASCResult",
representation = representation(
intermediateSteps = "matrix",
intermediateHeights1 = "matrix",
intermediateHeights2 = "matrix",
intermediateStrongestSteps = "matrix"),
contains = "TrinarizationResult",
validity = function(object){
#extract relevant object slots
isteps <- object@intermediateSteps
iheights1 <- object@intermediateHeights1
iheights2 <- object@intermediateHeights2
istrsteps <- object@intermediateStrongestSteps
omeasure <- object@originalMeasurements
#initialize the critical error messages
critical_invalid_strings <- c(
"'intermediateSteps' isn't set!",
"'intermediateHeights1' isn't set!",
"'intermediateHeights2' isn't set!",
"'intermediateStrongestSteps' isn't set!"
)
#check object for critical errors
critical_invalid <- c(
!length(isteps),
!length(iheights1),
!length(iheights2),
!length(istrsteps)
)
#if a critical error occurred return the corresponding error messages
if(sum(as.integer(critical_invalid))){
return(critical_invalid_strings[which(critical_invalid)])
}
#initialize weak error messages
weak_invalid_strings <- c(
"'intermediateSteps', 'intermediateHeights1' and 'intermediateHeights2' must have the same dimensionality.",
"'intermediateStrongestSteps' must have the same number of rows as 'intermediateSteps'.",
"The values of 'intermediateSteps' must be in range [0, #Measurements].",
"The values of 'intermediateStrongestSteps' must be in range [1, #Measurements]."
)
#check object for weak errors
weak_invalid <- c(
as.logical(sum(dim(isteps) != dim(iheights1)) || sum(dim(isteps) != dim(iheights2))),
nrow(istrsteps) != nrow(isteps),
(sum(isteps < 0) || sum(isteps > length(omeasure))),
(sum(istrsteps[,1] < 1) || sum(istrsteps[,1] > length(omeasure)) || sum(istrsteps[,2] < 1) || sum(istrsteps[,2] > length(omeasure)))
)
#if a weak error occurred return the corresponding error messages
if(sum(as.integer(weak_invalid))){
return(weak_invalid_strings[which(weak_invalid)])
}
#object is valid
return(TRUE)
}
)
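#Usage sketch ('tri' as in the sketch above, assuming it was created by TASC):
#   tri@intermediateStrongestSteps  #one row per step function, the two strongest steps as columns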
#This method plots all the computed optimal step functions with n steps in one diagram. These step functions are
#produced by the TASC algorithm and are used to determine the optimal jump points as well as to calculate the
#p-value.
setMethod(
f = "plotStepFunctions",
signature = "TASCResult",
definition = function(x, showLegend=TRUE, connected=FALSE, withOriginal=TRUE, ...){
#check the input TASCResult object
if(ncol(x@intermediateSteps) == 0 || nrow(x@intermediateSteps) == 0)
stop("intermediateSteps has no values to plot.")
if(ncol(x@intermediateHeights1) == 0 || nrow(x@intermediateHeights1) == 0)
stop("intermediateHeights1 has no values to plot.")
if(ncol(x@intermediateHeights2) == 0 || nrow(x@intermediateHeights2) == 0)
stop("intermediateHeights2 has no values to plot.")
if(ncol(x@intermediateStrongestSteps) == 0 || nrow(x@intermediateStrongestSteps) == 0)
stop("intermediateStrongestSteps has no values to plot.")
#get the value-count
vect_count <- length(x@originalMeasurements)
#steps is a matrix with all the jump indices computed by the C-function concatenated with
#1:vect_count which is used for plotting the original step-function
if(as.logical(withOriginal)){
steps <- matrix(nrow=nrow(x@intermediateSteps)+1, ncol = vect_count, data = rep(0,(nrow(x@intermediateSteps)+1) * vect_count))
steps[1:(nrow(steps)-1),1:ncol(x@intermediateSteps)] <- x@intermediateSteps
steps[nrow(steps),] <- seq(along=x@originalMeasurements)
}else{
steps <- matrix(nrow=nrow(x@intermediateSteps), ncol = vect_count, data = rep(0,nrow(x@intermediateSteps) * vect_count))
steps[1:nrow(steps),1:ncol(x@intermediateSteps)] <- x@intermediateSteps
}
#heights is a matrix with all the jump heights computed by the C-function concatenated with
#the jump heights of the original step-function
if(as.logical(withOriginal)){
heights1 <- matrix(nrow=nrow(x@intermediateSteps)+1, ncol = vect_count, data = rep(0,(nrow(x@intermediateSteps)+1) * vect_count))
heights1[1:(nrow(heights1)-1),1:ncol(x@intermediateSteps)] <- x@intermediateHeights1
heights1[nrow(heights1),] <- c(diff(sort(x@originalMeasurements)), 0)
heights2 <- matrix(nrow=nrow(x@intermediateSteps)+1, ncol = vect_count, data = rep(0,(nrow(x@intermediateSteps)+1) * vect_count))
heights2[1:(nrow(heights2)-1),1:ncol(x@intermediateSteps)] <- x@intermediateHeights2
heights2[nrow(heights2),] <- c(diff(sort(x@originalMeasurements)), 0)
}else{
heights1 <- matrix(nrow=nrow(x@intermediateSteps), ncol = vect_count, data = rep(0,nrow(x@intermediateSteps) * vect_count))
heights1[1:nrow(heights1),1:ncol(x@intermediateSteps)] <- x@intermediateHeights1
heights2 <- matrix(nrow=nrow(x@intermediateSteps), ncol = vect_count, data = rep(0,nrow(x@intermediateSteps) * vect_count))
heights2[1:nrow(heights2),1:ncol(x@intermediateSteps)] <- x@intermediateHeights2
}
heights1 <- t(apply(heights1,1,function(x)x/sum(x)))
heights2 <- t(apply(heights2,1,function(x)x/sum(x)))
#the maximal y-value is calculated. y starts at 1, all the individual jump-heights are added and
#between every single step-function there's 0.5 free space
maxy <- nrow(steps) * 0.5 + sum(heights1) + 1
# #maxx is the minimal value >= vect_length and divisible by vect_length %/% 5
# #(for example: if vect_length = 11 => maxx is 12 and if vect_length = 19 => maxx is 21)
#maxx <- ifelse((vect_count%%5)==0, vect_count, (vect_count%/%5)*6)
#while(maxx < vect_count)
# maxx <- maxx + (vect_count%/%5)
maxx <- vect_count
#calculate the coordinates of the lines of the step-functions
lines <- sapply(
#loop over the rows of steps from last row to first row
rev(seq(along = steps[,1])),
function(i, st, he){
#calculate the base y-value of the current "line"
#it is calculated like maxy but only for the first i lines.
cury <- ifelse(i < nrow(he), sum(he[seq(i + 1, nrow(he)),]) + (nrow(he) - i + 1) * 0.5, 0.5)
#cury <- ifelse(i < nrow(he), sum(he[seq(i + 1, nrow(he)),]), 0.5)
#get the current steps and heights row
cur_steps <- st[i, st[i,] > 0]
cur_heights <- he[i,]
#count is the number of individual line segments of the current step function,
#excluding the last segment, which is always added directly before the return statement
count <- min(vect_count-1, length(cur_steps))
if(!as.logical(connected))
lines <- matrix(nrow=2, ncol=2+4*count)
else
lines <- matrix(nrow=2, ncol=2+2*count)
#construct the coordinates of the lines; the first and last x,y-pair will be added
#after the next block
lines[,seq(2, ncol(lines)-1)] <- matrix(
sapply(
seq(1, count),
function(j,s,h,base){
#the NAs are necessary because vertical lines directly at a step shouldn't be
#plotted
if(!as.logical(connected)){
result <- matrix(ncol=4, nrow=2, rep(NA,8))
result[1,c(1,4)] <- rep(s[j], 2)
result[2, 1] <- ifelse(j==1, base, base + sum(h[1:j-1]))
result[2, 4] <- base + sum(h[1:j])
}
else{
result <- matrix(ncol=2, nrow=2, rep(NA,4))
result[1,c(1,2)] <- rep(s[j], 2)
result[2, 1] <- ifelse(j==1, base, base + sum(h[1:j-1]))
result[2, 2] <- base + sum(h[1:j])
}
return(result)
},
cur_steps,
cur_heights,
cury
)
)
#set the first and the last coordinates pair and return all the coordinates for the
#current step-function
lines[, 1] <- c(0, cury)
lines[, ncol(lines)] <- c(vect_count, lines[2, ncol(lines) - 1])
return(lines)
},
steps,
heights1
)
#calculate the coordinates for the lines of the respective strongest steps
if(as.logical(withOriginal)){
ncol <- 3 * (nrow(steps) - 1)
sequence <- rev(seq(1, nrow(steps) - 1))
}
else{
ncol <- 3 * nrow(steps)
sequence <- rev(seq(1, nrow(steps)))
}
strongestLines <- matrix(
nrow = 2,
ncol = ncol,
data = sapply(
sequence,
function(i, st, he, x, l){
#get the current values
cur_steps <- st[i, st[i,] > 0]
cur_heights <- he[i,]
cur_l <- l[[length(l) - i + 1]]
#get the coordinates from the current values
result <- matrix(nrow = 2, ncol = 3, data = rep(NA, 6))
result[1,c(2,3)] <- rep(x@intermediateStrongestSteps[i,1], 2)
result[2,2] <- cur_l[2, which(cur_l[1,] == x@intermediateStrongestSteps[i,1])[1]]
result[2,3] <- cur_l[2, which(cur_l[1,] == x@intermediateStrongestSteps[i,1])[2]]
return(result)
},
steps,
heights1,
x,
lines
)
)
strongestLines2 <- matrix(
nrow = 2,
ncol = ncol,
data = sapply(
sequence,
function(i, st, he, x, l){
#get the current values
cur_steps <- st[i, st[i,] > 0]
cur_heights <- he[i,]
cur_l <- l[[length(l) - i + 1]]
#get the coordinates from the current values
result <- matrix(nrow = 2, ncol = 3, data = rep(NA, 6))
result[1,c(2,3)] <- rep(x@intermediateStrongestSteps[i,2], 2)
result[2,2] <- cur_l[2, which(cur_l[1,] == x@intermediateStrongestSteps[i,2])[1]]
result[2,3] <- cur_l[2, which(cur_l[1,] == x@intermediateStrongestSteps[i,2])[2]]
return(result)
},
steps,
heights2,
x,
lines
)
)
strongestLines <- cbind(strongestLines, strongestLines2)
#insert NAs at the strongestStep positions, because these lines are plotted separately
if(as.logical(connected)){
for(i in seq(along=lines)){
l <- lines[[i]]
matched <- match(strongestLines[2,],l[2,])
matched <- matched[!is.na(matched)]
if (length(matched) > 0){
ind <- max(matched)
l <- matrix(nrow=2,data=c(l[,1:(ind-1)],NA,NA,l[,-(1:(ind-1))]))
lines[[i]] <- l
}
}
}
#put the additional arguments in args
args <- list(...)
#check several standard graphics parameters and set them to default values if they aren't
#set yet
if(is.null(args$xlim))
args$xlim <- c(0, maxx)
if(is.null(args$ylim))
args$ylim <- c(0, maxy*1.01)
if(is.null(args$pch))
args$pch <- c(1,20)
if(is.null(args$type))
args$type <- "o"
else
args$type <- args$type[1] #if args$type is a vector then take first element for standard-lines
#and the second element for strongest-step lines others will be ignored
if(is.null(args$cex))
args$cex <- c(1,1.2)
if(is.null(args$ylab))
args$ylab <- ""
if(is.null(args$xlab))
args$xlab <- ""
if(is.null(args$lty))
args$lty <- 1
else
args$lty <- args$lty[1] #same handling as args$type
if(!is.null(args$col))
args$col <- args$col[1] #same handling as args$type
#drawing axes will be handled later by this function and not by the standard plot function
args$axes <- FALSE
#plot the step functions
lapply(
lines,
function(l){
args$x <- l[1,]
args$y <- l[2,]
do.call("plot", args)
#par(new=TRUE) is necessary because the old lines shouldn't be deleted
par(new=TRUE)
}
)
#setup args for plotting strongest steps
args$x <- strongestLines[1,]
args$y <- strongestLines[2,]
if(is.null(list(...)$type))
args$type <- "l"
else if(length(list(...)$type) > 1)
args$type <- list(...)$type[2]
else
args$type <- list(...)$type
if(is.null(list(...)$lty))
args$lty <- 2
else if(length(list(...)$lty) > 1)
args$lty <- list(...)$lty[2]
else
args$lty <- list(...)$lty
if(!is.null(list(...)$col) & length(list(...)$col) > 1){
args$col <- list(...)$col[2]
}
#plot the strongest steps of the step functions
do.call("plot", args)
if(is.null(list(...)$lwd))
lwd <- 1
else
lwd <- list(...)$lwd
#if axes isn't set or is set to TRUE then plot the x-axis
if(is.null(list(...)$axes) || as.logical(list(...)$axes)){
axis(1, pos=0, at=seq(0,maxx,by=(vect_count%/%5)), lwd = lwd)
}
#if showLegend is TRUE plot a legend
if(as.logical(showLegend)){
#if lty wasn't set, take the default values for the line types; otherwise take the first
#two values (if possible) for the line types
if(is.null(list(...)$lty))
lty <- c(1,2)
else if(length(list(...)$lty) == 1)
lty <- list(...)$lty
else
lty <- list(...)$lty[c(1,2)]
if(is.null(list(...)$col))
col <- "black"
else if(length(list(...)$col) == 1)
col <- list(...)$col
else
col <- list(...)$col[1:2]
legend("topleft", c("steps","strongest steps"), lty=lty, col=col, inset=c(0.05,0), bty="n", cex=0.8, lwd=lwd)
}
}
)
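#Usage sketch (mirrors the package vignette; 'tri' as in the sketch above):
#   plotStepFunctions(tri, connected = TRUE)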
|
/scratch/gouwar.j/cran-all/cranData/Binarize/R/result_TASC.R
|
### R code from vignette source 'Vignette.Rnw'
###################################################
### code chunk number 1: Vignette.Rnw:36-37
###################################################
set.seed(13579)
###################################################
### code chunk number 2: Vignette.Rnw:195-196 (eval = FALSE)
###################################################
## install.packages("Binarize")
###################################################
### code chunk number 3: Vignette.Rnw:199-200
###################################################
library("Binarize")
###################################################
### code chunk number 4: Vignette.Rnw:213-214
###################################################
data(binarizationExample)
###################################################
### code chunk number 5: Vignette.Rnw:220-227
###################################################
pdf("density.pdf")
par(mar=c(2,2,1,1))
#plot(density(binarizationExample[1,]),main="")
#abline(v=mean(binarizationExample[1,]), lty="dashed")
plot(function(x)dnorm(x,mean=0,sd=1)+dnorm(x,mean=10,sd=1),xlim=c(-5,15),main="")
abline(v=5, lty="dashed")
dev.off()
###################################################
### code chunk number 6: Vignette.Rnw:241-243
###################################################
bin <- binarize.kMeans(binarizationExample[1,])
print(bin)
###################################################
### code chunk number 7: Vignette.Rnw:250-251
###################################################
print(bin@binarizedMeasurements)
###################################################
### code chunk number 8: Vignette.Rnw:255-256 (eval = FALSE)
###################################################
## plot(bin)
###################################################
### code chunk number 9: Vignette.Rnw:259-260 (eval = FALSE)
###################################################
## plot(bin, twoDimensional=TRUE)
###################################################
### code chunk number 10: Vignette.Rnw:264-270
###################################################
pdf("plot_oneD.pdf")
plot(bin)
dev.off()
pdf("plot_twoD.pdf")
plot(bin, twoDimensional=TRUE)
dev.off()
###################################################
### code chunk number 11: Vignette.Rnw:286-288
###################################################
label <- c(rep(0,5), rep(1,5))
bin <- binarize.kMeans(binarizationExample[10,])
###################################################
### code chunk number 12: Vignette.Rnw:293-296 (eval = FALSE)
###################################################
## plot(bin, twoDimensional=TRUE,
## col=label+1, pch=label,
## showLegend=FALSE)
###################################################
### code chunk number 13: Vignette.Rnw:298-301
###################################################
pdf("plot_bin_with_label.pdf")
plot(bin, twoDimensional=TRUE, col=label+1, pch=label, showLegend=FALSE)
dev.off()
###################################################
### code chunk number 14: Vignette.Rnw:314-316
###################################################
binMatrix <- binarizeMatrix(binarizationExample,
method="kMeans")
###################################################
### code chunk number 15: Vignette.Rnw:320-323
###################################################
binMatrixFDR <- binarizeMatrix(binarizationExample,
method="kMeans",
adjustment="fdr")
###################################################
### code chunk number 16: Vignette.Rnw:330-332
###################################################
bin <- binarize.BASC(binarizationExample[1,], method="A")
print(bin)
###################################################
### code chunk number 17: Vignette.Rnw:346-347
###################################################
print(bin@intermediateStrongestSteps)
###################################################
### code chunk number 18: Vignette.Rnw:353-359
###################################################
pdf("stepsA.pdf")
plotStepFunctions(bin, connected=TRUE)
dev.off()
pdf("stepsB.pdf")
plotStepFunctions(binarize.BASC(binarizationExample[1,], method="B"), connected=TRUE)
dev.off()
###################################################
### code chunk number 19: Vignette.Rnw:361-362 (eval = FALSE)
###################################################
## plotStepFunctions(bin)
###################################################
### code chunk number 20: Vignette.Rnw:386-387
###################################################
data(trinarizationExample)
###################################################
### code chunk number 21: Vignette.Rnw:393-395
###################################################
tri <- TASC(trinarizationExample[1,], method="A")
print(tri)
###################################################
### code chunk number 22: Vignette.Rnw:423-424
###################################################
print(tri@intermediateStrongestSteps)
###################################################
### code chunk number 23: Vignette.Rnw:431-437
###################################################
pdf("triA.pdf")
par(mfrow = c(1,2), mar = c(2,2,1,1))
plotStepFunctions(tri, connected=TRUE)
par(mar = c(2,2,1,1))
plot(tri, twoDimensional = TRUE)
dev.off()
###################################################
### code chunk number 24: Vignette.Rnw:439-441 (eval = FALSE)
###################################################
## plotStepFunctions(tri)
## plot(tri, twoDimensional = TRUE)
###################################################
### code chunk number 25: Vignette.Rnw:469-474
###################################################
binMatrix <- binarizeMatrix(binarizationExample,
method="kMeans",
adjustment="fdr")
significantRows <- sum(binMatrix[,12] < 0.05)
print(significantRows)
###################################################
### code chunk number 26: Vignette.Rnw:482-488
###################################################
binarizations <- apply(binarizationExample, 1, binarize.BASC, method="A")
pVals <- p.adjust(sapply(binarizations, function(x)
{
return([email protected])
}), method="fdr")
significantRows <- sum(pVals < 0.05)
###################################################
### code chunk number 27: Vignette.Rnw:490-491
###################################################
print(significantRows)
###################################################
### code chunk number 28: Vignette.Rnw:496-502
###################################################
binarizations <- apply(binarizationExample, 1, binarize.BASC, method="B")
pVals <- p.adjust(sapply(binarizations, function(x)
{
return([email protected])
}), method="fdr")
significantRows <- sum(pVals < 0.05)
###################################################
### code chunk number 29: Vignette.Rnw:504-505
###################################################
print(significantRows)
###################################################
### code chunk number 30: Vignette.Rnw:524-526
###################################################
tauValues <- seq(0,0.25, 0.05)
print(tauValues)
###################################################
### code chunk number 31: Vignette.Rnw:531-544
###################################################
significantFeatures <- sapply(tauValues, function(tau)
{
binMatrix <- binarizeMatrix(binarizationExample,
method="BASCB",
adjustment="fdr",
tau=tau)
significantRows <- sum(binMatrix[,12] < 0.05)
return(significantRows)})
names(significantFeatures) <- tauValues
print(significantFeatures)
|
/scratch/gouwar.j/cran-all/cranData/Binarize/inst/doc/Vignette.R
|
#' Merge binary dosage files together
#'
#' Routine to merge binary dosage files together. The files
#' don't have to be in the same format. They will be merged
#' into a file with the format specified. Information about
#' the SNPs, aaf, maf, avgcall, rsq, can be maintained for
#' each file, or recalculated for the merged set.
#'
#' @param mergefiles Vector of file names for the merged binary
#' files. The first is the
#' binary dosage data containing the dosages and genetic
#' probabilities. The second file name is the family information
#' file. The third file name is the SNP information file.
#' The family and SNP information files are not used if the
#' binary dosage file is in format 4. For this format the
#' family and SNP information are in the file with the dosages
#' and genetic probabilities.
#' @param format The format of the output binary dosage file.
#' Allowed values are 1, 2, 3, and 4. The default value is 4.
#' Using the default value is recommended.
#' @param subformat The subformat of the format of the output
#' binary dosage file. A value of 1 or 3 indicates that only the
#' dosage value is saved. A value of 2 or 4 indicates
#' the dosage and genetic probabilities will be output. Values
#' of 3 or 4 are only allowed with formats 3 and 4. If a value
#' of zero if provided, and genetic probabilities are in the vcf
#' file, subformat 2 will be used for formats 1 and 2, and
#' subformat 4 will be used for formats 3 and 4. If the vcf file
#' does not contain genetic probabilities, subformat 1 will be
#' used for formats 1 and 2, and subformat 3 will be used for
#' formats 3 and 4. The default value is 0.
#' @param bdfiles Vector of binary dosage file names to be merged.
#' @param famfiles Vector of family file names that correspond to
#' the names in bdfiles. If the binary dosage files are all in format
#' 4, this may be an empty character array. Default value is character().
#' @param mapfiles Vector of map file names that correspond to the
#' names in bdfiles. If the binary dosage files are all in format
#' 4, this may be an empty character array. Default value is character().
#' @param onegroup Indicator to combine all the samples in one group.
#' If this is FALSE, the groups in each binary dosage file are
#' maintained and any binary dosage file with one group is made into
#' its own group. Default value is TRUE.
#' @param bdoptions Options indicating what information to calculate
#' and store for each SNP. These can be aaf, maf, and rsq. This option
#' is only available if format is equal to 4 and onegroup is TRUE.
#' Default value is character().
#' @param snpjoin Character value that can be either "inner" or "outer".
#' This indicates whether to do an inner or outer join of the SNPs in
#' each binary dosage file. Default value is "inner".
#'
#' @return
#' None
#' @export
#'
#' @examples
#' bdvcf1afile <- system.file("extdata", "vcf1a.bdose", package = "BinaryDosage")
#' bdvcf1bfile <- system.file("extdata", "vcf1b.bdose", package = "BinaryDosage")
#' mergefiles <- tempfile()
#'
#' BinaryDosage:::bdmerge(mergefiles = mergefiles,
#' bdfiles = c(bdvcf1afile, bdvcf1bfile),
#' bdoptions = "maf")
#' bdinfo <- getbdinfo(mergefiles)
bdmerge <- function(mergefiles,
format = 4,
subformat = 0L,
bdfiles,
famfiles = character(),
mapfiles = character(),
onegroup = TRUE,
bdoptions = character(),
snpjoin = "inner") {
if (is.numeric(format) == FALSE && is.integer(format) == FALSE)
stop("format must be an integer value")
if (length(format) != 1)
stop("format must be an integer vector of length 1")
if (is.numeric(format) == TRUE) {
if (floor(format) != format)
stop("format must be an integer")
format = floor(format)
}
if (format < 1 || format > 4)
stop("format must be an integer value from 1 to 4")
if (is.numeric(subformat) == FALSE && is.integer(subformat) == FALSE)
stop("subformat must be an integer value")
if (length(subformat) != 1)
stop("subformat must be an integer vector of length 1")
if (is.numeric(subformat) == TRUE) {
if (floor(subformat) != subformat)
stop("subformat must be an integer")
subformat = floor(subformat)
}
if (subformat < 0 || subformat > 4)
stop("subformat must be an integer value from 0 to 4")
if (format < 3 && subformat > 2)
stop("subformat must be an integer value from 0 to 2 for formats 1 and 2")
if (missing(mergefiles) == TRUE)
stop("No output files specified")
if (is.character(mergefiles) == FALSE)
stop("Output file names must be a character values")
if (format == 4 & length(mergefiles) != 1)
stop("Only one file name is needed when using format 4")
if (format < 4 & length(mergefiles) != 3)
stop("Three file names are required when using formats 1, 2, and 3")
if (is.na(match("", mergefiles)) == FALSE)
stop("Output file names cannot be blank")
if (missing(bdfiles) == TRUE)
stop("No files specified")
if (is.character(bdfiles) == FALSE)
stop("bdfiles must be a character vector")
if (length(bdfiles) < 2)
stop("At least two binary dosage files must be specified")
if (is.character(famfiles) == FALSE)
stop("famfiles must be a character vector")
if (is.character(mapfiles) == FALSE)
stop("mapfiles must be a character vector")
if (length(famfiles) != 0 & length(mapfiles) == 0)
stop("If famfiles is specified, mapfiles must be specified")
if (length(famfiles) == 0 & length(mapfiles) != 0)
stop("If mapfiles is specified, famfiles must be specified")
if (length(famfiles) != 0 & (length(famfiles) != length(bdfiles) | length(mapfiles) != length(bdfiles)))
stop("If famfiles and mapfiles are specified they must have the same length as bdfiles")
if (is.logical(onegroup) == FALSE)
stop("onegroup must be logical value")
if (length(onegroup) != 1)
stop("onegroup must be a logical vector of length 1")
if (is.character(bdoptions) == FALSE)
stop("bdoptions must be a character vector")
if (onegroup == FALSE & length(bdoptions) > 0)
stop("bdoptions can only be used if onegroup is TRUE")
if (format != 4 & length(bdoptions) > 0)
stop("bdoptions can only be used if format = 4")
if (length(bdoptions) > 0) {
if (any(is.na(match(bdoptions, c("aaf", "maf", "rsq")))) == TRUE)
stop("Only valid bdoptions are aaf, maf, and rsq")
}
if (snpjoin != "inner" & snpjoin != "outer")
stop("snpjoin must have a value of either \"inner\" or \"outer\"")
allsnps <- TRUE
if (snpjoin == "inner")
allsnps <- FALSE
bdinfo <- vector("list", length(bdfiles))
for (i in 1:length(bdfiles)) {
if (length(famfiles) > 0)
bdinfo[[i]] <- getbdinfo(c(bdfiles[i], famfiles[i], mapfiles[i]))
else
bdinfo[[i]] <- getbdinfo(bdfiles[i])
}
bdmergedinfo <- mergebdinfo(bdinfo = bdinfo,
format = format,
subformat = subformat,
onegroup = onegroup)
mergedgeneticinfo <- mergegeneticinfo(mergefile = mergefiles[1],
geneticinfo = bdinfo,
allsnps = allsnps)
mergedgeneticinfo$additionalinfo <- bdmergedinfo
snpsbtom <- list()
snpsmtob <- list()
snpsbtomNA <- list()
snpsmtobNA <- list()
for (i in 1:length(bdinfo)) {
snpsbtomNA[[i]] <- prodlim::row.match(bdinfo[[i]]$snps, mergedgeneticinfo$snps)
snpsbtom[[i]] <- snpsbtomNA[[i]][!is.na(snpsbtomNA[[i]])]
snpsmtobNA[[i]] <- prodlim::row.match(mergedgeneticinfo$snps, bdinfo[[i]]$snps)
snpsmtob[[i]] <- snpsmtobNA[[i]][!is.na(snpsmtobNA[[i]])]
}
if (onegroup == FALSE) {
mergedgeneticinfo$snpinfo <- mergesnpinfo(mergedinfo = mergedgeneticinfo,
numgroups = bdmergedinfo$numgroups,
geneticinfo = bdinfo,
snpsbtom = snpsbtom,
snpsmtob = snpsmtob)
}
WriteBinaryDosageHeader(format = bdmergedinfo$format,
subformat = bdmergedinfo$subformat,
filename = mergefiles,
genefileinfo = mergedgeneticinfo,
bdoptions = bdoptions)
headerinfo <- ReadBinaryDosageHeader(filename = mergefiles)
bdwriteinfo <- AllocateBinaryDosageWriteMemory(headerinfo = headerinfo)
dosage <- numeric(nrow(mergedgeneticinfo$samples))
p0 <- numeric(nrow(mergedgeneticinfo$samples))
p1 <- numeric(nrow(mergedgeneticinfo$samples))
p2 <- numeric(nrow(mergedgeneticinfo$samples))
us <- integer(2 * nrow(mergedgeneticinfo$samples))
dosaget <- numeric(nrow(mergedgeneticinfo$samples))
p0t <- numeric(nrow(mergedgeneticinfo$samples))
p1t <- numeric(nrow(mergedgeneticinfo$samples))
p2t <- numeric(nrow(mergedgeneticinfo$samples))
ust <- integer(2 * nrow(mergedgeneticinfo$samples))
startgroup <- integer(length(bdinfo))
endgroup <- integer(length(bdinfo))
startgroup[1] <- 1
endgroup[1] <- nrow(bdinfo[[1]]$samples)
for (i in 2:length(bdinfo)) {
startgroup[i] <- endgroup[i - 1] + 1
endgroup[i] <- endgroup[i - 1] + nrow(bdinfo[[i]]$samples)
}
for (i in 1:nrow(mergedgeneticinfo$snps)) {
for (j in 1:length(bdinfo)) {
dosage[1:nrow(mergedgeneticinfo$samples)] <- NA
p0[1:nrow(mergedgeneticinfo$samples)] <- NA
p1[1:nrow(mergedgeneticinfo$samples)] <- NA
p2[1:nrow(mergedgeneticinfo$samples)] <- NA
if (is.na(snpsmtobNA[[j]][i]) == FALSE)
ReadBinaryDosageData(bdinfo[[j]], snpsmtobNA[[j]][i], dosage, p0, p1, p2, us)
dosaget[startgroup[j]:endgroup[j]] <- dosage[1:nrow(bdinfo[[j]]$samples)]
p0t[startgroup[j]:endgroup[j]] <- p0[1:nrow(bdinfo[[j]]$samples)]
p1t[startgroup[j]:endgroup[j]] <- p1[1:nrow(bdinfo[[j]]$samples)]
p2t[startgroup[j]:endgroup[j]] <- p2[1:nrow(bdinfo[[j]]$samples)]
}
WriteBinaryDosageData(dosaget, p0t, p1t, p2t, bdwriteinfo)
}
WriteBinaryDosageIndices(writeinfo = bdwriteinfo)
mbdinfo <- getbdinfo(bdfiles = mergefiles)
if (is.na(match("aaf", bdoptions)) == FALSE)
updateaaf(mbdinfo)
if (is.na(match("maf", bdoptions)) == FALSE)
updatemaf(mbdinfo)
if (is.na(match("rsq", bdoptions)) == FALSE)
updatersq(mbdinfo)
}
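# Usage sketch (hypothetical file names): merging two format-3 binary dosage
# files, which require accompanying family and map files, keeping the union of
# the SNPs:
#   bdmerge(mergefiles = c("merged.bdose", "merged.fam", "merged.map"),
#           format = 3,
#           bdfiles = c("study1.bdose", "study2.bdose"),
#           famfiles = c("study1.fam", "study2.fam"),
#           mapfiles = c("study1.map", "study2.map"),
#           snpjoin = "outer")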
mergesnpinfo <- function (mergedinfo,
numgroups,
geneticinfo,
snpsbtom,
snpsmtob) {
hasaaf <- FALSE
hasmaf <- FALSE
hasavgcall <- FALSE
hasrsq <- FALSE
if (is.na(match("aaf", names(geneticinfo[[1]]$snpinfo))) == FALSE)
hasaaf <- TRUE
if (is.na(match("maf", names(geneticinfo[[1]]$snpinfo))) == FALSE)
hasmaf <- TRUE
if (is.na(match("avgcall", names(geneticinfo[[1]]$snpinfo))) == FALSE)
hasavgcall <- TRUE
if (is.na(match("rsq", names(geneticinfo[[1]]$snpinfo))) == FALSE)
hasrsq <- TRUE
for (i in 2:length(geneticinfo)) {
if (is.na(match("aaf", names(geneticinfo[[i]]$snpinfo))) == FALSE)
hasaaf <- TRUE
if (is.na(match("maf", names(geneticinfo[[i]]$snpinfo))) == FALSE)
hasmaf <- TRUE
if (is.na(match("avgcall", names(geneticinfo[[i]]$snpinfo))) == FALSE)
hasavgcall <- TRUE
if (is.na(match("rsq", names(geneticinfo[[i]]$snpinfo))) == FALSE)
hasrsq <- TRUE
}
snpinfo <- list()
numsnpinfo <- 0
snpinfonames <- character(0)
if (hasaaf == TRUE) {
numsnpinfo <- numsnpinfo + 1
snpinfo[[numsnpinfo]] <- matrix(rep(as.numeric(NA), nrow(mergedinfo$snps) * numgroups),
nrow(mergedinfo$snps), numgroups)
snpinfonames <- c(snpinfonames, "aaf")
}
if (hasmaf == TRUE) {
numsnpinfo <- numsnpinfo + 1
snpinfo[[numsnpinfo]] <- matrix(rep(as.numeric(NA), nrow(mergedinfo$snps) * numgroups),
nrow(mergedinfo$snps), numgroups)
snpinfonames <- c(snpinfonames, "maf")
}
if (hasavgcall == TRUE) {
numsnpinfo <- numsnpinfo + 1
snpinfo[[numsnpinfo]] <- matrix(rep(as.numeric(NA), nrow(mergedinfo$snps) * numgroups),
nrow(mergedinfo$snps), numgroups)
snpinfonames <- c(snpinfonames, "avgcall")
}
if (hasrsq == TRUE) {
numsnpinfo <- numsnpinfo + 1
snpinfo[[numsnpinfo]] <- matrix(rep(as.numeric(NA), nrow(mergedinfo$snps) * numgroups),
nrow(mergedinfo$snps), numgroups)
snpinfonames <- c(snpinfonames, "rsq")
}
if(numsnpinfo > 0) {
names(snpinfo) <- snpinfonames
currentgroup <- 1L
for (i in 1:length(geneticinfo)) {
setgroups <- 1L
if (inherits(geneticinfo[[i]]$additionalinfo, "bdose-info"))
setgroups <- geneticinfo[[i]]$additionalinfo$numgroups
for (j in 1:numsnpinfo) {
if (is.na(match(names(snpinfo)[j], names(geneticinfo[[i]]$snpinfo))) == FALSE) {
snpinfo[[j]][snpsbtom[[i]], currentgroup:(currentgroup + setgroups - 1)] <-
geneticinfo[[i]]$snpinfo[[names(snpinfo)[j]]][snpsmtob[[i]],]
}
}
currentgroup <- currentgroup + setgroups
}
}
return(snpinfo)
}
mergegeneticinfo <- function(mergefile,
geneticinfo,
allsnps) {
usesfid <- geneticinfo[[1]]$usesfid
samples <- geneticinfo[[1]]$samples
snpidformat <- geneticinfo[[1]]$snpidformat
snps <- geneticinfo[[1]]$snps
for (i in 2:length(geneticinfo)) {
if (geneticinfo[[i]]$usesfid != usesfid)
stop("Some files use FID and others do not")
if (snpidformat != geneticinfo[[i]]$snpidformat)
snpidformat <- 0L
samples <- rbind(samples, geneticinfo[[i]]$samples)
snps <- merge(snps, geneticinfo[[i]]$snps, all = allsnps)
}
if (nrow(unique(samples)) != nrow(samples))
stop("There are duplicate samples in the files to merge")
chr1 <- snps$chromosome[1]
onechr <- all(snps$chromosome == chr1)
return (list(filename = mergefile,
usesfid = usesfid,
samples = samples,
onechr = onechr,
snpidformat = snpidformat,
snps = snps,
snpinfo = list(),
datasize = integer(0),
indices = numeric(0)))
}
mergebdinfo <- function(bdinfo, format, subformat, onegroup) {
if (onegroup) {
numgroups <- 1L
groups <- 0L
for (i in 1:length(bdinfo))
groups <- groups + nrow(bdinfo[[i]]$samples)
} else {
groups <- integer()
for (i in 1:length(bdinfo))
groups <- c(groups, bdinfo[[i]]$additionalinfo$groups)
numgroups <- length(groups)
}
if (subformat == 0L) {
subformat <- 2L
for (i in 1:length(bdinfo)) {
if (bdinfo[[i]]$additionalinfo$subformat == 1)
subformat <- 1L
}
}
retval <- list(format = format,
subformat = subformat,
headersize = 0,
numgroups = numgroups,
groups = groups)
class(retval) <- "bdose-info"
return (retval)
}
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/R/Merge.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
GetLineLocations <- function(filename) {
.Call(`_BinaryDosage_GetLineLocations`, filename)
}
ReadBinaryDosageBaseHeader <- function(filename) {
.Call(`_BinaryDosage_ReadBinaryDosageBaseHeader`, filename)
}
ReadBinaryDosageHeader3A <- function(filename) {
.Call(`_BinaryDosage_ReadBinaryDosageHeader3A`, filename)
}
ReadBinaryDosageHeader3B <- function(filename) {
.Call(`_BinaryDosage_ReadBinaryDosageHeader3B`, filename)
}
ReadBinaryDosageHeader4A <- function(filename) {
.Call(`_BinaryDosage_ReadBinaryDosageHeader4A`, filename)
}
ReadBinaryDosageHeader4B <- function(filename) {
.Call(`_BinaryDosage_ReadBinaryDosageHeader4B`, filename)
}
ReadBDIndices3C <- function(filename, numSNPs, indexStart) {
.Call(`_BinaryDosage_ReadBDIndices3C`, filename, numSNPs, indexStart)
}
ReadBDIndices4C <- function(filename, numSNPs, headersize) {
.Call(`_BinaryDosage_ReadBDIndices4C`, filename, numSNPs, headersize)
}
ReadBinaryDosageDataC <- function(filename, headersize, numsub, snp, dosage, us, base) {
.Call(`_BinaryDosage_ReadBinaryDosageDataC`, filename, headersize, numsub, snp, dosage, us, base)
}
ReadBinaryDosageDataP1P2 <- function(filename, headersize, numsub, snp, dosage, p0, p1, p2, us, base) {
.Call(`_BinaryDosage_ReadBinaryDosageDataP1P2`, filename, headersize, numsub, snp, dosage, p0, p1, p2, us, base)
}
ReadBinaryDosageDataCompressed <- function(filename, index, datasize, numsub, dosage, p0, p1, p2, us) {
.Call(`_BinaryDosage_ReadBinaryDosageDataCompressed`, filename, index, datasize, numsub, dosage, p0, p1, p2, us)
}
WriteBinaryDosageBaseHeader <- function(filename, format, subformat) {
.Call(`_BinaryDosage_WriteBinaryDosageBaseHeader`, filename, format, subformat)
}
WriteBinaryDosageHeader3A <- function(filename, numSubjects) {
.Call(`_BinaryDosage_WriteBinaryDosageHeader3A`, filename, numSubjects)
}
WriteBinaryDosageHeader3B <- function(filename, md5samples, md5SNPs, numIndices) {
.Call(`_BinaryDosage_WriteBinaryDosageHeader3B`, filename, md5samples, md5SNPs, numIndices)
}
WriteBinaryDosageHeader4A <- function(filename, headerEntries, numSubjects, numSNPs, groups, sid, fid, snpid, chromosome, location, reference, alternate, aaf, maf, avgCall, rsq, offsets, numIndices) {
.Call(`_BinaryDosage_WriteBinaryDosageHeader4A`, filename, headerEntries, numSubjects, numSNPs, groups, sid, fid, snpid, chromosome, location, reference, alternate, aaf, maf, avgCall, rsq, offsets, numIndices)
}
WriteBinaryDosageDataC <- function(filename, dosage, us, base) {
.Call(`_BinaryDosage_WriteBinaryDosageDataC`, filename, dosage, us, base)
}
WriteBinaryP1P2Data <- function(filename, p1, p2, us, base) {
.Call(`_BinaryDosage_WriteBinaryP1P2Data`, filename, p1, p2, us, base)
}
WriteBinaryCompressed <- function(filename, dosage, p0, p1, p2, snpnumber, datasize, us) {
.Call(`_BinaryDosage_WriteBinaryCompressed`, filename, dosage, p0, p1, p2, snpnumber, datasize, us)
}
WriteBinaryDosageIndicesC <- function(filename, headersize, datasize) {
.Call(`_BinaryDosage_WriteBinaryDosageIndicesC`, filename, headersize, datasize)
}
updatesnpinfo <- function(filename, offset, value) {
.Call(`_BinaryDosage_updatesnpinfo`, filename, offset, value)
}
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/R/RcppExports.R
|
#***************************************************************************#
# #
# Reading Binary Dosage files #
# #
#***************************************************************************#
#***************************************************************************#
# #
# Reading the Binary Dosage header #
# #
#***************************************************************************#
# Reads the header for the various formats of the binary dosage
# file. The header layout differs across the formats and
# subformats.
ReadBinaryDosageHeader <- function(filename) {
ReadHeaderFunc <- list(f1 = c(ReadBinaryDosageHeader11, ReadBinaryDosageHeader12),
f2 = c(ReadBinaryDosageHeader21, ReadBinaryDosageHeader22),
f3 = c(ReadBinaryDosageHeader31, ReadBinaryDosageHeader32, ReadBinaryDosageHeader33, ReadBinaryDosageHeader34),
f4 = c(ReadBinaryDosageHeader41, ReadBinaryDosageHeader42, ReadBinaryDosageHeader43, ReadBinaryDosageHeader44))
bdformat <- ReadBinaryDosageBaseHeader(filename[1])
if (is.na(match("error", names(bdformat))) == FALSE)
stop(bdformat$error)
if (bdformat$format == 4) {
if (length(filename) != 1)
stop("Binary dosage file format 4 does not use family and map files")
} else {
if (length(filename) != 3)
stop("Binary dosage file format 1, 2, and 3 require family and map files")
}
return (ReadHeaderFunc[[bdformat$format]][[bdformat$subformat]](filename))
}
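# Usage sketch (not part of the original source; uncomment to run
# interactively). This assumes the example file shipped with the package is
# format 4, as the exported examples suggest: a format 4 file is read with a
# single file name, while formats 1, 2, and 3 would also need the fam and
# map file names.
# bdfile <- system.file("extdata", "vcf1a.bdose", package = "BinaryDosage")
# hdr <- ReadBinaryDosageHeader(bdfile)
# hdr$additionalinfo$format   # format of the file
# nrow(hdr$samples)           # number of subjects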
#***************************************************************************#
# Routines to read the subject and SNP info #
#***************************************************************************#
# Function to read subject and map files for formats 1, 2, and 3
# @parameter filename - vector of files names for binary dosage files
# first value is the binary dosage file, the second value is the fam file
# and the third is the map file
# @parameter format - format of the binary dosage file
# @parameter subformat - subformat of the binary dosage file
# @parameter headersize - size of the binary dosage header in bytes
# @return - The subject and family data needed to create a bdinfo list
ReadFamAndMapFiles <- function(filename, format, subformat, headersize) {
fqfilename <- normalizePath(filename[1], winslash = '/')
samples <- readRDS(filename[2])
if (all(samples$fid == "") == TRUE)
usesfid <- FALSE
else
usesfid <- TRUE
snps <- readRDS(filename[3])
chr1 <- snps$chromosome[1]
onechr <- all(snps$chromosome == chr1)
chrlocid <- paste(snps$chromosome, snps$location, sep = ":")
if(all(snps$snpid == chrlocid) == TRUE)
snpidformat <- 1
else {
chrlocrefaltid <- paste(snps$chromosome, snps$location,
snps$reference, snps$alternate, sep = ":")
if (all(snps$snpid == chrlocrefaltid) == TRUE)
snpidformat <- 2
else
snpidformat <- 0
}
additionalinfo <- list(format = format,
subformat = subformat,
headersize = headersize,
numGroups = 1,
groups = nrow(samples))
class(additionalinfo) <- "bdose-info"
return (list(filename = fqfilename,
usesfid = usesfid,
samples = samples,
onechr = onechr,
snpidformat = snpidformat,
snps = snps,
snpinfo = list(),
additionalinfo = additionalinfo))
}
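# Illustration of the snpid format detection above (not part of the
# original source; uncomment to run): IDs equal to chromosome:location give
# snpidformat 1, IDs equal to chromosome:location:reference:alternate give
# snpidformat 2, and anything else gives 0.
# snps <- data.frame(chromosome = "1", location = c(100L, 200L),
#                    snpid = c("1:100", "1:200"),
#                    stringsAsFactors = FALSE)
# all(snps$snpid == paste(snps$chromosome, snps$location, sep = ":")) # TRUE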
# Function to convert the binary dosage header subject and SNP info
# into the bdinfo format
# @parameter filename - vector of file names for the binary dosage
# file
# @parameter header - header information read from the binary dosage
# file
# @parameter format - format of the binary dosage file - should be 4
# @parameter subformat - subformat of the binary dosage file
# @return - The subject and family data needed to create a bdinfo list
Convert4HeaderToBDInfo <- function(filename, header, format, subformat) {
sid <- unlist(strsplit(header$samples$sidstring, '\t'))
if (header$samples$fidsize == 0) {
usesfid <- FALSE
fid = rep("", header$numsub)
} else {
usesfid <- TRUE
fid <- unlist(strsplit(header$samples$fidstring, '\t'))
}
samples <- data.frame(fid, sid, stringsAsFactors = FALSE)
if (header$snps$chrsize == 0) {
onechr <- FALSE
chromosome <- rep("", header$numSNPs)
} else {
chromosome <- unlist(strsplit(header$snps$chrstring, '\t'))
if (length(chromosome) == 1) {
onechr <- TRUE
chromosome <- rep(header$snps$chrstring, header$numSNPs)
} else {
onechr <- FALSE
}
}
if (length(header$snps$location) == 0)
location <- rep(0L, header$numSNPs)
else
location <- header$snps$location
if (header$snps$refsize == 0)
reference <- rep("", header$numSNPs)
else
reference <- unlist(strsplit(header$snps$refstring, '\t'))
if (header$snps$altsize == 0)
alternate <- rep("", header$numSNPs)
else
alternate <- unlist(strsplit(header$snps$altstring, '\t'))
if (header$snps$snpsize == 0) {
if (header$snps$refsize == 0) {
snpid <- paste(chromosome, location, sep = ':')
snpidformat <- 1
} else {
snpid <- paste(chromosome, location, reference, alternate, sep = ':')
snpidformat <- 2
}
} else {
snpid <- unlist(strsplit(header$snps$snpstring, '\t'))
snpidformat <- 0
}
snps <- data.frame(chromosome, location, snpid, reference, alternate, stringsAsFactors = FALSE)
snpinfocol <- match(c("aaf", "maf", "avgcall", "rsq"), names(header$snps))
snpinfocol <- snpinfocol[sapply(header$snps, function(x) length(x) != 0)[snpinfocol]]
snpinfo <- lapply(header$snps[snpinfocol], matrix, nrow = header$numSNPs, ncol = header$numgroups)
additionalinfo <- list(format = format,
subformat = subformat,
headersize = header$dosageoffset,
numgroups = header$numgroups,
groups = header$groups)
class(additionalinfo) <- "bdose-info"
return (list(filename = normalizePath(filename[1], winslash = "/"),
usesfid = usesfid,
samples = samples,
onechr = onechr,
snpidformat = snpidformat,
snps = snps,
snpinfo = snpinfo,
additionalinfo = additionalinfo))
}
ReadBinaryDosageHeader11 <- function(filename) {
return (ReadFamAndMapFiles(filename, 1, 1, 8))
}
ReadBinaryDosageHeader12 <- function(filename) {
return (ReadFamAndMapFiles(filename, 1, 2, 8))
}
ReadBinaryDosageHeader21 <- function(filename) {
return (ReadFamAndMapFiles(filename, 2, 1, 8))
}
ReadBinaryDosageHeader22 <- function(filename) {
return (ReadFamAndMapFiles(filename, 2, 2, 8))
}
ReadBinaryDosageHeader31 <- function(filename) {
bdInfo <- ReadFamAndMapFiles(filename, 3, 1, 12)
additionalInfo = ReadBinaryDosageHeader3A(filename[1])
if (additionalInfo$numsub != nrow(bdInfo$samples))
stop("Subject file does not line up with binary dosage file")
return (bdInfo)
}
ReadBinaryDosageHeader32 <- function(filename) {
bdInfo <- ReadFamAndMapFiles(filename, 3, 2, 12)
additionalInfo = ReadBinaryDosageHeader3A(filename[1])
if (additionalInfo$numsub != nrow(bdInfo$samples))
stop("Subject file does not line up with binary dosage file")
return (bdInfo)
}
ReadBinaryDosageHeader33 <- function(filename) {
bdInfo <- ReadFamAndMapFiles(filename, 3, 3, 72)
additionalInfo = ReadBinaryDosageHeader3B(filename[1])
if (digest(bdInfo$samples) != additionalInfo$md5[1])
stop("Subject file does not line up with binary dosage file")
if (digest(bdInfo$snps) != additionalInfo$md5[2])
stop("Map file does not line up with binary dosage file")
return (bdInfo)
}
ReadBinaryDosageHeader34 <- function(filename) {
bdInfo <- ReadFamAndMapFiles(filename, 3, 4, 72)
bdInfo$additionalinfo$headersize <- bdInfo$additionalinfo$headersize + 4 * nrow(bdInfo$snps)
additionalInfo = ReadBinaryDosageHeader3B(filename[1])
if (digest(bdInfo$samples) != additionalInfo$md5[1])
stop("Subject file does not line up with binary dosage file")
if (digest(bdInfo$snps) != additionalInfo$md5[2])
stop("Map file does not line up with binary dosage file")
return (bdInfo)
}
ReadBinaryDosageHeader41 <- function(filename) {
header <- ReadBinaryDosageHeader4A(filename[1])
return (Convert4HeaderToBDInfo(filename, header, 4, 1))
}
ReadBinaryDosageHeader42 <- function(filename) {
header <- ReadBinaryDosageHeader4A(filename[1])
return (Convert4HeaderToBDInfo(filename, header, 4, 2))
}
ReadBinaryDosageHeader43 <- function(filename) {
header <- ReadBinaryDosageHeader4B(filename[1])
bdInfo <- Convert4HeaderToBDInfo(filename, header, 4, 3)
return (bdInfo)
}
ReadBinaryDosageHeader44 <- function(filename) {
header <- ReadBinaryDosageHeader4B(filename[1])
bdInfo <- Convert4HeaderToBDInfo(filename, header, 4, 4)
return (bdInfo)
}
#***************************************************************************#
# #
# Getting the indices for a binary dosage file #
# #
#***************************************************************************#
# Gets the file locations for snps in a binary dosage file
ReadBinaryDosageIndices <- function(bdInfo) {
ReadIndicesFunc <- list(f1 <- c(ReadIndices1, ReadIndices2),
f2 <- c(ReadIndices1, ReadIndices2),
f3 <- c(ReadIndices1, ReadIndices3, ReadIndices1, ReadIndices4),
f4 <- c(ReadIndices1, ReadIndices3, ReadIndices1, ReadIndices4))
return (ReadIndicesFunc[[bdInfo$additionalinfo$format]][[bdInfo$additionalinfo$subformat]](bdInfo))
}
# Function to set up the datasize and indices vectors when the
# data size is the same for each SNP.
# @parameter numsub - number of subjects in data set
# @parameter numSNPs - number of snps
# @parameter firstIndex - file offset of the first SNP's data
# @parameter snpsize - number of bytes per subject per SNP
FixedIndices <- function(numsub, numSNPs, firstIndex, snpsize) {
datasize <- snpsize * numsub
indices <- seq(firstIndex, firstIndex + datasize * (numSNPs - 1), datasize)
return (list(datasize = datasize, indices = indices))
}
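# Worked sketch of the arithmetic above (not part of the original source;
# uncomment to run): 2 bytes per subject per SNP with 5 subjects and 3 SNPs,
# data starting at byte 8, gives a datasize of 10 and indices 8, 18, 28.
# FixedIndices(numsub = 5, numSNPs = 3, firstIndex = 8, snpsize = 2)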
# This routine sets up the indices when only the dosages are
# in the binary dosage file. This is simple because the size
# is fixed 2 bytes per subject per SNP
ReadIndices1 <- function(bdInfo) {
return (FixedIndices(nrow(bdInfo$samples),
nrow(bdInfo$snps),
bdInfo$additionalinfo$headersize, 2))
}
# This routine sets up the indices when reading formats 1 and 2
# when there is both dosage and genetic probability data. This
# is also simple because the size is fixed to 4 bytes per subject
# per SNP
ReadIndices2 <- function(bdInfo) {
return (FixedIndices(nrow(bdInfo$samples),
nrow(bdInfo$snps),
bdInfo$additionalinfo$headersize, 4))
}
# This routine sets up the indices when reading formats 3 and 4,
# subformat 2. The indices are stored at the start of each SNP's
# dosage data. This is a time consuming operation.
ReadIndices3 <- function(bdInfo) {
return (ReadBDIndices3C(bdInfo$filename, nrow(bdInfo$snps), bdInfo$additionalinfo$headersize))
}
# This routine sets up the indices when reading formats 3 and 4,
# subformat 4. The indices are stored in the header and are easy
# to read in.
ReadIndices4 <- function(bdInfo) {
return (ReadBDIndices4C(bdInfo$filename, nrow(bdInfo$snps), bdInfo$additionalinfo$headersize))
}
#***************************************************************************#
# #
# Read the data #
# #
#***************************************************************************#
# Reads a SNP from the various formats
# of the binary dosage file.
ReadBinaryDosageData <- function(bdInfo, snp, d, p0, p1, p2, us) {
ReadHeaderFunc <- list(f1 <- c(ReadBinaryDosageData1, ReadBinaryDosageData2),
f2 <- c(ReadBinaryDosageData3, ReadBinaryDosageData4),
f3 <- c(ReadBinaryDosageData3, ReadBinaryDosageData5, ReadBinaryDosageData3, ReadBinaryDosageData5),
f4 <- c(ReadBinaryDosageData3, ReadBinaryDosageData5, ReadBinaryDosageData3, ReadBinaryDosageData5))
return (ReadHeaderFunc[[bdInfo$additionalinfo$format]][[bdInfo$additionalinfo$subformat]](bdInfo, snp, d, p0, p1, p2, us))
}
ReadBinaryDosageData1 <- function(bdInfo, snp, d, p0, p1, p2, us) {
ReadBinaryDosageDataC(filename = bdInfo$filename,
headersize = bdInfo$additionalinfo$headersize,
numsub = nrow(bdInfo$samples),
snp = snp,
dosage = d,
us = us,
base = 1)
}
ReadBinaryDosageData2 <- function(bdInfo, snp, d, p0, p1, p2, us) {
ReadBinaryDosageDataP1P2(filename = bdInfo$filename,
headersize = bdInfo$additionalinfo$headersize,
numsub = nrow(bdInfo$samples),
snp = snp,
dosage = d,
p0 = p0,
p1 = p1,
p2 = p2,
us = us,
base = 2)
}
ReadBinaryDosageData3 <- function(bdInfo, snp, d, p0, p1, p2, us) {
ReadBinaryDosageDataC(filename = bdInfo$filename,
headersize = bdInfo$additionalinfo$headersize,
numsub = nrow(bdInfo$samples),
snp = snp,
dosage = d,
us = us,
base = 3)
}
ReadBinaryDosageData4 <- function(bdInfo, snp, d, p0, p1, p2, us) {
ReadBinaryDosageDataP1P2(filename = bdInfo$filename,
headersize = bdInfo$additionalinfo$headersize,
numsub = nrow(bdInfo$samples),
snp = snp,
dosage = d,
p0 = p0,
p1 = p1,
p2 = p2,
us = us,
base = 3)
}
ReadBinaryDosageData5 <- function(bdInfo, snp, d, p0, p1, p2, us) {
return (ReadBinaryDosageDataCompressed(filename = bdInfo$filename,
index = bdInfo$indices[snp],
datasize = bdInfo$datasize[snp],
numsub = nrow(bdInfo$samples),
dosage = d,
p0 = p0,
p1 = p1,
p2 = p2,
us = us))
}
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/R/ReadBinaryDosage.R
|
#***************************************************************************#
# #
# Writing Binary Dosage files #
# #
#***************************************************************************#
#***************************************************************************#
# Support functions for subject and SNP data #
#***************************************************************************#
# Formats 1, 2, and 3 all have a separate family and map file
# This routine saves the data frames in RDS format
WriteFamilyAndMapFiles <- function(filename, samples, snps) {
saveRDS(samples, filename[2])
saveRDS(snps, filename[3])
  return (c(digest(samples, "md5"), digest(snps, "md5")))
}
# Find the groups value in the genetic file info. If it doesn't
# exist, the number of samples is returned.
FindGroups <- function(geneticfileinfo) {
  x <- match("groups", names(geneticfileinfo$additionalinfo))
  if (is.na(x) == TRUE)
    return (nrow(geneticfileinfo$samples))
  return(geneticfileinfo$additionalinfo$groups)
}
}
# Create the subject and family strings to write to the binary
# dosage header for format 4
SIDandFID4 <- function(genefileinfo) {
sid <- paste(genefileinfo$samples$sid, collapse = '\t')
if (genefileinfo$usesfid == TRUE)
fid <- paste(genefileinfo$samples$fid, collapse = '\t')
else
fid <- ""
return (list(sid = sid, fid = fid))
}
# Find a numeric vector in the SNP info data frame or the
# bdoptions. If the value is named in bdoptions, a vector of
# zeros is returned (the values are calculated later). Otherwise
# the vector is returned if found in the SNP info; if it is not
# found, a numeric vector of length 0 is returned.
FindSNPInfoNumeric <- function(tofind, snpinfo, numsnps, bdoptions) {
if (is.na(match(tofind, bdoptions)) == FALSE)
return (numeric(numsnps))
infocol <- match(tofind, names(snpinfo))
if (is.na(infocol) == FALSE)
return (as.vector(snpinfo[[infocol]]))
return (numeric(0))
}
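# Behavior sketch (not part of the original source; uncomment to run):
# naming "aaf" in bdoptions yields a placeholder vector of zeros to be
# filled in later; otherwise the stored vector is returned if present,
# and numeric(0) if not.
# snpinfo <- list(aaf = c(0.1, 0.4))
# FindSNPInfoNumeric("aaf", snpinfo, 2, bdoptions = "aaf")         # 0 0
# FindSNPInfoNumeric("aaf", snpinfo, 2, bdoptions = character(0))  # 0.1 0.4
# FindSNPInfoNumeric("rsq", snpinfo, 2, bdoptions = character(0))  # numeric(0)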
# Find the SNP information needed for format 4
FindBDSNPInfo <- function(genefileinfo, bdoptions) {
snpcolnames <- colnames(genefileinfo$snps)
if (genefileinfo$snpidformat == 0)
snpid <- genefileinfo$snps$snpid
else
snpid <- ""
chr <- genefileinfo$snps$chromosome
if (genefileinfo$onechr == TRUE)
chr <- chr[1]
loc <- genefileinfo$snps$location
ref <- genefileinfo$snps$reference
alt <- genefileinfo$snps$alternate
aaf <- FindSNPInfoNumeric("aaf", genefileinfo$snpinfo, nrow(genefileinfo$snps), bdoptions)
maf <- FindSNPInfoNumeric("maf", genefileinfo$snpinfo, nrow(genefileinfo$snps), bdoptions)
# Average call cannot be calculated, therefore bdoptions are meaningless
avgcall <- FindSNPInfoNumeric("avgcall", genefileinfo$snpinfo, nrow(genefileinfo$snps), "")
rsq <- FindSNPInfoNumeric("rsq", genefileinfo$snpinfo, nrow(genefileinfo$snps), bdoptions)
snpid <- paste0(snpid, collapse = '\t')
chr <- paste0(chr, collapse = '\t')
ref <- paste0(ref, collapse = '\t')
alt <- paste0(alt, collapse = '\t')
return (list(snpid = snpid,
chromosome = chr,
location = loc,
reference = ref,
alternate = alt,
aaf = aaf,
maf = maf,
avgcall = avgcall,
rsq = rsq))
}
#***************************************************************************#
# #
# Writing the Binary Dosage header #
# #
#***************************************************************************#
# Writes the header of a binary dosage file. The header layout
# differs across the various formats and subformats.
WriteBinaryDosageHeader <- function(format, subformat, filename, genefileinfo, bdoptions) {
writeHeaderFunc <- list(f1 <- c(WriteBinaryDosageHeader1, WriteBinaryDosageHeader1),
f2 <- c(WriteBinaryDosageHeader1, WriteBinaryDosageHeader1),
f3 <- c(WriteBinaryDosageHeader31, WriteBinaryDosageHeader31, WriteBinaryDosageHeader33, WriteBinaryDosageHeader34),
f4 <- c(WriteBinaryDosageHeader41, WriteBinaryDosageHeader41, WriteBinaryDosageHeader43, WriteBinaryDosageHeader44))
WriteBinaryDosageBaseHeader(filename[1], format - 1, subformat - 1)
return (writeHeaderFunc[[format]][[subformat]](filename, genefileinfo, bdoptions))
}
WriteBinaryDosageHeader1 <- function(filename, genefileinfo, bdoptions) {
md5 <- WriteFamilyAndMapFiles(filename, genefileinfo$samples, genefileinfo$snps)
return (0)
}
WriteBinaryDosageHeader31 <- function(filename, genefileinfo, bdoptions) {
md5 <- WriteFamilyAndMapFiles(filename, genefileinfo$samples, genefileinfo$snps)
WriteBinaryDosageHeader3A(filename[1], nrow(genefileinfo$samples))
return (0)
}
WriteBinaryDosageHeader33 <- function(filename, genefileinfo, bdoptions) {
md5 <- WriteFamilyAndMapFiles(filename, genefileinfo$samples, genefileinfo$snps)
WriteBinaryDosageHeader3B(filename[1], md5[1], md5[2], 0)
return (0)
}
WriteBinaryDosageHeader34 <- function(filename, genefileinfo, bdoptions) {
md5 <- WriteFamilyAndMapFiles(filename, genefileinfo$samples, genefileinfo$snps)
WriteBinaryDosageHeader3B(filename[1], md5[1], md5[2], nrow(genefileinfo$snps))
return (0)
}
WriteBinaryDosageHeader4 <- function(filename, genefileinfo, bdoptions,
headerEntries, offsets, numindices) {
subInfo <- SIDandFID4(genefileinfo)
snpInfo <- FindBDSNPInfo(genefileinfo, bdoptions)
WriteBinaryDosageHeader4A(filename[1],
headerEntries,
nrow(genefileinfo$samples),
nrow(genefileinfo$snps),
FindGroups(genefileinfo),
subInfo$sid[1],
subInfo$fid[1],
snpInfo$snpid[1],
snpInfo$chromosome[1],
snpInfo$location,
snpInfo$reference[1],
snpInfo$alternate[1],
snpInfo$aaf,
snpInfo$maf,
snpInfo$avgcall,
snpInfo$rsq,
offsets,
numindices)
return (0)
}
WriteBinaryDosageHeader41 <- function(filename, genefileinfo, bdoptions) {
headerEntries <- 8L
offsets <- c(seq(8L, 36L, 4L), 36L)
return (WriteBinaryDosageHeader4(filename, genefileinfo, bdoptions,
headerEntries, offsets, 0))
}
WriteBinaryDosageHeader43 <- function(filename, genefileinfo, bdoptions) {
  headerEntries <- 4L
offsets <- c(rep(-1L, 5), seq(8L, 20L, 4L))
return (WriteBinaryDosageHeader4(filename, genefileinfo, bdoptions,
headerEntries, offsets, 0))
}
WriteBinaryDosageHeader44 <- function(filename, genefileinfo, bdoptions) {
  headerEntries <- 4L
offsets <- c(rep(-1L, 5), seq(8L, 20L, 4L))
return (WriteBinaryDosageHeader4(filename, genefileinfo, bdoptions,
headerEntries, offsets, nrow(genefileinfo$snps)))
}
#***************************************************************************#
# #
# Writing the Binary Dosage data #
# #
#***************************************************************************#
#***************************************************************************#
# Allocate memory #
#***************************************************************************#
# Allocates memory needed to write binary dosage files
# This is sufficient for all formats
AllocateBinaryDosageWriteMemory <- function(headerinfo) {
filename <- headerinfo$filename
format <- headerinfo$additionalinfo$format
subformat <- headerinfo$additionalinfo$subformat
headersize <- headerinfo$additionalinfo$headersize
snpnumber <- integer(1)
datasize <- integer(nrow(headerinfo$snps))
us <- integer(2*nrow(headerinfo$samples))
return(list(filename = filename,
format = format,
subformat = subformat,
headersize = headersize,
snpnumber = snpnumber,
datasize = datasize,
us = us))
}
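# Sketch of the intended write workflow, as used by vcftobd and gentobd
# below (not part of the original source):
# 1. WriteBinaryDosageHeader() writes the header of the new file,
# 2. ReadBinaryDosageHeader() re-reads it to recover the header size,
# 3. AllocateBinaryDosageWriteMemory() builds the write state above,
# 4. vcfapply/genapply call WriteBinaryDosageData() once per SNP, and
# 5. WriteBinaryDosageIndices() writes the indices back into the header.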
#***************************************************************************#
# Write the data #
#***************************************************************************#
# Write binary dosage data at the end of the file
# Header has already been written
# writeinfo was already created using AllocateBinaryDosageWriteMemory (see above)
WriteBinaryDosageData <- function(dosage, p0, p1, p2, writeinfo) {
writeFunc <- list(f1 <- c(WriteBinaryDosageData1, WriteBinaryDosageData2),
f2 <- c(WriteBinaryDosageData3, WriteBinaryDosageData4),
f3 <- c(WriteBinaryDosageData3, WriteBinaryDosageData5, WriteBinaryDosageData3, WriteBinaryDosageData6),
f4 <- c(WriteBinaryDosageData3, WriteBinaryDosageData5, WriteBinaryDosageData3, WriteBinaryDosageData6))
return (writeFunc[[writeinfo$format]][[writeinfo$subformat]](writeinfo, dosage, p0, p1, p2))
}
WriteBinaryDosageData1 <- function(writeinfo, dosage, p0, p1, p2) {
return (WriteBinaryDosageDataC(writeinfo$filename, dosage, writeinfo$us, 1))
}
WriteBinaryDosageData2 <- function(writeinfo, dosage, p0, p1, p2) {
return (WriteBinaryP1P2Data(writeinfo$filename, p1, p2, writeinfo$us, 2))
}
WriteBinaryDosageData3 <- function(writeinfo, dosage, p0, p1, p2) {
return (WriteBinaryDosageDataC(writeinfo$filename, dosage, writeinfo$us, 3))
}
WriteBinaryDosageData4 <- function(writeinfo, dosage, p0, p1, p2) {
return (WriteBinaryP1P2Data(writeinfo$filename, p1, p2, writeinfo$us, 3))
}
WriteBinaryDosageData5 <- function(writeinfo, dosage, p0, p1, p2) {
snpnumber <- -1L
return (WriteBinaryCompressed(writeinfo$filename,
dosage, p0, p1, p2,
snpnumber,
writeinfo$datasize,
writeinfo$us))
}
WriteBinaryDosageData6 <- function(writeinfo, dosage, p0, p1, p2) {
return (WriteBinaryCompressed(writeinfo$filename,
dosage, p0, p1, p2,
writeinfo$snpnumber,
writeinfo$datasize,
writeinfo$us))
}
#***************************************************************************#
# #
# Writing the Binary Dosage indices #
# #
#***************************************************************************#
# Write binary dosage indices to the file
# Header has already been written
# funcData was already created using AllocateBinaryDosageWriteMemory (see above)
WriteBinaryDosageIndices <- function(writeinfo) {
writeFunc <- list(f1 <- c(WriteBinaryDosageIndices1, WriteBinaryDosageIndices1),
f2 <- c(WriteBinaryDosageIndices1, WriteBinaryDosageIndices1),
f3 <- c(WriteBinaryDosageIndices1, WriteBinaryDosageIndices1, WriteBinaryDosageIndices1, WriteBinaryDosageIndices2),
f4 <- c(WriteBinaryDosageIndices1, WriteBinaryDosageIndices1, WriteBinaryDosageIndices1, WriteBinaryDosageIndices2))
return (writeFunc[[writeinfo$format]][[writeinfo$subformat]](writeinfo))
}
WriteBinaryDosageIndices1 <- function(writeinfo) {
return (0)
}
WriteBinaryDosageIndices2 <- function(writeinfo) {
return(WriteBinaryDosageIndicesC(writeinfo$filename, writeinfo$headersize, writeinfo$datasize))
}
#***************************************************************************#
# #
# Update snp info, aaf, maf, and rsq #
# #
#***************************************************************************#
#' Calculate alternate allele frequency
#'
#' Routine to calculate the alternate allele frequency given the dosages.
#' Missing values for dosage are ignored. This function is used internally and
#' is exported for use in examples.
#'
#' @param dosage Dosage values
#' @param p0 Pr(g=0) - unused
#' @param p1 Pr(g=1) - unused
#' @param p2 Pr(g=2) - unused
#'
#' @return
#' Alternate allele frequency
#' @export
#'
#' @examples
#' # Get information about binary dosage file
#' bdfile <- system.file("extdata", "vcf1a.bdose", package = "BinaryDosage")
#' bdinfo <- getbdinfo(bdfiles = bdfile)
#' snp1 <- getsnp(bdinfo = bdinfo, 1)
#' aaf <- getaaf(snp1$dosage)
getaaf <- function(dosage, p0, p1, p2) {
return (mean(dosage, na.rm = TRUE) / 2)
}
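# Worked sketch (not part of the original source; uncomment to run):
# dosages 0, 1, 2 with one missing value give mean(c(0, 1, 2)) / 2 = 0.5.
# The probability arguments are unused and may be omitted.
# getaaf(c(0, 1, 2, NA))  # 0.5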
#' Calculate minor allele frequency
#'
#' Routine to calculate the minor allele frequency given the dosages.
#' Missing values for dosage are ignored. This function is used internally and
#' is exported for use in examples. Note: The minor allele in one data set
#' may be different from another data set. This can make comparing minor
#' allele frequencies between data sets nonsensical.
#'
#' @param dosage Dosage values
#' @param p0 Pr(g=0) - unused
#' @param p1 Pr(g=1) - unused
#' @param p2 Pr(g=2) - unused
#'
#' @return
#' Minor allele frequency
#' @export
#'
#' @examples
#' # Get information about binary dosage file
#' bdfile <- system.file("extdata", "vcf1a.bdose", package = "BinaryDosage")
#' bdinfo <- getbdinfo(bdfiles = bdfile)
#' snp1 <- getsnp(bdinfo = bdinfo, 1)
#' maf <- getmaf(snp1$dosage)
getmaf <- function(dosage, p0, p1, p2) {
aaf <- mean(dosage, na.rm = TRUE) / 2
maf <- ifelse(aaf > 0.5, 1. - aaf, aaf)
return (maf)
}
#' Calculate imputation r squared
#'
#' Routine to calculate the imputation r squared given the dosages
#' and Pr(g=2).
#' This is an estimate for the imputation r squared returned from
#' minimac and impute2. The r squared values are calculated slightly
#' differently between the programs. This estimate is based on the
#' method used by minimac. It does well for minor allele frequencies
#' above 5%. This function is used internally and is exported for
#' use in examples.
#'
#' @param dosage Dosage values
#' @param p0 Pr(g=0) - unused
#' @param p1 Pr(g=1) - unused
#' @param p2 Pr(g=2)
#'
#' @return
#' Imputation r squared
#' @export
#'
#' @examples
#' # Get information about binary dosage file
#' bdfile <- system.file("extdata", "vcf1a.bdose", package = "BinaryDosage")
#' bdinfo <- getbdinfo(bdfiles = bdfile)
#' snp1 <- getsnp(bdinfo = bdinfo, 1, dosageonly = FALSE)
#' rsq <- BinaryDosage:::getrsq(snp1$dosage, p2 = snp1$p2)
getrsq <- function(dosage, p0, p1, p2) {
q <- numeric(2 * length(dosage))
d <- dosage * dosage - 4 * p2
d <- ifelse(d < 0 & d > -0.01, 0., d)
d <- sqrt(d)
q[1:length(dosage)] <- 0.5 * (dosage - d)
q[(length(dosage) + 1):(2*length(dosage))] <- 0.5 * (dosage + d)
q <- q[is.na(q) == FALSE]
mu <- mean(q)
sigma <- mean(q*q) - mu * mu
rsq <- sigma / (mu * (1. - mu))
return (rsq)
}
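# Derivation sketch (not part of the original source): each dosage
# d = p1 + 2 * p2 is split into two per-allele dosages with q1 + q2 = d and
# q1 * q2 = p2, so the quadratic formula gives q = (d -/+ sqrt(d^2 - 4 * p2)) / 2,
# which is what the code above computes. The r squared is then the variance
# of q over the binomial variance mu * (1 - mu) at the estimated allele
# frequency mu.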
updateaaf <- function (bdinfo) {
if (bdinfo$additionalinfo$subformat < 3)
headerinfo <- ReadBinaryDosageHeader4A(bdinfo$filename)
else
headerinfo <- ReadBinaryDosageHeader4B(bdinfo$filename)
aaf <- unlist(bdapply(bdinfo, getaaf))
updatesnpinfo(bdinfo$filename, headerinfo$snps$aafoffset, aaf)
}
updatemaf <- function (bdinfo) {
if (bdinfo$additionalinfo$subformat < 3)
headerinfo <- ReadBinaryDosageHeader4A(bdinfo$filename)
else
headerinfo <- ReadBinaryDosageHeader4B(bdinfo$filename)
maf <- unlist(bdapply(bdinfo, getmaf))
updatesnpinfo(bdinfo$filename, headerinfo$snps$mafoffset, maf)
}
updatersq <- function (bdinfo) {
if (bdinfo$additionalinfo$subformat < 3)
headerinfo <- ReadBinaryDosageHeader4A(bdinfo$filename)
else
headerinfo <- ReadBinaryDosageHeader4B(bdinfo$filename)
rsq <- unlist(bdapply(bdinfo, getrsq))
updatesnpinfo(bdinfo$filename, headerinfo$snps$rsqoffset, rsq)
}
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/R/WriteBinaryDosage.R
|
###########################################################
# Binary Dosage #
###########################################################
#' Apply a function to each SNP in a
#' binary dosage file
#'
#' A routine that reads in the SNP data serially
#' from a binary dosage file and applies a user
#' specified function to the data.
#'
#' @param bdinfo List with information about the
#' binary dosage file returned from getbdinfo
#' @param func A user supplied function to apply
#' to the data for each snp. The function must be
#' provided with the following parameters: dosage,
#' p0, p1, and p2, where dosage is the dosage values
#' for each subject and p0, p1, and p2 are the
#' probabilities that a subject has zero, one,
#' and two copies of the alternate allele,
#' respectively.
#' @param ... Additional parameters needed by the
#' user supplied function
#'
#' @return A list with length equal to the number
#' of SNPs in the binary dosage file. Each element
#' of the list is the value returned by the user
#' supplied function
#' @export
#' @family Iterating functions
#' @examples
#' # Get information about a binary dosage file
#'
#' vcf1abdfile <- system.file("extdata", "vcf1a.bdose", package = "BinaryDosage")
#' bdinfo <- getbdinfo(bdfiles = vcf1abdfile)
#'
#' # Apply the getaaf, get alternate allele frequency, function
#' # to all the SNPs in the binary dosage file
#'
#' aaf <- bdapply(bdinfo = bdinfo,
#' func = BinaryDosage:::getaaf)
bdapply <- function(bdinfo, func, ...) {
if (missing(bdinfo) == TRUE)
stop("No binary dosage file information specified")
if (is.na(match("genetic-info", class(bdinfo))) == TRUE)
stop("bdinfo does not contain information about a binary dosage file")
if (is.na(match("bdose-info", class(bdinfo$additionalinfo))) == TRUE)
stop("bdinfo does not contain information about a binary dosage file")
if (missing(func) == TRUE)
stop("No function specified")
if (is.na(match("function", class(func))) == TRUE)
stop("func is not a function")
retval <- vector("list", nrow(bdinfo$snps))
dosage <- numeric(nrow(bdinfo$samples))
p0 <- numeric(nrow(bdinfo$samples))
p1 <- numeric(nrow(bdinfo$samples))
p2 <- numeric(nrow(bdinfo$samples))
us <- integer(2 * nrow(bdinfo$samples))
for (i in 1:nrow(bdinfo$snps)) {
dosage[1:nrow(bdinfo$samples)] <- NA
p0[1:nrow(bdinfo$samples)] <- NA
p1[1:nrow(bdinfo$samples)] <- NA
p2[1:nrow(bdinfo$samples)] <- NA
ReadBinaryDosageData(bdinfo, i, dosage, p0, p1, p2, us)
retval[[i]] <- func(dosage, p0, p1, p2, ...)
}
return (retval)
}
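# Sketch of forwarding extra arguments through ... (not part of the
# original source; uncomment to run; the cutoff value is arbitrary):
# bdfile <- system.file("extdata", "vcf1a.bdose", package = "BinaryDosage")
# bdinfo <- getbdinfo(bdfiles = bdfile)
# above1 <- bdapply(bdinfo,
#                   function(dosage, p0, p1, p2, cutoff)
#                     sum(dosage > cutoff, na.rm = TRUE),
#                   cutoff = 1)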
###########################################################
# VCF #
###########################################################
#' Apply a function to each SNP in a vcf file
#'
#' A routine that reads in the SNP data serially
#' from a vcf file and applies a user specified
#' function to the data.
#'
#' @param vcfinfo List with information about the
#' vcf file returned from getvcfinfo
#' @param func A user supplied function to apply
#' to the data for each snp. The function must be
#' provided with the following parameters: dosage,
#' p0, p1, and p2, where dosage is the dosage values
#' for each subject and p0, p1, and p2 are the
#' probabilities that a subject has zero, one,
#' and two copies of the alternate allele,
#' respectively.
#' @param ... Additional parameters needed by the
#' user supplied function
#'
#' @return A list with length equal to the number
#' of SNPs in the vcf file. Each element
#' of the list is the value returned by the user
#' supplied function
#' @export
#' @family Iterating functions
#' @examples
#' # Get information about a vcf file
#'
#' vcf1afile <- system.file("extdata", "set1a.vcf", package = "BinaryDosage")
#' vcfinfo <- getvcfinfo(vcffiles = vcf1afile)
#'
#' # Apply the getaaf, get alternate allele frequency, function
#' # to all the SNPs in the vcf file
#'
#' aaf <- vcfapply(vcfinfo = vcfinfo,
#' func = BinaryDosage:::getaaf)
vcfapply <- function(vcfinfo, func, ...) {
if (missing(vcfinfo) == TRUE)
stop("No vcf file information specified")
if (is.na(match("genetic-info", class(vcfinfo))) == TRUE)
stop("vcfinfo does not appear to contain information about a vcf file")
if (is.na(match("vcf-info", class(vcfinfo$additionalinfo))) == TRUE)
stop("vcfinfo does not appear to contain information about a vcf file")
if (missing(func) == TRUE)
stop("No function specified")
if (is.na(match("function", class(func))) == TRUE)
stop("func is not a function")
retval <- vector("list", nrow(vcfinfo$snps))
if (vcfinfo$additionalinfo$gzipped == FALSE)
con <- file(vcfinfo$filename, "r")
else
con <- gzfile(vcfinfo$filename, "r")
line <- readLines(con, n = vcfinfo$additionalinfo$headerlines)
dosage <- numeric(nrow(vcfinfo$samples))
p0 <- numeric(nrow(vcfinfo$samples))
p1 <- numeric(nrow(vcfinfo$samples))
p2 <- numeric(nrow(vcfinfo$samples))
for (i in 1:nrow(vcfinfo$snps)) {
line <- readLines(con, n = 1)
x <- unlist(strsplit(line, "\t"))
y <- unlist(strsplit(x[10:length(x)], ":"))
if (length(vcfinfo$additionalinfo$datacolumns$dosage) == 1) {
dosagecol <- vcfinfo$additionalinfo$datacolumns$dosage
gpcol <- vcfinfo$additionalinfo$datacolumns$genotypeprob
numcolumns <- vcfinfo$additionalinfo$datacolumns$numcolumns
} else {
dosagecol <- vcfinfo$additionalinfo$datacolumns$dosage[i]
gpcol <- vcfinfo$additionalinfo$datacolumns$genotypeprob[i]
numcolumns <- vcfinfo$additionalinfo$datacolumns$numcolumns[i]
}
if(is.na(gpcol) == FALSE) {
gpstring <- y[seq(gpcol, length(y) - numcolumns + gpcol, numcolumns)]
z <- unlist(strsplit(gpstring, ","))
p0[1:nrow(vcfinfo$samples)] <- as.numeric(z[seq(1, length(z) - 2, 3)])
p1[1:nrow(vcfinfo$samples)] <- as.numeric(z[seq(2, length(z) - 1, 3)])
p2[1:nrow(vcfinfo$samples)] <- as.numeric(z[seq(3, length(z), 3)])
} else {
p0[1:nrow(vcfinfo$samples)] <- NA
p1[1:nrow(vcfinfo$samples)] <- NA
p2[1:nrow(vcfinfo$samples)] <- NA
}
if(is.na(dosagecol) == FALSE) {
dosage[1:nrow(vcfinfo$samples)] <- as.numeric(y[seq(dosagecol, length(y) - numcolumns + dosagecol, numcolumns)])
} else {
dosage[1:nrow(vcfinfo$samples)] <- p1 + p2 + p2
}
retval[[i]] <- func(dosage = dosage,
p0 = p0,
p1 = p1,
p2 = p2
, ...)
}
close(con)
return (retval)
}
###########################################################
# GEN (Impute2) #
###########################################################
#' Apply a function to each SNP in a
#' gen, impute2, file
#'
#' A routine that reads in the SNP data serially
#' from a gen file and applies a user
#' specified function to the data.
#'
#' @param geninfo List with information about the
#' gen, impute2, file returned from [getgeninfo]
#' @param func A user supplied function to apply
#' to the data for each snp. The function must be
#' provided with the following parameters: dosage,
#' p0, p1, and p2, where dosage is the dosage values
#' for each subject and p0, p1, and p2 are the
#' probabilities that a subject has zero, one,
#' and two copies of the alternate allele,
#' respectively.
#' @param ... Additional parameters needed by the
#' user supplied function
#'
#' @return A list with length equal to the number
#' of SNPs in the gen file. Each element
#' of the list is the value returned by the user
#' supplied function
#' @export
#'
#' @family Iterating functions
#' @examples
#' # Get information about a gen, impute2, file
#'
#' gen1afile <- system.file("extdata", "set1a.imp", package = "BinaryDosage")
#' geninfo <- getgeninfo(genfiles = gen1afile,
#' snpcolumns = c(1L, 3L, 2L, 4L, 5L),
#' header = TRUE)
#'
#' # Apply the getaaf, get alternate allele frequency, function
#' # to all the SNPs in the gen file
#'
#' aaf <- genapply(geninfo = geninfo,
#' func = BinaryDosage:::getaaf)
genapply <- function(geninfo, func, ...) {
if (missing(geninfo) == TRUE)
stop("No gen file information specified")
if (is.na(match("genetic-info", class(geninfo))) == TRUE)
stop("geninfo does not appear to contain information about a gen file")
if (is.na(match("gen-info", class(geninfo$additionalinfo))) == TRUE)
stop("geninfo does not appear to contain information about a gen file")
if (missing(func) == TRUE)
stop("No function specified")
if (is.na(match("function", class(func))) == TRUE)
stop("func is not a function")
retval <- vector("list", nrow(geninfo$snps))
if (geninfo$additionalinfo$gzipped == FALSE)
con <- file(geninfo$filename, "r")
else
con <- gzfile(geninfo$filename, "r")
line <- readLines(con, n = geninfo$additionalinfo$headersize)
dosage <- numeric(nrow(geninfo$samples))
p0 <- numeric(nrow(geninfo$samples))
p0[1:length(p0)] <- NA
p1 <- p0
p2 <- p0
for (i in 1:nrow(geninfo$snps)) {
line <- readLines(con, n = 1)
x <- unlist(strsplit(line, geninfo$additionalinfo$sep))
y <- x[geninfo$additionalinfo$startcolumn:length(x)]
if (geninfo$additionalinfo$format == 1) {
dosage[1:length(dosage)] <- as.numeric(y)
} else if (geninfo$additionalinfo$format == 2) {
p0[1:length(p0)] <- as.numeric(y[seq(1, length(y) - 1, 2)])
p1[1:length(p1)] <- as.numeric(y[seq(2, length(y), 2)])
p2[1:length(p2)] <- ifelse(p0 + p1 < 1, 1 - p0 - p1, 0.)
dosage[1:length(dosage)] <- ifelse(p1 + p2 + p2 < 2, p1 + p2 + p2, 2.)
} else {
p0[1:length(p0)] <- as.numeric(y[seq(1, length(y) - 2, 3)])
p1[1:length(p1)] <- as.numeric(y[seq(2, length(y) - 1, 3)])
p2[1:length(p2)] <- as.numeric(y[seq(3, length(y), 3)])
dosage[1:length(dosage)] <- ifelse(p1 + p2 + p2 < 2, p1 + p2 + p2, 2.)
}
retval[[i]] <- func(dosage = dosage,
p0 = p0,
p1 = p1,
p2 = p2
, ...)
}
close(con)
return (retval)
}
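# Worked sketch of the impformat 2 branch above (not part of the original
# source; uncomment to run): with P(g=0) = 0.7 and P(g=1) = 0.2 the
# remaining mass is P(g=2) = 0.1, and the dosage is P(g=1) + 2 * P(g=2) = 0.4.
# p0 <- 0.7; p1 <- 0.2
# p2 <- ifelse(p0 + p1 < 1, 1 - p0 - p1, 0)
# dosage <- ifelse(p1 + p2 + p2 < 2, p1 + p2 + p2, 2)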
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/R/apply.R
|
#' @useDynLib BinaryDosage, .registration = TRUE
#' @importFrom Rcpp sourceCpp
#' @importFrom digest digest
#' @importFrom prodlim row.match
#' @importFrom utils read.table
NULL
validatebdinput <- function(bdfiles,
format,
subformat,
snpidformat,
bdoptions) {
if (is.character(bdfiles) == FALSE)
stop("bdfiles must be a vector of characters")
if (is.numeric(format) == FALSE && is.integer(format) == FALSE)
stop("format must be an integer value")
if (length(format) != 1)
stop("format must be an integer vector of length 1")
if (is.numeric(format) == TRUE) {
if (floor(format) != format)
stop("format must be an integer")
format <- floor(format)
}
if (format < 1 || format > 4)
stop("format must be an integer value from 1 to 4")
if (is.numeric(subformat) == FALSE && is.integer(subformat) == FALSE)
stop("subformat must be an integer value")
if (length(subformat) != 1)
stop("subformat must be an integer vector of length 1")
if (is.numeric(subformat) == TRUE) {
if (floor(subformat) != subformat)
stop("subformat must be an integer")
subformat <- floor(subformat)
}
if (subformat < 0 || subformat > 4)
stop("subformat must be an integer value from 0 to 4")
if (format < 3 && subformat > 2)
stop("subformat must be an integer value from 0 to 2 for formats 1 and 2")
if (format == 4 & length(bdfiles) != 1)
stop("Only one output file name is needed when using format 4")
if (format < 4 & length(bdfiles) != 3)
stop("Three output file names are required when using formats 1, 2, and 3")
if (is.na(match("", bdfiles)) == FALSE)
stop("Output file names cannot be blank")
if (is.numeric(snpidformat) == FALSE && is.integer(snpidformat) == FALSE)
stop("snpidformat must be an integer value")
if (length(snpidformat) != 1)
stop("snpidformat must be an integer vector of length 1")
if (is.numeric(snpidformat) == TRUE) {
if (floor(snpidformat) != snpidformat)
stop("snpidformat must be an integer")
snpidformat = floor(snpidformat)
}
if (snpidformat < -1 | snpidformat > 3)
stop("snpidformat must be and integer from -1 to 3")
if (is.character(bdoptions) == FALSE)
stop("bdoptions must be a character array")
if (length(bdoptions) > 0 & format != 4)
stop("bdoptions can only be used with format 4")
if (length(bdoptions) > 0) {
if (any(is.na(match(bdoptions, c("aaf", "maf", "rsq")))) == TRUE)
stop("Only valid bdoptions are aaf, maf, and rsq")
}
return(list(format = format,
subformat = subformat))
}
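# Behavior sketch (not part of the original source; uncomment to run;
# the file name is hypothetical): format 4 takes a single output file name,
# formats 1 through 3 require three, and invalid combinations stop() with
# a message.
# validatebdinput(bdfiles = "out.bdose", format = 4, subformat = 0,
#                 snpidformat = 0, bdoptions = character(0))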
###########################################################
# VCF to Binary Dosage #
###########################################################
#' Convert a VCF file to a binary dosage file
#'
#' Routine to read information from a VCF file and create
#' a binary dosage file. The function is designed to use
#' files return from the Michigan Imputation Server but will
#' run on other VCF files if they contain dosage and genetic
#' probabilities. Note: This routine can take a long time to
#' run if the VCF file is large.
#'
#' @param vcffiles A vector of file names.
#' The first is the name of the vcf file. The
#' second is the name of the file that contains information
#' about the imputation of the SNPs. This file is produced
#' by minimac 3 and 4.
#' @param gz Indicator if VCF file is compressed using gzip.
#' Default value is FALSE.
#' @param bdfiles Vector of names of the output files.
#' The binary dosage file name is first. The family and
#' map files follow. For format 4, no family and map file
#' names are needed.
#' @param format The format of the output binary dosage file.
#' Allowed values are 1, 2, 3, and 4. The default value is 4.
#' Using the default value is recommended.
#' @param subformat The subformat of the format of the output
#' binary dosage file. A value of 1 or 3 indicates that only the
#' dosage value is saved. A value of 2 or 4 indicates
#' the dosage and genetic probabilities will be output. Values
#' of 3 or 4 are only allowed with formats 3 and 4. If a value
#' of zero is provided, and genetic probabilities are in the vcf
#' file, subformat 2 will be used for formats 1 and 2, and
#' subformat 4 will be used for formats 3 and 4. If the vcf file
#' does not contain genetic probabilities, subformat 1 will be
#' used for formats 1 and 2, and subformat 3 will be used for
#' formats 3 and 4. The default value is 0.
#' @param snpidformat The format that the SNP ID will be saved as.
#' -1 - SNP ID not written.
#' 0 - same as in the VCF file.
#' 1 - chromosome:location.
#' 2 - chromosome:location:reference_allele:alternate_allele.
#' If snpidformat is 1 and the VCF file uses format 2, an error is
#' generated. Default value is 0.
#' @param bdoptions Character array containing any of the following
#' value, "aaf", "maf", "rsq". The presence of any of these
#' values indicates that the specified values should be
#' calculated and stored in the binary dosage file. These values only
#' apply to format 4.
#'
#' @return
#' None
#' @export
#'
#' @examples
#' # Find the vcf file names
#' vcf1afile <- system.file("extdata", "set1a.vcf", package = "BinaryDosage")
#' vcf1ainfo <- system.file("extdata", "set1a.info", package = "BinaryDosage")
#' bdfiles <- tempfile()
#' # Convert the file
#' vcftobd(vcffiles = c(vcf1afile, vcf1ainfo), bdfiles = bdfiles)
#' # Verify the file was written correctly
#' bdinfo <- getbdinfo(bdfiles)
vcftobd <- function(vcffiles,
gz = FALSE,
bdfiles,
format = 4L,
subformat = 0L,
snpidformat = 0,
bdoptions = character(0)) {
if (missing(vcffiles) == TRUE)
stop("No VCF file specified")
if (missing(bdfiles) == TRUE)
stop("No output files specified")
validation <- validatebdinput(bdfiles = bdfiles,
format = format,
subformat = subformat,
snpidformat = snpidformat,
bdoptions = bdoptions)
format <- validation$format
subformat <- validation$subformat
if (snpidformat == -1)
readsnpformat = 0
else
readsnpformat = snpidformat
vcfinfo <- getvcfinfo(vcffiles = vcffiles,
gz = gz,
index = FALSE,
snpidformat = readsnpformat)
if (snpidformat == -1)
vcfinfo$snpidformat = 1
else
vcfinfo$snpidformat = 0
if (subformat == 0) {
if (anyNA(vcfinfo$additionalinfo$datacolumns$genotypeprob) == TRUE)
subformat <- 1
else
subformat <- 2
}
WriteBinaryDosageHeader(format = format,
subformat = subformat,
filename = bdfiles,
genefileinfo = vcfinfo,
bdoptions = bdoptions)
headerinfo <- ReadBinaryDosageHeader(filename = bdfiles)
bdwriteinfo <- AllocateBinaryDosageWriteMemory(headerinfo = headerinfo)
vcfapply(vcfinfo = vcfinfo,
func = WriteBinaryDosageData,
writeinfo = bdwriteinfo)
WriteBinaryDosageIndices(writeinfo = bdwriteinfo)
bdinfo <- getbdinfo(bdfiles = bdfiles)
if (is.na(match("aaf", bdoptions)) == FALSE)
updateaaf(bdinfo)
if (is.na(match("maf", bdoptions)) == FALSE)
updatemaf(bdinfo)
if (is.na(match("rsq", bdoptions)) == FALSE)
updatersq(bdinfo)
}
###########################################################
# Gen to Binary Dosage #
###########################################################
#' Convert a gen file to a binary dosage file
#'
#' Routine to read information from a gen file and create
#' a binary dosage file. Note: This routine can take a long
#' time to run if the gen file is large.
#'
#' @param genfiles A vector of file names.
#' The first is the name of the gen file. The
#' second is name of the sample file that contains
#' the subject information.
#' @param snpcolumns Column numbers containing chromosome,
#' snpid, location, reference allele, alternate allele,
#' respectively. This must be an integer vector. All
#' values must be positive except for the chromosome.
#' The value for the chromosome may be -1 or 0.
#' -1 indicates that the chromosome value is passed to
#' the routine using the chromosome parameter.
#' 0 indicates that the chromosome value is in the snpid
#' and that the snpid has the format chromosome:other_data.
#' Default value is c(1L, 2L, 3L, 4L, 5L).
#' @param startcolumn Column number of first column with
#' genetic probabilities or dosages. Must
#' be an integer value. Default value is 6L.
#' @param impformat Number of genetic data values per
#' subject. 1 indicates dosage only, 2 indicates P(g=0)
#' and P(g=1) only, 3 indicates P(g=0), P(g=1), and
#' P(g=2). Default value is 3L.
#' @param chromosome Chromosome value to use if the
#' first value of the snpcolumns is equal to 0.
#' Default value is character().
#' @param header Indicators if the gen and sample files
#' have headers. If the gen file does not have a
#' header, a sample file must be included.
#' Default value is c(FALSE, TRUE).
#' @param gz Indicator if file is compressed using gzip.
#' Default value is FALSE.
#' @param sep Separator used in the gen file. Default
#' value is `"\t"`
#' @param bdfiles Vector of names of the output files.
#' The binary dosage file name is first. The family and
#' map files follow. For format 4, no family and map file
#' names are needed.
#' @param format The format of the output binary dosage file.
#' Allowed values are 1, 2, 3, and 4. The default value is 4.
#' Using the default value is recommended.
#' @param subformat The subformat of the format of the output
#' binary dosage file. A value of 1 or 3 indicates that only the
#' dosage value is saved. A value of 2 or 4 indicates
#' the dosage and genetic probabilities will be output. Values
#' of 3 or 4 are only allowed with formats 3 and 4. If a value
#' of zero is provided, and genetic probabilities are in the gen
#' file, subformat 2 will be used for formats 1 and 2, and
#' subformat 4 will be used for formats 3 and 4. If the gen file
#' does not contain genetic probabilities, subformat 1 will be
#' used for formats 1 and 2, and subformat 3 will be used for
#' formats 3 and 4. The default value is 0.
#' @param snpidformat The format that the SNP ID will be saved as.
#' -1 - SNP ID not written.
#' 0 - same as in the gen file.
#' 1 - chromosome:location.
#' 2 - chromosome:location:reference_allele:alternate_allele.
#' If snpidformat is 1 and the gen file uses format 2, an error is
#' generated. Default value is 0.
#' @param bdoptions Character array containing any of the following
#' value, "aaf", "maf", "rsq". The presence of any of these
#' values indicates that the specified values should be
#' calculates and stored in the binary dosage file. These values only
#' apply to format 4.
#'
#' @return
#' None
#' @export
#'
#' @examples
#' # Find the gen file names
#' gen3afile <- system.file("extdata", "set3a.imp", package = "BinaryDosage")
#' gen3asample <- system.file("extdata", "set3a.sample", package = "BinaryDosage")
#' # Get temporary output file name
#' bdfiles <- tempfile()
#' # Convert the file
#' gentobd(genfiles = c(gen3afile, gen3asample),
#' snpcolumns = c(0L, 2L:5L),
#' bdfiles = bdfiles)
#' # Verify the file was written correctly
#' bdinfo <- getbdinfo(bdfiles = bdfiles)
gentobd <- function(genfiles,
snpcolumns = 1L:5L,
startcolumn = 6L,
impformat = 3L,
chromosome = character(),
header = c(FALSE, TRUE),
gz = FALSE,
sep = "\t",
bdfiles,
format = 4L,
subformat = 0L,
snpidformat = 0L,
bdoptions = character(0)) {
if (missing(genfiles) == TRUE)
stop("No gen file specified")
if (missing(bdfiles) == TRUE)
stop("No output files specified")
validation <- validatebdinput(bdfiles = bdfiles,
format = format,
subformat = subformat,
snpidformat = snpidformat,
bdoptions = bdoptions)
format <- validation$format
subformat <- validation$subformat
if (snpidformat == -1)
readsnpformat = 0L
else
readsnpformat = as.integer(snpidformat)
geninfo <- getgeninfo(genfiles = genfiles,
snpcolumns = snpcolumns,
startcolumn = startcolumn,
impformat = impformat,
chromosome = chromosome,
header = header,
gz = gz,
index = FALSE,
snpidformat = readsnpformat,
sep = sep)
if (snpidformat == -1)
geninfo$snpidformat = 1
else
geninfo$snpidformat = 0
if (subformat == 0) {
if (geninfo$additionalinfo$format == 1L)
subformat <- 1L
else
subformat <- 2L
}
WriteBinaryDosageHeader(format = format,
subformat = subformat,
filename = bdfiles,
genefileinfo = geninfo,
bdoptions = bdoptions)
headerinfo <- ReadBinaryDosageHeader(filename = bdfiles)
bdwriteinfo <- AllocateBinaryDosageWriteMemory(headerinfo = headerinfo)
genapply(geninfo = geninfo,
func = WriteBinaryDosageData,
writeinfo = bdwriteinfo)
WriteBinaryDosageIndices(writeinfo = bdwriteinfo)
bdinfo <- getbdinfo(bdfiles = bdfiles)
if (is.na(match("aaf", bdoptions)) == FALSE)
updateaaf(bdinfo)
if (is.na(match("maf", bdoptions)) == FALSE)
updatemaf(bdinfo)
if (is.na(match("rsq", bdoptions)) == FALSE)
updatersq(bdinfo)
}
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/R/convert.R
|
###########################################################
# Binary Dosage #
###########################################################
#' Get information about a binary dosage file
#'
#' Routine to return information about a binary dosage file.
#' This information is used by other routines to
#' allow for quicker extraction of values from the
#' file.
#'
#' @param bdfiles Vector of file names. The first is the
#' binary dosage data containing the dosages and genetic
#' probabilities. The second file name is the family information
#' file. The third file name is the SNP information file.
#' The family and SNP information files are not used if the
#' binary dosage file is in format 4. For this format the
#' family and SNP information are in the file with the dosages
#' and genetic probabilities.
#'
#' @return List with information about the binary dosage file.
#' This includes family and subject IDs along with
#' a list of the SNPs in the file. Other information needed
#' to read the file is also included.
#'
#' @export
#'
#' @examples
#' vcf1abdfile <- system.file("extdata", "vcf1a.bdose", package = "BinaryDosage")
#' bdinfo <- getbdinfo(bdfiles = vcf1abdfile)
getbdinfo <- function(bdfiles) {
if (missing(bdfiles) == TRUE)
stop("No binary dosage files specified")
if (is.character(bdfiles) == FALSE)
stop("bdfiles must be a character vector")
if (length(bdfiles) != 1 & length(bdfiles) != 3)
stop("bdfiles must be a character vector of length 1 or 3")
if (bdfiles[1] == "")
stop("No binary dosage file specified")
if (length(bdfiles) == 3) {
if (bdfiles[2] == "" & bdfiles[3] == "") {
bdfiles <- bdfiles[1]
} else if (bdfiles[2] == "" | bdfiles[3] == "") {
stop("bdfiles contains empty strings")
}
}
headerinfo <- ReadBinaryDosageHeader(bdfiles)
indices <- ReadBinaryDosageIndices(headerinfo)
headerinfo$datasize <- indices$datasize
headerinfo$indices <- indices$indices
class(headerinfo) <- c("genetic-info")
return (headerinfo)
}
###########################################################
# VCF #
###########################################################
summarizevcfadditionalinfo <- function(x) {
if (length(unique(x)) != 1)
return (x)
if (x[1] == '.')
return (character(0))
return (x[1])
}
readminimacinfofile <- function(filename) {
addinfo <- read.table(filename, header = TRUE, stringsAsFactors = FALSE)
if (ncol(addinfo) != 13)
stop("Error reading information file - Wrong number of columns")
if (all(colnames(addinfo) == c("SNP", "REF.0.", "ALT.1.", "ALT_Frq", "MAF",
"AvgCall", "Rsq", "Genotyped", "LooRsq",
"EmpR", "EmpRsq", "Dose0", "Dose1")) == FALSE)
stop("Error reading information file - Wrong column names")
return(addinfo)
}
#' Get information about a vcf file
#'
#' Routine to return information about a vcf file.
#' This information is used by other routines to
#' allow for quicker extraction of values from the
#' file.
#'
#' @param vcffiles A vector of file names.
#' The first is the name of the vcf file. The
#' second is name of the file that contains information
#' about the imputation of the SNPs. This file is produced
#' by minimac 3 and 4.
#' @param gz Indicator if VCF file is compressed using gzip.
#' Default value is FALSE.
#' @param index Indicator if file should be indexed. This
#' allows for faster reading of the file. Indexing a gzipped
#' file is not supported.
#' Default value is TRUE.
#' @param snpidformat The format that the SNP ID will be saved as.
#' 0 - same as in the VCF file.
#' 1 - chromosome:location.
#' 2 - chromosome:location:referenceallele:alternateallele.
#' If snpidformat is 1 and the VCF file uses format 2, an error is
#' generated. Default value is 0.
#'
#' @return List containing information about the VCF file
#' to include file name, subject IDs, and information about
#' the SNPs. Indices for faster reading will be included
#' if index is set to TRUE
#'
#' @export
#'
#' @examples
#' # Get file names of the vcf and information file
#' vcf1afile <- system.file("extdata", "set1a.vcf", package = "BinaryDosage")
#' vcf1ainfo <- system.file("extdata", "set1a.info", package = "BinaryDosage")
#'
#' # Get the information about the vcf file
#' vcf1ainfo <- getvcfinfo(vcffiles = c(vcf1afile, vcf1ainfo))
getvcfinfo <- function(vcffiles,
gz = FALSE,
index = TRUE,
snpidformat = 0L) {
if (missing(vcffiles) == TRUE)
stop("No VCF file specified")
if (is.character(vcffiles) == FALSE)
stop("vcfiles must be a character value")
if (length(vcffiles) != 1 & length(vcffiles) != 2)
stop("vcffiles must be a character vector of length 1 or 2")
filename = vcffiles[1]
if (length(vcffiles) != 1)
infofile <- vcffiles[2]
else
infofile <- ""
if (filename == "")
stop("No VCF file specified")
if (is.logical(gz) == FALSE)
stop("gz must be a logical value")
if (length(gz) != 1)
stop("gz must be a logical vector of length 1")
if (is.logical(index) == FALSE)
stop("index must be a logical value")
if (length(index) != 1)
stop("index must be a logical vector of length 1")
if (gz == TRUE && index == TRUE)
stop("Indexing gzipped files is not supported.")
if (is.numeric(snpidformat) == FALSE)
stop("snpidformat must be an integer value")
if (length(snpidformat) != 1)
stop("snpidformat must be an interger vector of length 1")
if (floor(snpidformat) != snpidformat)
stop("snpidformat must be an integer value")
snpidformat <- as.integer(snpidformat)
if (snpidformat < 0 || snpidformat > 2)
stop("snpidformat must have a value of 0, 1, or 2")
if (gz == FALSE) {
con <- file(filename, "r")
} else {
con <- gzfile(filename, "r")
}
fqfilename <- normalizePath(filename, winslash = '/')
headerlines <- 1L
headersize <- -1L
while (TRUE) {
currentpos <- seek(con, origin = "current")
line <- readLines(con, n = 1)
if (substr(line, 1, 1) != '#') {
close(con)
stop("Error processing header")
}
if (substr(line, 2, 2) != '#') {
x <- unlist(strsplit(line, "\t"))
if (x[1] != "#CHROM") {
close(con)
stop("Error processing header")
}
x[1] = "CHROM"
begindata <- seek(con, origin = "current")
break
}
headerlines <- headerlines + 1L
}
if (index == TRUE)
headersize <- begindata
close(con)
if (all(x[1:9] == c("CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT")) == FALSE)
stop("Column names incorrect")
samples = data.frame(fid = rep("", length(x) - 9L),
sid = x[10:length(x)],
stringsAsFactors = FALSE)
usesfid = FALSE
coltypes = c("character", "integer", rep("character", 7),
rep("NULL", nrow(samples)))
snps <- read.table(filename,
skip = headerlines,
colClasses = coltypes,
stringsAsFactors = FALSE)
colnames(snps) <- c("chromosome", "location", "snpid",
"reference", "alternate", "quality",
"filter", "info", "format")
vcfinfo <- as.list(snps[,6:9])
summaryinfo <- lapply(vcfinfo, summarizevcfadditionalinfo)
datacolumns <- data.frame(numcolumns = rep(0L, length(summaryinfo$format)),
dosage = rep(0L, length(summaryinfo$format)),
genotypeprob = rep(0L, length(summaryinfo$format)),
genotype = rep(0L, length(summaryinfo$format)),
stringsAsFactors = FALSE)
for (i in 1:length(summaryinfo$format)) {
formatsplit <- unlist(strsplit(summaryinfo$format[i], split = ':'))
datacolumns$numcolumns[i] <- length(formatsplit)
datacolumns$dosage[i] <- match("DS", formatsplit)
datacolumns$genotypeprob[i] <- match("GP", formatsplit)
datacolumns$genotype[i] <- match("GT", formatsplit)
}
additionalinfo <- list(gzipped = gz,
headerlines = headerlines,
headersize = headersize,
quality = summaryinfo$quality,
filter = summaryinfo$filter,
info = summaryinfo$info,
format = summaryinfo$format,
datacolumns = datacolumns)
class(additionalinfo) <- "vcf-info"
rm(vcfinfo)
rm(datacolumns)
if (infofile == "") {
snpinfo <- list()
} else {
minimacinfo <- readminimacinfofile(infofile)
if (nrow(minimacinfo) == nrow(snps)) {
if (all(minimacinfo$SNP == snps$snpid) == TRUE &
all(minimacinfo$REF.0. == snps$reference) == TRUE &
all(minimacinfo$ALT.1. == snps$alternate) == TRUE) {
snpinfo <- list(aaf = as.matrix(minimacinfo$ALT_Frq),
maf = as.matrix(minimacinfo$MAF),
avgcall = as.matrix(minimacinfo$AvgCall),
rsq = as.matrix(minimacinfo$Rsq))
} else {
stop("Infromation file does not line up with VCF file - different SNPs")
}
} else {
stop("Information file does not line up with VCF file - different number of SNPs")
}
}
snps <- snps[,1:5]
chr1 <- snps$chromosome[1]
oneChr <- all(snps$chromosome == chr1)
chrlocid <- paste(snps$chromosome, snps$location, sep = ":")
vcfsnpformat1 <- all(snps$snpid == chrlocid)
chrlocrefaltid <- paste(snps$chromosome, snps$location,
snps$reference, snps$alternate, sep = ":")
vcfsnpformat2 <- all(snps$snpid == chrlocrefaltid)
if (snpidformat == 0) {
if (vcfsnpformat1 == TRUE) {
snps$snpid <- chrlocid
snpidformat <- 1L
} else if (vcfsnpformat2 == TRUE) {
snps$snpid <- chrlocrefaltid
snpidformat <- 2L
}
} else if (snpidformat == 1) {
if (vcfsnpformat2 == TRUE)
stop ("snpidformat 1 specified but VCF file uses snpidformat 2")
if (vcfsnpformat1 == FALSE)
snps$snpid <- chrlocid
} else if (snpidformat == 2) {
if (vcfsnpformat2 == FALSE)
snps$snpid <- chrlocrefaltid
}
if (index == TRUE) {
datasize <- integer(nrow(snps))
indices <- numeric(nrow(snps))
if (gz == FALSE) {
x <- GetLineLocations(filename)
indices <- x[(headerlines + 1):(length(x) - 1)]
for (i in 1:length(datasize))
datasize[i] <- x[headerlines + i + 1] - x[headerlines + i]
}
} else {
datasize <- integer(0)
indices <- numeric(0)
}
retval = list(filename = fqfilename,
usesfid = usesfid,
samples = samples,
onechr = oneChr,
snpidformat = snpidformat,
snps = snps,
snpinfo = snpinfo,
datasize = datasize,
indices = indices,
additionalinfo = additionalinfo)
class(retval) <- c("genetic-info")
return (retval)
}
###########################################################
# GEN (Impute2) #
###########################################################
#' Get information about a gen, impute2, file
#'
#' Routine to return information about a gen file.
#' This information is used by other routines to
#' allow for quicker extraction of values from the
#' file.
#'
#' @param genfiles A vector of file names.
#' The first is the name of the gen file. The
#' second is the name of the sample file that contains
#' the subject information.
#' @param snpcolumns Column numbers containing chromosome,
#' snpid, location, reference allele, alternate allele,
#' respectively. This must be an integer vector. All
#' values must be positive except for the chromosome.
#' The value for the chromosome may be -1 or 0.
#' -1 indicates that the chromosome value is passed to
#' the routine using the chromosome parameter.
#' 0 indicates that the chromosome value is in the snpid
#' and that the snpid has the format chromosome:other_data.
#' Default value is c(1L, 2L, 3L, 4L, 5L).
#' @param startcolumn Column number of first column with
#' genetic probabilities or dosages. Must
#' be an integer value. Default value is 6L.
#' @param impformat Number of genetic data values per
#' subject. 1 indicates dosage only, 2 indicates P(g=0)
#' and P(g=1) only, 3 indicates P(g=0), P(g=1), and
#' P(g=2). Default value is 3L.
#' @param chromosome Chromosome value to use if the
#' first value of the snpcolumns is equal to 0.
#' Default value is character().
#' @param header Indicators of whether the gen and sample files
#' have headers. If the gen file does not have a
#' header, a sample file must be included.
#' Default value is c(FALSE, TRUE).
#' @param gz Indicator if file is compressed using gzip.
#' Default value is FALSE.
#' @param index Indicator if file should be indexed. This
#' allows for faster reading of the file. Indexing a gzipped
#' file is not supported.
#' Default value is TRUE.
#' @param snpidformat Format to change the snpid to.
#' 0 indicates to use the snpid format in the file.
#' 1 indicates to change the snpid into chromosome:location,
#' 2 indicates to change the snpid into chromosome:location:referenceallele:alternateallele,
#' 3 indicates to change the snpid into chromosome:location_referenceallele_alternateallele.
#' Default value is 0.
#' @param sep Separators used in the gen file and sample files,
#' respectively. If only value is provided it is used for both
#' files. Default value is c(`"\t"`, `"\t"`)
#'
#' @return List with information about the gen file.
#' This includes family and subject IDs along with
#' a list of the SNPs in the file. Other information needed
#' to read the file is also included.
#' @export
#'
#' @examples
#' # Get file names of the gen and sample file
#' gen3afile <- system.file("extdata", "set3a.imp", package = "BinaryDosage")
#' gen3ainfo <- system.file("extdata", "set3a.sample", package = "BinaryDosage")
#'
#' # Get the information about the gen file
#' geninfo <- getgeninfo(genfiles = c(gen3afile, gen3ainfo),
#' snpcolumns = c(0L, 2L:5L))
getgeninfo <- function(genfiles,
snpcolumns = 1L:5L,
startcolumn = 6L,
impformat = 3L,
chromosome = character(),
header = c(FALSE, TRUE),
gz = FALSE,
index = TRUE,
snpidformat = 0L,
sep = c("\t", "\t")) {
if (missing(genfiles) == TRUE)
stop("No gen file specified")
if (is.character(genfiles) == FALSE)
stop("genfiles must be a character value")
if (length(genfiles) != 1 & length(genfiles) != 2)
stop("genfiles must be a character vector of length 1 or 2")
genfile <- genfiles[1]
if (length(genfiles) == 1) {
samplefile <- character()
} else {
samplefile <- genfiles[2]
if (samplefile == "")
samplefile <- character()
}
if (genfile == "")
stop("No gen file specified")
if (is.integer(snpcolumns) == FALSE)
stop("snpcolumns must be an integer vector")
if (length(snpcolumns) != 5)
stop("snpcolumns must be an integer vector of length 5")
if (min(snpcolumns[2:5]) < 1)
stop("snpcolumns values other than chromosome must be positive integers")
if (snpcolumns[1] < -1)
stop("snpcolumns chromosome value must be -1, or a non-negative integer")
if (is.integer(startcolumn) == FALSE)
stop("startcolumn must be an integer value")
if (length(startcolumn) != 1)
stop("startcolumn must be an integer vector of length 1")
if (startcolumn < 1)
stop("startcolumn must be a positive integer")
if (startcolumn <= max(snpcolumns))
stop("startcolumn value must be larger than any value in snpcolumns")
if (is.integer(impformat) == FALSE)
stop("impformat must be an integer value")
if (length(impformat) != 1)
stop("impformat must be an integer vector of length 1")
if (impformat < 1 | impformat > 3)
stop("impformat must have a value of 1, 2, or 3")
if (is.character(chromosome) == FALSE)
stop("chromosome must be a character variable")
if (length(chromosome) > 1)
stop("chromosome must be a character vector of length 0 or 1")
if (length(chromosome) == 1) {
if (chromosome == "")
chromosome = character()
}
if (length(chromosome) == 0) {
if (snpcolumns[1] == -1)
stop("No chromosome column or value provided")
} else {
if (snpcolumns[1] > -1)
stop("Both chromosome column and chromosome value provided")
}
if (is.logical(header) == FALSE)
stop("header must be a logical value")
if (length(header) != 1 & length(header) != 2)
stop("header must be a logical vector of length 1 or 2")
if (length(header) == 1)
header = c(header, TRUE)
if (header[1] == FALSE) {
if (length(samplefile) == 0)
stop("File has no header and no sample file is provided")
} else {
if (length(samplefile) > 0)
stop("header = TRUE and a sample file is provided")
}
if (is.logical(gz) == FALSE)
stop("gz must be a logical value")
if (length(gz) != 1)
stop("gz must be a logical vector of length 1")
if (is.logical(index) == FALSE)
stop("index must be a logical value")
if (length(index) != 1)
stop("index must be a logical vector of length 1")
if (gz == TRUE & index == TRUE)
stop("Indexing gzipped files is not supported.")
if (is.numeric(snpidformat) == FALSE)
stop("snpidformat must be an integer value")
if (length(snpidformat) != 1)
stop("snpidformat must be an interger vector of length 1")
if (floor(snpidformat) != snpidformat)
stop("snpidformat must be an integer value")
snpidformat <- as.integer(snpidformat)
if (snpidformat < 0 || snpidformat > 3)
stop("snpidformat must have a value of 0, 1, 2, or 3")
if (is.character(sep) == FALSE)
stop("sep must be a character value")
if (length(sep) != 1 & length(sep) != 2)
stop("sep must be a character vector of length 1 or 2")
if (length(sep) == 1)
sep = c(sep, sep)
if (sep[1] == "" | sep[2] == "")
stop("sep values cannot be empty strings")
if (header[1] == TRUE) {
if (gz == TRUE)
filecon <- gzfile(genfile, "r")
else
filecon <- file(genfile, "r")
headerline <- readLines(filecon, 1)
close(filecon)
headervalues <- unlist(strsplit(headerline, sep[1]))
if (length(headervalues) < startcolumn)
stop("Number of values in header less than startcolumn")
headervalues <- headervalues[startcolumn:length(headervalues)]
if (length(headervalues) != 2 * floor(length(headervalues) / 2))
stop("Odd number of values for family and subject ID")
fid <- headervalues[seq(1,length(headervalues) - 1, 2)]
iid <- headervalues[seq(2,length(headervalues), 2)]
samples <- data.frame(fid = fid,
sid = iid,
stringsAsFactors = FALSE)
} else {
samples <- read.table(samplefile,
header = header[2],
sep = sep[2],
stringsAsFactors = FALSE)
if (ncol(samples) == 1)
samples[,2] <- samples[,1]
samples <- samples[,1:2]
samples[,1] <- as.character(samples[,1])
samples[,2] <- as.character(samples[,2])
colnames(samples) <- c("fid", "sid")
if (samples[1,1] == "0" & samples[1,2] == "0")
samples <- samples[2:nrow(samples),]
}
if (all(samples$fid == samples$sid)) {
usesfid <- FALSE
samples$fid <- ""
} else {
usesfid <- TRUE
}
coltypes = rep("NULL", impformat * nrow(samples) + (startcolumn - 1))
if (snpcolumns[1] > 0)
coltypes[snpcolumns[1]] <- "character"
coltypes[snpcolumns[c(2, 4, 5)]] <- "character"
coltypes[snpcolumns[3]] <- "integer"
headersize <- 0
if (header[1] == TRUE)
headersize <- 1
snps <- read.table(genfile,
skip = headersize,
colClasses = coltypes,
sep = sep[1],
stringsAsFactors = FALSE)
if (snpcolumns[1] == -1) {
snps$chromosome <- chromosome
snpcolumns[1] <- startcolumn
} else if (snpcolumns[1] == 0) {
chromosomeid <- unlist(strsplit(snps[,1], ':'))
snpidentries <- length(chromosomeid) / nrow(snps)
snps$chromosome <- chromosomeid[seq(1, length(chromosomeid) + 1 - snpidentries, snpidentries)]
snpcolumns[1] <- startcolumn
}
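  # read.table kept the requested columns in ascending file-column order;
  # swap the snpid and location entries of snpcolumns and permute the
  # columns so they match the names assigned below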
x <- snpcolumns[2]
snpcolumns[2] <- snpcolumns[3]
snpcolumns[3] <- x
snps <- snps[,order(order(snpcolumns))]
colnames(snps) <- c("chromosome", "location", "snpid", "reference", "alternate")
chr1 <- snps$chromosome[1]
onechr <- FALSE
if (all(snps$chromosome == chr1))
onechr <- TRUE
if (snpidformat == 0) {
chrlocid <- unlist(paste(snps$chromosome, snps$location, sep = ':'))
if (all(chrlocid == snps$snpid)) {
snpidformat <- 1
} else {
chrlocrefaltid <- unlist(paste(snps$chromosome, snps$location,
snps$reference, snps$alternate, sep = ':'))
if (all(chrlocrefaltid == snps$snpid)) {
snpidformat <- 2
} else {
chrlocrefaltid <- unlist(paste(snps$chromosome, snps$location, sep = ':'))
chrlocrefaltid <- unlist(paste(chrlocrefaltid, snps$reference,
snps$alternate, sep = '_'))
if (all(chrlocrefaltid == snps$snpid))
snpidformat <- 3
}
}
} else if (snpidformat == 1) {
chrlocrefaltid <- unlist(paste(snps$chromosome, snps$location,
snps$reference, snps$alternate, sep = ':'))
if (all(chrlocrefaltid == snps$snpid)) {
stop("snpidformat 1 specified but GEN file uses snpidformat 2")
}
snps$snpid <- unlist(paste(snps$chromosome, snps$location, sep = ':'))
} else if (snpidformat == 2) {
snps$snpid <- unlist(paste(snps$chromosome, snps$location,
snps$reference, snps$alternate, sep = ':'))
} else {
chrlocrefaltid <- unlist(paste(snps$chromosome, snps$location, sep = ':'))
snps$snpid <- unlist(paste(chrlocrefaltid, snps$reference,
snps$alternate, sep = '_'))
}
snpinfo <- list()
if (index == FALSE) {
datasize <- integer(0)
indices <- numeric(0)
} else {
datasize <- integer(nrow(snps))
indices <- numeric(nrow(snps))
if (gz == FALSE) {
x <- GetLineLocations(genfile)
if (header[1] == TRUE)
headerlines <- 1
else
headerlines <- 0
indices <- x[(headerlines + 1):(length(x) - 1)]
for (i in 1:length(datasize))
datasize[i] <- x[headerlines + i + 1] - x[headerlines + i]
}
}
additionalinfo <- list(gzipped = gz,
headersize = headersize,
format = impformat,
startcolumn = startcolumn,
sep = sep[1])
class(additionalinfo) <- "gen-info"
retval <- list(filename = normalizePath(genfile, winslash = '/'),
usesfid = usesfid,
samples = samples,
onechr = onechr,
snpidformat = snpidformat,
snps = snps,
snpinfo = snpinfo,
datasize = datasize,
indices = indices,
additionalinfo = additionalinfo)
class(retval) <- "genetic-info"
return(retval)
}
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/R/getinfo.R
|
#' Read SNP data from a binary dosage file
#'
#' Routine to read the dosage and genetic probabilities about
#' a SNP from a binary dosage file
#'
#' @param bdinfo Information about a binary dosage file returned
#' by getbdinfo
#' @param snp The SNP to read the information about. This may
#' be the SNP ID or the index of the SNP in the snps dataset in
#' the bdinfo list
#' @param dosageonly Indicator to return the dosages only or the
#' dosages along with the genetic probabilities.
#' Default value is TRUE.
#'
#' @return
#' A list with either the dosages or the dosages and the genetic
#' probabilities.
#' @export
#'
#' @examples
#' # Get the information about the file
#' vcf1abdfile <- system.file("extdata", "vcf1a.bdose", package = "BinaryDosage")
#' bdinfo <- getbdinfo(bdfiles = vcf1abdfile)
#'
#' # Read the first SNP
#' getsnp(bdinfo, 1, FALSE)
getsnp <- function(bdinfo, snp, dosageonly = TRUE) {
if (missing(bdinfo) == TRUE)
stop("bdinfo missing")
if (is.na(match("genetic-info", class(bdinfo))) == TRUE)
stop("bdinfo is not of class genetic-info")
if (is.na(match("bdose-info", class(bdinfo$additionalinfo))) == TRUE)
stop("bdinfo does not contain information about a binary dosage file")
if (missing(snp) == TRUE)
stop("No SNP specified")
if (length(snp) != 1)
stop("snp must be of length one")
if (is.character(snp) == TRUE) {
x <- match(snp, bdinfo$snps$snpid)
if (is.na(x))
stop("Cannot find SNP in bdinfo")
snp <- x
} else if (is.numeric(snp) == TRUE) {
if (floor(snp) != snp)
stop("snp must be a character or integer value")
snp <- as.integer(floor(snp))
}
if (is.integer(snp) == FALSE)
stop("snp must be a character or integer value")
if (snp < 1 | snp > nrow(bdinfo$snps))
stop("snp value out or range")
  n <- nrow(bdinfo$samples)
  dosage <- rep(NA_real_, n)
  p0 <- rep(NA_real_, n)
  p1 <- rep(NA_real_, n)
  p2 <- rep(NA_real_, n)
  # additional buffer passed to ReadBinaryDosageData
  us <- numeric(2 * n)
ReadBinaryDosageData(bdinfo, snp, dosage, p0, p1, p2, us)
if (dosageonly == TRUE)
return(list(dosage = dosage))
return(list(dosage = dosage,
p0 = p0,
p1 = p1,
p2 = p2))
}
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/R/getsnp.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/inst/doc/bdformats.R
|
---
title: "Binary Dosage Formats"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Binary Dosage Formats}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
There are currently 4 formats for the binary dosage file.
The first three formats consist of three files: binary dosage, family, and map. The family and map files contain data frames with information about the subjects and SNPs in the binary dosage file, respectively. These data frames are saved with the <span style="font-family:Courier">saveRDS</span> command.
# Format 1
Format 1 has a header that begins with a magic word followed by a number indicating whether the file is in format 1.1 or 1.2. It is then followed by the genotype information. The total header length is 8 bytes.
## Format 1.1
In format 1.1 the only value stored is the dosage. The dosage values are multiplied by $2^{15} - 2$ (0x7ffe) and stored as short integers. If a value is missing it is stored as $2^{16} - 1$ (0xffff). Each subject requires 2 bytes per SNP. The total size of the data section is 2 times the number of subjects times the number of SNPs bytes.
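As a quick illustration (a sketch, assuming the 0x7ffe scale factor described above):
```{r}
# Round-trip a dosage of 0.76 through the format 1.1 encoding
stored <- as.integer(round(0.76 * 0x7ffe))
stored
stored / 0x7ffe
```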
## Format 1.2
In format 1.2 the only values stored are $\Pr(g=1)$ and $\Pr(g=2)$. These values are multiplied by $2^{16} - 2$ (0xfffe) and stored as short integers. A value of $2^{16} - 1$ (0xffff) indicates a value is missing. The total size of the data section is 4 times the number of subjects times the number of SNPs bytes.
# Format 2
Format 2 has the same header as format 1.
## Format 2.1
The format of the data section is the same as format 1.1 except the dosage values are multiplied by 20,000 (0x4e20). The missing value is still $2^{16} - 1$ (0xffff).
## Format 2.2
The format of the data section is the same as format 1.2 except the probability values are also multiplied by 20,000 (0x4e20). The missing value is still $2^{16} - 1$ (0xffff).
**Note:** Format 2 was adopted when it was discovered that the values returned from the imputation programs were limited to 3 or 4 digits past the decimal point. When results from fitting models were compared between the binary dosage file and the original VCF or GEN file, there were slight but unimportant differences. It was considered desirable to be able to return the values exactly as they appear in the original imputation file.
# Format 3
Formats 3.1 and 3.2 have a header similar to formats 1 and 2, but the number of subjects and SNPs was added to the header to avoid problems associating the wrong family or map file with the binary dosage file.
Formats 3.3 and 3.4 have a header similar to formats 1 and 2, but the md5 hashes of the family and map data frames were added to the header to avoid problems associating the wrong family or map file with the binary dosage file.
## Format 3.1 and 3.3
The data sections of formats 3.1 and 3.3 are the same as format 2.1.
## Format 3.2
Each SNP in the data section begins with an integer value identifying how long the section is for that SNP. The data are then stored as described below under Minimizing the data stored.
## Format 3.4
Format 3.4 stores the data in a format similar to 3.2, but the data section begins with the lengths of all the SNP sections and is then followed by the genotype information.
# Format 4
Format 4 takes the data that is in the family and map files and moves it into the header of the binary dosage file. The first section of the header has the magic word and the format. This is followed by information on where the family, map, and genotype data are stored in the file. After the header there is the family data, followed by the map data, and then the imputation data.
## Format 4.1 and 4.3
The data sections of formats 4.1 and 4.3 are the same as format 2.1.
## Format 4.2 and 4.4
The data sections of formats 4.2 and 4.4 are the same as formats 3.2 and 3.4, respectively.
# Minimizing the data stored
A lot is known about the imputation data. We know the following
$$\Pr(g=0) + \Pr(g=1) + \Pr(g=2) = 1 $$
$$ d = \Pr(g=1) + 2\Pr(g=2)$$
where $d$ is the dosage. This means we only need to know two of the values to calculate the other two. In the <span style="font-family:Courier">BinaryDosage</span> package, the dosage and $\Pr(g=1)$ are used.
It is quite often the case that either $\Pr(g=0)$ or $\Pr(g=2)$ is 0. In this case, knowing the dosage is enough.
$$
\Pr(g = 1) = \left\{\begin{array}{ll}%
d & \; \Pr(g=2)=0, d \leq 1\\%
2 - d & \; \Pr(g=0) = 0, d > 1 %
\end{array}\right.
$$
Once the dosage and $\Pr(g=1)$ are known, the other values can be quickly calculated.
$$\Pr(g=2) = \frac{d - \Pr(g=1)}{2}$$
$$\Pr(g=0) = 1 - \Pr(g=1) - \Pr(g=2)$$
These formulae work well, but sometimes there is round-off error in the imputation values. In these cases the above equations can't be used to recover the exact imputation values, and all four values, $d$, $\Pr(g=0)$, $\Pr(g=1)$, and $\Pr(g=2)$, have to be saved. Fortunately this is not a common occurrence.
Since the values stored are short integers of 2 bytes in length, only the last 15 bits are used. This allows the 16th bit to be used as an indicator. For each subject at each SNP the first value saved is the dosage. If its 16th bit is 0, either $\Pr(g=0)$ or $\Pr(g=2)$ is 0 and the other values can be calculated as described above. If its 16th bit is 1, the value of $\Pr(g=1)$ follows. If the 16th bit of that $\Pr(g=1)$ value is 0, the above equations can be used to calculate $\Pr(g=0)$ and $\Pr(g=2)$; if it is 1, the next two values stored are $\Pr(g=0)$ and $\Pr(g=2)$, respectively.
**Note:** Use of this method generally results in 2.2 to 2.4 bytes needed to store each SNP for each subject.
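The following is a minimal sketch of this decoding logic (not the package's internal reader; the helper name and the 10,000 scale factor are assumptions made for illustration):
```{r}
# Decode one subject's values stored under the minimized format.
# 'values' is an integer vector of the 16-bit words read for that subject.
decodeminimized <- function(values, scale = 10000) {
  flag <- 0x8000L
  d <- bitwAnd(values[1], 0x7fffL) / scale
  if (bitwAnd(values[1], flag) == 0L) {
    # Either Pr(g=0) or Pr(g=2) is 0; recover Pr(g=1) from the dosage
    p1 <- if (d <= 1) d else 2 - d
  } else {
    p1 <- bitwAnd(values[2], 0x7fffL) / scale
    if (bitwAnd(values[2], flag) != 0L) {
      # Round-off case: Pr(g=0) and Pr(g=2) were stored explicitly
      return(c(dosage = d, p0 = values[3] / scale, p1 = p1,
               p2 = values[4] / scale))
    }
  }
  p2 <- (d - p1) / 2
  c(dosage = d, p0 = 1 - p1 - p2, p1 = p1, p2 = p2)
}

# A dosage of 0.5 with no flag set decodes as Pr(g=1) = 0.5
decodeminimized(5000L)
```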
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/inst/doc/bdformats.Rmd
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(BinaryDosage)
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/inst/doc/geneticfileinfo.R
|
---
title: "Genetic File Information"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Genetic File Information}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(BinaryDosage)
```
The routines <span style="font-family:Courier">getbdinfo</span>, <span style="font-family:Courier">getvcfinfo</span>, and <span style="font-family:Courier">getgeninfo</span> return a list with information about the data in the files. The list returned by each of these routines contains a section common to them all and a list <span style="font-family:Courier">additionalinfo</span> that is specific to the file type.
## Common section
The common section has the following elements
- filename - Character value with the complete path and file name of the file with the genetic data
- usesfid - Logical value indicating if the subject data has family IDs.
- samples - Data frame containing the following information about the subjects
+ fid - Character value with family IDs
+ sid - Character value with the individual IDs
- onechr - Logical value indicating if all the SNPs are on the same chromosome
- snpidformat - Integer indicating the format of the SNP IDs as follows
+ 0 - Unknown for VCF and GEN files or user specified for binary dosage files
+ 1 - chromosome:location
+ 2 - chromosome:location:referenceallele:alternateallele
+ 3 - chromosome:location_referenceallele_alternateallele
- snps - Data frame containing the following values
+ chromosome - Character value indicating what chromosome the SNP is on
+ location - Integer value with the location of the SNP on the chromosome
+ snpid - Character value with the ID of the SNP
+ reference - Character value of the reference allele
+ alternate - Character value of the alternate allele
- snpinfo - List that contains the following information
+ aaf - numeric vector with the alternate allele frequencies
+ maf - numeric vector with the minor allele frequencies
+ avgcall - Numeric vector with the imputation average call
+ rsq - Numeric vector with the imputation r squared value
- datasize - Numeric vector indicating the size of data in the file for each SNP
- indices - Numeric vector indicating the starting location in the file for each SNP
The list returned has its class value set to "genetic-info".
The <span style="font-family:Courier">datasize</span> and <span style="font-family:Courier">indices</span> values are only returned if the parameter <span style="font-family:Courier">index</span> is set equal to <span style="font-family:Courier">TRUE</span>.
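For example, the common section returned by <span style="font-family:Courier">getbdinfo</span> can be examined using the vcf1a.bdose file included with the package:
```{r}
bdfile <- system.file("extdata", "vcf1a.bdose", package = "BinaryDosage")
bdinfo <- getbdinfo(bdfiles = bdfile)
bdinfo$usesfid
head(bdinfo$samples)
head(bdinfo$snps)
```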
## Binary Dosage Additional Information
The additional information returned for binary dosage files contains the following information.
- format - numeric value with the format of the binary dosage file
- subformat - numeric value with the subformat of the binary dosage file
- headersize - integer value with the size of the header in the binary dosage file
- numgroups - integer value of the number of groups of subjects in the binary dosage file. This is usually the number of binary dosage files merged together to form the file
- groups - integer vector with size of each of the groups
This list has its class value set to "bdose-info".
## VCF File Additional Information
The additional information returned for VCF files contains the following information.
- gzipped - Logical value indicating if the file has been compressed using gzip
- headerlines - Integer value indicating the number of lines in the header
- headersize - Numeric value indicating the size of the header in bytes
- quality - Character vector containing the values in the QUAL column
- filter - Character vector containing the values in the FILTER column
- info - Character vector containing the values in the INFO column
- format - Character vector containing the values in the FORMAT column
- datacolumns - Data frame summarizing the entries in the FORMAT value containing the following information
+ numcolumns - Integer value indicating the number of values in the FORMAT value
+ dosage - Integer value indicating the column containing the dosage value
+ genotypeprob - Integer value indicating the column containing the genotype probabilities
+ genotype - Integer value indicating the column containing the genotype call
This list has its class value set to "vcf-info".
The values for quality, filter, info, and format can have a length of 0 if all the values are missing. They will have a length of 1 if all the values are equal. The number of rows in the datacolumns data frame will be equal to the length of the format value.
## GEN File Additional Information
The additional information returned for GEN files contains the following information.
- gzipped - Logical value indicating if the GEN file is compressed using gz
- headersize - Integer value indicating the size of the header in bytes
- format - Integer value indicating the number of genotype probabilities for each subject with the following meanings
+ 1 - Dosage only
+ 2 - $\Pr(g=0)$ and $\Pr(g=1)$
+ 3 - $\Pr(g=0)$, $\Pr(g=1)$, and $\Pr(g=2)$
- startcolumn - Integer value indicating in which column the genetic data starts
- sep - Character value indicating what value separates the columns
$g$ indicates the number of alternate alleles the subject has.
This list has its class value set to "gen-info".
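For example (using the set3a files shipped with the package, as in the <span style="font-family:Courier">getgeninfo</span> examples):
```{r}
gen3afile <- system.file("extdata", "set3a.imp", package = "BinaryDosage")
gen3asample <- system.file("extdata", "set3a.sample", package = "BinaryDosage")
geninfo <- getgeninfo(genfiles = c(gen3afile, gen3asample),
                      snpcolumns = c(0L, 2L:5L))
geninfo$additionalinfo
```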
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/inst/doc/geneticfileinfo.Rmd
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(BinaryDosage)
## ---- eval = T, echo = T, message = F, warning = F, tidy = T------------------
bd1afile <- system.file("extdata", "vcf1a.bdose", package = "BinaryDosage")
bd1bfile <- system.file("extdata", "vcf1b.bdose", package = "BinaryDosage")
bd1file <- tempfile()
bdmerge(mergefiles = bd1file, bdfiles = c(bd1afile, bd1bfile))
bd1ainfo <- getbdinfo(bd1afile)
bd1binfo <- getbdinfo(bd1bfile)
bd1info <- getbdinfo(bd1file)
nrow(bd1ainfo$samples)
nrow(bd1binfo$samples)
nrow(bd1info$samples)
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/inst/doc/mergingfiles.R
|
---
title: "Merging Files"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Merging Files}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(BinaryDosage)
```
Quite often subjects have their genotypes imputed in batches. The files returned by these imputations can be converted into binary dosage files. If the resulting binary dosage files contain the same SNPs for different subjects, they can be merged into a single file using the bdmerge routine.
## bdmerge
The bdmerge routine takes the following parameters
- mergefiles - A character vector of the binary dosage file, family file, and map file names
- format - Integer value indicating which format of the binary dosage file should be used for the merged files
- subformat - Integer value indicating which subformat should be used for the merged files
- bdfiles - A character vector of the binary dosage files to merge
- famfiles - Character vector of the family files associated with the binary dosage files to merge
- mapfiles - Character vector of the map files associated with the binary dosage files to merge
- onegroup - Logical value indicating if the merged binary dosage file saves SNP summary information as a single group rather than one group per merged file
- bdoptions - Character vector indicating which SNP summary information should be calculated for the merged file. This cannot be used if onegroup is set to FALSE
- snpjoin - Character value indicating if an inner or outer join is done for the SNPs
The following code merges *vcf1a.bdose* and *vcf1b.bdose* into one binary dosage file. It then displays the number of subjects in each file.
``` {r, eval = T, echo = T, message = F, warning = F, tidy = T}
bd1afile <- system.file("extdata", "vcf1a.bdose", package = "BinaryDosage")
bd1bfile <- system.file("extdata", "vcf1b.bdose", package = "BinaryDosage")
bd1file <- tempfile()
bdmerge(mergefiles = bd1file, bdfiles = c(bd1afile, bd1bfile))
bd1ainfo <- getbdinfo(bd1afile)
bd1binfo <- getbdinfo(bd1bfile)
bd1info <- getbdinfo(bd1file)
nrow(bd1ainfo$samples)
nrow(bd1binfo$samples)
nrow(bd1info$samples)
```
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/inst/doc/mergingfiles.Rmd
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/inst/doc/r2estimates.R
|
---
title: "Estimating Imputed R-squares"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Estimating Imputed R-squares}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
A common way to measure the imputation $r^2$ is to calculate the variance of the imputed allele probabilities and divide it by the variance the alleles would have if they were perfectly imputed. An allele is perfectly imputed if $\Pr(a_i = 1)$ equals 0 or 1 for all $i$.
The variance of the alleles when they are perfectly imputed is $q(1-q)$, where $q$ is the alternate allele frequency. Given only the imputation data, we do not know what $q$ is in the general population. However, we can estimate it using the dosage values for each subject.
$$
\hat q = \sum_{i = 1}^{N}\frac{d_i}{2N}
$$
where the dosage is calculated as
$$
d_i = \Pr(g_i = 1) + 2\Pr(g_i=2)
$$
Another problem with the dosage data is that we don't have the probabilities for each allele. Instead we have $\Pr(g_i=0), \Pr(g_i=1),$ and $\Pr(g_i=2)$. If we assume that a subject's two allelic probabilities, $q_1$ and $q_2$, are independently imputed, we know the following
$$
q_1(1-q_2) + (1-q_1)q_2 = \Pr(g=1)
$$
and
$$
q_1 q_2 = \Pr(g=2)
$$
These equations can be solved resulting in the following values
$$
q_1 = \frac{d - \sqrt{d^2 - 4\Pr(g = 2)}}{2}\\%
q_2 = \frac{d + \sqrt{d^2 - 4\Pr(g = 2)}}{2}
$$
There can be some problems using the above equations. Sometimes the value inside the radical is negative. This can be caused by round-off error. If the value is negative and close to zero, it can be set to zero.
**Note:** The documentation for minimac and Impute 2 indicates that the values for the two alleles are imputed independently.
Since each subject has two alleles we can let $q_1$ to $q_N$ represent the first allele of each subject and $q_{N+1}$ to $q_{2N}$ represent the second allele. Given this we can calculate all the $q$'s as follows
$$
q_i = \left\{\begin{array}{ll}%
\frac{d_i - \sqrt{d_i^2 - 4\Pr(g_i = 2)}}{2} & \; 0<i\leq N\\%
\frac{d_i + \sqrt{d_i^2 - 4\Pr(g_i = 2)}}{2} & \; N<i\leq 2N %
\end{array}\right.
$$
Once the $q$'s have been calculated, the imputation $r^2$ can be estimated as follows
$$
\hat r^2 = \frac{\sum_{i = 1}^{2N}\frac{(q_i - \hat q)^2}{2N}}{\hat q(1 - \hat q)}
$$
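The estimator above can be sketched in a few lines of R (the helper name is ours; <span style="font-family:Courier">dosage</span> and <span style="font-family:Courier">p2</span> are the vectors of $d_i$ and $\Pr(g_i = 2)$):
```{r}
estimater2 <- function(dosage, p2) {
  disc <- dosage^2 - 4 * p2
  disc[disc < 0] <- 0  # guard against round-off error
  # Allele probabilities q_1 to q_N and q_{N+1} to q_{2N}
  q <- c((dosage - sqrt(disc)) / 2, (dosage + sqrt(disc)) / 2)
  qhat <- mean(q)  # equals sum(dosage) / (2 * N)
  mean((q - qhat)^2) / (qhat * (1 - qhat))
}

# Perfectly imputed alleles give an r-squared of 1
estimater2(dosage = c(0, 1, 2, 2), p2 = c(0, 0, 1, 1))
```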
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/inst/doc/r2estimates.Rmd
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(BinaryDosage)
## ---- eval = T, echo = T, message = F, warning = F, tidy = T------------------
bd1afile <- system.file("extdata", "vcf1a.bdose", package = "BinaryDosage")
bd1ainfo <- getbdinfo(bdfiles = bd1afile)
## ---- eval = T, echo = T, message = F, warning = F, tidy = T------------------
aaf <- unlist(bdapply(bdinfo = bd1ainfo, func = getaaf))
altallelefreq <- data.frame(SNP = bd1ainfo$snps$snpid, aafcalc = aaf)
knitr::kable(altallelefreq, caption = "Information vs Calculated aaf", digits = 3)
## ---- eval = T, echo = T, message = F, warning = F, tidy = T------------------
snp3 <- data.frame(getsnp(bdinfo = bd1ainfo, "1:12000:T:C", FALSE))
knitr::kable(snp3[1:20,], caption = "SNP 1:12000:T:C", digits = 3)
|
/scratch/gouwar.j/cran-all/cranData/BinaryDosage/inst/doc/usingbdfiles.R
|