#' Internal functions for constrained minimization
#'
#' Groups of functions used for the constrained minimization problem arising in the computation of the
#' likelihood ratio test statistics.
#'
#' @describeIn objFunction objective function to be optimized
#'
#' @param x A vector
#' @param cst A list of constants to be passed to the optimisation function
#' @return value of the objective function, its gradient, and the set of inequality and equality constraints
#'
objFunction <- function(x,cst){
return(t(cst$Z-x)%*%cst$invV%*%(cst$Z-x))
}
#' @describeIn objFunction gradient of the objective function
gradObjFunction <- function(x,cst){
return(-2*t(cst$Z-x)%*%cst$invV)
}
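# Illustration (sketch, wrapped in if (FALSE) so it is never run at package load):
# the objective is the quadratic distance t(Z - x) %*% invV %*% (Z - x) between a
# candidate point x and the unconstrained estimate Z. The toy list 'cstToy' below is
# made up and only mimics the two fields used by these functions; the real 'cst'
# built by the package contains additional elements.
if (FALSE) {
  cstToy <- list(Z = c(1, 2), invV = diag(2))
  objFunction(c(0, 0), cstToy)      # 1^2 + 2^2 = 5
  gradObjFunction(c(0, 0), cstToy)  # -2 * (Z - x) = (-2, -4)
}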
#' @describeIn objFunction function creating a symmetric matrix from its unique elements stored in a vector
symMatrixFromVect <- function(x){
n <- length(x)
if (n==1){
return(x)
}else{
p <- floor(sqrt(2*n))
m <- matrix(0,nrow=p,ncol=p)
m[lower.tri(m,diag=TRUE)] <- x
return(m+t(m)-diag(diag(m)))
}
}
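# Illustration (sketch, not run at package load): rebuilding a 3x3 symmetric matrix
# from the 6 unique elements of its lower triangle, stored column-wise as everywhere
# else in the package.
if (FALSE) {
  symMatrixFromVect(1:6)
  #      [,1] [,2] [,3]
  # [1,]    1    2    3
  # [2,]    2    4    5
  # [3,]    3    5    6
}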
#' @describeIn objFunction set of inequality constraints
ineqCstr <- function(x,cst){
r <- cst$dimsCone$dimGamma$dimSplus # nb of variances tested in each block
# n0R : nb of components in the cone corresponding to {0} or R
n0R <- cst$dimsCone$dimBeta$dim0 + cst$dimsCone$dimBeta$dimR + cst$dimsCone$dimGamma$dim0 + cst$dimsCone$dimGamma$dimR
dimMats <- r*(r+1)/2 # number of unique elements in each tested block of variances
n <- length(x) # total dimension of parameter space
ds <- cst$dimsCone$dimSigma # dimension of residual parameter
if (sum(r) == 1){
constr <- x[n-ds]
}else{
if (cst$orthan){
constr <- x[(n0R+1):(n0R+sum(r))]
}else{
constr <- numeric()
for (i in 1:length(r)){
m <- symMatrixFromVect(x[(n0R+1):(n0R+dimMats[i])]) # reconstruction of the tested block from its unique elements in a vectorized form
n0R <- n0R + dimMats[i]
if (dimMats[i]==1){
constr <- c(constr,m)
}else{
constr <- c(constr,det(m)) # add the constraint that the determinant should be positive
}
}
}
}
return(constr)
}
#' @describeIn objFunction jacobian of the inequality constraints
jacobianIneqCstr <- function(x,cst){
r <- cst$dimsCone$dimGamma$dimSplus
n0R <- cst$dimsCone$dimBeta$dim0 + cst$dimsCone$dimBeta$dimR + cst$dimsCone$dimGamma$dim0 + cst$dimsCone$dimGamma$dimR # dimensions of spaces {0} and R (corresponding to non-constrained elements)
dimMats <- r*(r+1)/2
n <- length(x)
ds <- cst$dimsCone$dimSigma
# Jacobian of the inequality constraints
jacobian <- matrix(0,nrow=length(dimMats),ncol=n)
if (cst$orthan){
jacobian <- matrix(0,nrow=r,ncol=n)
jacobian[1:r,(n0R+1):(n0R+r)] <- diag(r)
}else{
if (sum(r) == 1){
# If r=1 there is only one non-null term in the Jacobian
jacobian[1,n-ds] <- 1
}else{
# If r>1 the gradient is computed for each block of the matrix
for (i in 1:length(r)){
m <- symMatrixFromVect(x[(n0R+1):(n0R+dimMats[i])])
if (dimMats[i]==1){
jacobian[i,n0R+1] <- 1
}else{
# Jacobi's formula adapted to the symmetric parametrization:
# d det(m)/dm_ij = det(m) * (2 - 1{i=j}) * (m^{-1})_ij
derivMatrixForm <- det(m) * (2*solve(m) - solve(m)*diag(r[i]))
jacobian[i,(n0R+1):(n0R+dimMats[i])] <- derivMatrixForm[lower.tri(derivMatrixForm,diag = T)]
}
n0R <- n0R + dimMats[i]
}
}
}
return(jacobian)
}
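# Numerical sanity check (sketch, not run at package load): the closed-form derivative
# of det(m) used in jacobianIneqCstr is Jacobi's formula adapted to the symmetric
# parametrization (off-diagonal elements counted twice); it can be verified against
# finite differences on a toy 2x2 block.
if (FALSE) {
  v <- c(2, 0.3, 1.5)                               # unique elements of a 2x2 symmetric matrix
  m <- symMatrixFromVect(v)
  analytic <- det(m) * (2*solve(m) - solve(m)*diag(2))
  analytic <- analytic[lower.tri(analytic, diag = TRUE)]
  fd <- sapply(1:3, function(k){
    e <- numeric(3); e[k] <- 1e-6
    (det(symMatrixFromVect(v + e)) - det(m)) / 1e-6
  })
  all.equal(analytic, fd, tolerance = 1e-4)         # TRUE
}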
#' @describeIn objFunction set of equality constraints
eqCstr <- function(x,cst){
# equality constraints come from the fixed effects, the untested blocks, the untested covariances in partially tested blocks, and the residual
nontestedFix <- cst$dimsCone$dimBeta$dim0
nbFix <- cst$dimsCone$dimBeta$dim0 + cst$dimsCone$dimBeta$dimR
n0 <- nbFix + cst$dimsCone$dimGamma$dim0
n <- length(x)
ds <- cst$dimsCone$dimSigma
if (nontestedFix < nbFix){ # if some fixed effects are tested
constr <- x[c(1:nontestedFix,(nbFix+1):n0,(n-ds+1):n)]
}else{
if (ds>=1) constr <- x[c(1:n0,(n-ds+1):n)]
if (ds==0) constr <- x[1:n0]
}
return(constr)
}
#' @describeIn objFunction jacobian of the equality constraints
jacobianEqCstr <- function(x,cst){
n0 <- cst$dimsCone$dimBeta$dim0 + cst$dimsCone$dimBeta$dimR + cst$dimsCone$dimGamma$dim0
n <- length(x)
ds <- cst$dimsCone$dimSigma
jacobian <- matrix(0,nrow=n0+ds,ncol=n)
jacobian[1:n0,1:n0] <- diag(n0)
if (ds>=1) jacobian[(n0+1):(n0+ds),(n-ds+1):n] <- diag(ds) # identity rows for the residual equality constraints
return(jacobian)
}
# ---- end of file /scratch/gouwar.j/cran-all/cranData/varTestnlme/R/constraintsFn.R ----
#' @title Variance component testing
#'
#' @description Perform a likelihood ratio test to test whether a subset of the variances of the random effects
#' is equal to zero. The test is defined by two hypotheses, H0 and H1, and the model under H0 is
#' assumed to be nested within the model under H1. These functions can be used on objects of class
#' \code{lme}, \code{nlme}, \code{mer}, \code{lmerMod}, \code{glmerMod}, \code{nlmerMod} or \code{SaemixObject}.
#'
#' It is possible to test whether any subset of the variances is equal to zero. However, the function does not
#' currently support nested random effects, and assumes that the random effects are Gaussian.
#'
#' @details The asymptotic distribution of the likelihood ratio test is a chi-bar-square, with weights that need to be
#' approximated by Monte Carlo methods, apart from some specific cases where they are available explicitly.
#' Therefore, the p-value of the test is not exact but approximated. This computation can be time-consuming, so
#' the default behaviour of the function is to provide bounds on the exact p-value, which can be enough in practice
#' to decide whether to reject or not the null hypothesis. This is triggered by the option \code{pval.comp="bounds"}.
#' To compute an approximation of the exact p-value, one should use the option \code{pval.comp="approx"} or \code{pval.comp="both"}.
#'
#' When \code{pval.comp="approx"} or \code{pval.comp="both"}, the weights of the chi-bar-square distribution are computed using Monte Carlo,
#' which might involve a larger computing time.
#'
#' The \code{control} argument controls the options for chi-bar-square weights computation. It is a list with the
#' following elements: \code{M} the size of the Monte Carlo simulation, i.e. the number of samples generated, \code{parallel} a
#' boolean to specify if parallel computing should be used, and \code{nb_cores} the number of cores to be used in case of
#' parallel computing. Default is \code{M=5000}, \code{parallel=FALSE} and \code{nb_cores=1}.
#' If \code{parallel=TRUE} but the value of \code{nb_cores} is not given, then it is set to the number of detected
#' cores minus 1.
#'
#' @name varCompTest
#' @aliases varCompTest.lme varCompTest.lme4 varCompTest.saemix
#'
#' @param m1 a fit of the model under H1, obtained from \code{nlme}, \code{lme4}
#' or \code{saemix}
#' @param m0 a fit of the model under H0, obtained from the same package as \code{m1}, or with \code{lm}, \code{nls} or \code{glm} when the model under H0 contains no random effects
#' @param control (optional) a list of control options for the computation of the chi-bar-weights (see Details section)
#' @param pval.comp (optional) the method to be used to compute the p-value, one of: \code{"bounds"} (the default),
#' \code{"approx"} or \code{"both"} (see Details section)
#' @param fim (optional) the method to compute the Fisher Information Matrix. Options are: \code{fim="extract"} to extract the
#' FIM computed by the package which was used to fit the models, \code{fim="compute"} to evaluate the FIM using parametric
#' bootstrap, and \code{fim=I} with \code{I} a positive semidefinite matrix, for a FIM provided by the user.
#' @param output a boolean specifying whether any output should be printed in the console (defaults to \code{TRUE})
#'
#' @return An object of class \code{htest} with the following components:
#' \itemize{
#' \item \code{statistic} the likelihood ratio test statistic
#' \item \code{null.value} the value of the tested parameters under the null hypothesis
#' \item \code{alternative} a character description of the alternative hypothesis
#' \item \code{parameters} the parameters of the limiting chi-bar-square distribution: the degrees of freedom and
#' the weights of the chi-bar-square components, and the Fisher Information Matrix
#' \item \code{method} a character string indicating the name of the test
#' \item \code{p.value} a named vector containing the different p-values computed by the function: using the
#' (estimated) weights, using the random sample from the chi-bar-square distribution, and the two bounds on
#' the p-value.
#' }
#'
#' @author Charlotte Baey <\email{[email protected]}>
#'
#' @examples
#' # load lme4 package and example dataset
#' library(lme4)
#' data(Orthodont, package = "nlme")
#'
#' # fit the two models under H1 and H0
#' m1 <- lmer(distance ~ 1 + Sex + age + age*Sex +
#' (0 + age | Subject), data = Orthodont, REML = FALSE)
#' m0 <- lm(distance ~ 1 + Sex + age + age*Sex, data = Orthodont)
#'
#' # compare them (order is important: m1 comes first)
#' varCompTest(m1,m0,pval.comp="bounds")
#'
#' # using nlme
#' library(nlme)
#' m1 <- lme(distance ~ 1 + Sex + age + age*Sex,
#' random = pdSymm(Subject ~ 1 + age), data = Orthodont, method = "ML")
#' m0 <- lme(distance ~ 1 + Sex, random = ~ 1 | Subject, data = Orthodont, method = "ML")
#'
#' varCompTest(m1,m0)
#'
#' @references Baey C, Cournède P-H, Kuhn E, 2019. Asymptotic distribution of likelihood ratio test
#' statistics for variance components in nonlinear mixed effects models. \emph{Computational
#' Statistics and Data Analysis} 135:107-122.
#'
#' Silvapulle MJ, Sen PK, 2011. Constrained statistical inference: order, inequality and shape constraints.
#' @export varCompTest
#' @importFrom stats formula pchisq
varCompTest <- function(m1, m0, control = list(M=5000,parallel=FALSE,nb_cores=1,B=1000),pval.comp = "bounds",fim = "extract", output=TRUE) {
pkg1 <- pckName(m1)
pkg0 <- pckName(m0)
samePkg <- max(pkg1 == pkg0)
if(!(pkg1 %in% c("nlme", "lme4", "saemix")))
stop("'m1' must be fitted using 'nlme', 'lme4', or 'saemix' package")
if(!samePkg & !max(pkg0 %in% c("lm","nls","glm")))
stop("'m1' and 'm0' must be fitted with the same package")
UseMethod("varCompTest", m1)
}
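# Sketch (not run at package load) of how the p-value is assembled from the weights and
# degrees of freedom of the chi-bar-square components, mirroring what the methods do
# internally (see varCompTest.lme). The numbers below are made up for illustration.
if (FALSE) {
  lrt <- 3.2                  # observed likelihood ratio test statistic
  df  <- c(0, 1, 2)           # degrees of freedom of the chi-square components
  w   <- c(0.25, 0.5, 0.25)   # chi-bar-square weights (sum to 1)
  pvalue <- sum(w * stats::pchisq(lrt, df = df, lower.tail = FALSE))
  # bounds reported when the weights are not computed (pval.comp = "bounds"):
  lower <- 0.5 * sum(stats::pchisq(lrt, df = df[1:2], lower.tail = FALSE))
  upper <- 0.5 * sum(stats::pchisq(lrt, df = df[(length(df)-1):length(df)], lower.tail = FALSE))
}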
varTest <- function(m1, m0, control = list(M=5000,parallel=FALSE,nb_cores=1,B=1000),pval.comp = "bounds",fim = "extract") {
.Deprecated("varCompTest")
varCompTest(m1, m0, control = control, pval.comp = pval.comp, fim = fim, output = TRUE)
}
#' @title Extracting models' structures
#'
#' @description Functions extracting the structure of the models under both hypothesis: the number of fixed and random effects,
#' the number of tested fixed and random effects, and the residual dimension, as well as the random effects covariance structure
#'
#' @param m1 the model under H1
#' @param m0 the model under H0
#' @param randm0 a boolean stating whether the model under H0 contains any random effect
#' @return A list with the following components:
#' \item{\code{detailStruct}}{a data frame containing the list of the parameters and whether they are tested or not}
#' \item{\code{nameVarTested}}{the name of the variance components being tested}
#' \item{\code{nameFixedTested}}{the name of the fixed effects being tested}
#' \item{\code{dims}}{a list with the dimensions of fixed and random effects, tested or not tested}
#' \item{\code{structGamma}}{the structure of the covariance matrix of the random effects \code{diag}, \code{full} or
#' \code{blockDiag}}
#'
#' @name extractStruct
#' @export
extractStruct <- function(m1,m0,randm0){
UseMethod("extractStruct",m1)
}
#' @title Extract covariance matrix
#'
#' @description Extract covariance matrix of the random effects for a model fitted with lme4.
#'
#' @param m a fit from lme4 package (either linear or nonlinear)
#'
#' @export extractVarCov
extractVarCov <- function(m){
UseMethod("extractVarCov",m)
}
#' @title Approximation of the inverse of the Fisher Information Matrix via parametric bootstrap
#'
#' @description When the FIM is not available, this function provides an approximation of the FIM based on an estimate
#' of the covariance matrix of the model's parameters obtained via parametric bootstrap.
#'
#' @name bootinvFIM
#' @aliases invFIM bootstrap
#'
#' @param m a fitted model that will be used as the basis of the parametric bootstrap (providing the initial maximum
#' likelihood estimate of the parameters and the modelling framework)
#' @param B the size of the bootstrap sample
#' @param seed a seed for the random generator
#' @return the empirical covariance matrix of the parameter estimates obtained on the bootstrap sample
#' @author Charlotte Baey <\email{[email protected]}>
#'
#' @importFrom foreach %dopar%
#' @export bootinvFIM
bootinvFIM <- function(m,B=1000,seed=0){
UseMethod("bootinvFIM",m)
}
# ---- end of file /scratch/gouwar.j/cran-all/cranData/varTestnlme/R/generics.R ----
#' @name extractStruct.merMod
#' @rdname extractStruct.merMod
#'
#' @title Extract model structure
#'
#' @param m1 the fit under H1
#' @param m0 the fit under H0
#' @param randm0 a boolean indicating whether random effects are present in m0
extractStruct.merMod <- function(m1,m0,randm0){
# name of the grouping factor
nameRE <- names(m1@flist)
if (length(nameRE)>1) stop("the package does not currently support more than one level of random effects")
# dimension of the parameters
nbFixEff1 <- lme4::getME(m1,"p")
# names of fixed and random effects
if (randm0){
namesRE0 <- unlist(m0@cnms)
namesFE0 <- names(lme4::getME(m0,"fixef"))
nbFixEff0 <- lme4::getME(m0,"p")
}else{
namesRE0 <- NULL
if (inherits(m0,c("lm","glm"))) namesFE0 <- names(stats::coefficients(m0))
if (inherits(m0,"nls")) namesFE0 <- names(m0$m$getPars())
nbFixEff0 <- length(namesFE0)
}
namesFE1 <- names(lme4::getME(m1,"fixef"))
namesRE1 <- unlist(m1@cnms)
nameVarTested <- namesRE1[!(namesRE1%in%namesRE0)]
nameFixedTested <- namesFE1[!(namesFE1%in%namesFE0)]
nbRanEff0 <- length(namesRE0)
nbRanEff1 <- length(namesRE1)
nbRanEffTest <- nbRanEff1-nbRanEff0
#if (nbRanEff0==nbRanEff1) stop("Error: there are the same number of random effects in models m0 and m1. Please check the models' formulation.")
# Structure of the covariance matrix (diagonal, blocked or full)
nbCompVar1 <- lme4::getME(m1,"devcomp")$dims["nth"]
if (randm0){
nbCompVar0 <- lme4::getME(m0,"devcomp")$dims["nth"]
}else{
nbCompVar0 <- 0
}
diag <- (nbCompVar1==nbRanEff1)
full <- (nbCompVar1==(nbRanEff1*(nbRanEff1+1)/2))
if (nbCompVar1==1) full <- FALSE
blockDiag <- !diag & !full
struct <- c("diag","full","blockDiag")[c(diag,full,blockDiag)]
nbCov1 <- nbCompVar1 - nbRanEff1
nbCov0 <- nbCompVar0 - nbRanEff0
if (prod(namesRE1 %in% namesRE0) && nbCompVar0 == nbCompVar1) stop("the models have the same covariance structure under both hypotheses")
if (nbCov1 < nbCov0) stop("the models should be nested but there are some covariances in <m0> which are not in <m1>")
# CHECK IF ML WAS USED AND NOT REML
if (lme4::isREML(m1)) stop("the models should be fitted using Maximum Likelihood (ML) instead of Restricted ML (REML)")
if (randm0) if (lme4::isREML(m0)) stop("the models should be fitted using Maximum Likelihood (ML) instead of Restricted ML (REML)")
# retrieve names of parameters to identify their order in the FIM and in the cone
#nbTotParam <- ncol(invfim1)
if (randm0) nameParams0 <- c(namesFE0,paste0("cov_",names(lme4::getME(m0,"theta"))),"residual")
if (!randm0) nameParams0 <- c(namesFE0,"residual")
nameParams1 <- c(namesFE1,paste0("cov_",names(lme4::getME(m1,"theta"))),"residual")
paramTested <- !(nameParams1 %in% nameParams0)
# create a dataset with the list of parameters and: their type (fixed, variance or correlation)
# whether they are tested equal to 0 or not, and if they are tested, if it's as a subpart of a block
# or in a block which is fully tested
dd <- data.frame(names=nameParams1,tested=paramTested)
dd$type <- c(rep("beta",nbFixEff1),substr(dd$names[(nbFixEff1+1):nrow(dd)],1,2))
indRes <- as.numeric(rownames(dd[dd$type=="re",])) # get indices of residual parameters
dd <- dd[-indRes,]
ddco <- dd[dd$type=="co",]
if (nrow(ddco)>0){
dd$var1 <- dd$var2 <- dd$covTested <- dd$covInBlock <- NA
isVariance <- numeric()
for (i in 1:nrow(ddco)){
tmp<-gsub(paste("cov_",nameRE,".",sep=""),"",ddco$names[i]);
tmp<-strsplit(tmp,"[.]");
dd[dd$type=="co",][i,]$var1 <- tmp[[1]][1];
dd[dd$type=="co",][i,]$var2 <- tmp[[1]][2];
if (length(tmp[[1]])==1) isVariance <- c(isVariance,i);
dd[dd$type=="co",][i,]$covTested <- dd[dd$type=="co",][i,]$tested*prod(!(unlist(tmp) %in% nameVarTested)) # identify covariances that are tested without testing the associate variances
dd[dd$type=="co",][i,]$covInBlock <- dd[dd$type=="co",][i,]$tested*prod((unlist(tmp) %in% nameVarTested)) # identify covariances that are tested as part of a block of the covariance matrix tested
}
dd[nrow(dd)-nrow(ddco)+isVariance,]$type <- "sd"
dd$covInBlock[dd$type=="sd"] <- 1
}else{
dd$covInBlock[dd$type=="beta"] <- NA
dd$covInBlock[dd$type!="beta"] <- 1
}
if(blockDiag){
dimBlock1 <- lengths(m1@cnms) # nb of random effects per block
dimBlock0 <- lengths(m0@cnms)
dd$block <- 0
for(i in 1:length(dimBlock1)){
nameREinBlock <- m1@cnms[[i]]
loc <- sapply(1:length(nameREinBlock),FUN = function(x){grep(nameREinBlock[x],dd$names)})
dd$block[unlist(loc)] <- i
}
dd$block[dd$type=="beta"] <- 0
}
return(list(detailStruct=dd,
nameVarTested=nameVarTested,
nameFixedTested=nameFixedTested,
dims=list(nbFE1=nbFixEff1,nbFE0=nbFixEff0,nbRE1=nbRanEff1,nbRE0=nbRanEff0,nbCov1=nbCov1,nbCov0=nbCov0,dimSigma=1*!(inherits(m0,"glm"))),
structGamma=struct))
}
#' @name extractVarCov.merMod
#' @rdname extractVarCov.merMod
#'
#' @title Extract covariance matrix
#'
#' @description Extract covariance matrix of the random effects for a model fitted with lme4.
#'
#' @param m a fit from lme4 package (either linear or nonlinear)
#' @export extractVarCov.merMod
#' @export
extractVarCov.merMod <- function(m){
varcorr <- as.data.frame(lme4::VarCorr(m))
varcorr <- varcorr[varcorr$grp!="Residual",]
indCov <- which(!is.na(varcorr$var2))
if (length(indCov)>0){
stdev <- varcorr$sdcor[-indCov]
corrmat <- diag(1,length(stdev))
corrmat[lower.tri(corrmat,diag=FALSE)] <- varcorr$sdcor[indCov]
corrmat[upper.tri(corrmat,diag=FALSE)] <- varcorr$sdcor[indCov]
v <- diag(stdev)%*%corrmat%*%diag(stdev)
colnames(v) <- varcorr$var1[-indCov]
rownames(v) <- varcorr$var1[-indCov]
}else{
v <- diag(varcorr$vcov,nrow=nrow(varcorr))
colnames(v) <- varcorr$var1
rownames(v) <- varcorr$var1
}
return(v)
}
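# Usage sketch (not run at package load): the covariance matrix is rebuilt as
# D %*% R %*% D from the standard deviations D and correlations R reported by
# lme4::VarCorr. Uses the sleepstudy data shipped with lme4.
if (FALSE) {
  library(lme4)
  fm <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy)
  extractVarCov(fm)  # 2x2 covariance matrix of the (Intercept, Days) random effects
}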
#' @name bootinvFIM.merMod
#' @rdname bootinvFIM.merMod
#'
#' @title Compute the inverse of the Fisher Information Matrix using parametric bootstrap
#'
#' @param m the model under H1
#' @param B the bootstrap sample size
#' @param seed a seed for the random generator
#' @export bootinvFIM.merMod
#' @export
bootinvFIM.merMod <- function(m, B=1000, seed=0){
mySumm <- function(m,diagSigma=F) {
beta <- lme4::fixef(m)
resStd <- stats::sigma(m)
grpFactor <- names(lme4::getME(m,"cnms"))
vc <- lme4::VarCorr(m)
Sigma <- as.matrix(Matrix::bdiag(as.matrix(vc)))
if(diagSigma){
theta <- c(beta=beta,Gamma=diag(Sigma),sigma=resStd)
}else{
theta <- c(beta=beta,Gamma=Sigma[lower.tri(Sigma,diag = T)],sigma=resStd)
}
return(theta)
}
nonlin <- inherits(m,"nlmerMod")
# Use bootMer functions if linear or generalized linear, otherwise code our own bootstrap
if (!nonlin){
message(paste0("\t ...generating the B=",B," bootstrap samples ...\n"))
set.seed(seed)
bootstrap <- lme4::bootMer(m, mySumm, use.u = F, type = "parametric", nsim = B)
bootstrap <- bootstrap$t[, colSums(bootstrap$t != 0) > 0]
invfim <- cov(bootstrap)
namesParams <- c(names(lme4::getME(m,"fixef")),paste0("cov_",names(lme4::getME(m,"theta"))),"residual")
colnames(invfim) <- rownames(invfim) <- namesParams
}else{
beta <- lme4::fixef(m) # fixed effects
resStd <- stats::sigma(m)
grpFactor <- unique(names(lme4::getME(m,"cnms")))
vc <- lme4::VarCorr(m)
Sigma <- extractVarCov(m)
diagSigma <- Matrix::isDiagonal(Sigma)
# Generate B bootstrap samples
nind <- lme4::getME(m,"l_i")
nrandEfft <- nrow(Sigma)
namesRE <- lme4::getME(m,"cnms")
namesParams <- c(names(lme4::fixef(m)),paste0("cov_",names(lme4::getME(m,"theta"))),"sd_residual")
message(paste0("\t ...generating the B=",B," bootstrap samples ...\n"))
#no_cores <- max(1,parallel::detectCores() - 1)
# Initiate cluster
#doParallel::registerDoParallel(no_cores)
thetaBoot <- numeric()
b <- 1
tbar <- utils::txtProgressBar(min=1,max=B,char = ".", style = 3)
grpVar <- m@frame[,grpFactor]
nmeInd <- unique(grpVar)
set.seed(seed)
while (b <= B){
utils::setTxtProgressBar(tbar,b)
phi <- t(t(chol(Sigma))%*%matrix(stats::rnorm(nrandEfft*nind,0,1),ncol=nind))
betaAll <- as.data.frame(matrix(rep(beta,nind),nrow=nind,byrow = TRUE))
betaAll <- cbind(betaAll,grp=grpVar)
names(betaAll) <- c(names(beta),grpFactor)
pos <- names(betaAll)%in%unlist(namesRE) # TRUE/FALSE to identify where are the random effects
c <- 1
for (i in 1:length(pos)){
if (pos[i]){
betaAll[,i] <- c(sapply(nmeInd, FUN = function(j){betaAll[grpVar==j,i] + phi[j,c]}))
c <- c+1
}
}
# get name of response variable
responseVar <- unlist(strsplit(as.character(m@call)[2],"[~]"))[1]
responseVar <- gsub(" ","",responseVar)
# get name of internal nonlinear function
nlmod <- unlist(strsplit(as.character(m@resp$nlmod)[2],"[(]"))[1]
# get names of all variables, and identify the covariables as the complement of response variable and grouping factor
namesAllVar <- names(m@frame)
namesCov <- namesAllVar[! (namesAllVar %in% c(responseVar,grpFactor))]
# build the part of the formula with the covariables, to be found in m@frame
modAndCov <- paste(paste0("m@frame$",namesCov),collapse=",")
# build the part of the formula with the parameters
posParamInBeta <- (1:ncol(betaAll))[names(betaAll) %in% names(beta)]
modAndParam <- paste(paste0("betaAll[,",posParamInBeta,"]"),collapse=",")
simuResp <- eval(parse(text=paste0(nlmod,"(",modAndCov,",",modAndParam,")")))
d <- m@frame
d[,responseVar] <- simuResp + stats::rnorm(nrow(betaAll),0,resStd)
fitInd <- suppressWarnings(try({setTimeLimit(10)
stats::update(m, data=d, start=beta)},silent=TRUE))
#fitInd <- update(m, data=d)
if (!inherits(fitInd,"try-error")){
thetaHatBoot <- mySumm(fitInd,diagSigma)
names(thetaHatBoot) <- namesParams
thetaBoot <- rbind(thetaBoot,thetaHatBoot)
b <- b + 1
}
}
invfim <- cov(thetaBoot) # empirical covariance matrix of the bootstrap samples as an estimator of the inverse of the FIM
colnames(invfim) <- namesParams
rownames(invfim) <- namesParams
}
return(invfim)
}
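# Usage sketch (not run at package load; can be slow, so B is kept deliberately small).
# The returned matrix is the empirical covariance of the bootstrap estimates, used as
# an approximation of the inverse of the Fisher Information Matrix.
if (FALSE) {
  library(lme4)
  fm <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy, REML = FALSE)
  bootinvFIM(fm, B = 50, seed = 1)
}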
# ---- end of file /scratch/gouwar.j/cran-all/cranData/varTestnlme/R/lme4_utilities.R ----
#' @name extractFIM.lme
#' @rdname extractFIM.lme
#'
#' @title Extract FIM
#'
#' @param m the model to extract the FIM from
#' @param struct the structure of the covariance matrix (either 'diag', 'full', or 'blockDiag')
extractFIM.lme <- function(m,struct){
nameGroup <- names(m$groups)
namesFE <- names(m$coefficients$fixed)
vc <- eval(parse(text=paste("m$modelStruct$reStruct$",nameGroup,sep="")))
namesRE <- attr(vc,"Dimnames")[[1]]
apVarNu <- m$apVar # covariance matrix of TRANSFORMED variance components
if (is.character(apVarNu)) stop(paste0("in nlme package for apVar in m1: ",apVarNu,". Try to re-run with fim='compute'"))
# variances are log-transformed in nu and covariances are logit-transformed (see Pinheiro and Bates)
meanNu <- attr(apVarNu,"Pars")
nre <- length(namesRE)
if (struct == "diag"){
formulaDeltaMethod <- paste0("~exp(x",1:(nre+1),")^2") # nb of random effects + residual
vecAsFor <- Vectorize(stats::as.formula,"object")
apVarTheta <- msm::deltamethod(lapply(1:length(formulaDeltaMethod),FUN = function(i){stats::as.formula(formulaDeltaMethod[i])}), mean=meanNu, cov=apVarNu, ses=F)
colnames(apVarTheta) <- rownames(apVarTheta) <- c(paste0("sd(",namesRE,")"),"residual")
}else if (struct == "full"){
# loop over columns of apVar -> first line is diagonal elements and the others are covariance param
formulaDeltaMethod <- c(paste0("~exp(x",1:nre,")^2"))
nameParams <- paste0("sd(",namesRE,")")
for (k in (2:nre)){
minInd <- (k-1)*nre - sum(0:max(0,k-2)) + 1
maxInd <- minInd + (nre - (k-1)) - 1
formulaDeltaMethod <- c(formulaDeltaMethod,paste0("~sqrt(exp(x",k-1,")*exp(x",k:nre,"))*(exp(x",minInd:maxInd,")-1)/(exp(x",minInd:maxInd,")+1)"))
nameParams <- c(nameParams,paste0("cor(",namesRE[k-1],",",namesRE[k:nre],")"))
}
formulaDeltaMethod <- c(formulaDeltaMethod,paste0("~exp(x",length(meanNu),")"))
nameParams <- c(nameParams,"residual")
vecAsFor <- Vectorize(stats::as.formula,"object")
apVarTheta <- msm::deltamethod(lapply(1:nrow(apVarNu),FUN = function(i){stats::as.formula(formulaDeltaMethod[i])}), mean=meanNu, cov=apVarNu, ses=F)
colnames(apVarTheta) <- rownames(apVarTheta) <- nameParams
}else{ # block diag
sizeBlocks <- attr(vc,"plen")
formulaDeltaMethod <- character()
nameParams <- character()
indb <- 0
for (b in 1:length(sizeBlocks)){
nre <- floor(sqrt(2*sizeBlocks[b]))
nameREinblock <- attr(vc[[b]],"Dimnames")[[1]]
formulaDeltaMethod <- c(formulaDeltaMethod,paste0("~exp(x",(indb + 1):(indb + nre),")^2"))
nameParams <- c(nameParams,paste0("sd(",nameREinblock,")"))
if (nre > 1){
for (k in (2:nre)){
minInd <- indb + (k-1)*nre - sum(0:max(0,k-2)) + 1
maxInd <- indb + minInd + (nre - (k-1)) - 1
formulaDeltaMethod <- c(formulaDeltaMethod,paste0("~sqrt(exp(x",k-1,")*exp(x",k:nre,"))*(exp(x",minInd:maxInd,")-1)/(exp(x",minInd:maxInd,")+1)"))
nameParams <- c(nameParams,paste0("cor(",nameREinblock[k-1],",",nameREinblock[k:nre],")"))
}
}
indb <- indb + length(formulaDeltaMethod)
}
formulaDeltaMethod <- c(formulaDeltaMethod,paste0("~exp(x",length(meanNu),")^2"))
nameParams <- c(nameParams,"residual")
vecAsFor <- Vectorize(stats::as.formula,"object")
apVarTheta <- msm::deltamethod(lapply(1:length(formulaDeltaMethod),FUN = function(i){stats::as.formula(formulaDeltaMethod[i])}), mean=meanNu, cov=apVarNu, ses=F)
}
invfim <- as.matrix(Matrix::bdiag(m$varFix,apVarTheta))
colnames(invfim) <- rownames(invfim) <- c(colnames(m$varFix),colnames(apVarTheta))
return(invfim)
}
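# Minimal sketch (not run at package load) of the delta-method step used above: nlme
# stores the uncertainty of a variance component on the log-standard-deviation scale
# (nu = log(sd)); the same ~exp(x1)^2 transformation maps it back to the variance
# scale. The numbers below are made up for illustration.
if (FALSE) {
  nu    <- log(1.3)   # log standard deviation
  varNu <- 0.04       # its (approximate) variance on the transformed scale
  msm::deltamethod(~ exp(x1)^2, mean = nu, cov = varNu, ses = FALSE)
  # equals (d exp(x1)^2 / d x1)^2 * varNu = (2 * exp(2 * nu))^2 * varNu
}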
#' @name extractStruct.lme
#' @rdname extractStruct.lme
#'
#' @title Extract model structure
#'
#' @param m1 the fit under H1
#' @param m0 the fit under H0
#' @param randm0 a boolean indicating whether random effects are present in m0
extractStruct.lme <- function(m1,m0,randm0){
# name of the random grouping variable
nameRE <- names(m1$groups)
# get the residual variance structure
varStructm1 <- formula(m1$modelStruct$varStruct)
varStructm0 <- formula(m0$modelStruct$varStruct)
if (length(varStructm0) > 0 & length(varStructm1) > 0){
if (varStructm0 != varStructm1){
stop("the residual variance model should be the same under both hypotheses")
}
}
# get the structure of the random effects in m0 and m1
if (randm0){
vc0 <- eval(parse(text=paste("m0$modelStruct$reStruct$",nameRE,sep="")))
}else{
vc0 <- NULL
}
vc1 <- eval(parse(text=paste("m1$modelStruct$reStruct$",nameRE,sep="")))
# Structure of the covariance matrix
if (randm0) covStruct0 <- class(vc0)[1]
covStruct1 <- class(vc1)[1]
# names of fixed and random effects
if (randm0){
namesRE0 <- colnames(m0$coefficients$random[[1]])
namesFE0 <- names(m0$coefficients$fixed)
}else{
namesRE0 <- NULL
if (inherits(m0,"lm")) namesFE0 <- names(stats::coefficients(m0))
if (inherits(m0,"nls")) namesFE0 <- names(m0$m$getPars())
}
namesRE1 <- colnames(m1$coefficients$random[[1]])
nameVarTested <- namesRE1[!(namesRE1%in%namesRE0)]
namesFE1 <- names(m1$coefficients$fixed)
nameFixedTested <- namesFE1[!(namesFE1%in%namesFE0)]
# Throwing errors for cases not covered by the package
#if ( !!!!!!! ) stop("Error: the current version of the package does not support more than 1 level of random effects")
if (!prod(namesFE0 %in% namesFE1)) stop("the models should be nested, but it seems that some fixed effects are in m0 but not in m1")
if (!prod(namesRE0 %in% namesRE1)) stop("the models should be nested, but it seems that some random effects are in m0 but not in m1")
# dimension of the parameters
nbFixEff0 <- length(namesFE0)
nbFixEff1 <- length(namesFE1)
nbRanEff0 <- length(namesRE0)
nbRanEff1 <- length(namesRE1)
nbRanEffTest <- nbRanEff1-nbRanEff0
# retrieve names of parameters to identify their order in the FIM and in the cone
#nbTotParam <- ncol(invfim)
# get the covariance parameters
Gamma1 <- extractVarCov(m1)
covNames1 <- NULL
covNames0 <- NULL
if (length(Gamma1)>1 & !Matrix::isDiagonal(Gamma1)){
lowDiag <- Gamma1
lowDiag[lower.tri(lowDiag,diag = T)] <- NA
posNonZeros <- which(lowDiag!=0,arr.ind = TRUE)
rowNonZeros <- posNonZeros[,"row"]
colNonZeros <- posNonZeros[,"col"]
covNames1 <- sapply(1:nrow(posNonZeros),FUN=function(i){paste0("cor(",namesRE1[rowNonZeros[i]],",",namesRE1[colNonZeros[i]],")")})
}
if (randm0) {
Gamma0 <- extractVarCov(m0)
if (length(Gamma0)>1 & !Matrix::isDiagonal(Gamma0)){
lowDiag <- Gamma0
lowDiag[lower.tri(lowDiag,diag = T)] <- NA
posNonZeros <- which(lowDiag!=0,arr.ind = TRUE)
rowNonZeros <- posNonZeros[,"row"]
colNonZeros <- posNonZeros[,"col"]
covNames0 <- sapply(1:nrow(posNonZeros),FUN=function(i){paste0("cor(",namesRE0[rowNonZeros[i]],",",namesRE0[colNonZeros[i]],")")})
}
}
if (!prod(covNames0%in%covNames1)) stop("the models should be nested but there are some covariances in m0 which are not in m1")
nameParams0 <- c(namesFE0,paste0("sd(",namesRE0,")"),covNames0)
nameParams1 <- c(namesFE1,paste0("sd(",namesRE1,")"),covNames1)
paramTested <- !(nameParams1 %in% nameParams0)
dd <- data.frame(names=nameParams1,tested=paramTested)
dd$type <- c(rep("beta",nbFixEff1),substr(dd$names[(nbFixEff1+1):nrow(dd)],1,2))
if (nrow(dd[dd$type=="co",])>0){
dd$var1 <- dd$var2 <- dd$covTested <- dd$covInBlock <- NA
ddco <- dd[dd$type=="co",]
for (i in 1:nrow(ddco)){
tmp<-gsub("cor","",ddco$names[i]);
tmp<-gsub("[(]","",tmp);
tmp<-gsub(")","",tmp);
tmp<-strsplit(tmp,",");
dd[dd$type=="co",][i,]$var1 <- tmp[[1]][1];
dd[dd$type=="co",][i,]$var2 <- tmp[[1]][2];
dd[dd$type=="co",][i,]$covTested <- dd[dd$type=="co",][i,]$tested*prod(!(unlist(tmp) %in% nameVarTested)) # identify covariances that are tested without testing the associate variances
dd[dd$type=="co",][i,]$covInBlock <- dd[dd$type=="co",][i,]$tested*prod((unlist(tmp) %in% nameVarTested)) # identify covariances that are tested as part of a block of the covariance matrix tested
}
dd$covInBlock[dd$type=="sd"] <- 1
}
# Throwing errors for cases not covered by the package
#if ( !!!!!!! ) stop("Error: the current version of the package does not support more than 1 level of random effects")
#if (nbFixEff1 != nbFixEff0) stop("Error: the current version of the package does not support simultaneously testing means and variances. Models should have the same fixed effects")
if (!prod(namesRE0 %in% namesRE1)) stop("the models should be nested, but it seems that some random effects are in m0 but not in m1")
# get the dimension of the residual variance
# it should be identical under H0 and H1
if(is.null(m1$modelStruct$corStruct)){
dimSigma=1
if (randm0 & !is.null(m0$modelStruct$corStruct)) stop("The same model should be used under H0 and H1 for the residual covariance matrix")
}else{
# if no random effects under H0 then the model is fitted with lm which only allow for residual of dimension 1
if (!randm0) stop("The same model should be used under H0 and H1 for the residual covariance matrix")
dimSigma=1+length(m1$modelStruct$corStruct) # to be refined
}
nbCov1 <- length(nameParams1) - nbFixEff1 - nbRanEff1# - dimSigma
nbCov0 <- length(nameParams0) - nbFixEff0 - nbRanEff0# - dimSigma
diag <- (covStruct1 == "pdDiag" || covStruct1 == "pdIdent" || !("co" %in% dd$type))
blockDiag <- (covStruct1 == "pdBlocked")
full <- !diag & !blockDiag #!!! compound symmetry !!!
struct <- c("diag","full","blockDiag")[c(diag,full,blockDiag)]
if (blockDiag){
dimBlock1 <- lengths(vc1) # gives the number of elements in each block
dimBlock0 <- lengths(vc0)
dd$block <- 0
for(i in 1:length(dimBlock1)){
nameREinBlock <- attr(vc1[[i]],"Dimnames")[[1]]
loc <- sapply(1:length(nameREinBlock),FUN = function(x){grep(nameREinBlock[x],dd$names)})
dd$block[loc] <- i
}
dd$block[dd$type=="beta"] <- 0
dd$diagBlock <- NA
dd$testInBlock <- 0
for (i in 1:length(dimBlock1)){
dd$diagBlock[dd$block==i] <- !("co" %in% dd$type[dd$block==i])
dd$testInBlock[dd$block==i] <- max(dd$tested[dd$block==i])
}
}
nameVarTested <- gsub("[(]","",nameVarTested)
nameVarTested <- gsub("[)]","",nameVarTested)
return(list(detailStruct=dd,
nameVarTested=nameVarTested,
nameFixedTested=nameFixedTested,
dims=list(nbFE1=nbFixEff1,nbFE0=nbFixEff0,nbRE1=nbRanEff1,nbRE0=nbRanEff0,nbCov1=nbCov1,nbCov0=nbCov0,dimSigma=dimSigma),
structGamma=struct))
}
#' @name extractVarCov.lme
#' @rdname extractVarCov.lme
#'
#' @title Extract covariance matrix
#'
#' @description Extract covariance matrix of the random effects for a model fitted with nlme.
#'
#' @param m a fit from nlme package (either linear or nonlinear)
#' @export extractVarCov.lme
#' @export
extractVarCov.lme <- function(m){
if (! (inherits(m,"nlme"))){
v <- nlme::getVarCov(m)
v <- matrix(as.numeric(v),ncol=ncol(v),nrow=nrow(v))
}else{
varcorr <- nlme::VarCorr(m)
coln <- colnames(varcorr)
if ("Corr" %in% coln){
stdev <- as.numeric(varcorr[-nrow(varcorr),2])
corrmat <- diag(1,length(stdev))
# NA appear during the conversion from characters to numbers, due to the fact that only the lower
# triangular of the correlation matrix is printed -> NA can be removed safely and correspond to blanks
corrmat[lower.tri(corrmat,diag=FALSE)] <- stats::na.omit(as.numeric(varcorr[-c(1,nrow(varcorr)),-c(1:2)]))
corrmat[upper.tri(corrmat,diag=FALSE)] <- stats::na.omit(as.numeric(varcorr[-c(1,nrow(varcorr)),-c(1:2)]))
v <- diag(stdev)%*%corrmat%*%diag(stdev)
colnames(v) <- rownames(varcorr)[1:length(stdev)]
rownames(v) <- rownames(varcorr)[1:length(stdev)]
}else{
v <- diag(as.numeric(varcorr[-nrow(varcorr),1]),nrow=nrow(varcorr)-1)
}
v <- matrix(as.numeric(v),ncol=ncol(v),nrow=nrow(v))
colnames(v) <- rownames(varcorr)[1:ncol(v)]
rownames(v) <- rownames(varcorr)[1:nrow(v)]
}
return(v)
}
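# Usage sketch (not run at package load), the nlme counterpart of the lme4 variant
# above, using the Orthodont data shipped with nlme.
if (FALSE) {
  library(nlme)
  fm <- lme(distance ~ age, random = ~ 1 + age | Subject, data = Orthodont, method = "ML")
  extractVarCov(fm)  # 2x2 covariance matrix of the (Intercept, age) random effects
}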
#' @name bootinvFIM.lme
#' @rdname bootinvFIM.lme
#'
#' @title Compute the inverse of the Fisher Information Matrix using parametric bootstrap
#'
#' @param m the model under H1
#' @param B the bootstrap sample size
#' @param seed a seed for the random generator
#' @export bootinvFIM.lme
#' @export
bootinvFIM.lme <- function(m, B=1000, seed=0){
mySumm <- function(m,diagSigma=F) {
beta <- nlme::fixef(m)
resStd <- stats::sigma(m)
Sigma <- extractVarCov(m)
diagSigma = Matrix::isDiagonal(Sigma)
if(diagSigma){
theta <- c(beta=beta,Gamma=diag(Sigma),sigma=resStd)
}else{
theta <- c(beta=beta,Gamma=Sigma[lower.tri(Sigma,diag = T)],sigma=resStd)
}
return(theta)
}
nonlin <- inherits(m,"nlme")
if (!nonlin){
message(paste0("\t ...generating the B=",B," bootstrap samples ...\n"))
success <- F
set.seed(seed)
while(!success){
bootstrap <- try(lmeresampler::bootstrap(m, mySumm, B = B, type = "parametric"),silent=TRUE)
success <- !inherits(bootstrap,"try-error")
if (!success){
set.seed(seed)
bootstrap <- try(lmeresampler::bootstrap(m, mySumm, B = B, type = "residual"),silent=TRUE)
success <- !inherits(bootstrap,"try-error")
}
}
bootstrap <- bootstrap$replicates[, colSums(bootstrap$replicates != 0) > 0]
invfim <- cov(bootstrap)
## new version to avoid using the archived package lmeresampler
# paramBoot <- mySumm(m)
#
# message(paste0("\t ...generating the B=",B," bootstrap samples ...\n"))
# refits <- list()
#
# b <- 1
# tbar <- utils::txtProgressBar(min=1,max=B,char = ".", style = 3)
# while (b <= B){
# ystar <- nlmeU::simulateY(m, nsim = 1)
# row.names(ystar) <- 1:m$dims$N
# ystar <- data.frame(ystar)
#
# mod.fixd <- stats::as.formula(m$call$fixed)
# mod.rand <- m$call$random
# mod.data <- m$data
#
# mod.data[,as.character(mod.fixd[[2]])] <- unname(ystar)
# # create new lme
# if(is.null(mod.rand)){
# fitInd <- try(do.call("lme", args = list(fixed = mod.fixd, data = mod.data)))
# } else{
# mod.rand <- stats::as.formula(mod.rand)
# fitInd <- try(do.call("lme", args = list(fixed = mod.fixd, data = mod.data, random = mod.rand)))
# }
#
# if (!inherits(fitInd,"try-error")){
# refits[[b]] <- fitInd
# b <- b + 1
# }
# }
#
# bootstrap <- t(sapply(refits, FUN=function(fitInd){mySumm(fitInd)}))
# bootstrap <- bootstrap[, colSums(bootstrap != 0) > 0]
# invfim <- cov(bootstrap)
#
Gamma1 <- extractVarCov(m)
namesRE <- colnames(m$coefficients$random[[1]])
if (length(Gamma1)>1 & !Matrix::isDiagonal(Gamma1)){
lowDiag <- Gamma1
lowDiag[lower.tri(lowDiag,diag = F)] <- NA
posNonZeros <- which(lowDiag!=0,arr.ind = TRUE)
rowNonZeros <- posNonZeros[,"row"]
colNonZeros <- posNonZeros[,"col"]
covNames1 <- sapply(1:nrow(posNonZeros),FUN=function(i){
if (rowNonZeros[i]==colNonZeros[i]){
nme <- paste0("var(",namesRE[rowNonZeros[i]],")")
}else{
nme <- paste0("cov(",namesRE[rowNonZeros[i]],",",namesRE[colNonZeros[i]],")")
}
return(nme)
})
}else{
if (length(Gamma1) == 1){
covNames1 <- namesRE[1]
}else{
covNames1 <- paste0("var(",namesRE,")")
}
}
namesParams <- c(names(m$coefficients$fixed),covNames1,"sd_residual")
colnames(invfim) <- rownames(invfim) <- namesParams
}else{
beta <- nlme::fixef(m) # fixed effects
resStd <- stats::sigma(m)
Sigma <- extractVarCov(m)
nind <- nrow(m$coefficients$random[[1]]) # only one level of random effects
nrandEfft <- nrow(Sigma)
namesRE <- colnames(m$coefficients$random[[1]])
grpFactor <- names(m$groups)
diagSigma <- Matrix::isDiagonal(Sigma)
if (diagSigma){
namesParams <- c(names(m$coefficients$fixed),paste0("var(",namesRE,")"),"sd_residual")
}else{
Sigma2 <- Sigma
Sigma2[lower.tri(Sigma2)] <- NA
nonZeros <- which(Sigma2!=0, arr.ind = T)
namesCovParams <- character()
for (l in 1:nrow(nonZeros)){
if (nonZeros[l,1] == nonZeros[l,2]) namesCovParams <- c(namesCovParams,paste0("var(",colnames(Sigma)[nonZeros[l,1]],")"))
if (nonZeros[l,1] != nonZeros[l,2]) namesCovParams <- c(namesCovParams,paste0("cov(",colnames(Sigma)[nonZeros[l,1]],",",colnames(Sigma)[nonZeros[l,2]],")"))
}
namesParams <- c(names(m$coefficients$fixed),namesCovParams,"sd_residual")
}
message(paste0("\t ...generating the B=",B," bootstrap samples ...\n"))
thetaBoot <- numeric()
b <- 1
tbar <- utils::txtProgressBar(min=1,max=B,char = ".", style = 3)
grpVar <- m$groups[,1]
set.seed(seed)
while (b <= B){
utils::setTxtProgressBar(tbar,b)
phi <- t(t(chol(Sigma))%*%matrix(stats::rnorm(nrow(Sigma)*nind,0,1),ncol=nind)) # lower Cholesky factor so that the rows of phi have covariance Sigma
betaAll <- as.data.frame(matrix(rep(beta,nind),nrow=nind,byrow = T))
betaAll <- cbind(betaAll,grp=unique(grpVar))
names(betaAll) <- c(names(beta),grpFactor)
pos <- names(betaAll)%in%namesRE # TRUE/FALSE to identify where are the random effects
c <- 1
## !!! issue when comparing factors if grpVar is not necessarily numeric ... the factor type should be dropped
nmeInd <- unique(grpVar)
for (i in 1:length(pos)){
if (pos[i]){
betaAll[,i] <- betaAll[,i] + phi[,c]#c(sapply(nmeInd, FUN = function(j){betaAll[grpVar==j,i] + phi[j,c]}))
c <- c+1
}
}
# get name of response variable
responseVar <- unlist(strsplit(as.character(nlme::getResponseFormula(m)),"~"))[[2]]
# get name of internal nonlinear function
nlmod <- unlist(strsplit(as.character(nlme::getCovariateFormula(m))[2],"[(]"))[1]
# get names of all variables, and identify the covariables as the complement of response variable and grouping factor
data.m <- nlme::getData(m)
namesAllVar <- names(data.m)
namesCov <- namesAllVar[! (namesAllVar %in% c(responseVar,grpFactor))]
# build the part of the formula with the covariables, to be found in m@frame
modAndCov <- paste(paste0("data.m$",namesCov),collapse=",")
# build the part of the formula with the parameters
posParamInBeta <- (1:ncol(betaAll))[names(betaAll) %in% names(beta)]
modAndParam <- paste(paste0("betaAll[,",posParamInBeta,"]"),collapse=",")
namesFE <- names(m$coefficients$fixed)
simuResp <- lapply(1:nrow(betaAll),FUN = function(i){
eval(parse(text=paste0(namesFE,"=",betaAll[i,posParamInBeta],sep=";")))
with(data.m[eval(parse(text=paste0("data.m$",grpFactor,"==nmeInd[i]"))),],
eval(parse(text=as.character(nlme::getCovariateFormula(m))[2])))
})
simuResp <- do.call("c",simuResp)
data.m[,responseVar] <- simuResp + stats::rnorm(nrow(betaAll),0,resStd)
#############################
fitInd <- suppressWarnings(try({setTimeLimit(10)
stats::update(m, data=data.m)},silent=TRUE))
if (!inherits(fitInd,"try-error")){
thetaHatBoot <- mySumm(fitInd,diagSigma)
names(thetaHatBoot) <- namesParams
thetaBoot <- rbind(thetaBoot,thetaHatBoot)
b <- b + 1
}
}
invfim <- cov(thetaBoot) # empirical covariance matrix of the bootstrap samples as an estimator of the inverse of the FIM
colnames(invfim) <- namesParams
rownames(invfim) <- namesParams
}
return(invfim)
}
# ---- end of file /scratch/gouwar.j/cran-all/cranData/varTestnlme/R/lme_utilities.R ----
# methods for 'vctest' objects
#' @title Extract the Fisher Information Matrix
#' @param object an object of class vctest
#' @export fim.vctest
fim.vctest <- function(object) {
object$parameters$FIM
}
#' @title Print
#' @param x an object of class vctest
#' @param ... additional arguments
#' @export print.vctest
#' @export
print.vctest <- function(x, ...)
{
cat("Variance components testing in mixed effects models\n")
cat("Testing that:\n",paste(paste(names(x$null.value),"is equal to 0"),collapse="\n "))
cat("\nagainst the alternative that:\n",paste(x$alternative,collapse="\n "))
lrt <- x$statistic
pval_sample <- !is.na(x$p.value["pvalue.sample"])
pval_weights <- !is.na(x$p.value["pvalue.weights"])
cat("\n\n","Likelihood ratio test statistic:\n\tLRT = ",format(lrt,5))
if (pval_weights | pval_sample){
if (max(x$parameters$sdweights) == 0){
cat("\n\n","exact p-value:",x$p.value["pvalue.weights"])
}else{
if (pval_weights) cat("\n\n","p-value from estimated weights:",x$p.value["pvalue.weights"])
if (pval_sample) cat("\n","p-value from random sample:",x$p.value["pvalue.sample"])
}
}else{
if (x$p.value["pvalue.lowerbound"] != x$p.value["pvalue.upperbound"]){
cat("\n\n","bounds on p-value: lower ",format(x$p.value["pvalue.lowerbound"],5),
"upper ",format(x$p.value["pvalue.upperbound"],5))
if (is.na(x$parameters$FIM)) message("\n\nBounds based on the smallest and largest degrees of freedom of the chi-bar-square distribution components. Re-run with option 'pval.comp=\"both\"' or 'pval.comp=\"approx\"' to approximate the weights of each chi-bar-square component and the p-value.")
}else{
cat("\n\n","exact p-value: ",format(x$p.value["pvalue.lowerbound"],5))
}
}
cat("\n")
invisible(x)
}
#' @title Summary
#' @param object an object of class vctest
#' @param ... additional arguments
#' @export summary.vctest
#' @export
summary.vctest <- function(object, ...)
{
cat("Variance components testing in mixed effects models\n")
cat("Testing that:\n",paste(paste(names(object$null.value),"is equal to 0"),collapse="\n "))
cat("\nagainst the alternative that:\n",paste(object$alternative,collapse="\n "))
lrt <- object$statistic
w <- object$parameters$weights
df <- object$parameters$df
sdw <- object$parameters$sdweights
pval_sample <- !is.na(object$p.value["pvalue.sample"])
pval_weights <- !is.na(object$p.value["pvalue.weights"])
cat("\n\n","Likelihood ratio test statistic:\n\tLRT = ",format(lrt,5))
cat("\n\n","Limiting distribution:")
if (length(df) > 1){
cat("\n\tmixture of",length(df),"chi-bar-square distributions with degrees of freedom",df)
if (!prod(is.na(w))){
if (max(sdw) > 0) cat("\n\tassociated weights (and sd):",paste0(format(w,5)," (",sdw,")"))
if (max(sdw) == 0) cat("\n\tassociated (exact) weights:",paste0(format(w,5)))
}
cat("\n\n","p-value of the test:")
if (pval_weights){
if (max(sdw) == 0){
cat("\n\tfrom exact weights:",object$p.value["pvalue.weights"])
}else{
cat("\n\tfrom estimated weights:",object$p.value["pvalue.weights"])
}
}
if (pval_sample) cat("\n\tfrom random sample:",object$p.value["pvalue.sample"])
if (object$p.value["pvalue.lowerbound"] != object$p.value["pvalue.upperbound"]){
cat("\n\tbounds on p-value: lower ",format(object$p.value["pvalue.lowerbound"],5),
"upper ",format(object$p.value["pvalue.upperbound"],5))
message("\n\nBounds based on the smallest and biggest degrees of freedom of the chi-bar-square distribution components. Re-run with option 'pval.comp=\"both\" or pval.comp=\"comp\" to approximate the weights of each chi-bar-square component and the p-value.")
}
}else{
cat("chi-bar-square distribution with ",df," degree of freedom\n")
cat("p-value: ",format(object$p.value["pvalue.weights"],5))
}
cat("\n\n")
invisible(object)
}
# ---- end of file /scratch/gouwar.j/cran-all/cranData/varTestnlme/R/methods.R ----
#' Extract package name from a fitted mixed-effects model
#'
#' @param m a model with random effects fitted with \code{nlme}, \code{lme4} or \code{saemix}
#' @return a string giving the name of the package
pckName <- function(m){
if (inherits(m,"lme")) {
pkg <- "nlme"
}else if(inherits(m,c("lmerMod","glmerMod","nlmerMod","lmerModLmerTest"))){
pkg <- "lme4"
}else if(inherits(m,"SaemixObject")){
pkg <- "saemix"
}else{
pkg <- class(m)
}
return(pkg)
}
# ---- end of file /scratch/gouwar.j/cran-all/cranData/varTestnlme/R/pkgName.R ----
#' @name extractStruct.SaemixObject
#' @rdname extractStruct.SaemixObject
#'
#' @title Extract model structure
#'
#' @param m1 the fit under H1
#' @param m0 the fit under H0
#' @param randm0 a boolean indicating whether random effects are present in m0
extractStruct.SaemixObject <- function(m1,m0,randm0){
# dimension of the parameters
nbFixEff0 <- sum(m0@[email protected]>0)
nbFixEff1 <- sum(m1@[email protected]>0)
# names of fixed and random effects
if (randm0){
namesRE0 <- m0@[email protected]
namesFE0 <- m0@[email protected][m0@[email protected]>0]
}else{
namesRE0 <- NULL
if (inherits(m0,c("lm","glm"))) namesFE0 <- names(stats::coefficients(m0))
if (inherits(m0,"nls")) namesFE0 <- names(m0$m$getPars())
}
namesRE1 <- m1@[email protected]
namesFE1 <- m1@[email protected][m1@[email protected]>0]
nameFixedTested <- namesFE1[!(namesFE1%in%namesFE0)]
nameVarTested <- namesRE1[!(namesRE1%in%namesRE0)]
#nameVarTested <- gsub("omega2.","",nameVarTested)
nbRanEff0 <- length(namesRE0)
nbRanEff1 <- length(namesRE1)
nbRanEffTest <- nbRanEff1-nbRanEff0
if (!prod(namesFE0 %in% namesFE1)) stop("the models should be nested, but it seems that some fixed effects are in <m0> but not in <m1>")
if (!prod(namesRE0 %in% namesRE1)) stop("the models should be nested, but it seems that some random effects are in <m0> but not in <m1>")
covStruct1 <- m1@[email protected]
if (randm0) covStruct0 <- m0@[email protected]
diag <- (sum(covStruct1)==sum(diag(covStruct1)))
full <- (min(covStruct1)==1)
blockDiag <- !diag & !full
struct <- c("diag","full","blockDiag")[c(diag,full,blockDiag)]
nbCompVar1 <- sum(m1@[email protected])
nbCompVar0 <- sum(m0@[email protected])
nbCov1 <- (nbCompVar1 - nbRanEff1)/2
nbCov0 <- (nbCompVar0 - nbRanEff0)/2
# dimension of residual error
if (m1@[email protected] == "combined"){
dimSigma <- 2
}else{
dimSigma <- 1
}
if(nbRanEff0==nbRanEff1) stop("there are the same number of random effects in models m0 and m1. Please check the models' formulation.")
if(randm0) if (min(covStruct1-covStruct0) < 0) stop("the models should be nested, but it seems that some random effects are in m0 but not in m1")
# create a dataset with the list of parameters and: their type (fixed, variance or correlation)
# whether they are tested equal to 0 or not, and if they are tested, if it's as a subpart of a block
# or in a block which is fully tested
dd <- expand.grid(rownames(covStruct1),rownames(covStruct1))
names(dd) <- c("var1","var2")
dd$names <- paste(dd[,1],dd[,2],sep=".")
dd$lowertri <- as.vector(lower.tri(covStruct1,diag = T))
dd$zero1 <- as.vector(covStruct1==0)
dd$diag <- as.vector(lower.tri(covStruct1,diag = T)) - as.vector(lower.tri(covStruct1,diag = F))
dd$type <- ifelse(dd$diag==1,"sd","co")
if (randm0){
dd$tested <- as.vector(covStruct0==0)
}else{
dd$tested <- TRUE
}
dd <- dd[dd$lowertri & !dd$zero1,]
dd$zero1 <- dd$lowertri <- dd$diag <- NULL
dd <- rbind(data.frame(names=namesFE1,var1=rep(NA,length(namesFE1)),var2=rep(NA,length(namesFE1)),type=rep("beta",length(namesFE1)),tested=rep(FALSE,length(namesFE1))),dd)
ddco <- dd[dd$type=="co",]
if (nrow(ddco)>0){
dd$covTested <- dd$covInBlock <- NA
for (i in 1:nrow(ddco)){
dd[dd$type=="co",][i,]$covTested <- dd[dd$type=="co",][i,]$tested*(!(dd$var1[dd$type=="co"][i] %in% nameVarTested) & !(dd$var2[dd$type=="co"][i] %in% nameVarTested))
dd[dd$type=="co",][i,]$covInBlock <- dd[dd$type=="co",][i,]$tested*(dd$var1[dd$type=="co"][i] %in% nameVarTested) & (dd$var2[dd$type=="co"][i] %in% nameVarTested) # identify covariances that are tested as part of a block of the covariance matrix tested
}
dd$covInBlock[dd$type=="sd"] <- 1
}else{
dd$covInBlock[dd$type=="beta"] <- NA
dd$covInBlock[dd$type!="beta"] <- 1
}
if (blockDiag){
# identify block structure using spectral clustering
D <- apply(covStruct1,1,sum)
L <- diag(nrow(covStruct1)) - diag(1/sqrt(D))%*% covStruct1 %*% diag(1/sqrt(D))
ev <- eigen(L,symmetric=T)$values
k <- length(ev[ev<1e-10])
blocks <- anocva::spectralClustering(covStruct1,k)
dd$block <- 0
dd$block[dd$type=="sd"] <- blocks
ddco <- dd[dd$type=="co",]
for (i in 1:k){
nameREinBlock <- dd$var1[dd$block==i]
loc <- sapply(1:length(nameREinBlock),FUN = function(x){grep(nameREinBlock[x],dd$names)})
dd$block[unlist(loc)] <- i
}
dd$block[dd$type=="beta"] <- 0
}
rownames(dd) <- 1:nrow(dd) # reinitialize rownames (used to be indices of elements in the covariance matrix)
if (full){
dd$block <- 0
dd$block[dd$tested] <- 1
}
if (diag){
dd$block <- 1:nrow(dd)
dd$block[!dd$tested] <- 0
}
return(list(detailStruct=dd,
nameVarTested=nameVarTested,
nameFixedTested=nameFixedTested,
dims=list(nbFE1=nbFixEff1,nbFE0=nbFixEff0,nbRE1=nbRanEff1,nbRE0=nbRanEff0,nbCov1=nbCov1,nbCov0=nbCov0,dimSigma=dimSigma),
structGamma=struct))
}
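# Sketch (not run at package load) of the block-detection idea used above, in base R:
# for a block-diagonal 0/1 covariance structure, the number of (near-)zero eigenvalues
# of the normalized graph Laplacian equals the number of diagonal blocks.
if (FALSE) {
  covStruct <- as.matrix(Matrix::bdiag(matrix(1, 2, 2), matrix(1, 3, 3)))  # two blocks
  D <- apply(covStruct, 1, sum)
  L <- diag(nrow(covStruct)) - diag(1/sqrt(D)) %*% covStruct %*% diag(1/sqrt(D))
  sum(eigen(L, symmetric = TRUE)$values < 1e-10)  # 2 blocks detected
}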
#' @name bootinvFIM.SaemixObject
#' @rdname bootinvFIM.SaemixObject
#'
#' @title Compute the inverse of the Fisher Information Matrix using parametric bootstrap
#'
#' @param m the model under H1
#' @param B the bootstrap sample size
#' @param seed a seed for the random generator
#' @export bootinvFIM.SaemixObject
#' @export
bootinvFIM.SaemixObject <- function(m, B=1000, seed=0){
simul <- saemix::simul.saemix(m,nsim=B)
m.data <- m@data
options <- list(save=FALSE,save.graphs=FALSE,ll.is=FALSE,save=FALSE,ll.qg=FALSE,print=FALSE,warnings=FALSE,displayProgress=FALSE)
diagSigma <- Matrix::isDiagonal(m@results@omega)
fit.saemix <- lapply(1:B, FUN = function(i){
data_i <- [email protected]@datasim[[email protected]@datasim$irep==i,]
m.data@data[,[email protected]] <- data_i$ysim
log <- utils::capture.output({
res <- saemix::saemix(m@model,m.data,options)})
if (diagSigma){
theta <- c(res@results@betas,diag(res@results@omega),res@results@respar[res@results@respar>0])
}else{
theta <- c(res@results@betas,res@results@omega[lower.tri(res@results@omega,diag=T)],res@results@respar[res@results@respar>0])
}
return(theta)
})
theta <- do.call(rbind,fit.saemix)
invfim <- cov(theta)
colnames(invfim) <- rownames(invfim) <- c(m@[email protected],m@[email protected],m@[email protected][m@results@respar>0])
return(invfim)
}
# ---- end of file /scratch/gouwar.j/cran-all/cranData/varTestnlme/R/saemix_utilities.R ----
#' Useful internal functions
#'
#' @title null.desc
#' @description create null.value description
#' @param msdata a list containing the structure of the model and data, as an output from
#' \code{extractStruct.<package_name>} functions
null.desc <- function(msdata){
null.value <- rep(0,1+length(msdata$nameFixedTested))
if (length(msdata$nameFixedTested)==0){
if (length(msdata$nameVarTested)==1){
names(null.value) <- paste("variance of the random effect associated to",msdata$nameVarTested)
}else{
names(null.value) <- paste("covariance matrix of",paste(msdata$nameVarTested,collapse = " and "))
}
}else{
names(null.value) <- c(paste("mean of the random effect associated to",paste(msdata$nameFixedTested,collapse = " and ")),
paste(ifelse(length(msdata$nameVarTested)==1,"variance of ","covariance matrix of "),paste(msdata$nameVarTested,collapse = " and ")))
}
return(null.value)
}
#' @title alt.desc
#' @description create alternative description
#' @param msdata a list containing the structure of the model and data, as an output from
#' \code{extractStruct.<package_name>} functions
alt.desc <- function(msdata){
if (length(msdata$nameFixedTested)==0){
if (length(msdata$nameVarTested)==1){
alternative=c(paste("variance of the random effect associated to",msdata$nameVarTested,"> 0 "))
}else{
alternative <- paste("covariance matrix of",paste(msdata$nameVarTested,collapse = " and "),"> 0 ")
}
}else{
alternative <- c(paste("mean of the random effect associated to",paste(msdata$nameFixedTested,collapse = " is different from 0 and ")),
paste(" and ",ifelse(length(msdata$nameVarTested)==1,"variance of ","covariance matrix of "),paste(msdata$nameVarTested,collapse = " and "),"> 0 "))
}
return(alternative)
}
#' @title print.desc.message
#' @description print a message to indicate the null and alternative hypotheses
#' @param msdata a list containing the structure of the model and data, as an output from
#' \code{extractStruct.<package_name>} functions
print.desc.message <- function(msdata){
if (length(msdata$nameFixedTested)==0){
if (length(msdata$nameVarTested)==1){
message(paste("Testing that the variance of the random effect associated to",msdata$nameVarTested,"is equal to 0"))
}else if (length(msdata$nameVarTested) > 1){
message(paste("Testing that the covariance matrix of",paste(msdata$nameVarTested,sep="",collapse = " and "),"is equal to 0\n"))
}else{
covTested <- msdata$detailStruct[msdata$detailStruct$tested,]
namesToPrint <- paste(covTested$var1," and ",covTested$var2)
message("Testing that covariances between the random effects ",paste(namesToPrint,collapse=", "),
ifelse(length(namesToPrint)==1," is"," are")," equal to 0")
}
}else if (length(msdata$nameFixedTested)==1){
message("Testing that the mean of the random effect associated to ",msdata$nameFixedTested," is equal to 0 and that")
if (length(msdata$nameVarTested)==1){
message(paste(" the variance of the random effect associated to",msdata$nameVarTested,"is equal to 0"))
}else if (length(msdata$nameVarTested) > 1){
message(paste("the covariance matrix of",paste(msdata$nameVarTested,sep="",collapse = " and "),"is equal to 0\n"))
}else{
covTested <- msdata$detailStruct[msdata$detailStruct$tested,]
namesToPrint <- paste(covTested$var1," and ",covTested$var2)
message("Testing that covariances between the random effects ",paste(namesToPrint,collapse=", "),
ifelse(length(namesToPrint)==1," is"," are")," equal to 0")
}
}else{
message("Testing that the means of the random effects associated to ",paste(msdata$nameFixedTested,sep="",collapse = " and ")," are equal to 0 and that")
if (length(msdata$nameVarTested)==1){
message(paste(" the variance of the random effect associated to",msdata$nameVarTested,"is equal to 0"))
}else if (length(msdata$nameVarTested) > 1){
message(paste("the covariance matrix of",paste(msdata$nameVarTested,sep="",collapse = " and "),"is equal to 0\n"))
}else{
covTested <- msdata$detailStruct[msdata$detailStruct$tested,]
namesToPrint <- paste(covTested$var1," and ",covTested$var2)
message("Testing that covariances between the random effects ",paste(namesToPrint,collapse=", "),
ifelse(length(namesToPrint)==1," is"," are")," equal to 0")
}
}
}
#' @title print.res.message
#' @description print a message with the results
#' @param results an object of class vctest
print.res.message <- function(results){
pval_sample <- !is.na(results$p.value["pvalue.sample"])
pval_weights <- !is.na(results$p.value["pvalue.weights"])
message("Likelihood ratio test statistic:\n\tLRT = ",format(results$statistic,5))
if (pval_weights | pval_sample){
if (results$p.value["pvalue.lowerbound"] == results$p.value["pvalue.upperbound"]){
message("\np-value from exact weights: ",format(results$p.value["pvalue.weights"],5))
}else{
if (pval_weights) message("\np-value from estimated weights: ",format(results$p.value["pvalue.weights"],5))
if (pval_sample) message("\np-value from random sample: ",format(results$p.value["pvalue.sample"],5))
message("bounds on p-value: lower ",format(results$p.value["pvalue.lowerbound"],5),
"\tupper ",format(results$p.value["pvalue.upperbound"],5))
}
}else{
if (results$p.value["pvalue.lowerbound"] != results$p.value["pvalue.upperbound"]){
message("bounds on p-value: lower ",format(results$p.value["pvalue.lowerbound"],5),
"\tupper ",format(results$p.value["pvalue.upperbound"],5))
}else{
message("exact p-value: ",format(results$p.value["pvalue.lowerbound"],5))
}
}
message("\n")
}
# ---- end of file /scratch/gouwar.j/cran-all/cranData/varTestnlme/R/utils.R ----
#' @rdname varCompTest
#' @importFrom stats formula pchisq
#' @rawNamespace export(varCompTest.lme)
#' @export
varCompTest.lme <- function(m1,m0,control = list(M=5000,parallel=FALSE,nb_cores=1,B=1000),pval.comp = "bounds",fim = "extract", output=TRUE){
# Specify default arguments in control
if (!is.null(control)) {
optionNames <- names(control)
if (!"M" %in% optionNames) control$M=5000
if (!"parallel" %in% optionNames) control$parallel=FALSE
if (!"nbcores" %in% optionNames) control$nbcores=1
if (!"B" %in% optionNames) control$B = 1000
}
if (output) message("Variance components testing in mixed effects models")
randm0 <- !(max(class(m0) %in% c("lm","glm","nls"))) # are there any random effect under H0?
# Extract data structure
msdata <- extractStruct(m1,m0,randm0)
# Print message
if (output) print.desc.message(msdata)
# Compute LRT
ll1 <- m1$logLik
ll0 <- if (randm0) m0$logLik else stats::logLik(m0)
lrt <- -2*(ll0 - ll1)
# Degrees of freedom of the chi-square components
cbs.df.dims <- dfChiBarSquare(msdata)
# FIM to compute the weights
if (pval.comp != "bounds" & (length(cbs.df.dims$df) > 2)){
if (fim == "extract"){
invfim <- extractFIM.lme(m1,msdata$structGamma) # error message in case apVar is non positive definite
}else if (fim == "compute"){
if (output) message("Computing Fisher Information Matrix by bootstrap...\n")
invfim <- bootinvFIM(m1, control$B)
}else if (is.matrix(fim)){
invfim <- chol2inv(fim)
}else{
stop("Unknown option for fim. Please use fim='extract' or fim='compute'")
}
# re-order FIM to match the definition of the convex cone in the chi-bar-square computation
if (msdata$struct %in% c("diag","full")){
neworder <- as.numeric(rownames(msdata$detailStruct[order(msdata$detailStruct$tested,msdata$detailStruct$names),]))
invfim <- invfim[c(neworder,nrow(invfim)-msdata$dims$dimSigma+1),c(neworder,nrow(invfim)-msdata$dims$dimSigma+1)]
}else{
dd <- msdata$detailStruct[order(msdata$detailStruct$tested,msdata$detailStruct$block,msdata$detailStruct$covInBlock),]
neworder <- as.numeric(rownames(dd))
invfim <- invfim[c(neworder,nrow(invfim)-msdata$dims$dimSigma+1),c(neworder,nrow(invfim)-msdata$dims$dimSigma+1)]
}
fim <- chol2inv(chol(invfim))
}else{
invfim <- fim <- NA
}
# Compute chi-bar-square weights and p-value
if (length(cbs.df.dims$df)>1){
if (pval.comp %in% c("approx","both")){
cbs.weights.sample <- weightsChiBarSquare(df=cbs.df.dims$df,
V=invfim,
dimsCone=cbs.df.dims$dimsCone,
orthan=(msdata$structGamma == "diag"),
control=control)
pvalue1 <- sum(cbs.weights.sample$weights * stats::pchisq(lrt,df=cbs.df.dims$df,lower.tail = F)) # p-value from weights
pvalue2 <- mean(cbs.weights.sample$randomCBS >= lrt) # p-value from random sample
if (min(cbs.weights.sample$weights)<0) warning("\nSome weights were estimated to be negative. Results can be improved by increasing the sampling size M.\n")
}else{
pvalue1 <- NA
pvalue2 <- NA
cbs.weights.sample <- list(weights=NA,sdWeights=NA,randomCBS=NA)
}
if (length(cbs.df.dims$df)==2){
cbs.weights.sample <- list(weights=c(0.5,0.5),sdWeights=c(0,0),randomCBS=NA)
pvalue1 <- sum(cbs.weights.sample$weights * stats::pchisq(lrt,df=cbs.df.dims$df,lower.tail = F)) # p-value from weights
pvalue2 <- NA
}
}else{
pvalue1 <- stats::pchisq(lrt,cbs.df.dims$df[1],lower.tail = F)
pvalue2 <- NA
cbs.weights.sample <- list(weights=1,sdWeights=0,randomCBS=NA)
}
# Bounds on p-value
if (length(cbs.df.dims$df) > 1){
uppboundpval <- (1/2)*sum(stats::pchisq(lrt,cbs.df.dims$df[(length(cbs.df.dims$df)-1):length(cbs.df.dims$df)],lower.tail = F))
lowboundpval <- (1/2)*sum(stats::pchisq(lrt,cbs.df.dims$df[1:2],lower.tail = F))
}else{
uppboundpval <- lowboundpval <- stats::pchisq(lrt,cbs.df.dims$df,lower.tail = F)
}
# create results, object of class htest
null.value <- null.desc(msdata)
alternative <- alt.desc(msdata)
results <- list(statistic=c(LRT=lrt),
null.value=null.value,
alternative=alternative,
parameters=list(df=cbs.df.dims$df,weights=cbs.weights.sample$weights,sdweights=cbs.weights.sample$sdWeights,FIM=fim),
method="Likelihood ratio test for variance components in mixed effects models",
p.value=c(pvalue.weights=pvalue1,pvalue.sample=pvalue2,pvalue.lowerbound=lowboundpval,pvalue.upperbound=uppboundpval))
class(results) <- c("vctest","htest")
if (output) print.res.message(results)
invisible(results)
}
|
/scratch/gouwar.j/cran-all/cranData/varTestnlme/R/varCompTest.lme.R
|
#' @rdname varCompTest
#' @importFrom stats formula pchisq
#' @rawNamespace export(varCompTest.merMod)
#' @export
varCompTest.merMod <- function(m1,m0,control = list(M=5000,parallel=FALSE,nbcores=1,B=1000),pval.comp = "bounds",fim = "extract", output=TRUE){
# Specify default arguments in control
if (!is.null(control)) {
optionNames <- names(control)
if (!"M" %in% optionNames) control$M=5000
if (!"parallel" %in% optionNames) control$parallel=FALSE
if (!"nbcores" %in% optionNames) control$nbcores=1
if (!"B" %in% optionNames) control$B = 1000
}
if (output) message("Variance components testing in mixed effects models")
# Identify the packages from which m0 and m1 come from
randm0 <- !inherits(m0,c("lm","glm","nls")) # are there any random effect under H0?
# Extract data structure
msdata <- extractStruct(m1,m0,randm0)
# Print message
if (output) print.desc.message(msdata)
# Compute LRT
lrt <- -2*(stats::logLik(m0) - stats::logLik(m1))
# Degrees of freedom of the chi-square components
cbs.df.dims <- dfChiBarSquare(msdata)
# FIM to compute the weights
if (pval.comp != "bounds" & (length(cbs.df.dims$df) > 2)){
if (fim == "extract"){
if (inherits(m1,"nlmerMod")) stop("Fisher information matrix is not available for nonlinear mixed effect models fitted with nlmer() of package lme4. Please use nlme or saemix packages, or option fim='compute'.")
if (inherits(m1,"lmerMod")){
invfim <- as.matrix(merDeriv::vcov.lmerMod(m1,full=T))
}else{
invfim <- as.matrix(merDeriv::vcov.glmerMod(m1,full=T))
}
}else if (fim == "compute"){
if (output) message("Computing Fisher Information Matrix by bootstrap...")
invfim <- bootinvFIM(m1, control$B)
}else if (is.matrix(fim)){
invfim <- chol2inv(fim)
}else{
stop("Unknown option for fim. Please use fim='extract' or fim='compute'")
}
# re-order FIM
if (msdata$struct %in% c("diag","full")){
# reorder FIM so that the tested variances are at the end, before the residual covariance structure
# neworder is a vector with ordered indices from 1 to total nb of parameters - 1 (residual not accounted for)
neworder <- as.numeric(rownames(msdata$detailStruct[order(msdata$detailStruct$tested,msdata$detailStruct$names),]))
invfim <- invfim[c(neworder,nrow(invfim)-msdata$dims$dimSigma+1),c(neworder,nrow(invfim)-msdata$dims$dimSigma+1)]
}else{
# get indices for the re-ordered parameters : first the fixed effects, then the variances and covariances which are NOT tested,
# then the covariances tested that are not part of a tested block of sub-matrix, and then the blocks of subsets of variances tested
dd <- msdata$detailStruct[order(msdata$detailStruct$tested,msdata$detailStruct$block,msdata$detailStruct$covInBlock),]
neworder <- as.numeric(rownames(dd))
invfim <- invfim[c(neworder,nrow(invfim)-msdata$dims$dimSigma+1),c(neworder,nrow(invfim)-msdata$dims$dimSigma+1)]
}
fim <- chol2inv(chol(invfim))
}else{
invfim <- fim <- NA
}
# Compute chi-bar-square weights and p-value
if (length(cbs.df.dims$df)>1){
if (pval.comp %in% c("approx","both")){
cbs.weights.sample <- weightsChiBarSquare(df=cbs.df.dims$df,
V=invfim,
dimsCone=cbs.df.dims$dimsCone,
orthan=(msdata$structGamma == "diag"),
control=control)
pvalue1 <- sum(cbs.weights.sample$weights * stats::pchisq(lrt,df=cbs.df.dims$df,lower.tail = F)) # p-value from weights
pvalue2 <- mean(cbs.weights.sample$randomCBS >= lrt) # p-value from random sample
if (min(cbs.weights.sample$weights)<0) warning("Some weights were estimated to be negative. Results can be improved by increasing the sampling size M.")
}else{
pvalue1 <- NA
pvalue2 <- NA
cbs.weights.sample <- list(weights=NA,sdWeights=NA,randomCBS=NA)
}
if (length(cbs.df.dims$df)==2){
cbs.weights.sample <- list(weights=c(0.5,0.5),sdWeights=c(0,0),randomCBS=NA)
pvalue1 <- sum(cbs.weights.sample$weights * stats::pchisq(lrt,df=cbs.df.dims$df,lower.tail = F)) # p-value from weights
pvalue2 <- NA
}
}else{
pvalue1 <- stats::pchisq(lrt,cbs.df.dims$df[1],lower.tail = F)
pvalue2 <- NA
cbs.weights.sample <- list(weights=1,sdWeights=0,randomCBS=NA)
}
# Bounds on p-value
if (length(cbs.df.dims$df) > 1){
uppboundpval <- (1/2)*sum(stats::pchisq(lrt,cbs.df.dims$df[(length(cbs.df.dims$df)-1):length(cbs.df.dims$df)],lower.tail = F))
lowboundpval <- (1/2)*sum(stats::pchisq(lrt,cbs.df.dims$df[1:2],lower.tail = F))
}else{
uppboundpval <- lowboundpval <- stats::pchisq(lrt,cbs.df.dims$df,lower.tail = F)
}
# create results, object of class htest
null.value <- null.desc(msdata)
alternative <- alt.desc(msdata)
results <- list(statistic=c(LRT=lrt),
null.value=null.value,
alternative=alternative,
parameters=list(df=cbs.df.dims$df,weights=cbs.weights.sample$weights,sdweights=cbs.weights.sample$sdWeights,FIM=fim),
method="Likelihood ratio test for variance components in mixed effects models",
p.value=c(pvalue.weights=pvalue1,pvalue.sample=pvalue2,pvalue.lowerbound=lowboundpval,pvalue.upperbound=uppboundpval))
class(results) <- c("vctest","htest")
if (output) print.res.message(results)
invisible(results)
}
|
/scratch/gouwar.j/cran-all/cranData/varTestnlme/R/varCompTest.merMod.R
|
#' @rdname varCompTest
#' @importFrom stats formula pchisq
#' @rawNamespace export(varCompTest.SaemixObject)
#' @export
varCompTest.SaemixObject <- function(m1,m0,control = list(M=5000,parallel=FALSE,nbcores=1,B=1000),pval.comp = "bounds",fim = "extract", output=TRUE){
# Specify default arguments in control
if (!is.null(control)) {
optionNames <- names(control)
if (!"M" %in% optionNames) control$M=5000
if (!"parallel" %in% optionNames) control$parallel=FALSE
if (!"nbcores" %in% optionNames) control$nbcores=1
if (!"B" %in% optionNames) control$B = 1000
}
if (output) message("Variance components testing in mixed effects models")
# Identify the packages from which m0 and m1 come from
randm0 <- !inherits(m0,c("lm","glm","nls")) # are there any random effects under H0?
# Extract data structure
msdata <- extractStruct(m1,m0,randm0)
# Print message
if (output) print.desc.message(msdata)
# Compute LRT
lrt <- -2*(saemix::logLik.SaemixObject(m0) - saemix::logLik.SaemixObject(m1))
# Degrees of freedom of the chi-square components
cbs.df.dims <- dfChiBarSquare(msdata)
# FIM to compute the weights
if (pval.comp != "bounds" & (length(cbs.df.dims$df) > 2)){
if (fim == "extract"){
invfim <- chol2inv(chol(m1@results@fim))
}else if (fim == "compute"){
invfim <- bootinvFIM(m1,control$B)
}else if (is.matrix(fim)){
invfim <- chol2inv(fim)
}else{
stop("Unknown option for fim. Please use fim='extract' or fim='compute'")
}
# re-order FIM
if (msdata$struct %in% c("diag","full")){
neworder <- as.numeric(rownames(msdata$detailStruct[order(msdata$detailStruct$tested,msdata$detailStruct$names),]))
invfim <- invfim[c(neworder,nrow(invfim)-msdata$dims$dimSigma+1),c(neworder,nrow(invfim)-msdata$dims$dimSigma+1)]
}else{
dd <- msdata$detailStruct[order(msdata$detailStruct$tested,msdata$detailStruct$block,msdata$detailStruct$covInBlock),]
neworder <- as.numeric(rownames(dd))
invfim <- invfim[c(neworder,nrow(invfim)-msdata$dims$dimSigma+1),c(neworder,nrow(invfim)-msdata$dims$dimSigma+1)]
}
fim <- chol2inv(chol(invfim))
}else{
invfim <- fim <- NA
}
# Compute chi-bar-square weights and p-value
if (length(cbs.df.dims$df)>1){
if (pval.comp %in% c("approx","both")){
cbs.weights.sample <- weightsChiBarSquare(df=cbs.df.dims$df,
V=invfim,
dimsCone=cbs.df.dims$dimsCone,
orthan=(msdata$structGamma == "diag"),
control=control)
pvalue1 <- sum(cbs.weights.sample$weights * stats::pchisq(lrt,df=cbs.df.dims$df,lower.tail = F)) # p-value from weights
pvalue2 <- mean(cbs.weights.sample$randomCBS >= lrt) # p-value from random sample
if (min(cbs.weights.sample$weights)<0) warning("\nSome weights were estimated to be negative. Results can be improved by increasing the sampling size M.\n")
}else{
pvalue1 <- NA
pvalue2 <- NA
cbs.weights.sample <- list(weights=NA,sdWeights=NA,randomCBS=NA)
}
if (length(cbs.df.dims$df)==2){
cbs.weights.sample <- list(weights=c(0.5,0.5),sdWeights=c(0,0),randomCBS=NA)
pvalue1 <- sum(cbs.weights.sample$weights * stats::pchisq(lrt,df=cbs.df.dims$df,lower.tail = F)) # p-value from weights
pvalue2 <- NA
}
}else{
pvalue1 <- stats::pchisq(lrt,cbs.df.dims$df[1],lower.tail = F)
pvalue2 <- NA
cbs.weights.sample <- list(weights=1,sdWeights=0,randomCBS=NA)
}
# Bounds on p-value
if (length(cbs.df.dims$df) > 1){
uppboundpval <- (1/2)*sum(stats::pchisq(lrt,cbs.df.dims$df[(length(cbs.df.dims$df)-1):length(cbs.df.dims$df)],lower.tail = F))
lowboundpval <- (1/2)*sum(stats::pchisq(lrt,cbs.df.dims$df[1:2],lower.tail = F))
}else{
uppboundpval <- lowboundpval <- stats::pchisq(lrt,cbs.df.dims$df,lower.tail = F)
}
# create results, object of class htest
null.value <- null.desc(msdata)
alternative <- alt.desc(msdata)
results <- list(statistic=c(LRT=lrt),
null.value=null.value,
alternative=alternative,
parameters=list(df=cbs.df.dims$df,weights=cbs.weights.sample$weights,sdweights=cbs.weights.sample$sdWeights,FIM=fim),
method="Likelihood ratio test for variance components in mixed effects models",
p.value=c(pvalue.weights=pvalue1,pvalue.sample=pvalue2,pvalue.lowerbound=lowboundpval,pvalue.upperbound=uppboundpval))
class(results) <- c("vctest","htest")
if (output) print.res.message(results)
invisible(results)
}
|
/scratch/gouwar.j/cran-all/cranData/varTestnlme/R/varCompTest.saemixObject.R
|
#' Variance components testing in linear and nonlinear mixed effects models
#'
#' \code{varTestnlme} implements likelihood ratio tests for variance components in general (linear and nonlinear) mixed effects models,
#' assuming a multivariate Gaussian distribution for the random effects.
# More precisely, we consider models of the form:
# \deqn{y_{i} = g(\varphi_i, x_{i}) + \varepsilon_{i},
# \varphi_i = U_i \beta + V_i b_i \ \ , \ 1 \leq i \leq n,}
# where \eqn{y_{i}} denotes the vector of \eqn{n_i} observations of individual \eqn{i}, \eqn{1 \leq i \leq n}, \eqn{\varphi_i} the
# vector of individual parameters of individual \eqn{i}, \eqn{x_{i}} a vector of covariates, and \eqn{\varepsilon_i} an error term.
#'
#' @name varTestnlme-package
#' @aliases varTestnlme-package varTestnlme
#' @docType package
#' @author Charlotte Baey (\email{[email protected]})
#' @references Charlotte Baey, Paul-Henry Cournède, Estelle Kuhn (2019). Asymptotic distribution of likelihood ratio test statistics for
#' variance components in nonlinear mixed effects models. \emph{Computational Statistics and Data Analysis.}
#' @keywords package
#' @import methods
#' @noRd
NULL
|
/scratch/gouwar.j/cran-all/cranData/varTestnlme/R/varTestnlme-package.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(varTestnlme)
## ---- message=FALSE-----------------------------------------------------------
# Load the packages
library(nlme)
library(lme4)
library(saemix)
library(EnvStats)
## ----results='hide', message=FALSE--------------------------------------------
data("Orthodont")
# using nlme, with correlated slope and intercept
m1.nlme <- lme(distance ~ 1 + Sex + age + age*Sex, random = pdSymm(Subject ~ 1 + age), data = Orthodont, method = "ML")
m0.nlme <- lme(distance ~ 1 + Sex + age + age*Sex, random = ~ 1 | Subject, data = Orthodont, method = "ML")
# using lme4, with correlated slope and intercept
m1.lme4 <- lmer(distance ~ 1 + Sex + age + age*Sex + (1 + age | Subject), data = Orthodont, REML = FALSE)
m0.lme4 <- lmer(distance ~ 1 + Sex + age + age*Sex + (1 | Subject), data = Orthodont, REML = FALSE)
## -----------------------------------------------------------------------------
vt1.nlme <- varCompTest(m1.nlme,m0.nlme)
vt1.lme4 <- varCompTest(m1.lme4,m0.lme4)
## -----------------------------------------------------------------------------
print(vt1.nlme)
## -----------------------------------------------------------------------------
vt1.nlme$statistic
vt1.nlme$p.value
## -----------------------------------------------------------------------------
# using nlme, with uncorrelated slope and intercept
m1diag.nlme <- lme(distance ~ 1 + Sex + age + age*Sex, random = pdDiag(Subject ~ 1 + age), data = Orthodont, method = "ML")
# using lme4, with uncorrelated slope and intercept
m1diag.lme4 <- lmer(distance ~ 1 + Sex + age + age*Sex + (1 + age || Subject), data = Orthodont, REML = FALSE)
vt1diag.nlme <- varCompTest(m1diag.nlme,m0.nlme)
vt1diag.lme4 <- varCompTest(m1diag.lme4,m0.lme4)
## -----------------------------------------------------------------------------
m0noRE <- lm(distance ~ 1 + Sex + age + age*Sex, data = Orthodont)
vt <- varCompTest(m1diag.nlme,m0noRE,pval.comp = "both")
vt2 <- varCompTest(m1diag.lme4,m0noRE)
## ---- eval=FALSE--------------------------------------------------------------
# varCompTest(m1diag.nlme, m0noRE, fim = "compute", pval.comp = "both", control = list(B=100))
# varCompTest(m1diag.lme4, m0noRE, fim = "compute", pval.comp = "both", control = list(B=100))
## -----------------------------------------------------------------------------
m1 <- glmer(cbind(incidence, size - incidence) ~ period + (1 | herd),
family = binomial, data = cbpp)
m0 <- glm(cbind(incidence, size - incidence) ~ period,
family = binomial, data = cbpp)
varCompTest(m1,m0)
## -----------------------------------------------------------------------------
# with nlme
fm1Theo.nlme <- nlme(conc ~ SSfol(Dose, Time, lKe, lKa, lCl),
Theoph,
fixed = lKe + lKa + lCl ~ 1,
start=c( -2.4, 0.45, -3.2),
random = pdSymm(lKa + lCl ~ 1))
fm2Theo.nlme <- nlme(conc ~ SSfol(Dose, Time, lKe, lKa, lCl),
Theoph,
fixed = lKe + lKa + lCl ~ 1,
start=c( -2.4, 0.45, -3.2),
random = pdDiag(lCl ~ 1))
varCompTest(fm1Theo.nlme,fm2Theo.nlme)
## -----------------------------------------------------------------------------
# with lme4
Th.start <- c(lKe = -2.4, lKa = 0.45, lCl = -3.2)
nm1 <- nlmer(conc ~ SSfol(Dose , Time ,lKe , lKa , lCl) ~
0+lKe+lKa+lCl +(lKe+lKa+lCl|Subject),
nAGQ=0,
Theoph,
start = Th.start)
nm0 <- nlmer(conc ~ SSfol(Dose , Time ,lKe , lKa , lCl) ~
0+lKe+lKa+lCl +(lKa+lCl|Subject),
nAGQ=0,
Theoph,
start = Th.start)
varCompTest(nm1,nm0)
## -----------------------------------------------------------------------------
fm1 <- nlme(conc ~ SSfol(Dose, Time, lKe, lKa, lCl),
Theoph,
fixed = lKe + lKa + lCl ~ 1,
start=c( -2.4, 0.45, -3.2),
random = pdDiag(lKe + lKa + lCl ~ 1))
fm0 <- nls(conc ~ SSfol(Dose, Time, lKe, lKa, lCl),
Theoph,
start=list(lKe=-2.4,lKa=0.45,lCl=-3.2))
varCompTest(fm1,fm0)
|
/scratch/gouwar.j/cran-all/cranData/varTestnlme/inst/doc/varTestnlme.R
|
---
title: "Get started"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Get started}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(varTestnlme)
```
The **varTestnlme** package is very easy to use. Below are small examples showing how to run it for linear, generalized linear and nonlinear mixed effects models. A more detailed description is available in the paper <doi:10.18637/jss.v107.i06>.
Mixed-effect models can be run using [nlme](https://CRAN.R-project.org/package=nlme) or [lme4](https://CRAN.R-project.org/package=lme4), but also using [saemix](https://CRAN.R-project.org/package=saemix). **varTestnlme** can be used to compare two nested models using likelihood ratio tests, where the variance of at least one random effect is tested equal to 0. Fixed effects can also be tested simultaneously, as well as covariances.
```{r, message=FALSE}
# Load the packages
library(nlme)
library(lme4)
library(saemix)
library(EnvStats)
```
## Linear models
Here we focus on models run using **lme4** and **nlme**, but **saemix** can also be used.
### Case 1 : testing the variance of one random effect
We illustrate the results on the **Orthodont** dataset, which is part of the **nlme** package. We are interested in modeling the distance between the pituitary and the pterygomaxillary fissure (in mm) as a function of age, in 27 children. We will fit a random slope and random intercept model, and test whether the slope is random or not.
We first need to fit the two nested models: the full model corresponding to $H_1$ and the null model corresponding to $H_0$, where there is no random effect associated with `age`.
```{r results='hide', message=FALSE}
data("Orthodont")
# using nlme, with correlated slope and intercept
m1.nlme <- lme(distance ~ 1 + Sex + age + age*Sex, random = pdSymm(Subject ~ 1 + age), data = Orthodont, method = "ML")
m0.nlme <- lme(distance ~ 1 + Sex + age + age*Sex, random = ~ 1 | Subject, data = Orthodont, method = "ML")
# using lme4, with correlated slope and intercept
m1.lme4 <- lmer(distance ~ 1 + Sex + age + age*Sex + (1 + age | Subject), data = Orthodont, REML = FALSE)
m0.lme4 <- lmer(distance ~ 1 + Sex + age + age*Sex + (1 | Subject), data = Orthodont, REML = FALSE)
```
Now we can run the likelihood ratio test using the **varTestnlme** package. The function returns an object of S3 class `htest`.
```{r}
vt1.nlme <- varCompTest(m1.nlme,m0.nlme)
vt1.lme4 <- varCompTest(m1.lme4,m0.lme4)
```
With the `EnvStats` package loaded, nice printing options are available for objects of class `htest`:
```{r}
print(vt1.nlme)
```
It is also possible to access the components of the object using $ or [[:
```{r}
vt1.nlme$statistic
vt1.nlme$p.value
```
For the p-value, four different values are provided:
1. the p-value computed using the chi-bar-square expression as a mixture of chi-square distributions. This requires that the weights of the chi-bar-square are available, either because their exact expression is known (only in some simple cases), or because the user asks for their approximation via the option `pval.comp = "both"` or `pval.comp = "approx"`.
2. the p-value computed using a sample from the chi-bar-square distribution. This sample is used to approximate the chi-bar-square weights, and thus the corresponding p-value is only available when `pval.comp = "both"` or `pval.comp = "approx"`.
3. the lower bound on the p-value: this is always available
4. the upper bound on the p-value: this is always available
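These four values can be accessed by name from the `p.value` vector; for instance (a quick sketch, not run):

```{r, eval=FALSE}
vt1.nlme$p.value["pvalue.weights"]     # p-value from the chi-bar-square weights (may be NA, see 1.)
vt1.nlme$p.value["pvalue.sample"]      # p-value from the simulated chi-bar-square sample (may be NA, see 2.)
vt1.nlme$p.value["pvalue.lowerbound"]  # lower bound on the p-value, always available
vt1.nlme$p.value["pvalue.upperbound"]  # upper bound on the p-value, always available
```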
### Case 2 : testing the variance of one effect with uncorrelated random effects
```{r}
# using nlme, with uncorrelated slope and intercept
m1diag.nlme <- lme(distance ~ 1 + Sex + age + age*Sex, random = pdDiag(Subject ~ 1 + age), data = Orthodont, method = "ML")
# using lme4, with uncorrelated slope and intercept
m1diag.lme4 <- lmer(distance ~ 1 + Sex + age + age*Sex + (1 + age || Subject), data = Orthodont, REML = FALSE)
vt1diag.nlme <- varCompTest(m1diag.nlme,m0.nlme)
vt1diag.lme4 <- varCompTest(m1diag.lme4,m0.lme4)
```
### Case 3 : testing all the variances
In the previous sections, the weights of the chi-bar-square distribution were available explicitly. However, this is not always the case.
Since the computation of these weights can be time consuming, by default the function only computes bounds on the p-value. In many cases this is enough to decide whether or not to reject the null hypothesis. If more precision is wanted or needed, it can be requested via the option `pval.comp`, which then needs to be set to either `pval.comp="approx"` or `pval.comp="both"`. In both cases, the `control` argument can be used to control the computation process. It is a list with three slots: `M` (default 5000), the size of the Monte Carlo sample, `parallel` (default `FALSE`), whether the computation should be parallelized, and `nbcores` (default `1`), the number of cores to use for parallel computing (an example call using `control` is shown after the next chunk).
```{r}
m0noRE <- lm(distance ~ 1 + Sex + age + age*Sex, data = Orthodont)
vt <- varCompTest(m1diag.nlme,m0noRE,pval.comp = "both")
vt2 <- varCompTest(m1diag.lme4,m0noRE)
```
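For instance, the weight approximation could be requested with a larger Monte Carlo sample and parallel computation as follows (a sketch, not run; adapt `nbcores` to your machine):

```{r, eval=FALSE}
varCompTest(m1diag.nlme, m0noRE, pval.comp = "approx",
            control = list(M = 10000, parallel = TRUE, nbcores = 2))
```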
By default, the FIM is extracted from the packages, but it is also possible to compute it via parametric bootstrap. In this case, simply use the option `fim="compute"`. The default bootstrap sampling size is `B=1000` but it can be changed.
To compute the p-value from the (approximated) chi-bar-square weights, one can use
```{r, eval=FALSE}
varCompTest(m1diag.nlme, m0noRE, fim = "compute", pval.comp = "both", control = list(B=100))
varCompTest(m1diag.lme4, m0noRE, fim = "compute", pval.comp = "both", control = list(B=100))
```
## Generalized linear model
```{r}
m1 <- glmer(cbind(incidence, size - incidence) ~ period + (1 | herd),
family = binomial, data = cbpp)
m0 <- glm(cbind(incidence, size - incidence) ~ period,
family = binomial, data = cbpp)
varCompTest(m1,m0)
```
## Nonlinear model
Testing that one variance is equal to 0 in a model with two correlated random effects, using the Theophylline dataset and the nlme package.
```{r}
# with nlme
fm1Theo.nlme <- nlme(conc ~ SSfol(Dose, Time, lKe, lKa, lCl),
Theoph,
fixed = lKe + lKa + lCl ~ 1,
start=c( -2.4, 0.45, -3.2),
random = pdSymm(lKa + lCl ~ 1))
fm2Theo.nlme <- nlme(conc ~ SSfol(Dose, Time, lKe, lKa, lCl),
Theoph,
fixed = lKe + lKa + lCl ~ 1,
start=c( -2.4, 0.45, -3.2),
random = pdDiag(lCl ~ 1))
varCompTest(fm1Theo.nlme,fm2Theo.nlme)
```
Testing that one variance is null in a model with 3 correlated random effects, using the Theophylline dataset and the lme4 package.
```{r}
# with lme4
Th.start <- c(lKe = -2.4, lKa = 0.45, lCl = -3.2)
nm1 <- nlmer(conc ~ SSfol(Dose , Time ,lKe , lKa , lCl) ~
0+lKe+lKa+lCl +(lKe+lKa+lCl|Subject),
nAGQ=0,
Theoph,
start = Th.start)
nm0 <- nlmer(conc ~ SSfol(Dose , Time ,lKe , lKa , lCl) ~
0+lKe+lKa+lCl +(lKa+lCl|Subject),
nAGQ=0,
Theoph,
start = Th.start)
varCompTest(nm1,nm0)
```
Testing for the presence of randomness in the model, using the nlme package.
```{r}
fm1 <- nlme(conc ~ SSfol(Dose, Time, lKe, lKa, lCl),
Theoph,
fixed = lKe + lKa + lCl ~ 1,
start=c( -2.4, 0.45, -3.2),
random = pdDiag(lKe + lKa + lCl ~ 1))
fm0 <- nls(conc ~ SSfol(Dose, Time, lKe, lKa, lCl),
Theoph,
start=list(lKe=-2.4,lKa=0.45,lCl=-3.2))
varCompTest(fm1,fm0)
```
We can see that there is no need to approximate the weights of the chi-bar-square distribution since the bounds on the p-value are sufficient to reject the null hypothesis at any "classical" level.
|
/scratch/gouwar.j/cran-all/cranData/varTestnlme/inst/doc/varTestnlme.Rmd
|
---
title: "Estimation of the Fisher Information Matrix"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Estimation of the Fisher Information Matrix}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
LOCAL <- identical(Sys.getenv("LOCAL"), "true")
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
purl = LOCAL)
```
```{r setup}
library(varTestnlme)
```
When testing that the variance of at least one random effect is equal to 0, the limiting distribution of the test statistic is a chi-bar-square distribution whose weights depend on the Fisher Information Matrix (FIM) of the model.
`varTestnlme` provides different ways to handle the FIM. Depending on the package used to fit the model and on your preferences, it is possible to specify different options with the argument `fim`:
* `fim="extract"`: (the default option) extract the FIM computed by the package used to fit the models. Please note that it might not be available in some cases (for example, for nonlinear models fitted with `lme4`) and that it might rely on some linearization of the model.
* `fim="compute"`: to estimate the FIM using parametric bootstrap. Note that it can take some time to run. The bootstrap sample size can be set with the `control` argument (the default is `B=1000`).
* `fim=I`: with `I` a symmetric positive definite matrix, to provide your own FIM
We provide some examples below.
# Fitting the models
We use a dataset on high-flux hemodialyzers, available in the `nlme` package, in which ultrafiltration rates of 20 dialyzers were measured at 7 different transmembrane pressures. A nonlinear model is considered, with random effects on the asymptote, the log-rate and the offset.
$$y_{ij} = A_i*(1 - \exp(-e^{l_i}*(t_{ij} - c_i))) + \varepsilon_{ij}, \quad \varepsilon_{ij} \sim \mathcal{N}(0,\sigma^2)$$
$$(A_i,l_i,c_i)^t \sim \mathcal{N}(\beta,\Gamma) $$
Suppose we wish to test if the asymptote is indeed random, i.e.:
$$H_0: \Gamma = \begin{pmatrix}
\gamma_1^2 & 0 & 0\\
0 & 0 & 0 \\
0 & 0 & 0
\end{pmatrix} \quad \text{versus} \quad H_1 : \Gamma = \begin{pmatrix}
\gamma_1^2 & 0 & 0 \\
0 & \gamma_2^2 & 0 \\
0 & 0 & \gamma_3^2
\end{pmatrix}$$
Using `nlme`, the two models under $H_0$ and $H_1$ can be specified in the following way:
```{r}
library(nlme)
fm1.lis <- nlsList(rate ~ SSasympOff(pressure, Asym, lrc, c0), data=Dialyzer)
m1 <- nlme(fm1.lis, random = pdDiag(Asym + lrc + c0 ~ 1))
m0 <- nlme(fm1.lis, random = pdDiag(Asym ~ 1))
```
# Run the test
To run the test with the default settings, simply use:
```{r}
varCompTest(m1,m0)
```
With the default settings, only bounds on the p-value are computed. Here they are more than enough to reject the null hypothesis that the asymptote is the only random effect. However, if more precision is needed, it can be requested using `pval.comp = "both"` or `pval.comp = "approx"`. By default, the `fim` option is set to `"extract"`, which means that `varTestnlme` will extract the FIM computed by the package used to fit the model (i.e. `nlme`, `lme4` or `saemix`).
```{r, error=TRUE}
varCompTest(m1,m0,pval.comp = "both",fim="extract")
```
In this case we get an error message from the `nlme` package stating that the FIM is not positive definite. We therefore use the `fim="compute"` option to approximate the FIM by parametric bootstrap, setting the bootstrap sample size to 100 with the `control` argument.
```{r}
varCompTest(m1,m0,pval.comp="both",fim="compute",control=list(B=100))
```
The weights of the chi-bar-square distribution are provided, along with the standard deviations associated with their Monte Carlo approximation. Note that exact formulas exist for the weights when the number of chi-bar-square components is less than or equal to 3, in which case no Monte Carlo approximation is needed and the standard deviations are thus zero.
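The last option, providing your own FIM, is not illustrated above; a minimal sketch would look as follows, where `myFIM` stands for a hypothetical user-computed symmetric positive definite matrix whose rows and columns follow the same parameter ordering as the FIM extracted from the fitted model:

```{r, eval=FALSE}
# myFIM: hypothetical user-supplied Fisher Information Matrix for the parameters of m1
varCompTest(m1, m0, pval.comp = "both", fim = myFIM)
```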
|
/scratch/gouwar.j/cran-all/cranData/varTestnlme/vignettes/web_only/fim.Rmd
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
soft_threshold <- function(beta, lam, r, result) {
invisible(.Call('varband_soft_threshold', PACKAGE = 'varband', beta, lam, r, result))
}
close_update <- function(S, S_inv, r, rho, u, gamma, res) {
invisible(.Call('varband_close_update', PACKAGE = 'varband', S, S_inv, r, rho, u, gamma, res))
}
inverse_update <- function(S, rho, S_inv) {
invisible(.Call('varband_inverse_update', PACKAGE = 'varband', S, rho, S_inv))
}
elliproj_u <- function(y, tau, pp) {
invisible(.Call('varband_elliproj_u', PACKAGE = 'varband', y, tau, pp))
}
rootfind <- function(pp, ww, tau, l) {
.Call('varband_rootfind', PACKAGE = 'varband', pp, ww, tau, l)
}
elliproj_w <- function(y, tau, pp) {
invisible(.Call('varband_elliproj_w', PACKAGE = 'varband', y, tau, pp))
}
rowadmm <- function(S, init_row, lambda, w = FALSE, tol = 1.0e-4, itermax = 1e+6L) {
.Call('varband_rowadmm', PACKAGE = 'varband', S, init_row, lambda, w, tol, itermax)
}
rowadmm_lasso <- function(S, init_row, lambda, tol = 1.0e-4, itermax = 1e+6L) {
.Call('varband_rowadmm_lasso', PACKAGE = 'varband', S, init_row, lambda, tol, itermax)
}
#' Compute the varband estimate for a fixed tuning parameter value with different penalty options.
#'
#' Solves the main optimization problem in Yu & Bien (2016):
#' \deqn{min_L -2 \sum_{r=1}^p \log L_{rr} + tr(SLL^T) + lam * \sum_{r=2}^p P_r(L_{r.})}{min_L -2 sum_{r=1}^p log L_{rr} + tr(SLL^T) + lam * sum_{r=2}^p P_r(L_{r.})}
#' where \deqn{P_r(L_{r.}) = \sum_{\ell = 2}^{r-1} \left(\sum_{m=1}^\ell w_{\ell m}^2 L_{rm}^2\right)^{1/2}}{P_r(L_r.) = sum_{l=2}^{r-1} (sum_m=1^l w^2_lm L^2_rm)^{1/2}}
#' or \deqn{P_r(L_{r.}) = \sum_{\ell = 1}^{r-1} |L_{r\ell}|}
#'
#' The function decomposes into p independent row problems,
#' each of which is solved by an ADMM algorithm.
#' See the paper for more details.
#' @param S The sample covariance matrix
#' @param lambda Non-negative tuning parameter. Controls sparsity level.
#' @param w Logical. Should we use weighted version of the penalty or not? If \code{TRUE}, we use general weight. If \code{FALSE}, use unweighted penalty. Default is \code{FALSE}.
#' @param lasso Logical. Should we use l1 penalty instead of hierarchical group lasso penalty? Note that by using l1 penalty, we lose the banded structure in the resulting estimate. Default is \code{FALSE}.
#' @param init Initial estimate of L. Default is a closed-form diagonal estimate of L.
#' @return Returns the variable banding estimate of L, where L^TL = Omega.
#'
#' @examples
#' set.seed(123)
#' n <- 50
#' true <- varband_gen(p = 50, block = 5)
#' x <- sample_gen(L = true, n = n)
#' S <- crossprod(scale(x, center = TRUE, scale = FALSE)) / n
#' init <- diag(1/sqrt(diag(S)))
#' # unweighted estimate
#' L_unweighted <- varband(S, lambda = 0.1, init, w = FALSE)
#' # weighted estimate
#' L_weighted <- varband(S, lambda = 0.1, init, w = TRUE)
#' # lasso estimate
#' L_lasso <- varband(S, lambda = 0.1, init, w = TRUE, lasso = TRUE)
#' @seealso \code{\link{varband_path}} \code{\link{varband_cv}}
#'
#' @export
varband <- function(S, lambda, init, w = FALSE, lasso = FALSE) {
.Call('varband_varband', PACKAGE = 'varband', S, lambda, init, w, lasso)
}
|
/scratch/gouwar.j/cran-all/cranData/varband/R/RcppExports.R
|
#' @useDynLib varband
#' @import Rcpp
# #' @import RcppArmadillo
# importing RcppArmadillo here would cause the following note:
# Package in Depends/Imports which should probably only be in LinkingTo: 'RcppArmadillo'
NULL
.onUnload <- function (libpath) {
library.dynam.unload("varband", libpath)
}
|
/scratch/gouwar.j/cran-all/cranData/varband/R/misc.R
|
#' Generate random samples.
#'
#' Generate \code{n} random samples from multivariate Gaussian distribution N(0, (L^TL)^{-1})
#'
#' @param L p-dimensional inverse Cholesky factor of true covariance matrix.
#' @param n number of samples to generate.
#' @return returns a n-by-p matrix with each row a random sample generated.
#' @examples
#' set.seed(123)
#' true <- varband_gen(p = 50, block = 5)
#' x <- sample_gen(L = true, n = 100)
#' @export
sample_gen<-function(L, n){
#### This function generates normal random vectors
# Y ~ N(0, (L^T L)^{-1}) = N(0, L^{-1}L^{-T})
# Sigma = L^{-1}L^{-T}
p <- nrow(L)
# Z~N(0,1_p)
# Z<-mvrnorm(n, rep(0,p), diag(rep(1,p)))
Z <- matrix(rnorm(n * p), n, p)
# X[i,]%*%L^{-T} ~ N (0, L^{-1}L^{-T}) = N(0, Sigma)
X <- t(solve(L, t(Z)))
return(X)
}
#' Generate an autoregressive model.
#'
#' Generate lower triangular matrix with strict bandwidth. See, e.g., Model 1 in the paper.
#'
#' @param p the dimension of L
#' @param phi_vec a K-dimensional vector for off-diagonal values
#' @return a p-by-p strictly banded lower triangular matrix
#' @examples
#' true_ar <- ar_gen(p = 50, phi_vec = c(0.5, -0.4, 0.1))
#' @export
#' @import stats
ar_gen<-function(p, phi_vec){
set.seed(123)
ii <- toeplitz(1:p)
K <- length(phi_vec) + 1
L <- ii <= K
for (k in seq(2, K)) L[ii == k] <- phi_vec[k - 1]
diag(L) <- rep(0, p)
L <- L - upper.tri(L) * L
L <- diag(rep(1, p)) - L
# Note that Omega = L^T L
# NEWLY ADDED
L <- diag(1/runif(p, 2, 5)) %*% L
return(L)
}
#' Generate a model with variable bandwidth.
#'
#' Generate lower triangular matrix with variable bandwidth. See, e.g., Model 2 and 3 in the paper.
#' @param p the dimension of L
#' @param block the number of block diagonal structures in the resulting model, assumed to divide p
#' @return a p-by-p lower triangular matrix with variable bandwidth
#' @examples
#' set.seed(123)
#' # small block size (big number of blocks)
#' true_small <- varband_gen(p = 50, block = 10)
#' # large block size (small number of blocks)
#' true_large <- varband_gen(p = 50, block = 2)
#' @export
varband_gen <- function(p, block = 10){
set.seed(123)
L <- matrix(0, p, p)
stopifnot(p%%block == 0)
block_size <- p / block
for(k in seq(block)){
L[((k-1) * block_size + 1):(k * block_size),
((k-1)*block_size + 1):(k * block_size)] <- block_gen(block_size)
}
diag(L) <- 1
L <- diag(1/runif(p, 2, 5)) %*% L
return(L)
}
block_gen <- function(block_size){
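# generate one lower triangular block: for each row i, with probability 1/2 fill a random right-most stretch of its off-diagonal entries (columns (Ji+1):(i-1), possibly empty) with random signs and magnitudes in [0.1, 0.4]; otherwise leave the row at zero. This creates a variable bandwidth within the block.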
smallL <- matrix(0, block_size, block_size)
for(i in seq(2, block_size)){
flag <- rbinom(1, 1, 0.5)
if (flag == 1){
Ji <- sample(i - 1, 1)
sign <- ((runif(i - 1, 0, 1) > 0.5) - 1/2 ) * 2
smallL[i, 1 : (i - 1)] <- sign * runif(i - 1, 0.1, 0.4)
smallL[i, 1 : Ji] <- 0
}
else
smallL[i, 1:(i - 1)] <- 0
}
return(smallL)
}
#' Generate a model with block-diagonal structure
#'
#' @param p the dimension of L
#' @return a p-by-p lower triangular matrix with block-diagonal structure from p/4-th row to 3p/4-th row
#' @examples
#' set.seed(123)
#' true_L_block_diag <- block_diag_gen(p = 50)
#' @export
block_diag_gen <- function(p){
L <- matrix(0, p, p)
set.seed(123)
for(i in seq(p / 4 + 2, p * 3 / 4)){
sign <- ((runif(i - p / 4 - 1, 0, 1) > 0.5) - 1/2 ) * 2
L[i, (p / 4 + 1) : (i - 1)] <- sign * runif(i - p / 4 - 1, 0.1, 0.2)
}
diag(L) <- 1
L <- diag(1/runif(p, 2, 5)) %*% L
return(L)
}
|
/scratch/gouwar.j/cran-all/cranData/varband/R/model_gen.R
|
THRESH <- 1e-10
# #' Refit a row problem with given support
# #'
# #' @param r row index
# #' @param ind the index set of support
# #' @param S p-by-p sample covariance matrix
# #' @param delta a nonnegative tuning parameter for the ridge penalty for numerical stability, default is 0.01
refit_row <- function(r, ind, S, delta = 0.01){
# Refitting by plain MLE with
# bandwidth k = r-1-J given our estimator
p <- ncol(S)
res <- rep(0, r)
if(is.null(ind)){
res[r] <- 1/sqrt(S[r, r])
}
else{
# if J < r-1
# ind <- (J+1):(r-1)
# If S[ind,ind] is not invertible
# add a little ridge penalty to that
tmpVec <- solve((S[ind, ind] +
delta * diag(rep(1, length(ind)))),
S[ind, r])
res[r] <- 1/sqrt(S[r, r] + delta -
crossprod(S[r, ind], tmpVec))
res[ind] <- -tmpVec * res[r]
}
return(res)
}
# #' Refit the estimate of lower triangular matrix L with given support
# #'
# #' @param S p-by-p sample covariance matrix
# #' @param mat p-by-p estimate of lower triangular matrix L
# #' @param delta a nonnegative tuning parameter for the ridge penalty for numerical stability, default is 0.01
refit_matrix <- function(S, mat, delta = 0.01){
p <- ncol(S)
refit <- matrix(0, p, p)
refit[1, 1] <- 1/sqrt(S[1, 1])
for(r in seq(2, p)){
ind <- c()
for(j in seq(1,r-1)){
if(abs(mat[r, j]) >= THRESH){
ind <- c(ind, j)
}
}
refit[r, 1:r] <- refit_row(r = r, ind = ind,
S = S, delta = delta)
}
return(refit)
}
# #' Refit a path of estimates of lower triangular matrix L with given support
# #'
# #' @param S: p-by-p sample covariance matrix
# #' @param path: a list of p-by-p estimate of lower triangular matrix L along a path of tuning parameters
# #' @param delta: a nonnegative tuning parameter for the ridge penalty for numerical stability, default is 0.01
refit_path <- function(S, path, delta = 0.01){
p <- dim(path)[1]
nlam <- dim(path)[3]
refit <- array(NA, c(p, p, nlam))
for(i in seq(nlam)){
refit[, , i] <- refit_matrix(S, path[, , i], delta)
}
return(refit)
}
|
/scratch/gouwar.j/cran-all/cranData/varband/R/refit.R
|
#### This file contains all utility functions
#### Not necessarily useful in the package
#' Plot the sparsity pattern of a square matrix
#'
#' Black, white and gray stand for positive, zero and negative respectively
#'
#' @param Mat A matrix to plot.
#' @param main A plot title.
#'
#' @examples
#' set.seed(123)
#' p <- 50
#' n <- 50
#' phi <- 0.4
#' true <- varband_gen(p = p, block = 5)
#' matimage(true)
#' @export
#' @import graphics
matimage <- function(Mat, main = NULL){
tmppar <- par(pty = "s")
image(sign(t(apply(Mat, 2, rev))), axes = FALSE, col = c("gray50","white","black"), main = main)
par(tmppar)
}
KL <- function(Omega_est, Omega_true){
# Kullback-Leibler divergence for gaussian likelihood
# see (14) in Nested Lasso (Levina et,al 2008)
res <- determinant(x = Omega_est, logarithm = TRUE)$modulus
res <- -res + determinant(x = Omega_true, logarithm = TRUE)$modulus
res <- sum(diag(solve(Omega_true, Omega_est))) + res - nrow(Omega_est)
return(as.numeric(res))
}
Irrep <- function(L){
# Calculate the irrepresentability condition of true L
p <- ncol(L)
Sigma <- solve(crossprod(L))
res <- c()
for (r in seq(2, p)){
# For each row
Jr <- 0
for (j in seq(r-1)){
if(L[r, j] != 0){
Jr <- j-1
break
}
}
# Got Ir
if(Jr > 0){
Ir <- ((Jr+1):(r-1))
tmp <- rep(NA, Jr)
for (ll in seq(Jr)){
tmp[ll] <- norm(as.matrix(Sigma[ll, Ir]%*%solve(Sigma[Ir, Ir])), "1")
}
cat("r = ", r, " ", "Jr = ", length(tmp), " ", "Kr = ", length(Ir), fill = TRUE)
res <- c(res, max(tmp))
}
}
return(max(res))
}
#################################################################
#### The following functions are the first R version of main
#### functions used in the package
#### They have been replaced by their c++ version
#################################################################
# #' Evaluate the proximal operator of the hierarchical group lasso with general weights
# #'
# #' This function solves (7) in the paper for general weight w
# #' by solving its dual by performing Newton's method on at most
# #' r-1 univariate functions.
# #' See Algorithm 1 and Theorem 1 in the online supplemenatry.
# #'
# #' @param y An r-dimensional vector.
# #' @param tau lambda/rho
elliproj_w_R <- function(y, tau){
# This function performs the ellipsoid projection
# See supplementary material
r <- length(y)
nu <- rep(NA, r-1)
# pp is the z vector in the paper
pp <- y
for(l in seq(r-1)){
# ww[m] = w_{lm}
ww <- 1 / (seq(l,1)^2)
# check if it lies in the ellipsoid
if (sum((pp[1:l]/ww)^2) <= tau^2){
nu[l] <- 0
pp[1:l] <- 0
}
else{
# project onto the elliposid
f <- function(nu)
1-tau/sqrt(sum((pp[1:l]/(ww+nu/ww))^2))
# lower and upper bound of root
nu.u <- sqrt(sum((ww*pp[1:l])^2))/tau
nu.l <- max(nu.u-ww[l]^2, 0)
# find root
if(abs(f(nu.u)) < SMALL)
nu[l] <- nu.u
else if (abs(f(nu.l)) < SMALL)
nu[l] <- nu.l
else
nu[l] <- uniroot(f = f, interval = c(nu.l, nu.u), tol = 1e-15)$root
pp[1:l] <- pp[1:l] * nu[l] / ( ww^2 + nu[l] )
}
}
return(pp)
}
# #' Evaluate the proximal operator of the hierarchical group lasso with simple weights
# #'
# #' This function solves (7) in the paper for unweighted version(w = 1)
# #' by solving its dual by performing Newton's method on at most
# #' r-1 univariate functions.
# #' See Algorithm 2 in the paper
# #'
# #' @param y An r-dimensional vector.
# #' @param tau lambda/rho
elliproj_u_R <- function(y, tau){
# This function performs the ellipsoid projection
# of the unweighted estimator, which is very easy
# See algorithm 2 in the paper
r <- length(y)
# pp is the z vector in the paper
pp <- y
for(l in seq(r-1)){
tmpnorm <- sqrt(sum(pp[1:l]^2))
if(tmpnorm <= tau)
pp[1:l] <- 0
else
pp[1:l] <- (1-tau/tmpnorm)*pp[1:l]
}
return(pp)
}
SMALL <- 1e-15
# #' Compute the varband estimate for a fixed tuning parameter value.
# #'
# #' Solves the main optimization problem in Yu & Bien (2016):
# #' \deqn{min_L -2 \sum_{r=1}^p \log L_{rr} + tr(SLL^T) + lam * \sum_{r=2}^p P_r(L_{r.})}{min_L -2 sum_{r=1}^p log L_{rr} + tr(SLL^T) + lam * sum_{r=2}^p P_r(L_{r.})}
# #' where \deqn{P_r(L_{r.}) = \sum_{\ell = 2}^{r-1} \left(\sum_{m=1}^\ell w_{\ell m}^2 L_{rm}^2\right)^{1/2}}{P_r(L_r.) = sum_{l=2}^{r-1} (sum_m=1^l w^2_lm L^2_rm)^{1/2}}
# #'
# #' The function decomposes into p independent row problems,
# #' each of which is solved by an ADMM algorithm.
# #' see paper for more explanation.
# #' @param S The sample covariance matrix
# #' @param lambda Non-negative tuning parameter. Controls sparsity level.
# #' @param w Logical. Should we use weighted version of the penalty or not? If \code{TRUE}, we use general weight. If \code{FALSE}, use unweighted penalty. Default is \code{FALSE}.
# #' @param init Initial estimate of L. Default is a closed-form diagonal estimate of L.
# #' @return Returns the variable banding estimate of L, where L^TL = Omega.
# #'
# #' @seealso \code{\link{varband_path}} \code{\link{varband_cv}}
# #'
varband_R <- function(S, lambda, init = NULL, w = FALSE){
p <- ncol(S)
# check that S is square
stopifnot(p == nrow(S))
if (is.null(init))
init <- diag(1/sqrt(diag(S)))
L <- matrix(0,p,p)
L[1,1] <- 1/(sqrt(S[1,1]))
# for the second to the p-th row
for(r in seq(2,p)){
L[r,] <- c(rowadmm_R(S = S[1:r,1:r],
init_row = init[r,1:r],
lambda = lambda, w = w),
rep(0,p-r))
}
return(L)
}
# #' Compute one row of varband estimate for a fixed tuning parameter
# #'
# #'This function solve the following r-th row estimation problem \deqn{min_{beta_r>0} -2 log beta_r + 1/n ||X beta||^2 + lambda P(beta)}
# #' using an ADMM algorithm with changing rho.
# #'
# #' See algorithm 1 in the paper.
# #'
# #' @param S An r-by-r submatrix of sample covariance matrix.
# #' @param init_row The initial estimate of the row.
# #' @param lambda Non-negative tuning parameter. Controls sparsity level.
# #' @param w Logical. Should we use weighted version of the penalty or not? If \code{TRUE}, we use general weight. If \code{FALSE}, use unweighted penalty. Default is \code{FALSE}.
# #' @param tol Tolerance for convergence.
# #' @param itermax Maximum number of iterations of ADMM to perform.
rowadmm_R <- function(S, init_row, lambda, w = FALSE, tol = 1e-4, itermax = 1e5){
# This function solve the following row estimation problem
# \min_{\beta_r>0} -2 log \beta_r + 1/n ||X\beta||^2
# + \lambda P(\beta)
# using an ADMM algorithm with changing rho
r <- ncol(S)
stopifnot(r == nrow(S))
# could use a lower tolerance for unweighted version
if (!w)
tol <- 1e-8
# Default parameter in ADMM
tolabs <- tol
tolrel <- tol
# Changing rho
rho <- 2
mu <- 10
inc <- 2
dec <- 2
# Initialize the result
beta <- init_row
gamma <- init_row
# dual variable
u <- rep(0,r)
S_inv <- inverse_update_R(S = as.matrix(S[-r,-r]),
r = r, rho = rho)
for (i in seq(itermax)) {
# Primal&Dual Updates
beta_new <- close_update_R(S = S, S_inv = S_inv,
r = r, rho = rho, u = u,
gamma = gamma)
if (w)
gamma_new <- elliproj_w_R(y = beta_new + u/rho,
tau = lambda/rho)
else
gamma_new <- elliproj_u_R(y = beta_new + u/rho,
tau = lambda/rho)
u <- u + rho*(beta_new - gamma_new)
# check convergence See pp 22 Boyd(2011)
# primal residual
pres <- sqrt(crossprod(beta_new - gamma_new))
# dual residual
dres <- rho*sqrt(crossprod(gamma_new - gamma))
# primal tolerance
peps <- tolabs*sqrt(r) +
tolrel*max(sqrt(crossprod(beta_new)),
sqrt(crossprod(gamma_new)))
# dual tolerance
deps <- tolabs*sqrt(r) + tolrel*sqrt(crossprod(u))
if(pres <= peps & dres <= deps)
{
#cat("ADMM converges after",i,"iterations",fill=TRUE)
break
}
else{
# if not, update estimates and rho
beta <- beta_new
gamma <- gamma_new
# Update rho if needed and corresponding S_inv
if(pres > mu*dres){
rho <- rho*inc
S_inv <- inverse_update_R(S = as.matrix(S[-r,-r]),
r = r, rho = rho)
}
else if(dres > mu*pres){
rho <- rho/dec
S_inv <- inverse_update_R(S = as.matrix(S[-r,-r]),
r = r, rho = rho)
}
}
}
if(i==itermax)
cat("ADMM fails to converge",fill=TRUE)
return (gamma_new)
}
inverse_update_R <- function(S, r, rho){
if(r==1){
S_inv <- as.matrix(1/(2*S[1,1] + rho))
return(S_inv)
}
else{
S_inv <- 2*S
diag(S_inv) <- diag(S_inv) + rho
S_inv <- solve(S_inv)
return(S_inv)
}
}
# #' Close-form update of beta in Algorithm 1
# #'
# #' This function solves (6) in the paper with a closed form solution
# #'
# #' @param S An r-by-r submatrix of sample covariance matrix.
# #' @param S_inv inverse of (2S_{-r,-r} + rho I)
# #' @param r row index
# #' @param rho parameter rho used in ADMM
# #' @param u dual variable in ADMM
# #' @param gamma primal variable in ADMM
close_update_R <- function(S, S_inv, r, rho, u, gamma){
# This performs the closed updated for beta
# see Section 3 in the paper
vec.tmp <- S_inv%*%S[-r,r]
A <- 4*crossprod(vec.tmp,S[-r,r]) - 2*S[r,r] - rho
B <- 2*crossprod(vec.tmp, (u[-r] - rho*gamma[-r])) - u[r] + rho*gamma[r]
res <- rep(0,r)
# beta_r
res[r] <- (-sqrt(B^2 - 8 * A) - B)/(2 * A)
# beta_[-r]
res[-r] <- -2*res[r]*vec.tmp - S_inv%*%(u[-r] - rho*gamma[-r])
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/varband/R/utils.R
|
#' Perform nfolds-cross validation
#'
#' Select tuning parameter by cross validation according to the likelihood on testing data, with and without refitting.
#'
#' @param x A n-by-p sample matrix, each row is an observation of the p-dim random vector.
#' @param w Logical. Should we use weighted version of the penalty or not? If \code{TRUE}, we use general weight. If \code{FALSE}, use unweighted penalty. Default is \code{FALSE}.
#' @param lasso Logical. Should we use the l1 penalty instead of the hierarchical group lasso penalty? Note that by using the l1 penalty, we lose the banded structure in the resulting estimate. When using the l1 penalty, the method becomes CSCS (Convex Sparse Cholesky Selection), introduced in Khare et al. (2016). Default value for \code{lasso} is \code{FALSE}.
#' @param lamlist A list of non-negative tuning parameters \code{lambda}.
#' @param nlam If lamlist is not provided, create a lamlist with length \code{nlam}. Default is 60.
#' @param flmin If lamlist is not provided, create a lamlist with ratio of the smallest and largest lambda in the list equal to \code{flmin}. Default is 0.01.
#' @param folds Folds used in cross-validation
#' @param nfolds If folds are not provided, create folds of size \code{nfolds}.
#'
#' @return A list object containing \describe{
#' \item{errs_fit: }{A \code{nlam}-by-\code{nfolds} matrix of negative Gaussian log-likelihood values on the CV test data sets. \code{errs_fit[i,j]} is the negative Gaussian log-likelihood value incurred when using \code{lamlist[i]} on fold \code{j}.}
#' \item{errs_refit: }{A \code{nlam}-by-\code{nfolds} matrix of negative Gaussian log-likelihood values of the refitting.}
#' \item{folds: }{Folds used in cross validation.}
#' \item{lamlist: }{\code{lambda} grid used in cross validation.}
#' \item{ibest_fit: }{index of \code{lamlist} minimizing CV negative Gaussian log-likelihood.}
#' \item{ibest_refit: }{index of \code{lamlist} minimizing refitting CV negative Gaussian log-likelihood.}
#' \item{i1se_fit: }{Index of \code{lamlist} selected by the one-standard-error rule.}
#' \item{i1se_refit: }{Index of \code{lamlist} selected by the one-standard-error rule for the refitting process.}
#' \item{L_fit: }{Estimate of L corresponding to \code{ibest_fit}.}
#' \item{L_refit: }{Refitted estimate of L corresponding to \code{ibest_refit}.}
#' }
#'
#' @examples
#' set.seed(123)
#' p <- 50
#' n <- 50
#' true <- varband_gen(p = p, block = 5)
#' x <- sample_gen(L = true, n = n)
#' res_cv <- varband_cv(x = x, w = FALSE, nlam = 40, flmin = 0.03)
#' @export
#'
#' @seealso \code{\link{varband}} \code{\link{varband_path}}
varband_cv <- function(x, w = FALSE, lasso = FALSE, lamlist = NULL, nlam = 60, flmin = 1e-2, folds = NULL, nfolds = 5) {
n <- nrow(x)
p <- ncol(x)
S <- crossprod(scale(x, center=TRUE, scale=FALSE)) / n
if(is.null(folds))
folds <- makefolds(n, nfolds = nfolds)
nfolds <- length(folds)
if (is.null(lamlist)) {
lam_max <- lammax(S = S)
lamlist <- pathGen(nlam = nlam, lam_max = lam_max,
flmin = flmin, S = S)
} else {
nlam <- length(lamlist)
}
errs_fit <- matrix(NA, nlam, nfolds)
errs_refit <- matrix(NA, nlam, nfolds)
# error function is the negative log Gaussian likelihood
for (i in seq(nfolds)) {
# train on all but i-th fold:
x_tr <- x[-folds[[i]],]
meanx <- colMeans(x_tr)
x_tr <- scale(x_tr, center = meanx, scale = FALSE)
S_tr <- crossprod(x_tr) / (dim(x_tr)[1])
path_fit <- varband_path(S = S_tr, w = w, lasso = lasso,
lamlist = lamlist)$path
path_refit <- refit_path(S = S_tr, path = path_fit)
# evaluate this on left-out fold:
x_te <- x[folds[[i]], ]
x_te <- scale(x_te, center = meanx, scale = FALSE)
S_te <- crossprod(x_te) / (dim(x_te)[1])
for (j in seq(nlam)) {
errs_fit[j, i] <- likelihood(crossprod(path_fit[, , j]), S_te)
errs_refit[j, i] <- likelihood(crossprod(path_refit[, , j]), S_te)
}
}
m_fit <- rowMeans(errs_fit)
se_fit <- apply(errs_fit, 1, sd) / sqrt(nfolds)
m_refit <- rowMeans(errs_refit)
se_refit <- apply(errs_refit, 1, sd) / sqrt(nfolds)
ibest_fit <- which.min(m_fit)
i1se_fit <- min(which(m_fit < m_fit[ibest_fit] + se_fit[ibest_fit]))
ibest_refit <- which.min(m_refit)
i1se_refit <- min(which(m_refit < m_refit[ibest_refit] + se_refit[ibest_refit]))
fit_cv <- varband(S = S, lambda = lamlist[ibest_fit], init = path_fit[, , ibest_fit], w = w, lasso = lasso)
refit_cv <- varband(S = S, lambda = lamlist[ibest_refit], init = path_refit[, , ibest_refit], w = w, lasso = lasso)
refit_cv <- refit_matrix(S = S, mat = refit_cv)
return(list(errs_fit = errs_fit, errs_refit = errs_refit,
folds = folds, lamlist = lamlist,
ibest_fit = ibest_fit, ibest_refit = ibest_refit,
i1se_fit = i1se_fit, i1se_refit = i1se_refit,
L_fit = fit_cv, L_refit = refit_cv))
}
makefolds <- function(n, nfolds) {
# divides the indices 1:n into nfolds random folds of about the same size.
nn <- round(n / nfolds)
sizes <- rep(nn, nfolds)
sizes[nfolds] <- sizes[nfolds] + n - nn * nfolds
b <- c(0, cumsum(sizes))
ii <- sample(n)
folds <- list()
for (i in seq(nfolds))
folds[[i]] <- ii[seq(b[i] + 1, b[i + 1])]
folds
}
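# Illustrative only: split n = 23 indices into 5 folds. The assignment is
# random, but the fold sizes are determined by n and nfolds.
set.seed(1)
folds <- makefolds(n = 23, nfolds = 5)
sapply(folds, length)    # fold sizes: 5 5 5 5 3
sort(unlist(folds))      # every index from 1 to 23 appears exactly once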
likelihood <- function (Omega, S){
# Calculate the negative log-Gaussian likelihood with
# precision matrix Omega and sample covariance S
return(-determinant(Omega, logarithm = TRUE)$modulus[1] + sum(S*Omega))
}
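# Quick sanity check of the formula (illustrative only): when Omega is exactly
# solve(S), the trace term equals the dimension p, so the value reduces to
# log det(S) + p.
set.seed(1)
X_chk <- matrix(rnorm(200 * 4), 200, 4)
S_chk <- crossprod(scale(X_chk, center = TRUE, scale = FALSE)) / 200
likelihood(solve(S_chk), S_chk)
determinant(S_chk, logarithm = TRUE)$modulus[1] + 4    # should match (up to numerical error)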
|
/scratch/gouwar.j/cran-all/cranData/varband/R/varband_cv.R
|
#' Solve main optimization problem along a path of lambda
#'
#' Compute the varband estimates along a path of tuning parameter values.
#'
#' @param S The sample covariance matrix
#' @param w Logical. Should we use the weighted version of the penalty? If \code{TRUE}, the general weighted penalty is used; if \code{FALSE}, the unweighted penalty is used. Default is \code{FALSE}.
#' @param lasso Logical. Should we use the l1 penalty instead of the hierarchical group lasso penalty? Note that with the l1 penalty we lose the banded structure in the resulting estimate, and the method becomes CSCS (Convex Sparse Cholesky Selection) introduced in Khare et al. (2016). Default value for \code{lasso} is \code{FALSE}.
#' @param lamlist A list of non-negative tuning parameters \code{lambda}.
#' @param nlam If lamlist is not provided, create a lamlist with length \code{nlam}. Default is 60.
#' @param flmin If lamlist is not provided, create a lamlist with the ratio of the smallest to the largest lambda in the list equal to \code{flmin}. Default is 0.01.
#' @return A list object containing \describe{
#' \item{path: }{An array of dimension (\code{p}, \code{p}, \code{nlam}) of estimates of L}
#' \item{lamlist: }{The grid of tuning parameter values used}
#' }
#' @examples
#' set.seed(123)
#' n <- 50
#' true <- varband_gen(p = 50, block = 5)
#' x <- sample_gen(L = true, n = n)
#' S <- crossprod(scale(x, center = TRUE, scale = FALSE))/n
#' path_res <- varband_path(S = S, w = FALSE, nlam = 40, flmin = 0.03)
#' @export
#'
#' @seealso \code{\link{varband}} \code{\link{varband_cv}}
varband_path <- function(S, w = FALSE, lasso = FALSE, lamlist = NULL, nlam = 60, flmin = 0.01){
p <- ncol(S)
stopifnot(p == nrow(S))
if (is.null(lamlist)) {
lam_max <- lammax(S = S)
lamlist <- pathGen(nlam = nlam, lam_max = lam_max,
flmin = flmin, S = S)
} else {
nlam <- length(lamlist)
}
result<- array(NA, c(p, p, nlam))
for (i in seq(nlam)) {
if(i==1){
# cat(i)
result[, , i] <- diag(1/sqrt(diag(S)))
}
else
{
# cat(i)
result[, , i] <- varband(S = S, lambda = lamlist[i],
init = result[, , i-1], w = w, lasso = lasso)
}
}
return(list(path = result, lamlist = lamlist))
}
lammax <- function(S){
#### This function calculates the max value in the tuning parameter list
# such that the estimator L_{\lambda} is a diagonal matrix
# NOTE: this is not necessarily true, but generally
  # an upper bound of the value we are looking for.
# Args:
# S: the p-by-p sample covariance matrix
p <- ncol(S)
sighat <- rep(NA, p-1)
for (r in seq(2, p)){
sighat[r-1] <- max(abs(S[(1:(r-1)), r]))/sqrt(S[r, r])
}
2 * max(sighat)
}
pathGen <- function(nlam, lam_max, flmin, S){
  # Generate a path of lambda values of length (at most) nlam, with half of
  # the grid equally spaced on the log scale and half equally spaced on the
  # linear scale, both running from lam_max down to about lam_max * flmin.
  # lam_max <- lammax(S)
  lamlist_log <- lam_max * exp(seq(0, log(flmin), length = nlam/2))
  lamlist_lin <- seq(lam_max - 1e-8, lam_max*flmin - 1e-8, length.out = nlam/2)
  return(sort(unique(c(lamlist_log, lamlist_lin)), decreasing = TRUE))
}
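# Illustrative sketch of how lammax() and pathGen() are combined inside
# varband_path() and varband_cv(). Assumes the varband package is loaded so
# that varband_gen() and sample_gen() are available; the model sizes are the
# same as in the package vignette.
set.seed(123)
L_true  <- varband_gen(p = 50, block = 5)
x_sim   <- sample_gen(L = L_true, n = 100)
S_sim   <- crossprod(scale(x_sim, center = TRUE, scale = FALSE)) / 100
lam_max <- lammax(S_sim)    # generally large enough that the estimate is diagonal (see note above)
lamlist <- pathGen(nlam = 10, lam_max = lam_max, flmin = 0.01, S = S_sim)
range(lamlist)              # grid spans roughly lam_max * flmin up to lam_max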
|
/scratch/gouwar.j/cran-all/cranData/varband/R/varband_path.R
|
## ------------------------------------------------------------------------
library(varband)
set.seed(123)
p <- 50
n <- 100
true <- varband_gen(p = p, block = 5)
## ------------------------------------------------------------------------
# random sample
x <- sample_gen(L = true, n = n)
# sample covariance matrix
S <- crossprod(scale(x, center = TRUE, scale = FALSE)) / n
## ---- fig.height = 4, fig.width = 7--------------------------------------
par(mfrow = c(1, 2), mar = c(0, 0, 2, 0))
matimage(true, main = "True L")
matimage(S, main = "Sample covariance matrix")
## ---- fig.height = 4, fig.width = 7, fig.align='center'------------------
# use identity matrix as initial estimate
init <- diag(p)
L_weighted <- varband(S = S, lambda = 0.4, init = init, w = TRUE)
L_unweighted <- varband(S = S, lambda = 0.4, init = init, w = FALSE)
par(mfrow = c(1,2), mar = c(0, 0, 2, 0))
matimage(L_weighted, main = "weighted, lam = 0.4")
matimage(L_unweighted, main = "unweighted, lam = 0.4")
## ---- fig.height = 12.6, fig.width = 7, fig.align='center'---------------
# generate a grid of 40 tuning parameters,
# with the ratio of the smallest to the largest value equal to 0.03
res <- varband_path(S = S, w = FALSE, nlam = 40, flmin = 0.03)
par(mfrow = c(8, 5), mar = 0.1 + c(0, 0, 2, 0))
for (i in seq_along(res$lamlist))
matimage(res$path[, , i], main = sprintf("lam=%s", round(res$lamlist[i], 4)))
## ---- fig.height = 5, fig.width = 5, fig.align='center'------------------
res_cv <- varband_cv(x = x, w = FALSE, nlam = 40, flmin = 0.03)
m <- rowMeans(res_cv$errs_fit)
se <- apply(res_cv$errs_fit, 1, sd) / sqrt(length(res_cv$folds))
plot(res_cv$lamlist, m,
main = "negative Gaussian log-likelihood",
xlab = "tuning parameter", ylab = "average neg-log-likelihood",
type="o", ylim = range(m - se, m + se), pch = 20)
# 1-se rule
lines(res_cv$lamlist, m + se)
lines(res_cv$lamlist, m - se)
abline(v = res_cv$lamlist[res_cv$ibest_fit], lty = 2)
abline(v = res_cv$lamlist[res_cv$i1se_fit], lty = 2)
## ---- fig.height = 4, fig.width = 7--------------------------------------
par(mfrow = c(1,2), mar = c(0, 0, 2, 0))
matimage(res_cv$L_fit, main = "Fit")
matimage(res_cv$L_refit, main = "Refit")
|
/scratch/gouwar.j/cran-all/cranData/varband/inst/doc/varband-vignette.R
|
---
title: "Using the varband package"
author: "Guo Yu"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
#pdf_document
vignette: >
%\VignetteIndexEntry{Using the varband package}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
The `varband` package implements the variable banding method for learning local dependence and estimating large sparse precision matrices in settings where the variables have a natural ordering. The details of the method can be found in [Yu and Bien (2016), *Learning Local Dependence in Ordered Data* (under revision)](http://arxiv.org/abs/1604.07451).
In particular, given a data matrix $X \in \mathbb{R}^{n \times p}$, with each row an observation of a $p$-dimensional random vector $X \sim N(0, \Omega^{-1} = (L^T L)^{-1})$, the package implements a penalized likelihood-based approach to estimating $L$ with a data-adaptive variable bandwidth.
This document serves as an introduction to using the package.
The main function is `varband`, which takes a sample covariance matrix of the observations and returns the estimate of $L$. For demonstration purposes and simulation studies, the package also contains functions to generate random samples from true models with user-specified variable-banded patterns.
## Data simulation
The package contains two functions for generating true models: `ar_gen` and `varband_gen`. The function `ar_gen` takes a vector of pre-specified off-diagonal values and returns a strictly banded $L$, which corresponds to an autoregressive model of order equal to the bandwidth. The function `varband_gen` returns a lower triangular block-diagonal matrix with each block having variable bandwidth.
```{r}
library(varband)
set.seed(123)
p <- 50
n <- 100
true <- varband_gen(p = p, block = 5)
```
With a generated true model in place, we can then generate a data matrix $X \in \mathbb{R}^{n \times p}$ with each row a random sample drawn independently from a Gaussian distribution of mean zero and covariance $\Sigma = (L^T L)^{-1}$.
```{r}
# random sample
x <- sample_gen(L = true, n = n)
# sample covariance matrix
S <- crossprod(scale(x, center = TRUE, scale = FALSE)) / n
```
We can then plot the sparsity patterns of the true model and the sample covariance matrix using `matimage`:
```{r, fig.height = 4, fig.width = 7}
par(mfrow = c(1, 2), mar = c(0, 0, 2, 0))
matimage(true, main = "True L")
matimage(S, main = "Sample covariance matrix")
```
## Estimating $L$ with a fixed tuning parameter
Besides the sample covariance matrix, the main function `varband` takes three more arguments. First, it takes a value of the tuning parameter $\lambda$, a nonnegative constant that controls the sparsity level of the resulting estimator. The function also requires an initial estimate, which can essentially be any lower triangular matrix with positive diagonal entries. Finally, one needs to specify the weighting scheme `w` to choose between a weighted and an unweighted estimator. The unweighted estimator applies a heavier penalty and thus produces a sparser estimate than the weighted one for the same value of $\lambda$. As shown in the paper, the unweighted estimator is more efficient to compute and has better practical performance, while the weighted estimator enjoys better theoretical properties.
```{r, fig.height = 4, fig.width = 7, fig.align='center'}
# use identity matrix as initial estimate
init <- diag(p)
L_weighted <- varband(S = S, lambda = 0.4, init = init, w = TRUE)
L_unweighted <- varband(S = S, lambda = 0.4, init = init, w = FALSE)
par(mfrow = c(1,2), mar = c(0, 0, 2, 0))
matimage(L_weighted, main = "weighted, lam = 0.4")
matimage(L_unweighted, main = "unweighted, lam = 0.4")
```
## Computing estimators along a tuning parameter path
In most cases, one does not know in advance which value of the tuning parameter $\lambda$ should be used. The function `varband_path` computes $\hat{L}$ along a grid of $\lambda$ values.
Users can specify their own grid of $\lambda$ values (via `lamlist`). Alternatively, a decreasing path of tuning parameters of user-specified length (via `nlam`) will be generated and returned. In this case, the user also needs to specify `flmin`, the ratio of the smallest to the largest $\lambda$ value in the list, where the largest $\lambda$ is computed such that the resulting estimator is a diagonal matrix.
We can plot the resulting estimates to see whether they cover the full spectrum of sparsity levels.
```{r, fig.height = 12.6, fig.width = 7, fig.align='center'}
# generate a grid of 40 tuning parameters,
# with the ratio of the smallest to the largest value equal to 0.03
res <- varband_path(S = S, w = FALSE, nlam = 40, flmin = 0.03)
par(mfrow = c(8, 5), mar = 0.1 + c(0, 0, 2, 0))
for (i in seq_along(res$lamlist))
matimage(res$path[, , i], main = sprintf("lam=%s", round(res$lamlist[i], 4)))
```
## Selecting the tuning parameter
Users can also select the tuning parameter by cross-validation, implemented in `varband_cv`. The cross-validation selects the value of $\lambda$ for which the resulting estimator attains the highest average likelihood on the test data.
```{r, fig.height = 5, fig.width = 5, fig.align='center'}
res_cv <- varband_cv(x = x, w = FALSE, nlam = 40, flmin = 0.03)
m <- rowMeans(res_cv$errs_fit)
se <- apply(res_cv$errs_fit, 1, sd) / sqrt(length(res_cv$folds))
plot(res_cv$lamlist, m,
main = "negative Gaussian log-likelihood",
xlab = "tuning parameter", ylab = "average neg-log-likelihood",
type="o", ylim = range(m - se, m + se), pch = 20)
# 1-se rule
lines(res_cv$lamlist, m + se)
lines(res_cv$lamlist, m - se)
abline(v = res_cv$lamlist[res_cv$ibest_fit], lty = 2)
abline(v = res_cv$lamlist[res_cv$i1se_fit], lty = 2)
```
The function `varband_cv` returns a list of many objects; for details see `?varband_cv`. In particular, it also returns the best fitted and refitted estimates.
For example, to take a look at the supports of the best fitted and
refitted estimates, use
```{r, fig.height = 4, fig.width = 7}
par(mfrow = c(1,2), mar = c(0, 0, 2, 0))
matimage(res_cv$L_fit, main = "Fit")
matimage(res_cv$L_refit, main = "Refit")
```
|
/scratch/gouwar.j/cran-all/cranData/varband/inst/doc/varband-vignette.Rmd
|
---
title: "Using the varband package"
author: "Guo Yu"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
#pdf_document
vignette: >
%\VignetteIndexEntry{Using the varband package}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
The `varband` package implements the variable banding method for learning local dependence and estimating large sparse precision matrices in settings where the variables have a natural ordering. The details of the method can be found in [Yu and Bien (2016), *Learning Local Dependence in Ordered Data* (under revision)](http://arxiv.org/abs/1604.07451).
In particular, given a data matrix $X \in \mathbb{R}^{n \times p}$, with each row an observation of a $p$-dimensional random vector $X \sim N(0, \Omega^{-1} = (L^T L)^{-1})$, the package implements a penalized likelihood-based approach to estimating $L$ with a data-adaptive variable bandwidth.
This document serves as an introduction to using the package.
The main function is `varband`, which takes a sample covariance matrix of the observations and returns the estimate of $L$. For demonstration purposes and simulation studies, the package also contains functions to generate random samples from true models with user-specified variable-banded patterns.
## Data simulation
The package contains two functions for generating true models: `ar_gen` and `varband_gen`. The function `ar_gen` takes a vector of pre-specified off-diagonal values and returns a strictly banded $L$, which corresponds to an autoregressive model of order equal to the bandwidth. The function `varband_gen` returns a lower triangular block-diagonal matrix with each block having variable bandwidth.
```{r}
library(varband)
set.seed(123)
p <- 50
n <- 100
true <- varband_gen(p = p, block = 5)
```
With a generated true model in place, we can then generate a data matrix $X \in \mathbb{R}^{n \times p}$ with each row a random sample drawn independently from a Gaussian distribution of mean zero and covariance $\Sigma = (L^T L)^{-1}$.
```{r}
# random sample
x <- sample_gen(L = true, n = n)
# sample covariance matrix
S <- crossprod(scale(x, center = TRUE, scale = FALSE)) / n
```
We can then plot the sparsity patterns of the true model and the sample covariance matrix using `matimage`:
```{r, fig.height = 4, fig.width = 7}
par(mfrow = c(1, 2), mar = c(0, 0, 2, 0))
matimage(true, main = "True L")
matimage(S, main = "Sample covariance matrix")
```
## Estimating $L$ with a fixed tuning parameter
Besides the sample covariance matrix, the main function `varband` takes three more arguments. First, it takes a value of the tuning parameter $\lambda$, a nonnegative constant that controls the sparsity level of the resulting estimator. The function also requires an initial estimate, which can essentially be any lower triangular matrix with positive diagonal entries. Finally, one needs to specify the weighting scheme `w` to choose between a weighted and an unweighted estimator. The unweighted estimator applies a heavier penalty and thus produces a sparser estimate than the weighted one for the same value of $\lambda$. As shown in the paper, the unweighted estimator is more efficient to compute and has better practical performance, while the weighted estimator enjoys better theoretical properties.
```{r, fig.height = 4, fig.width = 7, fig.align='center'}
# use identity matrix as initial estimate
init <- diag(p)
L_weighted <- varband(S = S, lambda = 0.4, init = init, w = TRUE)
L_unweighted <- varband(S = S, lambda = 0.4, init = init, w = FALSE)
par(mfrow = c(1,2), mar = c(0, 0, 2, 0))
matimage(L_weighted, main = "weighted, lam = 0.4")
matimage(L_unweighted, main = "unweighted, lam = 0.4")
```
## Computing estimators along a tuning parameter path
In most cases, one does not know in advance which value of the tuning parameter $\lambda$ should be used. The function `varband_path` computes $\hat{L}$ along a grid of $\lambda$ values.
Users can specify their own grid of $\lambda$ values (via `lamlist`). Alternatively, a decreasing path of tuning parameters of user-specified length (via `nlam`) will be generated and returned. In this case, the user also needs to specify `flmin`, the ratio of the smallest to the largest $\lambda$ value in the list, where the largest $\lambda$ is computed such that the resulting estimator is a diagonal matrix.
We can plot the resulting estimates to see whether they cover the full spectrum of sparsity levels.
```{r, fig.height = 12.6, fig.width = 7, fig.align='center'}
# generate a grid of 40 tuning parameters,
# with the ratio of the smallest to the largest value equal to 0.03
res <- varband_path(S = S, w = FALSE, nlam = 40, flmin = 0.03)
par(mfrow = c(8, 5), mar = 0.1 + c(0, 0, 2, 0))
for (i in seq_along(res$lamlist))
matimage(res$path[, , i], main = sprintf("lam=%s", round(res$lamlist[i], 4)))
```
## Selecting the tuning parameter
Users can also select the tuning parameter by cross-validation, implemented in `varband_cv`. The cross-validation selects the value of $\lambda$ for which the resulting estimator attains the highest average likelihood on the test data.
```{r, fig.height = 5, fig.width = 5, fig.align='center'}
res_cv <- varband_cv(x = x, w = FALSE, nlam = 40, flmin = 0.03)
m <- rowMeans(res_cv$errs_fit)
se <- apply(res_cv$errs_fit, 1, sd) / sqrt(length(res_cv$folds))
plot(res_cv$lamlist, m,
main = "negative Gaussian log-likelihood",
xlab = "tuning parameter", ylab = "average neg-log-likelihood",
type="o", ylim = range(m - se, m + se), pch = 20)
# 1-se rule
lines(res_cv$lamlist, m + se)
lines(res_cv$lamlist, m - se)
abline(v = res_cv$lamlist[res_cv$ibest_fit], lty = 2)
abline(v = res_cv$lamlist[res_cv$i1se_fit], lty = 2)
```
The function `varband_cv` returns a list of many objects; for details see `?varband_cv`. In particular, it also returns the best fitted and refitted estimates.
For example, to take a look at the supports of the best fitted and
refitted estimates, use
```{r, fig.height = 4, fig.width = 7}
par(mfrow = c(1,2), mar = c(0, 0, 2, 0))
matimage(res_cv$L_fit, main = "Fit")
matimage(res_cv$L_refit, main = "Refit")
```
|
/scratch/gouwar.j/cran-all/cranData/varband/vignettes/varband-vignette.Rmd
|
#########################
### Numerical binning ###
#########################
varbin <- function(df, x, y, p=0.05, custom_vec=NA){
if (!is.data.frame(df)) {
return("Data not a data.frame")
}
else if (is.numeric(y) | is.numeric(x)) {
return("Column name not string")
}
else if (grepl("[.]", y) | grepl("[.]", x)) {
return("Column name with a dot [.]")
}
else i = which(names(df) == y)
j = which(names(df) == x)
if (!is.numeric(df[, i])) {
return("Target (y) not found or it is not numeric")
}
else if (max(df[, i], na.rm = T) != 1) {
return("Maximum not 1")
}
else if (min(df[, i], na.rm = T) != 0) {
return("Minimum not 0")
}
else if (p <= 0 | p > 0.5) {
return("p must be greater than 0 and lower than 0.5 (50%)")
}
else if (!is.numeric(df[, j])) {
return("Characteristic (x) not found or it is not a number")
}else{
df <- data.frame(df, stringsAsFactors=T)
df_out <- data.frame(NULL, stringsAsFactors=F)
if (is.na(custom_vec[1])){
f <- as.formula(paste0(y," ~ ", x))
tree <- rpart(f, data=df, na.action=na.omit,
control=rpart.control(cp=0.0001, maxdepth=20, minbucket=round(nrow(df)*p)))
bin_vec <- sort(as.numeric(tree$splits[,4]))
bin_vec <- c(bin_vec, bin_vec[length(bin_vec)])
if (length(bin_vec)<=1){
return("No split possible")
}
}else{
bin_vec <- sort(custom_vec)
bin_vec <- c(bin_vec, bin_vec[length(bin_vec)])
}
for (i in 1:length(bin_vec)){
if (i==1){
y_sub <- df[((df[x]<bin_vec[i]) & (!is.na(df[x]))), y]
cp <- paste0("<", bin_vec[i])
}else if (i==length(bin_vec)){
y_sub <- df[((df[x]>=bin_vec[i]) & (!is.na(df[x]))), y]
cp <- paste0(">=", bin_vec[i])
}else{
y_sub <- df[((df[x]>=bin_vec[i-1]) & (df[x]<bin_vec[i]) & (!is.na(df[x]))), y]
cp <- paste0("<", bin_vec[i])
}
n <- length(y_sub)
n1 <- sum(y_sub)
n0 <- n-n1
df_out <- rbind(df_out, data.frame(Cutpoint=cp, CntRec=n, CntNoEvent=n0, CntEvent=n1))
}
n_na <- length(df[is.na(df[x]), y])
n1_na <- sum(df[is.na(df[x]), y])
n0_na <- n_na-n1_na
cp_na <- "Missing"
df_out <- rbind(df_out, data.frame(Cutpoint=cp_na, CntRec=n_na, CntNoEvent=n0_na, CntEvent=n1_na))
n_total <- nrow(df)
n1_total <- sum(df[y])
n0_total <- n_total-n1_total
df_out$CntCumRec <- cumsum(df_out$CntRec)
df_out$CntCumNoEvent <- cumsum(df_out$CntNoEvent)
df_out$CntCumEvent <- cumsum(df_out$CntEvent)
df_out$PctRec <- df_out$CntRec/n_total
df_out$NoEventRate <- df_out$CntNoEvent/df_out$CntRec
df_out$EventRate <- df_out$CntEvent/df_out$CntRec
df_out$Odds <- df_out$EventRate/df_out$NoEventRate
df_out$LnOdds <- log(df_out$EventRate/df_out$NoEventRate)
df_out$WoE <- log((df_out$CntNoEvent/n0_total)/(df_out$CntEvent/n1_total))*100
df_out$IV <- ((df_out$CntNoEvent/n0_total)-(df_out$CntEvent/n1_total))*df_out$WoE
cp_total <- "Total"
df_out <- rbind(df_out, data.frame(Cutpoint=cp_total, CntRec=n_total, CntNoEvent=n0_total,
CntEvent=n1_total, CntCumRec=NA, CntCumNoEvent=NA, CntCumEvent=NA,
PctRec=sum(df_out$PctRec), NoEventRate=n0_total/n_total,
EventRate=n1_total/n_total,
Odds=(n1_total/n_total)/(n0_total/n_total),
LnOdds=log((n1_total/n_total)/(n0_total/n_total)), WoE=0.0,
IV=sum(df_out$IV, na.rm=T)))
return(df_out)
}
}
######################
### Factor binning ###
######################
varbin.factor <- function(df, x, y, custom_vec=NA){
if (!is.data.frame(df)) {
return("Data not a data.frame")
}
else if (is.numeric(y) | is.numeric(x)) {
return("Column name not string")
}
else if (grepl("[.]", y) | grepl("[.]", x)) {
return("Column name with a dot [.]")
}
else i = which(names(df) == y)
j = which(names(df) == x)
if (!is.numeric(df[, i])) {
return("Target (y) not found or it is not numeric")
}
else if (max(df[, i], na.rm = T) != 1) {
return("Maximum not 1")
}
else if (any(grepl(",", df[, j]))) {
return("Values contain comma")
}
else if (tolower(y) == "default") {
return("Field name 'default' not allowed")
}
else if (min(df[, i], na.rm = T) != 0) {
return("Minimum not 0")
}
else if (!is.factor(df[, j])) {
return("Characteristic (x) not found or it is not a factor")
}
else if (length(unique(df[, j])) <= 1) {
return("Characteristic (x) requires at leats 2 uniques categories")
}else{
df <- data.frame(df, stringsAsFactors=T)
df_out <- data.frame(NULL, stringsAsFactors=F)
if (is.na(custom_vec[1])){
for (bin in unique(df[!is.na(df[x]), x])){
y_sub <- df[((df[x]==bin) & (!is.na(df[x]))), y]
cp <- paste0("=", bin)
n <- length(y_sub)
n1 <- sum(y_sub)
n0 <- n-n1
df_out <- rbind(df_out, data.frame(Cutpoint=cp, CntRec=n, CntNoEvent=n0, CntEvent=n1))
}
}else{
for (bin in custom_vec){
bin_clean <- strsplit(bin, split = ",")[[1]]
y_sub <- df[((df[, x] %in% bin_clean) & (!is.na(df[x]))), y]
cp <- paste0("=", bin)
n <- length(y_sub)
n1 <- sum(y_sub)
n0 <- n-n1
df_out <- rbind(df_out, data.frame(Cutpoint=cp, CntRec=n, CntNoEvent=n0, CntEvent=n1))
}
}
n_na <- length(df[is.na(df[x]), y])
n1_na <- sum(df[is.na(df[x]), y])
n0_na <- n_na-n1_na
cp_na <- "Missing"
df_out <- rbind(df_out, data.frame(Cutpoint=cp_na, CntRec=n_na, CntNoEvent=n0_na, CntEvent=n1_na))
n_total <- nrow(df)
n1_total <- sum(df[y])
n0_total <- n_total-n1_total
df_out$CntCumRec <- cumsum(df_out$CntRec)
df_out$CntCumNoEvent <- cumsum(df_out$CntNoEvent)
df_out$CntCumEvent <- cumsum(df_out$CntEvent)
df_out$PctRec <- df_out$CntRec/n_total
df_out$NoEventRate <- df_out$CntNoEvent/df_out$CntRec
df_out$EventRate <- df_out$CntEvent/df_out$CntRec
df_out$Odds <- df_out$EventRate/df_out$NoEventRate
df_out$LnOdds <- log(df_out$EventRate/df_out$NoEventRate)
df_out$WoE <- log((df_out$CntNoEvent/n0_total)/(df_out$CntEvent/n1_total))*100
df_out$IV <- ((df_out$CntNoEvent/n0_total)-(df_out$CntEvent/n1_total))*df_out$WoE
cp_total <- "Total"
df_out <- rbind(df_out, data.frame(Cutpoint=cp_total, CntRec=n_total, CntNoEvent=n0_total,
CntEvent=n1_total, CntCumRec=NA, CntCumNoEvent=NA, CntCumEvent=NA,
PctRec=sum(df_out$PctRec), NoEventRate=n0_total/n_total,
EventRate=n1_total/n_total,
Odds=(n1_total/n_total)/(n0_total/n_total),
LnOdds=log((n1_total/n_total)/(n0_total/n_total)), WoE=0.0,
IV=sum(df_out$IV, na.rm=T)))
return(df_out)
}
}
###############################################
### Monotonically in- or decreasing binning ###
###############################################
varbin.monotonic <- function(df, x, y, p=0.05, increase=F, decrease=F, auto=T){
if (!is.data.frame(df)) {
return("Data not a data.frame")
}
else if (is.numeric(y) | is.numeric(x)) {
return("Column name not string")
}
else if (grepl("[.]", y) | grepl("[.]", x)) {
return("Column name with a dot [.]")
}
else i = which(names(df) == y)
j = which(names(df) == x)
if (!is.numeric(df[, i])) {
return("Target (y) not found or it is not numeric")
}
else if (max(df[, i], na.rm = T) != 1) {
return("Maximum not 1")
}
else if (min(df[, i], na.rm = T) != 0) {
return("Minimum not 0")
}
else if (p <= 0 | p > 0.5) {
return("p must be greater than 0 and lower than 0.5 (50%)")
}
else if (!is.numeric(df[, j])) {
return("Characteristic (x) not found or it is not a number")
}else{
df <- data.frame(df, stringsAsFactors=T)
# Slice data
d1 <- df[c(y, x)]
# Ensure no Nas for regression
d2 <- d1[!is.na(d1[x]), ]
# Estimate rank correlation to determine sign of slope
c <- cor(d2[, 2], d2[, 1], method = "spearman", use = "complete.obs")
# Perform isotonic regression
if (increase==F && decrease==F && auto==T){
reg <- isoreg(d2[, 2], c / abs(c) * d2[, 1])
}else if (increase==T && decrease==F && auto==F){
reg <- isoreg(d2[, 2], d2[, 1])
}else if (increase==F && decrease==T && auto==F){
reg <- isoreg(d2[, 2], -d2[, 1])
}
# Get x-axis step-values
k <- unique(knots(as.stepfun(reg)))+min(diff(sort(unique(d1[, x]), na.last=NA)))/10
# Perform binning on raw x-values
sm1 <- varbin(d1, x, y, p, custom_vec=k)
# Filter out non-informative bins and bins with too few obs.
    # Dummy assignments so that the column names used inside subset() below do
    # not trigger "no visible binding for global variable" notes in R CMD check.
    CntNoEvent <- "im a dummy"
    CntEvent <- "im a dummy"
    CntRec <- "im a dummy"
    Cutpoint <- "im a dummy"
c1 <- subset(sm1, subset=CntNoEvent * CntEvent > 0 & CntRec>(nrow(df)*p), select=Cutpoint)
# c1 <- sm1[(((sm1[,"CntNoEvent"]*sm1[,"CntEvent"])>0) &
# (sm1[,"CntRec"]>(nrow(df)*p))), "Cutpoint"]
# Extract the resulting cutpoints and convert to numerics
c2 <- apply(c1, 1, function(x) as.numeric(gsub("[^0-9\\.]", "", x)))
# Remove Nas
c3 <- c2[!is.na(c2)]
if (length(c3)<=1){
return("No Bins")
}else{
return(varbin(d1, x, y, p, custom_vec=c3[-length(c3)]))
}
}
}
#####################################
### Global/local extremum binning ###
#####################################
varbin.kink <- function(df, x, y, p=0.05){
if (!is.data.frame(df)) {
return("Data not a data.frame")
}
else if (is.numeric(y) | is.numeric(x)) {
return("Column name not string")
}
else if (grepl("[.]", y) | grepl("[.]", x)) {
return("Column name with a dot [.]")
}
else i = which(names(df) == y)
j = which(names(df) == x)
if (!is.numeric(df[, i])) {
return("Target (y) not found or it is not numeric")
}
else if (max(df[, i], na.rm = T) != 1) {
return("Maximum not 1")
}
else if (min(df[, i], na.rm = T) != 0) {
return("Minimum not 0")
}
else if (p <= 0 | p > 0.5) {
return("p must be greater than 0 and lower than 0.5 (50%)")
}
else if (!is.numeric(df[, j])) {
return("Characteristic (x) not found or it is not a number")
}else{
df <- data.frame(df, stringsAsFactors=T)
WoE_monotonic_pos <- varbin.monotonic(df, x, y, p, increase=T, decrease=F, auto=F)
WoE_monotonic_neg <- varbin.monotonic(df, x, y, p, increase=F, decrease=T, auto=F)
if (class(WoE_monotonic_pos)=="character"){
return("Monotonically increase not possible")
}else if (class(WoE_monotonic_neg)=="character"){
return("Monotonically decrease not possible")
}else {
pos_cuts <- WoE_monotonic_pos$Cutpoint
neg_cuts <- WoE_monotonic_neg$Cutpoint
pos_cuts <- sapply(pos_cuts, function(x) as.numeric(gsub("[^0-9\\.]", "", x)))
neg_cuts <- sapply(neg_cuts, function(x) as.numeric(gsub("[^0-9\\.]", "", x)))
pos_cuts <- pos_cuts[!is.na(pos_cuts)]
neg_cuts <- neg_cuts[!is.na(neg_cuts)]
pos_cuts <- pos_cuts[-length(pos_cuts)]
neg_cuts <- neg_cuts[-length(neg_cuts)]
if (length(neg_cuts)>=length(pos_cuts)){
med_cut <- median(neg_cuts)
start_cut <- pos_cuts[1]
end_cut <- pos_cuts[length(pos_cuts)]
if(start_cut<med_cut & end_cut>med_cut){
t1 <- sum(pos_cuts<med_cut)
t2 <- sum(pos_cuts>med_cut)
if(t1>=t2){
idx1 <- which(pos_cuts<med_cut)
idx2 <- which(neg_cuts>=med_cut)
cutpoints <- c(pos_cuts[idx1], neg_cuts[idx2])
}else{
idx1 <- which(pos_cuts>med_cut)
idx2 <- which(neg_cuts<=med_cut)
cutpoints <- c(neg_cuts[idx2], pos_cuts[idx1])
}
}else if (end_cut<med_cut){
idx <- which(end_cut<neg_cuts)
cutpoints <- c(pos_cuts, neg_cuts[idx])
}else if (start_cut>med_cut){
idx <- which(start_cut>neg_cuts)
cutpoints <- c(neg_cuts[idx], pos_cuts)
}else{
NA
}
}else{
med_cut <- median(pos_cuts)
start_cut <- neg_cuts[1]
end_cut <- neg_cuts[length(neg_cuts)]
if(start_cut<med_cut & end_cut>med_cut){
t1 <- sum(neg_cuts<med_cut)
t2 <- sum(neg_cuts>med_cut)
if(t1>=t2){
idx1 <- which(neg_cuts<med_cut)
idx2 <- which(pos_cuts>=med_cut)
cutpoints <- c(neg_cuts[idx1], pos_cuts[idx2])
}else{
idx1 <- which(neg_cuts>med_cut)
idx2 <- which(pos_cuts<=med_cut)
cutpoints <- c(pos_cuts[idx2], neg_cuts[idx1])
}
}else if (end_cut<med_cut){
idx <- which(end_cut<pos_cuts)
cutpoints <- c(neg_cuts, pos_cuts[idx])
}else if (start_cut>med_cut){
idx <- which(start_cut>pos_cuts)
cutpoints <- c(pos_cuts[idx], neg_cuts)
}else{
NA
}
}
return(varbin(df, x, y, p, custom_vec=cutpoints))
}
}
}
######################
### Bin conversion ###
######################
varbin.convert <- function(df, ivTable, x){
if (!is.data.frame(df)) {
return("Data not a data.frame")
}else if (!is.data.frame(ivTable)){
return("Table not a data.frame")
}else{
#Initialize values
df <- data.frame(df, stringsAsFactors=T)
df$tmp <- NA
ivTable <- ivTable[, c("Cutpoint", "WoE")]
ivTable <- ivTable[ivTable[,1]!="Total", ]
ivTable[, 2] <- as.numeric(ivTable[, 2])
if (class(df[, x])=='factor'){
ivTable[, 1] <- sub("=", "", ivTable[, 1])
# Handle merged bin values
s <- strsplit(as.character(ivTable[, 1]), split=",")
ivTable <- data.frame(Cutpoint=unlist(s), WoE=rep(ivTable[, 2], sapply(s, length)),
stringsAsFactors=F)
ivTable <- ivTable[!is.na(ivTable[, 2]), ]
# Assign WoE values to tmp variable
for (bin in ivTable[, 1]){
if (bin=="Missing"){
df$tmp[is.na(df[x])] <- ivTable[ivTable[, 1]==bin, 2]
}else{
df$tmp[df[x]==bin] <- ivTable[ivTable[, 1]==bin, 2]
}
}
}else{
# Assign WoE values to tmp variable
i <- 0
for (bin in ivTable[, 1]){
i <- i+1
if (regexpr('>=', bin)[1]>-1){
bin_clean <- as.numeric(sub(">=", "", bin))
df$tmp[((df[x]>=bin_clean) & (!is.na(df[x])))] <- ivTable[ivTable[, 1]==bin, 2]
}else if((regexpr('<', bin)[1]>-1) & (i==1)){
bin_clean <- as.numeric(sub("<", "", bin))
df$tmp[((df[x]<bin_clean) & (!is.na(df[x])))] <- ivTable[ivTable[, 1]==bin, 2]
}else if((regexpr('<', bin)[1]>-1) & (i>=2)){
bin_clean_upper <- as.numeric(sub("<", "", bin))
bin_clean_lower <- as.numeric(sub("<", "", ivTable[, 1][i-1]))
df$tmp[((df[x]>=bin_clean_lower) & (df[x]<bin_clean_upper) & (!is.na(df[x])))] <- ivTable[ivTable[, 1]==bin, 2]
}else if(bin=='Missing'){
df$tmp[is.na(df[x])] <- ivTable[ivTable[, 1]==bin, 2]
}
}
}
new_name <- paste0('WoE_', x)
if (new_name %in% colnames(df)){
df <- df[, !(names(df) %in% new_name)]
}
colnames(df)[colnames(df) == 'tmp'] <- new_name
return(df)
}
}
################
### Bin plot ###
################
varbin.plot <- function(ivTable){
if (!is.data.frame(ivTable)){
return("Table is not a data.frame")
}
x <- ivTable[, 'Cutpoint']
y <- ivTable[, 'WoE']
if (is.na(ivTable[ivTable$Cutpoint=='Missing', "WoE"])){
x <- x[-c(length(x)-1, length(x))]
y <- y[-c(length(y)-1, length(y))]
col <- c(rep("black",length(x)))
pch <- c(rep(19,length(x)))
}else{
x <- x[-length(x)]
y <- y[-length(y)]
col <- c(rep("black",length(x)-1))
pch <- c(rep(19,length(x)-1),1)
}
par(mar=c(2,2,2,2))
plot(1:length(x), y, xlab="Cutpoint", ylab="WoE", xaxt="n", cex=1, pch=pch, cex.lab=1, col=col)
abline(h=0, lty=2, col='black')
title(main="Weight of Evidence", line=0.25)
axis(1, at=1:length(x), labels=x)
}
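########################
### Example workflow ###
########################
# Minimal end-to-end sketch (illustrative only, not part of the package code):
# bin a numeric characteristic, plot the WoE pattern, and convert the raw
# values to WoE. The simulated data frame and column names are made up, and
# varbin() relies on the rpart package being available.
library(rpart)
set.seed(1)
dat <- data.frame(age = round(runif(1000, 18, 70)))
dat$bad <- rbinom(1000, 1, plogis(-3 + 0.05*dat$age))  # event rate rises with age
iv_tab <- varbin(dat, x="age", y="bad", p=0.05)
iv_tab                                   # binning table with counts, WoE and IV
varbin.plot(iv_tab)                      # WoE pattern across the bins
dat_woe <- varbin.convert(dat, iv_tab, x="age")
head(dat_woe$WoE_age)                    # WoE-coded version of age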
|
/scratch/gouwar.j/cran-all/cranData/varbin/R/varbin.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
varbvsnormupdate_rcpp <- function(X, sigma, sa, logodds, xy, d,
alpha, mu, Xr, i)
invisible(.Call(C_varbvs_varbvsnormupdate_rcpp,X,sigma,sa,
logodds,xy,d,alpha,mu,Xr,i))
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/RcppExports.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Compute numerical estimate of Bayes factor.
bayesfactor <- function (logw0, logw1) {
# Compute the marginal log-likelihood under the null hypothesis.
c <- max(logw0)
logz0 <- c + log(mean(exp(logw0 - c)))
# Compute the marginal log-likelihood under the alternative hypothesis.
c <- max(logw1)
logz1 <- c + log(mean(exp(logw1 - c)))
# Compute the numerical estimate of the Bayes factor.
return(exp(logz1 - logz0))
}
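# Tiny numeric illustration (made-up log-weights): exponentiating these
# directly would underflow to zero, but subtracting the largest value first,
# as done above, keeps the computation stable.
logw0 <- c(-1012.3, -1010.7, -1011.5)   # log-weights under the null model
logw1 <- c(-1008.2, -1007.9, -1009.1)   # log-weights under the alternative
bayesfactor(logw0, logw1)               # roughly 20, favouring the alternative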
# A more user-friendly interface for computing a Bayes factor
# comparing two Bayesian variable selection models.
varbvsbf <- function (fit0, fit1) {
# Check that the first and second inputs are of class "varbvs".
if (!is(fit0,"varbvs"))
stop("Argument \"fit0\" should be a varbvs object")
if (!is(fit1,"varbvs"))
stop("Argument \"fit1\" should be a varbvs object")
return(bayesfactor(fit0$logw,fit1$logw))
}
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/bayesfactor.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Return "credible" or "confidence" intervals for all hyperparameter
# settings, as well as intervals averaging over all hyperparameter
# settings.
confint.varbvs <- function (object, parm, level = 0.95, ...) {
# If input "parm" is not provided, select the top 5 variables by
# posterior inclusion probability.
if (missing(parm))
parm <- order(object$pip,decreasing = TRUE)[1:5]
# Get the number of hyperparameter settings (ns), the number of
# selected variables (n), and the total number of variables (p).
ns <- length(object$w)
n <- length(parm)
p <- nrow(object$alpha)
# Check and process input "parm".
variable.names <- rownames(object$alpha)
if (is.numeric(parm))
parm <- variable.names[parm]
if (any(is.na(parm)) | !all(is.element(parm,variable.names)))
stop(paste("Argument \"parm\" should contain only valid variable names",
"(column names of X) or variable indices (columns of X)"))
# Set up the data structure for storing the output.
out <- vector("list",n)
names(out) <- parm
  # Compute the confidence intervals for each requested variable.
for (i in parm)
out[[i]] <- get.confint.matrix(object,i,level)
return(out)
}
# Return an (ns + 1) x 2 matrix containing the x% confidence intervals (x =
# level) for variable i at each of the ns hyperparameter settings, plus
# the confidence interval averaging over all hyperparameter settings.
get.confint.matrix <- function (fit, i, level) {
# Get the number of hyperparameter settings.
ns <- length(fit$w)
if (ns == 1) {
    # In the special case when there is only one hyperparameter
# setting, return the confidence interval in a 2 x 2 matrix.
out <- credintnorm(level,fit$mu[i,],fit$s[i,])
out <- matrix(out,2,2,byrow = TRUE)
} else {
# Set up the data structure for storing the output.
out <- matrix(0,ns + 1,2)
# Compute the credible interval for each hyperparameter setting.
for (j in 1:ns)
out[j,] <- credintnorm(level,fit$mu[i,j],fit$s[i,j])
# Compute the credible interval averaging over all hyperparameter
# settings.
out[ns + 1,] <- credintmix(level,fit$w,fit$mu[i,],fit$s[i,])
}
# Add row labels, and column labels indicating the lower and upper
# confidence limits.
rownames(out) <- c(colnames(fit$alpha),"averaged")
colnames(out) <- paste(round(100*c(0.5 - level/2,0.5 + level/2),
digits = 3),"%")
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/confint.varbvs.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# SUMMARY
# -------
# This file contains various functions to implement the variational
# methods for large-scale Bayesian variational selection. Here is an
# overview of the functions defined in this file:
#
# tf2yn(x)
# logspace(x,y,n)
# var1(x)
# var1.cols(X)
# dot(x,y)
# norm2(x)
# quadnorm(x,a)
# rep_col(x,n)
# rep_row(x,n)
# rand(m,n)
# randn(m,n)
# diagsq(X,a)
# diagsqt(X,a)
# diagsq2(X,A)
# remove.covariate.effects(X,Z,y)
# sigmoid(x)
# logit(x)
# logpexp(x)
# logsigmoid(x)
# slope(x)
# int.gamma(logodds,alpha)
# int.klbeta(alpha,mu,s,sa)
# betavar(p,mu,s)
# credintnorm(x,mu,s)
# credintmix(x,w,mu,s)
# normalizelogweights(logw)
# resid.dev.logistic(y,p)
# cred(x,x0,w,c)
#
# Shorthand for machine precision.
eps <- .Machine$double.eps
# FUNCTION DEFINITIONS
# ----------------------------------------------------------------------
tf2yn <- function (x) {
if (x)
return("yes")
else
return("no")
}
# ----------------------------------------------------------------------
# Generates a vector of n points that are equally spaced on the
# logarithmic scale. Note that x and y should be positive numbers.
logspace <- function (x, y, n)
2^seq(log2(x),log2(y),length = n)
# ----------------------------------------------------------------------
# Return the second moment of x about its mean.
var1 <- function (x) {
n <- length(x)
return(var(x)*(n-1)/n)
}
# ----------------------------------------------------------------------
# Return the second moment of each column of X about its mean.
var1.cols <- function (X)
return(apply(X,2,var1))
# ----------------------------------------------------------------------
# Return the dot product of vectors x and y.
dot <- function (x,y)
sum(x*y)
# ----------------------------------------------------------------------
# Return the quadratic norm (2-norm) of vector x.
norm2 <- function (x)
sqrt(dot(x,x))
# ----------------------------------------------------------------------
# When input a is a matrix, this function returns the quadratic norm
# of vector x with respect to matrix a. When a is not a matrix, this
# function returns the norm of x with respect to A = diag(a). For a
# definition of the quadratic norm, see p. 635 of Convex Optimization
# (2004) by Boyd & Vandenberghe.
quadnorm <- function (x, a) {
x <- c(x)
if (is.matrix(a))
y <- sqrt(c(x %*% a %*% x))
else
y <- sqrt(dot(x*a,x))
return(y)
}
# ----------------------------------------------------------------------
# Replicate vector x to create an m x n matrix, where m = length(x).
rep_col <- function (x, n)
matrix(x,length(x),n,byrow = FALSE)
# ----------------------------------------------------------------------
# Replicate vector x to create an n x m matrix, where m = length(x).
rep_row <- function (x, n)
matrix(x,n,length(x),byrow = TRUE)
# ----------------------------------------------------------------------
# Return matrix containing pseudorandom values drawn from the standard
# uniform distribution.
rand <- function (m, n)
matrix(runif(m*n),m,n)
# ----------------------------------------------------------------------
# Return matrix containing pseudorandom values drawn from the standard
# normal distribution.
randn <- function (m, n)
matrix(rnorm(m*n),m,n)
# ----------------------------------------------------------------------
# diagsq(X) is the same as diag(X'*X), but the computation is done more
# efficiently, and without having to store an intermediate matrix of the
# same size as X. diagsq(X,a) efficiently computes diag(X'*diag(a)*X).
#
# This function calls "diagsq_Call", a function compiled from C code,
# using the .Call interface. To load the C function into R, first
# build the "shared object" (.so) file using the following command in
# the "src" directory: R CMD SHLIB diagsqr.c diagsq.c misc.c. Next,
# load the shared objects into R using the R function dyn.load:
# dyn.load("../src/diagsqr.so").
diagsq <- function (X, a = NULL) {
# If input a is not provided, set it to a vector of ones.
if (is.null(a))
a <- rep(1,nrow(X))
else
a <- as.double(a)
# Initialize the result.
y <- rep(0,ncol(X))
# Execute the C routine using the .Call interface. The main reason
# for using .Call interface is that there is less of a constraint on
# the size of the input matrices.
out <- .Call(C_diagsq_Call,X = X,a = a,y = y)
return(y)
}
# ----------------------------------------------------------------------
# diagsqt(X) returns the same result as diag(X*X'), but the
# computation is done more efficiently, and without having to store an
# intermediate result of the same size as X. diagsqt(X,a) efficiently
# computes diag(X*diag(a)*X').
#
# This function calls "diagsqt_Call", a function compiled from C code,
# using the .Call interface. See function "diagsq" for instructions on
# compiling and loading this C function into R.
diagsqt <- function (X, a = NULL) {
# If input a is not provided, set it to a vector of ones.
if (is.null(a))
a <- rep(1,ncol(X))
else
a <- as.double(a)
# Initialize the result.
y <- rep(0,nrow(X))
# Execute the C routine using the .Call interface. The main reason
# for using .Call interface is that there is less of a constraint on
# the size of the input matrices.
out <- .Call(C_diagsqt_Call,X = X,a = a,y = y)
return(y)
}
# ----------------------------------------------------------------------
# diagsq2(X) is the same as diag(X'*A*X), but the computation is done
# more efficiently.
diagsq2 <- function (X, A)
rowSums((X %*% A) * X)
# Adjust variables X and continuous outcome Y so that the linear
# effects of the covariates Z are removed. This is equivalent to
# integrating out the regression coefficients corresponding to the
# covariates with respect to an improper, uniform prior; see Chipman,
# George and McCulloch, "The Practical Implementation of Bayesian
# Model Selection," 2001. It is assumed that the first column of Z is
# the intercept; that is, a column of ones.
remove.covariate.effects <- function (X, Z, y) {
# Here I compute two quantities that are used here to remove linear
# effects of the covariates (Z) on X and y, and later on to
# efficiently compute estimates of the regression coefficients for
# the covariates.
n <- ncol(Z)
e <- sqrt(.Machine$double.eps)
A <- forceSymmetric(crossprod(Z) + e*diag(n))
SZy <- as.vector(solve(A,c(y %*% Z)))
SZX <- as.matrix(solve(A,t(Z) %*% X))
if (ncol(Z) == 1) {
X <- scale(X,center = TRUE,scale = FALSE)
y <- y - mean(y)
} else {
# The equivalent expressions in MATLAB are
#
# y = y - Z*((Z'*Z)\(Z'*y))
# X = X - Z*((Z'*Z)\(Z'*X))
#
# This should give the same result as centering the columns of X
# and subtracting the mean from y when we have only one
# covariate, the intercept.
y <- y - c(Z %*% SZy)
X <- X - Z %*% SZX
}
return(list(X = X,y = y,SZy = SZy,SZX = SZX))
}
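# A small check of the claim above (illustrative only; forceSymmetric() is
# from the Matrix package, which varbvs imports): after removing the effect
# of a covariate, y matches the residuals from lm() up to the tiny ridge
# term e that is added for numerical stability.
library(Matrix)
set.seed(1)
X0  <- matrix(rnorm(40), 20, 2)
y0  <- rnorm(20)
z1  <- rnorm(20)
out <- remove.covariate.effects(X0, cbind(1, z1), y0)
all.equal(c(out$y), unname(residuals(lm(y0 ~ z1))), tolerance = 1e-6)  # TRUE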
# ----------------------------------------------------------------------
# sigmoid(x) returns the sigmoid of the elements of x. The sigmoid
# function is also known as the logistic link function. It is the
# inverse of logit(x).
sigmoid <- function (x)
1/(1 + exp(-x))
# ----------------------------------------------------------------------
# logit(x) returns the logit of the elements of X. It is the inverse of
# sigmoid(x).
logit <- function (x)
log((x + eps)/((1 - x) + eps))
# ----------------------------------------------------------------------
# logpexp(x) returns log(1 + exp(x)). The computation is performed in a
# numerically stable manner. For large entries of x, log(1 + exp(x)) is
# effectively the same as x.
logpexp <- function (x) {
y <- x
i <- which(x < 16)
y[i] <- log(1 + exp(x[i]))
return(y)
}
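# For example (illustrative): the direct computation overflows for large x,
# while using x itself beyond the cutoff of 16 costs at most about exp(-16),
# i.e. roughly 1e-7, in absolute accuracy.
x_chk <- c(-5, 0, 5, 30, 800)
logpexp(x_chk)       # finite and accurate for every entry
log1p(exp(x_chk))    # the direct computation overflows to Inf at x = 800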
# ----------------------------------------------------------------------
# Use this instead of log(sigmoid(x)) to avoid loss of numerical precision.
logsigmoid <- function (x)
-logpexp(-x)
# ----------------------------------------------------------------------
# slope(x) returns (sigmoid(x) - 1/2)/x, the slope of the conjugate to the
# log-sigmoid function at x, times 2. For details, see Bishop (2006), or the
# Bayesian Analysis paper. This is useful for working with the variational
# approximation for logistic regression.
slope <- function (x)
(sigmoid(x) - 0.5)/(x + eps)
# ----------------------------------------------------------------------
# Computes an integral that appears in the variational lower bound of
# the marginal log-likelihood. This integral is the expectation on the
# prior inclusion probabilities taken with respect to the variational
# approximation. This returns the same result as sum(alpha*log(q) +
# (1-alpha)*log(1-q)).
int.gamma <- function (logodds, alpha)
sum((alpha-1)*logodds + logsigmoid(logodds))
# ----------------------------------------------------------------------
# Computes an integral that appears in the variational lower bound of
# the marginal log-likelihood. This integral is the negative K-L
# divergence between the approximating distribution and the prior of
# the coefficients. Note that this sa is not the same as the sa used
# as an input to varbvsnorm.
int.klbeta <- function (alpha, mu, s, sa)
(sum(alpha) + dot(alpha,log(s/sa)) - dot(alpha,s + mu^2)/sa)/2 -
dot(alpha,log(alpha + eps)) - dot(1 - alpha,log(1 - alpha + eps))
# ----------------------------------------------------------------------
# Compute the variance of X, in which X is drawn from the normal
# distribution with probability p, and X is 0 with probability
# 1-p. Inputs mu and s specify the mean and variance of the normal
# density. Inputs p, mu and s must be arrays of the same
# dimension. This function is useful for calculating the variance of
# the coefficients under the fully-factorized variational
# approximation.
#
# Note that this is the same as
#
# v = p*(s + mu^2) - (p*mu)^2.
#
betavar <- function (p, mu, s)
p*(s + (1 - p)*mu^2)
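# The identity can be checked by simulation (illustrative, arbitrary values):
# draw X = 0 with probability 1 - p and X ~ N(mu, s) otherwise, then compare
# the sample variance with the closed form.
set.seed(1)
z_chk <- rbinom(1e5, 1, 0.3) * rnorm(1e5, 2, sqrt(0.5))
var(z_chk)               # Monte Carlo estimate, close to 0.99
betavar(0.3, 2, 0.5)     # closed form: 0.3 * (0.5 + 0.7 * 2^2) = 0.99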
# ----------------------------------------------------------------------
# Return the x% credible interval (or "confidence interval") for a
# normal distribution with mean mu and variance s (note: *not*
# standard deviation). Also note that x, mu and s must be scalars.
credintnorm <- function (x, mu, s)
qnorm(c(0.5 - x/2,0.5 + x/2),mu,sqrt(s))
# ----------------------------------------------------------------------
# Return the x% credible interval (or "confidence interval") for a
# mixture of normals with means mu, variances s and mixture weights w.
credintmix <- function (x, w, mu, s) {
mix <- norMix(mu = mu,sigma = sqrt(s),w = w,name = "mix")
return(qnorMix(c(0.5 - x/2,0.5 + x/2),mix,method = "root2"))
}
# ----------------------------------------------------------------------
# normalizelogweights takes as input an array of unnormalized
# log-probabilities logw and returns normalized probabilities such
# that the sum is equal to 1.
normalizelogweights <- function (logw) {
# Guard against underflow or overflow by adjusting the
# log-probabilities so that the largest probability is 1.
c <- max(logw)
w <- exp(logw - c)
# Normalize the probabilities.
return(w/sum(w))
}
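# For example (illustrative): log-probabilities this small cannot be
# exponentiated directly, but the normalized weights are still well defined.
lw_chk <- c(-1200, -1198, -1195)
normalizelogweights(lw_chk)     # approximately 0.006, 0.047, 0.946
exp(lw_chk)/sum(exp(lw_chk))    # the naive version returns NaN (0/0 underflow)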
# ----------------------------------------------------------------------
# Compute the deviance residuals for a logistic regression
# model. Argument y is the vector or array of ground-truth binary
# outcomes, and p is the model response; that is, the predicted
# probability that the outcome is equal to 1. See
# http://data.princeton.edu/wws509/notes/c3s8.html for the
# mathematical formula for the deviance residuals.
resid.dev.logistic <- function (y, p) {
i <- which(y == 0)
j <- which(y == 1)
out <- p
out[i] <- log(1 - p[i])
out[j] <- log(p[j])
return(sign(y - p) * sqrt(-2*out))
}
# ----------------------------------------------------------------------
# Returns a c% credible interval [a,b], in which c is a number between
# 0 and 1. Precisely, we define the credible interval [a,b] to be the
# smallest interval containing x0 that contains c% of the probability
# mass. (Note that the credible interval is not necessarily symmetric
# about x0. Other definitions of the credible interval are possible.)
# By default, cred.int = 0.95.
#
# Input x is the vector of random variable assignments, and w contains
# the corresponding probabilities. (These probabilities need not be
# normalized.) Inputs x and w must be numeric arrays with the same
# number of elements.
cred <- function (x, x0, w = NULL, cred.int = 0.95) {
# Get the number of points.
n <- length(x)
# By default, all samples have the same weight.
if (is.null(w))
w <- rep(1/n,n)
# Convert the inputs x and w to vectors.
x <- c(x)
w <- c(w)
# Make sure the probabilities sum to 1.
w <- w/sum(w)
# Sort the points in increasing order.
i <- order(x)
x <- x[i]
w <- w[i]
# Generate all possible intervals [a,b] from the set of points x.
a <- matrix(1:n,n,n,byrow = TRUE)
b <- matrix(1:n,n,n,byrow = FALSE)
i <- which(a <= b)
a <- a[i]
b <- b[i]
# Select only the intervals [a,b] that contain x0.
i <- which(x[a] <= x0 & x0 <= x[b])
a <- a[i]
b <- b[i]
# Select only the intervals that contain cred.int % of the mass.
p <- cumsum(w)
i <- which(p[b] - p[a] + w[a] >= cred.int);
a <- a[i]
b <- b[i]
# From the remaining intervals, choose the interval that has the
# smallest width.
i <- which.min(x[b] - x[a])
return(list(a = x[a[i]],b = x[b[i]]))
}
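# A short sketch on a discretized standard normal (the grid and weights are
# illustrative): the smallest 95% interval containing x0 = 0 is close to the
# usual [-1.96, 1.96].
x_grid <- seq(-4, 4, length.out = 401)
cred(x_grid, x0 = 0, w = dnorm(x_grid), cred.int = 0.95)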
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/misc.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Summarize the variable selection results in a single plot.
plot.varbvs <-
function (x, score, groups, vars = NULL, var.labels, draw.threshold = NA,
gap = 0,col = "midnightblue", pch = 20, scales = NULL,
xlab = "variable", ylab = "posterior probability",
main = "fitted varbvs model: variable selection results",
abline.args = list(lty = "dotted",col = "orangered"),
vars.xyplot.args = list(pch = 20,col = "magenta"),
vars.ltext.args = list(col = "black",pos = 4,cex = 0.5),
par.settings = list(par.main.text = list(font = 1,cex = 0.8),
layout.heights = list(axis.top = 0,
axis.bottom = 0)),
...) {
# CHECK INPUTS
# ------------
# Check that the first input is an instance of class "varbvs".
if (!is(x,"varbvs"))
stop("Input argument x must be an instance of class \"varbvs\".")
# PROCESS OPTIONS
# ---------------
# Get the posterior inclusion probabilities (PIPs) if a
# "score" isn't provided as one of the inputs.
if (missing(score)) {
y <- x$pip
} else
y <- score
p <- length(y)
# Determine the grouping of the variables. By default, all variables
# are assigned to one group.
if (missing(groups))
groups <- rep(1,p)
group.labels <- unique(groups)
# Determine the selected variable labels. By default, use the labels
# stored in the varbvs data structure.
if (is.character(vars))
vars <- match(vars,rownames(x$alpha))
if (missing(var.labels))
var.labels <- rownames(x$alpha)[vars]
if (is.null(var.labels))
var.labels <- rep("",length(vars))
# GENERATE GENOME-WIDE SCAN
# -------------------------
# Determine the positions of the variables and, if necessary, group
# labels along the horizontal axis.
if (length(group.labels) == 1) {
x <- 1:p
xticks <- NULL
group.labels <- NULL
} else {
x <- rep(0,p)
pos <- 0
xticks <- NULL
for (i in group.labels) {
j <- which(groups == i)
n <- length(j)
x[j] <- pos + 1:n
xticks <- c(xticks,pos+n/2)
pos <- pos + n + gap
}
}
# CREATE XYPLOT
# -------------
# Plot the posterior probabilities, or "scores", highlighting and
# labeling any selected variables.
out <- xyplot(y ~ x,data.frame(x=x,y=y),pch = pch,col = col,
scales = c(scales,
list(x = list(at = xticks,labels = group.labels))),
xlab = xlab,ylab = ylab,main = main,
par.settings = par.settings,
panel = function (x,y,...) {
panel.xyplot(x,y,...);
if (!is.na(draw.threshold))
do.call("panel.abline",
c(list(a = draw.threshold,b = 0),abline.args))
                },...)
if (!is.null(vars))
out <- out +
as.layer(do.call("xyplot",
c(list(x = (y ~ x),data = data.frame(x=x,y=y)[vars,],
panel = function (x,y,...) {
panel.xyplot(x,y,...);
do.call("ltext",
c(list(x=x,y=y,labels=var.labels),
vars.ltext.args))
}),vars.xyplot.args)))
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/plot.varbvs.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2019, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Predict Y (outcome) given X (variables), Z (covariates) and model.
predict.varbvs <-
function (object, X, Z = NULL, type = c("link","response","class"),
averaged = TRUE, ...) {
# Check that the first input is an instance of class "varbvs".
if (!is(object,"varbvs"))
stop("Input argument object must be an instance of class \"varbvs\".")
# Process and check input argument "type".
type <- match.arg(type)
if (object$family == "gaussian" & type != "link")
stop(paste("Prediction types \"response\" and \"class\" apply only to",
"logistic regression (family = \"binomial\")"))
# Get the number of samples (n), variables (p) and hyperparameter
# settings (ns).
n <- nrow(X)
p <- ncol(X)
ns <- length(object$logw)
# Check input X.
if (!(is.matrix(X) & is.numeric(X) & sum(is.na(X)) == 0))
stop("Input X must be a numeric matrix with no missing values.")
storage.mode(X) <- "double"
# Check input Z, and add an intercept.
if (is.null(Z))
Z <- matrix(1,n,1)
else {
Z <- as.matrix(Z)
if (!is.numeric(Z) | sum(is.na(Z)) > 0)
stop("Input Z must be a numeric matrix with no missing values.")
if (nrow(Z) != n)
stop("Inputs X and Z do not match.")
storage.mode(Z) <- "double"
Z <- cbind(1,Z)
}
if (ncol(Z) != nrow(object$mu.cov))
stop("Inputs arguments object and Z are not compatible")
# Get the normalized (approximate) probabilities.
w <- object$w
# Compute the estimates for each hyperparameter setting.
out <- with(object,varbvs.linear.predictors(X,Z,mu.cov,alpha,mu))
if (type == "response")
out <- sigmoid(out)
else if (type == "class")
out <- round(sigmoid(out))
# Average the estimates of Y over the hyperparameter settings, if
# requested. For the logistic regression, the final "averaged"
# prediction is obtained by collecting the "votes" from each
# hyperparameter setting, weighting the votes by the marginal
  # probabilities, and outputting the estimate that wins by
# majority. The averaged estimate is computed this way because the
# estimates conditioned on each hyperparameter setting are not
# necessarily calibrated in the same way.
if (averaged) {
if (type == "link")
out <- c(out %*% w)
else if (type == "response")
out <- c(out %*% w)
else
out <- c(round(out %*% w))
}
names(out) <- rownames(X)
return(out)
}
# ----------------------------------------------------------------------
# For each hyperparameter setting, and for each sample, compute a
# posterior mean estimate of Y. (For the logistic regression model, Y
# contains the posterior probability that the binary outcome is 1.)
varbvs.linear.predictors <- function (X, Z, mu.cov, alpha, mu) {
ns <- ncol(alpha)
Y <- Z %*% mu.cov + X %*% (alpha*mu)
return(Y)
}
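# Minimal sketch with toy numbers (not package code) of how predict.varbvs
# averages the per-setting predictions: the n x ns matrix of linear
# predictors is combined column-wise using the normalized weights w.
if (FALSE) {
  out <- matrix(rnorm(6),3,2)   # 3 samples, 2 hyperparameter settings
  w   <- c(0.25,0.75)           # normalized importance weights
  c(out %*% w)                  # weighted average, one prediction per sample
}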
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/predict.varbvs.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2019, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Predict Y (outcome) given X (variables), Z (covariates) and model.
predict.varbvsmix <-
function (object, X, Z = NULL, ...) {
# Check that the first input is an instance of class "varbvsmix".
if (!is(object,"varbvsmix"))
stop("Input argument object must be an instance of class \"varbvsmix\".")
# Get the number of samples (n), variables (p) and hyperparameter
# settings (ns).
n <- nrow(X)
p <- ncol(X)
ns <- length(object$logw)
# Check input X.
if (!(is.matrix(X) & is.numeric(X) & sum(is.na(X)) == 0))
stop("Input X must be a numeric matrix with no missing values.")
storage.mode(X) <- "double"
# Check input Z, and add an intercept.
if (is.null(Z))
Z <- matrix(1,n,1)
else {
Z <- as.matrix(Z)
if (!is.numeric(Z) | sum(is.na(Z)) > 0)
stop("Input Z must be a numeric matrix with no missing values.")
if (nrow(Z) != n)
stop("Inputs X and Z do not match.")
storage.mode(Z) <- "double"
Z <- cbind(1,Z)
}
if (ncol(Z) != length(object$mu.cov))
stop("Inputs arguments object and Z are not compatible")
# Compute the posterior mean estimates of the outcomes, Y.
return(with(object,drop(Z %*% mu.cov + X %*% rowSums(alpha*mu))))
}
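# Minimal sketch with toy matrices (not package code) of the posterior mean
# prediction used above: rowSums(alpha*mu) is the posterior mean coefficient
# for each variable, averaged over the mixture components.
if (FALSE) {
  n <- 4; p <- 3; K <- 2
  X      <- matrix(rnorm(n*p),n,p)
  Z      <- cbind(1,rnorm(n))               # intercept plus one covariate
  mu.cov <- c(0.5,-1)                       # coefficients for the columns of Z
  alpha  <- matrix(runif(p*K),p,K)
  alpha  <- alpha/rowSums(alpha)            # mixture assignment probabilities
  mu     <- matrix(rnorm(p*K),p,K)          # posterior means per component
  drop(Z %*% mu.cov + X %*% rowSums(alpha*mu))
}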
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/predict.varbvsmix.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Select a subset of the candidate hyperparameter settings, and return
# a new varbvs object with these hyperparameter settings only.
subset.varbvs <- function (x, subset, ...) {
# Check that the first input is an instance of class "varbvs".
if (!is(x,"varbvs"))
stop("Input argument object must be an instance of class \"varbvs\".")
# Get the unevaluated subset expression.
e <- substitute(subset)
# Get the hyperparameter settings satisfying the 'subset' condition.
i <- which(eval(e,x,parent.frame()))
if (length(i) == 0)
stop("No hyperparameter settings are selected.")
# Output the new varbvs object with these hyperparameter settings
# only.
out <- x
out$sa <- out$sa[i]
out$logodds <- out$logodds[i]
out$logw <- out$logw[i]
out$w <- normalizelogweights(out$logw)
out$mu.cov <- as.matrix(out$mu.cov[,i])
out$alpha <- as.matrix(out$alpha[,i])
out$mu <- as.matrix(out$mu[,i])
out$s <- as.matrix(out$s[,i])
  out$fitted.values <- as.matrix(out$fitted.values[,i])
  if (out$family == "gaussian")
    out$residuals <- as.matrix(out$residuals[,i])
  else
    out$residuals <- lapply(out$residuals,function (r) as.matrix(r[,i]))
if (!is.null(out$pve))
out$pve <- as.matrix(out$pve[,i])
if (out$family == "gaussian")
out$sigma <- out$sigma[i]
else if (out$family == "binomial")
out$eta <- out$eta[,i]
return(out)
}
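# Illustrative usage sketch (assumes a hypothetical varbvs fit "fit" with
# several candidate prior log-odds settings): retain only the settings with
# prior log-odds greater than -3.
if (FALSE) {
  fit.sub <- subset(fit,logodds > -3)
  length(fit.sub$logw)   # number of retained hyperparameter settings
}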
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/subset.varbvs.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Generate a four-part summary of the fitted Bayesian variable
# selection model.
summary.varbvs <- function (object, cred.int = 0.95, nv, pip.cutoff, ...) {
# Check that the first input is an instance of class "varbvs".
if (!is(object,"varbvs"))
stop("Input argument object must be an instance of class \"varbvs\".")
# Determine the number of variables for which to show detailed
# summaries.
if (missing(nv) & missing(pip.cutoff))
nv <- 5
else if (!missing(nv) & !missing(pip.cutoff))
stop("Input arguments \"nv\" and \"pip.cutoff\" cannot both be specified")
else if (!missing(pip.cutoff)) {
if (pip.cutoff < 0 | pip.cutoff > 1)
stop("Argument \"pip.cutoff\" should be a number between 0 and 1")
nv <- sum(object$pip >= pip.cutoff)
}
# Get the normalized importance weights.
w <- object$w
# Get the number of variables (p) and number of candidate
# hyperparameter settings (ns).
p <- nrow(object$alpha)
ns <- length(w)
# Input nv must be at least 1, and cannot be greater than the number
# of variables.
nv <- max(1,nv)
nv <- min(p,nv)
# Generate the summary.
out <-
list(family = object$family,
cred.int = cred.int,
n = nobs(object),
p = p,
ns = ns,
ncov = nrow(object$mu.cov),
prior.same = object$prior.same,
update.sigma = object$update.sigma,
update.sa = object$update.sa,
optimize.eta = object$optimize.eta,
logw = object$logw,
w = w,
sigma = list(x = NA,x0 = NA,a = NA,b = NA),
sa = list(x = NA,x0 = NA,a = NA,b = NA),
logodds = list(x = NA,x0 = NA,a = NA,b = NA),
model.pve = NULL)
# Summarize the proportion of variance explained (PVE) by the fitted
# model.
if (!is.null(object$model.pve))
    out$model.pve <-
list(x0 = mean(object$model.pve),
a = quantile(object$model.pve,0.5 - cred.int/2,na.rm = TRUE),
b = quantile(object$model.pve,0.5 + cred.int/2,na.rm = TRUE))
# Summarize the candidate hyperparameter settings, when provided.
if (!object$update.sigma)
out$sigma$x <- object$sigma
if (!object$update.sa)
out$sa$x <- object$sa
if (object$prior.same)
out$logodds$x <- object$logodds
if (ns == 1) {
# Summarize the hyperparameter settings when there is only one
# candidate setting.
out$sa$x0 <- object$sa
if (object$family == "gaussian")
out$sigma$x0 <- object$sigma
if (object$prior.same)
out$logodds$x0 <- object$logodds
} else {
# Summarize the residual variance parameter (sigma).
if (object$family == "gaussian") {
x <- object$sigma
if (length(unique(x)) > 1) {
x0 <- dot(w,x)
out$sigma <- c(list(x = out$sigma$x,x0 = x0),cred(x,x0,w,cred.int))
}
}
# Summarize the fitted prior variance parameter (sa).
x <- object$sa
if (length(unique(x)) > 1) {
x0 <- dot(w,x)
out$sa <- c(list(x = out$sa$x,x0 = x0),cred(x,x0,w,cred.int))
}
# Summarize the fitted prior log-odds of inclusion (logodds).
if (object$prior.same) {
x <- object$logodds
x0 <- dot(w,x)
out$logodds <- c(list(x = out$logodds$x,x0 = x0),cred(x,x0,w,cred.int))
}
}
# Summarize the number of variables selected at different PIP thresholds.
out$num.included <-
with(object,as.table(c(sum(pip>0.1),sum(pip>0.25),sum(pip>0.5),
sum(pip>0.75),sum(pip>0.9),sum(pip>0.95))))
names(out$num.included) <- c(">0.10",">0.25",">0.50",">0.75",">0.90",">0.95")
# Get more detailed statistics about the top nv variables by the
# probability that they are included.
vars <- order(object$pip,decreasing = TRUE)[1:nv]
CIs <- confint(object,vars,cred.int)
CIs <- do.call(rbind,lapply(CIs,function (x) x[ns + 1,]))
out$top.vars <-
data.frame(index = vars,variable = rownames(object$alpha)[vars],
prob = object$pip[vars],PVE = NA,coef = object$beta[vars],
cred = NA,stringsAsFactors = FALSE)
for (i in 1:length(vars)) {
if (!is.null(object$pve))
out$top.vars[i,"PVE"] <- dot(w,object$pve[vars[i],])
out$top.vars[i,"cred"] <- sprintf("[%+0.3f,%+0.3f]",CIs[i,1],CIs[i,2])
}
colnames(out$top.vars)[6] <- sprintf("Pr(coef.>%0.2f)",cred.int)
rownames(out$top.vars) <- NULL
class(out) <- c("summary.varbvs","list")
return(out)
}
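# Illustrative usage sketch (assumes a hypothetical varbvs fit "fit"):
# summarize the fit with 90% credible intervals, listing all variables with
# posterior inclusion probability of at least 0.5.
if (FALSE) {
  print(summary(fit,cred.int = 0.9,pip.cutoff = 0.5))
}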
# ----------------------------------------------------------------------
print.summary.varbvs <- function (x, digits = 3, ...) {
# Check that the first input is an instance of class "summary.varbvs".
if (!is(x,"summary.varbvs"))
stop("Input must be an instance of class \"summary.varbvs\".")
with(x,{
# SUMMARIZE ANALYSIS SETUP
# ------------------------
cat("Summary of fitted Bayesian variable selection model:\n")
cat(sprintf("family: %-8s",family))
cat(sprintf(" num. hyperparameter settings: %d\n",ns))
cat(sprintf("samples: %-6d",n))
cat(sprintf(" iid variable selection prior: %s\n",tf2yn(prior.same)))
cat(sprintf("variables: %-6d",p))
cat(sprintf(" fit prior var. of coefs (sa): %s\n",tf2yn(update.sa)))
cat(sprintf("covariates: %-6d ",ncov))
if (family == "gaussian")
cat(sprintf("fit residual var. (sigma): %s\n",tf2yn(update.sigma)))
else if (family == "binomial")
cat(sprintf("fit approx. factors (eta): %s\n",tf2yn(optimize.eta)))
cat(sprintf("maximum log-likelihood lower bound: %0.4f\n",max(logw)))
if (!is.null(model.pve)) {
with(model.pve,{
cat("proportion of variance explained: ")
cat(sprintf("%0.3f [%0.3f,%0.3f]\n",x0,a,b))
})
}
# SUMMARIZE RESULTS ON HYPERPARAMETERS
# ------------------------------------
cat("Hyperparameters: ")
if (ns == 1) {
# Summarize the hyperparameter settings when there is only one
# candidate setting.
cat(sprintf("sigma=%0.3g sa=%0.3g ",sigma$x0,sa$x0))
if (prior.same)
cat(sprintf("logodds=%+0.2f",logodds$x0))
cat("\n")
} else {
# Summarize the residual variance parameter (sigma).
cat("\n")
cat(" estimate ")
cat(sprintf("Pr>%0.2f candidate values\n",cred.int))
if (family == "gaussian")
with(sigma,cat(sprintf("sigma %8.3g %-19s %0.3g--%0.3g\n",x0,
sprintf("[%0.3g,%0.3g]",a,b),min(x),max(x))))
# Summarize the fitted prior variance parameter (sa).
with(sa,cat(sprintf("sa %8.3g %-19s %0.3g--%0.3g\n",x0,
sprintf("[%0.3g,%0.3g]",a,b),min(x),max(x))))
# Summarize the fitted prior log-odds of inclusion (logodds).
if (prior.same)
with(logodds,
cat(sprintf("logodds*%+8.2f %-19s (%+0.2f)--(%+0.2f)\n",x0,
sprintf("[%+0.2f,%+0.2f]",a,b),min(x),max(x))))
}
cat("*See help(varbvs) for details on how to convert between the\n")
cat("prior log-odds and the prior inclusion probability.\n")
# SUMMARIZE VARIABLE SELECTION RESULTS
# ------------------------------------
# Summarize the number of variables selected at different PIP thresholds.
cat("Selected variables by probability cutoff:\n")
print(num.included)
# Give more detailed statistics about the top n variables by the
# probability that they are included.
if (x$family == "binomial")
names(top.vars)[5] <- "coef*"
cat(sprintf("Top %d variables by inclusion probability:\n",nrow(top.vars)))
print(top.vars,digits = digits)
if (x$family == "binomial")
cat("*See help(varbvs) about interpreting coefficients in logistic",
"regression.\n")
})
return(invisible(x))
}
# ----------------------------------------------------------------------
# Display a summary of the fitted varbvs model.
print.varbvs <- function (x, digits = 3, ...)
print(summary(x),digits,...)
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/summary.varbvs.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2019, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Compute fully-factorized variational approximation for Bayesian
# variable selection in linear (family = "gaussian") or logistic
# regression (family = "binomial"). See varbvs.Rd for details.
varbvs <- function (X, Z, y, family = c("gaussian","binomial"), sigma, sa,
logodds, weights, resid.vcov, alpha, mu, eta,
update.sigma, update.sa, optimize.eta, initialize.params,
update.order, nr = 100, sa0 = 1, n0 = 10, tol = 1e-4,
maxiter = 1e4, verbose = TRUE) {
# Get the number of samples (n) and variables (p).
n <- nrow(X)
p <- ncol(X)
# (1) CHECK INPUTS
# ----------------
# Check input X.
if (!(is.matrix(X) & is.numeric(X) & sum(is.na(X)) == 0))
stop("Input X must be a numeric matrix with no missing values.")
storage.mode(X) <- "double"
# Check input Z.
if (!is.null(Z)) {
Z <- as.matrix(Z)
if (!is.numeric(Z) | sum(is.na(Z)) > 0)
stop("Input Z must be a numeric matrix with no missing values.")
if (nrow(Z) != n)
stop("Inputs X and Z do not match.")
storage.mode(Z) <- "double"
}
# Add intercept.
if (is.null(Z))
Z <- matrix(1,n,1)
else
Z <- cbind(1,Z)
# Check input y.
if (!is.numeric(y) | sum(is.na(y)) > 0)
stop("Input y must be a numeric vector with no missing values.")
if (length(y) != n)
stop("Inputs X and y do not match")
y <- c(as.double(y))
# Get choice of regression model.
family <- match.arg(family)
# (2) PROCESS OPTIONS
# -------------------
if (!is.finite(maxiter))
stop("Input maxiter must be a finite number")
# Get candidate settings for the variance of the residual (sigma),
# if provided. Note that this option is not valid for a binary trait.
if (missing(sigma)) {
sigma <- var(y)
update.sigma.default <- TRUE
} else {
sigma <- c(sigma)
update.sigma.default <- FALSE
if (family == "binomial")
stop("Input sigma is not allowed for family = binomial")
}
# Get candidate settings for the prior variance of the coefficients,
# if provided.
if (missing(sa)) {
sa <- 1
update.sa.default <- TRUE
} else {
sa <- c(sa)
update.sa.default <- FALSE
}
# Get candidate settings for the prior log-odds of inclusion. This
# may either be specified as a vector, in which case this is the
# prior applied uniformly to all variables, or it is a p x ns
# matrix, where p is the number of variables and ns is the number of
# candidate hyperparameter settings, in which case the prior
# log-odds is specified separately for each variable. A default
# setting is only available if the number of other hyperparameter
# settings is 1, in which case we select 20 candidate settings for
# the prior log-odds, evenly spaced between log10(1/p) and -1.
if (missing(logodds)) {
if (length(sigma) == 1 & length(sa) == 1)
logodds <- seq(-log10(p),-1,length.out = 20)
else
stop("logodds can only be missing when length(sigma) = length(sa) = 1")
}
if (!is.matrix(logodds)) {
prior.same <- TRUE
logodds <- t(matrix(logodds))
} else if (nrow(logodds) != p) {
prior.same <- TRUE
logodds <- t(matrix(logodds))
} else
prior.same <- FALSE
# Here is where I ensure that the numbers of candidate hyperparameter
# settings are consistent.
ns <- max(length(sigma),length(sa),ncol(logodds))
if (length(sigma) == 1)
sigma <- rep(sigma,ns)
if (length(sa) == 1)
sa <- rep(sa,ns)
if (ncol(logodds) == 1)
logodds <- rep_col(logodds,ns)
if (length(sigma) != ns | length(sa) != ns | ncol(logodds) != ns)
stop("options.sigma, options.sa and options.logodds are inconsistent")
  # Determine whether to update the residual variance parameter. Note
  # that this option is only relevant for a continuous trait (family =
  # "gaussian").
if (missing(update.sigma))
update.sigma <- update.sigma.default
# Determine whether to update the prior variance of the regression
# coefficients.
if (missing(update.sa))
update.sa <- update.sa.default
# Set the weights---used to indicate that different observations
# have different variances---or the covariance matrix of the
# residual (resid.vcov). Note that only one of the weights and
# residual covariance matrix can be non-NULL.
if (missing(weights))
weights <- NULL
if (missing(resid.vcov))
resid.vcov <- NULL
if (!(is.null(weights) & is.null(resid.vcov)) & family != "gaussian")
stop(paste("Specifying weights or resid.vcov is only allowed for",
"family == \"gaussian\""))
if (!is.null(weights) & !is.null(resid.vcov))
stop("Only one of weights and resid.vcov may be specified")
if (!is.null(weights))
if (!(is.vector(weights) & length(weights) == n))
stop("Input weights should be a vector with the same length as y")
if (!is.null(resid.vcov))
if (is.matrix(resid.vcov) | inherits(resid.vcov,"Matrix")) {
if (!(nrow(resid.vcov) == n & ncol(resid.vcov) == n &
all(resid.vcov == t(resid.vcov))))
stop(paste("Input resid.vcov should be an n x n symmetric",
"matrix where n = length(y)"))
} else
stop("Input resid.vcov should be class \"matrix\" or \"Matrix\"")
# Set initial estimates of variational parameter alpha.
initialize.params.default <- TRUE
if (missing(alpha)) {
alpha <- rand(p,ns)
alpha <- alpha / rep_row(colSums(alpha),p)
} else
initialize.params.default <- FALSE
if (!is.matrix(alpha))
alpha <- matrix(alpha)
if (nrow(alpha) != p)
stop("Input alpha must have as many rows as X has columns")
if (ncol(alpha) == 1)
alpha <- rep_col(alpha,ns)
# Set initial estimates of variational parameter mu.
if (missing(mu))
mu <- randn(p,ns)
else
initialize.params.default <- FALSE
if (!is.matrix(mu))
mu <- matrix(mu)
if (nrow(mu) != p)
stop("Input mu must have as many rows as X has columns")
if (ncol(mu) == 1)
mu <- rep_col(mu,ns)
# Determine whether to find a good initialization for the
# variational parameters.
if (missing(initialize.params))
initialize.params <- initialize.params.default
else if (initialize.params & ns == 1)
stop(paste("initialize.params = TRUE has no effect when there is",
"only one hyperparameter setting"))
# Set initial estimates of variational parameter eta. Note this
# input is only relevant for logistic regression.
if (missing(eta)) {
eta <- matrix(1,n,ns)
optimize.eta.default <- TRUE
} else {
optimize.eta.default <- FALSE
if (family != "binomial")
stop("Input eta is only valid for family = binomial")
}
if (nrow(eta) != n)
stop("Input eta must have as many rows as X")
if (ncol(eta) == 1)
eta <- rep_col(eta,ns)
# Determine whether to update the variational parameter eta. Note this
# option is only relevant for logistic regression.
if (missing(optimize.eta))
optimize.eta <- optimize.eta.default
# Determine the order of the co-ordinate ascent updates.
if (missing(update.order))
update.order <- 1:p
if (!all(sort(intersect(update.order,1:p)) == 1:p))
stop(paste("Argument \"update.order\" should be a vector in which each",
"variable index (column of X) is included at least once"))
# Provide a brief summary of the analysis.
if (verbose) {
cat("Welcome to ")
cat("-- * * \n")
cat("VARBVS version 2.6-10")
cat("-- | | | \n")
cat("large-scale Bayesian ")
cat("-- || | | | || | | | \n")
cat("variable selection ")
cat("-- | || | | | | || || |||| || | || \n")
cat("*********************")
cat("*******************************************************\n")
cat("Copyright (C) 2012-2023 Peter Carbonetto.\n")
cat("See http://www.gnu.org/licenses/gpl.html for the full license.\n")
cat("Fitting variational approximation for Bayesian variable",
"selection model.\n")
cat(sprintf("family: %-8s",family))
cat(" num. hyperparameter settings:",length(sa),"\n")
cat(sprintf("samples: %-6d",n))
cat(sprintf(" convergence tolerance %0.1e\n",tol))
cat(sprintf("variables: %-6d",p))
cat(" iid variable selection prior:",tf2yn(prior.same),"\n")
cat(sprintf("covariates: %-6d",max(0,ncol(Z) - 1)))
cat(" fit prior var. of coefs (sa):",tf2yn(update.sa),"\n")
cat("intercept: yes ")
if (family == "gaussian")
cat("fit residual var. (sigma): ",tf2yn(update.sigma),"\n")
else if (family == "binomial")
cat("fit approx. factors (eta): ",tf2yn(optimize.eta),"\n")
}
# (3) PREPROCESSING STEPS
# -----------------------
if (family == "gaussian") {
if (!is.null(weights)) {
# Adjust the inputs X and y to account for the weights.
#
# Note that this is equivalent to setting to the residual
# variance-covariance matrix to
#
# resid.vcov <- diag(1/weights)
#
X <- sqrt(weights) * X
Z <- sqrt(weights) * Z
y <- sqrt(weights) * y
} else if (!is.null(resid.vcov)) {
# Adjust the inputs X and y to account for the variance-covariance
# matrix of the residuals.
if (verbose)
cat("Adjusting inputs for residual variance-covariance matrix.\n")
L <- tryCatch(t(chol(resid.vcov)),error = function(e) NULL)
if (is.null(L))
stop("Input resid.vcov is not a positive definite matrix")
else {
X <- forwardsolve(L,X)
Z <- forwardsolve(L,Z)
y <- forwardsolve(L,y)
}
}
# Adjust the inputs X and y so that the linear effects of the
# covariates (Z) are removed. This is equivalent to integrating
# out the regression coefficients corresponding to the covariates
# with respect to an improper, uniform prior.
out <- remove.covariate.effects(X,Z,y)
X <- out$X
y <- out$y
SZy <- out$SZy
SZX <- out$SZX
rm(out)
} else {
SZy <- NULL
SZX <- NULL
}
# Add row and column names to X if they are not provided.
if (is.null(rownames(X)))
rownames(X) <- 1:n
if (is.null(colnames(X)))
colnames(X) <- paste0("X",1:p)
# Add column names to Z if they are not already provided.
if (is.null(colnames(Z)) & ncol(Z) > 1)
colnames(Z) <- c("(Intercept)",paste0("Z",1:(ncol(Z) - 1)))
else
colnames(Z)[1] <- "(Intercept)"
# (4) INITIALIZE STORAGE FOR THE OUTPUTS
# --------------------------------------
# Initialize storage for the variational estimate of the marginal
# log-likelihood for each hyperparameter setting (logw), and the
# variances of the regression coefficients (s), and the posterior
# mean estimates of the coefficients for the covariates (mu.cov),
# which includes the intercept.
logw <- rep(0,ns)
s <- matrix(0,p,ns)
mu.cov <- matrix(0,ncol(Z),ns)
# (5) FIT BAYESIAN VARIABLE SELECTION MODEL TO DATA
# -------------------------------------------------
if (ns == 1) {
# Find a set of parameters that locally minimize the K-L
# divergence between the approximating distribution and the exact
# posterior.
if (verbose) {
cat(" variational max. incl variance params\n")
cat(" iter lower bound change vars sigma sa\n")
}
out <- outerloop(X,Z,y,family,weights,resid.vcov,SZy,SZX,c(sigma),
c(sa),c(logodds),c(alpha),c(mu),c(eta),update.order,
tol,maxiter,verbose,NULL,update.sigma,update.sa,
optimize.eta,n0,sa0)
logw <- out$logw
sigma <- out$sigma
sa <- out$sa
mu.cov[] <- out$mu.cov
alpha[] <- out$alpha
mu[] <- out$mu
s[] <- out$s
eta[] <- out$eta
if (verbose)
cat("\n")
} else {
# If a good initialization isn't already provided, find a good
# initialization for the variational parameters. Repeat for each
# candidate setting of the hyperparameters.
if (initialize.params) {
if (verbose) {
cat("Finding best initialization for",ns,"combinations of",
"hyperparameters.\n");
cat("-iteration- variational max. incl variance params\n");
cat("outer inner lower bound change vars sigma sa\n");
}
# Repeat for each setting of the hyperparameters.
for (i in 1:ns) {
out <- outerloop(X,Z,y,family,weights,resid.vcov,SZy,SZX,sigma[i],
sa[i],logodds[,i],alpha[,i],mu[,i],eta[,i],
update.order,tol,maxiter,verbose,i,update.sigma,
update.sa,optimize.eta,n0,sa0)
logw[i] <- out$logw
sigma[i] <- out$sigma
sa[i] <- out$sa
mu.cov[,i] <- out$mu.cov
alpha[,i] <- out$alpha
mu[,i] <- out$mu
s[,i] <- out$s
eta[,i] <- out$eta
}
# Choose an initialization common to all the runs of the
# coordinate ascent algorithm. This is chosen from the
# hyperparameters with the highest variational estimate of the
# marginal likelihood.
i <- which.max(logw)
alpha <- rep_col(alpha[,i],ns)
mu <- rep_col(mu[,i],ns)
if (optimize.eta)
eta <- rep_col(eta[,i],ns)
if (update.sigma)
sigma <- rep(sigma[i],ns)
if (update.sa)
sa <- rep(sa[i],ns)
}
# Compute a variational approximation to the posterior distribution
# for each candidate setting of the hyperparameters.
if (verbose) {
cat("Computing marginal likelihood for",ns,"combinations of",
"hyperparameters.\n")
cat("-iteration- variational max. incl variance params\n")
cat("outer inner lower bound change vars sigma sa\n")
}
# For each setting of the hyperparameters, find a set of
# parameters that locally minimize the K-L divergence between the
# approximating distribution and the exact posterior.
for (i in 1:ns) {
out <- outerloop(X,Z,y,family,weights,resid.vcov,SZy,SZX,sigma[i],sa[i],
logodds[,i],alpha[,i],mu[,i],eta[,i],update.order,tol,
maxiter,verbose,i,update.sigma,update.sa,optimize.eta,
n0,sa0)
logw[i] <- out$logw
sigma[i] <- out$sigma
sa[i] <- out$sa
mu.cov[,i] <- out$mu.cov
alpha[,i] <- out$alpha
mu[,i] <- out$mu
s[,i] <- out$s
eta[,i] <- out$eta
}
}
# (6) CREATE FINAL OUTPUT
# -----------------------
# Compute the normalized importance weights and the posterior
# inclusion probabilities (PIPs) and mean regression coefficients
# averaged over the hyperparameter settings.
if (ns == 1) {
w <- 1
pip <- c(alpha)
beta <- c(alpha*mu)
beta.cov <- c(mu.cov)
} else {
w <- normalizelogweights(logw)
pip <- c(alpha %*% w)
beta <- c((alpha*mu) %*% w)
beta.cov <- c(mu.cov %*% w)
}
if (family == "gaussian") {
fit <- list(family = family,n0 = n0,sa0 = sa0,mu.cov = mu.cov,
update.sigma = update.sigma,update.sa = update.sa,
prior.same = prior.same,optimize.eta = FALSE,logw = logw,
w = w,sigma = sigma,sa = sa,logodds = logodds,alpha = alpha,
mu = mu,s = s,eta = NULL,pip = pip,beta = beta,
beta.cov = beta.cov,y = y)
class(fit) <- c("varbvs","list")
if (is.null(weights) & is.null(resid.vcov) & ncol(Z) == 1) {
      # Compute the proportion of variance in Y explained by the fitted
      # model---only in the unweighted, i.i.d. case when there are no
      # additional covariates included in the model.
if (verbose)
cat("Estimating proportion of variance in Y explained by model.\n");
fit$model.pve <- varbvspve(fit,X,nr)
# Compute the proportion of variance in Y explained by each
# variable. This can only be estimated in the i.i.d. case when
# there are no additional covariates included in the model.
fit$pve <- matrix(0,p,ns)
rownames(fit$pve) <- colnames(X)
sx <- var1.cols(X)
for (i in 1:ns)
fit$pve[,i] <- sx*(mu[,i]^2 + s[,i])/var1(y)
} else {
fit$model.pve <- NULL
fit$pve <- NULL
}
# Restore the inputted X and y.
X <- X + Z %*% SZX
y <- y + c(Z %*% SZy)
# Compute the fitted values for each hyperparameter setting.
fit$fitted.values <- varbvs.linear.predictors(X,Z,mu.cov,alpha,mu)
# Compute the residuals for each hyperparameter setting.
fit$residuals <- y - fit$fitted.values
} else if (family == "binomial") {
fit <- list(family = family,n0 = n0,mu.cov = mu.cov,sa0 = sa0,
update.sigma = FALSE,update.sa = update.sa,
optimize.eta = optimize.eta,prior.same = prior.same,
logw = logw,w = w,sigma = NULL,sa = sa,logodds = logodds,
alpha = alpha,mu = mu,s = s,eta = eta,pip = pip,beta = beta,
beta.cov = beta.cov,pve = NULL,model.pve = NULL)
class(fit) <- c("varbvs","list")
# Compute the fitted values for each hyperparameter setting.
fit$fitted.values <-
sigmoid(varbvs.linear.predictors(X,Z,mu.cov,alpha,mu))
# Compute the "deviance" and "response" residuals for
# hyperparameter setting.
fit$residuals <-
list(deviance = resid.dev.logistic(matrix(y,n,ns),fit$fitted.values),
response = y - fit$fitted.values)
}
# Add names to some of the outputs.
hyper.labels <- paste("theta",1:ns,sep = "_")
rownames(fit$alpha) <- colnames(X)
rownames(fit$mu) <- colnames(X)
rownames(fit$s) <- colnames(X)
names(fit$beta) <- colnames(X)
names(fit$pip) <- colnames(X)
rownames(fit$mu.cov) <- colnames(Z)
names(fit$beta.cov) <- colnames(Z)
rownames(fit$fitted.values) <- rownames(X)
colnames(fit$mu.cov) <- hyper.labels
colnames(fit$alpha) <- hyper.labels
colnames(fit$mu) <- hyper.labels
colnames(fit$s) <- hyper.labels
colnames(fit$fitted.values) <- hyper.labels
if (family == "gaussian") {
rownames(fit$residuals) <- rownames(X)
colnames(fit$residuals) <- hyper.labels
} else {
rownames(fit$eta) <- rownames(X)
colnames(fit$eta) <- hyper.labels
rownames(fit$residuals$deviance) <- rownames(X)
rownames(fit$residuals$response) <- rownames(X)
colnames(fit$residuals$deviance) <- hyper.labels
colnames(fit$residuals$response) <- hyper.labels
}
if (prior.same)
fit$logodds <- c(fit$logodds)
else
rownames(fit$logodds) <- colnames(X)
if (!is.null(fit$pve))
colnames(fit$pve) <- hyper.labels
return(fit)
}
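# Illustrative usage sketch with simulated data (not package code): fit the
# model over the default grid of 20 prior log-odds settings, then inspect
# the largest posterior inclusion probabilities.
if (FALSE) {
  set.seed(1)
  n <- 200; p <- 500
  X    <- matrix(rnorm(n*p),n,p)
  beta <- c(rnorm(5),rep(0,p - 5))   # only the first 5 variables have an effect
  y    <- c(X %*% beta + rnorm(n))
  fit  <- varbvs(X,NULL,y,family = "gaussian",verbose = FALSE)
  head(sort(fit$pip,decreasing = TRUE))
}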
# ----------------------------------------------------------------------
# This function implements one iteration of the "outer loop".
outerloop <- function (X, Z, y, family, weights, resid.vcov, SZy, SZX, sigma,
sa, logodds, alpha, mu, eta, update.order, tol, maxiter,
verbose, outer.iter, update.sigma, update.sa,
optimize.eta, n0, sa0) {
p <- ncol(X)
if (length(logodds) == 1)
logodds <- rep(logodds,p)
# Note that we need to multiply the prior log-odds by log(10),
# because varbvsnorm, varbvsbin and varbvsbinz define the prior
# log-odds using the natural logarithm (base e).
if (family == "gaussian") {
# Optimize the variational lower bound for the Bayesian variable
# selection model.
out <- varbvsnorm(X,y,sigma,sa,log(10)*logodds,alpha,mu,update.order,
tol,maxiter,verbose,outer.iter,update.sigma,update.sa,
n0,sa0)
out$eta <- eta
# If weights are provided, adjust the variational lower bound to
# account for the differences in variance of the samples.
if (!is.null(weights))
out$logw <- out$logw + sum(log(weights))/2
# If a covariance matrix is provided for the residuals, adjust the
# variational lower bound to account for a non-identity covariance
# matrix.
if (!is.null(resid.vcov))
out$logw <- out$logw - determinant(resid.vcov,logarithm = TRUE)$modulus/2
# Adjust the variational lower bound to account for integral over
# the regression coefficients corresponding to the covariates.
out$logw <- out$logw - determinant(crossprod(Z),logarithm = TRUE)$modulus/2
# Compute the posterior mean estimate of the regression
# coefficients for the covariates under the current variational
# approximation.
out$mu.cov <- c(with(out,SZy - SZX %*% (alpha*mu)))
} else if (family == "binomial") {
# Optimize the variational lower bound for the Bayesian variable
# selection model.
if (ncol(Z) == 1)
out <- varbvsbin(X,y,sa,log(10)*logodds,alpha,mu,eta,update.order,tol,
maxiter,verbose,outer.iter,update.sa,optimize.eta,
n0,sa0)
else
out <- varbvsbinz(X,Z,y,sa,log(10)*logodds,alpha,mu,eta,update.order,
tol,maxiter,verbose,outer.iter,update.sa,optimize.eta,
n0,sa0)
out$sigma <- sigma
# Compute the posterior mean estimate of the regression
# coefficients for the covariates under the current variational
# approximation.
Xr <- with(out,c(X %*% (alpha*mu)))
d <- slope(out$eta)
out$mu.cov <- c(solve(t(Z) %*% (Z*d),c((y - 0.5 - d*Xr) %*% Z)))
}
numiter <- length(out$logw)
out$logw <- out$logw[numiter]
return(out)
}
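# Minimal sketch (toy numbers) of the log-odds rescaling noted above: a prior
# inclusion probability of 0.01 is a base-10 log-odds of about -2, and
# multiplying by log(10) gives the natural-log odds expected by varbvsnorm,
# varbvsbin and varbvsbinz.
if (FALSE) {
  prob        <- 0.01
  logodds10   <- log10(prob/(1 - prob))   # base-10 scale, as in varbvs
  logodds.nat <- log(10)*logodds10        # natural-log scale
  all.equal(logodds.nat,log(prob/(1 - prob)))
}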
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/varbvs.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Return the number of observations used to fit a model.
nobs.varbvs <- function (object, ...)
nrow(object$fitted.values)
# ----------------------------------------------------------------------
# Return the names of the samples.
case.names.varbvs <- function (object, ...)
rownames(object$fitted.values)
# ----------------------------------------------------------------------
# Return the names of the (included) variables.
variable.names.varbvs <- function (object, full = FALSE,
include.threshold = 0.01, ...) {
if (full)
return(with(object,c(rownames(mu.cov),rownames(alpha))))
else {
i <- which(object$pip >= include.threshold)
return(with(object,c(rownames(mu.cov),rownames(alpha)[i])))
}
}
# ----------------------------------------------------------------------
# Return the names of the candidate variables.
labels.varbvs <- function (object, ...)
rownames(object$alpha)
# ----------------------------------------------------------------------
# Return the estimates of the regression coefficients at each
# hyperparameter setting, as well as the "averaged" estimates in the
# last column of this matrix.
coef.varbvs <- function (object, ...) {
ns <- length(object$w)
if (ns == 1)
out <- with(object,rbind(mu.cov,alpha*mu))
else {
out <- with(object,rbind(cbind(mu.cov,beta.cov),
cbind(alpha*mu,beta)))
colnames(out)[ns + 1] <- "averaged"
}
return(out)
}
# ----------------------------------------------------------------------
# Return the estimates of the regression coefficients from the fitted
# varbvsmix model.
coef.varbvsmix <- function (object, ...)
return(with(object,c(mu.cov,rowSums(alpha*mu))))
# ----------------------------------------------------------------------
# Return the fitted values stored in an n x ns matrix, where n is the
# number of samples and ns is the number of hyperparameter settings.
fitted.varbvs <- function (object, ...)
object$fitted.values
# ----------------------------------------------------------------------
# Return the residuals stored in an n x ns matrix, where n is the
# number of samples and ns is the number of hyperparameter
# settings. For a logistic regression model, there are two types of
# residuals ("deviance" and "response").
resid.varbvs <- function (object, type = c("deviance","response"), ...) {
if (object$family == "gaussian")
out <- object$residuals
else {
type <- match.arg(type)
if (type == "deviance")
out <- object$residuals$deviance
else if (type == "response")
out <- object$residuals$response
else
stop("Argument \"type\" should be \"deviance\" or \"response\"")
}
return(out)
}
residuals.varbvs <- resid.varbvs
# ----------------------------------------------------------------------
# Return the deviance for each hyperparameter setting.
deviance.varbvs <- function (object, ...)
colSums(resid(object,type = "deviance")^2)
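# Illustrative usage sketch (assumes a hypothetical varbvs fit "fit"): the
# accessors above return the coefficients, residuals and deviance for each
# hyperparameter setting.
if (FALSE) {
  b <- coef(fit)             # one column per setting (plus "averaged" if ns > 1)
  r <- resid(fit)            # "deviance" or "response" residuals for binomial fits
  d <- deviance(fit)         # one value per hyperparameter setting
  head(variable.names(fit))  # covariates plus variables with PIP >= 0.01
}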
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/varbvs.properties.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Implements the fully-factorized variational approximation for
# Bayesian variable selection in logistic regression. It finds the
# "best" fully-factorized variational approximation to the posterior
# distribution of the coefficients in a logistic regression model of a
# binary outcome, with spike and slab priors on the coefficients. By
# "best", we mean the approximating distribution that locally
# minimizes the K-L divergence between the approximating distribution
# and the exact posterior.
#
# Input X is the n x p matrix of variable (or feature) observations,
# where n is the number of samples, and p is the number of variables.
# Input y contains samples of the binary outcome; it is a vector of
# length n.
#
# Inputs sa and logodds are the hyperparameters. Scalar sa is the
# prior variance of the coefficients. Input logodds is the prior
# log-odds of inclusion for each variable. Note that the prior
# log-odds here is defined with respect to the *natural* logarithm,
# whereas in function varbvs the prior log-odds is defined with
# respect to the base-10 logarithm, so a scaling factor of log(10) is
# needed to convert from the latter to the former. Also, note that the
# residual variance parameter (sigma) is not needed to model a binary
# outcome.
#
# Output logw is the variational estimate of the marginal
# log-likelihood given the hyperparameters at each iteration of the
# co-ordinate ascent optimization procedure. Output err is the maximum
# difference between the approximate posterior probabilities (alpha)
# at successive iterations. Outputs alpha, mu and s are the
# parameters of the variational approximation and, equivalently,
# variational estimates of posterior quantities: under the variational
# approximation, the ith regression coefficient is normal with
# probability alpha[i]; mu[i] and s[i] are the mean and variance of
# the coefficient given that it is included in the model. Output eta
# is the vector of free parameters that specify the variational
# approximation to the likelihood factors in the logistic regression.
varbvsbin <- function (X, y, sa, logodds, alpha, mu, eta, update.order,
tol = 1e-4, maxiter = 1e4, verbose = TRUE,
outer.iter = NULL, update.sa = TRUE,
optimize.eta = TRUE, n0 = 10, sa0 = 1) {
# Get the number of samples (n) and variables (p).
n <- nrow(X)
p <- ncol(X)
# (1) INITIAL STEPS
# -----------------
# Compute a few useful quantities.
Xr <- c(X %*% (alpha*mu))
stats <- updatestats_varbvsbin(X,y,eta)
s <- sa/(sa*stats$xdx + 1)
# Initialize storage for outputs logw and err.
logw <- rep(0,maxiter)
err <- rep(0,maxiter)
# (2) MAIN LOOP
# -------------
# Repeat until convergence criterion is met, or until the maximum
# number of iterations is reached.
for (iter in 1:maxiter) {
# Save the current variational parameters and model parameters.
alpha0 <- alpha
mu0 <- mu
s0 <- s
eta0 <- eta
sa.old <- sa
# (2a) COMPUTE CURRENT VARIATIONAL LOWER BOUND
# --------------------------------------------
# Compute variational lower bound to marginal log-likelihood.
logw0 <- int.logit(y,stats,alpha,mu,s,Xr,eta) +
int.gamma(logodds,alpha) +
int.klbeta(alpha,mu,s,sa)
# (2b) UPDATE VARIATIONAL APPROXIMATION
# -------------------------------------
# Run a forward or backward pass of the coordinate ascent updates.
out <- varbvsbinupdate(X,sa,logodds,stats,alpha,mu,Xr,update.order)
alpha <- out$alpha
mu <- out$mu
Xr <- out$Xr
rm(out)
# (2c) UPDATE ETA
# ---------------
# Update the free parameters specifying the variational approximation
# to the logistic regression factors.
if (optimize.eta) {
eta <- update_eta(X,y,betavar(alpha,mu,s),Xr,stats$d)
stats <- updatestats_varbvsbin(X,y,eta)
s <- sa/(sa*stats$xdx + 1)
}
# (2d) COMPUTE UPDATED VARIATIONAL LOWER BOUND
# --------------------------------------------
# Compute variational lower bound to marginal log-likelihood.
logw[iter] <- int.logit(y,stats,alpha,mu,s,Xr,eta) +
int.gamma(logodds,alpha) +
int.klbeta(alpha,mu,s,sa)
# (2e) UPDATE PRIOR VARIANCE OF REGRESSION COEFFICIENTS
# -----------------------------------------------------
# Compute the maximum a posteriori estimate of sa, if requested.
# Note that we must also recalculate the variance of the
# regression coefficients when this parameter is updated.
if (update.sa) {
sa <- (sa0*n0 + dot(alpha,s + mu^2))/(n0 + sum(alpha))
s <- sa/(sa*stats$xdx + 1)
}
# (2f) CHECK CONVERGENCE
# ----------------------
# Print the status of the algorithm and check the convergence
# criterion. Convergence is reached when the maximum relative
# difference between the parameters at two successive iterations
# is less than the specified tolerance, or when the variational
# lower bound has decreased. I ignore parameters that are very
# small. If the variational bound decreases, stop.
err[iter] <- max(abs(alpha - alpha0))
if (verbose) {
if (is.null(outer.iter))
status <- NULL
else
status <- sprintf("%05d ",outer.iter)
progress.str <-
paste(status,sprintf("%05d %+13.6e %0.1e %06.1f NA %0.1e",
iter,logw[iter],err[iter],sum(alpha),sa),sep="")
cat(progress.str,"\n")
}
if (logw[iter] < logw0) {
logw[iter] <- logw0
err[iter] <- 0
sa <- sa.old
alpha <- alpha0
mu <- mu0
s <- s0
eta <- eta0
break
} else if (err[iter] < tol)
break
}
# Return the variational estimates.
return(list(logw = logw[1:iter],err = err[1:iter],sa = sa,
alpha = alpha,mu = mu,s = s,eta = eta))
}
# ----------------------------------------------------------------------
# Calculates useful quantities for updating the variational approximation
# to the logistic regression factors.
updatestats_varbvsbin <- function (X, y, eta) {
# Compute the slope of the conjugate.
d <- slope(eta)
# Compute beta0 and yhat. See the journal paper for an explanation
# of these two variables.
beta0 <- sum(y - 0.5)/sum(d)
yhat <- y - 0.5 - beta0*d
# Calculate xy = X'*yhat and xd = X'*d.
xy <- c(yhat %*% X)
xd <- c(d %*% X)
# Compute the diagonal entries of X'*dhat*X. For a definition of
# dhat, see the Bayesian Analysis journal paper.
#
# This is the less numerically stable version of this update:
#
# xdx <- diagsq(X,d) - xd^2/sum(d)
#
dzr <- d/sqrt(sum(d))
xdx <- diagsq(X,d) - c(dzr %*% X)^2
# Return the result.
return(list(d = d,yhat = yhat,xy = xy,xd = xd,xdx = xdx))
}
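# Minimal sketch with toy numbers (not package code) checking the identity
# behind the numerically stable form of xdx used above: subtracting
# (X'd)^2/sum(d) equals subtracting the squared entries of (d/sqrt(sum(d)))'X.
# Here diag(X'DX) is written out in base R, and the Jaakkola-Jordan slope
# (sigmoid(eta) - 0.5)/eta stands in for the package's slope() helper.
if (FALSE) {
  set.seed(1)
  X    <- matrix(rnorm(20),5,4)
  eta  <- rnorm(5)
  d    <- (1/(1 + exp(-eta)) - 0.5)/eta
  xd   <- c(d %*% X)
  dzr  <- d/sqrt(sum(d))
  xdx1 <- colSums(d*X^2) - xd^2/sum(d)      # less stable form
  xdx2 <- colSums(d*X^2) - c(dzr %*% X)^2   # form used above
  all.equal(xdx1,xdx2)
}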
# ----------------------------------------------------------------------
# Computes the M-step update for the parameters specifying the
# variational lower bound to the logistic regression factors. Input Xr
# must be Xr = X*r, where r is the posterior mean of the coefficients.
# Note that under the fully-factorized variational approximation, r =
# alpha*mu. Input v is the posterior variance of the coefficients. For
# this update to be valid, it is required that the posterior
# covariance of the coefficients is equal to diag(v). Input d must be
# d = slope(eta); see function 'slope' for details.
update_eta <- function (X, y, v, Xr, d) {
# Compute mu0, the posterior mean of the intercept in the logistic
# regression under the variational approximation. Here, a is the
# conditional variance of the intercept given the other coefficients.
a <- 1/sum(d)
mu0 <- a*(sum(y - 0.5) - dot(d,Xr))
# Compute s0, the (marginal) posterior variance of the intercept in the
# logistic regression.
xd <- c(d %*% X)
s0 <- a*(1 + a*dot(v,xd^2))
# Calculate the covariance between the intercept and coefficients.
w <- (-a*xd*v)
# This is the M-step update for the free parameters.
return(sqrt((mu0 + Xr)^2 + s0 + diagsqt(X,v) + 2*c(X %*% w)))
}
# ----------------------------------------------------------------------
# Computes an integral that appears in the variational lower bound of
# the marginal log-likelihood for the logistic regression model. This
# integral is an approximation to the expectation of the logistic
# regression log-likelihood taken with respect to the variational
# approximation.
int.logit <- function (y, stats, alpha, mu, s, Xr, eta) {
# Get some of the statistics.
yhat <- stats$yhat
xdx <- stats$xdx
d <- stats$d
# Get the variance of the intercept given the other coefficients.
a <- 1/sum(d)
# Compute the variational approximation to the expectation of the
# log-likelihood with respect to the variational approximation.
return(sum(logsigmoid(eta)) + dot(eta,d*eta - 1)/2 + log(a)/2 +
a*sum(y - 0.5)^2/2 + dot(yhat,Xr) - quadnorm(Xr,d)^2/2 +
a*dot(d,Xr)^2/2 - dot(xdx,betavar(alpha,mu,s))/2)
}
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/varbvsbin.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Execute a single iteration of the coordinate ascent updates to
# maximize the variational lower bound for Bayesian variable selection
# in logistic regression.
#
# Input X is an n x p matrix of observations of the variables (or
# features), where n is the number of samples, and p is the number of
# variables. Input y contains samples of the binary outcome; it is a
# vector of length n.
#
# Input sa specifies the prior variance of the coefficients. Input
# logodds is the prior log-odds of inclusion for each variable. It
# must be a vector of length p. Note that a residual variance
# parameter (sigma) is not needed to model a binary outcome. See
# function 'updatestats_varbvsbin' for more information about input
# 'stats'.
#
# Inputs alpha0, mu0 are the current parameters of the variational
# approximation; under the variational approximation, the ith
# regression coefficient is normal with probability alpha0[i], and
# mu0[i] is the mean of the coefficient given that it is included in
# the model. Input Xr0 must be Xr0 = X*(alpha0*mu0).
#
# Input i specifies the order in which the coordinates are updated. It
# may be a vector of any length. Each entry of i must be an integer
# between 1 and p.
#
# There are three outputs. Output vectors alpha and mu are the updated
# variational parameters, and Xr = X*(alpha*mu). The computational
# complexity is O(n*length(i)).
#
# This function calls "varbvsbinupdate_Call", a function compiled from
# C code, using the .Call interface. See the comments accompanying
# function 'varbvsnormupdate' for instructions on building and loading
# the shared objects (.so) file into R.
varbvsbinupdate <- function (X, sa, logodds, stats, alpha0, mu0, Xr0,
updates) {
# Get the number of samples (n) and variables (p).
n <- nrow(X)
p <- ncol(X)
# Check input X.
if (!is.double(X) || !is.matrix(X))
stop("Input argument 'X' must be a double-precision matrix")
# Check input sa.
if (length(sa) != 1)
stop("Input sa must be a scalar")
# Check input logodds, alpha0 and mu0.
if (!(length(logodds) == p & length(alpha0) == p & length(mu0) == p))
stop("logodds, alpha0 and mu0 must have length = ncol(X)")
# Check input Xr0.
if (length(Xr0) != n)
stop("length(Xr0) must be equal to nrow(X)")
# Check input "updates".
if (sum(updates < 1 | updates > p) > 0)
stop("Input \"updates\" contains invalid variable indices")
# Initialize storage for the results.
alpha <- c(alpha0)
mu <- c(mu0)
Xr <- c(Xr0)
# Execute the C routine using the .Call interface, and return the
# updated variational parameters statistics in a list object. The
# main reason for using the .Call interface is that there is less of
# a constraint on the size of the input matrices. The only
# components that change are alpha, mu and Xr. Note that I need to
# subtract 1 from the indices because R vectors start at 1, and C
# arrays start at 0.
out <- .Call(C_varbvsbinupdate_Call,X = X,sa = as.double(sa),
logodds = as.double(logodds),d = as.double(stats$d),
xdx = as.double(stats$xdx),xy = as.double(stats$xy),
xd = as.double(stats$xd),alpha = alpha,mu = mu,Xr = Xr,
i = as.integer(updates - 1))
return(list(alpha = alpha,mu = mu,Xr = Xr))
}
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/varbvsbinupdate.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# This function implements the fully-factorized variational
# approximation for Bayesian variable selection in logistic
# regression, allowing for covariates. It is the same as varbvsbin,
# except that it allows for an additional set of covariates that are
# not subject to the same "spike-and-slab" priors as the other
# variables. The covariate data Z are specified as an n x m matrix,
# where n is the number of samples, and m is the number of
# covariates. This function is equivalent to varbvsbin when only one
# covariate is specified, the intercept, and Z = ones(n,1).
varbvsbinz <- function (X, Z, y, sa, logodds, alpha, mu, eta, update.order,
tol = 1e-4, maxiter = 1e4, verbose = TRUE,
outer.iter = NULL, update.sa = TRUE,
optimize.eta = TRUE,n0 = 10, sa0 = 1) {
# Get the number of samples (n) and variables (p).
n <- nrow(X)
p <- ncol(X)
# (1) INITIAL STEPS
# -----------------
# Compute a few useful quantities.
Xr <- c(X %*% (alpha*mu))
stats <- updatestats_varbvsbinz(X,Z,y,eta)
s <- sa/(sa*stats$xdx + 1)
# Initialize storage for outputs logw and err.
logw <- rep(0,maxiter)
err <- rep(0,maxiter)
# (2) MAIN LOOP
# -------------
# Repeat until convergence criterion is met, or until the maximum
# number of iterations is reached.
for (iter in 1:maxiter) {
# Save the current variational parameters and model parameters.
alpha0 <- alpha
mu0 <- mu
s0 <- s
eta0 <- eta
sa.old <- sa
# (2a) COMPUTE CURRENT VARIATIONAL LOWER BOUND
# --------------------------------------------
logw0 <- int.logitz(Z,y,stats,alpha,mu,s,Xr,eta) +
int.gamma(logodds,alpha) +
int.klbeta(alpha,mu,s,sa)
# (2b) UPDATE VARIATIONAL APPROXIMATION
# -------------------------------------
# Run a forward or backward pass of the coordinate ascent updates.
out <- varbvsbinzupdate(X,sa,logodds,stats,alpha,mu,Xr,update.order)
alpha <- out$alpha
mu <- out$mu
Xr <- out$Xr
rm(out)
# (2c) UPDATE ETA
# ---------------
# Update the free parameters specifying the variational approximation
# to the logistic regression factors.
if (optimize.eta) {
eta <- update_etaz(X,Z,y,betavar(alpha,mu,s),Xr,stats$d)
stats <- updatestats_varbvsbinz(X,Z,y,eta)
s <- sa/(sa*stats$xdx + 1)
}
# (2d) COMPUTE UPDATED VARIATIONAL LOWER BOUND
# --------------------------------------------
# Compute variational lower bound to marginal log-likelihood.
logw[iter] <- int.logitz(Z,y,stats,alpha,mu,s,Xr,eta) +
int.gamma(logodds,alpha) +
int.klbeta(alpha,mu,s,sa)
# (2e) UPDATE PRIOR VARIANCE OF REGRESSION COEFFICIENTS
# -----------------------------------------------------
# Compute the maximum a posteriori estimate of sa, if requested.
# Note that we must also recalculate the variance of the
# regression coefficients when this parameter is updated.
if (update.sa) {
sa <- (sa0*n0 + dot(alpha,s + mu^2))/(n0 + sum(alpha))
s <- sa/(sa*stats$xdx + 1)
}
# (2f) CHECK CONVERGENCE
# ----------------------
# Print the status of the algorithm and check the convergence
# criterion. Convergence is reached when the maximum relative
# difference between the parameters at two successive iterations
# is less than the specified tolerance, or when the variational
# lower bound has decreased. I ignore parameters that are very
# small. If the variational bound decreases, stop.
err[iter] <- max(abs(alpha - alpha0))
if (verbose) {
if (is.null(outer.iter))
status <- NULL
else
status <- sprintf("%05d ",outer.iter)
progress.str <-
paste(status,sprintf("%05d %+13.6e %0.1e %06.1f NA %0.1e",
iter,logw[iter],err[iter],sum(alpha),sa),sep="")
cat(progress.str,"\n")
}
if (logw[iter] < logw0) {
logw[iter] <- logw0
err[iter] <- 0
sa <- sa.old
alpha <- alpha0
mu <- mu0
s <- s0
eta <- eta0
break
} else if (err[iter] < tol)
break
}
# Return the variational estimates.
return(list(logw = logw[1:iter],err = err[1:iter],sa = sa,
alpha = alpha,mu = mu,s = s,eta = eta))
}
# ----------------------------------------------------------------------
# diagprod(A,B) efficiently computes diag(A*B').
diagprod <- function (A, B)
rowSums(A * B)
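# Minimal sketch with toy matrices checking that diagprod(A,B) matches
# diag(A %*% t(B)) without forming the full matrix product.
if (FALSE) {
  A <- matrix(rnorm(12),4,3)
  B <- matrix(rnorm(12),4,3)
  all.equal(diagprod(A,B),diag(A %*% t(B)))
}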
# ----------------------------------------------------------------------
# This function returns useful quantities for updating the variational
# approximation to the logistic regression factors, allowing for
# covariates.
updatestats_varbvsbinz <- function (X, Z, y, eta) {
# Compute the slope of the conjugate.
d <- slope(eta)
# Compute the posterior covariance of u (coefficients for Z) given
# beta (coefficients for X). This is equivalent to MATLAB expression
# S = inv(Z'*diag(d)*Z).
S <- solve(t(Z) %*% (Z * d))
# Compute matrix dzr = D*Z*R', where D = diag(d), and R is an upper
# triangular matrix such that R'*R = S.
R <- chol(S)
dzr <- d * (Z %*% t(R))
# Compute yhat.
yhat <- c(y - 0.5 - dzr %*% R %*% (t(Z) %*% (y - 0.5)))
# Calculate xy = X'*yhat and xd = X'*d.
xy <- c(yhat %*% X)
xd <- c(d %*% X)
# Compute the diagonal entries of X'*Dhat*X. For a definition of Dhat,
# see the Bayesian Analysis journal paper.
xdx <- diagsq(X,d) - diagsq(t(dzr) %*% X)
# Return the result.
return(list(S = S,d = d,yhat = yhat,xy = xy,xd = xd,xdx = xdx,dzr = dzr))
}
# ----------------------------------------------------------------------
# Computes the M-step update for the parameters specifying the
# variational lower bound to the logistic regression factors, allowing
# for additional covariates.
update_etaz <- function (X, Z, y, v, Xr, d) {
# Compute the posterior covariance of u given beta. This is
# equivalent to MATLAB expression S = inv(Z'*diag(d)*Z).
S <- solve(t(Z) %*% (Z * d))
# Compute the posterior mean of the regression coefficients
# corresponding to the covariates.
muz <- S %*% t(Z) %*% (y - 0.5 - d*Xr)
# Calculate the covariance between the coefficients u and beta.
W <- (-t((S %*% t(Z * d)) %*% X) * v)
# This is the M-step update for the free parameters.
U <- t((S %*% t(Z * d)) %*% X)
return(sqrt(c(Z %*% muz + Xr)^2 + diagsq2(Z,S) +
diagsq2(Z,t(U) %*% (U * v)) + diagsqt(X,v) +
2*diagprod(X %*% W,Z)))
}
# -----------------------------------------------------------------------
int.logitz <- function (Z, y, stats, alpha, mu, s, Xr, eta) {
# Get some of the statistics.
yhat <- stats$yhat
xdx <- stats$xdx
S <- stats$S
d <- stats$d
# Compute the variational approximation to the expectation of the
# log-likelihood with respect to the approximate posterior distribution.
return(sum(logsigmoid(eta)) + dot(eta,d*eta - 1)/2 +
c(determinant(S,logarithm = TRUE)$modulus/2) +
quadnorm(t(Z) %*% (y - 0.5),S)^2/2 + dot(yhat,Xr) -
quadnorm(Xr,d)^2/2 + quadnorm(t(Z) %*% (Xr * d),S)^2/2 -
dot(xdx,betavar(alpha,mu,s))/2)
}
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/varbvsbinz.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Executes a single iteration of the coordinate ascent updates to
# maximize the variational lower bound for Bayesian variable selection
# in logistic regression, allowing for additional covariates. See
# varbvsbinupdate for more details.
varbvsbinzupdate <- function (X, sa, logodds, stats, alpha0, mu0, Xr0,
updates) {
# Get the number of samples (n) and variables (p).
n <- nrow(X)
p <- ncol(X)
# Check input X.
if (!is.double(X) || !is.matrix(X))
stop("Input argument 'X' must be a double-precision matrix")
# Check input sa.
if (length(sa) != 1)
stop("Input sa must be a scalar")
# Check input logodds, alpha0 and mu0.
if (!(length(logodds) == p & length(alpha0) == p & length(mu0) == p))
stop("logodds, alpha0 and mu0 must have length = ncol(X)")
# Check input Xr0.
if (length(Xr0) != n)
stop("length(Xr0) must be equal to nrow(X)")
# Check input "updates".
if (sum(updates < 1 | updates > p) > 0)
stop("Input \"updates\" contains invalid variable indices")
# Initialize storage for the results.
alpha <- c(alpha0)
mu <- c(mu0)
Xr <- c(Xr0)
# Execute the C routine using the .Call interface, and return the
# updated variational parameters in a list object. The
# main reason for using the .Call interface is that there is less of
# a constraint on the size of the input matrices. The only
# components that change are alpha, mu and Xr. Note that I need to
# subtract 1 from the indices because R vectors start at 1, and C
# arrays start at 0.
out <- .Call(C_varbvsbinzupdate_Call,X = X,sa = as.double(sa),
logodds = as.double(logodds),d = as.double(stats$d),
xdx = as.double(stats$xdx),xy = as.double(stats$xy),
dzr = stats$dzr,alpha = alpha,mu = mu,Xr = Xr,
i = as.integer(updates - 1))
return(list(alpha = alpha,mu = mu,Xr = Xr))
}
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/varbvsbinzupdate.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Compute posterior statistics, ignoring correlations.
varbvsindep <- function (fit, X, Z, y) {
# Check that the first input is an instance of class "varbvs".
if (!is(fit,"varbvs"))
stop("Input fit must be an instance of class \"varbvs\".")
# Get the number of samples (n), variables (p) and hyperparameter
# settings (ns).
n <- nrow(X)
p <- ncol(X)
ns <- length(fit$logw)
# Check input X.
if (!(is.matrix(X) & is.numeric(X) & sum(is.na(X)) == 0))
stop("Input X must be a numeric matrix with no missing values.")
storage.mode(X) <- "double"
if (nrow(fit$alpha) != p)
stop("Inputs X and fit are not compatible.")
# Check input Z.
if (!is.null(Z)) {
Z <- as.matrix(Z)
if (!is.numeric(Z) | sum(is.na(Z)) > 0)
stop("Input Z must be a numeric matrix with no missing values.")
if (nrow(Z) != n)
stop("Inputs X and Z do not match.")
storage.mode(Z) <- "double"
}
# Add intercept.
if (is.null(Z))
Z <- matrix(1,n,1)
else
Z <- cbind(1,Z)
# Check input y.
if (!is.numeric(y) | sum(is.na(y)) > 0)
stop("Input y must be a numeric vector with no missing values.")
y <- c(as.double(y))
# If necessary, convert the prior log-odds to a p x ns matrix.
if (fit$prior.same)
fit$logodds <- matrix(fit$logodds,p,ns,byrow = TRUE)
# Adjust the genotypes and phenotypes so that the linear effects of
# the covariates are removed. This is equivalent to integrating out
# the regression coefficients corresponding to the covariates with
# respect to an improper, uniform prior; see Chipman, George and
# McCulloch, "The Practical Implementation of Bayesian Model
# Selection," 2001.
if (fit$family == "gaussian") {
if (ncol(Z) == 1) {
X <- scale(X,center = TRUE,scale = FALSE)
y <- y - mean(y)
} else {
# The equivalent expressions in MATLAB are
#
# y = y - Z*((Z'*Z)\(Z'*y))
# X = X - Z*((Z'*Z)\(Z'*X))
#
# This should give the same result as centering the columns of X
# and subtracting the mean from y when we have only one
# covariate, the intercept.
y <- y - c(Z %*% solve(crossprod(Z),c(y %*% Z)))
X <- X - Z %*% solve(crossprod(Z),t(Z) %*% X)
}
}
# Initialize storage for the outputs.
alpha <- matrix(0,p,ns)
mu <- matrix(0,p,ns)
s <- matrix(0,p,ns)
dimnames(alpha) <- dimnames(fit$alpha)
dimnames(mu) <- dimnames(fit$mu)
dimnames(s) <- dimnames(fit$s)
# Calculate the mean (mu) and variance (s) of the coefficients given that
# the coefficients are included in the model, and the posterior inclusion
# probabilities (alpha), ignoring correlations between variables. Repeat
# for each combination of the hyperparameters.
for (i in 1:ns) {
if (fit$family == "gaussian")
out <- with(fit,varbvsnormindep(X,y,sigma[i],sa[i],log(10)*logodds[,i]))
else if (fit$family == "binomial")
out <- with(fit,varbvsbinzindep(X,Z,y,eta[,i],sa[i],log(10)*logodds[,i]))
alpha[,i] <- out$alpha
mu[,i] <- out$mu
s[,i] <- out$s
rm(out)
}
return(list(alpha = alpha,mu = mu,s = s))
}
# ----------------------------------------------------------------------
# This function computes the mean (mu) and variance (s) of the
# coefficients given that they are included in the linear regression
# model, then it computes the posterior inclusion probabilities
# (alpha), ignoring correlations between variables. This function is
# used in 'varbvsindep', above.
varbvsnormindep <- function (X, y, sigma, sa, logodds) {
s <- sa*sigma/(sa*diagsq(X) + 1)
mu <- s*c(y %*% X)/sigma
alpha <- sigmoid(logodds + (log(s/(sa*sigma)) + mu^2/s)/2)
return(list(alpha = alpha,mu = mu,s = s))
}
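# Hedged illustration (comment only; varbvsnormindep is internal, so from
# outside the package it would need the varbvs::: prefix): for a single
# centered variable x and centered outcome y, with sigma = 1 and sa = 1,
# the returned quantities reduce to the usual univariate Bayesian
# regression statistics, s = 1/(sum(x^2) + 1) and mu = s*sum(x*y):
#
#   n <- 100
#   x <- rnorm(n); x <- x - mean(x)
#   y <- 0.5*x + rnorm(n); y <- y - mean(y)
#   out <- varbvsnormindep(matrix(x,n,1),y,sigma = 1,sa = 1,logodds = 0)
#   all.equal(out$s,1/(sum(x^2) + 1))    # should be TRUE
#   all.equal(out$mu,out$s*sum(x*y))     # should be TRUE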
# ----------------------------------------------------------------------
# This function computes the mean (mu) and variance (s) of the coefficients
# given that they are included in the logistic regression model, then it
# computes the posterior inclusion probabilities (alpha), ignoring
# correlations between variables. This function is used in varbvsindep.m.
varbvsbinzindep <- function (X, Z, y, eta, sa, logodds) {
stats <- updatestats_varbvsbinz(X,Z,y,eta)
s <- sa/(sa*stats$xdx + 1)
mu <- s * stats$xy
alpha <- sigmoid(logodds + (log(s/sa) + mu^2/s)/2)
return(list(alpha = alpha,mu = mu,s = s))
}
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/varbvsindep.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2019, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Fit linear regression model with mixture-of-normals prior using
# variational approximation techniques. See varbvsmix.Rd for details.
varbvsmix <- function (X, Z, y, sa, sigma, w, alpha, mu, update.sigma,
update.sa, update.w, w.penalty, drop.threshold = 1e-8,
tol = 1e-4, maxiter = 1e4, update.order = 1:ncol(X),
verbose = TRUE) {
# Get the number of samples (n) and variables (p).
n <- nrow(X)
p <- ncol(X)
# (1) CHECK INPUTS
# ----------------
# Check input X.
if (!(is.matrix(X) & is.numeric(X) & sum(is.na(X)) == 0))
stop("Input X must be a numeric matrix with no missing values.")
storage.mode(X) <- "double"
# Add column names to X if they are not provided.
if (is.null(colnames(X)))
colnames(X) <- paste0("X",1:p)
# Check input Z.
if (!is.null(Z)) {
Z <- as.matrix(Z)
if (!is.numeric(Z) | sum(is.na(Z)) > 0)
stop("Input Z must be a numeric matrix with no missing values.")
if (nrow(Z) != n)
stop("Inputs X and Z do not match.")
storage.mode(Z) <- "double"
}
# Add intercept.
if (is.null(Z))
Z <- matrix(1,n,1)
else
Z <- cbind(1,Z)
# Check input y.
if (!is.numeric(y) | sum(is.na(y)) > 0)
stop("Input y must be a numeric vector with no missing values.")
if (length(y) != n)
stop("Inputs X and y do not match")
y <- c(as.double(y))
# (2) PROCESS SOME OF THE OPTIONS
# -------------------------------
# Check input argument "maxiter".
if (!is.finite(maxiter))
stop("Input maxiter must be a finite number")
# Check input argument "update.order".
if (!all(sort(intersect(update.order,1:p)) == 1:p))
stop(paste("Argument \"update.order\" should be a vector in which each",
"variable index (column of X) is included at least once"))
# Get initial estimate for the variance of the residual, if
# provided.
if (missing(sigma)) {
sigma <- var(y)
update.sigma.default <- TRUE
} else {
sigma <- c(sigma)
update.sigma.default <- FALSE
}
# Determine whether to update the residual variance parameter. Note
# that the default setting is determined by whether sigma is
# provided.
if (missing(update.sigma))
update.sigma <- update.sigma.default
# Determine whether to update the mixture variance parameters. By default,
# these parameters are not updated.
if (missing(update.sa))
update.sa <- FALSE
if (update.sa)
stop("Estimation of mixture variances not implemented")
# Determine whether to update the mixture weights.
if (missing(update.w))
update.w <- TRUE
# (3) PROCESS X AND y
# -------------------
# Adjust the genotypes and phenotypes so that the linear effects of
# the covariates are removed. This is equivalent to integrating out
# the regression coefficients corresponding to the covariates with
# respect to an improper, uniform prior.
out <- remove.covariate.effects(X,Z,y)
X <- out$X
y <- out$y
SZy <- out$SZy
SZX <- out$SZX
rm(out)
# Compute a couple useful quantities.
xy <- drop(y %*% X)
d <- diagsq(X)
# (4) PROCESS REMAINING OPTIONS
# -----------------------------
# When input argument "sa" is missing, or when it is a scalar, the
# prior variances are automatically chosen using function
# "selectmixsd". The variance of the first mixture component should
# be exactly zero, corresponding to the "spike".
if (missing(sa))
sa <- 20
if (length(sa) == 1) {
if (!(sa > 1 & round(sa) == sa))
stop("When \"sa\" is a scalar, it must be an integer greater than 1")
sa <- selectmixsd(xy/d,sa)^2
}
else if (sa[1] != 0)
stop("Variance of first mixture component should be zero")
# Get the number of mixture components.
K <- length(sa)
# Get initial estimate for the mixture weights, if provided.
if (missing(w))
w <- rep(1/K,K)
else
w <- c(w)
if (length(w) != K)
stop("Length of input w should be the same as length of sa")
# Specify the penalty term for the mixture weights.
if (missing(w.penalty))
w.penalty <- rep(1,K)
else
w.penalty <- c(w.penalty)
# Set initial estimates of variational parameters alpha, ensuring
# that the smallest value is not less than the "drop threshold" for
# the mixture components. These parameters are stored as a p x K
# matrix.
if (missing(alpha)) {
alpha <- rand(p,K) + K*drop.threshold
alpha <- alpha / rep_col(rowSums(alpha),K)
}
if (nrow(alpha) != p)
stop("Input alpha should have as many rows as X has columns")
if (ncol(alpha) != K)
stop("Input alpha should have one column for each mixture component")
if (any(alpha < drop.threshold))
stop("Initial estimates of \"alpha\" must all be above \"drop.threshold\"")
# Set initial estimates of variational parameters 'mu'. These
# parameters are stored as a p x K matrix. Note that the first
# column of this matrix is always zero because it corresponds to the
# "spike" component.
if (missing(mu))
mu <- randn(p,K)
if (nrow(mu) != p)
stop("Input mu should have as many rows as X has columns")
if (ncol(mu) != K)
stop("Input mu should have one column for each mixture component")
mu[,1] <- 0
# For each variable and each mixture component, calculate s[i,k],
# the variance of the regression coefficient conditioned on being
# drawn from the kth mixture component. Note that first column of
# "s" is always zero since this corresponds to the "spike" mixture
# component.
s <- matrix(0,p,K)
for (i in 2:K)
s[,i] <- sigma*sa[i]/(sa[i]*d + 1)
# Initialize storage for outputs logZ, err and nzw.
logZ <- rep(0,maxiter)
err <- rep(0,maxiter)
nzw <- rep(0,maxiter)
# Initialize the "inactive set"; that is, the mixture components with
# weights that are exactly zero. Also, keep the initial set of
# mixture variances (sa) and the initial number of mixture
# components (K). The term "inactive set" is borrowed from "active
# set methods" in numerical optimization.
inactive <- 1:K
K0 <- K
w0.penalty <- w.penalty
sa0 <- sa
# Initialize the "fitted values".
Xr <- drop(X %*% rowSums(alpha*mu))
# Provide a brief summary of the analysis.
if (verbose) {
cat("Fitting variational approximation for linear regression",
"model with\n")
cat("mixture-of-normals priors.\n")
cat(sprintf("samples: %-6d ",n))
cat(sprintf("mixture component sd's: %0.2g..%0.2g\n",
min(sqrt(sa[2:K])),max(sqrt(sa[2:K]))))
cat(sprintf("variables: %-6d ",p))
cat(sprintf("mixture component drop thresh.: %0.1e\n",drop.threshold))
cat(sprintf("covariates: %-6d ",max(0,ncol(Z) - 1)))
cat(sprintf("fit mixture weights: %s\n",tf2yn(update.w)))
cat(sprintf("mixture size: %-6d ",K))
cat(sprintf("fit residual var. (sigma): %s\n",tf2yn(update.sigma)))
cat("intercept: yes ")
cat(sprintf("convergence tolerance %0.1e\n",tol))
}
# (5) FIT MODEL TO DATA
# ---------------------
# Repeat until convergence criterion is met, or until the maximum
# number of iterations is reached.
if (verbose) {
cat(" variational max. --------- hyperparameters ---------\n")
cat("iter lower bound change sigma mixture sd's mix. weights",
"(drop)\n")
}
for (iter in 1:maxiter) {
# Save the current variational parameters and model parameters.
alpha0 <- alpha
mu0 <- mu
s0 <- s
sigma0 <- sigma
w0 <- w
# (5a) COMPUTE CURRENT VARIATIONAL LOWER BOUND
# --------------------------------------------
# Compute the lower bound to the marginal log-likelihood.
logZ0 <- computevarlbmix(Z,Xr,d,y,sigma,sa,w,alpha,mu,s)
# (5b) UPDATE VARIATIONAL APPROXIMATION
# -------------------------------------
out <- varbvsmixupdate(X,sigma,sa,w,xy,d,alpha,mu,Xr,update.order)
alpha <- out$alpha
mu <- out$mu
Xr <- out$Xr
rm(out)
# (5c) COMPUTE UPDATED VARIATIONAL LOWER BOUND
# --------------------------------------------
# Compute the lower bound to the marginal log-likelihood.
logZ[iter] <- computevarlbmix(Z,Xr,d,y,sigma,sa,w,alpha,mu,s)
# (5d) UPDATE RESIDUAL VARIANCE
# -----------------------------
# Compute the approximate maximum likelihood estimate of the residual
# variance (sigma), if requested. Note that we should also
# recalculate the variance of the regression coefficients when this
# parameter is updated.
if (update.sigma) {
sigma <-
(norm2(y - Xr)^2 + dot(d,betavarmix(alpha,mu,s))
+ sum(colSums(as.matrix(alpha[,-1]*(s[,-1] + mu[,-1]^2)))/sa[-1]))/
(n + sum(alpha[,-1]))
for (i in 2:K)
s[,i] <- sigma*sa[i]/(sa[i]*d + 1)
}
# (5e) UPDATE MIXTURE WEIGHTS
# ---------------------------
# Compute the approximate penalized maximum likelihood estimate of
# the mixture weights (w), if requested.
if (update.w) {
w <- colSums(alpha) + w.penalty - 1
w <- w/sum(w)
}
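# (Hedged note: the update above is the usual MAP update for mixture
# weights under a Dirichlet(w.penalty) prior; with the default penalty
# of 1 it reduces to the maximum-likelihood update w[k] = sum(alpha[,k])/p.)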
# (5f) CHECK CONVERGENCE
# ----------------------
# Print the status of the algorithm and check the convergence
# criterion. Convergence is reached when the maximum difference
# between the posterior mixture assignment probabilities at two
# successive iterations is less than the specified tolerance, or
# when the variational lower bound has decreased.
err[iter] <- max(abs(alpha - alpha0))
nzw[iter] <- K0 - K
if (verbose) {
progress.str <-
sprintf("%04d %+13.6e %0.1e %0.1e %13s [%0.3f,%0.3f] (%d)",
iter,logZ[iter],err[iter],sigma,
sprintf("[%0.1g,%0.1g]",sqrt(min(sa[-1])),sqrt(max(sa))),
min(w),max(w),nzw[iter])
cat(progress.str,"\n")
}
if (logZ[iter] < logZ0) {
logZ[iter] <- logZ0
err[iter] <- 0
sigma <- sigma0
w <- w0
alpha <- alpha0
mu <- mu0
s <- s0
break
} else if (err[iter] < tol)
break
# (5g) ADJUST INACTIVE SET
# ------------------------
# Check if any mixture components should be dropped based on
# "drop.threshold". Note that the first mixture component (the
# "spike") should never be droped.
keep <- apply(alpha,2,max) >= drop.threshold
keep[1] <- TRUE
if (!all(keep)) {
# At least one of the mixture components satisfies the criterion
# for being dropped, so adjust the inactive set.
inactive <- inactive[keep]
sa <- sa[keep]
w <- w[keep]
w0 <- w0[keep]
w.penalty <- w.penalty[keep]
alpha <- alpha[,keep]
alpha0 <- alpha0[,keep]
mu <- mu[,keep]
mu0 <- mu0[,keep]
s <- s[,keep]
s0 <- s0[,keep]
K <- length(inactive)
}
}
if (verbose)
cat("\n")
# (6) CREATE FINAL OUTPUT
# -----------------------
K <- K0
fit <- list(n = n,mu.cov = NULL,update.sigma = update.sigma,
update.sa = update.sa,update.w = update.w,
w.penalty = w0.penalty,drop.threshold = drop.threshold,
sigma = sigma,sa = sa0,w = rep(0,K),alpha = matrix(0,p,K),
mu = matrix(0,p,K),s = matrix(0,p,K),lfsr = NULL,
logZ = logZ[1:iter],err = err[1:iter],nzw = nzw[1:iter])
fit$w[inactive] <- w
fit$alpha[,inactive] <- alpha
fit$mu[,inactive] <- mu
fit$s[,inactive] <- s
# Compute the posterior mean estimate of the regression
# coefficients for the covariates under the current variational
# approximation.
fit$mu.cov <- c(SZy - SZX %*% rowSums(alpha * mu))
# Compute the local false sign rate (LFSR) for each variable.
fit$lfsr <- computelfsrmix(alpha,mu,s)
# Add row names to some of the outputs.
rownames(fit$alpha) <- colnames(X)
rownames(fit$mu) <- colnames(X)
rownames(fit$s) <- colnames(X)
names(fit$lfsr) <- colnames(X)
# Declare the return value as an instance of class 'varbvsmix'.
class(fit) <- c("varbvsmix","list")
return(fit)
}
# ----------------------------------------------------------------------
# Try to select a reasonable set of standard deviations that should
# be used for the mixture model based on the values of x. This code
# is based on the autoselect.mixsd function from the ashr
# package.
selectmixsd <- function (x, k) {
smin <- 1/10
if (all(x^2 < 1))
smax <- 1
else
smax <- 2*sqrt(max(x^2 - 1))
return(c(0,logspace(smin,smax,k - 1)))
}
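# Hedged illustration (comment only): for least-squares estimates x whose
# largest squared value is 5, the grid consists of a zero ("spike")
# standard deviation followed by k-1 values spaced logarithmically
# between smin = 0.1 and smax = 2*sqrt(5 - 1) = 4, e.g.,
#
#   x <- c(0.2,-0.5,1,sqrt(5))
#   selectmixsd(x,6)   # 0 followed by 5 log-spaced values from 0.1 to 4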
# ----------------------------------------------------------------------
# betavarmix(p,mu,s) returns variances of variables drawn from mixtures of
# normals. Each of the inputs is an n x k matrix, where n is the number of
# variables and k is the number of mixture components. Specifically,
# variable i is drawn from a mixture in which the jth mixture component is
# the univariate normal with mean mu[i,j] and variance s[i,j].
#
# Note that the following two lines should return the same result when k=2
# and the first component is the "spike" density with zero mean and
# variance.
#
# y1 <- betavar(p,mu,s)
# y2 <- betavarmix(cbind(1-p,p),cbind(0,mu),cbind(0,s))
#
betavarmix <- function (p, mu, s)
rowSums(p*(s + mu^2)) - rowSums(p*mu)^2
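# Hedged worked example (comment only): for a single variable drawn with
# equal probability from the "spike" N(0,0) and from N(2,1),
#
#   betavarmix(t(c(0.5,0.5)),t(c(0,2)),t(c(0,1)))
#
# returns 0.5*(1 + 2^2) - (0.5*2)^2 = 1.5, which agrees with the law of
# total variance.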
# ----------------------------------------------------------------------
# Compute the lower bound to the marginal log-likelihood.
computevarlbmix <- function (Z, Xr, d, y, sigma, sa, w, alpha, mu, s) {
# Get the number of samples (n), variables (p) and mixture
# components (K).
n <- length(y)
p <- length(d)
K <- length(w)
# Compute the variational lower bound.
out <- (-n/2*log(2*pi*sigma)
- determinant(crossprod(Z),logarithm = TRUE)$modulus/2
- (norm2(y - Xr)^2 + dot(d,betavarmix(alpha,mu,s)))/(2*sigma))
for (i in 1:K)
out <- (out + sum(alpha[,i]*log(w[i] + eps))
- sum(alpha[,i]*log(alpha[,i] + eps)))
for (i in 2:K)
out <- (out + (sum(alpha[,i]) + sum(alpha[,i]*log(s[,i]/(sigma*sa[i]))))/2
- sum(alpha[,i]*(s[,i] + mu[,i]^2))/(sigma*sa[i])/2)
return(out)
}
# ----------------------------------------------------------------------
# Compute the local false sign rate (LFSR) for each variable. This
# assumes that the first mixture component is a "spike" (that is, a
# normal density with a variance approaching zero).
computelfsrmix <- function (alpha, mu, s) {
# Get the number of variables (p) and the number of mixture
# components (k).
p <- nrow(alpha)
k <- ncol(alpha)
# For each variable, get the posterior probability that the
# regression coefficient is exactly zero.
p0 <- alpha[,1]
# For each variable, get the posterior probability that the
# regression coefficient is negative.
if (k == 2)
pn <- alpha[,2] * pnorm(0,mu[,2],sqrt(s[,2]))
else
pn <- rowSums(alpha[,-1] * pnorm(0,mu[,-1],sqrt(s[,-1])))
# Compute the local false sign rate (LFSR) following the formula
# given in the Biostatistics paper, "False discovery rates: a new
# deal".
lfsr <- rep(0,p)
b <- pn > 0.5*(1 - p0)
lfsr[b] <- 1 - pn[b]
lfsr[!b] <- p0[!b] + pn[!b]
return(lfsr)
}
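# Hedged example (comment only): with a single variable assigned
# probability 0.6 to the spike and 0.4 to the normal N(1,0.25),
#
#   computelfsrmix(t(c(0.6,0.4)),t(c(0,1)),t(c(0,0.25)))
#
# the probability of a negative coefficient is 0.4*pnorm(0,1,0.5), about
# 0.009, which is less than 0.5*(1 - 0.6) = 0.2, so the LFSR is
# p0 + pn, about 0.609.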
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/varbvsmix.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# [alpha,mu,Xr] = varbvsmixupdate(X,sigma,sa,q,xy,d,alpha0,mu0,Xr0,i)
# runs a single iteration of the coordinate ascent updates maximizing the
# variational lower bound for the linear regression model with a
# mixture-of-normals prior.
varbvsmixupdate <- function (X, sigma, sa, w, xy, d, alpha0, mu0, Xr0, i) {
# Get the number of samples (n), the number of variables (p), and the
# number of mixture components including the "spike" (K).
n <- nrow(X)
p <- ncol(X)
K <- length(w)
# Check input X.
if (!is.double(X) || !is.matrix(X))
stop("Input X should be a double-precision matrix")
# Check input sigma.
if (length(sigma) != 1)
stop("Input sigma should be a scalar")
# Check input sa.
if (length(sa) != K)
stop("Input sa should have length equal to input w")
# Check inputs xy and d.
if (!(length(xy) == p & length(d) == p))
stop("Inputs xy and d should have length = ncol(X)")
# Check inputs alpha0 and mu0.
if (any(c(dim(alpha0),dim(mu0)) != c(p,K,p,K)))
stop(paste("Inputs alpha0 and mu0 should be p x K matrices,",
"with p = ncol(X) and K = length(w)"))
# Check input Xr0.
if (length(Xr0) != n)
stop("length(Xr0) must be equal to nrow(X)")
# Check input i.
if (sum(i < 1 | i > p) > 0)
stop("Input i contains invalid variable indices")
# Initialize storage for the results.
alpha <- t(alpha0)
mu <- t(mu0)
Xr <- c(Xr0)
# Execute the C routine using the .Call interface and return the
# updated variational parameters in a list object. The
# main reason for using the .Call interface is that there is less of
# a constraint on the size of the input matrices. The only
# components that change are alpha, mu and Xr. Note that I need to
# subtract 1 from the indices because R vectors start at 1, and C
# arrays start at 0. Also note that the alpha and mu matrices are
# stored differently in the C implementation---variables correspond
# to columns---so we need to first transpose these matrices.
out <- .Call(C_varbvsmixupdate_Call,X = X,sigma = as.double(sigma),
sa = as.double(sa),w = as.double(w),xy = as.double(xy),
d = as.double(d),alpha = alpha,mu = mu,Xr = Xr,
i = as.integer(i-1),eps = eps)
return(list(alpha = t(alpha),mu = t(mu),Xr = Xr))
}
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/varbvsmixupdate.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Implements the fully-factorized variational approximation for
# Bayesian variable selection in linear regression. It finds the
# "best" fully-factorized variational approximation to the posterior
# distribution of the coefficients for a linear regression model of a
# continuous outcome (quantitative trait), with spike and slab priors
# on the coefficients. By "best", we mean the approximating
# distribution that locally minimizes the K-L divergence between the
# approximating distribution and the exact posterior.
#
# Input X is an n x p matrix of observations of the variables (or
# features), where n is the number of samples, and p is the number of
# variables. Input y contains samples of the outcome; it is a vector
# of length n.
#
# Inputs sigma, sa and logodds are additional model parameters; sigma
# and sa are scalars. Input sigma specifies the variance of the
# residual, and sa specifies the prior variance of the coefficients
# (scaled by sigma). Input logodds is the prior log-odds of inclusion
# for each variable. Note that the prior log-odds here is defined with
# respect to the *natural* logarithm, whereas in function varbvs the
# prior log-odds is defined with respect to the base-10 logarithm, so
# a scaling factor of log(10) is needed to convert from the latter to
# the former.
#
# Output logw is the variational estimate of the marginal
# log-likelihood given the hyperparameters at each iteration of the
# co-ordinate ascent optimization procedure. Output err is the maximum
# difference between the approximate posterior probabilities (alpha)
# at successive iterations. Outputs alpha, mu and s are the
# parameters of the variational approximation and, equivalently,
# variational estimates of posterior quantities: under the variational
# approximation, the ith regression coefficient is included in the
# model with probability alpha[i]; mu[i] and s[i] are the mean and
# variance of the coefficient given that it is included in the model.
#
# When update.sa = TRUE, the maximum a posteriori (MAP) estimate of the
# prior variance parameter (sa) is also computed, in which sa is
# assigned a scaled inverse chi-square prior with scale sa0 and degrees
# of freedom n0.
varbvsnorm <- function (X, y, sigma, sa, logodds, alpha, mu, update.order,
tol = 1e-4, maxiter = 1e4, verbose = TRUE,
outer.iter = NULL, update.sigma = TRUE,
update.sa = TRUE, n0 = 10, sa0 = 1) {
# Get the number of samples (n) and variables (p).
n <- nrow(X)
p <- ncol(X)
# (1) INITIAL STEPS
# -----------------
# Compute a few useful quantities.
xy <- c(y %*% X)
d <- diagsq(X)
Xr <- c(X %*% (alpha*mu))
# Calculate the variance of the coefficients.
s <- sa*sigma/(sa*d + 1)
# Initialize storage for outputs logw and err.
logw <- rep(0,maxiter)
err <- rep(0,maxiter)
# (2) MAIN LOOP
# -------------
# Repeat until convergence criterion is met, or until the maximum
# number of iterations is reached.
for (iter in 1:maxiter) {
# Save the current variational and model parameters.
alpha0 <- alpha
mu0 <- mu
s0 <- s
sigma0 <- sigma
sa.old <- sa
# (2a) COMPUTE CURRENT VARIATIONAL LOWER BOUND
# --------------------------------------------
# Compute the lower bound to the marginal log-likelihood.
logw0 <- int.linear(Xr,d,y,sigma,alpha,mu,s) +
int.gamma(logodds,alpha) +
int.klbeta(alpha,mu,s,sigma*sa)
# (2b) UPDATE VARIATIONAL APPROXIMATION
# -------------------------------------
out <- varbvsnormupdate(X,sigma,sa,logodds,xy,d,alpha,mu,Xr,update.order)
alpha <- out$alpha
mu <- out$mu
Xr <- out$Xr
rm(out)
# (2c) COMPUTE UPDATED VARIATIONAL LOWER BOUND
# --------------------------------------------
# Compute the lower bound to the marginal log-likelihood.
logw[iter] <- int.linear(Xr,d,y,sigma,alpha,mu,s) +
int.gamma(logodds,alpha) +
int.klbeta(alpha,mu,s,sigma*sa)
# (2d) UPDATE RESIDUAL VARIANCE
# -----------------------------
# Compute the maximum likelihood estimate of sigma, if requested.
# Note that we must also recalculate the variance of the regression
# coefficients when this parameter is updated.
if (update.sigma) {
sigma <- (norm2(y - Xr)^2 + dot(d,betavar(alpha,mu,s)) +
dot(alpha,(s + mu^2)/sa))/(n + sum(alpha))
# Simpler update formula from Youngseok:
# w <- mean(alpha)
# b <- alpha * mu
# r <- drop(y - Xr)
# bt <- (b + drop(r %*% X))/d
# sigma_new <- (norm2(r)^2 + sum(d*b*(bt - b)) + sigma*p*w)/(n + p*w)
s <- sa*sigma/(sa*d + 1)
}
# (2e) UPDATE PRIOR VARIANCE OF REGRESSION COEFFICIENTS
# -----------------------------------------------------
# Compute the maximum a posteriori estimate of sa, if requested.
# Note that we must also recalculate the variance of the
# regression coefficients when this parameter is updated.
if (update.sa) {
sa <- (sa0*n0 + dot(alpha,s + mu^2))/(n0 + sigma*sum(alpha))
s <- sa*sigma/(sa*d + 1)
}
# (2f) CHECK CONVERGENCE
# ----------------------
# Print the status of the algorithm and check the convergence
# criterion. Convergence is reached when the maximum difference
# between the posterior probabilities at two successive iterations
# is less than the specified tolerance, or when the variational
# lower bound has decreased.
err[iter] <- max(abs(alpha - alpha0))
if (verbose) {
if (is.null(outer.iter))
status <- NULL
else
status <- sprintf("%05d ",outer.iter)
progress.str <-
paste(status,sprintf("%05d %+13.6e %0.1e %06.1f %0.1e %0.1e",
iter,logw[iter],err[iter],sum(alpha),
sigma,sa),sep="")
cat(progress.str,"\n")
}
if (logw[iter] < logw0) {
logw[iter] <- logw0
err[iter] <- 0
sigma <- sigma0
sa <- sa.old
alpha <- alpha0
mu <- mu0
s <- s0
break
} else if (err[iter] < tol)
break
}
return(list(logw = logw[1:iter],err = err[1:iter],sigma = sigma,sa = sa,
alpha = alpha,mu = mu,s = s))
}
# ----------------------------------------------------------------------
# Computes an integral that appears in the variational lower bound of
# the marginal log-likelihood. This integral is the expectation of the
# linear regression log-likelihood taken with respect to the
# variational approximation.
int.linear <- function (Xr, d, y, sigma, alpha, mu, s) {
n <- length(y)
return(-n/2*log(2*pi*sigma) - norm2(y - Xr)^2/(2*sigma)
- dot(d,betavar(alpha,mu,s))/(2*sigma))
}
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/varbvsnorm.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Execute a single iteration of the coordinate ascent updates to
# maximize the variational lower bound for Bayesian variable selection
# in linear regression.
#
# Input X is an n x p matrix of observations of the variables (or
# features), where n is the number of samples, and p is the number of
# variables. Input xy = X'*y, where y is the vector of samples of the
# continuous outcome.
#
# Inputs sigma, sa and logodds specify other model parameters. sigma
# and sa are scalars. sigma specifies the variance of the residual,
# and sa is the prior variance of the regression coefficients (scaled
# by sigma). Input logodds is the prior log-odds of inclusion for each
# variable. It must be a vector of length p.
#
# Inputs alpha0, mu0 are the current parameters of the variational
# approximation. Under the variational approximation, the ith
# regression coefficient is included in the model with probability
# alpha0[i], and mu0[i] is the mean of the coefficient given that it
# is included in the model. Inputs Xr0 and d must be Xr0 =
# X*(alpha0*mu0) and d = diag(X'*X).
#
# Input i specifies the order in which the coordinates are updated. It
# may be a vector of any length. Each entry of i must be an integer
# between 1 and p.
#
# There are 3 outputs. Output vectors alpha and mu are the updated
# variational parameters, and Xr = X*(alpha*mu). The computational
# complexity is O(n*length(i)).
#
# When algorithm.version = ".Call", this function calls
# "varbvsnormupdate_Call", a function compiled from C code, using the
# .Call interface. To load the C function into R, first build the
# "shared object" (.so) file using the following command in the "src"
# directory: R CMD SHLIB varbvsr.c varbvs.c misc.c. Next, load the
# shared objects into R using the R function dyn.load:
# dyn.load("../src/varbvsr.so").
#
# When algorithm.version = "Rcpp", this function uses the code from
# varbvsnormupdate_rcpp.cpp. If any changes are made to this code,
# make sure to run Rcpp::compileAttributes(), which updates
# RcppExports.R.
varbvsnormupdate <-
function (X, sigma, sa, logodds, xy, d, alpha0, mu0, Xr0, updates,
algorithm.version = c(".Call","Rcpp","R")) {
# Get the number of samples (n) and variables (p).
n <- nrow(X)
p <- ncol(X)
# Specify the algorithm implementation.
algorithm.version <- match.arg(algorithm.version)
# Check input X.
if (!is.double(X) || !is.matrix(X))
stop("Input X must be a double-precision matrix")
# Check inputs sigma and sa.
if (length(sigma) != 1 | length(sa) != 1)
stop("Inputs sigma and sa must be scalars")
# Check input logodds, xy, d, alpha0 and mu0.
if (!(length(logodds) == p & length(xy) == p & length(d) == p &
length(alpha0) == p & length(mu0) == p))
stop("logodds, xy, d, alpha0 and mu0 must have length = ncol(X)")
# Check input Xr0.
if (length(Xr0) != n)
stop("length(Xr0) must be equal to nrow(X)")
# Check input "updates".
if (sum(updates < 1 | updates > p) > 0)
stop("Input \"updates\" contains invalid variable indices")
# Initialize storage for the results.
alpha <- c(alpha0)
mu <- c(mu0)
Xr <- c(Xr0)
if (algorithm.version == ".Call") {
# Execute the C routine using the .Call interface, and return the
# updated variational parameters in a list object. The
# main reason for using the .Call interface is that there is less of
# a constraint on the size of the input matrices. The only
# components that change are alpha, mu and Xr. Note that I need to
# subtract 1 from the indices because R vectors start at 1, and C
# arrays start at 0.
out <- .Call(C_varbvsnormupdate_Call,X = X,sigma = as.double(sigma),
sa = as.double(sa),logodds = as.double(logodds),
xy = as.double(xy),d = as.double(d),alpha = alpha,mu = mu,
Xr = Xr,i = as.integer(updates - 1))
} else if (algorithm.version == "Rcpp") {
# Execute the C routine using the Rcpp interface.
varbvsnormupdate_rcpp(X = X,sigma = sigma,sa = sa,logodds = logodds,
xy = xy,d = d,alpha = alpha,mu = mu,Xr = Xr,
i = updates - 1)
} else if (algorithm.version == "R") {
# Repeat for each co-ordinate to update.
for (j in updates) {
# Compute the variational estimate of the posterior variance.
s <- sa*sigma/(sa*d[j] + 1)
# Update the variational estimate of the posterior mean.
r <- alpha[j] * mu[j]
mu[j] <- s/sigma * (xy[j] + d[j]*r - sum(X[,j]*Xr))
# Update the variational estimate of the posterior inclusion
# probability.
alpha[j] <- sigmoid(logodds[j] + (log(s/(sa*sigma)) + mu[j]^2/s)/2)
# Update Xr = X*r.
Xr <- Xr + (alpha[j]*mu[j] - r) * X[,j]
}
} else
stop("Invalid argument \"algorithm.version\" passed to varbvsnormupdate.")
return(list(alpha = alpha,mu = mu,Xr = Xr))
}
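# Hedged usage sketch (comment only; the pure R implementation is much
# slower than the .Call version, but performs the same co-ordinate
# updates, which makes it useful for checking the compiled code):
#
#   set.seed(1)
#   n <- 50; p <- 5
#   X <- matrix(rnorm(n*p),n,p)
#   y <- X[,1] + rnorm(n)
#   alpha <- rep(0.5,p)
#   mu    <- rep(0,p)
#   out <- varbvsnormupdate(X,sigma = 1,sa = 1,logodds = rep(0,p),
#                           xy = c(y %*% X),d = diagsq(X),alpha0 = alpha,
#                           mu0 = mu,Xr0 = c(X %*% (alpha*mu)),
#                           updates = 1:p,algorithm.version = "R")
#   # out$alpha, out$mu and out$Xr hold the variational parameters after
#   # one pass over the co-ordinates.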
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/varbvsnormupdate.R
|
# Compute Bayes factors measuring improvement in "fit" when each
# candidate variable from "vars" is included in the model instead of
# variable i.
varbvsproxybf <- function (X, Z, y, fit, i, vars) {
# Get the number of samples (n), the number of variables (p), and
# the number of hyperparameter settings (ns).
n <- nrow(X)
p <- ncol(X)
ns <- length(fit$w)
# CHECK INPUTS
# ------------
# TO DO: Check all inputs.
# Check input "fit".
if (fit$family != "gaussian")
stop(paste("Function varbvsproxybf is currently only implemented for",
"linear regression (fit$family = \"gaussian\")"))
# If the set of candidate variables is not provided, set it to all
# the variables.
if (missing(vars))
vars <- 1:p
# Add an intercept.
if (is.null(Z))
Z <- matrix(1,n,1)
else
Z <- cbind(1,Z)
# PREPROCESSING STEPS
# -------------------
# Adjust the genotypes and phenotypes so that the linear effects of
# the covariates are removed. This is equivalent to integrating out
# the regression coefficients corresponding to the covariates with
# respect to an improper, uniform prior.
out <- remove.covariate.effects(X,Z,y)
X <- out$X
y <- out$y
rm(out)
# INITIALIZE STORAGE FOR OUTPUTS
# ------------------------------
# Create a p x ns matrix containing the Bayes factors.
BF <- matrix(0,p,ns)
mu <- matrix(0,p,ns)
s <- matrix(0,p,ns)
rownames(BF) <- rownames(fit$alpha)
rownames(mu) <- rownames(fit$alpha)
rownames(s) <- rownames(fit$alpha)
# COMPUTE PROXY PROBABILITIES
# ---------------------------
d <- diagsq(X)
# Repeat for each hyperparameter setting.
for (k in 1:ns) {
# Get the hyperparameter values.
sigma <- fit$sigma[k]
sa <- fit$sa[k]
# Get the posterior means of the regression coefficients, and the
# posterior inclusion probabilities.
alpha0 <- fit$alpha[,k]
mu0 <- fit$mu[,k]
# Remove from the outcome y the linear effects of all variables
# except for variable i.
alpha0[i] <- 0
y0 <- c(y - X %*% (alpha0*mu0))
# Repeat for each candidate variable.
for (j in vars) {
# Add back in the linear effect of variable j (except in the
# special case when i = j, because the effect was not removed in
# the first place).
if (i != j)
y0 <- y0 + alpha0[j]*mu0[j] * X[,j]
# Compute the Bayes factor.
s[j,k] <- sa*sigma/(sa*d[j] + 1)
mu[j,k] <- s[j,k]*dot(y0,X[,j])/sigma
BF[j,k] <- sqrt(s[j,k]/(sa*sigma)) * exp(mu[j,k]^2/(2*s[j,k]))
# Remove the linear effect of variable j (except when i = j).
if (i != j)
y0 <- y0 - alpha0[j]*mu0[j] * X[,j]
}
}
# Output the Bayes factors for the selected candidate proxy variables
# only, as well as the estimated (posterior) means and variances of
# the regression coefficients.
return(list(BF = BF[vars,],mu = mu[vars,],s = s[vars,]))
}
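# Hedged usage sketch (comment only; X and y are assumed to be the data
# used to fit the model, and the fit must come from a linear regression
# call to varbvs):
#
#   fit <- varbvs(X,NULL,y,"gaussian")
#   out <- varbvsproxybf(X,NULL,y,fit,i = 12,vars = 10:15)
#   # out$BF[j,k] is the Bayes factor for including candidate variable
#   # vars[j] in place of variable 12 under hyperparameter setting k.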
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/varbvsproxybf.R
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2018, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Samples nr posterior estimates of the proportion of variance in Y
# explained by the Bayesian variable selection model fitted using a
# variational approximation. This function is only valid for the
# linear regression model (family = "gaussian") with an intercept.
varbvspve <- function (fit, X, nr = 1000) {
# Get the number of variables (p) and the number of hyperparameter
# settings (ns).
p <- ncol(X)
ns <- length(fit$logw)
# Check input X.
if (!(is.matrix(X) & is.numeric(X) & sum(is.na(X)) == 0))
stop("Input X must be a numeric matrix with no missing values.")
if (nrow(fit$alpha) != p)
stop("Inputs X and fit are not compatible.")
# Check input "fit".
if (!is(fit,"varbvs"))
stop("Input argument \"fit\" must be an instance of class \"varbvs\".")
if (fit$family != "gaussian")
stop("varbvspve is only implemented for family = \"gaussian\".")
# Initialize storage for posterior estimates of the proportion of
# variance explained.
pve <- rep(0,nr)
# For each sample, compute the proportion of variance explained.
for (i in 1:nr) {
# Draw a hyperparameter setting from the posterior distribution.
j <- sample(ns,1,prob = fit$w)
# Sample the regression coefficients.
b <- with(fit,mu[,j] + sqrt(s[,j]) * rnorm(p))
b <- b * (runif(p) < fit$alpha[,j])
# Compute the proportion of variance explained.
sz <- c(var1(X %*% b))
pve[i] <- sz/(sz + fit$sigma[j])
}
return(pve)
}
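# Hedged usage sketch (comment only; X and y are assumed to be the data
# used to fit the linear regression model):
#
#   fit <- varbvs(X,NULL,y,"gaussian")
#   pve <- varbvspve(fit,X,nr = 1000)
#   quantile(pve,c(0.1,0.5,0.9))   # posterior summary of the PVE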
|
/scratch/gouwar.j/cran-all/cranData/varbvs/R/varbvspve.R
|
# This script illustrates 'varbvs' for genome-wide mapping of a binary
# (e.g., case-control) trait in a simulated data set in which all the
# genetic markers are uncorrelated with each other (i.e., they are
# "unlinked").
library(lattice)
library(varbvs)
# SCRIPT PARAMETERS
# -----------------
n <- 1400 # Number of samples (subjects).
p <- 1000 # Number of variables (genetic markers).
na <- 10 # Number of markers that affect the binary outcome.
sa <- 0.2 # Variance of log-odds ratios.
p1 <- 0.5 # Target proportion of subjects that are cases (y = 1).
# Names of covariates.
if (!exists("covariates")) {
covariates <- c("age","weight")
}
# Candidate values for the prior log-odds of inclusion.
if (!exists("logodds")) {
logodds <- seq(-3,-1.5,0.5)
}
# Set the random number generator seed.
set.seed(1)
# GENERATE DATA SET
# -----------------
# Generate the minor allele frequencies so that they are uniform over
# range [0.05,0.5]. Then simulate genotypes assuming all markers are
# uncorrelated (i.e., unlinked), according to the specified minor
# allele frequencies.
cat("1. GENERATING DATA SET.\n")
maf <- 0.05 + 0.45*runif(p)
X <- (runif(n*p) < maf) +
(runif(n*p) < maf)
X <- matrix(as.double(X),n,p,byrow = TRUE)
# Generate additive effects for the markers so that exactly na of them
# have a nonzero effect on the trait.
i <- sample(p,na)
beta <- rep(0,p)
beta[i] <- sqrt(sa)*rnorm(na)
# Generate labels for the markers.
colnames(X) <- paste0("rs",sample(1e6,p))
# Generate the covariate data (Z), and the linear effects of the
# covariates (u).
m <- length(covariates)
if (m > 0) {
Z <- randn(n,m)
u <- rnorm(m)
colnames(Z) <- covariates
} else {
Z <- NULL
}
# For each sample, calculate the probability of being a case (y = 1).
mu <- varbvs:::logit(p1)
w <- mu + X %*% beta
if (m > 0)
w <- w + Z %*% u
# Simulate the binary trait (case-control status) as a coin toss with
# success rates given by the logistic regression.
y <- as.double(runif(n) < varbvs:::sigmoid(w))
# Generate labels for the samples.
names(y) <- sprintf("A%05d",sample(99999,n))
rownames(X) <- names(y)
if (!is.null(Z))
rownames(Z) <- names(y)
# FIT VARIATIONAL APPROXIMATION TO POSTERIOR
# ------------------------------------------
# Fit the fully-factorized variational approximation to the posterior
# distribution of the coefficients for a logistic regression model of
# a binary outcome (case-control status), with spike and slab priors
# on the coefficients.
cat("2. FITTING MODEL TO DATA.\n")
fit <- varbvs(X,Z,y,"binomial",logodds = logodds,n0 = 0)
cat("\n")
# SUMMARIZE POSTERIOR DISTRIBUTION
# --------------------------------
cat("3. SUMMARIZING RESULTS.\n")
print(summary(fit))
cat("\n")
# COMPARE ESTIMATES AGAINST GROUND-TRUTH
# --------------------------------------
# Plot the estimated coefficients against the ground-truth coefficients.
# It is expected that coefficients near zero will be "shrunk" to zero.
cat("4. PLOTTING COEFFICIENT ESTIMATES.\n")
trellis.par.set(par.xlab.text = list(cex = 0.75),
par.ylab.text = list(cex = 0.75),
axis.text = list(cex = 0.75))
markers <- labels(fit)
if (length(logodds) > 1) {
beta.est <- coef(fit)[markers,"averaged"]
} else {
beta.est <- coef(fit)[markers,]
}
print(xyplot(beta.est ~ beta.true,
data.frame(beta.true = beta,beta.est = beta.est),
pch = 4,col = "black",cex = 0.6,
panel = function(x, y, ...) {
panel.xyplot(x,y,...)
panel.abline(a = 0,b = 1,col = "magenta",lty = "dotted")
},
scales = list(limits = c(-1.1,1.1)),
xlab = "ground-truth regression coefficient",
ylab = "estimated regression coefficient"))
# EVALUATE MODEL PREDICTIONS
# --------------------------
# Compute estimates of the binary trait using the fitted model, and
# compare against the observed values.
cat("5. EVALUATING FITTED MODEL.\n")
y.fit <- predict(fit,X,Z,type = "class")
cat("Comparison of observed case-control status against estimated outcome:\n")
print(table(y = factor(y),y.fit = factor(y.fit)))
|
/scratch/gouwar.j/cran-all/cranData/varbvs/demo/varbvs.cc.R
|
# This script illustrates usage of the function varbvs for genome-wide
# mapping of a quantitative trait. The data set is simulated assuming
# that all the genetic markers are uncorrelated with each other (i.e.,
# they are "unlinked").
library(lattice)
library(varbvs)
# SCRIPT PARAMETERS
# -----------------
n <- 800 # Number of samples.
p <- 2000 # Number of variables (genetic markers).
na <- 20 # Number of quantitative trait loci (QTLs).
se <- 4 # Variance of residual.
r <- 0.5 # Proportion of variance in trait explained by QTLs.
# Names of covariates.
if (!exists("covariates")) {
covariates <- c("age","weight","glucose")
}
# Candidate values for the prior log-odds of inclusion.
if (!exists("logodds")) {
logodds <- seq(-3,-1,0.1)
}
# Set the random number generator seed.
suppressWarnings(RNGversion("3.5.0"))
set.seed(1)
# GENERATE DATA SET
# -----------------
# Generate the minor allele frequencies so that they are uniform over range
# [0.05,0.5]. Then simulate genotypes assuming all markers are uncorrelated
# (i.e., unlinked), according to the specified minor allele frequencies.
cat("1. GENERATING DATA SET.\n")
maf <- 0.05 + 0.45*runif(p)
X <- (runif(n*p) < maf) +
(runif(n*p) < maf)
X <- matrix(as.double(X),n,p,byrow = TRUE)
# Generate additive effects for the markers so that exactly na of them have
# a nonzero effect on the trait.
i <- sample(p,na)
beta <- rep(0,p)
beta[i] <- rnorm(na)
# Generate labels for the markers.
colnames(X) <- paste0("rs",sample(1e6,p))
# Adjust the QTL effects so that we control for the proportion of variance
# explained (r). That is, we adjust beta so that r = a/(a+1), where I've
# defined a = beta'*cov(X)*beta. Here, sb is the variance of the (nonzero)
# QTL effects.
sb <- r/(1-r)/var(c(X %*% beta))
beta <- sqrt(sb*se) * beta
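# (Hedged note on the rescaling above: after multiplying by sqrt(sb*se),
# var(X %*% beta) is approximately se*r/(1-r), so the QTLs explain a
# proportion se*r/(1-r) / (se*r/(1-r) + se) = r of the non-covariate
# variance in the trait, as intended.)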
# Generate a random intercept.
mu <- rnorm(1)
# Generate the covariate data (Z), and the linear effects of the
# covariates (u).
m <- length(covariates)
if (m > 0) {
Z <- randn(n,m)
u <- rnorm(m)
colnames(Z) <- covariates
} else {
Z <- NULL
}
# Generate the quantitative trait measurements.
y <- mu + X %*% beta + sqrt(se)*rnorm(n)
if (m > 0)
y <- y + Z %*% u
y <- c(y)
# Generate labels for the samples.
names(y) <- sprintf("A%05d",sample(99999,n))
rownames(X) <- names(y)
if (!is.null(Z))
rownames(Z) <- names(y)
# FIT VARIATIONAL APPROXIMATION TO POSTERIOR
# ------------------------------------------
# Fit the fully-factorized variational approximation to the posterior
# distribution of the coefficients for a linear regression model of a
# continuous outcome (quantitative trait), with spike and slab priors on
# the coefficients.
cat("2. FITTING MODEL TO DATA.\n")
fit <- varbvs(X,Z,y,"gaussian",logodds = logodds,n0 = 0)
cat("\n")
# SUMMARIZE RESULTS
# -----------------
cat("3. SUMMARIZING RESULTS.\n")
print(summary(fit))
cat("\n")
# COMPARE ESTIMATES AGAINST GROUND-TRUTH
# --------------------------------------
# Plot the estimated coefficients against the ground-truth coefficients.
# It is expected that coefficients near zero will be "shrunk" to zero.
cat("4. PLOTTING COEFFICIENT ESTIMATES.\n")
trellis.par.set(par.xlab.text = list(cex = 0.75),
par.ylab.text = list(cex = 0.75),
axis.text = list(cex = 0.75))
markers <- labels(fit)
if (length(logodds) > 1) {
beta.est <- coef(fit)[markers,"averaged"]
} else {
beta.est <- coef(fit)[markers,]
}
print(xyplot(beta.est ~ beta.true,
data.frame(beta.true = beta,beta.est = beta.est),
pch = 4,col = "black",cex = 0.6,
panel = function(x, y, ...) {
panel.xyplot(x,y,...)
panel.abline(a = 0,b = 1,col = "magenta",lty = "dotted")
},
scales = list(limits = c(-1.1,1.1)),
xlab = "ground-truth regression coefficient",
ylab = "estimated regression coefficient"))
# EVALUATE MODEL PREDICTIONS
# --------------------------
# Compute estimates of the quantitative trait using the fitted model,
# and compare against the observed values.
cat("5. EVALUATING FITTED MODEL.\n")
y.fit <- predict(fit,X,Z)
cat(sprintf("r^2 between predicted Y and observed Y is %0.3f.\n",
cor(y,y.fit)^2))
|
/scratch/gouwar.j/cran-all/cranData/varbvs/demo/varbvs.qtl.R
|
# Here we illustrate two varbvs features: (1) specifying the
# covariance structure of the samples by setting input argument
# resid.vcov; (2) comparing Bayesian variable selection models by
# computing Bayes factors.
#
# In the application to genetic data, the model with dependent
# residuals is equivalent to the Bayesian sparse linear mixed model
# (BSLMM) described in Zhou, Carbonetto & Stephens, PLoS Genetics, 2013.
#
# The data for this demo are simulated in a similar way to the
# varbvs.qtl demo, the main difference being that all genetic markers
# contribute to variation in the phenotype, with a small proportion of
# genetic markers contributing much more variation than the others
# (these markers are considered the QTLs, or "quantitative trait
# loci").
library(varbvs)
# SCRIPT PARAMETERS
# -----------------
n <- 800 # Number of samples.
p <- 2000 # Number of variables (genetic markers).
na <- 2 # Number of quantitative trait loci (QTLs).
r <- 0.5 # Proportion of variance in trait explained by markers.
d <- 0.6 # Proportion of additive genetic variance due to QTLs.
# Candidate values for the prior log-odds of inclusion.
logodds <- seq(-3,-1,0.1)
# Set the random number generator seed.
suppressWarnings(RNGversion("3.5.0"))
set.seed(1)
# GENERATE DATA SET
# -----------------
# Generate the minor allele frequencies so that they are uniform over range
# [0.05,0.5]. Then simulate genotypes assuming all markers are uncorrelated
# (i.e., unlinked), according to the specified minor allele frequencies.
cat("1. GENERATING DATA SET.\n")
maf <- 0.05 + 0.45*runif(p)
X <- (runif(n*p) < maf) +
(runif(n*p) < maf)
X <- matrix(as.double(X),n,p,byrow = TRUE)
X <- scale(X,center = TRUE,scale = FALSE)
# Generate (small) polygenic additive effects for the markers.
u <- rnorm(p)
# Generate (large) QTL effects for the markers.
i <- sample(p,na)
beta <- rep(0,p)
beta[i] <- rnorm(na)
# Generate labels for the markers.
colnames(X) <- paste0("rs",sample(1e6,p))
# Adjust the additive effects so that we control for the proportion of
# additive genetic variance that is due to QTL effects (d) and the
# total proportion of variance explained (r). That is, we adjust beta
# and u so that
#
# r = a/(a+1)
# d = b/a,
#
# where I've defined
#
# a = (u + beta)'*cov(X)*(u + beta),
# b = beta'*cov(X)*beta.
#
# Note: this code only works if neither d nor r is exactly 0 or exactly 1.
st <- c(r/(1-r) * d/var(X %*% beta))
beta <- sqrt(st) * beta
sa <- max(Re(polyroot(c(c(var(X %*% beta) - r/(1-r)),
2*sum((X %*% beta) * (X %*% u))/n,
c(var(X %*% u))))))^2
u <- sqrt(sa) * u
# Generate the quantitative trait measurements.
y <- X %*% (u + beta) + rnorm(n)
y <- c(y)
# Generate labels for the samples.
names(y) <- sprintf("A%05d",sample(99999,n))
rownames(X) <- names(y)
# FIT BASIC VARBVS MODEL
# ----------------------
cat("2. FITTING BASIC VARBVS MODEL TO DATA.\n")
fit1 <- varbvs(X,NULL,y,"gaussian",logodds = logodds,n0 = 0)
cat("\n")
cat("3. SUMMARIZING FITTED MODEL.\n")
print(summary(fit1))
cat("\n")
# FIT VARBVS MODEL WITH DEPENDENT RESIDUALS
# -----------------------------------------
# This is equivalent to the Bayesian sparse linear mixed model (BSLMM)
# when the covariance of the residuals is given by
#
# resid.cov = I + a*K
#
# where K is the kinship matrix K = crossprod(X)/p, and a is a
# parameter to be estimated.
cat("4. FITTING VARBVS MODEL WITH DEPENDENT RESIDUALS (BSLMM).\n")
fit2 <- varbvs(X,NULL,y,"gaussian",logodds = logodds,n0 = 0,
resid.vcov = diag(n) + sa*tcrossprod(X))
cat("\n")
cat("5. SUMMARIZING FITTED MODEL.\n")
print(summary(fit2))
cat("\n")
# COMPUTE BAYES FACTOR
# --------------------
cat("6. COMPUTING BAYES FACTOR.\n")
bf <- varbvsbf(fit1,fit2)
cat("Bayes factor quantifying support for model with dependent residuals",
"(BSLMM):\n")
cat("BF =",bf,"\n")
|
/scratch/gouwar.j/cran-all/cranData/varbvs/demo/varbvs.resid.vcov.R
|
# This script illustrates the "varbvsmix" function on a simulated data
# set, in which all candidate variables (predictors) are uncorrelated.
library(lattice)
library(varbvs)
# SCRIPT PARAMETERS
# -----------------
n <- 1000 # Number of samples.
p <- 2000 # Number of variables (genetic markers).
se <- 4 # Variance of residual.
# Names of the covariates.
covariates <- c("age","weight")
# The standard deviations and mixture weights used to simulate the
# additive effects on the quantitative trait. Note that the first
# mixture component must have a standard deviation of exactly zero.
sd <- c(0, 0.1, 0.2, 0.5)
w <- c(0.95,0.03,0.01,0.01)
# Set the random number generator seed.
suppressWarnings(RNGversion("3.5.0"))
set.seed(1)
# GENERATE DATA SET
# -----------------
# Get the number of covariates.
m <- length(covariates)
# Generate the minor allele frequencies so that they are uniform over
# range [0.05,0.5]. Then simulate genotypes assuming all markers are
# uncorrelated (i.e., unlinked), according to the specified minor
# allele frequencies.
cat("1. GENERATING DATA SET.\n")
cat("Data simulation settings:\n")
cat(sprintf(" - Num. data samples %d\n",n))
cat(sprintf(" - Num. covariates %d\n",m))
cat(sprintf(" - Num. variables (SNPs) %d\n",p))
cat(sprintf(" - Num. mixture components %d\n",length(w)))
maf <- 0.05 + 0.45*runif(p)
X <- (runif(n*p) < maf) +
(runif(n*p) < maf)
X <- matrix(as.double(X),n,p,byrow = TRUE)
# Generate additive effects according to the specified standard
# deviations (sd) and mixture weights (w).
k <- sample(length(w),p,replace = TRUE,prob = w)
beta <- sd[k] * rnorm(p)
# Generate random labels for the markers.
colnames(X) <- paste0("rs",sample(1e6,p))
# Generate a random intercept.
mu <- rnorm(1)
# Generate the covariate data (Z), and the linear effects of the
# covariates (u).
if (m > 0) {
Z <- randn(n,m)
u <- rnorm(m)
colnames(Z) <- covariates
} else {
Z <- NULL
}
# Generate the quantitative trait measurements.
y <- mu + X %*% beta + sqrt(se)*rnorm(n)
if (m > 0)
y <- y + Z %*% u
y <- c(y)
# FIT VARIATIONAL APPROXIMATION TO POSTERIOR
# ------------------------------------------
# Fit the fully-factorized variational approximation to the posterior
# distribution of the coefficients for a linear regression model of
# the quantitative trait (Y), with the mixture-of-normals prior on the
# coefficients.
cat("2. FITTING MODEL TO DATA.\n")
fit <- varbvsmix(X,Z,y,sd^2)
# Plot the estimated coefficients against the ground-truth coefficients.
trellis.par.set(par.xlab.text = list(cex = 0.75),
par.ylab.text = list(cex = 0.75),
axis.text = list(cex = 0.75))
beta.est <- rowSums(fit$alpha * fit$mu)
print(xyplot(beta.est ~ beta.true,
data.frame(beta.true = beta,beta.est = beta.est),
pch = 4,col = "black",cex = 0.6,
panel = function(x, y, ...) {
panel.xyplot(x,y,...)
panel.abline(a = 0,b = 1,col = "magenta",lty = "dotted")
},
scales = list(limits = c(-1.1,1.1)),
xlab = "ground-truth regression coefficient",
ylab = "estimated regression coefficient"),
split = c(1,1,3,1),
more = TRUE)
# Plot the ground-truth coefficients against the local false sign rate.
print(xyplot(lfsr ~ beta.est,data.frame(beta.est = beta.est,lfsr = fit$lfsr),
pch = 19,col = "black",cex = 0.6,
xlab = "estimated regression coefficient",
ylab = "LFSR"),
split = c(2,1,3,1),
more = TRUE)
# Show the change in the variational lower bound at each iteration of the
# co-ordinate ascent algorithm.
numiter <- length(fit$logZ)
print(xyplot(y ~ x,data.frame(x = 1:numiter,y = max(fit$logZ) - fit$logZ),
type = "l",col = "darkorange",lwd = 2,
scales = list(y = list(log = 10)),xlab = "iteration",
ylab = "distance from final lower bound"),
split = c(3,1,3,1),
more = FALSE)
|
/scratch/gouwar.j/cran-all/cranData/varbvs/demo/varbvsmix.R
|
# This is similar to the "varbvsmix" demo, except that the prior is a
# larger mixture of normals. This script is mainly intended to
# illustrate the use of the "drop.threshold" varbvsmix argument to
# speed up computation when a lot of the mixture components have a
# negligible (near zero) weight.
library(lattice)
library(varbvs)
# SCRIPT PARAMETERS
# -----------------
n <- 1000 # Number of samples.
p <- 2000 # Number of variables (genetic markers).
se <- 4 # Variance of residual.
# Names of the covariates.
covariates <- c("age","weight")
# The standard deviations and mixture weights used to simulate the
# additive effects on the quantitative trait. Note that the first
# mixture component must have a standard deviation of exactly zero.
sd <- c(0, 0.1, 0.2, 0.5)
w <- c(0.95,0.03,0.01,0.01)
# The dense grid of prior standard deviations used to model the data.
sd.grid <- c(0,10^seq(-2,1,length.out = 19))
# Set the random number generator seed.
set.seed(1)
# GENERATE DATA SET
# -----------------
# Get the number of covariates.
m <- length(covariates)
# Generate the minor allele frequencies so that they are uniform over
# range [0.05,0.5]. Then simulate genotypes assuming all markers are
# uncorrelated (i.e., unlinked), according to the specified minor
# allele frequencies.
cat("1. GENERATING DATA SET.\n")
cat("Data simulation settings:\n")
cat(sprintf(" - Num. data samples %d\n",n))
cat(sprintf(" - Num. covariates %d\n",m))
cat(sprintf(" - Num. variables (SNPs) %d\n",p))
cat(sprintf(" - Num. mixture components %d\n",length(w)))
maf <- 0.05 + 0.45*runif(p)
X <- (runif(n*p) < maf) +
(runif(n*p) < maf)
X <- matrix(as.double(X),n,p,byrow = TRUE)
# Generate additive effects according to the specified standard
# deviations (sd) and mixture weights (w).
k <- sample(length(w),p,replace = TRUE,prob = w)
beta <- sd[k] * rnorm(p)
# Generate random labels for the markers.
colnames(X) <- paste0("rs",sample(1e6,p))
# Generate a random intercept.
mu <- rnorm(1)
# Generate the covariate data (Z), and the linear effects of the
# covariates (u).
if (m > 0) {
Z <- randn(n,m)
u <- rnorm(m)
colnames(Z) <- covariates
} else {
Z <- NULL
}
# Generate the quantitative trait measurements.
y <- mu + X %*% beta + sqrt(se)*rnorm(n)
if (m > 0)
y <- y + Z %*% u
y <- c(y)
# FIT VARIATIONAL APPROXIMATION TO POSTERIOR
# ------------------------------------------
# Fit the fully-factorized variational approximation to the posterior
# distribution of the coefficients for a linear regression model of
# the quantitative trait (Y), with the mixture-of-normals prior on the
# coefficients.
cat("2. FITTING MODEL TO DATA.\n")
fit <- varbvsmix(X,Z,y,sd.grid^2)
# Plot the estimated coefficients against the ground-truth coefficients.
trellis.par.set(par.xlab.text = list(cex = 0.75),
par.ylab.text = list(cex = 0.75),
axis.text = list(cex = 0.75))
print(xyplot(beta.est ~ beta.true,
data.frame(beta.true = beta,
beta.est = rowSums(fit$alpha * fit$mu)),
pch = 4,col = "black",cex = 0.6,
panel = function(x, y, ...) {
panel.xyplot(x,y,...)
panel.abline(a = 0,b = 1,col = "magenta",lty = "dotted")
},
scales = list(limits = c(-1.1,1.1)),
xlab = "ground-truth regression coefficient",
ylab = "estimated regression coefficient"),
split = c(1,1,3,1),
more = TRUE)
# Show the change in the variational lower bound at each iteration of the
# co-ordinate ascent algorithm.
numiter <- length(fit$logZ)
print(xyplot(y ~ x,data.frame(x = 1:numiter,y = max(fit$logZ) - fit$logZ),
type = "l",col = "darkorange",lwd = 2,
scales = list(y = list(log = 10)),xlab = "iteration",
ylab = "distance from final lower bound"),
split = c(2,1,3,1),
more = TRUE)
# Plot the number of nonzero mixture components at each iteration of
# the co-ordinate ascent algorithm.
print(xyplot(y ~ x,data.frame(x = 1:numiter,y = length(sd.grid) - fit$nzw),
type = "l",col = "royalblue",lwd = 2,xlab = "iteration",
ylab = "nonzero mixture components"),
split = c(3,1,3,1),
      more = FALSE)
|
/scratch/gouwar.j/cran-all/cranData/varbvs/demo/varbvsmix.dense.R
|
# The varbvs and varbvsmix functions should produce the same estimates
# when there are exactly 2 mixture components (a "spike" and a
# "slab"). This script verifies this in a small simulated data set.
library(varbvs)
# SCRIPT PARAMETERS
# -----------------
n <- 1000 # Number of samples.
p <- 2000 # Number of variables (genetic markers).
se <- 4 # Variance of residual.
# Names of the covariates.
covariates <- c("age","weight")
# The standard deviations and mixture weights used to simulate the
# additive effects on the quantitative trait. Note that the first
# mixture component must have a standard deviation of exactly zero.
sd <- c(0,0.5)
w <- c(0.95,0.05)
# Set the random number generator seed.
suppressWarnings(RNGversion("3.5.0"))
set.seed(1)
# GENERATE DATA SET
# -----------------
# Get the number of covariates.
m <- length(covariates)
# Generate the minor allele frequencies so that they are uniform over
# range [0.05,0.5]. Then simulate genotypes assuming all markers are
# uncorrelated (i.e., unlinked), according to the specified minor
# allele frequencies.
cat("1. GENERATING DATA SET.\n")
cat("Data simulation settings:\n")
cat(sprintf(" - Num. data samples %d\n",n))
cat(sprintf(" - Num. covariates %d\n",m))
cat(sprintf(" - Num. variables (SNPs) %d\n",p))
cat(sprintf(" - Num. mixture components %d\n",length(w)))
maf <- 0.05 + 0.45*runif(p)
X <- (runif(n*p) < maf) +
(runif(n*p) < maf)
X <- matrix(as.double(X),n,p,byrow = TRUE)
# Generate additive effects according to the specified standard
# deviations (sd) and mixture weights (w).
k <- sample(length(w),p,replace = TRUE,prob = w)
beta <- sd[k] * rnorm(p)
# Generate random labels for the markers.
colnames(X) <- paste0("rs",sample(1e6,p))
# Generate a random intercept.
mu <- rnorm(1)
# Generate the covariate data (Z), and the linear effects of the
# covariates (u).
if (m > 0) {
Z <- randn(n,m)
u <- rnorm(m)
colnames(Z) <- covariates
} else {
Z <- NULL
}
# Generate the quantitative trait measurements.
y <- mu + X %*% beta + sqrt(se)*rnorm(n)
if (m > 0)
y <- y + Z %*% u
y <- c(y)
# FIT VARIATIONAL APPROXIMATION TO POSTERIOR
# ------------------------------------------
# Fit the varbvsmix model.
cat("2. FITTING VARBVSMIX MODEL TO DATA.\n")
alpha <- runif(p)
alpha <- cbind(alpha,1 - alpha)
mu <- cbind(0,rnorm(p))
fit <- varbvsmix(X,Z,y,sd^2,alpha = alpha,mu = mu)
# Fit the varbvs model.
cat("3. FITTING VARBVS MODEL TO DATA.\n")
w1 <- fit$w[2]
fit2 <- varbvs(X,Z,y,sa = sd[2]^2,logodds = log10(w1/(1 - w1)),
alpha = alpha[,2],mu = mu[,2])
# Check that the parameter estimates are the same.
niter <- length(fit$logZ)
relerr <- function (x, y)
abs(x - y)/min(abs(x),abs(y))
cat(sprintf("Max. difference in PIPs: %0.2e\n",
max(abs(fit$alpha[,2] - fit2$alpha))))
cat(sprintf("Max. difference in posterior means: %0.2e\n",
max(abs(fit$mu[,2] - fit2$mu))))
cat(sprintf("Relative difference in lower bounds: %0.2e\n",
relerr(fit$logZ[niter],fit2$logw)))
|
/scratch/gouwar.j/cran-all/cranData/varbvs/demo/varbvsmix.test.R
|
## ---- echo = FALSE, message = FALSE-------------------------------------------
knitr::opts_chunk$set(eval = FALSE,collapse = TRUE,comment = "#")
## ---- eval = TRUE, message = FALSE--------------------------------------------
library(lattice)
library(varbvs)
## ---- eval = TRUE-------------------------------------------------------------
set.seed(1)
## -----------------------------------------------------------------------------
# load("cd.RData")
## -----------------------------------------------------------------------------
# r <- system.time(fit <- varbvs(X,NULL,y,family = "binomial",
#                                logodds = seq(-6,-3,0.25),n0 = 0))
# cat(sprintf("Model fitting took %0.2f minutes.\n",r["elapsed"]/60))
## -----------------------------------------------------------------------------
# pip <- c(varbvsindep(fit,X,NULL,y)$alpha %*% fit$w)
## -----------------------------------------------------------------------------
# save(list = c("fit","map","pip","r"),
# file = "varbvs.demo.cd.RData")
## -----------------------------------------------------------------------------
# print(summary(fit,nv = 9))
## ---- fig.width = 9,fig.height = 4,fig.align = "center"-----------------------
# i <- which(fit$pip > 0.5)
# var.labels <- paste0(round(map$pos[i]/1e6,digits = 2),"Mb")
# print(plot(fit,groups = map$chr,vars = i,var.labels = var.labels,gap = 7500,
# ylab = "posterior prob."),
# split = c(1,1,1,2),more = TRUE)
# print(plot(fit,groups = map$chr,score = log10(pip + 0.001),vars = i,
# var.labels = var.labels,gap = 7500,ylab = "log10 posterior prob."),
# split = c(1,2,1,2),more = FALSE)
|
/scratch/gouwar.j/cran-all/cranData/varbvs/inst/doc/cd.R
|
---
title: "Mapping disease risk loci using varbvs"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Crohn's disease demo}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
This vignette demonstrates how to fit a Bayesian variable selection model
using **varbvs** to identify genetic markers associated with Crohn's
disease risk. The data consist of 442,001 SNPs genotyped for 1,748 cases
and 2,938 controls. Note that file `cd.RData` cannot be made publicly
available due to data sharing restrictions, so this vignette is for
viewing only.
```{r, echo = FALSE, message = FALSE}
knitr::opts_chunk$set(eval = FALSE,collapse = TRUE,comment = "#")
```
Begin by loading a couple packages into the R environment.
```{r, eval = TRUE, message = FALSE}
library(lattice)
library(varbvs)
```
Set the random number generator seed.
```{r, eval = TRUE}
set.seed(1)
```
## Load the genotype and phenotype data
```{r}
load("cd.RData")
```
## Fit variational approximation to posterior
Here we fit the fully-factorized variational approximation to the posterior
distribution of the coefficients for a logistic regression model of a binary
outcome (case-control status), with spike and slab priors on the coefficients.
```{r}
r <- system.time(fit <- varbvs(X,NULL,y,family = "binomial",
                logodds = seq(-6,-3,0.25),n0 = 0))
cat(sprintf("Model fitting took %0.2f minutes.\n",r["elapsed"]/60))
```
Compute "single-marker" posterior inclusion probabilities.
```{r}
pip <- c(varbvsindep(fit,X,NULL,y)$alpha %*% fit$w)
```
## Save the results to a file
```{r}
save(list = c("fit","map","pip","r"),
file = "varbvs.demo.cd.RData")
```
## Summarize the model fitting
```{r}
print(summary(fit,nv = 9))
```
Show two "genome-wide scans", one using the posterior inclusion
probabilities (PIPs) computed in the joint analysis of all
variables, and one using the PIPs that ignore correlations between
the variables. The latter is meant to look like a typical
genome-wide "Manhattan" plot used to summarize the results of a
genome-wide association study. Variables with `PIP > 0.5` are
highlighted.
```{r, fig.width = 9,fig.height = 4,fig.align = "center"}
i <- which(fit$pip > 0.5)
var.labels <- paste0(round(map$pos[i]/1e6,digits = 2),"Mb")
print(plot(fit,groups = map$chr,vars = i,var.labels = var.labels,gap = 7500,
ylab = "posterior prob."),
split = c(1,1,1,2),more = TRUE)
print(plot(fit,groups = map$chr,score = log10(pip + 0.001),vars = i,
var.labels = var.labels,gap = 7500,ylab = "log10 posterior prob."),
split = c(1,2,1,2),more = FALSE)
```
|
/scratch/gouwar.j/cran-all/cranData/varbvs/inst/doc/cd.Rmd
|
## ---- echo = FALSE, message = FALSE-------------------------------------------
knitr::opts_chunk$set(eval = FALSE,collapse = TRUE,comment = "#")
## ---- eval = TRUE, message = FALSE--------------------------------------------
library(curl)
library(lattice)
library(varbvs)
## ---- eval = TRUE-------------------------------------------------------------
trait <- "testis"
covariates <- "sacwt"
logodds <- seq(-5,-3,0.25)
sa <- 0.05
## ---- eval = TRUE-------------------------------------------------------------
set.seed(1)
## -----------------------------------------------------------------------------
# load(curl("https://zenodo.org/record/546142/files/cfw.RData"))
## -----------------------------------------------------------------------------
# rows <- which(apply(pheno[,c(trait,covariates)],1,
# function (x) sum(is.na(x)) == 0))
# pheno <- pheno[rows,]
# geno <- geno[rows,]
## -----------------------------------------------------------------------------
# runtime <- system.time(fit <-
# varbvs(geno,as.matrix(pheno[,covariates]),pheno[,trait],
# sa = sa,logodds = logodds,verbose = FALSE))
# cat(sprintf("Model fitting took %0.2f minutes.\n",runtime["elapsed"]/60))
## -----------------------------------------------------------------------------
# print(summary(fit))
## ---- fig.width = 7,fig.height = 5.5, fig.align = "center"--------------------
# trellis.par.set(axis.text = list(cex = 0.7),
# par.ylab.text = list(cex = 0.7),
# par.main.text = list(cex = 0.7,font = 1))
# j <- which(fit$pip > 0.5)
# r <- gwscan.gemma[[trait]]
# r[is.na(r)] <- 0
# chromosomes <- levels(gwscan.bvsr$chr)
# xticks <- rep(0,length(chromosomes))
# names(xticks) <- chromosomes
# pos <- 0
# for (i in chromosomes) {
# n <- sum(gwscan.bvsr$chr == i)
# xticks[i] <- pos + n/2
# pos <- pos + n + 25
# }
# print(plot(fit,groups = map$chr,vars = j,gap = 1500,cex = 0.6,
# ylab = "probability",main = "a. multi-marker (varbvs)",
# scales = list(y = list(limits = c(-0.1,1.2),at = c(0,0.5,1))),
# vars.xyplot.args = list(cex = 0.6)),
# split = c(1,1,1,3),more = TRUE)
# print(plot(fit,groups = map$chr,score = r,vars = j,cex = 0.6,gap = 1500,
# draw.threshold = 5.71,ylab = "-log10 p-value",
# main = "b. single-marker (GEMMA -lm 2)",
# scales = list(y = list(limits = c(-1,20),at = seq(0,20,5))),
# vars.xyplot.args = list(cex = 0.6)),
# split = c(1,2,1,3),more = TRUE)
# print(xyplot(p1 ~ plot.x,gwscan.bvsr,pch = 20,col = "midnightblue",
# scales = list(x = list(at = xticks,labels = chromosomes),
# y = list(limits = c(-0.1,1.2),at = c(0,0.5,1))),
# xlab = "",ylab = "probability",main = "c. multi-marker (BVSR)"),
# split = c(1,3,1,3),more = FALSE)
## -----------------------------------------------------------------------------
# sessionInfo()
|
/scratch/gouwar.j/cran-all/cranData/varbvs/inst/doc/cfw.R
|
---
title: "Mapping QTLs in outbred mice using varbvs"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{QTL mapping demo}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, echo = FALSE, message = FALSE}
knitr::opts_chunk$set(eval = FALSE,collapse = TRUE,comment = "#")
```
In this vignette, we use **varbvs** to map QTLs for phenotypes
measured in CFW (Carworth Farms White) outbred mice. Phenotypes
include muscle weights---EDL and soleus muscle---and testis weight
measured at sacrifice. Running this script with `trait = "testis"`
reproduces the results and figures shown in the second example of a
forthcoming paper (Carbonetto *et al*, 2016).
## Vignette parameters
Begin by loading packages into the R environment.
```{r, eval = TRUE, message = FALSE}
library(curl)
library(lattice)
library(varbvs)
```
These script parameters specify the candidate prior log-odds
settings, the prior variance of the coefficients, and which trait to
analyze. Set trait to "edl", "soleus" or "testis".
```{r, eval = TRUE}
trait <- "testis"
covariates <- "sacwt"
logodds <- seq(-5,-3,0.25)
sa <- 0.05
```
Set the random number generator seed.
```{r, eval = TRUE}
set.seed(1)
```
## Load the genotype and phenotype data
Retrieve the data from the Zenodo repository.
```{r}
load(curl("https://zenodo.org/record/546142/files/cfw.RData"))
```
Only analyze samples for which the phenotype and all the covariates
are observed.
```{r}
rows <- which(apply(pheno[,c(trait,covariates)],1,
function (x) sum(is.na(x)) == 0))
pheno <- pheno[rows,]
geno <- geno[rows,]
```
## Fit variational approximation to posterior
```{r}
runtime <- system.time(fit <-
varbvs(geno,as.matrix(pheno[,covariates]),pheno[,trait],
sa = sa,logodds = logodds,verbose = FALSE))
cat(sprintf("Model fitting took %0.2f minutes.\n",runtime["elapsed"]/60))
```
## Summarize the results of model fitting
```{r}
print(summary(fit))
```
Show three genome-wide scans: (1) one using the posterior inclusion
probabilities (PIPs) computed in the BVS analysis of all SNPs; (2)
one using the p-values computed using GEMMA; and (3) one using the
PIPs computed from the BVSR model in GEMMA.
```{r, fig.width = 7,fig.height = 5.5, fig.align = "center"}
trellis.par.set(axis.text = list(cex = 0.7),
par.ylab.text = list(cex = 0.7),
par.main.text = list(cex = 0.7,font = 1))
j <- which(fit$pip > 0.5)
r <- gwscan.gemma[[trait]]
r[is.na(r)] <- 0
chromosomes <- levels(gwscan.bvsr$chr)
xticks <- rep(0,length(chromosomes))
names(xticks) <- chromosomes
pos <- 0
for (i in chromosomes) {
n <- sum(gwscan.bvsr$chr == i)
xticks[i] <- pos + n/2
pos <- pos + n + 25
}
print(plot(fit,groups = map$chr,vars = j,gap = 1500,cex = 0.6,
ylab = "probability",main = "a. multi-marker (varbvs)",
scales = list(y = list(limits = c(-0.1,1.2),at = c(0,0.5,1))),
vars.xyplot.args = list(cex = 0.6)),
split = c(1,1,1,3),more = TRUE)
print(plot(fit,groups = map$chr,score = r,vars = j,cex = 0.6,gap = 1500,
draw.threshold = 5.71,ylab = "-log10 p-value",
main = "b. single-marker (GEMMA -lm 2)",
scales = list(y = list(limits = c(-1,20),at = seq(0,20,5))),
vars.xyplot.args = list(cex = 0.6)),
split = c(1,2,1,3),more = TRUE)
print(xyplot(p1 ~ plot.x,gwscan.bvsr,pch = 20,col = "midnightblue",
scales = list(x = list(at = xticks,labels = chromosomes),
y = list(limits = c(-0.1,1.2),at = c(0,0.5,1))),
xlab = "",ylab = "probability",main = "c. multi-marker (BVSR)"),
split = c(1,3,1,3),more = FALSE)
```
## Session information
This is the version of R and the packages that were used to generate
these results.
```{r}
sessionInfo()
```
|
/scratch/gouwar.j/cran-all/cranData/varbvs/inst/doc/cfw.Rmd
|
## ---- echo = FALSE, message = FALSE-------------------------------------------
knitr::opts_chunk$set(eval = FALSE,collapse = TRUE,comment = "#")
## ---- eval = TRUE, message = FALSE--------------------------------------------
library(lattice)
library(varbvs)
## ---- eval = TRUE-------------------------------------------------------------
set.seed(1)
## -----------------------------------------------------------------------------
# load("cd.RData")
# data(cytokine)
## -----------------------------------------------------------------------------
# fit.null <- varbvs(X,NULL,y,"binomial",logodds = -4,n0 = 0)
## -----------------------------------------------------------------------------
# logodds <- matrix(-4,442001,13)
# logodds[cytokine == 1,] <- matrix(-4 + seq(0,3,0.25),6711,13,byrow = TRUE)
# fit.cytokine <- varbvs(X,NULL,y,"binomial",logodds = logodds,n0 = 0,
# alpha = fit.null$alpha,mu = fit.null$mu,
# eta = fit.null$eta,optimize.eta = TRUE)
## -----------------------------------------------------------------------------
# BF <- varbvsbf(fit.null,fit.cytokine)
## -----------------------------------------------------------------------------
# save(list = c("fit.null","fit.cytokine","map","cytokine","BF"),
# file = "varbvs.demo.cytokine.RData")
## ---- fig.width = 9,fig.height = 4,fig.align = "center"-----------------------
# i <- which(fit.null$pip > 0.5 | fit.cytokine$pip > 0.5)
# var.labels <- paste0(round(map$pos[i]/1e6,digits = 2),"Mb")
# print(plot(fit.null,groups = map$chr,vars = i,var.labels = NULL,
# gap = 7500,ylab = "posterior prob."),
# split = c(1,1,1,2),more = TRUE)
# print(plot(fit.cytokine,groups = map$chr,vars = i,var.labels = var.labels,
# gap = 7500,ylab = "posterior prob."),
# split = c(1,2,1,2),more = FALSE)
|
/scratch/gouwar.j/cran-all/cranData/varbvs/inst/doc/cytokine.R
|
---
title: "Assessing support for gene sets in disease using varbvs"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Cytokine signaling genes demo}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
In this vignette, we fit two variable selection models: the first ("null")
model has a uniform prior for all variables (the 442,001 genetic markers);
the second model has higher prior probability for genetic markers near
cytokine signaling genes. This analysis is intended to assess support for
enrichment of Crohn's disease risk factors near cytokine signaling genes;
a large Bayes factor means greater support for this enrichment hypothesis.
The data in this analysis consist of 442,001 SNPs genotyped for 1,748 cases
and 2,938 controls. Note that file `cd.RData` cannot be made publicly
available due to data sharing restrictions, so this script is for viewing
only.
```{r, echo = FALSE, message = FALSE}
knitr::opts_chunk$set(eval = FALSE,collapse = TRUE,comment = "#")
```
Begin by loading a couple packages into the R environment.
```{r, eval = TRUE, message = FALSE}
library(lattice)
library(varbvs)
```
Set the random number generator seed.
```{r, eval = TRUE}
set.seed(1)
```
## Load the genotypes, phenotypes and pathway annotation
```{r}
load("cd.RData")
data(cytokine)
```
## Fit variational approximation to posterior
Here we compute the variational approximation given the assumption that all
variables (genetic markers) are, *a priori*, equally likely to be included
in the model.
```{r}
fit.null <- varbvs(X,NULL,y,"binomial",logodds = -4,n0 = 0)
```
Next, compute the variational approximation given the assumption that
genetic markers near cytokine signaling genes are more likely to be
included in the model. For faster and more accurate computation of
posterior probabilities, the variational parameters are initialized to
the fitted values computed above with the exchangeable prior.
```{r}
logodds <- matrix(-4,442001,13)
logodds[cytokine == 1,] <- matrix(-4 + seq(0,3,0.25),6711,13,byrow = TRUE)
fit.cytokine <- varbvs(X,NULL,y,"binomial",logodds = logodds,n0 = 0,
alpha = fit.null$alpha,mu = fit.null$mu,
eta = fit.null$eta,optimize.eta = TRUE)
```
Compute the Bayes factor.
```{r}
BF <- varbvsbf(fit.null,fit.cytokine)
```
## Save the results to a file
```{r}
save(list = c("fit.null","fit.cytokine","map","cytokine","BF"),
file = "varbvs.demo.cytokine.RData")
```
## Summarize the results of model fitting
Show two "genome-wide scans" from the multi-marker PIPs, with and
without conditioning on enrichment of cytokine signaling genes.
```{r, fig.width = 9,fig.height = 4,fig.align = "center"}
i <- which(fit.null$pip > 0.5 | fit.cytokine$pip > 0.5)
var.labels <- paste0(round(map$pos[i]/1e6,digits = 2),"Mb")
print(plot(fit.null,groups = map$chr,vars = i,var.labels = NULL,
gap = 7500,ylab = "posterior prob."),
split = c(1,1,1,2),more = TRUE)
print(plot(fit.cytokine,groups = map$chr,vars = i,var.labels = var.labels,
gap = 7500,ylab = "posterior prob."),
split = c(1,2,1,2),more = FALSE)
```
|
/scratch/gouwar.j/cran-all/cranData/varbvs/inst/doc/cytokine.Rmd
|
## ---- echo = FALSE, message = FALSE-------------------------------------------
knitr::opts_chunk$set(collapse = TRUE,comment = "#",fig.width = 6.9,
fig.height = 5.5,fig.align = "center",
fig.cap = " ",dpi = 120)
## ---- message = FALSE---------------------------------------------------------
library(lattice)
library(latticeExtra)
library(glmnet)
library(varbvs)
## -----------------------------------------------------------------------------
nfolds <- 20 # Number of cross-validation folds.
alpha <- 0.95 # Elastic net mixing parameter.
lambda <- 10^(seq(0,-2,-0.05)) # Lambda sequence.
## -----------------------------------------------------------------------------
data(leukemia)
X <- leukemia$x
y <- leukemia$y
set.seed(1)
## -----------------------------------------------------------------------------
# This is the model fitting step.
r <- system.time(fit.glmnet <-
glmnet(X,y,family = "binomial",lambda = lambda,alpha = alpha))
cat(sprintf("Model fitting took %0.2f seconds.\n",r["elapsed"]))
# This is the cross-validation step.
r <- system.time(out.cv.glmnet <-
cv.glmnet(X,y,family = "binomial",type.measure = "class",
alpha = alpha,nfolds = nfolds,lambda = lambda))
lambda <- out.cv.glmnet$lambda
cat(sprintf("Cross-validation took %0.2f seconds.\n",r["elapsed"]))
# Choose the largest value of lambda that is within 1 standard error
# of the smallest misclassification error.
lambda.opt <- out.cv.glmnet$lambda.1se
## ---- results = "hold"--------------------------------------------------------
cat("classification results with lambda = ",lambda.opt,":\n",sep="")
y.glmnet <- c(predict(fit.glmnet,X,s = lambda.opt,type = "class"))
print(table(true = factor(y),pred = factor(y.glmnet)))
## -----------------------------------------------------------------------------
trellis.par.set(par.xlab.text = list(cex = 0.85),
par.ylab.text = list(cex = 0.85),
axis.text = list(cex = 0.75))
# Choose the largest value of lambda that is within 1 standard error
# of the smallest misclassification error.
lambda.opt <- out.cv.glmnet$lambda.1se
# Plot regression coefficients.
lambda <- fit.glmnet$lambda
vars <- setdiff(which(rowSums(abs(coef(fit.glmnet))) > 0),1)
n <- length(vars)
b <- as.matrix(t(coef(fit.glmnet)[vars,]))
i <- coef(fit.glmnet,s = lambda.opt)
i <- rownames(i)[which(i != 0)]
i <- i[-1]
vars.opt <- colnames(b)
vars.opt[!is.element(vars.opt,i)] <- ""
vars.opt <- substring(vars.opt,2)
lab <- expression("more complex" %<-% paste(log[10],lambda) %->%
"less complex")
r <- xyplot(y ~ x,data.frame(x = log10(lambda),y = b[,1]),type = "l",
col = "blue",xlab = lab,ylab = "regression coefficient",
scales = list(x = list(limits = c(-2.35,0.1)),
y = list(limits = c(-0.8,1.2))),
panel = function(x, y, ...) {
panel.xyplot(x,y,...);
panel.abline(v = log10(lambda.opt),col = "orangered",
lwd = 2,lty = "dotted");
ltext(x = -2,y = b[nrow(b),],labels = vars.opt,pos = 2,
offset = 0.5,cex = 0.5);
})
for (i in 2:n)
r <- r + as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),y = b[,i]),
type = "l",col = "blue"))
print(r,split = c(2,1,2,1),more = TRUE)
# Plot classification error.
Y <- predict(fit.glmnet,X,type = "class")
mode(Y) <- "numeric"
print(with(out.cv.glmnet,
xyplot(y ~ x,data.frame(x = log10(lambda),y = cvm),type = "l",
col = "blue",xlab = lab,
ylab = "20-fold cross-validation \n classification error",
scales = list(y = list(limits = c(-0.02,0.45))),
panel = function(x, y, ...) {
panel.xyplot(x,y,...);
panel.abline(v = log10(lambda.opt),col = "orangered",
lwd = 2,lty = "dotted");
}) +
as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),y = cvm),
pch = 20,cex = 0.6,col = "blue")) +
as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),y = cvup),
type = "l",col = "blue",lty = "solid")) +
as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),y = cvlo),
type = "l",col = "blue",lty = "solid")) +
as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),
y = colMeans(abs(Y - y))),
type = "l",col = "darkorange",lwd = 2,
lty = "solid"))),
split = c(1,1,2,2),more = TRUE)
# Plot number of non-zero regression coefficients.
print(with(out.cv.glmnet,
xyplot(y ~ x,data.frame(x = log10(lambda),y = nzero),type = "l",
col = "blue",xlab = lab,
ylab = "number of non-zero \n coefficients",
panel = function(x, y, ...) {
panel.xyplot(x,y,...)
panel.abline(v = log10(lambda.opt),col = "orangered",
lwd = 2,lty = "dotted")
}) +
as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),y = nzero),
pch = 20,cex = 0.6,col = "blue"))),
split = c(1,2,2,2),more = FALSE)
## -----------------------------------------------------------------------------
r <- system.time(fit.varbvs <- varbvs(X,NULL,y,"binomial",verbose = FALSE))
cat(sprintf("Model fitting took %0.2f seconds.\n",r["elapsed"]))
## ---- results = "hold"--------------------------------------------------------
y.varbvs <- predict(fit.varbvs,X,type = "class")
print(table(true = factor(y),pred = factor(y.varbvs)))
## -----------------------------------------------------------------------------
trellis.par.set(par.xlab.text = list(cex = 0.85),
par.ylab.text = list(cex = 0.85),
axis.text = list(cex = 0.75))
# Get the normalized importance weights.
w <- fit.varbvs$w
# Plot classification error at each hyperparameter setting.
sigmoid10 <- function (x)
1/(1 + 10^(-x))
logodds <- fit.varbvs$logodds
log10q <- log10(sigmoid10(logodds))
m <- length(logodds)
err <- rep(0,m)
for (i in 1:m) {
r <- subset(fit.varbvs,logodds == logodds[i])
ypred <- predict(r,X)
err[i] <- mean(y != ypred)
}
lab <- expression("more complex" %<-% paste(log[10],pi) %->% "less complex")
print(xyplot(y ~ x,data.frame(x = log10q,y = err),type = "l",
col = "blue",xlab = lab,ylab = "classification error",
scales = list(x = list(limits = c(-0.9,-3.65)))) +
as.layer(xyplot(y ~ x,data.frame(x = log10q,y = err),
col = "blue",pch = 20,cex = 0.65)),
split = c(1,1,2,2),more = TRUE)
# Plot expected number of included variables at each hyperparameter
# setting.
r <- colSums(fit.varbvs$alpha)
print(xyplot(y ~ x,data.frame(x = log10q,y = r),type = "l",col = "blue",
xlab = lab,ylab = "expected number of\nincluded variables",
scales = list(x = list(limits = c(-0.9,-3.65)),
y = list(log = 10,at = c(1,10,100)))) +
as.layer(xyplot(y ~ x,data.frame(x = log10q,y = r),
col = "blue",pch = 20,cex = 0.65,
scales = list(x = list(limits = c(-0.9,-3.65)),
y = list(log = 10)))),
split = c(1,2,2,2),more = TRUE)
# Plot density of prior inclusion probability hyperparameter.
print(xyplot(y ~ x,data.frame(x = log10q,y = w),type = "l",col = "blue",
xlab = lab,
ylab = expression(paste("posterior probability of ",pi)),
scales = list(x = list(limits = c(-0.9,-3.65)))) +
as.layer(xyplot(y ~ x,data.frame(x = log10q,y = w),
col = "blue",pch = 20,cex = 0.65)),
split = c(2,1,2,1),more = FALSE)
## -----------------------------------------------------------------------------
sessionInfo()
|
/scratch/gouwar.j/cran-all/cranData/varbvs/inst/doc/leukemia.R
|
---
title: "Comparison of glmnet and varbvs in Leukemia data set"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{varbvs leukemia demo}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
This vignette demonstrates application of **glmnet** and **varbvs** to
the Leukemia data set. The main aim of this script is to illustrate
some of the different properties of Bayesian variable selection and
penalized sparse regression (as implemented by **varbvs** and **glmnet**,
respectively).
We use the preprocessed data of Dettling (2004) retrieved from the
supplementary materials accompanying Friedman *et al* (2010). The data
are represented as a 72 x 3,571 matrix of gene expression values
(variable `X`), and a vector of 72 binary disease outcomes (variable
`y`).
```{r, echo = FALSE, message = FALSE}
knitr::opts_chunk$set(collapse = TRUE,comment = "#",fig.width = 6.9,
fig.height = 5.5,fig.align = "center",
fig.cap = " ",dpi = 120)
```
## Vignette parameters
Begin by loading these packages into your R environment.
```{r, message = FALSE}
library(lattice)
library(latticeExtra)
library(glmnet)
library(varbvs)
```
Specify settings for the glmnet analysis.
```{r}
nfolds <- 20 # Number of cross-validation folds.
alpha <- 0.95 # Elastic net mixing parameter.
lambda <- 10^(seq(0,-2,-0.05)) # Lambda sequence.
```
## Load the Leukemia data
Also set the random number generator seed.
```{r}
data(leukemia)
X <- leukemia$x
y <- leukemia$y
set.seed(1)
```
## Fit elastic net model to data
Here, we also run 20-fold cross-validation to select the largest
setting of the L1-penalty strength (*lambda*) that is within 1
standard error of the minimum classification error.
```{r}
# This is the model fitting step.
r <- system.time(fit.glmnet <-
glmnet(X,y,family = "binomial",lambda = lambda,alpha = alpha))
cat(sprintf("Model fitting took %0.2f seconds.\n",r["elapsed"]))
# This is the cross-validation step.
r <- system.time(out.cv.glmnet <-
cv.glmnet(X,y,family = "binomial",type.measure = "class",
alpha = alpha,nfolds = nfolds,lambda = lambda))
lambda <- out.cv.glmnet$lambda
cat(sprintf("Cross-validation took %0.2f seconds.\n",r["elapsed"]))
# Choose the largest value of lambda that is within 1 standard error
# of the smallest misclassification error.
lambda.opt <- out.cv.glmnet$lambda.1se
```
## Evaluate the glmnet predictions
Compute estimates of the disease outcome using the fitted model, and
compare against the observed values.
```{r, results = "hold"}
cat("classification results with lambda = ",lambda.opt,":\n",sep="")
y.glmnet <- c(predict(fit.glmnet,X,s = lambda.opt,type = "class"))
print(table(true = factor(y),pred = factor(y.glmnet)))
```
## Visualize results of glmnet analysis
The first plot shows the evolution of regression coefficients at different
settings of *lambda*. (Note that the intercept is not shown.) Only the
curves for the variables that are selected at the optimal setting
of *lambda* ("lambda.opt") are labeled.
The second plot shows the classification error at different settings of
*lambda*.
The third plot shows the number of nonzero regression coefficients at
different settings of *lambda*.
```{r}
trellis.par.set(par.xlab.text = list(cex = 0.85),
par.ylab.text = list(cex = 0.85),
axis.text = list(cex = 0.75))
# Choose the largest value of lambda that is within 1 standard error
# of the smallest misclassification error.
lambda.opt <- out.cv.glmnet$lambda.1se
# Plot regression coefficients.
lambda <- fit.glmnet$lambda
vars <- setdiff(which(rowSums(abs(coef(fit.glmnet))) > 0),1)
n <- length(vars)
b <- as.matrix(t(coef(fit.glmnet)[vars,]))
i <- coef(fit.glmnet,s = lambda.opt)
i <- rownames(i)[which(i != 0)]
i <- i[-1]
vars.opt <- colnames(b)
vars.opt[!is.element(vars.opt,i)] <- ""
vars.opt <- substring(vars.opt,2)
lab <- expression("more complex" %<-% paste(log[10],lambda) %->%
"less complex")
r <- xyplot(y ~ x,data.frame(x = log10(lambda),y = b[,1]),type = "l",
col = "blue",xlab = lab,ylab = "regression coefficient",
scales = list(x = list(limits = c(-2.35,0.1)),
y = list(limits = c(-0.8,1.2))),
panel = function(x, y, ...) {
panel.xyplot(x,y,...);
panel.abline(v = log10(lambda.opt),col = "orangered",
lwd = 2,lty = "dotted");
ltext(x = -2,y = b[nrow(b),],labels = vars.opt,pos = 2,
offset = 0.5,cex = 0.5);
})
for (i in 2:n)
r <- r + as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),y = b[,i]),
type = "l",col = "blue"))
print(r,split = c(2,1,2,1),more = TRUE)
# Plot classification error.
Y <- predict(fit.glmnet,X,type = "class")
mode(Y) <- "numeric"
print(with(out.cv.glmnet,
xyplot(y ~ x,data.frame(x = log10(lambda),y = cvm),type = "l",
col = "blue",xlab = lab,
ylab = "20-fold cross-validation \n classification error",
scales = list(y = list(limits = c(-0.02,0.45))),
panel = function(x, y, ...) {
panel.xyplot(x,y,...);
panel.abline(v = log10(lambda.opt),col = "orangered",
lwd = 2,lty = "dotted");
}) +
as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),y = cvm),
pch = 20,cex = 0.6,col = "blue")) +
as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),y = cvup),
type = "l",col = "blue",lty = "solid")) +
as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),y = cvlo),
type = "l",col = "blue",lty = "solid")) +
as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),
y = colMeans(abs(Y - y))),
type = "l",col = "darkorange",lwd = 2,
lty = "solid"))),
split = c(1,1,2,2),more = TRUE)
# Plot number of non-zero regression coefficients.
print(with(out.cv.glmnet,
xyplot(y ~ x,data.frame(x = log10(lambda),y = nzero),type = "l",
col = "blue",xlab = lab,
ylab = "number of non-zero \n coefficients",
panel = function(x, y, ...) {
panel.xyplot(x,y,...)
panel.abline(v = log10(lambda.opt),col = "orangered",
lwd = 2,lty = "dotted")
}) +
as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),y = nzero),
pch = 20,cex = 0.6,col = "blue"))),
split = c(1,2,2,2),more = FALSE)
```
## Fit variational approximation to posterior
Fit the fully-factorized variational approximation to the posterior
distribution of the coefficients for a logistic regression model of
the binary outcome (the type of leukemia), with spike-and-slab priors
on the coefficients.
```{r}
r <- system.time(fit.varbvs <- varbvs(X,NULL,y,"binomial",verbose = FALSE))
cat(sprintf("Model fitting took %0.2f seconds.\n",r["elapsed"]))
```
## Evaluate the varbvs predictions
Compute estimates of the disease outcome using the fitted model, and
compare against the observed values.
```{r, results = "hold"}
y.varbvs <- predict(fit.varbvs,X,type = "class")
print(table(true = factor(y),pred = factor(y.varbvs)))
```
## Visualize results of varbvs analysis
The first plot shows the classification error at each setting of
the prior log-odds.
The second plot shows the expected number of variables included in the
model at each setting of the prior log-odds.
The third plot shows the (approximate) probability density of the
prior log-odds parameter.
```{r}
trellis.par.set(par.xlab.text = list(cex = 0.85),
par.ylab.text = list(cex = 0.85),
axis.text = list(cex = 0.75))
# Get the normalized importance weights.
w <- fit.varbvs$w
# Plot classification error at each hyperparameter setting.
sigmoid10 <- function (x)
1/(1 + 10^(-x))
logodds <- fit.varbvs$logodds
log10q <- log10(sigmoid10(logodds))
m <- length(logodds)
err <- rep(0,m)
for (i in 1:m) {
r <- subset(fit.varbvs,logodds == logodds[i])
ypred <- predict(r,X)
err[i] <- mean(y != ypred)
}
lab <- expression("more complex" %<-% paste(log[10],pi) %->% "less complex")
print(xyplot(y ~ x,data.frame(x = log10q,y = err),type = "l",
col = "blue",xlab = lab,ylab = "classification error",
scales = list(x = list(limits = c(-0.9,-3.65)))) +
as.layer(xyplot(y ~ x,data.frame(x = log10q,y = err),
col = "blue",pch = 20,cex = 0.65)),
split = c(1,1,2,2),more = TRUE)
# Plot expected number of included variables at each hyperparameter
# setting.
r <- colSums(fit.varbvs$alpha)
print(xyplot(y ~ x,data.frame(x = log10q,y = r),type = "l",col = "blue",
xlab = lab,ylab = "expected number of\nincluded variables",
scales = list(x = list(limits = c(-0.9,-3.65)),
y = list(log = 10,at = c(1,10,100)))) +
as.layer(xyplot(y ~ x,data.frame(x = log10q,y = r),
col = "blue",pch = 20,cex = 0.65,
scales = list(x = list(limits = c(-0.9,-3.65)),
y = list(log = 10)))),
split = c(1,2,2,2),more = TRUE)
# Plot density of prior inclusion probability hyperparameter.
print(xyplot(y ~ x,data.frame(x = log10q,y = w),type = "l",col = "blue",
xlab = lab,
ylab = expression(paste("posterior probability of ",pi)),
scales = list(x = list(limits = c(-0.9,-3.65)))) +
as.layer(xyplot(y ~ x,data.frame(x = log10q,y = w),
col = "blue",pch = 20,cex = 0.65)),
split = c(2,1,2,1),more = FALSE)
```
### References
Dettling, M. (2004). BagBoosting for tumor classification with gene
expression data. *Bioinformatics* **20**, 3583–3593.
Friedman, J., Hastie, T., Tibshirani, R. (2010) Regularization paths
for generalized linear models via coordinate descent. *Journal of
Statistical Software* **33**, 1–22.
## Session information
This is the version of R and the packages that were used to generate
these results.
```{r}
sessionInfo()
```
|
/scratch/gouwar.j/cran-all/cranData/varbvs/inst/doc/leukemia.Rmd
|
---
title: "Mapping disease risk loci using varbvs"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Crohn's disease demo}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
This vignette demonstrates how to fit a Bayesian variable selection model
using **varbvs** to identify genetic markers associated with Crohn's
disease risk. The data consist of 442,001 SNPs genotyped for 1,748 cases
and 2,938 controls. Note that file `cd.RData` cannot be made publicly
available due to data sharing restrictions, so this vignette is for
viewing only.
```{r, echo = FALSE, message = FALSE}
knitr::opts_chunk$set(eval = FALSE,collapse = TRUE,comment = "#")
```
Begin by loading a couple packages into the R environment.
```{r, eval = TRUE, message = FALSE}
library(lattice)
library(varbvs)
```
Set the random number generator seed.
```{r, eval = TRUE}
set.seed(1)
```
## Load the genotype and phenotype data
```{r}
load("cd.RData")
```
## Fit variational approximation to posterior
Here we fit the fully-factorized variational approximation to the posterior
distribution of the coefficients for a logistic regression model of a binary
outcome (case-control status), with spike and slab priors on the coefficients.
```{r}
r <- system.time(fit <- varbvs(X,NULL,y,family = "binomial",
                logodds = seq(-6,-3,0.25),n0 = 0))
cat(sprintf("Model fitting took %0.2f minutes.\n",r["elapsed"]/60))
```
Compute "single-marker" posterior inclusion probabilities.
```{r}
pip <- c(varbvsindep(fit,X,NULL,y)$alpha %*% fit$w)
```
## Save the results to a file
```{r}
save(list = c("fit","map","pip","r"),
file = "varbvs.demo.cd.RData")
```
## Summarize the model fitting
```{r}
print(summary(fit,nv = 9))
```
Show two "genome-wide scans", one using the posterior inclusion
probabilities (PIPs) computed in the joint analysis of all
variables, and one using the PIPs that ignore correlations between
the variables. The latter is meant to look like a typical
genome-wide "Manhattan" plot used to summarize the results of a
genome-wide association study. Variables with `PIP > 0.5` are
highlighted.
```{r, fig.width = 9,fig.height = 4,fig.align = "center"}
i <- which(fit$pip > 0.5)
var.labels <- paste0(round(map$pos[i]/1e6,digits = 2),"Mb")
print(plot(fit,groups = map$chr,vars = i,var.labels = var.labels,gap = 7500,
ylab = "posterior prob."),
split = c(1,1,1,2),more = TRUE)
print(plot(fit,groups = map$chr,score = log10(pip + 0.001),vars = i,
var.labels = var.labels,gap = 7500,ylab = "log10 posterior prob."),
split = c(1,2,1,2),more = FALSE)
```
|
/scratch/gouwar.j/cran-all/cranData/varbvs/vignettes/cd.Rmd
|
---
title: "Mapping QTLs in outbred mice using varbvs"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{QTL mapping demo}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, echo = FALSE, message = FALSE}
knitr::opts_chunk$set(eval = FALSE,collapse = TRUE,comment = "#")
```
In this vignette, we use **varbvs** to map QTLs for phenotypes
measured in CFW (Carworth Farms White) outbred mice. Phenotypes
include muscle weights---EDL and soleus muscle---and testis weight
measured at sacrifice. Running this script with `trait = "testis"`
reproduces the results and figures shown in the second example of a
forthcoming paper (Carbonetto *et al*, 2016).
## Vignette parameters
Begin by loading packages into the R environment.
```{r, eval = TRUE, message = FALSE}
library(curl)
library(lattice)
library(varbvs)
```
These script parameters specify the candidate prior log-odds
settings, the prior variance of the coefficients, and which trait to
analyze. Set trait to "edl", "soleus" or "testis".
```{r, eval = TRUE}
trait <- "testis"
covariates <- "sacwt"
logodds <- seq(-5,-3,0.25)
sa <- 0.05
```
Set the random number generator seed.
```{r, eval = TRUE}
set.seed(1)
```
## Load the genotype and phenotype data
Retrieve the data from the Zenodo repository.
```{r}
load(curl("https://zenodo.org/record/546142/files/cfw.RData"))
```
Only analyze samples for which the phenotype and all the covariates
are observed.
```{r}
rows <- which(apply(pheno[,c(trait,covariates)],1,
function (x) sum(is.na(x)) == 0))
pheno <- pheno[rows,]
geno <- geno[rows,]
```
## Fit variational approximation to posterior
```{r}
runtime <- system.time(fit <-
varbvs(geno,as.matrix(pheno[,covariates]),pheno[,trait],
sa = sa,logodds = logodds,verbose = FALSE))
cat(sprintf("Model fitting took %0.2f minutes.\n",runtime["elapsed"]/60))
```
## Summarize the results of model fitting
```{r}
print(summary(fit))
```
Show three genome-wide scans: (1) one using the posterior inclusion
probabilities (PIPs) computed in the BVS analysis of all SNPs; (2)
one using the p-values computed using GEMMA; and (3) one using the
PIPs computed from the BVSR model in GEMMA.
```{r, fig.width = 7,fig.height = 5.5, fig.align = "center"}
trellis.par.set(axis.text = list(cex = 0.7),
par.ylab.text = list(cex = 0.7),
par.main.text = list(cex = 0.7,font = 1))
j <- which(fit$pip > 0.5)
r <- gwscan.gemma[[trait]]
r[is.na(r)] <- 0
chromosomes <- levels(gwscan.bvsr$chr)
xticks <- rep(0,length(chromosomes))
names(xticks) <- chromosomes
pos <- 0
for (i in chromosomes) {
n <- sum(gwscan.bvsr$chr == i)
xticks[i] <- pos + n/2
pos <- pos + n + 25
}
print(plot(fit,groups = map$chr,vars = j,gap = 1500,cex = 0.6,
ylab = "probability",main = "a. multi-marker (varbvs)",
scales = list(y = list(limits = c(-0.1,1.2),at = c(0,0.5,1))),
vars.xyplot.args = list(cex = 0.6)),
split = c(1,1,1,3),more = TRUE)
print(plot(fit,groups = map$chr,score = r,vars = j,cex = 0.6,gap = 1500,
draw.threshold = 5.71,ylab = "-log10 p-value",
main = "b. single-marker (GEMMA -lm 2)",
scales = list(y = list(limits = c(-1,20),at = seq(0,20,5))),
vars.xyplot.args = list(cex = 0.6)),
split = c(1,2,1,3),more = TRUE)
print(xyplot(p1 ~ plot.x,gwscan.bvsr,pch = 20,col = "midnightblue",
scales = list(x = list(at = xticks,labels = chromosomes),
y = list(limits = c(-0.1,1.2),at = c(0,0.5,1))),
xlab = "",ylab = "probability",main = "c. multi-marker (BVSR)"),
split = c(1,3,1,3),more = FALSE)
```
## Session information
This is the version of R and the packages that were used to generate
these results.
```{r}
sessionInfo()
```
|
/scratch/gouwar.j/cran-all/cranData/varbvs/vignettes/cfw.Rmd
|
---
title: "Assessing support for gene sets in disease using varbvs"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Cytokine signaling genes demo}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
In this vignette, we fit two variable selection models: the first ("null")
model has a uniform prior for all variables (the 442,001 genetic markers);
the second model has higher prior probability for genetic markers near
cytokine signaling genes. This analysis is intended to assess support for
enrichment of Crohn's disease risk factors near cytokine signaling genes;
a large Bayes factor means greater support for this enrichment hypothesis.
The data in this analysis consist of 442,001 SNPs genotyped for 1,748 cases
and 2,938 controls. Note that file `cd.RData` cannot be made publicly
available due to data sharing restrictions, so this script is for viewing
only.
```{r, echo = FALSE, message = FALSE}
knitr::opts_chunk$set(eval = FALSE,collapse = TRUE,comment = "#")
```
Begin by loading a couple packages into the R environment.
```{r, eval = TRUE, message = FALSE}
library(lattice)
library(varbvs)
```
Set the random number generator seed.
```{r, eval = TRUE}
set.seed(1)
```
## Load the genotypes, phenotypes and pathway annotation
```{r}
load("cd.RData")
data(cytokine)
```
## Fit variational approximation to posterior
Here we compute the variational approximation given the assumption that all
variables (genetic markers) are, *a priori*, equally likely to be included
in the model.
```{r}
fit.null <- varbvs(X,NULL,y,"binomial",logodds = -4,n0 = 0)
```
Next, compute the variational approximation given the assumption that
genetic markers near cytokine signaling genes are more likely to be
included in the model. For faster and more accurate computation of
posterior probabilities, the variational parameters are initialized to
the fitted values computed above with the exchangeable prior.
```{r}
logodds <- matrix(-4,442001,13)
logodds[cytokine == 1,] <- matrix(-4 + seq(0,3,0.25),6711,13,byrow = TRUE)
fit.cytokine <- varbvs(X,NULL,y,"binomial",logodds = logodds,n0 = 0,
alpha = fit.null$alpha,mu = fit.null$mu,
eta = fit.null$eta,optimize.eta = TRUE)
```
Compute the Bayes factor.
```{r}
BF <- varbvsbf(fit.null,fit.cytokine)
```
## Save the results to a file
```{r}
save(list = c("fit.null","fit.cytokine","map","cytokine","BF"),
file = "varbvs.demo.cytokine.RData")
```
## Summarize the results of model fitting
Show two "genome-wide scans" from the multi-marker PIPs, with and
without conditioning on enrichment of cytokine signaling genes.
```{r, fig.width = 9,fig.height = 4,fig.align = "center"}
i <- which(fit.null$pip > 0.5 | fit.cytokine$pip > 0.5)
var.labels <- paste0(round(map$pos[i]/1e6,digits = 2),"Mb")
print(plot(fit.null,groups = map$chr,vars = i,var.labels = NULL,
gap = 7500,ylab = "posterior prob."),
split = c(1,1,1,2),more = TRUE)
print(plot(fit.cytokine,groups = map$chr,vars = i,var.labels = var.labels,
gap = 7500,ylab = "posterior prob."),
split = c(1,2,1,2),more = FALSE)
```
|
/scratch/gouwar.j/cran-all/cranData/varbvs/vignettes/cytokine.Rmd
|
---
title: "Comparison of glmnet and varbvs in Leukemia data set"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{varbvs leukemia demo}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
This vignette demonstrates application of **glmnet** and **varbvs** to
the Leukemia data set. The main aim of this script is to illustrate
some of the different properties of Bayesian variable selection and
penalized sparse regression (as implemented by **varbvs** and **glmnet**,
respectively).
We use the preprocessed data of Dettling (2004) retrieved from the
supplementary materials accompanying Friedman *et al* (2010). The data
are represented as a 72 x 3,571 matrix of gene expression values
(variable `X`), and a vector of 72 binary disease outcomes (variable
`y`).
```{r, echo = FALSE, message = FALSE}
knitr::opts_chunk$set(collapse = TRUE,comment = "#",fig.width = 6.9,
fig.height = 5.5,fig.align = "center",
fig.cap = " ",dpi = 120)
```
## Vignette parameters
Begin by loading these packages into your R environment.
```{r, message = FALSE}
library(lattice)
library(latticeExtra)
library(glmnet)
library(varbvs)
```
Specify settings for the glmnet analysis.
```{r}
nfolds <- 20 # Number of cross-validation folds.
alpha <- 0.95 # Elastic net mixing parameter.
lambda <- 10^(seq(0,-2,-0.05)) # Lambda sequence.
```
## Load the Leukemia data
Also set the random number generator seed.
```{r}
data(leukemia)
X <- leukemia$x
y <- leukemia$y
set.seed(1)
```
## Fit elastic net model to data
Here, we also run 20-fold cross-validation to select the largest
setting of the L1-penalty strength (*lambda*) that is within 1
standard error of the minimum classification error.
```{r}
# This is the model fitting step.
r <- system.time(fit.glmnet <-
glmnet(X,y,family = "binomial",lambda = lambda,alpha = alpha))
cat(sprintf("Model fitting took %0.2f seconds.\n",r["elapsed"]))
# This is the cross-validation step.
r <- system.time(out.cv.glmnet <-
cv.glmnet(X,y,family = "binomial",type.measure = "class",
alpha = alpha,nfolds = nfolds,lambda = lambda))
lambda <- out.cv.glmnet$lambda
cat(sprintf("Cross-validation took %0.2f seconds.\n",r["elapsed"]))
# Choose the largest value of lambda that is within 1 standard error
# of the smallest misclassification error.
lambda.opt <- out.cv.glmnet$lambda.1se
```
## Evaluate the glmnet predictions
Compute estimates of the disease outcome using the fitted model, and
compare against the observed values.
```{r, results = "hold"}
cat("classification results with lambda = ",lambda.opt,":\n",sep="")
y.glmnet <- c(predict(fit.glmnet,X,s = lambda.opt,type = "class"))
print(table(true = factor(y),pred = factor(y.glmnet)))
```
## Visualize results of glmnet analysis
The first plot shows the evolution of regression coefficients at different
settings of *lambda*. (Note that the intercept is not shown.) Only the
curves for the variables that are selected at the optimal setting
of *lambda* ("lambda.opt") are labeled.
The second plot shows the classification error at different settings of
*lambda*.
The third plot shows the number of nonzero regression coefficients at
different settings of *lambda*.
```{r}
trellis.par.set(par.xlab.text = list(cex = 0.85),
par.ylab.text = list(cex = 0.85),
axis.text = list(cex = 0.75))
# Choose the largest value of lambda that is within 1 standard error
# of the smallest misclassification error.
lambda.opt <- out.cv.glmnet$lambda.1se
# Plot regression coefficients.
lambda <- fit.glmnet$lambda
vars <- setdiff(which(rowSums(abs(coef(fit.glmnet))) > 0),1)
n <- length(vars)
b <- as.matrix(t(coef(fit.glmnet)[vars,]))
i <- coef(fit.glmnet,s = lambda.opt)
i <- rownames(i)[which(i != 0)]
i <- i[-1]
vars.opt <- colnames(b)
vars.opt[!is.element(vars.opt,i)] <- ""
vars.opt <- substring(vars.opt,2)
lab <- expression("more complex" %<-% paste(log[10],lambda) %->%
"less complex")
r <- xyplot(y ~ x,data.frame(x = log10(lambda),y = b[,1]),type = "l",
col = "blue",xlab = lab,ylab = "regression coefficient",
scales = list(x = list(limits = c(-2.35,0.1)),
y = list(limits = c(-0.8,1.2))),
panel = function(x, y, ...) {
panel.xyplot(x,y,...);
panel.abline(v = log10(lambda.opt),col = "orangered",
lwd = 2,lty = "dotted");
ltext(x = -2,y = b[nrow(b),],labels = vars.opt,pos = 2,
offset = 0.5,cex = 0.5);
})
for (i in 2:n)
r <- r + as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),y = b[,i]),
type = "l",col = "blue"))
print(r,split = c(2,1,2,1),more = TRUE)
# Plot classification error.
Y <- predict(fit.glmnet,X,type = "class")
mode(Y) <- "numeric"
print(with(out.cv.glmnet,
xyplot(y ~ x,data.frame(x = log10(lambda),y = cvm),type = "l",
col = "blue",xlab = lab,
ylab = "20-fold cross-validation \n classification error",
scales = list(y = list(limits = c(-0.02,0.45))),
panel = function(x, y, ...) {
panel.xyplot(x,y,...);
panel.abline(v = log10(lambda.opt),col = "orangered",
lwd = 2,lty = "dotted");
}) +
as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),y = cvm),
pch = 20,cex = 0.6,col = "blue")) +
as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),y = cvup),
type = "l",col = "blue",lty = "solid")) +
as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),y = cvlo),
type = "l",col = "blue",lty = "solid")) +
as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),
y = colMeans(abs(Y - y))),
type = "l",col = "darkorange",lwd = 2,
lty = "solid"))),
split = c(1,1,2,2),more = TRUE)
# Plot number of non-zero regression coefficients.
print(with(out.cv.glmnet,
xyplot(y ~ x,data.frame(x = log10(lambda),y = nzero),type = "l",
col = "blue",xlab = lab,
ylab = "number of non-zero \n coefficients",
panel = function(x, y, ...) {
panel.xyplot(x,y,...)
panel.abline(v = log10(lambda.opt),col = "orangered",
lwd = 2,lty = "dotted")
}) +
as.layer(xyplot(y ~ x,data.frame(x = log10(lambda),y = nzero),
pch = 20,cex = 0.6,col = "blue"))),
split = c(1,2,2,2),more = FALSE)
```
## Fit variational approximation to posterior
Fit the fully-factorized variational approximation to the posterior
distribution of the coefficients for a logistic regression model of
the binary outcome (the type of leukemia), with spike-and-slab priors
on the coefficients.
```{r}
r <- system.time(fit.varbvs <- varbvs(X,NULL,y,"binomial",verbose = FALSE))
cat(sprintf("Model fitting took %0.2f seconds.\n",r["elapsed"]))
```
## Evaluate the varbvs predictions
Compute estimates of the disease outcome using the fitted model, and
compare against the observed values.
```{r, results = "hold"}
y.varbvs <- predict(fit.varbvs,X,type = "class")
print(table(true = factor(y),pred = factor(y.varbvs)))
```
## Visualize results of varbvs analysis
The first plot shows the classification error at each setting of
the prior log-odds.
The second plot shows the expected number of variables included in the
model at each setting of the prior log-odds.
The third plot shows the (approximate) probability density of the
prior log-odds parameter.
```{r}
trellis.par.set(par.xlab.text = list(cex = 0.85),
par.ylab.text = list(cex = 0.85),
axis.text = list(cex = 0.75))
# Get the normalized importance weights.
w <- fit.varbvs$w
# Plot classification error at each hyperparameter setting.
sigmoid10 <- function (x)
1/(1 + 10^(-x))
logodds <- fit.varbvs$logodds
log10q <- log10(sigmoid10(logodds))
m <- length(logodds)
err <- rep(0,m)
for (i in 1:m) {
r <- subset(fit.varbvs,logodds == logodds[i])
ypred <- predict(r,X)
err[i] <- mean(y != ypred)
}
lab <- expression("more complex" %<-% paste(log[10],pi) %->% "less complex")
print(xyplot(y ~ x,data.frame(x = log10q,y = err),type = "l",
col = "blue",xlab = lab,ylab = "classification error",
scales = list(x = list(limits = c(-0.9,-3.65)))) +
as.layer(xyplot(y ~ x,data.frame(x = log10q,y = err),
col = "blue",pch = 20,cex = 0.65)),
split = c(1,1,2,2),more = TRUE)
# Plot expected number of included variables at each hyperparameter
# setting.
r <- colSums(fit.varbvs$alpha)
print(xyplot(y ~ x,data.frame(x = log10q,y = r),type = "l",col = "blue",
xlab = lab,ylab = "expected number of\nincluded variables",
scales = list(x = list(limits = c(-0.9,-3.65)),
y = list(log = 10,at = c(1,10,100)))) +
as.layer(xyplot(y ~ x,data.frame(x = log10q,y = r),
col = "blue",pch = 20,cex = 0.65,
scales = list(x = list(limits = c(-0.9,-3.65)),
y = list(log = 10)))),
split = c(1,2,2,2),more = TRUE)
# Plot density of prior inclusion probability hyperparameter.
print(xyplot(y ~ x,data.frame(x = log10q,y = w),type = "l",col = "blue",
xlab = lab,
ylab = expression(paste("posterior probability of ",pi)),
scales = list(x = list(limits = c(-0.9,-3.65)))) +
as.layer(xyplot(y ~ x,data.frame(x = log10q,y = w),
col = "blue",pch = 20,cex = 0.65)),
split = c(2,1,2,1),more = FALSE)
```
### References
Dettling, M. (2004). BagBoosting for tumor classification with gene
expression data. *Bioinformatics* **20**, 3583–3593.
Friedman, J., Hastie, T., Tibshirani, R. (2010) Regularization paths
for generalized linear models via coordinate descent. *Journal of
Statistical Software* **33**, 1–22.
## Session information
This is the version of R and the packages that were used to generate
these results.
```{r}
sessionInfo()
```
|
/scratch/gouwar.j/cran-all/cranData/varbvs/vignettes/leukemia.Rmd
|
#' mBIC for subspace clustering
#'
#' Computes the value of the modified Bayesian Information Criterion (mBIC) for
#' a given data set partition and the clusters' dimensionalities. In each cluster
#' the variables are assumed to be spanned by a few factors; under maximum
#' likelihood those factors turn out to be the principal components.
#' By default an informative prior distribution on models is used.
#'
#'
#' @param X A matrix with only quantitative variables.
#' @param segmentation A vector, segmentation for which likelihood is computed.
#' Clusters numbers should be from range [1, numb.clusters].
#' @param dims A vector of integers, dimensions of subspaces. Number of
#' principal components (fixed or chosen by PESEL criterion) that span each
#' subspace.
#' @param numb.clusters An integer, number of clusters.
#' @param max.dim An integer, upper bound for allowed dimension of a subspace.
#' @param flat.prior A boolean, if TRUE (default is FALSE) then flat prior on
#' models is used.
#' @keywords internal
#' @return Value of mBIC
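#' @examples
#' \dontrun{
#' # Illustrative sketch (not part of the original documentation): evaluate the
#' # criterion on simulated data, using the true segmentation and dimensions.
#' sim <- data.simulation(n = 30, SNR = 1, K = 2, numb.vars = 10, max.dim = 2)
#' cluster.pca.BIC(sim$X, segmentation = sim$s, dims = sim$dims,
#'                 numb.clusters = 2, max.dim = 2)
#' }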
cluster.pca.BIC <- function(X, segmentation, dims, numb.clusters, max.dim, flat.prior = FALSE) {
if (!is.matrix(X)) {
# if X is one variable it is stored as vector
X <- matrix(X, ncol = 1)
}
D <- dim(X)[1]
p <- dim(X)[2]
formula <- rep(0, numb.clusters)
for (k in 1:numb.clusters) {
# one cluster
dimk <- dims[k]
Xk <- X[, segmentation == k, drop = F]
if (dim(Xk)[2] > dimk && dim(Xk)[1] > dimk) {
formula[k] <- pesel(X = Xk, npc.min = dimk, npc.max = dimk, scale = FALSE,
method = "heterogenous")$vals[1]
} else {
warning("The dimensionality of the cluster was greater or equal than max(number of observation, number of variables) in the cluster.
Ignoring the cluster during mBIC calculation")
formula[k] <- 0
}
}
# apriori
apriori.segmentations <- -p * log(numb.clusters)
apriori.dimensions <- -log(max.dim) * numb.clusters
BIC <- sum(formula)
if (!flat.prior) {
BIC <- BIC + apriori.segmentations + apriori.dimensions
}
return(BIC)
}
#' Chooses a subspace for a variable
#'
#' Selects the subspace closest to a given variable. To select the subspace, the method
#' considers (for every subspace) a subset of its principal components and fits
#' a linear model with the variable as the response. The method then chooses
#' the subspace for which the value of BIC is the highest.
#'
#' @param variable A variable to be assigned.
#' @param pcas Orthogonal basis for each of the subspaces.
#' @param number.clusters Number of subspaces (clusters).
#' @param show.warnings A boolean - if set to TRUE all warnings are displayed, default value is FALSE.
#' @keywords internal
#' @return index Number of the subspace most similar to the variable.
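#' @examples
#' \dontrun{
#' # Illustrative sketch (not from the original documentation): b1 and b2 are
#' # hypothetical one-dimensional bases; in the package they come from
#' # calculate.pcas. The variable v lies close to b1, so 1 should be returned.
#' set.seed(1)
#' b1 <- matrix(rnorm(50), ncol = 1)
#' b2 <- matrix(rnorm(50), ncol = 1)
#' v <- b1[, 1] + rnorm(50, sd = 0.1)
#' choose.cluster.BIC(v, pcas = list(b1, b2), number.clusters = 2)
#' }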
choose.cluster.BIC <- function(variable, pcas, number.clusters, show.warnings = FALSE) {
BICs <- NULL
for (i in 1:number.clusters) {
nparams <- ncol(pcas[[i]])
n <- length(variable)
res <- fastLmPure(pcas[[i]], variable, method = 0L)$residuals
sigma.hat <- sqrt(sum(res^2)/n)
if (sigma.hat < 1e-15 && show.warnings) {
warning("In function choose.cluster.BIC: estimated value of noise in cluster is <1e-15. It might corrupt the result.")
}
loglik <- sum(dnorm(res, 0, sigma.hat, log = T))
BICs[i] <- loglik - nparams * log(n)/2
}
which.max(BICs)
}
#' Calculates principal components for every cluster
#'
#' For a given segmentation this function estimates the dimensionality of each cluster (or uses a fixed dimensionality)
#' and for each cluster computes a number of principal components equal to this dimensionality.
#'
#' @param X A data matrix.
#' @param segmentation A vector, segmentation of variables into clusters.
#' @param number.clusters An integer, number of subspaces (clusters).
#' @param max.subspace.dim An integer, upper bound for allowed dimension of subspace.
#' @param estimate.dimensions A boolean, if TRUE subspaces dimensions are estimated using PESEL.
#' @keywords internal
#' @return A subset of principal components for every cluster.
calculate.pcas <- function(X, segmentation, number.clusters, max.subspace.dim, estimate.dimensions) {
rowNumb <- dim(X)[1]
pcas <- lapply(1:number.clusters, function(k) {
Xk <- X[, segmentation == k, drop = F]
sub.dim <- dim(Xk)
if (sub.dim[2] > 0) {
a <- summary(prcomp(x = Xk))
if (estimate.dimensions) {
max.dim <- min(max.subspace.dim, floor(sqrt(sub.dim[2])), sub.dim[1])
cut <- max(1, pesel(X = Xk, npc.min = 1, npc.max = max.dim, scale = FALSE,
method = "heterogenous")$nPCs)
} else {
cut <- min(max.subspace.dim, floor(sqrt(sub.dim[2])), sub.dim[1])
}
return(matrix(a$x[, 1:cut], nrow = rowNumb))
} else {
return(matrix(rnorm(rowNumb), nrow = rowNumb, ncol = 1))
}
})
return(pcas)
}
#' Plot mlcc.fit class object
#'
#' @param x mlcc.fit class object
#' @param ... Further arguments to be passed to or from other methods. They are ignored in this function.
#' @export
#' @keywords internal
plot.mlcc.fit <- function(x, ...) {
clusterNumbs <- lapply(x$all.fit, function(y) y$nClusters)
BICVals <- lapply(x$all.fit, function(y) y$BIC)
plot.default(clusterNumbs, BICVals, type = "b", xaxt = "n", ylab = "BIC", xlab = "Number of clusters")
axis(side = 1, labels = clusterNumbs, at = clusterNumbs)
}
#' Print mlcc.fit class object
#'
#' @param x mlcc.fit class object
#' @param ... Further arguments to be passed to or from other methods. They are ignored in this function.
#' @export
#' @keywords internal
print.mlcc.fit <- function(x, ...) {
cat("$nClusters: ", x$nClusters, "\n")
cat("$segmentation:\n")
print(x$segmentation)
cat("$BIC: ", x$BIC, "\n")
cat("$subspacesDimensions:\n", unlist(x$subspacesDimensions), "\n")
}
#' Print mlcc.reps.fit class object
#'
#' @param x mlcc.reps.fit class object
#' @param ... Further arguments to be passed to or from other methods. They are
#' ignored in this function.
#' @export
#' @keywords internal
print.mlcc.reps.fit <- function(x, ...) {
cat("$segmentation:\n")
print(x$segmentation)
cat("$BIC: ", x$BIC, "\n")
cat("$basis:\n")
cat(str(x$basis))
}
#' Print clusters obtained from MLCC
#'
#' @param data The original data set.
#' @param segmentation A vector, segmentation of variables into clusters.
#' @export
show.clusters <- function(data, segmentation) {
data <- as.data.frame(data)
max_cluster_size <- max(as.data.frame(table(segmentation))$Freq)
clusters <- lapply(1:max(segmentation), function(i) {
colnames_in_cluster <- colnames(data)[segmentation == i]
current_cluster_size <- length(colnames_in_cluster)
c(colnames_in_cluster,
rep("-", times = max_cluster_size - current_cluster_size))
})
clusters <- as.data.frame(clusters)
colnames(clusters) <- paste("cluster", 1:max(segmentation), sep = "_")
print(clusters)
}
|
/scratch/gouwar.j/cran-all/cranData/varclust/R/auxiliary.functions.R
|
#' Simulates subspace clustering data
#'
#' Generates data for simulation with a low-rank subspace structure: variables
#' are clustered and each cluster has a low-rank representation. Factors that
#' span subspaces are not shared between clusters.
#'
#' @param n An integer, number of individuals.
#' @param SNR A numeric, signal to noise ratio measured as variance of the
#' variable, element of a subspace, to the variance of noise.
#' @param K An integer, number of subspaces.
#' @param numb.vars An integer, number of variables in each subspace.
#' @param max.dim An integer, if equal.dims is TRUE then max.dim is dimension of
#' each subspace. If equal.dims is FALSE then subspaces dimensions are drawn
#' from uniform distribution on [min.dim,max.dim].
#' @param min.dim An integer, minimal dimension of a subspace.
#' @param equal.dims A boolean, if TRUE (value set by default) all clusters are
#' of the same dimension.
#' @export
#' @return A list consisting of: \item{X}{matrix, generated data}
#' \item{signals}{matrix, data without noise} \item{dims}{vector, dimensions
#' of subspaces} \item{factors}{matrix, columns of which span subspaces}
#' \item{s}{vector, true partition of variables}
#' @examples
#' sim.data <- data.simulation()
#' sim.data2 <- data.simulation(n = 30, SNR = 2, K = 5, numb.vars = 20,
#' max.dim = 3, equal.dims = FALSE)
data.simulation <- function(n = 100, SNR = 1, K = 10, numb.vars = 30, max.dim = 2,
min.dim = 1, equal.dims = TRUE) {
sigma <- 1/SNR
# subspaces dimensions depend on equal.dims value
if (equal.dims) {
dims <- rep(max.dim, K)
} else {
dims <- sample(min.dim:max.dim, K, replace = T)
}
X <- NULL
Y <- NULL
s <- NULL
factors <- NULL
for (j in 1:K) {
Z <- qr.Q(qr(replicate(dims[j], rnorm(n, 0, 1))))
coeff <- matrix(runif(dims[j] * numb.vars, 0.1, 1) * sign(runif(dims[j] *
numb.vars, -1, 1)), nrow = dims[j])
SIGNAL <- Z %*% coeff
SIGNAL <- scale(SIGNAL)
Y <- cbind(Y, SIGNAL)
factors <- cbind(factors, Z)
X <- cbind(X, SIGNAL + replicate(numb.vars, rnorm(n, 0, sigma)))
s <- c(s, rep(j, numb.vars))
}
return(list(X = X, signals = Y, factors = factors, dims = dims, s = s))
}
#' Simulates subspace clustering data with shared factors
#'
#' Generates data for simulation with a low-rank subspace structure: variables
#' are clustered and each cluster has a low-rank representation. Factors that
#' span subspaces are shared between clusters.
#'
#' @inheritParams data.simulation
#' @param numb.factors An integer, number of factors from which subspaces basis
#' will be drawn.
#' @param separation.parameter a numeric, coefficients of variables in each
#' subspace basis are drawn from range [separation.parameter,1]
#' @export
#' @return A list consisting of: \item{X}{matrix, generated data}
#' \item{signals}{matrix, data without noise} \item{factors}{matrix, columns
#' of which span subspaces} \item{indices}{list of vectors, indices of factors
#' that span subspaces} \item{dims}{vector, dimensions of subspaces}
#' \item{s}{vector, true partition of variables}
#' @examples
#' sim.data <- data.simulation.factors()
#' sim.data2 <- data.simulation.factors(n = 30, SNR = 2, K = 5, numb.vars = 20,
#' numb.factors = 10, max.dim = 3, equal.dims = FALSE, separation.parameter = 0.2)
data.simulation.factors <- function(n = 100, SNR = 1, K = 10, numb.vars = 30, numb.factors = 10,
min.dim = 1, max.dim = 2, equal.dims = TRUE, separation.parameter = 0.1) {
sigma <- 1/SNR
# subspaces dimensions depend on equal.dims value
if (equal.dims) {
dims <- rep(max.dim, K)
} else {
dims <- sample(min.dim:max.dim, K, replace = T)
}
factors <- scale(replicate(numb.factors, rnorm(n, 0, 1)))
X <- NULL
Y <- NULL
s <- NULL
factors.indices <- list()
for (j in 1:K) {
factors.indices[[j]] <- sample(numb.factors, dims[j], replace = FALSE)
Z <- factors[, factors.indices[[j]], drop = FALSE]
coeff <- matrix(runif(dims[j] * numb.vars, separation.parameter, 1) * sign(runif(dims[j] *
numb.vars, -1, 1)), nrow = dims[j])
SIGNAL <- Z %*% coeff
SIGNAL <- scale(SIGNAL)
Y <- cbind(Y, SIGNAL)
X <- cbind(X, SIGNAL + replicate(numb.vars, rnorm(n, 0, sigma)))
s <- c(s, rep(j, numb.vars))
}
return(list(X = X, signals = Y, factors = factors, indices = factors.indices,
dims = dims, s = s))
}
|
/scratch/gouwar.j/cran-all/cranData/varclust/R/data.simulation.R
|
#' Computes integration and acontamination of the clustering
#'
#' Integration and acontamination are measures of the quality of a clustering
#' with respect to a reference (true) partition. Let \eqn{X = (x_1, \ldots x_p)} be the
#' data set, \eqn{A} be a partition into clusters \eqn{A_1, \ldots A_n} (true
#' partition) and \eqn{B} be a partition into clusters \eqn{B_1, \ldots, B_m}.
#' Then for cluster \eqn{A_j} the integration is equal to: \deqn{Int(A_j) =
#' \frac{\max_{k = 1, \ldots, m} \# \{ i \in \{ 1, \ldots p \}: x_i \in A_j
#' \wedge x_i \in B_k \} }{\# A_j}} The \eqn{B_k} for which the value is
#' maximized is called the integrating cluster of \eqn{A_j}. The
#' integration for the whole clustering is \eqn{Int(A,B) = \frac{1}{n}
#' \sum_{j=1}^n Int(A_j)}. The acontamination is defined by: \deqn{Acont(A_j) =
#' \frac{ \# \{ i \in \{ 1, \ldots p \}: x_i \in A_j \wedge x_i \in B_k \} }{\#
#' B_k}} where \eqn{B_k} is the integrating cluster for \eqn{A_j}. Then the
#' acontamination for the whole data set is \eqn{Acont(A,B) = \frac{1}{n}
#' \sum_{j=1}^n Acont(A_j)}.
#'
#' @param group A vector, first partition.
#' @param true_group A vector, second (reference) partition.
#' @references {M. Sołtys. Metody analizy skupień. Master’s thesis, Wrocław
#' University of Technology, 2010}
#' @export
#' @return An array containing values of integration and acontamination.
#' @examples
#' \donttest{
#' sim.data <- data.simulation(n = 20, SNR = 1, K = 2, numb.vars = 50, max.dim = 2)
#' true_segmentation <- rep(1:2, each=50)
#' mlcc.fit <- mlcc.reps(sim.data$X, numb.clusters = 2, max.dim = 2, numb.cores=1)
#' integration(mlcc.fit$segmentation, true_segmentation)}
#'
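#' # A tiny hand-checkable sketch (added for illustration): two true clusters of
#' # sizes 3 and 2, with one variable of the second cluster mislabelled;
#' # this gives integration 0.75 and acontamination 0.875.
#' integration(c(1, 1, 1, 1, 2), c(1, 1, 1, 2, 2))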
integration <- function(group, true_group) {
n <- length(group)
K1 <- max(unique(group))
K2 <- max(unique(true_group))
if (n != length(true_group)) {
stop("Partitions are of different lengths")
}
integrationMatrix <- matrix(0, nrow = K1, ncol = K2)
for (i in 1:n) {
integrationMatrix[group[i], true_group[i]] = integrationMatrix[group[i],
true_group[i]] + 1
}
clusters <- apply(integrationMatrix, 2, max)
cluster_indices <- apply(integrationMatrix, 2, which.max)
sizes_true <- apply(integrationMatrix, 2, sum)
sizes_group <- apply(integrationMatrix, 1, sum)
int <- clusters/sizes_true
acont <- clusters/sizes_group[cluster_indices]
return(c(mean(int), mean(acont)))
}
|
/scratch/gouwar.j/cran-all/cranData/varclust/R/integration.R
|
#' Computes misclassification rate
#'
#' Misclassification is a commonly used performance measure in subspace
#' clustering. It allows one to compare two partitions with the same number of
#' clusters.
#'
#' As computing the exact value of misclassification requires checking all
#' permutations and is therefore intractable even for a modest number of clusters,
#' a heuristic approach is used. It is assumed that there are K classes of at
#' most M elements each. An additional requirement is that class labels are from
#' the range [1, K].
#'
#' @param group A vector, first partition.
#' @param true_group A vector, second (reference) partition.
#' @param M An integer, maximal number of elements in one class.
#' @param K An integer, number of classes.
#' @references {R. Vidal. Subspace clustering. Signal Processing Magazine, IEEE,
#' 28(2):52-68,2011}
#' @export
#' @return Misclassification rate.
#' @examples
#' \donttest{
#' sim.data <- data.simulation(n = 100, SNR = 1, K = 5, numb.vars = 30, max.dim = 2)
#' mlcc.fit <- mlcc.reps(sim.data$X, numb.clusters = 5, numb.runs = 20, max.dim = 2, numb.cores=1)
#' misclassification(mlcc.fit$segmentation,sim.data$s, 30, 5)
#'
#'
#' #one can use this function not only for clusters
#' partition1 <- sample(10, 300, replace = TRUE)
#' partition2 <- sample(10, 300, replace = TRUE)
#' misclassification(partition1, partition1, max(table(partition1)), 10)
#' misclassification(partition1, partition2, max(table(partition2)), 10)}
misclassification <-function(group, true_group, M, K) {
if (length(group) != length(true_group))
stop("Partitions are of different lengths")
forbidden <- NULL
suma <- 0
nG = max(group);
for (i in M:1) { # different concordance levels
for (j in 1:nG) { #subspaces numbers (found)
if (sum(j==forbidden)==0) { #subspace not yet used
for (k in 1:K) { # subspaces numbers (true)
if (sum(j==group[true_group==k])==i) {
suma <- suma + i
forbidden <- c(forbidden, j)
break
}
}
}
}
}
mis <- 1 - suma/length(true_group)
return(mis)
}
|
/scratch/gouwar.j/cran-all/cranData/varclust/R/misclassification.R
|
#' Multiple Latent Components Clustering - Subspace clustering with automatic
#' estimation of number of clusters and their dimension
#'
#' This function is an implementation of the Multiple Latent Components Clustering
#' (MLCC) algorithm, which clusters quantitative variables into a number of groups
#' chosen using mBIC. For each considered number of clusters in
#' \emph{numb.clusters} the \code{\link{mlcc.reps}} function is called. It invokes
#' a K-means based algorithm (\code{\link{mlcc.kmeans}}) finding a local minimum of
#' mBIC, which is run a given number of times (\emph{numb.runs}) with different
#' initializations. The best partition is chosen with mBIC (see
#' \code{\link{mlcc.reps}} function).
#'
#' @param X A data frame or a matrix with only continuous variables.
#' @param numb.clusters A vector, numbers of clusters to be checked.
#' @param numb.runs An integer, number of runs (initializations) of
#' \code{\link{mlcc.kmeans}}.
#' @param stop.criterion An integer, if an iteration of
#' \code{\link{mlcc.kmeans}} algorithm makes less changes in partitions than
#' \code{stop.criterion}, \code{\link{mlcc.kmeans}} stops.
#' @param max.iter An integer, maximum number of iterations of the loop in
#' \code{\link{mlcc.kmeans}} algorithm.
#' @param max.dim An integer, if estimate.dimensions is FALSE then max.dim is
#' dimension of each subspace. If estimate.dimensions is TRUE then subspaces
#' dimensions are estimated from the range [1, max.dim].
#' @param scale A boolean, if TRUE (value set by default) then variables in
#' dataset are scaled to zero mean and unit variance.
#' @param numb.cores An integer, number of cores to be used, by default all
#' cores are used.
#' @param greedy A boolean, if TRUE (value set by default) the clusters are
#' estimated in a greedy way - first local minimum of mBIC is chosen.
#' @param estimate.dimensions A boolean, if TRUE (value set by default)
#' subspaces dimensions are estimated.
#' @param verbose A boolean, if TRUE plot with mBIC values for different numbers
#' of clusters is produced and values of mBIC, computed for every number of
#' clusters and subspaces dimensions, are printed (value set by default is
#' FALSE).
#' @param flat.prior A boolean, if TRUE then, instead of an informative prior
#' that takes into account number of models for a given number of clusters,
#' flat prior is used.
#' @param show.warnings A boolean, if set to TRUE all warnings are displayed,
#' default value is FALSE.
#' @export
#' @return An object of class mlcc.fit consisting of \item{segmentation}{a
#' vector containing the partition of the variables} \item{BIC}{numeric, value
#' of mBIC} \item{subspacesDimensions}{a list containing dimensions of the
#' subspaces} \item{nClusters}{an integer, estimated number of clusters}
#' \item{factors}{a list of matrices, basis for each subspace}
#' \item{all.fit}{a list of segmentation, mBIC, subspaces dimension for all
#' numbers of clusters considered for an estimated subspace dimensions}
#' \item{all.fit.dims}{a list of lists of segmentation, mBIC, subspaces
#' dimension for all numbers of clusters and subspaces dimensions considered}
#' @examples
#' \donttest{
#' sim.data <- data.simulation(n = 50, SNR = 1, K = 3, numb.vars = 50, max.dim = 3)
#' mlcc.res <- mlcc.bic(sim.data$X, numb.clusters = 1:5, numb.runs = 20, numb.cores = 1, verbose=TRUE)
#' show.clusters(sim.data$X, mlcc.res$segmentation)
#' }
mlcc.bic <- function(X, numb.clusters = 1:10, numb.runs = 30, stop.criterion = 1,
max.iter = 30, max.dim = 4, scale = TRUE, numb.cores = NULL, greedy = TRUE, estimate.dimensions = TRUE,
verbose = FALSE, flat.prior = FALSE, show.warnings = FALSE) {
if (is.data.frame(X)) {
warning("X is not a matrix. Casting to matrix.")
X <- as.matrix(X)
}
if (any(is.na(X))) {
warning("Missing values are imputed by the mean of the variable")
X[is.na(X)] = matrix(apply(X, 2, mean, na.rm = TRUE), ncol = ncol(X), nrow = nrow(X),
byrow = TRUE)[is.na(X)]
}
if (any(!sapply(X, is.numeric))) {
auxi <- NULL
for (j in 1:ncol(X)) if (!is.numeric(X[, j])) {
auxi <- c(auxi, j)
}
stop(paste("\nThe following variables are not quantitative: ", auxi))
}
if (scale) {
X <- scale(X)
}
n <- nrow(X)
p <- ncol(X)
greedy.stop <- max(numb.clusters)
results <- list()
if (verbose)
cat("Number of clusters \t BIC \n")
for (i in 1:length(numb.clusters)) {
number.clusters <- numb.clusters[i]
MLCC.fit <- mlcc.reps(X = X, numb.clusters = number.clusters, numb.runs = numb.runs,
max.dim = max.dim, scale = FALSE, numb.cores = numb.cores, estimate.dimensions = estimate.dimensions,
flat.prior = flat.prior, show.warnings = show.warnings)
results[[i]] <- list(segmentation = MLCC.fit$segmentation, BIC = MLCC.fit$BIC,
subspacesDimensions = lapply(MLCC.fit$basis, ncol), nClusters = number.clusters,
factors = MLCC.fit$basis)
if (greedy & (i > 2)) {
if ((results[[i]]$BIC < results[[i - 1]]$BIC) & (results[[i - 2]]$BIC <
results[[i - 1]]$BIC)) {
greedy.stop <- i
if (verbose) {
cat(paste(" ", number.clusters, " ", formatC(results[[i]]$BIC,
digits = ceiling(log(abs(results[[i]]$BIC), 10))), "\n"))
}
break
}
}
if (verbose) {
cat(paste(" ", number.clusters, " ", formatC(results[[i]]$BIC,
digits = ceiling(log(abs(results[[i]]$BIC), 10))), "\n"))
}
}
BICs <- lapply(results, function(res) res$BIC)
if (verbose) {
plot(numb.clusters[1:greedy.stop], BICs, type = "b", xaxt = "n", ylab = "BIC",
xlab = "Number of clusters")
axis(side = 1, labels = numb.clusters[1:greedy.stop], at = numb.clusters[1:greedy.stop])
}
result <- results[[which.max(BICs)]]
result$factors <- lapply(1:result$nClusters, function(i) {
d <- ncol(result$factors[[i]])
colnames(result$factors[[i]]) <- paste(i, 1:d)
result$factors[[i]]
})
result$all.fit <- results
class(result) <- "mlcc.fit"
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/varclust/R/mlcc.bic.R
|
#' Multiple Latent Components Clustering - kmeans algorithm
#'
#' Performs k-means based subspace clustering. The center of each cluster is
#' spanned by some number of principal components. This number can be fixed or
#' estimated by PESEL. The similarity between a variable and a cluster is
#' measured using BIC.
#'
#' @param X A matrix with only continuous variables.
#' @param number.clusters An integer, number of clusters to be used.
#' @param stop.criterion An integer indicating how many changes in partitions
#' triggers stopping the algorithm.
#' @param max.iter An integer, maximum number of iterations of k-means loop.
#' @param max.subspace.dim An integer, maximum dimension of subspaces.
#' @param initial.segmentation A vector, initial segmentation of variables to
#' clusters.
#' @param estimate.dimensions A boolean, if TRUE (value set by default)
#' subspaces dimensions are estimated.
#' @param show.warnings A boolean, if set to TRUE all warnings are displayed,
#' default value is FALSE.
#' @references \emph{Bayesian dimensionality reduction with PCA using penalized semi-integrated likelihood},
#' Piotr Sobczyk, Malgorzata Bogdan, Julie Josse
#' @export
#' @return A list consisting of: \item{segmentation}{a vector containing the
#' partition of the variables} \item{pcas}{a list of matrices, basis vectors
#' for each cluster (subspace)}
#' @examples
#' \donttest{
#' sim.data <- data.simulation(n = 50, SNR = 1, K = 5, numb.vars = 50, max.dim = 3)
#' mlcc.res <- mlcc.kmeans(sim.data$X, number.clusters = 5, max.iter = 20, max.subspace.dim = 3)
#' show.clusters(sim.data$X, mlcc.res$segmentation)
#' }
mlcc.kmeans <- function(X, number.clusters = 2, stop.criterion = 1, max.iter = 30,
max.subspace.dim = 4, initial.segmentation = NULL, estimate.dimensions = TRUE,
show.warnings = FALSE) {
numbVars <- dim(X)[2]
rowNumb <- dim(X)[1]
if (!is.null(initial.segmentation) && length(initial.segmentation) != numbVars) {
stop(paste("The lenght of initial segmentation was incorrect: ", length(initial.segmentation),
".It should be: ", numbVars))
}
if (!is.null(initial.segmentation) && max(initial.segmentation) > number.clusters) {
stop(paste("Too many cluster indices in initial segmentation. Should be in range [1, number.clusters]."))
}
pcas <- list(NULL)
if (is.null(initial.segmentation)) {
los <- sample(1:numbVars, number.clusters)
pcas <- lapply(1:number.clusters, function(i) matrix(X[, los[i]], nrow = rowNumb))
segmentation <- sapply(1:numbVars, function(j) choose.cluster.BIC(X[, j],
pcas, number.clusters, show.warnings))
} else {
segmentation <- initial.segmentation
}
new.segmentation <- segmentation
for (iter in 1:max.iter) {
pcas <- calculate.pcas(X, segmentation, number.clusters, max.subspace.dim,
estimate.dimensions)
new.segmentation <- sapply(1:numbVars, function(j) choose.cluster.BIC(X[,
j], pcas, number.clusters, show.warnings))
if (sum(new.segmentation != segmentation) < stop.criterion)
break
segmentation <- new.segmentation
}
pcas <- calculate.pcas(X, segmentation, number.clusters, max.subspace.dim, estimate.dimensions)
return(list(segmentation = segmentation, pcas = pcas))
}
|
/scratch/gouwar.j/cran-all/cranData/varclust/R/mlcc.kmeans.R
|
#' Multiple Latent Components Clustering - Subspace clustering assuming that the
#' number of clusters is known
#'
#' For a fixed number of clusters the function returns the best partition and a basis
#' for each subspace.
#'
#' In more detail, the \code{\link{mlcc.kmeans}} algorithm is run
#' \emph{numb.runs} times with random or custom initializations. The best
#' partition is selected according to the BIC.
#'
#'
#' @param X A data frame or a matrix with only continuous variables.
#' @param numb.clusters An integer, number of clusters.
#' @param numb.runs An integer, number of runs of the \code{\link{mlcc.kmeans}}
#' algorithm with random initialization.
#' @param stop.criterion An integer, if an iteration of
#' \code{\link{mlcc.kmeans}} algorithm makes less changes in partitions than
#' \code{stop.criterion}, \code{\link{mlcc.kmeans}} stops.
#' @param max.iter An integer, maximum number of iterations of the loop
#' in \code{\link{mlcc.kmeans}} algorithm.
#' @param initial.segmentations A list of vectors, segmentations that user wants
#' to be used as an initial segmentation in \code{\link{mlcc.kmeans}}
#' algorithm.
#' @param max.dim An integer, maximal dimension of subspaces.
#' @param scale A boolean, if TRUE (value set by default) then variables in
#' dataset are scaled to zero mean and unit variance.
#' @param numb.cores An integer, number of cores to be used, by default all
#' cores are used.
#' @param estimate.dimensions A boolean, if TRUE (value set by default)
#' subspaces dimensions are estimated.
#' @param flat.prior A boolean, if TRUE then, instead of a prior that takes into
#' account number of models for a given number of clusters, flat prior is
#' used.
#' @param show.warnings A boolean, if set to TRUE all warnings are displayed,
#' default value is FALSE.
#' @export
#' @return A list consisting of \item{segmentation}{a vector containing the
#' partition of the variables} \item{BIC}{a numeric, value of the mBIC}
#' \item{basis}{a list of matrices, the factors for each of the subspaces}
#' @examples
#' \donttest{
#' sim.data <- data.simulation(n = 50, SNR = 1, K = 5, numb.vars = 50, max.dim = 3)
#' mlcc.res <- mlcc.reps(sim.data$X, numb.clusters = 5, numb.runs = 20, max.dim = 4, numb.cores = 1)
#' show.clusters(sim.data$X, mlcc.res$segmentation)
#' }
mlcc.reps <- function(X, numb.clusters = 2, numb.runs = 30, stop.criterion = 1, max.iter = 30,
initial.segmentations = NULL, max.dim = 4, scale = TRUE, numb.cores = NULL, estimate.dimensions = TRUE,
flat.prior = FALSE, show.warnings = FALSE) {
if (is.data.frame(X)) {
warning("X is not a matrix. Casting to matrix.")
X <- as.matrix(X)
}
if (any(is.na(X))) {
warning("Missing values are imputed by the mean of the variable")
X[is.na(X)] = matrix(apply(X, 2, mean, na.rm = TRUE), ncol = ncol(X), nrow = nrow(X),
byrow = TRUE)[is.na(X)]
}
if (any(!sapply(X, is.numeric))) {
auxi <- NULL
for (j in 1:ncol(X)) if (!is.numeric(X[, j])) {
auxi <- c(auxi, j)
}
stop(paste("\nThe following variables are not quantitative: ", auxi))
}
if (is.null(numb.cores)) {
numb.cores <- max(1, detectCores() - 1)
}
cl <- makeCluster(numb.cores)
registerDoParallel(cl)
if (scale) {
X <- scale(X)
}
i <- NULL
BICs <- NULL
segmentations <- NULL
if (is.null(initial.segmentations)) {
segmentations <- foreach(i = (1:numb.runs)) %dorng% {
MLCC.res <- mlcc.kmeans(X = X, number.clusters = numb.clusters, max.subspace.dim = max.dim,
max.iter = max.iter, estimate.dimensions = estimate.dimensions, show.warnings = show.warnings)
current.segmentation <- MLCC.res$segmentation
current.pcas <- MLCC.res$pcas
list(current.segmentation, cluster.pca.BIC(X, current.segmentation, sapply(current.pcas,
ncol), numb.clusters, max.dim = max.dim, flat.prior = flat.prior),
current.pcas)
}
# running user specified clusters
} else {
segmentations <- foreach(i = (1:length(initial.segmentations))) %dorng% {
MLCC.res <- mlcc.kmeans(X = X, number.clusters = numb.clusters, max.subspace.dim = max.dim,
max.iter = max.iter, initial.segmentation = initial.segmentations[[i]],
estimate.dimensions = estimate.dimensions, show.warnings = show.warnings)
current.segmentation <- MLCC.res$segmentation
current.pcas <- MLCC.res$pcas
list(current.segmentation, cluster.pca.BIC(X, current.segmentation, sapply(current.pcas,
ncol), numb.clusters, max.dim = max.dim, flat.prior = flat.prior),
current.pcas)
}
}
stopCluster(cl)
BICs <- unlist(lapply(segmentations, function(x) x[2]))
basis <- lapply(segmentations, function(x) x[3])
segmentations <- lapply(segmentations, function(x) x[[1]])
result <- list(segmentation = segmentations[[which.max(BICs)]], BIC = BICs[which.max(BICs)],
basis = basis[[which.max(BICs)]][[1]])
class(result) <- "mlcc.reps.fit"
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/varclust/R/mlcc.reps.R
|
#' Variable Clustering with Multiple Latent Components Clustering algorithm
#'
#' Package varclust performs clustering of variables, according to a
#' probabilistic model, which assumes that each cluster lies in a low
#' dimensional subspace. Segmentation of variables, number of clusters and their
#' dimensions are selected based on the appropriate implementation of the
#' Bayesian Information Criterion.
#'
#' The best candidate models are identified by a specific implementation of the
#' K-means algorithm, in which cluster centers are represented by some number of
#' orthogonal factors (principal components of the variables within a cluster)
#' and the similarity between a given variable and a cluster center depends on the
#' residuals from a linear model fit. Based on the Bayesian Information
#' Criterion (BIC), sums of squares of residuals are appropriately scaled, which
#' avoids an excessive attraction exerted by clusters with larger
#' dimensions. To reduce the chance that a local minimum of the modified BIC
#' (mBIC) is obtained instead of the global one, for every fixed number of
#' clusters in a given range the K-means algorithm is run a large number of times,
#' with different random initializations of cluster centers.
#'
#' The main function of package \pkg{varclust} is \code{\link{mlcc.bic}}, which
#' allows clustering variables in a data set with an unknown number of clusters.
#' The variable partition is computed with a k-means based algorithm. The number of
#' clusters and their dimensions are estimated using mBIC and PESEL
#' respectively. If the number of clusters is known one might use the function
#' \code{\link{mlcc.reps}}, which takes the number of clusters as a parameter. For
#' \code{\link{mlcc.reps}} one might also specify an initial segmentation
#' for the k-means algorithm. This can be useful if the user has some a priori
#' knowledge about the clustering.
#'
#' We also provide two functions to simulate data sets with the described structure.
#' The function \code{\link{data.simulation}} generates the data so that the
#' subspaces are independent and \code{\link{data.simulation.factors}} generates
#' the data where some factors are shared between the subspaces.
#'
#' In addition, two measures of clustering quality are provided.
#' \code{\link{misclassification}} computes the misclassification rate between two
#' partitions. This performance measure is extensively used in image
#' segmentation. The other measure is implemented in the \code{\link{integration}}
#' function.
#'
#' @docType package
#' @name varclust
#' @details Version: 0.9.4
#' @importFrom RcppEigen fastLmPure
#' @importFrom doParallel registerDoParallel
#' @importFrom parallel makeCluster
#' @importFrom parallel stopCluster
#' @importFrom parallel detectCores
#' @importFrom pesel pesel
#' @importFrom graphics axis plot plot.default points
#' @importFrom stats cov dnorm pnorm prcomp rnorm runif var
#' @importFrom utils str
#' @import doRNG
#' @import foreach
#' @author Piotr Sobczyk, Stanislaw Wilczynski, Julie Josse, Malgorzata Bogdan
#'
#' Maintainer: Piotr Sobczyk \email{pj.sobczyk@@gmail.com}
#'
#' @examples
#' \donttest{
#' sim.data <- data.simulation(n = 50, SNR = 1, K = 3, numb.vars = 50, max.dim = 3)
#' mlcc.bic(sim.data$X, numb.clusters = 1:5, numb.runs = 20, numb.cores = 1, verbose = TRUE)
#' mlcc.reps(sim.data$X, numb.clusters = 3, numb.runs = 20, numb.cores = 1)}
NULL
|
/scratch/gouwar.j/cran-all/cranData/varclust/R/varclust.R
|
## ---- results='hide', message=FALSE, warning=FALSE-----------------------
library(varclust)
library(mclust)
## ---- warning=FALSE------------------------------------------------------
comp_file_name <- system.file("extdata", "gene.csv", package = "varclust")
comp <- read.table(comp_file_name, sep=";", header=T, row.names=1)
benchmarkClustering <- c(rep(1, 68), rep(2, 356))
comp <- as.matrix(comp[,-ncol(comp)])
set.seed(2)
mlcc.fit <- mlcc.bic(comp, numb.clusters = 1:10, numb.runs = 10, max.dim = 8, greedy = TRUE,
estimate.dimensions = TRUE, numb.cores = 1, verbose = FALSE)
print(mlcc.fit)
plot(mlcc.fit)
mclust::adjustedRandIndex(mlcc.fit$segmentation, benchmarkClustering)
misclassification(mlcc.fit$segmentation, benchmarkClustering, max(table(benchmarkClustering)), 2)
integration(mlcc.fit$segmentation, benchmarkClustering)
## ---- warning=FALSE------------------------------------------------------
mlcc.fit3 <- mlcc.reps(comp, numb.clusters = 2, numb.runs = 0, max.dim = 8,
initial.segmentations = list(benchmarkClustering), numb.cores = 1)
print(mlcc.fit3)
mclust::adjustedRandIndex(mlcc.fit3$segmentation, benchmarkClustering)
misclassification(mlcc.fit3$segmentation, benchmarkClustering, max(table(benchmarkClustering)), 2)
integration(mlcc.fit3$segmentation, benchmarkClustering)
|
/scratch/gouwar.j/cran-all/cranData/varclust/inst/doc/varclustTutorial.R
|
---
title: "varclust package tutorial"
author: "Piotr Sobczyk"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{varclust tutorial}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
## Tutorial for **varclust** package
#### Introduction
**varclust** is a package that enables dimension reduction via variable clustering.
We assume that each group of variables can be summarized with a few latent variables.
It also provides a function to determine the number of principal components in PCA.
This tutorial will gently introduce you to the usage of the **varclust** package and
familiarize you with its options.
You can install **varclust** from GitHub (current development version).
```
install_github("psobczyk/varclust")
```
or from CRAN
```
install.package("varclust")
```
#### Main usage example
```{r, results='hide', message=FALSE, warning=FALSE}
library(varclust)
library(mclust)
```
Let us consider some real genomic data. We're going to use
[FactoMineR package](http://factominer.free.fr/) data.
As they are no longer available online, we added them to this package.
This data set consists of two types of variables. The first group are gene
expression data. The second is RNA data.
Please note that it may take a few minutes to run the following code:
```{r, warning=FALSE}
comp_file_name <- system.file("extdata", "gene.csv", package = "varclust")
comp <- read.table(comp_file_name, sep=";", header=T, row.names=1)
benchmarkClustering <- c(rep(1, 68), rep(2, 356))
comp <- as.matrix(comp[,-ncol(comp)])
set.seed(2)
mlcc.fit <- mlcc.bic(comp, numb.clusters = 1:10, numb.runs = 10, max.dim = 8, greedy = TRUE,
estimate.dimensions = TRUE, numb.cores = 1, verbose = FALSE)
print(mlcc.fit)
plot(mlcc.fit)
mclust::adjustedRandIndex(mlcc.fit$segmentation, benchmarkClustering)
misclassification(mlcc.fit$segmentation, benchmarkClustering, max(table(benchmarkClustering)), 2)
integration(mlcc.fit$segmentation, benchmarkClustering)
```
Please note that although we use *benchmarkClustering* as a reference, it is not
an oracle. Some variables from expression data can be highly correlated and act together with RNA data.
##### More details about the method
The algorithm aims to reduce the dimensionality of the data by clustering variables.
It is assumed that the variables lie in a few low-rank subspaces. Our iterative algorithm
recovers their partition and estimates the number of clusters and the dimensions
of the subspaces. This kind of problem is called subspace clustering. For a reference
comparing multiple approaches [see here.](http://cis.jhu.edu/~rvidal/publications/SPM-Tutorial-Final.pdf)
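To get a feel for the structure the method assumes, you can generate a small
artificial data set with the package's `data.simulation` function (a quick
illustrative sketch, separate from the analysis above):
```{r}
toy <- data.simulation(n = 20, SNR = 1, K = 3, numb.vars = 10, max.dim = 2)
dim(toy$X)   # 20 observations, 30 variables (3 clusters of 10)
toy$dims     # dimensions of the three generated subspaces
table(toy$s) # true segmentation of the variables
```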
#### Running algorithm with some initial segmentation
You should also use the **mlcc.reps** function if you have some a priori knowledge regarding the true segmentation.
You can enforce the starting point:
```{r, warning=FALSE}
mlcc.fit3 <- mlcc.reps(comp, numb.clusters = 2, numb.runs = 0, max.dim = 8,
initial.segmentations = list(benchmarkClustering), numb.cores = 1)
print(mlcc.fit3)
mclust::adjustedRandIndex(mlcc.fit3$segmentation, benchmarkClustering)
misclassification(mlcc.fit3$segmentation, benchmarkClustering, max(table(benchmarkClustering)), 2)
integration(mlcc.fit3$segmentation, benchmarkClustering)
```
#### Execution time
Execution time of **mlcc.bic** depends mainly on:
1. Number of clusters (*numb.clusters*)
2. Number of variables
3. Number of runs of k-means algorithm (*numb.runs*)
For a data set of 1000 variables and 10 clusters the computation takes about
8 minutes on an Intel(R) Core(TM) i7-4770 CPU @ 3.40GHz.
#### Choosing values of parameters
* If possible one should use multiple cores for the computation. By default all
but one core is used. The user can override this with the **numb.cores** parameter
* For a more precise segmentation one should increase **numb.runs**.
The default value is 30
* The parameter **max.dim** should reflect how large we expect the subspaces to be.
The default value is 4
* If the parameter **greedy** is TRUE (the default) the number of clusters
is estimated in a greedy way, so the program stops after reaching the first
local maximum of mBIC
* If **estimate.dimensions** is TRUE subspace dimensions are estimated;
otherwise all subspaces are assumed to be of dimension *max.dim*
(a call combining these options is sketched below)
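Putting these options together, a call for a somewhat larger problem might look
as follows (an illustrative sketch with example values, not a recommendation):
```{r, eval=FALSE}
mlcc.custom <- mlcc.bic(comp, numb.clusters = 1:10, numb.runs = 40, max.dim = 6,
                        greedy = TRUE, estimate.dimensions = TRUE, numb.cores = 2)
```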
|
/scratch/gouwar.j/cran-all/cranData/varclust/inst/doc/varclustTutorial.Rmd
|
---
title: "varclust package tutorial"
author: "Piotr Sobczyk"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{varclust tutorial}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
## Tutorial for **varclust** package
#### Introduction
**varclust** is a package that enables dimension reduction via variable clustering.
We assume that each group of variables can be summarized with a few latent variables.
It also provides a function to determine the number of principal components in PCA.
This tutorial will gently introduce you to the usage of the **varclust** package and
familiarize you with its options.
You can install **varclust** from GitHub (current development version).
```
install_github("psobczyk/varclust")
```
or from CRAN
```
install.package("varclust")
```
#### Main usage example
```{r, results='hide', message=FALSE, warning=FALSE}
library(varclust)
library(mclust)
```
Let us consider some real genomic data. We're going to use
[FactoMineR package](http://factominer.free.fr/) data.
As they are no longer available online, we added them to this package.
This data set consists of two types of variables. The first group are gene
expression data. The second is RNA data.
Please note that it may take a few minutes to run the following code:
```{r, warning=FALSE}
comp_file_name <- system.file("extdata", "gene.csv", package = "varclust")
comp <- read.table(comp_file_name, sep=";", header=T, row.names=1)
benchmarkClustering <- c(rep(1, 68), rep(2, 356))
comp <- as.matrix(comp[,-ncol(comp)])
set.seed(2)
mlcc.fit <- mlcc.bic(comp, numb.clusters = 1:10, numb.runs = 10, max.dim = 8, greedy = TRUE,
estimate.dimensions = TRUE, numb.cores = 1, verbose = FALSE)
print(mlcc.fit)
plot(mlcc.fit)
mclust::adjustedRandIndex(mlcc.fit$segmentation, benchmarkClustering)
misclassification(mlcc.fit$segmentation, benchmarkClustering, max(table(benchmarkClustering)), 2)
integration(mlcc.fit$segmentation, benchmarkClustering)
```
Please note that although we use *benchmarkClustering* as a reference, it is not
an oracle. Some variables from expression data can be highly correlated and act together with RNA data.
##### More details about the method
The algorithm aims to reduce the dimensionality of the data by clustering variables.
It is assumed that the variables lie in a few low-rank subspaces. Our iterative algorithm
recovers their partition and estimates the number of clusters and the dimensions
of the subspaces. This kind of problem is called subspace clustering. For a reference
comparing multiple approaches [see here.](http://cis.jhu.edu/~rvidal/publications/SPM-Tutorial-Final.pdf)
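To get a feel for the structure the method assumes, you can generate a small
artificial data set with the package's `data.simulation` function (a quick
illustrative sketch, separate from the analysis above):
```{r}
toy <- data.simulation(n = 20, SNR = 1, K = 3, numb.vars = 10, max.dim = 2)
dim(toy$X)   # 20 observations, 30 variables (3 clusters of 10)
toy$dims     # dimensions of the three generated subspaces
table(toy$s) # true segmentation of the variables
```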
#### Running algorithm with some initial segmentation
You should also use the **mlcc.reps** function if you have some a priori knowledge regarding the true segmentation.
You can enforce the starting point:
```{r, warning=FALSE}
mlcc.fit3 <- mlcc.reps(comp, numb.clusters = 2, numb.runs = 0, max.dim = 8,
initial.segmentations = list(benchmarkClustering), numb.cores = 1)
print(mlcc.fit3)
mclust::adjustedRandIndex(mlcc.fit3$segmentation, benchmarkClustering)
misclassification(mlcc.fit3$segmentation, benchmarkClustering, max(table(benchmarkClustering)), 2)
integration(mlcc.fit3$segmentation, benchmarkClustering)
```
#### Execution time
Execution time of **mlcc.bic** depends mainly on:
1. Number of clusters (*numb.clusters*)
2. Number of variables
3. Number of runs of k-means algorithm (*numb.runs*)
For a data set of 1000 variables and 10 clusters the computation takes about
8 minutes on an Intel(R) Core(TM) i7-4770 CPU @ 3.40GHz.
#### Choosing values of parameters
* If possible one should use multiple cores for the computation. By default all
but one core is used. The user can override this with the **numb.cores** parameter
* For a more precise segmentation one should increase **numb.runs**.
The default value is 30
* The parameter **max.dim** should reflect how large we expect the subspaces to be.
The default value is 4
* If the parameter **greedy** is TRUE (the default) the number of clusters
is estimated in a greedy way, so the program stops after reaching the first
local maximum of mBIC
* If **estimate.dimensions** is TRUE subspace dimensions are estimated;
otherwise all subspaces are assumed to be of dimension *max.dim*
(a call combining these options is sketched below)
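Putting these options together, a call for a somewhat larger problem might look
as follows (an illustrative sketch with example values, not a recommendation):
```{r, eval=FALSE}
mlcc.custom <- mlcc.bic(comp, numb.clusters = 1:10, numb.runs = 40, max.dim = 6,
                        greedy = TRUE, estimate.dimensions = TRUE, numb.cores = 2)
```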
|
/scratch/gouwar.j/cran-all/cranData/varclust/vignettes/varclustTutorial.Rmd
|
#assign("p11", NULL, envir = .GlobalEnv)
#assign("p12", NULL, envir = .GlobalEnv)
#assign("p21", NULL, envir = .GlobalEnv)
#assign("p22", NULL, envir = .GlobalEnv)
#p11 <<- par() p22 <<- par() p12 <<- par() p21 <<- par()
#p11 <- par() p22 <- par() p12 <- par() p21 <- par()
.varDiagOptions <- new.env(FALSE, globalenv())
assign("p11", NULL, envir = .varDiagOptions)
assign("p12", NULL, envir = .varDiagOptions)
assign("p21", NULL, envir = .varDiagOptions)
assign("p22", NULL, envir = .varDiagOptions)
gamsph<-function(h,th=rbind(1,1,1)){(0<h)*(h<=th[3])*(th[1]+th[2]*(3/2*(h/th[3])-1/2*(h/th[3])^3))+(h>th[3])*(th[1]+th[2])}
fth<-function(th,y,h1,w1=1){(y-gamsph(h1,th))/w1}
ftc<-function(th,y,h1,w1){(y-gamsph(h1,th))/gamsph(h1,th)}
ftg<-function(th,y,h1,cv1){cv1%*%(y-gamsph(h1,th))}
fts <- function(th, y, h1, cv1) {
cv1 %*% (y-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(h1,th)^0.25)
}
ftsOpt <- function(th, y, h1, cv1) {
ret = cv1 %*% (y-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(h1,th)^0.25)
mean(ret^2)
}
gamsph1<-function(h,th=rbind(1,1,1)){1}
gamsph2<-function(h,th=rbind(1,1,1)){(0<h)*(h<=th[3])*(3/2*(h/th[3])-1/2*(h/th[3])^3)+(h>th[3])}
gamsph3<-function(h,th=rbind(1,1,1)){(0<h)*(h<=th[3])*3/2*th[2]/th[3]*((h/th[3])^3-h/th[3])}
hyperg<-function(r){
f<-1+1.125*r+1.1484375*r^2+1.158007813*r^3+1.16317749*r^4+1.166408539*r^5;
#a<-0.75;
#b<-0.75;
#c<-0.5;
#k<-1;
#f<-1;
#n<-ceiling(10+exp((max(r)-1)*30)*500);
#n<-10;
#for (i in 2:50){
# k<-k*(a+i-2)*(b+i-2)/(c+i-2)*r/(i-1);
# f<-f+k
# }
f}
ficorr<-function(r){gamma(0.75)^2/(sqrt(pi)-gamma(0.75)^2)*((1-r^2)*hyperg(r^2)-1)}
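# estvar: fits the spherical variogram parameters (nugget, partial sill, range)
# to a variogram cloud by iterated generalized least squares on the fourth-root
# scale; h0 holds the pairwise distances and y the halved squared differences.
# At each iteration the correlation of the transformed differences is rebuilt
# from the current fit (via hyperg/ficorr) and the parameters are re-estimated
# with optim. (Descriptive summary inferred from the code below.)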
estvar <- function(h0, y, iter=50, tolerance=0.0002, trace=1, th0=rbind(0,1,1))
{
#EJP added:
#stop("this function requires nlregb (an S-Plus proprietary function) to work")
n<-ceiling(sqrt(2*length(h0)))
# preparation for covgamma
n1<-n*(n-1)/2
# first index of the gamma[i,j] matrix
i1<-matrix(1:n,n,n)
# first part of the row index of the covgamma matrix
k1<-matrix(i1[row(i1)<col(i1)],n1,n1)
# second part of the row index of the covgamma matrix
k2<-matrix(t(i1)[row(i1)<col(i1)],n1,n1)
# first part of the column index of the covgamma matrix
k3<-t(k1)
# second part of the column index of the covgamma matrix
k4<-t(k2)
if(!missing(th0)) {
#EJP outcommented:
#opt<-nlregb(n*(n-1)/2,cbind(0,max(y/2),max(h0)),fts,y=y^0.25,h1=h0,cv1=diag(n1),lower=cbind(0,0,0))
opt<-optim(par = c(0,max(y/2),max(h0)), ftsOpt,
lower=cbind(0,0,0), method = "L-BFGS-B",
y=y^0.25, h1=h0, cv1=diag(n1))
th1 <- opt$par
}
else
th1<-th0
th1<-cbind(0,max(y/2),max(h0))
#th0<-th1_c(3.72635248595876, 15.5844183738953, 1.22109233789852)
#th1<-c(0.0000000,7.6516077,0.7808538)
for (i in 1:iter) {
if(trace>0)
print(i)
gg<-sqrt(2*gamsph(h0,th1))
# column 1, column 2, ...
# the gamma vector is arranged as a matrix
tt<-matrix(gg[(t(i1)-2)*(t(i1)-1)/2+i1],n,n)
# symmetrization
tt1<-tt
tt1[row(tt1)>col(tt1)]<-t(tt)[row(tt1)>col(tt1)]
# clear the diagonal
tt1[row(tt1)==col(tt1)]<-0
# covgamma is computed
cg<-matrix(tt1[(k4-1)*n+k1]+tt1[(k2-1)*n+k3]-tt1[(k3-1)*n+k1]-tt1[(k4-1)*n+k2],n1,n1)
cgcg<-outer(gg,gg,"*")
corg<-sqrt(cgcg)*ficorr((cg*lower.tri(cg))/cgcg)
corg<-sqrt(2)*(sqrt(pi)-gamma(0.75)^2)/pi*(corg+t(corg)+diag(gg))
infm<-solve(corg);
cv<-chol((infm+t(infm))/2);
#sc<-cbind(1/th1[2],1/th1[2],1/th1[3])
#EJP outcommented:
#opt<-nlregb(n*(n-1)/2,th1,fts,y=y^0.25,h1=h0,cv1=cv,lower=cbind(0,0,0))
opt <- optim(par = th1, ftsOpt,
lower=cbind(0,0,0), method = "L-BFGS-B",
y=y^0.25,h1=h0,cv1=cv)
if(trace>0) print(opt$par)
if(sum(abs((th1-opt$par)/(th1+0.00001)))<=tolerance)
break
th1<-opt$par
}
print("Fertig")
v<-list(pars=opt$par)
v$cg<-corg
v$res<-y^0.25-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(h0,v$pars)^0.25
v$lof<-t(v$res)%*%solve(corg,v$res)
v
}
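# varobj: builds a variogram-diagnostics object from a data matrix m whose first
# two columns (judging from the code below) hold the spatial coordinates and
# whose third column holds the observed values; it forms the variogram cloud,
# fits the spherical model via estvar and, if loo = TRUE, computes leave-one-out
# refits and residuals for every location.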
varobj<-function(m,iter=50,tolerance=0.0002,trace=1,loo=FALSE){
n<-dim(m)[1]
#a1<-t(m[,3]-t(matrix(m[,3],n,n)))
#b1<-t(m[,1]-t(matrix(m[,1],n,n)))
#c1<-t(m[,2]-t(matrix(m[,2],n,n)))
a1<-outer(m[,3],m[,3],FUN="-")
b1<-outer(m[,1],m[,1],FUN="-")
c1<-outer(m[,2],m[,2],FUN="-")
#d1<-cbind(sqrt(b1[row(b1)<col(b1)]^2+c1[row(c1)<col(c1)]^2),a1[row(a1)<col(a1)]^2)
y<-a1[row(a1)<col(a1)]^2/2
h0<-sqrt(b1[row(b1)<col(b1)]^2+c1[row(c1)<col(c1)]^2)
v<-estvar(h0,y,iter,tolerance,trace)
XM<-cbind(gamsph1(h0,v$pars),gamsph2(h0,v$pars),gamsph3(h0,v$pars))*(gamsph(h0,v$pars))^(-0.75)/4
v$info<-solve(t(XM)%*%solve(v$cg,XM))
loores<-matrix(0,n,n)
tha<-matrix(0,n,3)
lofa<-matrix(0,n,1)
cda<-matrix(0,n,1)
v$h<-h0
v$y<-y
if(loo==TRUE){
for (i in 1:n){
print(i)
m1<-m[-i,]
a1<-t(m1[,3]-t(matrix(m1[,3],n-1,n-1)))
b1<-t(m1[,1]-t(matrix(m1[,1],n-1,n-1)))
c1<-t(m1[,2]-t(matrix(m1[,2],n-1,n-1)))
y<-a1[row(a1)<col(a1)]^2/2
h0<-sqrt(b1[row(b1)<col(b1)]^2+c1[row(c1)<col(c1)]^2)
z<-estvar(h0,y,iter,tolerance,trace,th0=v$pars)
lofa[i,1]<-v$lof-z$lof
tha[i,]<-z$pars
cda[i,1]<-t(v$pars-z$pars)%*%v$info%*%(v$pars-z$pars)
mm2<-m[i,]
mm3<-t(t(m)-mm2)^2/2
h<-sqrt(mm3[,1]+mm3[,2])
loores[i,]<-mm3[,3]^0.25-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(h,z$pars)^0.25
}
}
v$loores<-loores
v$tha<-tha
v$lofa<-lofa
v$cda<-cda
v$data<-m
class(v)<-"varobj"
v
}
print.varobj<-function(x,...){print(x$pars); print(x$lof);invisible(x)}
#r[row(r)<col(r)]<-v$res
#r<-r+t(r)
PlotDiag.varobj<-function(v, region = NULL, xyi = 0, zmv = 0) {
palette(c("black","cyan","magenta","green3","yellow","blue","white","red"))
n<-length(v$h)
infm<-solve(v$cg);
cv<-chol((infm+t(infm))/2);
XM<-cbind(gamsph1(v$h,v$pars),gamsph2(v$h,v$pars),gamsph3(v$h,v$pars))*(gamsph(v$h,v$pars))^(-0.75)/4
Vare<-v$cg-XM%*%solve(t(XM)%*%solve(v$cg,XM),t(XM))
#sig<-mean(sqrt(diag(Vare)))
e<-v$res
sig<-sqrt(sum(e^2)/(n-3))
gdd<-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h,v$pars)^0.25+e*sig/sqrt(diag(Vare))
r1<-v$loores[row(v$loores)<col(v$loores)]
tloores<-t(v$loores)
r2<-tloores[row(tloores)<col(tloores)]
resi<-v$loores-v$loores
resi[row(resi)<col(resi)]<-v$res
resi<-resi+t(resi)
n0<-length(v$lofa)
xn<-v$data[,c(2,1)]
xy<-matrix(0,n,4)
te<-crossprod(matrix(1,1,n0),t(xn[,1]))
xy[,1]<-te[row(te)<col(te)]
te<-crossprod(t(xn[,1]),matrix(1,1,n0))
xy[,2]<-te[row(te)<col(te)]
te<-crossprod(matrix(1,1,n0),t(xn[,2]))
xy[,3]<-te[row(te)<col(te)]
te<-crossprod(t(xn[,2]),matrix(1,1,n0))
xy[,4]<-te[row(te)<col(te)]
if(!missing(xyi)){
ix<-ceiling(sqrt((2*(xyi)+0.25))-0.5)+1
iy<-(xyi)-ceiling(sqrt((2*(xyi)+0.25))-0.5)/2*(ceiling(sqrt((2*(xyi)+0.25))-0.5)-1)
nl<-n
#*(n-1)/2
ind1<-ceiling(sqrt((2*(1:nl)+0.25))-0.5)+1
ind2<-(1:nl)-ceiling(sqrt((2*(1:nl)+0.25))-0.5)/2*(ceiling(sqrt((2*(1:nl)+0.25))-0.5)-1)
}
paro<-par(no.readonly=TRUE)
par(mfrow=c(2,2), mar = c(3,2,2,1)+.1, bg="white")
#EJP: moved (1,1) to be first plotted:
#graphsheet(win.width=0.5,win.height=0.5,win.left=0,win.top=0,Name="1) MAP")
# plot map view as left plot in first row
dg<-1:length(v$lofa)
for(i in 1:length(v$lofa)){
dg[i]<-sum((0.822*(gamsph(v$h,c(v$tha[i,1],v$tha[i,2],v$tha[i,3]))^0.25-gamsph(v$h,v$pars)^0.25))^2)
}
#if(! exists("zmv")) zmv<-0
#if(zmv==0) plot(xn[,2],xn[,1],xlim=c(-0.6,1.1),ylim=c(-0.28, 0.28))
#if(zmv==0) plot(xn[,2],xn[,1],xlim=c(-1.1,1.1),ylim=c(-0.28, 0.28),lwd=3)
if (zmv==0 && !is.null(region))
plot(xn[,2],xn[,1],xlim=c(min(region[,1]), max(region[,1])),
ylim=c(min(region[,2]), max(region[,2])),lwd=3,
asp = 1, xlab = "", ylab = "")
else
plot(xn[,2],xn[,1],xlim=c(min(xn[,2]), max(xn[,2])),
ylim=c(min(xn[,1]), max(xn[,1])),lwd=3,
asp = 1, xlab = "", ylab = "")
#lolo<-lof[1,1]-lofa
z<-xn[,1]
if(zmv>0){
z<-switch(zmv,v$data[,3],v$cda,v$lofa,dg)
inc<-0.25
rmin<-0.03
epsi<-(max(z)-min(z))/(inc/rmin-1)
# symbols(xn[,2],xn[,1],circles=z-min(z)+epsi,inches=inc,xlim=c(-0.63,1.14),ylim=c(-0.3, 0.28))
if(zmv>0 && !is.null(region))
symbols(xn[,2],xn[,1],circles=z-min(z)+epsi,inches=inc,
xlim=c(min(region[,1]), max(region[,1])),
ylim=c(min(region[,2]), max(region[,2])),lwd=3)
else
symbols(xn[,2],xn[,1],circles=z-min(z)+epsi,inches=inc,
xlim=c(min(xn[,2]), max(xn[,2])),
ylim=c(min(xn[,1]), max(xn[,1])),lwd=3)
# symbols(xn[,2],xn[,1],circles=z-min(z)+epsi,inches=inc,lwd=3)
}
if(!is.null(region)) polygon(region[,1],region[,2],density=0,col=2)
title(paste("Map View",switch(zmv+1,'','(y)',"(Cook's Distance)",'(Mahalanobis Distance)',"(Cook's Distance)")))
#gsdmpv<-dev.cur()
if(!missing(xyi)){
segments(xy[xyi,3],xy[xyi,1],xy[xyi,4],xy[xyi,2],pch=16,col=3,lwd=3)
points(xy[xyi,3],xy[xyi,1],pch=16,col=6)
points(xy[xyi,4],xy[xyi,2],pch=16,col=8)
# identify(xn[,2],xn[,1],plot=T,pts=cbind(xn[ix,2],xn[ix,1]))
text(xn[ix,2],xn[ix,1]-(max(z)-min(z))/10,paste(ix))
# identify(xn[,2],xn[,1],plot=T,pts=cbind(xn[iy,2],xn[iy,1]))
text(xn[iy,2],xn[iy,1]-(max(z)-min(z))/10,paste(iy))
}
assign("p11", par(no.readonly=TRUE), envir = .varDiagOptions)
# EJP-end
#graphsheet(win.width=0.8,win.height=1,win.left=0,win.top=0,Name="Interactive Variogram Plot")
#windows(width = 8, height = 5.5,rescale="R")
#windows()
par(mfg=c(2,1))
#graphsheet(win.width=0.5,win.height=0.5,win.left=0,win.top=0.5,Name="3) LOO")
plot(matrix(cbind(v$res,v$res),n*2,1), matrix(cbind(r1,r2),n*2,1),
pch=1, xlab="", ylab="", lwd=1)
lines(c(min(v$res),max(v$res)),c(min(v$res),max(v$res)),col=8)
segments(v$res,r1,v$res,r2)
title("Leave One Out Residuals")
if(!missing(xyi)){
print("xyi")
print(xyi)
points(v$res[xyi],r1[xyi],pch=18,col=3)
points(v$res[xyi],r2[xyi],pch=18,col=5)
points(t(resi[ix,-ix]),t(v$loores[ix,-ix]),pch=16,col=6)
points(t(resi[iy,-iy]),t(v$loores[iy,-iy]),pch=16,col=8)
segments(v$res[xyi],r1[xyi],v$res[xyi],r2[xyi],col=3,lwd=5)
}
assign("p21", par(no.readonly=TRUE), envir = .varDiagOptions)
cv1<-cv
i<-1:n
di<-dim(v$cg)[1]
if(!missing(xyi)){
# di<-dim(v$cg)[1]
# pm<-diag(di)
# pm[xyi,]<-diag(di)[di,]
# pm[di,]<-diag(di)[xyi,]
# cg1<-pm%*%v$cg%*%pm
# i[n]<-xyi
# i[xyi]<-n
# print(max(abs(cv1-cv)))
i<-c(sample(seq(di)[-xyi]),xyi)
cg1<-v$cg[i,i]
infm<-solve(cg1);
cv1<-chol((infm+t(infm))/2);
}
par(mfg=c(2,2))
#graphsheet(win.width=0.5,win.height=0.5,win.left=0.5,win.top=0.5,Name="4) DCR")
x<-((2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h,v$pars)^0.25)[i]
y<-v$res[i]
cv1<-cv1/cv1[di,di]
plot(cv1%*%x,cv1%*%y,xlab="",ylab="",lwd=1)
if(!missing(xyi))
points(x[n],y[n],pch=16,col=3)
#sm<-lowess(cv1%*%x,cv1%*%y)
#lines(sm$x,sm$y,lwd=3)
glu<-min(cv1%*%x)
glo<-max(cv1%*%x)
lines(c(glu,glo),c(0,0))
title("Decorrelated Residuals")
assign("p22", par(no.readonly=TRUE), envir = .varDiagOptions)
xv<-seq(0.0001,max(v$h),0.01)
par(mfg=c(1,2))
#graphsheet(win.width=0.5,win.height=0.5,win.left=0.5,win.top=0,Name="2) SVC")
plot(v$h,gdd,xlab="",ylab="",lwd=1)
lines(xv,(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(xv,v$pars)^0.25,lwd=3)
title("Studentized Square Root Cloud")
if(!missing(xyi)){
points(v$h[ind1==ix | ind2 == ix],gdd[ind1==ix | ind2 == ix],pch=16,col=6)
points(v$h[ind1==iy | ind2 == iy],gdd[ind1==iy | ind2 == iy],pch=16,col=8)
points(v$h[xyi],gdd[xyi],pch=16,col=3)
}
assign("p12", par(no.readonly=TRUE), envir = .varDiagOptions)
par(paro)
n
}
CookRLF.varobj<-function(v){
n<-length(v$lofa)
lofa<-matrix(0,n,1)
i1<-matrix(1:n,n,n)
for (k in 1:n){
ii<-(i1[row(i1)<col(i1)]==k)|(t(i1)[row(t(i1))<col(t(i1))]==k)
cgt<-v$cg[!ii,!ii]
rt<-v$y[!ii]^0.25-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h[!ii],v$tha[k,])^0.25
lofa[k]<-rt%*%solve(cgt,rt)
}
dg<-1:length(v$lofa)
for(i in 1:length(v$lofa)){
dg[i]<-sum((0.822*(gamsph(v$h,c(v$tha[i,1],v$tha[i,2],v$tha[i,3]))^0.25-gamsph(v$h,v$pars)^0.25))^2)
}
plot((v$lof[1]-lofa)/v$lof[1]*187/19,dg/(3*v$lof[1])*187,ylab="Cook's Distance",xlab="Reduction in Lack of Fit")
identify((v$lof[1]-lofa)/v$lof[1]*187/19,dg/(3*v$lof[1])*187)
}
QQVarcloud.varobj<-function(v){
n<-length(v$h)
infm<-solve(v$cg);
cv<-chol((infm+t(infm))/2);
plot(qnorm(seq(from=1/(2*n),length=n,by=1/n)),sort(cv%*%v$y),
xlab="quantile of standard normal distribution",
ylab="orderd decorrelated residual")
lines(c(-3,3),c(-3,3),col=8,lwd=3)
apply(t(apply(apply(matrix(rnorm(n*100),ncol=100),2,sort),1,quantile,probs=c(0.05/n,1-0.05/n))),2,lines,x=qnorm(seq(from=1/(2*n),length=n,by=1/n)))
}
QQDecorr.varobj<-function(v){
n<-length(v$h)
infm<-solve(v$cg);
cv<-chol((infm+t(infm))/2);
plot(qchisq(seq(from=1/(2*n),length=n,by=1/n),1),sort(v$y/gamsph(v$h,v$pars)),
xlab="quantile of Chi-square distribution",
ylab="ordered value of [Z(s)-Z(s')]^2/(2g(s-s'))")
apply(t(apply(apply(matrix(rchisq(n*100,1),ncol=100),2,sort),1,quantile,probs=c(0.05/n,1-0.05/n))),2,lines,x=qchisq(seq(from=1/(2*n),length=n,by=1/n),1))
lines(c(0,8),c(0,8),col=8,lwd=3)
}
interact.varobj<-function(v,region=NULL,g="s",pchi=0.05,zmv=0){
# identification in the studentized variogram cloud
palette(c("black","cyan","magenta","green3","yellow","blue","white","red"))
n<-length(v$h)
infm<-solve(v$cg);
cv<-chol((infm+t(infm))/2);
XM<-cbind(gamsph1(v$h,v$pars),gamsph2(v$h,v$pars),gamsph3(v$h,v$pars))*(gamsph(v$h,v$pars))^(-0.75)/4
Vare<-v$cg-XM%*%solve(t(XM)%*%solve(v$cg,XM),t(XM))
#sig<-mean(sqrt(diag(Vare)))
e<-v$res
sig<-sqrt(sum(e^2)/(n-3))
gdd<-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h,v$pars)^0.25+e*sig/sqrt(diag(Vare))
xn<-v$data[,c(2,1)]
r1<-v$loores[row(v$loores)<col(v$loores)]
tloores<-t(v$loores)
r2<-tloores[row(tloores)<col(tloores)]
resi<-v$loores-v$loores
resi[row(resi)<col(resi)]<-v$res
resi<-resi+t(resi)
n0<-length(v$lofa)
xn<-v$data[,c(2,1)]
xy<-matrix(0,n,4)
te<-crossprod(matrix(1,1,n0),t(xn[,1]))
xy[,1]<-te[row(te)<col(te)]
te<-crossprod(t(xn[,1]),matrix(1,1,n0))
xy[,2]<-te[row(te)<col(te)]
te<-crossprod(matrix(1,1,n0),t(xn[,2]))
xy[,3]<-te[row(te)<col(te)]
te<-crossprod(t(xn[,2]),matrix(1,1,n0))
xy[,4]<-te[row(te)<col(te)]
if(g=="l"){
par(mfrow=c(2,2), mfg=c(2,1))
par(get("p21", envir = .varDiagOptions))
par(fig=c(0,0.5,0,0.5))
xyi<-identify(matrix(cbind(v$res,v$res),n*2,1),matrix(cbind(r1,r2),n*2,1),plot=FALSE,n=1)
if(xyi>n) xyi<-xyi-n
}
if(g=="m"){
par(mfrow=c(2,2), mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
par(fig=c(0,0.5,0.5,1))
ix0<-identify(xn[,2],xn[,1],plot=TRUE,n=1)
points(xn[ix0,2],xn[ix0,1],pch=16,col=6)
ind1<-ceiling(sqrt((2*(1:n)+0.25))-0.5)+1
ind2<-(1:n)-ceiling(sqrt((2*(1:n)+0.25))-0.5)/2*(ceiling(sqrt((2*(1:n)+0.25))-0.5)-1)
par(get("p12", envir = .varDiagOptions))
par(mfrow=c(2,2),mfg=c(1,2),fig=c(0.5,1,0.5,1))
# par(mfg=c(1,2,2,2))
# par(fig=c(0.5,1,0.5,1))
points(v$h[ind1==ix0 | ind2 == ix0],gdd[ind1==ix0 | ind2 == ix0],pch=16,col=6)
par(mfrow=c(2,2), mfg=c(2,1))
#par(p21)
par(get("p21", envir = .varDiagOptions))
par(fig=c(0,0.5,0,0.5))
points(t(resi[ix0,-ix0]),t(v$loores[ix0,-ix0]),pch=16,col=6)
par(mfrow=c(2,2), mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
par(fig=c(0,0.5,0.5,1))
iy0<-identify(xn[,2],xn[,1],plot=FALSE,n=1)
if(length(iy0)>0){
ix<-max(ix0,iy0)
iy<-min(ix0,iy0)
xyi<-(ix-1)*(ix-2)/2+iy}
else{
xyi<-0
# dev.off()
# PlotDiag.varobj(v,region,zmv=zmv)
# par(mfrow=c(2,2))
# par(mfg=c(1,1,2,2))
# par(p11)
# par(fig=c(0,0.5,0.5,1))
# points(xn[ix0,2],xn[ix0,1],pch=16,col=6)
# identify(xn[,2],xn[,1],plot=T,pts=cbind(xn[ix0,2],xn[ix0,1]))
ind1<-ceiling(sqrt((2*(1:n)+0.25))-0.5)+1
ind2<-(1:n)-ceiling(sqrt((2*(1:n)+0.25))-0.5)/2*(ceiling(sqrt((2*(1:n)+0.25))-0.5)-1)
par(mfrow=c(2,2), mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
points(v$h[ind1==ix0 | ind2 == ix0],gdd[ind1==ix0 | ind2 == ix0],pch=16,col=6)
par(mfrow=c(2,2), mfg=c(2,1))
#par(p21)
par(get("p21", envir = .varDiagOptions))
par(fig=c(0,0.5,0,0.5))
points(t(resi[ix0,-ix0]),t(v$loores[ix0,-ix0]),pch=16,col=6)
}
}
if(g=="s"){
par(mfrow=c(2,2), mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
xyi<-identify(v$h,gdd,plot=FALSE,n=1)
}
if(g=="t"){
par(mfrow=c(2,2), mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
p<-locator(n=500,type="l",col=4)
m<-length(p$x)
lines(p$x[c(m,1)],p$y[c(m,1)],col=2)
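# even-odd (ray-crossing) test: flag variogram-cloud points lying inside the hand-drawn polygon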
i<-t(outer(v$h,p$x,FUN="-"))/(p$x[c(2:m,1)]-p$x)
gt<-apply(t((i*(p$y[c(2:m,1)]-p$y)+p$y))>=gdd&t((i>=0)&(i<=1)),1,"sum")
s<-apply(t((i*(p$y[c(2:m,1)]-p$y)+p$y))<=gdd&t((i>=0)&(i<=1)),1,"sum")
i0<-(s%%2)|(gt%%2)
dev.off()
PlotDiag.varobj(v,region)
par(mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
points(v$h[i0],gdd[i0],pch=16,col=3)
polygon(p,density=0,col=4)
par(mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
par(fig=c(0,0.5,0.5,1))
segments(xy[i0,3],xy[i0,1],xy[i0,4],xy[i0,2],pch=16,col=3,lwd=3)
xyi<-0
}
if(g=="x"){
par(mfrow=c(2,2), mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
i0<-(gdd-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h,v$pars)^0.25)/sig>qnorm(1-pchi/2)
i0a<-(-gdd+(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h,v$pars)^0.25)/sig>qnorm(1-pchi/2)
dev.off()
PlotDiag.varobj(v,region,zmv=zmv)
par(mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
points(v$h[i0],gdd[i0],pch=16,col=3)
points(v$h[i0a],gdd[i0a],pch=16,col=4)
xv<-seq(0.0001,max(v$h),0.01)
# lines(xv,gamsph(xv,v$pars)*qchisq(1-pchi,1),lty=4,lwd=2)
lines(xv,(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(xv,v$pars)^0.25+sig*qnorm(1-pchi/2),lty=4,lwd=2)
lines(xv,(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(xv,v$pars)^0.25-sig*qnorm(1-pchi/2),lty=4,lwd=2)
par(get("p11", envir = .varDiagOptions))
par(mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
print(xy[i0,])
par(fig=c(0,0.5,0.5,1))
segments(xy[i0,3],xy[i0,1],xy[i0,4],xy[i0,2],pch=16,col=3,lwd=2)
segments(xy[i0a,3],xy[i0a,1],xy[i0a,4],xy[i0a,2],pch=16,col=4,lwd=2)
xyi<-0
}
if(g=="n"){
par(mfrow=c(2,2), mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
par(fig=c(0,0.5,0.5,1))
p<-locator(n=500,type="l",pch=16,col=4)
m<-length(p$x)
lines(p$x[c(m,1)],p$y[c(m,1)],col=2)
i<-t(outer(xn[,2],p$x,FUN="-"))/(p$x[c(2:m,1)]-p$x)
gt<-apply(t((i*(p$y[c(2:m,1)]-p$y)+p$y))>=xn[,1]&t((i>=0)&(i<=1)),1,"sum")
s<-apply(t((i*(p$y[c(2:m,1)]-p$y)+p$y))<=xn[,1]&t((i>=0)&(i<=1)),1,"sum")
i0<-(s%%2)|(gt%%2)
nl<-length(v$h)
ind1<-ceiling(sqrt((2*(1:nl)+0.25))-0.5)+1
ind2<-(1:nl)-ceiling(sqrt((2*(1:nl)+0.25))-0.5)/2*(ceiling(sqrt((2*(1:nl)+0.25))-0.5)-1)
i00<-match(ind1,(1:n0)[i0],nomatch=FALSE)&match(ind2,(1:n0)[i0],nomatch=FALSE)
dev.off()
PlotDiag.varobj(v,region)
par(mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
points(v$h[i00],gdd[i00],pch=16,col=3)
par(mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
par(fig=c(0,0.5,0.5,1))
polygon(p,density=0,col=4)
segments(xy[i00,3],xy[i00,1],xy[i00,4],xy[i00,2],pch=16,col=3,lwd=3)
xyi <- 0
}
#print(xyi)
if(g!="t"&g!="x"&g!="n"& xyi>0){
dev.off()
PlotDiag.varobj(v,region,xyi=xyi,zmv=zmv)
}
xyi}
|
/scratch/gouwar.j/cran-all/cranData/vardiag/R/VARDIAG.R
|
#' Extra variables for domain estimation
#'
#' @description The function computes extra variables for domain estimation. Each unique \code{D} row defines a domain. Extra variables are computed for each \code{Y} variable.
#'
#'
#' @param Y Matrix of study variables with numeric values; \code{NA} values are not allowed. Any object convertible to \code{data.table}, variable names as a character vector, or column numbers.
#' @param D Matrix of domain variables. Any object convertible to \code{data.table}, variable names as a character vector, or column numbers. The number of rows of \code{D} must match the number of rows of \code{Y}. Duplicated names are not allowed.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param checking Optional logical variable. If \code{TRUE} (default), the function checks the data for preparation errors; otherwise, no checks are performed.
#'
#' @return Numeric \code{data.table} containing extra variables for domain estimation.
#'
#' @references
#' Carl-Erik Sarndal, Bengt Swensson, Jan Wretman. Model Assisted Survey Sampling. Springer-Verlag, 1992, p.70.
#'
#' @seealso \code{\link{vardom}}, \code{\link{vardomh}}
#'
#' @keywords surveysampling
#' @examples
#'
#' ### Example 0
#'
#' domain(Y = 1, D = "A")
#'
#'
#' ### Example 1
#'
#' Y1 <- as.matrix(1 : 10)
#' colnames(Y1) <- "Y1"
#' D1 <- as.matrix(rep(1, 10))
#' colnames(D1) <- "D1"
#' domain(Y = Y1, D = D1)
#'
#' ### Example 2
#' Y <- matrix(1 : 20, 10, 2)
#' colnames(Y) <- paste0("Y", 1 : 2)
#' D <- matrix(rep(1 : 2, each = 5), 10, 1)
#' colnames(D) <- "D"
#' domain(Y, D)
#'
#' ### Example 3
#' Y <- matrix(1 : 20, 10, 2)
#' colnames(Y) <- paste0("Y", 1 : 2)
#' D <- matrix(rep(1 : 4, each = 5), 10, 2)
#' colnames(D) <- paste0("D", 1 : 2)
#' domain(Y, D)
#'
#' ### Example 4
#' Y <- matrix(1 : 20, 10, 2)
#' colnames(Y) <- paste0("Y", 1 : 2)
#' D <- matrix(c(rep(1 : 2, each = 5), rep(3, 10)), 10, 2)
#' colnames(D) <- paste0("D", 1 : 2)
#' domain(Y, D)
#'
#'
#' @import data.table
#' @import foreach
#' @export domain
domain <- function(Y, D, dataset = NULL, checking = TRUE) {
if (checking) {
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
check.names = TRUE, isnumeric = TRUE, grepls = "__") }
Ynrow <- nrow(Y)
if (checking) {
D <- check_var(vars = D, varn = "Dom", dataset = dataset,
check.names = TRUE, Ynrow = Ynrow, isnumeric = FALSE,
ischaracter = TRUE, dif_name = "percoun", grepls = "__") }
Dom_agg <- unique(D)
setkeyv(Dom_agg, names(Dom_agg))
i <- k <- NULL
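# build one expanded column per (study variable, domain) pair: Y[[i]] where the row belongs to domain k, 0 elsewhere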
domen <- foreach(i = 1 : ncol(Y), .combine = data.table) %:%
foreach(k = 1:nrow(Dom_agg), .combine = data.table) %do%
ifelse(rowSums(D == Dom_agg[k, ][rep(1, Ynrow), ]) == ncol(D), Y[[i]], 0)
domen <- data.table(domen, check.names = TRUE)
setnames(domen, names(domen), namesD(Y = Y, D = D, uniqueD = TRUE))
domen <- data.table(domen, check.names = TRUE)
return(domen[])
}
namesD <- function(Y, D, uniqueD = TRUE) {
if (uniqueD) {Dom_agg <- unique(D)
} else Dom_agg <- D
setkeyv(Dom_agg, names(Dom_agg))
h <- vector(mode = "character", length = nrow(Dom_agg))
for (i in 1:nrow(Dom_agg)) {
cc <- paste(names(D), Dom_agg[i, ], sep = ".")
h[i] <- paste(cc, collapse = "__")
}
foreach(i = 1 : ncol(Y), .combine = c) %do% paste(names(Y)[i], h, sep="__")
}
check_var <- function(vars, varn, varntype = NULL, dataset,
check.names = FALSE, ncols = 0, Yncol = 0,
Ynrow = 0, Xnrow = 0, isnumeric = FALSE,
ischaracter = FALSE, mustbedefined = TRUE,
isvector = FALSE, grepls = NULL, dif_name = "",
namesID1 = "namesid1", duplicatednames = FALSE,
withperiod = TRUE, varnout = NULL, varname = NULL,
PSUs = NULL, country = NULL, countryX = NULL,
years = NULL, Domen = NULL, yearsX = NULL,
periods = NULL, periodsX = NULL, ID_level1 = NULL,
dX = "", use.gender = FALSE, kern_method = "gaussian"){
N <- NULL
if (varn %in% c("g", "q") & (is.null(class(vars)) | any(class(vars) == "function"))) stop("'g' must be numeric", call. = FALSE)
if (is.null(vars)) {
if (Xnrow > 0 & varn %in% c("q", "ind_gr")) { vars <- rep(1, Xnrow)
dataset <- NULL}
if (Ynrow > 0 & varn == "id") { vars <- 1:Ynrow
dataset <- NULL}}
if (!is.null(varntype)) {
if (varntype == "kern_method") if (length(vars) != 1 | !any(vars %in% c("gaussian", "smooth_splines"))) stop("'kern_method' must be gaussian or smooth_splines", call. = FALSE)
if (varntype == "pinteger" & (varn %in% c("boots_count", "percentratio") | (varn %in% c("r", "h_breaks") & kern_method == "smooth_splines"))) {
if (length(vars) != 1 | any(!is.numeric(vars) | vars < 1)) stop(paste0("'", varn, "' must be a positive integer"), call. = FALSE)
if (all(is.numeric(vars))) if (vars %% 1 != 0) stop(paste0("'", varn, "' must be a positive integer"), call. = FALSE)
}
if (varntype == "logical") if (length(vars) != 1 | !any(is.logical(vars))) stop(paste0("'", varn, "' must be logical"), call. = FALSE)
if (kern_method == "smooth_splines") {
if (varntype == "numeric01") if (length(vars) != 1 | any(!is.numeric(vars) | vars < 0 | vars > 1)) {
stop(paste0("'", varn, "' must be a numeric value in [0, 1]"), call. = FALSE) }
} else if (kern_method == "gaussian" & any(varn %in% c("ro", "r"))) vars <- NULL
if (varntype == "integer0100") {
if (length(vars) != 1 | any(!is.integer(vars) | vars < 0 | vars > 100)) {
stop(paste0("'", varn, "' must be a integer value in [0, 100]"), call. = FALSE) }}
if (varntype == "numeric0100") {
if ((length(vars) != 1 & varn != "k") | any(!is.numeric(vars) | vars < 0 | vars > 100)) {
stop(paste0("'", varn, "' must be a numeric value", ifelse(varn != "k", "s", ""), " in [0, 100]"), call. = FALSE) }}
if (varntype == "change_type") if (length(vars) != 1 | any(!(vars %in% c("absolute", "relative")))) {
stop("'change_type' must be 'absolute' or 'relative'", call. = FALSE) }
if (varntype == "method") if (length(vars) != 1 | any(!(vars %in% c("cros", "netchanges")))) {
stop("'method' must be 'cros' or 'netchanges'", call. = FALSE) }
}
if ((!is.null(vars) & is.null(varntype)) | any(varn %in% c("alpha", "percentratio", "percentage",
"order_quant", "kern_method", "h_breaks",
"boots_count", "dh", "method", "use.estVar",
"use.gender", "confidence", "frate",
"change_type", "linratio", "outp_lin",
"outp_res", "netchanges", "withperiod",
"ID_level1_max", "fh_zero", "PSU_level"))
| (any(varn %in% c("ro", "r")) & (kern_method == "gaussian" | !is.null(vars)))) mustbedefined <- FALSE
if (!is.null(vars) & is.null(varntype)) {
if (!withperiod & varn == "period") stop(paste0("'period' must be NULL for those data"), call. = FALSE)
if(!is.null(dataset)) {
dataset <- data.table(dataset)
if (min(vars %in% names(dataset)) != 1) stop(paste0("'", varn, "' does not exist in 'dataset", dX, "'!"), call. = FALSE)
if (min(vars %in% names(dataset)) == 1) vars <- dataset[, vars, with = FALSE]}
vars <- data.table(vars, check.names = check.names)
mkvars <- make.names(rep("vars", length(vars)), unique = TRUE)
mkvarn <- make.names(rep(varn, length(vars)), unique = TRUE)
if (all(names(vars) == mkvars)) setnames(vars, mkvars, mkvarn)
if (ischaracter) vars[, (names(vars)) := lapply(.SD, as.character)]
if (anyNA(vars)) stop(paste0("'", varn, "' has missing values"), call. = FALSE)
if (Ynrow > 0) if (nrow(vars) != Ynrow) stop(paste0("'", varn, "' length must equal the 'Y' row count"), call. = FALSE)
if (Xnrow > 0) if (nrow(vars) != Xnrow) stop(paste0("'", varn, "' length must equal the 'X' row count"), call. = FALSE)
if (Yncol > 0) if (ncol(vars) != Yncol) stop(paste0("'", varn, "' length must equal the 'Y' column count"), call. = FALSE)
if (ncols > 0) if (ncol(vars) != ncols) stop(paste0("'", varn, "' must be a ", ncols, "-column data.frame, matrix or data.table"), call. = FALSE)
if (isnumeric) if(!all(sapply(vars, is.numeric))) stop(paste0("'", varn, "' must be numeric"), call. = FALSE)
if (!is.null(grepls)) if (any(grepl(grepls, names(vars)))) stop(paste0("'", varn, "' column names must not contain '", grepls, "'"), call. = FALSE)
if (any(names(vars) %in% dif_name)) stop(paste0("'", varn, "' must have a different name"), call. = FALSE)
if (any(names(vars) == namesID1)) setnames(vars, names(vars), paste0(names(vars), "_", varn))
if (use.gender & varn %in% c("years", "yearsX")){
parb <- unique(substring(vars[[1]], nchar(vars[[1]])-1, nchar(vars[[1]])))
if (!all(parb %in% c("_1", "_2")) | length(parb) != 2) {
stop(paste0("'", varn, "' must be ended with '_1' and '_2'"), call. = FALSE) }}
if (duplicatednames == TRUE & !is.null(vars)) {
if (any(duplicated(names(vars))))
stop(paste0("'", varn, "' are duplicate column names: "),
paste(names(vars)[duplicated(names(vars))], collapse = ","), call. = FALSE) }
if (ncols == 1 & isvector) vars <- vars[[names(vars)]]
if (any(vars == 0) & varn == "g") stop("'g' value can not be 0", call. = FALSE)
if (varn == "q") if (any(is.infinite(vars))) stop("'q' value can not be infinite", call. = FALSE)
varns <- c(switch(as.integer(!is.null(country)) + 1, NULL, "country"),
switch(as.integer(!is.null(years)) + 1, NULL, "years"),
switch(as.integer(!is.null(periods) & varn != "yearX") + 1, NULL,
paste0(ifelse(varn %in% c("ID_level2", "subperiodsX", "X_ID_level1"), "sub", ""), "periods")))
if (varn %in% c("id", "ID_level2")) {
dd <- vars
if (!is.null(years)) dd <- data.table(years, dd)
if (!is.null(periods)) dd <- data.table(periods, dd)
if (!is.null(country)) dd <- data.table(country, dd)
dd <- nrow(dd[, .N, by = names(dd)][N > 1]) > 0
if (dd) stop(paste0("'", varn, "' has duplicate values within ", paste(varns, collapse = ", ")), call. = FALSE)
}
if (varn %in% c("year1", "year2")) {
setnames(vars, names(vars), names(years))
if (anyNA(merge(vars, years, all.x = TRUE,
by = names(periods), allow.cartesian = TRUE)))
stop(paste0("'", varn, "' row must be exist in 'years'"), call. = FALSE)}
if (varn %in% c("period1", "period2")) {
setnames(vars, names(vars), names(periods))
if (anyNA(merge(vars, periods, all.x = TRUE,
by = names(periods), allow.cartesian = TRUE)))
stop(paste0("'", varn, "' row must be exist in 'period'"), call. = FALSE)}
if (varn == "PSU_sort") {
psuag <- data.table(vars, PSUs)
if (!is.null(periods)) psuag <- data.table(periods, psuag)
psuag <- psuag[, .N, by = names(psuag)][, N := NULL]
psuag <- rbindlist(list(psuag[, .N, by = c(names(periods), names(PSUs))],
psuag[, .N, by = c(names(periods), "vars")]),
use.names = TRUE, fill = TRUE)
if (nrow(psuag[N > 1]) > 0) stop("'PSU_sort' must be equal for each 'PSU'", call. = FALSE)}
if (varn == "gender") {
if (length(unique(vars)) != 2) stop("'gender' must have exactly two values", call. = FALSE)
if (!all(vars %in% 1:2)) stop("'gender' must be coded 1 for males and 2 for females", call. = FALSE) }
if (varn %in% c("countryX", "periodX", "yearsX", "subperiodsX", "X_ID_level1")) {
if (names(vars) != varname) stop(paste0("'", varn, "' names must match the '", varnout, "' names"), call. = FALSE)
ncolvars <- ifelse(is.null(vars), 0, ncol(vars))
if (ncolvars != length(varname)) stop(paste0("'", varn, "' length must equal the '", varnout, "' row count"), call. = FALSE)
}
if (varn == "subperiods") {
subn <- data.table(Domen, years, vars)
subn <- subn[, .N, by = names(subn)]
griez <- c(names(years), names(Domen))
subn <- subn[, .N, by = griez][["N"]]
griez <- NULL
if (!is.null(Domen)) griez <- "'Dom', "
if (any(max(subn) != subn)) stop(paste0("the number of 'subperiods' within each ", griez, "'years' must be ", max(subn)), call. = FALSE)
}
if (varn == "countryX") {
varsX <- vars[, .N, keyby = names(vars)][, N := NULL]
country <- country[, .N, keyby = names(country)][, N := NULL]
if (!identical(country, varsX)) stop("'unique(country)' and 'unique(countryX)' records are different", call. = FALSE)
}
if (varn %in% c("periodX", "yearsX", "subperiodsX", "X_ID_level1")) {
periX <- copy(vars)
if (!is.null(periodsX)) periX <- data.table(periodsX, periX)
if (!is.null(yearsX)) periX <- data.table(yearsX, periX)
if (!is.null(countryX)) periX <- data.table(countryX, periX)
peri <- NULL
if (!is.null(ID_level1)) peri <- ID_level1
if (!is.null(periods)) peri <- switch(as.integer(!is.null(peri)) + 1, data.table(periods), data.table(periods, peri))
if (!is.null(years)) peri <- switch(as.integer(!is.null(peri)) + 1, data.table(years), data.table(years, peri))
if (!is.null(country)) peri <- switch(as.integer(!is.null(peri)) + 1, data.table(country), data.table(country, peri))
peri <- peri[, .N, keyby = names(peri)][, N := NULL]
periX <- periX[, .N, keyby = names(periX)]
varnsX <- paste0(varns, "X")
if (varn == "X_ID_level1") {
varns <- c(varns, "ID_level1")
varnsX <- c(varnsX, "X_ID_level1")
if (nrow(periX[N > 1]) > 0) stop("'X_ID_level1' have duplicates", call. = FALSE) }
periX[, N := NULL]
if (!identical(peri, periX)) {
stop(paste0("'unique(", paste(varns, collapse = ", "), ")' and 'unique(",
paste(varnsX, collapse = ", "), ")' records have different"), call. = FALSE)
}
peri <- periX <- NULL
}
} else if (mustbedefined) stop(paste0("'", varn, "' must be defined!"), call. = FALSE)
if (is.data.table(vars)) vars <- vars[]
return(vars)
}
|
/scratch/gouwar.j/cran-all/cranData/vardpoor/R/domain.R
|
#' Estimation of weighted percentiles
#'
#' @description The function computes the estimates of weighted percentiles.
#'
#' @param Y Study variable (for example equalized disposable income). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param weights Optional weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param sort Optional variable to be used as tie-breaker for sorting. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, the estimates of percentiles are computed for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers.
#' @param period Optional variable for survey period. If supplied, the estimates of percentiles are computed for each survey period. Object convertible to \code{data.table} or variable names as character, column numbers as numeric vector.
#' @param k A vector of values between 0 and 100 specifying the percentiles to be computed (0 gives the minimum, 100 gives the maximum).
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param checking Optional logical variable. If \code{TRUE} (default), the function checks the data for preparation errors; otherwise, no checks are performed.
#'
#' @return A data.table containing the estimates of weighted income percentiles specified by \code{k}.
#'
#' @references
#' Working group on Statistics on Income and Living Conditions (2004) Common cross-sectional EU indicators based on EU-SILC; the gender pay gap. \emph{EU-SILC 131-rev/04}, Eurostat.
#'
#' @seealso \code{\link{linarpt}}, \code{\link{linarpr}}, \code{\link{linqsr}}
#' @keywords Linearization
#'
#' @examples
#' library("laeken")
#' data("eusilc")
#' incPercentile(Y = "eqIncome", weights = "rb050", Dom = "db040", dataset = eusilc)
#'
#' @import data.table
#' @import utils
#' @import laeken
#' @export incPercentile
incPercentile <- function(Y, weights = NULL, sort = NULL,
Dom = NULL, period = NULL,
k = c(20, 80), dataset = NULL,
checking = TRUE) {
## initializations
if (checking) {
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
ncols = 1, isnumeric = TRUE,
isvector = TRUE, grepls = "__")
Ynrow <- length(Y)
weights <- check_var(vars = weights, varn = "weights",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
sort <- check_var(vars = sort, varn = "sort",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
period <- check_var(vars = period, varn = "period",
dataset = dataset, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
duplicatednames = TRUE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
Ynrow = Ynrow, ischaracter = TRUE,
mustbedefined = FALSE, duplicatednames = TRUE,
grepls = "__")
}
dataset <- NULL
namesDom <- names(Dom)
if (!is.null(period)) {
if (!is.null(Dom)) { Dom <- data.table(period, Dom)
} else Dom <- period }
# Percentiles by domain (if requested)
N <- NULL
if(!is.null(Dom)) {
Dom_app <- do.call("paste", c(as.list(Dom), sep = "__"))
q1 <- lapply(split(Dom[, .I], Dom_app), function(i) {
Yind <- Y[i]
weightsind <- weights[i]
sortind <- sort[i]
order <- if(is.null(sortind)) order(Yind) else order(Yind, sortind)
Yind <- Yind[order]
weightsind <- weightsind[order] # also works if 'weights' is NULL
percentile <- weightedQuantile(Yind, weightsind, probs = k / 100,
sorted = FALSE, na.rm = FALSE)
q <- data.table(Dom[i][1], t(percentile))})
q <- rbindlist(q1)
setnames(q, names(q)[ncol(Dom) + 1 : length(k)], paste0("x", k))
if (!is.null(period) & !is.null(namesDom)) {
q1 <- q[, .N, keyby = namesDom][, N := NULL]
q2 <- q[, .N, by = names(period)][, N := NULL]
qrs <- rbindlist(lapply(1:nrow(q2), function(i) {
data.table(q2[i], q1) }))
qrs[, (c(paste0("x", k))) := 0]
qrs <- rbind(q, qrs)
q <- qrs[, lapply(.SD, sum), keyby = names(Dom), .SDcols = paste0("x", k)]
}
setkeyv(q, names(Dom))
} else { order <- if(is.null(sort)) order(Y) else order(Y, sort)
Y <- Y[order]
weights <- weights[order] # also works if 'weights' is NULL
percentile <- weightedQuantile(Y, weights, probs = k / 100,
sorted = TRUE, na.rm = FALSE)
q <- data.table(t(percentile))
setnames(q, names(q)[1 : length(k)], paste0("x", k))
}
## return results
return(q[])
}
|
/scratch/gouwar.j/cran-all/cranData/vardpoor/R/incPercentile.R
|
#' Linearization of the ratio estimator
#'
#' @description Computes linearized variable for the ratio estimator.
#'
#'
#' @param Y Matrix of numerator variables. Any object convertible to \code{data.table} with numeric values, \code{NA} values are not allowed.
#' @param Z Matrix of denominator variables. Any object convertible to \code{data.table} with numeric values, \code{NA} values are not allowed.
#' @param weight Weight variable. One dimensional object convertible to one-column \code{data.table}.
#' @param Dom Optional variables used to define population domains. If supplied, the linearized variables are computed for each domain. An object convertible to \code{data.table}.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param percentratio Positive integer value. All linearized variables are multiplied by the \code{percentratio} value; the default is 1.
#' @param checking Optional logical variable. If \code{TRUE} (default), the function checks the data for preparation errors; otherwise, no checks are performed.
#'
#' @return The function returns the \code{data.table} of the linearized variables for the ratio estimator.
#'
#' @references
#' Carl-Erik Sarndal, Bengt Swensson, Jan Wretman. Model Assisted Survey Sampling. Springer-Verlag, 1992, p.178.
#'
#' @keywords survey
#'
#' @examples
#' library("data.table")
#' Y <- data.table(Y = rchisq(10, 3))
#' Z <- data.table(Z = rchisq(10, 3))
#' weights <- rep(2, 10)
#' data.table(Y, Z, weights,
#' V1 = lin.ratio(Y, Z, weights, percentratio = 1),
#' V10 = lin.ratio(Y, Z, weights, percentratio = 10),
#' V100 = lin.ratio(Y, Z, weights, percentratio = 100))
#'
#' @seealso \code{\link{domain}},
#' \code{\link{vardom}},
#' \code{\link{vardomh}},
#' \code{\link{vardcros}},
#' \code{\link{vardchanges}},
#' \code{\link{vardannual}}
#'
#' @import data.table
#' @export lin.ratio
lin.ratio <- function(Y, Z, weight, Dom = NULL, dataset = NULL, percentratio = 1, checking = TRUE) {
if (checking) {
percentratio <- check_var(vars = percentratio, varn = "percentratio", varntype = "pinteger")
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
check.names = TRUE, isnumeric = TRUE, grepls = "__")
Ynrow <- nrow(Y)
Yncol <- ncol(Y)
Z <- check_var(vars = Z, varn = "Z", dataset = dataset,
check.names = TRUE, Yncol = Yncol, Ynrow = Ynrow,
isnumeric = TRUE, mustbedefined = FALSE)
weight <- check_var(vars = weight, varn = "weight",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE)
}
if (!is.null(Dom)) Yd <- domain(Y, Dom) else Yd <- Y
if (!is.null(Dom)) Zd <- domain(Z, Dom) else Zd <- Z
Y_est <- colSums(Yd * weight)
Z_est <- colSums(Zd * weight)
R_est <- Y_est / Z_est
percentratio <- as.integer(percentratio)
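# Taylor linearization of the ratio estimator (Sarndal et al., 1992, p. 178): columnwise u = (Y - R_est * Z) / Z_est, scaled by percentratio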
U <- percentratio * t((1 / Z_est) * t(Yd - t(R_est * t(Zd))))
return(data.table(U))
}
|
/scratch/gouwar.j/cran-all/cranData/vardpoor/R/lin.ratio.R
|
#' Linearization of at-risk-of-poverty rate
#'
#' @description Estimates the at-risk-of-poverty rate (defined as the proportion of persons with equalized disposable income below at-risk-of-poverty threshold) and computes linearized variable for variance estimation.
#'
#' @param Y Study variable (for example equalized disposable income). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param id Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number or logical vector.
#' @param weight Optional weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number or logical vector.
#' @param Y_thres Variable (for example equalized disposable income) used for computation and linearization of poverty threshold. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. Variable specified for \code{Y} is used as \code{Y_thres} if \code{Y_thres} is not defined.
#' @param wght_thres Weight variable used for computation and linearization of poverty threshold. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number or logical vector. Variable specified for \code{weight} is used as \code{wght_thres} if \code{wght_thres} is not defined.
#' @param sort Optional variable to be used as tie-breaker for sorting. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, linearization of at-risk-of-poverty threshold is done for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers as numeric vector.
#' @param period Optional variable for survey period. If supplied, linearization of at-risk-of-poverty threshold is done for each survey period. Object convertible to \code{data.table} or variable names as character, column numbers as numeric vector.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param percentage A numeric value in range \eqn{\left[ 0,100 \right]}{[0,100]} for \eqn{p} in the formula for at-risk-of-poverty threshold computation:
#' \deqn{\frac{p}{100} \cdot Z_{\frac{\alpha}{100}}.}{p/100 * Z(\alpha/100).}
#' For example, to compute at-risk-of-poverty threshold equal to 60\% of some income quantile, \eqn{p} should be set equal to 60.
#' @param order_quant A numeric value in range \eqn{\left[ 0,100 \right]}{[0,100]} for \eqn{\alpha} in the formula for at-risk-of-poverty threshold computation:
#' \deqn{\frac{p}{100} \cdot Z_{\frac{\alpha}{100}}.}{p/100 * Z(\alpha/100).}
#' For example, to compute at-risk-of-poverty threshold equal to some percentage of median income, \eqn{\alpha} should be set equal to 50.
#' @param var_name A character specifying the name of the linearized variable.
#' @param checking Optional logical variable. If \code{TRUE} (default), the function checks the data for preparation errors; otherwise, no checks are performed.
#'
#' @details The implementation strictly follows the Eurostat definition.
#'
#' @return A list of four objects is returned:
#' \itemize{
#' \item \code{quantile} - a \code{data.table} containing the estimated value of the quantile used for at-risk-of-poverty threshold estimation.
#' \item \code{threshold} - a \code{data.table} containing the estimated at-risk-of-poverty threshold.
#' \item \code{value} - a \code{data.table} containing the estimated at-risk-of-poverty rate (in percentage).
#' \item \code{lin} - a \code{data.table} containing the linearized variables of the at-risk-of-poverty rate (in percentage).
#' }
#'
#' @references
#' Working group on Statistics on Income and Living Conditions (2004) Common cross-sectional EU indicators based on EU-SILC; the gender pay gap. \emph{EU-SILC 131-rev/04}, Eurostat. \cr
#' Guillaume Osier (2009). Variance estimation for complex indicators of poverty and inequality. \emph{Journal of the European Survey Research Association}, Vol.3, No.3, pp. 167-195, ISSN 1864-3361, URL \url{https://ojs.ub.uni-konstanz.de/srm/article/view/369}. \cr
#' Jean-Claude Deville (1999). Variance estimation for complex statistics and estimators: linearization and residual techniques. Survey Methodology, 25, 193-203, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/1999002/article/4882-eng.pdf}. \cr
#'
#'
#' @seealso \code{\link{linarpt}},
#' \code{\link{varpoord}},
#' \code{\link{vardcrospoor}},
#' \code{\link{vardchangespoor}}
#'
#' @keywords Linearization
#'
#' @examples
#' library("data.table")
#' library("laeken")
#' data("eusilc")
#' dataset1 <- data.table(IDd = paste0("V", 1 : nrow(eusilc)), eusilc)
#'
#' # Full population
#' d <- linarpr(Y = "eqIncome", id = "IDd",
#' weight = "rb050", Dom = NULL,
#' dataset = dataset1, percentage = 60,
#' order_quant = 50L)
#' d$value
#'
#' \dontrun{
#' # By domains
#' dd <- linarpr(Y = "eqIncome", id = "IDd",
#' weight = "rb050", Dom = "db040",
#' dataset = dataset1, percentage = 60,
#' order_quant = 50L)
#' dd}
#'
#' @import data.table
#' @import laeken
#'
#' @export linarpr
linarpr <- function(Y, id = NULL, weight = NULL, Y_thres = NULL,
wght_thres = NULL, sort = NULL, Dom = NULL,
period = NULL, dataset = NULL, percentage = 60,
order_quant = 50, var_name = "lin_arpr",
checking = TRUE) {
## initializations
if (min(dim(data.table(var_name)) == 1) != 1) {
stop("'var_name' must have defined one name of the linearized variable")}
if (checking) {
percentage <- check_var(vars = percentage, varn = "percentage",
varntype = "numeric0100")
order_quant <- check_var(vars = order_quant, varn = "order_quant",
varntype = "numeric0100")
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
ncols = 1, isnumeric = TRUE,
isvector = TRUE, grepls = "__")
Ynrow <- length(Y)
Y_thres <- check_var(vars = Y_thres, varn = "Y_thres",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
weight <- check_var(vars = weight, varn = "weight",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
wght_thres <- check_var(vars = wght_thres, varn = "wght_thres",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
sort <- check_var(vars = sort, varn = "sort",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
period <- check_var(vars = period, varn = "period",
dataset = dataset, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
duplicatednames = TRUE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
Ynrow = Ynrow, ischaracter = TRUE,
mustbedefined = FALSE, duplicatednames = TRUE,
grepls = "__")
id <- check_var(vars = id, varn = "id", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
periods = period)
}
dataset <- NULL
if (is.null(Y_thres)) Y_thres <- Y
if (is.null(wght_thres)) wght_thres <- weight
## computations
ind0 <- rep.int(1, length(Y))
period_agg <- period1 <- NULL
if (!is.null(period)) { period1 <- copy(period)
period_agg <- data.table(unique(period))
} else period1 <- data.table(ind = ind0)
period1_agg <- data.table(unique(period1))
# ARPR by domain (if requested)
quantile <- incPercentile(Y = Y_thres,
weights = wght_thres,
sort = sort, Dom = NULL,
period = period,
k = order_quant,
dataset = NULL,
checking = FALSE)
setnames(quantile, names(quantile)[ncol(quantile)], "quantile")
if (ncol(quantile) > 1) setkeyv(quantile, head(names(quantile), -1))
threshold <- copy(quantile)
threshold[, threshold := percentage / 100 * quantile]
threshold[, quantile := NULL]
arpr_id <- id
if (!is.null(period)) arpr_id <- data.table(arpr_id, period)
if (!is.null(Dom)) {
Dom_agg <- data.table(unique(Dom))
setkeyv(Dom_agg, names(Dom_agg))
arpr_v <- c()
arpr_m <- copy(arpr_id)
for(i in 1 : nrow(Dom_agg)) {
g <- c(var_name, paste(names(Dom), as.matrix(Dom_agg[i,]), sep = "."))
var_nams <- do.call(paste, as.list(c(g, sep = "__")))
ind <- as.integer(rowSums(Dom == Dom_agg[i,][ind0,]) == ncol(Dom))
arprl <- lapply(1 : nrow(period1_agg), function(j) {
if (!is.null(period)) {
rown <- cbind(period_agg[j], Dom_agg[i])
setkeyv(rown, names(rown))
rown2 <- copy(rown)
rown <- merge(rown, quantile, all.x = TRUE)
} else {rown <- quantile
rown2 <- Dom_agg[i] }
indj <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
arpr_l <- arprlinCalc(Y1 = Y[indj],
ids = arpr_id[indj],
wght1 = weight[indj],
indicator = ind[indj],
Y_thresh = Y_thres[indj],
wght_thresh = wght_thres[indj],
percent = percentage,
order_quants = order_quant,
quant_val = rown[["quantile"]])
list(arpr = data.table(rown2, arpr = arpr_l$rate_val_pr), lin = arpr_l$lin)
})
arprs <- rbindlist(lapply(arprl, function(x) x[[1]]))
arprlin <- rbindlist(lapply(arprl, function(x) x[[2]]))
setnames(arprlin, names(arprlin), c(names(arpr_id), var_nams))
arpr_m <- merge(arpr_m, arprlin, all.x = TRUE, by = names(arpr_id))
arpr_v <- rbind(arpr_v, arprs)
}
} else { arprl <- lapply(1:nrow(period1_agg), function(j) {
if (!is.null(period)) {
rown <- period_agg[j]
rown <- merge(rown, quantile, all.x = TRUE,
by = names(rown))
} else rown <- quantile
ind2 <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
arpr_l <- arprlinCalc(Y1 = Y[ind2],
ids = arpr_id[ind2],
wght1 = weight[ind2],
indicator = ind0[ind2],
Y_thresh = Y_thres[ind2],
wght_thresh = wght_thres[ind2],
percent = percentage,
order_quants = order_quant,
quant_val = rown[["quantile"]])
if (!is.null(period)) {
arprs <- data.table(period_agg[j], arpr = arpr_l$rate_val_pr)
} else arprs <- data.table(arpr = arpr_l$rate_val_pr)
list(arpr = arprs, lin = arpr_l$lin)
})
arpr_v <- rbindlist(lapply(arprl, function(x) x[[1]]))
arpr_m <- rbindlist(lapply(arprl, function(x) x[[2]]))
setnames(arpr_m, names(arpr_m), c(names(arpr_id), var_name))
}
arpr_m[is.na(arpr_m)] <- 0
setkeyv(arpr_m, names(arpr_id))
return(list(quantile = quantile, threshold = threshold, value = arpr_v, lin = arpr_m))
}
## workhorse
arprlinCalc <- function(Y1, ids, wght1, indicator,
Y_thresh, wght_thresh, percent,
order_quants = NULL, quant_val) {
N <- dat <- eqIncome1 <- NULL
#---- 1. Linearization of the poverty threshold ----
arpt_calcs <- arptlinCalc(inco = Y_thresh, ids = ids,
wght = wght_thresh,
indicator = rep(1, length(ids)),
order_quants = order_quants,
quant_val = quant_val,
percentag = percent)
lin_thres <- arpt_calcs[[names(arpt_calcs)[2]]]
thres_val <- percent / 100 * quant_val
wt <- indicator * wght1
N <- sum(wt) # Estimated (sub)population size
poor <- (Y1 <= thres_val)
rate_val <- sum(wt * poor) / N # Estimated poverty rate
rate_val_pr <- 100 * rate_val
#---- 2. Linearization of the poverty rate -----
h <- bandwith_plug(y = Y1, w = wt)
f_quant2 <- gaussian_kern(inco = Y1, wt = wt,
quant_val = quant_val, hh = h)
#****************************************************************************************
# LINEARIZED VARIABLE OF THE POVERTY RATE (IN %) *
#****************************************************************************************
lin <- 100 * ((1 / N) * indicator * ((Y1 <= thres_val) - rate_val) + f_quant2 * lin_thres)
lin_id <- data.table(ids, lin)
return(list(rate_val = rate_val, rate_val_pr = rate_val_pr, lin = lin_id))
}
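# Hedged usage sketch (not part of the package; the helper name below is hypothetical):
# the linearized variable returned by linarpr() is meant to be passed to a design-based
# variance estimator. Under the simplifying assumption of with-replacement sampling of
# n elements, a naive approximation of the variance of the ARPR estimate could be:
arpr_var_srs_sketch <- function(lin, w) {
  # lin: linearized variable (a column of linarpr()$lin), w: design weights
  z <- w * lin
  length(z) / (length(z) - 1) * sum((z - mean(z))^2)
}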
|
/scratch/gouwar.j/cran-all/cranData/vardpoor/R/linarpr.R
|
#' Linearization of at-risk-of-poverty threshold
#'
#' @description Estimates the at-risk-of-poverty threshold (defined as percentage (usually 60\%) of equalised disposable income after social transfers quantile (usually median)) and computes linearized variable for variance estimation.
#'
#' @param Y Study variable (for example equalised disposable income after social transfers). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param id Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param weight Optional weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param sort Optional variable to be used as tie-breaker for sorting. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, linearization of at-risk-of-poverty threshold is done for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers as numeric vector.
#' @param period Optional variable for survey period. If supplied, linearization of at-risk-of-poverty threshold is done for each survey period. Object convertible to \code{data.table} or variable names as character, column numbers as numeric vector.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param percentage A numeric value in range \eqn{\left[ 0,100 \right]}{[0,100]} for \eqn{p} in the formula for at-risk-of-poverty threshold computation:
#' \deqn{\frac{p}{100} \cdot Z_{\frac{\alpha}{100}}.}{p/100 * Z(\alpha/100).}
#' For example, to compute poverty threshold equal to 60\% of some income quantile, \eqn{p} should be set equal to 60.
#' @param order_quant A numeric value in range \eqn{\left[ 0,100 \right]}{[0,100]} for \eqn{\alpha} in the formula for at-risk-of-poverty threshold computation:
#' \deqn{\frac{p}{100} \cdot Z_{\frac{\alpha}{100}}.}{p/100 * Z(\alpha/100).}
#' For example, to compute poverty threshold equal to some percentage of median income, \eqn{\alpha} should be set equal to 50.
#' @param var_name A character specifying the name of the linearized variable.
#' @param checking Optional logical variable. If \code{TRUE} (default), the function checks the data for preparation errors; otherwise, no checks are performed.
#'
#'
#' @details The implementation strictly follows the Eurostat definition.
#'
#' @return A list of three objects is returned:
#' \itemize{
#' \item \code{quantile} - a \code{data.table} containing the estimated value of the quantile used for at-risk-of-poverty threshold estimation.
#' \item \code{value} - a \code{data.table} containing the estimated at-risk-of-poverty threshold (in percentage).
#' \item \code{lin} - a \code{data.table} containing the linearized variables of the at-risk-of-poverty threshold (in percentage).
#' }
#'
#'
#' @references
#' Working group on Statistics on Income and Living Conditions (2004) Common cross-sectional EU indicators based on EU-SILC; the gender pay gap. \emph{EU-SILC 131-rev/04}, Eurostat. \cr
#' Guillaume Osier (2009). Variance estimation for complex indicators of poverty and inequality. \emph{Journal of the European Survey Research Association}, Vol.3, No.3, pp. 167-195, ISSN 1864-3361, URL \url{https://ojs.ub.uni-konstanz.de/srm/article/view/369}. \cr
#' Jean-Claude Deville (1999). Variance estimation for complex statistics and estimators: linearization and residual techniques. Survey Methodology, 25, 193-203, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/1999002/article/4882-eng.pdf}. \cr
#'
#' @seealso \code{\link{linarpr}}, \code{\link{incPercentile}},
#' \code{\link{varpoord}} , \code{\link{vardcrospoor}},
#' \code{\link{vardchangespoor}}
#'
#' @keywords Linearization
#'
#' @examples
#' library("data.table")
#' library("laeken")
#' data("eusilc")
#' dataset1 <- data.table(IDd = paste0("V", 1 : nrow(eusilc)), eusilc)
#'
#' # Full population
#' d1 <- linarpt(Y = "eqIncome", id = "IDd",
#' weight = "rb050", Dom = NULL,
#' dataset = dataset1, percentage = 60,
#' order_quant = 50L)
#' d1$value
#'
#' \dontrun{
#' # By domains
#' d2 <- linarpt(Y = "eqIncome", id = "IDd",
#' weight = "rb050", Dom = "db040",
#' dataset = dataset1, percentage = 60,
#' order_quant = 50L)
#' d2$value}
#'
#' @import data.table
#' @import laeken
#'
#' @export linarpt
linarpt <- function(Y, id = NULL, weight = NULL, sort = NULL,
Dom = NULL, period=NULL, dataset = NULL,
percentage = 60, order_quant = 50,
var_name = "lin_arpt", checking = TRUE) {
## initializations
if (min(dim(as.data.frame(var_name)) == 1) != 1) {
stop("'var_name' must have defined name of the linearized variable")}
if (checking) {
percentage <- check_var(vars = percentage, varn = "percentage",
varntype = "numeric0100")
order_quant <- check_var(vars = order_quant, varn = "order_quant",
varntype = "numeric0100")
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
ncols = 1, isnumeric = TRUE,
isvector = TRUE, grepls = "__")
Ynrow <- length(Y)
weight <- check_var(vars = weight, varn = "weight",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
sort <- check_var(vars = sort, varn = "sort",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
period <- check_var(vars = period, varn = "period",
dataset = dataset, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
duplicatednames = TRUE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
Ynrow = Ynrow, ischaracter = TRUE,
mustbedefined = FALSE, duplicatednames = TRUE,
grepls = "__")
id <- check_var(vars = id, varn = "id", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
periods = period)
}
dataset <- NULL
## computations
ind0 <- rep.int(1, length(Y))
period_agg <- period1 <- NULL
if (!is.null(period)) { period1 <- copy(period)
period_agg <- data.table(unique(period))
} else period1 <- data.table(ind = ind0)
period1_agg <- data.table(unique(period1))
# ARPT by domain (if requested)
quantile <- incPercentile(Y = Y,
weights = weight,
sort = sort,
Dom = Dom,
period = period,
k = order_quant,
dataset = NULL,
checking = FALSE)
quantile <- data.table(quantile)
setnames(quantile, names(quantile)[ncol(quantile)], "quantile")
if (ncol(quantile) > 1) setkeyv(quantile, head(names(quantile), -1))
threshold <- copy(quantile)
threshold[, threshold := percentage / 100 * quantile]
threshold[, quantile := NULL]
arpt_id <- id
if (!is.null(period)) arpt_id <- data.table(arpt_id, period)
if(!is.null(Dom)) {
Dom_agg <- data.table(unique(Dom))
setkeyv(Dom_agg, names(Dom_agg))
arpt_m <- copy(arpt_id)
for(i in 1 : nrow(Dom_agg)) {
g <- c(var_name, paste(names(Dom), as.matrix(Dom_agg[i,]), sep = "."))
var_nams <- do.call(paste, as.list(c(g, sep = "__")))
ind <- as.integer(rowSums(Dom == Dom_agg[i,][ind0,]) == ncol(Dom))
arpt_l <- lapply(1:nrow(period1_agg), function(j) {
if (!is.null(period)) {
rown <- cbind(period_agg[j], Dom_agg[i])
} else rown <- Dom_agg[i]
setkeyv(rown, names(rown))
rown2 <- copy(rown)
rown <- merge(rown, quantile, all.x = TRUE)
ind2 <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
arptl <- arptlinCalc(inco = Y[ind2],
ids = arpt_id[ind2],
wght = weight[ind2],
indicator = ind[ind2],
order_quants = order_quant,
quant_val = rown[["quantile"]],
percentag = percentage)
})
arptl <- rbindlist(arpt_l)
setnames(arptl, names(arptl), c(names(arpt_id), var_nams))
arpt_m <- merge(arpt_m, arptl, all.x = TRUE, by = names(arpt_id))
}
} else { arptl <- lapply(1 : nrow(period1_agg), function(j) {
if (!is.null(period)) {
rown <- period_agg[j]
setkeyv(rown, names(rown))
rown <- merge(rown, quantile, all.x = TRUE)
} else rown <- quantile
ind2 <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
arptl <- arptlinCalc(inco = Y[ind2],
ids = arpt_id[ind2],
wght = weight[ind2],
indicator = ind0[ind2],
order_quants = order_quant,
quant_val = rown[["quantile"]],
percentag = percentage)
})
arpt_m <- rbindlist(arptl)
setnames(arpt_m, names(arpt_m), c(names(arpt_id), var_name))
}
arpt_m[is.na(arpt_m)] <- 0
setkeyv(arpt_m, names(arpt_id))
return(list(quantile = quantile, value = threshold, lin = arpt_m))
}
## workhorse
arptlinCalc <- function(inco, ids, wght, indicator,
order_quants, quant_val, percentag) {
wt <- wght * indicator
N <- sum(wt); # Estimated (sub)population size
h <- bandwith_plug(y = inco, w = wt)
f_quant <- gaussian_kern(inco = inco, wt = wt,
quant_val = quant_val, hh = h)
#****************************************************************************************
#* LINEARIZED VARIABLE OF THE POVERTY THRESHOLD *
#****************************************************************************************
lin <- - (percentag / 100) * (1 / N) * indicator * ((inco <= quant_val) - order_quants / 100) / f_quant
lin_id <- data.table(ids, lin)
return(lin_id)
}
bandwith_plug <- function(y, w) {
N <- sum(w)
# h=S/N^(1/5)
1 / N * sqrt(N * sum(w * y ^ 2) - (sum(y * w)) ^ 2) * N ^ (-0.2)
}
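# Hedged illustration (assumption, not used by the package): the plug-in rule above
# is h = S / N^(1/5), where S is the weighted standard deviation of y and N the sum
# of weights. An equivalent, more explicit formulation:
bandwith_plug_check <- function(y, w) {
  N <- sum(w)
  S <- sqrt(sum(w * y^2) / N - (sum(w * y) / N)^2) # weighted SD (population form)
  S / N^(1 / 5)
}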
gaussian_kern <- function(inco, wt, quant_val, hh){
N <- sum(wt); # Estimated (sub)population size
u <- (quant_val - inco) / hh
vect_f <- exp(-(u^2) / 2) / sqrt(2 * pi)
f_quant <- sum(vect_f * wt) / (N * hh) # Estimate of F'(quantile)
return(f_quant)
}
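# Hedged equivalent (illustration only): exp(-u^2 / 2) / sqrt(2 * pi) is the standard
# normal density, so gaussian_kern() is a weighted Gaussian kernel density estimate of
# the income distribution evaluated at the quantile. The same value can be written
# with stats::dnorm():
gaussian_kern_dnorm <- function(inco, wt, quant_val, hh) {
  sum(dnorm((quant_val - inco) / hh) * wt) / (sum(wt) * hh)
}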
|
/scratch/gouwar.j/cran-all/cranData/vardpoor/R/linarpt.R
|
#' Linearization of the aggregate replacement ratio
#'
#' @description Estimates the aggregate replacement ratio (defined as the gross median individual pension income of the population aged 65-74 relative to the gross median individual earnings from work of the population aged 50-59, excluding other social benefits) and computes linearized variable for variance estimation.
#'
#' @param Y Numerator variable (for gross pension income). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Y_den Denominator variable (for example gross individual earnings). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param id Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param weight Optional weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param age Age variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param pl085 Retirement variable (Number of months spent in retirement or early retirement). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param month_at_work Variable for total number of month at work (sum of the number of months spent at full-time work as employee, number of months spent at part-time work as employee, number of months spent at full-time work as self-employed (including family worker), number of months spent at part-time work as self-employed (including family worker)). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param sort Optional variable to be used as tie-breaker for sorting. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, linearization of the aggregate replacement ratio is done for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers as numeric vector.
#' @param period Optional variable for survey period. If supplied, linearization of the aggregate replacement ratio is done for each survey period. Object convertible to \code{data.table} or variable names as character, column numbers as numeric vector.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param order_quant A numeric value in range \eqn{\left[ 0,100 \right]}{[0,100]} for \eqn{\alpha} in the formula for at-risk-of-poverty threshold computation:
#' \deqn{\frac{p}{100} \cdot Z_{\frac{\alpha}{100}}.}{p/100 * Z(\alpha/100).}
#' For example, to compute at-risk-of-poverty threshold equal to some percentage of median income, \eqn{\alpha} should be set equal to 50.
#' @param var_name A character specifying the name of the linearized variable.
#' @param checking Optional logical variable. If \code{TRUE} (default), the function checks the data for preparation errors; otherwise, no checks are performed.
#'
#' @details The implementation strictly follows the Eurostat definition.
#'
#' @return A list of two objects is returned:
#' \itemize{
#' \item \code{value} - a \code{data.table} containing the estimated aggregate replacement ratio.
#' \item \code{lin} - a \code{data.table} containing the linearized variables of the aggregate replacement ratio.
#' }
#'
#' @references
#' Working group on Statistics on Income and Living Conditions (2015) Task 5 - Improvement and optimization of calculation of net change. \emph{LC- 139/15/EN}, Eurostat. \cr
#' Jean-Claude Deville (1999). Variance estimation for complex statistics and estimators: linearization and residual techniques. Survey Methodology, 25, 193-203, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/1999002/article/4882-eng.pdf}. \cr
#'
#' @seealso \code{\link{varpoord}},
#' \code{\link{vardcrospoor}},
#' \code{\link{vardchangespoor}}
#' @keywords Linearization
#'
#' @examples
#' library("data.table")
#' library("laeken")
#' data("eusilc")
#' dataset1 <- data.table(IDd = paste0("V", 1 : nrow(eusilc)), eusilc)
#' dataset1$pl085 <- 12 * trunc(runif(nrow(dataset1), 0, 2))
#' dataset1$month_at_work <- 12 * trunc(runif(nrow(dataset1), 0, 2))
#'
#' # Full population
#' d <- linarr(Y = "eqIncome", Y_den = "eqIncome",
#' id = "IDd", age = "age",
#' pl085 = "pl085", month_at_work = "month_at_work",
#' weight = "rb050", Dom = NULL,
#' dataset = dataset1, order_quant = 50L)
#' d$value
#'
#' \dontrun{
#' # By domains
#' dd <- linarr(Y = "eqIncome", Y_den = "eqIncome",
#' id = "IDd", age = "age",
#' pl085 = "pl085", month_at_work = "month_at_work",
#' weight = "rb050", Dom = "db040",
#' dataset = dataset1, order_quant = 50L)
#' dd}
#'
#' @import data.table
#' @import laeken
#' @export linarr
linarr <- function(Y, Y_den, id = NULL, age, pl085, month_at_work,
weight = NULL, sort = NULL, Dom = NULL,
period = NULL, dataset = NULL, order_quant = 50,
var_name = "lin_arr", checking = TRUE) {
## initializations
if (min(dim(data.table(var_name)) == 1) != 1) {
stop("'var_name' must have defined one name of the linearized variable")}
if (checking) {
order_quant <- check_var(vars = order_quant, varn = "order_quant",
varntype = "numeric0100")
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
ncols = 1, isnumeric = TRUE,
isvector = TRUE, grepls = "__")
Ynrow <- length(Y)
Y_den <- check_var(vars = Y_den, varn = "Y_den",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
weight <- check_var(vars = weight, varn = "weight",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
age <- check_var(vars = age, varn = "age",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
pl085 <- check_var(vars = pl085, varn = "pl085",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
month_at_work <- check_var(vars = month_at_work, varn = "month_at_work",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
sort <- check_var(vars = sort, varn = "sort",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
period <- check_var(vars = period, varn = "period",
dataset = dataset, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
duplicatednames = TRUE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
Ynrow = Ynrow, ischaracter = TRUE,
mustbedefined = FALSE, duplicatednames = TRUE,
grepls = "__")
id <- check_var(vars = id, varn = "id", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
periods = period)
}
dataset <- NULL
## computations
ind0 <- rep.int(1, length(Y))
period_agg <- period1 <- NULL
if (!is.null(period)) { period1 <- copy(period)
period_agg <- data.table(unique(period))
} else period1 <- data.table(ind = ind0)
period1_agg <- data.table(unique(period1))
# ARR by domain (if requested)
age_65_74pl <- data.table(age_65_74pl = as.integer(65 <= age & age <= 74 & pl085 == 12))
age_50_59mo <- data.table(age_50_59mo = as.integer(50 <= age & age <= 59 & month_at_work == 12))
if (!is.null(Dom)) { age_65_74pl <- data.table(age_65_74pl, Dom)
age_50_59mo <- data.table(age_50_59mo, Dom) }
quantile1 <- incPercentile(Y = Y,
weights = weight,
sort = sort,
Dom = age_65_74pl,
period = period,
k = order_quant,
dataset = NULL,
checking = FALSE)
quantile2 <- incPercentile(Y = Y_den,
weights = weight,
sort = sort,
Dom = age_50_59mo,
period = period,
k = order_quant,
dataset = NULL,
checking = FALSE)
quantile1 <- quantile1[age_65_74pl == 1][, age_65_74pl := NULL]
quantile2 <- quantile2[age_50_59mo == 1][, age_50_59mo := NULL]
setnames(quantile1, names(quantile1)[ncol(quantile1)], "quantile_65_74pl")
setnames(quantile2, names(quantile2)[ncol(quantile2)], "quantile_50_59mo")
sk <- length(names(quantile2)) - 1
if (sk > 0) {
quantile <- merge(quantile1, quantile2, all = TRUE,
by = names(quantile1)[1 : sk])
} else quantile <- data.table(quantile1, quantile2)
arr_id <- id
quantile1 <- quantile2 <- NULL
age_65_74pl <- age_65_74pl[["age_65_74pl"]]
age_50_59mo <- age_50_59mo[["age_50_59mo"]]
if (!is.null(period)) arr_id <- data.table(arr_id, period)
if (!is.null(Dom)) {
Dom_agg <- data.table(unique(Dom))
setkeyv(Dom_agg, names(Dom_agg))
arr_v <- c()
arr_m <- copy(arr_id)
for(i in 1:nrow(Dom_agg)) {
g <- c(var_name, paste(names(Dom), as.matrix(Dom_agg[i,]), sep = "."))
var_nams <- do.call(paste, as.list(c(g, sep = "__")))
ind <- as.integer(rowSums(Dom == Dom_agg[i,][ind0,]) == ncol(Dom))
arrl <- lapply(1:nrow(period1_agg), function(j) {
if (!is.null(period)) {
rown <- cbind(period_agg[j], Dom_agg[i])
setkeyv(rown, names(rown))
rown2 <- copy(rown)
rown <- merge(rown, quantile, all.x = TRUE)
} else {rown <- quantile[i]
rown2 <- Dom_agg[i] }
indj <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
arr_l <- arrlinCalc(Y_num = Y[indj],
Y_den = Y_den[indj],
ids = arr_id[indj],
wght = weight[indj],
indicator = ind[indj],
order_quants = order_quant,
age_65_74pl = age_65_74pl[indj],
age_50_59mo = age_50_59mo[indj],
quant_65_74pls = rown[["quantile_65_74pl"]],
quant_50_59mon = rown[["quantile_50_59mo"]])
list(arr = data.table(rown2, arr = arr_l$arr_val), lin = arr_l$lin)
})
arrs <- rbindlist(lapply(arrl, function(x) x[[1]]))
arrlin <- rbindlist(lapply(arrl, function(x) x[[2]]))
setnames(arrlin, names(arrlin), c(names(arr_id), var_nams))
arr_m <- merge(arr_m, arrlin, all.x = TRUE, by=names(arr_id))
arr_v <- rbind(arr_v, arrs)
}
} else { arrl <- lapply(1:nrow(period1_agg), function(j) {
if (!is.null(period)) { rown <- period_agg[j]
rown <- merge(rown, quantile, all.x = TRUE,
by = names(rown))
} else rown <- quantile
ind2 <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
arr_l <- arrlinCalc(Y_num = Y[ind2],
Y_den = Y_den[ind2],
ids = arr_id[ind2],
wght = weight[ind2],
indicator = ind0[ind2],
order_quants = order_quant,
age_65_74pl = age_65_74pl[ind2],
age_50_59mo = age_50_59mo[ind2],
quant_65_74pls = rown[["quantile_65_74pl"]],
quant_50_59mon = rown[["quantile_50_59mo"]])
if (!is.null(period)) {
arrs <- data.table(period_agg[j], arr = arr_l$arr_val)
} else arrs <- data.table(arr = arr_l$arr_val)
list(arr = arrs, lin = arr_l$lin)
})
arr_v <- rbindlist(lapply(arrl, function(x) x[[1]]))
arr_m <- rbindlist(lapply(arrl, function(x) x[[2]]))
setnames(arr_m, names(arr_m), c(names(arr_id), var_name))
}
arr_m[is.na(arr_m)] <- 0
setkeyv(arr_m, names(arr_id))
return(list(value = arr_v, lin = arr_m))
}
## workhorse
arrlinCalc <- function(Y_num, Y_den, ids, wght, indicator, order_quants,
age_65_74pl, age_50_59mo, quant_65_74pls, quant_50_59mon) {
dom1 <- (age_65_74pl == 1) * indicator
dom2 <- (age_50_59mo == 1) * indicator
# Size of the domains
N1 <- sum(wght * dom1)
N2 <- sum(wght * dom2)
arr_val <- quant_65_74pls / quant_50_59mon # Estimated aggregate replacement ratio
    # Bandwidth parameter - h=S/N^(1/5) (calculated over the whole population)
h1 <- sqrt((sum(wght * Y_num * Y_num) - sum(wght * Y_num) * sum(wght * Y_num) / sum(wght)) / sum(wght)) / exp(0.2 * log(sum(wght)))
h2 <- sqrt((sum(wght * Y_den * Y_den) - sum(wght * Y_den) * sum(wght * Y_den) / sum(wght)) / sum(wght)) / exp(0.2 * log(sum(wght)))
    #---- 1. Linearization of the median income (pensions) of people aged 65-74 ----
u1 <- (quant_65_74pls - Y_num) / h1
vect_f1 <- exp(-(u1^2) / 2) / sqrt(2 * pi)
f_quant1 <- sum(vect_f1 * wght * dom1) / (N1 * h1) # Estimate of F'(quantile)
lin_quant_65_74pl <- -(1 / N1) * dom1 * ((Y_num <= quant_65_74pls) - order_quants / 100) / f_quant1 # Linearized variable
    #---- 2. Linearization of the median earnings of people aged 50-59 with 12 months worked -----
u2 <- (quant_50_59mon - Y_den) / h2
vect_f2 <- exp(-(u2^2)/2) / sqrt(2 * pi)
f_quant2 <- sum(vect_f2 * wght * dom2) / (N2 * h2) # Estimate of F'(quantile)
lin_quant_50_59mon <- -(1 / N2) * dom2 * ((Y_den <= quant_50_59mon) - order_quants / 100) / f_quant2 # Linearized variable
#********************************************************************************
    # 3. Linearization of the aggregate replacement ratio                           *
#********************************************************************************
lin <- (quant_50_59mon * lin_quant_65_74pl - quant_65_74pls * lin_quant_50_59mon) / (quant_50_59mon * quant_50_59mon)
lin_id <- data.table(ids, lin)
return(list(arr_val = arr_val, lin = lin_id))
}
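## Illustrative sketch (not part of the package): the last step above applies the
## quotient rule for linearizing a ratio of two statistics,
## lin = (Q2 * lin1 - Q1 * lin2) / Q2^2. The numbers below are toy values assumed
## only for illustration; the check compares the linearized variable against a
## small numerical perturbation of the ratio.
local({
  Q1 <- 9000;  Q2 <- 15000                   # e.g. two estimated medians (toy values)
  lin1 <- c( 0.40, -0.20, 0.10)              # toy linearized variable of Q1
  lin2 <- c(-0.30,  0.50, 0.05)              # toy linearized variable of Q2
  lin_ratio <- (Q2 * lin1 - Q1 * lin2) / Q2^2
  eps <- 1e-6                                # small perturbation for a first-order check
  all.equal((Q1 + eps * lin1) / (Q2 + eps * lin2) - Q1 / Q2,
            eps * lin_ratio, tolerance = 1e-4)
})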
#' Linearization of the Gini coefficient I
#'
#' @description Estimate the Gini coefficient, which is a measure for inequality, and its linearization.
#'
#' @param Y Study variable (for example equalized disposable income). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param id Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param weight Optional weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param sort Optional variable to be used as tie-breaker for sorting. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, linearization of the Gini is done for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers.
#' @param period Optional variable for survey period. If supplied, linearization of the Gini is done for each time period. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param var_name A character specifying the name of the linearized variable.
#' @param checking Optional logical variable: if TRUE (the default), the function checks for data preparation errors; otherwise, no checks are made.
#'
#' @return A list with two objects is returned by the function:
#' \itemize{
#' \item \code{value} - a \code{data.table} containing the estimated Gini coefficients (in percentage) by G. Osier and Eurostat.
#' \item \code{lin} - a \code{data.table} containing the linearized variables of the Gini coefficients (in percentage) by G. Osier.}
#'
#' @references
#' Working group on Statistics on Income and Living Conditions (2004) Common cross-sectional EU indicators based on EU-SILC; the gender pay gap. \emph{EU-SILC 131-rev/04}, Eurostat. \cr
#' Guillaume Osier (2009). Variance estimation for complex indicators of poverty and inequality. \emph{Journal of the European Survey Research Association}, Vol.3, No.3, pp. 167-195, ISSN 1864-3361, URL \url{https://ojs.ub.uni-konstanz.de/srm/article/view/369}. \cr
#' Jean-Claude Deville (1999). Variance estimation for complex statistics and estimators: linearization and residual techniques. Survey Methodology, 25, 193-203, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/1999002/article/4882-eng.pdf}. \cr
#'
#' @seealso \code{\link{lingini2}},
#' \code{\link{linqsr}},
#' \code{\link{varpoord}},
#' \code{\link{vardcrospoor}},
#' \code{\link{vardchangespoor}}
#'
#' @keywords Linearization
#'
#' @examples
#' library("laeken")
#' library("data.table")
#' data("eusilc")
#' dataset1 <- data.table(IDd = paste0("V", 1 : nrow(eusilc)), eusilc)[1 : 3,]
#'
#' # Full population
#' dat1 <- lingini(Y = "eqIncome", id = "IDd",
#' weight = "rb050", dataset = dataset1)
#' dat1$value
#'
#' \dontrun{
#' # By domains
#' dat2 <- lingini(Y = "eqIncome", id = "IDd", weight = "rb050",
#' Dom = c("db040"), dataset = dataset1)
#' dat2$value}
#'
#' @import data.table
#' @import laeken
#' @export lingini
lingini <- function(Y, id = NULL, weight = NULL,
sort = NULL, Dom = NULL, period = NULL,
dataset = NULL, var_name = "lin_gini",
checking = TRUE) {
## initializations
if (min(dim(as.data.frame(var_name)) == 1) != 1) {
stop("'var_name' must have defined name of the linearized variable")}
if (checking) {
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
ncols = 1, isnumeric = TRUE,
isvector = TRUE, grepls = "__")
Ynrow <- length(Y)
weight <- check_var(vars = weight, varn = "weight",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
sort <- check_var(vars = sort, varn = "sort",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
period <- check_var(vars = period, varn = "period",
dataset = dataset, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
duplicatednames = TRUE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
Ynrow = Ynrow, ischaracter = TRUE,
mustbedefined = FALSE, duplicatednames = TRUE,
grepls = "__")
id <- check_var(vars = id, varn = "id", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
periods = period)
}
## computations
ind0 <- rep.int(1, length(Y))
period_agg <- period1 <- NULL
if (!is.null(period)) { period1 <- copy(period)
period_agg <- data.table(unique(period))
} else period1 <- data.table(ind = ind0)
period1_agg <- data.table(unique(period1))
# Gini by domain (if requested)
gini_id <- id
if (!is.null(period)) gini_id <- data.table(period, gini_id)
if (!is.null(Dom)) {
Dom_agg <- data.table(unique(Dom))
setkeyv(Dom_agg, names(Dom_agg))
Gini <- c()
gini_m <- copy(gini_id)
for(i in 1 : nrow(Dom_agg)) {
g <- c(var_name, paste(names(Dom), as.matrix(Dom_agg[i,]), sep = "."))
var_nams <- do.call(paste, as.list(c(g, sep = "__")))
indi <- (rowSums(Dom == Dom_agg[i,][ind0,]) == ncol(Dom))
gini_l <- lapply(1 : nrow(period1_agg), function(j) {
indj <- ((rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1)) & (indi))
if (!is.null(period)) { rown <- cbind(period_agg[j], Dom_agg[i])
} else rown <- Dom_agg[i]
ginil <- linginiCalc(x = Y[indj],
ids = gini_id[indj],
weights = weight[indj],
sort=sort[indj])
list(data.table(rown, ginil$Gini), ginil$lin)
})
giniv <- rbindlist(lapply(gini_l, function(x) x[[1]]))
ginilin <- rbindlist(lapply(gini_l, function(x) x[[2]]))
setnames(ginilin, names(ginilin), c(names(gini_id), var_nams))
gini_m <- merge(gini_m, ginilin, all = TRUE, by = names(gini_id))
Gini <- rbind(Gini, giniv)
}
} else { gini_l <- lapply(1 : nrow(period1_agg), function(j) {
indj <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
ginil <- linginiCalc(x = Y[indj],
ids = gini_id[indj],
weights = weight[indj],
sort = sort[indj])
if (!is.null(period)) {
list(data.table(period_agg[j], ginil$Gini), ginil$lin)
} else ginil
})
Gini <- rbindlist(lapply(gini_l, function(x) x[[1]]))
gini_m <- rbindlist(lapply(gini_l, function(x) x[[2]]))
setnames(gini_m, names(gini_m), c(names(gini_id), var_name))
}
gini_m[is.na(gini_m)] <- 0
setkeyv(gini_m, names(gini_id))
return(list(value = Gini, lin = gini_m))
}
## workhorse
linginiCalc <- function(x, ids, weights = NULL, sort = NULL) {
# sort values and weights
order <- if(is.null(sort)) order(x) else order(x, sort)
x <- x[order] # order values
ids <- ids[order] # order values
if (is.null(weights)) { weights <- rep.int(1, length(x)) # equal weights
} else weights <- weights[order] # order weights
## calculations
    taille <- length(weights) # Sample size
wx <- weights * x # weighted values
N <- sum(weights) # Estimated population size
cw <- cumsum(weights) # cumulative sum of weights
    T <- sum(wx) # Estimated total income
Num_eu <- 2 * sum(wx * cw) - sum(weights^2 * x)
Num <- sum((2 * cw - 1) * wx)
Den <- N * T;
Gini_eu <- 100 * (Num_eu / Den - 1)
Gini <- Num / Den - 1
Gini_pr <- 100 * Gini
# COMPUTATION OF A LINEARIZED VARIABLE
F <- cumsum(weights / N) # Estimation of the cumulative distribution function
G <- cumsum(wx) # Weighted partial sum
# LINEARIZED VARIABLE OF THE GINI COEFFICIENT (IN %)
lin <- 100 * (2 * (T - G + wx + N * (x * F)) - x - (Gini + 1) * (T + N * x)) / (N * T)
if (is.nan(Gini)) Gini_pr <- lin <- 0
Gini_pr <- data.table(Gini = Gini_pr, Gini_eu = Gini_eu)
lin_id <- data.table(ids, lin)
return(list(Gini = Gini_pr, lin = lin_id))
}
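## Illustrative sketch (simulated toy data, not part of the package): the Gini
## point estimate returned by linginiCalc() above should equal the direct weighted
## formula 100 * (sum((2*cumsum(w) - 1) * w * x) / (sum(w) * sum(w * x)) - 1)
## computed on the income-ordered data. Assumes library(data.table) is attached
## and linginiCalc() from above is in scope.
local({
  library(data.table)
  set.seed(1)
  x <- rlnorm(100, meanlog = 9, sdlog = 0.5)          # toy incomes
  w <- runif(100, 0.5, 2)                              # toy weights
  res <- linginiCalc(x = x, ids = data.table(id = 1:100), weights = w)
  o <- order(x); xo <- x[o]; wo <- w[o]
  gini_direct <- 100 * (sum((2 * cumsum(wo) - 1) * wo * xo) /
                          (sum(wo) * sum(wo * xo)) - 1)
  all.equal(res$Gini$Gini, gini_direct)
})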
#' Linearization of the Gini coefficient II
#'
#' @description Estimate the Gini coefficient, which is a measure for inequality, and its linearization.
#'
#' @param Y Study variable (for example equalized disposable income). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param id Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param weight Optional weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param sort Optional variable to be used as tie-breaker for sorting. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, linearization of the Gini is done for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers.
#' @param period Optional variable for survey period. If supplied, linearization of the Gini is done for each time period. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param var_name A character specifying the name of the linearized variable.
#' @param checking Optional logical variable: if TRUE (the default), the function checks for data preparation errors; otherwise, no checks are made.
#'
#' @return A list with two objects is returned by the function:
#' \itemize{
#' \item \code{value} - a \code{data.table} containing the estimated Gini coefficients (in percentage) by Langel and Tille (2012) and Eurostat.
#' \item \code{lin} - a \code{data.table} containing the linearized variables of the Gini coefficients (in percentage) by Langel and Tille (2012).
#' }
#'
#' @references
#' Eric Graf, Yves Tille, Variance Estimation Using Linearization for Poverty and Social Exclusion Indicators, Survey Methodology, June 2014 61 Vol. 40, No. 1, pp. 61-79, Statistics Canada, Catalogue no. 12-001-X, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/12-001-x2014001-eng.pdf} \cr
#' Jean-Claude Deville (1999). Variance estimation for complex statistics and estimators: linearization and residual techniques. Survey Methodology, 25, 193-203, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/1999002/article/4882-eng.pdf}. \cr
#' Matti Langel, Yves Tille, Corrado Gini, a pioneer in balanced sampling and inequality theory. \emph{Metron - International Journal of Statistics}, 2011, vol. LXIX, n. 1, pp. 45-65, URL \url{http://dx.doi.org/10.1007/BF03263549}. \cr
#' Working group on Statistics on Income and Living Conditions (2004) Common cross-sectional EU indicators based on EU-SILC; the gender pay gap. \emph{EU-SILC 131-rev/04}, Eurostat. \cr
#'
#' @seealso \code{\link{lingini}},
#' \code{\link{linqsr}},
#' \code{\link{varpoord}},
#' \code{\link{vardcrospoor}},
#' \code{\link{vardchangespoor}}
#' @keywords Linearization
#'
#' @examples
#' library("data.table")
#' library("laeken")
#' data("eusilc")
#' dataset1 <- data.table(IDd = paste0("V", 1 : nrow(eusilc)), eusilc)
#'
#' # Full population
#' dat1 <- lingini2(Y = "eqIncome", id = "IDd",
#' weight = "rb050", dataset = dataset1)
#' dat1$value
#'
#' \dontrun{
#' # By domains
#' dat2 <- lingini2(Y = "eqIncome", id = "IDd",
#' weight = "rb050", Dom = c("db040"),
#' dataset = dataset1)
#' dat2$value}
#'
#'
#' @import data.table
#' @import laeken
#' @export lingini2
lingini2 <- function(Y, id = NULL, weight = NULL,
sort = NULL, Dom = NULL, period = NULL,
dataset = NULL, var_name = "lin_gini2",
checking = TRUE) {
## initializations
if (min(dim(as.data.frame(var_name)) == 1) != 1) {
stop("'var_name' must have defined name of the linearized variable")}
if (checking) {
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
ncols = 1, isnumeric = TRUE,
isvector = TRUE, grepls = "__")
Ynrow <- length(Y)
weight <- check_var(vars = weight, varn = "weight",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
sort <- check_var(vars = sort, varn = "sort",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
period <- check_var(vars = period, varn = "period",
dataset = dataset, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
duplicatednames = TRUE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
Ynrow = Ynrow, ischaracter = TRUE,
mustbedefined = FALSE, duplicatednames = TRUE,
grepls = "__")
id <- check_var(vars = id, varn = "id", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
periods = period)
}
## computations
ind0 <- rep.int(1, length(Y))
period_agg <- period1 <- NULL
if (!is.null(period)) { period1 <- copy(period)
period_agg <- data.table(unique(period))
} else period1 <- data.table(ind = ind0)
period1_agg <- data.table(unique(period1))
# Gini by domain (if requested)
gini_id <- id
if (!is.null(period)) gini_id <- data.table(period, gini_id)
if (!is.null(Dom)) {
Dom_agg <- data.table(unique(Dom))
setkeyv(Dom_agg, names(Dom_agg))
Gini <- c()
gini_m <- copy(gini_id)
for(i in 1 : nrow(Dom_agg)) {
g <- c(var_name, paste(names(Dom), as.matrix(Dom_agg[i,]), sep = "."))
var_nams <- do.call(paste, as.list(c(g, sep = "__")))
indi <- (rowSums(Dom == Dom_agg[i,][ind0,]) == ncol(Dom))
gini_l <- lapply(1 : nrow(period1_agg), function(j) {
indj <- ((rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))&(indi))
if (!is.null(period)) { rown <- cbind(period_agg[j], Dom_agg[i])
} else rown <- Dom_agg[i]
ginil <- lingini2Calc(x = Y[indj],
ids = gini_id[indj],
weights = weight[indj],
sort = sort[indj])
list(data.table(rown, ginil$Gini), ginil$lin)
})
giniv <- rbindlist(lapply(gini_l, function(x) x[[1]]))
ginilin <- rbindlist(lapply(gini_l, function(x) x[[2]]))
setnames(ginilin, names(ginilin), c(names(gini_id), var_nams))
gini_m <- merge(gini_m, ginilin, all = TRUE, by = names(gini_id))
Gini <- rbind(Gini, giniv)
}
} else { gini_l <- lapply(1 : nrow(period1_agg), function(j) {
indj <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
ginil <- lingini2Calc(x = Y[indj],
ids = gini_id[indj],
weights = weight[indj],
sort = sort[indj])
if (!is.null(period)) {
list(data.table(period_agg[j], ginil$Gini), ginil$lin)
} else ginil
})
Gini <- rbindlist(lapply(gini_l, function(x) x[[1]]))
gini_m <- rbindlist(lapply(gini_l, function(x) x[[2]]))
setnames(gini_m, names(gini_m), c(names(gini_id), var_name))
}
gini_m[is.na(gini_m)] <- 0
setkeyv(gini_m, names(gini_id))
return(list(value = Gini, lin = gini_m))
}
# workhorse
lingini2Calc <- function(x, ids, weights = NULL, sort = NULL) {
# sort values and weights
order <- if(is.null(sort)) order(x) else order(x, sort)
x <- x[order] # order values
ids <- ids[order] # order values
if (is.null(weights)) { weights <- rep.int(1, length(x)) # equal weights
} else weights <- weights[order] # order weights
## calculations
wx <- weights * x # weighted values
N <- sum(weights) # Estimated population size
cw <- cumsum(weights) # cumulative sum of weights
T <- sum(wx) # Estimated total income
dt <- data.table(x = x, weights = weights, wx = wx, key = "x")
weights0 <- wx0 <- NULL
dt1 <- dt[, list(weights0 = sum(weights),
wx0 = sum(wx)), keyby = x][,
Nk := cumsum(weights0)][,
wx1 := cumsum(wx0)]
dt <- merge(dt, dt1)
# Nk - estimation of the cumulative distribution function
Nk <- dt[["Nk"]]
# wx - weighted partial sum
wx1 <- dt[["wx1"]]
dt <- dt1 <- NULL
Num_eu <- 2 * sum(wx * cw) - sum(weights^2 * x)
Num <- 2 * sum(wx * Nk) - sum(weights^2 * x)
Den <- N * T
Gini_eu <- 100 * (Num_eu / Den - 1)
Gini <- Num / Den - 1
Gini_pr <- 100 * Gini
# LINEARIZED VARIABLE OF THE GINI COEFFICIENT (IN %)
lin <- 100 * (2 * Nk * (x - wx1 / Nk) + T - N * x - Gini * (T + N * x)) / (N * T)
if (is.nan(Gini)) Gini_pr <- lin <- 0
Gini_pr <- data.table(Gini = Gini_pr, Gini_eu = Gini_eu)
lin_id <- data.table(ids, lin = lin)
return(list(Gini=Gini_pr, lin = lin_id))
}
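## Illustrative sketch (toy data, not part of the package): both workhorses above
## compute the Eurostat-style estimate Gini_eu with the same formula, so the two
## Gini_eu values should agree exactly, while the main estimates (Osier vs
## Langel-Tille) can differ slightly for unequal weights. Assumes library(data.table)
## is attached and both workhorses are in scope.
local({
  library(data.table)
  set.seed(2)
  x <- rlnorm(200, meanlog = 9, sdlog = 0.4)   # toy incomes (no ties)
  w <- runif(200, 0.5, 2)                      # toy weights
  ids <- data.table(id = seq_along(x))
  g1 <- linginiCalc(x = x, ids = ids, weights = w)$Gini
  g2 <- lingini2Calc(x = x, ids = ids, weights = w)$Gini
  all.equal(g1$Gini_eu, g2$Gini_eu)
})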
#' Linearization of the gender pay (wage) gap.
#'
#' @description Estimation of gender pay (wage) gap and computation of linearized variables for variance estimation.
#'
#' @param Y Study variable (for example the gross hourly earning). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param gender Numerical variable for gender, where 1 is for males, but 2 is for females. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param id Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param weight Optional weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param sort Optional variable to be used as tie-breaker for sorting. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, estimation and linearization of gender pay (wage) gap is done for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers.
#' @param period Optional variable for survey period. If supplied, estimation and linearization of gender pay (wage) gap is done for each time period. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param var_name A character specifying the name of the linearized variable.
#' @param checking Optional logical variable: if TRUE (the default), the function checks for data preparation errors; otherwise, no checks are made.
#'
#' @return A list with two objects is returned:
#' \itemize{
#' \item \code{value} - a \code{data.table} containing the estimated gender pay (wage) gap (in percentage).
#' \item \code{lin} - a \code{data.table} containing the linearized variables of the gender pay (wage) gap (in percentage) for variance estimation.
#' }
#'
#' @references
#' Working group on Statistics on Income and Living Conditions (2004) Common cross-sectional EU indicators based on EU-SILC; the gender pay gap. \emph{EU-SILC 131-rev/04}, Eurostat. \cr
#' Guillaume Osier (2009). Variance estimation for complex indicators of poverty and inequality. \emph{Journal of the European Survey Research Association}, Vol.3, No.3, pp. 167-195, ISSN 1864-3361, URL \url{https://ojs.ub.uni-konstanz.de/srm/article/view/369}. \cr
#' Jean-Claude Deville (1999). Variance estimation for complex statistics and estimators: linearization and residual techniques. Survey Methodology, 25, 193-203, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/1999002/article/4882-eng.pdf}. \cr
#'
#' @seealso \code{\link{linqsr}}, \code{\link{lingini}},
#' \code{\link{varpoord}} , \code{\link{vardcrospoor}},
#' \code{\link{vardchangespoor}}
#' @keywords Linearization
#'
#' @examples
#' library("data.table")
#' library("laeken")
#' data("ses")
#' dataset1 <- data.table(ID = paste0("V", 1 : nrow(ses)), ses)
#'
#' dataset1[, IDnum := .I]
#'
#' setnames(dataset1, "sex", "sexf")
#' dataset1[sexf == "male", sex:= 1]
#' dataset1[sexf == "female", sex:= 2]
#'
#' # Full population
#' gpgs1 <- lingpg(Y = "earningsHour", gender = "sex",
#' id = "IDnum", weight = "weights",
#' dataset = dataset1)
#' gpgs1$value
#'
#' \dontrun{
#' # Domains by education
#' gpgs2 <- lingpg(Y = "earningsHour", gender = "sex",
#' id = "IDnum", weight = "weights",
#' Dom = "education", dataset = dataset1)
#' gpgs2$value
#'
#' # Sort variable
#' gpgs3 <- lingpg(Y = "earningsHour", gender = "sex",
#' id = "IDnum", weight = "weights",
#' sort = "IDnum", Dom = "education",
#' dataset = dataset1)
#' gpgs3$value
#'
#' # Two survey periods
#' dataset1[, year := 2010]
#' dataset2 <- copy(dataset1)
#' dataset2[, year := 2011]
#' dataset1 <- rbind(dataset1, dataset2)
#'
#' gpgs4 <- lingpg(Y = "earningsHour", gender = "sex",
#' id = "IDnum", weight = "weights",
#' sort = "IDnum", Dom = "education",
#' period = "year", dataset = dataset1)
#' gpgs4$value
#' names(gpgs4$lin)}
#'
#' @import data.table
#' @import laeken
#' @export lingpg
lingpg <- function(Y, gender = NULL, id = NULL,
weight = NULL, sort = NULL,
Dom = NULL, period = NULL,
dataset = NULL, var_name = "lin_gpg",
checking = TRUE) {
## initializations
if (min(dim(as.data.frame(var_name)) == 1) != 1) {
stop("'var_name' must have defined name of the linearized variable")}
if (is.null(gender)) stop("'gender' must be supplied")
if (checking) {
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
ncols = 1, isnumeric = TRUE,
isvector = TRUE, grepls = "__")
Ynrow <- length(Y)
gender <- check_var(vars = gender, varn = "gender",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
weight <- check_var(vars = weight, varn = "weight",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
sort <- check_var(vars = sort, varn = "sort",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
period <- check_var(vars = period, varn = "period",
dataset = dataset, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
duplicatednames = TRUE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
Ynrow = Ynrow, ischaracter = TRUE,
mustbedefined = FALSE, duplicatednames = TRUE,
grepls = "__")
id <- check_var(vars = id, varn = "id", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
periods = period)
}
## computations
ind0 <- rep.int(1, length(Y))
period_agg <- period1 <- NULL
if (!is.null(period)) { period1 <- copy(period)
period_agg <- data.table(unique(period))
} else period1 <- data.table(ind = ind0)
period1_agg <- data.table(unique(period1))
# GPG by domain (if requested)
gpg_id <- id
if (!is.null(period)) gpg_id <- data.table(gpg_id, period)
if(!is.null(Dom)) {
Dom_agg <- data.table(unique(Dom))
setkeyv(Dom_agg, names(Dom_agg))
gpg_v <- c()
gpg_m <- copy(gpg_id)
for(i in 1 : nrow(Dom_agg)) {
g <- c(var_name, paste(names(Dom), as.matrix(Dom_agg[i,]), sep = "."))
var_nams <- do.call(paste, as.list(c(g, sep = "__")))
indi <- (rowSums(Dom == Dom_agg[i,][ind0,]) == ncol(Dom))
gpg_l <- lapply(1 : nrow(period1_agg), function(j) {
indj <- ((rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))&(indi))
if (!is.null(period)) { rown <- cbind(period_agg[j], Dom_agg[i])
} else rown <- Dom_agg[i]
gpgl <- linGapCalc(x = Y[indj], gend = gender[indj],
ids = gpg_id[indj], weights = weight[indj],
sort = sort[indj])
list(data.table(rown, gpg = gpgl$gpg_pr), gpgl$lin)
})
gpgs <- rbindlist(lapply(gpg_l, function(x) x[[1]]))
gpglin <- rbindlist(lapply(gpg_l, function(x) x[[2]]))
setnames(gpglin, names(gpglin), c(names(gpg_id), var_nams))
gpg_m <- merge(gpg_m, gpglin, all.x = TRUE, by = names(gpg_id))
gpg_v <- rbind(gpg_v, gpgs)
}
} else { gpg_l <- lapply(1 : nrow(period1_agg), function(j) {
indj <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
gpg_l <- linGapCalc(x = Y[indj], gend = gender[indj],
ids = gpg_id[indj], weights = weight[indj],
sort = sort[indj])
if (!is.null(period)) {
gpgs <- data.table(period_agg[j], gpg = gpg_l$gpg_pr)
} else gpgs <- data.table(gpg = gpg_l$gpg_pr)
list(gpg = gpgs, lin = gpg_l$lin)
})
gpg_v <- rbindlist(lapply(gpg_l, function(x) x[[1]]))
gpg_m <- rbindlist(lapply(gpg_l, function(x) x[[2]]))
setnames(gpg_m, names(gpg_m), c(names(gpg_id), var_name))
}
gpg_m[is.na(gpg_m)] <- 0
setkeyv(gpg_m, names(gpg_id))
return(list(value = gpg_v, lin = gpg_m))
}
## workhorse
linGapCalc <- function(x, gend, ids, weights = NULL, sort = NULL) {
if(is.null(gend)) stop("'gender' must be supplied")
  if (length(gend) != length(x)) stop("'x' must have the same length as 'gend'")
  if (is.null(weights)) { weights <- rep.int(1, length(x)) # equal weights
  } else if (length(gend) != length(weights)) stop("'weights' must have the same length as 'gend'")
indic_men <- ifelse(gend == 1, 1, 0)
indic_women <- ifelse(gend == 2, 1, 0)
x[is.na(x)] <- 0
Nmen <- sum(weights * indic_men)
Nwomen <- sum(weights * indic_women)
SINCmen <- sum(weights * x * indic_men)
SINCwomen <- sum(weights * x * indic_women)
Num <- SINCmen / Nmen - SINCwomen / Nwomen
Den <- SINCmen / Nmen
gpg <- Num / Den # Estimated gender pay gap
gpg_pr <- gpg * 100
#-------------------------- Linearized variable (in %) -----------------------
lin <- 100 * (1 - gpg) * ((indic_women / Nwomen) - (indic_men / Nmen) + ((x * indic_men) / SINCmen) - ((x * indic_women) / SINCwomen))
#-----------------------------------------------------------------------------
if (length(unique(gend)) != 2 | is.nan(gpg)) gpg_pr <- lin <- 0
lin_id <- data.table(ids, lin)
gpg <- data.table(gpg_pr = gpg_pr)
return(list(gpg_pr = gpg_pr, lin = lin_id))
}
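## Illustrative sketch (toy data, not part of the package): linGapCalc() above
## reduces to the textbook gender pay gap, 100 * (mean_men - mean_women) / mean_men,
## computed with weighted means. Assumes library(data.table) is attached and
## linGapCalc() is in scope.
local({
  library(data.table)
  set.seed(3)
  n <- 200
  gend <- sample(1:2, n, replace = TRUE)                         # 1 = male, 2 = female
  x <- rlnorm(n, meanlog = 2 + 0.1 * (gend == 1), sdlog = 0.3)   # toy hourly earnings
  w <- runif(n, 0.5, 2)
  res <- linGapCalc(x = x, gend = gend, ids = data.table(id = 1:n), weights = w)
  mean_men   <- sum(w * x * (gend == 1)) / sum(w * (gend == 1))
  mean_women <- sum(w * x * (gend == 2)) / sum(w * (gend == 2))
  all.equal(res$gpg_pr, 100 * (mean_men - mean_women) / mean_men)
})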
#' Linearization of the median income of individuals below the At Risk of Poverty Threshold
#'
#' @description Estimation of the median income of individuals below At Risk of Poverty Threshold and computation of linearized variable for variance estimation. The At Risk of Poverty Threshold is estimated for the whole population always. The median income is estimated for the whole population or for each domain.
#'
#'
#' @param Y Study variable (for example equalized disposable income). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param id Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param weight Optional weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param sort Optional variable to be used as tie-breaker for sorting. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, linearization of the median income of persons below a poverty threshold is done for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers.
#' @param period Optional variable for survey period. If supplied, linearization of the median income of persons below a poverty threshold is done for each time period. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param percentage A numeric value in range \eqn{[0,100]} for \eqn{p} in the formula for poverty threshold computation: \deqn{\frac{p}{100} \cdot Z_{\frac{\alpha}{100}}.}{p/100 * Z(\alpha/100).} For example, to compute poverty threshold equal to 60\% of some income quantile, \eqn{p} should be set equal to 60.
#' @param order_quant A numeric value in range \eqn{[0,100]} for \eqn{\alpha} in the formula for poverty threshold computation: \deqn{\frac{p}{100} \cdot Z_{\frac{\alpha}{100}}.}{p/100 * Z(\alpha/100)}. For example, to compute poverty threshold equal to some percentage of median income, \eqn{\alpha} should be set equal to 50.
#' @param var_name A character specifying the name of the linearized variable.
#' @param checking Optional logical variable: if TRUE (the default), the function checks for data preparation errors; otherwise, no checks are made.
#'
#' @return A list with four objects is returned by the function:
#' \itemize{
#'    \item \code{quantile} - a \code{data.table} containing the estimated value of the income quantile used for the at-risk-of-poverty threshold.
#'    \item \code{threshold} - a \code{data.table} containing the estimated at-risk-of-poverty threshold.
#'    \item \code{value} - a \code{data.table} containing the estimated median income of individuals below the At Risk of Poverty Threshold.
#'    \item \code{lin} - a \code{data.table} containing the linearized variables of the median income below the At Risk of Poverty Threshold.
#' }
#'
#' @references
#'Working group on Statistics on Income and Living Conditions (2004) Common cross-sectional EU indicators based on EU-SILC; the gender pay gap. \emph{EU-SILC 131-rev/04}, Eurostat. \cr
#'Guillaume Osier (2009). Variance estimation for complex indicators of poverty and inequality. \emph{Journal of the European Survey Research Association}, Vol.3, No.3, pp. 167-195, ISSN 1864-3361, URL \url{https://ojs.ub.uni-konstanz.de/srm/article/view/369}. \cr
#'Jean-Claude Deville (1999). Variance estimation for complex statistics and estimators: linearization and residual techniques. Survey Methodology, 25, 193-203, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/1999002/article/4882-eng.pdf}. \cr
#'
#' @seealso \code{\link{linarpt}},
#' \code{\link{linrmpg}},
#' \code{\link{varpoord}},
#' \code{\link{vardcrospoor}},
#' \code{\link{vardchangespoor}}
#'
#' @keywords Linearization
#'
#' @examples
#' library("laeken")
#' library("data.table")
#' data("eusilc")
#' dataset1 <- data.table(IDd = paste0("V", 1 : nrow(eusilc)), eusilc)
#'
#' # Full population
#' d <- linpoormed(Y = "eqIncome", id = "IDd",
#' weight = "rb050", Dom = NULL,
#' dataset = dataset1, percentage = 60,
#' order_quant = 50L)
#'
#' \dontrun{
#' # Domains by location of household
#' dd <- linpoormed(Y = "eqIncome", id = "IDd",
#' weight = "rb050", Dom = "db040",
#' dataset = dataset1, percentage = 60,
#' order_quant = 50L)
#' dd}
#'
#' @import data.table
#' @import laeken
#' @export linpoormed
linpoormed <- function(Y, id = NULL, weight = NULL,
sort = NULL, Dom = NULL, period = NULL,
dataset = NULL, percentage = 60,
order_quant = 50, var_name = "lin_poormed",
checking = TRUE) {
## initializations
if (min(dim(as.data.frame(var_name)) == 1) != 1) {
stop("'var_name' must be defined name of the linearized variable")}
if (checking) {
percentage <- check_var(vars = percentage, varn = "percentage",
varntype = "numeric0100")
order_quant <- check_var(vars = order_quant, varn = "order_quant",
varntype = "numeric0100")
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
ncols = 1, isnumeric = TRUE,
isvector = TRUE, grepls = "__")
Ynrow <- length(Y)
weight <- check_var(vars = weight, varn = "weight",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
sort <- check_var(vars = sort, varn = "sort",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
period <- check_var(vars = period, varn = "period",
dataset = dataset, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
duplicatednames = TRUE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
Ynrow = Ynrow, ischaracter = TRUE,
mustbedefined = FALSE, duplicatednames = TRUE,
grepls = "__")
id <- check_var(vars = id, varn = "id", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
periods = period)
}
## computations
ind0 <- rep.int(1, length(Y))
period_agg <- period1 <- NULL
if (!is.null(period)) { period1 <- copy(period)
period_agg <- data.table(unique(period))
} else period1 <- data.table(ind = ind0)
period1_agg <- data.table(unique(period1))
# Poor median people by domain (if requested)
quantile <- incPercentile(Y = Y,
weights = weight,
sort = sort,
Dom = NULL,
period = period,
k = order_quant,
dataset = NULL,
checking = FALSE)
setnames(quantile, names(quantile)[ncol(quantile)], "quantile")
if (ncol(quantile) > 1) setkeyv(quantile, head(names(quantile), -1))
threshold <- copy(quantile)
threshold[, threshold := percentage / 100 * quantile]
threshold[, quantile := NULL]
poor_med_id <- id
if (!is.null(period)) poor_med_id <- data.table(poor_med_id, period)
if(!is.null(Dom)) {
Dom_agg <- data.table(unique(Dom))
setkeyv(Dom_agg, names(Dom))
poor_med_v <- c()
poor_med_m <- copy(poor_med_id)
for(i in 1 : nrow(Dom_agg)) {
g <- c(var_name, paste(names(Dom), as.matrix(Dom_agg[i,]), sep = "."))
var_nams <- do.call(paste, as.list(c(g, sep = "__")))
ind <- as.integer(rowSums(Dom == Dom_agg[i,][ind0,]) == ncol(Dom))
poor_medl <- lapply(1 : nrow(period1_agg), function(j) {
if (!is.null(period)) {
rown <- cbind(period_agg[j], Dom_agg[i])
setkeyv(rown, names(rown))
rown2 <- copy(rown)
rown <- merge(rown, quantile, all.x = TRUE)
} else { rown <- quantile
rown2 <- Dom_agg[i] }
indj <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
poormed_l <- linpoormedCalc(inco = Y[indj],
ids = poor_med_id[indj],
wght = weight[indj],
sort = sort[indj],
ind = ind[indj],
percentag = percentage,
order_quants = order_quant,
quant_val = rown[["quantile"]])
list(poor_people_median = data.table(rown2, poor_people_median = poormed_l$poor_people_median),
lin = poormed_l$lin)
})
poor_people_med <- rbindlist(lapply(poor_medl, function(x) x[[1]]))
poor_people_medlin <- rbindlist(lapply(poor_medl, function(x) x[[2]]))
setnames(poor_people_medlin, names(poor_people_medlin), c(names(poor_med_id), var_nams))
poor_med_m <- merge(poor_med_m, poor_people_medlin,
all.x = TRUE, by = names(poor_med_id))
poor_med_v <- rbind(poor_med_v, poor_people_med)
}
} else { poormed_l <- lapply(1:nrow(period1_agg), function(j) {
if (!is.null(period)) {
rown <- period_agg[j]
rown <- merge(rown, quantile, all.x = TRUE,
by = names(rown))
} else rown <- quantile
indj <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
poor_medl <- linpoormedCalc(inco = Y[indj],
ids = poor_med_id[indj],
wght = weight[indj],
sort = sort[indj],
ind = ind0[indj],
percentag = percentage,
order_quants = order_quant,
quant_val = rown[["quantile"]])
if (!is.null(period)) {
poor_med_v <- data.table(period_agg[j], poor_people_median = poor_medl$poor_people_median)
} else poor_med_v <- data.table(poor_people_median = poor_medl$poor_people_median)
list(poor_med_v = poor_med_v, lin = poor_medl$lin)
})
poor_med_v <- rbindlist(lapply(poormed_l, function(x) x[[1]]))
poor_med_m <- rbindlist(lapply(poormed_l, function(x) x[[2]]))
setnames(poor_med_m, names(poor_med_m), c(names(poor_med_id), var_name))
}
poor_med_m[is.na(poor_med_m)] <- 0
setkeyv(poor_med_m, names(poor_med_id))
return(list(quantile = quantile, threshold = threshold, value = poor_med_v, lin = poor_med_m))
}
## workhorse
linpoormedCalc <- function(inco, ids, wght, sort, ind, percentag, order_quants, quant_val) {
wt <- ind * wght
thres_val <- percentag / 100 * quant_val
N0 <- sum(wght) # Estimated whole population size
N <- sum(wt) # Estimated (sub)population size
poor <- (inco <= thres_val) * ind
inc1 <- inco[poor == 1]
wght1 <- wght[poor == 1]
sort1 <- sort[poor == 1]
rate_val <- sum(wt * poor) / N # Estimated poverty rate
rate_val_pr <- 100 * rate_val # Estimated poverty rate
poor_people_median <- incPercentile(Y = inc1,
weights = wght1,
sort = sort1,
Dom = NULL,
period = NULL,
k = order_quants,
dataset = NULL,
checking = FALSE)
poor_people_median <- poor_people_median[[paste0("x", order_quants)]]
#*************************************************************************************
#** LINEARIZATION OF THE MEDIAN INCOME BELOW THE POVERTY THRESHOLD **
#*************************************************************************************
h <- sqrt((sum(wght * inco * inco) - sum(wght * inco) * sum(wght * inco) / sum(wght)) / sum(wght)) / exp(0.2 * log(sum(wght)))
# h=S/N^(1/5)
#--------------------------------------------------
#----- LINEARIZATION OF THE POVERTY THRESHOLD -----
#--------------------------------------------------
u1 <- (quant_val - inco) / h
vect_f1 <- exp( -(u1^2) / 2) / sqrt(2 * pi)
f_quant1 <- sum(vect_f1 * wght)/(N0 * h)
lin_thres <- - (percentag / 100) * (1 / N0) * ((inco <= quant_val) - order_quants / 100) / f_quant1
# ---------------------------------------------
# ----- LINEARIZATION OF THE POVERTY RATE -----
# ---------------------------------------------
u2 <- (thres_val - inco) / h
vect_f2 <- exp(-(u2^2) / 2) / sqrt(2 * pi)
f_quant2 <- sum(vect_f2 * wt) / (N * h)
lin_rate <- (1 / N) * ind * ((inco <= thres_val) - rate_val) + f_quant2 * lin_thres
# --------------------------------------------------------
# ----- LINEARIZATION OF POOR PEOPLE'S MEDIAN INCOME -----
# --------------------------------------------------------
u3 <- (poor_people_median - inco) / h
vect_f3 <- exp(- (u3^2) / 2) / sqrt(2 * pi)
f_quant3 <- sum(vect_f3 * wt) / (N * h)
lin_median <- (0.5 * lin_rate - (1 / N) * ind * ((inco <= poor_people_median) - 0.5 * rate_val)) / f_quant3
lin_id <- data.table(ids, lin_median)
return(list(poor_people_median = poor_people_median, lin = lin_id))
}
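## Illustrative sketch (simulated data, not part of the package): the Gaussian-kernel
## estimate of the density at a quantile, f_quant = sum(dnorm((q - y)/h) * w) / (N * h)
## with bandwidth h = S / N^(1/5), used above (and in the other workhorses) to
## linearize quantiles, should be close to the true density for a large simulated sample.
local({
  set.seed(4)
  y <- rnorm(50000, mean = 100, sd = 10)
  w <- rep(1, length(y))
  N <- sum(w)
  h <- sqrt((sum(w * y * y) - sum(w * y)^2 / sum(w)) / sum(w)) / exp(0.2 * log(sum(w)))
  q <- median(y)
  f_quant <- sum(exp(-((q - y) / h)^2 / 2) / sqrt(2 * pi) * w) / (N * h)
  c(kernel_estimate = f_quant, true_density = dnorm(q, mean = 100, sd = 10))
})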
#' Linearization of the Quintile Share Ratio
#'
#' @description Estimate the Quintile Share Ratio, which is defined as the ratio of the sum of equalized disposable income received by the top 20\% to the sum of equalized disposable income received by the bottom 20\%, and its linearization.
#'
#'
#' @param Y Study variable (for example equalized disposable income). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param id Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param weight Optional weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param sort Optional variable to be used as tie-breaker for sorting. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, linearization of the income quantile share ratio is done for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers.
#' @param period Optional variable for survey period. If supplied, linearization of the income quantile share ratio is done for each time period. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param alpha a numeric value in range \eqn{[0,100]} for the order of the Quintile Share Ratio.
#' @param var_name A character specifying the name of the linearized variable.
#' @param checking Optional logical variable: if TRUE (the default), the function checks for data preparation errors; otherwise, no checks are made.
#'
#' @return A list with two objects is returned by the function:
#' \itemize{
#' \item \code{value} - a \code{data.table} containing the estimated Quintile Share Ratio by G. Osier and Eurostat papers.
#' \item \code{lin} - a \code{data.table} containing the linearized variables of the Quintile Share Ratio by G. Osier paper.
#' }
#'
#' @references
#'Working group on Statistics on Income and Living Conditions (2004) Common cross-sectional EU indicators based on EU-SILC; the gender pay gap. \emph{EU-SILC 131-rev/04}, Eurostat. \cr
#'Guillaume Osier (2009). Variance estimation for complex indicators of poverty and inequality. \emph{Journal of the European Survey Research Association}, Vol.3, No.3, pp. 167-195, ISSN 1864-3361, URL \url{https://ojs.ub.uni-konstanz.de/srm/article/view/369}. \cr
#'Jean-Claude Deville (1999). Variance estimation for complex statistics and estimators: linearization and residual techniques. Survey Methodology, 25, 193-203, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/1999002/article/4882-eng.pdf}. \cr
#'
#' @seealso \code{\link{incPercentile}},
#' \code{\link{varpoord}},
#' \code{\link{vardcrospoor}},
#' \code{\link{vardchangespoor}}
#'
#' @keywords Linearization
#'
#' @examples
#' library("data.table")
#' library("laeken")
#' data("eusilc")
#' dataset1 <- data.table(IDd = paste0("V", 1 : nrow(eusilc)), eusilc)
#'
#' # Full population
#' dd <- linqsr(Y = "eqIncome", id = "IDd",
#' weight = "rb050", Dom = NULL,
#' dataset = dataset1, alpha = 20)
#' dd$value
#'
#' \dontrun{
#' # By domains
#' dd <- linqsr(Y = "eqIncome", id = "IDd",
#' weight = "rb050", Dom = "db040",
#' dataset = dataset1, alpha = 20)
#' dd$value}
#'
#' @import data.table
#' @import laeken
#' @export linqsr
linqsr <- function(Y, id = NULL, weight = NULL,
sort = NULL, Dom = NULL, period = NULL,
dataset = NULL, alpha = 20,
var_name = "lin_qsr",
checking = TRUE) {
## initializations
if (min(dim(as.data.frame(var_name)) == 1) != 1) {
stop("'var_name' must have defined name of the linearized variable")}
if (checking) {
alpha <- check_var(vars = alpha, varn = "alpha",
varntype = "numeric0100")
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
ncols = 1, isnumeric = TRUE,
isvector = TRUE, grepls = "__")
Ynrow <- length(Y)
weight <- check_var(vars = weight, varn = "weight",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
sort <- check_var(vars = sort, varn = "sort",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
period <- check_var(vars = period, varn = "period",
dataset = dataset, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
duplicatednames = TRUE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
Ynrow = Ynrow, ischaracter = TRUE,
mustbedefined = FALSE, duplicatednames = TRUE,
grepls = "__")
id <- check_var(vars = id, varn = "id", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
periods = period)
}
## computations
ind0 <- rep.int(1, length(Y))
period_agg <- period1 <- NULL
if (!is.null(period)) { period1 <- copy(period)
period_agg <- data.table(unique(period))
} else period1 <- data.table(ind = ind0)
period1_agg <- data.table(unique(period1))
# QSR by domain (if requested)
QSR_id <- id
if (!is.null(period)) QSR_id <- data.table(QSR_id, period)
if (!is.null(Dom)) {
Dom_agg <- data.table(unique(Dom))
setkeyv(Dom_agg, names(Dom_agg))
QSR_v <- c()
QSR_m <- copy(QSR_id)
for(i in 1:nrow(Dom_agg)) {
g <- c(var_name, paste(names(Dom), as.matrix(Dom_agg[i,]), sep = "."))
var_nams <- do.call(paste, as.list(c(g, sep = "__")))
ind <- as.integer(rowSums(Dom == Dom_agg[i,][ind0,]) == ncol(Dom))
QSR_l <- lapply(1:nrow(period1_agg), function(j) {
indj <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
QSR_l <- linQSRCalc(income = Y[indj],
ids = QSR_id[indj],
weights = weight[indj],
sort = sort[indj],
ind = ind[indj],
alpha = alpha)
if (!is.null(period)) {
list(QSR = data.table(period_agg[j], Dom_agg[i], QSR_l$QSR), lin = QSR_l$lin)
} else list(QSR = data.table(Dom_agg[i], QSR_l$QSR), lin = QSR_l$lin)
})
QSRs <- rbindlist(lapply(QSR_l, function(x) x[[1]]))
QSRlin <- rbindlist(lapply(QSR_l, function(x) x[[2]]))
setnames(QSRlin, names(QSRlin), c(names(QSR_id), var_nams))
QSR_m <- merge(QSR_m, QSRlin, all.x = TRUE, by = names(QSR_id))
QSR_v <- rbind(QSR_v, QSRs)
}
} else { QSRl <- lapply(1:nrow(period1_agg), function(j) {
indj <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
QSR_l <- linQSRCalc(income = Y[indj],
ids = QSR_id[indj],
weights = weight[indj],
sort = sort[indj],
ind = ind0[indj],
alpha = alpha)
if (!is.null(period)) {
list(QSR = data.table(period_agg[j], QSR_l$QSR), lin = QSR_l$lin)
} else list(QSR = data.table(QSR_l$QSR), lin = QSR_l$lin)
})
QSR_v <- rbindlist(lapply(QSRl, function(x) x[[1]]))
QSR_m <- rbindlist(lapply(QSRl, function(x) x[[2]]))
setnames(QSR_m, names(QSR_m), c(names(QSR_id), var_name))
}
QSR_m[is.na(QSR_m)] <- 0
setkeyv(QSR_m, names(QSR_id))
return(list(value = QSR_v, lin = QSR_m))
}
linQSRCalc <- function(income, ids, weights = NULL, sort = NULL, ind = NULL, alpha) {
#--------------------------------------------------------------------------------
#----- COMPUTATION OF ESTIMATED VALUES OF THE NUMERATOR AND THE DENOMINATOR -----
#--------------------------------------------------------------------------------
   if (is.null(ind)) ind <- rep.int(1, length(income)) # default: whole population
alpha2 <- 100 - alpha
if (sum(weights) > 0 & sum(ind) > 0) {
quantile <- incPercentile(Y = income, weights = weights,
sort = sort, Dom = data.table(ind),
period = NULL, k = c(alpha, alpha2),
dataset = NULL, checking = FALSE)
quant_inf <- quantile[ind == 1][[paste0("x", alpha)]]
quant_sup <- quantile[ind == 1][[paste0("x", alpha2)]]
wght <- weights * ind
v <- weights * income * ind
indinf <- (income <= quant_inf)
indsup <- (income > quant_sup)
num_eu <- sum(v * indsup) / sum(wght[indsup]) # Numerator
den_eu <- sum(v * indinf) / sum(wght[indinf]) # Denominator
num <- sum(v * indsup) # Numerator
den <- sum(v * indinf) # Denominator
QSR <- num / den
QSR_eu <- num_eu / den_eu
#**********************************************************************
#* LINEARIZATION OF THE INCOME QUANTILE SHARE RATIO *
#**********************************************************************
#----------------------------------------------
#----- LINEARIZATION OF THE TWO QUANTILES -----
#----------------------------------------------
N <- sum(wght) # Estimated (sub)population size
h <- sqrt((sum(wght * income * income) - sum(wght * income) * sum(wght * income) / sum(wght)) / sum(wght)) / exp(0.2 * log(sum(wght)))
# h=S/N^(1/5)
# 1. Linearization of the bottom quantile
u1 <- (quant_inf - income) / h;
vect_f1 <- exp(-(u1^2) / 2) / sqrt(2 * pi)
f_quant1 <- sum(vect_f1 * wght) / (N * h)
lin_inf <- -(1 / N) * ((income <= quant_inf) - alpha / 100) / f_quant1
# 2. Linearization of the top quantile
u2 <- (quant_sup - income) / h
vect_f2 <- exp( - (u2^2) / 2) / sqrt(2 * pi)
f_quant2 <- sum(vect_f2 * wght) / (N * h)
lin_sup <- - (1 / N) * ((income <= quant_sup) - alpha2 / 100) / f_quant2
# 3. Linearization of the total income for the top quantile
u3 <- (quant_sup - income) / h
vect_f3 <- exp(- (u3^2) / 2) / sqrt(2 * pi)
f_quant3 <- sum(vect_f3 * v) / h
lin_num <- ind * (income - income * (income <= quant_sup) - f_quant3 * lin_sup)
# 4. Linearization of the total income for the bottom quantile
u4 <- (quant_inf - income) / h
vect_f4 <- exp( - (u4^2) / 2) / sqrt(2 * pi)
f_quant4 <- sum(vect_f4 * v) / h
lin_den <- ind * (income * (income <= quant_inf) + f_quant4 * lin_inf)
#****************************************************************************
# LINEARIZED VARIABLE OF THE QUANTILE SHARE RATIO
#****************************************************************************
lin <- (den * lin_num - num * lin_den) / (den * den)
} else { QSR <- lin <- 0
QSR_eu <- NaN }
if (is.nan(QSR)) QSR <- lin <- 0
lin_id <- data.table(ids, lin = lin)
QSR <- data.table(QSR = QSR, QSR_eu = QSR_eu)
return(list(QSR = QSR, lin = lin_id))
}
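## Illustrative sketch (toy unweighted data, not part of the package): the two
## estimates computed above differ in that QSR is a ratio of income totals while
## QSR_eu is a ratio of mean incomes in the top and bottom groups. Plain empirical
## quantiles are used here instead of incPercentile(), so the cut points are only
## approximate.
local({
  set.seed(5)
  inc <- rlnorm(1000, meanlog = 9, sdlog = 0.6)
  q20 <- quantile(inc, 0.20)
  q80 <- quantile(inc, 0.80)
  bottom <- inc[inc <= q20]
  top <- inc[inc > q80]
  c(QSR = sum(top) / sum(bottom),        # ratio of totals, as in num / den
    QSR_eu = mean(top) / mean(bottom))   # ratio of means, as in num_eu / den_eu
})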
#' Linearization of the relative median income ratio
#'
#' @description Estimates the relative median income ratio (defined as the ratio of the median equivalised disposable income of people aged above 65 to the median equivalised disposable income of those aged below 65) and computes the linearized variable for variance estimation.
#'
#' @param Y Study variable (for example equalized disposable income). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param id Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param weight Optional weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param age Age variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param sort Optional variable to be used as tie-breaker for sorting. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, linearization of the relative median income ratio is done for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers as numeric vector.
#' @param period Optional variable for survey period. If supplied, linearization of the relative median income ratio is done for each survey period. Object convertible to \code{data.table} or variable names as character, column numbers as numeric vector.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param order_quant A numeric value in range \eqn{\left[ 0,100 \right]}{[0,100]} for \eqn{\alpha} in the formula for the income quantile computation:
#' \deqn{Z_{\frac{\alpha}{100}}.}{Z(\alpha/100).}
#' For example, to compute the relative median income ratio from the median income of each age group, \eqn{\alpha} should be set equal to 50.
#' @param var_name A character specifying the name of the linearized variable.
#' @param checking Optional logical variable: if TRUE (the default), the function checks for data preparation errors; otherwise, no checks are made.
#'
#'
#' @details The implementation strictly follows the Eurostat definition.
#'
#' @return A list with two objects is returned:
#' \itemize{
#' \item \code{value} - a \code{data.table} containing the estimated relative median income ratio.
#' \item \code{lin} - a \code{data.table} containing the linearized variables of the relative median income ratio.
#' }
#'
#' @references
#'Working group on Statistics on Income and Living Conditions (2015) Task 5 - Improvement and optimization of calculation of net change. \emph{LC- 139/15/EN}, Eurostat. \cr
#'Jean-Claude Deville (1999). Variance estimation for complex statistics and estimators: linearization and residual techniques. Survey Methodology, 25, 193-203, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/1999002/article/4882-eng.pdf}. \cr
#'
#' @keywords Linearization
#' @seealso \code{\link{varpoord}},
#' \code{\link{vardcrospoor}},
#' \code{\link{vardchangespoor}}
#'
#' @examples
#' library("laeken")
#' library("data.table")
#' data("eusilc")
#' dataset1 <- data.table(IDd = paste0("V", 1 : nrow(eusilc)), eusilc)
#'
#' # Full population
#' d <- linrmir(Y = "eqIncome", id = "IDd", age = "age",
#' weight = "rb050", Dom = NULL,
#' dataset = dataset1, order_quant = 50L)
#' d$value
#'
#' \dontrun{
#' # By domains
#' dd <- linrmir(Y = "eqIncome", id = "IDd", age = "age",
#' weight = "rb050", Dom = "db040",
#' dataset = dataset1, order_quant = 50L)
#' dd}
#'
#' @import data.table
#' @import laeken
#'
#' @export linrmir
linrmir <- function(Y, id = NULL, age, weight = NULL,
sort = NULL, Dom = NULL, period = NULL,
dataset = NULL, order_quant = 50,
var_name = "lin_rmir", checking = TRUE) {
## initializations
if (min(dim(data.table(var_name)) == 1) != 1) {
stop("'var_name' must have defined one name of the linearized variable")}
if (checking) {
order_quant <- check_var(vars = order_quant, varn = "order_quant",
varntype = "numeric0100")
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
ncols = 1, isnumeric = TRUE,
isvector = TRUE, grepls = "__")
Ynrow <- length(Y)
age <- check_var(vars = age, varn = "age",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
weight <- check_var(vars = weight, varn = "weight",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
sort <- check_var(vars = sort, varn = "sort",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
period <- check_var(vars = period, varn = "period",
dataset = dataset, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
duplicatednames = TRUE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
Ynrow = Ynrow, ischaracter = TRUE,
mustbedefined = FALSE, duplicatednames = TRUE,
grepls = "__")
id <- check_var(vars = id, varn = "id", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
periods = period)
}
## computations
ind0 <- rep.int(1, length(Y))
period_agg <- period1 <- NULL
if (!is.null(period)) { period1 <- copy(period)
period_agg <- data.table(unique(period))
} else period1 <- data.table(ind=ind0)
period1_agg <- data.table(unique(period1))
# RMIR by domain (if requested)
age_under_65s <- data.table(age_under_65s = as.integer(age < 65))
if (!is.null(Dom)) age_under_65s <- data.table(age_under_65s, Dom)
quantile <- incPercentile(Y = Y,
weights = weight,
sort = sort,
Dom = age_under_65s,
period = period,
k = order_quant,
dataset = NULL,
checking = TRUE)
quantile_under_65 <- quantile[age_under_65s == 1][, age_under_65s := NULL]
quantile_over_65 <- quantile[age_under_65s == 0][, age_under_65s := NULL]
setnames(quantile_under_65, names(quantile_under_65)[ncol(quantile_under_65)], "quantile_under_65")
setnames(quantile_over_65, names(quantile_over_65)[ncol(quantile_over_65)], "quantile_over_65")
sk <- length(names(quantile_under_65)) - 1
if (sk > 0) {
setkeyv(quantile_under_65, names(quantile_under_65)[1 : sk])
setkeyv(quantile_over_65, names(quantile_over_65)[1 : sk])
quantile <- merge(quantile_under_65, quantile_over_65, all = TRUE)
} else quantile <- data.table(quantile_under_65, quantile_over_65)
rmir_id <- id
age_under_65s <- age_under_65s[["age_under_65s"]]
if (!is.null(period)) rmir_id <- data.table(rmir_id, period)
if (!is.null(Dom)) {
Dom_agg <- data.table(unique(Dom))
setkeyv(Dom_agg, names(Dom_agg))
rmir_v <- c()
rmir_m <- copy(rmir_id)
for(i in 1:nrow(Dom_agg)) {
g <- c(var_name, paste(names(Dom), as.matrix(Dom_agg[i,]), sep = "."))
var_nams <- do.call(paste, as.list(c(g, sep = "__")))
ind <- as.integer(rowSums(Dom == Dom_agg[i,][ind0,]) == ncol(Dom))
rmirl <- lapply(1 : nrow(period1_agg), function(j) {
if (!is.null(period)) {
rown <- cbind(period_agg[j], Dom_agg[i])
setkeyv(rown, names(rown))
rown2 <- copy(rown)
rown <- merge(rown, quantile, all.x = TRUE)
} else {rown <- quantile[i]
rown2 <- Dom_agg[i] }
indj <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
rmir_l <- rmirlinCalc(Y1 = Y[indj],
ids = rmir_id[indj],
wght = weight[indj],
indicator = ind[indj],
order_quants = order_quant,
age_under_65 = age_under_65s[indj],
quant_under_65 = rown[["quantile_under_65"]],
quant_over_65 = rown[["quantile_over_65"]])
list(rmir = data.table(rown2, rmir = rmir_l$rmir_val), lin = rmir_l$lin)
})
rmirs <- rbindlist(lapply(rmirl, function(x) x[[1]]))
rmirlin <- rbindlist(lapply(rmirl, function(x) x[[2]]))
setnames(rmirlin, names(rmirlin), c(names(rmir_id), var_nams))
rmir_m <- merge(rmir_m, rmirlin, all.x = TRUE, by = names(rmir_id))
rmir_v <- rbind(rmir_v, rmirs)
}
} else { rmirl <- lapply(1:nrow(period1_agg), function(j) {
if (!is.null(period)) {
rown <- period_agg[j]
rown <- merge(rown, quantile, all.x = TRUE,
by = names(rown))
} else rown <- quantile
ind2 <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
rmir_l <- rmirlinCalc(Y1 = Y[ind2],
ids = rmir_id[ind2],
wght = weight[ind2],
indicator = ind0[ind2],
order_quants = order_quant,
age_under_65 = age_under_65s[ind2],
quant_under_65 = rown[["quantile_under_65"]],
quant_over_65 = rown[["quantile_over_65"]])
if (!is.null(period)) {
rmirs <- data.table(period_agg[j], rmir = rmir_l$rmir_val)
} else rmirs <- data.table(rmir = rmir_l$rmir_val)
list(rmir = rmirs, lin = rmir_l$lin)
})
rmir_v <- rbindlist(lapply(rmirl, function(x) x[[1]]))
rmir_m <- rbindlist(lapply(rmirl, function(x) x[[2]]))
setnames(rmir_m, names(rmir_m), c(names(rmir_id), var_name))
}
rmir_m[is.na(rmir_m)] <- 0
setkeyv(rmir_m, names(rmir_id))
return(list(value = rmir_v, lin = rmir_m))
}
## workhorse
rmirlinCalc <- function(Y1, ids, wght, indicator, order_quants, age_under_65, quant_under_65, quant_over_65) {
dom1 <- (age_under_65 == 1) * indicator
dom2 <- (age_under_65 == 0) * indicator
# Size of the domains
N1 <- sum(wght * dom1)
N2 <- sum(wght * dom2)
rmir_val <- quant_over_65 / quant_under_65 # Estimated relative median income ratio
  # Bandwidth parameter - h=S/N^(1/5) (calculated over the whole population)
h <- sqrt((sum(wght * Y1 * Y1) - sum(wght * Y1) * sum(wght * Y1) / sum(wght)) / sum(wght)) / exp(0.2 * log(sum(wght)))
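  # 'h' is used below as the Gaussian-kernel bandwidth when estimating the density F'(.)
  # at the two medians (vect_f1, vect_f2 are standard normal kernels evaluated at (q - Y)/h)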
#---- 1. Linearization of the median income of people aged below 65 ----
u1 <- (quant_under_65 - Y1) / h
vect_f1 <- exp(-(u1^2) / 2) / sqrt(2 * pi)
f_quant1 <- sum(vect_f1 * wght * dom1) / (N1 * h) # Estimate of F'(quantile)
lin_quant_under_65 <- - (1 / N1) * dom1 * ((Y1 <= quant_under_65) - order_quants / 100) / f_quant1 # Linearized variable
#---- 2. Linearization of the median income of people aged above 65 -----
u2 <- (quant_over_65 - Y1) / h
vect_f2 <- exp(-(u2^2) / 2) / sqrt(2 * pi)
f_quant2 <- sum(vect_f2 * wght * dom2) / (N2 * h) # Estimate of F'(quantile)
lin_quant_over_65 <- -(1 / N2) * dom2 * ((Y1 <= quant_over_65) - order_quants / 100) / f_quant2 # Linearized variable
#********************************************************************************
# 3. Linearization of the relative median income ratio *
#********************************************************************************
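  # Delta-method linearization of the ratio R = quant_over_65 / quant_under_65:
  # dR = d(quant_over_65) / quant_under_65 - quant_over_65 * d(quant_under_65) / quant_under_65^2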
lin <- (quant_under_65 * lin_quant_over_65 - quant_over_65 * lin_quant_under_65) / (quant_under_65 * quant_under_65)
lin_id <- data.table(ids, lin)
return(list(rmir_val = rmir_val, lin = lin_id))
}
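## Illustrative, commented-out sketch (not part of the package workflow): calling
## rmirlinCalc() directly on toy data. The weighted medians supplied as
## 'quant_under_65'/'quant_over_65' are computed here with laeken::weightedMedian
## purely as an assumption; linrmir() itself obtains them via incPercentile().
# library(data.table)
# library(laeken)
# set.seed(1)
# inc <- rchisq(100, 3) * 1000
# age <- sample(20:90, 100, replace = TRUE)
# w   <- rep(2, 100)
# res <- rmirlinCalc(Y1 = inc, ids = data.table(id = 1:100), wght = w,
#                    indicator = rep(1L, 100), order_quants = 50,
#                    age_under_65 = as.integer(age < 65),
#                    quant_under_65 = weightedMedian(inc[age < 65], w[age < 65]),
#                    quant_over_65 = weightedMedian(inc[age >= 65], w[age >= 65]))
# res$rmir_val  # estimated ratio of the two medians
# head(res$lin) # per-unit linearized variable used for variance estimation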
|
/scratch/gouwar.j/cran-all/cranData/vardpoor/R/linrmir.R
|
#' Linearization of the relative median at-risk-of-poverty gap
#'
#' @description Estimates the relative median at-risk-of-poverty gap, defined as the relative difference between the median equalized disposable income of persons below the at-risk-of-poverty threshold and the threshold itself (expressed as a percentage of the at-risk-of-poverty threshold), and computes its linearization.
#'
#' @param Y Study variable (for example equalized disposable income). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param id Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param weight Optional weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param sort Optional variable to be used as tie-breaker for sorting. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, linearization of the relative median at-risk-of-poverty gap is done for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers.
#' @param period Optional variable for survey period. If supplied, linearization of the relative median at-risk-of-poverty gap is done for each time period. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param percentage A numeric value in range \eqn{[0,100]} for \eqn{p} in the formula for poverty threshold computation:
#' \deqn{\frac{p}{100} \cdot Z_{\frac{\alpha}{100}}.}{p/100 * Z(\alpha/100).}
#'For example, to compute poverty threshold equal to 60\% of some income quantile, \eqn{p} should be set equal to 60.
#' @param order_quant A numeric value in range \eqn{[0,100]} for \eqn{\alpha} in the formula for poverty threshold computation:
#' \deqn{\frac{p}{100} \cdot Z_{\frac{\alpha}{100}}.}{p/100 * Z(\alpha/100).}
#'For example, to compute poverty threshold equal to some percentage of median income, \eqn{\alpha} should be set equal to 50.
#' @param var_name A character specifying the name of the linearized variable.
#' @param checking Optional logical value. If \code{TRUE} (the default), the function checks the data for preparation errors; otherwise no checks are performed.
#'
#' @return A list with objects returned by the function:
#' \itemize{
#'  \item \code{quantile} - a \code{data.table} containing the estimated value of the income quantile used for the at-risk-of-poverty threshold computation.
#'  \item \code{threshold} - a \code{data.table} containing the estimated at-risk-of-poverty threshold.
#'  \item \code{value} - a \code{data.table} containing the estimated relative median at-risk-of-poverty gap (in percentage).
#'  \item \code{lin} - a \code{data.table} containing the linearized variables of the relative median at-risk-of-poverty gap (in percentage).
#' }
#'
#'
#' @references
#'Working group on Statistics on Income and Living Conditions (2004) Common cross-sectional EU indicators based on EU-SILC; the gender pay gap. \emph{EU-SILC 131-rev/04}, Eurostat. \cr
#'Guillaume Osier (2009). Variance estimation for complex indicators of poverty and inequality. \emph{Journal of the European Survey Research Association}, Vol.3, No.3, pp. 167-195, ISSN 1864-3361, URL \url{https://ojs.ub.uni-konstanz.de/srm/article/view/369}. \cr
#'Jean-Claude Deville (1999). Variance estimation for complex statistics and estimators: linearization and residual techniques. Survey Methodology, 25, 193-203, URL \url{https://www150.statcan.gc.ca/n1/pub/12-001-x/1999002/article/4882-eng.pdf}. \cr
#'
#' @seealso \code{\link{linarpt}},
#' \code{\link{linarpr}},
#' \code{\link{linpoormed}},
#' \code{\link{varpoord}},
#' \code{\link{vardcrospoor}},
#' \code{\link{vardchangespoor}}
#' @keywords Linearization
#'
#'
#' @examples
#' library("data.table")
#' library("laeken")
#' data("eusilc")
#' dataset1 <- data.table(IDd = paste0("V", 1 : nrow(eusilc)), eusilc)
#'
#' # Full population
#' d <- linrmpg(Y = "eqIncome", id = "IDd",
#' weight = "rb050", Dom = NULL,
#' dataset = dataset1, percentage = 60,
#' order_quant = 50L)
#' d$value
#' d$threshold
#'
#' \dontrun{
#' # By domains
#' dd <- linrmpg(Y = "eqIncome", id = "IDd",
#' weight = "rb050", Dom = "db040",
#' dataset = dataset1, percentage = 60,
#' order_quant = 50L)
#' dd$value}
#'
#' @import data.table
#' @import laeken
#'
#' @export linrmpg
linrmpg <- function(Y, id = NULL, weight = NULL, sort = NULL,
Dom = NULL, period = NULL, dataset = NULL,
percentage = 60, order_quant = 50,
var_name = "lin_rmpg", checking = TRUE) {
## initializations
if (min(dim(as.data.frame(var_name)) == 1) != 1) {
stop("'var_name' must have defined name of the linearized variable")}
if (checking) {
percentage <- check_var(vars = percentage, varn = "percentage",
varntype = "numeric0100")
order_quant <- check_var(vars = order_quant, varn = "order_quant",
varntype = "numeric0100")
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
ncols = 1, isnumeric = TRUE,
isvector = TRUE, grepls = "__")
Ynrow <- length(Y)
weight <- check_var(vars = weight, varn = "weight",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
sort <- check_var(vars = sort, varn = "sort",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
period <- check_var(vars = period, varn = "period",
dataset = dataset, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
duplicatednames = TRUE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
Ynrow = Ynrow, ischaracter = TRUE,
mustbedefined = FALSE, duplicatednames = TRUE,
grepls = "__")
id <- check_var(vars = id, varn = "id", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
periods = period)
}
## computations
ind0 <- rep.int(1, length(Y))
period_agg <- period1 <- NULL
if (!is.null(period)) { period1 <- copy(period)
period_agg <- data.table(unique(period))
} else period1 <- data.table(ind = ind0)
period1_agg <- data.table(unique(period1))
# Relative median at-risk-of-poverty gap by domain (if requested)
quantile <- incPercentile(Y = Y,
weights = weight,
sort = sort,
Dom = NULL,
period = period,
k = order_quant,
dataset = NULL,
checking = FALSE)
setnames(quantile, names(quantile)[ncol(quantile)], "quantile")
if (ncol(quantile)>1) setkeyv(quantile, head(names(quantile), -1))
threshold <- copy(quantile)
threshold[, threshold := percentage / 100 * quantile]
threshold[, quantile := NULL]
rmpgap_id <- id
if (!is.null(period)) rmpgap_id <- data.table(period, rmpgap_id)
if (!is.null(Dom)) {
Dom_agg <- data.table(unique(Dom))
setkeyv(Dom_agg, names(Dom))
rmpgap_v <- c()
rmpgap_m <- copy(rmpgap_id)
for(i in 1:nrow(Dom_agg)) {
g <- c(var_name, paste(names(Dom), as.matrix(Dom_agg[i,]), sep = "."))
var_nams <- do.call(paste, as.list(c(g, sep = "__")))
ind <- as.integer(rowSums(Dom == Dom_agg[i,][ind0,]) == ncol(Dom))
rmpgapl <- lapply(1 : nrow(period1_agg), function(j) {
if (!is.null(period)) {
rown <- cbind(period_agg[j], Dom_agg[i])
setkeyv(rown, names(rown))
rown2 <- copy(rown)
rown <- merge(rown, quantile, all.x = TRUE)
} else { rown <- quantile
rown2 <- Dom_agg[i] }
indj <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
rmpgap_l <- linrmpgCalc(inco = Y[indj],
ids = rmpgap_id[indj],
wght = weight[indj],
sort = sort[indj],
ind = ind[indj],
percentag = percentage,
order_quants = order_quant,
quant_val = rown[["quantile"]])
list(rmpgap = data.table(rown2, rmpgap = rmpgap_l$rmpgap),
lin = rmpgap_l$lin)
})
rmpgaps <- rbindlist(lapply(rmpgapl, function(x) x[[1]]))
rmpgaplin <- rbindlist(lapply(rmpgapl, function(x) x[[2]]))
setnames(rmpgaplin, names(rmpgaplin), c(names(rmpgap_id), var_nams))
rmpgap_m <- merge(rmpgap_m, rmpgaplin, all.x = TRUE, by = names(rmpgap_id))
rmpgap_v <- rbind(rmpgap_v, rmpgaps)
}
} else {rmpgap_l <- lapply(1 : nrow(period1_agg), function(j) {
if (!is.null(period)) {
rown <- period_agg[j]
setkeyv(rown, names(rown))
rown <- merge(rown, quantile, all.x = TRUE)
} else rown <- quantile
indj <- (rowSums(period1 == period1_agg[j,][ind0,]) == ncol(period1))
rmpgapl <- linrmpgCalc(inco = Y[indj],
ids = rmpgap_id[indj],
wght = weight[indj],
sort = sort[indj],
ind = ind0[indj],
percentag = percentage,
order_quants = order_quant,
quant_val = rown[["quantile"]])
if (!is.null(period)) {
rmpgap_v <- data.table(period_agg[j], rmpgap = rmpgapl$rmpgap)
} else rmpgap_v <- data.table(rmpgap = rmpgapl$rmpgap)
list(rmpgap_v = rmpgap_v, lin = rmpgapl$lin)
})
rmpgap_v <- rbindlist(lapply(rmpgap_l, function(x) x[[1]]))
rmpgap_m <- rbindlist(lapply(rmpgap_l, function(x) x[[2]]))
setnames(rmpgap_m, names(rmpgap_m), c(names(rmpgap_id), var_name))
}
rmpgap_m[is.na(rmpgap_m)] <- 0
setkeyv(rmpgap_m, names(rmpgap_id))
return(list(quantile = quantile, threshold = threshold, value = rmpgap_v, lin = rmpgap_m))
}
## workhorse
linrmpgCalc <- function(inco, ids, wght, sort, ind, percentag, order_quants, quant_val) {
wt <- ind * wght
thres_val <- percentag / 100 * quant_val
N0 <- sum(wght) # Estimated whole population size
N <- sum(wt) # Estimated (sub)population size
poor <- (inco <= thres_val) * ind
inc1 <- inco[poor == 1]
wght1 <- wght[poor == 1]
sort1 <- sort[poor == 1]
rate_val <- sum(wt * poor) / N # Estimated poverty rate
rate_val_pr <- 100 * rate_val # Estimated poverty rate
poor_people_median <- incPercentile(Y = inc1,
weights = wght1,
sort = sort1,
Dom = NULL,
period = NULL,
k = order_quants,
dataset = NULL,
checking = FALSE)
poor_people_median <- poor_people_median[[paste0("x", order_quants)]]
#*************************************************************************************
#** LINEARIZATION OF THE MEDIAN INCOME BELOW THE POVERTY THRESHOLD **
#*************************************************************************************
h <- sqrt((sum(wght * inco * inco) - sum(wght * inco) * sum(wght * inco) / sum(wght)) / sum(wght)) / exp(0.2 * log(sum(wght)))
# h=S/N^(1/5)
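  # 'h' is the common Gaussian-kernel bandwidth used for the three density
  # estimates below (poverty threshold, poverty rate and poor people's median)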
#--------------------------------------------------
#----- LINEARIZATION OF THE POVERTY THRESHOLD -----
#--------------------------------------------------
u1 <- (quant_val - inco) / h
vect_f1 <- exp(-(u1^2) / 2)/sqrt(2 * pi)
f_quant1 <- sum(vect_f1 * wght)/(N0 * h)
lin_thres <- - (percentag / 100) * (1 / N0) * ((inco <= quant_val) - order_quants / 100) / f_quant1
# ---------------------------------------------
# ----- LINEARIZATION OF THE POVERTY RATE -----
# ---------------------------------------------
u2 <- (thres_val - inco) / h
vect_f2 <- exp(-(u2^2) / 2) / sqrt(2 * pi)
f_quant2 <- sum(vect_f2 * wt) / (N * h)
lin_rate <- (1 / N) * ind * ((inco <= thres_val) - rate_val) + f_quant2 * lin_thres
# --------------------------------------------------------
# ----- LINEARIZATION OF POOR PEOPLE'S MEDIAN INCOME -----
# --------------------------------------------------------
u3 <- (poor_people_median - inco) / h
vect_f3 <- exp(-(u3^2) / 2) / sqrt(2 * pi)
f_quant3 <- sum(vect_f3 * wt) / (N * h)
lin_median <- (0.5 * lin_rate - (1 / N) * ind * ((inco <= poor_people_median) - 0.5 * rate_val)) / f_quant3
#*****************************************************************************************
# LINEARIZED VARIABLE OF THE RELATIVE MEDIAN GAP (IN %) *
#*****************************************************************************************
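  # Delta-method linearization of rmpgap = 100 * (1 - poor_people_median / thres_val):
  # d(rmpgap) = 100 * (poor_people_median * d(thres_val) / thres_val^2 - d(poor_people_median) / thres_val)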
lin_gap <- 100 * (poor_people_median * lin_thres / (thres_val * thres_val) - lin_median / thres_val)
rmpgap <- 100 - 100 * poor_people_median / thres_val
lin_id <- data.table(ids, lin_gap)
return(list(rmpgap = rmpgap, lin = lin_id))
}
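## Quick, hypothetical numeric check of the point estimate returned above:
## rmpgap = 100 - 100 * poor_people_median / thres_val, i.e. the median shortfall
## of the poor expressed as a percentage of the at-risk-of-poverty threshold.
# thres <- 10000; med_poor <- 7500
# 100 - 100 * med_poor / thres  # 25: the poor's median income is 25% below the threshold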
|
/scratch/gouwar.j/cran-all/cranData/vardpoor/R/linrmpg.R
|
#' Residual estimation of calibration
#'
#' @description Computes the estimation residuals of calibration.
#'
#' @param Y Matrix of the variable of interest.
#' @param X Matrix of the auxiliary variables for the calibration estimator. This is the matrix of the sample calibration variables.
#' @param weight Weight variable. One dimensional object convertible to one-column \code{data.frame}.
#' @param q Variable of the positive values accounting for heteroscedasticity. One dimensional object convertible to one-column \code{data.frame}.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param checking Optional logical value. If \code{TRUE} (the default), the function checks the data for preparation errors; otherwise no checks are performed.
#'
#' @return A list with objects returned by the function:
#' \itemize{
#' \item \code{residuals} - a numeric \code{data.table} containing the estimated residuals of calibration.
#' \item \code{betas} - a numeric \code{data.table} containing the estimated coefficients of calibration.
#' }
#'
#' @details
#' The function implements the following estimator:
#' \deqn{e_k=Y_k-X_k^{'}B }
#'where
#'\deqn{\hat{B} = \left(\sum_{s} weight_k q_k X_k X^{'}_{k} \right)^{-1} \left(\sum_{s} weight_k q_k X_k Y_k \right)}.
#'
#' @references
#'Sixten Lundstrom and Carl-Erik Sarndal. Estimation in the presence of Nonresponse and Frame Imperfections. Statistics Sweden, 2001, p. 43-44.
#'
#' @seealso \code{\link{domain}}, \code{\link{lin.ratio}}, \code{\link{linarpr}},
#' \code{\link{linarpt}}, \code{\link{lingini}}, \code{\link{lingini2}},
#' \code{\link{lingpg}}, \code{\link{linpoormed}}, \code{\link{linqsr}},
#' \code{\link{linrmpg}}, \code{\link{vardom}}, \code{\link{vardomh}},
#' \code{\link{varpoord}}, \code{\link{variance_est}}, \code{\link{variance_othstr}}
#'
#' @keywords survey
#'
#'
#' @examples
#' Y <- matrix(rchisq(10, 3), 10, 1)
#' X <- matrix(rchisq(20, 3), 10, 2)
#' w <- rep(2, 10)
#' q <- rep(1, 10)
#' residual_est(Y, X, w, q)
#'
#' ### Test2
#' Y <- matrix(rchisq(10, 3), 10, 1)
#' X <- matrix(c(rchisq(10, 2), rchisq(10, 2) + 10), 10, 2)
#' w <- rep(2, 10)
#' q <- rep(1, 10)
#' residual_est(Y, X, w, q)
#' as.matrix(lm(Y ~ X - 1, weights = w * q)$residuals)
#'
#' @export residual_est
#'
#' @import data.table
#' @import MASS
#' @import stats
#' @import utils
residual_est <- function (Y, X, weight, q, dataset = NULL, checking = TRUE) {
if (checking) {
Y <- check_var(vars = Y, varn = "Y_residual", dataset = dataset,
check.names = TRUE, isnumeric = TRUE)}
Ynrow <- nrow(Y)
Yncol <- ncol(Y)
Y <- as.data.frame.matrix(Y)
if (checking) {
X <- check_var(vars = X, varn = "X", dataset = dataset,
check.names = TRUE, Ynrow = Ynrow,
isnumeric = TRUE, grepls = "__")
weight <- check_var(vars = weight, varn = "weight", dataset = dataset,
ncols = 1, Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
q <- check_var(vars = q, varn = "q", dataset = dataset,
ncols = 1, Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
}
X <- as.matrix(X)
ee <- as.data.frame(matrix(NA, Ynrow, Yncol))
ws <- weight * q
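  # The next two lines build matr = (X'WX)^{-1} X'W with W = diag(ws), so that
  # matr %*% y gives the calibration coefficients; MASS::ginv() (Moore-Penrose
  # inverse) is used so nearly collinear auxiliary variables do not break the solve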
B <- t(X * ws)
matr <- ginv(B %*% X) %*% B
B <- ws <- q <- weight <- NULL
betas <- c()
for (i in 1:ncol(Y)) {
beta <- matr %*% Y[, i]
ee[, i] <- Y[, i] - X %*% beta
betas <- rbind(betas, data.frame(Yname = names(Y)[i], t(beta)))
}
names(ee) <- names(Y)
Y <- X <- beta <- NULL
return(list(residuals = data.table(ee),
betas = data.table(betas)))
}
|
/scratch/gouwar.j/cran-all/cranData/vardpoor/R/residual_est.R
|
#' The estimation of variance under simple random sampling
#'
#' @description Computes the variance estimation under simple random sampling.
#'
#' @param Y The variables of interest.
#' @param w Weight variable. One dimensional object convertible to one-column \code{data.frame}.
#'
#' @return A list with objects returned by the function:
#' \itemize{
#'   \item \code{S2p} - a \code{data.table} containing the estimated population variance.
#'   \item \code{varsrs} - a \code{data.table} containing the estimated variance under simple random sampling.
#' }
#'
#' @references
#'Yves G. Berger, Tim Goedeme, Guillaume Osier (2013). Handbook on standard error estimation and other related sampling issues in EU-SILC, URL \url{https://ec.europa.eu/eurostat/cros/content/handbook-standard-error-estimation-and-other-related-sampling-issues-ver-29072013_en}
#'
#' @seealso \code{\link{vardom}},
#' \code{\link{vardomh}},
#' \code{\link{varpoord}}
#'
#' @keywords variance
#'
#' @examples
#' Ys <- matrix(rchisq(10, 3), 10, 1)
#' ws <- c(rep(2, 5), rep(3, 5))
#' var_srs(Ys, ws)
#'
#' @import data.table
#' @import surveyplanning
#'
#' @export var_srs
var_srs <- function(Y, w = rep(1, length(Y))){
### Checking
# Y
Y <- data.table(Y, check.names = TRUE)
n <- nrow(Y)
if (anyNA(Y)) print("'Y' has missing values")
if (!all(sapply(Y, is.numeric))) stop("'Y' must be numerical")
# w
w <- data.frame(w)
if (anyNA(w)) stop("'w' has missing values")
  if (nrow(w) != n) stop("'w' must have the same number of rows as 'Y'")
  if (ncol(w) != 1) stop("'w' must be a vector or a 1 column data.frame, matrix or data.table")
w <- w[, 1]
if (!is.numeric(w)) stop("'w' must be numeric")
### Calculation
# N
Nn <- sum(w)
konst <- Nn^2 * (1 - n / Nn) / n
s2p <- Y[, lapply(.SD, function(x) s2(x, w))]
varsrs <- konst * s2p
return(list(S2p = s2p, varsrs = varsrs))
}
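## Minimal commented-out sketch (assuming surveyplanning::s2 is available, as it is
## through the package imports): the returned 'varsrs' equals the textbook SRSWOR
## variance N^2 * (1 - n/N) * S^2 / n, with N estimated by the sum of the weights.
# library(surveyplanning)
# Ys <- rchisq(10, 3); ws <- rep(3, 10)
# N <- sum(ws); n <- length(Ys)
# all.equal(var_srs(Ys, ws)$varsrs[[1]], N^2 * (1 - n / N) / n * s2(Ys, ws))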
|
/scratch/gouwar.j/cran-all/cranData/vardpoor/R/var_srs.R
|
#' Variance estimation for measures of annual net change or annual estimates for single and multistage cluster sampling designs
#'
#' @description Computes the variance estimation for measures of annual net change or annual estimates for single and multistage cluster sampling designs.
#'
#' @param Y Variables of interest. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param H The unit stratum variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param PSU Primary sampling unit variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param w_final Weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param ID_level1 Variable for level1 ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param ID_level2 Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, variables are calculated for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers.
#' @param Z Optional variables of denominator for ratio estimation. If supplied, the ratio estimation is computed. Object convertible to \code{data.table} or variable names as character, column numbers. This variable is \code{NULL} by default.
#' @param gender Numerical variable for gender, where 1 is for males and 2 is for females. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param country Variable for the survey countries. The values for each country are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param years Variable for the all survey years. The values for each year are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param subperiods Variable for the all survey sub-periods. The values for each sub-period are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param year1 The vector of years from variable \code{years} describes the first year for measures of annual net change.
#' @param year2 The vector of years from variable \code{years} describes the second year for measures of annual net change.
#' @param X Optional matrix of the auxiliary variables for the calibration estimator. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param countryX Optional variable for the survey countries. The values for each country are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param yearsX Variable of the all survey years. If supplied, residual estimation of calibration is done independently for each time period. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param subperiodsX Variable for the all survey sub-periods. If supplied, residual estimation of calibration is done independently for each time period. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param X_ID_level1 Variable for level1 ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param ind_gr Optional variable by which divided independently X matrix of the auxiliary variables for the calibration. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param g Optional variable of the g weights. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param q Variable of the positive values accounting for heteroscedasticity. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param datasetX Optional survey data object in household level convertible to \code{data.table}.
#' @param frate Positive numeric value. Sampling rate in percentage, by default - 0.
#' @param percentratio Positive numeric value. All linearized variables are multiplied with \code{percentratio} value, by default - 1.
#' @param use.estVar Logical value. If value is \code{TRUE}, then \code{R} function \code{estVar} is used for the estimation of covariance matrix of the residuals. If value is \code{FALSE}, then \code{R} function \code{estVar} is not used for the estimation of covariance matrix of the residuals.
#' @param use.gender Logical value. If value is \code{TRUE}, then \code{subperiods} is defined together with \code{gender}.
#' @param confidence Optional positive value for the confidence level. This variable by default is 0.95.
#' @param method Character value; 'cros' is for annual measures and 'netchanges' is for measures of annual net change. This variable by default is 'cros'.
#'
#' @return A list with objects returned by the function:
#' \itemize{
#' \item \code{crossectional_results} - a \code{data.table} containing: \cr
#' \code{year} - survey years, \cr
#' \code{subperiods} - survey sub-periods, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - variable with names of variables of interest, \cr
#' \code{namesZ} - optional variable with names of denominator for ratio estimation, \cr
#' \code{sample_size} - the sample size (in numbers of individuals), \cr
#' \code{pop_size} - the population size (in numbers of individuals), \cr
#' \code{total} - the estimated totals, \cr
#' \code{variance} - the estimated variance of cross-sectional or longitudinal measures, \cr
#' \code{sd_w} - the estimated weighted variance of simple random sample, \cr
#'    \code{sd_nw} - the estimated variance of simple random sample, \cr
#' \code{pop} - the population size (in numbers of households), \cr
#' \code{sampl_siz} - the sample size (in numbers of households), \cr
#' \code{stderr_w} - the estimated weighted standard error of simple random sample, \cr
#' \code{stderr_nw} - the estimated standard error of simple random sample, \cr
#'    \code{se} - the estimated standard error of cross-sectional or longitudinal measures, \cr
#' \code{rse} - the estimated relative standard error (coefficient of variation), \cr
#' \code{cv} - the estimated relative standard error (coefficient of variation) in percentage, \cr
#' \code{absolute_margin_of_error} - the estimated absolute margin of error, \cr
#' \code{relative_margin_of_error} - the estimated relative margin of error, \cr
#' \code{CI_lower} - the estimated confidence interval lower bound, \cr
#' \code{CI_upper} - the estimated confidence interval upper bound, \cr
#' \code{confidence_level} - the positive value for confidence interval.
#' \item \code{crossectional_var_grad} - a \code{data.table} containing: \cr
#' \code{year} - survey years, \cr
#' \code{subperiods} - survey sub-periods, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - variable with names of variables of interest, \cr
#' \code{namesZ} - optional variable with names of denominator for ratio estimation, \cr
#' \code{grad} - the estimated gradient, \cr
#' \code{var} - the estimated a design-based variance.
#' \item \code{vardchanges_grad_var} - a \code{data.table} containing: \cr
#' \code{year_1} - survey years of \code{years1}, \cr
#' \code{subperiods_1} - survey sub-periods of \code{years1}, \cr
#' \code{year_2} - survey years of \code{years2}, \cr
#' \code{subperiods_2} - survey sub-periods of \code{years2}, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - variable with names of variables of interest, \cr
#' \code{namesZ} - optional variable with names of denominator for ratio estimation, \cr
#' \code{nams} - gradient names, numerator (num) and denominator (den), for each year, \cr
#' \code{grad} - the estimated gradient, \cr
#' \code{cros_var} - the estimated a design-based variance.
#' \item \code{vardchanges_rho} - a \code{data.table} containing: \cr
#' \code{year} - survey years of \code{years} for cross-sectional estimates, \cr
#' \code{subperiods} - survey sub-periods of \code{years} for cross-sectional estimates, \cr
#' \code{year_1} - survey years of \code{years1}, \cr
#' \code{subperiods_1} - survey sub-periods of \code{years1}, \cr
#' \code{year_2} - survey years of \code{years2}, \cr
#' \code{subperiods_2} - survey sub-periods of \code{years2}, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - variable with names of variables of interest, \cr
#' \code{namesZ} - optional variable with names of denominator for ratio estimation, \cr
#' \code{nams} - gradient names, numerator (num) and denominator (den), for each year, \cr
#' \code{rho} - the estimated correlation matrix.
#' \item \code{vardchanges_var_tau} - a \code{data.table} containing: \cr
#' \code{year_1} - survey years of \code{years1}, \cr
#' \code{subperiods_1} - survey sub-periods of \code{years1}, \cr
#' \code{year_2} - survey years of \code{years2}, \cr
#' \code{subperiods_2} - survey sub-periods of \code{years2}, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - variable with names of variables of interest, \cr
#' \code{namesZ} - optional variable with names of denominator for ratio estimation, \cr
#' \code{nams} - gradient names, numerator (num) and denominator (den), for each year, \cr
#' \code{var_tau} - the estimated covariance matrix.
#' \item \code{vardchanges_results} - a \code{data.table} containing: \cr
#' \code{year} - survey years of \code{years} for measures of annual, \cr
#' \code{subperiods} - survey sub-periods of \code{years} for measures of annual, \cr
#' \code{year_1} - survey years of \code{years1} for measures of annual net change, \cr
#' \code{subperiods_1} - survey sub-periods of \code{years1} for measures of annual net change, \cr
#' \code{year_2} - survey years of \code{years2} for measures of annual net change, \cr
#' \code{subperiods_2} - survey sub-periods of \code{years2} for measures of annual net change, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - variable with names of variables of interest, \cr
#' \code{namesZ} - optional variable with names of denominator for ratio estimation, \cr
#' \code{estim_1} - the estimated value for period1, \cr
#' \code{estim_2} - the estimated value for period2, \cr
#' \code{estim} - the estimated value, \cr
#' \code{var} - the estimated variance, \cr
#' \code{se} - the estimated standard error, \cr
#' \code{CI_lower} - the estimated confidence interval lower bound, \cr
#' \code{CI_upper} - the estimated confidence interval upper bound, \cr
#' \code{confidence_level} - the positive value for confidence interval, \cr
#'    \code{significant} - whether the difference is significant.
#' \item \code{X_annual} - a \code{data.table} containing: \cr
#' \code{year} - survey years of \code{years} for measures of annual, \cr
#' \code{year_1} - survey years of \code{years1} for measures of annual net change, \cr
#' \code{year_2} - survey years of \code{years2} for measures of annual net change, \cr
#' \code{period} - period1 and period2 together, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - variable with names of variables of interest, \cr
#' \code{namesZ} - optional variable with names of denominator for ratio estimation, \cr
#' \code{cros_se} - the estimated cross-sectional standard error.
#' \item \code{A_matrix} - a \code{data.table} containing: \cr
#' \code{year} - survey years of \code{years1} for measures of annual, \cr
#' \code{year_1} - survey years of \code{years1} for measures of annual net change, \cr
#' \code{year_2} - survey years of \code{years2} for measures of annual net change, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - variable with names of variables of interest, \cr
#' \code{namesZ} - optional variable with names of denominator for ratio estimation, \cr
#' \code{cols} - the estimated matrix_A columns, \cr
#' \code{matrix_A} - the estimated matrix A.
#' \item \code{annual_sum} - a \code{data.table} containing: \cr
#' \code{year} - survey years, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - variable with names of variables of interest, \cr
#' \code{namesZ} - optional variable with names of denominator for ratio estimation, \cr
#' \code{totalY} - the estimated value of variables of interest for period1, \cr
#' \code{totalZ} - optional the estimated value of denominator for period2, \cr
#' \code{estim} - the estimated value for year.
#' \item \code{annual_results} - a \code{data.table} containing: \cr
#' \code{year} - survey years of \code{years} for measures of annual, \cr
#' \code{year_1} - survey years of \code{years1} for measures of annual net change, \cr
#' \code{year_2} - survey years of \code{years2} for measures of annual net change, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - variable with names of variables of interest, \cr
#' \code{namesZ} - optional variable with names of denominator for ratio estimation, \cr
#' \code{estim_1} - the estimated value for period1 for measures of annual net change, \cr
#' \code{estim_2} - the estimated value for period2 for measures of annual net change, \cr
#' \code{estim} - the estimated value, \cr
#' \code{var} - the estimated variance, \cr
#' \code{se} - the estimated standard error, \cr
#' \code{rse} - the estimated relative standard error (coefficient of variation), \cr
#' \code{cv} - the estimated relative standard error (coefficient of variation) in percentage, \cr
#' \code{absolute_margin_of_error} - the estimated absolute margin of error for period1 for measures of annual, \cr
#' \code{relative_margin_of_error} - the estimated relative margin of error in percentage for measures of annual, \cr
#' \code{CI_lower} - the estimated confidence interval lower bound, \cr
#' \code{CI_upper} - the estimated confidence interval upper bound, \cr
#' \code{confidence_level} - the positive value for confidence interval, \cr
#'    \code{significant} - whether the difference is significant.
#' }
#'
#'
#' @references
#' Guillaume Osier, Virginie Raymond, (2015), Development of methodology for the estimate of variance of annual net changes for LFS-based indicators. Deliverable 1 - Short document with derivation of the methodology. \cr
#' Guillaume Osier, Yves Berger, Tim Goedeme, (2013), Standard error estimation for the EU-SILC indicators of poverty and social exclusion, Eurostat Methodologies and Working papers, URL \url{http://ec.europa.eu/eurostat/documents/3888793/5855973/KS-RA-13-024-EN.PDF}. \cr
#' Eurostat Methodologies and Working papers, Handbook on precision requirements and variance estimation for ESS household surveys, 2013, URL \url{http://ec.europa.eu/eurostat/documents/3859598/5927001/KS-RA-13-029-EN.PDF}. \cr
#' Yves G. Berger, Tim Goedeme, Guillaume Osier (2013). Handbook on standard error estimation and other related sampling issues in EU-SILC, URL \url{https://ec.europa.eu/eurostat/cros/content/handbook-standard-error-estimation-and-other-related-sampling-issues-ver-29072013_en} \cr
#'
#' @seealso \code{\link{domain}},
#' \code{\link{vardcros}},
#' \code{\link{vardchanges}}
#'
#' @keywords vardannual
#'
#' @examples
#'
#' ### Example
#' library("data.table")
#'
#' set.seed(1)
#'
#' data("eusilc", package = "laeken")
#' eusilc1 <- eusilc[1:20, ]
#' rm(eusilc)
#'
#' dataset1 <- data.table(rbind(eusilc1, eusilc1),
#' year = c(rep(2010, nrow(eusilc1)),
#' rep(2011, nrow(eusilc1))))
#' rm(eusilc1)
#'
#' dataset1[, country := "AT"]
#' dataset1[, half := .I - 2 * trunc((.I - 1) / 2)]
#' dataset1[, quarter := .I - 4 * trunc((.I - 1) / 4)]
#' dataset1[age < 0, age := 0]
#'
#' PSU <- dataset1[, .N, keyby = "db030"][, N := NULL][]
#' PSU[, PSU := trunc(runif(.N, 0, 5))]
#'
#' dataset1 <- merge(dataset1, PSU, all = TRUE, by = "db030")
#' rm(PSU)
#'
#' dataset1[, strata := "XXXX"]
#' dataset1[, employed := trunc(runif(.N, 0, 2))]
#' dataset1[, unemployed := trunc(runif(.N, 0, 2))]
#' dataset1[, labour_force := employed + unemployed]
#' dataset1[, id_lv2 := paste0("V", .I)]
#'
#' vardannual(Y = "employed", H = "strata",
#' PSU = "PSU", w_final = "rb050",
#' ID_level1 = "db030", ID_level2 = "id_lv2",
#' Dom = NULL, Z = NULL, years = "year",
#' subperiods = "half", dataset = dataset1,
#' percentratio = 100, confidence = 0.95,
#' method = "cros")
#'
#' \dontrun{
#' vardannual(Y = "employed", H = "strata",
#' PSU = "PSU", w_final = "rb050",
#' ID_level1 = "db030", ID_level2 = "id_lv2",
#' Dom = NULL, Z = NULL, country = "country",
#' years = "year", subperiods = "quarter",
#' dataset = dataset1, year1 = 2010, year2 = 2011,
#' percentratio = 100, confidence = 0.95,
#' method = "netchanges")
#'
#' vardannual(Y = "unemployed", H = "strata",
#' PSU = "PSU", w_final = "rb050",
#' ID_level1 = "db030", ID_level2 = "id_lv2",
#' Dom = NULL, Z = "labour_force",
#' country = "country", years = "year",
#' subperiods = "quarter", dataset = dataset1,
#' year1 = 2010, year2 = 2011,
#' percentratio = 100, confidence = 0.95,
#' method = "netchanges")
#' }
#'
#' @import data.table
#' @import MASS
#' @import laeken
#'
#' @export vardannual
# # Development ####
# # For production this section should be disabled
#
# # Load helper functions
# source(file = "vardpoor/R/domain.R", echo = FALSE)
# source(file = "vardpoor/R/vardcros.R", echo = FALSE)
# source(file = "vardpoor/R/vardchanges.R", echo = FALSE)
#
# # Generate example data
# library("data.table")
#
# set.seed(1)
#
# data("eusilc", package = "laeken")
# eusilc1 <- eusilc[1:20, ]
# rm(eusilc)
#
# dataset1 <- data.table(rbind(eusilc1, eusilc1),
# year = c(rep(2010, nrow(eusilc1)),
# rep(2011, nrow(eusilc1))))
# rm(eusilc1)
#
# dataset1[, country := "AT"]
# dataset1[, half := .I - 2 * trunc((.I - 1) / 2)]
# dataset1[, quarter := .I - 4 * trunc((.I - 1) / 4)]
# dataset1[age < 0, age := 0]
#
# PSU <- dataset1[, .N, keyby = "db030"][, N := NULL][]
# PSU[, PSU := trunc(runif(.N, 0, 5))]
#
# dataset1 <- merge(dataset1, PSU, all = TRUE, by = "db030")
# rm(PSU)
#
# dataset1[, strata := "XXXX"]
# dataset1[, employed := trunc(runif(.N, 0, 2))]
# dataset1[, unemployed := trunc(runif(.N, 0, 2))]
# dataset1[, labour_force := employed + unemployed]
# dataset1[, id_lv2 := paste0("V", .I)]
#
#
# # Define function arguments for testing
# Y <- "employed"
# H <- "strata"
# PSU <- "PSU"
# w_final <- "rb050"
# ID_level1 <- "db030"
# ID_level2 <- "id_lv2"
# Dom <- NULL
# Z <- NULL
# gender <- NULL
# country <- NULL
# years <- "year"
# subperiods <- "half"
# dataset <- copy(dataset1)
# year1 <- NULL
# year2 <- NULL
# X <- NULL
# countryX <- NULL
# yearsX <- NULL
# subperiodsX <- NULL
# X_ID_level1 <- NULL
# ind_gr <- NULL
# g <- NULL
# q <- NULL
# datasetX <- NULL
# frate <- 0
# percentratio <- 100
# use.estVar <- FALSE
# use.gender <- FALSE
# confidence <- 0.95
# method <- "cros"
# Function definition ####
vardannual <- function(Y, H, PSU, w_final, ID_level1,
ID_level2, Dom = NULL, Z = NULL,
gender = NULL, country = NULL,
years, subperiods, dataset = NULL,
year1 = NULL, year2 = NULL, X = NULL,
countryX = NULL, yearsX = NULL,
subperiodsX = NULL, X_ID_level1 = NULL,
ind_gr = NULL, g = NULL, q = NULL,
datasetX = NULL, frate = 0, percentratio = 1,
use.estVar = FALSE, use.gender = FALSE,
confidence = 0.95, method = "cros") {
### Checking
. <- NULL
outp_res <- FALSE
method <- check_var(vars = method, varn = "method", varntype = "method")
percentratio <- check_var(vars = percentratio, varn = "percentratio",
varntype = "pinteger")
use.estVar <- check_var(vars = use.estVar, varn = "use.estVar",
varntype = "logical")
use.gender <- check_var(vars = use.gender, varn = "use.gender",
varntype = "logical")
confidence <- check_var(vars = confidence, varn = "confidence",
varntype = "numeric01")
frate <- check_var(vars = frate, varn = "frate", varntype = "numeric0100")
if(!is.null(X)) {
if (is.null(datasetX)) datasetX <- copy(dataset)
equal_dataset <- identical(dataset, datasetX) & !is.null(dataset)
if (equal_dataset) {
X_ID_level1 <- ID_level1
countryX <- country
}
}
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
check.names = TRUE, isnumeric = TRUE, grepls = "__")
Ynrow <- nrow(Y)
Yncol <- ncol(Y)
H <- check_var(vars = H, varn = "H", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
dif_name = "dataH_stratas")
w_final <- check_var(vars = w_final, varn = "w_final",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
isnumeric = TRUE, isvector = TRUE)
gender <- check_var(vars = gender, varn = "gender",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
isnumeric = TRUE, isvector = TRUE,
mustbedefined = FALSE)
Z <- check_var(vars = Z, varn = "Z", dataset = dataset,
check.names = TRUE, Yncol = Yncol, Ynrow = Ynrow,
isnumeric = TRUE, mustbedefined = FALSE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
ncols = 0, Yncol = 0, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
duplicatednames = TRUE, grepls = "__")
country <- check_var(vars = country, varn = "country",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
dif_name = c("percoun", "period_country", "Nrs"))
years <- check_var(vars = years, varn = "years", dataset = dataset,
ncols = 1, Yncol = 0, Ynrow = Ynrow, ischaracter = TRUE,
dif_name = c("percoun", "period_country", names(country),
"yearg", "Nrs"),
use.gender = use.gender)
yearg <- NULL
years[, yearg := substr(get(names(years)), 1,
nchar(get(names(years))) - ifelse(use.gender, 2, 0))]
yearm <- names(years)[1 + use.gender]
if (method != "cros") {
year1 <- check_var(vars = year1, varn = "year1", dataset = NULL, ncols = 1,
ischaracter = TRUE,
years = years[, 1 + use.gender, with = FALSE])
year2 <- check_var(vars = year2, varn = "year2", dataset = NULL, ncols = 1,
ischaracter = TRUE,
years = years[, 1 + use.gender, with = FALSE])
} else {
if (!missing(year1)) if (!is.null(year1)) stop("'year1' must be NULL")
if (!missing(year2)) if (!is.null(year2)) stop("'year2' must be NULL")
year1 <- years[, .N, by = yearm][, N := NULL]
year2 <- years[, .N, by = yearm][, N := NULL]
}
subperiods <- check_var(vars = subperiods, varn = "subperiods",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
ischaracter = TRUE, years = years, Domen = Dom,
dif_name = c("percoun", "period_country",
names(country), "yearg", "Nrs"))
subpm <- names(subperiods)
subn <- data.table(years, subperiods, Dom)
subn <- subn[, .N, by = c(names(subn))]
subn <- max(subn[, .N, by = names(years)][["N"]])
ID_level1 <- check_var(vars = ID_level1, varn = "ID_level1",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
ischaracter = TRUE)
ID_level2 <- check_var(vars = ID_level2, varn = "ID_level2",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
ischaracter = TRUE, namesID1 = names(ID_level1),
country = country, years = years, periods = subperiods)
PSU <- check_var(vars = PSU, varn = "PSU", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
namesID1 = names(ID_level1))
if (!is.null(X) | !is.null(ind_gr) | !is.null(g) | !is.null(q) |
!is.null(countryX) | !is.null(yearsX) | !is.null(subperiodsX) |
!is.null(X_ID_level1) | !is.null(datasetX)) {
X <- check_var(vars = X, varn = "X", dataset = datasetX,
isnumeric = TRUE,
dif_name = c(names(years), names(subperiods),
names(country), names(H), names(PSU),
names(ID_level1), "w_final", names(Y),
"w_design", "g", "q"), dX = "X")
Xnrow <- nrow(X)
ind_gr <- check_var(vars = ind_gr, varn = "ind_gr",
dataset = datasetX, ncols = 1,
Xnrow = Xnrow, ischaracter = TRUE,
dif_name = c(names(years), names(subperiods),
names(country), names(H), names(PSU),
names(ID_level1), "w_final", names(Y),
names(X), "w_design", "g", "q"), dX = "X")
g <- check_var(vars = g, varn = "g", dataset = datasetX,
ncols = 1, Xnrow = Xnrow, isnumeric = TRUE,
isvector = TRUE, dX = "X")
q <- check_var(vars = q, varn = "q", dataset = datasetX,
ncols = 1, Xnrow = Xnrow, isnumeric = TRUE,
isvector = TRUE, dX = "X")
countryX <- check_var(vars = countryX, varn = "countryX",
dataset = datasetX, ncols = 1, Xnrow = Xnrow,
ischaracter = TRUE, mustbedefined = !is.null(country),
varnout = "country", varname = names(country),
country = country, dX = "X")
yearsX <- check_var(vars = yearsX, varn = "yearsX", dataset = datasetX,
ncols = 1, Xnrow = Xnrow, ischaracter = TRUE,
mustbedefined = !is.null(years), varnout = "years",
varname = names(years)[1], country = country,
countryX = countryX, years = years[, 1, with = FALSE],
use.gender = use.gender, dX = "X")
subperiodsX <- check_var(vars = subperiodsX, varn = "subperiodsX",
dataset = datasetX, ncols = 1, Xnrow = Xnrow,
ischaracter = TRUE,
mustbedefined = !is.null(subperiods),
varnout = "subperiods",
varname = names(subperiods),
country = country, countryX = countryX,
years = years[, 1, with = FALSE], dX = "X",
yearsX = yearsX, periods = subperiods)
X_ID_level1 <- check_var(vars = X_ID_level1, varn = "X_ID_level1",
dataset = datasetX, ncols = 1, Xnrow = Xnrow,
ischaracter = TRUE, varnout = "ID_level1",
varname = names(ID_level1), country = country,
countryX = countryX,
years = years[, 1, with = FALSE],
yearsX = yearsX, periods = subperiods, dX = "X",
periodsX = subperiodsX, ID_level1 = ID_level1)
}
dataset <- datasetX <- NULL
ids <- nams <- cros_se <- num1 <- totalY <- totalZ <- NULL
estim_1 <- estim_2 <- avar <- N <- estim <- NULL
var_est2 <- se <- rse <- cv <- CI_lower <- CI_upper <- NULL
Nr_sar <- cols <- Nrs <- percoun <- totalY_male <- NULL
totalZ_male <- totalY_female <- totalZ_female <- confidence_level <- NULL
pers <- data.table(years, subperiods,
pers = paste0(years[[1]], "__", subperiods[[1]]))
  # Not necessary as `yearg` is already available
# pers[, yearg := substr(get(names(years)[1]), 1,
# nchar(get(names(years)[1])) -
# ifelse(use.gender, 2, 0))]
if (!is.null(X)) {
persX <- data.table(yearsX, subperiodsX,
pers = paste0(yearsX[[names(yearsX)]], "__",
subperiodsX[[names(subperiodsX)]]))
}
sarak <- pers[, .N, keyby = names(pers)][, N := NULL][]
cros_calc <- vardcros(Y = Y, H = H, PSU = PSU, w_final = w_final,
ID_level1 = ID_level1, ID_level2 = ID_level2,
Dom = Dom, Z = Z, gender = gender,
country = country, period = pers[, "pers"],
dataset = NULL, X = X, countryX = countryX,
periodX = persX[, "pers"], X_ID_level1 = X_ID_level1,
ind_gr = ind_gr, g = g, q = q, datasetX = NULL,
linratio = !is.null(Z),
percentratio = percentratio,
use.estVar = use.estVar,
ID_level1_max = is.null(X),
outp_res = outp_res,
withperiod = TRUE,
netchanges = TRUE,
confidence = confidence,
checking = FALSE)
countryX <- periodX <- yearX <- NULL
X_ID_level1 <- ind_gr <- g <- q <- ID_level2 <- NULL
subperiods <- w_final <- NULL
years <- years[, .N, by = c(names(years)[1])][, N := NULL][]
pers12 <- paste("pers", 1:2, sep = "_")
if (method == "cros") {
period12 <- c(yearm, paste(subpm, 1:2, sep = "_"))
years12 <- yearm
} else {
period12 <- paste(rep(c(yearm, subpm), 2), c(1, 1, 2, 2), sep = "_")
years12 <- paste(yearm, 1:2, sep = "_")
}
atsyear <- rbindlist(list(data.table(Nrs = 1:nrow(year1), yrs = 1, year1),
data.table(Nrs = 1:nrow(year2), yrs = 2, year2)))
if (method == "cros") atsyear <- data.table(Nrs = 1:nrow(year1),
yrs = 1, year1)
atsyear <- merge(atsyear, sarak, all.x = TRUE, by = yearm,
sort = FALSE, allow.cartesian = TRUE)
atsyear[, ids := 1:.N, by = "Nrs"]
nr1 <- max(atsyear[["ids"]])
yrs <- rbindlist(lapply(1:(nr1 - 1), function(j) {
atsy1 <- atsyear[ids == j]
atsy2 <- atsyear[ids %in% c((j + 1):nr1)]
if (method == "cros") {
atsy2[, (yearm) := NULL]
setnames(atsy1, names(atsy1)[-c(1:2)],
paste0(names(atsy1)[-c(1:2)], "_1"))
setnames(atsy2, names(atsy2)[-1],
paste0(names(atsy2)[-1], "_2"))
} else {
setnames(atsy1, names(atsy1)[-2], paste0(names(atsy1)[-2], "_1"))
setnames(atsy2, names(atsy2)[-2], paste0(names(atsy2)[-2], "_2"))
}
merge(atsy1, atsy2, all = TRUE, by = "Nrs")
}))
if (method != "cros") {
yr12 <- rbind(data.table(Nrs = 1:nrow(year1), yearg = year1[[1]]),
data.table(Nrs = 1:nrow(year1), yearg = year2[[1]]))
} else yr12 <- data.table(Nrs = 1:nrow(year1), yearg = year1[[1]])
if (!is.null(Dom)) {
Y1 <- namesD(Y, Dom, uniqueD = TRUE)
Z1 <- NULL
if (!is.null(Z)) Z1 <- namesD(Z, Dom, uniqueD = TRUE)
} else {
Y1 <- names(Y)
Z1 <- names(Z)
}
Y <- names(Y)
Z <- names(Z)
names_country <- names(country)
PSU <- names(PSU)
H <- names(H)
Dom <- names(Dom)
yrs_without <- yrs[, .N, by = c("pers_1", "pers_2")]
data <- cros_calc$data_net_changes
changes_calc <- vardchanges_calculation(Y1 = Y1, Z1 = Z1, Dom = Dom,
names_country = names_country,
per = "pers", PSU = PSU, H = H,
period1 = yrs_without[, .(pers = get("pers_1"))],
period2 = yrs_without[, .(pers = get("pers_2"))],
cros_var_grad = cros_calc$var_grad,
change_type = "absolute",
data = data, linratio = !is.null(Z),
annual = TRUE,
percentratio = percentratio,
use.estVar = use.estVar,
confidence = confidence, poor = FALSE)
pers <- pers[, .N, keyby = names(pers)][, N := NULL][]
crossectional_results <- merge(pers, cros_calc$results,
all = TRUE, by = "pers")
crossectional_results[, (c("pers", "yearg")) := NULL]
if (is.null(names(country))) crossectional_results[, percoun := NULL]
gender <- data <- yrs_without <- cros_calc <- NULL
cros_var_grad <- merge(sarak, changes_calc$cros_var_grad,
all.y = TRUE, by = c("pers"))
rho <- merge(yrs, changes_calc$rho_matrix,
all.y = TRUE,
by = c("pers_1", "pers_2"),
allow.cartesian = TRUE)
sar <- c("Nrs", names_country, "namesY", "namesZ", Dom)
sar <- sar[sar %in% names(rho)]
rho[, Nr_sar := .GRP, by = sar]
rho1 <- rho[nams == "num2"]
rho1[, ids := 1:.N, by = sar]
rhoj <- rho[,.N, keyby = sar][, N := NULL][]
max_ids <- max(atsyear[["ids"]])
yr12cros <- merge(yr12, cros_var_grad, by = "yearg",
allow.cartesian = TRUE, sort = FALSE)
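  # For every domain/variable cell: assemble the correlation matrix A across the
  # subperiod estimates (correlations between the two years enter with a negative
  # sign for net changes), take x = the vector of cross-sectional standard errors,
  # and compute the annual variance as (1 - frate/100) / subn^2 * t(x) %*% A %*% x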
apstr <- lapply(1:max(rho[["Nr_sar"]]), function(j) {
rho2 <- rho1[Nr_sar == j]
A_matrix <- diag(1, max_ids, max_ids)
for (k in 1:max(rho2[["ids"]])) {
at <- rho2[k == ids]
A_matrix[at[["ids_1"]], at[["ids_2"]]] <- at[["rho_num1"]]
A_matrix[at[["ids_2"]], at[["ids_1"]]] <- at[["rho_num1"]]
if (method != "cros") {
if (at[["ids_2"]] > subn & at[["ids_1"]] < subn + 1) {
A_matrix[at[["ids_1"]], at[["ids_2"]]] <- -at[["rho_num1"]]
A_matrix[at[["ids_2"]], at[["ids_1"]]] <- -at[["rho_num1"]]
}
}
}
cros_rho <- merge(yr12cros, rho2[1, sar, with = FALSE], by = sar, sort = FALSE)
cros_rho[, cols := paste0("V", 1:.N)]
cros_rho[, cros_se := sqrt(num1)]
X <- cros_rho[["cros_se"]]
annual_var <- data.table(rho2[1, sar, with = FALSE],
(1 - frate / 100) / (subn) ^ 2 *
(t(X) %*% A_matrix) %*% X)
setnames(annual_var, c("V1"), c("var"))
A_matrix <- data.table(rho2[1, sar, with = FALSE],
cols = paste0("V", 1:nrow(A_matrix)), A_matrix)
list(cros_rho, A_matrix, annual_var)
})
cros_rho <- rbindlist(lapply(apstr, function(x) x[[1]]))
A_matrix <- rbindlist(lapply(apstr, function(x) x[[2]]))
annual_var <- rbindlist(lapply(apstr, function(x) x[[3]]))
sars <- c(names(country), yearm, Dom, "namesY", "namesZ")
sars <- sars[sars %in% names(cros_var_grad)]
sarsb <- sars[!(sars %in% yearm)]
sarc <- c("totalY", "totalZ", "totalY_male",
"totalY_female", "totalZ_male", "totalZ_female")
sarc <- sarc[sarc %in% names(cros_var_grad)]
ysum <- cros_var_grad[, lapply(.SD, mean), by = sars, .SDcols = sarc]
if (!is.null(ysum$totalZ_male)) {
ysum[, estim := (totalY_male / totalZ_male -
totalY_female / totalZ_female) * percentratio]
} else if (!is.null(ysum$totalY_male)) {
ysum[, estim := (totalY_male - totalY_female) * percentratio]
} else if (!is.null(ysum$totalZ)) {
ysum[, estim := totalY / totalZ * percentratio]
} else ysum[, estim := totalY]
year1m <- year1[[yearm]]
ysum1 <- ysum[get(yearm) %in% year1m, c(sars, "estim"), with = FALSE]
years1 <- copy(year1)[, Nrs := 1:.N]
ysum1 <- merge(years1, ysum1, by = yearm, sort = FALSE,
allow.cartesian = TRUE)
if (method != "cros") {
years2 <- copy(year2)[, Nrs := 1:.N]
year2m <- year2[[yearm]]
ysum2 <- ysum[get(yearm) %in% year2m, c(sars, "estim"), with = FALSE]
ysum2 <- merge(years2, ysum2, by = yearm, sort = FALSE,
allow.cartesian = TRUE)
setnames(ysum1, c("estim", yearm), c("estim_1", paste0(yearm, "_1")))
setnames(ysum2, c("estim", yearm), c("estim_2", paste0(yearm, "_2")))
ysum <- merge(ysum1, ysum2, all.x = TRUE, by = c("Nrs", sarsb))
ysum[, estim := estim_2 - estim_1]
} else ysum <- ysum1
ysum1 <- ysum2 <- NULL
annual_results <- merge(ysum, annual_var, by = c("Nrs", sarsb), sort = FALSE)
estim <- "estim"
if (method != "cros") estim <- c("estim_1", "estim_2", "estim")
annual_results <- annual_results[, c(years12, sarsb, estim, "var"),
with = FALSE]
ysum <- ysum[, c(years12, sarsb, estim), with = FALSE]
grad_var <- merge(yrs[, c(pers12, period12), with = FALSE],
changes_calc$grad_var, all.y = TRUE,
by = pers12, allow.cartesian = TRUE)
grad_var[, (pers12) := NULL]
var_tau <- merge(yrs[, c(pers12, period12), with = FALSE],
changes_calc$var_tau, all.y = TRUE,
by = pers12, allow.cartesian = TRUE)
var_tau[, (pers12) := NULL]
vardchanges_results <- merge(yrs[, c(pers12, period12), with = FALSE],
changes_calc$changes_results, all.y = TRUE,
by = pers12, allow.cartesian = TRUE)
vardchanges_results[, (pers12) := NULL]
X_annual <- cros_rho
if(method != "cros") {
atsyear <- data.table(Nrs = 1:nrow(year1), year1, year2, check.names = TRUE)
setnames(atsyear, names(atsyear)[2:3], years12)
X_annual <- merge(atsyear, cros_rho, all.y = TRUE, by = "Nrs", sort = FALSE)
} else atsyear <- data.table(Nrs = 1:nrow(years), years, check.names = TRUE)
vars <- c(years12, subpm, sarsb, "cols", "cros_se")
X_annual <- X_annual[, vars[vars %in% names(X_annual)], with = FALSE]
A_matrix <- merge(atsyear, A_matrix, all.y = TRUE, by = "Nrs", sort = FALSE)
A_matrix[, Nrs := NULL]
annual_results[, var_est2 := var]
annual_results[xor(is.na(var_est2), var_est2 < 0), var_est2 := NA]
annual_results[, se := sqrt(var_est2)]
annual_results[, var_est2 := NULL]
annual_results[, rse := se / estim]
annual_results[, cv := rse * 100]
tsad <- qnorm(0.5 * (1 + confidence))
absolute_margin_of_error <- relative_margin_of_error <- NULL
annual_results[, absolute_margin_of_error := tsad * se]
annual_results[, relative_margin_of_error := tsad * cv]
annual_results[, CI_lower := estim - tsad * se]
annual_results[, CI_upper := estim + tsad * se]
annual_results[, confidence_level := confidence]
if (method != "cros") {
significant <- NULL
annual_results[, significant := "YES"]
annual_results[CI_lower <= 0 & CI_upper >= 0, significant := "NO"]
}
list(crossectional_results = crossectional_results,
crossectional_var_grad = cros_var_grad,
vardchanges_grad_var = grad_var,
vardchanges_rho = rho,
vardchanges_var_tau = var_tau,
vardchanges_results = vardchanges_results,
X_annual = X_annual, A_matrix = A_matrix,
annual_sum = ysum,
annual_results = annual_results)
}
|
/scratch/gouwar.j/cran-all/cranData/vardpoor/R/vardannual.R
|
#' Variance estimation for measures of change for single and multistage cluster sampling designs
#'
#' @description Computes the variance estimation for measures of change for single and multistage cluster sampling designs.
#'
#' @param Y Variables of interest. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param H The unit stratum variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param PSU Primary sampling unit variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param w_final Weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param ID_level1 Variable for level1 ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param ID_level2 Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, variables are calculated for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers.
#' @param Z Optional variables of denominator for ratio estimation. If supplied, the ratio estimation is computed. Object convertible to \code{data.table} or variable names as character, column numbers. This variable is \code{NULL} by default.
#' @param gender Numerical variable for gender, where 1 is for males and 2 is for females. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param country Variable for the survey countries. The values for each country are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param period Variable for the all survey periods. The values for each period are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param period1 The vector of periods from variable \code{periods} describes the first period.
#' @param period2 The vector of periods from variable \code{periods} describes the second period.
#' @param X Optional matrix of the auxiliary variables for the calibration estimator. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param countryX Optional variable for the survey countries. The values for each country are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param periodX Optional variable for all survey periods. If supplied, residual estimation of calibration is done independently for each time period. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param X_ID_level1 Variable for level1 ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param ind_gr Optional variable by which the \code{X} matrix of auxiliary variables is split into independent groups for calibration. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param g Optional variable of the g weights. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param q Variable of the positive values accounting for heteroscedasticity. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param datasetX Optional survey data object in household level convertible to \code{data.table}.
#' @param linratio Logical value. If \code{TRUE}, the linearized variables of the ratio estimator are used for variance estimation; if \code{FALSE}, the gradients are used for variance estimation.
#' @param percentratio Positive numeric value. All linearized variables are multiplied with \code{percentratio} value, by default - 1.
#' @param use.estVar Logical value. If value is \code{TRUE}, then \code{R} function \code{estVar} is used for the estimation of covariance matrix of the residuals. If value is \code{FALSE}, then \code{R} function \code{estVar} is not used for the estimation of covariance matrix of the residuals.
#' @param outp_res Logical value. If \code{TRUE} estimated residuals of calibration will be printed out.
#' @param confidence Optional positive value for the confidence level of the confidence interval. This variable by default is 0.95.
#' @param change_type Character value; the type of net change, either \code{"absolute"} or \code{"relative"}.
#' @param checking Optional logical value. If \code{TRUE} (the default), the function checks the data preparation for errors; otherwise the checks are skipped.
#'
#' @return A list of objects returned by the function:
#' \itemize{
#' \item \code{res_out} - a \code{data.table} containing the estimated residuals of calibration with ID_level1 and PSU by periods and countries (if available).
#' \item \code{crossectional_results} - a \code{data.table} containing: \cr
#' \code{period} - survey periods, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - variable with names of variables of interest, \cr
#' \code{namesZ} - optional variable with names of denominator for ratio estimation, \cr
#' \code{sample_size} - the sample size (in numbers of individuals), \cr
#' \code{pop_size} - the population size (in numbers of individuals), \cr
#' \code{total} - the estimated totals, \cr
#' \code{variance} - the estimated variance of cross-sectional or longitudinal measures, \cr
#' \code{sd_w} - the estimated weighted variance of simple random sample, \cr
#' \code{sd_nw} - the estimated variance of simple random sample, \cr
#' \code{pop} - the population size (in numbers of households), \cr
#' \code{sampl_siz} - the sample size (in numbers of households), \cr
#' \code{stderr_w} - the estimated weighted standard error of simple random sample, \cr
#' \code{stderr_nw} - the estimated standard error of simple random sample, \cr
#' \code{se} - the estimated standard error of cross-sectional or longitudinal measures, \cr
#' \code{rse} - the estimated relative standard error (coefficient of variation), \cr
#' \code{cv} - the estimated relative standard error (coefficient of variation) in percentage, \cr
#' \code{absolute_margin_of_error} - the estimated absolute margin of error, \cr
#' \code{relative_margin_of_error} - the estimated relative margin of error, \cr
#' \code{CI_lower} - the estimated confidence interval lower bound, \cr
#' \code{CI_upper} - the estimated confidence interval upper bound.
#' \item \code{crossectional_var_grad} - a \code{data.table} containing: \cr
#' \code{periods} - survey periods, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - variable with names of variables of interest, \cr
#' \code{namesZ} - optional variable with names of denominator for ratio estimation, \cr
#' \code{grad} - the estimated gradient, \cr
#' \code{var} - the estimated design-based variance.
#' \item \code{rho} - a \code{data.table} containing: \cr
#' \code{periods_1} - survey periods of \code{periods1}, \cr
#' \code{periods_2} - survey periods of \code{periods2}, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - variable with names of variables of interest, \cr
#' \code{namesZ} - optional variable with names of denominator for ratio estimation, \cr
#' \code{nams} - the variable names in correlation matrix, \cr
#' \code{rho} - the estimated correlation matrix.
#' \item \code{var_tau} - a \code{data.table} containing: \cr
#' \code{periods_1} - survey periods of \code{periods1}, \cr
#' \code{periods_2} - survey periods of \code{periods2}, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - variable with names of variables of interest, \cr
#' \code{namesZ} - optional variable with names of denominator for ratio estimation, \cr
#' \code{nams} - the variable names in correlation matrix, \cr
#' \code{var_tau} - the estimated covariance matrix.
#' \item \code{changes_results} - a \code{data.table} containing: \cr
#' \code{periods_1} - survey periods of \code{periods1}, \cr
#' \code{periods_2} - survey periods of \code{periods2}, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - variable with names of variables of interest, \cr
#' \code{namesZ} - optional variable with names of denominator for ratio estimation, \cr
#' \code{estim_1} - the estimated value for period1, \cr
#' \code{estim_2} - the estimated value for period2, \cr
#' \code{estim} - the estimated value, \cr
#' \code{var} - the estimated variance, \cr
#' \code{se} - the estimated standard error, \cr
#' \code{CI_lower} - the estimated confidence interval lower bound, \cr
#' \code{CI_upper} - the estimated confidence interval upper bound. \cr
#' \code{significant} - whether the difference is significant.
#' }
#'
#' @references
#'Guillaume Osier, Yves Berger, Tim Goedeme, (2013), Standard error estimation for the EU-SILC indicators of poverty and social exclusion, Eurostat Methodologies and Working papers, URL \url{http://ec.europa.eu/eurostat/documents/3888793/5855973/KS-RA-13-024-EN.PDF}. \cr
#'Eurostat Methodologies and Working papers, Handbook on precision requirements and variance estimation for ESS household surveys, 2013, URL \url{http://ec.europa.eu/eurostat/documents/3859598/5927001/KS-RA-13-029-EN.PDF}. \cr
#'Yves G. Berger, Tim Goedeme, Guillaume Osier (2013). Handbook on standard error estimation and other related sampling issues in EU-SILC, URL \url{https://ec.europa.eu/eurostat/cros/content/handbook-standard-error-estimation-and-other-related-sampling-issues-ver-29072013_en} \cr
#'
#' @seealso \code{\link{domain}},
#' \code{\link{vardcros}},
#' \code{\link{vardchangespoor}}
#'
#' @keywords vardchanges
#'
#' @examples
#'
#' ### Example
#' library("data.table")
#' library("laeken")
#' data("eusilc")
#' set.seed(1)
#' eusilc1 <- eusilc[1:40,]
#' set.seed(1)
#' dataset1 <- data.table(rbind(eusilc1, eusilc1),
#' year = c(rep(2010, nrow(eusilc1)),
#' rep(2011, nrow(eusilc1))))
#' dataset1[age < 0, age := 0]
#' PSU <- dataset1[, .N, keyby = "db030"][, N := NULL]
#' PSU[, PSU := trunc(runif(nrow(PSU), 0, 5))]
#' dataset1 <- merge(dataset1, PSU, all = TRUE, by = "db030")
#' PSU <- eusilc <- NULL
#' dataset1[, strata := c("XXXX")]
#'
#' dataset1[, t_pov := trunc(runif(nrow(dataset1), 0, 2))]
#' dataset1[, exp := 1]
#'
#' # At-risk-of-poverty (AROP)
#' dataset1[, pov := ifelse (t_pov == 1, 1, 0)]
#' dataset1[, id_lev2 := paste0("V", .I)]
#'
#'
#' result <- vardchanges(Y = "pov", H = "strata",
#' PSU = "PSU", w_final = "rb050",
#' ID_level1 = "db030", ID_level2 = "id_lev2",
#' Dom = NULL, Z = NULL, period = "year",
#' dataset = dataset1, period1 = 2010,
#' period2 = 2011, change_type = "absolute")
#' result
#'
#' \dontrun{
#' data("eusilc")
#' dataset1 <- data.table(rbind(eusilc, eusilc),
#' year = c(rep(2010, nrow(eusilc)),
#' rep(2011, nrow(eusilc))))
#' dataset1[age < 0, age := 0]
#' PSU <- dataset1[,.N, keyby = "db030"][, N := NULL]
#' PSU[, PSU := trunc(runif(nrow(PSU), 0, 100))]
#' dataset1 <- merge(dataset1, PSU, all = TRUE, by = "db030")
#' PSU <- eusilc <- NULL
#' dataset1[, strata := "XXXX"]
#'
#' dataset1[, t_pov := trunc(runif(nrow(dataset1), 0, 2))]
#' dataset1[, t_dep := trunc(runif(nrow(dataset1), 0, 2))]
#' dataset1[, t_lwi := trunc(runif(nrow(dataset1), 0, 2))]
#' dataset1[, exp := 1]
#' dataset1[, exp2 := 1 * (age < 60)]
#'
#' # At-risk-of-poverty (AROP)
#' dataset1[, pov := ifelse (t_pov == 1, 1, 0)]
#'
#' # Severe material deprivation (DEP)
#' dataset1[, dep := ifelse (t_dep == 1, 1, 0)]
#'
#' # Low work intensity (LWI)
#' dataset1[, lwi := ifelse (t_lwi == 1 & exp2 == 1, 1, 0)]
#'
#' # At-risk-of-poverty or social exclusion (AROPE)
#' dataset1[, arope := ifelse (pov == 1 | dep == 1 | lwi == 1, 1, 0)]
#' dataset1[, dom := 1]
#' dataset1[, id_lev2 := .I]
#'
#' result <- vardchanges(Y = c("pov", "dep", "lwi", "arope"),
#' H = "strata", PSU = "PSU", w_final = "rb050",
#' ID_level1 = "db030", ID_level2 = "id_lev2",
#' Dom = "rb090", Z = NULL, period = "year",
#' dataset = dataset1, period1 = 2010,
#' period2 = 2011, change_type = "absolute")
#' result}
#'
#' @import data.table
#' @import laeken
#'
#' @export vardchanges
vardchanges <- function(Y, H, PSU, w_final,
ID_level1, ID_level2,
Dom = NULL, Z = NULL,
gender = NULL,
country = NULL, period,
dataset = NULL,
period1, period2,
X = NULL, countryX = NULL,
periodX = NULL, X_ID_level1 = NULL,
ind_gr = NULL, g = NULL,
q = NULL, datasetX = NULL,
linratio = FALSE,
percentratio = 1,
use.estVar = FALSE,
outp_res = FALSE,
confidence = 0.95,
change_type = "absolute",
checking = TRUE) {
### Checking
change_type <- check_var(vars = change_type, varn ="change_type", varntype = "change_type")
if (checking) {
percentratio <- check_var(vars = percentratio, varn = "percentratio", varntype = "pinteger")
linratio <- check_var(vars = linratio, varn = "linratio", varntype = "logical")
use.estVar <- check_var(vars = use.estVar, varn = "use.estVar", varntype = "logical")
outp_res <- check_var(vars = outp_res, varn = "outp_res", varntype = "logical")
confidence <- check_var(vars = confidence, varn = "confidence", varntype = "numeric01")
if(!is.null(X)) {
if (is.null(datasetX)) datasetX <- copy(dataset)
equal_dataset <- identical(dataset, datasetX) & !is.null(dataset)
if (equal_dataset) { X_ID_level1 <- ID_level1
countryX <- country }}
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
check.names = TRUE, isnumeric = TRUE, grepls = "__")
Ynrow <- nrow(Y)
Yncol <- ncol(Y)
H <- check_var(vars = H, varn = "H", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
dif_name = "dataH_stratas")
w_final <- check_var(vars = w_final, varn = "w_final",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
isnumeric = TRUE, isvector = TRUE)
gender <- check_var(vars = gender, varn = "gender",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
isnumeric = TRUE, isvector = TRUE,
mustbedefined = FALSE)
Z <- check_var(vars = Z, varn = "Z", dataset = dataset,
check.names = TRUE, Yncol = Yncol, Ynrow = Ynrow,
isnumeric = TRUE, mustbedefined = FALSE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
ncols = 0, Yncol = 0, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
duplicatednames = TRUE, grepls = "__")
country <- check_var(vars = country, varn = "country",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
dif_name = c("percoun", "period_country"))
period <- check_var(vars = period, varn = "period",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
ischaracter = TRUE, duplicatednames = TRUE,
dif_name = c("percoun", "period_country", names(country)))
period1 <- check_var(vars = period1, varn = "period1", dataset = NULL,
ncols = 1, ischaracter = TRUE, periods = period)
period2 <- check_var(vars = period2, varn = "period2", dataset = NULL,
ncols = 1, ischaracter = TRUE, periods = period)
ID_level1 <- check_var(vars = ID_level1, varn = "ID_level1",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
ischaracter = TRUE)
ID_level2 <- check_var(vars = ID_level2, varn = "ID_level2",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
ischaracter = TRUE, namesID1 = names(ID_level1),
country = country, periods = period)
PSU <- check_var(vars = PSU, varn = "PSU", dataset = dataset,
ncols = 1, Yncol = 0, Ynrow = Ynrow,
ischaracter = TRUE, namesID1 = names(ID_level1))
if(!is.null(X) | !is.null(ind_gr) | !is.null(g) | !is.null(q) | !is.null(countryX) |
!is.null(periodX) | !is.null(X_ID_level1) | !is.null(datasetX)) {
X <- check_var(vars = X, varn = "X", dataset = datasetX,
check.names = TRUE, isnumeric = TRUE,
dif_name = c(names(period), names(country), names(H),
names(PSU), names(ID_level1), "w_final",
names(Y), "w_design", "g", "q"), dX = "X")
Xnrow <- nrow(X)
ind_gr <- check_var(vars = ind_gr, varn = "ind_gr",
dataset = datasetX, ncols = 1,
Xnrow = Xnrow, ischaracter = TRUE, dX = "X",
dif_name = c(names(period), names(country), names(H),
names(PSU), names(ID_level1), "w_final",
names(Y), names(X), "w_design", "g", "q"))
g <- check_var(vars = g, varn = "g", dataset = datasetX,
ncols = 1, Xnrow = Xnrow, isnumeric = TRUE,
isvector = TRUE, dX = "X")
q <- check_var(vars = q, varn = "q", dataset = datasetX,
ncols = 1, Xnrow = Xnrow, isnumeric = TRUE,
isvector = TRUE, dX = "X")
countryX <- check_var(vars = countryX, varn = "countryX",
dataset = datasetX, ncols = 1, Xnrow = Xnrow,
ischaracter = TRUE, mustbedefined = !is.null(country),
varnout = "country", varname = names(country),
country = country, dX = "X")
periodX <- check_var(vars = periodX, varn = "periodX",
dataset = datasetX, ncols = 1,
Xnrow = Xnrow, ischaracter = TRUE,
mustbedefined = !is.null(period),
duplicatednames = TRUE, varnout = "period",
varname = names(period), country = country,
countryX = countryX, periods = period, dX = "X")
X_ID_level1 <- check_var(vars = X_ID_level1, varn = "X_ID_level1",
dataset = datasetX, ncols = 1, Xnrow = Xnrow,
ischaracter = TRUE, varnout = "ID_level1",
varname = names(ID_level1), country = country,
countryX = countryX, periods = period,
periodsX = periodX, ID_level1 = ID_level1, dX = "X")
}
}
percoun <- dataset <- datasetX <- NULL
cros_calc <- vardcros(Y = Y, H = H, PSU = PSU, w_final = w_final,
ID_level1 = ID_level1, ID_level2 = ID_level2,
Dom = Dom, gender = gender, Z = Z,
country = country, period = period,
dataset = NULL, X = X, countryX = countryX,
periodX = periodX, X_ID_level1 = X_ID_level1,
ind_gr = ind_gr, g = g, q = q, datasetX = NULL,
linratio = linratio,
percentratio = percentratio,
use.estVar = use.estVar,
ID_level1_max = is.null(X),
outp_res = outp_res,
withperiod = TRUE,
netchanges = TRUE,
confidence = confidence,
checking = FALSE)
  Y1 <- Z1 <- NULL
  if (!is.null(Dom)) {
Y1 <- namesD(Y, Dom, uniqueD = TRUE)
if (!is.null(Z)) Z1 <- namesD(Z, Dom, uniqueD = TRUE)
} else { Y1 <- names(Y)
Z1 <- names(Z) }
countryX <- periodX <- X_ID_level1 <- NULL
X_ID_level1 <- ind_gr <- g <- q <- NULL
Y <- Z <- gender <- dataset <- w_final <- NULL
changes_calc <- vardchanges_calculation(Y1 = Y1, Z1 = Z1, Dom = names(Dom),
names_country = names(country), per = names(period),
PSU = names(PSU), H = names(H), period1 = period1,
period2 = period2, cros_var_grad = cros_calc$var_grad,
change_type = change_type, data = cros_calc$data_net_changes,
linratio = linratio, annual = FALSE,
percentratio = percentratio, use.estVar = use.estVar,
confidence = confidence, poor = FALSE)
Y1 <- Z1 <- Dom <- period <- PSU <- H <- period1 <- period2 <- NULL
crossectional_results <- cros_calc$results
if (is.null(names(country))) crossectional_results[, percoun := NULL]
  list(lin_out = cros_calc$lin_out,
res_out = cros_calc$res_out,
crossectional_results = crossectional_results,
crossectional_var_grad = changes_calc$cros_var_grad,
grad_var = changes_calc$grad_var,
rho = changes_calc$rho_matrix,
var_tau = changes_calc$var_tau,
changes_results = changes_calc$changes_results)
}
vardchanges_calculation <- function(Y1, Z1, Dom, names_country,
per, PSU, H, period1, period2,
cros_var_grad, change_type, data,
linratio, annual, percentratio,
use.estVar, confidence,
poor = FALSE){
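  # Internal helper shared by vardchanges() and vardchangespoor() (poor = TRUE); with
  # annual = TRUE the residual cross-products get a small-sample correction nh / (nh - 1).
  # For every pair (period1[i], period2[i]) it pairs the cross-sectional estimates, variances
  # and gradients, estimates the covariance between the two periods from stratum x
  # rotation-group regression residuals, and applies the delta method to obtain the variance
  # of the absolute or relative change.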
country <- ifelse(!is.null(names_country), names_country, "percoun")
#sarp <- c(country, H, PSU)
sarp <- c(country, PSU)
namesY <- namesZ <- ind <- nameYs <- nameZs <- grad1 <- grad2 <- NULL
rot_1 <- rot_2 <- rot_1_rot_2 <- stratasf <- name1 <- NULL
num1 <- num1num1 <- den1den1 <- den1 <- num2num2 <- NULL
den2den2 <- den2 <- num1den1 <- num1num2 <- num2 <- NULL
num1den2 <- den1num2 <- den1den2 <- num2den2 <- num1_1 <- NULL
den1_1 <- num1den1 <- den1den1 <- num1_2 <- den1_2 <- NULL
estim <- estim_1 <- estim_2 <- grad1_1 <- grad1_2 <- NULL
CI_upper <- grad2_1 <- ids_nr <- rot <- grad2_2 <- NULL
se <- CI_lower <- valueY1_1 <- valueZ1_1 <- valueY1_2 <- NULL
valueZ1_2 <- nh <- period_country_1 <- period_country_2 <- NULL
nhcor <- significant <- id_nams <- nams <- ids_nr <- NULL
N <- percoun <- confidence_level <- NULL
per1 <- paste0(per, "_1")
per2 <- paste0(per, "_2")
period1[, ind := .I]
period2[, ind := .I]
setnames(period1, per, per1)
setnames(period2, per, per2)
period1 <- merge(period1, period2, by = "ind")
period2 <- NULL
var_grad1 <- merge(period1, cros_var_grad, all.x = TRUE,
by.x = per1, by.y = per,
allow.cartesian = TRUE)
var_grad2 <- merge(period1, cros_var_grad, all.x = TRUE,
by.x = per2, by.y = per,
allow.cartesian = TRUE)
sarc <- c("ind", per1, per2, country, Dom, "type", "namesY", "namesZ")
sarc <- sarc[sarc %in% names(var_grad1)]
sar <- names(var_grad1)[!(names(var_grad1) %in% sarc)]
setnames(var_grad1, sar, paste0(sar, "_1"))
setnames(var_grad2, sar, paste0(sar, "_2"))
var_grad <- merge(var_grad1, var_grad2, all = TRUE, by = sarc)
var_grad[, ids_nr := 1 : .N]
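  # Delta-method gradients of the change estimator.
  # Relative change of a ratio, R = (Y_2/Z_2) / (Y_1/Z_1):
  #   grad1_p = dR/dY_p and grad2_p = dR/dZ_p for periods p = 1, 2;
  # without a denominator (or with linratio = TRUE) R = Y_2/Y_1 and only grad1_* are needed.
  # For the absolute change the period-1 gradients simply change sign.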
if (change_type == "relative"){
if (!linratio & !is.null(Z1)){
      var_grad[, grad1_1 := - valueY1_2 * valueZ1_1 / (valueZ1_2 * (valueY1_1)^2)]
      var_grad[, grad1_2 := valueZ1_1 / (valueZ1_2 * valueY1_1)]
      var_grad[, grad2_1 := valueY1_2 / (valueZ1_2 * valueY1_1)]
      var_grad[, grad2_2 := - valueY1_2 * valueZ1_1 / ((valueZ1_2)^2 * valueY1_1)]
} else {
var_grad[, grad1_1 := - valueY1_2 / (valueY1_1)^2]
var_grad[, grad1_2 := 1 / valueY1_1] }
} else {
if (!is.null(var_grad$grad1_1) & !poor){
var_grad[, grad1_1 := - grad1_1]
var_grad[, grad2_1 := - grad2_1]
} else {var_grad[, grad1_1 := - 1]
var_grad[, grad1_2 := 1] }}
var_grad11 <- copy(var_grad)
var_grad12 <- copy(var_grad)
var_grad11[, (c("grad", "cros_var", "id_nams", "nams")) := list(grad1_1, num1_1, 1, "num1")]
var_grad12[, (c("grad", "cros_var", "nams")) := list(grad1_2, num1_2, "num2")]
var_grad12[, id_nams := 2 + as.numeric(!is.null(var_grad$grad2_1))]
var_grad21 <- var_grad22 <- NULL
if (!is.null(var_grad$grad2_1)) {
var_grad21 <- copy(var_grad)
var_grad22 <- copy(var_grad)
var_grad21[, (c("grad", "cros_var", "id_nams", "nams")) := list(grad2_1, den1_1, 2, "den1")]
var_grad22[, (c("grad", "cros_var", "id_nams", "nams")) := list(grad2_2, den1_2, 4, "den2")]
}
var_gradn <- rbindlist(list(var_grad11, var_grad12,
var_grad21, var_grad22), fill = TRUE)
var_gradn <- var_gradn[, c(sarc, "ids_nr", "id_nams",
"nams", "grad", "cros_var"), with = FALSE]
var_grad11 <- var_grad12 <- NULL
var_grad21 <- var_grad22 <- NULL
vstrata1 <- vstrata2 <- vstrata12 <- NULL
data[, rot := 1]
data1 <- merge(period1, data, all.x = TRUE,
by.x = per1, by.y = per,
allow.cartesian = TRUE)
data2 <- merge(period1, data, all.x = TRUE,
by.x = per2, by.y = per,
allow.cartesian = TRUE)
sard <- names(data)[!(names(data) %in% c(sarp, per))]
data[is.na(ids_nr)]
setnames(data1, sard, paste0(sard, "_1"))
setnames(data2, sard, paste0(sard, "_2"))
data <- merge(data1, data2, all = TRUE, by = c("ind", per1, per2, sarp))
if (country == "country") {
data[is.na(period_country_1), period_country_1 := paste(get(per1), country, sep = "_")]
data[is.na(period_country_2), period_country_2 := paste(get(per2), country, sep = "_")]
} else {
data[is.na(period_country_1), period_country_1 := paste(get(per1), get(country), sep = "_")]
data[is.na(period_country_2), period_country_2 := paste(get(per2), get(country), sep = "_")] }
data1 <- data2 <- NULL
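  # small helper: overwrite NA values with zero, by reference, in the selected numeric columns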
recode.NA <- function(DT, cols = seq_len(ncol(DT))) {
for (j in cols) if (is.numeric(DT[[j]]))
set(DT, which(is.na(DT[[j]])), j, ifelse(is.integer(DT[[j]]), 0L, 0))
}
data[, nh := .N, by = c("ind", paste0("period_country_", 1:2), paste0(H, "_", 1:2))]
Hq1 <- paste0(H, "q_1")
Hq2 <- paste0(H, "q_2")
data[, (Hq1) := as.factor(get(paste0(H, "_1")))]
data[, (Hq2) := as.factor(get(paste0(H, "_2")))]
  dataHq1 <- data[, paste0(Hq1, "_", levels(get(Hq1)))]
  dataHq2 <- data[, paste0(Hq2, "_", levels(get(Hq2)))]
data[, (dataHq1) := transpose(lapply(get(Hq1), FUN = function(x){as.numeric(x == levels(get(Hq1)))})) ]
data[, (dataHq2) := transpose(lapply(get(Hq2), FUN = function(x){as.numeric(x == levels(get(Hq2)))})) ]
den1 <- den2 <- NULL
sard <- sard[!(sard %in% c(H, "period_country"))]
recode.NA(data, c(dataHq1, dataHq2, paste0(sard, "_1"), paste0(sard, "_2")))
saraks <- CJ(dataHq1, dataHq2, unique = FALSE)
saraks[, vstrata1 := paste0(" rot_1 : ", get("dataHq1"), " ")]
saraks[, vstrata2 := paste0(" rot_2 : ", get("dataHq2"), " ")]
saraks[, vstrata12 := paste0(" rot_1 : rot_2 : ", get("dataHq1"), " : " , get("dataHq2"), " ")]
saraks <- unique(unlist(saraks[, c("vstrata1", "vstrata2", "vstrata12")]))
saraks <- paste(saraks, collapse = "+")
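  # 'saraks' is the right-hand side of a multivariate regression formula built from
  # rotation-group x stratum interaction terms (rot_1 : stratum_1, rot_2 : stratum_2 and
  # their cross terms); the residuals of that fit, accumulated as cross-products below,
  # estimate the covariance between the two periods' totals.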
if (poor) Y1 <- sard[1:(length(sard) - 1)]
fit <- lapply(1 : length(Y1), function(i) {
fitd <- lapply(split(data, data[["ind"]]), function(data1) {
fits <- lapply(split(data1, data1[[country]]), function(DT3c) {
y1 <- paste0(Y1[i], "_1")
y2 <- paste0(Y1[i], "_2")
if (!is.null(namesZ) & !linratio) {
z1 <- paste0(",", Z1[i], "_1")
z2 <- paste0(",", Z1[i], "_2")
} else z1 <- z2 <- ""
funkc <- as.formula(paste0("cbind(", y1, z1, ", ",
y2, z2, ") ~ 0 + ", saraks))
res <- lm(funkc, data = DT3c)
if (use.estVar) { res <- data.table(estVar(res))
} else res <- data.table(res$res)
if (!is.null(namesZ) & !linratio) {
setnames(res, names(res), c("num1", "den1", "num2", "den2"))
res[, nameZs := Z1[i]]
} else setnames(res, names(res), c("num1", "num2"))
nosv <- c("num1", "den1", "num2", "den2")
nosv <- names(res)[names(res) %in% nosv]
Zvn <- as.integer(!is.null(namesZ) & !linratio)
res[, nameYs := Y1[i]]
keynames <- c(country, "ind", paste0(per, "_1"),
paste0(per, "_2"), "nameYs", "nameZs")
keynames <- keynames[keynames %in% c(names(DT3c), names(res))]
if (use.estVar) {
res <- data.table(id_nams = 1 : nrow(res), nams = nosv, res, DT3c[1])
} else {
res <- data.table(res, DT3c)
if (annual) { res[, nhcor := ifelse(nh > 1, nh / (nh - 1), 1)]
} else res[, nhcor := 1]
res[, num1num1 := num1 * num1 * nhcor]
res[, num2num2 := num2 * num2 * nhcor]
res[, num1num2 := num1 * num2 * nhcor]
res[, id_nams := 0]
res[, nams := ""]
if (!is.null(namesZ) & !linratio) {
res[, den1den1 := den1 * den1 * nhcor]
res[, den2den2 := den2 * den2 * nhcor]
res[, num1den1 := num1 * den1 * nhcor]
res[, num1den2 := num1 * den2 * nhcor]
res[, den1num2 := den1 * num2 * nhcor]
res[, den1den2 := den1 * den2 * nhcor]
res[, num2den2 := num2 * den2 * nhcor] }
varsp <- c("num1num1", "den1den1",
"num2num2", "den2den2",
"num1den1", "num1num2",
"num1den2", "den1num2",
"den1den2", "num2den2")
varsp <- varsp[varsp %in% names(res)]
fits <- res[, lapply(.SD, sum), keyby = c(keynames,
"id_nams", "nams"),
.SDcols = varsp]
fits1 <- copy(fits)
fits1[, (c("id_nams", "nams")) := list(1, "num1")]
setnames(fits1, (c("num1num1", "num1num2")), c("num1", "num2"))
fits2 <- copy(fits)
fits2[, id_nams := 2 + as.numeric(!is.null(fits$den2den2))]
fits2[, nams := "num2"]
setnames(fits2, c("num1num2", "num2num2"), c("num1", "num2"))
fits3 <- fits4 <- NULL
if (!is.null(fits$den2den2)){
setnames(fits1, c("num1den1", "num1den2"), c("den1", "den2"))
setnames(fits2, c("den1num2", "num2den2"), c("den1", "den2"))
fits3 <- copy(fits)
fits3[, (c("id_nams", "nams")) := list(2, "den1")]
setnames(fits3, c("num1den1", "den1num2",
"den1den1", "den1den2"),
c("num1", "num2", "den1", "den2"))
fits4 <- copy(fits)
fits4[, (c("id_nams", "nams")) := list(4, "den2")]
setnames(fits4, c("num1den2", "num2den2",
"den1den2", "den2den2"),
c("num1", "num2", "den1", "den2"))
}
res <- rbindlist(list(fits1, fits2, fits3, fits4), fill = TRUE)
fits <- fits1 <- fits2 <- fits3 <- fits4 <- NULL
}
fits <- res[, lapply(.SD, sum),
keyby = c(keynames, "id_nams", "nams"),
.SDcols = nosv]
return(fits)
})
rbindlist(fits)
})
rbindlist(fitd)
})
res <- rbindlist(fit)
set(res, j = country, value = as.character(res[[country]]))
if (poor) var_gradn[, namesY := paste0("lin_", tolower(get("type")))]
if (!is.null(Dom)) {
var_gradn[, paste0(Dom, "_ss") := lapply(Dom, function(x) paste0(x,".", get(x)))]
var_gradn[, nameYs := Reduce(function(x, y)
paste(x, y, sep = "__"), .SD),
.SDcols = c("namesY", paste0(Dom, "_ss"))]
if (!is.null(namesZ)) { var_gradn[, nameZs := Reduce(function(x, y)
paste(x, y, sep = "__"), .SD),
.SDcols = c("namesZ", paste0(Dom, "_ss"))]
}
var_gradn[, (paste0(Dom, "_ss")) := NULL]
} else { var_gradn[, nameYs := namesY]
if (!is.null(namesZ)) var_gradn[, nameZs := namesZ]}
if (poor) var_gradn[, namesY := NULL]
nameYZ <- c("nameYs", "nameZs")
nameYZ <- nameYZ[nameYZ %in% names(res)]
sars <- c(country, "ind", paste0(per, "_1"),
paste0(per, "_2"), nameYZ)
data <- merge(res, var_gradn, all = TRUE, by = c(sars, "id_nams", "nams"))
res <- fit <- var_gradn <- NULL
rmax <- max(data[!is.na(ids_nr), .N, by = "ids_nr"][["ids_nr"]])
nosv <- c("num1", "den1", "num2", "den2")
nosv <- names(data)[names(data) %in% nosv]
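  # For every period pair (ids_nr): rescale the summed residual cross-products into a
  # correlation matrix (rho_*) and a covariance matrix (var_tau_*) whose diagonal equals the
  # cross-sectional design variances, then apply the delta method,
  # var = t(grad) %*% var_tau %*% grad.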
dat <- lapply(1:rmax, function(i) {
res <- data[get("ids_nr") == i]
res1 <- as.matrix(res[, nosv, with = FALSE])
rhod <- diag(sqrt(1 / diag(res1)), length(nosv), length(nosv))
rhod <- data.table((t(rhod) %*% res1) %*% rhod)
setnames(rhod, names(rhod), paste0("rho_", nosv))
dmatr <- diag(sqrt(res[["cros_var"]] / diag(res1)),
length(nosv), length(nosv))
var_tau <- data.table((t(dmatr) %*% res1) %*% dmatr)
dmatr <- data.table(dmatr)
setnames(dmatr, names(dmatr), paste0("d_", nosv))
setnames(var_tau, names(var_tau), paste0("var_tau_", nosv))
res <- data.table(res, rhod, dmatr, var_tau)
var_t <- (t(res[["grad"]]) %*% as.matrix(var_tau)) %*% res[["grad"]]
var_t <- var_t[, 1]
var_grads <- var_grad[get("ids_nr") == i]
if (change_type == "absolute") {
var_grads[, estim := estim_2 - estim_1]
} else var_grads[, estim := estim_2 / estim_1 * percentratio]
var_grads[, var := var_t]
list(matricas = res, data = var_grads) })
matricas <- rbindlist(lapply(dat, function(x) x[[1]]))
datas <- rbindlist(lapply(dat, function(x) x[[2]]))
if (change_type == "relative" | (!is.null(datas$namesZ) & !linratio)) {
datas[, var:=var * (percentratio) ^ 2] }
datas[var >= 0, se := sqrt(var)]
tsad <- qnorm(0.5 * (1 + confidence))
datas[, CI_lower := estim - tsad * se]
datas[, CI_upper := estim + tsad * se]
sarc <- c(sarc, "nams")
sarc <- sarc[!(sarc %in% c("ind"))]
rho_matrix <- matricas[, c(sarc, paste0("rho_", nosv)), with = FALSE]
var_tau <- matricas[, c(sarc, paste0("var_tau_", nosv)), with = FALSE]
grad_var <- matricas[, c(sarc, "grad", "cros_var"), with = FALSE]
namesYZ <- c("namesY", "namesZ", "type")
namesYZ <- names(datas)[(names(datas) %in% namesYZ)]
changes_results <- datas[, c(paste0(per,"_", c(1, 2)), country, Dom,
namesYZ, "estim_1", "estim_2", "estim",
"var", "se", "CI_lower", "CI_upper"), with = FALSE]
changes_results[, confidence_level := confidence]
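  # a change is flagged as significant when the confidence interval excludes the no-change
  # value: 0 for absolute changes and 1 for relative changes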
changes_results[, significant := TRUE]
boundss <- as.numeric(change_type == "relative")
changes_results[CI_lower <= boundss & CI_upper >= boundss, significant := FALSE]
if (is.null(names_country)) {
cros_var_grad[, percoun := NULL]
grad_var[, percoun := NULL]
rho_matrix[, percoun := NULL]
var_tau[, percoun := NULL]
changes_results[, percoun := NULL] }
list(cros_var_grad = cros_var_grad,
grad_var = grad_var,
rho_matrix = rho_matrix,
var_tau = var_tau,
changes_results = changes_results)
}
# ---- end of R/vardchanges.R ----
#' Variance estimation for measures of change for sample surveys for indicators on social exclusion and poverty
#'
#' @description Computes the variance estimation for measures of change for indicators on social exclusion and poverty.
#'
#' @param Y Study variable (for example equalized disposable income or gross pension income). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param age Age variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param pl085 Retirement variable (Number of months spent in retirement or early retirement). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param month_at_work Variable for total number of month at work (sum of the number of months spent at full-time work as employee, number of months spent at part-time work as employee, number of months spent at full-time work as self-employed (including family worker), number of months spent at part-time work as self-employed (including family worker)). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Y_den Denominator variable (for example gross individual earnings). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Y_thres Variable (for example equalized disposable income) used for computation and linearization of poverty threshold. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. Variable specified for \code{Y} is used as \code{Y_thres} if \code{Y_thres} is not defined.
#' @param wght_thres Weight variable used for computation and linearization of poverty threshold. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. Variable specified for \code{w_final} is used as \code{wght_thres} if \code{wght_thres} is not defined.
#' @param H The unit stratum variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param PSU Primary sampling unit variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param w_final Weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number or logical vector with only one \code{TRUE} value (length of the vector has to be the same as the column count of \code{dataset}).
#' @param ID_level1 Variable for level1 ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param ID_level2 Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, variables are calculated for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers.
#' @param country Variable for the survey countries. The values for each country are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param period Variable for all survey periods. The values for each period are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param sort Optional variable to be used as tie-breaker for sorting. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param period1 The vector from variable \code{period} describes the first period.
#' @param period2 The vector from variable \code{period} describes the second period.
#' @param gender Numerical variable for gender, where 1 stands for males and 2 for females. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param dataset Optional survey data object convertible to \code{data.frame}.
#' @param X Optional matrix of the auxiliary variables for the calibration estimator. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param countryX Optional variable for the survey countries. The values for each country are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param periodX Optional variable of the survey periods and countries. If supplied, residual estimation of calibration is done independently for each time period. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param X_ID_level1 Variable for level1 ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param ind_gr Optional variable by which the \code{X} matrix of auxiliary variables is split into independent groups for calibration. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param g Optional variable of the g weights. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param q Variable of the positive values accounting for heteroscedasticity. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param datasetX Optional survey data object in household level convertible to \code{data.table}.
#' @param percentage A numeric value in range \eqn{[0,100]} for \eqn{p} in the formula for poverty threshold computation:
#' \deqn{\frac{p}{100} \cdot Z_{\frac{\alpha}{100}}.}{p/100 * Z(\alpha/100).}
#' For example, to compute poverty threshold equal to 60\% of some income quantile, \eqn{p} should be set equal to 60.
#' @param order_quant A numeric value in range \eqn{[0,100]} for \eqn{\alpha} in the formula for poverty threshold computation:
#' \deqn{\frac{p}{100} \cdot Z_{\frac{\alpha}{100}}.}{p/100 * Z(\alpha/100).}
#'For example, to compute poverty threshold equal to some percentage of median income, \eqn{\alpha} should be set equal to 50.
#' @param alpha a numeric value in range \eqn{[0,100]} for the order of the income quantile share ratio (in percentage).
#' @param use.estVar Logical value. If value is \code{TRUE}, then \code{R} function \code{estVar} is used for the estimation of covariance matrix of the residuals. If value is \code{FALSE}, then \code{R} function \code{estVar} is not used for the estimation of covariance matrix of the residuals.
#' @param confidence Optional positive value for the confidence level of the confidence interval. This variable by default is 0.95.
#' @param outp_lin Logical value. If \code{TRUE} linearized values of the ratio estimator will be printed out.
#' @param outp_res Logical value. If \code{TRUE} estimated residuals of calibration will be printed out.
#' @param type a character vector (of length one unless several.ok is TRUE), example "linarpr","linarpt", "lingpg", "linpoormed", "linrmpg", "lingini", "lingini2", "linqsr", "linarr", "linrmir", "all_choices".
#' @param change_type Character value; the type of net change, either \code{"absolute"} or \code{"relative"}.
#'
#' @return A list of objects returned by the function:
#' \itemize{
#' \item \code{cros_lin_out} - a \code{data.table} containing the linearized values of the ratio estimator with ID_level2 and PSU by periods and countries (if available).
#' \item \code{cros_res_out} - a \code{data.table} containing the estimated residuals of calibration with ID_level1 and PSU by periods and countries (if available).
#' \item \code{crossectional_results} - a \code{data.table} containing: \cr
#' \code{period} - survey periods, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{type} - type variable, \cr
#' \code{count_respondents} - the count of respondents, \cr
#' \code{pop_size} - the population size (in numbers of individuals), \cr
#' \code{estim} - the estimated value, \cr
#' \code{se} - the estimated standard error, \cr
#' \code{var} - the estimated variance, \cr
#' \code{rse} - the estimated relative standard error (coefficient of variation), \cr
#' \code{cv} - the estimated relative standard error (coefficient of variation) in percentage.
#' \item \code{changes_results} - a \code{data.table} containing: \cr
#' \code{period} - survey periods, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{type} - type variable, \cr
#' \code{estim_1} - the estimated value for period1, \cr
#' \code{estim_2} - the estimated value for period2, \cr
#' \code{estim} - the estimated value, \cr
#' \code{se} - the estimated standard error, \cr
#' \code{var} - the estimated variance, \cr
#' \code{rse} - the estimated relative standard error (coefficient of variation), \cr
#' \code{cv} - the estimated relative standard error (coefficient of variation) in percentage.}
#'
#' @references
#' Guillaume Osier, Yves Berger, Tim Goedeme, (2013), Standard error estimation for the EU-SILC indicators of poverty and social exclusion, Eurostat Methodologies and Working papers, URL \url{http://ec.europa.eu/eurostat/documents/3888793/5855973/KS-RA-13-024-EN.PDF}. \cr
#' Eurostat Methodologies and Working papers, Handbook on precision requirements and variance estimation for ESS household surveys, 2013, URL \url{http://ec.europa.eu/eurostat/documents/3859598/5927001/KS-RA-13-029-EN.PDF}. \cr
#' Yves G. Berger, Tim Goedeme, Guillaume Osier (2013). Handbook on standard error estimation and other related sampling issues in EU-SILC, URL \url{https://ec.europa.eu/eurostat/cros/content/handbook-standard-error-estimation-and-other-related-sampling-issues-ver-29072013_en} \cr
#'
#' @examples
#'
#' ### Example
#' library("laeken")
#' library("data.table")
#' data(eusilc)
#' set.seed(1)
#' dataset1 <- data.table(rbind(eusilc, eusilc),
#' year = c(rep(2010, nrow(eusilc)),
#' rep(2011, nrow(eusilc))),
#' country = c(rep("AT", nrow(eusilc)),
#' rep("AT", nrow(eusilc))))
#' dataset1[age < 0, age := 0]
#' PSU <- dataset1[, .N, keyby = "db030"][, N := NULL]
#' PSU[, PSU := trunc(runif(nrow(PSU), 0, 100))]
#' PSU$inc <- runif(nrow(PSU), 20, 100000)
#' dataset1 <- merge(dataset1, PSU, all = TRUE, by = "db030")
#' PSU <- eusilc <- NULL
#' dataset1[, strata := c("XXXX")]
#' dataset1$pl085 <- 12 * trunc(runif(nrow(dataset1), 0, 2))
#' dataset1$month_at_work <- 12 * trunc(runif(nrow(dataset1), 0, 2))
#' dataset1[, id_l2 := paste0("V", .I)]
#' result <- vardchangespoor(Y = "inc", age = "age",
#' pl085 = "pl085", month_at_work = "month_at_work",
#' Y_den = "inc", Y_thres = "inc",
#' wght_thres = "rb050", H = "strata",
#' PSU = "PSU", w_final="rb050",
#' ID_level1 = "db030", ID_level2 = "id_l2",
#' Dom = c("rb090"), country = "country",
#' period = "year", sort = NULL,
#' period1 = c(2010, 2011),
#' period2 = c(2011, 2010),
#' gender = NULL, dataset = dataset1,
#' percentage = 60, order_quant = 50L,
#' alpha = 20, confidence = 0.95,
#' type = "linrmpg")
#' result
#'
#'
#' @seealso \code{\link{domain}},
#' \code{\link{vardchanges}},
#' \code{\link{vardcros}},
#' \code{\link{vardcrospoor}}
#'
#' @keywords vardchanges
#'
#'
#' @import data.table
#' @import laeken
#'
#' @export vardchangespoor
vardchangespoor <- function(Y, age = NULL,
pl085 = NULL,
month_at_work = NULL,
Y_den = NULL,
Y_thres = NULL,
wght_thres = NULL,
H, PSU, w_final,
ID_level1, ID_level2,
Dom = NULL, country = NULL,
period, sort = NULL,
period1, period2,
gender = NULL, dataset = NULL,
X = NULL, countryX = NULL,
periodX = NULL, X_ID_level1 = NULL,
ind_gr = NULL, g = NULL, q = NULL,
datasetX = NULL, percentage = 60,
order_quant = 50, alpha = 20,
use.estVar = FALSE,
confidence = 0.95,
outp_lin = FALSE,
outp_res = FALSE,
type = "linrmpg",
change_type = "absolute") {
### Checking
. <- NULL
change_type <- check_var(vars = change_type, varn = "change_type", varntype = "change_type")
all_choices <- c("linarpr", "linarpt", "lingpg",
"linpoormed", "linrmpg", "lingini",
"lingini2", "linqsr", "linrmir", "linarr")
type <- tolower(type)
type <- match.arg(type, all_choices, length(type) > 1)
percentage <- check_var(vars = percentage, varn = "percentage", varntype = "numeric0100")
order_quant <- check_var(vars = order_quant, varn = "order_quant", varntype = "numeric0100")
alpha <- check_var(vars = alpha, varn = "alpha", varntype = "numeric0100")
use.estVar <- check_var(vars = use.estVar, varn = "use.estVar", varntype = "logical")
confidence <- check_var(vars = confidence, varn = "confidence", varntype = "numeric01")
if(!is.null(X)) {
if (is.null(datasetX)) datasetX <- copy(dataset)
equal_dataset <- identical(dataset, datasetX) & !is.null(dataset)
if (equal_dataset) { X_ID_level1 <- ID_level1
countryX <- country }}
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
ncols = 1, isnumeric = TRUE,
isvector = TRUE, grepls = "__")
Ynrow <- length(Y)
w_final <- check_var(vars = w_final, varn = "weight",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
age <- check_var(vars = age, varn = "age", dataset = dataset,
ncols = 1, Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE,
mustbedefined = any(c("linarr", "linrmir") %in% type))
pl085 <- check_var(vars = pl085, varn = "pl085", dataset = dataset,
ncols = 1, Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE,
mustbedefined = any(type == "linarr"))
month_at_work <- check_var(vars = month_at_work, varn = "month_at_work",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
isnumeric = TRUE, isvector = TRUE,
mustbedefined = any(type == "linarr"))
gender <- check_var(vars = gender, varn = "gender", dataset = dataset,
ncols = 1, Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE, mustbedefined = any(type == "lingpg"))
Y_den <- check_var(vars = Y_den, varn = "Y_den", dataset = dataset,
ncols = 1, Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE,
mustbedefined = any(type == "linarr"))
Y_thres <- check_var(vars = Y_thres, varn = "Y_thres",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
wght_thres <- check_var(vars = wght_thres, varn = "wght_thres",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
H <- check_var(vars = H, varn = "H", dataset = dataset,
ncols = 1, Yncol = 0, Ynrow = Ynrow,
ischaracter = TRUE,
dif_name = c("type", "nameYs", "dataH_stratas"))
sort <- check_var(vars = sort, varn = "sort",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
Ynrow = Ynrow, ischaracter = TRUE,
mustbedefined = FALSE, duplicatednames = TRUE,
dif_name = c("type", "nameYs"),
grepls = "__")
country <- check_var(vars = country, varn = "country",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
dif_name = c("percoun", "period_country",
"type", "nameYs"))
period <- check_var(vars = period, varn = "period",
dataset = dataset, Ynrow = Ynrow,
ischaracter = TRUE, duplicatednames = TRUE,
dif_name = c("percoun", "period_country",
"type", "nameYs", names(country)))
period1 <- check_var(vars = period1, varn = "period1", dataset = NULL,
ncols = 1, ischaracter = TRUE, periods = period)
period2 <- check_var(vars = period2, varn = "period2", dataset = NULL,
ncols = 1, ischaracter = TRUE, periods = period)
ID_level1 <- check_var(vars = ID_level1, varn = "ID_level1",
dataset = dataset, ncols = 1, Yncol = 0,
Ynrow = Ynrow, ischaracter = TRUE)
ID_level2 <- check_var(vars = ID_level2, varn = "ID_level2",
dataset = dataset, ncols = 1, Yncol = 0,
Ynrow = Ynrow, ischaracter = TRUE,
namesID1 = names(ID_level1), country = country,
periods = period)
PSU <- check_var(vars = PSU, varn = "PSU", dataset = dataset,
ncols = 1, Yncol = 0, Ynrow = Ynrow,
ischaracter = TRUE, namesID1 = names(ID_level1))
if(!is.null(X) | !is.null(ind_gr) | !is.null(g) | !is.null(q) | !is.null(countryX) |
!is.null(periodX) | !is.null(X_ID_level1) | !is.null(datasetX)) {
X <- check_var(vars = X, varn = "X", dataset = datasetX,
check.names = TRUE, isnumeric = TRUE,
dif_name = c(names(period), names(country), names(H),
names(PSU), names(ID_level1), "w_final",
"w_design", "g", "q", "type", "nameYs"),
dX = "X")
Xnrow <- nrow(X)
ind_gr <- check_var(vars = ind_gr, varn = "ind_gr",
dataset = datasetX, ncols = 1,
Xnrow = Xnrow, ischaracter = TRUE,
dif_name = c(names(period), names(country), names(H),
names(PSU), names(ID_level1), "w_final",
"w_design", "g", "q", "type", "nameYs"),
dX = "X")
g <- check_var(vars = g, varn = "g", dataset = datasetX,
ncols = 1, Xnrow = Xnrow, isnumeric = TRUE,
isvector = TRUE, dX = "X")
q <- check_var(vars = q, varn = "q", dataset = datasetX,
ncols = 1, Xnrow = Xnrow, isnumeric = TRUE,
isvector = TRUE, dX = "X")
countryX <- check_var(vars = countryX, varn = "countryX",
dataset = datasetX, ncols = 1, Xnrow = Xnrow,
ischaracter = TRUE, mustbedefined = !is.null(country),
varnout = "country", varname = names(country),
country = country, dX = "X")
periodX <- check_var(vars = periodX, varn = "periodX",
dataset = datasetX, ncols = 1,
Xnrow = Xnrow, ischaracter = TRUE,
mustbedefined = !is.null(period),
duplicatednames = TRUE, varnout = "period",
varname = names(period), country = country,
countryX = countryX, periods = period, dX = "X")
X_ID_level1 <- check_var(vars = X_ID_level1, varn = "X_ID_level1",
dataset = datasetX, ncols = 1, Xnrow = Xnrow,
ischaracter = TRUE, varnout = "ID_level1",
varname = names(ID_level1), country = country,
countryX = countryX, periods = period,
periodsX = periodX, ID_level1 = ID_level1, dX = "X")
}
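  # when not supplied, the poverty-threshold income and weight default to the study income and the final weight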
if (is.null(Y_thres)) Y_thres <- Y
if (is.null(wght_thres)) wght_thres <- w_final
cros_calc <- vardcrospoor(Y = Y, age = age, pl085 = pl085,
month_at_work = month_at_work,
Y_den = Y_den, Y_thres = Y_thres,
H = H, PSU = PSU, w_final = w_final,
ID_level1 = ID_level1, ID_level2 = ID_level2,
Dom = Dom, country = country,
period = period, sort = sort,
gender = gender, dataset = NULL,
X = X, countryX = countryX,
periodX = periodX, X_ID_level1 = X_ID_level1,
ind_gr = ind_gr, g = g, q = q,
datasetX = NULL, percentage = percentage,
order_quant = order_quant,
alpha = alpha,
use.estVar = use.estVar,
withperiod = TRUE,
netchanges = TRUE,
confidence = confidence,
outp_lin = outp_lin,
outp_res = outp_res,
type = type, checking = FALSE)
cros_lin_out <- cros_calc$lin_out
cros_res_out <- cros_calc$res_out
data_res <- cros_calc$res_out
data <- cros_calc$data_net_changes
crossectional_results <- copy(cros_calc$results)
ID_level1 <- ID_level2 <- percoun <- cros_calc <- NULL
sar <- c(names(period), names(country), names(Dom), "percoun", "type", "estim", "var")
sar <- sar[sar %in% names(crossectional_results)]
cros_var_grad <- crossectional_results[, sar, with = FALSE]
setnames(cros_var_grad, "var", "num1")
value <- nameYs <- NULL
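  # totals of the linearized variables by period and country; after the merge below they
  # supply the valueY1_1 / valueY1_2 terms used in the gradient formulas of
  # vardchanges_calculation()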
var_grad0 <- melt(data, id = c(names(period), names(country)), measure.vars = c(names(data)[grepl("lin", names(data))]))
var_grad0 <- var_grad0[, .(valueY1 = sum(value)), keyby = c(names(period), names(country), "variable")]
setnames(var_grad0, "variable", "nameYs")
if (!is.null(Dom)) {
cros_var_grad[, nameYs := namesD(cros_var_grad[, "type"], cros_var_grad[, names(Dom), with = FALSE], uniqueD = FALSE)]
cros_var_grad[, nameYs := paste0("lin_", tolower(type), "__", substr(nameYs, 7, nchar(nameYs)))]
} else cros_var_grad[, nameYs := paste0("lin_", tolower(type))]
cros_var_grad <- merge(cros_var_grad, var_grad0, all = TRUE, by = c(names(period), names(country), "nameYs"))
cros_var_grad[, nameYs := NULL]
var_grad0 <- NULL
changes_calc <- vardchanges_calculation(Y1 = "type", Z1 = NULL, Dom = names(Dom),
names_country = names(country), per = names(period),
PSU = names(PSU), H = names(H), period1 = period1,
period2 = period2, cros_var_grad = cros_var_grad,
change_type = change_type, data = data, linratio = FALSE,
annual = FALSE, percentratio = 1,
use.estVar = use.estVar, confidence = confidence,
poor = TRUE)
  Y1 <- Z1 <- Dom <- period <- PSU <- H <- period1 <- period2 <- NULL
  if (is.null(names(country))) crossectional_results[, percoun := NULL]
  list(cros_lin_out = cros_lin_out,
       cros_res_out = cros_res_out,
       crossectional_results = crossectional_results,
       crossectional_var_grad = changes_calc$cros_var_grad,
       grad_var = changes_calc$grad_var,
       rho = changes_calc$rho_matrix,
       var_tau = changes_calc$var_tau,
       changes_results = changes_calc$changes_results)
}
# ---- end of R/vardchangespoor.R ----
#' Variance estimation for measures of annual net change or annual estimates for single stratified sampling designs
#'
#' @description Computes the variance estimation for measures of annual net change or annual estimates for single stratified sampling designs.
#'
#' @param Y Variables of interest. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param H The unit stratum variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param PSU Primary sampling unit variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param w_final Weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, variables are calculated for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers.
#' @param periods Variable for all survey periods. The values for each period are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param periods1 The vector of periods from variable \code{periods} describes the first period for measures of change.
#' @param periods2 The vector of periods from variable \code{periods} describes the second period for measures of change.
#' @param in_sample Sample variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param in_frame Frame variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param percentratio Positive numeric value. All linearized variables are multiplied with \code{percentratio} value, by default - 1.
#' @param confidence Optional positive value for the confidence level of the confidence interval. This variable by default is 0.95.
#'
#'
#' @return A list of objects returned by the function:
#' \itemize{
#' \item \code{crossectional_results} - a \code{data.table} containing: \cr
#' \code{year} - survey years, \cr
#' \code{subperiods} - survey sub-periods, \cr
#' \code{variable} - names of variables of interest, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{estim} - the estimated value, \cr
#' \code{var} - the estimated variance of cross-sectional and longitudinal measures, \cr
#' \code{sd_w} - the estimated weighted variance of simple random sample, \cr
#' \code{se} - the estimated standard error of cross-sectional or longitudinal measures, \cr
#' \code{rse} - the estimated relative standard error (coefficient of variation), \cr
#' \code{cv} - the estimated relative standard error (coefficient of variation) in percentage, \cr
#' \code{absolute_margin_of_error} - the estimated absolute margin of error, \cr
#' \code{relative_margin_of_error} - the estimated relative margin of error, \cr
#' \code{CI_lower} - the estimated confidence interval lower bound, \cr
#' \code{CI_upper} - the estimated confidence interval upper bound, \cr
#' \code{confidence_level} - the positive value for confidence interval.
#' \item \code{annual_results} - a \code{data.table} containing: \cr
#' \code{year_1} - survey years of \code{periods1} for measures of annual net change, \cr
#' \code{year_2} - survey years of \code{periods2} for measures of annual net change, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{variable} - names of variables of interest, \cr
#' \code{estim_2} - the estimated value for period2 for measures of annual net change, \cr
#' \code{estim_1} - the estimated value for period1 for measures of annual net change, \cr
#' \code{estim} - the estimated value, \cr
#' \code{var} - the estimated variance, \cr
#' \code{se} - the estimated standard error, \cr
#' \code{rse} - the estimated relative standard error (coefficient of variation), \cr
#' \code{cv} - the estimated relative standard error (coefficient of variation) in percentage, \cr
#' \code{absolute_margin_of_error} - the estimated absolute margin of error, \cr
#' \code{relative_margin_of_error} - the estimated relative margin of error in percentage, \cr
#' \code{CI_lower} - the estimated confidence interval lower bound, \cr
#' \code{CI_upper} - the estimated confidence interval upper bound, \cr
#' \code{confidence_level} - the positive value for confidence interval, \cr
#' \code{significant} - whether the difference is significant
#' }
#'
#' @references
#' Guillaume OSIER, Virginie RAYMOND, (2015), Development of methodology for the estimate of variance of annual net changes for LFS-based indicators. Deliverable 1 - Short document with derivation of the methodology.
#'
#' @seealso \code{\link{vardchanges}},
#' \code{\link{vardannual}}
#'
#' @keywords vardannual
#'
#' @import data.table
#' @import stringr
#'
#' @export vardchangstrs
vardchangstrs <- function(Y, H, PSU, w_final,
Dom = NULL, periods = NULL,
dataset, periods1, periods2,
in_sample, in_frame,
confidence = 0.95,
percentratio = 1){
B_l <- Bl_sum <- CI_lower <- CI_upper <- D_l <- NULL
Dl_sum <- P_hl <- ap_hl <- covv <- cv <- estim <- NULL
estim_1 <- estim_2 <- fGhl_sum <- fa1_hl <- fa2_hl <- NULL
g_hl <- ids <- in_sample_1 <- in_sample_2 <- ind_1 <- NULL
ind_2 <- kor <- n_q <- nh1 <- nh2 <- pop1 <- pop2 <- NULL
pop_q <- rse <- rse2 <- sa1_hl <- sa1hl_sum <- NULL
sa2_hl <- sa2hl_sum <- se <- sghl_sum <- type <- NULL
var_1 <- var_2 <- variable_new <- NULL
dataset[, (periods) := lapply(.SD[, periods, with = FALSE], as.character)]
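  # cross-sectional point estimates and design variances per period (and domain),
  # computed on the sampled units only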
calc <- vardom(Y = Y, H = H, PSU = PSU,
w_final = w_final,
Dom = Dom, period = periods,
dataset = dataset[in_sample == 1])$all_result
calc3 <- copy(calc)
outvars <- c("variable", Dom, periods, "estim", "var")
calc <- calc[, outvars, with = FALSE]
setnafill(dataset, type = "const", fill = 0, cols = Y)
if (!is.null(Dom)) { dats <- domain(Y, D = Dom, dataset = dataset)
Yvars <- names(dats)
dataset <- data.table(dataset, dats)
rm(dats)
} else Yvars <- Y
periods1 <- data.table(as.character(periods1))
setnames(periods1, names(periods1), periods)
periods1[, ids := 1:.N]
sar <- c(periods, Yvars, H, w_final, in_sample, in_frame)
frame1 <- merge(periods1, dataset[, c(PSU, sar), with = FALSE],
all.x = TRUE, by = periods, sort = FALSE, allow.cartesian = TRUE)
setnames(frame1, sar, paste0(sar, "_1"))
periods2 <- data.table(as.character(periods2))
setnames(periods2, names(periods2), periods)
periods2[, ids := 1:.N]
frame2 <- merge(periods2, dataset[, c(PSU, sar), with = FALSE],
all.x = TRUE, by = periods, sort = FALSE, allow.cartesian = TRUE)
setnames(frame2, sar, paste0(sar, "_2"))
dat_ids <- merge(periods1, periods2, by = "ids", all = TRUE)
setnames(dat_ids, names(dat_ids), c("ids", paste0(periods, "_", 1:2)))
frame <- merge(frame1, frame2, by = c("ids", PSU), all = TRUE)
rm(list = c(paste0("periods", 1:2), paste0("frame", 1:2)))
setnafill(frame, type = "const", fill = 0,
cols = c(paste0(c(in_sample, in_frame), "_1"),
paste0(c(in_sample, in_frame), "_2")))
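  # Frame and sample overlap indicators per unit:
  # D_l - in the period-1 frame only, B_l - in the period-2 frame only, P_hl - in both frames;
  # g_hl - sampled in both periods; sa1_hl/sa2_hl and fa1_hl/fa2_hl - sampled / in-frame
  # indicators restricted to the common part of the two frames.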
frame[, D_l := as.numeric(get(paste0(in_frame, "_1")) == 1 & get(paste0(in_frame, "_2")) == 0)]
frame[, P_hl := as.numeric(get(paste0(in_frame, "_1")) == 1 & get(paste0(in_frame, "_2")) == 1)]
frame[, B_l := as.numeric(get(paste0(in_frame, "_1")) == 0 & get(paste0(in_frame, "_2")) == 1)]
frame[, g_hl := as.numeric(get(paste0(in_sample, "_1")) == 1 & get(paste0(in_sample, "_2")) == 1)]
frame[, sa1_hl := as.numeric(P_hl == 1 & get(paste0(in_sample, "_1")) == 1)]
frame[, sa2_hl := as.numeric(P_hl == 1 & get(paste0(in_sample, "_2")) == 1)]
frame[, fa1_hl := as.numeric(P_hl == 1 & get(paste0(in_frame, "_1")) == 1)]
frame[, fa2_hl := as.numeric(P_hl == 1 & get(paste0(in_frame, "_2")) == 1)]
frame[, `:=`(fGhl_sum = .N, sghl_sum = sum(g_hl),
sa1hl_sum = sum(sa1_hl), sa2hl_sum = sum(sa2_hl),
fa1hl_sum = sum(fa1_hl), fa2hl_sum = sum(fa2_hl),
Dl_sum = sum(D_l), Bl_sum = sum(B_l)),
keyby = c("ids", paste0(H, "_", 1:2))]
frame[, pop1 := .N, by = c("ids", paste0(H, "_", 1))]
frame[, pop2 := .N, by = c("ids", paste0(H, "_", 2))]
frame[, nh1 := sum(get(paste0(in_sample, "_1"))), by = c("ids", paste0(H, "_", 1))]
frame[, nh2 := sum(get(paste0(in_sample, "_2"))), by = c("ids", paste0(H, "_", 2))]
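  # Classify the stratum cells of the frame overlap by how the two samples cover them:
  # Type1 - sampled in both periods with at least one common PSU,
  # Type3 - sampled in both periods but with no common PSU,
  # Type2 - sampled in at most one of the two periods.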
frame[sa1hl_sum >= 1 & sa2hl_sum >= 1 & sghl_sum >= 1, type := "Type1"]
frame[sa1hl_sum >= 1 & sa2hl_sum >= 1 & sghl_sum == 0, type := "Type3"]
frame[is.na(type) & (sa1hl_sum == 0 | sa2hl_sum == 0) & sghl_sum == 0, type := "Type2"]
sample_data <- frame[get(paste0(in_sample, "_1")) == 1 | get(paste0(in_sample, "_2")) == 1]
sample_data[P_hl > 0, ap_hl := sa1hl_sum * sa2hl_sum / sghl_sum ]
sample_data[type == "Type1", pop_q := fGhl_sum]
sample_data[type == "Type1", n_q := as.numeric(ap_hl)]
sample_data[type == "Type1", ind_1 := get(paste0(w_final, "_1")) * ap_hl / fGhl_sum]
sample_data[type == "Type1", ind_2 := get(paste0(w_final, "_2")) * ap_hl / fGhl_sum]
sample_data[type == "Type2" & sa1hl_sum > 0 & sa2hl_sum == 0, pop_q := pop1]
sample_data[type == "Type2" & sa1hl_sum == 0 & sa2hl_sum > 0, pop_q := pop2]
sample_data[type == "Type2" & sa1hl_sum > 0 & sa2hl_sum == 0, n_q := as.numeric(nh1)]
sample_data[type == "Type2" & sa1hl_sum == 0 & sa2hl_sum > 0, n_q := as.numeric(nh2)]
sample_data[type == "Type2", ind_1 := 1]
sample_data[type == "Type2", ind_2 := 1]
sample_data[type == "Type3", pop_q := fGhl_sum]
sample_data[type == "Type3", n_q := as.numeric(fGhl_sum)]
sample_data[type == "Type3", ind_1 := get(paste0(w_final, "_1"))]
sample_data[type == "Type3", ind_2 := get(paste0(w_final, "_2"))]
sample_data[D_l == 1, pop_q := pop1]
sample_data[D_l == 1, n_q := as.numeric(nh1)]
sample_data[D_l == 1, ind_1 := 1]
sample_data[D_l == 1, ind_2 := 0]
sample_data[B_l == 1, pop_q := pop2]
sample_data[B_l == 1, n_q := as.numeric(nh2)]
sample_data[B_l == 1, ind_1 := 0]
sample_data[B_l == 1, ind_2 := 1]
rm(list = c("frame", "dataset"))
setnafill(sample_data, type = "const", fill = 0,
cols = c(paste0(Yvars, "_1"), paste0(Yvars, "_2")))
  aggr1 <- sample_data[, lapply(Yvars, function(x) { sum(get(paste0(x, "_1")) * get(paste0(x, "_2")) * ind_1 * ind_2 * get(paste0(in_sample, "_1")) * get(paste0(in_sample, "_2")))}),
keyby = c("ids", paste0(H, "_", 1:2), "pop_q", "n_q")]
setnames(aggr1, paste0("V", 1:length(Yvars)), paste0(Yvars, "d1"))
  aggr2 <- sample_data[, lapply(Yvars, function(x) { sum(get(paste0(x, "_1")) * ind_1 * get(paste0(in_sample, "_1"))) * sum(get(paste0(x, "_2")) * ind_2 * get(paste0(in_sample, "_2")))}),
keyby = c("ids", paste0(H, "_", 1:2), "pop_q", "n_q")]
setnames(aggr2, paste0("V", 1:length(Yvars)), paste0(Yvars, "d2"))
aggr1 <- merge(aggr1, aggr2, all = TRUE)
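  # Finite population correction factor pop_q * (pop_q - n_q) / (n_q * (n_q - 1)) for each cell;
  # it is set to 0 when the cell is exhaustively sampled or has fewer than two sampled units.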
aggr1[, kor := as.numeric(pop_q * (pop_q - n_q) / (n_q * (n_q - 1)))]
aggr1[pop_q == n_q | n_q < 2, kor := 0]
apgr200 <- copy(aggr1)
aggr2 <- aggr1[, lapply(Yvars, function(x) { sum(kor * (get(paste0(x, "d1")) - 1 / n_q * get(paste0(x, "d2"))))}),
by = c("ids")]
setnames(aggr2, paste0("V", 1:length(Yvars)), Yvars)
aggr2 <- melt(aggr2, id.vars = c("ids"), variable.name = "variable_new")
aggr2 <- merge(dat_ids, aggr2, by = "ids", all.y = TRUE)
rm(list = c("dat_ids", "aggr1", "PSU", "w_final", "sar", "Yvars", "H"))
setnames(aggr2, "value", "covv")
calc2 <- copy(calc)
if (!is.null(Dom)) calc2[, (paste0(Dom, "at1at")) := lapply(Dom, function(x) paste(x, get(x), sep = "."))]
vDom <- calc2[, "variable", with = FALSE]
if (!is.null(Dom)) vDom <- calc2[, c("variable", paste0(Dom, "at1at")), with = FALSE]
calc2$variable_new <- do.call("paste", c(as.list(vDom), sep = "__"))
calc2[, variable_new := str_replace_all(variable_new, "[ ]", ".")]
if (!is.null(Dom)) calc2[, (paste0(Dom, "at1at")) := NULL]
vsars <- names(calc2)[which(outvars == periods):(ncol(calc2)-1)]
setnames(calc2, vsars, paste0(vsars, "_1"))
all_result <- merge(aggr2, calc2, by = c("variable_new", paste0(periods, "_1")), all.x = TRUE)
setnames(calc2, paste0(vsars, "_1"), paste0(vsars, "_2"))
vsars2 <- c("variable", Dom)
all_result <- merge(all_result, calc2, by = c("variable_new", vsars2, paste0(periods, "_2")), all.x = TRUE)
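  # Change estimated as the ratio estim_2 / estim_1; its relative variance follows from
  # Taylor linearisation: rse2 = var_1 / estim_1^2 + var_2 / estim_2^2 - 2 * covv / (estim_1 * estim_2),
  # and the confidence bounds below use the normal quantile for the requested confidence level.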
all_result[, estim := estim_2 / estim_1 * percentratio]
all_result[, rse2 := var_1 / (estim_1) ^ 2 + var_2 / (estim_2) ^ 2 - 2 * covv / (estim_1 * estim_2)]
all_result[, var := estim * rse2 * percentratio ^ 2]
all_result[var >= 0, se := sqrt(var)]
all_result[rse2 > 0, rse := sqrt(rse2)]
all_result[, cv := 100 * rse]
tsad <- qnorm(0.5 * (1 + confidence))
all_result[, CI_lower := estim - tsad * se]
all_result[, CI_upper := estim + tsad * se]
sars <- c(Dom, "variable")
all_result <- all_result[, c(paste0(periods, "_", 1:2), sars,
paste0("estim_", 1:2), "estim",
paste0("var_", 1:2), "covv", "var", "se",
"cv", "CI_lower", "CI_upper"), with = FALSE]
return(list(vardom_results = calc3,
all_result = all_result[]))
}
#' Variance estimation for cross-sectional, longitudinal measures for single and multistage cluster sampling designs
#'
#' @description Computes the variance estimation for cross-sectional and longitudinal measures for any stage cluster sampling designs.
#'
#' @param Y Variables of interest. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param H The unit stratum variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param PSU Primary sampling unit variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param w_final Weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param ID_level1 Variable for level1 ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param ID_level2 Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, variables are calculated for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers.
#' @param Z Optional variables of denominator for ratio estimation. If supplied, the ratio estimation is computed. Object convertible to \code{data.table} or variable names as character, column numbers. This variable is \code{NULL} by default.
#' @param gender Numerical variable for gender, where 1 is for males and 2 is for females. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param country Variable for the survey countries. The values for each country are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param period Variable for the survey periods. The values for each period are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param X Optional matrix of the auxiliary variables for the calibration estimator. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param countryX Optional variable for the survey countries. The values for each country are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param periodX Optional variable of the survey periods and countries. If supplied, residual estimation of calibration is done independently for each time period. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param X_ID_level1 Variable for level1 ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param ind_gr Optional variable by which the X matrix of the auxiliary variables for the calibration is divided independently. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param g Optional variable of the g weights. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param q Variable of the positive values accounting for heteroscedasticity. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param datasetX Optional survey data object in household level convertible to \code{data.table}.
#' @param linratio Logical value. If value is \code{TRUE}, then the linearized variables for the ratio estimator are used for variance estimation. If value is \code{FALSE}, then the gradients are used for variance estimation.
#' @param percentratio Positive numeric value. All linearized variables are multiplied by the \code{percentratio} value; by default it is 1.
#' @param use.estVar Logical value. If value is \code{TRUE}, then \code{R} function \code{estVar} is used for the estimation of covariance matrix of the residuals. If value is \code{FALSE}, then \code{R} function \code{estVar} is not used for the estimation of covariance matrix of the residuals.
#' @param ID_level1_max Logical value. If value is \code{TRUE}, then the sample size for variance under simple random sampling is taken as the maximum value of size in \code{ID_level1}. If value is \code{FALSE}, then it is taken as the count of \code{ID_level2} in \code{ID_level1}.
#' @param outp_res Logical value. If \code{TRUE} estimated residuals of calibration will be printed out.
#' @param withperiod Logical value. If value is \code{TRUE}, the results are calculated with period; if \code{FALSE}, without period.
#' @param netchanges Logical value. If value is \code{TRUE}, then two objects are produced: the first object is an aggregation of weighted data by period (if available), country, strata and PSU; the second object is an estimation for Y, the variance, and the gradient for numerator and denominator by country and period (if available). If value is \code{FALSE}, then both objects contain \code{NULL}.
#' @param confidence Optional positive value for confidence interval. This variable by default is 0.95.
#' @param checking Optional logical value. If this variable is \code{TRUE}, then the function checks for data preparation errors; otherwise, checks are skipped. This variable by default is \code{TRUE}.
#'
#'
#' @return A list with four objects is returned by the function:
#'\itemize{
#' \item \code{res_out} - a \code{data.table} containing the estimated residuals of calibration with ID_level1 and PSU.
#' \item \code{data_net_changes} - a \code{data.table} containing aggregation of weighted data by period (if available), country, strata and PSU.
#' \item \code{var_grad} - a \code{data.table} containing estimation for Y, the variance, gradient for numerator and denominator by period, country (if available) and population domains (if available).
#' \item \code{results} - a \code{data.table} containing: \cr
#' \code{period} - survey periods, \cr
#' \code{country} - survey countries (if available), \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{namesY} - names of variables of interest, \cr
#' \code{namesZ} - optional variable for names of denominator for ratio estimation, \cr
#' \code{sample_size} - the sample size (in numbers of individuals), \cr
#' \code{pop_size} - the population size (in numbers of individuals), \cr
#' \code{total} - the estimated totals, \cr
#' \code{variance} - the estimated variance of cross-sectional or longitudinal measures, \cr
#' \code{sd_w} - the estimated weighted variance of simple random sample, \cr
#' \code{sd_nw} - the estimated variance of simple random sample, \cr
#' \code{pop} - the population size (in numbers of households), \cr
#' \code{sampl_siz} - the sample size (in numbers of households), \cr
#' \code{stderr_w} - the estimated weighted standard error of simple random sample, \cr
#' \code{stderr_nw} - the estimated standard error of simple random sample, \cr
#' \code{se} - the estimated standard error of cross-sectional or longitudinal, \cr
#' \code{rse} - the estimated relative standard error (coefficient of variation), \cr
#' \code{cv} - the estimated relative standard error (coefficient of variation) in percentage, \cr
#' \code{absolute_margin_of_error} - the estimated absolute margin of error, \cr
#' \code{relative_margin_of_error} - the estimated relative margin of error, \cr
#' \code{CI_lower} - the estimated confidence interval lower bound, \cr
#' \code{CI_upper} - the estimated confidence interval upper bound, \cr
#' \code{confidence_level} - the positive value for confidence interval.
#' }
#'
#' @references
#'Guillaume Osier, Yves Berger, Tim Goedeme, (2013), Standard error estimation for the EU-SILC indicators of poverty and social exclusion, Eurostat Methodologies and Working papers, URL \url{http://ec.europa.eu/eurostat/documents/3888793/5855973/KS-RA-13-024-EN.PDF}. \cr
#'Yves G. Berger, Tim Goedeme, Guillaume Osier (2013). Handbook on standard error estimation and other related sampling issues in EU-SILC, URL \url{https://ec.europa.eu/eurostat/cros/content/handbook-standard-error-estimation-and-other-related-sampling-issues-ver-29072013_en} \cr
#'Eurostat Methodologies and Working papers, Handbook on precision requirements and variance estimation for ESS household surveys, 2013, URL \url{http://ec.europa.eu/eurostat/documents/3859598/5927001/KS-RA-13-029-EN.PDF}. \cr
#'
#' @seealso \code{\link{domain}},
#' \code{\link{lin.ratio}}
#'
#' @keywords vardcros
#'
#'
#' @examples
#' library("data.table")
#' library("laeken")
#'
#' # Example 1
#' data(eusilc)
#' set.seed(1)
#' dataset1 <- data.table(eusilc)
#' dataset1[, year := 2010]
#' dataset1[, country := "AT"]
#' dataset1[age < 0, age := 0]
#' PSU <- dataset1[, .N, keyby = "db030"][, N := NULL]
#' PSU[, PSU := trunc(runif(nrow(PSU), 0, 100))]
#' dataset1 <- merge(dataset1, PSU, by = "db030", all = TRUE)
#' PSU <- eusilc <- 0
#'
#' dataset1[, strata := "XXXX"]
#' dataset1[, t_pov := trunc(runif(nrow(dataset1), 0, 2))]
#' dataset1[, t_dep := trunc(runif(nrow(dataset1), 0, 2))]
#' dataset1[, t_lwi := trunc(runif(nrow(dataset1), 0, 2))]
#' dataset1[, exp := 1]
#' dataset1[, exp2 := 1 * (age < 60)]
#'
#' # At-risk-of-poverty (AROP)
#' dataset1[, pov := ifelse (t_pov == 1, 1, 0)]
#'
#' # Severe material deprivation (DEP)
#' dataset1[, dep := ifelse (t_dep == 1, 1, 0)]
#'
#' # Low work intensity (LWI)
#' dataset1[, lwi := ifelse (t_lwi == 1 & exp2 == 1, 1, 0)]
#'
#' # At-risk-of-poverty or social exclusion (AROPE)
#' dataset1[, arope := ifelse (pov == 1 | dep == 1 | lwi == 1, 1, 0)]
#'
#' result11 <- vardcros(Y="arope", H = "strata",
#' PSU = "PSU", w_final = "rb050",
#' ID_level1 = "db030", ID_level2 = "rb030",
#' Dom = "rb090", Z = NULL, country = "country",
#' period = "year", dataset = dataset1,
#' linratio = FALSE, withperiod = TRUE,
#' netchanges = TRUE, confidence = .95)
#'
#' \dontrun{
#' # Example 2
#' data(eusilc)
#' set.seed(1)
#' dataset1 <- data.table(rbind(eusilc, eusilc),
#' year = c(rep(2010, nrow(eusilc)),
#' rep(2011, nrow(eusilc))))
#' dataset1[, country := "AT"]
#' dataset1[age < 0, age := 0]
#' PSU <- dataset1[, .N, keyby = "db030"][, N := NULL]
#' PSU[, PSU := trunc(runif(nrow(PSU), 0, 100))]
#' dataset1 <- merge(dataset1, PSU, by = "db030", all = TRUE)
#' PSU <- eusilc <- 0
#' dataset1[, strata := "XXXX"]
#' dataset1[, strata := as.character(strata)]
#' dataset1[, t_pov := trunc(runif(nrow(dataset1), 0, 2))]
#' dataset1[, t_dep := trunc(runif(nrow(dataset1), 0, 2))]
#' dataset1[, t_lwi := trunc(runif(nrow(dataset1), 0, 2))]
#' dataset1[, exp := 1]
#' dataset1[, exp2 := 1 * (age < 60)]
#'
#' # At-risk-of-poverty (AROP)
#' dataset1[, pov := ifelse(t_pov == 1, 1, 0)]
#'
#' # Severe material deprivation (DEP)
#' dataset1[, dep := ifelse(t_dep == 1, 1, 0)]
#'
#' # Low work intensity (LWI)
#' dataset1[, lwi := ifelse(t_lwi == 1 & exp2 == 1, 1, 0)]
#'
#' # At-risk-of-poverty or social exclusion (AROPE)
#' dataset1[, arope := ifelse(pov == 1 | dep == 1 | lwi == 1, 1, 0)]
#'
#' result11 <- vardcros(Y = c("pov", "dep", "arope"),
#' H = "strata", PSU = "PSU", w_final = "rb050",
#' ID_level1 = "db030", ID_level2 = "rb030",
#' Dom = "rb090", Z = NULL, country = "country",
#' period = "year", dataset = dataset1,
#' linratio = FALSE, withperiod = TRUE,
#' netchanges = TRUE, confidence = .95)
#'
#' dataset2 <- dataset1[exp2 == 1]
#' result12 <- vardcros(Y = c("lwi"), H = "strata",
#' PSU = "PSU", w_final = "rb050",
#' ID_level1 = "db030", ID_level2 = "rb030",
#' Dom = "rb090", Z = NULL,
#' country = "country", period = "year",
#' dataset = dataset2, linratio = FALSE,
#' withperiod = TRUE, netchanges = TRUE,
#' confidence = .95)
#'
#' ### Example 3
#' data(eusilc)
#' set.seed(1)
#' year <- 2011
#' dataset1 <- data.table(rbind(eusilc, eusilc, eusilc, eusilc),
#' rb010 = c(rep(2008, nrow(eusilc)),
#' rep(2009, nrow(eusilc)),
#' rep(2010, nrow(eusilc)),
#' rep(2011, nrow(eusilc))))
#' dataset1[, rb020 := "AT"]
#'
#' dataset1[, u := 1]
#' dataset1[age < 0, age := 0]
#' dataset1[, strata := "XXXX"]
#' PSU <- dataset1[, .N, keyby = "db030"][, N:=NULL]
#' PSU[, PSU := trunc(runif(nrow(PSU), 0, 100))]
#' dataset1 <- merge(dataset1, PSU, by = "db030", all = TRUE)
#' thres <- data.table(rb020 = as.character(rep("AT", 4)),
#' thres = c(11406, 11931, 12371, 12791),
#' rb010 = 2008:2011)
#' dataset1 <- merge(dataset1, thres, all.x = TRUE, by = c("rb010", "rb020"))
#' dataset1[is.na(u), u := 0]
#' dataset1 <- dataset1[u == 1]
#'
#' #############
#' # T3 #
#' #############
#'
#' T3 <- dataset1[rb010 == year - 3]
#' T3[, strata1 := strata]
#' T3[, PSU1 := PSU]
#' T3[, w1 := rb050]
#' T3[, inc1 := eqIncome]
#' T3[, rb110_1 := db030]
#' T3[, pov1 := inc1 <= thres]
#' T3 <- T3[, c("rb020", "rb030", "strata", "PSU", "inc1", "pov1"), with = FALSE]
#'
#' #############
#' # T2 #
#' #############
#' T2 <- dataset1[rb010 == year - 2]
#' T2[, strata2 := strata]
#' T2[, PSU2 := PSU]
#' T2[, w2 := rb050]
#' T2[, inc2 := eqIncome]
#' T2[, rb110_2 := db030]
#' setnames(T2, "thres", "thres2")
#' T2[, pov2 := inc2 <= thres2]
#' T2 <- T2[, c("rb020", "rb030", "strata2", "PSU2", "inc2", "pov2"), with = FALSE]
#'
#' #############
#' # T1 #
#' #############
#' T1 <- dataset1[rb010 == year - 1]
#' T1[, strata3 := strata]
#' T1[, PSU3 := PSU]
#' T1[, w3 := rb050]
#' T1[, inc3 := eqIncome]
#' T1[, rb110_3 := db030]
#' setnames(T1, "thres", "thres3")
#' T1[, pov3 := inc3 <= thres3]
#' T1 <- T1[, c("rb020", "rb030", "strata3", "PSU3", "inc3", "pov3"), with = FALSE]
#'
#' #############
#' # T0 #
#' #############
#' T0 <- dataset1[rb010 == year]
#' T0[, PSU4 := PSU]
#' T0[, strata4 := strata]
#' T0[, w4 := rb050]
#' T0[, inc4 := eqIncome]
#' T0[, rb110_4 := db030]
#' setnames(T0, "thres", "thres4")
#' T0[, pov4 := inc4 <= thres4]
#' T0 <- T0[, c("rb010", "rb020", "rb030", "strata4", "PSU4", "w4", "inc4", "pov4"), with = FALSE]
#' apv <- merge(T3, T2, all = TRUE, by = c("rb020", "rb030"))
#' apv <- merge(apv, T1, all = TRUE, by = c("rb020", "rb030"))
#' apv <- merge(apv, T0, all = TRUE, by = c("rb020", "rb030"))
#' apv <- apv[(!is.na(inc1)) & (!is.na(inc2)) & (!is.na(inc3)) & (!is.na(inc4))]
#' apv[, ppr := ifelse(((pov4 == 1) & ((pov1 == 1 & pov2 == 1 & pov3 == 1)
#' | (pov1 == 1 & pov2 == 1 & pov3 == 0)
#' | (pov1 == 1 & pov2 == 0 & pov3 == 1)
#' | (pov1 == 0 & pov2 ==1 & pov3 == 1))), 1, 0)]
#'
#' result20 <- vardcros(Y = "ppr", H = "strata", PSU = "PSU",
#' w_final = "w4", ID_level1 = "rb030",
#' ID_level2 = "rb030", Dom = NULL,
#' Z = NULL, country = "rb020",
#' period = "rb010", dataset = apv,
#' linratio = FALSE,
#' withperiod = TRUE,
#' netchanges = FALSE,
#' confidence = .95)
#' result20}
#'
#'
#' @import data.table
#'
#' @export vardcros
vardcros <- function(Y, H, PSU, w_final,
ID_level1,
ID_level2,
Dom = NULL,
Z = NULL,
gender = NULL,
country = NULL,
period,
dataset = NULL,
X = NULL,
countryX = NULL,
periodX = NULL,
X_ID_level1 = NULL,
ind_gr = NULL,
g = NULL,
q = NULL,
datasetX = NULL,
linratio = FALSE,
percentratio=1,
use.estVar = FALSE,
ID_level1_max = TRUE,
outp_res = FALSE,
withperiod = TRUE,
netchanges = TRUE,
confidence = .95,
checking = TRUE) {
### Checking
if (checking) {
percentratio <- check_var(vars = percentratio, varn = "percentratio", varntype = "pinteger")
linratio <- check_var(vars = linratio, varn = "linratio", varntype = "logical")
netchanges <- check_var(vars = netchanges, varn = "netchanges", varntype = "logical")
withperiod <- check_var(vars = withperiod, varn = "withperiod", varntype = "logical")
use.estVar <- check_var(vars = use.estVar, varn = "use.estVar", varntype = "logical")
ID_level1_max <- check_var(vars = ID_level1_max, varn = "ID_level1_max", varntype = "logical")
outp_res <- check_var(vars = outp_res, varn = "outp_res", varntype = "logical")
confidence <- check_var(vars = confidence, varn = "confidence", varntype = "numeric01")
if (all(ID_level1_max, !is.null(X))) stop("'ID_level1_max' must be ", !ID_level1_max, "!", call. = FALSE)
if (all(!is.null(Z), !is.null(X), !linratio)) stop("'linratio' must be TRUE", call. = FALSE)
if (all(!is.null(gender), !is.null(Z), !linratio)) stop("'linratio' must be TRUE", call. = FALSE)
if (all(is.null(Z), linratio)) stop("'linratio' must be FALSE", call. = FALSE)
if(!is.null(X)) {
if (is.null(datasetX)) datasetX <- copy(dataset)
equal_dataset <- identical(dataset, datasetX) & !is.null(dataset)
if (equal_dataset) { X_ID_level1 <- ID_level1
countryX <- country }}
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
check.names = TRUE, isnumeric = TRUE, grepls = "__")
Ynrow <- nrow(Y)
Yncol <- ncol(Y)
H <- check_var(vars = H, varn = "H", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
dif_name = "dataH_stratas")
w_final <- check_var(vars = w_final, varn = "w_final",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
isnumeric = TRUE, isvector = TRUE)
gender <- check_var(vars = gender, varn = "gender",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
isnumeric = TRUE, isvector = TRUE,
mustbedefined = FALSE)
Z <- check_var(vars = Z, varn = "Z", dataset = dataset,
check.names = TRUE, Yncol = Yncol, Ynrow = Ynrow,
isnumeric = TRUE, mustbedefined = FALSE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
ncols = 0, Yncol = 0, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
duplicatednames = TRUE, grepls = "__")
country <- check_var(vars = country, varn = "country",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
dif_name = c("percoun", "period_country"))
period <- check_var(vars = period, varn = "period",
dataset = dataset, Ynrow = Ynrow,
ischaracter = TRUE, duplicatednames = TRUE,
withperiod = withperiod,
dif_name = c("percoun", "period_country", names(country)))
ID_level1 <- check_var(vars = ID_level1, varn = "ID_level1",
dataset = dataset, ncols = 1, Yncol = 0,
Ynrow = Ynrow, ischaracter = TRUE)
    ID_level2 <- check_var(vars = ID_level2, varn = "ID_level2",
dataset = dataset, ncols = 1, Yncol = 0,
Ynrow = Ynrow, ischaracter = TRUE,
namesID1 = names(ID_level1), country = country,
periods = period)
PSU <- check_var(vars = PSU, varn = "PSU", dataset = dataset,
ncols = 1, Yncol = 0, Ynrow = Ynrow,
ischaracter = TRUE, namesID1 = names(ID_level1))
if(!is.null(X)) {
X <- check_var(vars = X, varn = "X", dataset = datasetX,
check.names = TRUE, isnumeric = TRUE,
grepls = "__",
dif_name = c(names(period), names(country), names(H),
names(PSU), names(ID_level1), names(Y),
"w_final", "w_design", "g", "q"))
Xnrow <- nrow(X)
ind_gr <- check_var(vars = ind_gr, varn = "ind_gr",
dataset = datasetX, ncols = 1,
Xnrow = Xnrow, ischaracter = TRUE,
dif_name = c(names(period), names(country), names(H),
names(PSU), names(ID_level1), names(Y),
names(X), "w_final", "w_design", "g", "q"))
g <- check_var(vars = g, varn = "g", dataset = datasetX,
ncols = 1, Xnrow = Xnrow, isnumeric = TRUE,
isvector = TRUE)
q <- check_var(vars = q, varn = "q", dataset = datasetX,
ncols = 1, Xnrow = Xnrow, isnumeric = TRUE,
isvector = TRUE)
countryX <- check_var(vars = countryX, varn = "countryX",
dataset = datasetX, ncols = 1, Xnrow = Xnrow,
ischaracter = TRUE, mustbedefined = !is.null(country),
varnout = "country", varname = names(country),
country = country)
periodX <- check_var(vars = periodX, varn = "periodX",
dataset = datasetX, ncols = 1, Xnrow = Xnrow,
ischaracter = TRUE, mustbedefined = !is.null(period),
duplicatednames = TRUE, varnout = "period",
varname = names(period), country = country,
countryX = countryX, periods = period)
X_ID_level1 <- check_var(vars = X_ID_level1, varn = "X_ID_level1",
dataset = datasetX, ncols = 1, Xnrow = Xnrow,
ischaracter = TRUE, varnout = "ID_level1",
varname = names(ID_level1), country = country,
countryX = countryX, periods = period,
periodsX = periodX, ID_level1 = ID_level1)
}
}
dataset <- datasetX <- NULL
# Calculation
sar_nr <- N <- nameY <- nameZ <- variable <- NULL
sample_size <- totalY <- totalZ <- Z1 <- percoun <- NULL
totalY_male <- totalZ_male <- totalY_female <- NULL
totalZ_female <- gender2 <- i <- NULL
# Design weights
if (!is.null(X)) {
idh <- data.table(ID_level1)
idhx <- data.table(X_ID_level1)
if (!is.null(countryX)) {idh <- data.table(country, idh)
idhx <- data.table(countryX, idhx)}
if (!is.null(periodX)) {idh <- data.table(period, idh)
idhx <- data.table(periodX, idhx)}
idhx <- data.table(idhx, g)
setnames(idhx, names(idhx)[c(1 : (ncol(idhx)-1))], names(idh))
idg <- merge(idh, idhx, by = names(idh), sort = FALSE)
w_design <- w_final / idg[[ncol(idg)]]
idg <- data.table(idg, w_design = w_design)
idh <- idg[, .N, keyby = c(names(idh), "w_design")]
    if (nrow(X) != nrow(idh)) stop("Aggregated 'w_design' length must be the same as matrix 'X'")
idg <- idhx <- idh <- NULL
} else w_design <- w_final
# Domains
size <- data.table(size = rep(1, nrow(Y)))
if (!is.null(Dom)) { size1 <- domain(Y = size, D = Dom,
dataset = NULL,
checking = FALSE)
Y1 <- domain(Y = Y, D = Dom,
dataset = NULL,
checking = FALSE)
} else { size1 <- copy(size)
Y1 <- Y }
namesDom <- names(Dom)
if (!is.null(country)) { DTp <- data.table(country)
} else DTp <- data.table(percoun = rep("1", nrow(size)))
if (withperiod) DTp <- data.table(period, DTp)
namesperc <- names(DTp)
namesperc2 <- c("period_country", namesperc)
period_country <- do.call("paste", c(as.list(DTp), sep = "_"))
if (!is.null(Z)) {
if (!is.null(Dom)) Z1 <- domain(Y = Z, D = Dom,
dataset = NULL,
checking = FALSE) else Z1 <- Z
if (linratio){
sorts <- unlist(split(Y1[, .I], period_country))
lin1 <- lapply(split(Y1[, .I], period_country),
function(i)
if (!is.null(gender)) {
data.table(sar_nr = i,
lin.ratio(Y = Y1[i] * (gender == 1),
Z = Z1[i] * (gender == 1),
weight = w_final[i],
Dom = NULL, dataset = NULL,
percentratio = percentratio,
checking = FALSE)-
lin.ratio(Y = Y1[i] * (gender == 2),
Z = Z1[i] * (gender == 2),
weight = w_final[i],
Dom = NULL, dataset = NULL,
percentratio = percentratio,
checking = FALSE))
} else { data.table(sar_nr = i,
lin.ratio(Y = Y1[i], Z = Z1[i],
weight = w_final[i],
Dom = NULL, dataset = NULL,
percentratio = percentratio,
checking = FALSE))})
Y2 <- rbindlist(lin1)
setkeyv(Y2, "sar_nr")
Y2[, sar_nr := NULL]
if (any(is.na(Y2))) print("Results are calculated, but there are cases where Z = 0")
} else Y2 <- data.table(copy(Y1), copy(Z1))
  } else if (!is.null(gender)) { Y2 <- Y1 * (gender == 1) - Y1 * (gender == 2)
} else Y2 <- copy(Y1)
namesY2 <- names(Y2)
namesY2w <- paste0(namesY2, "w")
namesY <- names(Y)
namesZ <- names(Z)
names_H <- names(H)
namesY1 <- names(Y1)
namesZ1 <- names(Z1)
names_id1 <- names(ID_level1)
names_id2 <- names(ID_level2)
names_PSU <- names(PSU)
names_size1 <- names(size1)
namesYZ <- c(namesY, namesZ)
namesY1Z1 <- c(namesY1, namesZ1)
names_country <- names(country)
names_size1w <- paste0(names_size1, "w")
size1w <- size1 * w_final
setnames(size1w, names_size1, names_size1w)
DT <- data.table(period_country, DTp, H, PSU, ID_level1, ID_level2,
w_final, w_design, size1, size1w, Y2)
DTc <- DT[, lapply(.SD, sum, na.rm = TRUE),
by = c(namesperc2, names_H, names_PSU,
names_id1, "w_final", "w_design"),
.SDcols = c(names_size1, names_size1w, namesY2)]
H <- PSU <- id <- DTp <- country <- NULL
DTagg <- data.table(DT[, namesperc, with = FALSE], w_final)
if (!is.null(Dom)) DTagg <- data.table(DTagg, Dom)
DTagg <- data.table(DTagg, sample_size = 1,
pop_size = w_final, w_final * Y)
if (!is.null(gender)) DTagg <- data.table(DTagg, gender)
if (!is.null(Z)) DTagg <- data.table(DTagg, w_final * Z)
gnamesDom <- namesDom
if (!is.null(gender)) gnamesDom <- c("gender", gnamesDom)
DTaggs <- DTagg[, lapply(.SD, sum, na.rm = TRUE),
keyby = c(namesperc, namesDom),
.SDcols = c("sample_size", "pop_size")]
DTagg <- DTagg[, lapply(.SD, sum, na.rm = TRUE),
keyby = c(namesperc, gnamesDom),
.SDcols = namesYZ]
vars <- data.table(variable = namesY, namesY = namesY)
if (!is.null(namesZ)) vars <- data.table(variable = as.character(1 : length(namesY)),
namesY = namesY, namesZ = namesZ)
varsYZ <- list(namesY)
if (!is.null(namesZ)) varsYZ <- list(namesY, namesZ)
DTagg <- melt(DTagg, id = c(namesperc, gnamesDom),
measure = varsYZ,
variable.factor = FALSE)
setnames(DTagg, ifelse(!is.null(DTagg$value1), "value1", "value"), "totalY")
totYZ <- "totalY"
if (!is.null(Z)) {totYZ <- c(totYZ, "totalZ")
setnames(DTagg, "value2", "totalZ")}
if (!is.null(gender)) {
funkc <- as.formula(paste0(paste(c(namesperc, namesDom, "variable"), collapse= "+"), "~ gender2"))
DTagg[gender == 1, gender2 := "male"]
DTagg[gender == 2, gender2 := "female"]
DTagg <- dcast(DTagg, funkc, sum, value.var = totYZ)
}
DTagg <- merge(DTagg, vars, by = "variable")[, variable := NULL]
DTagg <- merge(DTagg, DTaggs, all.x = TRUE,
by = c(namesperc, namesDom))
if (!is.null(namesDom)) DTagg[,(paste0(namesDom, "_new")) := lapply(namesDom,
function(x) make.names(paste0(x,".", get(x))))]
varsYZ <- vars <- nameY1 <- nameZ1 <- valueY1 <- valueZ1 <- Dom <- NULL
Z1 <- Y1 <- period_country <- Y2 <- total <- pop_size <- NULL
stderr_nw <- nhcor <- num1 <- num <- den1 <- den <- num_den1 <- NULL
grad1 <- grad2 <- estim <- sd_nw <- stderr_w <- sd_w <- se <- rse <- NULL
cv <- CI_lower <- absolute_margin_of_error <- CI_upper <- totalZ <- NULL
relative_margin_of_error <- NULL
# Calibration
res_outp <- NULL
if (!is.null(X)) {
X0 <- data.table(X_ID_level1, ind_gr, q, g, X)
if (!is.null(countryX)) X0 <- data.table(countryX, X0)
if (!is.null(periodX)) X0 <- data.table(periodX, X0)
nos <- c(names(periodX), names(countryX), names(ID_level1))
DT1 <- merge(DTc, X0, by = nos, sort = FALSE)
ind_gr <- DT1[, c(namesperc, names(ind_gr)), with = FALSE]
ind_period <- do.call("paste", c(as.list(ind_gr), sep = "_"))
res <- lapply(split(DT1[, .I], ind_period), function(i)
data.table(DT1[i, nos, with = FALSE],
res <- residual_est(Y = DT1[i, namesY2, with = FALSE],
X = DT1[i, names(X), with = FALSE],
weight = DT1[i][["w_design"]],
q = DT1[i][["q"]], dataset = NULL,
checking = FALSE)))
res <- rbindlist(res)
setnames(res, namesY2, namesY2w)
DTc <- merge(DTc, res, by = nos)
if (outp_res) res_outp <- DTc[, c(nos, names_PSU, "w_final", namesY2w), with = FALSE]
} else DTc[, (namesY2w) := .SD[, namesY2, with = FALSE]]
DTc[, (namesY2w) := .SD[, namesY2w, with = FALSE] * get("w_final")]
#--------------------------------------------------------*
# AGGREGATION AT PSU LEVEL ("ULTIMATE CLUSTER" APPROACH) |
#--------------------------------------------------------*
DT1 <- DTc[, lapply(.SD, sum, na.rm = TRUE), keyby = c(namesperc2,
names_H, names_PSU), .SDcols = namesY2w]
setnames(DT1, namesY2w, namesY2)
DTnet <- copy(DT1)
if (!netchanges) DTnet <- NULL
DT2 <- DT1[, lapply(.SD, sum, na.rm = TRUE),
keyby = namesperc, .SDcols = namesY2]
varsYZ <- list(namesY1)
if (!is.null(namesZ1) & !linratio) varsYZ <- list(namesY1, namesZ1)
DT2 <- melt(DT2, id = namesperc,
measure = varsYZ,
variable.factor = FALSE)
if (!is.null(namesZ1) & !linratio) {setnames(DT2, c("value1", "value2"),
c("valueY1", "valueZ1"))
} else setnames(DT2, ifelse(!is.null(DT2$value1), "value1", "value"), "valueY1")
if (!is.null(namesZ1) & !linratio) {
vars <- data.table(variable = 1 : length(namesY1))
} else vars <- data.table(variable = namesY1)
if (!is.null(namesDom)) { vars <- data.table(vars, nameY1 = namesY1,
t(data.frame(strsplit(namesY1, "__"))))
setnames(vars, names(vars)[3 : length(vars)],
c("namesY", paste0(namesDom, "_new")))
} else {vars <- data.table(vars, nameY1 = namesY1, namesY = namesY1) }
if (!is.null(namesZ1)) { vars <- data.table(vars, nameZ1 = namesZ1)
if (!is.null(namesDom)) {
varsZ <- data.table(nameZ1 = namesZ1,
t(data.frame(strsplit(namesZ1, "__"))))
setnames(varsZ, names(varsZ)[2 : length(varsZ)],
c("namesZ", paste0(namesDom, "_new")))
varsZ[, (paste0(namesDom, "_new")) := NULL]
vars <- merge(vars, varsZ, by = "nameZ1")
} else vars[, namesZ := nameZ1] }
vars <- vars[, lapply(vars, as.character)]
DT2 <- merge(DT2, vars, by = "variable")
DT2[, variable := NULL]
vars <- varsZ <- NULL
vars <- c(namesperc, paste0(namesDom, "_new"), "namesY", "namesZ")
vars <- names(DT2)[names(DT2) %in% vars]
DTagg <- merge(DTagg, DT2, by = vars)
DT2 <- vars <- NULL
# VECTOR OF THE PARTIAL DERIVATIVES (GRADIENT FUNCTION)
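  # For the ratio R = Y / Z the partial derivatives, evaluated at the estimated totals,
  # are dR/dY = 1 / Z and dR/dZ = -Y / Z^2.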
if (!is.null(namesZ1) & !linratio) {
DTagg[, grad1 := 1 / valueZ1]
DTagg[, grad2 := - valueY1 / valueZ1^2]
}
# NUMBER OF PSUs PER STRATUM
setkeyv(DT1, c(namesperc2, names_H))
stratasf <- nh <- nhcor <- NULL
DT1[, nh := .N, by = c(namesperc2, names_H)]
#--------------------------------------------------------------------------*
# MULTIVARIATE REGRESSION APPROACH USING STRATUM DUMMIES AS REGRESSORS AND |
# STANDARD ERROR ESTIMATION |
#--------------------------------------------------------------------------*
DT1H <- DT1[[names_H]]
DT1H <- factor(DT1H)
if (length(levels(DT1H)) == 1) { DT1[, stratasf := 1]
DT1H <- "stratasf"
} else { DT1H <- data.table(model.matrix( ~ DT1H - 1, DT1H, contrasts = "contr.SAS"))
DT1 <- cbind(DT1, DT1H)
DT1H <- names(DT1H) }
  fits <- lapply(1 : length(namesY1), function(i) {
fitss <- lapply(split(DT1, DT1$period_country), function(DT1c) {
y <- namesY1[i]
if ((!is.null(namesZ1))&(!linratio)) z <- paste0(",", toString(namesZ1[i])) else z <- ""
funkc <- as.formula(paste("cbind(", trimws(toString(y)), z, ")~ 0 + ",
paste(c(0, DT1H), collapse = "+")))
res1 <- lm(funkc, data = DT1c)
if (use.estVar == TRUE) {res1 <- data.table(crossprod(res1$res))
} else res1 <- data.table(res1$res)
setnames(res1, names(res1)[1], "num")
res1[, nameY1 := y]
if (!is.null(namesZ1) & !linratio) {
setnames(res1, names(res1)[2], "den")
res1[, nameZ1 := namesZ1[i]]
}
if (use.estVar == TRUE) {
setnames(res1, "num", "num1")
if (!is.null(namesZ1) & !linratio) {
res1[, num_den1 := res1[["den"]][1]]
res1[, den1 := res1[["den"]][2]] }
res1 <- data.table(res1[1], DT1c[1])
} else {
res1 <- data.table(res1, DT1c)
res1[, nhcor := ifelse(nh > 1, nh / (nh - 1), 1)]
res1[, num1 := nhcor * num * num]
if (!is.null(namesZ1) & !linratio) {
res1[, num_den1 := nhcor * num * den]
res1[, den1 := nhcor * den * den]
}}
namep <- c("nameY1", "nameZ1")
namep <- namep[namep %in% names(res1)]
varsp <- c("num1", "den1", "num_den1")
varsp <- varsp[varsp %in% names(res1)]
fits <- res1[, lapply(.SD, sum),
keyby = c("period_country",
namesperc, namep),
.SDcols = varsp]
return(fits)
})
return(rbindlist(fitss))
})
res <- rbindlist(fits)
DT1 <- fits <- DT1H <- NULL
vars <- c(namesperc, namesDom, "nameY1", "nameZ1")
vars <- names(res)[names(res) %in% vars]
res <- merge(DTagg, res, by = vars)
DTagg <- total <- NULL
res[, var := num1]
if (!is.null(gender)) {
res[, estim := totalY_male - totalY_female]
if (!is.null(res$totalZ)) res[, estim := (totalY_male / totalZ_male - totalY_female / totalZ_female) * percentratio]
} else {
res[, estim := totalY]
if (!is.null(res$totalZ)) res[, estim := totalY / totalZ * percentratio] }
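  # Taylor linearisation variance of the ratio estimator: combine the variances of the
  # numerator and denominator totals and their covariance through the gradient terms.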
if (!is.null(res$totalZ) & !linratio) {
res[, var := (grad1 * grad1 * num1) +
(grad2 * grad2 * den1) +
2 * (grad1 * grad2 * num_den1)]
res[, var := var * (percentratio)^2] }
main <- c(namesperc, namesDom, "namesY", "nameY1")
if (!is.null(namesDom)) main <- c(main, paste0(namesDom, "_new"))
if (!is.null(res$namesZ)) main <- c(main, "namesZ", "nameZ1")
main <- c(main, "sample_size", "pop_size")
if (is.null(gender)) { main <- c(main, "totalY")
} else main <- c(main, c("totalY_male", "totalY_female"))
if (!is.null(res$namesZ)) { if (is.null(gender)) { main <- c(main, "totalZ")
} else main <- c(main, c("totalZ_male", "totalZ_female")) }
main2 <- c(main, "estim", "valueY1")
if (!is.null(namesZ1) & !linratio) main2 <- c(main2, "valueZ1")
main2 <- c(main2, "num1")
if (!is.null(namesZ1) & !linratio) main2 <- c(main2, "den1", "grad1", "grad2")
if (netchanges) { res1 <- res[, main2[!(main2 %in% c("sample_size",
"pop_size", "nameY1",
paste0(namesDom, "_new"),
"nameZ1"))], with = FALSE]
} else res1 <- NULL
main <- c(main, "estim", "var")
res22 <- res[, main, with = FALSE]
#-------------------------------------------------------------------------*
# DESIGN EFFECT (DEFF) ESTIMATION - VARIANCE UNDER SIMPLE RANDOM SAMPLING |
#-------------------------------------------------------------------------*
# We aggregate the target variables at household level
DTs <- DT[, lapply(.SD, sum, na.rm = TRUE),
keyby = c(namesperc2, names_id1, "w_final"),
.SDcols = c(names_size1, names_size1w, namesY2)]
if (ID_level1_max) {
DTm <- DT[, lapply(.SD, max, na.rm = TRUE), keyby = c(namesperc2, names_id1), .SDcols = names_size1]
} else {
DTm <- DT[, lapply(.SD, sum, na.rm = TRUE), keyby = c(namesperc2, names_id1), .SDcols = names_size1]
}
setnames(DTm, names_size1, paste0(names_size1, "m"))
DTs <- merge(DTs, DTm, by = c(namesperc2, names_id1))
# Linearised variables
if (!is.null(namesZ1) & !linratio) {
lin1 <- lapply(split(DTs[, .I], DTs$period_country), function(i)
lin.ratio(Y = DTs[i, namesY1, with = FALSE],
Z = DTs[i, namesZ1, with = FALSE],
weight = DTs[["w_final"]][i], Dom = NULL,
percentratio = percentratio))
Y2a <- rbindlist(lin1)
setnames(Y2a, names(Y2a), paste0("lin___", namesY1))
DTs <- data.table(DTs, Y2a)
Y2a <- paste0("lin___", namesY1)
} else Y2a <- namesY1
w_final <- DTs[["w_final"]]
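  # Weighted variance of the (linearised) study variables within each period-country cell:
  # sum(w * (x - weighted mean)^2) / (sum(w) - 1).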
DTsd <- DTs[, lapply(.SD[, Y2a, with = FALSE], function(x)
sum(w_final*((x-sum(w_final*x)/sum(w_final))^2))/(sum(w_final)-1)),
keyby = "period_country"]
setnames(DTsd, Y2a, paste0("sd_w__", namesY1))
DTs <- merge(DTs, DTsd, by = "period_country")
DTm <- DTs[, lapply(.SD[, paste0(names_size1, "m"), with = FALSE], function(x) sum(w_final * x, na.rm = TRUE)),
keyby = "period_country"]
setnames(DTm, paste0(names_size1, "m"), paste0("pop_", names_size1))
DTs <- merge(DTs, DTm, by = "period_country")
DTsd <- DTs[, lapply(.SD, sd, na.rm = TRUE), keyby = "period_country", .SDcols = Y2a]
setnames(DTsd, Y2a, paste0("sd_nw__", namesY1))
DTs <- merge(DTs, DTsd, by = "period_country")
DTm <- DTs[, lapply(.SD, sum, na.rm = TRUE), keyby = "period_country", .SDcols = names_size1]
setnames(DTm, names_size1, paste0("samp_", names_size1))
DTs <- merge(DTs, DTm, by = "period_country")
DTx <- DTs[, .N, keyby = c(namesperc, paste0("sd_w__", namesY1),
paste0("sd_nw__", namesY1),
paste0("pop_", names_size1),
paste0("samp_", names_size1))]
DTx[, N := NULL]
main <- melt(DTx[, c(namesperc, paste0("sd_w__", namesY1)), with = FALSE], id = namesperc)
main[, nameY1 := substr(variable, 7, nchar(trimws(as.character(variable))))]
main[, variable := NULL]
setnames(main, "value", "sd_w")
res <- merge(res, main, all.x = TRUE, by = c(namesperc, "nameY1"))
main <- melt(DTx[, c(namesperc, paste0("sd_nw__", namesY1)), with = FALSE], id = namesperc)
main[, nameY1 := substr(variable, 8, nchar(trimws(as.character(variable))))]
main[, variable := NULL]
setnames(main, "value", "sd_nw")
res <- merge(res, main, all = TRUE, by = c(namesperc, "nameY1"))
main <- melt(DTx[, c(namesperc, paste0("pop_", names_size1)), with = FALSE], id = namesperc)
if (!is.null(namesDom)){
main[, Dom := substr(variable, 11, nchar(trimws(as.character(variable))))]
vars <- unique(main[["Dom"]])
vars <- data.table(Dom=vars, t(data.frame(strsplit(vars, "__"))))
setnames(vars, names(vars)[2 : length(vars)], paste0(namesDom, "_new"))
main <- merge(main, vars, all.x = TRUE, by = "Dom") }
main[, variable := NULL]
setnames(main, "value", "pop")
nds <- namesperc
if (!is.null(namesDom)) nds <- c(namesperc, paste0(namesDom, "_new"))
res <- merge(res, main, all.x = TRUE, by = nds)
main <- melt(DTx[, c(namesperc, paste0("samp_", names_size1)), with = FALSE], id = namesperc)
if (!is.null(namesDom)) main[, Dom := substr(variable, 12, nchar(trimws(as.character(variable))))]
main[, variable := NULL]
setnames(main, "value", "sampl_siz")
if (is.null(namesDom)) nds <- namesperc else nds <- c(namesperc, "Dom")
res <- merge(res, main, all = TRUE, by = nds)
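  # Standard errors under simple random sampling with the finite population correction (1 - n/N),
  # used as the reference for the design effect.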
res[sample_size < pop_size, stderr_nw := 100 * sqrt((1 - (sample_size / pop_size)) / pop_size * sd_nw * sd_nw / sample_size)]
res[sample_size < pop_size, stderr_w := 100 * sqrt((1 - (sample_size / pop_size)) / pop_size * sd_w * sd_w / sample_size)]
DT <- DTw <- DTx <- DTs <- DTsd <- sd1 <- nds <- NULL
res[, se := sqrt(var)]
res[, rse := se / estim]
res[, cv := rse * 100]
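  # Two-sided normal confidence interval with multiplier qnorm((1 + confidence) / 2).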
tsad <- qnorm(0.5 * (1 + confidence))
res[, absolute_margin_of_error := tsad * se]
res[, relative_margin_of_error := tsad * cv]
res[, CI_lower := estim - tsad * se]
res[, CI_upper := estim + tsad * se]
main <- namesperc
if (!is.null(namesDom)) main <- c(main, namesDom)
main <- c(main, "namesY")
if (!is.null(res$namesZ)) main <- c(main, "namesZ")
main <- c(main, "sample_size", "pop_size", "estim", "se",
"var", "rse", "cv", "absolute_margin_of_error",
"relative_margin_of_error", "CI_lower", "CI_upper",
"sd_w", "sd_nw", "pop", "sampl_siz", "stderr_nw",
"stderr_w")
main <- main[main %in% names(res)]
res <- res[, main, with = FALSE]
if (!netchanges & is.null(names_country)) {
if (!is.null(DTnet)) DTnet[, percoun := NULL]
res1[, percoun := NULL]
res[, percoun := NULL] }
list(data_net_changes = DTnet, res_out = res_outp, var_grad = res1, results = res)
}
#' Variance estimation for cross-sectional, longitudinal measures for indicators on social exclusion and poverty
#'
#' Computes the variance estimation for cross-sectional and longitudinal measures for indicators on social exclusion and poverty.
#' @param Y Variables of interest. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param age Age variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param pl085 Retirement variable (Number of months spent in retirement or early retirement). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param month_at_work Variable for total number of month at work (sum of the number of months spent at full-time work as employee, number of months spent at part-time work as employee, number of months spent at full-time work as self-employed (including family worker), number of months spent at part-time work as self-employed (including family worker)). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Y_den Denominator variable (for example gross individual earnings). One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Y_thres Variable (for example equalized disposable income) used for computation and linearization of poverty threshold. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number or logical vector with only one \code{TRUE} value (length of the vector has to be the same as the column count of \code{dataset}). Variable specified for \code{Y} is used as \code{Y_thres} if \code{Y_thres} is not defined.
#' @param wght_thres Weight variable used for computation and linearization of poverty threshold. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number. Variable specified for \code{w_final} is used as \code{wght_thres} if \code{wght_thres} is not defined.
#' @param H The unit stratum variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param PSU Primary sampling unit variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param w_final Weight variable. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param ID_level1 Variable for level1 ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param ID_level2 Optional variable for unit ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param Dom Optional variables used to define population domains. If supplied, variables are calculated for each domain. An object convertible to \code{data.table} or variable names as character vector, column numbers.
#' @param country Variable for the survey countries. The values for each country are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param period Variable for the survey periods. The values for each period are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param sort Optional variable to be used as tie-breaker for sorting. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param gender Numerical variable for gender, where 1 is for males and 2 is for females. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param dataset Optional survey data object convertible to \code{data.table}.
#' @param X Optional matrix of the auxiliary variables for the calibration estimator. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param countryX Optional variable for the survey countries. The values for each country are computed independently. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param periodX Optional variable of the survey periods and countries. If supplied, residual estimation of calibration is done independently for each time period. Object convertible to \code{data.table} or variable names as character, column numbers.
#' @param X_ID_level1 Variable for level1 ID codes. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param ind_gr Optional variable by which the X matrix of the auxiliary variables for the calibration is divided independently. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param g Optional variable of the g weights. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param q Variable of the positive values accounting for heteroscedasticity. One dimensional object convertible to one-column \code{data.table} or variable name as character, column number.
#' @param datasetX Optional survey data object in household level convertible to \code{data.table}.
#' @param percentage A numeric value in range \eqn{[0,100]} for \eqn{p} in the formula for poverty threshold computation:
#' \deqn{\frac{p}{100} \cdot Z_{\frac{\alpha}{100}}.}{p/100 * Z(\alpha/100).}
#'For example, to compute poverty threshold equal to 60\% of some income quantile, \eqn{p} should be set equal to 60.
#' @param order_quant A numeric value in range \eqn{[0,100]} for \eqn{\alpha} in the formula for poverty threshold computation:
#' \deqn{\frac{p}{100} \cdot Z_{\frac{\alpha}{100}}.}{p/100 * Z(\alpha/100).}
#'For example, to compute poverty threshold equal to some percentage of median income, \eqn{\alpha} should be set equal to 50.
#' @param alpha a numeric value in range \eqn{[0,100]} for the order of the income quantile share ratio (in percentage).
#' @param use.estVar Logical value. If value is \code{TRUE}, then \code{R} function \code{estVar} is used for the estimation of covariance matrix of the residuals. If value is \code{FALSE}, then \code{R} function \code{estVar} is not used for the estimation of covariance matrix of the residuals.
#' @param withperiod Logical value. If value is \code{TRUE}, the results are calculated with period; if \code{FALSE}, without period.
#' @param netchanges Logical value. If value is \code{TRUE}, then two objects are produced: the first object is an aggregation of weighted data by period (if available), country, strata and PSU; the second object is an estimation for Y, the variance, and the gradient for numerator and denominator by country and period (if available). If value is \code{FALSE}, then both objects contain \code{NULL}.
#' @param confidence Optional positive value for confidence interval. This variable by default is 0.95.
#' @param outp_lin Logical value. If \code{TRUE} linearized values of the ratio estimator will be printed out.
#' @param outp_res Logical value. If \code{TRUE} estimated residuals of calibration will be printed out.
#' @param type A character vector (of length one unless several.ok is TRUE), for example "linarpr", "linarpt", "lingpg", "linpoormed", "linrmpg", "lingini", "lingini2", "linqsr", "linarr", "linrmir".
#' @param checking Optional logical value. If this variable is \code{TRUE}, then the function checks for data preparation errors; otherwise, checks are skipped. This variable by default is \code{TRUE}.
#'
#'
#' @return A list of objects is returned by the function:
#' \itemize{
#' \item \code{lin_out} - a \code{data.table} containing the linearized values of the ratio estimator with ID_level2 and PSU.
#' \item \code{res_out} - a \code{data.table} containing the estimated residuals of calibration with ID_level1 and PSU.
#' \item \code{data_net_changes} - a \code{data.table} containing aggregation of weighted data by period (if available), country, strata, PSU.
#' \item \code{results} - a \code{data.table} containing: \cr
#' \code{period} - survey periods, \cr
#' \code{country} - survey countries, \cr
#' \code{Dom} - optional variable of the population domains, \cr
#' \code{type} - type variable, \cr
#' \code{count_respondents} - the count of respondents, \cr
#' \code{pop_size} - the population size (in numbers of individuals), \cr
#' \code{estim} - the estimated value, \cr
#' \code{se} - the estimated standard error, \cr
#' \code{var} - the estimated variance, \cr
#' \code{rse} - the estimated relative standard error (coefficient of variation), \cr
#' \code{cv} - the estimated relative standard error (coefficient of variation) in percentage.}
#'
#'
#' @references
#' Guillaume Osier, Yves Berger, Tim Goedeme, (2013), Standard error estimation for the EU-SILC indicators of poverty and social exclusion, Eurostat Methodologies and Working papers, URL \url{http://ec.europa.eu/eurostat/documents/3888793/5855973/KS-RA-13-024-EN.PDF}. \cr
#' Yves G. Berger, Tim Goedeme, Guillaume Osier (2013). Handbook on standard error estimation and other related sampling issues in EU-SILC, URL \url{https://ec.europa.eu/eurostat/cros/content/handbook-standard-error-estimation-and-other-related-sampling-issues-ver-29072013_en} \cr
#' Eurostat Methodologies and Working papers, Handbook on precision requirements and variance estimation for ESS household surveys, 2013, URL \url{http://ec.europa.eu/eurostat/documents/3859598/5927001/KS-RA-13-029-EN.PDF}
#'
#' @seealso \code{\link{linrmir}},
#' \code{\link{linarr}},
#' \code{\link{vardchanges}}
#'
#' @keywords vardcros
#'
#' @examples
#'
#' library("data.table")
#' library("laeken")
#' data(eusilc)
#' set.seed(1)
#' dataset1 <- data.table(rbind(eusilc, eusilc),
#' year = c(rep(2010, nrow(eusilc)),
#' rep(2011, nrow(eusilc))))
#' dataset1[age < 0, age := 0]
#' PSU <- dataset1[, .N, keyby = "db030"][, N := NULL]
#' PSU[, PSU := trunc(runif(nrow(PSU), 0, 100))]
#' PSU$inc <- runif(nrow(PSU), 20, 100000)
#' dataset1 <- merge(dataset1, PSU, all = TRUE, by = "db030")
#' PSU <- eusilc <- NULL
#' dataset1[, strata := "XXXX"]
#' dataset1[, strata := as.character(strata)]
#' dataset1$pl085 <- 12 * trunc(runif(nrow(dataset1), 0, 2))
#' dataset1$month_at_work <- 12 * trunc(runif(nrow(dataset1), 0, 2))
#' dataset1[, id_l2 := paste0("V", .I)]
#'
#' result <- vardcrospoor(Y = "inc", age = "age",
#' pl085 = "pl085",
#' month_at_work = "month_at_work",
#' Y_den = "inc", Y_thres = "inc",
#' wght_thres = "rb050",
#' H = "strata", PSU = "PSU",
#' w_final = "rb050", ID_level1 = "db030",
#' ID_level2 = "id_l2",
#' Dom = c("rb090", "db040"),
#' country = NULL, period = "year",
#' sort = NULL, gender = NULL,
#' dataset = dataset1,
#' percentage = 60,
#' order_quant = 50L,
#' alpha = 20,
#' confidence = 0.95,
#' type = "linrmpg")
#'
#' \dontrun{
#' result2 <- vardcrospoor(Y = "inc", age = "age",
#' pl085 = "pl085",
#' month_at_work = "month_at_work",
#' Y_den = "inc", Y_thres = "inc",
#' wght_thres = "rb050",
#' H = "strata", PSU = "PSU",
#' w_final = "rb050", ID_level1 = "db030",
#' ID_level2 = "id_l2",
#' Dom = c("rb090", "db040"),
#' period = "year", sort = NULL,
#' gender = NULL, dataset = dataset1,
#' percentage = 60,
#' order_quant = 50L,
#' alpha = 20,
#' confidence = 0.95,
#' type = "linrmpg")
#' result2}
#'
#' @import data.table
#' @import laeken
#' @export vardcrospoor
vardcrospoor <- function(Y, age = NULL, pl085 = NULL,
month_at_work = NULL, Y_den = NULL,
Y_thres = NULL, wght_thres = NULL,
H, PSU, w_final, ID_level1, ID_level2,
Dom = NULL, country = NULL,
period, sort = NULL, gender = NULL,
dataset = NULL, X = NULL,
countryX = NULL, periodX = NULL,
X_ID_level1 = NULL, ind_gr = NULL,
g = NULL, q = NULL, datasetX = NULL,
percentage = 60, order_quant = 50,
alpha = 20, use.estVar = FALSE,
withperiod = TRUE, netchanges = TRUE,
confidence = .95, outp_lin = FALSE,
outp_res = FALSE, type = "linrmpg",
checking = TRUE) {
### Checking
. <- NULL
all_choices <- c("linarpr", "linarpt", "lingpg",
"linpoormed", "linrmpg", "lingini",
"lingini2", "linqsr", "linrmir", "linarr")
type <- tolower(type)
type <- match.arg(type, all_choices, length(type) > 1)
percentage <- check_var(vars = percentage, varn = "percentage", varntype = "numeric0100")
order_quant <- check_var(vars = order_quant, varn = "order_quant", varntype = "numeric0100")
alpha <- check_var(vars = alpha, varn = "alpha", varntype = "numeric0100")
netchanges <- check_var(vars = netchanges, varn = "netchanges", varntype = "logical")
withperiod <- check_var(vars = withperiod, varn = "withperiod", varntype = "logical")
use.estVar <- check_var(vars = use.estVar, varn = "use.estVar", varntype = "logical")
confidence <- check_var(vars = confidence, varn = "confidence", varntype = "numeric01")
if (checking) {
if (!is.null(X)) {
if (is.null(datasetX)) datasetX <- copy(dataset)
equal_dataset <- identical(dataset, datasetX) & !is.null(dataset)
if (equal_dataset) { X_ID_level1 <- ID_level1
countryX <- country }}
Y <- check_var(vars = Y, varn = "Y", dataset = dataset,
ncols = 1, isnumeric = TRUE,
isvector = TRUE, grepls = "__")
Ynrow <- length(Y)
w_final <- check_var(vars = w_final, varn = "weight",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE)
age <- check_var(vars = age, varn = "age", dataset = dataset,
ncols = 1, Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE,
mustbedefined = any(c("linarr", "linrmir") %in% type))
pl085 <- check_var(vars = pl085, varn = "pl085", dataset = dataset,
ncols = 1, Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE,
mustbedefined = any(type == "linarr"))
month_at_work <- check_var(vars = month_at_work, varn = "month_at_work",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
isnumeric = TRUE, isvector = TRUE,
mustbedefined = any(type == "linarr"))
gender <- check_var(vars = gender, varn = "gender", dataset = dataset,
ncols = 1, Ynrow = Ynrow, isnumeric = TRUE,
isvector = TRUE, mustbedefined = any(type == "lingpg"))
Y_den <- check_var(vars = Y_den, varn = "Y_den", dataset = dataset,
ncols = 1, Ynrow = Ynrow, isnumeric = TRUE, isvector = TRUE,
mustbedefined = any(type == "linarr"))
Y_thres <- check_var(vars = Y_thres, varn = "Y_thres",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
wght_thres <- check_var(vars = wght_thres, varn = "wght_thres",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
H <- check_var(vars = H, varn = "H", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
dif_name = "dataH_stratas")
sort <- check_var(vars = sort, varn = "sort",
dataset = dataset, ncols = 1,
Ynrow = Ynrow, mustbedefined = FALSE,
isnumeric = TRUE, isvector = TRUE)
Dom <- check_var(vars = Dom, varn = "Dom", dataset = dataset,
Ynrow = Ynrow, ischaracter = TRUE,
mustbedefined = FALSE, duplicatednames = TRUE,
dif_name = c("type", "spectype"),
grepls = "__")
country <- check_var(vars = country, varn = "country",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
ischaracter = TRUE, mustbedefined = FALSE,
dif_name = c("percoun", "period_country",
"type", "spectype"))
period <- check_var(vars = period, varn = "period",
dataset = dataset, Ynrow = Ynrow,
ischaracter = TRUE, duplicatednames = TRUE,
withperiod = withperiod,
dif_name = c("percoun", "period_country",
names(country), "type", "spectype"))
ID_level1 <- check_var(vars = ID_level1, varn = "ID_level1",
dataset = dataset, ncols = 1, Yncol = 0,
Ynrow = Ynrow, ischaracter = TRUE)
ID_level2 <- check_var(vars = ID_level2, varn = "ID_level2",
dataset = dataset, ncols = 1, Ynrow = Ynrow,
ischaracter = TRUE, namesID1 = names(ID_level1),
country = country, periods = period)
PSU <- check_var(vars = PSU, varn = "PSU", dataset = dataset,
ncols = 1, Ynrow = Ynrow, ischaracter = TRUE,
namesID1 = names(ID_level1))
if (!is.null(X) | !is.null(ind_gr) | !is.null(g) | !is.null(q) | !is.null(countryX)
| !is.null(periodX) | !is.null(X_ID_level1) | !is.null(datasetX)) {
X <- check_var(vars = X, varn = "X", dataset = datasetX,
check.names = TRUE, isnumeric = TRUE,
dif_name = c(names(period), names(country), names(H),
names(PSU), names(ID_level1), "w_final",
"w_design", "g", "q", "type", "spectype"), dX = "X")
Xnrow <- nrow(X)
ind_gr <- check_var(vars = ind_gr, varn = "ind_gr",
dataset = datasetX, ncols = 1,
Xnrow = Xnrow, ischaracter = TRUE,
dif_name = c(names(period), names(country), names(H),
names(PSU), names(ID_level1), "w_final",
names(X), "w_design", "g", "q",
"type", "spectype"), dX = "X")
g <- check_var(vars = g, varn = "g", dataset = datasetX,
ncols = 1, Xnrow = Xnrow, isnumeric = TRUE,
isvector = TRUE, dX = "X")
q <- check_var(vars = q, varn = "q", dataset = datasetX,
ncols = 1, Xnrow = Xnrow, isnumeric = TRUE,
isvector = TRUE, dX = "X")
countryX <- check_var(vars = countryX, varn = "countryX",
dataset = datasetX, ncols = 1, Xnrow = Xnrow,
ischaracter = TRUE, mustbedefined = !is.null(country),
varnout = "country", varname = names(country),
country = country, dX = "X")
periodX <- check_var(vars = periodX, varn = "periodX",
dataset = datasetX, ncols = 1,
Xnrow = Xnrow, ischaracter = TRUE,
mustbedefined = !is.null(period),
duplicatednames = TRUE, varnout = "period",
varname = names(period), country = country,
countryX = countryX, periods = period, dX = "X")
X_ID_level1 <- check_var(vars = X_ID_level1, varn = "X_ID_level1",
dataset = datasetX, ncols = 1, Xnrow = Xnrow,
ischaracter = TRUE, varnout = "ID_level1",
varname = names(ID_level1), country = country,
countryX = countryX, periods = period, dX = "X",
periodsX = periodX, ID_level1 = ID_level1)
}
}
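  # when no separate threshold income or weights are given, the study income and
  # final weights are used for the at-risk-of-poverty threshold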
if (is.null(Y_thres)) Y_thres <- Y
if (is.null(wght_thres)) wght_thres <- w_final
namesDom <- names(Dom)
# Calculation
  Dom1 <- n_h <- stratasf <- name1 <- nhcor <- var <- NULL
num <- count_respondents <- value <- estim <- pop_size <- NULL
period_country <- N <- se <- rse <- cv <- namesY <- H_sk <- NULL
estim <- c()
if (!is.null(country)) { countryper <- copy(country)
} else countryper <- data.table(percoun = rep("1", length(Y)))
if (!is.null(period)) countryper <- data.table(period, countryper)
idper <- data.table(ID_level1, ID_level2, countryper)
countryperid2 <- c(names(countryper), names(ID_level2))
size <- copy(countryper)
if (!is.null(Dom)) size <- data.table(size, Dom)
names_size <- names(size)
size <- data.table(size, sk = 1, w_final)
size <- size[, .(count_respondents = .N,
pop_size = sum(w_final)), keyby = names_size]
Y1 <- data.table(idper)
Y1$period_country <- do.call("paste", c(as.list(Y1[, names(countryper), with = FALSE]), sep = "_"))
Y1 <- data.table(Y1, H, PSU, w_final, check.names = TRUE)
namesY1 <- names(Y1)
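  # for each requested indicator: compute the linearised variable and the point
  # estimate, merge the linearised column into Y1 and stack the estimate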
if ("linarpt" %in% type) {
varpt <- linarpt(Y = Y, id = ID_level2,
weight = w_final, sort = sort,
Dom = Dom, period = countryper,
dataset = NULL, percentage = percentage,
order_quant = order_quant,
var_name = "lin_arpt", checking = FALSE)
Y1 <- merge(Y1, varpt$lin, all.x = TRUE, by = countryperid2)
esti <- data.table("ARPT", varpt$value, NA)
setnames(esti, names(esti)[c(1, -1:0 + ncol(esti))],
c("type", "value", "value_eu"))
estim <- rbind(estim, esti)
varpt <- esti <- NULL
}
if ("linarpr" %in% type) {
varpr <- linarpr(Y = Y, id = ID_level2,
weight = w_final, Y_thres = Y_thres,
wght_thres = wght_thres, sort = sort,
Dom = Dom, period = countryper,
dataset = NULL, percentage = percentage,
order_quant = order_quant, var_name = "lin_arpr",
checking = FALSE)
Y1 <- merge(Y1, varpr$lin, all.x = TRUE, by = countryperid2)
esti <- data.table("ARPR", varpr$value, NA)
setnames(esti, names(esti)[c(1, -1:0 + ncol(esti))],
c("type", "value", "value_eu"))
estim <- rbind(estim, esti)
varpr <- esti <- NULL
}
if (("lingpg" %in% type) & (all(!is.null(gender)))) {
vgpg <- lingpg(Y = Y, gender = gender, id = ID_level2,
weight = w_final, sort = sort, Dom = Dom,
period = countryper, dataset = NULL,
var_name = "lin_gpg", checking = FALSE)
Y1 <- merge(Y1, vgpg$lin, all.x = TRUE, by = countryperid2)
esti <- data.table("GPG", vgpg$value, NA)
setnames(esti, names(esti)[c(1, -1:0 + ncol(esti))],
c("type", "value", "value_eu"))
estim <- rbind(estim, esti)
vgpg <- esti <- NULL
}
if ("linpoormed" %in% type) {
vporm <- linpoormed(Y = Y, id = ID_level2, weight = w_final,
sort = sort, Dom = Dom, period = countryper,
dataset = NULL, percentage = percentage,
order_quant = order_quant, var_name = "lin_poormed",
checking = FALSE)
Y1 <- merge(Y1, vporm$lin, all.x = TRUE, by = countryperid2)
esti <- data.table("POORMED", vporm$value, NA)
setnames(esti, names(esti)[c(1, -1:0 + ncol(esti))],
c("type", "value", "value_eu"))
estim <- rbind(estim, esti)
vporm <- esti <- NULL
}
if ("linrmpg" %in% type) {
vrmpg <- linrmpg(Y = Y, id = ID_level2, weight = w_final,
sort = sort, Dom = Dom, period = countryper,
dataset = NULL, percentage = percentage,
order_quant = order_quant, var_name = "lin_rmpg",
checking = FALSE)
Y1 <- merge(Y1, vrmpg$lin, all.x = TRUE, by = countryperid2)
esti <- data.table("RMPG", vrmpg$value, NA)
setnames(esti, names(esti)[c(1, -1:0 + ncol(esti))],
c("type", "value", "value_eu"))
estim <- rbind(estim, esti)
vrmpg <- esti <- NULL
}
if ("linqsr" %in% type) {
vqsr <- linqsr(Y = Y, id = ID_level2, weight = w_final,
sort = sort, Dom = Dom, period = countryper,
dataset = NULL, alpha = alpha, var_name = "lin_qsr",
checking = FALSE)
Y1 <- merge(Y1, vqsr$lin, all.x = TRUE, by = countryperid2)
esti <- data.table("QSR", vqsr$value)
setnames(esti, names(esti)[c(1, -1:0 + ncol(esti))],
c("type", "value", "value_eu"))
estim <- rbind(estim, esti)
vqsr <- esti <- NULL
}
if ("lingini" %in% type) {
vgini <- lingini(Y = Y, id = ID_level2, weight = w_final,
sort = sort, Dom = Dom, period = countryper,
dataset = NULL, var_name = "lin_gini",
checking = FALSE)
Y1 <- merge(Y1, vgini$lin, all.x = TRUE, by = countryperid2)
esti <- data.table("GINI", vgini$value)
setnames(esti, names(esti)[c(1, -1:0 + ncol(esti))],
c("type", "value", "value_eu"))
estim <- rbind(estim, esti)
vgini <- vginia <- esti <- NULL
}
if ("lingini2" %in% type) {
vgini2 <- lingini2(Y = Y, id = ID_level2, weight = w_final,
sort = sort, Dom = Dom, period = countryper,
dataset = NULL, var_name = "lin_gini2",
checking = FALSE)
Y1 <- merge(Y1, vgini2$lin, all.x = TRUE, by = countryperid2)
esti <- data.table("GINI2", vgini2$value)
setnames(esti, names(esti)[c(1, -1:0 + ncol(esti))],
c("type", "value", "value_eu"))
estim <- rbind(estim, esti)
vgini2 <- esti <- NULL
}
if (("linrmir" %in% type) & all(!is.null(age))) {
vrmir <- linrmir(Y = Y, id = ID_level2, age = age,
weight = w_final, sort = sort, Dom = Dom,
period = countryper, dataset = NULL,
order_quant = order_quant, var_name = "lin_rmir",
checking = FALSE)
Y1 <- merge(Y1, vrmir$lin, all.x = TRUE, by = countryperid2)
esti <- data.table("RMIR", vrmir$value, NA)
setnames(esti, names(esti)[c(1, -1:0 + ncol(esti))],
c("type", "value", "value_eu"))
estim <- rbind(estim, esti)
vrmir <- esti <- NULL
}
if (("linarr" %in% type) & all(!is.null(age)
& !is.null(pl085) & !is.null(month_at_work))) {
varr <- linarr(Y = Y, Y_den = Y_den, id = ID_level2, age = age,
pl085 = pl085, month_at_work = month_at_work, weight = w_final,
sort = sort, Dom = Dom, period = countryper, dataset = NULL,
order_quant = order_quant, var_name = "lin_arr",
checking = FALSE)
Y1 <- merge(Y1, varr$lin, all.x = TRUE, by = countryperid2)
esti <- data.table("ARR", varr$value, NA)
setnames(esti, names(esti)[c(1, -1:0 + ncol(esti))],
c("type", "value", "value_eu"))
estim <- rbind(estim, esti)
varr <- esti <- NULL
}
lin_out <- copy(Y1)
if (!outp_lin) lin_out <- NULL
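  # combine the point estimates with respondent counts and population sizes
  # by country, period and domain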
setnames(estim, "value", "estim")
estim$period_country <- do.call("paste", c(as.list(estim[, names(countryper), with = FALSE]), sep = "_"))
nams <- names(countryper)
if (!is.null(namesDom)) nams <- c(nams, namesDom)
estim <- merge(estim, size, all = TRUE, by = nams)
namesY2 <- names(Y1)[!(names(Y1) %in% namesY1)]
namesY2w <- paste0(namesY2, "w")
# Calibration
w_design <- res_outp <- NULL
names_id <- names(ID_level1)
names_H <- names(H)
names_PSU <- names(PSU)
namesperc <- c("period_country", names(countryper))
namesDT1k <- c(namesperc, names_H, names_PSU)
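  # sum the linearised variables within ID_level1 units, keeping stratum and PSU codes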
DTc <- Y1[, lapply(.SD, sum, na.rm = TRUE), keyby = c(namesDT1k, names(ID_level1)), .SDcols = namesY2]
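  # if calibration information X is given, replace the aggregated linearised values
  # by their calibration residuals (regression on X with weights w_design and q)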
if (!is.null(X)) {
X0 <- data.table(X_ID_level1, ind_gr, q, g, X)
if (!is.null(countryX)) X0 <- data.table(countryX, X0)
if (!is.null(periodX)) X0 <- data.table(periodX, X0)
nos <- c(names(periodX), names(countryX), names(ID_level1))
DT1 <- merge(DTc, X0, by = nos)
DT1[, w_design := w_final / g ]
ind_gr <- DT1[, c(namesperc, names(ind_gr)), with = FALSE]
ind_period <- do.call("paste", c(as.list(ind_gr), sep = "_"))
res <- lapply(split(DT1[, .I], ind_period), function(i)
data.table(DT1[i, nos, with = FALSE],
res <- residual_est(Y = DT1[i, namesY2, with = FALSE],
X = DT1[i, names(X), with = FALSE],
weight = DT1[i][["w_design"]],
q = DT1[i][["q"]], dataset = NULL,
checking = FALSE)))
res <- rbindlist(res)
setnames(res, namesY2, namesY2w)
DTc <- merge(DTc, res, by = nos)
if (outp_res) res_outp <- DTc[, c(nos, names_PSU, "w_final", namesY2w), with = FALSE]
} else DTc[, (namesY2w) := .SD[, namesY2, with = FALSE]]
  # weight the (residual) linearised values by the final weights
  DTc[, (namesY2w) := .SD[, namesY2w, with = FALSE] * get("w_final")]
  size <- ID_level1 <- ID_level2 <- Dom <- country <- NULL
  H <- PSU <- nh <- nh_cor <- NULL
#--------------------------------------------------------*
# AGGREGATION AT PSU LEVEL ("ULTIMATE CLUSTER" APPROACH) |
#--------------------------------------------------------*
DTY2 <- DTc[, lapply(.SD, sum, na.rm = TRUE), keyby = namesDT1k, .SDcols = namesY2w]
setnames(DTY2, namesY2w, namesY2)
DT1 <- copy(DTY2)
if (!netchanges) DT1 <- NULL
# NUMBER OF PSUs PER STRATUM
setkeyv(DTY2, c(namesperc, names_H))
DTY2[, nh := .N, by = c(namesperc, names_H)]
#--------------------------------------------------------------------------*
# MULTIVARIATE REGRESSION APPROACH USING STRATUM DUMMIES AS REGRESSORS AND |
# STANDARD ERROR ESTIMATION |
#--------------------------------------------------------------------------*
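  # build one 0/1 dummy column per stratum level; the dummy names are kept in DTY2H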
DTY2[, (names_H) := as.factor(get(names_H))]
  DTY2H <- DTY2[, paste0(names_H, "_", levels(get(names_H)))]
DTY2[, (DTY2H) := transpose(lapply(get(names_H), FUN = function(x){as.numeric(x == levels(get(names_H)))})) ]
namesY2m <- make.names(namesY2)
setnames(DTY2, namesY2, namesY2m)
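  # for every linearised variable and every period-country cell, regress on the
  # stratum dummies; the squared residuals, corrected by nh / (nh - 1), sum up to
  # the variance estimate (or the crossprod of residuals when use.estVar = TRUE)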
fits <- lapply(1:length(namesY2), function(i) {
fitss <- lapply(split(DTY2, DTY2$period_country), function(DTY2c) {
y <- namesY2m[i]
funkc <- as.formula(paste("cbind(", trimws(toString(y)), ") ~ ",
paste(c(-1, DTY2H), collapse = "+")))
res1 <- lm(funkc, data = DTY2c)
if (use.estVar == TRUE) {res1 <- data.table(crossprod(res1$res))
} else res1 <- data.table(res1$res)
setnames(res1, names(res1)[1], "num")
res1[, namesY := y]
if (use.estVar == TRUE) {
setnames(res1, "num", "var")
res1 <- data.table(res1[1], DTY2c[1])
} else {
res1 <- data.table(res1, DTY2c)
res1[, nhcor := ifelse(nh > 1, nh / (nh - 1), 1)]
res1[, var := nhcor * num * num]
}
fits <- res1[, lapply(.SD, sum),
keyby = c(namesperc, "namesY"),
.SDcols = "var"]
return(fits)
})
return(rbindlist(fitss))
})
res <- rbindlist(fits)
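  # attach each variance to its point estimate through the namesY key
  # (indicator type, optionally combined with the domain)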
estim[, namesY := paste0("lin_", tolower(type))]
if (!is.null(namesDom)) {
Dom1 <- estim[, lapply(namesDom, function(x) make.names(paste0(x, ".", get(x))))]
Dom1 <- Dom1[, Dom := Reduce(function(x, y) paste(x, y, sep = "__"), .SD)]
estim <- data.table(estim, Dom1 = Dom1[, Dom])
estim[, namesY := paste0(namesY, "__", Dom1)]
}
res <- merge(estim, res, all = TRUE,
by = names(res)[!(names(res) %in% "var")])
Dom1 <- estim <- DT3H <- NULL
if (is.null(res$Dom1)) res[, Dom1 := "1"]
res[, (c("namesY", "Dom1", "period_country")) := NULL]
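  # standard error, relative standard error and coefficient of variation (in percent)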
res[, se := sqrt(var)]
res[, rse := se / estim]
res[, cv := rse * 100]
res <- res[, c(names(countryper), namesDom, "type", "count_respondents",
"pop_size", "estim", "se", "var", "rse", "cv"), with = FALSE]
list(lin_out = lin_out, res_out = res_outp, data_net_changes = DT1, results = res)
}