#' @import Matrix #' @import LRMF3 #' @importFrom tibble as_tibble tibble #' @importFrom glue glue #' @importFrom RSpectra svds #' @keywords internal #' @aliases vsp-package "_PACKAGE"
/scratch/gouwar.j/cran-all/cranData/vsp/R/vsp-package.R
#' Semi-Parametric Factor Analysis via Vintage Sparse PCA #' #' This code implements TODO. #' #' @param x Either a graph adjacency matrix, [igraph::igraph] or #' [tidygraph::tbl_graph]. If `x` is a [matrix] or [Matrix::Matrix] #' then `x[i, j]` should correspond to the edge going from node `i` #' to node `j`. #' #' @param edge_weights When `x` is an [igraph::igraph], an edge attribute #' to use to form a weighted adjacency matrix. #' #' @param rank The number of factors to calculate. #' #' @param center Should the adjacency matrix be row *and* column centered? #' Defaults to `FALSE`. #' #' @param recenter Should the varimax factors be re-centered around the #' original factor means? Only used when `center = TRUE`, defaults to `FALSE`. #' #' @param degree_normalize Should the regularized graph laplacian be used instead of the #' raw adjacency matrix? Defaults to `TRUE`. If `center = TRUE`, `A` will #' first be centered and then normalized. #' #' @param renormalize Should the regularized graph laplacian normalization be undone, #' so that the varimax factors are re-scaled back to the scale of the original data? #' Only used when `degree_normalize = TRUE`, defaults to `FALSE`. #' #' @param tau_row Row regularization term. Default is `NULL`, in which case #' we use the row degree. Ignored when `degree_normalize = FALSE`. #' #' @param tau_col Column regularization term. Default is `NULL`, in which case #' we use the column degree. Ignored when `degree_normalize = FALSE`. #' #' @param kaiser_normalize_u Whether or not to use Kaiser normalization #' when rotating the left singular vectors `U`. Defaults to `FALSE`. #' #' @param kaiser_normalize_v Whether or not to use Kaiser normalization #' when rotating the right singular vectors `V`. Defaults to `FALSE`. #' #' @param rownames Character vector of row names of `x`. These row names #' are propagated into the row names of the `U` and `Z`. Defaults #' to `NULL`. #' #' @param colnames Character vector of column names of `x`. These column names #' are propagated into the row names of the `V` and `Y`. Defaults #' to `NULL`. #' #' @inheritParams rlang::args_dots_empty #' #' @details Sparse SVDs use `RSpectra` for performance. #' #' @return An object of class `vsp`. TODO: Details #' #' @export #' #' @examples #' #' library(LRMF3) #' #' vsp(ml100k, rank = 2) #' vsp <- function(x, rank, ...) { UseMethod("vsp") } #' @rdname vsp #' @export vsp.default <- function(x, rank, ...) { stop(glue("No `vsp` method for objects of class {class(x)}. ")) } #' @importFrom invertiforms DoubleCenter RegularizedLaplacian #' @importFrom invertiforms transform inverse_transform #' @rdname vsp #' @export vsp.matrix <- function(x, rank, ..., center = FALSE, recenter = FALSE, degree_normalize = TRUE, renormalize = FALSE, tau_row = NULL, tau_col = NULL, kaiser_normalize_u = FALSE, kaiser_normalize_v = FALSE, rownames = NULL, colnames = NULL) { rlang::check_dots_empty() if (!is.integer(rank)) rank <- round(rank) if (rank < 2) stop("`rank` must be at least two.", call. = FALSE) if (recenter && !center) stop("`recenter` must be FALSE when `center` is FALSE.", call. = FALSE) if (renormalize && !degree_normalize) stop("`renormalize` must be FALSE when `degree_normalize` is FALSE.", call. 
= FALSE) if (is.null(rownames)) { rownames <- rownames(x) } if (is.null(colnames)) { colnames <- colnames(x) } n <- nrow(x) d <- ncol(x) transformers <- list() if (center) { centerer <- DoubleCenter(x) transformers <- append(transformers, centerer) L <- transform(centerer, x) } else{ L <- x } if (degree_normalize) { scaler <- RegularizedLaplacian(L, tau_row = tau_row, tau_col = tau_col) transformers <- append(transformers, scaler) L <- transform(scaler, L) } # this includes a call to isSymmetric that we might be able to skip out on s <- svds(L, k = rank, nu = rank, nv = rank) # do kaiser normalization by hand so we can deal with rows of all zeros if (kaiser_normalize_u) { s$u <- safe_row_l2_normalize(s$u) } if (kaiser_normalize_v) { s$v <- safe_row_l2_normalize(s$v) } R_U <- stats::varimax(s$u, normalize = FALSE)$rotmat R_V <- stats::varimax(s$v, normalize = FALSE)$rotmat Z <- sqrt(n) * s$u %*% R_U Y <- sqrt(d) * s$v %*% R_V B <- t(R_U) %*% Diagonal(n = rank, x = s$d) %*% R_V / (sqrt(n) * sqrt(d)) fa <- vsp_fa( u = s$u, d = s$d, v = s$v, Z = Z, B = B, Y = Y, R_U = R_U, R_V = R_V, transformers = transformers, rownames = rownames, colnames = colnames ) if (renormalize) { fa <- inverse_transform(scaler, fa) } if (recenter) { fa <- inverse_transform(centerer, fa) } fa <- make_skew_positive(fa) fa } #' Perform varimax rotation on a low rank matrix factorization #' #' @inheritParams vsp #' #' @param centerer TODO #' @param scaler TODO #' #' @export #' #' @examples #' #' library(LRMF3) #' library(RSpectra) #' #' s <- svds(ml100k, k = 2) #' mf <- as_svd_like(s) #' fa <- vsp(mf, rank = 2) #' vsp.svd_like <- function(x, rank, ..., centerer = NULL, scaler = NULL, recenter = FALSE, renormalize = FALSE, kaiser_normalize_u = FALSE, kaiser_normalize_v = FALSE, rownames = NULL, colnames = NULL) { rlang::check_dots_empty() n <- nrow(x$u) d <- nrow(x$v) # do kaiser normalization by hand so we can deal with rows of all zeros if (kaiser_normalize_u) { x$u <- safe_row_l2_normalize(x$u) } if (kaiser_normalize_v) { x$v <- safe_row_l2_normalize(x$v) } R_U <- stats::varimax(x$u, normalize = FALSE)$rotmat R_V <- stats::varimax(x$v, normalize = FALSE)$rotmat Z <- sqrt(n) * x$u %*% R_U Y <- sqrt(d) * x$v %*% R_V B <- t(R_U) %*% Diagonal(n = rank, x = x$d) %*% R_V / (sqrt(n) * sqrt(d)) fa <- vsp_fa( u = x$u, d = x$d, v = x$v, Z = Z, B = B, Y = Y, R_U = R_U, R_V = R_V, transformers = list(centerer, scaler), rownames = rownames, colnames = colnames ) if (!is.null(scaler) && renormalize) { fa <- inverse_transform(scaler, fa) } if (!is.null(centerer) && recenter) { fa <- inverse_transform(centerer, fa) } fa <- make_skew_positive(fa) fa } #' @rdname vsp #' @export vsp.Matrix <- vsp.matrix #' @rdname vsp #' @export vsp.dgCMatrix <- vsp.matrix #' @rdname vsp #' @export vsp.igraph <- function(x, rank, ..., edge_weights = NULL) { x <- igraph::get.adjacency(x, sparse = TRUE, attr = edge_weights) vsp.matrix(x, rank, ...) }
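# Illustrative sketch (not the exported API): the core computation in
# `vsp.matrix()` above is a rank-k truncated SVD followed by varimax rotation of
# both singular-vector matrices. A minimal stand-alone version, assuming the
# Matrix and RSpectra packages are installed and using a toy random matrix:
library(Matrix)
library(RSpectra)

set.seed(27)
A <- rsparsematrix(100, 100, density = 0.05)  # toy sparse "adjacency" matrix
k <- 3
s <- svds(A, k = k)
R_U <- stats::varimax(s$u, normalize = FALSE)$rotmat
R_V <- stats::varimax(s$v, normalize = FALSE)$rotmat
Z <- sqrt(nrow(A)) * s$u %*% R_U   # rotated left factors
Y <- sqrt(ncol(A)) * s$v %*% R_V   # rotated right factors
B <- t(R_U) %*% diag(s$d) %*% R_V / sqrt(nrow(A) * ncol(A))  # middle factor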
/scratch/gouwar.j/cran-all/cranData/vsp/R/vsp.R
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" )
/scratch/gouwar.j/cran-all/cranData/vsp/inst/doc/bff.R
--- title: "Interpreting factors with bff(), the Best Feature Function" author: "Fan Chen" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{bff} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Intro In post-clustering analysis, the Best Feature Function (BFF) is useful in selecting representative features for each cluster, especially in the case when additional covariates are available for each feature. For example, consider a social network of $n$ users partitioned into $k$ clusters, and each user possess a series of text document (covariates). We want to summarize words that are representative to each cluster. The BFF is suitable for this type of task. This document describes the intuition behind the BFF as a follow-up step after the `vsp` (vintage specral clustering) and touches several technical issues regarding implementation. ## Methodology For simplicity, we consider a symmetric square input matrix (e.g., the adjacency matrix of an undirected graph); the analysis on rectangular input is also supported by `bff()`. Given a data matrix $A \in \mathbb{R}^{n \times n}$, the `vsp` returns an approximation with factorization, $ZBY^T$, where $B \in \mathbb{R}^{k \times k}$ is low-rank, and $Y \in \mathbb{R}^{n \times k}$ encodes the loadings of each feature (i.e., columns of $A$) with respect to clusters. In particular, when $A$ is the adjacency matrix of an undirected Blockmodel graph, each row of $Y$ decodes the block (cluster) membership of the vertex (feature). Generally, the loading $Y_{ij}$ (for $i=1,...,n$ and $j=1,...,k$) can be interpreted as an membership measure of the $i$-th feature to the $j$-th cluster. <!-- When normalized, it is also an estimator of mixed membership. --> Now, suppose in addition that we have covariates on each feature, $D \in \mathbb{R}^{n \times p}$, where $p$ is the dimension of covariates. For example, $D$ can be a document-term matrix, where all text data associated with $i$-th (for $i=1,...,n$) feature are pooled into a meta document, and $p$ under this circumstance is the size of corpus (i.e., total number of words/terms), and $D_{il}$ is the frequency of word $l$ (for $l=1,...,p$) appearing in the $i$-th document. The BFF then uses $Y$ and $D$ to produce an assessment of covariates "best" for each cluster. To start with, suppose both $Y$ and $D$ has only non-negative entries.Define the importance, $I \in \mathbb{R}^{p \times k}$, of the $l$-th covariate to the $j$-th cluster by the average of $l$-th covariate (the $l$-th columns of $D$), weighted by the $j$-th column of $Y$, $$I_{lj} = \sum_{j=1}^n D_{jl} Y_{ij}, \text{ for } l=1,...,p,i=1,...n,$$ or compactly (side note: the cross product $\langle D,Y \rangle$ is defined as $D^T Y$ as in convention), $$I=\langle D,Y \rangle.$$ As such, a higher value in $I_{lj}$ indicates more significant importance. BFF selects the "best" covariates for each cluster according to the $j$-th (for $j=1, ..., k$) column of $I$. ## Implementation Below are a few notes on the implementation of BFF: * **Positive skewness**. When $D$ is a document-term matrix (a.k.a., bags of words), it holds that all elements are non-negative. However, there is absolutely no guarantee that $Y$ has all non-negative entries. 
This motivates the positive-skew transformation, i.e., we flip the signs of those columns of $Y$ that have a negative sample third [moment](https://en.wikipedia.org/wiki/Moment_(mathematics)). * **Handling negative elements**. For now, we adopt a rather ad hoc solution to the existence of negative elements in $Y$ -- we pretend they have little effect. In the above importance calculation, negative weights ($Y_{ij}<0$) are all treated alike, as $-1$. In other words, the negative elements result in some subtraction (reduction) in the importance metric. * **Weight normalization**. In BFF, we use the $Y$ matrix as a way of weighting the covariates (in $D$). It is therefore natural to expect the columns of $Y$ to be (akin to) probability distributions, i.e., the probability of selecting one member of the cluster at random. Recall also that the columns of $Y$ all have (or are close to) unit $\ell_2$-norm. Hence, an additional transformation is needed: we normalize $Y$ by column. In particular, this is done separately for the positive and negative elements. * **Variance stabilization**. If we model $I_{lj}$ with a Poisson rate model, $\text{Poisson}(\lambda_{lj})$, the sample mean and variance are coupled (i.e., both have expectation $\lambda_{lj}$). In order to standardize our importance measure $I$, we need to decouple these two statistics. A square-root transformation, $f(x)=\sqrt{x}$, does the trick; it stabilizes the sampling variance, which becomes nearly constant.
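## A small illustrative sketch

The steps above can be written down compactly. The chunk below is a minimal, hedged illustration of the importance matrix $I$ (it is not the exact `bff()` implementation): it flips columns of `Y` to have positive skew, normalizes positive and negative parts of each column separately, and applies the square-root variance stabilization. The matrices `D` and `Y` here are simulated placeholders.

```{r}
set.seed(1)
n <- 50; p <- 30; k <- 3
D <- matrix(rpois(n * p, lambda = 2), n, p)  # placeholder document-term matrix
Y <- matrix(rnorm(n * k), n, k)              # placeholder loadings

# flip columns with negative sample third moment (positive-skew transformation)
skew <- apply(Y, 2, function(y) mean((y - mean(y))^3))
Y <- sweep(Y, 2, ifelse(skew < 0, -1, 1), `*`)

# normalize positive and negative parts of each column separately
Y_pos <- pmax(Y, 0); Y_neg <- pmax(-Y, 0)
Y_pos <- sweep(Y_pos, 2, pmax(colSums(Y_pos), 1e-12), `/`)
Y_neg <- sweep(Y_neg, 2, pmax(colSums(Y_neg), 1e-12), `/`)

# importance of each covariate for each cluster, with square-root stabilization
I_mat <- sqrt(t(D) %*% Y_pos) - sqrt(t(D) %*% Y_neg)  # p x k
head(I_mat)
```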
/scratch/gouwar.j/cran-all/cranData/vsp/inst/doc/bff.Rmd
--- title: "Interpreting factors with bff(), the Best Feature Function" author: "Fan Chen" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{bff} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Intro In post-clustering analysis, the Best Feature Function (BFF) is useful in selecting representative features for each cluster, especially in the case when additional covariates are available for each feature. For example, consider a social network of $n$ users partitioned into $k$ clusters, and each user possess a series of text document (covariates). We want to summarize words that are representative to each cluster. The BFF is suitable for this type of task. This document describes the intuition behind the BFF as a follow-up step after the `vsp` (vintage specral clustering) and touches several technical issues regarding implementation. ## Methodology For simplicity, we consider a symmetric square input matrix (e.g., the adjacency matrix of an undirected graph); the analysis on rectangular input is also supported by `bff()`. Given a data matrix $A \in \mathbb{R}^{n \times n}$, the `vsp` returns an approximation with factorization, $ZBY^T$, where $B \in \mathbb{R}^{k \times k}$ is low-rank, and $Y \in \mathbb{R}^{n \times k}$ encodes the loadings of each feature (i.e., columns of $A$) with respect to clusters. In particular, when $A$ is the adjacency matrix of an undirected Blockmodel graph, each row of $Y$ decodes the block (cluster) membership of the vertex (feature). Generally, the loading $Y_{ij}$ (for $i=1,...,n$ and $j=1,...,k$) can be interpreted as an membership measure of the $i$-th feature to the $j$-th cluster. <!-- When normalized, it is also an estimator of mixed membership. --> Now, suppose in addition that we have covariates on each feature, $D \in \mathbb{R}^{n \times p}$, where $p$ is the dimension of covariates. For example, $D$ can be a document-term matrix, where all text data associated with $i$-th (for $i=1,...,n$) feature are pooled into a meta document, and $p$ under this circumstance is the size of corpus (i.e., total number of words/terms), and $D_{il}$ is the frequency of word $l$ (for $l=1,...,p$) appearing in the $i$-th document. The BFF then uses $Y$ and $D$ to produce an assessment of covariates "best" for each cluster. To start with, suppose both $Y$ and $D$ has only non-negative entries.Define the importance, $I \in \mathbb{R}^{p \times k}$, of the $l$-th covariate to the $j$-th cluster by the average of $l$-th covariate (the $l$-th columns of $D$), weighted by the $j$-th column of $Y$, $$I_{lj} = \sum_{j=1}^n D_{jl} Y_{ij}, \text{ for } l=1,...,p,i=1,...n,$$ or compactly (side note: the cross product $\langle D,Y \rangle$ is defined as $D^T Y$ as in convention), $$I=\langle D,Y \rangle.$$ As such, a higher value in $I_{lj}$ indicates more significant importance. BFF selects the "best" covariates for each cluster according to the $j$-th (for $j=1, ..., k$) column of $I$. ## Implementation Below are a few notes on the implementation of BFF: * **Positive skewness**. When $D$ is a document-term matrix (a.k.a., bags of words), it holds that all elements are non-negative. However, there is absolutely no guarantee that $Y$ has all non-negative entries. 
This motivates the positive-skew transformation, i.e., we flip the signs of those columns of $Y$ that have a negative sample third [moment](https://en.wikipedia.org/wiki/Moment_(mathematics)). * **Handling negative elements**. For now, we adopt a rather ad hoc solution to the existence of negative elements in $Y$ -- we pretend they have little effect. In the above importance calculation, negative weights ($Y_{ij}<0$) are all treated alike, as $-1$. In other words, the negative elements result in some subtraction (reduction) in the importance metric. * **Weight normalization**. In BFF, we use the $Y$ matrix as a way of weighting the covariates (in $D$). It is therefore natural to expect the columns of $Y$ to be (akin to) probability distributions, i.e., the probability of selecting one member of the cluster at random. Recall also that the columns of $Y$ all have (or are close to) unit $\ell_2$-norm. Hence, an additional transformation is needed: we normalize $Y$ by column. In particular, this is done separately for the positive and negative elements. * **Variance stabilization**. If we model $I_{lj}$ with a Poisson rate model, $\text{Poisson}(\lambda_{lj})$, the sample mean and variance are coupled (i.e., both have expectation $\lambda_{lj}$). In order to standardize our importance measure $I$, we need to decouple these two statistics. A square-root transformation, $f(x)=\sqrt{x}$, does the trick; it stabilizes the sampling variance, which becomes nearly constant.
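## A small illustrative sketch

The steps above can be written down compactly. The chunk below is a minimal, hedged illustration of the importance matrix $I$ (it is not the exact `bff()` implementation): it flips columns of `Y` to have positive skew, normalizes positive and negative parts of each column separately, and applies the square-root variance stabilization. The matrices `D` and `Y` here are simulated placeholders.

```{r}
set.seed(1)
n <- 50; p <- 30; k <- 3
D <- matrix(rpois(n * p, lambda = 2), n, p)  # placeholder document-term matrix
Y <- matrix(rnorm(n * k), n, k)              # placeholder loadings

# flip columns with negative sample third moment (positive-skew transformation)
skew <- apply(Y, 2, function(y) mean((y - mean(y))^3))
Y <- sweep(Y, 2, ifelse(skew < 0, -1, 1), `*`)

# normalize positive and negative parts of each column separately
Y_pos <- pmax(Y, 0); Y_neg <- pmax(-Y, 0)
Y_pos <- sweep(Y_pos, 2, pmax(colSums(Y_pos), 1e-12), `/`)
Y_neg <- sweep(Y_neg, 2, pmax(colSums(Y_neg), 1e-12), `/`)

# importance of each covariate for each cluster, with square-root stabilization
I_mat <- sqrt(t(D) %*% Y_pos) - sqrt(t(D) %*% Y_neg)  # p x k
head(I_mat)
```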
/scratch/gouwar.j/cran-all/cranData/vsp/vignettes/bff.Rmd
#' Aquaporin Dataset #' #' Dataset with molecular dynamics simulations for the yeast aquaporin (Aqy1) - the gated water channel of the yeast Pichia pastoris. #' The dataset contains only the diameter Y of the channel, which is used in the data analysis in (Klockmann and Krivobokova, 2023). #' The diameter Y is measured by the distance between two centers of mass of certain residues of the protein. #' The dataset covers a 100 nanosecond time frame, split into 20000 equidistant observations. #' The full dataset, including the Euclidean coordinates of all 783 atoms, is available from the authors. #' For more details see (Klockmann, Krivobokova; 2023). #' #' @format A data frame with 20000 rows and 1 variable: #' \itemize{ #' \item{\code{Y}: }{the diameter of the channel} #'} #'@source see (Klockmann, Krivobokova; 2023). #'@examples #'data(aquaporin) "aquaporin"
/scratch/gouwar.j/cran-all/cranData/vstdct/R/data.R
############################################################## #periodic DEMMLER-REINSCH BASIS FUNCTIONS ############################################################## # Euler-Frobenius Polynomial # # param p degree of polynomial # param t r-dim. vector of evaluation points # return res r-dim. vector with Euler-Frobenius poly. of degree p at t, for details see (Schwarz, Krivobokova; 2016). # keyword Internal EF.poly <-function(p,t){ if (p<0){ stop ("p must be non-negative") } res=switch(p+1, #+1 to get cases 0,1,... instead of 1,2,... t^0, #p=0 t^0, #p=1 t+1, #p=2 t^2+4*t+1, #p=3 t^3+11*t^2+11*t+1, #p=4 t^4+26*t^3+66*t^2+26*t+1) #p=5 if (p>5){ res=c() for (te in t){ sum=0 for (i in 0:p){ j=seq(0,p-i) sum=sum+(choose(p+1,i)*(-1)^i*sum(j^p*te^(p-i-j))) } res=c(res,sum) } } return(res) } # Q_p-Polynomials # # param pminus1 degree of polynomial # param x r-dim. vector of evaluation points # return res r-dim. vector with Q polynomial of degree pminus1 at x, for details see (Schwarz, Krivobokova; 2016). # keyword Internal Q_pminus1 <- function(pminus1,x){ if (pminus1<0){ stop ("pminus1 must be non-negative") } res=switch(pminus1+1, #+1 to get cases 0,1,... instead of 1,2,... x^0, #Q_(p-1)=Q_0 1/2+cos(pi*x)^2/2, #Q_(p-1)=Q1 1/3+2*cos(pi*x)^2/3, #Q_(p-1)=Q2 5/24+3*cos(pi*x)^2/4+cos(pi*x)^4/24, #Q_(p-1)=Q3 2/15+11*cos(pi*x)^2/15+2*cos(pi*x)^4/15, #Q_(p-1)=Q4 (61+479*cos(pi*x)^2+179*cos(pi*x)^4+cos(pi*x)^6)/720, #Q_(p-1)=Q5 17/315+4*cos(pi*x)^2/7+38*cos(pi*x)^4/105+4*cos(pi*x)^6/315) #Q_(p-1)=Q6 return (res) } # INPUT: x grid values, n basis dimension, q penalization order of the smoothing spline; OUTPUT: basis matrix #' Periodic Demmler-Reinsch Basis #' #' Calculates the periodic Demmler-Reinsch basis for a given smoothness and a given vector of grid points. For details see (Schwarz, Krivobokova; 2016). #' @param x \code{m}-dim. vector with grid values in \[0,1\] #' @param n dimension of the basis #' @param q penalization order, \code{q=1,2,3,4} are available #' @return \code{mxn} dimensional matrix with the \code{n} DR basis functions evaluated at grid points \code{x} #' @export #' @examples DR.basis(seq(1,10)/10,5,2) DR.basis<-function(x,n,q){ if (!(q %in% c(1,2,3,4))){ stop("q can only attain the values 1,2,3,4") } p=2*q-1 t=n*x+q Q.M=Q_pminus1(p-1,(1:n)/n) #build exponential spline j=seq(0,p) z=exp(-2*pi*1i*(1:n)/n) basis=c() for (te in t){ res=c() t.int=floor(te) idx=0 for(ze in z){ idx=idx+1 if(ze==1){ res=c(res,1/Q.M[idx]) }else{ res=c(res,ze^t.int*(1-1/ze)^p*sum(choose(p,j)*(te-t.int)^(p-j)*sapply(j,EF.poly,ze)/(factorial(p)*(ze-1)^j))/Q.M[idx]) } } basis=matrix(rbind(basis,res),ncol=n) } # if(all(abs(Im(basis))<10^(-8))){ # basis=Re(basis)} return(basis) }
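# Illustrative usage sketch: evaluate a periodic Demmler-Reinsch basis of
# dimension n = 5 with penalization order q = 2 on an equi-spaced grid.
B <- DR.basis(x = seq(1, 50) / 50, n = 5, q = 2)
dim(B)  # 50 x 5; entries may be complex with negligible imaginary parts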
/scratch/gouwar.j/cran-all/cranData/vstdct/R/per_DR_basis.R
#library(nlme) check if needed #library(MASS) check if needed #' Data Examples #' #' [vstdct::example1], [vstdct::example2] and [vstdct::example3] generate i.i.d. vectors from a given distribution with different Toeplitz covariance matrices. #' The covariance function \eqn{\sigma} of the Toeplitz covariance matrix of #' \itemize{ #' \item{`example1`: }{has a polynomial decay, \eqn{\sigma(\tau)= sd^2(1+|\tau|)^{-gamma}},} #' \item{`example2`: }{follows an \eqn{ARMA(2,2)} model with coefficients \eqn{(0.7,-0.4,-0.2,0.2)} and innovations variance \eqn{sd^2},} #' \item{`example3`: }{yields a Lipschitz continuous spectral density \eqn{f} that is not differentiable, i.e. \eqn{f(x)= sd^2({|\sin(x+0.5\pi)|^{gamma}+0.45})}} #' } #' @name Data Examples #' @param p vector length #' @param n sample size #' @param sd standard deviation #' @param gamma polynomial decay of covariance function for `example1` resp. exponent for `example3` #' @param family distribution of the simulated data. Available distributions are "`Gaussian`", "`Gamma`", "`Uniform`". The default is "`Gaussian`". #' #' @return A list containing the following elements: #' \itemize{ #' \item{`Y`: }{`pxn` dimensional data matrix} #' \item{`sdf`: }{true spectral density function} #' \item{`acf`: }{true covariance function} #' } #' @export #' @examples example1(p=10, n=1, sd=1, gamma=1.2, family="Gaussian") example1<-function(p, n, sd, gamma,family="Gaussian"){ x=seq(0,1,length=p) acf=c(sd^2,sd^2/(1+seq(1,p-1))^gamma) Sigma=stats::toeplitz(acf) sdf=acf2sdf(acf) if (family=="Gaussian") Y=matrix(MASS::mvrnorm(n, mu=numeric(p), Sigma=Sigma),n,p) if (family=="Gamma"){ Y=matrix(NA,n,p) Sigma.sq2=t(chol(Sigma)) for (i in 1:n){ Y[i,]=Sigma.sq2%*%(stats::rgamma(n=p,shape=2,scale=1/sqrt(2))-sqrt(2)) } } if (family=="Uniform"){ Y=matrix(NA,n,p) Sigma.sq2=t(chol(Sigma)) for (i in 1:n){ Y[i,]=Sigma.sq2%*%(stats::runif(n=p,min=-sqrt(3),max=sqrt(3))) } } return(list(Y=Y,sdf=sdf,acf=acf)) } #' #' example3(p, n, sd, gamma,family="Gaussian") #' [vstdct::example1], [vstdct::example2] and [vstdct::example3] generate i.i.d. vectors from a given distribution with different Toeplitz covariance matrices. #' The covariance function \eqn{\sigma} of the Toeplitz covariance matrix of #' \itemize{ #' \item{`example1`: }{has a polynomial decay, \eqn{\sigma(\tau)= sd^2(1+|\tau|)^{-gamma}},} #' \item{`example2`: }{follows an \eqn{ARMA(2,2)} model with coefficients \eqn{(0.7,-0.4,-0.2,0.2)} and innovations variance \eqn{sd^2},} #' \item{`example3`: }{yields a Lipschitz continuous spectral density \eqn{f} that is not differentiable, i.e. \eqn{f(x)= sd^2{|\sin(x+0.5\pi)|^{gamma}+0.45}}} #' } #' @name Data Examples #' @param p vector length #' @param n sample size #' @param sd standard deviation #' @param gamma polynomial decay of covariance function for `example1` resp. exponent for `example3` #' @param family distribution of the simulated data. Available distributions are "`Gaussian`", "`Gamma`", "`Uniform`". The default is "`Gaussian`". 
#' #' @export #' @examples example2(p=10,n=1,sd=1,family="Gaussian") example2<-function(p,n,sd,family="Gaussian"){ x=seq(0,1,length=p) pp=2 qq=2 coef=c(0.7, -0.4,-0.2, 0.2) R.mat=nlme::corMatrix(nlme::Initialize(nlme::corARMA(coef,p=pp,q=qq),data=diag(1:p))) Sigma=sd^2*R.mat acf=Sigma[1,] sdf=acf2sdf(acf) if (family=="Gaussian") Y=matrix(MASS::mvrnorm(n, mu=numeric(p), Sigma=Sigma),n,p) if (family=="Gamma"){ Y=matrix(NA,n,p) for (i in 1:n){ Y[i,]=sqrt(sd)*stats::arima.sim(n = p, list(ar = c(0.7, -0.4), ma = c(-0.2, 0.2)), innov=stats::rgamma(n=p,shape=2,scale=1/sqrt(2))-sqrt(2)) } } if (family=="Uniform"){ Y=matrix(NA,n,p) for (i in 1:n){ Y[i,]=sqrt(sd)*stats::arima.sim(n = p, list(ar = c(0.7, -0.4), ma = c(-0.2, 0.2)), innov=stats::runif(p,min=-sqrt(3), max=sqrt(3))) } } return(list(Y=Y,sdf=sdf,acf=acf)) } #' [vstdct::example1], [vstdct::example2] and [vstdct::example3] generate i.i.d. vectors from a given distribution with different Toeplitz covariance matrices. #' The covariance function \eqn{\sigma} of the Toeplitz covariance matrix of #' \itemize{ #' \item{`example1`: }{has a polynomial decay, \eqn{\sigma(\tau)= sd^2(1+|\tau|)^{-gamma}},} #' \item{`example2`: }{follows an \eqn{ARMA(2,2)} model with coefficients \eqn{(0.7,-0.4,-0.2,0.2)} and innovations variance \eqn{sd^2},} #' \item{`example3`: }{yields a Lipschitz continuous spectral density \eqn{f} that is not differentiable, i.e. \eqn{f(x)= sd^2{|\sin(x+0.5\pi)|^{gamma}+0.45}}} #' } #' @name Data Examples #' @param p vector length #' @param n sample size #' @param sd standard deviation #' @param gamma polynomial decay of covariance function for `example1` resp. exponent for `example3` #' @param family distribution of the simulated data. Available distributions are "`Gaussian`", "`Gamma`", "`Uniform`". The default is "`Gaussian`". #' #' @export #' @examples example3(p=10, n=1, sd=1, gamma=2,family="Gaussian") example3<-function(p, n, sd, gamma,family="Gaussian"){ x=seq(0,1,length=p) sdf=sd^2*(abs(sin(pi*x +0.5*pi))^gamma+0.45) acf=sdf2acf(sdf) Sigma=stats::toeplitz(acf) if (family=="Gaussian") Y=matrix(MASS::mvrnorm(n, mu=numeric(p), Sigma=Sigma),n,p) if (family=="Gamma"){ Y=matrix(NA,n,p) Sigma.sq2=t(chol(Sigma)) for (i in 1:n){ Y[i,]=Sigma.sq2%*%(stats::rgamma(n=p,shape=2,scale=1/sqrt(2))-sqrt(2)) } } if (family=="Uniform"){ Y=matrix(NA,n,p) Sigma.sq2=t(chol(Sigma)) for (i in 1:n){ Y[i,]=Sigma.sq2%*%(stats::runif(n=p,min=-sqrt(3),max=sqrt(3))) } } return(list(Y=Y,sdf=sdf,acf=acf)) }
/scratch/gouwar.j/cran-all/cranData/vstdct/R/simulate_data_fcts.R
# Discrete Cosine Transform matrix # # Computes the Discrete Cosine Transform I (DCT-I) matrix. # param N matrix dimension # return the \code{NxN}-dimensional DCT-I matrix DCT.matrix<-function(N){ seq=seq(0,N-1) arg=(pi*seq/(N-1)) DCT=sapply(seq, function(j){cos(arg*j)}) fac=matrix(1/sqrt(2),N,N) fac[2:(N-1), 2:(N-1)]=1 fac[1,1]=fac[1,N]=fac[N,1]=fac[N,N]=1/2 DCT=DCT*fac*sqrt(2)/sqrt(N-1) return(DCT) } # Sinc Function # # Input: x vector # Output: y sinc(x) sinc <- function(x){ y=sin(x)/x y[is.na(y)]=1 return(y) } # Auto-covariance to Spectral Density Function # # Input: x vector with the auto-covariance function # Output: y vector with the spectral density function acf2sdf <- function(x){ N=length(x) y=2*dtt::dtt(x, type="dct", variant = 1) #*sqrt(2/(N-1)) #y=Re(fft(c(x,x[(N-1):2]))) #the same return (y) } # Spectral Density to Auto-covariance Function # # Input: x vector with the spectral density function # Output: y vector with the auto-covariance function sdf2acf <- function(x){ N=length(x) y=dtt::dtt(x, type="dct", variant = 1)/(N-1) return (y) }
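# Illustrative usage sketch (these helpers are internal; the calls assume the
# dtt package is installed, as used above): the spectral density implied by a
# hypothetical, geometrically decaying covariance function, and the map back.
acf_example <- 0.5^(0:9)             # hypothetical covariance function
sdf_example <- acf2sdf(acf_example)  # spectral density on 10 equi-spaced points
acf_back    <- sdf2acf(sdf_example)  # back to the covariance scale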
/scratch/gouwar.j/cran-all/cranData/vstdct/R/trigonometric_fcts.R
#library(matrixcalc) #check if needed ############################################################## #1.DATA TRANSFORMATION FUNCTIONS ############################################################## # Data Binning # # Each \code{p}-dimensional vector is divided into \code{Te} consecutive bins. The data points in each bin and across the \code{n} samples are summed. # param w \code{nxp} dimensional data matrix # param Te number of bins for data binning. \code{Te} should be smaller than the vector length \code{p}. # param p vector length # param n sample size # return A list containing the following elements: # \itemize{ # \item{\code{m}: }{number of data points per bin, that is \code{m=n*round(p/Te)}} # \item{\code{w.star}: }{\code{Te}-dimensional vector with \code{m} summed data points for each bin. The bin number \code{Te} may be modified to guarantee at least two data points per bin. If \code{p/Te} is not an integer, the vector dimension can be different than \code{Te} and in the last bin more than \code{m} data points may be summed.} # } Binning<-function(w,Te,p,n){ if (Te>p){ stop("Te must be smaller than or equal to p.") } if(sum(dim(w) == c(n,p))!=2){ stop("dimension of w does not match c(n,p)") } if(p/Te == 1 && n>1){ #"n>1 case" m=1 w.star=colSums(w) }else{ #"time series case" m=max(2,round(p/Te)) #m=max(2,floor(p/Te)) start=seq(1,p,by=m) start=start[start<=(p-m+1)] #last bin may contain >= m obs end=c(start[-1]-1,p) w.star=mapply(function(start,end){sum(w[,start:end])},start,end) } return(list(m=m*n,w.star=w.star)) } # Variance Stabilizing Transform # # Applies the variance stabilizing transform for the gamma distribution to a vector. For details see (Brown et al., \emph{"Nonparametric regression in exponential families."}, The Annals of Statistics 38.4 (2010): 2005-2046). # param w.star vector with binned data points # param m number of summed data points per bin # return vector with binned and variance stabilized data, i.e. \code{log(./m)/sqrt(2)} is applied entrywise to \code{w.star}. VST<-function(w.star,m){ y.star=log(w.star/m)/sqrt(2) return(y.star) } #' Data Transformation #' #' Applies the Discrete Cosine I transform, data binning and the variance stabilizing transform to the data. #' @param y \code{nxp} dimensional data matrix #' @param Te number of bins for data binning. \code{Te} should be smaller than the vector length \code{p}. #' @param dct.out logical. If \code{TRUE}, the \code{p}-dim. DCT-I matrix is returned. The default is \code{FALSE}. #' @return A list containing the following elements: #' \itemize{ #' \item{\code{m}: }{number of data points per bin, that is \code{m=n*round(p/Te)}. If \code{p/Te} is not an integer, the first/last bin may contain more than \code{m} data points.} #' \item{\code{y.star}: }{\code{2Te-2} dimensional vector with binned, variance stabilized and mirrored data. The bin number \code{Te} may be modified to guarantee at least two data points per bin. If \code{p/Te} is not an integer, the vector dimension is \code{2*floor(p/round(p/Te))-2}.} #' \item{\code{dct.matrix}: }{\code{p}-dim. 
DCT-I matrix (if \code{dct.out}=TRUE)} #' } #' @export Data.trafo<-function(y,Te,dct.out=FALSE){ n=dim(y)[1] p=dim(y)[2] if (is.null(n)){ p=length(y) n=1 } if (Te>p){ stop("Te must be smaller than or equal to p") } dct.matrix=DCT.matrix(p) w=(y%*%dct.matrix)^2 Bins=Binning(w,Te,p,n) y.star=VST(Bins$w.star,Bins$m) TTe=length(y.star) y.star=c(y.star[2:(TTe-1)],y.star[TTe:1]) if(dct.out){ return(list(m=Bins$m,y.star=y.star,dct.matrix=dct.matrix)) }else{ return(list(m=Bins$m,y.star=y.star)) } } ############################################################## #2.SPECTRAL DENSITY ESTIMATION ############################################################## # Periodic Smoothing Spline # # computes the smoothing spline estimator of order q using the periodic Demmler-Reinsch basis. # param x vector with values in \[0,1\] at which the periodic smoothing spline is evaluated # param y vector with data points on equi-spaced design points # param lambda smoothing parameter # param q penalization order # return f periodic smoothing spline estimator evaluated at x # keyword Internal SSper.estimator<-function(x,y,lambda,q){ if (!(q %in% c(1,2,3,4))){ stop("q can only attain the values 1,2,3,4") } if (lambda<=0){ stop("lambda must be positive") } N=length(y) x.equi=seq(1,N)/N evals=(2*pi*1:N)^(2*q)*sinc(pi*x.equi)^(2*q)/Q_pminus1(2*q-2,x.equi) f=stats::fft(y[N:1])[N:1]/(1+lambda*evals) if(identical(x,seq(1,N)/N)){ f=Re(stats::fft(f[N:1])[N:1])/N }else{ f=c(Re(DR.basis(x,N,q)%*%f/N)) #imaginary part zero by construction } return(list(f=f,evals=evals)) } # Spectral Density Estimator # # Estimates the spectral density from the binned and transformed data points. A periodic smoothing spline is fitted for the log-spectral density \eqn{H(f)}, where \eqn{ H(y)= \left \{ \phi(m/2) + \log \left ( 2y/m\right ) \right\}/\sqrt{2}} and \eqn{\phi} is the digamma function. # The spectral density is then obtained by applying the inverse variance stabilizing transform \eqn{H^{-1}(y)= m\exp \left \{\sqrt{2}y-\phi\left (m/2\right) \right\}/2}. # param x vector with values in \[0,1\] at which the spectral density estimator is evaluated # param y.star vector with binned, variance stabilized and mirrored data # param lambda smoothing parameter # param q penalization order # param m number of data points per bin # return A list containing the following elements: # \itemize{ # \item{\code{f}: }{vector with the spectral density estimator evaluated at \code{x}} # \item{\code{Hf}: }{vector with the log spectral density \eqn{H(f)} evaluated at \code{x}} # } sdf.estimator<-function(x,y.star,lambda,q,m){ if (!(q %in% c(1,2,3,4))){ stop("q can only attain the values 1,2,3,4") } if (lambda<=0){ stop("lambda must be positive") } Hf=SSper.estimator(x,y.star,lambda,q)$f f=m/2*exp(sqrt(2)*Hf-digamma(m/2)) return(list(f=f,Hf=Hf)) } ############################################################## #3.TOEPLITZ MATRIX ESTIMATION ############################################################## #' Toeplitz Covariance and Precision Matrix Estimator #' #' Estimates the Toeplitz covariance matrix, the inverse matrix and the spectral density from a sample of \code{n} i.i.d. \code{p}-dimensional vectors with mean zero. #' @param y \code{nxp} dimensional data matrix #' @param Te number of bins for data binning. #' @param q penalization order, \code{q=1,2,3,4} are available #' @param method method used to select the smoothing parameter of the smoothing spline. 
Available methods are restricted maximum likelihood "\code{ML}", generalized cross-validation "\code{GCV}" and the oracle versions "\code{ML-oracle}", "\code{GCV-oracle}". #' @param f.true Te-dimensional vector with the true spectral density function evaluated at equi-spaced points in \[0,\code{pi}\]. Only required if an oracle method ("\code{ML-oracle}", "\code{GCV-oracle}") is chosen for \code{method}. #' @return A list containing the following elements: #' \itemize{ #' \item{\code{toep}: }{\code{p}-dim. Toeplitz covariance matrix} #' \item{\code{toep.inv}: }{\code{p}-dim. precision matrix} #' \item{\code{acf}: }{\code{p}-dim. vector with the covariance function} #' \item{\code{sdf}: }{\code{p}-dim. vector with the spectral density in the interval \[0,1\]} #' } #' @export #' @examples #' #EXAMPLE 1: Simulate Gaussian ARMA(2,2) #' library(nlme) #' library(MASS) #' p=100 #' n=1 #' Sigma=1.44*corMatrix(Initialize(corARMA(c(0.7, -0.4,-0.2, 0.2),p=2,q=2),data=diag(1:p))) #' Y=matrix(mvrnorm(n, mu=numeric(p), Sigma=Sigma),n,p) #' fit.toep=Toep.estimator(y=Y,Te=10,q=2,method="GCV")$toep #' #' #' #EXAMPLE 2: AQUAPORIN DATA #' data(aquaporin) #' n=length(aquaporin$Y) #' y.train=aquaporin$Y[1:(0.01*n)] #' y.train=y.train-mean(y.train) #' fit.toep=Toep.estimator(y=y.train,Te=10,q=1,method="ML")$toep Toep.estimator<-function(y,Te,q,method,f.true=NULL){ n=dim(y)[1] p=dim(y)[2] if (is.null(n)){ p=length(y) n=1 y=matrix(y,n,p) } if (!(q %in% c(1,2,3,4))){ stop("q can only attain the values 1,2,3,4") } if (Te>p){ stop("Te must be smaller than or equal to p") } if (! method %in% c("GCV","GCV-oracle","ML","ML-oracle")){ stop("method is invalid, choose one of GCV, GCV-oracle, ML, ML-oracle") } Duplicate<-function(x){ r=length(x) return(c(x[2:(r-1)],x[r:1])) } Data=Data.trafo(y,Te) if(method=="GCV") lambda=GCV.opt(Data$y.star,q) if(method=="GCV-oracle"){ if (is.null(f.true)){ stop("true spectral density is missing") } f.true=Duplicate(f.true) log.sdf.true=VST(f.true,m=1) lambda=GCV.opt(y=NULL,q,oracle=TRUE,log.sdf.true,sigma=1/Data$m) } if(method=="ML") lambda=ML.opt(Data$y.star,q) if(method=="ML-oracle"){ if (is.null(f.true)){ stop("true spectral density is missing") } f.true=Duplicate(f.true) log.sdf.true=VST(f.true,m=1) lambda=ML.opt(y=NULL,q,oracle=TRUE,log.sdf.true,sigma=1/Data$m) } xx=seq(0,0.5,length=p) #due to symmetry: evaluation on [0,0.5] sufficient sdf.fit=sdf.estimator(xx,Data$y.star,lambda,q,Data$m)$f acf.fit=sdf2acf(sdf.fit) #DFT approximation to integral #acf=integrate(f.estimator,0,1,Data$y.star,lambda,q,Data$m)[[1]] #R approximation to integral acf.fit.inv=sdf2acf(1/sdf.fit) #DFT approximation to integral return(list(toep=stats::toeplitz(acf.fit),toep.inv=stats::toeplitz(acf.fit.inv),acf=acf.fit,sdf=sdf.fit)) } ################################################################# #4. SMOOTHING PARAMETER SELECTION ################################################################# # Generalized Cross-Validation # # param y N-dim. vector with regression data points # param q penalization order # param oracle logical. If TRUE, then the GCV-oracle smoothing parameter is calculated. # param f.true N-dim. vector with the true regression function evaluated at equi-spaced points. Only required if oracle=TRUE. 
# param sigma variance of the regression data # return lambda estimated smoothing parameter # keyword Internal GCV.opt<-function(y,q,oracle=FALSE,f.true=NULL,sigma=NULL){ GCV<-function(lambda,y,q){ N=length(y) x=seq(1,N)/N evals=(2*pi*1:N)^(2*q)*sinc(pi*x)^(2*q)/Q_pminus1(2*q-2,x) term1=stats::fft(y[N:1])[N:1]*(lambda*evals/(1+lambda*evals))^2 term1=c(y%*%Re(stats::fft(term1[N:1])[N:1])/N) #inverse=TRUE not needed for symmetric vector trace=sum(1/(1+lambda*evals)) score=term1/(N*(1-trace/N)^2) return(score) } GCV.oracle<-function(lambda,f.true,q,sigma){ N=length(f.true) x=seq(1,N)/N evals=(2*pi*1:N)^(2*q)*sinc(pi*x)^(2*q)/Q_pminus1(2*q-2,x) norm.term=stats::fft(f.true[N:1])[N:1]*lambda*evals/(1+lambda*evals) norm.term=Re(stats::fft(norm.term[N:1])[N:1])/N trace=sum(1/(1+lambda*evals)^2) score=(sum(norm.term^2)+trace*sigma)/N return(score) } if(oracle) lambda=stats::optimize(GCV.oracle,interval=c(.Machine$double.eps,1),f.true=f.true,q=q,sigma=sigma,tol=.Machine$double.eps)$minimum #1 else lambda=stats::optimize(GCV,interval=c(.Machine$double.eps,1),y=y,q=q,tol=.Machine$double.eps)$minimum #1 #lambda=uniroot(GCV2,interval=c(.Machine$double.eps,1),y=y,q=q,tol=.Machine$double.eps)$root return(lambda) } # Maximum Likelihood method # # param y N-dim. vector with regression data points # param q penalization order # param oracle logical. If TRUE, then the ML-oracle smoothing parameter is calculated. # param f.true N-dim. vector with the true regression function evaluated at equi-spaced points. Only required if oracle=TRUE. # param sigma variance of the regression data # return estimated smoothing parameter # keyword Internal ML.opt<-function(y,q,oracle=FALSE,f.true=NULL,sigma=NULL){ ML<-function(lambda,y,q){ N=length(y) x=seq(1,N)/N evals=(2*pi*1:N)^(2*q)*sinc(pi*x)^(2*q)/Q_pminus1(2*q-2,x) term1=stats::fft(y[N:1])[N:1]*(1/(1+lambda*evals)-1/(1+lambda*evals)^2) term1=c(y%*%Re(stats::fft(term1[N:1])[N:1])/N) #inverse=TRUE not needed for symmetric vector fac=(sum(1/(1+lambda*evals))-q)/(N-q) term2=stats::fft(y[N:1])[N:1]*(lambda*evals/(1+lambda*evals)) term2=c(y%*%Re(stats::fft(term2[N:1])[N:1])/N) #inverse=TRUE not needed for symmetric vector score=(term1-term2*fac)/N return(score) } ML.oracle<-function(lambda,q,f.true,sigma){ N=length(f.true) x=seq(1,N)/N evals=(2*pi*1:N)^(2*q)*sinc(pi*x)^(2*q)/Q_pminus1(2*q-2,x) term1=stats::fft(f.true[N:1])[N:1]*(1/(1+lambda*evals)-1/(1+lambda*evals)^2) term1=c(f.true%*%Re(stats::fft(term1[N:1])[N:1])/N) #inverse=TRUE not needed for symmetric vector term2=sum(1/(1+lambda*evals)^2) score=(term1-sigma*(term2-q))/N return(score) } if(oracle) lambda=stats::uniroot(ML.oracle,interval=c(.Machine$double.eps,1),f.true=f.true,q=q,sigma=sigma,tol=.Machine$double.eps, extendInt = "yes")$root #1 else lambda=stats::uniroot(ML,interval=c(.Machine$double.eps,1),y=y,q=q,tol=.Machine$double.eps, extendInt = "yes")$root #1 return(lambda) }
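# Illustrative end-to-end sketch (assumes the vstdct package is loaded): simulate
# one long vector with a polynomially decaying covariance function and recover
# its Toeplitz covariance matrix; Te, q and method are example choices, not
# recommendations.
sim <- example1(p = 200, n = 1, sd = 1, gamma = 1.2, family = "Gaussian")
fit <- Toep.estimator(y = sim$Y, Te = 20, q = 2, method = "GCV")
round(rbind(estimated = fit$acf[1:5], true = sim$acf[1:5]), 3)  # compare first lags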
/scratch/gouwar.j/cran-all/cranData/vstdct/R/vst_dct_fcts.R
#' Azure DevOps Account #' #' @details #' For the majority of functions that are within this \code{vsts_account} object, you can get help about the #' query or body parameter with \code{?vsts_<function name>}. #' #' @param user username for the Azure DevOps account #' @param pass password for the Azure DevOps account #' @param domain domain name for the Azure DevOps account #' @param project (optional) project name within the domain of the Azure DevOps account #' @param repo (optional) repository name with the project of the Azure DevOps domain #' #' @docType class #' @format An \code{\link[R6]{R6Class}} generator object #' @keywords data #' #' @examples #' \dontrun{ #' proj <- vsts_account$new( #' "<username>", "<password>", "<domain>", #' "<project>", "<repo>" #' ) #' str(proj) #' } #' #' @export vsts_account <- R6::R6Class( classname = "vsts", public = list( initialize = function(user = NA, pass = NA, domain = NA, project = NULL, repo = NULL) { private$user <- user private$pass <- pass private$auth_key <- private$get_auth_key(user, pass) private$domain <- domain private$projects <- vsts_get_projects(private$domain, private$auth_key, quiet = TRUE)$name if (!is.null(project)) { if (!project %in% private$projects) stop(project, " is not available in selected domain.") private$project <- project private$repos <- vsts_get_repos(private$domain, private$project, private$auth_key, quiet = TRUE)$name } if (!is.null(repo)) { if (is.null(project)) stop("`project` needs to be specified to include repository.") if (!repo %in% private$repos) stop(repo, " is not available in selected project.") private$repo <- repo } }, set_domain = function(dom) { private$domain <- dom proj_df <- vsts_get_projects(private$domain, private$auth_key, quiet = TRUE) private$projects <- proj_df$name invisible(TRUE) }, set_project = function(proj) { if (is.null(private$projects)) { proj_df <- vsts_get_projects(private$domain, private$auth_key, quiet = TRUE) private$projects <- proj_df$name } if (!proj %in% private$projects) stop(proj, " not in available projects.") private$project <- proj repo_df <- vsts_get_repos(private$domain, private$project, private$auth_key, quiet = TRUE) private$repos <- repo_df$name invisible(TRUE) }, set_repo = function(repo) { if (is.null(private$repos)) { repo_df <- vsts_get_repos(private$domain, private$project, private$auth_key, quiet = TRUE) private$repos <- repo_df$name } if (!repo %in% private$repos) stop(repo, " not in available repositories.") private$repo <- repo invisible(TRUE) }, get_projects = function() vsts_get_projects(private$domain, private$auth_key), get_repos = function() { private$proj_check() vsts_get_repos(private$domain, private$project, private$auth_key) }, create_repo = function(repo) { private$proj_check() if (repo %in% private$repos) stop(repo, " already exists in ", private$project, ".") content <- vsts_create_repo(private$domain, private$project, repo, private$auth_key) private$repos <- vsts_get_repos(private$domain, private$project, private$auth_key, quiet = TRUE)$name invisible(content) }, delete_repo = function(repo) { private$proj_check() if (!repo %in% private$repos) stop(repo, " does not exist in ", private$project, ".") content <- vsts_delete_repo(private$domain, private$project, repo, private$auth_key) private$repos <- vsts_get_repos(private$domain, private$project, private$auth_key, quiet = TRUE)$name invisible(content) }, get_commits = function(query = NULL) { private$repo_check() vsts_get_commits(private$domain, private$project, private$repo, 
private$auth_key, query = query) }, get_workitems = function(query = NULL) { vsts_get_workitems(private$domain, private$auth_key, query = query) }, get_workitem = function(id) { vsts_get_workitem(private$domain, private$auth_key, id = id) }, create_workitem = function(item_type, ...) { private$proj_check() vsts_create_workitem(private$domain, private$project, item_type, private$auth_key, ...) }, get_releases = function(query = NULL) { private$proj_check() vsts_get_releases(private$domain, private$project, private$auth_key, query = query) }, get_release = function(id) { private$proj_check() vsts_get_release(private$domain, private$project, id, private$auth_key) }, create_release = function(body = NULL) { private$proj_check() vsts_create_release(private$domain, private$project, private$auth_key, body = body) }, deploy_release = function(release_id, env_id) { private$proj_check() vsts_deploy_release(private$domain, private$project, release = release_id, env = env_id, private$auth_key ) }, get_build_defs = function(query = NULL) { private$proj_check() vsts_get_build_defs(private$domain, private$project, private$auth_key, query = query) }, get_release_defs = function() { private$proj_check() vsts_get_release_defs(private$domain, private$project, private$auth_key) }, custom_command = function(url, verb, body = NULL, query = NULL) { vsts_run_command(url, verb, private$auth_key, body, query) } ), private = list( # Making sure these are not easily editable user = NULL, domain = NULL, project = NULL, repo = NULL, pass = NULL, auth_key = NULL, projects = NULL, repos = NULL, get_auth_key = function(user, pass) vsts_auth_key(user, pass), proj_check = function() { if (is.null(private$project)) stop("Project name must be added to object. Use `$set_project()`") }, repo_check = function() { if (is.null(private$repo)) stop("Repository name must be added to object. Use `$set_repo()`") } ) )
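# Illustrative usage sketch (credentials and names are placeholders): the R6
# object stores the authentication key once and then wraps the vsts_* helpers.
acc <- vsts_account$new("<username>", "<password>", "<domain>")
acc$set_project("<project>")
acc$set_repo("<repo>")
repos   <- acc$get_repos()
commits <- acc$get_commits()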
/scratch/gouwar.j/cran-all/cranData/vstsr/R/account.R
#' Azure DevOps Authentication Key #' #' @description #' Creation of an Azure DevOps authentication key that will be used when running any of the API calls. #' #' @param user username to access Azure DevOps project #' @param pass password to access Azure DevOps project #' #' @return #' An authentication key string in the form of 'Basic <Base 64 of \code{user}:\code{pass}>' #' #' @details #' For more information about authentication, check #' \url{https://docs.microsoft.com/en-us/rest/api/azure/devops/?view=azure-devops-rest-6.1#create-the-request} #' #' @examples #' # Using credentials #' auth_key <- vsts_auth_key("<username>", "<password>") #' #' # Using a PAT token #' auth_key <- vsts_auth_key(NULL, "<token>") #' #' @export vsts_auth_key <- function(user, pass) paste("Basic", RCurl::base64(paste(user, pass, sep = ":")))
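# Illustrative sketch: the returned key is passed as an Authorization header in
# every API call made by this package, e.g. (the domain below is a placeholder):
auth_key <- vsts_auth_key("<username>", "<password>")
httr::GET(
  "https://dev.azure.com/<domain>/_apis/projects?api-version=6.0",
  httr::add_headers(Authorization = auth_key)
)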
/scratch/gouwar.j/cran-all/cranData/vstsr/R/authentication.R
#' Azure DevOps Project Build Definition Information #' #' @description #' These functions will allow you to scrape build definition information from Azure DevOps. #' #' @details #' For more information about the build definition API calls check #' \url{https://docs.microsoft.com/en-us/rest/api/azure/devops/build/definitions/list}. #' #' @param domain The name of the Azure DevOps organization. #' @param project the name of the project in \code{domain} to look at #' @param auth_key authentication key generated by using \code{\link{vsts_auth_key}} #' @param query a list of extra parameters that can be sent to the API call. Check details for access to list #' of options. #' #' @examples #' \dontrun{ #' # Add in own details to get a non-NULL output #' auth_key <- vsts_auth_key("<username>", "<password>") #' vsts_get_build_defs("domain", "project", auth_key) #' } #' #' @rdname vsts_build_def #' @export vsts_get_build_defs <- function(domain, project, auth_key, query = NULL) { uri <- file.path(AZURE_HOME_URL, domain, project, "_apis/build/definitions?api-version=5.0") response <- httr::GET(uri, httr::add_headers(Authorization = auth_key), query = query) if (httr::status_code(response) != 200) { send_failure_message(response, "get build definition list") return(invisible(NULL)) } httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE)$value }
/scratch/gouwar.j/cran-all/cranData/vstsr/R/build.R
#' Azure DevOps Custom API Calls #' #' @description #' For any requirement not currently in place in the \code{vstsr} package, then this function will allow you #' to use the relevant API call without any extra requirements. #' #' For the most part it is just a shell of \code{\link[httr]{VERB}} but will have the \code{auth_key} set up already. #' #' @param url the URI of the API call to run #' @param verb name of the verb to use #' @param auth_key authentication key generated by using \code{\link{vsts_auth_key}} #' @param body check \code{\link[httr]{VERB}} for more details. If the object is a named list, then it will be #' transformed into a JSON string so that can be added to the call. Use #' \url{https://docs.microsoft.com/en-us/rest/api/azure/devops} to find out any required parameter for the body. #' @param query a list of extra parameters that can be sent to the API call. If not required then leave as \code{NULL} #' #' @examples #' \dontrun{ #' auth_key <- vsts_auth_key("<username>", "<password>") #' # Get commits of a repository #' URL <- file.path( #' "https://dev.azure.com", #' domain, #' project, #' "_apis/git/repositories", #' repository_id, #' "commits?api-version=5.0" #' ) #' vsts_run_command(URL, "GET", auth_key) #' } #' #' @export vsts_run_command <- function(url, verb, auth_key, body = NULL, query = NULL) { if (!is.null(body)) { content_body <- jsonlite::toJSON(body, auto_unbox = TRUE) } else { content_body <- NULL } response <- httr::VERB( verb = verb, url = url, config = httr::add_headers(Authorization = auth_key), httr::content_type_json(), query = query, body = content_body ) if (httr::status_code(response) >= 300) { send_failure_message(response, "run custom command") return(invisible(NULL)) } httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE) }
/scratch/gouwar.j/cran-all/cranData/vstsr/R/custom.R
#' Azure DevOps Project Git Repositories #' #' @description #' These functions will allow you to scrape git repository information from Azure DevOps. #' #' @details #' For more information about git repository API calls check #' \url{https://docs.microsoft.com/en-us/rest/api/azure/devops/git/commits/get-commits}. #' #' @param domain The name of the Azure DevOps organization. #' @param project the name of the project in \code{domain} to look at #' @param repo the name of the repository in \code{project} to look at #' @param auth_key authentication key generated by using \code{\link{vsts_auth_key}} #' @param query a list of extra parameters that can be sent to the API call: #' \describe{ #' \item{\code{branch}}{[character] the name of a branch in the repository (cannot combine with \code{commit})} #' \item{\code{commit}}{[character] the id of a commit in the repository (cannot combine with \code{branch})} #' \item{\code{itemPath}}{[character] path of an item in the repository} #' \item{\code{committer}}{[character] name of the person who committed the change} #' \item{\code{author}}{[character] name of the author} #' \item{\code{fromDate}}{[Date] start date to search from} #' \item{\code{toDate}}{[Date] end date to search from} #' } #' #' @examples #' \dontrun{ #' # Add in own details to get a non-NULL output #' auth_key <- vsts_auth_key("<username>", "<password>") #' vsts_get_commits("domain", "project", "repo", auth_key) #' } #' #' @export vsts_get_commits <- function(domain, project, repo, auth_key, query = NULL) { repos <- vsts_get_repos(domain, project, auth_key, quiet = TRUE) repo_id <- repos[repos$name == repo, "id"] if (is.null(repo_id) || length(repo_id) == 0) { cat("Unable to find ", repo, " in ", project, ".\n", sep = "") return(NULL) } uri <- file.path(AZURE_HOME_URL, domain, project, "_apis/git/repositories", repo_id, "commits?api-version=6.0") response <- httr::GET(uri, httr::add_headers(Authorization = auth_key), query = query) if (httr::status_code(response) != 200) { send_failure_message(response, "get commit list") return(NULL) } httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE)$value }
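# Illustrative sketch: restrict the commit listing with the query fields
# documented above (all values here are placeholders).
auth_key <- vsts_auth_key("<username>", "<password>")
commits <- vsts_get_commits(
  "domain", "project", "repo", auth_key,
  query = list(author = "Jane Doe", fromDate = "2023-01-01", toDate = "2023-06-30")
)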
/scratch/gouwar.j/cran-all/cranData/vstsr/R/git.R
#' Azure DevOps Projects #' #' @description #' These functions will allow you to scrape project information from Azure DevOps. #' #' @details #' For more information about project API calls check #' \url{https://docs.microsoft.com/en-us/rest/api/azure/devops/core/Projects}. #' #' @param domain The name of the Azure DevOps organization. #' @param auth_key Authentication key generated by using \code{\link{vsts_auth_key}} #' @param quiet logical whether want general running information from printing. Any issue with the API call will #' still show up if set to \code{TRUE} #' #' @rdname vsts_project #' @export vsts_get_projects <- function(domain, auth_key, quiet = FALSE) { uri <- file.path(AZURE_HOME_URL, domain, "_apis/projects?api-version=6.0") response <- httr::GET(uri, httr::add_headers(Authorization = auth_key)) if (httr::status_code(response) != 200) { send_failure_message(response, "get project list") return(NULL) } content <- httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE)$value if (!quiet) cat("Available projects:", paste(content$name, collapse = ", "), "\n") content }
/scratch/gouwar.j/cran-all/cranData/vstsr/R/project.R
#' Azure DevOps Project Release Definition Information #' #' @description #' These functions will allow you to scrape release definition information from Azure DevOps. #' #' @details #' For more information about release definition API calls check #' \url{https://docs.microsoft.com/en-us/rest/api/azure/devops/release/releases}. #' #' @param domain The name of the Azure DevOps organization. #' @param project the name of the project in \code{domain} to look at #' @param auth_key authentication key generated by using \code{\link{vsts_auth_key}} #' @param quiet logical whether want general running information from printing. Any #' issue with the API call will still show up if set to \code{TRUE} #' #' @examples #' \dontrun{ #' # Add in own details to get a non-NULL output #' auth_key <- vsts_auth_key("<username>", "<password>") #' vsts_get_release_defs("domain", "project", auth_key) #' } #' #' @rdname vsts_release_def #' @export vsts_get_release_defs <- function(domain, project, auth_key, quiet = FALSE) { uri <- file.path(AZURE_VSRM_URL, domain, project, "_apis/release/definitions?api-version=6.0") response <- httr::GET(uri, httr::add_headers(Authorization = auth_key)) if (httr::status_code(response) != 200) { send_failure_message(response, "get release definition list") return(NULL) } content <- httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE)$value if (!quiet) cat("Available release definitions:", paste(content$name, collapse = ", "), "\n") content } #' Azure DevOps Project Release Information #' #' @description #' These functions will allow you to create releases from Azure DevOps. #' #' @details #' The \code{artifacts} object within the body contains two items: #' \itemize{ #' \item{alias}{[character] Sets alias of artifact.} #' \item{instanceReference}{[list] Sets instance reference of artifact. e.g. for build artifact it is build number.} #' } #' #' For more information about release API calls check #' \url{https://docs.microsoft.com/en-us/rest/api/vsts/release/releases}. #' #' @param domain The name of the Azure DevOps organization. #' @param project the name of the project in \code{domain} to look at #' @param auth_key authentication key generated by using \code{\link{vsts_auth_key}} #' @param body a list of extra parameters that can need to be sent to the API call (* mandatory): #' \describe{ #' \item{\code{artifacts *}}{[list] Sets list of artifact to create a release. 
Check \code{Details} for more information.} #' \item{\code{definitionId *}}{[integer] Sets definition Id to create a release.} #' \item{\code{description *}}{[character] Sets description to create a release.} #' \item{\code{isDraft}}{[logical] Sets 'true' to create release in draft mode, 'false' otherwise.} #' \item{\code{manualEnvironments}}{[character] Sets list of environments to manual as condition.} #' \item{\code{properties}}{[list] The class represents a property bag as a collection of key-value pairs.} #' \item{\code{reason}}{[character] Sets reason to create a release.} #' } #' #' @examples #' \dontrun{ #' # Add in own details to get a non-NULL output #' auth_key <- vsts_auth_key("<username>", "<password>") #' art_list <- list( #' list(alias = "Art1", instanceReference = list(id = 1)), #' list(alias = "Art2", instanceReference = list(id = 2)) #' ) #' body <- list( #' definitionId = 1, description = "R API Release", #' artifacts = I(art_list) #' ) #' vsts_create_release("domain", "project", auth_key, body) #' } #' @export vsts_create_release <- function(domain, project, auth_key, body) { uri <- file.path(AZURE_VSRM_URL, domain, project, "_apis/release/releases?api-version=6.0") request_body <- jsonlite::toJSON(body, auto_unbox = TRUE) response <- httr::POST(uri, httr::add_headers(Authorization = auth_key), httr::content_type("application/json"), body = request_body ) if (httr::status_code(response) >= 300) { send_failure_message(response, paste0("create release definition #", body[["definitionId"]])) return(NULL) } httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE) } #' Azure DevOps Project Release Information #' #' @description #' These functions will allow you to scrape releases from Azure DevOps. #' #' @details #' For more information about release API calls check #' \url{https://docs.microsoft.com/en-us/rest/api/azure/devops/release/Releases}. #' #' @param domain The name of the Azure DevOps organization. 
#' @param project the name of the project in \code{domain} to look at #' @param auth_key authentication key generated by using \code{\link{vsts_auth_key}} #' @param query a list of extra parameters that can be sent to the API call #' #' @examples #' \dontrun{ #' # Add in own details to get a non-NULL output #' auth_key <- vsts_auth_key("<username>", "<password>") #' vsts_get_releases("domain", "project", auth_key) #' } #' #' @rdname vsts_get_release #' @export vsts_get_releases <- function(domain, project, auth_key, query = NULL) { uri <- file.path(AZURE_VSRM_URL, domain, project, "_apis/release/releases?api-version=6.0") response <- httr::GET(uri, httr::add_headers(Authorization = auth_key), query = query) if (httr::status_code(response) != 200) { send_failure_message(response, "get releases list") return(NULL) } content <- httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE)$value content } #' @param release Release ID #' #' @rdname vsts_get_release #' @export vsts_get_release <- function(domain, project, release, auth_key) { uri <- file.path(AZURE_VSRM_URL, domain, project, "_apis/release/releases", paste0(release, "?api-version=6.0")) response <- httr::GET(uri, httr::add_headers(Authorization = auth_key), httr::content_type("application/json")) if (httr::status_code(response) != 200) { send_failure_message(response, "get release") return(NULL) } httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE) } #' Azure DevOps Project Release Environment Information #' #' @description #' These functions will allow you to run release environment tasks from Azure DevOps. #' #' @details #' For more information about release environment API calls check #' \url{https://docs.microsoft.com/en-us/rest/api/azure/devops/release/releases/update-release-environment}. #' #' @param domain The name of the Azure DevOps organization. #' @param project the name of the project in \code{domain} to look at #' @param release the release ID of the release #' @param env the release environment ID to release on #' @param auth_key authentication key generated by using \code{\link{vsts_auth_key}} #' #' @examples #' \dontrun{ #' # Add in own details to get a non-NULL output #' auth_key <- vsts_auth_key("<username>", "<password>") #' vsts_deploy_release("domain", "project", 1, 1, auth_key) #' } #' #' @rdname vsts_release_env #' @export vsts_deploy_release <- function(domain, project, release, env, auth_key) { uri <- file.path( AZURE_VSRM_URL, domain, project, "_apis/Release/releases", release, "environments", paste0(env, "?api-version=5.0-preview.6") ) request_body <- jsonlite::toJSON(list(status = "inProgress"), auto_unbox = TRUE) response <- httr::PATCH( uri, httr::add_headers(Authorization = auth_key), httr::content_type("application/json"), body = request_body ) if (httr::status_code(response) != 200) { send_failure_message(response, "deploy release") return(NULL) } cat("Deployment of release has started.\n") httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE) }
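# A hedged end-to-end sketch tying the helpers in this file together: look up a
# release definition, create a release from it, then start a deployment. The
# organisation/project names, IDs, and artifact alias are hypothetical, and the
# shape of the parsed response (e.g. release$environments) is assumed, not guaranteed.
if (interactive()) {
  auth_key <- vsts_auth_key("<username>", "<password>")
  defs <- vsts_get_release_defs("my-org", "my-project", auth_key)
  body <- list(
    definitionId = defs$id[1],
    description = "Release created from R",
    artifacts = I(list(list(alias = "Build", instanceReference = list(id = 123))))
  )
  release <- vsts_create_release("my-org", "my-project", auth_key, body)
  # Kick off deployment to the first environment of the new release
  vsts_deploy_release("my-org", "my-project", release$id, release$environments$id[1], auth_key)
}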
# ---- end of vstsr/R/release.R ----
#' Azure DevOps Project Repositories #' #' @description #' These functions will allow you to scrape and manage Git repository information in Azure DevOps. #' #' @details #' For more information about repository API calls check #' \url{https://docs.microsoft.com/en-us/rest/api/azure/devops/git}. #' #' @param domain The name of the Azure DevOps organization #' @param project Project ID or project name #' @param repo the name of the repository in \code{project} to look at. Leave as \code{""} to get all repositories #' within all projects of the domain #' @param auth_key authentication key generated by using \code{\link{vsts_auth_key}} #' @param quiet logical indicating whether to suppress general progress messages from printing. Any issue with the API call will #' still show up even if set to \code{TRUE} #' #' @examples #' \dontrun{ #' # Add in own details to get a non-NULL output #' auth_key <- vsts_auth_key("<username>", "<password>") #' #' # Get repo list #' vsts_get_repos("domain", "project", auth_key) #' #' # Create new repo #' vsts_create_repo("domain", "project", "repo", auth_key) #' #' # Delete existing repo #' vsts_delete_repo("domain", "project", "repo", auth_key) #' } #' #' @rdname vsts_repo #' @export vsts_get_repos <- function(domain, project, auth_key, quiet = FALSE) { uri <- file.path(AZURE_HOME_URL, domain, project, "_apis/git/repositories?api-version=6.0") response <- httr::GET(uri, httr::add_headers(Authorization = auth_key)) if (httr::status_code(response) != 200) { send_failure_message(response, "get repos list") return(NULL) } content <- httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE)$value if (!quiet) cat("Available repositories:", paste(content$name, collapse = ", "), "\n") content } #' @rdname vsts_repo #' @export vsts_create_repo <- function(domain, project, repo, auth_key, quiet = FALSE) { uri <- file.path(AZURE_HOME_URL, domain, project, "_apis/git/repositories?api-version=6.0") response <- httr::POST( uri, httr::add_headers(Authorization = auth_key), httr::content_type_json(), body = jsonlite::toJSON(list(name = repo), auto_unbox = TRUE) ) if (httr::status_code(response) != 201) { send_failure_message(response, "create repository") return(NULL) } if (!quiet) cat(repo, "repository has been created in", project, "\n") httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE) } #' @rdname vsts_repo #' @export vsts_delete_repo <- function(domain, project, repo, auth_key, quiet = FALSE) { repos <- vsts_get_repos(domain, project, auth_key, quiet = TRUE) repo_id <- repos[repos$name == repo, "id"] if (is.null(repo_id) || length(repo_id) == 0) { cat("Unable to find", repo, "in", project, "\n") return(NULL) } uri <- file.path(AZURE_HOME_URL, domain, project, "_apis/git/repositories", paste0(repo_id, "?api-version=6.0")) response <- httr::DELETE(uri, httr::add_headers(Authorization = auth_key)) if (httr::status_code(response) != 204) { send_failure_message(response, "delete repository") return(NULL) } if (!quiet) cat(repo, "repository has been deleted from", project, "\n") return(TRUE) }
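# A minimal sketch of the repository lifecycle using the helpers above: create a
# repository, confirm it appears in the listing, then remove it again. The
# organisation, project, and repository names are placeholders.
if (interactive()) {
  auth_key <- vsts_auth_key("<username>", "<password>")
  vsts_create_repo("my-org", "my-project", "sandbox-repo", auth_key)
  repos <- vsts_get_repos("my-org", "my-project", auth_key, quiet = TRUE)
  "sandbox-repo" %in% repos$name
  vsts_delete_repo("my-org", "my-project", "sandbox-repo", auth_key)
}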
# ---- end of vstsr/R/repository.R ----
AZURE_HOME_URL <- "https://dev.azure.com" AZURE_VSRM_URL <- "https://vsrm.dev.azure.com" send_failure_message <- function(response, message = NULL) { cat(httr::http_condition(response, "message", message)$message, "\n") content <- httr::content(response) if (!is.null(content[["message"]])) { cat(content$message, "\n") } invisible(NULL) }
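# A hedged sketch of how the exported wrappers use these helpers: build a URI from
# one of the base URLs above, make the request, and route any non-success response
# through send_failure_message(). The organisation/project names are placeholders;
# the endpoint mirrors the one used by vsts_get_repos().
if (interactive()) {
  auth_key <- vsts_auth_key("<username>", "<password>")
  uri <- file.path(AZURE_HOME_URL, "my-org", "my-project",
                   "_apis/git/repositories?api-version=6.0")
  response <- httr::GET(uri, httr::add_headers(Authorization = auth_key))
  if (httr::status_code(response) != 200) {
    send_failure_message(response, "list repositories")
  }
}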
# ---- end of vstsr/R/utils.R ----
#' vstsr: A package connecting R and Azure DevOps #' #' This package takes the Azure DevOps API calls and wraps them in easy-to-use R functions. This #' includes looking at projects, repositories, work items, and eventually sections such as builds and releases. #' #' For more information about Azure DevOps APIs, take a look at \url{https://docs.microsoft.com/en-us/rest/api/vsts} #' #' @section Projects: #' This looks at the projects available in an Azure DevOps instance. This lets you change between any project that you might #' have access to. #' #' @section Repositories: #' This looks primarily at the Git repositories available, and whether you want to create a new repository or delete an existing one. #' #' @section Work Items: #' This will track any existing work item for a project, and also gives the ability to create new work items for a project. Useful when working #' within a development team to automate the creation of bug work items whenever a particular R error occurs. #' #' @section Releases: #' This will look at the releases available for a project, both the definitions and the actual releases. There is the ability to #' deploy the created releases to a new environment. #' #' @section Custom: #' For any API service not predefined in the package, it is always possible to run \code{\link{vsts_run_command}}, which #' will work with any enabled Azure DevOps API call. #' #' @docType package #' @name vstsr NULL #' @import R6 #' @import httr #' @importFrom RCurl base64 #' @importFrom jsonlite toJSON fromJSON
# ---- end of vstsr/R/vstsr-package.R ----
#' Azure DevOps Project Get Work Items #' #' @description #' These functions will allow you to scrape work item information from a particular Azure DevOps project. #' #' @details #' For more information about work item API calls check #' \url{https://docs.microsoft.com/en-us/rest/api/azure/devops/wit/work-items}. #' #' @param domain The name of the Azure DevOps organization. #' @param project Project ID or project name #' @param auth_key authentication key generated by using \code{\link{vsts_auth_key}} #' @param id ID of the work item to retrieve #' @param query a list of extra parameters that can be sent to the API call: #' \describe{ #' \item{\code{ids}}{[character] a comma-separated list of up to 200 IDs of the work items to get} #' \item{\code{fields}}{[character] a comma-separated list of up to 100 fields to get with each work item. #' If not specified, all fields with values are returned. Calculated fields such as Attached File Count must be specifically #' queried for using this parameter.} #' \item{\code{asOf}}{[Date] gets the work items as they existed at this time} #' \item{\code{ErrorPolicy}}{[character] determines if the call will throw an error when encountering a work item (default behavior) #' that doesn't exist (\code{throw}) or simply omit it (\code{omit})} #' } #' #' @rdname vsts_get_wk #' @export vsts_get_workitems <- function(domain, project, auth_key, id, query = list()) { uri <- file.path(AZURE_HOME_URL, domain, project, "_apis/wit/workitemsbatch?api-version=6.0") response <- httr::POST( uri, httr::add_headers(Authorization = auth_key), httr::content_type_json(), query = query ) if (httr::status_code(response) >= 300) { send_failure_message(response, "get work items list") return(NULL) } httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE)$value } #' @rdname vsts_get_wk #' @export vsts_get_workitem <- function(domain, project, auth_key, id) { uri <- file.path(AZURE_HOME_URL, domain, project, "_apis/wit/workitems", paste0(id, "?api-version=6.0")) response <- httr::GET(uri, httr::add_headers(Authorization = auth_key)) if (httr::status_code(response) != 200) { send_failure_message(response, paste0("get work item #", id)) return(NULL) } httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE) } #' Azure DevOps Project Work Items #' #' @description #' These functions will allow you to scrape work item information from a particular Azure DevOps project. #' #' @details #' For more information about work item API calls check #' \url{https://docs.microsoft.com/en-us/rest/api/azure/devops/wit/work-items}. #' #' @param domain The name of the Azure DevOps organization. #' @param project the name of the project in \code{domain} to look at #' @param item_type the type of work item to be created #' @param auth_key authentication key generated by using \code{\link{vsts_auth_key}} #' @param ... arguments passed to \code{\link{vsts_get_workitem_fields}} #' #' @rdname vsts_create_wk #' @export vsts_create_workitem <- function(domain, project, item_type, auth_key, ...) { item_type_info <- vsts_get_itemtypes(domain, project, auth_key) item_types <- item_type_info$name item_type <- item_types[match(tolower(item_type), tolower(item_types))] if (is.na(item_type)) { cat("item_type not available for project. Select from:", paste(item_types, collapse = ", "), "\n") return(NULL) } request_info <- vsts_get_workitem_fields(...) 
request_body <- jsonlite::toJSON(request_info, auto_unbox = TRUE) uri <- file.path(AZURE_HOME_URL, domain, project, "_apis/wit/workitems", paste0("$", item_type, "?api-version=4.1")) response <- httr::POST( utils::URLencode(uri), httr::add_headers(Authorization = auth_key), httr::content_type("application/json-patch+json"), body = request_body ) if (httr::status_code(response) != 200) { send_failure_message(response, paste("add", item_type, "to", project)) return(NULL) } httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE) } vsts_get_itemtypes <- function(domain, project, auth_key) { uri <- file.path(AZURE_HOME_URL, domain, project, "_apis/wit/workitemtypes?api-version=4.1") response <- httr::GET(uri, httr::add_headers(Authorization = auth_key)) if (httr::status_code(response) != 200) { send_failure_message(response, "get work item types") return(NULL) } httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE)$value } #' Azure DevOps Work Item Fields #' #' @description #' This contains all the fields required of any work item in a visual studio project and helps #' add/rename the fields of the selected work item. #' #' @details #' For more information about work item fields API calls check #' \url{https://docs.microsoft.com/en-us/rest/api/azure/devops/wit/Fields}. #' #' @param System.Title [character] title of the Azure DevOps work item #' @param System.Description [character] description of the Azure DevOps work item #' @param System.TeamProject [character] name of the Azure DevOps project #' @param System.AreaPath [character] path of the Azure DevOps work item #' @param System.IterationPath [character] name of the Azure DevOps iteration path #' @param Microsoft.VSTS.Common.Priority [integer] priority of the work item - 1 to 4 #' @param ... other fields that might have been missed out originally #' #' @export vsts_get_workitem_fields <- function(System.Title, System.Description, System.TeamProject, System.AreaPath, System.IterationPath, Microsoft.VSTS.Common.Priority, ...) { field_list <- as.list(match.call()) field_list <- field_list[-1] data.frame( op = "add", path = file.path("/fields", names(field_list)), from = NA, value = unlist(field_list, use.names = FALSE) ) }
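# A hedged sketch of creating a work item with the helpers above:
# vsts_get_workitem_fields() builds the JSON-patch data frame of field operations,
# which vsts_create_workitem() posts to the project. The organisation/project names
# and field values are hypothetical.
if (interactive()) {
  auth_key <- vsts_auth_key("<username>", "<password>")
  # Inspect the JSON-patch operations that would be sent
  vsts_get_workitem_fields(
    System.Title = "Automated bug report",
    System.Description = "Error captured from an R session",
    Microsoft.VSTS.Common.Priority = 2
  )
  # Create the work item itself (field arguments are forwarded via ...)
  vsts_create_workitem(
    "my-org", "my-project", "Bug", auth_key,
    System.Title = "Automated bug report",
    System.Description = "Error captured from an R session",
    Microsoft.VSTS.Common.Priority = 2
  )
}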
# ---- end of vstsr/R/workitem.R ----
#' Data Frame to HTML Function #' #' This function takes a data frame or matrix with column names and outputs an HTML table version of that data frame. #' #' This function is designed to feed HTML versions of variable tables to \code{vtable()}, \code{sumtable()}, and \code{labeltable()}. #' #' Multi-column cells are supported. Set the cell's contents to \code{"content_MULTICOL_c_5"} where "content" is the content of the cell, "c" is the cell's alignment (l, c, r), and 5 is the number of columns to span. Then fill in the cells that need to be deleted to make room with "DELETECELL". #' #' If the first column and row begins with the text "HEADERROW", then the first row will be put above the column names. #' #' @param data Data set; accepts any format with column names. #' @param out Determines where the completed table is sent. Set to \code{"browser"} to open HTML file in browser using \code{browseURL()}, \code{"viewer"} to open in RStudio viewer using \code{viewer()}, if available, or \code{"htmlreturn"} to return the HTML code. Defaults to \code{"viewer"} if RStudio is running and \code{"browser"} if it isn't. #' @param anchor Character variable to be used to set an \code{<a name>} tag for the table. #' @param file Saves the completed variable table file to HTML with this filepath. May be combined with any value of \code{out}. #' @param note Table note to go after the last row of the table. #' @param note.align Alignment of table note, l, r, or c. #' @param col.width Vector of page-width percentages, on 0-100 scale, overriding default column widths in HTML table. Must have a number of elements equal to the number of columns in the resulting table. #' @param col.align Vector of 'left', 'right', 'center', etc. to be used with the HTML table text-align attribute in each column. If you want to get tricky, you can add a \code{";"} afterwards and keep putting in whatever CSS attributes you want. They will be applied to the whole column. #' @param row.names Flag determining whether or not the row names should be included in the table. Defaults to \code{FALSE}. #' @param no.escape Vector of column indices for which special characters should not be escaped (perhaps they include markup text of their own). 
#' @examples #' #' if(interactive()) { #' df <- data.frame(var1 = 1:4,var2=5:8,var3=c('A','B','C','D'), #' var4=as.factor(c('A','B','C','C')),var5=c(TRUE,TRUE,FALSE,FALSE)) #' dftoHTML(df,out="browser") #' } #' #' @export dftoHTML <- function(data,out=NA,file=NA,note = NA,note.align = 'l',anchor=NA,col.width=NA,col.align=NA,row.names=FALSE,no.escape = NA) { if (is.null(colnames(data))) { stop('Requires data with variable names or column names.') } if (!is.na(file) & !is.character(file)) { stop('Incorrect file name.') } if (min(is.na(col.width)) == 0 & (!is.vector(col.width) | !is.numeric(col.width) | sum(is.na(col.width)) > 0)) { stop('col.width must be a numeric vector with no missing values.') } if (min(is.na(col.width)) == 0 & (max(col.width) > 100 | min(col.width) < 0)) { stop('Elements of col.width must be between 0 and 100.') } if (!is.logical(row.names)) { stop('The row.names option must be TRUE or FALSE.') } #If row.names = TRUE, the row names must be included as their own column if (row.names==TRUE) { data <- cbind(row.names(data),data) names(data)[1] <- "Row Names" } #This assumes we work with characters for (i in 1:ncol(data)) { data[[i]] <- as.character(data[[i]]) } #Put in the note if (!is.na(note)) { data[nrow(data)+1,] <- c(paste0(note,'_MULTICOL_',note.align,'_all'), rep('DELETECELL',ncol(data)-1)) } #Set default column widths if (identical(col.width, NA)) { col.width <- rep(100/dim(data)[2],dim(data)[2]) } #Set default column align if (identical(col.align, NA)) { col.align <- rep('left',dim(data)[2]) } #Turn column widths to rounded characters col.width <- as.character(round(as.numeric(col.width,2))) #Combine column widths and aligns to form a style argument style <- paste0('width:',col.width,'%; text-align:',col.align) #Escape characters for (i in (1:ncol(data))[!(1:ncol(data) %in% no.escape)]) { data[[i]] <- as.character(data[[i]]) data[[i]] <- gsub('\\&','\\&amp',data[[i]]) data[[i]] <- gsub('<','\\&lt',data[[i]]) data[[i]] <- gsub('>','\\&gt',data[[i]]) } cellprocess <- function(x,celltype,style,maxall) { if (grepl('_MULTICOL_',x)) { #Split into the text and arguments spl <- strsplit(x,'_MULTICOL_') mcargs <- strsplit(spl[[1]][2],'_') #If it's "all", make it all the following DELETECELLs if (mcargs[[1]][2] == 'all') { mcargs[[1]][2] <- as.character(maxall) } align <- ifelse(mcargs[[1]][1] == 'l','left', ifelse(mcargs[[1]][1] == 'r','right', ifelse(mcargs[[1]][1] == 'c','center','oops'))) if (align == 'oops') { stop("Unsupported multi-column alignment used. Use l, r, or c.") } #And construct the multicol x <- paste0('<',celltype, ' colspan = \"',mcargs[[1]][2],'\"', ' style = \"text-align: ',align,'\">', spl[[1]][1],'</',celltype,'>') } else { x <- paste0('<',celltype, ' style = \"',style,'\">', x,'</',celltype,'>') } return(x) } # Do this separately so as to allow for multicolumns rowprocess <- function(x,celltype) { x <- unname(x) x <- as.character(x) x[is.na(x)] <- '' rowstyle <- style[x != 'DELETECELL'] # How many DELETECELLs follow each cell? 
Necessary for MULTICOL_X_all # Only bother if we have DELETECELLs if (any(x == 'DELETECELL')) { rl <- rle(x) #Start with 1s and only override if you are right next to a DELETECELL maxall <- rep(1,length(x)) #Add 1 because we want to include both DELETECELLs and the original multicol maxall[which(x != 'DELETECELL' & c(utils::tail(x,-1) == 'DELETECELL',FALSE))] <- rl$lengths[rl$values == 'DELETECELL'] + 1 maxall <- maxall[x != 'DELETECELL'] } else { maxall <- rep(0,length(x)) } x <- x[x != 'DELETECELL'] x <- sapply(1:length(x), function(y) cellprocess(x[y],celltype,rowstyle[y],maxall[y])) return(paste('<tr>',paste(x, collapse = ''),'</tr>\n',sep ='')) } #Begin table.html table.html <- '<table>' #Add an anchor if there is one if (!is.na(anchor)) { table.html <- paste0(table.html,'<a name = \"',anchor,'\">') } #Get the column headers heads <- colnames(data) headrow <- rowprocess(heads,'th') #Header row #Check for a secondary header row if (substr(data[1,1],1,9) == 'HEADERROW') { data[1,1] <- substring(data[1,1],10) hrow <- rowprocess(data[1,],'th') data <- data[2:nrow(data),] headrow <- paste(hrow,headrow) } #Convert rows of data to LaTeX rows <- apply(data, 1, function(x) rowprocess(x,'td')) rows <- paste(rows, collapse = '') #Then finally, we take those <td>DATA</td><td>DATA</td> strings, wrap them #in <tr> and </tr>, and finally stick them all together to make the bulk of our table table.html <- paste0(table.html, headrow, rows, '</table>') ####### APPLICATION OF FILE OPTION if (!is.na(file)) { #If they forgot a file extension, fill it in if (!grepl("\\.htm",file)) { file <- paste(file,'.html',sep='') } filepath <- file.path(file) #Create temporary html file writeLines(table.html,filepath) } #For better evaluating if statements if (is.na(out)) { out = '' } ####### APPLICATION OF OUT OPTION #If the plan is to produce a viewable HTML, create it if (out == 'viewer' | out == 'browser' | out == '') { #Get temporary dirpath tempDir <- tempfile() #Create temporary directory dir.create(tempDir) #Get temporary filepath htmlpath <- file.path(tempDir,'dftoHTML.html') #Create temporary html file writeLines(table.html,htmlpath) } #Either print the variable table to the help window #or return a variable table to the screen, as desired if (Sys.getenv('RSTUDIO')=='1' & (out == 'viewer' | out == '')) { rstudioapi::viewer(htmlpath) } else if (Sys.getenv('RSTUDIO')=='' & out == 'viewer') { stop('out = viewer is not a valid option if RStudio is not running.') } else if ((Sys.getenv('RSTUDIO')=='' & out == '') | (out == 'browser')) { utils::browseURL(htmlpath) } else if (out == 'htmlreturn') { return(table.html) } else { stop('Unrecognized value of out. Set to \"viewer\", \"browser\", \"htmlreturn\", or leave blank.') } } #' Data Frame to LaTeX Function #' #' This function takes a data frame or matrix with column names and outputs a lightly-formatted LaTeX table version of that data frame. #' #' This function is designed to feed LaTeX versions of variable tables to \code{vtable()}, \code{sumtable()}, and \code{labeltable()}. #' #' Multi-column cells are supported. Wrap the cell's contents in a \code{multicolumn} tag as normal, and then fill in any cells that need to be deleted to make room for the multi-column cell with "DELETECELL". Or use the MULTICOL syntax of \code{dftoHTML}, that works too. #' #' If the first column and row begins with the text "HEADERROW", then the first row will be put above the column names. #' #' @param data Data set; accepts any format with column names. 
#' @param file Saves the completed table to LaTeX with this filepath. #' @param fit.page uses a LaTeX resizebox to force the table to a certain width. Often \code{'\\textwidth'}. #' @param frag Set to TRUE to produce only the LaTeX table itself, or FALSE to produce a fully buildable LaTeX. Defaults to TRUE. #' @param title Character variable with the title of the table. #' @param note Table note to go after the last row of the table. #' @param note.align Set the alignment for the multi-column table note. Usually "l", but if you have a long note you might want to set it with "p{}" #' @param anchor Character variable to be used to set a label tag for the table. #' @param align Character variable with standard LaTeX formatting for alignment, for example \code{'lccc'}. You can also use this to force column widths with \code{p} in standard LaTeX style. Defaults to the first column being left-aligned and all others centered. Be sure to escape special characters, in particular backslashes (i.e. \code{p{.25\\\\textwidth}} instead of \code{p{.25\\textwidth}}). #' @param row.names Flag determining whether or not the row names should be included in the table. Defaults to \code{FALSE}. #' @param no.escape Vector of column indices for which special characters should not be escaped (perhaps they include markup text of their own). #' @examples #' df <- data.frame(var1 = 1:4,var2=5:8,var3=c('A','B','C','D'), #' var4=as.factor(c('A','B','C','C')),var5=c(TRUE,TRUE,FALSE,FALSE)) #' dftoLaTeX(df, align = 'ccccc') #' #' @export dftoLaTeX <- function(data,file=NA,fit.page = NA, frag=TRUE,title=NA,note=NA,note.align='l',anchor=NA,align=NA,row.names=FALSE,no.escape = NA) { if (is.null(colnames(data))) { stop('Requires data with variable names or column names.') } if (!is.na(file) & !is.character(file)) { stop('Incorrect file name.') } if (!is.na(align) & (!is.character(align) | length(align) > 1)) { stop('Align must be a single character variable.') } if (!is.logical(row.names)) { stop('The row.names option must be TRUE or FALSE.') } # tibble 3.0.0 and <3.0.0 each break on different code if ('tbl_df' %in% class(data)) { data <- as.data.frame(data) } #If row.names = TRUE, the row names must be included as their own column if (row.names==TRUE) { data <- cbind(row.names(data),data) names(data)[1] <- "Row Names" } #Work with everything as strings for (i in 1:ncol(data)) { data[[i]] <- as.character(data[[i]]) } #Defaults if (is.na(align)) { align <- paste(rep('l',ncol(data)),collapse = '') } multicoller <- function(x,maxall) { if (grepl('_MULTICOL_',x)) { #Split into the text and arguments spl <- strsplit(x,'_MULTICOL_') mcargs <- strsplit(spl[[1]][2],'_') #If it's "all", make it all the columns if (mcargs[[1]][2] == 'all') { mcargs[[1]][2] <- as.character(maxall) } #And construct the multicol x <- paste0('\\multicolumn{',mcargs[[1]][2],'}{',mcargs[[1]][1],'}{',spl[[1]][1],'}') } return(x) } # Process multicols multicol.row <- function(x) { x <- as.character(x) x[is.na(x)] <- '' # How many DELETECELLs follow each cell? 
Necessary for MULTICOL_X_all # Only bother if we have DELETECELLs if (any(x == 'DELETECELL')) { rl <- rle(x) #Start with 1s and only override if you are right next to a DELETECELL maxall <- rep(1,length(x)) #Add 1 because we want to include both DELETECELLs and the original multicol maxall[which(x != 'DELETECELL' & c(utils::tail(x,-1) == 'DELETECELL',FALSE))] <- rl$lengths[rl$values == 'DELETECELL'] + 1 } else { maxall <- rep(0,length(x)) } x <- sapply(1:length(x), function(j) multicoller(x[j],maxall[j])) return(x) } for (i in 1:nrow(data)) { data[i,] <- multicol.row(as.character(data[i,])) } #Escape characters (Do this after multicol since that has _) for (i in (1:ncol(data))[!(1:ncol(data) %in% no.escape)]) { for (char in c('\\&','\\%','\\$','\\#','\\_')) { data[[i]] <- gsub(char,paste0('\\',char),data[[i]]) } data[[i]] <- gsub('\\~','\\\\textasciitilde',data[[i]]) data[[i]] <- gsub('\\^','\\\\textasciicircum',data[[i]]) } if (!is.na(note)) { for (char in c('\\&','\\%','\\$','\\#','\\_')) { note <- gsub(char,paste0('\\',char),note) } note <- gsub('\\~','\\\\textasciitilde',note) note <- gsub('\\^','\\\\textasciicircum',note) } #Begin table latex code by opening the table table.latex <- '\\begin{table}[!htbp] \\centering \\renewcommand*{\\arraystretch}{1.1}' #Add a caption if there is one if (!is.na(title)) { table.latex <- paste0(table.latex,'\\caption{',title,'}') } #Add an anchor if there is one if (!is.na(anchor)) { table.latex <- paste0(table.latex,'\\label{',anchor,'}') } # If there's a resizebox if (!is.na(fit.page)) { table.latex <- paste0(table.latex, '\\resizebox{',fit.page,'}{!}{') } #Start the tabular table.latex <- paste0(table.latex,'\n\\begin{tabular}{',align,'}\n\\hline\n\\hline\n') #Get the column headers heads <- colnames(data) #Process heads <- multicol.row(heads) # Allow for multicolumns heads <- heads[heads != 'DELETECELL'] headrow <- paste(heads, collapse = ' & ') headrow <- paste(headrow,'\\\\ \n\\hline\n') # Do this separately so as to allow for multicolumns rowprocess <- function(x) { x <- unname(x) x <- as.character(x) x <- x[x != 'DELETECELL'] return(paste(x, collapse = ' & ')) } #Check for a header row if (substr(data[1,1],1,9) == 'HEADERROW') { data[1,1] <- substring(data[1,1],10) hrow <- paste(rowprocess(data[1,]),' \\\\ \n') data <- data[2:nrow(data),] headrow <- paste(hrow,headrow) } #Convert rows of data to LaTeX rows <- apply(data, 1, rowprocess) rows <- paste(rows, collapse = ' \\\\ \n') #Paste the opener, header row, and rows table.latex <- paste0(table.latex,headrow,rows) #And close the table table.latex <- paste0(table.latex,'\\\\ \n\\hline\n\\hline\n') if (!is.na(note)) { table.latex <- paste0(table.latex, '\\multicolumn{',ncol(data),'}{',note.align,'}{',note,'}\\\\ \n') } table.latex <- paste0(table.latex,'\\end{tabular}\n') if (!is.na(fit.page)) { table.latex <- paste0(table.latex,'}\n') } table.latex <- paste0(table.latex,'\\end{table}\n') #Make into a page if requested if (!frag) { table.latex <- paste0('\\documentclass{article}\n\\begin{document}\n\n', table.latex, '\n\n\\end{document}') } ####### APPLICATION OF FILE OPTION if (!is.na(file)) { #If they forgot a file extension, fill it in if (!grepl("\\.tex",file)) { file <- paste(file,'.tex',sep='') } filepath <- file.path(file) #Create temporary tex file writeLines(table.latex,filepath) } return(table.latex) }
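# A small illustration of the cell syntax described in the documentation above,
# usable with either dftoHTML() or dftoLaTeX(): a HEADERROW whose second cell spans
# two columns via _MULTICOL_, with DELETECELL filling the cell it absorbs. The data
# are made up purely for demonstration.
if (interactive()) {
  df <- data.frame(
    Group = c("HEADERROW", "A", "B"),
    N     = c("Grouped columns_MULTICOL_c_2", "10", "12"),
    Mean  = c("DELETECELL", "2.4", "3.1")
  )
  cat(dftoLaTeX(df, align = 'lcc', title = 'Multi-column example',
                note = 'The header row spans the last two columns.'))
}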
# ---- end of vtable/R/dftotable.R ----
# Forces evaluation of all functions # In order to produce referential transparency and ensure the sub-arguments # don't get copied over # see similar function in the scales package force_all <- function(...) { list(...) } #' Function-returning wrapper for format #' #' This function takes a set of options for the \code{format()} function and returns a function that itself calls \code{format()} with those settings. #' #' The only differences are: #' #' 1. \code{scientific} is set to \code{FALSE} by default, and \code{trim} is set to \code{TRUE} #' 2. Passing a \code{NA} value produces \code{''} instead of \code{'NA'}. #' 3. In addition to standard \code{format()} options, it also accepts a \code{percent} option to apply percentage formatting, and \code{prefix} and \code{suffix} options to apply prefixes or suffixes to formatted numbers. #' 4. Has an attribute \code{'big.mark'} storing the \code{'big.mark'} option chosen. #' #' This is in the spirit of the \code{label_} functions in the scales package, except that it uses \code{format()}'s focus on significant digits instead of fixed decimal places, which is good for numbers that range across multiple orders of magnitude, common in \code{sumtable()} and \code{vtable()}. #' #' @param percent Whether to apply percentage formatting. Set to \code{TRUE} if 1 = 100\%. Or, optionally, set to any other number that represents 100\%. So \code{percent = TRUE} or \code{percent = 1} will interpret \code{.9} as \code{90\%}, or \code{percent = 100} will format \code{90} as \code{90\%}. #' @param prefix A prefix to apply to the formatted number. For example, \code{prefix = '$'} would format \code{4} as \code{$4}. #' @param suffix A suffix to apply to the formatted number. If specified alongside \code{percent}, the suffix comes after the \%. #' @param scale A scalar value to be multiplied by all numbers prior to formatting. \code{scale = 1/1000}, for example, would convert the units into thousands. This is applied before \code{digits}. #' @param digits Number of significant digits. #' @param nsmall The minimum number of digits to the right of the decimal point. #' @param big.mark A character to mark thousands places, for example producing "1,000" instead of "1000". #' @param trim Whether numbers should be trimmed to their own size, rather than being right-justified to a common width. Unlike the actual \code{format()}, this defaults to \code{TRUE}. Note that in most vtable applications, the formatting function is applied one value at a time, rather than to a vector, so \code{trim = FALSE} may not work as intended. #' @param scientific Whether numbers should be encoded in scientific format. Unlike the actual \code{format()}, this defaults to \code{FALSE}. #' @param ... Arguments to be passed to \code{format()}. See \code{help(format)}. All other parameters listed above except for \code{percent}, \code{prefix}, or \code{suffix} are also just part of \code{format}, but may be of particular interest, or have been included to show how defaults have changed. #' @examples #' x <- c(1, 1000, .000235, 1298.255, NA) #' my.formatting.func = formatfunc(digits = 3, prefix = '$') #' my.formatting.func(x) #' #' @export formatfunc formatfunc <- function(percent = FALSE, prefix = '', suffix = '', scale = 1, digits = NULL, nsmall = 0L, big.mark = '', trim = TRUE, scientific = FALSE, ...) 
{ if (!is.null(digits)) { if (is.na(digits)) { digits <- NULL } } scalefactor <- 1 if (is.numeric(percent)) { scalefactor <- 1/percent percent <- TRUE } if (percent) { scalefactor <- scalefactor*100 suffix <- paste0('%',suffix) } force_all(scalefactor, digits, nsmall, big.mark, trim, scientific, prefix, suffix, scale, ...) the_function <- function(x) { x_fmt <- format(abs(x*scalefactor*scale), digits = digits, nsmall = nsmall, big.mark = big.mark, trim = trim, scientific = scientific, ...) if (prefix != '' & trim == FALSE) { # If we need a prefix but we have blank-space padding at the start, add the prefix after first_nonspace <- regexpr('\\S',x_fmt) x_fmt <- sapply(1:length(x_fmt), function(i) paste0( c(rep(' ', first_nonspace[i]-1), ifelse(x*scalefactor*scale < 0, '-',''), prefix, substr(x_fmt[i],first_nonspace[i],nchar(x_fmt[i]))), suffix, collapse = '')) } else { x_fmt <- paste0(ifelse(x*scalefactor*scale < 0, '-',''), prefix, x_fmt, suffix) } x_fmt[is.na(x)] <- '' return(x_fmt) } attr(the_function, 'big.mark') <- big.mark return(the_function) } #' Number of unique values in a vector #' #' This function takes a vector and returns the number of unique values in that vector. #' #' This function is just shorthand for \code{length(unique(x))}, with a shorter name for reference in the \code{vtable} or \code{sumtable} \code{summ} option. #' #' @param x A vector. #' @examples #' x <- c(1, 1, 2, 3, 4, 4, 4) #' nuniq(x) #' #' @export nuniq <- function(x) { return(length(unique(x))) } #' Weighted standard deviation #' #' This is a basic weighted standard deviation function, mainly for internal use with \code{sumtable}. For a more fully-fledged weighted SD function, see \code{Hmisc::wtd.var}, although it uses a slightly different degree-of-freedom correction. #' #' @param x A numeric vector. #' @param w A vector of weights. Negative weights are not allowed. #' @param na.rm Set to \code{TRUE} to remove indices with missing values in \code{x} or \code{w}. #' @examples #' x <- c(1, 1, 2, 3, 4, 4, 4) #' w <- c(4, 1, 3, 7, 0, 2, 5) #' weighted.sd(x, w) #' #' @export weighted.sd weighted.sd <- function(x, w, na.rm = TRUE) { if (length(x) != length(w)) { stop('Weights and data must be the same length.') } if (min(w) < 0) { stop('Negative weights found.') } if (na.rm) { missings <- is.na(x) | is.na(w) x <- x[!missings] w <- w[!missings] } weightsum <- sum(w) if (weightsum == 0) { stop('Weights sum to 0 among nonmissing data.') } mean_x <- sum(w*x)/weightsum num_nonzero <- sum(w > 0) var_x <- sum(w*((x-mean_x)^2))/(weightsum*(num_nonzero-1)/num_nonzero) sd_x <- sqrt(var_x) return(sd_x) } #' Proportion of missing values in a vector #' #' This function calculates the proportion of values in a vector that are NA. #' #' This function is just shorthand for \code{mean(is.na(x))}, with a shorter name for reference in the \code{vtable} or \code{sumtable} \code{summ} option. #' #' @param x A vector. #' @examples #' x <- c(1, 1, NA, 2, 3, NA) #' propNA(x) #' @export propNA <- function(x) { mean(is.na(x)) } #' Number of missing values in a vector #' #' This function calculates the number of values in a vector that are NA. #' #' This function is just shorthand for \code{sum(is.na(x))}, with a shorter name for reference in the \code{vtable} or \code{sumtable} \code{summ} option. #' #' @param x A vector. 
#' @examples #' x <- c(1, 1, NA, 2, 3, NA) #' countNA(x) #' @export countNA <- function(x) { sum(is.na(x)) } #' Number of nonmissing values in a vector #' #' This function calculates the number of values in a vector that are not NA. #' #' This function just shorthand for \code{sum(!is.na(x))}, with a shorter name for reference in the \code{vtable} or \code{sumtable} \code{summ} option. #' #' If \code{big.mark} is specified, will return a formatted string instead of a number, where the formatting is based on \code{format(x, big.mark = big.mark, scientific = FALSE, ...)}. #' #' @param x A vector. #' @param big.mark Argument to pass to \code{format()}, if a formatted string is desired. #' @param scientific Argument to pass to \code{format()} if \code{big.mark} is specified. Defaults to \code{FALSE}, unlike in \code{format()}. #' @param ... Other arguments to pass to \code{format()}. Ignored if \code{big.mark} is not specified. #' @examples #' x <- c(1, 1, NA, 2, 3, NA) #' notNA(x) #' notNA(1:10000, big.mark = ',') #' @export notNA <- function(x, big.mark = NULL, scientific = FALSE, ...) { if (is.null(big.mark)) { return(sum(!is.na(x))) } else { return(format(sum(!is.na(x)), big.mark = big.mark, scientific = scientific, ...)) } } #' Returns a vector of 100 percentiles #' #' This function calculates 100 percentiles of a vector and returns all of them. #' #' This function just shorthand for \code{quantile(x,1:100/100)}, with a shorter name for reference in the \code{vtable} or \code{sumtable} \code{summ} option, and which works with \code{sumtable} \code{summ.names} styling. #' #' @param x A vector. #' @examples #' x <- 1:500 #' pctile(x)[50] #' quantile(x,.5) #' median(x) #' @export pctile <- function(x) { stats::quantile(x,1:100/100) } #' Checks if information is lost by rounding #' #' This function takes a vector and checks if any information is lost by rounding to a certain number of digits. #' #' Returns \code{TRUE} if rounding to \code{digits} digits after the decimal can be done without losing information. #' #' @param x A vector. #' @param digits How many digits to round to. #' @examples #' is.round(1:5) #' #' x <- c(1, 1.2, 1.23) #' is.round(x) #' is.round(x,digits=2) #' @export is.round <- function(x,digits=0) { !any(!(x == round(x,digits))) } # Evaluate a series of functions # # Internal for summ, evaluates a function while allowing for the possibility that the class isn't right to evaluate that function parsesumm <- function(x,summuse,summnames) { # Run each of the functions on the variable and get results results <- lapply(summuse, function(y) parsefcn(x,y)) # If it's a number, round it results <- lapply(results, function(y) if(is.numeric(y)) { round(y,3) } else { y }) # Get rid of functions that evaluated to NA (i.e. don't work) summnames <- summnames[!is.na(results)] results <- results[!is.na(results)] # Paste together results <- paste(summnames,sapply(results, as.character), sep = "") # And bring it all together with a break between each return(paste0(results, collapse = '<br/>')) } # Evaluate a function allowing it to not work parsefcn <- function(x,y, ...) { list2env(list(...),envir=environment()) result <- suppressWarnings(try(eval(parse(text=y)),silent = TRUE)) if (inherits(result,'try-error')) { result <- NA } return(result) } # Evaluate a function allowing it to not work # Special version for sumtable that does the NA-dropping internally parsefcn_summ <- function(x,y, ...) 
{ list2env(list(...),envir=environment()) if (!any(sapply(c('anyNA','propNA','countNA'),function(z) grepl(z,y)))) { x <- x[!is.na(x)] } result <- suppressWarnings(try(eval(parse(text=y)),silent = TRUE)) if (inherits(result,'try-error')) { result <- NA } return(result) } # Create a summary statistics table for a single variable # Internal function for sumtable summary.row <- function(data,var,st, title,summ,cla,factor.percent, factor.count,factor.numeric,digits,fixed.digits, wts = NULL, fmt = NULL, skip.format = NULL, factor.not.numeric.count = notNA) { numcols <- length(summ) if (cla == 'header') { st[1,] <- c( paste0(title,'_MULTICOL_l_all'), rep('DELETECELL',numcols)) } else if (cla == 'factor' & !factor.numeric) { #Get data va <- data[[var]] #Total number of obs nonmiss <- factor.not.numeric.count(va) nonmissnum <- notNA(va) #Header row st[1,] <- c(title,nonmiss, #Take this out for now. #ifelse(factor.percent,'100%','1'), '', rep('',numcols-2)) #And now the per-factor stuff mat <- as.data.frame(table(va)) #aggregate can't handle all-NAs if (nonmissnum > 0) { matlabel <- stats::aggregate(y~x, data.frame(y = 1, x = va), FUN = factor.not.numeric.count, drop = FALSE) matlabel$y[is.na(matlabel$y)] <- 0 propcalc <- mat$Freq/nonmissnum if (!is.null(wts) & grepl('wts',summ[2])) { propcalc <- sapply(mat$va, function(x) stats::weighted.mean(va == x, w = wts, na.rm = TRUE)) } } else { matlabel <- stats::aggregate(y~x, data.frame(y = 1, x = factor(levels(va), levels = levels(va))), FUN = factor.not.numeric.count, drop = FALSE) matlabel$y <- 0 propcalc <- rep(NA,length(mat$Freq)) } propcalc <- propcalc*(100^factor.percent) mat$va <- paste('...',mat$va) mat$Freq <- matlabel$y if (fixed.digits) { mat$Prop <- sapply(1:length(propcalc), function(x) format(propcalc[x], digits=max(digits[2]-2*factor.percent,1), nsmall=max(digits[2]-2*factor.percent,0), scientific = FALSE)) st[1,2] <- format(as.numeric(st[1,2]), max(digits = digits[1],1), nsmall = digits[1], scientific = FALSE) } else { mat$Prop <- round(propcalc, digits=max(digits[2]-2*factor.percent,0)) } if (!factor.count) { mat$Freq <- '' } if (factor.percent) { mat$Prop <- paste0(mat$Prop,'%') } if (ncol(mat) < ncol(st)) { mat[,(ncol(mat)+1):(ncol(st))] <- '' } if (nonmissnum == 0) { mat$Prop <- '' } names(mat) <- names(st) st <- rbind(st,mat) } else if (cla == 'factor' & factor.numeric) { #Header row st[1,] <- c(title,rep('',numcols)) #Get data. as.data.frame() because tibbles no longer work with this syntax. 
va <- as.data.frame(data)[,var] #Create dummies to treat as numeric mat <- stats::model.matrix(~-1+va) #And names facnames <- paste('...',levels(va)) #Run each of the functions on the variable and get results results <- lapply(1:ncol(mat), function(x) lapply(summ, function(y) parsefcn_summ(mat[,x],y, wts = wts[!is.na(va)]))) #Format results <- lapply(1:length(results), function(x) sapply(1:length(results[[x]]), function(y) ifelse(summ[y] %in% skip.format, results[[x]][[y]], fmt(results[[x]][[y]])))) #Round if (paste0(deparse(fmt), collapse = '') == 'function (x) x') { if (fixed.digits) { results <- lapply(results, function(x) sapply(1:length(x), function(y) ifelse(is.character(x[y]), x[y], format(x[y],digits=digits[y],nsmall = max(digits[y],1),scientific=FALSE)))) } else { results <- lapply(results, function(x) sapply(1:length(x), function(y) ifelse(is.character(x[y]), x[y], as.character(round(x[y],digits=digits[y]))))) } } #Add factor names results <- lapply(1:length(results), function(x) c(facnames[x],results[[x]])) #And construct results <- as.data.frame(do.call(rbind,results)) names(results) <- names(st) st <- rbind(st,results) } else { #Get data va <- data[[var]] #Run each of the functions on the variable and get results results <- lapply(summ, function(y) parsefcn_summ(va,y, wts = wts[!is.na(va)])) #Format results <- lapply(1:length(results), function(y) ifelse(summ[y] %in% skip.format, results[[y]], fmt(results[[y]]))) # If our formatting function was function(x) x, apply digits options if (paste0(deparse(fmt), collapse = '') == 'function (x) x') { if (fixed.digits) { results <- sapply(1:length(results), function(y) ifelse(is.character(results[[y]]), results[[y]], format(results[[y]],digits=max(digits[y],1),nsmall = digits[y],scientific = FALSE))) } else { results <- sapply(1:length(results), function(y) ifelse(is.character(results[[y]]), results[[y]], round(results[[y]],digits=max(digits[y],1)))) } } #And construct st[1,] <- c(title,results) } return(st) } # cbinds character data frames with potentially unequal rows # takes a list of data.frames cbind_unequal <- function(x) { #Get longest length rowseach <- sapply(x,nrow) mostrows <- max(rowseach) #Loop through and add blank rows for (i in 1:length(x)) { if (rowseach[i] < mostrows) { x[[i]][(rowseach[i]+1):mostrows,] <- '' } } return(do.call(cbind,x)) } #For a table that is going to be seen "raw", remove all the multicolumn stuff clean_multicol <- function(df) { df[1,1] <- gsub('HEADERROW','',df[1,1]) clean_content <- function(x) { x <- sapply(x, function(y) gsub('DELETECELL','',y)) x <- sapply(x, function(y) ifelse(grepl('_MULTICOL_',y), substr(y,1,gregexpr('_MULTICOL_',y)[[1]]-1), y)) } for (i in 1:ncol(df)) { df[[i]] <- clean_content(df[[i]]) } names(df) <- clean_content(names(df)) return(df) } #For a table that is going to be seen "raw", remove all the multicolumn stuff #Except for the top row which can become a multi-column header. Good for groups! 
clean_multicol_kable <- function(df,title,note=NA) { # If the first row is a header, chop it off and save for later hasheader <- FALSE if (grepl('HEADERROW',df[1,1])) { headerrow <- df[1,] df <- df[2:nrow(df),] hasheader <- TRUE } df[1,1] <- gsub('HEADERROW','',df[1,1]) clean_content <- function(x) { x <- sapply(x, function(y) gsub('DELETECELL','',y)) x <- sapply(x, function(y) ifelse(grepl('_MULTICOL_',y), substr(y,1,gregexpr('_MULTICOL_',y)[[1]]-1), y)) } for (i in 1:ncol(df)) { df[[i]] <- clean_content(df[[i]]) } names(df) <- clean_content(names(df)) # For this one, directly return the kable if (knitr::is_html_output()) { fmt <- 'html' } else if (knitr::is_latex_output()) { fmt <- 'latex' } else { fmt <- NULL } # And format the header if there is one if (hasheader) { # Get rid of deleted cells if (is.null(fmt)) { headerrow[headerrow == 'DELETECELL'] = '' } else { headerrow <- headerrow[headerrow != 'DELETECELL'] } # HEADERROW itself is blank headerrow <- gsub('HEADERROW','',headerrow) # No alignment control anyway headerrow <- gsub('_c_','_l_',headerrow) headerrow <- gsub('_r_','_l_',headerrow) } if (is.null(fmt)) { if (hasheader) { headerrow <- gsub('_MULTICOL.*$','',headerrow) names(headerrow) = names(df) df = rbind(headerrow, df) } kb <- knitr::kable(df, caption = title, row.names = FALSE) } else if (fmt == 'html') { # escape by hand bc of test column which should not be escaped cols_to_escape <- 1:ncol(df) cols_to_escape <- cols_to_escape[names(df) != 'Test'] for (c in cols_to_escape) { df[[c]] <- gsub("&", "&amp;", df[[c]], fixed = TRUE) df[[c]] <- gsub("<", "&lt;", df[[c]], fixed = TRUE) df[[c]] <- gsub(">", "&gt;", df[[c]], fixed = TRUE) } kb <- knitr::kable(df, caption = title, row.names = FALSE, format = fmt, escape = FALSE) } else if (fmt == 'latex') { # escape by hand bc of test column which should not be escaped cols_to_escape <- 1:ncol(df) cols_to_escape <- cols_to_escape[names(df) != 'Test'] for (c in cols_to_escape) { # do backslash separately so we don't escape the backslashes we write df[[c]] <- gsub("\\\\", "\\\\\\\\", df[[c]]) df[[c]] <- gsub("([&%$#_\\{\\}~^])", "\\\\\\1", df[[c]]) } kb <- knitr::kable(df, caption = title, row.names = FALSE, format = fmt, booktabs = TRUE, escape = FALSE) } # And now add the header if (hasheader & !is.null(fmt)) { headercol <- eval(parse(text=paste('c(', paste( sapply(headerrow, FUN = function(x) ifelse(grepl('_MULTICOL_l_',x), paste0('"',strsplit(x,'_MULTICOL_l_')[[1]][1],'"=',strsplit(x,'_MULTICOL_l_')[[1]][2]), paste0('"',x,'"'))), collapse = ','), ')'))) kb <- kableExtra::add_header_above(kb,headercol) } if (!is.na(note)) { kb <- kableExtra::add_footnote(kb, note, notation = 'none') } return(kb) }
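# The shorthands defined above (nuniq, propNA, countNA, notNA, pctile) exist mainly
# so they can be named inside sumtable()'s summ argument. A hedged sketch of that
# usage, assuming sumtable() from this package and the built-in iris data:
if (interactive()) {
  sumtable(iris,
           summ = c('notNA(x)', 'mean(x)', 'sd(x)', 'nuniq(x)', 'propNA(x)'),
           summ.names = c('N', 'Mean', 'SD', 'Unique', 'Prop. Missing'))
}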
# ---- end of vtable/R/helpers.R ----
#' Group-Independence Test Function #' #' This function takes in two variables of equal length, the first of which is a categorical variable, and performs a test of independence between them. It returns a character string with the results of that test for putting in a table. #' #' In an attempt (and perhaps an encouragement) to use this function in weird ways, and because it's not really expected to be used directly, input is not sanitized. Have fun! #' #' @param x A categorical variable. #' @param y A variable to test for independence with \code{x}. This can be a factor or numeric variable. If you want a numeric variable treated as categorical, convert to a factor first. #' @param w A vector of weights to pass to the appropriate test. #' @param factor.test Used when \code{y} is a factor, a function that takes \code{x} and \code{y} as its first arguments and returns a list with three elements: (1) The name of the test for printing, (2) the test statistic, and (3) the p-value. Defaults to a Chi-squared test if there are no weights, or a design-based F statistic (Rao & Scott adjustment, see \code{survey::svychisq}) with weights, which requires that the \code{survey} package be installed. WARNING: the Chi-squared test's assumptions fail with small sample sizes. This function will be attempted for all non-numeric \code{y}. #' @param numeric.test Used when \code{y} is numeric, a function that takes \code{x} and \code{y} as its first arguments and returns a list with three elements: (1) The name of the test for printing, (2) the test statistic, and (3) the p-value. Defaults to a group differences F test. If you only have two groups and would prefer an absolute t-statistic to an F-statistic, pass \code{vtable:::groupt.it}. #' @param star.cutoffs A numeric vector indicating the p-value cutoffs to use for reporting significance stars. Defaults to \code{c(.01,.05,.1)}. If you don't want stars, remove them from the \code{format} argument. #' @param star.markers A character vector indicating the symbols to use to indicate significance cutoffs associated with \code{star.cutoffs}. Defaults to \code{c('***','**','*')}. If you don't want stars, remove them from the \code{format} argument. #' @param digits Number of digits after the decimal to round the test statistic and p-value to. #' @param fixed.digits \code{FALSE} will cut off trailing \code{0}s when rounding. \code{TRUE} retains them. Defaults to \code{FALSE}. #' @param format The way in which the four elements returned by (or calculated after) the test - \code{{name}}, \code{{stat}}, \code{{pval}}, and \code{{stars}} - will be arranged in the string output. Note that the default \code{'{name}={stat}{stars}'} does not contain the p-value, and also does not contain superscript for the stars since it doesn't know what markup language you're aiming for. For LaTeX you may prefer \code{'{name}$={stat}^{{stars}}$'}, and for HTML \code{'{name}={stat}<sup>{stars}</sup>'}. #' @param opts The options listed above, entered in named-list format. 
#' @examples #' #' data(mtcars) #' independence.test(mtcars$cyl,mtcars$mpg) #' #' @export independence.test independence.test <- function(x,y,w=NA, factor.test = NA, numeric.test = NA, star.cutoffs = c(.01,.05,.1), star.markers = c('***','**','*'), digits = 3, fixed.digits = FALSE, format = '{name}={stat}{stars}', opts = list()) { #Bring in opts list2env(opts,envir=environment()) #Are we using factor.test or numeric.test cla <- is.numeric(y) # Backwards consistency if (length(w) == 1) { if (is.na(w)) { w <- NULL } } #Fill in defaults if (identical(factor.test,NA)) { factor.test <- chisq.it } if (identical(numeric.test,NA)) { numeric.test <- groupf.it } if (cla) { result <- numeric.test(x,y,w) } else { result <- factor.test(x,y,w) } #Get stars #Order smallest to biggest star.markers <- star.markers[order(star.cutoffs)] star.cutoffs <- star.cutoffs[order(star.cutoffs)] #Find the first value that qualifies underneath <- result[[3]] < star.cutoffs stars <- star.markers[underneath][1] stars <- ifelse(is.na(stars),'',stars) #Rounding #First, check if we're going to get a 0, so we can set that separate is.zero <- result[[3]] < 10^(-digits) if (fixed.digits) { result[[2]] <- format(result[[2]],digits=digits,nsmall=digits) result[[3]] <- format(result[[3]],digits=digits,nsmall=digits) } else { result[[2]] <- round(result[[2]],digits=digits) result[[3]] <- round(result[[3]],digits=digits) } if (is.zero) { result[[3]] <- paste0('<',10^(-digits)) } #And format the result printout <- format #Fill in our four things printout <- gsub('\\{name\\}',result[[1]],printout) printout <- gsub('\\{stat\\}',result[[2]],printout) printout <- gsub('\\{pval\\}',result[[3]],printout) printout <- gsub('\\{stars\\}',stars,printout) return(printout) } groupt.it <- function(x, y, w = NULL) { if (length(unique(x)) > 2) { stop('groupt.it cannot be used with more than two groups.') } return(groupf.it(x=x, y=y, w=w, t = TRUE)) } # Internal chi-square and group-F tests that return things in independence.test format chisq.it <- function(x,y,w=NULL) { if (is.null(w)) { suppressWarnings(result <- stats::chisq.test(x,y)) return(list( 'X2', unname(result$statistic), result$p.value )) } else { # Create survey design d <- data.frame(x = x, y = y, w = w) errmess <- try(sdes <- survey::svydesign(~1, data = d, weights = ~w)) if (grepl('Error in loadNamespace',errmess[1])) { stop('Using weights with group.test = TRUE and factor variables requires the survey package. install.packages("survey")') } ftest <- survey::svychisq(~x+y, sdes) return(list( 'F', unname(ftest$statistic), unname(ftest$p.value) )) } } groupf.it <- function(x,y,w=NULL, t = FALSE) { result <- stats::anova(stats::lm(y~factor(x),weights = w)) statname <- 'F' stat <- result$`F value`[1] if (t) { statname <- 't' stat <- sqrt(stat) } return(list( statname, stat, result$`Pr(>F)`[1] )) }
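# A short usage sketch for independence.test(), including the LaTeX-style format
# string suggested in the documentation above. Data are the built-in mtcars.
if (interactive()) {
  data(mtcars)
  # Group-difference F test of mpg across cylinder counts, with significance stars
  independence.test(mtcars$cyl, mtcars$mpg)
  # Same test, formatted for a LaTeX table and reporting the p-value as well
  independence.test(mtcars$cyl, mtcars$mpg,
                    format = '{name}$={stat}^{{stars}}$ (p={pval})')
  # Chi-squared test between two categorical variables
  independence.test(mtcars$cyl, factor(mtcars$gear))
}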
# ---- end of vtable/R/independencetest.R ----
#' Label Table Function #' #' This function outputs a descriptive table listing, for each value of a given variable, either the label of that value, or all values of another variable associated with that value. The table is output either to the console or as an HTML file that can be viewed continuously while working with data. #' #' Outputting the label table as a help file will make it easy to search through value labels, or to see the correspondence between the values of one variable and the values of another. #' #' Labels that are not in the data will also be reported in the table. #' #' @param var A vector. Label table will show, for each of the values of this variable, its label (if labels can be found with \code{sjlabelled::get_labels()}), or the values in the \code{...} variables. #' @param ... As described above. If specified, will show the values of these variables, instead of the labels of var, even if labels can be found. #' @param out Determines where the completed table is sent. Set to \code{"browser"} to open HTML file in browser using \code{browseURL()}, \code{"viewer"} to open in RStudio viewer using \code{viewer()}, if available. Use \code{"htmlreturn"} to return the HTML code to R, \code{"return"} to return the completed variable table to R in data frame form, or \code{"kable"} to return it in \code{knitr::kable()} form. Combine \code{out = "csv"} with \code{file} to write to CSV (dropping most formatting). Additional options include \code{"latex"} for a LaTeX table or \code{"latexpage"} for a full buildable LaTeX page. Defaults to \code{"viewer"} if RStudio is running, \code{"browser"} if it isn't, or a \code{"kable"} passed through \code{kableExtra::kable_styling()} defaults if it's an RMarkdown document being built with \code{knitr}. #' @param count Set to \code{TRUE} to also report the number of observations for each value of \code{var} in the data. #' @param percent Set to \code{TRUE} to also report the percentage of non-missing observations for each value of \code{var} in the data. #' @param file Saves the completed variable table file to HTML with this filepath. May be combined with any value of \code{out}, although note that \code{out = "return"} and \code{out = "kable"} will still save the standard labeltable HTML file as with \code{out = "viewer"} or \code{out = "browser"}. #' @param desc Description of variable (or labeling system) to be included with the table. #' @param note Table note to go after the last row of the table. #' @param note.align Set the alignment for the multi-column table note. Usually "l", but if you have a long note in LaTeX you might want to set it with "p{}" #' @param anchor Character variable to be used to set an anchor link in HTML tables, or a label tag in LaTeX. #' @examples #' \dontshow{ #' #These tests use the out='htmlreturn' option #' #so that the same process of generating HTML is followed #' #but a browser window is not opened during testing. #' #This process is identical to regular operation except that #' #HTML is written to the R output rather than a browser. #' #' #Input a single labelled variable to see a table relating values to labels. #' #Values not present in the data will be included in the table but moved to the end. #' library(sjlabelled) #' data(efc) #' labeltable(efc$e15relat,out='htmlreturn') #' #' #Include multiple variables to see, for each value of the first variable, #' #each value of the others present in the data. 
#' data(efc) #' labeltable(efc$e15relat,efc$e16sex,efc$e42dep,out='htmlreturn') #' #' #Commonly, the multi-variable version might be used to recover the original #' #values of encoded variables #' data(USJudgeRatings) #' USJudgeRatings$Judge <- row.names(USJudgeRatings) #' USJudgeRatings$JudgeID <- as.numeric(as.factor(USJudgeRatings$Judge)) #' labeltable(USJudgeRatings$JudgeID,USJudgeRatings$Judge,out='htmlreturn') #' } #' if(interactive()){ #' #Input a single labelled variable to see a table relating values to labels. #' #Values not present in the data will be included in the table but moved to the end. #' library(sjlabelled) #' data(efc) #' labeltable(efc$e15relat) #' #' #Include multiple variables to see, for each value of the first variable, #' #each value of the others present in the data. #' data(efc) #' labeltable(efc$e15relat,efc$e16sex,efc$e42dep) #' #' #Commonly, the multi-variable version might be used to recover the original #' #values of encoded variables #' data(USJudgeRatings) #' USJudgeRatings$Judge <- row.names(USJudgeRatings) #' USJudgeRatings$JudgeID <- as.numeric(as.factor(USJudgeRatings$Judge)) #' labeltable(USJudgeRatings$JudgeID,USJudgeRatings$Judge) #' } #' @export labeltable <- function(var,...,out=NA,count=FALSE,percent=FALSE,file=NA,desc=NA,note=NA,note.align = NA,anchor=NA) { #Just in case, noting that if ...s are labeled, #but a package that supports the class isn't loaded it messes things up comp.vars <- data.frame(lapply(list(...),function(x) sjlabelled::unlabel(x))) names(comp.vars) <- sapply(as.list(substitute(list(...)))[-1L], function(x) utils::tail(as.character(x),n=1)) if (ncol(comp.vars)==0 & is.null(unlist(sjlabelled::get_labels(var))) & !(count) & !(percent)) { stop('Either var must have labels, variables must be specified in ..., or count or percent must be TRUE.') } if (!is.na(desc) & !is.character(desc)) { stop('desc must be a character.') } if (!identical(out,NA) & !(out %in% c('viewer', 'browser','return','htmlreturn','kable','latex','latexpage','csv'))) { stop('out must be viewer, browser, return, htmlreturn, kable, latex, or latexpage') } if (identical(out, 'csv') & is.na(file)) { warning('out = "csv" will just return the vtable as a data.frame unless combined with file') } #Get actual name of variable var.name <- deparse(substitute(var)) var.name <- utils::tail(strsplit(var.name,'\\$')[[1]],1) #labels version if (ncol(comp.vars)==0) { #Put in a data frame for working with lt <- data.frame(var) #Drop missings lt <- stats::na.omit(lt) #Only need one of each value lt <- subset(lt,!duplicated(lt)) #Why aren't labels preserved with subset anyway?? lt$var <- sjlabelled::set_labels(lt$var,labels=attr(var,'labels')) #Create the column with labels lt$labs <- sjlabelled::as_label(lt$var) lt <- lt[order(lt$var),] lt$var <- as.character(lt$var) #And a row with the unused labels #extract all labels labs <- attr(var,'labels') #find which ones aren't present labs <- labs[!(names(labs) %in% lt$labs)] if (length(labs) > 0) { #Get into the same format as above lt2 <- data.frame(labs) names(lt2)[1] <- 'var' lt2$var <- paste(lt2$var,' [NOT IN DATA]',sep='') lt2$labs <- row.names(lt2) lt <- rbind(lt,lt2) } names(lt) <- c(var.name,'Label') } else { #comp.var version # No missing data please! 
var <- as.character(var) var[is.na(var)] <- 'NA' #Put in a data frame for working with prelt <- data.frame(var,comp.vars) #Only need one of each value prelt <- subset(prelt,!duplicated(prelt)) lt <- data.frame( var=unique(prelt$var), lapply(names(prelt)[-1],function(y) sapply(unique(prelt$var),function(x) paste0(unique(subset(prelt,prelt$var==x)[[y]]),collapse=', ')))) lt <- lt[order(lt$var),] names(lt) <- c(var.name,names(prelt)[-1]) } # Do counts and/or percentages if (count | percent) { cts = as.data.frame(table(var)) cts$Pct = paste0(format(100*cts$Freq/sum(cts$Freq), digits = 2, nsmall = 2, scientific = FALSE),'%') names(cts) = c(var.name, 'Count','Percent') if (!count) { cts$Count = NULL } if (!percent) { cts$Percent = NULL } lt = merge(lt, cts, by = var.name) } # Row names have gotten funky row.names(lt) <- 1:nrow(lt) ####### LATEX OUTPUT if (!identical(out, NA) & out %in% c('latex','latexpage')) { align <- paste0('l',rep('c',ncol(lt)-1)) #Table only if (out == 'latex') { return(cat(dftoLaTeX(lt, file = file, align = align, title = 'Label Table', note = note, note.align = note.align, anchor=anchor))) } #Now for the full page out.latex <- '\\documentclass{article}\n\\begin{document}\n\nlabeltable \\{vtable\\}\n\n' out.latex <- paste(out.latex, '\\textbf{\\LARGE ', var.name,'}\n\n') #Applying description #Applying description if (!is.na(desc)) { out.latex <- paste(out.latex,desc,'\n\n') } #And bring in the table itself out.latex <- paste(out.latex,dftoLaTeX(lt, align = align, title = 'Label Table', note = note, note.align = note.align, anchor=anchor),'\n\n\\end{document}',sep='') ####### APPLICATION OF FILE OPTION if (!is.na(file)) { #If they forgot a file extension, fill it in if (!grepl("\\.tex",file)) { file <- paste(file,'.tex',sep='') } filepath <- file.path(file) #Create temporary tex file writeLines(out.latex,filepath) } return(cat(out.latex)) } ####### CONSTRUCTION OF HTML #Head of file out.html <- paste(' <html style=\"font-family:Helvetica,Arial,Sans\"> <head><title>',var.name,' Label Table</title>', '<style type = \"text/css\"> p { font-size:smaller; } table { border: 0px; border-collapse:collapse; font-size:smaller; table-layout:fixed; margin-left:0%; margin-right:auto; } .headtab { width: 100%; margin-left:auto; margin-right:auto; } th { background-color: #FFFFFF; font-weight:bold; text-align:left; } table tr:nth-child(odd) td { background-color: #FFFFFF; padding:4px; word-wrap: break-word; word-break:break-all; } table tr:nth-child(even) td { background-color: #D3D3D3; padding:4px; word-wrap: break-word; word-break:break-all; }</style></head><body>',sep='') #Dataset name and description out.html <- paste(out.html, '<table class=\"headtab\">', '<tr><td style=\"text-align:left\">labeltable {vtable}</td>', '<td style=\"text-align:right\">Variable Documentation: Label Table</td></tr></table>', '<h1>',var.name,'</h1>') #Applying description if (!is.na(desc)) { out.html <- paste(out.html,'<p>',desc,'</p>',sep='') } out.html <- paste(out.html,'<h3>Label Table</h3>',sep='') #And bring in the table itself out.html <- paste(out.html,dftoHTML(lt, note = note, note.align = note.align, anchor=anchor, out='htmlreturn'),'</body></html>',sep='') ####### APPLICATION OF FILE OPTION if (!is.na(file)) { if (identical(out, 'csv')) { #If they forgot a file extension, fill it in if (!grepl("\\.csv",file)) { file <- paste(file,'.csv',sep='') } filepath <- file.path(file) #Create temporary html file utils::write.csv(lt,filepath, row.names = FALSE) } else { #If they forgot a file extension, fill 
it in if (!grepl("\\.htm",file)) { file <- paste(file,'.html',sep='') } filepath <- file.path(file) #Create temporary html file writeLines(out.html,filepath) } } #For more easily working with if statements if (is.na(out)) { out = '' } ####### APPLICATION OF OUT OPTION #If the plan is to produce a viewable HTML, create it if (out == 'viewer' | out == 'browser' | out == '') { #Get temporary dirpath tempDir <- tempfile() #Create temporary directory dir.create(tempDir) #Get temporary filepath htmlpath <- file.path(tempDir,'labeltable.html') #Create temporary html file writeLines(out.html,htmlpath) } #Either print the variable table to the help window #or return a variable table to the screen, as desired if (out == 'kable' | (isTRUE(getOption('knitr.in.progress')) & out == '')) { #kable can't handle blank rows. These should not occur in labeltable but just in case lt <- lt[!apply(lt,MARGIN=1,FUN=function(x) !any(!(x==rep('',ncol(lt))))),] #I don't know how this would happen but just in case lt <- lt[!apply(lt,MARGIN=1,FUN=function(x) propNA(x) == 1),] if (knitr::is_latex_output()) { kb <- knitr::kable(lt, booktabs = TRUE, format = 'latex') if (!is.na(note)) { kb <- kableExtra::add_footnote(kb, note, notation = 'none') } } else if(knitr::is_html_output()) { kb <- knitr::kable(lt, format = 'html') if (!is.na(note)) { kb <- kableExtra::add_footnote(kb, note, notation = 'none') } } else { kb <- knitr::kable(lt) } # If it's just a default RMarkdown kable, style it for HTML because the default is ew if (isTRUE(getOption('knitr.in.progress')) & out == '') { kb <- kableExtra::kable_styling(kb) } return(kb) } else if (Sys.getenv('RSTUDIO')=='1' & (out == 'viewer' | out == '')) { rstudioapi::viewer(htmlpath) } else if (Sys.getenv('RSTUDIO')=='' & out == 'viewer') { stop('out = viewer is not a valid option if RStudio is not running.') } else if ((Sys.getenv('RSTUDIO')=='' & out == '') | (out == 'browser')) { utils::browseURL(htmlpath) } else if (out == 'return'| out == 'csv') { return(lt) } else if (out == 'htmlreturn') { return(cat(out.html)) } }
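# Illustrative sketch (not part of the package source; assumes the package and
# sjlabelled are loaded): the documented count and percent arguments add
# frequency columns to the label table, which also makes labeltable() usable on
# variables without value labels.
if (interactive()) {
  library(sjlabelled)
  data(efc)
  labeltable(efc$e15relat, count = TRUE, percent = TRUE)
}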
# ---- end of R/labeltable.R ----
#' Summary Table Function #' #' This function will output a summary statistics variable table either to the console or as an HTML file that can be viewed continuously while working with data, or sent to file for use elsewhere. \code{st()} is the same thing but requires fewer key presses to type. #' #' There are many, many functions in R that will produce a summary statisics table for you. So why use \code{sumtable()}? \code{sumtable()} serves two main purposes: #' #' (1) In the same spirit as \code{vtable()}, it makes it easy to view the summary statistics \emph{as you work}, either in the Viewer pane or in a browser window. #' #' (2) \code{sumtable()} is designed to \emph{have nice defaults} and is not really intended for deep customization. It's got lots of options, sure, but they're only intended to go so far. So you can have a summary statistics table without much work. #' #' Keeping with point (2), \code{sumtable()} is designed for use by people who want the kind of table that \code{sumtable()} produces, which is itself heavily influenced by the kinds of summary statistics tables you often see in economics papers. In that regard it is most similar to \code{stargazer::stargazer()} except that it can handle tibbles, factor variables, grouping, and produce multicolumn tables, or \code{summarytools::dfSummary()} or \code{skimr::skim()} except that it is easier to export with nice formatting. If you want a lot of control over your summary statistics table, check out the packages gtsummary, arsenal, qwraps2, or Amisc, and about a million more. #' #' If you would like to include a \code{sumtable} in an RMarkdown document, it should just work! If you leave \code{out} blank, it will default to a nicely-formatted \code{knitr::kable()}, although this will drop some formatting elements like multi-column cells (or do \code{out="kable"} to get an unformatted \code{kable} that you can format yourself). If you prefer the \code{vtable} package formatting, then use \code{out="latex"} if outputting to LaTeX or \code{out="htmlreturn"} for HTML, both with \code{results="asis"} in the code chunk. Alternately, in HTML, you can use the \code{file} option to write to file and use a \code{<iframe>} to include it. #' #' @param data Data set; accepts any format with column names. #' @param vars Character vector of column names to include, in the order you'd like them included. Defaults to all numeric, factor, and logical variables, plus any character variables with six or fewer unique values. You can include strings that aren't columns in the data (including blanks) - these will create rows that are blank except for the string (left-aligned), for spacers or subtitles. #' @param out Determines where the completed table is sent. Set to \code{"browser"} to open HTML file in browser using \code{browseURL()}, \code{"viewer"} to open in RStudio viewer using \code{viewer()}, if available. Use \code{"htmlreturn"} to return the HTML code to R, \code{"latex"} to return LaTeX code to R (use \code{"latexdoc"} to get a full buildable document rather than a fragment), \code{"return"} to return the completed summary table to R in data frame form, or \code{"kable"} to return it in \code{knitr::kable()} form. Combine \code{out = "csv"} with \code{file} to write to CSV (dropping most formatting). Defaults to \code{"viewer"} if RStudio is running, \code{"browser"} if it isn't, or a \code{"kable"} passed through \code{kableExtra::kable_styling()} defaults if it's an RMarkdown document being built with \code{knitr}. 
#' @param file Saves the completed summary table file to file with this filepath. May be combined with any value of \code{out}, although note that \code{out = "return"} and \code{out = "kable"} will still save the standard sumtable HTML file as with \code{out = "viewer"} or \code{out = "browser"}. #' @param summ Character vector of summary statistics to include for numeric and logical variables, in the form \code{'function(x)'}. Defaults to \code{c('notNA(x)','mean(x)','sd(x)','min(x)','pctile(x)[25]','pctile(x)[75]','max(x)')} if there's one column, or \code{c('notNA(x)','mean(x)','sd(x)')} if there's more than one. If all variables in a column are factors it defaults to \code{c('sum(x)','mean(x)')} for the factor dummies. If the table has multiple variable-columns and you want different statistics in each, include a list of character vectors instead. This option is flexible, and allows any summary statistic function that takes in a column and returns a single number. For example, \code{summ=c('mean(x)','mean(log(x))')} will provide the mean of each variable as well as the mean of the log of each variable. Keep in mind the special vtable package helper functions designed specifically for this option \code{propNA}, \code{countNA}, \code{notNA}, and \code{notNA}, which report counts and proportions of NAs, or counts of not-NAs, in the vectors, \code{nuniq}, which reports the number of unique values, and \code{pctile}, which returns a vector of the 100 percentiles of the variable. NAs will be omitted from all calculations other than \code{propNA(x)} and \code{countNA(x)}. #' @param summ.names Character vector of names for the summary statistics included. If \code{summ} is at default, defaults to \code{c('N','Mean','Std. Dev.','Min','Pctl. 25','Pctl. 75','Max')} (or the appropriate shortened version with multiple columns) unless all variables in the column are factors in which case it defaults to \code{c('N','Percent')}. If \code{summ} has been set but \code{summ.names} has not, defaults to \code{summ} with the \code{(x)}s removed and the first letter capitalized. If the table has multiple variable-columns and you want different statistics in each, include a list of character vectors instead. #' @param add.median Adds \code{"median(x)"} to the set of default summary statistics. Has no effect if \code{"summ"} is also specified. #' @param group Character variable with the name of a column in the data set that statistics are to be calculated over. Value labels will be used if found for numeric variables. Changes the default \code{summ} to \code{c('mean(x)','sd(x)')}. #' @param group.long By default, if \code{group} is specified, each group will get its own set of columns. Set \code{group.long = TRUE} to instead basically just make a regular \code{sumtable()} for each group and stack them on top of each other. Good for when you have lots of groups. You can also set it to \code{'l'}, \code{'c'}, or \code{'r'} to determine how the group names are aligned. Defaults to centered. #' @param group.test Set to \code{TRUE} to perform tests of whether each variable in the table varies over values of \code{group}. Only works with \code{group.long = FALSE}. Performs a joint F-test (using \code{anova(lm))}) for numeric variables, and a Chi-square test of independence (\code{chisq.test}) for categorical variables. If you want to adjust things like which tests are used, significance star levels, etc., see the help file for \code{independence.test} and pass in a named list of options for that function. 
#' @param group.weights \emph{THIS OPTION DOES NOT AUTOMATICALLY WEIGHT ALL CALCULATIONS.} This is mostly to be used with \code{group} and \code{group.long = FALSE}, and while it's more flexible than that, you've gotta read this to figure out how else to use it. That's why I gave it the weird name. Set this to a vector of weights, or a string representing a column name with weights. If \code{summ} is not customized, this will replace \code{'mean(x)'} and \code{'sd(x)'} with the equivalent weighted versions \code{'weighted.mean(x, w = wts)'} and \code{'weighted.sd(x, w = wts)'} It will also add weights to the default \code{group.test} tests. This will not add weights to any other calculations, or to any custom \code{group.test} weights (although you can always do that yourself by customizing \code{summ} and passing in weights with this argument-the weights can be referred to in your function as \code{wts}). This is generally intended for things like post-matching balance tables. If you specify a column name, that column will be removed from the rest of the table, so if you want it to be kept, specify this as a numeric vector instead. If you have a variable in your data called \code{'wts'} that will mess the use of this option up, I recommend changing that. #' @param col.breaks Numeric vector indicating the variables (or number of elements of \code{vars}) after which to start a new column. So for example with a data set with six variables, \code{c(3,5)} would put the first three variables in the first column, the next two in the middle, and the last on the right. Cannot be combined with \code{group} unless \code{group.long = TRUE}. #' @param digits Number of digits after the decimal place to report. Set to a single number for consistent digits, or a vector the same length as \code{summ} for different digits for each calculation, or a list of vectors that match up to a multi-column \code{summ}. Defaults to 0 for the first calculation (N, usually) and 2 afterwards. #' @param fixed.digits Deprecated; currently only works if \code{numformat = NA}. \code{FALSE} will cut off trailing \code{0}s when rounding. \code{TRUE} retains them. Defaults to \code{FALSE}. #' @param numformat A function that takes a numeric input and produces labeled output, which you might construct using the \code{formatfunc} function or the \code{label_} functions from the scales package. Provide a single function to apply to all variables, or a list of functions the same length as the number of variables to format each variable differently. The formatting function will skip over \code{notNA, countNA, propNA} calculations by default. Factor percentages will ignore this entirely; you can use \code{NA} to skip them when specifying a list. Alternately, you can specify strings giving the shorthand for the appropriate formatting: the string containing \code{'comma'} will set \code{big.mark = ','}, \code{'decimal'} will set \code{big.mark = '.', decimal.mark = ','}, \code{'percent'} will do percentage formatting (with 1 = 100\%), and \code{'A|B'} will use \code{'A'} as a prefix and \code{'B'} as a suffix (specifying suffix optional, so \code{numformat = '$'} gives \code{'$3'}). Anything more complex than that will require you pass a \code{formatfunc} or similar function. Specifying a character vector will respect your \code{digits} option if \code{digits} is a single value rather than a vector or list, but will otherwise use the defaults of those functions. 
You can mix together specifying your own functions and specifying character strings. At the moment there is no way to do different formatting for different columns of the same variable, other than \code{skip.format}. Set to \code{NA} to revert to the old way of formatting. #' @param skip.format Set of functions in \code{summ} that are not subject to \code{format}. Does nothing if \code{format} is not specified. #' @param factor.percent Set to \code{TRUE} to show factor means as percentages instead of proportions, i.e. \code{50\%} with a column header of "Percent" rather than \code{.5} with a column header of "Mean". Defaults to \code{TRUE}. #' @param factor.counts Set to \code{TRUE} to show a count of each factor level in the first column. Defaults to \code{TRUE}. #' @param factor.numeric By default, factor variable dummies basically ignore the \code{summ} argument and show count (or nothing) in the first column and percent or proportion in the second. Set this to \code{TRUE} to instead treat the dummies like numeric binary variables with values 0 and 1. #' @param logical.numeric By default, logical variables are treated as factors with \code{TRUE = "Yes"} and \code{FALSE = "No"}. Set this to \code{FALSE} to instead treat them as numeric variables rather than factors, with \code{TRUE = 1} and \code{FALSE = 0}. #' @param logical.labels When turning logicals into factors, use these labels for \code{FALSE} and \code{TRUE}, respectively, rather than "No" and "Yes". #' @param labels Variable labels. labels will accept four formats: (1) A vector of the same length as the number of variables in the data that will be included in the table (tricky to use if many are being dropped, also won't work for your \code{group} variable), in the same order as the variables in the data set, (2) A matrix or data frame with two columns and more than one row, where the first column contains variable names (in any order) and the second contains labels, (3) A matrix or data frame where the column names (in any order) contain variable names and the first row contains labels, or (4) TRUE to look in the data for variable labels set by the haven package, \code{set_label()} from sjlabelled, or \code{label()} from Hmisc. #' @param title Character variable with the title of the table. #' @param note Table note to go after the last row of the table. Will follow significance star note if \code{group.test = TRUE}. #' @param anchor Character variable to be used to set an anchor link in HTML tables, or a label tag in LaTeX. #' @param col.width Vector of page-width percentages, on 0-100 scale, overriding default column widths in an HTML table. Must have a number of elements equal to the number of columns in the resulting table. #' @param col.align For HTML output, a character vector indicating the HTML \code{text-align} attributes to be used in the table (for example \code{col.align = c('left','center','center')}. Defaults to variable-name columns left-aligned and all others right-aligned (with a little extra padding between columns with \code{col.breaks}). If you want to get tricky, you can add a \code{";"} afterwards and keep putting in whatever CSS attributes you want. They will be applied to the whole column. #' @param align For LaTeX output, string indicating the alignment of each column. Use standard LaTeX syntax (i.e. \code{l|ccc}). Defaults to left in the first column and right-aligned afterwards, with \code{@{\\hskip .2in}} spacers if you have \code{col.breaks}. 
If \code{col.width} is specified, defaults to all \code{p{}} columns with widths set by \code{col.width}. If you want the columns aligned on a decimal point, see \href{https://tex.stackexchange.com/questions/2746/aligning-numbers-by-decimal-points-in-table-columns#2747}{this explainer}. #' @param note.align For LaTeX output, set the alignment for the multi-column table note. Usually "l", but if you have a long note in LaTeX you might want to set it with "p{}" #' @param fit.page For LaTeX output, uses a resizebox to force the table to a certain width. Set to \code{NA} to omit. #' @param simple.kable For \code{out = 'kable'}, if you want the \code{kable} printed to console rather than HTML or PDF, then the multi-column headers and table notes won't work. Set \code{simple.kable = TRUE} to skip both. #' @param obs.function The function to use (and, potentially, format) to count the number of observations for the N column. This should take a vector and return a single number or string. Uses the same string formatting as \code{summ}. If not specified, will check if \code{numformat} is specified using \code{formatfunc} or a string. If not, this will be \code{'notNA(x)'}. If it is, will be \code{'notNA(x)'} with the \code{big.mark} argument set to match the first function listed in \code{numformat}. #' @param opts The same \code{sumtable} options as above, but in a named list format. Useful for applying the same set of options to multiple \code{sumtable}s. #' @examples #' # Examples are only run interactively because they open HTML pages in Viewer or a browser. #' if (interactive()) { #' data(iris) #' #' # Sumtable handles both numeric and factor variables #' st(iris) #' #' # Output to LaTeX as well for easy integration #' # with RMarkdown, or \input{} into your LaTeX docs #' # (specify file too to save the result) #' st(iris, out = 'latex') #' #' # Summary statistics by group #' iris$SL.above.median <- iris$Sepal.Length > median(iris$Sepal.Length) #' st(iris, group = 'SL.above.median') #' #' # Add a group test, or report by-group in "long" format #' st(iris, group = 'SL.above.median', group.test = TRUE) #' st(iris, group = 'SL.above.median', group.long = TRUE) #' #' # Going all out! Adding variable labels with labels, #' # spacers and variable "category" titles with vars, #' # Changing the presentation of the factor variable, #' # and putting the factor in its own column with col.breaks #' var.labs <- data.frame(var = c('SL.above.median','Sepal.Length', #' 'Sepal.Width','Petal.Length', #' 'Petal.Width'), #' labels = c('Above-median Sepal Length','Sepal Length', #' 'Sepal Width','Petal Length', #' 'Petal Width')) #' st(iris, #' labels = var.labs, #' vars = c('Sepal Variables','SL.above.median','Sepal.Length','Sepal.Width', #' 'Petal Variables','Petal.Length','Petal.Width', #' 'Species'), #' factor.percent = FALSE, #' col.breaks = 7) #' #' # Format the results #' # use rep so there are enough observations to see the comma separators #' irisrep = do.call('rbind', replicate(100, iris, simplify = FALSE)) #' # Comma separator for thousands, including for N. 
#' st(irisrep, numformat = 'comma') #' # Dollar formatting for sepal.width, decimal (1.000,00) formatting for the rest #' st(iris, numformat = c('decimal','Sepal.Width' = '$')) #' # Custom formatting throughout, note the big.mark = ',' will also be picked up by N #' st(irisrep, numformat = formatfunc(digits = 2, nsmall = 2, big.mark = ',')) #' #' } #' @rdname sumtable #' @export sumtable <- function(data,vars=NA,out=NA,file=NA, summ=NA, summ.names=NA, add.median = FALSE, group=NA,group.long=FALSE,group.test=FALSE,group.weights=NA, col.breaks=NA, digits=2,fixed.digits=FALSE,numformat=formatfunc(digits = digits, big.mark = ''),skip.format = c('notNA(x)','propNA(x)','countNA(x)', obs.function), factor.percent=TRUE, factor.counts=TRUE,factor.numeric=FALSE, logical.numeric=FALSE,logical.labels=c("No","Yes"),labels=NA,title='Summary Statistics', note = NA, anchor=NA,col.width=NA,col.align=NA, align=NA, note.align = 'l', fit.page = '\\textwidth', simple.kable=FALSE,obs.function = NA, opts=list()) { #Bring in opts list2env(opts,envir=environment()) #######CHECK INPUTS if (is.null(colnames(data))) { stop('Requires data with variable names or column names.') } if (!is.na(file) & !is.character(file)) { stop('Incorrect file name.') } if (!is.na(out)) { if (!(out %in% c('return','viewer','browser','htmlreturn','latex','kable','csv','latexpage'))) { stop('Unrecognized option for out.') } } if (!identical(vars,NA) & !is.character(vars)) { stop('vars must be a character vector.') } if (!identical(note,NA) & !is.character(note)) { stop('note must be a character vector.') } if (!identical(anchor,NA) & !is.character(anchor)) { stop('anchor must be a character variable.') } if (min(is.na(col.width)) == 0 & (!is.vector(col.width) | !is.numeric(col.width) | sum(is.na(col.width)) > 0)) { stop('col.width must be a numeric vector with no missing values.') } if (min(is.na(col.width)) == 0 & (max(col.width) > 100 | min(col.width) < 0)) { stop('Elements of col.width must be between 0 and 100.') } if (!is.logical(add.median)) { stop('add.median must be TRUE or FALSE.') } if (!is.list(summ)) { if (min(is.na(summ)) == 0 & (!is.vector(summ) | !is.character(summ) | sum(is.na(summ)) > 0)) { stop('summ must be a character vector with no missing values.') } } if (!is.list(summ.names)) { if (min(is.na(summ.names)) == 0 & (!is.vector(summ.names) | !is.character(summ.names) | sum(is.na(summ.names)) > 0)) { stop('summ.names must be a character vector with no missing values.') } } if (!is.na(group) & !is.character(group)) { stop('group must be a string referring to a grouping variable in the data.') if (!(group %in% colnames(data))) { stop('group must be a column name in the data.') } } if (!is.logical(group.test) & !is.list(group.test)) { stop('group.test must be TRUE, FALSE, or a named list of options to pass to independence.test\'s opts argument.') } if (!identical(group.test,FALSE) & is.na(group)) { warning('group.test will be ignored, since no group is set.') } if (!identical(group.test,FALSE) & group.long == TRUE) { warning('group.test is incompatible with group.long == TRUE and will be ignored.') } if (!is.logical(factor.numeric) | !is.logical(logical.numeric)) { stop("factor.numeric and logical.numeric must each be TRUE or FALSE") } if (is.logical(group.long)) { group.long.align <- 'c' } else if (is.character(group.long)) { if (group.long %in% c('l','r','c')) { group.long.align <- group.long group.long <- TRUE } else { stop('group.long must be TRUE, FALSE, or a character l, c, or r.') } } else { stop('group.long must be 
TRUE, FALSE, or a character l, c, or r.') } if (!is.logical(fixed.digits)) { stop('fixed.digits must be TRUE or FALSE.') } if (fixed.digits) { warning('fixed.digits is deprecated and will be removed in a future version in favor of a setting in ') } if (!is.numeric(col.breaks) & !identical(col.breaks,NA)) { stop('col.breaks must be numeric.') } if (!is.na(group) & !identical(col.breaks,NA) & group.long == FALSE) { stop('group cannot be combined with col.breaks unless group.long = TRUE.') } if (!is.numeric(digits) & !is.list(digits) & !identical(digits,NA)) { stop('digits must be numeric.') } if (!is.list(numformat)) { if (length(numformat) > 1) { numformat = as.list(numformat) } else { numformat = list(numformat) } } # All elements of numformat must be NA, character, or function # if character, replace with function equivalent for (fm in 1:length(numformat)) { if (is.function(numformat[[fm]])) { } else if (is.na(numformat[[fm]])) { numformat[[fm]] = function(x) x } else if (is.character(numformat[[fm]])) { set_digits <- ifelse(is.na(digits), NULL, digits) set_bigmark <- '' set_decimalmark <- getOption("OutDec") set_percent <- FALSE set_prefix = '' set_suffix = '' if (grepl('comma', numformat[[fm]])) { set_bigmark <- ',' numformat[[fm]] <- gsub('comma','',numformat[[fm]]) } if (grepl('decimal',numformat[[fm]])) { set_bigmark <- '.' set_decimalmark <- ',' numformat[[fm]] <- gsub('decimal','',numformat[[fm]]) } if (grepl('percent', numformat[[fm]])) { set_percent <- TRUE numformat[[fm]] <- gsub('percent','',numformat[[fm]]) } if (nchar(numformat[[fm]]) > 0) { if (grepl('|', numformat[[fm]])) { format_split <- strsplit(numformat[[fm]],'|', fixed = TRUE)[[1]] set_prefix <- format_split[1] set_suffix <- format_split[2] } else { set_prefix <- numformat[[fm]] } } numformat[[fm]] <- formatfunc(percent = set_percent, prefix = set_prefix, suffix = set_suffix, digits = digits, big.mark = set_bigmark, decimal.mark = set_decimalmark) } else { stop('Each element of numformat must be NA, a string, or a function.') } } if (is.na(obs.function)) { obs.function <- 'notNA(x)' if (!is.null(attr(numformat[[1]], 'big.mark'))) { obs.function <- paste0('notNA(x, "', attr(numformat[[1]], 'big.mark'), '")') } skip.format <- skip.format[!is.na(skip.format)] skip.format <- c(skip.format, obs.function) } if (!is.logical(factor.percent) | !is.logical(factor.counts)) { stop('factor.percent and factor.counts must each be TRUE or FALSE.') } if (!is.character(title)) { stop('title must be a character variable.') } if (!identical(out,NA) & !(out %in% c('viewer', 'browser','return','htmlreturn','kable','latex','latexpage', 'csv'))) { stop('out must be viewer, browser, return, htmlreturn, kable, latex, or latexpage') } if (identical(out, 'csv') & is.na(file)) { warning('out = "csv" will just return the vtable as a data.frame unless combined with file') } # Weights wts <- NULL if (length(group.weights) > 1) { wts <- group.weights } if (length(group.weights) == 1) { if (is.character(group.weights)) { wts <- data[[group.weights]] data[[group.weights]] <- NULL } } if (!identical(group.weights, NA) & is.null(wts)) { stop('group.weights must be a vector of length nrow(data), or the name of a column in data') } if (!is.numeric(wts) & !is.null(wts)) { stop('group.weights must be numeric.') } if ((length(wts) != nrow(data)) & !is.null(wts)) { stop('group.weights must be the same length as the number of rows in data') } if (!is.null(wts)) { if (min(wts, na.rm = TRUE) < 0) { stop('No negative weights allowed in group.weights') } } if 
(!is.null(wts)) { # Drop missing values havewts <- !is.na(wts) wts <- wts[havewts] data <- subset(data, havewts) } #One-column matrices run into some problems later on if (is.matrix(data) & dim(data)[2] == 1) { data <- as.data.frame(data) } #######CONVERT ALL LABELED NUMERIC VARIABLES AND LOW-NUNIQ CHARACTERS TO FACTORS #Get classes of each variable, only caring about numeric/character/factor/logical/other var.classes <- sapply(data, function(x) ifelse( is.factor(x), 'factor', ifelse( is.logical(x), 'logical', ifelse( is.character(x), 'character', ifelse( is.numeric(x), 'numeric', 'other'))))) labwarning <- FALSE for (c in 1:ncol(data)) { #Factorize each character variable with six or fewer unique values if (var.classes[c] == 'character') { if (vtable::nuniq(data[[c]]) <= 6) { data[[c]] <- as.factor(data[[c]]) } else { if (names(data)[c] %in% vars) { warning('You have specified a variable in vars that is a character variable with a large number of different values. It will be excluded. If you are sure you want it in the table, convert it to a factor before calling sumtable.') } vars <- vars[!(vars == names(data)[c])] } } else if (var.classes[c] == 'logical') { #Turn logicals to numerics if logical.numeric = FALSE if (logical.numeric) { data[[c]] <- as.numeric(data[[c]]) } else { # Otherwise make them factors data[[c]] <- factor(data[[c]], levels = c(FALSE,TRUE), labels = logical.labels) } } else if (var.classes[c] == 'numeric') { # If a numeric variable has value labels, turn this into a factor if ('labelled' %in% class(data[[c]]) | ('haven_labelled' %in% class(data[[c]]) | !is.null(unlist(sjlabelled::get_labels(data[[c]]))))) { #DON'T include variables with unlabelled values unlabvals <- length(sjlabelled::get_labels(data[[c]])) == length(sjlabelled::get_labels(data[[c]], non.labelled = TRUE)) if (!unlabvals) { data[[c]] <- as.numeric(data[[c]]) labwarning <- TRUE } else { #Turn into the appropriately-titled factor suppressWarnings(data[[c]] <- sjlabelled::as_label(data[,c,drop=FALSE])) } } } } if (labwarning) { warning('Some labelled variables have unlabeled values. Treating these as numeric variables and ignoring labels.') } #Re-get classes of each variable, only caring about numeric/character/factor/logical var.classes <- sapply(data, function(x) ifelse( is.factor(x), 'factor', ifelse( is.logical(x), 'logical', ifelse( is.character(x), 'character', ifelse( is.numeric(x), 'numeric', 'other'))))) #Do we have factor and also a potentially-non-compliant summ? factor.warning <- FALSE if (any(var.classes == 'factor') & !identical(summ,NA) & !factor.numeric) { if (is.list(summ) & !identical(col.breaks,NA)) { ext.col.breaks <- c(1,col.breaks,ncol(data)) for (i in 1:length(summ)) { if ((!(summ[[i]][1] %in% c('length(x)', obs.function)) | !(summ[[i]][2] %in% 'mean(x)')) & any(var.classes[ext.col.breaks[i]:ext.col.breaks[i+1]] == 'factor')) { factor.warning <- TRUE } } } else if (!is.list(summ)) { if (!(summ[1] %in% c('length(x)',obs.function)) | !(summ[2] %in% 'mean(x)')) { factor.warning <- TRUE } } else { if (!(summ[[1]][1] %in% c('length(x)',obs.function)) | !(summ[[1]][2] %in% 'mean(x)')) { factor.warning <- TRUE } } } if (factor.warning) { warning('Factor variables ignore custom summ options. 
Cols 1 and 2 are count and percentage.\nBeware combining factors with a custom summ unless factor.numeric = TRUE.') } #######DEFAULTS if (identical(vars,NA)) { #By default, include numeric and factor vars (includable logicals and characters already converted) colkeeps <- sapply(1:ncol(data), function(x) ifelse( is.factor(data[[x]]) | is.numeric(data[[x]]), x, 0)) if (sum(colkeeps > 0) == 0) { stop('It doesn\'t look like you have any variables that belong in a sumtable. Check your data. Use vars to explicitly choose variables, or convert things to numeric or factor before sending to sumtable.') } vars <- names(data)[colkeeps[colkeeps > 0]] #But not whatever is being used for group! if (!is.na(group)) { vars <- vars[vars != group] } var.classes <- sapply(as.data.frame(data[,vars]), function(x) ifelse( is.factor(x), 'factor', 'numeric')) } else { #Note that if vars is explicitly defined it might contain non-variables var.classes <- sapply(vars, function(x) ifelse( !(x %in% names(data)), 'header', ifelse( is.factor(data[[x]]), 'factor', ifelse( is.logical(data[[x]]), 'logical', ifelse( is.character(data[[x]]), 'character', ifelse( is.numeric(data[[x]]), 'numeric', 'other')))))) } if (identical(col.breaks,NA)) { col.breaks <- length(vars) } if (utils::tail(col.breaks,1) < length(vars)) { col.breaks[length(col.breaks) + 1] <- length(vars) } #Get a list of the variables that each column covers col.windows <- c(0,col.breaks) col.vars <- lapply(1:length(col.breaks), function(x) (col.windows[x]+1):col.breaks[x]) #Summary function defaults, and fill in summ.names as well #Are we filling summ.names at the same time? fill.sn <- identical(summ.names,NA) if (identical(summ,NA)) { summ <- list() if (fill.sn) { summ.names <- list() } for (i in 1:length(col.vars)) { if (all(var.classes[col.vars[[i]]] == 'factor')) { summ[[i]] <- c('sum(x)','mean(x)') if (fill.sn & factor.percent) { summ.names[[i]] <- c('N','Percent') } else { summ.names[[i]] <- c('N','Mean') } # If there are weights if (!is.null(wts)) { summ[[i]] <- c('sum(x)', 'stats::weighted.mean(x, w = wts, na.rm = TRUE)') if (fill.sn) { summ.names[[i]][2] <- paste0(summ.names[[i]][2], ' (Weighted)') } } } else if ((is.na(group) | group.long == TRUE) & length(col.breaks) == 1) { summ[[i]] <- c(obs.function,'mean(x)','sd(x)','min(x)','pctile(x)[25]','pctile(x)[75]','max(x)') if (fill.sn) { summ.names[[i]] <- c('N','Mean','Std. Dev.','Min','Pctl. 25','Pctl. 75','Max') } # Add median if desired if (add.median) { summ[[i]] <- c(obs.function,'mean(x)','sd(x)','min(x)','pctile(x)[25]','median(x)','pctile(x)[75]','max(x)') if (fill.sn) { summ.names[[i]] <- c('N','Mean','Std. Dev.','Min','Pctl. 25','Pctl. 50', 'Pctl. 75','Max') } } # If there are weights if (!is.null(wts)) { summ[[i]][summ[[i]] == 'mean(x)'] <- 'stats::weighted.mean(x, w = wts, na.rm = TRUE)' summ[[i]][summ[[i]] == 'sd(x)'] <- 'weighted.sd(x, w = wts)' if (fill.sn) { summ.names[[i]][summ.names[[i]] == 'Mean'] <- 'Wt. Mean' summ.names[[i]][summ.names[[i]] == 'Std. Dev.'] <- 'Wt. SD' } } } else if ((is.na(group) | group.long == TRUE) & length(col.breaks) > 1) { summ[[i]] <- c(obs.function,'mean(x)','sd(x)') if (fill.sn) { summ.names[[i]] <- c('N','Mean','Std. Dev.') } if (add.median) { summ[[i]] <- c(obs.function,'mean(x)','sd(x)', 'median(x)') if (fill.sn) { summ.names[[i]] <- c('N','Mean','Std. 
Dev.', 'Median') } } # If there are weights if (!is.null(wts)) { summ[[i]][summ[[i]] == 'mean(x)'] <- 'stats::weighted.mean(x, w = wts, na.rm = TRUE)' summ[[i]][summ[[i]] == 'sd(x)'] <- 'weighted.sd(x, w = wts)' if (fill.sn) { summ.names[[i]][summ.names[[i]] == 'Mean'] <- 'Wt. Mean' summ.names[[i]][summ.names[[i]] == 'Std. Dev.'] <- 'Wt. SD' } } } else { summ[[i]] <- c(obs.function,'mean(x)','sd(x)') if (fill.sn) { summ.names[[i]] <- c('N','Mean','SD') } if (add.median) { summ[[i]] <- c(obs.function,'mean(x)','sd(x)', 'median(x)') if (fill.sn) { summ.names[[i]] <- c('N','Mean','SD', 'Median') } } # If there are weights if (!is.null(wts)) { summ[[i]][summ[[i]] == 'mean(x)'] <- 'stats::weighted.mean(x, w = wts, na.rm = TRUE)' summ[[i]][summ[[i]] == 'sd(x)'] <- 'weighted.sd(x, w = wts)' if (fill.sn) { summ.names[[i]][summ.names[[i]] == 'Mean'] <- 'Wt. Mean' summ.names[[i]][summ.names[[i]] == 'SD'] <- 'Wt. SD' } } } } } else if (!is.list(summ)) { #If summ was entered as a vector, turn it into a list #And copy it if there are multiple columns summ <- lapply(1:length(col.vars), function(x) summ) } #Figure if digits started as a list or vector. If it did, #ignore the auto-zero-digits for integers digits.was.list <- is.list(digits) if (is.vector(digits)) { if (length(digits) > 1) { digits.was.list <- TRUE } } #Now fill in values for digits if (identical(digits,NA)) { digits <- list() for (i in 1:length(col.breaks)) { digits[[i]] <- rep(3,length(summ[[i]])) digits[[i]][1] <- 0 } } else if (is.numeric(digits)) { if (length(digits) == 1) { digopt <- digits digits <- list() for (i in 1:length(col.breaks)) { digits[[i]] <- rep(digopt,length(summ[[i]])) } } else { digits <- lapply(1:length(col.breaks), function(x) digits) } } #If we have fixed.digits and digits weren't #explicitly set by list, #set digits to 0 for integers if (fixed.digits & !digits.was.list) { for (i in 1:length(summ)) { for (j in 1:length(summ[[i]])) { # Attempt to calc each variable for this function calcs <- sapply(vars, function(x) parsefcn_summ(data[[x]],summ[[i]][j])) calcs <- calcs[!is.na(calcs)] if (is.round(calcs) | summ[[i]][j] == obs.function) { digits[[i]][j] <- 0 } } } } # If numformat is a single thing, fill it in if (length(numformat) == 1) { single.numformat = numformat[[1]] numformat = list() for (i in 1:length(vars)) { numformat[[i]] = single.numformat } } # If numformat is a named list, fill it in if (length(numformat) < length(vars)) { new.numformat <- list() for (v in vars) { if (v %in% names(numformat)) { new.numformat[[v]] <- numformat[[v]] } else { new.numformat[[v]] <- numformat[[1]] } } numformat <- new.numformat rm(new.numformat) } #And fill in summ.names the rest of the way #If a vector was specified for summ.names, turn it into a list if (!fill.sn & !is.list(summ.names)) { summ.names <- lapply(1:length(col.vars), function(x) summ.names) } #If summ.names is still missing, create it from summ if (identical(summ.names,NA)) { summ.names <- list() for (i in 1:length(col.vars)) { functionsused <- summ[[i]] functionsused <- sub('\\(x\\)','',functionsused) firstletters <- toupper(substring(functionsused,1,1)) summ.names[[i]] <- paste0(firstletters,substring(functionsused,2)) } } #group.test defaults #send the options to .opts, and make group.test be logical if (identical(group.test,TRUE)) { if (out %in% c('latex','latexpage') | (isTRUE(getOption('knitr.in.progress')) & is.na(out) & isTRUE(knitr::is_latex_output()))) { group.test.opts <- list(format = '{name}$={stat}^{{stars}}$') } else if (out %in% 
c('return','kable','csv') | (isTRUE(getOption('knitr.in.progress')) & is.na(out) & isFALSE(knitr::is_latex_output()) & isFALSE(knitr::is_html_output()))) { group.test.opts <- list(format = '{name}={stat}{stars}') } else { group.test.opts <- list(format = '{name}={stat}<sup>{stars}</sup>') } } else if (is.list(group.test)) { group.test.opts <- group.test group.test <- TRUE } starnote <- NA_character_ ####### APPLY LABELS OPTION vartitles <- vars grouptitle <- group ####### APPLICATION OF LABELS OPTION #Pull from label attribute if present if (identical(labels,TRUE)) { labs <- sapply(vars, function(x) attr(data[[x]],'label')) has.no.labs <- unlist(sapply(labs, is.null)) vartitles[!has.no.labs] <- unlist(labs[!has.no.labs]) if (!is.na(group)) { if (!is.null(attr(data[[group]],'label'))) { grouptitle <- attr(data[[group]],'label') } } } else if (!identical(labels,NA)) { if (is.vector(labels)) { #Make sure it's the right length if (length(labels) == length(vars)) { vartitles[!is.na(labels)] <- labels[!is.na(labels)] } else { stop('label vector must have as many elements as there are variables as will be in the sumtable. Use NA elements to fill in, or see help(sumtable) for other label formats that do not require every variable to have a label.') } #Check for multi-row two-column format } else if(dim(labels)[1] > 1 & dim(labels)[2] == 2) { # What we have now temp.df <- data.frame(vars = vars, stringsAsFactors = FALSE) #Put labels in mergeable format labs <- as.data.frame(labels, stringsAsFactors = FALSE) names(labs) <- c('vars','vartitles') #They gotta be strings labs$vars <- as.character(labs$vars) labs$vartitles <- as.character(labs$vartitles) #Hold original order temp.df$order <- 1:nrow(temp.df) #Bring in variable labels by name, allow NA labels with all.x=TRUE temp.df <- merge(temp.df,labs,sort=FALSE,all.x=TRUE) temp.df <- temp.df[order(temp.df$order),] # Fill in the NAs with the column titles temp.df$vartitles[is.na(temp.df$vartitles)] <- temp.df$vars[is.na(temp.df$vartitles)] vartitles <- temp.df$vartitles if (!is.na(group)) { if (sum(labels[[1]] == group) > 0) { grouptitle <- labels[labels[[1]] == group,2] } } #Check if it's in the one-row variable-name format } else if (dim(labels)[1]==1 & !is.null(colnames(labels))) { #Put into two-column format labs <- data.frame(vars=colnames(labels),vartitles=as.character(t(labels[1,])),stringsAsFactors = FALSE) # What we have now temp.df <- data.frame(vars = vars, stringsAsFactors = FALSE) #Hold original order temp.df$order <- 1:nrow(temp.df) #Bring in variable labels by name, allow NA labels with all.x=TRUE temp.df <- merge(temp.df,labs,sort=FALSE,all.x=TRUE) temp.df <- temp.df[order(temp.df$order),] # Fill in the NAs with the column titles temp.df$vartitles[is.na(temp.df$vartitles)] <- temp.df$vars[is.na(temp.df$vartitles)] vartitles <- temp.df$vartitles if (!is.na(group)) { if (!is.null(labels[[group]])) { grouptitle <- labels[[group]][1] } } } else{ stop('Unrecognized label format. 
See help(vtable).') } } ####### FORM SUMMARY TABLES TO BUILD ON if (is.na(group)) { # Create one for each column st <- list() for (i in 1:length(col.breaks)) { #Initialize with no rows st[[i]] <- utils::read.csv(text = paste(c('Variable',summ.names[[i]]), collapse =','), check.names = FALSE) contents <- lapply(col.vars[[i]], function(x) { summary.row(data, vars[x], st[[i]], vartitles[x], summ[[i]], var.classes[x], factor.percent, factor.counts, factor.numeric, digits[[i]], fixed.digits, wts, numformat[[x]], skip.format, function(x) eval(parse(text = obs.function))) }) contents <- do.call(rbind, contents) st[[i]] <- rbind(st[[i]],contents) } #Make sure everybody has the same number of rows and bind st <- cbind_unequal(st) } else if(!group.long) { # One for each group st <- list() # Groups to loop over grouplevels <- sort(unique(data[[group]])) for (i in 1:length(grouplevels)) { #Initialize with no rows st[[i]] <- utils::read.csv(text = paste(c('Variable',summ.names[[1]]), collapse =','), check.names = FALSE) st[[i]][1,] <- c(paste0('HEADERROW',grouptitle), paste0(grouplevels[i],'_MULTICOL_c_',length(summ.names[[1]])), rep('DELETECELL',length(summ.names[[1]])-1)) contents <- lapply(1:length(vars), function(x) summary.row(data[data[[group]] == grouplevels[i],], vars[x], st[[i]], vartitles[x], summ[[1]], var.classes[x], factor.percent, factor.counts, factor.numeric, digits[[1]], fixed.digits, wts[data[[group]] == grouplevels[i]], numformat[[x]], skip.format, function(x) eval(parse(text = obs.function)))) #On the last one, if there's a test, add it if (group.test & i == length(grouplevels)) { #Redo header with a Test column st[[i]] <- utils::read.csv(text = paste(c('Variable',summ.names[[1]], 'Test'), collapse =','), check.names = FALSE) st[[i]][1,] <- c(paste0('HEADERROW',grouptitle), paste0(grouplevels[i],'_MULTICOL_c_',length(summ.names[[1]])), rep('DELETECELL',length(summ.names[[1]])-1),'') for (x in 1:length(vars)) { #Sometimes perhaps an error! 
test.result <- suppressWarnings( try(independence.test(data[[group]], data[[vars[x]]], w = wts, opts=group.test.opts), silent = TRUE)) if (inherits(test.result,'try-error')) { test.result <- '' } #We'll be no.escaping later, so escape the < in tiny pvals now if (!(out %in% c('latex','latexpage'))) { test.result <- gsub('<0','\\&lt0',test.result) } contents[[x]]$Test <- c(test.result, rep('',nrow(contents[[x]])-1)) } } contents <- do.call(rbind, contents) st[[i]] <- rbind(st[[i]],contents) if (i > 1) { st[[i]]$Variable <- NULL } } st <- cbind_unequal(st) #If we did a test, add a table note if (group.test) { #It's possible they have chosen a format without stars havenote <- TRUE if (!is.null(group.test.opts[['format']])) { havenote <- grepl('\\{stars\\}',group.test.opts[['format']]) } if (havenote) { star.cutoffs <- c(.01,.05,.1) star.markers <- c('***','**','*') if (!is.null(group.test.opts[['star.cutoffs']])) { star.cutoffs <- group.test.opts[['star.cutoffs']] } if (!is.null(group.test.opts[['star.markers']])) { star.markers <- group.test.opts[['star.markers']] } #Order biggest to smallest star.markers <- star.markers[order(-star.cutoffs)] star.cutoffs <- star.cutoffs[order(-star.cutoffs)] starnote <- paste0(paste0(star.markers,' p<',star.cutoffs),collapse = '; ') starnote <- paste0('Statistical significance markers: ',starnote) } } } else { # One for each group st <- list() # Groups to loop over grouplevels <- sort(unique(data[[group]])) st.all <- list() for (j in 1:length(grouplevels)) { for (i in 1:length(col.breaks)) { #Initialize with no rows st[[i]] <- utils::read.csv(text = paste(c('Variable',summ.names[[i]]), collapse =','), check.names = FALSE) contents <- lapply(col.vars[[i]], function(x) summary.row(data[data[[group]] == grouplevels[j],], vars[x], st[[i]], vartitles[x], summ[[i]], var.classes[x], factor.percent, factor.counts, factor.numeric, digits[[i]], fixed.digits, wts[data[[group]] == grouplevels[j]], numformat[[x]], skip.format, function(x) eval(parse(text = obs.function)))) summcontents <- do.call(rbind, contents) st[[i]] <- rbind(st[[i]],summcontents) } st.all[[j]] <- cbind_unequal(st) #Header rows header.rows <- st.all[[j]][1,] addrow = 0 if (j > 1) { header.rows[1,] <- rep('',ncol(header.rows)) addrow = 1 } header.rows[nrow(header.rows)+addrow,] <- c( paste0(grouptitle,': ',grouplevels[j],'_MULTICOL_',group.long.align,'_',ncol(header.rows)), rep('DELETECELL',ncol(header.rows)-1)) st.all[[j]] <- rbind(header.rows,st.all[[j]]) } st <- do.call(rbind,st.all) } ####### APPLICATION OF COL.WIDTH AND ALIGN DEFAULTS if (identical(col.width,NA) & identical(align,NA)) { align <- rep('r',ncol(st)) align[names(st) == 'Variable'] <- 'l' #Padding only for non-first Variables, for col.breaks align[names(st) == 'Variable'] <- '@{\\hskip .1in}l' if (names(st)[1] == 'Variable') { align[1] <- 'l' } if (group.test) { align[names(st) == 'Test'] <- 'l' } align <- paste0(align, collapse = '') } else { align <- paste0('p{',col.width/100,'\\textwidth}') if (sum(names(st) == 'Variable') > 1) { align[names(st) == 'Variable'][-1] <- paste0('@{\\hskip .2in}',align[names(st) == 'Variable'][-1]) } align <- paste0(align,collapse='') } if (identical(col.width,NA)) { col.width <- rep(1,ncol(st)) #Any variable name columns are expanded col.width[names(st) == 'Variable'] <- 2 if (group.test) { col.width[names(st) == 'Test'] <- 1.5 } #Add it up totalwidth <- sum(col.width) #If total amount is less than two name-spaces, let table take up 60% of screen #From 2-3 name-spaces, 80% #More than 3 is full-screen 
tablescale <- 60 + 20*(totalwidth>=2) + 20*(totalwidth>=3) #And rescale column widths col.width <- (col.width/totalwidth)*tablescale } #col.align defaults if (identical(col.align, NA)) { col.align <- rep('right',ncol(st)) #Padding only for non-first Variables, for col.breaks col.align[names(st) == 'Variable'] <- 'left; padding-left:10px' if (names(st)[1] == 'Variable') { col.align[1] <- 'left' } if (group.test) { col.align[names(st) == 'Test'] <- 'left' } } if (!is.na(group)) { #Center the column names unless it's a "variable" column names(st)[names(st) != 'Variable'] <- paste0(names(st)[names(st) != 'Variable'],'_MULTICOL_c_1') } # Finalize note if (!is.na(note) & !is.na(starnote)) { note <- paste0(starnote,'. ',note) } else if (!is.na(starnote)) { note <- starnote } ####### LATEX OUTPUT if (!identical(out, NA) & out %in% c('latex','latexpage')) { #Table only if (out == 'latex') { return(cat(dftoLaTeX(st, file = file, align = align, anchor = anchor, title = title, note = note, note.align = note.align, fit.page = fit.page, no.escape = ifelse(group.test,ncol(st),NA)))) } #Now for the full page out.latex <- '\\documentclass{article}\n\\begin{document}\n\n%% sumtable \\{vtable\\}\n\n' #And bring in the table itself out.latex <- paste(out.latex,dftoLaTeX(st, align = align, anchor = anchor, title = title, note = note, note.align = note.align, fit.page = fit.page, no.escape = ifelse(group.test,ncol(st),NA)),'\n\n\\end{document}',sep='') ####### APPLICATION OF FILE OPTION if (!is.na(file)) { #If they forgot a file extension, fill it in if (!grepl("\\.tex",file)) { file <- paste(file,'.tex',sep='') } filepath <- file.path(file) #Create temporary tex file writeLines(out.latex,filepath) } return(cat(out.latex)) } ####### CONSTRUCTION OF HTML #Head of file out.html <- paste(' <html style=\"font-family:Helvetica,Arial,Sans\"> <head><title>Summary Statistics</title>', '<style type = \"text/css\"> p { font-size:smaller; } table { border: 0px; border-collapse:collapse; font-size:smaller; table-layout:fixed; margin-left:0%; margin-right:auto; } .headtab { width: 100%; margin-left:auto; margin-right:auto; } th { background-color: #FFFFFF; font-weight:bold; text-align:left; } table tr:nth-child(odd) td { background-color: #FFFFFF; padding:4px; word-wrap: break-word; word-break:break-all; } table tr:nth-child(even) td { background-color: #D3D3D3; padding:4px; word-wrap: break-word; word-break:break-all; }</style></head><body>',sep='') #Dataset name and description out.html <- paste(out.html, '<table class=\"headtab\">', '<tr><td style=\"text-align:left\">sumtable {vtable}</td>', '<td style=\"text-align:right\">Summary Statistics</td></tr></table>', '<h1>',title,'</h1>') #And bring in the table itself out.html <- paste(out.html,dftoHTML(st,out='htmlreturn',col.width=col.width, col.align=col.align,anchor=anchor, note = note, note.align = note.align, no.escape = ifelse(group.test,ncol(st),NA)),'</body></html>',sep='') ####### APPLICATION OF FILE OPTION if (!is.na(file)) { if (identical(out,'csv')) { #If they forgot a file extension, fill it in if (!grepl("\\.csv",file)) { file <- paste(file,'.csv',sep='') } filepath <- file.path(file) #Create temporary html file utils::write.csv(clean_multicol(st),filepath, row.names = FALSE) } else { #If they forgot a file extension, fill it in if (!grepl("\\.htm",file)) { file <- paste(file,'.html',sep='') } filepath <- file.path(file) #Create temporary html file writeLines(out.html,filepath) } } #For more easily working with if statements if (is.na(out)) { out = '' } ####### 
APPLICATION OF OUT OPTION #If the plan is to produce a viewable HTML, create it if (out == 'viewer' | out == 'browser' | out == '') { #Get temporary dirpath tempDir <- tempfile() #Create temporary directory dir.create(tempDir) #Get temporary filepath htmlpath <- file.path(tempDir,'sumtable.html') #Create temporary html file writeLines(out.html,htmlpath) } #Either print the variable table to the help window #or return a variable table to the screen, as desired if (out == 'kable' | (isTRUE(getOption('knitr.in.progress')) & out == '')) { #kable can't handle the blank rows group.long makes st <- st[!apply(st,MARGIN=1,FUN=function(x) !any(!(x==rep('',ncol(st))))),] #I don't know how this would happen but just in case st <- st[!apply(st,MARGIN=1,FUN=function(x) propNA(x) == 1),] if (!simple.kable) { st <- clean_multicol_kable(st,title,note) if (isTRUE(getOption('knitr.in.progress')) & out == '') { if (isTRUE(knitr::is_html_output())) { st <- kableExtra::kable_styling(st) } } return(st) } else { st <- knitr::kable(clean_multicol(st), caption = title) return(st) } } else if (Sys.getenv('RSTUDIO')=='1' & (out == 'viewer' | out == '')) { rstudioapi::viewer(htmlpath) } else if (Sys.getenv('RSTUDIO')=='' & out == 'viewer') { stop('out = viewer is not a valid option if RStudio is not running.') } else if ((Sys.getenv('RSTUDIO')=='' & out == '') | (out == 'browser')) { utils::browseURL(htmlpath) } else if (out == 'return' | out == 'csv') { return(clean_multicol(st)) } else if (out == 'htmlreturn') { return(cat(out.html)) } } #' @rdname sumtable #' @export st <- sumtable
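# Illustrative sketch (not part of the package source; assumes the package is
# loaded): because opts is passed through list2env() at the top of sumtable(),
# a named list of options can be reused across several tables.
if (interactive()) {
  data(iris)
  shared_opts <- list(add.median = TRUE, factor.percent = FALSE)
  st(iris, opts = shared_opts)
  st(mtcars, opts = shared_opts)
}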
/scratch/gouwar.j/cran-all/cranData/vtable/R/sumtable.R
#' Variable Table Function #' #' This function will output a descriptive variable table either to the console or as an HTML file that can be viewed continuously while working with data. \code{vt()} is the same thing but requires fewer key presses to type. #' #' Outputting the variable table as a help file will make it easy to search through variable names or labels, or to refer to information about the variables easily. #' #' This function is in a similar spirit to \code{promptData()}, but focuses on variable documentation rather than dataset documentation. #' #' If you would like to include a \code{vtable} in an RMarkdown document, it should just work! If you leave \code{out} blank, it will default to a nicely-formatted \code{knitr::kable()}, although this will drop some formatting elements like multi-column cells (or do \code{out="kable"} to get an unformatted \code{kable} that you can format yourself). If you prefer the \code{vtable} package formatting, then use \code{out="latex"} if outputting to LaTeX or \code{out="htmlreturn"} for HTML, both with \code{results="asis"} in the code chunk. Alternately, in HTML, you can use the \code{file} option to write to file and use a \code{<iframe>} to include it. #' #' @param data Data set; accepts any format with column names. If variable labels are set with the haven package, \code{set_label()} from sjlabelled, or \code{label()} from Hmisc, \code{vtable} will extract them automatically. #' @param out Determines where the completed table is sent. Set to \code{"browser"} to open HTML file in browser using \code{browseURL()}, \code{"viewer"} to open in RStudio viewer using \code{viewer()}, if available. Use \code{"htmlreturn"} to return the HTML code to R. Use \code{"return"} to return the completed variable table to R in data frame form or \code{"kable"} to return it as a \code{knitr::kable()}. Additional options include \code{"csv"} to write to CSV in conjunction with \code{file} (although this will drop most additional formatting), \code{"latex"} for a LaTeX table or \code{"latexpage"} for a full buildable LaTeX page. Defaults to \code{"viewer"} if RStudio is running, \code{"browser"} if it isn't, or a \code{"kable"} passed through \code{kableExtra::kable_styling()} defaults if it's an RMarkdown document being built with \code{knitr}. #' @param file Saves the completed variable table file to HTML or .tex with this filepath. May be combined with any value of \code{out}, although note that \code{out = "return"} and \code{out = "kable"} will still save the standard vtable HTML file as with \code{out = "viewer"} or \code{out = "browser"}. #' @param labels Variable labels. labels will accept three formats: (1) A vector of the same length as the number of variables in the data, in the same order as the variables in the data set, (2) A matrix or data frame with two columns and more than one row, where the first column contains variable names (in any order) and the second contains labels, or (3) A matrix or data frame where the column names (in any order) contain variable names and the first row contains labels. Setting the labels parameter will override any variable labels already in the data. Set to \code{"omit"} if the data set has embedded labels but you don't want any labels in the table. #' @param class Set to \code{TRUE} to include variable classes in the variable table. Defaults to \code{TRUE}. 
#' @param values Set to \code{TRUE} to include the range of values of each variable: min and max for numeric variables, list of factors for factor or ordered variables, and 'TRUE FALSE' for logicals. values will detect and use value labels set by the sjlabelled or haven packages, as long as every value is labelled. Defaults to \code{TRUE}. #' @param missing Set to \code{TRUE} to include the number of NAs in the variable. Defaults to \code{FALSE}. #' @param index Set to \code{TRUE} to include the index number of the column with the variable name. Defaults to \code{FALSE}. #' @param factor.limit Sets maximum number of factors that will be included if \code{values = TRUE}. Set to 0 for no limit. Defaults to 5. #' @param char.values Set to \code{TRUE} to include values of character variables as though they were factors, if \code{values = TRUE}. Or, set to a character vector of variable names to list values of only those character variables. Defaults to \code{FALSE}. Has no effect if \code{values = FALSE}. #' @param data.title Character variable with the title of the dataset. #' @param desc Character variable offering a brief description of the dataset itself. This will by default include information on the number of observations and the number of columns. To remove this, set \code{desc='omit'}, or include any description and then include \code{'omit'} as the last four characters. #' @param note Table note to go after the last row of the table. #' @param anchor Character variable to be used to set an anchor link in HTML tables, or a label tag in LaTeX. #' @param col.width Vector of page-width percentages, on 0-100 scale, overriding default column widths in HTML table. Must have a number of elements equal to the number of columns in the resulting table. #' @param col.align For HTML output, a character vector indicating the HTML \code{text-align} attributes to be used in the table (for example \code{col.align = c('left','center','center')}. Defaults to all left-aligned. If you want to get tricky, you can add a \code{";"} afterwards and keep putting in whatever CSS attributes you want. They will be applied to the whole column. #' @param align For LaTeX output, string indicating the alignment of each column. Use standard LaTeX syntax (i.e. \code{l|ccc}). Defaults to all \code{p{}} columns with widths set using the same defaults as with \code{col.width}. Be sure to escape special characters, in particular backslashes (i.e. \code{p{.25\\\\textwidth}} instead of \code{p{.25\\textwidth}}). #' @param note.align Set the alignment for the multi-column table note. Usually "l", but if you have a long note in LaTeX you might want to set it with "p{}" #' @param fit.page For LaTeX output, uses a resizebox to force the table to a certain width. Set to \code{NA} to omit. Often \code{'\\textwidth'}. #' @param summ Character vector of summary statistics to include for numeric and logical variables, in the form \code{'function(x)'}. This option is flexible, and allows any summary statistic function that takes in a column and returns a single number. For example, \code{summ=c('mean(x)','mean(log(x))')} will provide the mean of each variable as well as the mean of the log of each variable. 
Keep in mind the special vtable package helper functions designed specifically for this option \code{propNA}, \code{countNA}, and \code{notNA}, which report counts and proportions of NAs, or counts of not-NAs, in the vectors, \code{nuniq}, which reports the number of unique values, and \code{pctile}, which returns a vector of the 100 percentiles of the variable. NAs will be omitted from all calculations other than \code{propNA(x)} and \code{countNA(x)}. #' @param lush Set to \code{TRUE} to select a set of options with more information: sets \code{char.values} and \code{missing} to \code{TRUE}, and sets summ to \code{c('mean(x)', 'sd(x)', 'nuniq(x)')}. \code{summ} can be overwritten by setting \code{summ} to something else. #' @param opts The same \code{vtable} options as above, but in a named list format. Useful for applying the same set of options to multiple \code{vtable}s. #' @examples #' \dontshow{ #' #These tests use the out='htmlreturn' option #' #so that the same process of generating HTML is followed #' #but a browser window is not opened during testing. #' #This process is identical to regular operation except that #' #HTML is written to the R output rather than a browser. #' df <- data.frame(var1 = 1:4,var2=5:8,var3=c('A','B','C','D'), #' var4=as.factor(c('A','B','C','C')),var5=c(TRUE,TRUE,FALSE,FALSE)) #' #' #Demonstrating different options: #' vtable(df,labels=c('Number 1','Number 2','Some Letters', #' 'Some Labels','You Good?'), #' out='htmlreturn') #' vtable(subset(df,select=c(1,2,5)), #' labels=c('Number 1','Number 2','You Good?'),class=FALSE,values=FALSE, #' out='htmlreturn') #' vtable(subset(df,select=c('var1','var4')), #' labels=c('Number 1','Some Labels'), #' factor.limit=1,col.width=c(10,10,40,35), #' out='htmlreturn') #' #' #Different methods of applying variable labels: #' labelsmethod2 <- data.frame(var1='Number 1',var2='Number 2', #' var3='Some Letters',var4='Some Labels',var5='You Good?') #' vtable(df,labels=labelsmethod2,out='htmlreturn') #' labelsmethod3 <- data.frame(a =c("var1","var2","var3","var4","var5"), #' b=c('Number 1','Number 2','Some Letters','Some Labels','You Good?')) #' vtable(df,labels=labelsmethod3,out='htmlreturn') #' #' #Using value labels and pre-labeled data: #' library(sjlabelled) #' df <- set_label(df,c('Number 1','Number 2','Some Letters', #' 'Some Labels','You Good?')) #' df$var1 <- set_labels(df$var1,labels=c('A little','Some more', #' 'Even more','A lot')) #' vtable(df,out='htmlreturn') #' #' #efc is data with embedded variable and value labels from the sjlabelled package #' library(sjlabelled) #' data(efc) #' vtable(efc,out='htmlreturn') #' #' #Adding summary statistics for variable mean and proportion of data that is missing. 
#' vtable(efc,summ=c('mean(x)','propNA(x)'),out='htmlreturn') #' #' } #' if(interactive()){ #' df <- data.frame(var1 = 1:4,var2=5:8,var3=c('A','B','C','D'), #' var4=as.factor(c('A','B','C','C')),var5=c(TRUE,TRUE,FALSE,FALSE)) #' #' #Demonstrating different options: #' vtable(df,labels=c('Number 1','Number 2','Some Letters', #' 'Some Labels','You Good?')) #' vtable(subset(df,select=c(1,2,5)), #' labels=c('Number 1','Number 2','You Good?'),class=FALSE,values=FALSE) #' vtable(subset(df,select=c('var1','var4')), #' labels=c('Number 1','Some Labels'), #' factor.limit=1,col.width=c(10,10,40,35)) #' #' #Different methods of applying variable labels: #' labelsmethod2 <- data.frame(var1='Number 1',var2='Number 2', #' var3='Some Letters',var4='Some Labels',var5='You Good?') #' vtable(df,labels=labelsmethod2) #' labelsmethod3 <- data.frame(a =c("var1","var2","var3","var4","var5"), #' b=c('Number 1','Number 2','Some Letters','Some Labels','You Good?')) #' vtable(df,labels=labelsmethod3) #' #' #Using value labels and pre-labeled data: #' library(sjlabelled) #' df <- set_label(df,c('Number 1','Number 2','Some Letters', #' 'Some Labels','You Good?')) #' df$var1 <- set_labels(df$var1,labels=c('A little','Some more', #' 'Even more','A lot')) #' vtable(df) #' #' #efc is data with embedded variable and value labels from the sjlabelled package #' library(sjlabelled) #' data(efc) #' vtable(efc) #' #' #Displaying the values of a character vector #' data(USJudgeRatings) #' USJudgeRatings$Judge <- row.names(USJudgeRatings) #' vtable(USJudgeRatings,char.values=c('Judge')) #' #' #Adding summary statistics for variable mean and proportion of data that is missing. #' vtable(efc,summ=c('mean(x)','propNA(x)')) #' #' } #' @rdname vtable #' @export vtable <- function(data,out=NA,file=NA,labels=NA,class=TRUE,values=TRUE,missing=FALSE, index=FALSE,factor.limit=5,char.values=FALSE, data.title=NA,desc=NA,note = NA,note.align = 'l', anchor=NA,col.width=NA,col.align=NA, align=NA,fit.page = NA, summ=NA,lush=FALSE,opts=list()) { #Bring in opts list2env(opts,envir=environment()) #######CHECK INPUTS if (is.null(colnames(data))) { stop('Requires data with variable names or column names.') } if (!is.na(file) & !is.character(file)) { stop('Incorrect file name.') } if (!is.logical(class)) { stop('The class option must be TRUE or FALSE.') } if (!is.logical(values)) { stop('The values option must be TRUE or FALSE.') } if (!is.logical(missing)) { stop('The missing option must be TRUE or FALSE.') } if (!is.logical(index)) { stop('The index option must be TRUE or FALSE.') } if (!is.numeric(factor.limit) | factor.limit%%1 != 0) { stop('factor.limit must be an integer. 
Set to 0 for unlimited factors.') } if (!(is.logical(char.values) | is.character(char.values))) { stop('char.values must be FALSE, TRUE, or a character vector.') } if (!is.na(data.title) & !is.character(data.title)) { stop('data.title must be a character variable.') } if (!is.na(desc) & !is.character(desc)) { stop('desc must be a character variable.') } if (!identical(anchor,NA) & !is.character(anchor)) { stop('anchor must be a character variable.') } if (min(is.na(col.width)) == 0 & (!is.vector(col.width) | !is.numeric(col.width) | sum(is.na(col.width)) > 0)) { stop('col.width must be a numeric vector with no missing values.') } if (min(is.na(col.width)) == 0 & (max(col.width) > 100 | min(col.width) < 0)) { stop('Elements of col.width must be between 0 and 100.') } if (min(is.na(summ)) == 0 & (!is.vector(summ) | !is.character(summ) | sum(is.na(summ)) > 0)) { stop('summ must be a character vector with no missing values.') } if (!is.logical(lush)) { stop('lush must be logical.') } if (!identical(out,NA) & !(out %in% c('viewer', 'browser','return','htmlreturn','kable','latex','latexpage', 'csv'))) { stop('out must be viewer, browser, return, htmlreturn, kable, latex, or latexpage') } if (identical(out, 'csv') & is.na(file)) { warning('out = "csv" will just return the vtable as a data.frame unless combined with file') } #One-column matrices run into some problems later on if (is.matrix(data) & dim(data)[2] == 1) { data <- as.data.frame(data) } ####### APPLICATION OF LUSH DEFAULTS if (lush) { char.values <- TRUE missing <- TRUE if (is.na(summ)) { summ <- c('mean(x)', 'sd(x)', 'nuniq(x)') } } ####### APPLICATION OF DATA.TITLE OPTION #If data.title is missing, fill in with name of the data frame read in if (is.na(data.title)) { data.title <- deparse(substitute(data)) } ####### FORM VARIABLE TABLE TO BUILD ON #If index = TRUE, start with that. Otherwise, start with Name if (index==TRUE) { vt <- data.frame(Index=1:ncol(data), Name=colnames(data)) } else { #Start table with variable names or column names (earlier error check ensures one exists) vt <- data.frame(Name = colnames(data)) } ####### APPLICATION OF CLASS OPTION #If user asks for variable classes, add them to the variable table if (class == TRUE) { #If multiple classes listed, take the first vt$Class <- sapply(data,function(x) class(x)[1]) } ####### APPLICATION OF LABELS OPTION #Pull from label attribute if present #label attribute works for labels from Hmisc, sjlabelled, haven labs <- sapply(data, function(x) attr(x,'label')) #If there were any, add them if (!is.null(unlist(labs))) { vt$Label <- labs } #If labels are directly included, override the embedded labels #Use ifelse construction so that labels[1] is not evaluated for two-column #label styles (which throws an error) if (min(is.na(labels))==0 & ifelse(length(labels) == 1,labels[1] != "omit",TRUE)) { #Override embedded labels vt$Label <- NULL #Figure out the format of the data and fill in labs appropriately #First, check if it's just a vector of labels if (is.vector(labels)) { #Make sure it's the right length if (length(labels) == dim(vt)[1]) { vt$Label <- labels } else { stop('label vector must have as many elements as there are variables in the data. 
Use NA elements to fill in, or see help(vtable) for other label formats that do not require every variable to have a label.') } #Check if it's in the two-column format } else if(dim(labels)[1] > 1 & dim(labels)[2] == 2) { #Put labels in mergeable format labels <- as.data.frame(labels) names(labels) <- c('Name','Label') #Hold original order vt$order <- 1:nrow(vt) #Bring in variable labels by name, allow NA labels with all.x=TRUE vt <- merge(vt,labels,sort=FALSE,all.x=TRUE) vt <- vt[order(vt$order),] vt$order <- NULL #Keep row names in order rownames(vt) <- 1:nrow(vt) #Check if it's in the one-row variable-name format } else if (dim(labels)[1]==1 & !is.null(colnames(labels))) { #Put into two-column format labs <- data.frame(Name=colnames(labels),Label=as.character(t(labels[1,]))) #Hold original order vt$order <- 1:nrow(vt) #Bring in variable labels by name, allow NA labels with all.x=TRUE vt <- merge(vt,labs,sort=FALSE,all.x=TRUE) vt <- vt[order(vt$order),] vt$order <- NULL rownames(vt) <- 1:nrow(vt) } else{ stop('Unrecognized label format. See help(vtable).') } } else if (min(is.na(labels))==0 & ifelse(length(labels) == 1,labels[1] == "omit",FALSE)) { vt$Label <- NULL } ####### We need only one class #If there are multiples and one is factor, treat as factor if (sum(sapply(data,function(x) (length(class(x)) >1) & (is.factor(x)))) > 0) { data[,sapply(data,function(x) (length(class(x)) >1) & (is.factor(x)))] <- as.data.frame(sapply(data[,sapply(data,function(x) (length(class(x)) >1) & (is.factor(x)))],function(x) factor(x,ordered=FALSE)), stringsAsFactors = TRUE) } #Similarly, only take one class if it's numeric, UNLESS it's haven_labelled. if (sum(sapply(data,function(x) (length(class(x)) >1) & (is.numeric(x)) & !('haven_labelled' %in% class(x)))) > 0) { data[,sapply(data,function(x) (length(class(x)) >1) & (is.numeric(x)) & !('haven_labelled' %in% class(x)))] <- as.data.frame(sapply(data[,sapply(data,function(x) (length(class(x)) >1) & (is.numeric(x)) & !('haven_labelled' %in% class(x)))],function(x) as.numeric(haven::zap_labels(x)))) } ####### APPLICATION OF VALUES OPTION #If user wants values, show the possible values the variable can take if (values == TRUE) { ####### APPLICATION OF CHAR.VALUES OPTION if (inherits(char.values,'logical')) { if (char.values == TRUE) { #See which are characters charvariables <- as.logical(unlist(sapply(data,function(x) max(class(x) == "character")))) #and convert data[,charvariables] <- as.data.frame(sapply(data[,charvariables],function(x) as.factor(x)), stringsAsFactors = TRUE) #clean rm(charvariables) } } else if (inherits(char.values,'character')) { #See which variables are in the list charvariables <- names(data) %in% char.values #and convert data[,charvariables] <- as.data.frame(sapply(data[,charvariables],function(x) as.factor(x)), stringsAsFactors = TRUE) #clean rm(charvariables) } #Create variable to hold ranges vt$Values <- '' #Are there any labelled values? 
#allow both for the labelled class and non-factor variables with value labels if (sum(unlist(sapply(data,function(x) class(x) == "labelled")))+ sum(sapply(data,function(x) !is.factor(x) & !is.null(unlist(sjlabelled::get_labels(x)))))>0) { #Since we've already extracted class, if necessary, #we can just turn these into factor variables with an included #numerical coding for clarity #Identify which variables have labels havelabels <- as.logical(unlist(sapply(data,function(x) max(class(x) == "labelled")))) #Include variables not of the class labelled or factor but which have labels havelabels[sapply(data,function(x) !is.factor(x) & !is.null(unlist(sjlabelled::get_labels(x,attr.only=TRUE))))] <- TRUE #DON'T include variables with unlabelled values unlabvals <- sapply(data[,havelabels,drop = FALSE], function(x) length(sjlabelled::get_labels(x)) == length(sjlabelled::get_labels(x, non.labelled = TRUE))) if (sum(!unlabvals) > 0) { havelabels[havelabels] <- unlabvals warning('Some labelled variables have unlabeled values. Treating these as numeric variables and ignoring labels.') } if (sum(havelabels) > 0) { vallabs <- sjlabelled::get_labels(data,values='as.name') #Add numerical coding vallabscode <- lapply(vallabs, function(x) paste(names(x),': ',x,sep='')) #Make sure the labels are named chr vectors for (v in names(vallabscode)) { names(vallabscode[[v]]) <- names(vallabs[[v]]) } #Set new coded labels among the variables with value labels suppressMessages(suppressWarnings(data[,havelabels] <- sjlabelled::set_labels(data[,havelabels,drop=FALSE],labels=vallabscode[havelabels]))) #And turn into the appropriately-titled factor suppressWarnings(data[,havelabels] <- sjlabelled::as_label(data[,havelabels,drop=FALSE])) } } #If there are any factors: if (sum(sapply(data,is.factor)) > 0) { #Fill in description of factor levels for factor factorlevels <- lapply(subset(data,select=sapply(data,is.factor)),levels) #If there's not a limit on the number of factors requested toomany <- rep(FALSE,length(factorlevels)) ####### APPLICATION OF FACTOR.LIMIT OPTION #if there's a limit on the number of factors requested if (factor.limit > 0) { #Find out which variables have too many factors numcut <- sapply(factorlevels,function(x) length(x) - factor.limit) toomany <- numcut > 0 #Cut back to the limit factorlevels <- lapply(factorlevels,function(x) x[1:min(factor.limit,length(x))]) } #Make sure each factor surrounded by ' factorlevels <- paste('\'', lapply(factorlevels,paste,collapse='\' \''), '\'',sep='') #If some were cut, indicate that factorlevels <- ifelse(toomany,paste(factorlevels,'and', numcut, 'more'),factorlevels) #And fill in for output table vt[sapply(data,is.factor),]$Values <- factorlevels } #If there are any dates: if (sum(sapply(data,function(x) max(class(x) %in% c('Date','POSIXct','POSIXt','POSIXlt')) & min(is.na(x)) == 0)) > 0) { #Get minimums, be sure to skip any variables that are always NA min <- lapply(subset(data,select=sapply(data,function(x) max(class(x) %in% c('Date','POSIXct','POSIXt','POSIXlt')) & min(is.na(x)) == 0)),function(x) min(x,na.rm=TRUE)) min <- sapply(min, as.character) #Get maximums max <- lapply(subset(data,select=sapply(data,function(x) max(class(x) %in% c('Date','POSIXct','POSIXt','POSIXlt')) & min(is.na(x)) == 0)),function(x) max(x,na.rm=TRUE)) max <- sapply(max, as.character) #Range description range <- paste('Time:',min,'to',max) #Fill in for output table vt[sapply(data,function(x) max(class(x) %in% c('Date','POSIXct','POSIXt','POSIXlt')) & min(is.na(x)) == 0),]$Values <- 
range } #If there are any numeric variables: if (sum(sapply(data,function(x) is.numeric(x) & min(is.na(x)) == 0)) > 0) { #Get minimums, be sure to skip any variables that are always NA min <- lapply(subset(data,select=sapply(data,function(x) is.numeric(x) & min(is.na(x)) == 0)),function(x) round(min(x,na.rm=TRUE),3)) #Get maximums max <- lapply(subset(data,select=sapply(data,function(x) is.numeric(x) & min(is.na(x)) == 0)),function(x) round(max(x,na.rm=TRUE),3)) #Range description range <- paste('Num:',min,'to',max) #Fill in for output table vt[sapply(data,function(x) is.numeric(x) & min(is.na(x)) == 0),]$Values <- range } #Binary variables if (sum(sapply(data,is.logical))>0) { #Fill in for output table vt[sapply(data,is.logical),]$Values <- 'TRUE FALSE' } } ####### APPLICATION OF MISSING OPTION #If user asks for number of missing values in the column, add them to the variable table if (missing==TRUE) { vt$Missing <- sapply(data, countNA) } ####### APPLICATION OF SUMM OPTION #Check if anything included for summ if (min(is.na(summ)) == 0) { #Create blank to fill in vt$Summary = '' #First, do the propNA and countNA functions #Do separately so it can be applied to factors and characters #and also because all other functions are run only on nonmissings #So if we have NA counts and also factors to do them to if ('propNA(x)' %in% summ) { vt$Summary <- #Start the summary variable off by pasting together the propNA name paste('propNA: ', #with a summary calculation performed on each character or factor column of the data sapply(data, #and in particular that function is mean(is.na(x)) function(x) round(propNA(x),3)),sep='') #If propNA isn't the only thing, use a line break to separate this from the next if (length(summ) > 1) { vt$Summary <- paste(vt$Summary,'<br/>',sep='') } } #Now do the exact same thing for countNA as was done for propNA if ('countNA(x)' %in% summ) { vt$Summary <- #Start the summary variable off by pasting together the countNA name paste(vt$Summary,'countNA: ', #with a summary calculation performed on each character or factor column of the data sapply(data, countNA),sep='') #If there's still more to come, add a line break if (length(summ[!summ %in% c('propNA(x)','countNA(x)')])>0) { vt$Summary <- paste(vt$Summary,'<br/>',sep='') } } #If there are propNA or countNA functions, drop them since we just used them summ <- summ[!summ %in% c('propNA(x)','countNA(x)')] #Create copy of summ for actual use summuse <- summ #And a copy for naming the summary stats summnames <- summ #Change names for presentation. If the name is simple #(i.e. ends in just (x)), cut that out. Otherwise, leave intact for clarity #replace the names of all names that end in '(x)' summnames[substring(summnames,nchar(summnames)-2)=='(x)'] <- #with a substring of those names that starts at 1 and ends before (x) substring(summnames[substring(summnames,nchar(summnames)-2)=='(x)'], 1,nchar(summnames[substring(summnames,nchar(summnames)-2)=='(x)'])-3) #and tack on a ': ' that will go between the name and the number summnames <- paste(summnames,': ',sep='') #Now do all the stats. #Comments are numbered for the purpose of reading them in order vt$Summary <- #8. And finally paste it together with what we already have paste(vt$Summary, #3. 
Go through each of those variables one by one to calculate summary stats sapply( #2 Turn it into a list and restrict each of the columns to nonmissing #(can't just use complete.cases - you want each variable to have all its nonmissings) #(can't use na.rm since some functions don't take it) #(can't do this at the level of x on the innermost sapply since it may be difficult to locate the x that connotates data if the function has the letter x in it) lapply(as.list( #1. Take all the variables and just keep nonmissings data),function(x) x[!is.na(x)]), #4. within each of those variables, paste together a bunch of summary stats # Send to parsesumm so as to handle different cases # If it's a date, and summnames was set by lush = TRUE, use median and nuniq function(x) if (lush == TRUE & max(class(x) %in% c('Date','POSIXct','POSIXt','POSIXlt')) == 1) { parsesumm(x, c('median(x)','nuniq(x)'), c('median: ', 'nuniq: ')) } else { parsesumm(x,summuse,summnames) }),sep='') } ####### APPLICATION OF COL.WIDTH OPTION #column percentages #Check which columns we have to account for haslabels <- 'Label' %in% colnames(vt) hasclass <- 'Class' %in% colnames(vt) hasvalues <- 'Values' %in% colnames(vt) hassumm <- 'Summary' %in% colnames(vt) #If col.width not manually set, use defaults if (sum(!is.na(col.width)) == 0) { #initialize vector col.width <- rep(0,length(colnames(vt))) #Default ratios: #Name:class:label:values:summ #1:.5:1.75:1.25:.75 col.width[colnames(vt)=='Index'] <- .25 col.width[colnames(vt)=='Name'] <- 1 col.width[colnames(vt)=='Class'] <- .5 col.width[colnames(vt)=='Label'] <- 1.75 col.width[colnames(vt)=='Values'] <- 1.25 col.width[colnames(vt)=='Missing'] <- .5 col.width[colnames(vt)=='Summary'] <- .9 #Add it up totalwidth <- sum(col.width) #If total amount is less than two name-spaces, let table take up 60% of screen #From 2-3 name-spaces, 80% #More than 3 is full-screen tablescale <- 60 + 20*(totalwidth>=2) + 20*(totalwidth>=3) #And rescale column widths col.width <- (col.width/totalwidth)*tablescale } #col.align defaults if (identical(col.align, NA)) { col.align <- rep('left',ncol(vt)) } #Do we have a summary? if ('Summary' %in% names(vt)) { # Don't escape any line breaking stuff in there no.escape <- which(names(vt) == 'Summary') } else { no.escape <- NA } ####### LATEX OUTPUT if (!identical(out, NA) & out %in% c('latex','latexpage')) { #Some <br/>s are hardcoded in there if ('Summary' %in% names(vt)) { vt$Summary <- gsub('<br/>',' \\\\\\\\ ',vt$Summary) vt$Summary <- paste0('\\begin{tabular}[c]{@{}c@{}}',vt$Summary,'\\end{tabular}') } if (is.na(align)) { col.width <- col.width/100 align <- paste0(paste0('p{',col.width,'\\textwidth}'),collapse='') } #Table only if (out == 'latex') { return(cat(dftoLaTeX(vt, file = file, align = align, note = note, note.align = note.align, anchor = anchor, title = 'Variable Table', fit.page = fit.page, no.escape = no.escape))) } #Now for the full page out.latex <- '\\documentclass{article}\n\\begin{document}\n\nvtable \\{vtable\\}\n\n' out.latex <- paste(out.latex, '\\textbf{\\LARGE ', data.title,'}\n\n') #Applying description #By default, this is number of obs and number of columns, plus whatever is in desc. #"omit" will leave that out. 
description <- paste('This data contains ',dim(data)[1],' rows and ',dim(data)[2],' columns.',sep='') #four possibilities: desc is NA (print description), #desc is just omit (print nothing, do nothing) #desc is other stuff followed by omit (just print the other stuff) #desc is other stuff not followed by omit (print desc and the other stuff) #First, check for blank desc if (is.na(desc)) { out.latex <- paste(out.latex,description,'\n\n',sep='') #Evaluate these only if desc is nonmissing } else if(desc == "omit") { #Do nothing here #Next, stuff followed by omit } else if(substring(desc,nchar(desc)-3)=="omit" & desc != "omit") { #Don't actually print the omit! out.latex <- paste(out.latex,substring(desc,1,nchar(desc)-4),'\n\n',sep='') #Finally, stuff not followed by omit } else { out.latex <- paste(out.latex,desc,' ',description,'\n\n',sep='') } #And bring in the table itself out.latex <- paste(out.latex,dftoLaTeX(vt, align = align, anchor = anchor, note = note, note.align = note.align, title = 'Variable Table', fit.page = fit.page, no.escape = no.escape),'\n\n\\end{document}',sep='') ####### APPLICATION OF FILE OPTION if (!is.na(file)) { #If they forgot a file extension, fill it in if (!grepl("\\.tex",file)) { file <- paste(file,'.tex',sep='') } filepath <- file.path(file) #Create temporary tex file writeLines(out.latex,filepath) } return(cat(out.latex)) } ####### CONSTRUCTION OF HTML #Head of file out.html <- paste(' <html style=\"font-family:Helvetica,Arial,Sans\"> <head><title>',data.title,'</title>', '<style type = \"text/css\"> p { font-size:smaller; } table { border: 0px; border-collapse:collapse; font-size:smaller; table-layout:fixed; margin-left:0%; margin-right:auto; } .headtab { width: 100%; margin-left:auto; margin-right:auto; } th { background-color: #FFFFFF; font-weight:bold; text-align:left; } table tr:nth-child(odd) td { background-color: #FFFFFF; padding:4px; word-wrap: break-word; word-break:break-all; } table tr:nth-child(even) td { background-color: #D3D3D3; padding:4px; word-wrap: break-word; word-break:break-all; }</style></head><body>',sep='') #Dataset name and description out.html <- paste(out.html, '<table class=\"headtab\">', '<tr><td style=\"text-align:left\">vtable {vtable}</td>', '<td style=\"text-align:right\">Variable Documentation</td></tr></table>', '<h1>',data.title,'</h1>') #Applying description #By default, this is number of obs and number of columns, plus whatever is in desc. #"omit" will leave that out. description <- paste('This data contains ',dim(data)[1],' rows and ',dim(data)[2],' columns.',sep='') #four possibilities: desc is NA (print description), #desc is just omit (print nothing, do nothing) #desc is other stuff followed by omit (just print the other stuff) #desc is other stuff not followed by omit (print desc and the other stuff) #First, check for blank desc if (is.na(desc)) { out.html <- paste(out.html,'<p>',description,'</p>',sep='') #Evaluate these only if desc is nonmissing } else if(desc == "omit") { #Do nothing here #Next, stuff followed by omit } else if(substring(desc,nchar(desc)-3)=="omit" & desc != "omit") { #Don't actually print the omit! 
out.html <- paste(out.html,'<p>',substring(desc,1,nchar(desc)-4),'</p>',sep='') #Finally, stuff not followed by omit } else { out.html <- paste(out.html,'<p>',desc,' ',description,'</p>',sep='') } out.html <- paste(out.html,'<h3>Variable Table</h3>',sep='') #And bring in the table itself out.html <- paste(out.html,dftoHTML(vt,out='htmlreturn', col.width=col.width, col.align=col.align, note = note, note.align = note.align, anchor=anchor, no.escape = no.escape),'</body></html>',sep='') ####### APPLICATION OF FILE OPTION if (!is.na(file)) { if (identical(out,'csv')) { #If they forgot a file extension, fill it in if (!grepl("\\.csv",file)) { file <- paste(file,'.csv',sep='') } for (i in 1:ncol(vt)) { vt[[i]] <- gsub('<br/>','; ',vt[[i]]) } filepath <- file.path(file) utils::write.csv(vt, file = filepath, row.names = FALSE) } else { #If they forgot a file extension, fill it in if (!grepl("\\.htm",file)) { file <- paste(file,'.html',sep='') } filepath <- file.path(file) #Create temporary html file writeLines(out.html,filepath) } } #For more easily working with if statements if (is.na(out)) { out = '' } ####### APPLICATION OF OUT OPTION #If the plan is to produce a viewable HTML, create it if (out == 'viewer' | out == 'browser' | out == '') { #Get temporary dirpath tempDir <- tempfile() #Create temporary directory dir.create(tempDir) #Get temporary filepath htmlpath <- file.path(tempDir,'vtable.html') #Create temporary html file writeLines(out.html,htmlpath) } #Either print the variable table to the help window #or return a variable table to the screen, as desired #Default to kable if in knitr if (out == 'kable' | (isTRUE(getOption('knitr.in.progress')) & out == '')) { for (i in 1:ncol(vt)) { # Don't end on a break vt[[i]] <- gsub('<br/>$','',vt[[i]]) # But replace all internal ones with commas vt[[i]] <- gsub('<br/>',', ',vt[[i]]) } #kable can't handle blank rows. These should not occur in vtable but just in case vt <- vt[!apply(vt,MARGIN=1,FUN=function(x) !any(!(x==rep('',ncol(vt))))),] #I don't know how this would happen but just in case vt <- vt[!apply(vt,MARGIN=1,FUN=function(x) propNA(x) == 1),] if (knitr::is_latex_output()) { kb <- knitr::kable(vt, caption = data.title, row.names = FALSE, booktabs = TRUE, format = 'latex') if (!is.na(note)) { kb <- kableExtra::add_footnote(kb, note, notation = 'none') } } else if(knitr::is_html_output()) { kb <- knitr::kable(vt, caption = data.title, row.names = FALSE, format = 'html') if (!is.na(note)) { kb <- kableExtra::add_footnote(kb, note, notation = 'none') } } else { kb <- knitr::kable(vt, caption = data.title, row.names = FALSE) } # If it's just a default RMarkdown kable, style it for HTML because the default is ew if (isTRUE(getOption('knitr.in.progress')) & out == '') { if (isTRUE(knitr::is_html_output())) { kb <- kableExtra::kable_styling(kb) } } return(kb) } else if (Sys.getenv('RSTUDIO')=='1' & (out == 'viewer' | out == '')) { rstudioapi::viewer(htmlpath) } else if (Sys.getenv('RSTUDIO')=='' & out == 'viewer') { stop('out = "viewer" is not a valid option if RStudio is not running.') } else if ((Sys.getenv('RSTUDIO')=='' & out == '') | (out == 'browser')) { utils::browseURL(htmlpath) } else if (out == 'return' | out == 'csv') { for (i in 1:ncol(vt)) { vt[[i]] <- gsub('<br/>','; ',vt[[i]]) } return(vt) } else if (out == 'htmlreturn') { return(cat(out.html)) } } #' @rdname vtable #' @import kableExtra #' @export vt <- vtable
/scratch/gouwar.j/cran-all/cranData/vtable/R/vtable.R
## ---- eval = FALSE------------------------------------------------------------ # dftoHTML(data, # out=NA, # file=NA, # note=NA, # anchor=NA, # col.width=NA, # col.align=NA, # row.names=FALSE, # no.escape=NA) ## ---- eval = FALSE------------------------------------------------------------ # library(vtable) # # data(LifeCycleSavings) # dftoHTML(LifeCycleSavings) ## ---- eval=FALSE-------------------------------------------------------------- # library(vtable) # # data(LifeCycleSavings) # dftoHTML(LifeCycleSavings) # dftoHTML(LifeCycleSavings,out='browser') # dftoHTML(LifeCycleSavings,out='viewer') # htmlcode <- dftoHTML(LifeCycleSavings,out='htmlreturn') ## ---- eval=FALSE-------------------------------------------------------------- # data(LifeCycleSavings) # dftoHTML(LifeCycleSavings,file='lifecycledata_htmlversion.html') ## ---- eval = FALSE------------------------------------------------------------ # dftoHTML(LifeCycleSavings,note='Data from Belsley, Kuh, and Welsch 1980'). ## ---- eval = FALSE------------------------------------------------------------ # #Let's make sr much bigger for some reason # dftoHTML(LifeCycleSavings,col.width=c(60,10,10,10,10)) ## ---- eval=FALSE-------------------------------------------------------------- # dftoHTML(LifeCycleSavings,row.names=TRUE) ## ---- eval=FALSE-------------------------------------------------------------- # #Don't escape columns 1 or 2 # dftoHTML(LifeCycleSavings,no.escape=1:2) ## ---- eval = FALSE------------------------------------------------------------ # dftoLaTeX(data, # file=NA, # frag=TRUE, # title=NA, # note=NA, # anchor=NA, # align=NA, # row.names=FALSE, # no.escape=NA) ## ---- eval = FALSE------------------------------------------------------------ # library(vtable) # # data(LifeCycleSavings) # dftoLaTeX(LifeCycleSavings) ## ---- eval=FALSE-------------------------------------------------------------- # data(LifeCycleSavings) # dftoLaTeX(LifeCycleSavings,file='lifecycledata_latexversion.tex') ## ---- eval = FALSE------------------------------------------------------------ # dftoLaTeX(LifeCycleSavings,note='Data from Belsley, Kuh, and Welsch 1980'). ## ---- eval = FALSE------------------------------------------------------------ # dftoLaTeX(LifeCycleSavings,anchor='tab:LCS') ## ---- eval = FALSE------------------------------------------------------------ # dftoLaTeX(LifeCycleSavings,row.names=TRUE,align='p{.25\\textwidth}ccccc') ## ---- eval=FALSE-------------------------------------------------------------- # dftoLaTeX(LifeCycleSavings,row.names=TRUE) ## ---- eval=FALSE-------------------------------------------------------------- # #Don't escape columns 1 or 2 # dftoLaTeX(LifeCycleSavings,no.escape=1:2)
/scratch/gouwar.j/cran-all/cranData/vtable/inst/doc/dftotable.R
--- title: "dftoHTML and dftoLaTeX: Data Frame Formatting" author: "Nick Huntington-Klein" date: "`r Sys.Date()`" output: rmarkdown::html_vignette <!-- output: rmarkdown::html_vignette. pdf_document --> vignette: > %\VignetteIndexEntry{dftoHTML and dftoLaTeX: Data Frame Formatting} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The `vtable` package serves the purpose of outputting automatic variable documentation that can be easily viewed while continuing to work with data. `vtable` contains four main functions: `vtable()` (or `vt()`), `sumtable()` (or `st()`), `labeltable()`, and `dftoHTML()`/`dftoLaTeX()`. This vignette focuses on `dftoHTML()`/`dftoLaTeX()`. `dftoHTML()` and `dftoLaTeX` are helper functions used by `vtable()`, `sumtable()`, and `labeltable()`. They takes any data frame or matrix with column names and outputs HTML or LaTeX table code for that data. Note that none of the vignettes in this example are set to run because `dftoHTML` and `dftoLaTeX` output is intended to go to places other than Markdown (although both can certainly be used with 'asis' chunks to produce results in Markdown). ----- # The `dftoHTML()` function `dftoHTML()` syntax follows the following outline: ```{r, eval = FALSE} dftoHTML(data, out=NA, file=NA, note=NA, anchor=NA, col.width=NA, col.align=NA, row.names=FALSE, no.escape=NA) ``` `dftoHTML()` largely exists to serve `vtable()`, `sumtable()`, and `labeltable()`. What it does is takes a data set `data` and returns an HTML table with the contents of that data. Outside of its use with other `vtable` functions, `dftoHTML()` can also be used to keep a view of the data file open while working on the data, avoiding repeated calls to `head()` or similar, or switching back and forth between code tabs and data view tabs. ------ ## `data` `dftoHTML()` will accept any data set with a `colnames()` attribute. ```{r, eval = FALSE} library(vtable) data(LifeCycleSavings) dftoHTML(LifeCycleSavings) ``` ## out The `out` option determines what will be done with the resulting variable documentation file. There are several options for `out`: | Option | Result | |------------| -----------------------------------------| | browser | Loads HTML version of `data` in web browser. | | viewer | Loads HTML version of `data` in Viewer pane (RStudio only). | | htmlreturn | Returns HTML code for `data`. | By default, `vtable` will select 'viewer' if running in RStudio, and 'browser' otherwise. ```{r, eval=FALSE} library(vtable) data(LifeCycleSavings) dftoHTML(LifeCycleSavings) dftoHTML(LifeCycleSavings,out='browser') dftoHTML(LifeCycleSavings,out='viewer') htmlcode <- dftoHTML(LifeCycleSavings,out='htmlreturn') ``` ## `file` The `file` argument will write the HTML version of `data` to an HTML file and save it. Will automatically append 'html' filetype if the filename does not include a period. ```{r, eval=FALSE} data(LifeCycleSavings) dftoHTML(LifeCycleSavings,file='lifecycledata_htmlversion.html') ``` ## `note` `note` will add a table note in the last row. ```{r, eval = FALSE} dftoHTML(LifeCycleSavings,note='Data from Belsley, Kuh, and Welsch 1980'). ``` ## `anchor` `anchor` will add an anchor ID (`<a name = `) to allow other parts of your document to link to it, if you are including your table in a larger document. ## `col.width` `dftoHTML()` will select, by default, equal column widths for all columns in `data`. `col.width`, as a vector of percentage column widths on the 0-100 scale, will override these defaults. 
```{r, eval = FALSE}
#Let's make sr much bigger for some reason
dftoHTML(LifeCycleSavings,col.width=c(60,10,10,10,10))
```

## `col.align`

`col.align` can be used to adjust text alignment in HTML output. Set to 'left', 'right', or 'center' to align all columns, or give a vector of column alignments to do each column separately.

While this is not intended usage, you can add additional CSS arguments (i.e. `'left; padding:5px'`) and it will apply that CSS to every cell in the column.

## `row.names`

The `row.names` flag determines whether the row names of the data are included as the first column in the output table.

```{r, eval=FALSE}
dftoHTML(LifeCycleSavings,row.names=TRUE)
```

## `no.escape`

If the data passed to `dftoHTML()` contains special HTML characters like '<', `dftoHTML()` will escape them. This could cause you some sort of existential crisis if you wanted to put HTML formatting in your data to be displayed. So set `no.escape` to a vector of column indices to skip the character-escaping process for those columns.

```{r, eval=FALSE}
#Don't escape columns 1 or 2
dftoHTML(LifeCycleSavings,no.escape=1:2)
```

-----

# The `dftoLaTeX()` function

`dftoLaTeX()` syntax follows the following outline:

```{r, eval = FALSE}
dftoLaTeX(data,
          file=NA,
          frag=TRUE,
          title=NA,
          note=NA,
          anchor=NA,
          align=NA,
          row.names=FALSE,
          no.escape=NA)
```

`dftoLaTeX()` largely exists to serve `vtable()`, `sumtable()`, and `labeltable()`. What it does is take a data set `data` and return a LaTeX table with the contents of that data. You could also use it on its own to write any data frame to LaTeX table format.

------

## `data`

`dftoLaTeX()` will accept any data set with a `colnames()` attribute.

```{r, eval = FALSE}
library(vtable)

data(LifeCycleSavings)
dftoLaTeX(LifeCycleSavings)
```

## `file`

The `file` argument will write the TeX version of `data` to a .tex file and save it. Will automatically append 'tex' filetype if the filename does not include a period.

```{r, eval=FALSE}
data(LifeCycleSavings)
dftoLaTeX(LifeCycleSavings,file='lifecycledata_latexversion.tex')
```

## `note`

`note` will add a table note in the last row.

```{r, eval = FALSE}
dftoLaTeX(LifeCycleSavings,note='Data from Belsley, Kuh, and Welsch 1980')
```

## `anchor`

`anchor` will add an anchor ID (`\label{}`) to allow other parts of your document to link to it, if you are including your output in a larger document.

```{r, eval = FALSE}
dftoLaTeX(LifeCycleSavings,anchor='tab:LCS')
```

## `align`

This is a single string, which will be used as column alignment in standard LaTeX syntax, for example 'lccc' for the left column left-aligned and the other three centered. Accepts 'p{}' and other LaTeX column types. Don't forget to escape backslashes! Defaults to all left-aligned.

```{r, eval = FALSE}
dftoLaTeX(LifeCycleSavings,row.names=TRUE,align='p{.25\\textwidth}ccccc')
```

## `row.names`

The `row.names` flag determines whether the row names of the data are included as the first column in the output table.

```{r, eval=FALSE}
dftoLaTeX(LifeCycleSavings,row.names=TRUE)
```

## `no.escape`

If the data passed to `dftoLaTeX()` contains special LaTeX characters like '~' or '^', `dftoLaTeX()` will escape them. This could cause you some sort of existential crisis if you wanted to put LaTeX formatting in your data to be displayed. So set `no.escape` to a vector of column indices to skip the character-escaping process for those columns.

```{r, eval=FALSE}
#Don't escape columns 1 or 2
dftoLaTeX(LifeCycleSavings,no.escape=1:2)
```
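
Circling back to `dftoHTML()`: the `col.align` and `anchor` options were described above without a code example, so here is a minimal sketch combining the two. The anchor name and note text are just illustrative placeholders, not values the package requires.

```{r, eval = FALSE}
#Left-align the first column, right-align the rest,
#and add an anchor so other parts of an HTML document can link to the table
dftoHTML(LifeCycleSavings,
         col.align = c('left','right','right','right','right'),
         anchor = 'lifecycletable',
         note = 'Columns after the first are right-aligned.')
```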
/scratch/gouwar.j/cran-all/cranData/vtable/inst/doc/dftotable.Rmd
## ---- eval=FALSE-------------------------------------------------------------- # labeltable(var, # ..., # out=NA, # count=FALSE, # percent=FALSE, # file=NA, # desc=NA, # note=NA, # note.align=NA, # anchor=NA) ## ----------------------------------------------------------------------------- #Include a single labelled variable to show how the values of that variable correspond to its value labels. library(vtable) library(sjlabelled) data(efc) labeltable(efc$e15relat) ## ----------------------------------------------------------------------------- #Include more than one variable to show, for each value of the first, what values of the others are present in the data. data(mtcars) labeltable(mtcars$cyl,mtcars$carb,mtcars$am) ## ----------------------------------------------------------------------------- library(vtable) data(LifeCycleSavings) labeltable(efc$e15relat, count = TRUE, percent = TRUE) ## ---- eval=FALSE-------------------------------------------------------------- # library(vtable) # # data(LifeCycleSavings) # labeltable(efc$e15relat,file='lifecycle_variabledocumentation')
/scratch/gouwar.j/cran-all/cranData/vtable/inst/doc/labeltable.R
--- title: "labeltable: Label Table" author: "Nick Huntington-Klein" date: "`r Sys.Date()`" output: rmarkdown::html_vignette <!-- output: rmarkdown::html_vignette. pdf_document --> vignette: > %\VignetteIndexEntry{labeltable: Label Table} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The `vtable` package serves the purpose of outputting automatic variable documentation that can be easily viewed while continuing to work with data. `vtable` contains four main functions: `vtable()` (or `vt()`), `sumtable()` (or `st()`), `labeltable()`, and `dftoHTML()`/`dftoLaTeX()`. This vignette focuses on `labeltable()`. `labeltable()` is designed to take a single variable and show the values it is associated with. This can also be used to generate data documentation if desired, or can just be an easy way to look at label values, or learn more about the data you're working with. If that variable has value labels from the `sjlabelled` or `haven` packages, it will show how the values in the data correspond to the value labels. Alternately, you can include other variables as well, and `labeltable()` will show, for each value of the variable you're interested in, the values that those other variables take. This can be handy, for example, if you used some variables to create a numeric ID and want to remember what original values correspond to each ID. It can also act as sort of a cross-tabulation. ----- # The `labeltable()` function `vtable()` syntax follows the following outline: ```{r, eval=FALSE} labeltable(var, ..., out=NA, count=FALSE, percent=FALSE, file=NA, desc=NA, note=NA, note.align=NA, anchor=NA) ``` `labeltable()` is a function that shows the values that correspond to `var`. This could be value label values, or it could be the values found in the data for the `...` variables. ```{r} #Include a single labelled variable to show how the values of that variable correspond to its value labels. library(vtable) library(sjlabelled) data(efc) labeltable(efc$e15relat) ``` ```{r} #Include more than one variable to show, for each value of the first, what values of the others are present in the data. data(mtcars) labeltable(mtcars$cyl,mtcars$carb,mtcars$am) ``` ## `out` The `out` option determines what will be done with the resulting label table file. There are several options for `out`: | Option | Result | |------------| -----------------------------------------| | browser | Loads output in web browser. | | viewer | Loads output in Viewer pane (RStudio only). | | htmlreturn | Returns HTML code for output file. | | return | Returns output table in data.frame format. | | csv | Returns output table in data.frame format and, with a `file` option, saves that to CSV. | | kable | Returns a `knitr::kable()` | | latex | Returns a LaTeX table. | | latexpage | Returns an independently-buildable LaTeX document. | By default, `vtable` will select 'viewer' if running in RStudio, and 'browser' otherwise. If it's being built in an RMarkdown document with `knitr`, it will default to 'kable'. ## `count` and `percent` These options allow `labeltable()` to act as a sort of `table()`, where it will also include the counts and/or percentage of the variable that takes each value. ```{r} library(vtable) data(LifeCycleSavings) labeltable(efc$e15relat, count = TRUE, percent = TRUE) ``` ## `file` The `file` argument will write the variable documentation file to an HTML file and save it. Will automatically append 'html' filetype if the filename does not include a period. 
```{r, eval=FALSE}
library(vtable)
data(efc)
labeltable(efc$e15relat,file='lifecycle_variabledocumentation')
```

## `desc`, `note`, and `anchor`

`desc` will include a description of the data set (or whatever you like) in the file, which may be useful for documentation purposes. `note` will add a table note in the last row. `anchor` will add an anchor ID (`<a name = ` in HTML or `\label{}` in LaTeX) to allow other parts of your document to link to it, if you are including your table in a larger document.

`desc` will only show up in full-page `labeltable`s. That is, you won't get it with `out = 'kable'`, `out = 'return'`, `out = 'csv'`, or `out = 'latex'` (although `out = 'latexpage'` works). `note` and `anchor` will only show up in formats that support multi-column cells and anchoring, so they won't work with `out = 'kable'`, `out = 'csv'`, or `out = 'return'`.

## `note.align`

This option is used only with LaTeX output (`out` is 'latex' or 'latexpage'). `note.align` is a single string used for alignment, specifically for any table notes set with `note`, which enters as part of a `\multicolumn` argument. It accepts 'p{}' and other LaTeX column types. Be sure to escape special characters, in particular backslashes.
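
For example, here is a minimal sketch of LaTeX output that uses `note` together with `note.align`; the note text and the `p{5cm}` column specification are just placeholders.

```{r, eval = FALSE}
#Return a LaTeX labeltable with a long, wrapped table note
labeltable(efc$e15relat,
           out = 'latex',
           note = 'Value labels are drawn from the efc data in the sjlabelled package.',
           note.align = 'p{5cm}')
```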
/scratch/gouwar.j/cran-all/cranData/vtable/inst/doc/labeltable.Rmd
## ----------------------------------------------------------------------------- library(vtable) st(iris) ## ---- eval=FALSE-------------------------------------------------------------- # sumtable(data, # vars=NA, # out=NA, # file=NA, # summ=NA, # summ.names=NA, # add.median=FALSE, # group=NA, # group.long=FALSE, # group.test=FALSE, # group.weights =NA, # col.breaks=NA, # digits=2, # fixed.digits=FALSE, # numformat = formatfunc(digits = digits, big.mark = ''), # skip.format = c('notNA(x)','propNA(x)','countNA(x)'), # factor.percent=TRUE, # factor.counts=TRUE, # factor.numeric=FALSE, # logical.numeric=FALSE, # logical.labels=c('No','Yes'), # labels=NA, # title='Summary Statistics', # note = NA, # anchor=NA, # col.width=NA, # col.align=NA, # align=NA, # note.align='l', # fit.page=NA, # simple.kable=FALSE, # obs.function=NA) # opts=list()) ## ---- eval = FALSE------------------------------------------------------------ # data(LifeCycleSavings) # st(LifeCycleSavings, vars = c('pop15','pop75')) ## ---- eval = FALSE------------------------------------------------------------ # data(LifeCycleSavings) # sumtable(LifeCycleSavings) # vartable <- vtable(LifeCycleSavings,out='return') # # #I can easily \input this into my LaTeX doc: # vt(LifeCycleSavings,out='latex',file='mytable1.tex') ## ---- eval=FALSE-------------------------------------------------------------- # data(LifeCycleSavings) # st(LifeCycleSavings,file='lifecycle_summary') ## ----------------------------------------------------------------------------- sumtable(iris, summ=c('notNA(x)', 'mean(x)', 'median(x)', 'propNA(x)')) ## ----------------------------------------------------------------------------- #Getting complex st(iris, col.breaks = 4, summ = list( c('notNA(x)','mean(x)','sd(x^2)','min(x)','max(x)'), c('notNA(x)','mean(x)') ), summ.names = list( c('N','Mean','SD of X^2','Min','Max'), c('Count','Percent') )) ## ----------------------------------------------------------------------------- st(iris, group = 'Species', group.test = TRUE) ## ----------------------------------------------------------------------------- st(iris, group = 'Species', group.long = TRUE) ## ----------------------------------------------------------------------------- #Let's put species in a column by itself #There are five variables here, Species is last, #so break the column after the first four variables. st(iris, col.breaks = 4) ## ----------------------------------------------------------------------------- #Why not three columns? 
sumtable(mtcars, col.breaks = c(4,8)) ## ----------------------------------------------------------------------------- st(iris, digits = 5) ## ----------------------------------------------------------------------------- st(iris, digits = 3, fixed.digits = TRUE, numformat = NA) ## ----------------------------------------------------------------------------- st(iris, numformat = c('|cm', 'Sepal.Width' = 'percent')) ## ----------------------------------------------------------------------------- st(iris, factor.percent = FALSE, factor.counts = FALSE) ## ----------------------------------------------------------------------------- st(iris, factor.numeric = TRUE) ## ----------------------------------------------------------------------------- #Note that LifeCycleSavings has five variables data(LifeCycleSavings) #These variable labels are taken from help(LifeCycleSavings) labs <- c('numeric aggregate personal savings', 'numeric % of population under 15', 'numeric % of population over 75', NA, 'numeric % growth rate of dpi') sumtable(LifeCycleSavings,labels=labs) ## ----------------------------------------------------------------------------- #Note that LifeCycleSavings has five variables #with names 'sr', 'pop15', 'pop75', 'dpi', and 'ddpi' labs <- data.frame(nonsensename1 = c('sr', 'pop15', 'pop75'), nonsensename2 = c('numeric aggregate personal savings', 'numeric % of population under 15', 'numeric % of population over 75')) st(LifeCycleSavings,labels=labs) ## ----------------------------------------------------------------------------- labs <- data.frame(sr = 'numeric aggregate personal savings', pop15 = 'numeric % of population under 15', pop75 = 'numeric % of population over 75') sumtable(LifeCycleSavings,labels=labs) ## ---- eval=FALSE-------------------------------------------------------------- # #The variable names in this data set are pretty short, and the value labels are # #a little cramped, so let's move that over. # st(LifeCycleSavings, # col.width=c(9,rep(13,7))) ## ---- eval = FALSE------------------------------------------------------------ # st(LifeCycleSavings,col.align = 'right') ## ---- eval = FALSE------------------------------------------------------------ # sumtable(iris,align = 'p{.3\\textwidth}ccccccc', fit.page = '\\textwidth', out = 'latex')
/scratch/gouwar.j/cran-all/cranData/vtable/inst/doc/sumtable.R
--- title: "sumtable: Summary Statistics" author: "Nick Huntington-Klein" date: "`r Sys.Date()`" output: rmarkdown::html_vignette <!-- output: rmarkdown::html_vignette. pdf_document --> vignette: > %\VignetteIndexEntry{sumtable: Summary Statistics} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The `vtable` package serves the purpose of outputting automatic variable documentation that can be easily viewed while continuing to work with data. `vtable` contains four main functions: `vtable()` (or `vt()`), `sumtable()` (or `st()`), `labeltable()`, and `dftoHTML()`/`dftoLaTeX()`. This vignette focuses on `sumtable()`. `sumtable()` takes a dataset and outputs a formatted summary statistics table. Summary statistics an be for the whole data set at once or by-group. There are a huge number of R packages that will make a summary statistics table for you. What does `vtable::sumtable()` bring that isn't already there? First, like other `vtable` functions, `sumtable()` by default prints its results to Viewer (in RStudio) or the browser (elsewhere), making it easy to look at information about your data while continuing to work on it. Second, `sumtable()` is designed to *have nice defaults* and be *fast to work with*. By fast to work with that's both in the sense that you should just be able to ask for a sumtable and have it pretty much be what you want immediately, and also in the sense of trying to keep the number of keystrokes low (thus the `st()` shortcut, and the intent of not having to set a bunch of options). `sumtable()` has customization options, but they're certainly not as extensive as with a package like `gtsummary` or `arsenal`. Nor should they be! If you want full control over your table, those packages are already great, we don't need another package that does that. However, if you want the kind of table `sumtable()` produces (and I think a lot of you do!) then it's perfect and easy. This makes `sumtable()` very similar in spirit to the summary statistics functionality of `stargazer::stargazer()`, except with some additional important bonuses, like working with `tibble`s, factor variables, producing summary statistics by group, and being a summary-statistics-only function so the documentation isn't entwined with a bunch of regression-table functionality. Like, look at this. Isn't this what you basically already want? After loading the package this took eight keystrokes and no option-setting: ```{r} library(vtable) st(iris) ``` ----- # The `sumtable()` function `sumtable()` (or `st()` for short) syntax follows the following outline: ```{r, eval=FALSE} sumtable(data, vars=NA, out=NA, file=NA, summ=NA, summ.names=NA, add.median=FALSE, group=NA, group.long=FALSE, group.test=FALSE, group.weights =NA, col.breaks=NA, digits=2, fixed.digits=FALSE, numformat = formatfunc(digits = digits, big.mark = ''), skip.format = c('notNA(x)','propNA(x)','countNA(x)'), factor.percent=TRUE, factor.counts=TRUE, factor.numeric=FALSE, logical.numeric=FALSE, logical.labels=c('No','Yes'), labels=NA, title='Summary Statistics', note = NA, anchor=NA, col.width=NA, col.align=NA, align=NA, note.align='l', fit.page=NA, simple.kable=FALSE, obs.function=NA) opts=list()) ``` The goal of `sumtable()` is to take a data set `data` and output a usually-HTML (but `data.frame`, `kable`, `csv`, and `latex` options are there too) file with summary statistics for each of the variables in `data`. There are several options as to how the table will be constructed, and each of these options are explained below. 
Throughout, the output will be built as `kable`s since this is an RMarkdown document. However, generally you can leave `out` at its default and it will publish an HTML table to Viewer (in RStudio) or the browser (otherwise). This will also include some additional information about your data that can't be demonstrated in this vignette: ## `data` and `vars` The `data` argument can take any `data.frame`, `data.table`, `tibble`, or `matrix`, as long as it has a valid set of variable names stored in the `colnames()` attribute. By default, `sumtable` will include in the summary statistics table every variable in the data set that is (1) numeric, (2) factor, (3) logical, or (4) a character variable with six or fewer unique values (as a factor), and it will include them in the order they're in the data. You can override that variable list with `vars`, which is just a vector of variable names to include. You can use this to force `sumtable` to ignore variables you don't want, or to include variables it doesn't by default. ```{r, eval = FALSE} data(LifeCycleSavings) st(LifeCycleSavings, vars = c('pop15','pop75')) ``` ## `out` The `out` option determines what will be done with the resulting summary statistics table. There are several options for `out`: | Option | Result | |------------| -----------------------------------------| | browser | Loads output in web browser. | | viewer | Loads output in Viewer pane (RStudio only). | | htmlreturn | Returns HTML code for output file. | | return | Returns summary table in `data.frame` format. Depending on options, the data frame may be entirely character variables. | | csv | Returns summary table in `data.frame` format and, with a `file` option, saves that to CSV. | | kable | Returns a `knitr::kable()` | | latex | Returns a LaTeX table. | | latexpage | Returns an independently-buildable LaTeX document. | By default, `sumtable` will select 'viewer' if running in RStudio, and 'browser' otherwise. If it's being built in an RMarkdown document with `knitr`, it will default to 'kable'. Note that an RMarkdown default to 'kable' will also include some nice formatting, where `out = 'kable'` directly will give you a more basic `kable` you can format yourself. Also be aware that some of these formats, like 'return', do not support multi-column cells, and so instead you'll have headers squished into one cell, with blank cells next to them. ```{r, eval = FALSE} data(LifeCycleSavings) sumtable(LifeCycleSavings) vartable <- vtable(LifeCycleSavings,out='return') #I can easily \input this into my LaTeX doc: vt(LifeCycleSavings,out='latex',file='mytable1.tex') ``` ## `file` The `file` argument will write the variable documentation file to an HTML or LaTeX file and save it. Will automatically append 'html' or 'tex' filetype if the filename does not include a period. ```{r, eval=FALSE} data(LifeCycleSavings) st(LifeCycleSavings,file='lifecycle_summary') ``` ## `summ` and `summ.names` `summ` is the set of summary statistics functions to run and include in the table. It is very flexible, hopefully without being difficult to use. It takes a character vector in which each element is of the form `function(x)`, where `function(x)` is any function that takes a vector and returns a single numeric value. For example, `summ=c('mean(x)','median(x)','mean(log(x))')` would calculate the mean, median, and mean of the log for each variable. `summ.names` is just the heading-title of the corresponding `summ`. So in this example that might be `summ.names=c('Mean','Median','Mean of Log')`. 
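To make that concrete, here's that same example written out as a quick sketch (not run here, and the choice of data set is just illustrative):

```{r, eval = FALSE}
#Custom summary functions with matching column headings
st(iris,
   summ = c('mean(x)', 'median(x)', 'mean(log(x))'),
   summ.names = c('Mean', 'Median', 'Mean of Log'))
```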
Factor variables largely ignore `summ` (unless `factor.numeric = TRUE`) and will just report counts in the first column and means/percentages in the second. You may want to consider this when selecting the order you put your `summ` in if you have both factor and numeric variables. `summ` treats as special a few `vtable` functions: `propNA(x)`, `countNA(x)`, and `notNA(x)`, which give the proportion of NA values, the count of NA values, and the count of non-NA values in the variable, respectively. `propNA(x)` and `countNA(x)` are the only functions that include NA values in their calculations. By default, `summ` is `c('notNA(x)', 'mean(x)', 'sd(x)', 'min(x)', 'pctile(x)[25]', 'pctile(x)[75]', 'max(x)')` in 'one-column' tables. If there's more than one column either due to the `col.breaks` option or the `group` option, it defaults to `c('notNA(x)', 'mean(x)', 'sd(x)')`. Alternately, if a given column of variables is entirely made up of factor variables, it defaults to `c('notNA(x)','mean(x)')`. These precise defaults have corresponding default `summ.names`. If you set your own `summ` but not `summ.names`, it will try to guess the name by taking your function, removing `(x)`, and capitalizing. So 'mean(x)' becomes 'Mean'. If you want to get complex you can. If there are multiple 'columns' of summary statistics and you want different statistics in each column, make `summ` and `summ.names` into a list, where each entry is a character vector of the calculations/names you want in that column. ```{r} sumtable(iris, summ=c('notNA(x)', 'mean(x)', 'median(x)', 'propNA(x)')) ``` ```{r} #Getting complex st(iris, col.breaks = 4, summ = list( c('notNA(x)','mean(x)','sd(x^2)','min(x)','max(x)'), c('notNA(x)','mean(x)') ), summ.names = list( c('N','Mean','SD of X^2','Min','Max'), c('Count','Percent') )) ``` ## `group`, `group.long`, and `group.test` `sumtable()` allows for the calculation of summary statistics by group. `group` is a character variable containing the column name in `data` that you want to calculate summary statistics separately for. At the moment this supports only a single variable, although you can combine multiple variables into a single one yourself before using `sumtable`. `group.long` is a flag for whether you want the different group summary statistics stacked side-by-side (`group.long = FALSE`), making for easier comparisons, or on top of each other (`group.long = TRUE`), giving things a bit more room to breathe and allowing space for more statistics. Defaults to `FALSE`. `group.test`, which is only compatible with `group.long = FALSE`, performs a test of independence between the variable in `group` and each of the variables in your summary statistics table. Defaults to `FALSE`. Default `group.test = TRUE` behavior is to perform a group F-test (with `anova(lm())`) for numeric variables, and a chi-squared test (with `chisq.test`) for factor, logical, and character variables, returning results in the format 'Test statistic name = Test statistic^significance stars'. If you want to change any of that, instead of `group.test = TRUE`, set `group.test` equal to a named list of options that will be sent to the `opts` argument of `independence.test`. See `help(independence.test)`. Be aware that the table produced with `group` uses multi-column cells. So it will not look quite as nice when outputting to a format that does not support multi-column cells, like `out='return'`. Multi-column cells are supported in `out='kable'` for `group`, as below, but are not supported on other rows of the `kable`.
Multi-column cells are supported for `out='kable'` only for HTML and LaTeX output. ```{r} st(iris, group = 'Species', group.test = TRUE) ``` ```{r} st(iris, group = 'Species', group.long = TRUE) ``` ## `group.weights` This allows you to pass a set of weights for your data (as a vector or as a string column name). **HOWEVER,** it does not automatically weight all the results. If you leave `summ` as its default, then it will use `weighted.mean(x, w = wts)` and `weighted.sd(x, w = wts)` in place of wherever it would normally have `mean(x)` and `sd(x)`. Factor proportions are calculated using `mean(x)`, so this is covered. Weights will also be passed to `independence.test()` if `group.test = TRUE`, and so tests of independence across groups will be weighted as well. **No other calculations will be automatically weighted.** This is really designed to be used with `group` and `group.test = TRUE` to create weighted balance tables, which is why it's called `group.weights` (and to avoid anyone thinking it weights everything, which would be the natural conclusion if it were just called `weights`). If you want to use the weights with other functions, you can. You'll just need to specify `summ` yourself. Just pass `summ` a string describing function that takes weights and refer to `wts` as the weights, e.g. `'weighted.mean(x, w = wts)'` for a weighted mean, as above. ## `col.breaks` Sometimes you don't need all that much information on each variable, but you have a lot of variables and your table gets long! `col.breaks` will break up the variables in your table into multiple columns, and put them side by side. Also handy if you want to mix numeric and factor variables - put all your factors in a second column to economize on space. Incompatible with `group` unless `group.long = TRUE`. Set `col.breaks` to be a numeric vector. `sumtable()` will start a new column after that many variables have been processed. ```{r} #Let's put species in a column by itself #There are five variables here, Species is last, #so break the column after the first four variables. st(iris, col.breaks = 4) ``` ```{r} #Why not three columns? sumtable(mtcars, col.breaks = c(4,8)) ``` ## `digits` and `fixed.digits` `digits` indicates how many digits after the decimal place should be displayed. `fixed.digits` determines whether trailing zeros are maintained. `fixed.digits` only works if `numformat = NA`, and will eventually be deprecated for a `formatfunc(drop0trailing=TRUE)` setting in `numformat`. ```{r} st(iris, digits = 5) ``` ```{r} st(iris, digits = 3, fixed.digits = TRUE, numformat = NA) ``` ## Other Numerical Formatting Options Should the numbers in the summary table be formatted in some way? By default, "number of nonmissing observations" values are formatted with `notNA()` formatting (but specifically, whatever is specified in `obs.function`), and the rest are not formatted except for having the number of digits set with `digits` or `fixed.digits`. You can specify `numformat` to set numerical formatting for numeric variables. `numformat` accepts as an argument functions that accept a number and return a formatted string, as generated by `formatfunc()` or the `label_` functions in the scales package. So, for example, `numformat = formatfunc(prefix = '$')` would give all your numbers dollar formatting. You can also use string shorthand as shortcuts for some `formatfunc()` settings. 
`'comma'` will set `big.mark = ','`, `'decimal'` will set `big.mark = '.', decimal.mark = ','`, `'percent'` will do percentage formatting (with 1 = 100%), and `'A|B'` will use `'A'` as a prefix and `'B'` as a suffix (specifying suffix optional, so `numformat = '$'` gives `'$3'`). This will also respect your `digits` choice (which `formatfunc()` directly won't do). These can be combined. `'comma$|M'` will turn `1000` into `$1,000M`. Although if you're getting complex you may as well just set `formatfunc()` yourself. You can specify different formatting functions for different variables by either specifying a string vector of those shorthands, or a list of functions. You can either provide an unnamed vector/list with length equal to the number of variables in the data, or you can provide a named vector/list that specifies the formatting for specific variables. You can apply a format to all variables not specifically named by making it an unnamed first entry, for example `numformat = c('dollar','sharevariable' = 'percent')` to make everything dollar-formatted except for 'sharevariable', which becomes percent-formatted. Note that any functions that match the ones in the `skip.format` option will not have this formatting applied to them at all. It is not currently possible otherwise to apply two different kinds of formatting to different columns of the `sumtable`. ```{r} st(iris, numformat = c('|cm', 'Sepal.Width' = 'percent')) ``` ## Factor, Logical, and Character Display Options How should factor, logical, and character variables (all of which get turned into factors in the `sumtable`-making process) be shown on the `sumtable()`? In all `sumtable()s`, there is one row for the name of the factor variable, with the number of nonmissing observations of the variable. Then there's one row for each of the values (for logicals, FALSE and TRUE become 'No' and 'Yes', or pick your own labels with `logical.labels`), showing the count and percentage (i.e. 50%) of observations with that value. Set `factor.percent = FALSE` to report the proportion of observations (.5) instead of the percentage (50%) for the values. Set `factor.counts = FALSE` to omit the count for the individual values. So you'll see the number of nonmissing observations for the variable overall, and then just the percentage/proportion for each of the values. Set `factor.numeric = TRUE` and/or `logical.numeric = TRUE` to ignore all this special treatment for factor/logical variables (respectively), and just treat each of the values as numeric binary variables. `factor.numeric` also covers character variables. ```{r} st(iris, factor.percent = FALSE, factor.counts = FALSE) ``` ```{r} st(iris, factor.numeric = TRUE) ``` ## `labels` The `labels` argument will attach variable labels to the variables in `data`. If variable labels are embedded in `data` and those labels are what you want, then set `labels = TRUE`. If you'd like to set your own labels that aren't embedded in the data, there are three formats available: ### `labels` as a vector `labels` can be set to be a vector of equal length to the number of variables in `data` (or in `vars` if that's set), and in the same order. You can use `NA`s for padding if you only want labels for some varibles and just want to use the regular variable names for others. This option is not recommended if you have set `group`, as it gets tricky to figure out what order to put the labels in. 
```{r} #Note that LifeCycleSavings has five variables data(LifeCycleSavings) #These variable labels are taken from help(LifeCycleSavings) labs <- c('numeric aggregate personal savings', 'numeric % of population under 15', 'numeric % of population over 75', NA, 'numeric % growth rate of dpi') sumtable(LifeCycleSavings,labels=labs) ``` ### `labels` as a two-column data set `labels` can be set to a two-column data set (any type will do) where the first column has the variable names, and the second column has the labels. The column names don't matter. This approach does __not__ require that every variable name in `data` has a matching label. ```{r} #Note that LifeCycleSavings has five variables #with names 'sr', 'pop15', 'pop75', 'dpi', and 'ddpi' labs <- data.frame(nonsensename1 = c('sr', 'pop15', 'pop75'), nonsensename2 = c('numeric aggregate personal savings', 'numeric % of population under 15', 'numeric % of population over 75')) st(LifeCycleSavings,labels=labs) ``` ### `labels` as a one-row data set `labels` can be set to a one-row data set in which the column names are the variable names in `data` and the first row is the variable labels. The `labels` argument can take any data type including data frame, data table, tibble, or matrix, as long as it has a valid set of variable names stored in the `colnames()` attribute. This approach does __not__ require that every variable name in `data` has a matching label. ```{r} labs <- data.frame(sr = 'numeric aggregate personal savings', pop15 = 'numeric % of population under 15', pop75 = 'numeric % of population over 75') sumtable(LifeCycleSavings,labels=labs) ``` ## `title`, `note`, and `anchor` `title` will include a title for your table. `note` will add a table note in the last row. `anchor` will add an anchor ID (`<a name = ` in HTML or `\label{}` in LaTeX) to allow other parts of your document to link to it, if you are including your `sumtable` in a larger document. `title` will only show up in output formats with titles. That is, you won't get them with `out = 'return'`. `note` and `anchor` will only show up in formats that support multi-column cells and anchoring, so `anchor` won't work with `out = 'kable'` and neither will work with `out = 'return'` or `out = 'csv'`. ## `col.width` `sumtable()` will select default column widths by basically just giving the column with the variable name a little more space than the `summ`-based columns. `col.width`, as a vector of percentage column widths on the 0-100 scale, will override these defaults. Doesn't apply to `out = 'kable'`, `out = 'csv'`, or `out = 'return'`. ```{r, eval=FALSE} #The variable names in this data set are pretty short, and the value labels are #a little cramped, so let's move that over. st(LifeCycleSavings, col.width=c(9,rep(13,7))) ``` ## `col.align` `col.align` can be used to adjust text alignment in HTML output. Set to 'left', 'right', or 'center' to align all columns, or give a vector of column alignments to do each column separately. If you want to get tricky, you can add a semicolon afterwards and keep putting in whatever CSS attributes you want. They will be applied to the whole column. This option is only for HTML output and will only work with `out` values of 'browser', 'viewer', or 'htmlreturn'. ```{r, eval = FALSE} st(LifeCycleSavings,col.align = 'right') ``` ## `align`, `note.align`, and `fit.page` These options are used only with LaTeX output (`out` is 'latex' or 'latexpage'). `align` and `note.align` are single strings used for alignment.
`align` will be used as column alignment in standard LaTeX syntax, for example 'lccc' for the left column left-aligned and the other three centered. `note.align` is an alignment note specifically for any table notes set with `note` (or significance stars), which enters as part of a `\multicolumn` argument. These both accept 'p{}' and other LaTeX column types. Defaults to left-aligned 'Variable' columns and right-aligned everything else. If `col.width` is specified, `align` defaults to 'p{}' columns, with widths set by `col.width`. `fit.page` can be used to ensure that the table is a certain width, and will be used as an entry to a `\resizebox{}`. Set to `\\textwidth` to set the table to text width, or `.9\\textwidth` for 90% of the page, and so on, or any recognized width value in LaTeX. For all of these, be sure to escape special characters, in particular backslashes. ```{r, eval = FALSE} sumtable(iris,align = 'p{.3\\textwidth}ccccccc', fit.page = '\\textwidth', out = 'latex') ``` ## `opts` You can create a named list where the names are the above options and the values are the settings for those options, and input it into `sumtable` using `opts=`. This is an easy way to set the same options for many `sumtable`s.
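For example, here's a quick sketch of that pattern (the particular option values are just illustrative):

```{r, eval = FALSE}
#Define one set of options and reuse it across several tables
my_opts <- list(digits = 1, factor.counts = FALSE, title = 'Summary Statistics')
st(iris, opts = my_opts)
st(mtcars, opts = my_opts)
```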
/scratch/gouwar.j/cran-all/cranData/vtable/inst/doc/sumtable.Rmd
--- title: "vtable for Data Exploration" author: "Nick Huntington-Klein" date: "`r Sys.Date()`" output: rmarkdown::html_vignette <!-- output: rmarkdown::html_vignette. pdf_document --> vignette: > %\VignetteIndexEntry{vtable for Data Exploration} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The `vtable` package serves the purpose of outputting automatic variable documentation that can be easily viewed while continuing to work with data. `vtable` contains four main functions: `vtable()` (or `vt()`), `sumtable()` (or `st()`), `labeltable()`, and `dftoHTML()`/`dftoLaTeX()`. Please see the vignettes/articles available on these main functions, as well as on the vtable helper functions.
/scratch/gouwar.j/cran-all/cranData/vtable/inst/doc/vtable.Rmd
## ---- eval=FALSE-------------------------------------------------------------- # vtable(data, # out=NA, # file=NA, # labels=NA, # class=TRUE, # values=TRUE, # missing=FALSE, # index=FALSE, # factor.limit=5, # char.values=FALSE, # data.title=NA, # desc=NA, # note=NA, # anchor=NA, # col.width=NA, # col.align=NA, # align=NA, # note.align='l', # fit.page=NA, # summ=NA, # lush=FALSE, # opts=list()) ## ----------------------------------------------------------------------------- library(vtable) #Example 1, using base data LifeCycleSavings data(LifeCycleSavings) vtable(LifeCycleSavings, out='kable') ## ----------------------------------------------------------------------------- #Example 2, using efc data with embedded variable labels library(sjlabelled) data(efc) #Don't forget the handy shortcut vt()! vt(efc) ## ---- eval = FALSE------------------------------------------------------------ # data(LifeCycleSavings) # vtable(LifeCycleSavings) # vtable(LifeCycleSavings,out='browser') # vtable(LifeCycleSavings,out='viewer') # htmlcode <- vtable(LifeCycleSavings,out='htmlreturn') # vartable <- vtable(LifeCycleSavings,out='return') # # #I can easily \input this into my LaTeX doc: # vt(LifeCycleSavings,out='latex',file='mytable1.tex') ## ---- eval=FALSE-------------------------------------------------------------- # data(LifeCycleSavings) # vt(LifeCycleSavings,file='lifecycle_variabledocumentation') ## ----------------------------------------------------------------------------- #Note that LifeCycleSavings has five variables data(LifeCycleSavings) #These variable labels are taken from help(LifeCycleSavings) labs <- c('numeric aggregate personal savings', 'numeric % of population under 15', 'numeric % of population over 75', 'numeric real per-capita disposable income', 'numeric % growth rate of dpi') vtable(LifeCycleSavings,labels=labs) ## ----------------------------------------------------------------------------- labs <- c('numeric aggregate personal savings',NA,NA,NA,NA) vtable(LifeCycleSavings,labels=labs) ## ----------------------------------------------------------------------------- #Note that LifeCycleSavings has five variables #with names 'sr', 'pop15', 'pop75', 'dpi', and 'ddpi' data(LifeCycleSavings) #These variable labels are taken from help(LifeCycleSavings) labs <- data.frame(nonsensename1 = c('sr', 'pop15', 'pop75'), nonsensename2 = c('numeric aggregate personal savings', 'numeric % of population under 15', 'numeric % of population over 75')) vt(LifeCycleSavings,labels=labs) ## ----------------------------------------------------------------------------- #Note that LifeCycleSavings has five variables #with names 'sr', 'pop15', 'pop75', 'dpi', and 'ddpi' data(LifeCycleSavings) #These variable labels are taken from help(LifeCycleSavings) labs <- data.frame(sr = 'numeric aggregate personal savings', pop15 = 'numeric % of population under 15', pop75 = 'numeric % of population over 75') vtable(LifeCycleSavings,labels=labs) ## ----------------------------------------------------------------------------- data(LifeCycleSavings) vtable(LifeCycleSavings,values=FALSE) vtable(LifeCycleSavings) #CO2 contains factor variables data(CO2) vtable(CO2) ## ----------------------------------------------------------------------------- #efc contains labeled values #Note that the original value labels do not easily tell you what numerical #value each label maps to, but vtable() does. 
library(sjlabelled) data(efc) vtable(efc) ## ---- eval=FALSE-------------------------------------------------------------- # data(USJudgeRatings) # USJudgeRatings$Judge <- row.names(USJudgeRatings) # USJudgeRatings$SecondCharacter <- 'Less Interesting' # USJudgeRatings$ThirdCharacter <- 'Less Interesting Still!' # # #Show values for all character variables # vtable(USJudgeRatings,char.values=TRUE) # #Or just for a subset # vtable(USJudgeRatings,char.values=c('Judge','SecondCharacter')) ## ---- eval=FALSE-------------------------------------------------------------- # library(vtable) # # data(LifeCycleSavings) # vtable(LifeCycleSavings) # vtable(LifeCycleSavings,data.title='Intercountry Life-Cycle Savings Data', # desc='omit') # vtable(LifeCycleSavings,data.title='Intercountry Life-Cycle Savings Data', # desc='Data on the savings ratio 1960–1970. omit') # vtable(LifeCycleSavings,data.title='Intercountry Life-Cycle Savings Data', # desc='Data on the savings ratio 1960–1970', # note='Data from Belsley, Kuh, and Welsch (1980)') ## ---- eval=FALSE-------------------------------------------------------------- # library(sjlabelled) # data(efc) # #The variable names in this data set are pretty short, and the value labels are # #a little cramped, so let's move that over. # vtable(efc,col.width=c(10,10,40,40)) ## ---- eval = FALSE------------------------------------------------------------ # library(sjlabelled) # data(efc) # vtable(efc,col.align = 'right') ## ---- eval = FALSE------------------------------------------------------------ # library(sjlabelled) # data(efc) # vtable(efc,align = 'p{.3\\textwidth}cc', fit.page = '\\textwidth', out = 'latex') ## ----------------------------------------------------------------------------- library(sjlabelled) data(efc) vtable(efc,summ=c('mean(x)','countNA(x)'))
/scratch/gouwar.j/cran-all/cranData/vtable/inst/doc/vtablefunction.R
--- title: "Variable Table (vtable)" author: "Nick Huntington-Klein" date: "`r Sys.Date()`" output: rmarkdown::html_vignette <!-- output: rmarkdown::html_vignette. pdf_document --> vignette: > %\VignetteIndexEntry{Variable Table (vtable)} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The `vtable` package serves the purpose of outputting automatic variable documentation that can be easily viewed while continuing to work with data. `vtable` contains four main functions: `vtable()` (or `vt()`), `sumtable()` (or `st()`), `labeltable()`, and `dftoHTML()`/`dftoLaTeX()`. This vignette focuses on `vtable()`. `vtable()` takes a dataset and outputs a formatted variable documentation file. This serves several purposes. First, it allows for an easy generation of a variable documentation file, without requiring that one has already been created and made accessible through `help(data)`, or dealing with creating and finding R help documentation files. Second, it produces a list of variables (and, if provided, their labels) that can be easily viewed while working with the data, preventing repeated calls to `head()`, and making it much easier to work with confusingly-named variables. Third, the variable documentation file can be opened in a browser (with option `out='browser'`, saving to file and opening directly, or by opening in the RStudio Viewer pane and clicking 'Show in New Window') where it can be easily searched with standard Find-in-Page functions like Ctrl/Cmd-F, allowsing you to search for the variable or variable label you want. ----- # The `vtable()` function `vtable()` (or `vt()` for short) syntax follows the following outline: ```{r, eval=FALSE} vtable(data, out=NA, file=NA, labels=NA, class=TRUE, values=TRUE, missing=FALSE, index=FALSE, factor.limit=5, char.values=FALSE, data.title=NA, desc=NA, note=NA, anchor=NA, col.width=NA, col.align=NA, align=NA, note.align='l', fit.page=NA, summ=NA, lush=FALSE, opts=list()) ``` The goal of `vtable()` is to take a data set `data` and output a usually-HTML (but `data.frame`, `kable`, `csv`, and `latex` options are there too) file with documentation concerning each of the variables in `data`. There are several options as to what will be included in the documentation file, and each of these options are explained below. Throughout, the output will be built as `kable`s since this is an RMarkdown document. However, generally you can leave `out` at its default and it will publish an HTML table to Viewer (in RStudio) or the browser (otherwise). This will also include some additional information about your data that can't be demonstrated in this vignette: ## `data` The `data` argument can take any `data.frame`, `data.table`, `tibble`, or `matrix`, as long as it has a valid set of variable names stored in the `colnames()` attribute. The goal of `vtable()` is to produce documentation of each of the variables in this data set and display that documentation, one variable per row on the output `vtable`. If `data` has embedded variable or value labels, as the data set `efc` does below, `vtable()` will extract and use them automatically. ```{r} library(vtable) #Example 1, using base data LifeCycleSavings data(LifeCycleSavings) vtable(LifeCycleSavings, out='kable') ``` ```{r} #Example 2, using efc data with embedded variable labels library(sjlabelled) data(efc) #Don't forget the handy shortcut vt()! vt(efc) ``` ## `out` The `out` option determines what will be done with the resulting variable documentation file. 
There are several options for `out`: | Option | Result | |------------| -----------------------------------------| | browser | Loads variable documentation in web browser. | | viewer | Loads variable documentation in Viewer pane (RStudio only). | | htmlreturn | Returns HTML code for variable documentation file. | | return | Returns variable documentation table in data frame format. | | csv | Returns variable documentation in data.frame format and, with a `file` option, saves that to CSV. | | kable | Returns a `knitr::kable()` | | latex | Returns a LaTeX table. | | latexpage | Returns an independently-buildable LaTeX document. | By default, `vtable` will select 'viewer' if running in RStudio, and 'browser' otherwise. If it's being built in an RMarkdown document with `knitr`, it will default to 'kable'. Note that an RMarkdown default to 'kable' will also include some nice formatting, where `out = 'kable'` directly will give you a more basic `kable` you can format yourself. ```{r, eval = FALSE} data(LifeCycleSavings) vtable(LifeCycleSavings) vtable(LifeCycleSavings,out='browser') vtable(LifeCycleSavings,out='viewer') htmlcode <- vtable(LifeCycleSavings,out='htmlreturn') vartable <- vtable(LifeCycleSavings,out='return') #I can easily \input this into my LaTeX doc: vt(LifeCycleSavings,out='latex',file='mytable1.tex') ``` ## `file` The `file` argument will write the variable documentation file to an HTML or LaTeX file and save it. Will automatically append 'html' or 'tex' filetype if the filename does not include a period. ```{r, eval=FALSE} data(LifeCycleSavings) vt(LifeCycleSavings,file='lifecycle_variabledocumentation') ``` ## `labels` The `labels` argument will attach variable labels to the variables in `data`. If variable labels are embedded in `data` and those labels are what you want, the `labels` argument is unnecessary. Set `labels='omit'` if there are embedded labels but you do not want them in the table. `labels` can be used in any one of three formats. ### `labels` as a vector `labels` can be set to be a vector of equal length to the number of variables in `data`, and in the same order. `NA` values can be used for padding if some variables do not have labels. ```{r} #Note that LifeCycleSavings has five variables data(LifeCycleSavings) #These variable labels are taken from help(LifeCycleSavings) labs <- c('numeric aggregate personal savings', 'numeric % of population under 15', 'numeric % of population over 75', 'numeric real per-capita disposable income', 'numeric % growth rate of dpi') vtable(LifeCycleSavings,labels=labs) ``` ```{r} labs <- c('numeric aggregate personal savings',NA,NA,NA,NA) vtable(LifeCycleSavings,labels=labs) ``` ### `labels` as a two-column data set `labels` can be set to a two-column data set (any type will do) where the first column has the variable names, and the second column has the labels. The column names don't matter. This approach does __not__ require that every variable name in `data` has a matching label.
```{r} #Note that LifeCycleSavings has five variables #with names 'sr', 'pop15', 'pop75', 'dpi', and 'ddpi' data(LifeCycleSavings) #These variable labels are taken from help(LifeCycleSavings) labs <- data.frame(nonsensename1 = c('sr', 'pop15', 'pop75'), nonsensename2 = c('numeric aggregate personal savings', 'numeric % of population under 15', 'numeric % of population over 75')) vt(LifeCycleSavings,labels=labs) ``` ### `labels` as a one-row data set `labels` can be set to a one-row data set in which the column names are the variable names in `data` and the first row is the variable labels. The `labels` argument can take any data type including data frame, data table, tibble, or matrix, as long as it has a valid set of variable names stored in the `colnames()` attribute. This approach does __not__ require that every variable name in `data` has a matching label. ```{r} #Note that LifeCycleSavings has five variables #with names 'sr', 'pop15', 'pop75', 'dpi', and 'ddpi' data(LifeCycleSavings) #These variable labels are taken from help(LifeCycleSavings) labs <- data.frame(sr = 'numeric aggregate personal savings', pop15 = 'numeric % of population under 15', pop75 = 'numeric % of population over 75') vtable(LifeCycleSavings,labels=labs) ``` ## `class` The `class` flag will either report or not report the class of each variable in the resulting variable table. By default this is set to `TRUE`. ## `values` The `values` flag will either report or not report the values that each variable takes. Numeric variables will report a range, logicals will report 'TRUE FALSE', and factor variables will report the first `factor.limit` (default 5) factors listed. If the variable is numeric but has value labels applied by the `sjlabelled` package, `vtable()` will find them and report the numeric-label crosswalk. ```{r} data(LifeCycleSavings) vtable(LifeCycleSavings,values=FALSE) vtable(LifeCycleSavings) #CO2 contains factor variables data(CO2) vtable(CO2) ``` ```{r} #efc contains labeled values #Note that the original value labels do not easily tell you what numerical #value each label maps to, but vtable() does. library(sjlabelled) data(efc) vtable(efc) ``` ## `missing` The `missing` flag, set to TRUE, will report the number of missing values in each variable. Defaults to FALSE. ## `index` The `index` flag will either report or not report the index number of each variable. Defaults to FALSE. ## `factor.limit` If `values` is set to `TRUE`, then `factor.limit` limits the number of factors displayed on the variable table. `factor.limit` is by default 5, to cut down on clutter. The table will include the phrase "and more" to indicate that some factors have been cut off. Setting `factor.limit=0` will include all factors. If `values=FALSE`, `factor.limit` does nothing. ## `char.values` If `values` is set to `TRUE`, then `char.values = TRUE` instructs `vtable` to list the values that character variables take, as though they were factors. If you only want some of the character variables to have their values listed, use a character vector to indicate which variables. ```{r, eval=FALSE} data(USJudgeRatings) USJudgeRatings$Judge <- row.names(USJudgeRatings) USJudgeRatings$SecondCharacter <- 'Less Interesting' USJudgeRatings$ThirdCharacter <- 'Less Interesting Still!'
#Show values for all character variables vtable(USJudgeRatings,char.values=TRUE) #Or just for a subset vtable(USJudgeRatings,char.values=c('Judge','SecondCharacter')) ``` ## `data.title`, `desc`, `note`, and `anchor` `data.title` will include a data title in the variable documentation file. If not set manually, this will default to the variable name for `data`. `desc` will include a description of the data set in the variable documentation file. This will by default include information on the number of observations and the number of columns. To remove this, set `desc='omit'`, or include any description and then include 'omit' as the last four characters. `note` will add a table note in the last row. `note.align` determines its left/right/center alignment, but is only used with LaTeX (see below). `anchor` will add an anchor ID (`<a name = ` in HTML or `\label{}` in LaTeX) to allow other parts of your document to link to it, if you are including your `vtable` in a larger document. `data.title` and `desc` will only show up in full-page `vtable`s. That is, you won't get them with `out = 'return'`, `out = 'csv'`, or `out = 'latex'` (although `out = 'latexpage'` works). `note` and `anchor` will only show up in formats that support multi-column cells and anchoring, so they won't work with `out = 'return'` or `out = 'csv'`. `out = 'kable'` is a half-exception in that it will use `data.title` as the caption for the `kable`, and will use the `note` as a footnote, but won't use `desc` or `anchor`. ```{r, eval=FALSE} library(vtable) data(LifeCycleSavings) vtable(LifeCycleSavings) vtable(LifeCycleSavings,data.title='Intercountry Life-Cycle Savings Data', desc='omit') vtable(LifeCycleSavings,data.title='Intercountry Life-Cycle Savings Data', desc='Data on the savings ratio 1960–1970. omit') vtable(LifeCycleSavings,data.title='Intercountry Life-Cycle Savings Data', desc='Data on the savings ratio 1960–1970', note='Data from Belsley, Kuh, and Welsch (1980)') ``` ## `col.width` `vtable()` will select default column widths for the variable table depending on which measures `(name, class, label, values, summ)` are included. `col.width`, as a vector of percentage column widths on the 0-100 scale, will override these defaults. ```{r, eval=FALSE} library(sjlabelled) data(efc) #The variable names in this data set are pretty short, and the value labels are #a little cramped, so let's move that over. vtable(efc,col.width=c(10,10,40,40)) ``` ## `col.align` `col.align` can be used to adjust text alignment in HTML output. Set to 'left', 'right', or 'center' to align all columns, or give a vector of column alignments to do each column separately. If you want to get tricky, you can add a semicolon afterwards and keep putting in whatever CSS attributes you want. They will be applied to the whole column. This option is only for HTML output and will only work with `out` values of 'browser', 'viewer', or 'htmlreturn'. ```{r, eval = FALSE} library(sjlabelled) data(efc) vtable(efc,col.align = 'right') ``` ## `align`, `note.align`, and `fit.page` These options are used only with LaTeX output (`out` is 'latex' or 'latexpage'). `align` and `note.align` are single strings used for alignment. `align` will be used as column alignment in standard LaTeX syntax, for example 'lccc' for the left column left-aligned and the other three centered. `note.align` is an alignment note specifically for any table notes set with `note` (or significance stars), which enters as part of a `\multicolumn` argument. 
These both accept 'p{}' and other LaTeX column types. Defaults to left-aligned 'Variable' columns and right-aligned everything else. If `col.width` is specified, `align` defaults to 'p{}' columns, with widths set by `col.width`. `fit.page` can be used to ensure that the table is a certain width, and will be used as an entry to a `\resizebox{}`. Set to `\\textwidth` to set the table to text width, or `.9\\textwidth` for 90% of the page, and so on, or any recognized width value in LaTeX. For all of these, be sure to escape special characters, in particular backslashes. ```{r, eval = FALSE} library(sjlabelled) data(efc) vtable(efc,align = 'p{.3\\textwidth}cc', fit.page = '\\textwidth', out = 'latex') ``` ## `summ` `summ` will calculate summary statistics for all variables that report valid output on the given summary statistics functions. `summ` is very flexible. It takes a character vector in which each element is of the form `function(x)`, where `function(x)` is any function that takes a vector and returns a single numeric value. For example, `summ=c('mean(x)','median(x)','mean(log(x))')` would calculate the mean, median, and mean of the log for each variable. `summ` treats as special two `vtable` functions: `propNA(x)` and `countNA(x)`, which give the proportion and count of NA values in the variable, respectively. These two functions are always reported first, and are the only functions that include NA values in their calculations. ```{r} library(sjlabelled) data(efc) vtable(efc,summ=c('mean(x)','countNA(x)')) ``` ## `lush` The default `vtable` settings may not be to your liking, and in particular you may prefer more information. Setting `lush = TRUE` is an easy way to get more information. It will force `char.values` and `missing` to `TRUE`, and will also set a default `summ` value of `c('mean(x)', 'sd(x)', 'nuniq(x)')`. ## `opts` You can create a named list where the names are the above options and the values are the settings for those options, and input it into `vtable` using `opts=`. This is an easy way to set the same options for many `vtable`s.
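As a quick sketch of that workflow (these particular settings are just illustrative):

```{r, eval = FALSE}
#Reuse the same vtable settings across several data sets
doc_opts <- list(missing = TRUE, index = TRUE, summ = c('mean(x)', 'countNA(x)'))
vt(LifeCycleSavings, opts = doc_opts)
vt(CO2, opts = doc_opts)
```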
/scratch/gouwar.j/cran-all/cranData/vtable/inst/doc/vtablefunction.Rmd
## ----------------------------------------------------------------------------- library(vtable) my_formatter_func <- formatfunc(percent = TRUE, digits = 3, nsmall = 2, big.mark = ',') my_formatter_func(523.2355987) ## ----------------------------------------------------------------------------- library(vtable) #Some random normal data, and its percentiles d <- rnorm(1000) pc <- pctile(d) #25th, 50th, 75th percentile pc[c(25,50,75)] ## ----------------------------------------------------------------------------- #Inverse normal CDF with 100 points of articulation plot(pc) ## ----------------------------------------------------------------------------- x <- 1:100 w <- 1:100 weighted.mean(x, w) sd(x) weighted.sd(x, w) ## ---- eval = FALSE------------------------------------------------------------ # independence.test(x,y,w=NA, # factor.test = NA, # numeric.test = NA, # star.cutoffs = c(.01,.05,.1), # star.markers = c('***','**','*'), # digits = 3, # fixed.digits = FALSE, # format = '{name}={stat}{stars}', # opts = list()) ## ----------------------------------------------------------------------------- data(iris) independence.test(iris$Species, iris$Sepal.Length, star.cutoffs = c(.05,.01,.001)) ## ----------------------------------------------------------------------------- independence.test(iris$Species, iris$Sepal.Width, digits=1) ## ----------------------------------------------------------------------------- independence.test(iris$Species, iris$Sepal.Width, digits=4, fixed.digits = TRUE) ## ----------------------------------------------------------------------------- independence.test(iris$Species, iris$Sepal.Width, format = 'Pr(>{name}): {pval}{stars}')
/scratch/gouwar.j/cran-all/cranData/vtable/inst/doc/vtablehelpers.R
--- title: "vtable Bonus Functions" author: "Nick Huntington-Klein" date: "`r Sys.Date()`" output: rmarkdown::html_vignette <!-- output: rmarkdown::html_vignette. pdf_document --> vignette: > %\VignetteIndexEntry{vtable Bonus Functions} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The `vtable` package serves the purpose of outputting automatic variable documentation that can be easily viewed while continuing to work with data. `vtable` contains four main functions: `vtable()` (or `vt()`), `sumtable()` (or `st()`), `labeltable()`, and `dftoHTML()`/`dftoLaTeX()`. This vignette focuses on some bonus helper functions that come with `vtable` that have been exported because they may be handy to you. This can come in handy for saving a little time, and can help you avoid having to create an unnamed function when you need to call a function. ----- # Shortcut Helper Functions `vtable` includes four shortcut functions. These are generally intended for use with the `summ` option in `vtable` and `sumtable` because nested functions don't look very nice in a `vtable`, or in a `sumtable` unless you explicitly set the `summ.names`. ## `nuniq` `nuniq(x)` returns `length(unique(x))`, the number of unique values in the vector. ## `countNA`, `propNA`, and `notNA` These three functions are shortcuts for dealing with missing data. You have probably written out the nested versions of these many times! | Function | Short For | |------------| -----------------------------------------| | `countNA()` | `sum(is.na())` | | `propNA()` | `mean(is.na())` | | `notNA()` | `sum(!is.na())` | Note that `notNA()` also has some additional formatting options, which you would probably ignore if using it iteractively. ## `is.round` This function is a shortcut for `!any(!(x == round(x,digits)))`. It takes two arguments: a vector `x` and a number of `digits` (0 by default). It checks whether you can round to `digits` digits without losing any information. ----- # Other Helper Functions ## `formatfunc` `formatfunc()` is a function that returns a function, which itself helps format numbers using the `format()` function, in the same spirit as the `label_` functions in the scales package. It is largely used for the `numformat` argument of `sumtable()`. `formatfunc()` for the most part takes the same arguments as `format()`, and so `help(format)` can be a guide for using it. However, there are some differences. Some defaults are changed. By default, `scientific = FALSE, trim = TRUE`. There are four new arguments as well. `percent = TRUE` will format the number as a percentage by multiplying it by 100 and adding a % at the end. You can instead set `percent` equal to some number, and that number will instead be taken as 100%, instead of 1. So `percent = 100`, for example, will just add a % at the end without doing any multiplying. `prefix` and `suffix` will, naturally, add prefixes or suffixes to the formatted number. So `prefix = '$', suffix = 'M'`, for example, will produce a function that will turn `3` into `$3M`. `scale` will multiply the number by `scale` before formatting it. So `prefix = '$', suffix = 'M', scale = 1/1000000` will turn `3000000` into `$3M`. ```{r} library(vtable) my_formatter_func <- formatfunc(percent = TRUE, digits = 3, nsmall = 2, big.mark = ',') my_formatter_func(523.2355987) ``` ## `pctile` `pctile(x)` is short for `quantile(x,1:100/100)`. So in one sense this is another shortcut function. But this inherently lets you interact with percentiles a bit differently. 
While `quantile()` has you specify which percentile you want in the function call, `pctile()` returns an object with all integer percentiles, and you can pull out which ones you want afterwards. `pctile(x)[50]` is the 50th percentile, etc. This can be convenient in several applications, an obvious one being in `sumtable`. ```{r} library(vtable) #Some random normal data, and its percentiles d <- rnorm(1000) pc <- pctile(d) #25th, 50th, 75th percentile pc[c(25,50,75)] ``` ```{r} #Inverse normal CDF with 100 points of articulation plot(pc) ``` ## `weighted.sd` `weighted.sd(x, w)` is a function to calculate a weighted standard deviation of `x` using `w` as weights, much like the base `weighted.mean()` does for means. It is mostly used as a helper function for `sumtable()` when `group.weights` is specified. However, you can use it on its own if you like. Unlike `weighted.mean()`, setting `na.rm = TRUE` will account for missings both in `x` and `w`. The weighted standard deviation is calculated as $$ \sqrt{\frac{\sum_i(w_i*(x_i-\bar{x}_w)^2)}{\frac{N_w-1}{N_w}\sum_iw_i}} $$ Where $\bar{x}_w$ is the weighted mean of $x$, and $N_w$ is the number of observations with a nonzero weight. ```{r} x <- 1:100 w <- 1:100 weighted.mean(x, w) sd(x) weighted.sd(x, w) ``` # `independence.test` `independence.test` is a helper function for `sumtable(group.test=TRUE)` that tests for independence between a categorical variable `x` and another variable `y` that may be categorical or numerical. Then, it outputs a *formatted string*, with significance stars, for printing. The function takes the format ```{r, eval = FALSE} independence.test(x,y,w=NA, factor.test = NA, numeric.test = NA, star.cutoffs = c(.01,.05,.1), star.markers = c('***','**','*'), digits = 3, fixed.digits = FALSE, format = '{name}={stat}{stars}', opts = list()) ``` ## `factor.test` and `numeric.test` These are functions that actually perform the independence test. `numeric.test` is used when `y` is numeric, and `factor.test` is used in all other instances. Specifically, these functions should take only `x`, `y`, and `w=NULL` as arguments, and should return a list with three elements: the name of the test statistic, the test statistic itself, and the p-value of the test. By default, these are the internal functions `vtable:::chisq.it` for `factor.test` and `vtable:::groupf.it` for `numeric.test`, so you can take a look at those (just put `vtable:::chisq.it` in the terminal and it will show you the function's code) if you'd like to make your own test functions. ## `star.cutoffs` and `star.markers` These are numeric and character vectors, respectively, used for p-value cutoffs and to create significance markers. `star.cutoffs` indicates the cutoffs, and `star.markers` indicates the markers to be used with each cutoff, in the same order. So with `star.cutoffs = c(.01,.05,.1)` and `star.markers = c('***','**','*')`, each p-value below .01 will get marked with `'***'`, each from .01 to .05 will get `'**'`, and each from .05 to .1 will get `'*'`. Defaults are set to "economics defaults" (.1, .05, .01). But these are of course easy to change. ```{r} data(iris) independence.test(iris$Species, iris$Sepal.Length, star.cutoffs = c(.05,.01,.001)) ``` ## `digits` and `fixed.digits` `digits` indicates how many digits after the decimal place of the test statistics and p-values should be displayed. `fixed.digits` determines whether trailing zeros are maintained.
```{r} independence.test(iris$Species, iris$Sepal.Width, digits=1) ``` ```{r} independence.test(iris$Species, iris$Sepal.Width, digits=4, fixed.digits = TRUE) ``` ## `format` This is the printing format that the output will produce, incorporating the name of the test statistic `{name}`, the test statistic `{stat}`, the significance markers `{stars}`, and the p-value `{pval}`. If your `independence.test` is heading out to another format besides being printed in the R console, you may want to add additional markup like `'{name}$={stat}^{stars}$'` in LaTeX or `'{name}={stat}<sup>{stars}</sup>'` in HTML. If you do this, be sure to think carefully about escaping or not escaping characters as appropriate when you print! ```{r} independence.test(iris$Species, iris$Sepal.Width, format = 'Pr(>{name}): {pval}{stars}') ``` ## `opts` You can create a named list where the names are the above options and the values are the settings for those options, and input it into `independence.test` using `opts=`. This is an easy way to set the same options for many `independence.test`s.
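For instance, a quick sketch of that pattern (the settings here are just for illustration):

```{r, eval = FALSE}
#Reuse the same test settings for several variables
test_opts <- list(star.cutoffs = c(.05, .01, .001), digits = 2)
independence.test(iris$Species, iris$Sepal.Length, opts = test_opts)
independence.test(iris$Species, iris$Sepal.Width, opts = test_opts)
```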
/scratch/gouwar.j/cran-all/cranData/vtable/inst/doc/vtablehelpers.Rmd
--- title: "dftoHTML and dftoLaTeX: Data Frame Formatting" author: "Nick Huntington-Klein" date: "`r Sys.Date()`" output: rmarkdown::html_vignette <!-- output: rmarkdown::html_vignette. pdf_document --> vignette: > %\VignetteIndexEntry{dftoHTML and dftoLaTeX: Data Frame Formatting} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The `vtable` package serves the purpose of outputting automatic variable documentation that can be easily viewed while continuing to work with data. `vtable` contains four main functions: `vtable()` (or `vt()`), `sumtable()` (or `st()`), `labeltable()`, and `dftoHTML()`/`dftoLaTeX()`. This vignette focuses on `dftoHTML()`/`dftoLaTeX()`. `dftoHTML()` and `dftoLaTeX` are helper functions used by `vtable()`, `sumtable()`, and `labeltable()`. They takes any data frame or matrix with column names and outputs HTML or LaTeX table code for that data. Note that none of the vignettes in this example are set to run because `dftoHTML` and `dftoLaTeX` output is intended to go to places other than Markdown (although both can certainly be used with 'asis' chunks to produce results in Markdown). ----- # The `dftoHTML()` function `dftoHTML()` syntax follows the following outline: ```{r, eval = FALSE} dftoHTML(data, out=NA, file=NA, note=NA, anchor=NA, col.width=NA, col.align=NA, row.names=FALSE, no.escape=NA) ``` `dftoHTML()` largely exists to serve `vtable()`, `sumtable()`, and `labeltable()`. What it does is takes a data set `data` and returns an HTML table with the contents of that data. Outside of its use with other `vtable` functions, `dftoHTML()` can also be used to keep a view of the data file open while working on the data, avoiding repeated calls to `head()` or similar, or switching back and forth between code tabs and data view tabs. ------ ## `data` `dftoHTML()` will accept any data set with a `colnames()` attribute. ```{r, eval = FALSE} library(vtable) data(LifeCycleSavings) dftoHTML(LifeCycleSavings) ``` ## out The `out` option determines what will be done with the resulting variable documentation file. There are several options for `out`: | Option | Result | |------------| -----------------------------------------| | browser | Loads HTML version of `data` in web browser. | | viewer | Loads HTML version of `data` in Viewer pane (RStudio only). | | htmlreturn | Returns HTML code for `data`. | By default, `vtable` will select 'viewer' if running in RStudio, and 'browser' otherwise. ```{r, eval=FALSE} library(vtable) data(LifeCycleSavings) dftoHTML(LifeCycleSavings) dftoHTML(LifeCycleSavings,out='browser') dftoHTML(LifeCycleSavings,out='viewer') htmlcode <- dftoHTML(LifeCycleSavings,out='htmlreturn') ``` ## `file` The `file` argument will write the HTML version of `data` to an HTML file and save it. Will automatically append 'html' filetype if the filename does not include a period. ```{r, eval=FALSE} data(LifeCycleSavings) dftoHTML(LifeCycleSavings,file='lifecycledata_htmlversion.html') ``` ## `note` `note` will add a table note in the last row. ```{r, eval = FALSE} dftoHTML(LifeCycleSavings,note='Data from Belsley, Kuh, and Welsch 1980'). ``` ## `anchor` `anchor` will add an anchor ID (`<a name = `) to allow other parts of your document to link to it, if you are including your table in a larger document. ## `col.width` `dftoHTML()` will select, by default, equal column widths for all columns in `data`. `col.width`, as a vector of percentage column widths on the 0-100 scale, will override these defaults. 
```{r, eval = FALSE} #Let's make sr much bigger for some reason dftoHTML(LifeCycleSavings,col.width=c(60,10,10,10,10)) ``` ## `col.align` `col.align` can be used to adjust text alignment in HTML output. Set to 'left', 'right', or 'center' to align all columns, or give a vector of column alignments to do each column separately. While this is not intended usage, you can add additional CSS arguments (i.e. `'left; padding:5px'`) and it will apply that CSS to every cell in the column. ## `row.names` The `row.names` flag determines whether the row names of the data are included as the first column in the output table. ```{r, eval=FALSE} dftoHTML(LifeCycleSavings,row.names=TRUE) ``` ## `no.escape` If the data passed to `dftoHTML()` contains special HTML characters like '<', `dftoHTML()` will escape them. This could cause you some sort of existential crisis if you wanted to put HTML formatting in your data to be displayed. So set `no.escape` to a vector of column indices to skip the character-escaping process for those columns. ```{r, eval=FALSE} #Don't escape columns 1 or 2 dftoHTML(LifeCycleSavings,no.escape=1:2) ``` ----- # The `dftoLaTeX()` function `dftoLaTeX()` syntax follows the following outline: ```{r, eval = FALSE} dftoLaTeX(data, file=NA, frag=TRUE, title=NA, note=NA, anchor=NA, align=NA, row.names=FALSE, no.escape=NA) ``` `dftoLaTeX()` largely exists to serve `vtable()`, `sumtable()`, and `labeltable()`. What it does is take a data set `data` and return a LaTeX table with the contents of that data. You could also use it on its own to write any data frame to LaTeX table format. ------ ## `data` `dftoLaTeX()` will accept any data set with a `colnames()` attribute. ```{r, eval = FALSE} library(vtable) data(LifeCycleSavings) dftoLaTeX(LifeCycleSavings) ``` ## `file` The `file` argument will write the TeX version of `data` to a .tex file and save it. Will automatically append 'tex' filetype if the filename does not include a period. ```{r, eval=FALSE} data(LifeCycleSavings) dftoLaTeX(LifeCycleSavings,file='lifecycledata_latexversion.tex') ``` ## `note` `note` will add a table note in the last row. ```{r, eval = FALSE} dftoLaTeX(LifeCycleSavings,note='Data from Belsley, Kuh, and Welsch 1980') ``` ## `anchor` `anchor` will add an anchor ID (`\label{}`) to allow other parts of your document to link to it, if you are including your output in a larger document. ```{r, eval = FALSE} dftoLaTeX(LifeCycleSavings,anchor='tab:LCS') ``` ## `align` This is a single string, which will be used as column alignment in standard LaTeX syntax, for example 'lccc' for the left column left-aligned and the other three centered. Accepts 'p{}' and other LaTeX column types. Don't forget to escape backslashes! Defaults to all left-aligned. ```{r, eval = FALSE} dftoLaTeX(LifeCycleSavings,row.names=TRUE,align='p{.25\\textwidth}ccccc') ``` ## `row.names` The `row.names` flag determines whether the row names of the data are included as the first column in the output table. ```{r, eval=FALSE} dftoLaTeX(LifeCycleSavings,row.names=TRUE) ``` ## `no.escape` If the data passed to `dftoLaTeX()` contains special LaTeX characters like '~' or '^', `dftoLaTeX()` will escape them. This could cause you some sort of existential crisis if you wanted to put LaTeX formatting in your data to be displayed. So set `no.escape` to a vector of column indices to skip the character-escaping process for those columns. ```{r, eval=FALSE} #Don't escape columns 1 or 2 dftoLaTeX(LifeCycleSavings,no.escape=1:2) ```
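Putting several of the options above together, a typical call might look something like this sketch (like the other examples in this vignette it isn't run here, and the argument values are just illustrative):

```{r, eval = FALSE}
#Combine file, title, note, anchor, alignment, and row names in one call
dftoLaTeX(LifeCycleSavings,
          file = 'lifecycledata_latexversion.tex',
          title = 'Intercountry Life-Cycle Savings Data',
          note = 'Data from Belsley, Kuh, and Welsch 1980',
          anchor = 'tab:LCS',
          align = 'p{.25\\textwidth}ccccc',
          row.names = TRUE)
```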
/scratch/gouwar.j/cran-all/cranData/vtable/vignettes/dftotable.Rmd
--- title: "labeltable: Label Table" author: "Nick Huntington-Klein" date: "`r Sys.Date()`" output: rmarkdown::html_vignette <!-- output: rmarkdown::html_vignette. pdf_document --> vignette: > %\VignetteIndexEntry{labeltable: Label Table} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The `vtable` package serves the purpose of outputting automatic variable documentation that can be easily viewed while continuing to work with data. `vtable` contains four main functions: `vtable()` (or `vt()`), `sumtable()` (or `st()`), `labeltable()`, and `dftoHTML()`/`dftoLaTeX()`. This vignette focuses on `labeltable()`. `labeltable()` is designed to take a single variable and show the values it is associated with. This can also be used to generate data documentation if desired, or can just be an easy way to look at label values, or learn more about the data you're working with. If that variable has value labels from the `sjlabelled` or `haven` packages, it will show how the values in the data correspond to the value labels. Alternately, you can include other variables as well, and `labeltable()` will show, for each value of the variable you're interested in, the values that those other variables take. This can be handy, for example, if you used some variables to create a numeric ID and want to remember what original values correspond to each ID. It can also act as sort of a cross-tabulation. ----- # The `labeltable()` function `vtable()` syntax follows the following outline: ```{r, eval=FALSE} labeltable(var, ..., out=NA, count=FALSE, percent=FALSE, file=NA, desc=NA, note=NA, note.align=NA, anchor=NA) ``` `labeltable()` is a function that shows the values that correspond to `var`. This could be value label values, or it could be the values found in the data for the `...` variables. ```{r} #Include a single labelled variable to show how the values of that variable correspond to its value labels. library(vtable) library(sjlabelled) data(efc) labeltable(efc$e15relat) ``` ```{r} #Include more than one variable to show, for each value of the first, what values of the others are present in the data. data(mtcars) labeltable(mtcars$cyl,mtcars$carb,mtcars$am) ``` ## `out` The `out` option determines what will be done with the resulting label table file. There are several options for `out`: | Option | Result | |------------| -----------------------------------------| | browser | Loads output in web browser. | | viewer | Loads output in Viewer pane (RStudio only). | | htmlreturn | Returns HTML code for output file. | | return | Returns output table in data.frame format. | | csv | Returns output table in data.frame format and, with a `file` option, saves that to CSV. | | kable | Returns a `knitr::kable()` | | latex | Returns a LaTeX table. | | latexpage | Returns an independently-buildable LaTeX document. | By default, `vtable` will select 'viewer' if running in RStudio, and 'browser' otherwise. If it's being built in an RMarkdown document with `knitr`, it will default to 'kable'. ## `count` and `percent` These options allow `labeltable()` to act as a sort of `table()`, where it will also include the counts and/or percentage of the variable that takes each value. ```{r} library(vtable) data(LifeCycleSavings) labeltable(efc$e15relat, count = TRUE, percent = TRUE) ``` ## `file` The `file` argument will write the variable documentation file to an HTML file and save it. Will automatically append 'html' filetype if the filename does not include a period. 
```{r, eval=FALSE} library(vtable) data(LifeCycleSavings) labeltable(efc$e15relat,file='lifecycle_variabledocumentation') ``` ## `desc`, `note`, and `anchor`. `desc` will include a description of the data set (or whatever you like) in the file, which may be useful for documentation purposes. `note` will add a table note in the last row. `anchor` will add an anchor ID (`<a name = ` in HTML or `\label{}` in LaTeX) to allow other parts of your document to link to it, if you are including your vtable in a larger document. `desc` will only show up in full-page `labeltable`s. That is, you won't get them with `out = 'kable'`, `out = 'return'`, `out = 'csv'`, or `out = 'latex'` (although `out = 'latexpage'` works). `note` and `anchor` will only show up in formats that support multi-column cells and anchoring, so they won't work with `out = 'kable'`, `out = 'csv'`, or `out = 'return'`. ## `note.align` This option is used only with LaTeX output (`out` is 'latex' or 'latexpage'). `note.align` is a single string used for alignment, specifically for any table notes set with `note`, which enters as part of a `\multicolumn` argument. It accepts 'p{}' and other LaTeX column types. Be sure to escape special characters, in particular backslashes.
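To tie these options together, here is a hedged sketch of a `labeltable()` call aimed at a LaTeX document, using only arguments described above; the anchor ID and note text are placeholders.

```{r, eval = FALSE}
library(vtable)
library(sjlabelled)
data(efc)

labeltable(efc$e15relat,
           count = TRUE,
           percent = TRUE,
           out = 'latex',
           anchor = 'tab:e15relat',
           note = 'Value labels from the efc data.',
           note.align = 'l')
```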
/scratch/gouwar.j/cran-all/cranData/vtable/vignettes/labeltable.Rmd
--- title: "sumtable: Summary Statistics" author: "Nick Huntington-Klein" date: "`r Sys.Date()`" output: rmarkdown::html_vignette <!-- output: rmarkdown::html_vignette. pdf_document --> vignette: > %\VignetteIndexEntry{sumtable: Summary Statistics} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The `vtable` package serves the purpose of outputting automatic variable documentation that can be easily viewed while continuing to work with data. `vtable` contains four main functions: `vtable()` (or `vt()`), `sumtable()` (or `st()`), `labeltable()`, and `dftoHTML()`/`dftoLaTeX()`. This vignette focuses on `sumtable()`. `sumtable()` takes a dataset and outputs a formatted summary statistics table. Summary statistics an be for the whole data set at once or by-group. There are a huge number of R packages that will make a summary statistics table for you. What does `vtable::sumtable()` bring that isn't already there? First, like other `vtable` functions, `sumtable()` by default prints its results to Viewer (in RStudio) or the browser (elsewhere), making it easy to look at information about your data while continuing to work on it. Second, `sumtable()` is designed to *have nice defaults* and be *fast to work with*. By fast to work with that's both in the sense that you should just be able to ask for a sumtable and have it pretty much be what you want immediately, and also in the sense of trying to keep the number of keystrokes low (thus the `st()` shortcut, and the intent of not having to set a bunch of options). `sumtable()` has customization options, but they're certainly not as extensive as with a package like `gtsummary` or `arsenal`. Nor should they be! If you want full control over your table, those packages are already great, we don't need another package that does that. However, if you want the kind of table `sumtable()` produces (and I think a lot of you do!) then it's perfect and easy. This makes `sumtable()` very similar in spirit to the summary statistics functionality of `stargazer::stargazer()`, except with some additional important bonuses, like working with `tibble`s, factor variables, producing summary statistics by group, and being a summary-statistics-only function so the documentation isn't entwined with a bunch of regression-table functionality. Like, look at this. Isn't this what you basically already want? After loading the package this took eight keystrokes and no option-setting: ```{r} library(vtable) st(iris) ``` ----- # The `sumtable()` function `sumtable()` (or `st()` for short) syntax follows the following outline: ```{r, eval=FALSE} sumtable(data, vars=NA, out=NA, file=NA, summ=NA, summ.names=NA, add.median=FALSE, group=NA, group.long=FALSE, group.test=FALSE, group.weights =NA, col.breaks=NA, digits=2, fixed.digits=FALSE, numformat = formatfunc(digits = digits, big.mark = ''), skip.format = c('notNA(x)','propNA(x)','countNA(x)'), factor.percent=TRUE, factor.counts=TRUE, factor.numeric=FALSE, logical.numeric=FALSE, logical.labels=c('No','Yes'), labels=NA, title='Summary Statistics', note = NA, anchor=NA, col.width=NA, col.align=NA, align=NA, note.align='l', fit.page=NA, simple.kable=FALSE, obs.function=NA) opts=list()) ``` The goal of `sumtable()` is to take a data set `data` and output a usually-HTML (but `data.frame`, `kable`, `csv`, and `latex` options are there too) file with summary statistics for each of the variables in `data`. There are several options as to how the table will be constructed, and each of these options are explained below. 
Throughout, the output will be built as `kable`s since this is an RMarkdown document. However, generally you can leave `out` at its default and it will publish an HTML table to Viewer (in RStudio) or the browser (otherwise). This will also include some additional information about your data that can't be demonstrated in this vignette: ## `data` and `vars` The `data` argument can take any `data.frame`, `data.table`, `tibble`, or `matrix`, as long as it has a valid set of variable names stored in the `colnames()` attribute. By default, `sumtable` will include in the summary statistics table every variable in the data set that is (1) numeric, (2) factor, (3) logical, or (4) a character variable with six or fewer unique values (as a factor), and it will include them in the order they're in the data. You can override that variable list with `vars`, which is just a vector of variable names to include. You can use this to force `sumtable` to ignore variables you don't want, or to include variables it doesn't by default. ```{r, eval = FALSE} data(LifeCycleSavings) st(LifeCycleSavings, vars = c('pop15','pop75')) ``` ## `out` The `out` option determines what will be done with the resulting summary statistics table. There are several options for `out`: | Option | Result | |------------| -----------------------------------------| | browser | Loads output in web browser. | | viewer | Loads output in Viewer pane (RStudio only). | | htmlreturn | Returns HTML code for output file. | | return | Returns summary table in `data.frame` format. Depending on options, the data frame may be entirely character variables. | | csv | Returns summary table in `data.frame` format and, with a `file` option, saves that to CSV. | | kable | Returns a `knitr::kable()` | | latex | Returns a LaTeX table. | | latexpage | Returns an independently-buildable LaTeX document. | By default, `sumtable` will select 'viewer' if running in RStudio, and 'browser' otherwise. If it's being built in an RMarkdown document with `knitr`, it will default to 'kable'. Note that an RMarkdown default to 'kable' will also include some nice formatting, where `out = 'kable'` directly will give you a more basic `kable` you can format yourself. Also be aware that some of these formats, like 'return', do not support multi-column cells, and so instead you'll have headers squished into one cell, with blank cells next to them. ```{r, eval = FALSE} data(LifeCycleSavings) sumtable(LifeCycleSavings) vartable <- vtable(LifeCycleSavings,out='return') #I can easily \input this into my LaTeX doc: vt(LifeCycleSavings,out='latex',file='mytable1.tex') ``` ## `file` The `file` argument will write the variable documentation file to an HTML or LaTeX file and save it. Will automatically append 'html' or 'tex' filetype if the filename does not include a period. ```{r, eval=FALSE} data(LifeCycleSavings) st(LifeCycleSavings,file='lifecycle_summary') ``` ## `summ` and `summ.names` `summ` is the set of summary statistics functions to run and include in the table. It is very flexible, hopefully without being difficult to use. It takes a character vector in which each element is of the form `function(x)`, where `function(x)` is any function that takes a vector and returns a single numeric value. For example, `summ=c('mean(x)','median(x)','mean(log(x))')` would calculate the mean, median, and mean of the log for each variable. `summ.names` is just the heading-title of the corresponding `summ`. So in this example that might be `summ.names=c('Mean','Median','Mean of Log')`. 
Factor variables largely ignore `summ` (unless `factor.numeric = TRUE`) and will just report counts in the first column and means/percentages in the second. You may want to consider this when selecting the order you put your `summ` in if you have both factor and numeric variables. `summ` treats as special two `vtable` functions: `propNA(x)` and `countNA(x)`, which give the proportion and count of NA values, and the count of non-NA values in the variable, respectively. These two functions are the only functions that include NA values in their calculations. By default, `summ` is `c('notNA(x)', 'mean(x)', 'sd(x)', 'min(x)', 'pctile(x)[25]', 'pctile(x)[75]', 'max(x)')` in 'one-column' tables. If there's more than one column either due to the `col.breaks` option or the `group` option, it defaults to `c('notNA(x)', 'mean(x)', 'sd(x)')`. Alternately, if a given column of variables is entirely made up of factor variables, it defaults to `c('notNA(x)','mean(x)')`. These precise defaults have corresponding default `summ.names`. If you set your own `summ` but not `summ.names`, it will try to guess the name by taking your function, removing `(x)`, and capitalizing. so 'mean(x)' becomes 'Mean'. If you want to get complex you can. If there are multiple 'columns' of summary statistics and you want different statistics in each column, make `summ` and `summ.names` into a list, where each entry is a character vector of the calculations/names you want in that column. ```{r} sumtable(iris, summ=c('notNA(x)', 'mean(x)', 'median(x)', 'propNA(x)')) ``` ```{r} #Getting complex st(iris, col.breaks = 4, summ = list( c('notNA(x)','mean(x)','sd(x^2)','min(x)','max(x)'), c('notNA(x)','mean(x)') ), summ.names = list( c('N','Mean','SD of X^2','Min','Max'), c('Count','Percent') )) ``` ## `group`, `group.long`, and `group.test` `sumtable()` allows for the calculation of summary statistics by group. `group` is a character variable containing the column name in `data` that you want to calculate summary statistics separately for. At the moment this supports only a single variable, although you can combine multiple variables into a single one yourself before using `sumtable`. `group.long` is a flag for whether you want the different group summary statistics stacked side-by-side (`group.long = FALSE`), making for easier comparisons, or on top of each other (`group.long = TRUE`), giving things a bit more room to breathe and allowing space for more statistics. Defaults to `FALSE`. `group.test`, which is only compatible with `group.long = FALSE`, performs a test of independence between the variable in `group` and each of the variables in your summary statistics table. Defaults to `FALSE`. Default `group.test = TRUE` behavior is to perform a group F-test (with `anova(lm())`) for numeric variables, and a chi-squared test (with `chisq.test`) for factor, logical, and character variables, returning results in the format 'Test statistic name = Test statistic^significance stars'. If you want to change any of that, instead of `group.test = TRUE`, set `group.test` equal to a named list of options that will be send to the `opts` argument of `independence.test`. See `help(independence.test)`. Be aware that the table produced with `group` uses multi-column cells. So it will not look quite as nice when outputting to a format that does not support multi-column cells, like `out='return'`. Multi-column cells are supported in `out='kable'` for `group`, as below, but are not supported on other rows of the `kable`. 
Multi-column cells are supported for `out='kable'` only for HTML and LaTeX output. ```{r} st(iris, group = 'Species', group.test = TRUE) ``` ```{r} st(iris, group = 'Species', group.long = TRUE) ``` ## `group.weights` This allows you to pass a set of weights for your data (as a vector or as a string column name). **HOWEVER,** it does not automatically weight all the results. If you leave `summ` as its default, then it will use `weighted.mean(x, w = wts)` and `weighted.sd(x, w = wts)` in place of wherever it would normally have `mean(x)` and `sd(x)`. Factor proportions are calculated using `mean(x)`, so this is covered. Weights will also be passed to `independence.test()` if `group.test = TRUE`, and so tests of independence across groups will be weighted as well. **No other calculations will be automatically weighted.** This is really designed to be used with `group` and `group.test = TRUE` to create weighted balance tables, which is why it's called `group.weights` (and to avoid anyone thinking it weights everything, which would be the natural conclusion if it were just called `weights`). If you want to use the weights with other functions, you can. You'll just need to specify `summ` yourself. Just pass `summ` a string describing function that takes weights and refer to `wts` as the weights, e.g. `'weighted.mean(x, w = wts)'` for a weighted mean, as above. ## `col.breaks` Sometimes you don't need all that much information on each variable, but you have a lot of variables and your table gets long! `col.breaks` will break up the variables in your table into multiple columns, and put them side by side. Also handy if you want to mix numeric and factor variables - put all your factors in a second column to economize on space. Incompatible with `group` unless `group.long = TRUE`. Set `col.breaks` to be a numeric vector. `sumtable()` will start a new column after that many variables have been processed. ```{r} #Let's put species in a column by itself #There are five variables here, Species is last, #so break the column after the first four variables. st(iris, col.breaks = 4) ``` ```{r} #Why not three columns? sumtable(mtcars, col.breaks = c(4,8)) ``` ## `digits` and `fixed.digits` `digits` indicates how many digits after the decimal place should be displayed. `fixed.digits` determines whether trailing zeros are maintained. `fixed.digits` only works if `numformat = NA`, and will eventually be deprecated for a `formatfunc(drop0trailing=TRUE)` setting in `numformat`. ```{r} st(iris, digits = 5) ``` ```{r} st(iris, digits = 3, fixed.digits = TRUE, numformat = NA) ``` ## Other Numerical Formatting Options Should the numbers in the summary table be formatted in some way? By default, "number of nonmissing observations" values are formatted with `notNA()` formatting (but specifically, whatever is specified in `obs.function`), and the rest are not formatted except for having the number of digits set with `digits` or `fixed.digits`. You can specify `numformat` to set numerical formatting for numeric variables. `numformat` accepts as an argument functions that accept a number and return a formatted string, as generated by `formatfunc()` or the `label_` functions in the scales package. So, for example, `numformat = formatfunc(prefix = '$')` would give all your numbers dollar formatting. You can also use string shorthand as shortcuts for some `formatfunc()` settings. 
`'comma'` will set `big.mark = ','`, `'decimal'` will set `big.mark = '.', decimal.mark = ','`, `'percent'` will do percentage formatting (with 1 = 100%), and `'A|B'` will use `'A'` as a prefix and `'B'` as a suffix (specifying suffix optional, so `numformat = '$'` gives `'$3'`). This will also respect your `digits` choice (which `formatfunc()` directly won't do). These can be combined. `'comma$|M'` will turn `1000` into `$1,000M`. Although if you're getting complex you may as well just set `formatfunc()` yourself. You can specify different formatting functions for different variables by either specifying a string vector of those shorthands, or a list of functions. You can either provide an unnamed vector/list with length equal to the number of variables in the data, or you can provide a named vector/list that specifies the formatting for specific variables. You can apply a format to all variables not specifically named by making it an unnamed first entry, for example `numformat = c('dollar','sharevariable' = 'percent')` to make everything dollar-formatted except for 'sharevariable', which becomes percent-formatted. Note that any functions that match the ones in the `skip.format` option will not have this formatting applied to them at all. It is not currently possible otherwise to apply two different kinds of formatting to different columns of the `sumtable`. ```{r} st(iris, numformat = c('|cm', 'Sepal.Width' = 'percent')) ``` ## Factor, Logical, and Character Display Options How should factor, logical, and character variables (all of which get turned into factors in the `sumtable`-making process) be shown on the `sumtable()`? In all `sumtable()s`, there is one row for the name of the factor variable, with the number of nonmissing observations of the variable. Then there's one row for each of the values (for logicals, FALSE and TRUE become 'No' and 'Yes', or pick your own labels with `logical.labels`), showing the count and percentage (i.e. 50%) of observations with that value. Set `factor.percent = FALSE` to report the proportion of observations (.5) instead of the percentage (50%) for the values. Set `factor.counts = FALSE` to omit the count for the individual values. So you'll see the number of nonmissing observations for the variable overall, and then just the percentage/proportion for each of the values. Set `factor.numeric = TRUE` and/or `logical.numeric = TRUE` to ignore all this special treatment for factor/logical variables (respectively), and just treat each of the values as numeric binary variables. `factor.numeric` also covers character variables. ```{r} st(iris, factor.percent = FALSE, factor.counts = FALSE) ``` ```{r} st(iris, factor.numeric = TRUE) ``` ## `labels` The `labels` argument will attach variable labels to the variables in `data`. If variable labels are embedded in `data` and those labels are what you want, then set `labels = TRUE`. If you'd like to set your own labels that aren't embedded in the data, there are three formats available: ### `labels` as a vector `labels` can be set to be a vector of equal length to the number of variables in `data` (or in `vars` if that's set), and in the same order. You can use `NA`s for padding if you only want labels for some varibles and just want to use the regular variable names for others. This option is not recommended if you have set `group`, as it gets tricky to figure out what order to put the labels in. 
```{r} #Note that LifeCycleSavings has five variables data(LifeCycleSavings) #These variable labels are taken from help(LifeCycleSavings) labs <- c('numeric aggregate personal savings', 'numeric % of population under 15', 'numeric % of population over 75', NA, 'numeric % growth rate of dpi') sumtable(LifeCycleSavings,labels=labs) ``` ### `labels` as a two-column data set `labels` can be set to a two-column data set (any type will do) where the first column has the variable names, and the second column has the labels. The column names don't matter. This approach does __not__ require that every variable name in `data` has a matching label. ```{r} #Note that LifeCycleSavings has five variables #with names 'sr', 'pop15', 'pop75', 'dpi', and 'ddpi' labs <- data.frame(nonsensename1 = c('sr', 'pop15', 'pop75'), nonsensename2 = c('numeric aggregate personal savings', 'numeric % of population under 15', 'numeric % of population over 75')) st(LifeCycleSavings,labels=labs) ``` ### `labels` as a one-row data set `labels` can be set to a one-row data set in which the column names are the variable names in `data` and the first row is the variable names. The `labels` argument can take any data type including data frame, data table, tibble, or matrix, as long as it has a valid set of variable names stored in the `colnames()` attribute. This approach does __not__ require that every variable name in `data` has a matching label. ```{r} labs <- data.frame(sr = 'numeric aggregate personal savings', pop15 = 'numeric % of population under 15', pop75 = 'numeric % of population over 75') sumtable(LifeCycleSavings,labels=labs) ``` ## `title`, `note`, and `anchor` `title` will include a title for your table. `note` will add a table note in the last row. `anchor` will add an anchor ID (`<a name = ` in HTML or `\label{}` in LaTeX) to allow other parts of your document to link to it, if you are including your `sumtable` in a larger document. `title` will only show up in output formats with titles. That is, you won't get them with `out = 'return'`. `note` and `anchor` will only show up in formats that support multi-column cells and anchoring, so `anchor` won't work with `out = 'kable'` and neither will work with `out = 'return'` or `out = 'csv'`. ## `col.width` `sumtable()` will select default column widths by basically just giving the column with the variable name a little more space than the `summ`-based columns. `col.width`, as a vector of percentage column widths on the 0-100 scale, will override these defaults. Doesn't apply to `out = 'kable'`, `out = 'csv'`, or `out = 'return'`. ```{r, eval=FALSE} #The variable names in this data set are pretty short, and the value labels are #a little cramped, so let's move that over. st(LifeCycleSavings, col.width=c(9,rep(13,7))) ``` ## `col.align` `col.align` can be used to adjust text alignment in HTML output. Set to 'left', 'right', or 'center' to align all columns, or give a vector of column alignments to do each column separately. If you want to get tricky, you can add a semicolon afterwards and keep putting in whatever CSS attributes you want. They will be applied to the whole column. This option is only for HTML output and will only work with `out` values of 'browser', 'viewer', or 'htmlreturn'. ```{r, eval = FALSE} st(LifeCycleSavings,col.align = 'right') ``` ## `align`, `note.align`, and `fit.page` These options are used only with LaTeX output (`out` is 'latex' or 'latexpage'). `align` and `note.align` are single strings used for alignment. 
`align` will be used as column alignment in standard LaTeX syntax, for example 'lccc' for the left column left-aligned and the other three centered. `note.align` is an alignment note specifically for any table notes set with `note` (or significance stars), which enters as part of a `\multicolumn` argument. These both accept 'p{}' and other LaTeX column types. Defaults to left-aligned 'Variable' columns and right-aligned everything else. If `col.widths` is specified, `align` defaults to 'p{}' columns, with widths set by `col.width`. `fit.page` can be used to ensure that the table is a certain width, and will be used as an entry to a `\resizebox{}`. Set to `\\textwidth` to set the table to text width, or `.9\\textwidth` for 90% of the page, and so on, or any recognized width value in LaTeX. For all of these, be sure to escape special characters, in particular backslashes. ```{r, eval = FALSE} sumtable(iris,align = 'p{.3\\textwidth}ccccccc', fit.page = '\\textwidth', out = 'latex') ``` ## `opts` You can create a named list where the names are the above options and the values are the settings for those options, and input it into `sumtable` using `opts=`. This is an easy way to set the same options for many `sumtable`s.
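For instance, here is a sketch of how `opts` might be used together with the grouping options above to build several weighted balance tables with the same settings. The `survey_wave1`/`survey_wave2` data frames and the `treatment` and `wt` columns are hypothetical stand-ins for your own grouping variable and survey weights.

```{r, eval = FALSE}
library(vtable)

# Shared settings for a weighted balance table, reused across data sets
balance_opts <- list(group = 'treatment',
                     group.test = TRUE,
                     group.weights = 'wt',
                     digits = 3)

st(survey_wave1, opts = balance_opts)
st(survey_wave2, opts = balance_opts)
```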
/scratch/gouwar.j/cran-all/cranData/vtable/vignettes/sumtable.Rmd
--- title: "vtable for Data Exploration" author: "Nick Huntington-Klein" date: "`r Sys.Date()`" output: rmarkdown::html_vignette <!-- output: rmarkdown::html_vignette. pdf_document --> vignette: > %\VignetteIndexEntry{vtable for Data Exploration} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The `vtable` package serves the purpose of outputting automatic variable documentation that can be easily viewed while continuing to work with data. `vtable` contains four main functions: `vtable()` (or `vt()`), `sumtable()` (or `st()`), `labeltable()`, and `dftoHTML()`/`dftoLaTeX()`. Please see the vignettes/articles available on these main functions, as well as on the vtable helper functions.
/scratch/gouwar.j/cran-all/cranData/vtable/vignettes/vtable.Rmd
--- title: "Variable Table (vtable)" author: "Nick Huntington-Klein" date: "`r Sys.Date()`" output: rmarkdown::html_vignette <!-- output: rmarkdown::html_vignette. pdf_document --> vignette: > %\VignetteIndexEntry{Variable Table (vtable)} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The `vtable` package serves the purpose of outputting automatic variable documentation that can be easily viewed while continuing to work with data. `vtable` contains four main functions: `vtable()` (or `vt()`), `sumtable()` (or `st()`), `labeltable()`, and `dftoHTML()`/`dftoLaTeX()`. This vignette focuses on `vtable()`. `vtable()` takes a dataset and outputs a formatted variable documentation file. This serves several purposes. First, it allows for an easy generation of a variable documentation file, without requiring that one has already been created and made accessible through `help(data)`, or dealing with creating and finding R help documentation files. Second, it produces a list of variables (and, if provided, their labels) that can be easily viewed while working with the data, preventing repeated calls to `head()`, and making it much easier to work with confusingly-named variables. Third, the variable documentation file can be opened in a browser (with option `out='browser'`, saving to file and opening directly, or by opening in the RStudio Viewer pane and clicking 'Show in New Window') where it can be easily searched with standard Find-in-Page functions like Ctrl/Cmd-F, allowsing you to search for the variable or variable label you want. ----- # The `vtable()` function `vtable()` (or `vt()` for short) syntax follows the following outline: ```{r, eval=FALSE} vtable(data, out=NA, file=NA, labels=NA, class=TRUE, values=TRUE, missing=FALSE, index=FALSE, factor.limit=5, char.values=FALSE, data.title=NA, desc=NA, note=NA, anchor=NA, col.width=NA, col.align=NA, align=NA, note.align='l', fit.page=NA, summ=NA, lush=FALSE, opts=list()) ``` The goal of `vtable()` is to take a data set `data` and output a usually-HTML (but `data.frame`, `kable`, `csv`, and `latex` options are there too) file with documentation concerning each of the variables in `data`. There are several options as to what will be included in the documentation file, and each of these options are explained below. Throughout, the output will be built as `kable`s since this is an RMarkdown document. However, generally you can leave `out` at its default and it will publish an HTML table to Viewer (in RStudio) or the browser (otherwise). This will also include some additional information about your data that can't be demonstrated in this vignette: ## `data` The `data` argument can take any `data.frame`, `data.table`, `tibble`, or `matrix`, as long as it has a valid set of variable names stored in the `colnames()` attribute. The goal of `vtable()` is to produce documentation of each of the variables in this data set and display that documentation, one variable per row on the output `vtable`. If `data` has embedded variable or value labels, as the data set `efc` does below, `vtable()` will extract and use them automatically. ```{r} library(vtable) #Example 1, using base data LifeCycleSavings data(LifeCycleSavings) vtable(LifeCycleSavings, out='kable') ``` ```{r} #Example 2, using efc data with embedded variable labels library(sjlabelled) data(efc) #Don't forget the handy shortcut vt()! vt(efc) ``` ## `out` The `out` option determines what will be done with the resulting variable documentation file. 
There are several options for `out`: | Option | Result | |------------| -----------------------------------------| | browser | Loads variable documentation in web browser. | | viewer | Loads variable documentation in Viewer pane (RStudio only). | | htmlreturn | Returns HTML code for variable documentation file. | | return | Returns variable documentation table in data frame format. | | csv | Returns variable documentatoin in data.frame format and, with a `file` option, saves that to CSV. | | kable | Returns a `knitr::kable()` | | latex | Returns a LaTeX table. | | latexpage | Returns an independently-buildable LaTeX document. | By default, `vtable` will select 'viewer' if running in RStudio, and 'browser' otherwise. If it's being built in an RMarkdown document with `knitr`, it will default to 'kable'. Note that an RMarkdown default to 'kable' will also include some nice formatting, where `out = 'kable'` directly will give you a more basic `kable` you can format yourself. ```{r, eval = FALSE} data(LifeCycleSavings) vtable(LifeCycleSavings) vtable(LifeCycleSavings,out='browser') vtable(LifeCycleSavings,out='viewer') htmlcode <- vtable(LifeCycleSavings,out='htmlreturn') vartable <- vtable(LifeCycleSavings,out='return') #I can easily \input this into my LaTeX doc: vt(LifeCycleSavings,out='latex',file='mytable1.tex') ``` ## `file` The `file` argument will write the variable documentation file to an HTML or LaTeX file and save it. Will automatically append 'html' or 'tex' filetype if the filename does not include a period. ```{r, eval=FALSE} data(LifeCycleSavings) vt(LifeCycleSavings,file='lifecycle_variabledocumentation') ``` ## `labels` The `labels` argument will attach variable labels to the variables in `data`. If variable labels are embedded in `data` and those labels are what you want, the `labels` argument is unnecessary. Set `labels='omit'` if there are embedded labels but you do not want them in the table. `labels` can be used in any one of three formats. ### `labels` as a vector `labels` can be set to be a vector of equal length to the number of variables in `data`, and in the same order. `NA` values can be used for padding if some variables do not have labels. ```{r} #Note that LifeCycleSavings has five variables data(LifeCycleSavings) #These variable labels are taken from help(LifeCycleSavings) labs <- c('numeric aggregate personal savings', 'numeric % of population under 15', 'numeric % of population over 75', 'numeric real per-capita disposable income', 'numeric % growth rate of dpi') vtable(LifeCycleSavings,labels=labs) ``` ```{r} labs <- c('numeric aggregate personal savings',NA,NA,NA,NA) vtable(LifeCycleSavings,labels=labs) ``` ### `labels` as a two-column data set `labels` can be set to a two-column data set (any type will do) where the first column has the variable names, and the second column has the labels. The column names don't matter. This approach does __not__ require that every variable name in `data` has a matching label. 
```{r} #Note that LifeCycleSavings has five variables #with names 'sr', 'pop15', 'pop75', 'dpi', and 'ddpi' data(LifeCycleSavings) #These variable labels are taken from help(LifeCycleSavings) labs <- data.frame(nonsensename1 = c('sr', 'pop15', 'pop75'), nonsensename2 = c('numeric aggregate personal savings', 'numeric % of population under 15', 'numeric % of population over 75')) vt(LifeCycleSavings,labels=labs) ``` ### `labels` as a one-row data set `labels` can be set to a one-row data set in which the column names are the variable names in `data` and the first row is the variable names. The `labels` argument can take any data type including data frame, data table, tibble, or matrix, as long as it has a valid set of variable names stored in the `colnames()` attribute. This approach does __not__ require that every variable name in `data` has a matching label. ```{r} #Note that LifeCycleSavings has five variables #with names 'sr', 'pop15', 'pop75', 'dpi', and 'ddpi' data(LifeCycleSavings) #These variable labels are taken from help(LifeCycleSavings) labs <- data.frame(sr = 'numeric aggregate personal savings', pop15 = 'numeric % of population under 15', pop75 = 'numeric % of population over 75') vtable(LifeCycleSavings,labels=labs) ``` ## `class` The `class` flag will either report or not report the class of each variable in the resulting variable table. By default this is set to `TRUE`. ## `values` The `values` flag will either report or not report the values that each variable takes. Numeric variables will report a range, logicals will report 'TRUE FALSE', and factor variables will report the first `factor.limit` (default 5) factors listed. If the variable is numeric but has value labels applied by the `sjlabelled` package, `vtable()` will find them and report the numeric-label crosswalk. ```{r} data(LifeCycleSavings) vtable(LifeCycleSavings,values=FALSE) vtable(LifeCycleSavings) #CO2 contains factor variables data(CO2) vtable(CO2) ``` ```{r} #efc contains labeled values #Note that the original value labels do not easily tell you what numerical #value each label maps to, but vtable() does. library(sjlabelled) data(efc) vtable(efc) ``` ## `missing` The `missing` flag, set to TRUE, will report the number of missing values in each variable. Defaults to FALSE. ## `index` The `index` flag will either report or not report the index number of each variable. Defaults to FALSE. ## `factor.limit` If `values` is set to `TRUE`, then `factor.limit` limits the number of factors displayed on the variable table. `factor.limit` is by default 5, to cut down on clutter. The table will include the phrase "and more" to indicate that some factors have been cut off. Setting `factor.limit=0` will include all factors. If `values=FALSE`, `factor.limit` does nothing. ## `char.values` If `values` is set to `TRUE`, then `char.values = TRUE` instructs `vtable` to list the values that character variables take, as though they were factors. If you only want some of the character variables to have their values listed, use a character vector to indicate which variables. ```{r, eval=FALSE} data(USJudgeRatings) USJudgeRatings$Judge <- row.names(USJudgeRatings) USJudgeRatings$SecondCharacter <- 'Less Interesting' USJudgeRatings$ThirdCharacter <- 'Less Interesting Still!' 
#Show values for all character variables vtable(USJudgeRatings,char.values=TRUE) #Or just for a subset vtable(USJudgeRatings,char.values=c('Judge','SecondCharacter')) ``` ## `data.title`, `desc`, `note`, and `anchor` `data.title` will include a data title in the variable documentation file. If not set manually, this will default to the variable name for `data`. `desc` will include a description of the data set in the variable documentation file. This will by default include information on the number of observations and the number of columns. To remove this, set `desc='omit'`, or include any description and then include 'omit' as the last four characters. `note` will add a table note in the last row. `note.align` determines its left/right/center alignment, but is only used with LaTeX (see below). `anchor` will add an anchor ID (`<a name = ` in HTML or `\label{}` in LaTeX) to allow other parts of your document to link to it, if you are including your `vtable` in a larger document. `data.title` and `desc` will only show up in full-page `vtable`s. That is, you won't get them with `out = 'return'`, `out = 'csv'`, or `out = 'latex'` (although `out = 'latexpage'` works). `note` and `anchor` will only show up in formats that support multi-column cells and anchoring, so they won't work with `out = 'return'` or `out = 'csv'`. `out = 'kable'` is a half-exception in that it will use `data.title` as the caption for the `kable`, and will use the `note` as a footnote, but won't use `desc` or `anchor`. ```{r, eval=FALSE} library(vtable) data(LifeCycleSavings) vtable(LifeCycleSavings) vtable(LifeCycleSavings,data.title='Intercountry Life-Cycle Savings Data', desc='omit') vtable(LifeCycleSavings,data.title='Intercountry Life-Cycle Savings Data', desc='Data on the savings ratio 1960–1970. omit') vtable(LifeCycleSavings,data.title='Intercountry Life-Cycle Savings Data', desc='Data on the savings ratio 1960–1970', note='Data from Belsley, Kuh, and Welsch (1980)') ``` ## `col.width` `vtable()` will select default column widths for the variable table depending on which measures `(name, class, label, values, summ)` are included. `col.width`, as a vector of percentage column widths on the 0-100 scale, will override these defaults. ```{r, eval=FALSE} library(sjlabelled) data(efc) #The variable names in this data set are pretty short, and the value labels are #a little cramped, so let's move that over. vtable(efc,col.width=c(10,10,40,40)) ``` ## `col.align` `col.align` can be used to adjust text alignment in HTML output. Set to 'left', 'right', or 'center' to align all columns, or give a vector of column alignments to do each column separately. If you want to get tricky, you can add a semicolon afterwards and keep putting in whatever CSS attributes you want. They will be applied to the whole column. This option is only for HTML output and will only work with `out` values of 'browser', 'viewer', or 'htmlreturn'. ```{r, eval = FALSE} library(sjlabelled) data(efc) vtable(efc,col.align = 'right') ``` ## `align`, `note.align`, and `fit.page` These options are used only with LaTeX output (`out` is 'latex' or 'latexpage'). `align` and `note.align` are single strings used for alignment. `align` will be used as column alignment in standard LaTeX syntax, for example 'lccc' for the left column left-aligned and the other three centered. `note.align` is an alignment note specifically for any table notes set with `note` (or significance stars), which enters as part of a `\multicolumn` argument. 
These both accept 'p{}' and other LaTeX column types. Defaults to left-aligned 'Variable' columns and right-aligned everything else. If `col.widths` is specified, `align` defaults to 'p{}' columns, with widths set by `col.width`. `fit.page` can be used to ensure that the table is a certain width, and will be used as an entry to a `\resizebox{}`. Set to `\\textwidth` to set the table to text width, or `.9\\textwidth` for 90% of the page, and so on, or any recognized width value in LaTeX. For all of these, be sure to escape special characters, in particular backslashes. ```{r, eval = FALSE} library(sjlabelled) data(efc) vtable(efc,align = 'p{.3\\textwidth}cc', fit.page = '\\textwidth', out = 'latex') ``` ## `summ` `summ` will calculate summary statistics for all variables that report valid output on the given summary statistics functions. `summ` is very flexible. It takes a character vector in which each element is of the form `function(x)`, where `function(x)` is any function that takes a vector and returns a single numeric value. For example, `summ=c('mean(x)','median(x)','mean(log(x))')` would calculate the mean, median, and mean of the log for each variable. `summ` treats as special two `vtable` functions: `propNA(x)` and `countNA(x)`, which give the proportion and count of NA values, and the count of non-NA values in the variable, respectively. These two functions are always reported first, and are the only functions that include NA values in their calculations. ```{r} library(sjlabelled) data(efc) vtable(efc,summ=c('mean(x)','countNA(x)')) ``` ## `lush` The default `vtable` settings may not be to your liking, and in particular you may prefer more information. Setting `lush = TRUE` is an easy way to get more information. It will force `char.values` and `missing` to `TRUE`, and will also set a default `summ` value of `c('mean(x)', 'sd(x)', 'nuniq(x)')`. ## `opts` You can create a named list where the names are the above options and the values are the settings for those options, and input it into `vtable` using `opts=`. This is an easy way to set the same options for many `vtable`s.
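As a final sketch, `lush` and `opts` can be combined: the named list below just collects options already documented above so they can be reused across several `vtable()` calls; the title text is a placeholder.

```{r, eval = FALSE}
library(vtable)
library(sjlabelled)
data(efc)

# Reusable settings: richer summaries, variable indices, and a custom title
my_vt_opts <- list(lush = TRUE,
                   index = TRUE,
                   data.title = 'efc variable documentation')

vt(efc, opts = my_vt_opts)
```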
/scratch/gouwar.j/cran-all/cranData/vtable/vignettes/vtablefunction.Rmd
--- title: "vtable Bonus Functions" author: "Nick Huntington-Klein" date: "`r Sys.Date()`" output: rmarkdown::html_vignette <!-- output: rmarkdown::html_vignette. pdf_document --> vignette: > %\VignetteIndexEntry{vtable Bonus Functions} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The `vtable` package serves the purpose of outputting automatic variable documentation that can be easily viewed while continuing to work with data. `vtable` contains four main functions: `vtable()` (or `vt()`), `sumtable()` (or `st()`), `labeltable()`, and `dftoHTML()`/`dftoLaTeX()`. This vignette focuses on some bonus helper functions that come with `vtable` that have been exported because they may be handy to you. This can come in handy for saving a little time, and can help you avoid having to create an unnamed function when you need to call a function. ----- # Shortcut Helper Functions `vtable` includes four shortcut functions. These are generally intended for use with the `summ` option in `vtable` and `sumtable` because nested functions don't look very nice in a `vtable`, or in a `sumtable` unless you explicitly set the `summ.names`. ## `nuniq` `nuniq(x)` returns `length(unique(x))`, the number of unique values in the vector. ## `countNA`, `propNA`, and `notNA` These three functions are shortcuts for dealing with missing data. You have probably written out the nested versions of these many times! | Function | Short For | |------------| -----------------------------------------| | `countNA()` | `sum(is.na())` | | `propNA()` | `mean(is.na())` | | `notNA()` | `sum(!is.na())` | Note that `notNA()` also has some additional formatting options, which you would probably ignore if using it iteractively. ## `is.round` This function is a shortcut for `!any(!(x == round(x,digits)))`. It takes two arguments: a vector `x` and a number of `digits` (0 by default). It checks whether you can round to `digits` digits without losing any information. ----- # Other Helper Functions ## `formatfunc` `formatfunc()` is a function that returns a function, which itself helps format numbers using the `format()` function, in the same spirit as the `label_` functions in the scales package. It is largely used for the `numformat` argument of `sumtable()`. `formatfunc()` for the most part takes the same arguments as `format()`, and so `help(format)` can be a guide for using it. However, there are some differences. Some defaults are changed. By default, `scientific = FALSE, trim = TRUE`. There are four new arguments as well. `percent = TRUE` will format the number as a percentage by multiplying it by 100 and adding a % at the end. You can instead set `percent` equal to some number, and that number will instead be taken as 100%, instead of 1. So `percent = 100`, for example, will just add a % at the end without doing any multiplying. `prefix` and `suffix` will, naturally, add prefixes or suffixes to the formatted number. So `prefix = '$', suffix = 'M'`, for example, will produce a function that will turn `3` into `$3M`. `scale` will multiply the number by `scale` before formatting it. So `prefix = '$', suffix = 'M', scale = 1/1000000` will turn `3000000` into `$3M`. ```{r} library(vtable) my_formatter_func <- formatfunc(percent = TRUE, digits = 3, nsmall = 2, big.mark = ',') my_formatter_func(523.2355987) ``` ## `pctile` `pctile(x)` is short for `quantile(x,1:100/100)`. So in one sense this is another shortcut function. But this inherently lets you interact with percentiles a bit differently. 
While `quantile()` has you specify which percentile you want in the function call, `pctile()` returns an object with all integer percentiles, and you can pull out which ones you want afterwards. `pctile(x)[50]` is the 50th percentile, etc.. This can be convenient in several applications, an obvious one being in `sumtable`. ```{r} library(vtable) #Some random normal data, and its percentiles d <- rnorm(1000) pc <- pctile(d) #25th, 50th, 75th percentile pc[c(25,50,75)] ``` ```{r} #Inverse normal CDF with 100 points of articulation plot(pc) ``` ## `weighted.sd` `weighted.sd(x, w)` is a function to calculate a weighted standard deviation of `x` using `w` as weights, much like the base `weighted.mean()` does for means. It is mostly used as a helper function for `sumtable()` when `group.weights` is specified. However, you can use it on its own if you like. Unlike `weighted.mean()`, setting `na.rm = TRUE` will account for missings both in `x` and `w`. The weighted standard deviation is calculated as $$ \frac{\sum_i(w_i*(x_i-\bar{x}_w)^2)}{\frac{N_w-1}{N_w}\sum_iw_i} $$ Where $\bar{x}_w$ is the weighted mean of $x$, and $N_w$ is the number of observations with a nonzero weight. ```{r} x <- 1:100 w <- 1:100 weighted.mean(x, w) sd(x) weighted.sd(x, w) ``` # `independence.test` `independence.test` is a helper function for `sumtable(group.test=TRUE)` that tests for independence between a categorical variable `x` and another variable `y` that may be categorical or numerical. Then, it outputs a *formatted string* as its output, with significance stars, for printing. The function takes the format ```{r, eval = FALSE} independence.test(x,y,w=NA, factor.test = NA, numeric.test = NA, star.cutoffs = c(.01,.05,.1), star.markers = c('***','**','*'), digits = 3, fixed.digits = FALSE, format = '{name}={stat}{stars}', opts = list()) ``` ## `factor.test` and `numeric.test` These are functions that actually perform the independence test. `numeric.test` is used when `y` is numeric, and `factor.test` is used in all other instances. Specifically, these functions should take only `x`, `y`, and `w=NULL` as arguments, and should return a list with three elements: the name of the test statistic, the test statistic itself, and the p-value of the test. By default, these are the internal functions `vtable:::chisq.it` for `factor.test` and `vtable:::groupf.it` for `numeric.test`, so you can take a look at those (just put `vtable:::chisq.it` in the terminal and it will show you the function's code) if you'd like to make your own test functions. ## `star.cutoffs` and `star.markers` These are numeric and character vectors, respectively, used for p-value cutoffs and to create significance markers. `star.cutoffs` indicates the cutoffs, and `star.markers` indicates the markers to be used with each cutoff, in the same order. So with `star.cutoffs = c(.01,.05,.1)` and `star.markers = c('***','**','*')`, each p-value below .01 will get marked with `'***'`, each from .01 to .05 will get `'**'`, and each from .05 to .1 will get `*`. Defaults are set to "economics defaults" (.1, .05, .01). But these are of course easy to change. ```{r} data(iris) independence.test(iris$Species, iris$Sepal.Length, star.cutoffs = c(.05,.01,.001)) ``` ## `digits` and `fixed.digits` `digits` indicates how many digits after the decimal place from the test statistics and p-values should be displayed. `fixed.digits` determines whether trailing zeros are maintained. 
```{r} independence.test(iris$Species, iris$Sepal.Width, digits=1) ``` ```{r} independence.test(iris$Species, iris$Sepal.Width, digits=4, fixed.digits = TRUE) ``` ## `format` This is the printing format that the output will produce, incorporating the name of the test statistic `{name}`, the test statistic `{stat}`, the significance markers `{stars}`, and the p-value `{pval}`. If your `independence.test` is heading out to another format besides being printed in the R console, you may want to add additional markup like `'{name}$={stat}^{stars}$'}` in LaTeX or `'{name}={stat}<sup>{stars}</sup>'` in HTML. If you do this, be sure to think carefully about escaping or not escaping characters as appropriate when you print! ```{r} independence.test(iris$Species, iris$Sepal.Width, format = 'Pr(>{name}): {pval}{stars}') ``` ## `opts` You can create a named list where the names are the above options and the values are the settings for those options, and input it into `independence.test` using `opts=`. This is an easy way to set the same options for many `independence.test`s.
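The shortcut helpers described at the top of this vignette did not get a worked example above, so here is a small one. The vector is arbitrary made-up data, and the comments simply restate the definitions given earlier.

```{r}
library(vtable)

x <- c(1, 2, 2, NA, 4.5)

nuniq(x)   # length(unique(x))
countNA(x) # sum(is.na(x))
propNA(x)  # mean(is.na(x))
notNA(x)   # sum(!is.na(x))

is.round(c(1, 2, 3))     # TRUE: rounding to 0 digits loses no information
is.round(c(1.25, 2), 1)  # FALSE: 1.25 cannot be rounded to 1 digit losslessly
```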
/scratch/gouwar.j/cran-all/cranData/vtable/vignettes/vtablehelpers.Rmd
#' Center and scale a set of variables. #' #' Center and scale a set of variables. Other columns are passed through. #' #' @param d data.frame to work with #' @param center named vector of variables to center #' @param scale named vector of variables to scale #' @return d with centered and scaled columns altered #' #' @examples #' #' d <- data.frame(x = 1:5, #' y = c('a', 'a', 'b', 'b', 'b')) #' vars_to_transform = "x" #' t <- base::scale(as.matrix(d[, vars_to_transform, drop = FALSE]), #' center = TRUE, scale = TRUE) #' t #' #' centering <- attr(t, "scaled:center") #' scaling <- attr(t, "scaled:scale") #' center_scale(d, center = centering, scale = scaling) #' #' @export #' center_scale <- function(d, center, scale) { for(ni in intersect(names(center), names(d))) { d[[ni]] <- d[[ni]] - center[[ni]] } for(ni in intersect(names(scale), names(d))) { si <- scale[[ni]] if(all(is.finite(si), si!=0.0)) { d[[ni]] <- d[[ni]] / si } } d }
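# Illustrative sketch (not part of the package API beyond center_scale() itself):
# a typical pattern is to learn the centering/scaling statistics on one data set
# and then apply them to new rows with center_scale(). The d_train/d_new names
# below are placeholders.
#
#   d_train <- data.frame(x = 1:5, y = c('a', 'a', 'b', 'b', 'b'))
#   d_new <- data.frame(x = c(0, 10), y = c('a', 'b'))
#   t <- base::scale(as.matrix(d_train[, "x", drop = FALSE]),
#                    center = TRUE, scale = TRUE)
#   center_scale(d_new,
#                center = attr(t, "scaled:center"),
#                scale = attr(t, "scaled:scale"))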
/scratch/gouwar.j/cran-all/cranData/vtreat/R/center_scale.R
# pass a variable through (removing NAs) (should only by used for numerics) .passThrough <- function(col,args,doCollar) { treated <- as.numeric(col) treated[.is.bad(treated)] <- args$nadist if(doCollar) { treated[treated<args$cuts[[1]]] <- args$cuts[[1]] treated[treated>args$cuts[[2]]] <- args$cuts[[2]] } treated } as_rquery.vtreat_pass_through <- function(tstep, ..., var_restriction = NULL) { if(!requireNamespace("rquery", quietly = TRUE)) { stop("vtreat::as_rquery.vtreat_pass_through treatmentplan requires the rquery package") } wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::as_rquery.vtreat_pass_through") if((!is.null(var_restriction)) && (!(tstep$newvars %in% var_restriction))) { return(NULL) } args <- tstep$args list( exprs = tstep$newvars %:=% paste0("ifelse(is.na(", tstep$origvar, "), ", args$nadist, ", ", tstep$origvar, ")"), optree_generators = list(), tables = list() ) } .mkPassThrough <- function(..., origVarName, xcol, ycol, zC, zTarget, weights, collarProb, catScaling, missingness_imputation, imputation_map) { wrapr::stop_if_dot_args(substitute(list(...)), ".mkPassThrough") xcol <- as.numeric(xcol) napositions <- .is.bad(xcol) nna <- sum(napositions) if(nna>=length(xcol)) { return(c()) } if(collarProb>0.0) { cuts <- as.numeric(stats::quantile(xcol[!napositions], probs=c(collarProb,1-collarProb))) } else { cuts <- c(min(xcol[!napositions]),max(xcol[!napositions])) } if((!is.null(imputation_map)) && (origVarName %in% names(imputation_map))) { specific_imputation_method <- imputation_map[[origVarName]] if(!is.null(specific_imputation_method)) { missingness_imputation <- specific_imputation_method } } if(!is.null(missingness_imputation)) { if(is.numeric(missingness_imputation)) { nadist = missingness_imputation } else { nadist = missingness_imputation(xcol[!napositions], weights[!napositions]) } if((!is.numeric(nadist)) || (length(nadist)!=1)) { nadist <- NA_real_ } } else { nadist <- .wmean(xcol[!napositions], weights[!napositions]) } if(is.na(nadist)) { nadist <- 0 } xcol[napositions] <- nadist if(max(xcol)<=min(xcol)) { return(c()) } newVarName <- vtreat_make_names(origVarName) if(isTRUE(getOption('vtreat.use_clean_suffix', FALSE))) { newVarName <- vtreat_make_names(paste(origVarName,'clean',sep='_')) } treatment <- list(origvar=origVarName, newvars=newVarName, f=.passThrough, args=list(nadist=nadist,cuts=cuts), treatmentName='Scalable pass through', treatmentCode='clean', needsSplit=FALSE, extraModelDegrees=0) class(treatment) <- c('vtreat_pass_through', 'vtreatment') if((!catScaling)||(is.null(zC))) { treatment$scales <- linScore(newVarName,xcol,ycol,weights) } else { treatment$scales <- catScore(newVarName,xcol,zC,zTarget,weights) } treatment }
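# Illustrative sketch of how this 'clean' pass-through treatment surfaces through
# the public vtreat API: numeric columns have NA values filled in (by default with
# the column mean, per .mkPassThrough above), and collaring of extreme values is
# controlled by the collarProb design argument together with doCollar at
# prepare() time. The tiny frame below is a placeholder, not a package example.
#
#   d <- data.frame(x = c(1, 2, NA, 100), y = c(1, 2, 3, 4))
#   plan <- vtreat::designTreatmentsN(d, varlist = 'x', outcomename = 'y')
#   vtreat::prepare(plan, d)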
/scratch/gouwar.j/cran-all/cranData/vtreat/R/cleanTreatment.R
# apply a classification impact model # replace level with stored code .customCodeCat <- function(col,args, doCollar) { col <- .preProcCat(col,args$levRestriction) unhandledNovel <- !(col %in% names(args$conditionalScore)) keys <- col pred <- numeric(length(col)) if(length(args$conditionalScore)>0) { keys[unhandledNovel] <- names(args$conditionalScore)[[1]] # just to prevent bad lookups pred <- as.numeric(args$conditionalScore[keys]) } pred[unhandledNovel] <- args$missingValueCode pred } #' Make a categorical input custom coder. #' #' @param ... not used, force arguments to be set by name #' @param customCode code name #' @param coder user supplied variable re-coder (see vignette for type signature) #' @param codeSeq argments to custom coder #' @param v variable name #' @param vcolin data column, character #' @param zoY outcome column as numeric #' @param zC if classification outcome column as character #' @param zTarget if classification target class #' @param weights per-row weights #' @param catScaling optional, if TRUE use glm() linkspace, if FALSE use lm() for scaling. #' @return wrapped custom coder #' makeCustomCoderCat <- function(..., customCode, coder, codeSeq, v,vcolin,zoY,zC,zTarget, weights = NULL,catScaling = FALSE) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat:::makeCustomCoderNum") levRestriction <- NULL vcol <- .preProcCat(vcolin,levRestriction) if(is.null(weights)) { weights <- rep(1.0, length(vcol)) } extraModelDegrees <- max(0,length(unique(vcolin))-1) scores <- NULL tryCatch( if(is.null(zC)) { scores <- coder(v,vcol,zoY,weights) } else { scores <- coder(v,vcol,zC==zTarget,weights) }, error = function(e) { warning(e) } ) if(is.null(scores)) { return(NULL) } if((!is.numeric(scores)) || (length(scores)!=length(vcol))) { scores <- rep(0.0, length(vcol)) } if('center' %in% codeSeq) { # shift scores to be mean zero with respect to weights scores <- scores - .wmean(scores, weights) } missingValueCode <- .wmean(scores, weights) d <- data.frame(x = vcol, pred = scores) # TODO: weighted version agg <- aggregate(pred~x, data=d, mean) conditionalScore <- as.list(as.numeric(agg$pred)) names(conditionalScore) <- as.character(agg$x) conditionalScore <- conditionalScore[names(conditionalScore)!='zap'] # don't let zap group code newVarName <- vtreat_make_names(paste(v, customCode, sep='_')) treatment <- list(origvar=v, newvars=newVarName, f=.customCodeCat, args=list(conditionalScore=conditionalScore, levRestriction=levRestriction, missingValueCode=missingValueCode), treatmentName=paste('Custom Code:', customCode), treatmentCode=customCode, needsSplit=TRUE, extraModelDegrees=extraModelDegrees) pred <- treatment$f(vcolin, treatment$args, FALSE) if(!.has.range.cn(pred)) { return(NULL) } class(treatment) <- 'vtreatment' if(!catScaling) { treatment$scales <- linScore(newVarName,pred,as.numeric(zC==zTarget),weights) } else { treatment$scales <- catScore(newVarName,pred,zC,zTarget,weights) } treatment } # apply linear interpolation on known numeric levels .customCodeNum <- function(col, args, doCollar) { treated <- as.numeric(col) naposns <- .is.bad(treated) treated[naposns] <- args$missingValueCode if(sum(!naposns)>0) { xg <- pmax(args$minX, pmin(args$maxX, col[!naposns])) if(doCollar) { xg <- pmax(min(args$cuts), pmin(max(args$cuts), xg)) } eval_fn <- args$eval_fn if(!is.null(eval_fn)) { treated[!naposns] <- eval_fn(xg) } else { method <- args$method if(is.null(method)) { method <- "linear" } treated[!naposns] <- stats::approx(x = args$predXs, y = args$predYs, xout = xg, method = 
method, rule = 2)$y } } treated <- as.numeric(treated) # strip any attributes fails <- .is.bad(treated) if(any(fails)) { treated[fails] <- args$missingValueCode } treated } #' Make a numeric input custom coder. # #' @param ... not used, force arguments to be set by name #' @param customCode code name #' @param coder user supplied variable re-coder (see vignette for type signature) #' @param codeSeq argments to custom coder #' @param v variable name #' @param vcolin data column, numeric #' @param zoY outcome column as numeric #' @param zC if classification outcome column as character #' @param zTarget if classification target class #' @param weights per-row weights #' @param catScaling optional, if TRUE use glm() linkspace, if FALSE use lm() for scaling. #' @return wrapped custom coder #' makeCustomCoderNum <- function(..., customCode, coder, codeSeq, v,vcolin,zoY,zC,zTarget, weights = NULL, catScaling = FALSE) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat:::makeCustomCoderNum") xcol <- as.numeric(vcolin) napositions <- .is.bad(xcol) nna <- sum(napositions) if(nna>=length(xcol)) { return(NULL) } if(is.null(weights)) { weights <- rep(1.0, length(vcolin)) } xNotNA <- xcol[!napositions] minX <- min(xNotNA) maxX <- max(xNotNA) yNotNa <- zoY[!napositions] wNotNa <- weights[!napositions] if(max(xNotNA)<=min(xNotNA)) { return(NULL) } cuts <- c(min(xNotNA), max(xNotNA)) if('center' %in% codeSeq) { # shift scores to be mean zero with respect to weights yNotNa <- yNotNa - .wmean(yNotNa, wNotNa) } missingValueCode <- .wmean(yNotNa, wNotNa) extraModelDegrees <- max(0,length(unique(xNotNA))) scores <- NULL tryCatch( if(is.null(zC)) { scores <- coder(v, xNotNA, zoY[!napositions], wNotNa) } else { scores <- coder(v, xNotNA, (zC[!napositions])==zTarget, wNotNa) }, error = function(e) { warning(e) } ) if(is.null(scores)) { return(NULL) } method <- attr(scores, "method") if(is.null(method)) { method <- "linear" } approx_table <- NULL predXs <- NULL predYs <- NULL eval_fn <- attr(scores, "eval_fn") if(is.null(eval_fn)) { approx_table <- attr(scores, "approx_table") if(!is.null(approx_table)) { predXs <- approx_table$predXs predYs <- approx_table$predYs } else { if((!is.numeric(scores)) || (length(scores)!=length(xcol))) { return(NULL) } d <- data.frame(x = xcol, pred = scores) # TODO: weighted version agg <- aggregate(pred~x, data=d, mean) predXs <- agg$x if(length(predXs)<=1) { return(NULL) } predYs <- as.numeric(agg$pred) ord <- order(agg$x) predXs <- predXs[ord] predYs <- predYs[ord] # sample down if(length(predXs)>10000) { idxs <- sort(unique(c(1, round(seq(1, length(predXs), length.out=10000)), length(predXs)))) predXs <- predXs[idxs] predYs <- predYs[idxs] } } } newVarName <- vtreat_make_names(paste(v, customCode, sep='_')) treatment <- list(origvar=v, newvars=newVarName, f=.customCodeNum, args=list(minX = minX, maxX = maxX, predXs = predXs, predYs = predYs, eval_fn = eval_fn, method = method, cuts = cuts, missingValueCode = missingValueCode), treatmentName=paste('Custom Code:', customCode), treatmentCode=customCode, needsSplit=TRUE, extraModelDegrees=extraModelDegrees) pred <- treatment$f(vcolin, treatment$args, FALSE) if(!.has.range.cn(pred)) { return(NULL) } class(treatment) <- 'vtreatment' if(!catScaling) { treatment$scales <- linScore(newVarName,pred,as.numeric(zC==zTarget),weights) } else { treatment$scales <- catScore(newVarName,pred,zC,zTarget,weights) } treatment }
/scratch/gouwar.j/cran-all/cranData/vtreat/R/customCoder.R
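# Illustrative sketch (not part of the original package source): the custom
# coder hooks above call a user-supplied function with signature
# f(varName, x, y, weights) that returns one numeric score per row; numeric
# coders may also attach an "eval_fn" or "approx_table" attribute, which
# .customCodeNum() knows how to read. A minimal example coder is sketched
# below. The commented-out registration key ("n.sqFit.num") follows the
# naming convention described in the package's custom-coder vignettes and
# should be treated as an assumption here.

square_coder <- function(varName, x, y, weights) {
  # weighted least-squares fit of y on x and x^2, returning fitted values
  d <- data.frame(x = x, y = y)
  fit <- stats::lm(y ~ x + I(x^2), data = d, weights = weights)
  as.numeric(stats::predict(fit, newdata = d))
}

# Possible registration (key format assumed from the vignettes):
# tp <- vtreat::designTreatmentsN(
#   d, varlist = "x", outcomename = "y",
#   customCoders = list("n.sqFit.num" = square_coder))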
.xform_cat <- function(col, arg) { known_values <- arg$known_values invalid_mark <- arg$invalid_mark col <- as.character(col) bads <- is.na(col) | (!(col %in% known_values)) col[bads] <- invalid_mark col } .xform_num <- function(col, arg) { replacement <- arg$replacement col <- as.numeric(col) bads <- is.na(col) | is.nan(col) | is.infinite(col) col[bads] <- replacement col } .ind_na <- function(col, arg) { col <- as.numeric(col) bads <- is.na(col) | is.nan(col) | is.infinite(col) v <- rep(0, length(col)) v[bads] <- 1 v } .xform_zap <- function(col, arg) { NULL } #' Design a simple treatment plan to indicate missingingness and perform simple imputation. #' #' #' @param dframe data.frame to drive design. #' @param ... not used, forces later arguments to bind by name. #' @param varlist character, names of columns to process. #' @param invalid_mark character, name to use for NA levels and novel levels. #' @param drop_constant_columns logical, if TRUE drop columns that do not vary from the treatment plan. #' @param missingness_imputation function of signature f(values: numeric), simple missing value imputer. #' @param imputation_map map from column names to functions of signature f(values: numeric), simple missing value imputers. #' @return simple treatment plan. #' #' @examples #' #' d <- wrapr::build_frame( #' "x1", "x2", "x3" | #' 1 , 4 , "A" | #' NA , 5 , "B" | #' 3 , 6 , NA ) #' #' plan <- design_missingness_treatment(d) #' prepare(plan, d) #' #' prepare(plan, data.frame(x1=NA, x2=NA, x3="E")) #' #' @seealso \code{\link{prepare.simple_plan}} #' #' @export #' design_missingness_treatment <- function(dframe, ..., varlist = colnames(dframe), invalid_mark = "_invalid_", drop_constant_columns = FALSE, missingness_imputation = NULL, imputation_map = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat:::design_missingness_treatment") force(invalid_mark) ops <- list() for(ci in varlist) { vi <- dframe[[ci]] if(is.null(vi)) { stop(paste("vtreat::design_missingness_treatment: column", ci, "not found")) } if(drop_constant_columns) { if(!.has.range(vi)) { next } } if(is.logical(vi) || (is.numeric(vi) && (!is.factor(vi)))) { vi <- as.numeric(vi) mean_v <- 0.0 bads <- is.na(vi) | is.nan(vi) | is.infinite(vi) if(any(!bads)) { missing_v <- base::mean if(!is.null(missingness_imputation)) { missing_v <- missingness_imputation } if((!is.null(imputation_map)) && (ci %in% names(imputation_map))) { missing_v <- imputation_map[[ci]] } if(is.function(missing_v)) { mean_v <- missing_v(vi[!bads]) } else { mean_v <- missing_v } } ops <- c(ops, list( list( col = ci, nm = vtreat_make_names(ci), f = .xform_num, code = "numeric", args = list(replacement = mean_v)))) if(any(bads)) { ops <- c(ops, list( list( col = ci, nm = vtreat_make_names(paste0(ci, "_isBAD")), f = .ind_na, code = "is_bad", args = list()))) } } else { vi <- as.character(vi) ops <- c(ops, list( list( col = ci, nm = vtreat_make_names(ci), f = .xform_cat, code = "cat", args = list(known_values = sort(unique(vi)), invalid_mark = invalid_mark)))) } } class(ops) <- "simple_plan" ops } #' Prepare a simple treatment. #' #' @param treatmentplan A simple treatment plan. #' @param dframe data.frame to be treated. #' @param ... not used, present for S3 signature consistency. 
#' #' @examples #' #' d <- wrapr::build_frame( #' "x1", "x2", "x3" | #' 1 , 4 , "A" | #' NA , 5 , "B" | #' 3 , 6 , NA ) #' #' plan <- design_missingness_treatment(d) #' prepare(plan, d) #' #' prepare(plan, data.frame(x1=NA, x2=NA, x3="E")) #' #' @seealso \code{\link{design_missingness_treatment}}, \code{\link{prepare}} #' #' @export #' prepare.simple_plan <- function(treatmentplan, dframe, ...) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat:::prepare.simple_plan") res <- dframe for(pi in treatmentplan) { ci <- pi$col res[[ci]] <- NULL } for(pi in treatmentplan) { ci <- pi$col vi <- dframe[[ci]] if(is.null(vi)) { stop(paste("vtreat::prepare.simple_plan: column", ci, " must be in data.frame")) } vi <- pi$f(vi, pi$args) res[[pi$nm]] <- vi } res } #' @export format.simple_plan <- function(x, ...) { steps <- lapply( x, function(xi) { data.frame(origName = xi$col, varName = xi$nm, code = xi$code, stringsAsFactors = FALSE) }) steps <- .rbindListOfFrames(steps) format(steps) } #' @export as.character.simple_plan <- function (x, ...) { format(x, ...) } #' #' Print treatmentplan. #' @param x treatmentplan #' @param ... additional args (to match general signature). #' @export print.simple_plan <- function(x, ...) { print(format(x), ...) }
/scratch/gouwar.j/cran-all/cranData/vtreat/R/design_missing_Z.R
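# Illustrative sketch (not part of the original package source): the
# imputation_map argument of design_missingness_treatment() lets each column
# carry its own imputer, with missingness_imputation as the fallback (the
# default fallback is the column mean, per the code above).

d <- wrapr::build_frame(
  "x1", "x2" |
  1   , 4    |
  NA  , 5    |
  3   , NA   )

plan <- vtreat::design_missingness_treatment(
  d,
  imputation_map = list(x1 = stats::median),  # x1 imputed with its median
  missingness_imputation = max)               # other numeric columns: max
vtreat::prepare(plan, d)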
# apply a deviation fact
# replace level with deviance (could add other summaries such as median)
.catD <- function(col,args,doCollar) {
  pred <- numeric(length(col))
  if(length(args$scorable)>0) {
    col <- .preProcCat(col,args$levRestriction)
    unhandledNovel <- !(col %in% args$scorable)
    keys <- col
    if(length(args$scores)>0) {
      keys[unhandledNovel] <- args$scorable[[1]]  # just to prevent bad lookups
      pred <- as.numeric(args$scores[keys])
    }
    pred[unhandledNovel] <- args$unhandledNovelCode  # assume large deviation on unseen levels
  }
  pred
}

as_rquery.vtreat_cat_d <- function(tstep,
                                   ...,
                                   var_restriction) {
  if(!requireNamespace("rquery", quietly = TRUE)) {
    stop("vtreat::as_rquery.vtreat_cat_d treatmentplan requires the rquery package")
  }
  wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::as_rquery.vtreat_cat_d")
  if((!is.null(var_restriction)) && (!(tstep$newvars %in% var_restriction))) {
    return(NULL)
  }
  args <- tstep$args
  rquery_code_categorical(colname = tstep$origvar,
                          resname = tstep$newvars,
                          coding_levels = names(args$scores),
                          effect_values = args$scores,
                          levRestriction = args$levRestriction,
                          default_value = 0.0)
}

# build a deviation fact
# see: https://win-vector.com/2012/07/23/modeling-trick-impact-coding-of-categorical-variables-with-many-levels/
.mkCatD <- function(origVarName,vcolin,rescol,smFactor,levRestriction,weights) {
  vcol <- .preProcCat(vcolin,levRestriction)
  extraModelDegrees <- max(0,length(unique(vcolin))-1)
  num <- tapply(rescol*weights,vcol,sum)
  den <- tapply(weights,vcol,sum)
  scorable <- setdiff(names(den)[den>=2],'zap')
  if(length(scorable)<=0) {
    return(NULL)
  }
  condMean <- as.list(num/den)
  resids <- rescol-as.numeric(condMean[vcol])
  scores <- sqrt(tapply(resids*resids*weights,vcol,sum)/pmax(den-1,1))
  unhandledNovelCode <- 1.0
  if(length(scorable)>0) {
    unhandledNovelCode <- max(scores[scorable])
  }
  scores <- as.list(scores)
  scores <- scores[names(scores)!='zap']  # don't let zap code
  newVarName <- vtreat_make_names(paste(origVarName,'catD',sep='_'))
  treatment <- list(origvar=origVarName,
                    newvars=newVarName,
                    f=.catD,
                    args=list(scores=scores,
                              scorable=scorable,
                              unhandledNovelCode=unhandledNovelCode,
                              levRestriction=levRestriction),
                    treatmentName='Deviation Fact',
                    treatmentCode='catD',
                    needsSplit=TRUE,
                    extraModelDegrees=extraModelDegrees)
  pred <- treatment$f(vcolin,treatment$args)
  if(!.has.range.cn(pred)) {
    return(NULL)
  }
  class(treatment) <- c('vtreat_cat_d', 'vtreatment')
  treatment$scales <- linScore(newVarName,pred,rescol,weights)
  treatment
}
/scratch/gouwar.j/cran-all/cranData/vtreat/R/deviationFact.R
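# Illustrative sketch (not part of the original package source): the 'catD'
# treatment above replaces a categorical level with the within-level standard
# deviation of the outcome (a "deviation fact"); unseen levels fall back to
# the largest observed deviation. The unweighted quantity can be re-derived
# directly in base R for intuition:

y   <- c(1, 2, 3, 10, 11, 30)
lev <- c("a", "a", "a", "b", "b", "b")

within_dev <- tapply(y, lev, stats::sd)  # per-level sd of the outcome
within_dev[lev]                          # catD-style encoding, one value per row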
# apply a classification impact model
# replace level with logit(P[y==target|level]) - logit(P[y==target])
.catBayes <- function(col,args,doCollar) {
  col <- .preProcCat(col,args$levRestriction)
  unhandledNovel <- !(col %in% names(args$conditionalScore))
  keys <- col
  pred <- numeric(length(col))
  if(length(args$conditionalScore)>0) {
    keys[unhandledNovel] <- names(args$conditionalScore)[[1]]  # just to prevent bad lookups
    pred <- as.numeric(args$conditionalScore[keys])
  }
  pred[unhandledNovel] <- 0.0
  pred
}

as_rquery.vtreat_cat_Bayes <- function(tstep,
                                       ...,
                                       var_restriction) {
  if(!requireNamespace("rquery", quietly = TRUE)) {
    stop("vtreat::as_rquery.vtreat_cat_Bayes treatmentplan requires the rquery package")
  }
  wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::as_rquery.vtreat_cat_Bayes")
  if((!is.null(var_restriction)) && (!(tstep$newvars %in% var_restriction))) {
    return(NULL)
  }
  args <- tstep$args
  rquery_code_categorical(colname = tstep$origvar,
                          resname = tstep$newvars,
                          coding_levels = names(args$conditionalScore),
                          effect_values = args$conditionalScore,
                          levRestriction = args$levRestriction,
                          default_value = 0.0)
}

# build a classification impact model
# see: https://win-vector.com/2012/07/23/modeling-trick-impact-coding-of-categorical-variables-with-many-levels/
.mkCatBayes <- function(origVarName,vcolin,rescol,resTarget,smFactor,levRestriction,weights,catScaling) {
  vcol <- .preProcCat(vcolin,levRestriction)
  extraModelDegrees <- max(0,length(unique(vcolin))-1)
  epsilon <- 1.0e-6
  smFactor <- max(smFactor,1.0e-4)
  # T/F is true/false of the quantity to be predicted
  # C is the feature we are looking at
  nT <- sum(as.numeric(rescol==resTarget)*weights)  # weighted sum of true examples
  nF <- sum(as.numeric(rescol!=resTarget)*weights)  # weighted sum of false examples
  nCandT <- tapply(as.numeric(rescol==resTarget)*weights,vcol,sum)  # weighted sum of true examples for a given C (vector)
  nCandF <- tapply(as.numeric(rescol!=resTarget)*weights,vcol,sum)  # weighted sum of false examples for a given C (vector)
  probT <- pmax(epsilon,pmin(1-epsilon,nT/(nT+nF)))  # unconditional probability target is true
  pCgivenT <- (nCandT+smFactor)/(nT+smFactor)  # probability of a given evidence C, conditioned on outcome=T
  pCgivenF <- (nCandF+smFactor)/(nF+smFactor)  # probability of a given evidence C, conditioned on outcome=F
  pTgivenCunnorm <- pCgivenT*probT      # Bayes law, correct up to a missing /pC term (which we will normalize out)
  pFgivenCunnorm <- pCgivenF*(1-probT)  # Bayes law, correct up to a missing /pC term (which we will normalize out)
  pTgivenC <- pTgivenCunnorm/(pTgivenCunnorm+pFgivenCunnorm)
  # conditionalScore <- log(pTgivenC/probT)  # log probability ratio (so no effect is coded as zero)
  conditionalScore <- .logit(pTgivenC) - .logit(probT)  # logit probability
  conditionalScore <- as.list(conditionalScore)
  conditionalScore <- conditionalScore[names(conditionalScore)!='zap']  # don't let zap group code
  # fall back for novel levels, use zero impact
  newVarName <- vtreat_make_names(paste(origVarName,'catB',sep='_'))
  treatment <- list(origvar=origVarName,
                    newvars=newVarName,
                    f=.catBayes,
                    args=list(conditionalScore=conditionalScore,
                              levRestriction=levRestriction),
                    treatmentName='Bayesian Impact Code',
                    treatmentCode='catB',
                    needsSplit=TRUE,
                    extraModelDegrees=extraModelDegrees)
  pred <- treatment$f(vcolin,treatment$args)
  if(!.has.range.cn(pred)) {
    return(NULL)
  }
  class(treatment) <- c('vtreat_cat_Bayes', 'vtreatment')
  if(!catScaling) {
    treatment$scales <- linScore(newVarName,pred,as.numeric(rescol==resTarget),weights)
  } else {
    treatment$scales <- catScore(newVarName,pred,rescol,resTarget,weights)
  }
  if(treatment$scales$a <= 0) {
    return(NULL)  # fitting a noise effect
  }
  treatment
}
/scratch/gouwar.j/cran-all/cranData/vtreat/R/effectTreatmentC.R
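# Illustrative sketch (not part of the original package source): the 'catB'
# code above encodes each level as
#   logit(P[y == target | level]) - logit(P[y == target]),
# so "no effect" maps to zero. Ignoring the smoothing and clipping used above,
# the computation is roughly:

logit <- function(p) log(p / (1 - p))

y   <- c(TRUE, TRUE, FALSE, TRUE, FALSE, FALSE)
lev <- c("a",  "a",  "a",   "b",  "b",   "b")

p_overall   <- mean(y)               # unconditional P[y == target]
p_per_level <- tapply(y, lev, mean)  # P[y == target | level]
catB_scores <- logit(p_per_level) - logit(p_overall)
catB_scores[lev]                     # impact-coded column, one value per row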
# apply a numeric impact model
# replace level with .wmean(x|category) - .wmean(x)
.catNum <- function(col,args,doCollar) {
  col <- .preProcCat(col,args$levRestriction)
  unhandledNovel <- !(col %in% names(args$scores))
  keys <- col
  pred <- numeric(length(col))
  if(length(args$scores)>0) {
    keys[unhandledNovel] <- names(args$scores)[[1]]  # just to prevent bad lookups
    pred <- as.numeric(args$scores[keys])
  }
  # mean delta impact averaged over all possibilities, should be zero
  pred[unhandledNovel] <- 0.0
  pred
}

as_rquery.vtreat_can_num <- function(tstep,
                                     ...,
                                     var_restriction) {
  if(!requireNamespace("rquery", quietly = TRUE)) {
    stop("vtreat::as_rquery.vtreat_can_num treatmentplan requires the rquery package")
  }
  wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::as_rquery.vtreat_can_num")
  if((!is.null(var_restriction)) && (!(tstep$newvars %in% var_restriction))) {
    return(NULL)
  }
  args <- tstep$args
  rquery_code_categorical(colname = tstep$origvar,
                          resname = tstep$newvars,
                          coding_levels = names(args$scores),
                          effect_values = args$scores,
                          levRestriction = args$levRestriction,
                          default_value = 0.0)
}

# build a numeric impact model
# see: https://win-vector.com/2012/07/23/modeling-trick-impact-coding-of-categorical-variables-with-many-levels/
.mkCatNum <- function(origVarName,vcolin,rescol,smFactor,levRestriction,weights) {
  vcol <- .preProcCat(vcolin,levRestriction)
  extraModelDegrees <- max(0,length(unique(vcolin))-1)
  baseMean <- .wmean(rescol,weights)
  num <- tapply(rescol*weights,vcol,sum)
  den <- tapply(weights,vcol,sum)
  scores <- as.list((num+smFactor*baseMean)/(den+smFactor)-baseMean)
  scores <- scores[names(scores)!='zap']  # don't let zap code
  newVarName <- vtreat_make_names(paste(origVarName,'catN',sep='_'))
  treatment <- list(origvar=origVarName,
                    newvars=newVarName,
                    f=.catNum,
                    args=list(scores=scores,
                              levRestriction=levRestriction),
                    treatmentName='Scalable Impact Code',
                    treatmentCode='catN',
                    needsSplit=TRUE,
                    extraModelDegrees=extraModelDegrees)
  pred <- treatment$f(vcolin,treatment$args)
  if(!.has.range.cn(pred)) {
    return(NULL)
  }
  class(treatment) <- c('vtreat_can_num', 'vtreatment')
  treatment$scales <- linScore(newVarName,pred,rescol,weights)
  if(treatment$scales$a <= 0) {
    return(NULL)  # fitting a noise effect
  }
  treatment
}
/scratch/gouwar.j/cran-all/cranData/vtreat/R/effectTreatmentN.R
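# Illustrative sketch (not part of the original package source): the 'catN'
# code above encodes each level as a smoothed conditional mean of the outcome
# minus the grand mean; smFactor pulls rare levels back toward zero impact.
# Unweighted re-derivation:

y        <- c(1, 2, 3, 10, 11, 12)
lev      <- c("a", "a", "a", "b", "b", "b")
smFactor <- 0.5

grand_mean  <- mean(y)
num         <- tapply(y, lev, sum)
den         <- tapply(y, lev, length)
catN_scores <- (num + smFactor * grand_mean) / (den + smFactor) - grand_mean
catN_scores[lev]  # impact-coded column; novel levels would be coded 0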
# pyvtreat style interfaces for the vtreat code merge_params <- function(..., params = NULL, user_params = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat:::merge_args") if(length(user_params) > 0) { for(k in names(user_params)) { if(!(k %in% names(params))) { stop(paste("parameter key", k, "not recognized")) } params[[k]] <- user_params[[k]] } } return(params) } #' vtreat classification parameters. #' #' A list of settings and values for vtreat binomial classification fitting. #' Please see #' \url{https://github.com/WinVector/vtreat/blob/main/Examples/fit_transform/fit_transform_api.md}, #' \code{\link{mkCrossFrameCExperiment}}, #' \code{\link{designTreatmentsC}}, and #' \code{\link{prepare.treatmentplan}} for details. #' #' @param user_params list of user overrides. #' @return filled out parameter list #' #' @export classification_parameters <- function(user_params = NULL) { params = list( minFraction = 0.02, smFactor = 0.0, rareCount = 0, rareSig = NULL, collarProb = 0.00, codeRestriction = NULL, customCoders = NULL, splitFunction = NULL, ncross = 3, forceSplit = FALSE, catScaling = TRUE, verbose = FALSE, use_parallel = TRUE, missingness_imputation = NULL, pruneSig = NULL, scale = FALSE, doCollar= FALSE, varRestriction = NULL, trackedValues = NULL, check_for_duplicate_frames = TRUE) merged_params <- merge_params(params = params, user_params = user_params) class(merged_params) <- 'classification_parameters' return(merged_params) } #' Stateful object for designing and applying binomial outcome treatments. #' #' Hold settings and results for binomial classification data preparation. #' #' Please see #' \url{https://github.com/WinVector/vtreat/blob/main/Examples/fit_transform/fit_transform_api.md}, #' \code{\link{mkCrossFrameCExperiment}}, #' \code{\link{designTreatmentsC}}, and #' \code{\link{prepare.treatmentplan}} for details. #' #' @param ... not used, force arguments to be specified by name. #' @param var_list Names of columns to treat (effective variables). #' @param outcome_name Name of column holding outcome variable. \code{dframe[[outcomename]]} must be only finite and non-missing values. #' @param outcome_target Value/level of outcome to be considered "success", and there must be a cut such that \code{dframe[[outcomename]]==outcometarget} at least twice and dframe[[outcomename]]!=outcometarget at least twice. #' @param cols_to_copy list of extra columns to copy. #' @param params parameters list from \code{classification_parameters} #' @param imputation_map map from column names to functions of signature f(values: numeric, weights: numeric), simple missing value imputers. 
#' #' #' @export #' BinomialOutcomeTreatment <- function(..., var_list, outcome_name, outcome_target = TRUE, cols_to_copy = NULL, params = NULL, imputation_map = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::BinomialOutcomeTreatment") if((!is.null(params)) && (!('classification_parameters' %in% class(params)))) { stop("vtreat::BinomialOutcomeTreatment expected class classification_parameters") } if(missing(outcome_name)) { stop("vtreat::BinomialOutcomeTreatment outcome_name is required") } if(!is.character(outcome_name)) { stop("vtreat::BinomialOutcomeTreatment outcome_name must be character class") } if(length(outcome_name) != 1) { stop("vtreat::BinomialOutcomeTreatment outcome_name must be length 1") } params <- classification_parameters(params) var_list <- setdiff(var_list, c(outcome_name, cols_to_copy)) settings <- list( var_list = var_list, outcome_name = outcome_name, outcome_target = outcome_target, cols_to_copy = cols_to_copy, params = params, imputation_map = imputation_map, state = new.env(parent = emptyenv()) ) assign("transform", NULL, envir = settings$state) assign("score_frame", NULL, envir = settings$state) obj <- list(settings = settings) class(obj) <- "vtreat_pipe_step" obj$treatment_type <- "BinomialOutcomeTreatment" fit <- function(dframe, ..., weights = NULL, parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::BinomialOutcomeTreatment$fit") assign("transform", NULL, envir = settings$state) assign("score_frame", NULL, envir = settings$state) tp <- designTreatmentsC( dframe = dframe, varlist = settings$var_list, outcomename = settings$outcome_name, outcometarget = settings$outcome_target, weights = weights, minFraction = settings$params$minFraction, smFactor = settings$params$smFactor, rareCount = settings$params$rareCount, rareSig = settings$params$rareSig, collarProb = settings$params$collarProb, codeRestriction = settings$params$codeRestriction, customCoders = settings$params$customCoders, splitFunction = settings$params$splitFunction, ncross = settings$params$ncross, forceSplit = settings$params$forceSplit, catScaling = settings$params$catScaling, verbose = settings$params$verbose, parallelCluster = parallelCluster, use_parallel = settings$params$use_parallel, missingness_imputation = settings$params$missingness_imputation, imputation_map = settings$imputation_map) assign("transform", tp, envir = settings$state) assign("score_frame", tp$scoreFrame, envir = settings$state) invisible(obj) # allow method chaining } transform <- function(dframe, ..., parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::BinomialOutcomeTreatment$transform") tp <- mget('transform', envir = settings$state, inherits = FALSE, ifnotfound = list('transform' = NULL))[[1]] if(is.null(tp)) { stop("tried to use transform() on a not-fit treatment") } if(isTRUE(settings$params$check_for_duplicate_frames)) { old_obj_id <- tp$fit_obj_id fit_obj_id <- NULL if(!is.null(old_obj_id)) { fit_obj_id <- id_f(dframe) } if(!is.null(fit_obj_id)) { if(fit_obj_id == old_obj_id) { warning("possibly called transform() on same data frame as fit(), this can lead to over-fit. 
To avoid this, please use fit_transform().") } } } res <- prepare( treatmentplan = tp, dframe = dframe, pruneSig= settings$params$pruneSig, scale= settings$params$scale, doCollar= settings$params$doCollar, varRestriction= settings$params$varRestriction, codeRestriction= settings$params$codeRestriction, trackedValues= settings$params$trackedValues, extracols = settings$cols_to_copy, parallelCluster = parallelCluster, use_parallel = settings$params$use_parallel, check_for_duplicate_frames = FALSE) return(res) } fit_transform <- function(dframe, ..., weights = NULL, parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::BinomialOutcomeTreatment$fit_transform") assign("transform", NULL, envir = settings$state) assign("score_frame", NULL, envir = settings$state) ce <- mkCrossFrameCExperiment( dframe = dframe, varlist = settings$var_list, outcomename = settings$outcome_name, outcometarget = settings$outcome_target, weights = weights, minFraction = settings$params$minFraction, smFactor = settings$params$smFactor, rareCount = settings$params$rareCount, rareSig = settings$params$rareSig, collarProb = settings$params$collarProb, codeRestriction = settings$params$codeRestriction, customCoders = settings$params$customCoders, splitFunction = settings$params$splitFunction, ncross = settings$params$ncross, forceSplit = settings$params$forceSplit, catScaling = settings$params$catScaling, verbose = settings$params$verbose, parallelCluster = parallelCluster, use_parallel = settings$params$use_parallel, missingness_imputation = settings$params$missingness_imputation, imputation_map = settings$imputation_map) tp <- ce$treatments assign("transform", tp, envir = settings$state) assign("score_frame", tp$scoreFrame, envir = settings$state) res <- ce$crossFrame for(c in settings$cols_to_copy) { res[[c]] <- dframe[[c]] } return(res) } get_score_frame <- function() { res <- get('score_frame', envir = settings$state, inherits = FALSE) return(res) } get_transform <- function() { res <- get('transform', envir = settings$state, inherits = FALSE) return(res) } get_feature_names <- function(input_features=NULL) { sf <- get('score_frame', envir = settings$state, inherits = FALSE) want <- sf$varMoves if(!is.null(input_features)) { want <- want & (sf$origName %in% input_features) } return(sf$varName[want]) } fresh_copy <- function() { BinomialOutcomeTreatment( var_list = settings$var_list, outcome_name = settings$outcome_name, outcome_target = settings$outcome_target, cols_to_copy = settings$cols_to_copy, params = settings$params, imputation_map = settings$imputation_map) } # get globalenv early on environment chain for seralization # See pseudo-SEXPTYPEs in https://cran.r-project.org/doc/manuals/r-release/R-ints.html f_env <- new.env(parent = globalenv()) assign("settings", settings, envir = f_env) for(nm in c("fit", "transform", "fit_transform", "get_score_frame", "get_transform", "get_feature_names", "fresh_copy")) { fi <- get(nm) environment(fi) <- f_env assign(nm, fi, envir = f_env) } # build up result object obj$fit = fit obj$transform = transform obj$fit_transform = fit_transform obj$score_frame = get_score_frame obj$get_score_frame = get_score_frame obj$get_transform = get_transform obj$get_feature_names = get_feature_names obj$fresh_copy = fresh_copy assign("obj", obj, envir = f_env) return(obj) } #' vtreat regression parameters. #' #' A list of settings and values for vtreat regression fitting. 
#' Please see #' \url{https://github.com/WinVector/vtreat/blob/main/Examples/fit_transform/fit_transform_api.md}, #' \code{\link{mkCrossFrameCExperiment}}, #' \code{\link{designTreatmentsC}}, and #' \code{\link{mkCrossFrameNExperiment}}, #' \code{\link{designTreatmentsN}}, #' \code{\link{prepare.treatmentplan}} for details. #' #' @param user_params list of user overrides. #' @return filled out parameter list #' #' @export regression_parameters <- function(user_params = NULL) { params = list( minFraction = 0.02, smFactor = 0.0, rareCount = 0, rareSig = NULL, collarProb = 0.00, codeRestriction = NULL, customCoders = NULL, splitFunction = NULL, ncross = 3, forceSplit = FALSE, catScaling = TRUE, verbose = FALSE, use_parallel = TRUE, missingness_imputation = NULL, pruneSig = NULL, scale = FALSE, doCollar= FALSE, varRestriction = NULL, trackedValues = NULL, check_for_duplicate_frames = TRUE) merged_params <- merge_params(params = params, user_params = user_params) class(merged_params) <- 'regression_parameters' return(merged_params) } #' Stateful object for designing and applying numeric outcome treatments. #' #' Hold settings and results for regression data preparation. #' #' Please see #' \url{https://github.com/WinVector/vtreat/blob/main/Examples/fit_transform/fit_transform_api.md}, #' \code{\link{mkCrossFrameNExperiment}}, #' \code{\link{designTreatmentsN}}, and #' \code{\link{prepare.treatmentplan}} for details. #' #' @param ... not used, force arguments to be specified by name. #' @param var_list Names of columns to treat (effective variables). #' @param outcome_name Name of column holding outcome variable. \code{dframe[[outcomename]]} must be only finite non-missing values. #' @param cols_to_copy list of extra columns to copy. #' @param params parameters list from \code{regression_parameters} #' @param imputation_map map from column names to functions of signature f(values: numeric, weights: numeric), simple missing value imputers. 
#' #' #' @export #' NumericOutcomeTreatment <- function(..., var_list, outcome_name, cols_to_copy = NULL, params = NULL, imputation_map = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::NumericOutcomeTreatment") if((!is.null(params)) && (!('regression_parameters' %in% class(params)))) { stop("vtreat::NumericOutcomeTreatment expected class regression_parameters") } if(missing(outcome_name)) { stop("vtreat::NumericOutcomeTreatment outcome_name is required") } if(!is.character(outcome_name)) { stop("vtreat::NumericOutcomeTreatment outcome_name must be character class") } if(length(outcome_name) != 1) { stop("vtreat::NumericOutcomeTreatment outcome_name must be length 1") } params <- regression_parameters(params) var_list <- setdiff(var_list, c(outcome_name, cols_to_copy)) settings <- list( var_list = var_list, outcome_name = outcome_name, cols_to_copy = cols_to_copy, params = params, imputation_map = imputation_map, state = new.env(parent = emptyenv()) ) assign("transform", NULL, envir = settings$state) assign("score_frame", NULL, envir = settings$state) obj <- list(settings = settings) class(obj) <- "vtreat_pipe_step" obj$treatment_type <- "NumericOutcomeTreatment" fit <- function(dframe, ..., weights = NULL, parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::NumericOutcomeTreatment$fit") assign("transform", NULL, envir = settings$state) assign("score_frame", NULL, envir = settings$state) tp <- designTreatmentsN( dframe = dframe, varlist = settings$var_list, outcomename = settings$outcome_name, weights = weights, minFraction = settings$params$minFraction, smFactor = settings$params$smFactor, rareCount = settings$params$rareCount, rareSig = settings$params$rareSig, collarProb = settings$params$collarProb, codeRestriction = settings$params$codeRestriction, customCoders = settings$params$customCoders, splitFunction = settings$params$splitFunction, ncross = settings$params$ncross, forceSplit = settings$params$forceSplit, verbose = settings$params$verbose, parallelCluster = parallelCluster, use_parallel = settings$params$use_parallel, missingness_imputation = settings$params$missingness_imputation, imputation_map = settings$imputation_map) assign("transform", tp, envir = settings$state) assign("score_frame", tp$scoreFrame, envir = settings$state) invisible(obj) # allow method chaining } transform <- function(dframe, ..., parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::NumericOutcomeTreatment$transform") tp <- mget('transform', envir = settings$state, inherits = FALSE, ifnotfound = list('transform' = NULL))[[1]] if(is.null(tp)) { stop("tried to use transform() on a not-fit treatment") } if(isTRUE(settings$params$check_for_duplicate_frames)) { old_obj_id <- tp$fit_obj_id fit_obj_id <- NULL if(!is.null(old_obj_id)) { fit_obj_id <- id_f(dframe) } if(!is.null(fit_obj_id)) { if(fit_obj_id == old_obj_id) { warning("possibly called transform() on same data frame as fit(), this can lead to over-fit. 
To avoid this, please use fit_transform().") } } } res <- prepare( treatmentplan = tp, dframe = dframe, pruneSig= settings$params$pruneSig, scale= settings$params$scale, doCollar= settings$params$doCollar, varRestriction= settings$params$varRestriction, codeRestriction= settings$params$codeRestriction, trackedValues= settings$params$trackedValues, extracols = settings$cols_to_copy, parallelCluster = parallelCluster, use_parallel = settings$params$use_parallel, check_for_duplicate_frames = FALSE) return(res) } fit_transform <- function(dframe, ..., weights = NULL, parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::NumericOutcomeTreatment$fit_transform") assign("transform", NULL, envir = settings$state) assign("score_frame", NULL, envir = settings$state) ce <- mkCrossFrameNExperiment( dframe = dframe, varlist = settings$var_list, outcomename = settings$outcome_name, weights = weights, minFraction = settings$params$minFraction, smFactor = settings$params$smFactor, rareCount = settings$params$rareCount, rareSig = settings$params$rareSig, collarProb = settings$params$collarProb, codeRestriction = settings$params$codeRestriction, customCoders = settings$params$customCoders, splitFunction = settings$params$splitFunction, ncross = settings$params$ncross, forceSplit = settings$params$forceSplit, verbose = settings$params$verbose, parallelCluster = parallelCluster, use_parallel = settings$params$use_parallel, missingness_imputation = settings$params$missingness_imputation, imputation_map = settings$imputation_map) tp <- ce$treatments assign("transform", tp, envir = settings$state) assign("score_frame", tp$scoreFrame, envir = settings$state) res <- ce$crossFrame for(c in settings$cols_to_copy) { res[[c]] <- dframe[[c]] } return(res) } get_score_frame <- function() { res <- get('score_frame', envir = settings$state, inherits = FALSE) return(res) } get_transform <- function() { res <- get('transform', envir = settings$state, inherits = FALSE) return(res) } get_feature_names <- function(input_features=NULL) { sf <- get('score_frame', envir = settings$state, inherits = FALSE) want <- sf$varMoves if(!is.null(input_features)) { want <- want & (sf$origName %in% input_features) } return(sf$varName[want]) } fresh_copy <- function() { NumericOutcomeTreatment( var_list = settings$var_list, outcome_name = settings$outcome_name, cols_to_copy = settings$cols_to_copy, params = settings$params, imputation_map = settings$imputation_map) } # get globalenv early on environment chain for seralization # See pseudo-SEXPTYPEs in https://cran.r-project.org/doc/manuals/r-release/R-ints.html f_env <- new.env(parent = globalenv()) assign("settings", settings, envir = f_env) for(nm in c("fit", "transform", "fit_transform", "get_score_frame", "get_transform", "get_feature_names", "fresh_copy")) { fi <- get(nm) environment(fi) <- f_env assign(nm, fi, envir = f_env) } # build up result object obj$fit = fit obj$transform = transform obj$fit_transform = fit_transform obj$score_frame = get_score_frame obj$get_score_frame = get_score_frame obj$get_transform = get_transform obj$get_feature_names = get_feature_names obj$fresh_copy = fresh_copy assign("obj", obj, envir = f_env) return(obj) } #' vtreat multinomial parameters. #' #' A list of settings and values for vtreat multinomial classification fitting. 
#' Please see #' \url{https://github.com/WinVector/vtreat/blob/main/Examples/fit_transform/fit_transform_api.md}, #' \code{\link{mkCrossFrameMExperiment}} and #' \code{\link{prepare.multinomial_plan}} for details. #' #' @param user_params list of user overrides. #' @return filled out parameter list #' #' @export multinomial_parameters <- function(user_params = NULL) { params = list( minFraction=0.02, smFactor=0.0, rareCount=0, rareSig=1, collarProb=0.0, codeRestriction=NULL, customCoders=NULL, scale=FALSE,doCollar=FALSE, splitFunction=NULL,ncross=3, forceSplit = FALSE, catScaling=FALSE, y_dependent_treatments = c("catB"), verbose=FALSE, use_parallel = TRUE, missingness_imputation = NULL, imputation_map = NULL, check_for_duplicate_frames = TRUE) merged_params <- merge_params(params = params, user_params = user_params) class(merged_params) <- 'multinomial_parameters' return(merged_params) } #' Stateful object for designing and applying multinomial outcome treatments. #' #' Hold settings and results for multinomial classification data preparation. #' #' Please see #' \url{https://github.com/WinVector/vtreat/blob/main/Examples/fit_transform/fit_transform_api.md}, #' \code{\link{mkCrossFrameMExperiment}} and #' \code{\link{prepare.multinomial_plan}} for details. #' #' Note: there currently is no \code{designTreatmentsM}, #' so \code{MultinomialOutcomeTreatment$fit()} is implemented in terms #' of \code{MultinomialOutcomeTreatment$fit_transform()} #' #' @param ... not used, force arguments to be specified by name. #' @param var_list Names of columns to treat (effective variables). #' @param outcome_name Name of column holding outcome variable. \code{dframe[[outcomename]]} must be only finite non-missing values. #' @param cols_to_copy list of extra columns to copy. #' @param params parameters list from \code{multinomial_parameters} #' @param imputation_map map from column names to functions of signature f(values: numeric, weights: numeric), simple missing value imputers. 
#' #' #' @export #' MultinomialOutcomeTreatment <- function(..., var_list, outcome_name, cols_to_copy = NULL, params = NULL, imputation_map = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::MultinomialOutcomeTreatment") if((!is.null(params)) && (!('multinomial_parameters' %in% class(params)))) { stop("vtreat::MultinomialOutcomeTreatment expected class multinomial_parameters") } if(missing(outcome_name)) { stop("vtreat::MultinomialOutcomeTreatment outcome_name is required") } if(!is.character(outcome_name)) { stop("vtreat::MultinomialOutcomeTreatment outcome_name must be character class") } if(length(outcome_name) != 1) { stop("vtreat::MultinomialOutcomeTreatment outcome_name must be length 1") } params <- multinomial_parameters(params) var_list <- setdiff(var_list, c(outcome_name, cols_to_copy)) settings <- list( var_list = var_list, outcome_name = outcome_name, cols_to_copy = cols_to_copy, params = params, imputation_map = imputation_map, state = new.env(parent = emptyenv()) ) assign("transform", NULL, envir = settings$state) assign("score_frame", NULL, envir = settings$state) obj <- list(settings = settings) class(obj) <- "vtreat_pipe_step" obj$treatment_type <- "MultinomialOutcomeTreatment" transform <- function(dframe, ..., parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::MultinomialOutcomeTreatment$transform") tp <- mget('transform', envir = settings$state, inherits = FALSE, ifnotfound = list('transform' = NULL))[[1]] if(is.null(tp)) { stop("tried to use transform() on a not-fit treatment") } if(isTRUE(settings$params$check_for_duplicate_frames)) { old_obj_id <- tp$fit_obj_id fit_obj_id <- NULL if(!is.null(old_obj_id)) { fit_obj_id <- id_f(dframe) } if(!is.null(fit_obj_id)) { if(fit_obj_id == old_obj_id) { warning("possibly called transform() on same data frame as fit(), this can lead to over-fit. 
To avoid this, please use fit_transform().") } } } res <- prepare( treatmentplan = tp, dframe = dframe, parallelCluster = parallelCluster, pruneSig= settings$params$pruneSig, scale= settings$params$scale, doCollar= settings$params$doCollar, varRestriction= settings$params$varRestriction, codeRestriction= settings$params$codeRestriction, trackedValues= settings$params$trackedValues, extracols= settings$cols_to_copy, use_parallel= settings$params$use_parallel, check_for_duplicate_frames = FALSE) return(res) } fit_transform <- function(dframe, ..., weights = NULL, parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::MultinomialOutcomeTreatment$fit_transform") assign("transform", NULL, envir = settings$state) assign("score_frame", NULL, envir = settings$state) td <- mkCrossFrameMExperiment( dframe = dframe, varlist = settings$var_list, outcomename = settings$outcome_name, weights = weights, parallelCluster = parallelCluster, minFraction=settings$params$minFraction, smFactor=settings$params$smFactor, rareCount=settings$params$rareCount, rareSig=settings$params$rareSig, collarProb=settings$params$collarProb, codeRestriction=settings$params$codeRestriction, customCoders=settings$params$customCoders, scale=settings$params$scale, doCollar=settings$params$doCollar, splitFunction=settings$params$splitFunction, ncross=settings$params$ncross, forceSplit = settings$params$forceSplit, catScaling=settings$params$catScaling, y_dependent_treatments = settings$params$y_dependent_treatments, verbose=settings$params$verbose, use_parallel = settings$params$use_parallel, missingness_imputation = settings$params$missingness_imputation, imputation_map = settings$params$imputation_map) assign("transform", td$treat_m, envir = settings$state) assign("score_frame", td$score_frame, envir = settings$state) res <- td$cross_frame for(c in settings$cols_to_copy) { res[[c]] <- dframe[[c]] } return(res) } fit <- function(dframe, ..., weights = NULL, parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::MultinomialOutcomeTreatment$fit") fit_transform(dframe = dframe, weights = weights, parallelCluster = parallelCluster) invisible(obj) # allow method chaining } get_score_frame <- function() { res <- get('score_frame', envir = settings$state, inherits = FALSE) return(res) } get_transform <- function() { res <- get('transform', envir = settings$state, inherits = FALSE) return(res) } get_feature_names <- function(input_features=NULL) { sf <- get('score_frame', envir = settings$state, inherits = FALSE) want <- sf$varMoves if(!is.null(input_features)) { want <- want & (sf$origName %in% input_features) } return(unique(sf$varName[want])) } fresh_copy <- function() { MultinomialOutcomeTreatment( var_list = settings$var_list, outcome_name = settings$outcome_name, cols_to_copy = settings$cols_to_copy, params = settings$params, imputation_map = settings$imputation_map) } # get globalenv early on environment chain for seralization # See pseudo-SEXPTYPEs in https://cran.r-project.org/doc/manuals/r-release/R-ints.html f_env <- new.env(parent = globalenv()) assign("settings", settings, envir = f_env) for(nm in c("fit", "transform", "fit_transform", "get_score_frame", "get_transform", "get_feature_names", "fresh_copy")) { fi <- get(nm) environment(fi) <- f_env assign(nm, fi, envir = f_env) } # build up result object obj$fit = fit obj$transform = transform obj$fit_transform = fit_transform obj$score_frame = get_score_frame obj$get_score_frame = get_score_frame obj$get_transform = 
get_transform obj$get_feature_names = get_feature_names obj$fresh_copy = fresh_copy assign("obj", obj, envir = f_env) return(obj) } #' vtreat unsupervised parameters. #' #' A list of settings and values for vtreat unsupervised fitting. #' Please see #' \url{https://github.com/WinVector/vtreat/blob/main/Examples/fit_transform/fit_transform_api.md}, #' \code{\link{designTreatmentsZ}}, and #' \code{\link{prepare.treatmentplan}} for details. #' #' @param user_params list of user overrides. #' @return filled out parameter list #' #' @export unsupervised_parameters <- function(user_params = NULL) { params = list( minFraction = 0.02, rareCount = 0, collarProb = 0.00, codeRestriction = NULL, customCoders = NULL, verbose = FALSE, use_parallel = TRUE, missingness_imputation = NULL, pruneSig = NULL, scale = FALSE, doCollar= FALSE, varRestriction = NULL, trackedValues = NULL) merged_params <- merge_params(params = params, user_params = user_params) class(merged_params) <- 'unsupervised_parameters' return(merged_params) } #' Stateful object for designing and applying unsupervised treatments. #' #' Hold settings and results for unsupervised data preparation. #' #' Please see #' \url{https://github.com/WinVector/vtreat/blob/main/Examples/fit_transform/fit_transform_api.md}, #' \code{\link{designTreatmentsZ}} and #' \code{\link{prepare.treatmentplan}} for details. #' #' Note: for \code{UnsupervisedTreatment} \code{fit_transform(d)} is implemented #' as \code{fit(d)$transform(d)}. #' #' @param ... not used, force arguments to be specified by name. #' @param var_list Names of columns to treat (effective variables). #' @param cols_to_copy list of extra columns to copy. #' @param params parameters list from \code{unsupervised_parameters} #' @param imputation_map map from column names to functions of signature f(values: numeric, weights: numeric), simple missing value imputers. 
#' #' #' @export #' UnsupervisedTreatment <- function(..., var_list, cols_to_copy = NULL, params = NULL, imputation_map = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::UnsupervisedTreatment") if((!is.null(params)) && (!('unsupervised_parameters' %in% class(params)))) { stop("vtreat::UnsupervisedTreatment expected class unsupervised_parameters") } params <- unsupervised_parameters(params) var_list <- setdiff(var_list, cols_to_copy) settings <- list( var_list = var_list, cols_to_copy = cols_to_copy, params = params, imputation_map = imputation_map, state = new.env(parent = emptyenv()) ) assign("transform", NULL, envir = settings$state) assign("score_frame", NULL, envir = settings$state) obj <- list(settings = settings) class(obj) <- "vtreat_pipe_step" obj$treatment_type <- "UnsupervisedTreatment" fit <- function(dframe, ..., weights = NULL, parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::UnsupervisedTreatment$fit") assign("transform", NULL, envir = settings$state) assign("score_frame", NULL, envir = settings$state) tp <- designTreatmentsZ( dframe = dframe, varlist = settings$var_list, weights = weights, minFraction = settings$params$minFraction, collarProb = settings$params$collarProb, codeRestriction = settings$params$codeRestriction, customCoders = settings$params$customCoders, parallelCluster = parallelCluster, verbose = settings$params$verbose, use_parallel = settings$params$use_parallel, missingness_imputation = settings$params$missingness_imputation, imputation_map = settings$imputation_map) assign("transform", tp, envir = settings$state) assign("score_frame", tp$scoreFrame, envir = settings$state) invisible(obj) # allow method chaining } transform <- function(dframe, ..., parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::UnsupervisedTreatment$transform") tp <- mget('transform', envir = settings$state, inherits = FALSE, ifnotfound = list('transform' = NULL))[[1]] if(is.null(tp)) { stop("tried to use transform() on a not-fit treatment") } res <- prepare( treatmentplan = tp, dframe = dframe, scale= settings$params$scale, doCollar= settings$params$doCollar, varRestriction= settings$params$varRestriction, codeRestriction= settings$params$codeRestriction, trackedValues= settings$params$trackedValues, extracols = settings$cols_to_copy, parallelCluster = parallelCluster, use_parallel = settings$params$use_parallel, check_for_duplicate_frames = FALSE) return(res) } fit_transform <- function(dframe, ..., weights = NULL, parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::UnsupervisedTreatment$fit_transform") fit(dframe = dframe, weights = weights, parallelCluster = parallelCluster) res <- transform(dframe = dframe, parallelCluster = parallelCluster) return(res) } get_score_frame <- function() { res <- get('score_frame', envir = settings$state, inherits = FALSE) return(res) } get_transform <- function() { res <- get('transform', envir = settings$state, inherits = FALSE) return(res) } get_feature_names <- function(input_features=NULL) { sf <- get('score_frame', envir = settings$state, inherits = FALSE) want <- sf$varMoves if(!is.null(input_features)) { want <- want & (sf$origName %in% input_features) } return(sf$varName[want]) } fresh_copy <- function() { UnsupervisedTreatment( var_list = settings$var_list, cols_to_copy = settings$cols_to_copy, params = settings$params, imputation_map = settings$imputation_map) } # get globalenv early on environment chain for seralization # See 
pseudo-SEXPTYPEs in https://cran.r-project.org/doc/manuals/r-release/R-ints.html f_env <- new.env(parent = globalenv()) assign("settings", settings, envir = f_env) for(nm in c("fit", "transform", "fit_transform", "get_score_frame", "get_transform", "get_feature_names", "fresh_copy")) { fi <- get(nm) environment(fi) <- f_env assign(nm, fi, envir = f_env) } # build up result object obj$fit = fit obj$transform = transform obj$fit_transform = fit_transform obj$score_frame = get_score_frame obj$get_score_frame = get_score_frame obj$get_transform = get_transform obj$get_feature_names = get_feature_names obj$fresh_copy = fresh_copy assign("obj", obj, envir = f_env) return(obj) } #' @export format.vtreat_pipe_step <- function(x, ...) { return(x$treatment_type) } #' @export as.character.vtreat_pipe_step <- function(x, ...) { return(format(x, ...)) } #' @export print.vtreat_pipe_step <- function(x, ...) { print(format(x, ...)) sf <- x$score_frame() if(!is.null(sf)) { cols <- c('origName', 'varName', 'code', 'rsq', 'sig', 'extraModelDegrees', 'recommended') cols <- intersect(cols, colnames(sf)) sf <- sf[, cols, drop = FALSE] if(!is.null(sf)) { sf <- sf[order(sf$origName, sf$varName), , drop = FALSE] rownames(sf) <- NULL print(sf) } } invisible(x) } #' @export apply_right.vtreat_pipe_step <- function(pipe_left_arg, pipe_right_arg, pipe_environment, left_arg_name, pipe_string, right_arg_name) { pipe_right_arg$transform(pipe_left_arg) } # S3 interface, immutable to be more R-like #' Fit first arguemnt to data in second argument. #' #' Update the state of first argument to have learned or fit from second argument. #' #' Note: input vps is not altered, fit is in returned value. #' #' @param vps vtreat pipe step, object specifying fit #' @param dframe data.frame, data to fit from. #' @param ... not used, forces later arguments to bind by name. #' @param weights optional, per-dframe data weights. #' @param parallelCluster optional, parallel cluster to run on. #' @return new fit object #' #' @export fit <- function(vps, dframe, ..., weights = NULL, parallelCluster = NULL) { UseMethod("fit") } #' @export fit.vtreat_pipe_step <- function(vps, dframe, ..., weights = NULL, parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::fit.vtreat_pipe_step") vps <- vps$fresh_copy() vps$fit(dframe = dframe, weights = weights, parallelCluster = parallelCluster) } #' Transform second argument by first. #' #' Apply first argument to second as a transform. #' #' @param vps vtreat pipe step, object defining transform. #' @param dframe data.frame, data to transform #' @param ... not used, forces later arguments to bind by name. #' @param parallelCluster optional, parallel cluster to run on. #' @return transformed dframe #' #' @export apply_transform <- function(vps, dframe, ..., parallelCluster = NULL) { # don't use transform name to stay out of base::transform's way. UseMethod("apply_transform") } #' @export apply_transform.vtreat_pipe_step <- function(vps, dframe, ..., parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::apply_transform.vtreat_pipe_step") vps$transform(dframe = dframe, parallelCluster = parallelCluster) } #' @export prepare.vtreat_pipe_step <- function(treatmentplan, dframe, ...) { # vtreat prepare interface treatmentplan$transform(dframe = dframe, ...) } #' Fit and transform in a cross-validated manner. #' #' Update the state of first argument to have learned or fit from second argument, and compute a cross #' validated example of such a transform. 
#' #' Note: input vps is not altered, fit is in returned list. #' #' @param vps vtreat pipe step, object specifying fit. #' @param dframe data.frame, data to fit from. #' @param ... not used, forces later arguments to bind by name. #' @param weights optional, per-dframe data weights. #' @param parallelCluster optional, parallel cluster to run on. #' @return @return named list containing: treatments and cross_frame #' #' @export fit_transform <- function(vps, dframe, ..., weights = NULL, parallelCluster = NULL) { UseMethod("fit_transform") } #' @export fit_transform.vtreat_pipe_step <- function(vps, dframe, ..., weights = NULL, parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::fit_transform.vtreat_pipe_step") vps <- vps$fresh_copy() cross_frame <- vps$fit_transform(dframe = dframe, weights = weights, parallelCluster = parallelCluster) list(treatments = vps, cross_frame = cross_frame) } #' Fit and prepare in a cross-validated manner. #' #' Update the state of first argument to have learned or fit from second argument, and compute a cross #' validated example of such a transform. #' #' Note: input vps is not altered, fit is in returned list. #' #' @param vps vtreat pipe step, object specifying fit. #' @param dframe data.frame, data to fit from. #' @param ... not used, forces later arguments to bind by name. #' @param weights optional, per-dframe data weights. #' @param parallelCluster optional, parallel cluster to run on. #' @return @return named list containing: treatments and cross_frame #' #' @export fit_prepare <- function(vps, dframe, ..., weights = NULL, parallelCluster = NULL) { UseMethod("fit_transform") } #' @export fit_prepare.vtreat_pipe_step <- function(vps, dframe, ..., weights = NULL, parallelCluster = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::fit_prepare.vtreat_pipe_step") vps <- vps$fresh_copy() cross_frame <- vps$fit_transform(dframe = dframe, weights = weights, parallelCluster = parallelCluster) list(treatments = vps, cross_frame = cross_frame) } #' Return score frame from vps. #' #' Return previously fit score frame. #' #' @param vps vtreat pipe step, mutable object to read from. #' @return score frame #' #' @export get_score_frame <- function(vps) { UseMethod("get_score_frame") } #' @export get_score_frame.vtreat_pipe_step <- function(vps) { vps$get_score_frame() } #' Return underlying transform from vps. #' #' Return previously fit transform. #' #' @param vps vtreat pipe step, mutable object to read from. #' @return transform #' #' @export get_transform <- function(vps) { UseMethod("get_transform") } #' @export get_transform.vtreat_pipe_step <- function(vps) { vps$get_transform() } #' Return feasible feature names. #' #' Return previously fit feature names. #' #' @param vps vtreat pipe step, mutable object to read from. #' @return feature names #' #' @export get_feature_names <- function(vps) { UseMethod("get_feature_names") } #' @export get_feature_names.vtreat_pipe_step <- function(vps) { vps$get_feature_names() }
/scratch/gouwar.j/cran-all/cranData/vtreat/R/ft.R
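# Illustrative sketch (not part of the original package source): typical use
# of the fit/transform interface defined above. fit_prepare() returns the fit
# treatment plus a cross-validated frame that is safe to fit a downstream
# model on; apply_transform() (or prepare()) is then used on new data.

d <- data.frame(
  x = rep(c("a", "b", "c", "d", "e"), 2),
  n = c(1, NA, 3, 4, 5, 6, 7, 8, 9, 10),
  y = rep(c(TRUE, FALSE), 5))

transform_design <- vtreat::BinomialOutcomeTreatment(
  var_list = c("x", "n"),
  outcome_name = "y",
  outcome_target = TRUE)

fp <- vtreat::fit_prepare(transform_design, d)
treatments  <- fp$treatments    # fit vtreat_pipe_step
cross_frame <- fp$cross_frame   # cross-validated treated training frame

vtreat::get_score_frame(treatments)  # per-variable quality summaries

d_app <- data.frame(x = c("b", "q"), n = c(2, NA))  # novel level and an NA
vtreat::apply_transform(treatments, d_app)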
# return categorical indicators
.catInd <- function(col,args,doCollar) {
  col <- .preProcCat(col,args$levRestriction)
  nres <- length(args$tracked)
  vals <- vector('list',nres)
  for(j in seq_len(nres)) {
    vi <- ifelse(col==args$tracked[j],1.0,0.0)
    vals[[j]] <- vi
  }
  vals
}

as_rquery.vtreat_cat_ind <- function(tstep,
                                     ...,
                                     var_restriction) {
  if(!requireNamespace("rquery", quietly = TRUE)) {
    stop("vtreat::as_rquery.vtreat_cat_ind treatmentplan requires the rquery package")
  }
  wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::as_rquery.vtreat_cat_ind")
  if((!is.null(var_restriction)) && (length(intersect(tstep$newvars, var_restriction))<=0)) {
    return(NULL)
  }
  origvar <- tstep$origvar
  exprs <- c()
  for(i in seq_len(length(tstep$arg$tracked))) {
    li <- tstep$arg$tracked[[i]]
    vi <- tstep$newvars[[i]]
    if(is.null(var_restriction) || (vi %in% var_restriction)) {
      if(li == "NA") {
        expri <- paste0("ifelse(is.na(", origvar, "), 1, 0)")
      } else {
        li <- gsub("^x ", "", li)
        expri <- paste0("ifelse(is.na(", origvar, "), 0, ifelse(", origvar, " == \"", li, "\", 1, 0))")
      }
      names(expri) <- vi
      exprs <- c(exprs, expri)
    }
  }
  if(length(exprs)<=0) {
    return(NULL)
  }
  list(
    exprs = exprs,
    optree_generators = list(),
    tables = list()
  )
}

# same signature as .mkCatInd (except no parallelCluster argument)
.mkCatInd_a <- function(origVarName,
                        vcolin, ynumeric, zC, zTarget,
                        minFraction, levRestriction, weights, catScaling) {
  tracked <- levRestriction$tracked
  if(length(tracked)<=0) {
    return(c())
  }
  newVarNames <- vtreat_make_names(paste(origVarName,'lev',tracked,sep="_"))
  treatment <- list(origvar=origVarName,
                    newvars=newVarNames,
                    f=.catInd,
                    args=list(tracked=tracked,
                              levRestriction=levRestriction),
                    treatmentName='Categoric Indicators',
                    treatmentCode='lev',
                    needsSplit=FALSE,
                    extraModelDegrees=0)
  class(treatment) <- c('vtreat_cat_ind', 'vtreatment')
  pred <- treatment$f(vcolin,treatment$args)
  treatment$pred <- pred
  treatment
}

.mkCatNworker <- function(newVarNames, pred, ynumeric, weights) {
  force(newVarNames)
  force(pred)
  force(ynumeric)
  force(weights)
  function(j) {
    linScore(newVarNames[[j]], pred[[j]], ynumeric, weights)
  }
}

.mkCatCworker <- function(newVarNames, pred, zC, zTarget, weights) {
  force(newVarNames)
  force(pred)
  force(zC)
  force(zTarget)
  force(weights)
  function(j) {
    catScore(newVarNames[[j]], pred[[j]], zC, zTarget, weights)
  }
}

.mkCatInd_scales <- function(treatment,
                             ynumeric, zC, zTarget, weights, catScaling,
                             ...,
                             parallelCluster = NULL,
                             use_parallel = TRUE) {
  wrapr::stop_if_dot_args(substitute(list(...)), "vtreat:::.mkCatInd_scales")
  newVarNames <- treatment$newvars
  pred <- treatment$pred
  treatment$pred <- NULL
  treatment$scales <- NULL
  if(length(newVarNames)>0) {
    if((!catScaling)||(is.null(zC))) {
      worker <- .mkCatNworker(newVarNames, pred, ynumeric, weights)
    } else {
      worker <- .mkCatCworker(newVarNames, pred, zC, zTarget, weights)
    }
    scaleList <- plapply(seq_len(length(newVarNames)), worker,
                         parallelCluster = parallelCluster,
                         use_parallel = use_parallel)
    treatment$scales <- .rbindListOfFrames(scaleList)
  }
  treatment
}

# build categorical indicators
.mkCatInd <- function(origVarName,
                      vcolin, ynumeric, zC, zTarget,
                      minFraction, levRestriction, weights, catScaling,
                      ...,
                      parallelCluster = NULL,
                      use_parallel = TRUE) {
  treatment <- .mkCatInd_a(origVarName,
                           vcolin, ynumeric, zC, zTarget,
                           minFraction, levRestriction, weights, catScaling)
  treatment <- .mkCatInd_scales(treatment,
                                ynumeric, zC, zTarget, weights, catScaling,
                                parallelCluster = parallelCluster,
                                use_parallel = use_parallel)
  treatment
}
/scratch/gouwar.j/cran-all/cranData/vtreat/R/indicatorTreatment.R
# return if a variable is NA
.isBAD <- function(col,args,doCollar) {
  treated <- ifelse(.is.bad(col),1.0,0.0)
  treated
}

as_rquery.vtreat_is_bad <- function(tstep, 
                                    ..., 
                                    var_restriction) {
  if(!requireNamespace("rquery", quietly = TRUE)) {
    stop("vtreat::as_rquery.vtreat_is_bad treatmentplan requires the rquery package")
  }
  wrapr::stop_if_dot_args(substitute(list(...)), 
                          "vtreat::as_rquery.vtreat_is_bad")
  if((!is.null(var_restriction)) && (!(tstep$newvars %in% var_restriction))) {
    return(NULL)
  }
  args <- tstep$args
  list(
    exprs = tstep$newvars %:=% paste0("ifelse(is.na(", tstep$origvar, "), ", 1, ", ", 0, ")"),
    optree_generators = list(),
    tables = list()
  )
}

.mkIsBAD <- function(origVarName,xcol,ynumeric,zC,zTarget,weights,catScaling) {
  badIDX <- .is.bad(xcol)
  nna <- sum(badIDX)
  if((nna<=0)||(nna>=length(xcol))) {
    return(c())
  }
  newVarName <- vtreat_make_names(paste(origVarName,'isBAD',sep='_'))
  treatment <- list(origvar=origVarName,
                    newvars=newVarName,
                    f=.isBAD,
                    args=list(),
                    treatmentName='is.bad',
                    treatmentCode='isBAD',
                    needsSplit=FALSE,
                    extraModelDegrees=0)
  class(treatment) <- c('vtreat_is_bad', 'vtreatment')
  if((!catScaling)||(is.null(zC))) {
    treatment$scales <- linScore(newVarName,ifelse(badIDX,1.0,0.0),ynumeric,weights)
  } else {
    treatment$scales <- catScore(newVarName,ifelse(badIDX,1.0,0.0),zC,zTarget,weights)
  }
  treatment
}
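# Usage sketch (not part of the package source): the isBAD treatment above
# shows up through the public API as a 0/1 missingness indicator column
# (naming such as z_isBAD follows vtreat's convention, shown for illustration).
library(vtreat)
tz <- designTreatmentsZ(data.frame(z = c(1, 2, NA, 4)), varlist = "z", verbose = FALSE)
bad_vars <- tz$scoreFrame$varName[tz$scoreFrame$code == "isBAD"]
prepare(tz, data.frame(z = c(10, NA)), varRestriction = bad_vars)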
/scratch/gouwar.j/cran-all/cranData/vtreat/R/isBadTreatment.R
#' @importFrom stats predict NULL #' Solve for best non-decreasing fit using isotone regression #' (from the "isotone" package \url{https://CRAN.R-project.org/package=isotone}). #' #' Return a vector of length y that is a function of x #' (differs at must where x differs) obeying the same order #' constraints as x. This vector is picked as close to #' y (by square-distance) as possible. #' #' Please see \url{https://github.com/WinVector/vtreat/blob/main/extras/MonotoneCoder.md}. #' #' @param varName character, name of variable #' @param x numeric, factor, or character input (not empty, no NAs). #' @param y numeric or castable to such (same length as x no NAs), output to match #' @param w numeric positive, same length as x (weights, can be NULL) #' @return isotonicly adjusted y (non-decreasing) #' #' #' @examples #' #' if(requireNamespace("isotone", quietly = TRUE)) { #' solveNonDecreasing('v', 1:3, c(1,2,1)) #' } #' #' @export #' solveNonDecreasing <- function(varName, x, y, w=NULL) { if(!requireNamespace("isotone", quietly = TRUE)) { stop("vtreat::solveNonDecreasing requires the isotone package") } if(is.character(x)) { x <- as.factor(x) } x <- as.numeric(x) n <- length(x) if(n<=0) { return(NULL) } y <- as.numeric(y) if(length(y)!=n) { stop(paste("solveNonDecreasing", varName, "expect length(y)==length(x)")) } if(is.null(w)) { w <- rep(1.0, n) } if(!is.numeric(w)) { stop(paste("solveNonDecreasing", varName, "expect w numeric")) } if(length(w)!=n) { stop(paste("solveNonDecreasing", varName, "expect length(w)==length(x)")) } if(min(w)<=0) { stop(paste("solveNonDecreasing", varName, "expect positive weights")) } d <- data.frame(x=x, y=y, w=w) # get some corner cases if(n<=2) { v <- sum(w*y)/sum(w) if(n<=1) { return(v) } if((y[[2]]>=y[[1]])&&(x[[2]]>x[[1]])) { return(as.numeric(y)) } return(c(v,v)) } dord <- order(d$x) # see: # https://win-vector.com/2017/09/02/permutation-theory-in-action/ # https://win-vector.com/2017/05/18/on-indexing-operators-and-composition/ invPerm <- 1:n invPerm[dord] <- 1:n d <- d[dord, , drop=FALSE] Atot <- matrix(ncol=2,nrow=0,data=0) # build order relations to insist on a monotone function transform # first all order constraints Atot <- cbind(1:(n-1),2:n) # then any additional equality constraints to force result to be a # function of x noIncrease <- which(d$x[1:(n-1)]>=d$x[2:n]-1.0e-6) if(length(noIncrease)>0) { Atot <- rbind(Atot,cbind(noIncrease+1,noIncrease)) } if(!is.matrix(Atot)) { stop("solveNonDecreasing: !is.matrix(Atot)") } if(nrow(Atot)<1) { stop("solveNonDecreasing: nrow(Atot)<1") } if(ncol(Atot)!=2) { stop("solveNonDecreasing: ncol(Atot)!=2") } if(min(Atot)<1) { stop("solveNonDecreasing: min(Atot)<1") } if(max(Atot)>length(d$y)) { stop("solveNonDecreasing: (max(Atot)>length(d$y)") } # sum of squares objective is default if y is specified sqIso <- isotone::activeSet(Atot, y=d$y, weights=d$w) adjPred <- sqIso$x # undo permutation adjPred <- adjPred[invPerm] adjPred } #' Solve for best non-increasing fit. #' #' Return a vector of length y that is a function of x #' (differs at must where x differs) obeying the opposite order #' constraints as x. This vector is picked as close to #' y (by square-distance) as possible. #' #' Please see \url{https://github.com/WinVector/vtreat/blob/main/extras/MonotoneCoder.md}. #' #' @param varName character, name of variable #' @param x numeric, factor, or character input (not empty, no NAs). 
#' @param y numeric (same length as x, no NAs), output to match
#' @param w numeric positive, same length as x (weights, can be NULL)
#' @return isotonically adjusted y (non-increasing)
#'
#'
#' @examples
#'
#'
#' if(requireNamespace("isotone", quietly = TRUE)) {
#'   solveNonIncreasing('v', 1:3, c(1,2,1))
#' }
#'
#' @export
#'
solveNonIncreasing <- function(varName, x, y, w=NULL) {
  -solveNonDecreasing(varName, x, -y, w)
}


#' Solve for best single-direction (non-decreasing or non-increasing) fit.
#'
#' Return a vector of length y that is a function of x
#' (differs at most where x differs) obeying either the same
#' order constraints or the opposite order
#' constraints as x. This vector is picked as close to
#' y (by square-distance) as possible.
#'
#' Please see \url{https://github.com/WinVector/vtreat/blob/main/extras/MonotoneCoder.md}.
#'
#' @param varName character, name of variable
#' @param x numeric, factor, or character input (not empty, no NAs).
#' @param y numeric (same length as x, no NAs), output to match
#' @param w numeric positive, same length as x (weights, can be NULL)
#' @return isotonically adjusted y (non-decreasing or non-increasing, whichever fits better)
#'
#'
#' @examples
#'
#' if(requireNamespace("isotone", quietly = TRUE)) {
#'   solveIsotone('v', 1:3, c(1,2,1))
#' }
#'
#' @export
#'
solveIsotone <- function(varName, x, y, w=NULL) {
  soln1 <- solveNonDecreasing(varName, x, y, w)
  d1 <- sum((y-soln1)^2)
  soln2 <- solveNonIncreasing(varName, x, y, w)
  d2 <- sum((y-soln2)^2)
  if(d1<=d2) {
    return(soln1)
  }
  return(soln2)
}
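# Sketch (not part of the package source): these monotone solvers match the
# custom-coder signature f(varName, x, y, w) and can be supplied to the
# design functions through the customCoders argument. The key name
# 'n.NonDecreasingV.num' below is an assumed example of the customCoders
# naming convention (see the MonotoneCoder write-up linked above); treat it
# as illustrative, not as the definitive key.
if(requireNamespace("isotone", quietly = TRUE)) {
  library(vtreat)
  d <- data.frame(x = 1:20, y = sin(1:20) + (1:20)/5)
  tpN <- designTreatmentsN(
    d, varlist = "x", outcomename = "y",
    customCoders = list('n.NonDecreasingV.num' = solveNonDecreasing),  # assumed key
    verbose = FALSE)
  tpN$scoreFrame[, c("varName", "code", "rsq", "sig")]
}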
/scratch/gouwar.j/cran-all/cranData/vtreat/R/isotone.R
#' Function to build multi-outcome vtreat cross frame and treatment plan.
#'
#' Please see \code{vignette("MultiClassVtreat", package = "vtreat")} \url{https://winvector.github.io/vtreat/articles/MultiClassVtreat.html}.
#'
#' @param dframe data to learn from
#' @param varlist character, vector of independent variable column names.
#' @param outcomename character, name of outcome column.
#' @param ... not used, declared to force named binding of later arguments
#' @param weights optional training weights for each row
#' @param minFraction optional minimum frequency a categorical level must have to be converted to an indicator column.
#' @param smFactor optional smoothing factor for impact coding models.
#' @param rareCount optional integer, allow levels with this count or below to be pooled into a shared rare-level. Defaults to 0 or off.
#' @param rareSig optional numeric, suppress levels from pooling at significance values greater than this. Defaults to NULL or off.
#' @param collarProb what fraction of the data (pseudo-probability) to collar data at if doCollar is set during \code{\link{prepare.multinomial_plan}}.
#' @param codeRestriction what types of variables to produce (character array of level codes, NULL means no restriction).
#' @param customCoders map from code names to custom categorical variable encoding functions (please see \url{https://github.com/WinVector/vtreat/blob/main/extras/CustomLevelCoders.md}).
#' @param scale optional if TRUE replace numeric variables with regression ("move to outcome-scale").
#' @param doCollar optional if TRUE collar numeric variables by cutting off after a tail-probability specified by collarProb during treatment design.
#' @param splitFunction (optional) see vtreat::buildEvalSets .
#' @param ncross optional scalar>=2 number of cross-validation rounds to design.
#' @param forceSplit logical, if TRUE force cross-validated significance calculations on all variables.
#' @param catScaling optional, if TRUE use glm() link space, if FALSE use lm() for scaling.
#' @param y_dependent_treatments character, what treatment types to build per-outcome level.
#' @param verbose if TRUE print progress.
#' @param parallelCluster (optional) a cluster object created by package parallel or package snow.
#' @param use_parallel logical, if TRUE use parallel methods.
#' @param missingness_imputation function of signature f(values: numeric, weights: numeric), simple missing value imputer.
#' @param imputation_map map from column names to functions of signature f(values: numeric, weights: numeric), simple missing value imputers.
#' @return a names list containing cross_frame, treat_m, score_frame, and fit_obj_id #' #' @seealso \code{\link{prepare.multinomial_plan}} #' #' @examples #' #' # numeric example #' set.seed(23525) #' #' # we set up our raw training and application data #' dTrainM <- data.frame( #' x = c('a', 'a', 'a', 'a', 'b', 'b', NA, NA), #' z = c(1, 2, 3, 4, 5, NA, 7, NA), #' y = c(0, 0, 0, 1, 0, 1, 2, 1)) #' #' dTestM <- data.frame( #' x = c('a', 'b', 'c', NA), #' z = c(10, 20, 30, NA)) #' #' # we perform a vtreat cross frame experiment #' # and unpack the results into treatmentsM, #' # dTrainMTreated, and score_frame #' unpack[ #' treatmentsM = treat_m, #' dTrainMTreated = cross_frame, #' score_frame = score_frame #' ] <- mkCrossFrameMExperiment( #' dframe = dTrainM, #' varlist = setdiff(colnames(dTrainM), 'y'), #' outcomename = 'y', #' verbose = FALSE) #' #' # the score_frame relates new #' # derived variables to original columns #' score_frame[, c('origName', 'varName', 'code', 'rsq', 'sig', 'outcome_level')] %.>% #' print(.) #' #' # the treated frame is a "cross frame" which #' # is a transform of the training data built #' # as if the treatment were learned on a different #' # disjoint training set to avoid nested model #' # bias and over-fit. #' dTrainMTreated %.>% #' head(.) %.>% #' print(.) #' #' # Any future application data is prepared with #' # the prepare method. #' dTestMTreated <- prepare(treatmentsM, dTestM, pruneSig=NULL) #' #' dTestMTreated %.>% #' head(.) %.>% #' print(.) #' #' @export #' mkCrossFrameMExperiment <- function(dframe, varlist, outcomename, ..., weights=c(), minFraction=0.02,smFactor=0.0, rareCount=0,rareSig=1, collarProb=0.0, codeRestriction=NULL, customCoders=NULL, scale=FALSE,doCollar=FALSE, splitFunction=vtreat::kWayCrossValidation, ncross=3, forceSplit = FALSE, catScaling=FALSE, y_dependent_treatments = c("catB"), verbose=FALSE, parallelCluster=NULL, use_parallel = TRUE, missingness_imputation = NULL, imputation_map = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::mkCrossFrameMExperiment") y_levels <- sort(unique(as.character(dframe[[outcomename]]))) y_l_names <- vtreat_make_names(y_levels) if(length(y_levels)<2) { stop("vtreat::mkCrossFrameMExperiment outcome must have 2 or more levels") } if(length(codeRestriction) > 0) { y_dependent_treatments = intersect(y_dependent_treatments, codeRestriction) } # build y-independent variable treatments treatments_0 <- designTreatmentsZ(dframe, varlist, weights=weights, minFraction=minFraction, rareCount=rareCount, collarProb=collarProb, codeRestriction=codeRestriction, customCoders=customCoders, verbose= verbose, parallelCluster=parallelCluster, use_parallel = use_parallel, missingness_imputation = missingness_imputation, imputation_map = imputation_map) # score them tf_0 <- prepare(treatments_0, dframe, extracols = outcomename, parallelCluster = parallelCluster, use_parallel = use_parallel) sf_0 <- treatments_0$scoreFrame[order(treatments_0$scoreFrame$varName), , drop = FALSE] sframe_0 <- lapply( y_levels, function(y_target) { zCS <- tf_0[[outcomename]]==y_target zoYS <- ifelse(zCS,1,0) z_vars <- setdiff(colnames(tf_0), outcomename) swkr <- .mkScoreColWorker(zoYS, zCS, TRUE, weights) newVarsSP <- lapply(z_vars, function(nv) { list(nv=nv,dfc=tf_0[[nv]]) }) sframe_0 <- plapply(newVarsSP,swkr, parallelCluster = parallelCluster, use_parallel = use_parallel) sframe_0 <- do.call(rbind, sframe_0) sframe_0 <- sframe_0[order(sframe_0$varName), , drop = FALSE] sframe_0$outcome_level <- y_target sframe_0$needsSplit <- 
sf_0$needsSplit sframe_0$extraModelDegrees <- sf_0$extraModelDegrees sframe_0$origName <- sf_0$origName sframe_0$code <- sf_0$code rownames(sframe_0) <- NULL sframe_0 }) sframe_0 <- do.call(rbind, sframe_0) sframe_0 <- augment_score_frame(sframe_0) # get columns to match rownames(sframe_0) <- NULL rm(list = c("tf_0")) # get a shared split plan to minimize data leakage if(is.null(splitFunction)) { splitFunction <- kWayCrossValidation } evalSets <- splitFunction(nRows = nrow(dframe), nSplits = ncross, dframe=dframe, y = dframe[[outcomename]]) splitFunction <- pre_comp_xval(nRows=nrow(dframe), ncross, evalSets) # build one set of y-dependent treatments per possible y outcome names(y_l_names) <- y_levels cfe_list <- NULL if(length(y_dependent_treatments) > 0) { cfe_list <- lapply( y_levels, function(y_target) { cfe <- mkCrossFrameCExperiment( dframe, varlist, outcomename, y_target, weights=weights, minFraction=minFraction, smFactor=smFactor, rareCount=rareCount, rareSig=rareSig, collarProb=collarProb, codeRestriction=y_dependent_treatments, customCoders=customCoders, scale=scale, doCollar=doCollar, splitFunction=splitFunction, ncross=ncross, forceSplit = forceSplit, catScaling=catScaling, verbose= verbose, parallelCluster=parallelCluster, use_parallel = use_parallel, missingness_imputation = missingness_imputation, imputation_map = imputation_map) cross_frame_i = cfe$crossFrame cross_frame_i[[outcomename]] <- NULL score_frame_i <- cfe$treatments$scoreFrame vars_found <- score_frame_i$varName new_vars <- paste0(y_l_names[[y_target]], "_", vars_found) vars_forward_map_i <- new_vars names(vars_forward_map_i) <- vars_found vars_reverse_map_i <- vars_found names(vars_reverse_map_i) <- new_vars colnames(cross_frame_i) <- vars_forward_map_i[colnames(cross_frame_i)] score_frame_i$outcome_level <- y_target score_frame_i$varName <- vars_forward_map_i[score_frame_i$varName] list(treatments_i = cfe$treatments, cross_frame_i = cross_frame_i, score_frame_i = score_frame_i, vars_forward_map_i = vars_forward_map_i, vars_reverse_map_i = vars_reverse_map_i) }) names(cfe_list) <- NULL # make sure no names } # build an overall cross-frame for training dy <- data.frame(y = as.character(dframe[[outcomename]]), stringsAsFactors = FALSE) colnames(dy) = outcomename cbind_args <- c(list(prepare(treatments_0, dframe)), lapply(cfe_list, function(cfei) cfei$cross_frame_i), list(dy), stringsAsFactors = FALSE) cross_frame <- do.call( cbind, cbind_args) treatments_m <- NULL if(length(cfe_list) > 0) { score_frame <- do.call( rbind, lapply(cfe_list, function(cfei) cfei$score_frame_i)) rownames(score_frame) <- NULL # build a prepare function for new data treatments_m <- lapply(cfe_list, function(cfei) { list(treatment = cfei$treatments_i, score_frame_i = cfei$score_frame_i, vars_forward_map = cfei$vars_forward_map_i, vars_reverse_map = cfei$vars_reverse_map_i) }) score_frame <- rbind(sframe_0, score_frame) score_frame <- augment_score_frame(score_frame) # recompute augment on joined frame } else { score_frame = sframe_0 } # return components treat_m <- list( y_l_names = y_l_names, treatments_0 = treatments_0, treatments_m = treatments_m, fit_obj_id = id_f(dframe)) class(treat_m) <- "multinomial_plan" plan <- list(cross_frame = cross_frame, treat_m = treat_m, score_frame = score_frame, fit_obj_id = treat_m$fit_obj_id) plan } #' Function to apply mkCrossFrameMExperiment treatemnts. #' #' Please see \code{vignette("MultiClassVtreat", package = "vtreat")} \url{https://winvector.github.io/vtreat/articles/MultiClassVtreat.html}. 
#' #' @param treatmentplan multinomial_plan from mkCrossFrameMExperiment. #' @param dframe new data to process. #' @param ... not used, declared to forced named binding of later arguments #' @param pruneSig suppress variables with significance above this level #' @param scale optional if TRUE replace numeric variables with single variable model regressions ("move to outcome-scale"). These have mean zero and (for variables with significant less than 1) slope 1 when regressed (lm for regression problems/glm for classification problems) against outcome. #' @param doCollar optional if TRUE collar numeric variables by cutting off after a tail-probability specified by collarProb during treatment design. #' @param varRestriction optional list of treated variable names to restrict to #' @param codeRestriction optional list of treated variable codes to restrict to #' @param trackedValues optional named list mapping variables to know values, allows warnings upon novel level appearances (see \code{\link{track_values}}) #' @param extracols extra columns to copy. #' @param parallelCluster (optional) a cluster object created by package parallel or package snow. #' @param use_parallel logical, if TRUE use parallel methods. #' @param check_for_duplicate_frames logical, if TRUE check if we called prepare on same data.frame as design step. #' @return prepared data frame. #' #' @seealso \code{\link{mkCrossFrameMExperiment}}, \code{\link{prepare}} #' @export #' prepare.multinomial_plan <- function(treatmentplan, dframe, ..., pruneSig= NULL, scale= FALSE, doCollar= FALSE, varRestriction= NULL, codeRestriction= NULL, trackedValues= NULL, extracols= NULL, parallelCluster= NULL, use_parallel= TRUE, check_for_duplicate_frames= TRUE) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::prepare.multinomial_plan") treatments_0 <- treatmentplan$treatments_0 treatments_m <- treatmentplan$treatments_m y_l_names <- treatmentplan$y_l_names outcomename <- treatments_m[[1]]$treatment$outcomename old_fit_obj_id <- treatmentplan$fit_obj_id if(check_for_duplicate_frames && (!is.null(old_fit_obj_id))) { fit_obj_id <- id_f(dframe) if(!is.null(fit_obj_id)) { if(fit_obj_id == old_fit_obj_id) { warning("possibly called prepare() on same data frame as designTreatments*()/mkCrossFrame*Experiment(), this can lead to over-fit. 
To avoid this, please use mkCrossFrameMExperiment$crossFrame.") } } } treated <- prepare(treatments_0, dframe, pruneSig= pruneSig, scale= scale, doCollar= doCollar, varRestriction= varRestriction, codeRestriction= codeRestriction, trackedValues= trackedValues, extracols= extracols, parallelCluster= parallelCluster, use_parallel= use_parallel, check_for_duplicate_frames= FALSE) for(tli in treatments_m) { ti <- tli$treatment vars_forward_map_i <- tli$vars_forward_map vars_reverse_map_i <- tli$vars_reverse_map vri <- NULL if(length(varRestriction)>0) { common_keys <- intersect(varRestriction, names(vars_reverse_map_i)) if(length(common_keys)<=0) { next } vri <- vars_reverse_map_i[common_keys] } treated_i <- prepare(ti, dframe, pruneSig= pruneSig, scale= scale, doCollar= doCollar, varRestriction= vri, codeRestriction= codeRestriction, trackedValues= trackedValues, parallelCluster= parallelCluster, use_parallel= use_parallel, check_for_duplicate_frames = FALSE) treated_i[[outcomename]] <- NULL colnames(treated_i) <- vars_forward_map_i[colnames(treated_i)] treated <- cbind(treated, treated_i, stringsAsFactors = FALSE) } if(outcomename %in% colnames(dframe)) { treated[[outcomename]] <- dframe[[outcomename]] } treated } #' @export format.multinomial_plan <- function(x, ...) { "Multi Class Plan" } #' @export as.character.multinomial_plan <- function (x, ...) { format(x, ...) } #' #' Print treatmentplan. #' @param x treatmentplan #' @param ... additional args (to match general signature). #' @export print.multinomial_plan <- function(x, ...) { print(format(x), ...) }
/scratch/gouwar.j/cran-all/cranData/vtreat/R/mult_class.R
#' read application labels off a split plan. #' #' @param nRow number of rows in original data.frame. #' @param plan split plan #' @return vector of labels #' #' @seealso \code{\link{kWayCrossValidation}}, \code{\link{kWayStratifiedY}}, and \code{\link{makekWayCrossValidationGroupedByColumn}} #' #' @examples #' #' plan <- kWayStratifiedY(3,2,NULL,NULL) #' getSplitPlanAppLabels(3,plan) #' #' @export getSplitPlanAppLabels <- function(nRow,plan) { labels <- numeric(nRow) for(i in seq_len(length(plan))) { labels[plan[[i]]$app] <- i } labels } #' check if appPlan is a good carve-up of 1:nRows into nSplits groups #' #' @param nRows number of rows to carve-up #' @param nSplits number of sets to carve-up into #' @param appPlan carve-up to critique #' @param strictCheck logical, if true expect application data to be a carve-up and training data to be a maximal partition and to match nSplits. #' @return problem with carve-up (null if good) #' #' @seealso \code{\link{kWayCrossValidation}}, \code{\link{kWayStratifiedY}}, and \code{\link{makekWayCrossValidationGroupedByColumn}} #' #' @examples #' #' plan <- kWayStratifiedY(3,2,NULL,NULL) #' problemAppPlan(3,3,plan,TRUE) #' #' @export problemAppPlan <- function(nRows,nSplits,appPlan,strictCheck) { if(is.null(appPlan)) { return("appPlan was null") } if(!is.list(appPlan)) { return("appPlan needs be a list") } if((strictCheck)&&(nRows>1)&&(!is.null(nSplits))) { if(length(appPlan)!=nSplits) { return("didn't get requested number of groups in appPlan") } } fullSeq <- seq_len(nRows) seen <- c() for(i in seq_len(length(appPlan))) { si <- appPlan[[i]] if(!is.list(si)) { return("non list element in app plan") } ti <- si$train if(is.null(ti)) { return("missing train slot") } if(length(setdiff(ti,fullSeq))!=0) { return("unexpected symbols in train slot") } ai <- si$app if(is.null(ai)) { return("missing app slot") } if(length(setdiff(ai,fullSeq))!=0) { return("unexpected symbols in application slot") } if(nRows>1) { if(length(intersect(ti,ai))!=0) { return("train and application slots overlap") } } if(strictCheck) { if(length(setdiff(fullSeq,union(ai,ti)))>0) { return("non-maximal training set") } if(length(intersect(seen,ai))!=0) { return("repeated application row") } } seen <- union(seen,ai) } if(strictCheck) { if(length(seen)!=nRows) { return("not all rows appeared in application") } } NULL } #' One way holdout, a splitFunction in the sense of vtreat::buildEvalSets. #' #' Note one way holdout can leak target expected values, so it should not #' be preferred in nested modeling situations. #' Also, doesn't respect nSplits. #' #' @param nRows number of rows to split (integer >1). #' @param nSplits number of groups to split into (ignored). #' @param dframe original data frame (ignored). #' @param y numeric outcome variable (ignored). #' @return split plan #' #' @examples #' #' oneWayHoldout(3,NULL,NULL,NULL) #' #' @export oneWayHoldout <- function(nRows,nSplits,dframe,y) { if(nRows<=1) { return(NULL) } fullSeq <- seq_len(nRows) evalSets <- lapply(as.list(fullSeq), function(appi) { list(train=setdiff(fullSeq,appi),app=appi) }) attr(evalSets,'splitmethod') <- 'oneway' evalSets } #' k-fold cross validation, a splitFunction in the sense of vtreat::buildEvalSets #' #' @param nRows number of rows to split (>1). #' @param nSplits number of groups to split into (>1,<=nRows). #' @param dframe original data frame (ignored). #' @param y numeric outcome variable (ignored). 
#' @return split plan #' #' @examples #' #' kWayCrossValidation(7,2,NULL,NULL) #' #' @export kWayCrossValidation <- function(nRows,nSplits,dframe,y) { if((nRows<=1)||(nSplits<=1)||(nSplits>nRows)) { return(NULL) } fullSeq <- seq_len(nRows) perm <- sample.int(nRows,nRows,replace=FALSE) evalSets <- lapply(split(perm,1 + (fullSeq %% nSplits)), function(appi) { list(train=setdiff(fullSeq,appi),app=appi) }) names(evalSets) <- NULL attr(evalSets,'splitmethod') <- 'kwaycross' evalSets } #' k-fold cross validation stratified on y, a splitFunction in the sense of vtreat::buildEvalSets #' #' @param nRows number of rows to split (>1) #' @param nSplits number of groups to split into (<nRows,>1). #' @param dframe original data frame (ignored). #' @param y numeric outcome variable try to have equidistributed in each split. #' @return split plan #' #' @examples #' #' set.seed(23255) #' d <- data.frame(y=sin(1:100)) #' pStrat <- kWayStratifiedY(nrow(d),5,d,d$y) #' problemAppPlan(nrow(d),5,pStrat,TRUE) #' d$stratGroup <- vtreat::getSplitPlanAppLabels(nrow(d),pStrat) #' pSimple <- kWayCrossValidation(nrow(d),5,d,d$y) #' problemAppPlan(nrow(d),5,pSimple,TRUE) #' d$simpleGroup <- vtreat::getSplitPlanAppLabels(nrow(d),pSimple) #' summary(tapply(d$y,d$simpleGroup,mean)) #' summary(tapply(d$y,d$stratGroup,mean)) #' #' #' #' @export kWayStratifiedY <- function(nRows,nSplits,dframe,y) { if((nRows<=1)||(nSplits<=1)||(nSplits>nRows)) { return(NULL) } if(is.null(y)||(length(unique(y))<=1)) { return(kWayCrossValidation(nRows,nSplits,NULL,NULL)) } fullSeq <- seq_len(nRows) d <- data.frame(index=fullSeq,y=y) # initial permutation in case y has large constant blocks d <- d[order(sample.int(nRows,nRows,replace=FALSE)),] # order by y d <- d[order(d$y),] # mix within order segments rows_per_split <- ceiling(nRows/nSplits) mix_idx <- vector("list", rows_per_split) for(si in seq_len(rows_per_split)) { leftI <- si*nSplits - (nSplits-1) rightI <- min(si*nSplits,nRows) widthI <- 1+rightI-leftI if(widthI>1) { oldIndices <- leftI:rightI mix_idx[[si]] <- oldIndices[sample.int(widthI,widthI,replace=FALSE)] } else { mix_idx[[si]] <- leftI:rightI } } d[unlist(mix_idx),] <- d d$group <- (fullSeq %% nSplits) + 1 carveUp <- split(d$index,d$group) evalSets <- lapply(carveUp, function(appi) { list(train=setdiff(fullSeq,appi),app=appi) }) names(evalSets) <- NULL attr(evalSets,'splitmethod') <- 'kwaycrossystratified' evalSets } #' k-fold cross validation stratified with replacement on y, a splitFunction in the sense of vtreat::buildEvalSets . #' #' Build a k-fold cross validation sample where training sets are the same size as the original data, #' and built by sampling disjoint from test/application sets (sampled with replacement). #' #' @param nRows number of rows to split (>1) #' @param nSplits number of groups to split into (<nRows,>1). #' @param dframe original data frame (ignored). #' @param y numeric outcome variable try to have equidistributed in each split. 
#' @return split plan #' #' @examples #' #' set.seed(23255) #' d <- data.frame(y=sin(1:100)) #' pStrat <- kWayStratifiedYReplace(nrow(d),5,d,d$y) #' #' @export kWayStratifiedYReplace <- function(nRows,nSplits,dframe,y) { if((nRows<=1)||(nSplits<=1)||(nSplits>nRows)) { return(NULL) } if(is.null(y)||(length(unique(y))<=1)) { return(kWayCrossValidation(nRows,nSplits,NULL,NULL)) } fullSeq <- seq_len(nRows) d <- data.frame(index=fullSeq,y=y) # initial permutation in case y has large constant blocks d <- d[order(sample.int(nRows,nRows,replace=FALSE)),] # order by y d <- d[order(d$y),] # mix within order segments rows_per_split <- ceiling(nRows/nSplits) mix_idx <- vector("list", rows_per_split) for(si in seq_len(rows_per_split)) { leftI <- si*nSplits - (nSplits-1) rightI <- min(si*nSplits,nRows) widthI <- 1+rightI-leftI if(widthI>1) { oldIndices <- leftI:rightI mix_idx[[si]] <- oldIndices[sample.int(widthI,widthI,replace=FALSE)] } else { mix_idx[[si]] <- leftI:rightI } } d[unlist(mix_idx),] <- d d$group <- (fullSeq %% nSplits) + 1 carveUp <- split(d$index,d$group) evalSets <- lapply(carveUp, function(appi) { trainIdxs <- setdiff(fullSeq,appi) si <- sample.int(length(trainIdxs), nRows, replace=TRUE) list(train=trainIdxs[si], app=appi) }) names(evalSets) <- NULL attr(evalSets,'splitmethod') <- 'kwaycrossystratifiedreplace' evalSets } #' Build a k-fold cross validation splitter, respecting (never splitting) groupingColumn. #' #' @param groupingColumnName name of column to group by. #' @return splitting function in the sense of vtreat::buildEvalSets. #' #' @examples #' #' d <- data.frame(y=sin(1:100)) #' d$group <- floor(seq_len(nrow(d))/5) #' splitter <- makekWayCrossValidationGroupedByColumn('group') #' split <- splitter(nrow(d),5,d,d$y) #' d$splitLabel <- vtreat::getSplitPlanAppLabels(nrow(d),split) #' rowSums(table(d$group,d$splitLabel)>0) #' #' @export makekWayCrossValidationGroupedByColumn <- function(groupingColumnName) { force(groupingColumnName) function(nRows,nSplits,dframe,y) { if((nRows<=1)||(nSplits<=1)||(nSplits>nRows)) { return(NULL) } d <- data.frame(index=seq_len(nRows), group=as.character(dframe[[groupingColumnName]]), stringsAsFactors=FALSE) groups <- sort(unique(d$group)) groupedPlan <- NULL if((!is.null(y))&&(length(unique(y))>1)) { # try for y-stratification d$y <- y groupedY <- aggregate(y~group,data=d,FUN=mean)$y groupedPlan <- kWayStratifiedY(length(groups),nSplits,NULL,groupedY) } if(is.null(groupedPlan)) { groupedPlan <- kWayCrossValidation(length(groups),nSplits,NULL,NULL) } if(is.null(groupedPlan)) { return(NULL) } splitmethod <- attr(groupedPlan,'splitmethod') carveUp <- lapply(groupedPlan, function(gi) { d$index[d$group %in% groups[gi$app]] }) fullSeq <- seq_len(nRows) evalSets <- lapply(carveUp, function(appi) { list(train=setdiff(fullSeq,appi),app=appi) }) names(evalSets) <- NULL attr(evalSets,'splitmethod') <- paste0(splitmethod,'grouped') evalSets } } #' Build set carve-up for out-of sample evaluation. #' #' Return a carve-up of seq_len(nRows). Very useful for any sort of #' nested model situation (such as data prep, stacking, or super-learning). #' #' Also sets attribute "splitmethod" on return value that describes how the split was performed. #' attr(returnValue,'splitmethod') is one of: 'notsplit' (data was not split; corner cases #' like single row data sets), 'oneway' (leave one out holdout), 'kwaycross' (a simple #' partition), 'userfunction' (user supplied function was actually used), or a user specified attribute. 
#' Any user #' desired properties (such as stratification on y, or preservation of groups designated by #' original data row numbers) may not apply unless you see that 'userfunction' has been #' used. #' #' The intent is the user splitFunction only needs to handle "easy cases" #' and maintain user invariants. If the user splitFunction returns NULL, #' throws, or returns an unacceptable carve-up then vtreat::buildEvalSets #' returns its own eval set plan. The signature of splitFunction should #' be splitFunction(nRows,nSplits,dframe,y) where nSplits is the number of #' pieces we want in the carve-up, nRows is the number of rows to split, #' dframe is the original dataframe (useful for any group control variables), #' and y is a numeric vector representing outcome (useful for outcome stratification). #' #' Note that buildEvalSets may not always return a partition (such #' as one row dataframes), or if the user split function chooses to make rows eligible for #' application a different number of times. #' #' @param nRows scalar, >=1 number of rows to sample from. #' @param ... no additional arguments, declared to forced named binding of later arguments. #' @param dframe (optional) original data.frame, passed to user splitFunction. #' @param y (optional) numeric vector, outcome variable (possibly to stratify on), passed to user splitFunction. #' @param splitFunction (optional) function taking arguments nSplits,nRows,dframe, and y; returning a user desired split. #' @param nSplits integer, target number of splits. #' @return list of lists where the app portion of the sub-lists is a disjoint carve-up of seq_len(nRows) and each list as a train portion disjoint from app. #' #' @seealso \code{\link{kWayCrossValidation}}, \code{\link{kWayStratifiedY}}, and \code{\link{makekWayCrossValidationGroupedByColumn}} #' #' @examples #' #' # use #' buildEvalSets(200) #' #' # longer example #' # helper fns #' # fit models using experiment plan to estimate out of sample behavior #' fitModelAndApply <- function(trainData,applicaitonData) { #' model <- lm(y~x,data=trainData) #' predict(model,newdata=applicaitonData) #' } #' simulateOutOfSampleTrainEval <- function(d,fitApplyFn) { #' eSets <- buildEvalSets(nrow(d)) #' evals <- lapply(eSets, #' function(ei) { fitApplyFn(d[ei$train,],d[ei$app,]) }) #' pred <- numeric(nrow(d)) #' for(eii in seq_len(length(eSets))) { #' pred[eSets[[eii]]$app] <- evals[[eii]] #' } #' pred #' } #' #' # run the experiment #' set.seed(2352356) #' # example data #' d <- data.frame(x=rnorm(5),y=rnorm(5), #' outOfSampleEst=NA,inSampleEst=NA) #' #' # fit model on all data #' d$inSampleEst <- fitModelAndApply(d,d) #' # compute in-sample R^2 (above zero, falsely shows a #' # relation until we adjust for degrees of freedom) #' 1-sum((d$y-d$inSampleEst)^2)/sum((d$y-mean(d$y))^2) #' #' d$outOfSampleEst <- simulateOutOfSampleTrainEval(d,fitModelAndApply) #' # compute out-sample R^2 (not positive, #' # evidence of no relation) #' 1-sum((d$y-d$outOfSampleEst)^2)/sum((d$y-mean(d$y))^2) #' #' @export buildEvalSets <- function(nRows,..., dframe=NULL, y=NULL, splitFunction=NULL, nSplits=3) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::buildEvalSets") # check args if(nSplits<2) { stop("vtreat::buildEvalSets must have nSplits>=2") } if(!is.null(y)) { if(!is.numeric(y)) { stop('vtreat:buildEvalSets must have is.numeric(y)') } if(length(y)!=nRows) { stop('vtreat:buildEvalSets must have length(y)==nRows') } } if(!is.null(dframe)) { if(!is.data.frame(dframe)) { stop('vtreat:buildEvalSets must have 
is.data.frame(dframe)') } if(nrow(dframe)!=nRows) { stop('must have nrow(dframe)==nRows') } } # try user carve-up function if(!is.null(splitFunction)) { tryCatch({ evalSets <- splitFunction(nRows=nRows,nSplits=nSplits,dframe=dframe,y=y) problem <- problemAppPlan(nRows,nSplits,evalSets,FALSE) if(is.null(problem)) { if(is.null(attr(evalSets,'splitmethod'))) { attr(evalSets,'splitmethod') <- 'userfunction' } return(evalSets) } else { warning(paste("vtreat::buildEvalSets user carve-up rejected: ",problem)) } }, error = function(e) warning(paste('vtreat::buildEvalSets caught ', as.character(e),'from user splitFunction') )) } # deal with it ourselves if we have to fullSeq <- seq_len(nRows) # okay, we will carve-up on our own if(2*nSplits>nRows) { # one corner case if(nRows<=1) { # no split plan possible evalSets <- list(list(train=fullSeq,app=fullSeq)) attr(evalSets,'splitmethod') <- 'notsplit' } else { # not necissarilly number of splits the user requested evalSets <- kWayCrossValidation(nRows=nRows,nSplits= min(nSplits, nRows),dframe=NULL,y=NULL) problem <- problemAppPlan(nRows,min(nSplits, nRows),evalSets,TRUE) if(!is.null(problem)) { stop(paste("problem with vtreat::buildEvalSets",problem)) } } } else { # know 2*nSplits<=nRows if((!is.null(y))&&(max(y)>min(y))) { # Try for full y-stratified k-way cross val evalSets <- kWayStratifiedY(nRows=nRows,nSplits=nSplits,dframe=NULL,y=y) } else { evalSets <- kWayCrossValidation(nRows=nRows,nSplits=nSplits,dframe=NULL,y=NULL) } problem <- problemAppPlan(nRows,nSplits,evalSets,TRUE) if(!is.null(problem)) { stop(paste("problem with vtreat::buildEvalSets",problem)) } } evalSets } # make a "cross frame" that is a frame where each treated row was treated only # by a treatment plan not involving the given row .mkCrossFrame <- function( ..., dframe, referenceTreatments, varlist, newVarsS, outcomename, zoY, zC, zTarget, weights, minFraction, smFactor, rareCount, rareSig, collarProb, codeRestriction, customCoders, scale, doCollar, splitFunction, nSplits, catScaling, parallelCluster = NULL, use_parallel = TRUE, verbose = FALSE, missingness_imputation, imputation_map) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::.mkCrossFrame") dsub <- dframe[,c(varlist,outcomename),drop=FALSE] # build a carve-up plan evalSets <- buildEvalSets(length(zoY),dframe=dframe,y=zoY, splitFunction=splitFunction,nSplits=nSplits) crossFrameList <- vector('list',length(evalSets)) wtList <- vector('list',length(evalSets)) rList <- vector('list',length(evalSets)) foundCols <- c() # can include outcome for(ei in seq_len(length(evalSets))) { evalIndices <- evalSets[[ei]]$app buildIndices <- evalSets[[ei]]$train dsubiEval <- dsub[evalIndices, , drop=FALSE] dsubiBuild <- dsub[buildIndices, , drop=FALSE] zoYBuild <- zoY[buildIndices] zCBuild <- c() if(!is.null(zC)) { zCBuild <- zC[buildIndices] } wBuild <- weights[buildIndices] ti <- .designTreatmentsXS( dframe = dsubiBuild, varlist = varlist, outcomename = outcomename, zoY = zoYBuild, zC = zCBuild, zTarget = zTarget, weights = wBuild, minFraction = minFraction, smFactor = smFactor, rareCount = rareCount, rareSig = rareSig, collarProb = collarProb, codeRestriction = codeRestriction, customCoders = customCoders, justWantTreatments = TRUE, catScaling = catScaling, verbose = verbose, parallelCluster = parallelCluster, use_parallel = use_parallel, missingness_imputation = missingness_imputation, imputation_map = imputation_map) fi <- .vtreatList(ti,dsubiEval,newVarsS, scale = scale, doCollar = doCollar, parallelCluster = parallelCluster, 
use_parallel = use_parallel) # fill in missing columns (a data leak potential, but a necessary step) droppedColumns <- setdiff(newVarsS,c(outcomename,colnames(fi))) if(length(droppedColumns)>0) { repFrame <- NULL if(!is.null(referenceTreatments)) { repFrame <- prepare(referenceTreatments,dsubiEval, pruneSig=NULL, varRestriction=droppedColumns, scale=scale,doCollar=doCollar, parallelCluster=parallelCluster, use_parallel = use_parallel) } for(v in droppedColumns) { fi[[v]] <- 0.0 if((!is.null(repFrame)) && (v %in% colnames(repFrame))) { fi[[v]] <- repFrame[[v]] } } } # make sure each frame has the same column structure (again a data leak) fi[[outcomename]] <- dsubiEval[[outcomename]] if(ei<=1) { foundCols <- colnames(fi) } else { foundCols <- intersect(foundCols,colnames(fi)) } crossFrameList[[ei]] <- fi wtList[[ei]] <- weights[evalIndices] rList[[ei]] <- evalIndices } # make sure each frame has the same column structure lostVars <- setdiff(newVarsS,foundCols) if(length(lostVars)>0) { warning(paste('cross frame procedures lost variables: ', paste(lostVars,collapse=', '), '(likely did not vary on one data carve-up)')) } crossFrameList <- lapply(crossFrameList, function(fi) { fi[,foundCols,drop=FALSE] }) # assemble frame crossFrame <- .rbindListOfFrames(crossFrameList) scoreWeights <- unlist(wtList) rowList <- unlist(rList) if((length(rowList)==nrow(dframe))&& all(sort(rowList)==(1:nrow(dframe)))&& (!all(rowList==(1:nrow(dframe))))) { # undo permuation crossFrame[rowList,] <- crossFrame scoreWeights[rowList] <- scoreWeights[rowList] } list(crossFrame=crossFrame,crossWeights=scoreWeights, method=attr(evalSets,'splitmethod'), evalSets=evalSets, foundCols=foundCols, lostVars=lostVars) }
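# Sketch (not part of the package source): supplying a user split function of
# the signature splitFunction(nRows, nSplits, dframe, y) documented above, and
# checking the returned carve-up with problemAppPlan() (NULL means acceptable).
# The modulo splitter here is purely illustrative.
mySplitter <- function(nRows, nSplits, dframe, y) {
  fullSeq <- seq_len(nRows)
  groups <- (fullSeq %% nSplits) + 1
  plan <- lapply(seq_len(nSplits), function(g) {
    app <- fullSeq[groups == g]
    list(train = setdiff(fullSeq, app), app = app)
  })
  attr(plan, 'splitmethod') <- 'modulo_example'
  plan
}
plan <- vtreat::buildEvalSets(10, nSplits = 3, splitFunction = mySplitter)
vtreat::problemAppPlan(10, 3, plan, TRUE)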
/scratch/gouwar.j/cran-all/cranData/vtreat/R/outOfSample.R
#' Solve a numeric partial pooling problem.
#'
#' Please see \url{https://win-vector.com/2017/09/25/custom-level-coding-in-vtreat/} and
#' \url{https://win-vector.com/2017/09/28/partial-pooling-for-lower-variance-variable-encoding/}.
#'
#' @param v character variable name
#' @param vcol character, independent or input variable
#' @param y numeric, dependent or outcome variable to predict
#' @param w row/example weights
#' @return scored training data column
#'
#' @export
#'
ppCoderN <- function(v, vcol, y, w = NULL) {
  if(!requireNamespace("lme4", quietly = TRUE)) {
    stop("vtreat::ppCoderN requires the lme4 package")
  }
  # regression case y ~ vcol
  d <- data.frame(x = vcol, y = y, stringsAsFactors = FALSE)
  m <- lme4::lmer(y ~ (1 | x), data=d, weights=w)
  predict(m, newdata=d)
}

#' Solve a categorical partial pooling problem.
#'
#' Please see \url{https://win-vector.com/2017/09/25/custom-level-coding-in-vtreat/} and
#' \url{https://win-vector.com/2017/09/28/partial-pooling-for-lower-variance-variable-encoding/}.
#'
#' @param v character variable name
#' @param vcol character, independent or input variable
#' @param y logical, dependent or outcome variable to predict
#' @param w row/example weights
#' @return scored training data column
#'
#' @export
#'
ppCoderC <- function(v, vcol, y, w = NULL) {
  if(!requireNamespace("lme4", quietly = TRUE)) {
    stop("vtreat::ppCoderC requires the lme4 package")
  }
  # classification case y ~ vcol
  d <- data.frame(x = vcol, y = y, stringsAsFactors = FALSE)
  m <- lme4::glmer(y ~ (1 | x), data=d, weights=w, family=binomial)
  predict(m, newdata=d, type='link')
}
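# Sketch (not part of the package source): registering these partial-pooling
# coders as custom level coders. The key names 'n.poolN.center' (numeric
# outcome) and 'c.poolC.center' (categorical outcome) follow the convention
# used in the linked write-ups, but are assumptions here; adjust as needed.
if(requireNamespace("lme4", quietly = TRUE)) {
  library(vtreat)
  set.seed(2024)
  d <- data.frame(x = sample(letters[1:5], 100, replace = TRUE),
                  stringsAsFactors = FALSE)
  d$y <- ifelse(d$x == "a", 1, 0) + rnorm(nrow(d))
  tpN <- designTreatmentsN(
    d, varlist = "x", outcomename = "y",
    customCoders = list('n.poolN.center' = ppCoderN),  # assumed key name
    verbose = FALSE)
  tpN$scoreFrame[, c("varName", "code", "rsq", "sig")]
}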
/scratch/gouwar.j/cran-all/cranData/vtreat/R/partial_pooling.R
#' Patch columns into data.frame.
#'
#' Add columns from new_frame into orig_frame, replacing any
#' columns with matching names in orig_frame with values from
#' new_frame.
#'
#' @param orig_frame data.frame to patch into.
#' @param new_frame data.frame to take replacement columns from.
#' @return patched data.frame
#'
#' @examples
#'
#' orig_frame <- data.frame(x = 1, y = 2)
#' new_frame <- data.frame(y = 3, z = 4)
#' patch_columns_into_frame(orig_frame, new_frame)
#'
#' @export
#'
patch_columns_into_frame <- function(orig_frame, new_frame) {
  if(ncol(new_frame)<=0) {
    return(orig_frame)
  }
  trimed_cols <- setdiff(colnames(orig_frame), colnames(new_frame))
  if(length(trimed_cols)<=0) {
    return(new_frame)
  }
  orig_frame <- orig_frame[, trimed_cols, drop = FALSE]
  cbind(orig_frame, new_frame)
}
/scratch/gouwar.j/cran-all/cranData/vtreat/R/patch_columns_into_frame.R
#' Pre-computed cross-plan (so same split happens each time).
#'
#'
#' @param nRows number of rows to split (integer >1).
#' @param nSplits number of groups to split into (must match the pre-computed plan when the returned function is called).
#' @param splitplan split plan to actually use
#' @return a split function (in the sense of vtreat::buildEvalSets) that always returns splitplan
#'
#' @examples
#'
#' p1 <- oneWayHoldout(3,NULL,NULL,NULL)
#' p2 <- pre_comp_xval(3, 3, p1)
#' p2(3, 3)
#'
#' @export
pre_comp_xval <- function(nRows, nSplits, splitplan) {
  force(nRows)
  force(nSplits)
  force(splitplan)
  eRows <- nRows
  eSplits <- nSplits
  attr(splitplan, 'splitmethod') <- paste(attr(splitplan, 'splitmethod'), 
                                          "( pre-computed", eRows, eSplits, ")")
  f <- function(nRows, nSplits, dframe, y) {
    if(nRows!=eRows) {
      stop("row count mismatch")
    }
    if(nSplits!=eSplits) {
      stop("split count mismatch")
    }
    return(splitplan)
  }
  f
}
/scratch/gouwar.j/cran-all/cranData/vtreat/R/pre_comp_xval.R
# apply a prevalence fact .catP <- function(col,args,doCollar) { col <- .preProcCat(col,NULL) unhandledNovel <- !(col %in% names(args$scores)) keys <- col pred <- numeric(length(col)) if(length(args$scores)>0) { keys[unhandledNovel] <- names(args$scores)[[1]] # just to prevent bad lookups pred <- as.numeric(args$scores[keys]) } pred[unhandledNovel] <- args$rare_score pred } as_rquery.vtreat_cat_p <- function(tstep, ..., var_restriction) { if(!requireNamespace("rquery", quietly = TRUE)) { stop("vtreat::as_rquery.vtreat_cat_p treatmentplan requires the rquery package") } wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::as_rquery.vtreat_cat_p") if((!is.null(var_restriction)) && (!(tstep$newvars %in% var_restriction))) { return(NULL) } args <- tstep$args rquery_code_categorical(colname = tstep$origvar, resname = tstep$newvars, coding_levels = names(args$scores), effect_values = args$scores, levRestriction = args$levRestriction, default_value = args$rare_score) } # build a prevalence fact .mkCatP <- function(origVarName,vcolin,zoY,zC,zTarget,levRestriction,weights,catScaling) { vcol <- .preProcCat(vcolin,c()) extraModelDegrees <- max(0,length(unique(vcolin))-1) num <- tapply(weights,vcol,sum) den <- sum(weights) scores <- num/den scores <- as.list(scores) newVarName <- vtreat_make_names(paste(origVarName,'catP',sep='_')) treatment <- list(origvar=origVarName, newvars=newVarName, f=.catP, args=list(scores=scores, levRestriction=levRestriction, rare_score = 0.5/den), treatmentName='Prevalence Code', treatmentCode='catP', needsSplit=TRUE, extraModelDegrees=extraModelDegrees) pred <- treatment$f(vcolin,treatment$args) if(!.has.range.cn(pred)) { return(NULL) } class(treatment) <- c('vtreat_cat_p','vtreatment') if((!catScaling)||(is.null(zC))) { treatment$scales <- linScore(newVarName,pred,zoY,weights) } else { treatment$scales <- catScore(newVarName,pred,zC,zTarget,weights) } treatment }
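# Usage sketch (not part of the package source): the prevalence ("catP") code
# built above replaces each level with its training frequency; levels seen
# only at application time fall back to the small rare_score value
# (0.5/total weight), as handled in .catP above.
library(vtreat)
d_train <- data.frame(x = c("a", "a", "a", "b", "b", "c"), stringsAsFactors = FALSE)
tz <- designTreatmentsZ(d_train, varlist = "x", verbose = FALSE)
catP_vars <- tz$scoreFrame$varName[tz$scoreFrame$code == "catP"]
d_app <- data.frame(x = c("a", "b", "d"), stringsAsFactors = FALSE)  # "d" is novel
prepare(tz, d_app, varRestriction = catP_vars)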
/scratch/gouwar.j/cran-all/cranData/vtreat/R/prevalenceFact.R
#' @importFrom wrapr %:=% %.>%
NULL

# don't look uninitialized
. <- NULL

#' Flatten a list of functions onto d.
#'
#' @param d object (usually a data source)
#' @param fnlist a list of functions
#' @return fnlist[[length(fnlist)]](flatten_fn_list(d, fnlist[-length(fnlist)])) (or d if length(fnlist)<1)
#'
#' @seealso \code{\link{as_rquery_plan}}
#'
#' @keywords internal
#'
#' @export
#'
flatten_fn_list <- function(d, fnlist) {
  for(i in seq_len(length(fnlist))) {
    d <- fnlist[[i]](d)
  }
  d
}

#' Build a query mapping NaN, Infinity and -Infinity to NULL.
#'
#' Using PostgreSQL type design where math is altered to pick up
#' these values by equality:
#' \url{https://www.postgresql.org/docs/9.0/static/datatype-numeric.html}.
#'
#' @param rqplan a query plan produced by as_rquery_plan().
#' @param data_source relop, data source (usually a relop_table_source).
#' @param col_sample sample of data to determine column types.
#' @return sql_node conversion
#'
#' @keywords internal
#'
#' @noRd
zap_bad_numeric_rqplan_vars_q <- function(rqplan, 
                                          data_source,
                                          col_sample) {
  numvars <- lapply(
    rqplan$treatmentplans,
    function(tp) {
      tp$scoreFrame$origName[tp$scoreFrame$code %in% c("clean", "isBAD")]
    })
  numvars <- unique(unlist(numvars))
  numvars <- intersect(rquery::column_names(data_source), numvars)
  if(length(col_sample)>0) {
    check <- intersect(numvars, colnames(col_sample))
    drop <- vapply(check, 
                   function(ci) {
                     (!is.numeric(col_sample[[ci]])) || (is.integer(col_sample[[ci]]))
                   }, logical(1))
    drop <- check[drop]
    numvars <- setdiff(numvars, drop)
  }
  if(length(numvars)<=0) {
    return(data_source)
  }
  exprs <- lapply(
    numvars,
    function(vi) {
      list("(CASE WHEN",
           "(", as.name(vi), "=", list("NaN"), ") OR",
           "(", as.name(vi), "=", list("Infinity"), ") OR ",
           "(", as.name(vi), "= -", list("Infinity"), ")",
           "THEN NULL ELSE", as.name(vi), "END)")
    })
  names(exprs) <- numvars
  rquery::sql_node(data_source, exprs, orig_columns = TRUE)
}

#' Materialize a treated data frame remotely.
#'
#' @param db a db handle.
#' @param rqplan a query plan produced by as_rquery_plan().
#' @param data_source relop, data source (usually a relop_table_source).
#' @param result_table_name character, table name to land result in
#' @param ... force later arguments to bind by name.
#' @param extracols extra columns to copy.
#' @param temporary logical, if TRUE try to make result temporary.
#' @param overwrite logical, if TRUE try to overwrite result.
#' @param attempt_nan_inf_mapping logical, if TRUE attempt to map NaN and Infinity to NA/NULL (good on PostgreSQL, not on Spark).
#' @param col_sample sample of data to determine column types.
#' @param return_ops logical, if TRUE return operator tree instead of materializing.
#' @return description of treated table.
#' #' @seealso \code{\link{as_rquery_plan}}, \code{\link{rqdatatable_prepare}} #' #' @export #' rquery_prepare <- function(db, rqplan, data_source, result_table_name, ..., extracols = NULL, temporary = FALSE, overwrite = TRUE, attempt_nan_inf_mapping = FALSE, col_sample = NULL, return_ops = FALSE) { if(!requireNamespace("rquery", quietly = TRUE)) { stop("vtreat::rquery_prepare requires the rquery package.") } wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::rquery_prepare") for(ni in names(rqplan$tables)) { rquery::rq_copy_to(db, ni, rqplan$tables[[ni]], overwrite = TRUE, temporary = TRUE) } if(!("relop" %in% class(data_source))) { stop("vtreat::rquery_prepare data_source must be an rquery::relop tree") } if(attempt_nan_inf_mapping) { data_source <- zap_bad_numeric_rqplan_vars_q(rqplan, data_source, col_sample) } ops <- flatten_fn_list(data_source, rqplan$optree_generators) selcols <- intersect(rquery::column_names(ops), unique(c(rqplan$outcomename, rqplan$newvars, extracols))) ops <- rquery::select_columns(ops, selcols) if(return_ops) { return(ops) } treated <- rquery::materialize(db, ops, table_name = result_table_name, temporary = temporary, overwrite = overwrite) for(ni in names(rqplan$tables)) { rquery::rq_remove_table(db, ni) } treated } #' @describeIn rquery_prepare old name for rquery_prepare function #' @export materialize_treated <- rquery_prepare #' Apply a treatment plan using rqdatatable. #' #' Note: does not treat map NaN or +-Infinity. #' This function is only for timings and demonstration, not for production use. #' #' @param rqplan an query plan produced by as_rquery_plan(). #' @param data_source a data.frame. #' @param ... force later arguments to bind by name. #' @param extracols extra columns to copy. #' @param partition_column character name of column to partition work by. #' @param parallelCluster a cluster object, created by package parallel or by package snow. If NULL, use the registered default cluster. #' @param use_parallel logical, if TRUE use parallel cluster (when available). #' @param non_join_mapping logical, if TRUE use non-join based column mapping. #' @param print_rquery logical, if TRUE print the rquery ops. #' @param env environment to work in. #' @return treated data. 
#' #' @keywords internal #' #' @seealso \code{\link{as_rquery_plan}}, \code{\link{rquery_prepare}} #' #' @export #' rqdatatable_prepare <- function(rqplan, data_source, ..., partition_column = NULL, parallelCluster = NULL, use_parallel = use_parallel, extracols = NULL, non_join_mapping = FALSE, print_rquery = FALSE, env = parent.frame()) { force(env) if(!requireNamespace("rquery", quietly = TRUE)) { stop("vtreat::rqdatatable_prepare requires the rquery package.") } if(!requireNamespace("rqdatatable", quietly = TRUE)) { stop("vtreat::rqdatatable_prepare requires the rqdatatable package.") } wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::rqdatatable_prepare") source_name <- substitute(data_source) if(is.name(source_name)) { source_name <- as.character(source_name) } else { source_name <- "vtreat_rqdatatable_source" } if(!is.data.frame(data_source)) { stop("vtreat::rqdatatable_prepare data_source must be a data.frame") } tables <- rqplan$tables if(non_join_mapping) { # in-place changing, will be visible outside dat <- data_source for(ti in rqplan$tables) { colsi <- colnames(ti) if(length(colsi)!=2) { stop("vtreat::rqdatatable_prepare merge tables must have 2 columns") } keycol <- intersect(colnames(ti), colnames(dat)) if(length(keycol)!=1) { stop("vtreat::rqdatatable_prepare merge tables must have 1 column in common with data") } newcol <- setdiff(colnames(ti), keycol) mpi <- ti[[newcol]] names(mpi) <- ti[[keycol]] dat[[newcol]] <- mpi[dat[[keycol]]] } source_hdl <- rquery::local_td(dat, name = source_name) ops <- flatten_fn_list(source_hdl, rqplan$optree_generators[length(rqplan$optree_generators)]) tables[[source_name]] <- dat } else { source_hdl <- rquery::local_td(data_source, name = source_name) ops <- flatten_fn_list(source_hdl, rqplan$optree_generators) tables[[source_name]] <- data_source } selcols <- intersect(rquery::column_names(ops), unique(c(rqplan$outcomename, rqplan$newvars, extracols, partition_column))) ops <- rquery::select_columns(ops, selcols) if(print_rquery) { cat(format(ops)) } if(is.null(partition_column) || (!use_parallel)) { treated <- rqdatatable::ex_data_table(ops, tables = tables, env = env) } else { treated <- rqdatatable::ex_data_table_parallel(ops, partition_column = partition_column, cl = parallelCluster, tables = tables, env = env) } treated } as_rquery <- function(tstep, ..., var_restriction = NULL) { UseMethod("as_rquery") } as_rquery.vtreatment <- function(tstep, ..., var_restriction = NULL) { warning(paste("vtreat::as_rquery not yet implemented for ", format(tstep), ", class", paste(class(tstep), collapse = ", "))) NULL } #' Convert vtreatment plans into a sequence of rquery operations. #' #' @param treatmentplans vtreat treatment plan or list of vtreat treatment plan sharing same outcome and outcome type. #' @param ... not used, force any later arguments to bind to names. #' @param var_restriction character, if not null restrict to producing these variables. 
#' @return list(optree_generator (ordered list of functions), temp_tables (named list of tables)) #' #' @examples #' #' if(requireNamespace("rquery", quietly = TRUE)) { #' dTrainC <- data.frame(x= c('a', 'a', 'a', 'b' ,NA , 'b'), #' z= c(1, 2, NA, 4, 5, 6), #' y= c(FALSE, FALSE, TRUE, FALSE, TRUE, TRUE), #' stringsAsFactors = FALSE) #' dTrainC$id <- seq_len(nrow(dTrainC)) #' treatmentsC <- designTreatmentsC(dTrainC, c("x", "z"), 'y', TRUE) #' print(prepare(treatmentsC, dTrainC)) #' rqplan <- as_rquery_plan(list(treatmentsC)) #' ops <- flatten_fn_list(rquery::local_td(dTrainC), rqplan$optree_generators) #' cat(format(ops)) #' if(requireNamespace("rqdatatable", quietly = TRUE)) { #' treated <- rqdatatable::ex_data_table(ops, tables = rqplan$tables) #' print(treated[]) #' } #' if(requireNamespace("DBI", quietly = TRUE) && #' requireNamespace("RSQLite", quietly = TRUE)) { #' db <- DBI::dbConnect(RSQLite::SQLite(), ":memory:") #' source_data <- rquery::rq_copy_to(db, "dTrainC", dTrainC, #' overwrite = TRUE, temporary = TRUE) #' #' rest <- rquery_prepare(db, rqplan, source_data, "dTreatedC", #' extracols = "id") #' resd <- DBI::dbReadTable(db, rest$table_name) #' print(resd) #' #' rquery::rq_remove_table(db, source_data$table_name) #' rquery::rq_remove_table(db, rest$table_name) #' DBI::dbDisconnect(db) #' } #' } #' #' @seealso \code{\link{rquery_prepare}} #' #' @export #' as_rquery_plan <- function(treatmentplans, ..., var_restriction = NULL) { if(!requireNamespace("rquery", quietly = TRUE)) { stop("vtreat::as_rquery.treatmentplan requires the rquery package") } wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::as_rquery") if("treatmentplan" %in% class(treatmentplans)) { treatmentplans <- list(treatmentplans) } if((!is.list(treatmentplans)) || (length(treatmentplans)<1)) { stop("vtreat::as_rquery_plan treatmentplans must be a non-empty list of treatmentplans") } res <- list( exprs = character(0), optree_generators = list(), tables = list() ) outcomename <- character(0) newvars <- character(0) for(tstep in treatmentplans) { if(!is.null(tstep)) { if(!('treatmentplan' %in% class(tstep))) { stop("vtreat::as_rquery_plan treatmentplans must be a non-empty list of treatmentplans") } newvarsi <- tstep$scoreFrame$varName if(!is.null(var_restriction)) { newvarsi <- intersect(newvarsi, var_restriction) } if(length(newvarsi)>0) { outcomename <- unique(c(outcomename, tstep$outcomename)) if(length(outcomename)!=1) { stop("vtreat::as_rquery_plan treatmentplans must all share outcomes") } if(length(intersect(newvarsi, newvars))>0) { stop("vtreat::as_rquery_plan treatmentplans must produce disjoint sets of variables") } newvars <- c(newvars, newvarsi) for(ti in tstep$treatments) { ri <- as_rquery(ti, var_restriction = var_restriction) if(!is.null(ri)) { for(fld in c("exprs", "optree_generators", "tables")) { res[[fld]] <- c(res[[fld]], ri[[fld]]) } } } } } } if(length(res$exprs)>0) { exprs <- res$exprs # don't get clobbered by res$exprs <- NULL assignment f <- function(d) { rquery::extend_se(d, exprs) } res$optree_generators <- c( res$optree_generators, list(f)) } res$exprs <- NULL res$treatmentplans = treatmentplans res$outcomename = outcomename res$newvars = newvars res } #' Build a function that will re-code a categorical value. #' #' @param colname character, name of column to re-code. #' @param resname character, name of column to produce. #' @param coding_levels character, levels to not re-map to 'rare' #' @param effect_values named map to numeric, levels #' @param ... 
not used, force later arguments to be bound by name. #' @param levRestriction level restriction object. #' @param default_value numeric, default value used on non-effect_values matches. #' @param name_source a wrapr::mk_tmp_name_source() #' @return function generator for rquery pipeline and advisory table. #' #' @noRd #' rquery_code_categorical <- function(colname, resname, coding_levels, effect_values, ..., levRestriction = NULL, default_value = 0.0, name_source = wrapr::mk_tmp_name_source("vtreat_tmp")) { if(!requireNamespace("rquery", quietly = TRUE)) { stop("vtreat::rquery_code_categorical requires the rquery package") } effect_values <- unlist(effect_values) wrapr::stop_if_dot_args(substitute(list(...)), "vtreat:::rquery_code_categorical") if(length(resname)!=1) { stop(paste("vtreat::rquery_code_categorical resname must be a single string", colname, "->", resname)) } # work out coding table coding_levels <- coding_levels[grep("^x ", as.character(coding_levels))] coding_levels <- sort(unique(gsub("^x ", "", coding_levels))) # sort kills NA tnum <- 1 while(TRUE) { new_novel_level <- paste0("new_novel_level_", tnum) if(!(new_novel_level %in% coding_levels)) { break } tnum <- tnum + 1 } new_novel_value <- as.numeric(effect_values[.preProcCat(new_novel_level, levRestriction)]) if(is.na(new_novel_value) || is.nan(new_novel_value) || is.infinite(new_novel_value)) { new_novel_value <- default_value } na_value <- as.numeric(effect_values[.preProcCat(NA_character_, levRestriction)]) if(is.na(na_value) || is.nan(na_value) || is.infinite(na_value)) { na_value <- default_value } ctab <- data.frame(levels = coding_levels, stringsAsFactors = FALSE) codes <- .preProcCat(ctab$levels, levRestriction) ctab$effect <- as.numeric(effect_values[codes]) ctab$effect[is.na(ctab$effect) | is.nan(ctab$effect) | is.infinite(ctab$effect)] <- default_value if(length(ctab$levels)!=length(unique(ctab$levels))) { # should not happen, but let's catch it here so later joins are gauranteed to not blow-up stop(paste("vtreat:::rquery_code_categorical encoding levels were not unique, var:", colname, "->", resname)) } if(nrow(ctab)>0) { names(ctab) <- c(colname, resname) code_tab <- name_source() ctabd <- rquery::table_source(code_tab, c(colname, resname)) expr <- resname %:=% paste0("ifelse(is.na(", colname, "), ", na_value, ", ifelse(is.na(", resname, "), ", new_novel_value, ", ", resname, "))") f <- function(d) { rquery::natural_join(d, ctabd, jointype = "LEFT", by = colname) } tables = list(code_tab = ctab) names(tables) <- code_tab } else { tables <- list() f <- list() expr <- resname %:=% paste0("ifelse(is.na(", colname, "), ", na_value, ", ", new_novel_value, ")") } list( exprs = expr, optree_generators = f, tables = tables) }
/scratch/gouwar.j/cran-all/cranData/vtreat/R/rquery_treatment.R
# low frequency model # encode x as lambdas # x_i = previous + lambda * next_x # copying forward the variable causes later models to pick up the previous as above. encode_x_as_lambdas <- function(x, minx, maxx, xs) { n <- length(x) k <- length(xs) x[is.na(x)] <- (minx+maxx)/2 x <- pmin(maxx, pmax(minx, x)) ff <- data.frame("intercept" = rep(1, n)) for(ki in seq_len(k)) { vname <- paste0("lambda_", sprintf("%05g", ki)) v <- numeric(n) left <- xs[[ki]] if(ki<k) { right <- xs[[ki+1]] } else { right <- maxx } v <- (x-left)/(right-left) v[x<left] <- 0 v[x>=right] <- 1 # copy to rest of the models ff[[vname]] <- v } ff } # Fit a piecewise linear function at cut-points fit_segments <- function(x, y, k, ..., w = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::fit_segments") if(is.null(w)) { w = numeric(length(x)) + 1 } meany = mean(y) missing_pred = meany na_posns = is.na(x) if(sum(na_posns)>20) { missing_pred = mean(y[na_posns]) } x <- x[!na_posns] y <- y[!na_posns] w <- w[!na_posns] n <- length(x) minx <- min(x) maxx <- max(x) xs <- sort(x) idxs <- sort(unique(c(1, round(seq(1, n, length.out = k))))) idxs <- pmin(n, pmax(1, idxs)) idxs <- idxs[idxs<n] xs <- sort(unique(xs[idxs])) xs <- xs[xs<maxx] ff <- encode_x_as_lambdas(x, minx, maxx, xs) vars <- colnames(ff) ff$y <- y f <- paste("y", paste(c("0", vars), collapse = " + "), sep = " ~ ") f <- as.formula(f) model <- stats::lm(f, data = ff, weights = w) coef <- model$coefficients coef[is.na(coef)] <- 0 list(k =k, minx = minx, maxx = maxx, xs = xs, meany = meany, missing_pred = missing_pred, coef = coef) } pred_segs <- function(model, x) { ff <- encode_x_as_lambdas(x, model$min, model$maxx, model$xs) preds <- as.matrix(ff) %*% model$coef preds[is.na(x)] <- model$missing_pred preds } #' Solve as piecewise linear problem, numeric target. #' #' Return a vector of length y that is a piecewise function of x. #' This vector is picked as close to #' y (by square-distance) as possible for a set of x-only determined #' cut-points. Cross-validates for a good number of segments. #' #' @param varName character, name of variable #' @param x numeric input (not empty, no NAs). 
#' @param y numeric or castable to such (same length as x no NAs), output to match #' @param w numeric positive, same length as x (weights, can be NULL) #' @return segmented y prediction #' #' #' @export #' solve_piecewise <- function(varName, x, y, w = NULL) { tryCatch({ n <- length(x) if(n<=2) { return(NULL) } nunique <- length(unique(x)) if(nunique<=2) { return(NULL) } if(is.null(w)) { w <- numeric(n) + 1 } xorig <- x order <- order(x) x <- x[order] y <- y[order] w <- w[order] if(n<=20) { # too small, 1 or 2 segments k <- 2 } else { # cross-val for a good k ks <- c(1, 2, 4, 8, 16, 32, 64) ks <- ks[ks<=min(n/10, nunique)] if(length(ks)<1) { return(NULL) } is_test <- seq_len(n) %in% sample.int(n, n, replace = FALSE)[seq_len(floor(n/2))] xvals <- vapply( ks, function(k) { model <- fit_segments(x[!is_test], y[!is_test], k=k, w=w[!is_test]) preds <- pred_segs(model, x[is_test]) mean((y[is_test] - preds)^2) }, numeric(1)) idx <- which.min(xvals) k <- ks[[idx]] # names(xvals) <- as.character(ks) # print(xvals) # print(k) } model <- fit_segments(x, y, k=k, w=w) estimate <- pred_segs(model, xorig) approx_table <- data.frame(predXs = sort(unique(c(min(x), model$xs, max(x))))) approx_table$predYs <- pred_segs(model, approx_table$predXs) attr(estimate, "approx_table") <- approx_table attr(estimate, "method") <- "linear" return(estimate) }, error = function(e) { return(NULL) }) } #' Solve as piecewise logit problem, categorical target. #' #' Return a vector of length y that is a piecewise function of x. #' This vector is picked as close to #' y (by square-distance) as possible for a set of x-only determined #' cut-points. Cross-validates for a good number of segments. #' #' @param varName character, name of variable #' @param x numeric input (not empty, no NAs). #' @param y numeric or castable to such (same length as x no NAs), output to match #' @param w numeric positive, same length as x (weights, can be NULL) #' @return segmented y prediction #' #' #' @export #' solve_piecewisec <- function(varName, x, y, w = NULL) { v <- solve_piecewise(varName = varName, x = x, y = y , w = w) # don't copy over approx table as we are piecewise logit, not piecewise linear. .logit(v) - .logit(.wmean(y, w)) }
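
# Example sketch (not run as part of the package): solve_piecewise() returns an
# in-sample piecewise-linear estimate of y as a function of x, with an
# "approx_table" attribute recording the knot positions and fitted values.
# Assuming the vtreat package is loaded:
#
#   set.seed(2019)
#   x <- rnorm(100)
#   y <- sin(x) + rnorm(100, sd = 0.1)
#   est <- solve_piecewise("x", x, y)
#   head(attr(est, "approx_table"))   # knot x positions and fitted y values
#   plot(x, est)                      # piecewise-linear fit tracking sin(x)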
/scratch/gouwar.j/cran-all/cranData/vtreat/R/segmented_variable.R
mk_spline_eval_fn <- function(spline, mn_x, mx_x) { force(spline) force(mn_x) force(mx_x) function(x) { x <- pmax(x, mn_x) x <- pmin(x, mx_x) stats::predict(spline, x)$y } } #' Spline variable numeric target. #' #' Return a spline approximation of data. #' #' @param varName character, name of variable #' @param x numeric input (not empty, no NAs). #' @param y numeric or castable to such (same length as x no NAs), output to match #' @param w numeric positive, same length as x (weights, can be NULL) #' @return spline y prediction #' #' #' @export #' spline_variable <- function(varName, x, y, w = NULL) { tryCatch({ n <- length(x) if(n<=2) { return(NULL) } nunique <- length(unique(x)) if(nunique<=2) { return(NULL) } if(is.null(w)) { w <- numeric(n) + 1 } d <- data.frame(x = x, y = y, w = w, orig_idx = seq_len(n)) d <- d[order(d$x, stats::runif(length(d$x))), , drop = FALSE] nknots <- min(nunique/2, 100) spline <- stats::smooth.spline(d$x, d$y, w = d$w, nknots = nknots, keep.data = FALSE, keep.stuff = FALSE, cv = TRUE)$fit estimate <- stats::predict(spline, x)$y attr(estimate, "eval_fn") <- mk_spline_eval_fn(spline, min(d$x), max(d$x)) attr(estimate, "method") <- "linear" return(estimate) }, error = function(e) { return(NULL) }) } #' Spline variable categorical target. #' #' Return a spline approximation of the change in log odds. #' #' @param varName character, name of variable #' @param x numeric input (not empty, no NAs). #' @param y numeric or castable to such (same length as x no NAs), output to match #' @param w numeric positive, same length as x (weights, can be NULL) #' @return spline y prediction #' #' #' @export #' spline_variablec <- function(varName, x, y, w = NULL) { v <- spline_variable(varName = varName, x = x, y = y , w = w) .logit(v) - .logit(.wmean(y, w)) }
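
# Example sketch (not run as part of the package): spline_variable() returns the
# in-sample smoothing-spline estimate, and the "eval_fn" attribute can score new
# x values (clamped to the training range).  Assuming the vtreat package is loaded:
#
#   set.seed(2019)
#   x <- seq(0, 10, length.out = 200)
#   y <- cos(x) + rnorm(200, sd = 0.2)
#   est <- spline_variable("x", x, y)
#   f <- attr(est, "eval_fn")
#   f(c(-5, 5, 50))   # new points are clamped into [min(x), max(x)] before predict()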
/scratch/gouwar.j/cran-all/cranData/vtreat/R/spline_variable.R
#' Build a square windows variable, numeric target. #' #' Build a square moving average window (KNN in 1d). This is a high-frequency feature. #' #' @param varName character, name of variable #' @param x numeric input (not empty, no NAs). #' @param y numeric or castable to such (same length as x no NAs), output to match #' @param w numeric positive, same length as x (weights, can be NULL) IGNORED #' @return segmented y prediction #' #' @examples #' #' d <- data.frame(x = c(NA, 1:6), y = c(0, 0, 0, 1, 1, 0, 0)) #' square_window("v", d$x, d$y) #' #' #' @export #' square_window <- function(varName, x, y, w = NULL) { tryCatch({ n <- length(x) if(n<=10) { return(NULL) } meany = mean(y) d <- data.frame(x = x, y = y, orig_idx = seq_len(n)) d <- d[order(d$x, stats::runif(length(d$x))), , drop = FALSE] k <- max(min(20, floor(nrow(d)/3)), ceiling(nrow(d)/10000)) # customCoder down-samples at 10000 so no point having more points # user a convolution to build running windows ones <- rep(1, k) num <- stats::convolve(c(rep(0,k), d$y, rep(0,k)), ones, type="filter") den <- stats::convolve(c(rep(0,k), rep(1, length(d$x)), rep(0,k)), ones, type="filter") rat <- num/den d$est <- rat[(length(rat)-length(d$x))/2 + seq_len(length(d$x))] res <- rep(meany, n) res[d$orig_idx] <- d$est res }, error = function(e) { return(NULL) }) } #' Build a square windows variable, categorical target. #' #' Build a square moving average window (KNN in 1d). This is a high-frequency feature. #' Approximation of the change in log odds. #' #' @param varName character, name of variable #' @param x numeric input (not empty, no NAs). #' @param y numeric or castable to such (same length as x no NAs), output to match #' @param w numeric positive, same length as x (weights, can be NULL) IGNORED #' @return segmented y prediction #' #' @examples #' #' d <- data.frame(x = c(NA, 1:6), y = c(0, 0, 0, 1, 1, 0, 0)) #' square_window("v", d$x, d$y) #' #' #' @export #' square_windowc <- function(varName, x, y, w = NULL) { v <- square_window(varName = varName, x = x, y = y , w = w) .logit(v) - .logit(.wmean(y, w)) }
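
# Example sketch (not run as part of the package): square_window() requires more
# than 10 rows (the small documented example above is below that threshold and so
# returns NULL).  With a larger sample it returns a running-window ("KNN in 1d")
# mean of y ordered by x.  Assuming the vtreat package is loaded:
#
#   set.seed(2019)
#   x <- runif(200)
#   y <- as.numeric(x > 0.5) + rnorm(200, sd = 0.1)
#   est <- square_window("x", x, y)
#   plot(x, est)   # roughly a smoothed step at x = 0.5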
/scratch/gouwar.j/cran-all/cranData/vtreat/R/square_window.R
# not importing from parallel; look up getDefaultCluster() dynamically to
# maybe run in parallel
plapply <- function(workList, worker,
                    ...,
                    parallelCluster = NULL,
                    use_parallel = TRUE) {
  wrapr::stop_if_dot_args(substitute(list(...)), "vtreat:::plapply")
  use_parallel <- use_parallel &&
    (length(workList)>1) &&
    (isTRUE(getOption('vtreat.allow_parallel', TRUE))) &&
    (requireNamespace("parallel", quietly=TRUE))
  if(use_parallel && is.null(parallelCluster)) {
    gdc <- get0('getDefaultCluster',
                envir = asNamespace('parallel'),
                mode = 'function',
                ifnotfound = NULL)
    if(!is.null(gdc)) {
      parallelCluster <- gdc()
    }
  }
  if((!use_parallel) || is.null(parallelCluster)) {
    res <- lapply(workList, worker)
  } else {
    res <- parallel::parLapplyLB(parallelCluster, workList, worker)
  }
  res
}

# rbind a list of dataframes into one
.rbindListOfFrames <- function(frame_list) {
  # catch trivial cases
  if(is.data.frame(frame_list)) {
    return(frame_list)
  }
  frame_list <- Filter(Negate(is.null), frame_list)
  if(length(frame_list)<=1) {
    if(length(frame_list)<=0) {
      return(NULL)
    }
    return(frame_list[[1]])
  }
  # see if a package can supply a fast method
  if(isTRUE(getOption('vtreat.use_data.table_binding', TRUE))) {
    if(requireNamespace("data.table", quietly = TRUE)) {
      return(as.data.frame(data.table::rbindlist(frame_list),
                           stringsAsFactors=FALSE))
    }
  }
  # fall back to base R
  do.call(base::rbind, c(frame_list, list('stringsAsFactors' = FALSE)))
}

# take in a column and return a column that is safely one of the primitive
# types: numeric, character
# if the column is more exotic (multiple classes, AsIs, other issues) return null
# protects downstream code from surprises
# given how diverse R types are there is no way we can defend against everything,
# this is supposed to be a consistent defense against common unexpected conversions
# see: https://win-vector.com/2015/04/09/what-can-be-in-an-r-data-frame-column/
.cleanColumn <- function(xcol,expectedLength) {
  if(is.null(xcol)) {
    return(NULL)
  }
  if("AsIs" %in% class(xcol)) {
    return(NULL)
  }
  xcol <- as.vector(xcol) # defend against arrays, converts POSIXct to numeric, but not POSIXlt
  if(is.null(xcol)||(length(xcol)!=expectedLength)) {
    return(NULL)
  }
  if("POSIXt" %in% class(xcol)) {
    return(as.numeric(xcol))
  }
  if(length(class(xcol))!=1) {
    return(NULL)
  }
  if(class(xcol) %in% c('list','AsIs')) {
    return(NULL)
  }
  if(is.factor(xcol)||is.character(xcol)) {
    # factor, character case
    return(as.character(xcol))
  }
  if(is.logical(xcol)||is.numeric(xcol)||is.integer(xcol)) {
    # is.numeric(factor('a')) returns false, but let's not have factors here anyway
    # logical (treat as an indicator), integer, numeric case
    # is.numeric(1:10) returns TRUE (integers get into this block)
    return(as.numeric(xcol))
  }
  if(is.atomic(xcol)) {
    return(as.character(xcol))
  }
  return(NULL)
}

.is.bad <- function(v) { is.na(v) | is.nan(v) | (!is.finite(v)) }

# check if a vector has more than one value
.has.range <- function(v) {
  lv <- length(v)
  if(lv<=1) {
    return(FALSE)
  }
  nna <- sum(is.na(v))
  if(nna>=lv) {
    return(FALSE)
  }
  if(nna>0) {
    return(TRUE)
  }
  match1 <- v==v[[1]]
  sum(match1)<lv
}

# check if a clean numeric vector has more than one value
.has.range.cn <- function(v) {
  lv <- length(v)
  if(lv<=1) {
    return(FALSE)
  }
  return(max(v)>min(v))
}

#' Compute weighted mean
#'
#' Compute the weighted mean of x.
#'
#' @param x numeric vector without NA to compute mean of
#' @param weights weights vector (or NULL)
#' @return weighted mean
#'
#' @keywords internal
#'
#' @examples
#'
#' .wmean(c(1, 2, 3))
#'
#' @export
#'
.wmean <- function(x, weights = NULL) {
  if(is.null(weights)) {
    return(mean(x))
  }
  sum(x*weights)/sum(weights)
}

# build a weighted table
.wTable <- function(v1,v2,wts) {
  v1 <- as.character(v1)
  v2 <- as.character(v2)
  v1Levs <- sort(unique(v1))
  v2Levs <- sort(unique(v2))
  if(is.null(wts)) {
    wts <- rep(1.0,length(v1))
  } else {
    wts <- as.numeric(wts)
  }
  d <- data.frame(v1=v1,
                  v2=v2,
                  weights=wts,
                  stringsAsFactors=FALSE)
  agg <- aggregate(weights~v1+v2,data=d,FUN=sum)
  mat <- matrix(data=0,nrow=length(v1Levs),ncol=length(v2Levs))
  rownames(mat) <- v1Levs
  colnames(mat) <- v2Levs
  for(ii in seq_len(nrow(agg))) {
    mat[agg$v1[[ii]],agg$v2[[ii]]] <- agg$weights[[ii]]
  }
  as.table(mat)
}

#' Return in-sample linear stats and scaling.
#' @param varName name of variable
#' @param xcol numeric vector of inputs (no NA/NULL/NaN)
#' @param ycol numeric vector of outcomes (no NA/NULL/NaN)
#' @param weights numeric vector of data weights (no NA/NULL/NaN, all>0.0)
#' @param numberOfHiddenDegrees optional scalar >= 0 number of additional modeling degrees of freedom to account for.
#' @return significance estimate and scaling.
#'
#' @noRd
linScore <- function(varName,xcol,ycol,weights,numberOfHiddenDegrees=0) {
  if(is.null(weights)) {
    weights <- 1.0+numeric(length(xcol))
  }
  a <- 0.0
  rsq <- 0.0
  sig <- 1.0
  if(.has.range.cn(xcol) && .has.range.cn(ycol)) {
    suppressWarnings(tryCatch({
      d <- data.frame(x=xcol,y=ycol,stringsAsFactors=FALSE)
      lmodel <- stats::lm(formula=y~x,
                          data=d,
                          weights=weights)
      a <- lmodel$coefficients[['x']]
      if(is.na(a) || is.infinite(a)) {
        a <- 0.0
      } else {
        smodel <- summary(lmodel)
        n <- sum(weights)
        meany <- .wmean(d$y,weights)
        rss1 <- sum(weights*(d$y-meany)^2)
        rss2 <- sum(weights*smodel$residuals^2)
        rsq <- 1 - rss2/rss1
        p1 <- 1
        p2 <- 2 + numberOfHiddenDegrees
        if((n>p2)&&(rss1>rss2)&&(rss1>0)&&(p2>p1)) {
          if(rss2<=0) {
            sig <- 0.0 # summary(lm(y~x,data.frame(x=1:3,y=1:3))) # case
          } else {
            f = ((rss1-rss2)/(p2-p1))/(rss2/(n-p2))
            sig <- stats::pf(f, p2-p1, n-p2, lower.tail=FALSE)
          }
        }
      }
    },
    error=function(e){}))
  }
  b <- -.wmean(a*xcol,weights)
  data.frame(varName=varName,
             a=a,b=b,
             rsq=rsq,sig=sig,
             stringsAsFactors=FALSE)
}

#' Return significance of a 1-variable logistic regression
#' @param varName name of variable
#' @param x numeric (no NAs/NULLs) effective variable
#' @param yC (no NAs/NULLs) outcome variable
#' @param yTarget scalar target for yC to match (yC==yTarget is goal)
#' @param weights (optional) numeric, non-negative, no NAs/NULLs at least two positive positions
#' @param numberOfHiddenDegrees optional scalar >= 0 number of additional modeling degrees of freedom to account for.
#' @return significance estimate of a 1-variable logistic regression
#'
#' @examples
#'
#' # d <- data.frame(y=c(1,1,0,0,1,1,0,0,1,1,1,1))
#' # d$x <- seq_len((nrow(d)))
#' # vtreat:::catScore('x',d$x,d$y,1,NULL)
#'
#' @noRd
catScore <- function(varName,x,yC,yTarget,weights,numberOfHiddenDegrees=0) {
  if(is.null(weights)) {
    weights <- rep(1.0, length(x))
  }
  pRsq <- 0.0
  sig <- 1.0
  a <- 0.0
  if(.has.range.cn(x) && .has.range.cn(as.numeric(yC==yTarget))) {
    tfp <- data.frame(x = x,
                      y = (yC==yTarget),
                      w = weights,
                      stringsAsFactors=FALSE)
    suppressWarnings(tryCatch({
      model <- stats::glm(stats::as.formula('y~x'),
                          data=tfp,
                          family=stats::binomial(link='logit'),
                          weights=tfp$w)
      if(!model$converged) {
        # put in a small opposite-signed copy of the data to prevent unbounded models
        # try to fix non-convergence
        tfo <- tfp
        tfo$w <- tfo$w*1.0e-5
        tfo$y <- !tfo$y
        tf <- rbind(tfp, tfo)
        model <- stats::glm(stats::as.formula('y~x'),
                            data=tf,
                            family=stats::binomial(link='logit'),
                            weights=tf$w)
      }
      if(model$converged) {
        delta_deviance <- model$null.deviance - model$deviance
        if((model$null.deviance>0)&&(delta_deviance>0)) {
          delta_df <- model$df.null - model$df.residual + numberOfHiddenDegrees
          pRsq <- 1.0 - model$deviance/model$null.deviance
          sig <- stats::pchisq(delta_deviance, delta_df, lower.tail=FALSE)
          a <- model$coefficients[['x']]
          # bg <- model$coefficients[['(Intercept)']]
          # sigmoid <- function(x) { 1/(1+exp(-x)) }
          # max(abs(predict(model,type='response')-sigmoid(a*x+bg))) small
        }
      }
    },
    error=function(e){}))
  }
  b <- -.wmean(a*x,weights)
  data.frame(varName=varName,
             rsq=pRsq,sig=sig,
             a=a,b=b,
             stringsAsFactors=FALSE)
}

char_fix_map <- c("!" = "_bang_",
                  "@" = "_at_",
                  "#" = "_hash_",
                  "$" = "_dollar_",
                  "%" = "_percent_",
                  "^" = "_pow_",
                  "&" = "_amp_",
                  "*" = "_star_",
                  "(" = "_oparen_",
                  ")" = "_cparen_",
                  "-" = "_minus_",
                  "+" = "_plus_",
                  "=" = "_eq_",
                  "<" = "_lt_",
                  ">" = "_gt_",
                  ":" = "_colon_",
                  ";" = "_semi_",
                  "'" = "_tick_",
                  "`" = "_btick_",
                  "~" = "_tilde_",
                  '"' = "_quote_",
                  "," = "_comma_",
                  "?" = "_qmark_",
                  "/" = "_slash_",
                  "\\" = "_bslash_",
                  "|" = "_bar_",
                  "{" = "_obrace_",
                  "}" = "_cbrace_",
                  "[" = "_obrack_",
                  "]" = "_cbrack_")

fancy_sub <- function(s_in) {
  s <- s_in
  # leaving _ and . as is at first step
  support <- paste(unique(s_in), collapse = '')
  support <- strsplit(support, "")[[1]] # split down to chars
  chars <- unique(support)
  needs_work <- intersect(support, names(char_fix_map))
  for(c in needs_work) {
    s <- gsub(c, char_fix_map[[c]], s, fixed = TRUE)
  }
  s
}

vtreat_make_names <- function(nms_in,
                              ...,
                              fancy_names = TRUE) {
  wrapr::stop_if_dot_args(substitute(list(...)), "vtreat:::vtreat_make_names")
  nms <- as.character(nms_in)
  if(fancy_names) {
    nms <- fancy_sub(nms)
  }
  nms <- gsub("[^[:alnum:]]+", "_", nms)
  nms <- make.names(nms, unique = TRUE, allow_ = TRUE)
  nms <- gsub(".", "_", nms, fixed = TRUE) # convert dots to underscores
  nms
}

.logit <- function(x, eps = 1.0e-6) {
  nms <- names(x)
  x <- pmax(eps, x)
  x <- pmin(1-eps, x)
  v <- log(x/(1-x))
  names(v) <- nms
  v
}

#' @importFrom digest digest
NULL

# approximate object identity check, can return NULL
id_f <- function(d) {
  if(!isTRUE(getOption('vtreat.check_for_nested_model_bias', TRUE))) {
    return(NULL)
  }
  return(digest::digest(d))
}
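
# Example sketch (not run as part of the package): the categorical ("*c") custom
# coders in this package report effects on an approximate logit scale, centered
# by subtracting .logit(.wmean(y, w)).  A tiny worked instance of that centering,
# with hypothetical in-sample estimates:
#
#   y <- c(0, 0, 1, 1, 1)                               # 60% positive
#   base <- vtreat:::.logit(vtreat:::.wmean(y, NULL))   # log(0.6/0.4) ~= 0.405
#   est <- c(0.2, 0.9)                                  # hypothetical estimates
#   vtreat:::.logit(est) - base                         # centered change in log odds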
/scratch/gouwar.j/cran-all/cranData/vtreat/R/utils.R
#' Return variable evaluations.
#'
#'
#' @param sf scoreFrame from vtreat treatments
#' @return per-original-variable evaluations
#'
#'
#' @export
#'
variable_values <- function(sf) {
  res <- data.frame(rsq = tapply(sf$rsq, sf$origName, max))
  res <- cbind(res, data.frame(count = tapply(numeric(nrow(sf))+1, sf$origName, sum)))
  res <- cbind(res, data.frame(sig = tapply(sf$sig, sf$origName, min)))
  res$sig <- pmin(1, res$sig*res$count) # Bonferroni correction
  res$var <- rownames(res)
  rownames(res) <- NULL
  res
}

#' Value variables for predicting a numeric outcome.
#'
#'
#' @param dframe Data frame to learn treatments from (training data), must have at least 1 row.
#' @param varlist Names of columns to treat (effective variables).
#' @param outcomename Name of column holding outcome variable. dframe[[outcomename]] must be only finite non-missing values and there must be a cut such that dframe[[outcomename]] is both above the cut at least twice and below the cut at least twice.
#' @param ... no additional arguments, declared to force named binding of later arguments
#' @param weights optional training weights for each row
#' @param minFraction optional minimum frequency a categorical level must have to be converted to an indicator column.
#' @param smFactor optional smoothing factor for impact coding models.
#' @param rareCount optional integer, allow levels with this count or below to be pooled into a shared rare-level. Defaults to 0 or off.
#' @param rareSig optional numeric, suppress levels from pooling at this significance value or greater. Defaults to NULL or off.
#' @param collarProb what fraction of the data (pseudo-probability) to collar data at if doCollar is set during \code{\link{prepare.treatmentplan}}.
#' @param scale optional if TRUE replace numeric variables with regression ("move to outcome-scale").
#' @param doCollar optional if TRUE collar numeric variables by cutting off after a tail-probability specified by collarProb during treatment design.
#' @param splitFunction (optional) see vtreat::buildEvalSets .
#' @param ncross optional scalar>=2 number of cross-validation rounds to design.
#' @param forceSplit logical, if TRUE force cross-validated significance calculations on all variables.
#' @param verbose if TRUE print progress.
#' @param parallelCluster (optional) a cluster object created by package parallel or package snow.
#' @param use_parallel logical, if TRUE use parallel methods.
#' @param customCoders additional coders to use for variable importance estimate.
#' @param codeRestriction codes to restrict to for variable importance estimate.
#' @param missingness_imputation function of signature f(values: numeric, weights: numeric), simple missing value imputer.
#' @param imputation_map map from column names to functions of signature f(values: numeric, weights: numeric), simple missing value imputers.
#' @return table of variable valuations #' #' @export #' value_variables_N <- function(dframe,varlist, outcomename, ..., weights=c(), minFraction=0.02,smFactor=0.0, rareCount=0,rareSig=1, collarProb=0.00, scale=FALSE,doCollar=FALSE, splitFunction=NULL,ncross=3, forceSplit=FALSE, verbose= FALSE, parallelCluster=NULL, use_parallel = TRUE, customCoders = list('c.PiecewiseV.num' = vtreat::solve_piecewisec, 'n.PiecewiseV.num' = vtreat::solve_piecewise, 'c.knearest.num' = vtreat::square_windowc, 'n.knearest.num' = vtreat::square_window), codeRestriction = c("PiecewiseV", "knearest", "clean", "isBAD", "catB", "catP"), missingness_imputation = NULL, imputation_map = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::value_variables_N") cfn <- mkCrossFrameNExperiment( dframe= dframe, varlist = varlist, outcomename = outcomename, weights=weights, minFraction=minFraction,smFactor=smFactor, rareCount=rareCount,rareSig=rareSig, collarProb=collarProb, codeRestriction=codeRestriction, customCoders=customCoders, scale=scale,doCollar=doCollar, splitFunction=splitFunction,ncross=ncross, forceSplit=forceSplit, verbose= verbose, parallelCluster=parallelCluster, use_parallel = use_parallel, missingness_imputation = missingness_imputation, imputation_map=imputation_map) variable_values(cfn$treatments$scoreFrame) } #' Value variables for prediction a categorical outcome. #' #' #' #' @param dframe Data frame to learn treatments from (training data), must have at least 1 row. #' @param varlist Names of columns to treat (effective variables). #' @param outcomename Name of column holding outcome variable. dframe[[outcomename]] must be only finite non-missing values. #' @param outcometarget Value/level of outcome to be considered "success", and there must be a cut such that dframe[[outcomename]]==outcometarget at least twice and dframe[[outcomename]]!=outcometarget at least twice. #' @param ... no additional arguments, declared to forced named binding of later arguments #' @param weights optional training weights for each row #' @param minFraction optional minimum frequency a categorical level must have to be converted to an indicator column. #' @param smFactor optional smoothing factor for impact coding models. #' @param rareCount optional integer, allow levels with this count or below to be pooled into a shared rare-level. Defaults to 0 or off. #' @param rareSig optional numeric, suppress levels from pooling at this significance value greater. Defaults to NULL or off. #' @param collarProb what fraction of the data (pseudo-probability) to collar data at if doCollar is set during \code{\link{prepare.treatmentplan}}. #' @param scale optional if TRUE replace numeric variables with regression ("move to outcome-scale"). #' @param doCollar optional if TRUE collar numeric variables by cutting off after a tail-probability specified by collarProb during treatment design. #' @param splitFunction (optional) see vtreat::buildEvalSets . #' @param ncross optional scalar>=2 number of cross-validation rounds to design. #' @param forceSplit logical, if TRUE force cross-validated significance calculations on all variables. #' @param catScaling optional, if TRUE use glm() linkspace, if FALSE use lm() for scaling. #' @param verbose if TRUE print progress. #' @param parallelCluster (optional) a cluster object created by package parallel or package snow. #' @param use_parallel logical, if TRUE use parallel methods. #' @param customCoders additional coders to use for variable importance estimate. 
#' @param codeRestriction codes to restrict to for variable importance estimate. #' @param missingness_imputation function of signature f(values: numeric, weights: numeric), simple missing value imputer. #' @param imputation_map map from column names to functions of signature f(values: numeric, weights: numeric), simple missing value imputers. #' @return table of variable valuations #' #' #' @export #' value_variables_C <- function(dframe,varlist, outcomename,outcometarget, ..., weights=c(), minFraction=0.02,smFactor=0.0, rareCount=0,rareSig=1, collarProb=0.00, scale=FALSE,doCollar=FALSE, splitFunction=NULL,ncross=3, forceSplit = FALSE, catScaling = TRUE, verbose= FALSE, parallelCluster=NULL, use_parallel = TRUE, customCoders = list('c.PiecewiseV.num' = vtreat::solve_piecewisec, 'n.PiecewiseV.num' = vtreat::solve_piecewise, 'c.knearest.num' = vtreat::square_windowc, 'n.knearest.num' = vtreat::square_window), codeRestriction = c("PiecewiseV", "knearest", "clean", "isBAD", "catB", "catP"), missingness_imputation = NULL, imputation_map = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::value_variables_C") cfc <- mkCrossFrameCExperiment( dframe= dframe, varlist = varlist, outcomename= outcomename,outcometarget = outcometarget, ..., weights=weights, minFraction=minFraction,smFactor=smFactor, rareCount=rareCount,rareSig=rareSig, collarProb=collarProb, codeRestriction=codeRestriction, customCoders=customCoders, scale=scale,doCollar=doCollar, splitFunction=splitFunction,ncross=ncross, forceSplit = forceSplit, catScaling=catScaling, verbose= verbose, parallelCluster=parallelCluster, use_parallel = use_parallel, missingness_imputation = missingness_imputation, imputation_map=imputation_map) variable_values(cfc$treatments$scoreFrame) }
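
# Example sketch (not run as part of the package): value_variables_N() runs a
# cross-frame experiment and then rolls the per-derived-variable score frame up
# to one row per original variable (best rsq, Bonferroni-adjusted sig) via
# variable_values().  Assuming the vtreat package is loaded:
#
#   set.seed(2019)
#   N <- 100
#   d <- data.frame(x = sample(letters[1:5], N, replace = TRUE),
#                   z = rnorm(N))
#   d$y <- d$z + ifelse(d$x %in% c("a", "b"), 1, 0) + rnorm(N, sd = 0.1)
#   vals <- value_variables_N(d, c("x", "z"), "y", verbose = FALSE)
#   vals[order(vals$sig), , drop = FALSE]   # most useful original variables first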
/scratch/gouwar.j/cran-all/cranData/vtreat/R/variable_importance.R
# variable treatments type def: list { origvar, newvars, f(col,args), args, treatmentName, scales } can share orig var #' vtreat: A Statistically Sound 'data.frame' Processor/Conditioner #' #' A 'data.frame' processor/conditioner that prepares real-world data for predictive modeling in a statistically sound manner. #' 'vtreat' prepares variables so that data has fewer exceptional cases, making #' it easier to safely use models in production. Common problems 'vtreat' defends #' against: 'Inf', 'NA', too many categorical levels, rare categorical levels, and new #' categorical levels (levels seen during application, but not during training). #' 'vtreat::prepare' should be used as you would use 'model.matrix'. #' #' #'For more information: #' \itemize{ #' \item \code{vignette('vtreat', package='vtreat')} #' \item \code{vignette(package='vtreat')} #' \item Website: \url{https://github.com/WinVector/vtreat} } #' "_PACKAGE" #' @importFrom stats aggregate anova as.formula binomial chisq.test fisher.test glm lm lm.wfit pchisq pf quantile #' @importFrom utils packageVersion NULL #' #' Original variable name from a treatmentplan$treatment item. #' @param x vtreatment item. #' @seealso \code{\link{designTreatmentsC}} \code{\link{designTreatmentsN}} \code{\link{designTreatmentsZ}} #' @export #' vorig <- function(x) { x$origvar } #' #' New treated variable names from a treatmentplan$treatment item. #' @param x vtreatment item #' @seealso \code{\link{designTreatmentsC}} \code{\link{designTreatmentsN}} \code{\link{designTreatmentsZ}} #' @export vnames <- function(x) { x$newvars } #' #' Display treatment plan. #' @param x treatment plan #' @param ... additional args (to match general signature). #' @export format.vtreatment <- function(x, ...) { paste( 'vtreat \'',x$treatmentName, '\'(\'',x$origvar,'\'(',x$origType,',',x$origClass,')->', x$convertedColClass,'->\'', paste(x$newvars,collapse='\',\''), '\')',sep='') } #' @export as.character.vtreatment <- function (x, ...) { format(x, ...) } #' #' Print treatmentplan. #' @param x treatmentplan #' @param ... additional args (to match general signature). #' @seealso \code{\link{designTreatmentsC}}, \code{\link{designTreatmentsN}}, \code{\link{designTreatmentsZ}}, \code{\link{prepare.treatmentplan}} #' @export print.vtreatment <- function(x, ...) { print(format(x), ...) } #' @export format.treatmentplan <- function(x, ...) { sf <- x$scoreFrame cols <- c('origName', 'varName', 'code', 'rsq', 'sig', 'extraModelDegrees', 'recommended') cols <- intersect(cols, colnames(sf)) format(sf[ , cols, drop = FALSE]) } #' @export as.character.treatmentplan <- function (x, ...) { format(x, ...) } #' #' Print treatmentplan. #' @param x treatmentplan #' @param ... additional args (to match general signature). #' @seealso \code{\link{designTreatmentsC}}, \code{\link{designTreatmentsN}}, \code{\link{designTreatmentsZ}}, \code{\link{prepare.treatmentplan}} #' @export print.treatmentplan <- function(x, ...) { print(class(x)) print(format(x), ...) } # add in the recommendation column augment_score_frame <- function(score_frame) { n_treatment_types <- length(unique(score_frame$code)) code_counts <- table(score_frame$code) vcount <- code_counts[score_frame$code] score_frame$default_threshold <- 1/(n_treatment_types * vcount) score_frame$recommended <- score_frame$varMoves & (score_frame$sig < score_frame$default_threshold) score_frame } #' Build all treatments for a data frame to predict a categorical outcome. 
#' #' Function to design variable treatments for binary prediction of a #' categorical outcome. Data frame is assumed to have only atomic columns #' except for dates (which are converted to numeric). Note: re-encoding high cardinality #' categorical variables can introduce undesirable nested model bias, for such data consider #' using \code{\link{mkCrossFrameCExperiment}}. #' #' The main fields are mostly vectors with names (all with the same names in the same order): #' #' - vars : (character array without names) names of variables (in same order as names on the other diagnostic vectors) #' - varMoves : logical TRUE if the variable varied during hold out scoring, only variables that move will be in the treated frame #' - #' - sig : an estimate significance of effect #' #' See the vtreat vignette for a bit more detail and a worked example. #' #' Columns that do not vary are not passed through. #' #' Note: re-encoding high cardinality on training data can introduce nested model bias, consider using \code{mkCrossFrameCExperiment} instead. #' #' @param dframe Data frame to learn treatments from (training data), must have at least 1 row. #' @param varlist Names of columns to treat (effective variables). #' @param outcomename Name of column holding outcome variable. dframe[[outcomename]] must be only finite non-missing values. #' @param outcometarget Value/level of outcome to be considered "success", and there must be a cut such that dframe[[outcomename]]==outcometarget at least twice and dframe[[outcomename]]!=outcometarget at least twice. #' @param ... no additional arguments, declared to forced named binding of later arguments #' @param weights optional training weights for each row #' @param minFraction optional minimum frequency a categorical level must have to be converted to an indicator column. #' @param smFactor optional smoothing factor for impact coding models. #' @param rareCount optional integer, allow levels with this count or below to be pooled into a shared rare-level. Defaults to 0 or off. #' @param rareSig optional numeric, suppress levels from pooling at this significance value greater. Defaults to NULL or off. #' @param collarProb what fraction of the data (pseudo-probability) to collar data at if doCollar is set during \code{\link{prepare.treatmentplan}}. #' @param codeRestriction what types of variables to produce (character array of level codes, NULL means no restriction). #' @param customCoders map from code names to custom categorical variable encoding functions (please see \url{https://github.com/WinVector/vtreat/blob/main/extras/CustomLevelCoders.md}). #' @param splitFunction (optional) see vtreat::buildEvalSets . #' @param ncross optional scalar >=2 number of cross validation splits use in rescoring complex variables. #' @param forceSplit logical, if TRUE force cross-validated significance calculations on all variables. #' @param catScaling optional, if TRUE use glm() linkspace, if FALSE use lm() for scaling. #' @param verbose if TRUE print progress. #' @param parallelCluster (optional) a cluster object created by package parallel or package snow. #' @param use_parallel logical, if TRUE use parallel methods (when parallel cluster is set). #' @param missingness_imputation function of signature f(values: numeric, weights: numeric), simple missing value imputer. #' @param imputation_map map from column names to functions of signature f(values: numeric, weights: numeric), simple missing value imputers. 
#' @return treatment plan (for use with prepare)
#' @seealso \code{\link{prepare.treatmentplan}}, \code{\link{designTreatmentsN}}, \code{\link{designTreatmentsZ}}, \code{\link{mkCrossFrameCExperiment}}
#'
#' @examples
#'
#' dTrainC <- data.frame(x=c('a','a','a','b','b','b'),
#'    z=c(1,2,3,4,5,6),
#'    y=c(FALSE,FALSE,TRUE,FALSE,TRUE,TRUE))
#' dTestC <- data.frame(x=c('a','b','c',NA),
#'    z=c(10,20,30,NA))
#' treatmentsC <- designTreatmentsC(dTrainC,colnames(dTrainC),'y',TRUE)
#' dTestCTreated <- prepare(treatmentsC,dTestC,pruneSig=0.99)
#'
#' @export
designTreatmentsC <- function(dframe,varlist,
                              outcomename, outcometarget = TRUE,
                              ...,
                              weights=c(),
                              minFraction=0.02,smFactor=0.0,
                              rareCount=0,rareSig=NULL,
                              collarProb=0.00,
                              codeRestriction=NULL,
                              customCoders=NULL,
                              splitFunction=NULL,ncross=3,
                              forceSplit=FALSE,
                              catScaling=TRUE,
                              verbose=TRUE,
                              parallelCluster=NULL,
                              use_parallel= TRUE,
                              missingness_imputation = NULL, imputation_map = NULL) {
  wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::designTreatmentsC")
  .checkArgs(dframe=dframe,varlist=varlist,outcomename=outcomename)
  if(!(outcomename %in% colnames(dframe))) {
    stop("outcomename must be a column name of dframe")
  }
  if(any(is.na(dframe[[outcomename]]))) {
    stop("There are missing values in the outcome column, can not apply designTreatmentsC.")
  }
  zoY <- ifelse(dframe[[outcomename]]==outcometarget,1.0,0.0)
  if(min(zoY)>=max(zoY)) {
    stop("dframe[[outcomename]]==outcometarget must vary")
  }
  treatments <- .designTreatmentsX(
    dframe = dframe,
    varlist = varlist,
    outcomename = outcomename,
    zoY = zoY,
    zC = dframe[[outcomename]],
    zTarget = outcometarget,
    weights = weights,
    minFraction = minFraction,
    smFactor = smFactor,
    rareCount = rareCount,
    rareSig = rareSig,
    collarProb = collarProb,
    codeRestriction = codeRestriction,
    customCoders = customCoders,
    splitFunction = splitFunction,
    ncross = ncross,
    forceSplit = forceSplit,
    catScaling = catScaling,
    verbose = verbose,
    parallelCluster = parallelCluster,
    use_parallel = use_parallel,
    missingness_imputation = missingness_imputation,
    imputation_map = imputation_map)
  treatments$outcomeTarget <- outcometarget
  treatments$outcomeType <- 'Binary'
  treatments$fit_obj_id <- id_f(dframe)
  treatments$scoreFrame <- augment_score_frame(treatments$scoreFrame)
  treatments
}

#' Build all treatments for a data frame to predict a numeric outcome.
#'
#' Function to design variable treatments for prediction of a
#' numeric outcome. Data frame is assumed to have only atomic columns
#' except for dates (which are converted to numeric).
#' Note: each column is processed independently of all others.
#' Note: re-encoding high cardinality categorical variables on training data
#' can introduce undesirable nested model bias, for such data consider
#' using \code{\link{mkCrossFrameNExperiment}}.
#'
#' The main fields are mostly vectors with names (all with the same names in the same order):
#'
#' - vars : (character array without names) names of variables (in same order as names on the other diagnostic vectors)
#' - varMoves : logical TRUE if the variable varied during hold out scoring, only variables that move will be in the treated frame
#' - sig : an estimated significance of effect
#'
#' See the vtreat vignette for a bit more detail and a worked example.
#'
#' Columns that do not vary are not passed through.
#'
#' @param dframe Data frame to learn treatments from (training data), must have at least 1 row.
#' @param varlist Names of columns to treat (effective variables).
#' @param outcomename Name of column holding outcome variable.
dframe[[outcomename]] must be only finite non-missing values and there must be a cut such that dframe[[outcomename]] is both above the cut at least twice and below the cut at least twice. #' @param ... no additional arguments, declared to forced named binding of later arguments #' @param weights optional training weights for each row #' @param minFraction optional minimum frequency a categorical level must have to be converted to an indicator column. #' @param smFactor optional smoothing factor for impact coding models. #' @param rareCount optional integer, allow levels with this count or below to be pooled into a shared rare-level. Defaults to 0 or off. #' @param rareSig optional numeric, suppress levels from pooling at this significance value greater. Defaults to NULL or off. #' @param collarProb what fraction of the data (pseudo-probability) to collar data at if doCollar is set during \code{\link{prepare.treatmentplan}}. #' @param codeRestriction what types of variables to produce (character array of level codes, NULL means no restriction). #' @param customCoders map from code names to custom categorical variable encoding functions (please see \url{https://github.com/WinVector/vtreat/blob/main/extras/CustomLevelCoders.md}). #' @param splitFunction (optional) see vtreat::buildEvalSets . #' @param ncross optional scalar >=2 number of cross validation splits use in rescoring complex variables. #' @param forceSplit logical, if TRUE force cross-validated significance calculations on all variables. #' @param verbose if TRUE print progress. #' @param parallelCluster (optional) a cluster object created by package parallel or package snow. #' @param use_parallel logical, if TRUE use parallel methods (when parallel cluster is set). #' @param missingness_imputation function of signature f(values: numeric, weights: numeric), simple missing value imputer. #' @param imputation_map map from column names to functions of signature f(values: numeric, weights: numeric), simple missing value imputers. 
#' @return treatment plan (for use with prepare) #' @seealso \code{\link{prepare.treatmentplan}}, \code{\link{designTreatmentsC}}, \code{\link{designTreatmentsZ}}, \code{\link{mkCrossFrameNExperiment}} #' @examples #' #' dTrainN <- data.frame(x=c('a','a','a','a','b','b','b'), #' z=c(1,2,3,4,5,6,7),y=c(0,0,0,1,0,1,1)) #' dTestN <- data.frame(x=c('a','b','c',NA), #' z=c(10,20,30,NA)) #' treatmentsN = designTreatmentsN(dTrainN,colnames(dTrainN),'y') #' dTestNTreated <- prepare(treatmentsN,dTestN,pruneSig=0.99) #' #' @export designTreatmentsN <- function(dframe,varlist,outcomename, ..., weights=c(), minFraction=0.02,smFactor=0.0, rareCount=0,rareSig=NULL, collarProb=0.00, codeRestriction=NULL, customCoders=NULL, splitFunction=NULL,ncross=3, forceSplit=FALSE, verbose=TRUE, parallelCluster=NULL, use_parallel= TRUE, missingness_imputation = NULL, imputation_map = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::designTreatmentsN") .checkArgs(dframe=dframe,varlist=varlist,outcomename=outcomename) if(!(outcomename %in% colnames(dframe))) { stop("outcomename must be a column name of dframe") } if(any(is.na(dframe[[outcomename]]))) { stop("There are missing values in the outcome column, can not apply designTreatmentsN.") } ycol <- dframe[[outcomename]] if(min(ycol)>=max(ycol)) { stop("dframe[[outcomename]] must vary") } catScaling=FALSE treatments <- .designTreatmentsX( dframe = dframe, varlist = varlist, outcomename = outcomename, zoY = ycol, zC = c(), zTarget = c(), weights = weights, minFraction = minFraction, smFactor = smFactor, rareCount = rareCount, rareSig = rareSig, collarProb = collarProb, codeRestriction = codeRestriction, customCoders = customCoders, splitFunction = splitFunction, ncross = ncross, forceSplit = forceSplit, catScaling = catScaling, verbose = verbose, parallelCluster = parallelCluster, use_parallel = use_parallel, missingness_imputation = missingness_imputation, imputation_map = imputation_map) treatments$outcomeType <- 'Numeric' treatments$fit_obj_id <- id_f(dframe) treatments$scoreFrame <- augment_score_frame(treatments$scoreFrame) treatments } #' Design variable treatments with no outcome variable. #' #' Data frame is assumed to have only atomic columns #' except for dates (which are converted to numeric). #' Note: each column is processed independently of all others. #' #' The main fields are mostly vectors with names (all with the same names in the same order): #' #' - vars : (character array without names) names of variables (in same order as names on the other diagnostic vectors) #' - varMoves : logical TRUE if the variable varied during hold out scoring, only variables that move will be in the treated frame #' #' See the vtreat vignette for a bit more detail and a worked example. #' #' Columns that do not vary are not passed through. #' #' @param dframe Data frame to learn treatments from (training data), must have at least 1 row. #' @param varlist Names of columns to treat (effective variables). #' @param ... no additional arguments, declared to forced named binding of later arguments #' @param weights optional training weights for each row #' @param minFraction optional minimum frequency a categorical level must have to be converted to an indicator column. #' @param rareCount optional integer, allow levels with this count or below to be pooled into a shared rare-level. Defaults to 0 or off. #' @param collarProb what fraction of the data (pseudo-probability) to collar data at if doCollar is set during \code{\link{prepare.treatmentplan}}. 
#' @param codeRestriction what types of variables to produce (character array of level codes, NULL means no restriction). #' @param customCoders map from code names to custom categorical variable encoding functions (please see \url{https://github.com/WinVector/vtreat/blob/main/extras/CustomLevelCoders.md}). #' @param verbose if TRUE print progress. #' @param parallelCluster (optional) a cluster object created by package parallel or package snow. #' @param use_parallel logical, if TRUE use parallel methods (if parallel cluster is set). #' @param missingness_imputation function of signature f(values: numeric, weights: numeric), simple missing value imputer. #' @param imputation_map map from column names to functions of signature f(values: numeric, weights: numeric), simple missing value imputers. #' @return treatment plan (for use with prepare) #' @seealso \code{\link{prepare.treatmentplan}}, \code{\link{designTreatmentsC}}, \code{\link{designTreatmentsN}} #' @examples #' #' dTrainZ <- data.frame(x=c('a','a','a','a','b','b',NA,'e','e'), #' z=c(1,2,3,4,5,6,7,NA,9)) #' dTestZ <- data.frame(x=c('a','x','c',NA), #' z=c(10,20,30,NA)) #' treatmentsZ = designTreatmentsZ(dTrainZ, colnames(dTrainZ), #' rareCount=0) #' dTrainZTreated <- prepare(treatmentsZ, dTrainZ) #' dTestZTreated <- prepare(treatmentsZ, dTestZ) #' #' @export designTreatmentsZ <- function(dframe,varlist, ..., minFraction=0.0, weights=c(), rareCount=0, collarProb=0.0, codeRestriction=NULL, customCoders=NULL, verbose=TRUE, parallelCluster=NULL, use_parallel= TRUE, missingness_imputation = NULL, imputation_map = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::designTreatmentsZ") # build a name disjoint from column names outcomename <- setdiff(paste('VTREATTEMPCOL', seq_len(ncol(dframe) + length(varlist) + 1), sep='_'), c(colnames(dframe),varlist))[[1]] catScaling <- FALSE dframe[[outcomename]] <- 0 .checkArgs(dframe=dframe,varlist=varlist,outcomename=outcomename) ycol <- dframe[[outcomename]] treatments <- .designTreatmentsX( dframe = dframe, varlist = varlist, outcomename = outcomename, zoY = ycol, zC = c(), zTarget = c(), weights = weights, minFraction = minFraction, smFactor = 0, rareCount = rareCount, rareSig = 1, collarProb = collarProb, codeRestriction = codeRestriction, customCoders = customCoders, splitFunction = NULL, ncross = 3, forceSplit = FALSE, catScaling = catScaling, verbose = verbose, parallelCluster = parallelCluster, use_parallel = use_parallel, missingness_imputation = missingness_imputation, imputation_map = imputation_map) treatments$outcomeType <- 'None' treatments$meanY <- NA treatments } #' Track unique character values for variables. #' #' Builds lists of observed unique character values of varlist variables from the data frame. #' #' @param dframe Data frame to learn treatments from (training data), must have at least 1 row. #' @param varlist Names of columns to treat (effective variables). #' @return named list of values seen. 
#'
#' @seealso \code{\link{prepare.treatmentplan}}, \code{\link{novel_value_summary}}
#'
#' @examples
#'
#' set.seed(23525)
#' zip <- c(NA, paste('z', 1:100, sep = "_"))
#' N <- 500
#' d <- data.frame(zip = sample(zip, N, replace=TRUE),
#'                 zip2 = sample(zip, N, replace=TRUE),
#'                 y = runif(N))
#' dSample <- d[1:300, , drop = FALSE]
#' tplan <- designTreatmentsN(dSample,
#'                            c("zip", "zip2"), "y",
#'                            verbose = FALSE)
#' trackedValues <- track_values(dSample, c("zip", "zip2"))
#' # don't normally want to catch warnings,
#' # doing it here as this is an example
#' # and must not have unhandled warnings.
#' tryCatch(
#'   prepare(tplan, d, trackedValues = trackedValues),
#'   warning = function(w) { cat(paste(w, collapse = "\n")) })
#'
#' @export
#'
track_values <- function(dframe, varlist) {
  observed_values <- lapply(
    varlist,
    function(vi) {
      unique(as.character(dframe[[vi]]))
    })
  names(observed_values) <- varlist
  observed_values
}

#' Report new/novel appearances of character values.
#'
#' @param dframe Data frame to inspect.
#' @param trackedValues optional named list mapping variables to known values, allows warnings upon novel level appearances (see \code{\link{track_values}})
#' @return frame of novel occurrences
#'
#' @seealso \code{\link{prepare.treatmentplan}}, \code{\link{track_values}}
#'
#' @examples
#'
#' set.seed(23525)
#' zip <- c(NA, paste('z', 1:10, sep = "_"))
#' N <- 10
#' d <- data.frame(zip = sample(zip, N, replace=TRUE),
#'                 zip2 = sample(zip, N, replace=TRUE),
#'                 y = runif(N))
#' dSample <- d[1:5, , drop = FALSE]
#' trackedValues <- track_values(dSample, c("zip", "zip2"))
#' novel_value_summary(d, trackedValues)
#'
#' @export
#'
novel_value_summary <- function(dframe, trackedValues) {
  novel <- data.frame(row_index = 1, column = "", value = "",
                      stringsAsFactors = FALSE)
  novel <- novel[c(), , drop = FALSE]
  novels <- lapply(
    sort(intersect(names(trackedValues), colnames(dframe))),
    function(v) {
      newstuff <- !(dframe[[v]] %in% trackedValues[[v]])
      if(sum(newstuff)>0) {
        idxs <- which(newstuff)
        vals <- as.character(dframe[[v]][idxs])
        return(data.frame(row_index = idxs, column = v, value = vals,
                          stringsAsFactors = FALSE))
      }
      NULL
    })
  novels <- c(list(novel), novels)
  novels <- Filter(Negate(is.null), novels) # drop empty per-column results
  .rbindListOfFrames(novels)
}

#' Apply treatments and restrict to useful variables.
#'
#' @param treatmentplan Plan built by designTreatmentsC() or designTreatmentsN()
#' @param dframe Data frame to be treated
#' @param ... no additional arguments, declared to force named binding of later arguments
#'
#' @seealso \code{\link{prepare.treatmentplan}}, \code{\link{prepare.simple_plan}}, \code{\link{prepare.multinomial_plan}}
#'
#' @export
prepare <- function(treatmentplan, dframe, ...) {
  UseMethod("prepare")
}

#' Apply treatments and restrict to useful variables.
#'
#' Use a treatment plan to prepare a data frame for analysis. The
#' resulting frame will have new effective variables that are numeric
#' and free of NaN/NA. If the outcome column is present it will be copied over.
#' The intent is that these frames are compatible with more machine learning
#' techniques, and avoid a lot of corner cases (NA, NaN, novel levels, too many levels).
#' Note: each column is processed independently of all others. Also copies over outcome if present.
#' Note: treatmentplans are not meant for long-term storage, a warning is issued if the version of
#' vtreat that produced the plan differs from the version running \code{prepare()}.
#' #' @param treatmentplan Plan built by designTreantmentsC() or designTreatmentsN() #' @param dframe Data frame to be treated #' @param ... no additional arguments, declared to forced named binding of later arguments #' @param pruneSig suppress variables with significance above this level #' @param scale optional if TRUE replace numeric variables with single variable model regressions ("move to outcome-scale"). These have mean zero and (for variables with significant less than 1) slope 1 when regressed (lm for regression problems/glm for classification problems) against outcome. #' @param doCollar optional if TRUE collar numeric variables by cutting off after a tail-probability specified by collarProb during treatment design. #' @param varRestriction optional list of treated variable names to restrict to #' @param codeRestriction optional list of treated variable codes to restrict to #' @param trackedValues optional named list mapping variables to know values, allows warnings upon novel level appearances (see \code{\link{track_values}}) #' @param extracols extra columns to copy. #' @param parallelCluster (optional) a cluster object created by package parallel or package snow. #' @param use_parallel logical, if TRUE use parallel methods. #' @param check_for_duplicate_frames logical, if TRUE check if we called prepare on same data.frame as design step. #' @return treated data frame (all columns numeric- without NA, NaN) #' #' @seealso \code{\link{mkCrossFrameCExperiment}}, \code{\link{mkCrossFrameNExperiment}}, \code{\link{designTreatmentsC}} \code{\link{designTreatmentsN}} \code{\link{designTreatmentsZ}}, \code{\link{prepare}} #' #' @examples #' #' # categorical example #' set.seed(23525) #' #' # we set up our raw training and application data #' dTrainC <- data.frame( #' x = c('a', 'a', 'a', 'b', 'b', NA, NA), #' z = c(1, 2, 3, 4, NA, 6, NA), #' y = c(FALSE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE)) #' #' dTestC <- data.frame( #' x = c('a', 'b', 'c', NA), #' z = c(10, 20, 30, NA)) #' #' # we perform a vtreat cross frame experiment #' # and unpack the results into treatmentsC #' # and dTrainCTreated #' unpack[ #' treatmentsC = treatments, #' dTrainCTreated = crossFrame #' ] <- mkCrossFrameCExperiment( #' dframe = dTrainC, #' varlist = setdiff(colnames(dTrainC), 'y'), #' outcomename = 'y', #' outcometarget = TRUE, #' verbose = FALSE) #' #' # the treatments include a score frame relating new #' # derived variables to original columns #' treatmentsC$scoreFrame[, c('origName', 'varName', 'code', 'rsq', 'sig', 'extraModelDegrees')] %.>% #' print(.) #' #' # the treated frame is a "cross frame" which #' # is a transform of the training data built #' # as if the treatment were learned on a different #' # disjoint training set to avoid nested model #' # bias and over-fit. #' dTrainCTreated %.>% #' head(.) %.>% #' print(.) #' #' # Any future application data is prepared with #' # the prepare method. #' dTestCTreated <- prepare(treatmentsC, dTestC, pruneSig=NULL) #' #' dTestCTreated %.>% #' head(.) %.>% #' print(.) 
#' #' @export prepare.treatmentplan <- function(treatmentplan, dframe, ..., pruneSig= NULL, scale= FALSE, doCollar= FALSE, varRestriction= NULL, codeRestriction= NULL, trackedValues= NULL, extracols= NULL, parallelCluster= NULL, use_parallel= TRUE, check_for_duplicate_frames= TRUE) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::prepare") .checkArgs1(dframe=dframe) if(!('treatmentplan' %in% class(treatmentplan))) { stop("treatmentplan must be of class treatmentplan") } vtreatVersion <- packageVersion('vtreat') if(is.null(treatmentplan$vtreatVersion) || (treatmentplan$vtreatVersion!=vtreatVersion)) { warning(paste('treatments designed with vtreat version', treatmentplan$vtreatVersion, 'and preparing data.frame with vtreat version', vtreatVersion)) } if(!is.data.frame(dframe)) { stop("dframe must be a data frame") } if(nrow(dframe)<=0) { stop("no rows") } old_fit_obj_id <- treatmentplan$fit_obj_id if(check_for_duplicate_frames && (!is.null(old_fit_obj_id))) { fit_obj_id <- id_f(dframe) if(!is.null(fit_obj_id)) { if(fit_obj_id == old_fit_obj_id) { warning("possibly called prepare() on same data frame as designTreatments*()/mkCrossFrame*Experiment(), this can lead to over-fit. To avoid this, please use mkCrossFrame*Experiment$crossFrame.") } } } if(!is.null(trackedValues)) { for(v in sort(intersect(names(trackedValues), colnames(dframe)))) { new_values <- setdiff(dframe[[v]], trackedValues[[v]]) if(length(new_values)>0) { if(length(new_values)>5) { vsample <- paste(new_values[1:5], collapse = ", ") vsample <- paste0(vsample, ", ...") } else { vsample <- paste(new_values, collapse = ", ") } wmsg <- paste0("vtreat::prepare: column \"", v, "\" has ", length(new_values), " previously unseen values:", vsample, " .") warning(wmsg) } } } if(treatmentplan$outcomeType=='None') { pruneSig <- NULL } useable <- treatmentplan$scoreFrame$varMoves if(!is.null(pruneSig)) { useable <- useable & (treatmentplan$scoreFrame$sig<=pruneSig) } useableVars <- treatmentplan$scoreFrame$varName[useable] if(!is.null(varRestriction)) { useableVars <- intersect(useableVars,varRestriction) } if(!is.null(codeRestriction)) { hasSelectedCode <- treatmentplan$scoreFrame$code %in% codeRestriction useableVars <- intersect(useableVars, treatmentplan$scoreFrame$varName[hasSelectedCode]) } if(length(useableVars)<=0) { stop('no useable vars') } vars_we_warned_on <- list() for(ti in treatmentplan$treatments) { if(length(intersect(ti$newvars,useableVars))>0) { newType <- paste(typeof(dframe[[ti$origvar]]), collapse = " ") newClass <- paste(class(dframe[[ti$origvar]]), collapse = " ") if((ti$origType!=newType) || (ti$origClass!=newClass)) { if(is.null(vars_we_warned_on[[ti$origvar]])) { warning(paste('variable',ti$origvar,'expected type/class', ti$origType,ti$origClass, 'saw ',newType,newClass)) vars_we_warned_on[ti$origvar] <- 1 } } } } treated <- .vtreatList(treatmentplan$treatments,dframe,useableVars,scale,doCollar, parallelCluster = parallelCluster, use_parallel = use_parallel) # copy outcome and extracols over when present for(ci in unique(c(treatmentplan$outcomename, extracols))) { if(ci %in% colnames(dframe)) { treated[[ci]] <- dframe[[ci]] } } treated } #' Run categorical cross-frame experiment. #' #' Builds a \code{\link{designTreatmentsC}} treatment plan and a data frame prepared #' from \code{dframe} that is "cross" in the sense each row is treated using a treatment #' plan built from a subset of dframe disjoint from the given row. 
#' The goal is to try to and supply a method of breaking nested model bias other than splitting #' into calibration, training, test sets. #' #' #' @param dframe Data frame to learn treatments from (training data), must have at least 1 row. #' @param varlist Names of columns to treat (effective variables). #' @param outcomename Name of column holding outcome variable. dframe[[outcomename]] must be only finite non-missing values. #' @param outcometarget Value/level of outcome to be considered "success", and there must be a cut such that dframe[[outcomename]]==outcometarget at least twice and dframe[[outcomename]]!=outcometarget at least twice. #' @param ... no additional arguments, declared to forced named binding of later arguments #' @param weights optional training weights for each row #' @param minFraction optional minimum frequency a categorical level must have to be converted to an indicator column. #' @param smFactor optional smoothing factor for impact coding models. #' @param rareCount optional integer, allow levels with this count or below to be pooled into a shared rare-level. Defaults to 0 or off. #' @param rareSig optional numeric, suppress levels from pooling at this significance value greater. Defaults to NULL or off. #' @param collarProb what fraction of the data (pseudo-probability) to collar data at if doCollar is set during \code{\link{prepare.treatmentplan}}. #' @param codeRestriction what types of variables to produce (character array of level codes, NULL means no restriction). #' @param customCoders map from code names to custom categorical variable encoding functions (please see \url{https://github.com/WinVector/vtreat/blob/main/extras/CustomLevelCoders.md}). #' @param scale optional if TRUE replace numeric variables with regression ("move to outcome-scale"). #' @param doCollar optional if TRUE collar numeric variables by cutting off after a tail-probability specified by collarProb during treatment design. #' @param splitFunction (optional) see vtreat::buildEvalSets . #' @param ncross optional scalar>=2 number of cross-validation rounds to design. #' @param forceSplit logical, if TRUE force cross-validated significance calculations on all variables. #' @param catScaling optional, if TRUE use glm() linkspace, if FALSE use lm() for scaling. #' @param verbose if TRUE print progress. #' @param parallelCluster (optional) a cluster object created by package parallel or package snow. #' @param use_parallel logical, if TRUE use parallel methods. #' @param missingness_imputation function of signature f(values: numeric, weights: numeric), simple missing value imputer. #' @param imputation_map map from column names to functions of signature f(values: numeric, weights: numeric), simple missing value imputers. 
#' @return named list containing: treatments, crossFrame, crossWeights, method, and evalSets #' #' @seealso \code{\link{designTreatmentsC}}, \code{\link{designTreatmentsN}}, \code{\link{prepare.treatmentplan}} #' #' @examples #' #' # categorical example #' set.seed(23525) #' #' # we set up our raw training and application data #' dTrainC <- data.frame( #' x = c('a', 'a', 'a', 'b', 'b', NA, NA), #' z = c(1, 2, 3, 4, NA, 6, NA), #' y = c(FALSE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE)) #' #' dTestC <- data.frame( #' x = c('a', 'b', 'c', NA), #' z = c(10, 20, 30, NA)) #' #' # we perform a vtreat cross frame experiment #' # and unpack the results into treatmentsC #' # and dTrainCTreated #' unpack[ #' treatmentsC = treatments, #' dTrainCTreated = crossFrame #' ] <- mkCrossFrameCExperiment( #' dframe = dTrainC, #' varlist = setdiff(colnames(dTrainC), 'y'), #' outcomename = 'y', #' outcometarget = TRUE, #' verbose = FALSE) #' #' # the treatments include a score frame relating new #' # derived variables to original columns #' treatmentsC$scoreFrame[, c('origName', 'varName', 'code', 'rsq', 'sig', 'extraModelDegrees')] %.>% #' print(.) #' #' # the treated frame is a "cross frame" which #' # is a transform of the training data built #' # as if the treatment were learned on a different #' # disjoint training set to avoid nested model #' # bias and over-fit. #' dTrainCTreated %.>% #' head(.) %.>% #' print(.) #' #' # Any future application data is prepared with #' # the prepare method. #' dTestCTreated <- prepare(treatmentsC, dTestC, pruneSig=NULL) #' #' dTestCTreated %.>% #' head(.) %.>% #' print(.) #' #' @export mkCrossFrameCExperiment <- function(dframe,varlist, outcomename,outcometarget, ..., weights=c(), minFraction=0.02,smFactor=0.0, rareCount=0,rareSig=1, collarProb=0.00, codeRestriction=NULL, customCoders=NULL, scale=FALSE,doCollar=FALSE, splitFunction=NULL,ncross=3, forceSplit = FALSE, catScaling=TRUE, verbose= TRUE, parallelCluster=NULL, use_parallel = TRUE, missingness_imputation = NULL, imputation_map = NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::mkCrossFrameCExperiment") .checkArgs(dframe=dframe,varlist=varlist,outcomename=outcomename) if(!is.data.frame(dframe)) { stop("dframe must be a data frame") } if(collarProb>=0.5) { stop("collarProb must be < 0.5") } if(nrow(dframe)<1) { stop("most have rows") } if(!(outcomename %in% colnames(dframe))) { stop("outcomename must be a column name of dframe") } if(any(is.na(dframe[[outcomename]]))) { stop("There are missing values in the outcome column, can not run mkCrossFrameCExperiment.") } if(is.null(weights)) { weights <- rep(1.0,nrow(dframe)) } if(verbose) { print(paste("vtreat", packageVersion("vtreat"), "start initial treatment design", date())) } treatments <- designTreatmentsC(dframe,varlist,outcomename,outcometarget, weights=weights, minFraction=minFraction,smFactor=smFactor, rareCount=rareCount,rareSig=rareSig, collarProb=collarProb, codeRestriction=codeRestriction, customCoders=customCoders, splitFunction=splitFunction,ncross=ncross, forceSplit = forceSplit, catScaling=catScaling, verbose=FALSE, parallelCluster=parallelCluster, use_parallel = use_parallel, missingness_imputation = missingness_imputation, imputation_map = imputation_map) zC <- dframe[[outcomename]] zoY <- ifelse(zC==outcometarget,1,0) newVarsS <- treatments$scoreFrame$varName[(treatments$scoreFrame$varMoves) & (treatments$scoreFrame$sig<1)] if(verbose) { print(paste(" start cross frame work", date())) } crossDat <- .mkCrossFrame( dframe = dframe, 
referenceTreatments = treatments, varlist = varlist, newVarsS = newVarsS, outcomename = outcomename, zoY = zoY, zC = zC, zTarget = outcometarget, weights = weights, minFraction = minFraction, smFactor = smFactor, rareCount = rareCount, rareSig = rareSig, collarProb = collarProb, codeRestriction = codeRestriction, customCoders = customCoders, scale = scale, doCollar = doCollar, splitFunction = splitFunction, nSplits = ncross, catScaling = catScaling, parallelCluster = parallelCluster, use_parallel = use_parallel, verbose = FALSE, missingness_imputation = missingness_imputation, imputation_map = imputation_map) crossFrame <- crossDat$crossFrame newVarsS <- intersect(newVarsS,colnames(crossFrame)) goodVars <- newVarsS[vapply(newVarsS, function(v) { min(crossFrame[[v]])<max(crossFrame[[v]]) }, logical(1))] # Make sure scoreFrame and crossFrame are consistent in variables mentioned treatments$scoreFrame <- treatments$scoreFrame[treatments$scoreFrame$varName %in% goodVars,] treatments$scoreFrame <- augment_score_frame(treatments$scoreFrame) crossFrame <- crossFrame[,colnames(crossFrame) %in% c(goodVars,outcomename),drop=FALSE] if(verbose) { print(paste(" vtreat::mkCrossFrameCExperiment done", date())) } res <- list(treatments=treatments, crossFrame=crossFrame, crossWeights=crossDat$crossWeights, method=crossDat$method, evalSets=crossDat$evalSets) class(res) <- "vtreat_cross_frame_experiment" res } #' Run a numeric cross frame experiment. #' #' Builds a \code{\link{designTreatmentsN}} treatment plan and a data frame prepared #' from \code{dframe} that is "cross" in the sense each row is treated using a treatment #' plan built from a subset of dframe disjoint from the given row. #' The goal is to try to and supply a method of breaking nested model bias other than splitting #' into calibration, training, test sets. #' #' @param dframe Data frame to learn treatments from (training data), must have at least 1 row. #' @param varlist Names of columns to treat (effective variables). #' @param outcomename Name of column holding outcome variable. dframe[[outcomename]] must be only finite non-missing values and there must be a cut such that dframe[[outcomename]] is both above the cut at least twice and below the cut at least twice. #' @param ... no additional arguments, declared to forced named binding of later arguments #' @param weights optional training weights for each row #' @param minFraction optional minimum frequency a categorical level must have to be converted to an indicator column. #' @param smFactor optional smoothing factor for impact coding models. #' @param rareCount optional integer, allow levels with this count or below to be pooled into a shared rare-level. Defaults to 0 or off. #' @param rareSig optional numeric, suppress levels from pooling at this significance value greater. Defaults to NULL or off. #' @param collarProb what fraction of the data (pseudo-probability) to collar data at if doCollar is set during \code{\link{prepare.treatmentplan}}. #' @param codeRestriction what types of variables to produce (character array of level codes, NULL means no restriction). #' @param customCoders map from code names to custom categorical variable encoding functions (please see \url{https://github.com/WinVector/vtreat/blob/main/extras/CustomLevelCoders.md}). #' @param scale optional if TRUE replace numeric variables with regression ("move to outcome-scale"). 
#' @param doCollar optional if TRUE collar numeric variables by cutting off after a tail-probability specified by collarProb during treatment design. #' @param splitFunction (optional) see vtreat::buildEvalSets . #' @param ncross optional scalar>=2 number of cross-validation rounds to design. #' @param forceSplit logical, if TRUE force cross-validated significance calculations on all variables. #' @param verbose if TRUE print progress. #' @param parallelCluster (optional) a cluster object created by package parallel or package snow. #' @param use_parallel logical, if TRUE use parallel methods. #' @param missingness_imputation function of signature f(values: numeric, weights: numeric), simple missing value imputer. #' @param imputation_map map from column names to functions of signature f(values: numeric, weights: numeric), simple missing value imputers. #' @return named list containing: treatments, crossFrame, crossWeights, method, and evalSets #' #' @seealso \code{\link{designTreatmentsC}}, \code{\link{designTreatmentsN}}, \code{\link{prepare.treatmentplan}} #' #' @examples #' #' # numeric example #' set.seed(23525) #' #' # we set up our raw training and application data #' dTrainN <- data.frame( #' x = c('a', 'a', 'a', 'a', 'b', 'b', NA, NA), #' z = c(1, 2, 3, 4, 5, NA, 7, NA), #' y = c(0, 0, 0, 1, 0, 1, 1, 1)) #' #' dTestN <- data.frame( #' x = c('a', 'b', 'c', NA), #' z = c(10, 20, 30, NA)) #' #' # we perform a vtreat cross frame experiment #' # and unpack the results into treatmentsN #' # and dTrainNTreated #' unpack[ #' treatmentsN = treatments, #' dTrainNTreated = crossFrame #' ] <- mkCrossFrameNExperiment( #' dframe = dTrainN, #' varlist = setdiff(colnames(dTrainN), 'y'), #' outcomename = 'y', #' verbose = FALSE) #' #' # the treatments include a score frame relating new #' # derived variables to original columns #' treatmentsN$scoreFrame[, c('origName', 'varName', 'code', 'rsq', 'sig', 'extraModelDegrees')] %.>% #' print(.) #' #' # the treated frame is a "cross frame" which #' # is a transform of the training data built #' # as if the treatment were learned on a different #' # disjoint training set to avoid nested model #' # bias and over-fit. #' dTrainNTreated %.>% #' head(.) %.>% #' print(.) #' #' # Any future application data is prepared with #' # the prepare method. #' dTestNTreated <- prepare(treatmentsN, dTestN, pruneSig=NULL) #' #' dTestNTreated %.>% #' head(.) %.>% #' print(.) 
#' #' @export #' mkCrossFrameNExperiment <- function(dframe,varlist,outcomename, ..., weights=c(), minFraction=0.02,smFactor=0.0, rareCount=0,rareSig=1, collarProb=0.00, codeRestriction=NULL, customCoders=NULL, scale=FALSE,doCollar=FALSE, splitFunction=NULL,ncross=3, forceSplit=FALSE, verbose= TRUE, parallelCluster=NULL, use_parallel = TRUE, missingness_imputation = NULL, imputation_map=NULL) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat::mkCrossFrameNExperiment") .checkArgs(dframe=dframe,varlist=varlist,outcomename=outcomename) if(!is.data.frame(dframe)) { stop("dframe must be a data frame") } if(collarProb>=0.5) { stop("collarProb must be < 0.5") } if(nrow(dframe)<1) { stop("most have rows") } if(!(outcomename %in% colnames(dframe))) { stop("outcomename must be a column name of dframe") } if(any(is.na(dframe[[outcomename]]))) { stop("There are missing values in the outcome column, can not run mkCrossFrameNExperiment.") } catScaling=FALSE if(is.null(weights)) { weights <- rep(1.0,nrow(dframe)) } if(verbose) { print(paste("vtreat", packageVersion("vtreat"), "start initial treatment design", date())) } treatments <- designTreatmentsN(dframe,varlist,outcomename, weights=weights, minFraction=minFraction,smFactor=smFactor, rareCount=rareCount,rareSig=rareSig, collarProb=collarProb, codeRestriction = codeRestriction, customCoders = customCoders, splitFunction=splitFunction,ncross=ncross, forceSplit = forceSplit, verbose=FALSE, parallelCluster=parallelCluster, use_parallel = use_parallel, missingness_imputation = missingness_imputation, imputation_map=imputation_map) zC <- NULL zoY <- dframe[[outcomename]] newVarsS <- treatments$scoreFrame$varName[(treatments$scoreFrame$varMoves) & (treatments$scoreFrame$sig<1)] if(verbose) { print(paste(" start cross frame work", date())) } crossDat <- .mkCrossFrame( dframe = dframe, referenceTreatments = treatments, varlist = varlist, newVarsS = newVarsS, outcomename = outcomename, zoY = zoY, zC = zC, zTarget = NULL, weights = weights, minFraction = minFraction, smFactor = smFactor, rareCount = rareCount, rareSig = rareSig, collarProb = collarProb, codeRestriction = codeRestriction, customCoders = customCoders, scale = scale, doCollar = doCollar, splitFunction = splitFunction, nSplits = ncross, catScaling = catScaling, parallelCluster = parallelCluster, use_parallel = use_parallel, verbose = FALSE, missingness_imputation = missingness_imputation, imputation_map=imputation_map) crossFrame <- crossDat$crossFrame newVarsS <- intersect(newVarsS,colnames(crossFrame)) goodVars <- newVarsS[vapply(newVarsS, function(v) { min(crossFrame[[v]])<max(crossFrame[[v]]) }, logical(1))] # Make sure scoreFrame and crossFrame are consistent in variables mentioned treatments$scoreFrame <- treatments$scoreFrame[treatments$scoreFrame$varName %in% goodVars,] treatments$scoreFrame <- augment_score_frame(treatments$scoreFrame) crossFrame <- crossFrame[,colnames(crossFrame) %in% c(goodVars,outcomename),drop=FALSE] if(verbose) { print(paste(" vtreat::mkCrossFrameNExperiment done", date())) } res <- list(treatments=treatments, crossFrame=crossFrame, crossWeights=crossDat$crossWeights, method=crossDat$method, evalSets=crossDat$evalSets) class(res) <- "vtreat_cross_frame_experiment" res } #' @export format.vtreat_cross_frame_experiment <- function(x, ...) { format(x$treatments) } #' @export print.vtreat_cross_frame_experiment <- function(x, ...) { print(format(x)) invisible(x) }
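# Illustrative sketch (not part of the package; guarded with `if (FALSE)` so it
# never executes at load time). It shows how the prepare() arguments documented
# above (pruneSig, varRestriction, codeRestriction) might be combined; the data,
# code list, and significance threshold below are hypothetical.
if (FALSE) {
  dTrainC <- data.frame(x = c('a', 'a', 'a', 'b', 'b', NA, NA),
                        z = c(1, 2, 3, 4, NA, 6, NA),
                        y = c(FALSE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE))
  dTestC <- data.frame(x = c('a', 'b', 'c', NA),
                       z = c(10, 20, 30, NA))
  plan <- designTreatmentsC(dTrainC, c('x', 'z'), 'y', TRUE, verbose = FALSE)
  # keep only selected derived-variable types and prune weak variables
  dTestTreated <- prepare(plan, dTestC,
                          pruneSig = 0.99,
                          codeRestriction = c('clean', 'isBAD', 'lev'))
}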
/scratch/gouwar.j/cran-all/cranData/vtreat/R/vtreat.R
.vtreatA <- function(vtreat,xcol,scale,doCollar) { dout <- as.data.frame(vtreat$f(xcol,vtreat$args,doCollar), stringsAsFactors=FALSE) colnames(dout) <- vtreat$newvars if(scale) { for(j in seq_along(vtreat$scales$a)) { dout[[j]] <- dout[[j]]*vtreat$scales$a[[j]] + vtreat$scales$b[[j]] } } dout } mkVtreatListWorker <- function(scale,doCollar) { force(scale) force(doCollar) function(tpair) { ti <- tpair$ti xcolOrig <- tpair$xcolOrig nRows <- length(xcolOrig) xcolClean <- .cleanColumn(xcolOrig,nRows) if(is.null(xcolClean)) { return(paste('column',ti$origvar, 'is not a type/class vtreat can work with (', paste(class(xcolOrig), collapse = " "),')')) } if(!is.null(ti$convertedColClass)) { curColClass <- paste(class(xcolClean), collapse = " ") if(curColClass!=ti$convertedColClass) { return(paste('column',ti$origvar,'expected to convert to ', ti$convertedColClass,'saw', paste(class(xcolOrig), collapse = " "), curColClass)) } } .vtreatA(ti,xcolClean,scale,doCollar) } } # colNames a subset of treated variable names .vtreatList <- function(treatments, dframe, colNames, scale, doCollar, ..., parallelCluster = NULL, use_parallel = TRUE) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat:::.vtreatList") resCounts <- vapply(treatments,function(tij) { length(intersect(colNames,tij$newvars)) },numeric(1)) toProcess <- treatments[resCounts>0] toProcessP <- lapply(toProcess, function(ti) { list(ti=ti,xcolOrig=dframe[[ti$origvar]]) }) procWorker <- mkVtreatListWorker(scale,doCollar) gs <- plapply(toProcessP, procWorker, parallelCluster = parallelCluster, use_parallel = use_parallel) # pass back first error for(gi in gs) { if(is.character(gi)) { stop(gi) } } # unpack sub-frames into a list of columns cols <- vector('list',length(colNames)) names(cols) <- colNames for(ii in seq_len(length(toProcess))) { ti <- toProcess[[ii]] gi <- gs[[ii]] wants <- intersect(colNames,ti$newvars) for(vi in wants) { cols[[vi]] <- gi[[vi]] } } cols <- Filter(Negate(is.null),cols) # corner case, make sure we get the number of rows correct if(length(cols)<=0) { d <- data.frame(x=numeric(nrow(dframe)),stringsAsFactors=FALSE) d[['x']] <- NULL return(d) } as.data.frame(cols,stringsAsFactors=FALSE) } # pre-transform categorical column # convert it to character, convert NA to "NA" .preProcCat <- function(col, levRestriction) { origna <- is.na(col) # don't use blank as a key and get into defendable level space col <- paste('x',as.character(col)) col[origna] <- 'NA' if(!is.null(levRestriction)) { # map rare and novel levels to a new special level "rare" rares <- !(col %in% levRestriction$safeLevs) col[rares] <- 'rare' # remove any levels not eligable for the above treatment zaps <- col %in% levRestriction$supressedLevs col[zaps] <- 'zap' } col } .mkAOVWorkder <- function(yNumeric,vcol,weights) { force(yNumeric) force(vcol) force(weights) function(level) { # lm call here is okay, as (vcol==level) only has two possible values m <- stats::lm(yNumeric~(vcol==level),weights=weights) stats::anova(m)[1,'Pr(>F)'] } } # determine non-rare and significant levels for numeric/regression target # regression mode .safeLevelsR <- function(vcolin, yNumeric, weights, minFraction, rareCount, rareSig, ..., parallelCluster = NULL, use_parallel = TRUE) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat:::.safeLevelsR") vcol <- .preProcCat(vcolin,c()) # first: keep only levels with enough weighted counts counts <- tapply(weights,vcol,sum) totMass <- sum(counts) safeLevs <- names(counts)[counts>rareCount] supressedLevs <- character(0) # re-code with 
rare symbol eligable vcol <- .preProcCat(vcolin,list(safeLevs=safeLevs,supressedLevs=supressedLevs)) counts <- tapply(weights,vcol,sum) totMass <- sum(counts) safeLevs <- names(counts) if((length(safeLevs)>0)&&(!is.null(rareSig))&&(rareSig<1)) { # second: keep only levels that look significantly different than grand mean aovCalc <-.mkAOVWorkder(yNumeric,vcol,weights) sigs <- as.numeric(plapply(safeLevs,aovCalc, parallelCluster = parallelCluster, use_parallel = use_parallel)) supressedLevs <- safeLevs[sigs>rareSig] } tracked <- names(counts)[counts/totMass>=minFraction] # levels eligable for indicators tracked <- setdiff(tracked, supressedLevs) tracked <- setdiff(tracked,'zap') # don't let zap group code list(safeLevs = safeLevs, supressedLevs = supressedLevs, tracked = tracked) } .mkCSigWorker <- function(zC,zTarget,vcol,weights) { force(zC) force(zTarget) force(vcol) force(weights) function(level) { #tab <- table(vcol==level,zC==zTarget) # not weighted tab <- .wTable(vcol==level,zC==zTarget,weights) if((nrow(tab)<=1)||(ncol(tab)<=1)) { return(1.0) } # tests not quite interchangable, but roughly give us the sorting we want. if(min(tab)<=10) { stats::fisher.test(tab)$p.value } else { tryCatch( stats::chisq.test(tab)$p.value, warning=function(w) { stats::fisher.test(tab)$p.value } ) } } } # determine non-rare and significant levels for numeric/regression target # classification mode .safeLevelsC <- function(vcolin, zC,zTarget, weights, minFraction, rareCount, rareSig, ..., parallelCluster = NULL, use_parallel = TRUE) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat:::.safeLevelsC") vcol <- .preProcCat(vcolin,c()) # first: keep only levels with enough weighted counts counts <- tapply(weights,vcol,sum) totMass <- sum(counts) safeLevs <- names(counts)[counts>rareCount] supressedLevs <- character(0) # re-code with rare symbol eligable vcol <- .preProcCat(vcolin,list(safeLevs=safeLevs,supressedLevs=supressedLevs)) counts <- tapply(weights,vcol,sum) totMass <- sum(counts) safeLevs <- names(counts) if((length(safeLevs)>0)&&(!is.null(rareSig))&&(rareSig<1)) { # second: keep only levels that look significantly different than grand mean sigCalc <- .mkCSigWorker(zC,zTarget,vcol,weights) sigs <- as.numeric(plapply(safeLevs,sigCalc, parallelCluster = parallelCluster, use_parallel = use_parallel)) supressedLevs <- safeLevs[sigs>rareSig] } tracked <- names(counts)[counts/totMass>=minFraction] # levels eligable for indicators tracked <- setdiff(tracked, supressedLevs) tracked <- setdiff(tracked,'zap') # don't let zap group code list(safeLevs = safeLevs, supressedLevs = supressedLevs, tracked = tracked) } .varDesignerW <- function(..., argv, zoY, zC, zTarget, weights, minFraction, smFactor, rareCount, rareSig, collarProb, codeRestriction, customCoders, catScaling, verbose, nRows, yMoves, codeRestictionWasNULL, missingness_imputation, imputation_map) { wrapr::stop_if_dot_args(substitute(list(...)), ".varDesignerW") v <- argv$v vcolOrig <- argv$vcolOrig vcol <- argv$vcol hasRange <- argv$hasRange levRestriction <- argv$levRestriction treatments <- list() acceptTreatment <- function(ti) { if(!is.null(ti)) { ti$origType <- typeof(vcolOrig) ti$origClass <- paste(class(vcolOrig), collapse = " ") ti$convertedColClass <- paste(class(vcol), collapse = " ") treatments[[length(treatments)+1]] <<- ti # Deliberate side-effect } } if(is.null(vcol)) { warning(paste('column',v, 'is not a type/class/value vtreat can work with (', paste(class(vcolOrig), collapse= " "),')')) } else { colclass <- paste(class(vcol), 
collapse = " ") if(.has.range(vcol)) { ti <- NULL if((colclass=='numeric') || (colclass=='integer')) { for(customCode in names(customCoders)) { coder <- customCoders[[customCode]] customeCodeV <- base::strsplit(customCode, '.', fixed=TRUE)[[1]] codeType <- customeCodeV[[1]] codeName <- customeCodeV[[2]] if(codeRestictionWasNULL || (codeName %in% codeRestriction)) { codeSeq <- NULL if(length(customeCodeV)>2) { codeSeq <- customeCodeV[seq(3, length(customeCodeV))] } if('num' %in% codeSeq) { if((codeType=='n')==is.null(zC)) { ti <- makeCustomCoderNum(customCode = codeName, coder = coder, codeSeq =codeSeq, v = v, vcolin = vcol, zoY = zoY, zC = zC, zTarget = zTarget, weights = weights, catScaling = catScaling) acceptTreatment(ti) } } } } ti = NULL if(codeRestictionWasNULL || ('clean' %in% codeRestriction)) { ti <- .mkPassThrough(origVarName = v, xcol = vcol, ycol = zoY, zC = zC, zTarget = zTarget, weights = weights, collarProb = collarProb, catScaling = catScaling, missingness_imputation = missingness_imputation, imputation_map = imputation_map) acceptTreatment(ti) } if(codeRestictionWasNULL || ('isBAD' %in% codeRestriction)) { ti <- .mkIsBAD(v,vcol,zoY,zC,zTarget,weights,catScaling) acceptTreatment(ti) } } else if((colclass=='character') || (colclass=='factor')) { # expect character or factor here for(customCode in names(customCoders)) { coder <- customCoders[[customCode]] customeCodeV <- base::strsplit(customCode, '.', fixed=TRUE)[[1]] codeType <- customeCodeV[[1]] codeName <- customeCodeV[[2]] if(codeRestictionWasNULL || (codeName %in% codeRestriction)) { codeSeq <- NULL if(length(customeCodeV)>2) { codeSeq <- customeCodeV[seq(3, length(customeCodeV))] } if(!('num' %in% codeSeq)) { if((codeType=='n')==is.null(zC)) { ti <- makeCustomCoderCat(customCode = codeName, coder = coder, codeSeq = codeSeq, v = v, vcolin = vcol, zoY = zoY, zC = zC, zTarget = zTarget, weights = weights, catScaling = catScaling) acceptTreatment(ti) } } } } ti = NULL if(length(levRestriction$safeLevs)>0) { if(codeRestictionWasNULL || ('lev' %in% codeRestriction)) { ti <- .mkCatInd_a(v,vcol,zoY,zC,zTarget,minFraction,levRestriction,weights,catScaling) if( (!is.null(ti)) && (length(ti$newvars)>0) ) { acceptTreatment(ti) } } if(is.null(ti)||(length(unique(vcol))>2)) { # make an impactmodel if catInd construction failed or there are more than 2 levels if(codeRestictionWasNULL || ('catP' %in% codeRestriction)) { ti <- .mkCatP(v,vcol,zoY,zC,zTarget,levRestriction,weights,catScaling) acceptTreatment(ti) } if(yMoves) { if(!is.null(zC)) { # in categorical mode if(codeRestictionWasNULL || ('catB' %in% codeRestriction)) { ti <- .mkCatBayes(v,vcol,zC,zTarget,smFactor,levRestriction,weights,catScaling) acceptTreatment(ti) } } if(is.null(zC)) { # is numeric mode if(codeRestictionWasNULL || ('catN' %in% codeRestriction)) { ti <- .mkCatNum(v,vcol,zoY,smFactor,levRestriction,weights) acceptTreatment(ti) } if(codeRestictionWasNULL || ('catD' %in% codeRestriction)) { ti <- .mkCatD(v,vcol,zoY,smFactor,levRestriction,weights) acceptTreatment(ti) } } } } } } else { warning(paste('variable',v,'has unexpected class:',colclass, ', skipping, (want one of numeric,integer,character,factor)')) } } } treatments } # design a treatment for a single variable # bind a bunch of variables, so we pass exactly what we need to sub-processes .mkVarDesigner <- function(..., zoY, zC,zTarget, weights, minFraction,smFactor,rareCount,rareSig, collarProb, codeRestriction, customCoders, catScaling, verbose, missingness_imputation, imputation_map) { 
wrapr::stop_if_dot_args(substitute(list(...)), ".mkVarDesigner") force(zoY) force(zC) force(zTarget) force(weights) force(minFraction) force(smFactor) force(rareCount) force(rareSig) force(collarProb) force(codeRestriction) force(customCoders) force(catScaling) force(verbose) force(missingness_imputation) force(imputation_map) nRows = length(zoY) yMoves <- .has.range.cn(zoY) # NULL is an alias for "don't restrict" codeRestictionWasNULL <- length(codeRestriction)<=0 function(argv) { .varDesignerW(argv = argv, zoY = zoY, zC = zC, zTarget = zTarget, weights = weights, minFraction = minFraction, smFactor = smFactor, rareCount = rareCount, rareSig = rareSig, collarProb = collarProb, codeRestriction = codeRestriction, customCoders = customCoders, catScaling = catScaling, verbose = verbose, nRows = nRows, yMoves = yMoves, codeRestictionWasNULL = codeRestictionWasNULL, missingness_imputation = missingness_imputation, imputation_map = imputation_map) } } .neatenScoreFrame <- function(sFrame) { # clean up sFrame a bit if(nrow(sFrame)>0) { sFrame[['sig']][.is.bad(sFrame[['sig']])] <- 1 } sFrame } .scoreCol <- function(varName,nxcol,zoY,zC,zTarget,weights, extraModelDegrees=0) { rsq <- 0.0 sig <- 1.0 catTarget <- !is.null(zC) varMoves <- .has.range.cn(nxcol) if(varMoves) { yMoves <- .has.range.cn(zoY) if(varMoves && yMoves) { lstat <- linScore(varName, nxcol, zoY, weights, extraModelDegrees) sig <- lstat$sig rsq <- lstat$rsq if(catTarget) { cstat <- catScore(varName, nxcol, zC,zTarget, weights, extraModelDegrees) sig <- cstat$sig rsq <- cstat$rsq } } } scoreFrameij <- data.frame(varName=varName, varMoves=varMoves, rsq=rsq, sig=sig, stringsAsFactors = FALSE) .neatenScoreFrame(scoreFrameij) } # used in initial scoring of variables .mkScoreVarWorker <- function(nRows,zoY,zC,zTarget,weights) { force(nRows) force(zoY) force(zC) force(zTarget) force(weights) function(tpair) { ti <- tpair$ti dfcol <- tpair$dfcol origName <- vorig(ti) xcolClean <- .cleanColumn(dfcol,nRows) fi <- .vtreatA(ti,xcolClean,FALSE,FALSE) scoreFrame <- lapply(seq_len(length(vnames(ti))), function(nvi) { nv <- vnames(ti)[[nvi]] .scoreCol(nv,fi[[nv]],zoY,zC,zTarget,weights, ti$extraModelDegrees) }) scoreFrame <- Filter(Negate(is.null),scoreFrame) if(length(scoreFrame)<=0) { return(NULL) } sFrame <- .rbindListOfFrames(scoreFrame) sFrame$needsSplit <- ti$needsSplit sFrame$extraModelDegrees <- ti$extraModelDegrees sFrame$origName <- origName sFrame$code <- ti$treatmentCode sFrame } } # used in re-scoring needsSplit variables on simulated out of sample # (cross) frame .mkScoreColWorker <- function(zoY,zC,zTarget,weights) { force(zoY) force(zC) force(zTarget) force(weights) function(nvpair) { nv <- nvpair$nv dfc <- nvpair$dfc scoreFrameij <- .scoreCol(nv,dfc,zoY,zC,zTarget,weights) scoreFrameij } } # build all treatments for a data frame to predict a given outcome .designTreatmentsXS <- function( ..., dframe, varlist, outcomename, zoY, zC, zTarget, weights, minFraction, smFactor, rareCount, rareSig, collarProb, codeRestriction, customCoders, justWantTreatments, catScaling, verbose = FALSE, parallelCluster = NULL, use_parallel = TRUE, missingness_imputation, imputation_map) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat:::.designTreatmentsXS") if(verbose) { print(paste("designing treatments",date())) } nRows = length(zoY) # In building the workList don't transform any variables (such as making # row selections), only select columns out of frame. This prevents # data growth prior to doing the work. 
workList <- lapply(varlist, function(v) { levRestriction <- c() hasRange <- FALSE vcolOrig <- dframe[[v]] vcol <- NULL if(length(vcolOrig)!=nRows) { warning(paste("wrong column length",v)) } else { vcol <- .cleanColumn(vcolOrig,nRows) if(.has.range(vcol)) { hasRange <- TRUE colclass <- paste(class(vcol), collapse = " ") if((colclass=='character') || (colclass=='factor')) { # expect character or factor here if(!is.null(zC)) { # in categorical mode levRestriction <- .safeLevelsC(vcol, zC, zTarget, weights, minFraction, rareCount, rareSig, parallelCluster = parallelCluster, use_parallel = use_parallel) } else { levRestriction <- .safeLevelsR(vcol, zoY, weights, minFraction, rareCount,rareSig, parallelCluster = parallelCluster, use_parallel = use_parallel) } } } } list(v=v, vcolOrig=vcolOrig, vcol=vcol, hasRange=hasRange, levRestriction=levRestriction )}) workList <- Filter(function(wi) {wi$hasRange}, workList) if(verbose) { print(paste(" have initial level statistics", date())) } # build the treatments we will return to the user worker <- .mkVarDesigner(zoY = zoY, zC = zC, zTarget = zTarget, weights = weights, minFraction = minFraction, smFactor = smFactor, rareCount = rareCount, rareSig = rareSig, collarProb = collarProb, codeRestriction = codeRestriction, customCoders = customCoders, catScaling = catScaling, verbose = verbose, missingness_imputation = missingness_imputation, imputation_map = imputation_map) treatments <- plapply(workList, worker, parallelCluster = parallelCluster, use_parallel = use_parallel) treatments <- unlist(treatments, recursive=FALSE) treatments <- Filter(Negate(is.null), treatments) # parallelize on levels for cat ind for(i in seq_len(length(treatments))) { ti <- treatments[[i]] if(ti$treatmentCode == 'lev') { ti <- .mkCatInd_scales(ti, zoY, zC, zTarget, weights, catScaling, parallelCluster = parallelCluster, use_parallel = use_parallel) treatments[[i]] <- ti; } } if(justWantTreatments) { return(treatments) } if(length(treatments)<=0) { stop('no usable vars') } # score variables if(verbose) { print(paste(" scoring treatments",date())) } is_ind <- vapply(treatments, function(ti) { ti$treatmentCode == "lev"}, logical(1)) treatments_ind <- treatments[is_ind] treatments_non_ind <- treatments[!is_ind] sFrame <- NULL if(length(treatments_non_ind)>0) { scrW <- .mkScoreVarWorker(nrow(dframe),zoY,zC,zTarget,weights) tP <- lapply(treatments_non_ind, function(ti) { list(ti=ti, dfcol=dframe[[vorig(ti)]]) }) sFrames <- plapply(tP, scrW, parallelCluster = parallelCluster, use_parallel = use_parallel) sFrames <- Filter(Negate(is.null), sFrames) sFrame <- .rbindListOfFrames(sFrames) } # Finer grain parallelism on indicators (more like .mkScoreColWorker ) if(length(treatments_ind)>0) { swkr <- .mkScoreColWorker(zoY, zC, zTarget, weights) sframel <- vector(length(treatments_ind), mode = "list") for(ii in seq_len(length(treatments_ind))) { ti <- treatments_ind[[ii]] origName <- ti$origvar dfcol <- dframe[[origName]] xcolClean <- .cleanColumn(dfcol, nRows) fi <- .vtreatA(ti, xcolClean, FALSE, FALSE) newVarsSP <- lapply(ti$newvars, function(nv) { list(nv = nv, dfc = fi[[nv]]) }) sfij <- plapply(newVarsSP, swkr, parallelCluster = parallelCluster, use_parallel = use_parallel) sfij <- .rbindListOfFrames(sfij) sfij$needsSplit <- FALSE sfij$extraModelDegrees <- 0 sfij$origName <- origName sfij$code <- ti$treatmentCode sframel[[ii]] <- sfij } sframel <- .rbindListOfFrames(sframel) sframel <- Filter(Negate(is.null), sframel) siFrame <- .rbindListOfFrames(sframel) sFrame <- rbind(sFrame, 
siFrame) } plan <- list(treatments=treatments, scoreFrame=sFrame, outcomename=outcomename) class(plan) <- 'treatmentplan' if(verbose) { print(paste("have treatment plan",date())) } plan } # build all treatments for a data frame to predict a given outcome .designTreatmentsX <- function( ..., dframe, varlist, outcomename, zoY, zC, zTarget, weights, minFraction, smFactor, rareCount, rareSig, collarProb, codeRestriction, customCoders, splitFunction, ncross, forceSplit, catScaling, verbose = FALSE, parallelCluster = NULL, use_parallel = TRUE, missingness_imputation, imputation_map) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat:::.designTreatmentsX") if(!is.data.frame(dframe)) { stop("dframe must be a data frame") } if(collarProb>=0.5) { stop("collarProb must be < 0.5") } if(nrow(dframe)<1) { stop("most have rows") } if(verbose) { print(paste("vtreat", packageVersion("vtreat"), "inspecting inputs", date())) } varlist <- setdiff(unique(varlist),outcomename) varlist <- intersect(varlist,colnames(dframe)) varlist <- as.character(varlist) if(is.null(weights)) { weights <- rep(1.0,nrow(dframe)) } else { if(!is.numeric(weights)) { stop("weights need to be numeric") } if(length(weights)!=nrow(dframe)) { stop("must have same number of weights as data frame rows") } goodPosns <- ifelse(.is.bad(weights),FALSE,weights>0.0) dframe <- dframe[goodPosns,,drop=FALSE] zoY <- zoY[goodPosns] weights <- weights[goodPosns] if(!is.null(zC)) { zC <- zC[goodPosns] } # the select goodPosns is duplicating the data frame, so it does cost # memory } if(nrow(dframe)<=0) { stop("no rows") } if(min(weights)<0) { stop("negative weights") } if(sum(weights)<=0) { stop("no non-zero weighted rows") } if(sum(.is.bad(zoY))>0) { stop("outcome variable had NAs") } if(rareCount<0) { stop("rarecount must not be negative") } treatments <- .designTreatmentsXS( dframe = dframe, varlist = varlist, outcomename = outcomename, zoY = zoY, zC = zC, zTarget = zTarget, weights = weights, minFraction = minFraction, smFactor = smFactor, rareCount = rareCount, rareSig = rareSig, collarProb = collarProb, codeRestriction = codeRestriction, customCoders = customCoders, justWantTreatments = FALSE, catScaling = catScaling, verbose = verbose, parallelCluster = parallelCluster, use_parallel = use_parallel, missingness_imputation = missingness_imputation, imputation_map = imputation_map) treatments$scoreFrame <- treatments$scoreFrame[treatments$scoreFrame$varMoves,] if(forceSplit) { treatments$scoreFrame$needsSplit <- TRUE } treatments$vtreatVersion <- packageVersion('vtreat') treatments$outcomeType <- 'notmarked' treatments$outcomeTarget <- outcomename treatments$meanY <- NA yMoves <- .has.range.cn(zoY) crossMethod = 'Notcross' if(yMoves) { splitVars <- unique(treatments$scoreFrame$origName[treatments$scoreFrame$needsSplit]) if(length(splitVars)>0) { newVarsS <- treatments$scoreFrame$varName[treatments$scoreFrame$needsSplit & treatments$scoreFrame$varMoves] if(verbose) { print(paste("rescoring complex variables",date())) } crossData <- .mkCrossFrame( dframe = dframe, referenceTreatments = treatments, varlist = splitVars, newVarsS = newVarsS, outcomename = outcomename, zoY = zoY, zC = zC, zTarget = zTarget, weights = weights, minFraction = minFraction, smFactor = smFactor, rareCount = rareCount, rareSig = rareSig, collarProb = collarProb, codeRestriction = codeRestriction, customCoders = customCoders, scale = FALSE, doCollar = FALSE, splitFunction = splitFunction, nSplits = ncross, catScaling = catScaling, parallelCluster = parallelCluster, 
use_parallel = use_parallel, verbose = FALSE, missingness_imputation = missingness_imputation, imputation_map = imputation_map) crossFrame <- crossData$crossFrame crossWeights <- crossData$crossWeights crossMethod <- crossData$method # score this frame if(is.null(zC)) { zoYS = crossFrame[[outcomename]] zCS = NULL } else { zCS = crossFrame[[outcomename]]==zTarget zoYS = ifelse(zCS,1,0) } swkr <- .mkScoreColWorker(zoYS,zCS,TRUE,crossWeights) newVarsSP <- lapply(newVarsS, function(nv) { list(nv=nv,dfc=crossFrame[[nv]]) }) sframe <- plapply(newVarsSP,swkr, parallelCluster = parallelCluster, use_parallel = use_parallel) sframe <- Filter(Negate(is.null),sframe) sframe <- .rbindListOfFrames(sframe) # overlay these results into treatments$scoreFrame nukeCols <- intersect(colnames(treatments$scoreFrame), c('sig','rsq')) for(v in newVarsS) { for(n in nukeCols) { if(v %in% sframe$varName) { treatments$scoreFrame[[n]][treatments$scoreFrame$varName==v] <- sframe[[n]][sframe==v] } else { treatments$scoreFrame[[n]][treatments$scoreFrame$varName==v] <- NA } } } # clean up sFrame a bit treatments$scoreFrame <- .neatenScoreFrame(treatments$scoreFrame) if(verbose) { print(paste("done rescoring complex variables",date())) } } } treatments$splitmethod <- crossMethod treatments$meanY <- .wmean(zoY,weights) treatments } .checkArgs1 <- function(dframe, ...) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat checkargs") if(missing(dframe)||(!is.data.frame(dframe))||(nrow(dframe)<0)||(ncol(dframe)<=0)) { stop("dframe must be a non-empty data frame") } } .checkArgs <- function(dframe, varlist, outcomename, ...) { wrapr::stop_if_dot_args(substitute(list(...)), "vtreat checkargs") if(missing(dframe)||(!is.data.frame(dframe))|| (nrow(dframe)<0)||(ncol(dframe)<=0)) { stop("dframe must be a non-empty data frame") } if(missing(varlist)) { stop("required argument varlist missing") } if((!is.character(varlist))||(length(varlist)<1)) { stop("varlist must be a non-empty character vector") } if(length(varlist)!=length(unique(varlist))) { stop("duplicate variable name in varlist") } # designTreatmentsZ calls this, so outcomename may not be in dframe if(missing(outcomename)|| (!is.character(outcomename))||(length(outcomename)!=1)) { stop("outcomename must be a length 1 character vector") } varlist <- setdiff(varlist, outcomename) varlist <- intersect(varlist, colnames(dframe)) if(length(varlist)<1) { stop("varlist must include non-outcome column names") } if(sum(colnames(dframe) %in% varlist)!=length(varlist)) { stop("ambigous (duplicate) column name in data frame") } }
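# Illustrative sketch (not part of the package; guarded with `if (FALSE)`).
# It mirrors, in plain base R, the categorical pre-processing idea used above:
# values are prefixed to avoid empty keys, NA becomes an explicit "NA" level,
# and levels outside the "safe" set are pooled into "rare". The safeLevs vector
# below is hypothetical.
if (FALSE) {
  col <- c('a', 'b', NA, 'never_seen_before')
  safeLevs <- c('x a', 'x b', 'NA')
  v <- paste('x', as.character(col))
  v[is.na(col)] <- 'NA'
  v[!(v %in% safeLevs)] <- 'rare'
  v  # "x a" "x b" "NA" "rare"
}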
/scratch/gouwar.j/cran-all/cranData/vtreat/R/vtreatImpl.R
# pipes for vtreat #' @export apply_right.treatmentplan <- function(pipe_left_arg, pipe_right_arg, pipe_environment, left_arg_name, pipe_string, right_arg_name) { prepare(pipe_right_arg, pipe_left_arg) } #' @export apply_right.simple_plan <- function(pipe_left_arg, pipe_right_arg, pipe_environment, left_arg_name, pipe_string, right_arg_name) { prepare(pipe_right_arg, pipe_left_arg) } #' @export apply_right.multinomial_plan <- function(pipe_left_arg, pipe_right_arg, pipe_environment, left_arg_name, pipe_string, right_arg_name) { prepare(pipe_right_arg, pipe_left_arg) }
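# Illustrative sketch (not part of the package; guarded with `if (FALSE)`).
# These apply_right methods let a treatment plan sit on the right-hand side of
# wrapr's dot pipe, so piping a data.frame into a plan applies prepare().
# dTestC and treatmentsC below are hypothetical objects from earlier examples.
if (FALSE) {
  library(wrapr)
  treated_a <- dTestC %.>% treatmentsC
  treated_b <- prepare(treatmentsC, dTestC)  # equivalent direct call
}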
/scratch/gouwar.j/cran-all/cranData/vtreat/R/vtreat_pipes.R
#' @importFrom wrapr apply_left #' @export NULL #' @importFrom wrapr apply_right #' @export NULL
/scratch/gouwar.j/cran-all/cranData/vtreat/R/wrapr_exports.R
.onAttach <- function(libname, pkgname) { vtreat_default_options <- list( vtreat.use_data.table_binding = TRUE, vtreat.use_clean_suffix = FALSE, vtreat.allow_parallel = TRUE, vtreat.check_for_nested_model_bias = TRUE ) op <- options() toset <- setdiff(names(vtreat_default_options), names(op)) if(length(toset)>0) { options(vtreat_default_options[toset]) } invisible() }
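# Illustrative sketch (not part of the package; guarded with `if (FALSE)`).
# The defaults above are only filled in when the options are not already set,
# so users can inspect or override them; the override shown is hypothetical.
if (FALSE) {
  getOption("vtreat.allow_parallel")
  options(vtreat.allow_parallel = FALSE)
  getOption("vtreat.use_clean_suffix")
}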
/scratch/gouwar.j/cran-all/cranData/vtreat/R/zzz.R
## ----libs--------------------------------------------------------------------- library("vtreat") ## ----mkex--------------------------------------------------------------------- # create example data set.seed(326346) sym_bonuses <- rnorm(3) names(sym_bonuses) <- c("a", "b", "c") sym_bonuses3 <- rnorm(3) names(sym_bonuses3) <- as.character(seq_len(length(sym_bonuses3))) n_row <- 1000 d <- data.frame( x1 = rnorm(n_row), x2 = sample(names(sym_bonuses), n_row, replace = TRUE), x3 = sample(names(sym_bonuses3), n_row, replace = TRUE), y = "NoInfo", stringsAsFactors = FALSE) d$y[sym_bonuses[d$x2] > pmax(d$x1, sym_bonuses3[d$x3], runif(n_row))] <- "Large1" d$y[sym_bonuses3[d$x3] > pmax(sym_bonuses[d$x2], d$x1, runif(n_row))] <- "Large2" knitr::kable(head(d)) ## ----tdef--------------------------------------------------------------------- # define problem vars <- c("x1", "x2", "x3") y_name <- "y" # build the multi-class cross frame and treatments cfe_m <- mkCrossFrameMExperiment(d, vars, y_name) ## ----crossframe--------------------------------------------------------------- # look at the data we would train models on str(cfe_m$cross_frame) ## ----treatment_plan----------------------------------------------------------- # pretend original data is new data to be treated # NA out top row to show processing for(vi in vars) { d[[vi]][[1]] <- NA } str(prepare(cfe_m$treat_m, d)) ## ----varimp------------------------------------------------------------------- knitr::kable( cfe_m$score_frame[, c("varName", "rsq", "sig", "outcome_level"), drop = FALSE]) ## ----varagg------------------------------------------------------------------- tapply(cfe_m$score_frame$rsq, cfe_m$score_frame$origName, max) tapply(cfe_m$score_frame$sig, cfe_m$score_frame$origName, min)
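## ----varscreen-sketch---------------------------------------------------------
# illustrative sketch (not from the original vignette): a simple per-variable
# screen using the score frame; the significance threshold is hypothetical
sig_cut <- 0.01
good_vars <- unique(cfe_m$score_frame$origName[cfe_m$score_frame$sig < sig_cut])
print(good_vars)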
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/MultiClassVtreat.R
--- title: "Multi Class vtreat" author: "John Mount" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Multi Class vtreat} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- [`vtreat`](https://github.com/WinVector/vtreat) can now effectively prepare data for multi-class classification or multinomial modeling. The two functions needed ([`mkCrossFrameMExperiment()`](https://winvector.github.io/vtreat/reference/mkCrossFrameMExperiment.html) and the `S3` method [`prepare.multinomial_plan()`](https://winvector.github.io/vtreat/reference/prepare.multinomial_plan.html)) are now part of `vtreat`. Let's work a specific example: trying to model multi-class `y` as a function of `x1` and `x2`. ```{r libs} library("vtreat") ``` ```{r mkex} # create example data set.seed(326346) sym_bonuses <- rnorm(3) names(sym_bonuses) <- c("a", "b", "c") sym_bonuses3 <- rnorm(3) names(sym_bonuses3) <- as.character(seq_len(length(sym_bonuses3))) n_row <- 1000 d <- data.frame( x1 = rnorm(n_row), x2 = sample(names(sym_bonuses), n_row, replace = TRUE), x3 = sample(names(sym_bonuses3), n_row, replace = TRUE), y = "NoInfo", stringsAsFactors = FALSE) d$y[sym_bonuses[d$x2] > pmax(d$x1, sym_bonuses3[d$x3], runif(n_row))] <- "Large1" d$y[sym_bonuses3[d$x3] > pmax(sym_bonuses[d$x2], d$x1, runif(n_row))] <- "Large2" knitr::kable(head(d)) ``` We define the problem controls and use `mkCrossFrameMExperiment()` to build both a cross-frame and a treatment plan. ```{r tdef} # define problem vars <- c("x1", "x2", "x3") y_name <- "y" # build the multi-class cross frame and treatments cfe_m <- mkCrossFrameMExperiment(d, vars, y_name) ``` The cross-frame is the entity safest for training on (unless you have made separate data split for the treatment design step). It uses cross-validation to reduce nested model bias. Some notes on this issue are available [here](https://winvector.github.io/vtreat/articles/vtreatCrossFrames.html), and [here](https://github.com/WinVector/vtreat/blob/master/extras/vtreat.pdf). ```{r crossframe} # look at the data we would train models on str(cfe_m$cross_frame) ``` `prepare()` can apply the designed treatments to new data. Here we are simulating new data by re-using our design data. ```{r treatment_plan} # pretend original data is new data to be treated # NA out top row to show processing for(vi in vars) { d[[vi]][[1]] <- NA } str(prepare(cfe_m$treat_m, d)) ``` Obvious issues include: computing variable importance, and blow up and co-dependency of produced columns. These we leave for the next modeling step to deal with (this is our philosophy with most issues that involve joint distributions of variables). We also have per-outcome variable importance. ```{r varimp} knitr::kable( cfe_m$score_frame[, c("varName", "rsq", "sig", "outcome_level"), drop = FALSE]) ``` One can relate these per-target and per-treatment performances back to original columns by aggregating. ```{r varagg} tapply(cfe_m$score_frame$rsq, cfe_m$score_frame$origName, max) tapply(cfe_m$score_frame$sig, cfe_m$score_frame$origName, min) ```
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/MultiClassVtreat.Rmd
## ----savefile----------------------------------------------------------------- library("vtreat") dTrainC <- data.frame(x=c('a','a','a','b','b',NA,NA), z=c(1,2,3,4,NA,6,NA), y=c(FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE)) treatmentsC <- designTreatmentsC(dTrainC, colnames(dTrainC), 'y', TRUE, verbose= FALSE) fileName = paste0(tempfile(c('vtreatPlan')), '.RDS') saveRDS(treatmentsC,fileName) rm(list=c('treatmentsC')) ## ----loadfile----------------------------------------------------------------- library("vtreat") treatmentsC <- readRDS(fileName) dTestC <- data.frame(x=c('a','b','c',NA),z=c(10,20,30,NA)) dTestCTreated <- prepare(treatmentsC, dTestC, pruneSig= c()) # clean up unlink(fileName) ## ----dbsave------------------------------------------------------------------- con <- NULL if (requireNamespace('RSQLite', quietly = TRUE) && requireNamespace('DBI', quietly = TRUE)) { library("RSQLite") con <- dbConnect(drv=SQLite(), dbname=":memory:") # create table dbExecute(con, 'create table if not exists treatments (key varchar(200) primary key, treatment blob)') # wrap data df <- data.frame( key='treatmentsC', treatment = I(list(serialize(treatmentsC, NULL)))) # Clear any previous version dbExecute(con, "delete from treatments where key='treatmentsC'") # insert treatmentplan # depreciated # dbGetPreparedQuery(con, # 'insert into treatments (key, treatment) values (:key, :treatment)', # bind.data=df) dbExecute(con, 'insert into treatments (key, treatment) values (:key, :treatment)', params=df) constr <- paste(capture.output(print(con)),collapse='\n') paste('saved to db: ', constr) } rm(list= c('treatmentsC', 'dTestCTreated')) ## ----dbload------------------------------------------------------------------- if(!is.null(con)) { treatmentsList <- lapply( dbGetQuery(con, "select * from treatments where key='treatmentsC'")$treatment, unserialize) treatmentsC <- treatmentsList[[1]] dbDisconnect(con) dTestCTreated <- prepare(treatmentsC, dTestC, pruneSig= c()) print(dTestCTreated) }
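## ----versioncheck-sketch------------------------------------------------------
# illustrative sketch (not from the original vignette): treatment plans record
# the vtreat version that designed them; compare it to the installed package
if(!is.null(con)) {
  if(treatmentsC$vtreatVersion != packageVersion('vtreat')) {
    warning("treatment plan was built with a different version of vtreat")
  }
}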
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/SavingTreamentPlans.R
--- title: "Saving Treatment Plans" author: "John Mount" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Saving Treatment Plans} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- You can save and load treatment plans. Note: treatments plans are intended to be used with the version of `vtreat` they were constructed with (though we try to make plans forward-compatible). So it is good idea to have procedures to re-build treatment plans. The easiest way to save `vtreat` treatment plans is to use `R`'s built in `saveRDS` function. To save in a file: ```{r savefile} library("vtreat") dTrainC <- data.frame(x=c('a','a','a','b','b',NA,NA), z=c(1,2,3,4,NA,6,NA), y=c(FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE)) treatmentsC <- designTreatmentsC(dTrainC, colnames(dTrainC), 'y', TRUE, verbose= FALSE) fileName = paste0(tempfile(c('vtreatPlan')), '.RDS') saveRDS(treatmentsC,fileName) rm(list=c('treatmentsC')) ``` And then to restore and use. ```{r loadfile} library("vtreat") treatmentsC <- readRDS(fileName) dTestC <- data.frame(x=c('a','b','c',NA),z=c(10,20,30,NA)) dTestCTreated <- prepare(treatmentsC, dTestC, pruneSig= c()) # clean up unlink(fileName) ``` Treatment plans can also be stored as binary blobs in databases. Using ideas from [here](https://jfaganuk.github.io/2015/01/12/storing-r-objects-in-sqlite-tables/) gives us the following through the `DBI` interface. ```{r dbsave} con <- NULL if (requireNamespace('RSQLite', quietly = TRUE) && requireNamespace('DBI', quietly = TRUE)) { library("RSQLite") con <- dbConnect(drv=SQLite(), dbname=":memory:") # create table dbExecute(con, 'create table if not exists treatments (key varchar(200) primary key, treatment blob)') # wrap data df <- data.frame( key='treatmentsC', treatment = I(list(serialize(treatmentsC, NULL)))) # Clear any previous version dbExecute(con, "delete from treatments where key='treatmentsC'") # insert treatmentplan # depreciated # dbGetPreparedQuery(con, # 'insert into treatments (key, treatment) values (:key, :treatment)', # bind.data=df) dbExecute(con, 'insert into treatments (key, treatment) values (:key, :treatment)', params=df) constr <- paste(capture.output(print(con)),collapse='\n') paste('saved to db: ', constr) } rm(list= c('treatmentsC', 'dTestCTreated')) ``` And we can read the treatment back in as follows. ```{r dbload} if(!is.null(con)) { treatmentsList <- lapply( dbGetQuery(con, "select * from treatments where key='treatmentsC'")$treatment, unserialize) treatmentsC <- treatmentsList[[1]] dbDisconnect(con) dTestCTreated <- prepare(treatmentsC, dTestC, pruneSig= c()) print(dTestCTreated) } ```
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/SavingTreamentPlans.Rmd
## ----------------------------------------------------------------------------- set.seed(1999) d <- data.frame(x = seq(0, 15, by = 0.25)) d$y_ideal <- sin(d$x) d$x_noise <- d$x[sample.int(nrow(d), nrow(d), replace = FALSE)] d$y <- d$y_ideal + 0.5*rnorm(nrow(d)) dim(d) ## ----------------------------------------------------------------------------- cfe <- vtreat::mkCrossFrameNExperiment( d, varlist = c("x", "x_noise"), outcomename = "y") sf <- cfe$treatments$scoreFrame knitr::kable(sf[, c("varName", "rsq", "sig")]) ## ----------------------------------------------------------------------------- vf = vtreat::value_variables_N( d, varlist = c("x", "x_noise"), outcomename = "y") knitr::kable(vf[, c("var", "rsq", "sig")])
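## ----screen-sketch------------------------------------------------------------
# illustrative sketch (not from the original vignette): the screening rule
# discussed in the vignette text, assuming one row of vf per candidate variable
n_considered <- nrow(vf)
vf$var[vf$sig <= 1/n_considered]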
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/VariableImportance.R
--- title: "vtreat Variable Importance" author: "John Mount" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{vtreat Variable Importance} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- [`vtreat`](https://github.com/WinVector/vtreat)'s purpose is to produce pure numeric [`R`](https://www.r-project.org) `data.frame`s that are ready for [supervised predictive modeling](https://en.wikipedia.org/wiki/Supervised_learning) (predicting a value from other values). By ready we mean: a purely numeric data frame with no missing values and a reasonable number of columns (missing-values re-encoded with indicators, and high-degree categorical re-encode by effects codes or impact codes). Part of the `vtreat` philosophy is to assume after the `vtreat` variable processing the next step is a sophisticated [supervised machine learning](https://en.wikipedia.org/wiki/Supervised_learning) method. Under this assumption we assume the machine learning methodology (be it regression, tree methods, random forests, boosting, or neural nets) will handle issues of redundant variables, joint distributions of variables, overall regularization, and joint dimension reduction. However, an important exception is: variable screening. In practice we have seen wide data-warehouses with hundreds of columns overwhelm and defeat state of the art machine learning algorithms due to over-fitting. We have some synthetic examples of this ([here](https://win-vector.com/2014/02/01/bad-bayes-an-example-of-why-you-need-hold-out-testing/) and [here](https://win-vector.com/talks-and-presentations/)). The upshot is: even in 2018 you can not treat every column you find in a data warehouse as a variable. You must at least perform some basic screening. To help with this `vtreat` incorporates a per-variable linear significance report. This report shows how useful each variable is taken alone in a linear or generalized linear model (some details can be found [here](https://arxiv.org/abs/1611.09477)). However, this sort of calculation was optimized for speed, not discovery power. `vtreat` now includes a direct variable valuation system that works very well with complex numeric relationships. It is a function called [`vtreat::value_variables_N()`](https://winvector.github.io/vtreat/reference/value_variables_N.html) for numeric or regression problems and [`vtreat::value_variables_C()`](https://winvector.github.io/vtreat/reference/value_variables_C.html) for binomial classification problems. It works by fitting two transformed copies of each numeric variable to the outcome. One transform is a low frequency transform realized as an optimal `k`-segment linear model for a moderate choice of `k`. The other fit is a high-frequency trasnform realized as a `k`-nearest neighbor average for moderate choice of `k`. Some of the methodology is shown [here](https://github.com/WinVector/vtreat/blob/master/extras/SegFitter.md). We recommend using `vtreat::value_variables_*()` as an initial variable screen. Let's demonstrate this using the data from the segment fitter example. In our case the value to be predicted ("`y`") is a noisy copy of `sin(x)`. Let's set up our example data: ```{r} set.seed(1999) d <- data.frame(x = seq(0, 15, by = 0.25)) d$y_ideal <- sin(d$x) d$x_noise <- d$x[sample.int(nrow(d), nrow(d), replace = FALSE)] d$y <- d$y_ideal + 0.5*rnorm(nrow(d)) dim(d) ``` Now a simple linear valuation of the the variables can be produced as follows. 
```{r} cfe <- vtreat::mkCrossFrameNExperiment( d, varlist = c("x", "x_noise"), outcomename = "y") sf <- cfe$treatments$scoreFrame knitr::kable(sf[, c("varName", "rsq", "sig")]) ``` Notice the signal carrying variable did not score better (having a larger `r`-squared and a smaller (better) significance value) than the noise variable (that is unrelated to the outcome). This is because the relation between `x` and `y` is not linear. Now let's try `vtreat::value_variables_N()`. ```{r} vf = vtreat::value_variables_N( d, varlist = c("x", "x_noise"), outcomename = "y") knitr::kable(vf[, c("var", "rsq", "sig")]) ``` Now the difference is night and day. The important variable `x` is singled out (scores very well), and the unimportant variable `x_noise` doesn't often score well. Though, as with all significance tests, useless variables can get lucky from time to time- (an issue that can be addressed by using a [Cohen's-`d` style calculation](https://win-vector.com/2017/09/08/remember-p-values-are-not-effect-sizes/)). Our modeling advice is: * Use `vtreat::value_variables_*()` * Pick all variables with `sig <= 1/number_of_variables_being_considered`. The idea is: each "pure noise" (or purely useless) variable has a significance that is distributed uniformly between zero and one. So the expected number of useless variables that make it through the above screening is `number_of_useless_varaibles * P[useless_sig <= 1/number_of_variables_being_considered]`. This equals `number_of_useless_varaibles * 1/number_of_variables_being_considered`. As `number_of_useless_varaibles <= number_of_variables_being_considered` we get this quantity is no more than one. So we expect a constant number of useless variables to sneak through this filter. The hope is: this should not be enough useless variables to overwhelm the next stage supervised machine learning step. Obviously there are situations where variable importance can not be discovered without considering joint distributions. The most famous one being "xor" where the concept to be learned is if an odd or even number of indicator variables are zero or one (each such variable is individual completely uninformative about the outcome until you have all of the variables simultaneously). However, for practical problems you often have that most variables have a higher marginal predictive power taken alone than they have in the final joint model (as other, better, variables consume some of common variables' predictive power in the joint model). With this in mind single variable screening often at least gives an indication where to look. In conclusion the `vtreat` package and `vtreat::value_variables_*()` can be a valuable addition to your supervised learning practice.
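Returning to the screening advice above, applying it to the `vf` frame is a one-liner (a sketch that assumes `vf` has one row per candidate variable):

```{r screen_sketch}
n_considered <- nrow(vf)
vf$var[vf$sig <= 1/n_considered]
```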
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/VariableImportance.Rmd
## ----------------------------------------------------------------------------- library("vtreat") packageVersion("vtreat") citation('vtreat') ## ----------------------------------------------------------------------------- # categorical example set.seed(23525) # we set up our raw training and application data dTrainC <- data.frame( x = c('a', 'a', 'a', 'b', 'b', NA, NA), z = c(1, 2, 3, 4, NA, 6, NA), y = c(FALSE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE)) dTestC <- data.frame( x = c('a', 'b', 'c', NA), z = c(10, 20, 30, NA)) # we perform a vtreat cross frame experiment # and unpack the results into treatmentsC # and dTrainCTreated unpack[ treatmentsC = treatments, dTrainCTreated = crossFrame ] <- mkCrossFrameCExperiment( dframe = dTrainC, varlist = setdiff(colnames(dTrainC), 'y'), outcomename = 'y', outcometarget = TRUE, verbose = FALSE) # the treatments include a score frame relating new # derived variables to original columns treatmentsC$scoreFrame[, c('origName', 'varName', 'code', 'rsq', 'sig', 'extraModelDegrees', 'recommended')] %.>% knitr::kable(.) # the treated frame is a "cross frame" which # is a transform of the training data built # as if the treatment were learned on a different # disjoint training set to avoid nested model # bias and over-fit. dTrainCTreated %.>% head(.) %.>% knitr::kable(.) # Any future application data is prepared with # the prepare method. dTestCTreated <- prepare(treatmentsC, dTestC, pruneSig=NULL) dTestCTreated %.>% head(.) %.>% knitr::kable(.) ## ----------------------------------------------------------------------------- # numeric example set.seed(23525) # we set up our raw training and application data dTrainN <- data.frame( x = c('a', 'a', 'a', 'a', 'b', 'b', NA, NA), z = c(1, 2, 3, 4, 5, NA, 7, NA), y = c(0, 0, 0, 1, 0, 1, 1, 1)) dTestN <- data.frame( x = c('a', 'b', 'c', NA), z = c(10, 20, 30, NA)) # we perform a vtreat cross frame experiment # and unpack the results into treatmentsN # and dTrainNTreated unpack[ treatmentsN = treatments, dTrainNTreated = crossFrame ] <- mkCrossFrameNExperiment( dframe = dTrainN, varlist = setdiff(colnames(dTrainN), 'y'), outcomename = 'y', verbose = FALSE) # the treatments include a score frame relating new # derived variables to original columns treatmentsN$scoreFrame[, c('origName', 'varName', 'code', 'rsq', 'sig', 'extraModelDegrees')] %.>% knitr::kable(.) # the treated frame is a "cross frame" which # is a transform of the training data built # as if the treatment were learned on a different # disjoint training set to avoid nested model # bias and over-fit. dTrainNTreated %.>% head(.) %.>% knitr::kable(.) # Any future application data is prepared with # the prepare method. dTestNTreated <- prepare(treatmentsN, dTestN, pruneSig=NULL) dTestNTreated %.>% head(.) %.>% knitr::kable(.)
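## ----restrict-sketch----------------------------------------------------------
# illustrative sketch (not from the original vignette): use the score frame's
# recommended column (logical) to limit which derived variables prepare()
# produces via the varRestriction argument
rec_vars <- treatmentsC$scoreFrame$varName[treatmentsC$scoreFrame$recommended]
dTestCRestricted <- prepare(treatmentsC, dTestC,
                            pruneSig = NULL,
                            varRestriction = rec_vars)
dTestCRestricted %.>%
  head(.) %.>%
  knitr::kable(.)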
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreat.R
--- title: "vtreat package" author: "John Mount, Nina Zumel" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{vtreat package} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- `vtreat` is a `data.frame` processor/conditioner (available [for `R`](https://github.com/WinVector/vtreat), and [for `Python`](https://github.com/WinVector/pyvtreat)) that prepares real-world data for supervised machine learning or predictive modeling in a statistically sound manner. A nice video lecture on what sorts of problems `vtreat` solves can be found [here](https://youtu.be/sniHkkrAsOc?t=42). `vtreat` takes an input `data.frame` that has a specified column called "the outcome variable" (or "y") that is the quantity to be predicted (and must not have missing values). Other input columns are possible explanatory variables (typically numeric or categorical/string-valued, these columns may have missing values) that the user later wants to use to predict "y". In practice such an input `data.frame` may not be immediately suitable for machine learning procedures that often expect only numeric explanatory variables, and may not tolerate missing values. To solve this, `vtreat` builds a transformed `data.frame` where all explanatory variable columns have been transformed into a number of numeric explanatory variable columns, without missing values. The `vtreat` implementation produces derived numeric columns that capture most of the information relating the explanatory columns to the specified "y" or dependent/outcome column through a number of numeric transforms (indicator variables, impact codes, prevalence codes, and more). This transformed `data.frame` is suitable for a wide range of supervised learning methods from linear regression, through gradient boosted machines. The idea is: you can take a `data.frame` of messy real world data and easily, faithfully, reliably, and repeatably prepare it for machine learning using documented methods using `vtreat`. Incorporating `vtreat` into your machine learning workflow lets you quickly work with very diverse structured data. In all cases (classification, regression, unsupervised, and multinomial classification) the intent is that `vtreat` transforms are essentially one liners. The preparation commands are organized as follows: * **Regression**: [`R` regression example, fit/prepare interface](https://github.com/WinVector/vtreat/blob/master/Examples/Regression/Regression_FP.md), [`R` regression example, design/prepare/experiment interface](https://github.com/WinVector/vtreat/blob/master/Examples/Regression/Regression.md), [`Python` regression example](https://github.com/WinVector/pyvtreat/blob/master/Examples/Regression/Regression.md). * **Classification**: [`R` classification example, fit/prepare interface](https://github.com/WinVector/vtreat/blob/master/Examples/Classification/Classification_FP.md), [`R` classification example, design/prepare/experiment interface](https://github.com/WinVector/vtreat/blob/master/Examples/Classification/Classification.md), [`Python` classification example](https://github.com/WinVector/pyvtreat/blob/master/Examples/Classification/Classification.md). 
* **Unsupervised tasks**: [`R` unsupervised example, fit/prepare interface](https://github.com/WinVector/vtreat/blob/master/Examples/Unsupervised/Unsupervised_FP.md), [`R` unsupervised example, design/prepare/experiment interface](https://github.com/WinVector/vtreat/blob/master/Examples/Unsupervised/Unsupervised.md), [`Python` unsupervised example](https://github.com/WinVector/pyvtreat/blob/master/Examples/Unsupervised/Unsupervised.md). * **Multinomial classification**: [`R` multinomial classification example, fit/prepare interface](https://github.com/WinVector/vtreat/blob/master/Examples/Multinomial/MultinomialExample_FP.md), [`R` multinomial classification example, design/prepare/experiment interface](https://github.com/WinVector/vtreat/blob/master/Examples/Multinomial/MultinomialExample.md), [`Python` multinomial classification example](https://github.com/WinVector/pyvtreat/blob/master/Examples/Multinomial/MultinomialExample.md). In all cases: variable preparation is intended to be a "one liner." These current revisions of the examples are designed to be small, yet complete. So as a set they have some overlap, but the user can rely mostly on a single example for a single task type. For more detail please see here: [arXiv:1611.09477 stat.AP](https://arxiv.org/abs/1611.09477) (the documentation describes the `R` version, however all of the examples can be found worked in `Python` [here](https://github.com/WinVector/pyvtreat/tree/master/Examples/vtreat_paper1)). `vtreat` is available as an [`R` package](https://github.com/WinVector/vtreat), and also as a [`Python`/`Pandas` package](https://github.com/WinVector/vtreat). Even with modern machine learning techniques (random forests, support vector machines, neural nets, gradient boosted trees, and so on) or standard statistical methods (regression, generalized regression, generalized additive models) there are *common* data issues that can cause modeling to fail. vtreat deals with a number of these in a principled and automated fashion. In particular vtreat emphasizes a concept called "y-aware pre-processing" and implements: - Treatment of missing values through safe replacement plus indicator column (a simple but very powerful method when combined with downstream machine learning algorithms). - Treatment of novel levels (new values of categorical variable seen during test or application, but not seen during training) through sub-models (or impact/effects coding of pooled rare events). - Explicit coding of categorical variable levels as new indicator variables (with optional suppression of non-significant indicators). - Treatment of categorical variables with very large numbers of levels through sub-models (again [impact/effects coding](https://win-vector.com/2012/07/23/modeling-trick-impact-coding-of-categorical-variables-with-many-levels/)). - (optional) User specified significance pruning on levels coded into effects/impact sub-models. - Correct treatment of nested models or sub-models through data split (see [here](https://winvector.github.io/vtreat/articles/vtreatOverfit.html)) or through the generation of "cross validated" data frames (see [here](https://winvector.github.io/vtreat/articles/vtreatCrossFrames.html)); these are issues similar to what is required to build statistically efficient stacked models or super-learners). 
- Safe processing of "wide data" (data with very many variables, often driving common machine learning algorithms to over-fit) through [out of sample per-variable significance estimates and user controllable pruning](https://winvector.github.io/vtreat/articles/vtreatSignificance.html) (something we have lectured on previously [here](https://github.com/WinVector/WinVector.github.io/tree/master/DS) and [here](https://win-vector.com/2014/02/01/bad-bayes-an-example-of-why-you-need-hold-out-testing/)). - Collaring/Winsorizing of unexpected out of range numeric inputs. - (optional) Conversion of all variables into effects (or "y-scale") units (through the optional `scale` argument to `vtreat::prepare()`, using some of the ideas discussed [here](https://win-vector.com/2014/06/02/skimming-statistics-papers-for-the-ideas-instead-of-the-complete-procedures/)). This allows correct/sensible application of principal component analysis pre-processing in a machine learning context. - Joining in additional training distribution data (which can be useful in analysis, called "catP" and "catD"). The idea is: even with a sophisticated machine learning algorithm there are *many* ways messy real world data can defeat the modeling process, and vtreat helps with at least ten of them. We emphasize: these problems are already in your data, you simply build better and more reliable models if you attempt to mitigate them. Automated processing is no substitute for actually looking at the data, but vtreat supplies efficient, reliable, documented, and tested implementations of many of the commonly needed transforms. To help explain the methods we have prepared some documentation: - The [vtreat package overall](https://winvector.github.io/vtreat/index.html). - [Preparing data for analysis using R white-paper](https://winvector.github.io/DataPrep/EN-CNTNT-Whitepaper-Data-Prep-Using-R.pdf) - The [types of new variables](https://winvector.github.io/vtreat/articles/vtreatVariableTypes.html) introduced by vtreat processing (including how to limit down to domain appropriate variable types). - Statistically sound treatment of the nested modeling issue introduced by any sort of pre-processing (such as vtreat itself): [nested over-fit issues](https://winvector.github.io/vtreat/articles/vtreatOverfit.html) and a general [cross-frame solution](https://winvector.github.io/vtreat/articles/vtreatCrossFrames.html). - [Principled ways to pick significance based pruning levels](https://winvector.github.io/vtreat/articles/vtreatSignificance.html). Data treatments are "y-aware" (use distribution relations between independent variables and the dependent variable). For binary classification use `designTreatmentsC()` and for numeric regression use `designTreatmentsN()`. After the design step, `prepare()` should be used as you would use model.matrix. `prepare()` treated variables are all numeric and never take the value NA or +-Inf (so are very safe to use in modeling). In application we suggest splitting your data into three sets: one for building vtreat encodings, one for training models using these encodings, and one for test and model evaluation. The purpose of `vtreat` library is to reliably prepare data for supervised machine learning. We try to leave as much as possible to the machine learning algorithms themselves, but cover most of the truly necessary typically ignored precautions. 
The library is designed to produce a `data.frame` that is entirely numeric and takes common precautions to guard against the following real world data issues: - Categorical variables with very many levels. We re-encode such variables as a family of indicator or dummy variables for common levels plus an additional [impact code](https://win-vector.com/2012/07/23/modeling-trick-impact-coding-of-categorical-variables-with-many-levels/) (also called "effects coded"). This allows principled use (including smoothing) of huge categorical variables (like zip-codes) when building models. This is critical for some libraries (such as `randomForest`, which has hard limits on the number of allowed levels). - Rare categorical levels. Levels that do not occur often during training tend not to have reliable effect estimates and contribute to over-fit. vtreat helps with two precautions in this case. First, the `rareCount` argument suppresses levels observed at or below this count from direct modeling, except possibly through a grouped contribution. Also, with enough data vtreat attempts to estimate out of sample performance of derived variables. Finally, we suggest users reserve a portion of data for vtreat design, separate from any data used in additional training, calibration, or testing. - Novel categorical levels. A common problem in deploying a classifier to production is: new levels (levels not seen during training) encountered during model application. We deal with this by encoding categorical variables in a possibly redundant manner: reserving a dummy variable for all levels (not the more common "all but a reference level" scheme). This is in fact the correct representation for regularized modeling techniques and lets us code novel levels as all dummies simultaneously zero (which is a reasonable thing to try). This encoding, while limited, is cheaper than the fully Bayesian solution of computing a weighted sum over previously seen levels during model application. - Missing/invalid values NA, NaN, +-Inf. Variables with these issues are re-coded as two columns. The first column is a clean copy of the variable (with missing/invalid values replaced with either zero or the grand mean, depending on the user's choice of the `scale` parameter). The second column is a dummy or indicator that marks if the replacement has been performed. This is simpler than imputation of missing values, and allows the downstream model to attempt to use missingness as a useful signal (which it often is in industrial data). - Extreme values. Variables can be restricted to stay in ranges seen during training. This can defend against some run-away classifier issues during model application. - Constant and near-constant variables. Variables that "don't vary" or "nearly don't vary" are suppressed. - Need for estimated single-variable model effect sizes and significances. It is a dirty secret that even popular machine learning techniques need some variable pruning (when exposed to very wide data frames, see [here](https://win-vector.com/2014/02/01/bad-bayes-an-example-of-why-you-need-hold-out-testing/) and [here](https://www.youtube.com/watch?v=X_Rn3EOEjGE)). We make the necessary effect size estimates and significances easily available and supply initial variable pruning. The above are all awful things that often lurk in real world data. Automating these steps ensures they are easy enough that you actually perform them and leaves the analyst time to look for additional data issues.
For example this allowed us to essentially automate a number of the steps taught in chapters 4 and 6 of [*Practical Data Science with R* (Zumel, Mount; Manning 2014)](https://win-vector.com/practical-data-science-with-r/) into a [very short worksheet](https://winvector.github.io/KDD2009/KDD2009RF.html) (though we think for understanding it is *essential* to work all the steps by hand as we did in the book). The 2nd edition of *Practical Data Science with R* covers using `vtreat` in `R` in chapter 8 "Advanced Data Preparation." The idea is: `data.frame`s prepared with the `vtreat` library are somewhat safe to train on as some precaution has been taken against all of the above issues. Also of interest are the `vtreat` variable significances (help in initial variable pruning, a necessity when there are a large number of columns) and `vtreat::prepare(scale=TRUE)` which re-encodes all variables into effect units making them suitable for y-aware dimension reduction (variable clustering, or principal component analysis) and for geometry sensitive machine learning techniques (k-means, knn, linear SVM, and more). You may want to do more than the `vtreat` library does (such as Bayesian imputation, variable clustering, and more) but you certainly do not want to do less. There have been a number of recent substantial improvements to the library, including: - Out of sample scoring. - Ability to use `parallel`. - More general calculation of effect sizes and significances. Some of our related articles (which should make clear some of our motivations, and design decisions): - [Modeling trick: impact coding of categorical variables with many levels](https://win-vector.com/2012/07/23/modeling-trick-impact-coding-of-categorical-variables-with-many-levels/) - [A bit more on impact coding](https://win-vector.com/2012/08/02/a-bit-more-on-impact-coding/) - [vtreat: designing a package for variable treatment](https://win-vector.com/2014/08/07/vtreat-designing-a-package-for-variable-treatment/) - [A comment on preparing data for classifiers](https://win-vector.com/2014/12/04/a-comment-on-preparing-data-for-classifiers/) - [Nina Zumel presenting on vtreat](https://www.slideshare.net/ChesterChen/vtreat) - [What is new in the vtreat library?](https://win-vector.com/2015/05/07/what-is-new-in-the-vtreat-library/) - [How do you know if your data has signal?](https://win-vector.com/2015/08/10/how-do-you-know-if-your-data-has-signal/) Examples of current best practice using `vtreat` (variable coding, train, test split) can be found [here](https://winvector.github.io/vtreat/articles/vtreatOverfit.html) and [here](https://winvector.github.io/KDD2009/KDD2009RF.html). Some small examples: We attach our packages. ```{r} library("vtreat") packageVersion("vtreat") citation('vtreat') ``` A small categorical example. 
```{r} # categorical example set.seed(23525) # we set up our raw training and application data dTrainC <- data.frame( x = c('a', 'a', 'a', 'b', 'b', NA, NA), z = c(1, 2, 3, 4, NA, 6, NA), y = c(FALSE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE)) dTestC <- data.frame( x = c('a', 'b', 'c', NA), z = c(10, 20, 30, NA)) # we perform a vtreat cross frame experiment # and unpack the results into treatmentsC # and dTrainCTreated unpack[ treatmentsC = treatments, dTrainCTreated = crossFrame ] <- mkCrossFrameCExperiment( dframe = dTrainC, varlist = setdiff(colnames(dTrainC), 'y'), outcomename = 'y', outcometarget = TRUE, verbose = FALSE) # the treatments include a score frame relating new # derived variables to original columns treatmentsC$scoreFrame[, c('origName', 'varName', 'code', 'rsq', 'sig', 'extraModelDegrees', 'recommended')] %.>% knitr::kable(.) # the treated frame is a "cross frame" which # is a transform of the training data built # as if the treatment were learned on a different # disjoint training set to avoid nested model # bias and over-fit. dTrainCTreated %.>% head(.) %.>% knitr::kable(.) # Any future application data is prepared with # the prepare method. dTestCTreated <- prepare(treatmentsC, dTestC, pruneSig=NULL) dTestCTreated %.>% head(.) %.>% knitr::kable(.) ``` A small numeric example. ```{r} # numeric example set.seed(23525) # we set up our raw training and application data dTrainN <- data.frame( x = c('a', 'a', 'a', 'a', 'b', 'b', NA, NA), z = c(1, 2, 3, 4, 5, NA, 7, NA), y = c(0, 0, 0, 1, 0, 1, 1, 1)) dTestN <- data.frame( x = c('a', 'b', 'c', NA), z = c(10, 20, 30, NA)) # we perform a vtreat cross frame experiment # and unpack the results into treatmentsN # and dTrainNTreated unpack[ treatmentsN = treatments, dTrainNTreated = crossFrame ] <- mkCrossFrameNExperiment( dframe = dTrainN, varlist = setdiff(colnames(dTrainN), 'y'), outcomename = 'y', verbose = FALSE) # the treatments include a score frame relating new # derived variables to original columns treatmentsN$scoreFrame[, c('origName', 'varName', 'code', 'rsq', 'sig', 'extraModelDegrees')] %.>% knitr::kable(.) # the treated frame is a "cross frame" which # is a transform of the training data built # as if the treatment were learned on a different # disjoint training set to avoid nested model # bias and over-fit. dTrainNTreated %.>% head(.) %.>% knitr::kable(.) # Any future application data is prepared with # the prepare method. dTestNTreated <- prepare(treatmentsN, dTestN, pruneSig=NULL) dTestNTreated %.>% head(.) %.>% knitr::kable(.) ``` Related work: * Cohen J, Cohen P (1983). Applied Multiple Regression/Correlation Analysis For The Behavioral Sciences. 2 edition. Lawrence Erlbaum Associates, Inc. ISBN 0-89859-268-2. * ["A preprocessing scheme for high-cardinality categorical attributes in classification and prediction problems"](https://dl.acm.org/doi/10.1145/507533.507538) Daniele Micci-Barreca; ACM SIGKDD Explorations, Volume 3 Issue 1, July 2001 Pages 27-32. * ["Modeling Trick: Impact Coding of Categorical Variables with Many Levels"](https://win-vector.com/2012/07/23/modeling-trick-impact-coding-of-categorical-variables-with-many-levels/) Nina Zumel; Win-Vector blog, 2012. * "Big Learning Made Easy – with Counts!", Misha Bilenko, Cortana Intelligence and Machine Learning Blog, 2015. ## Note Notes on controlling `vtreat`'s cross-validation plans can be found [here](https://github.com/WinVector/vtreat/blob/master/Examples/CustomizedCrossPlan/CustomizedCrossPlan.md). 
Note: `vtreat` is meant only for "tame names", that is: variables and column names that are also valid *simple* (without quotes) `R` variable names.
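If your source data does not have tame names, a minimal sketch (using base `R`'s `make.names()`; the frame `d_raw` below is hypothetical) of cleaning the names before calling `vtreat`:

```{r}
# hypothetical frame with non-simple column names
d_raw <- data.frame(`x 1` = 1:3, `y%` = c(0, 1, 0), check.names = FALSE)
# re-map to valid, unique, simple R names before treatment design
colnames(d_raw) <- make.names(colnames(d_raw), unique = TRUE)
colnames(d_raw)
```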
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreat.Rmd
## ----------------------------------------------------------------------------- set.seed(22626) mkData <- function(n) { d <- data.frame(xBad1=sample(paste('level',1:1000,sep=''),n,replace=TRUE), xBad2=sample(paste('level',1:1000,sep=''),n,replace=TRUE), xBad3=sample(paste('level',1:1000,sep=''),n,replace=TRUE), xGood1=rnorm(n), xGood2=rnorm(n)) # outcome only depends on "good" variables d$y <- rnorm(nrow(d))+0.2*d$xGood1 + 0.3*d$xGood2>0.5 # the random group used for splitting the data set, not a variable. d$rgroup <- sample(c("cal","train","test"),nrow(d),replace=TRUE) d } d <- mkData(2000) # devtools::install_github("WinVector/WVPlots") # library('WVPlots') plotRes <- function(d,predName,yName,title) { print(title) tab <- table(truth=d[[yName]],pred=d[[predName]]>0.5) print(tab) diag <- sum(vapply(seq_len(min(dim(tab))), function(i) tab[i,i],numeric(1))) acc <- diag/sum(tab) # if(requireNamespace("WVPlots",quietly=TRUE)) { # print(WVPlots::ROCPlot(d,predName,yName,title)) # } print(paste('accuracy',acc)) } ## ----badmixcalandtrain-------------------------------------------------------- dTrain <- d[d$rgroup!='test',,drop=FALSE] dTest <- d[d$rgroup=='test',,drop=FALSE] treatments <- vtreat::designTreatmentsC(dTrain,c('xBad1','xBad2','xBad3','xGood1','xGood2'), 'y',TRUE, rareCount=0 # Note: usually want rareCount>0, setting to zero to illustrate problem ) dTrainTreated <- vtreat::prepare(treatments,dTrain, pruneSig=c() # Note: usually want pruneSig to be a small fraction, setting to null to illustrate problems ) f <- wrapr::mk_formula("y", treatments$scoreFrame$varName) print(f) m1 <- glm(f, data=dTrainTreated,family=binomial(link='logit')) print(summary(m1)) # notice low residual deviance dTrain$predM1 <- predict(m1,newdata=dTrainTreated,type='response') plotRes(dTrain,'predM1','y','model1 on train') dTestTreated <- vtreat::prepare(treatments,dTest,pruneSig=c()) dTest$predM1 <- predict(m1,newdata=dTestTreated,type='response') plotRes(dTest,'predM1','y','model1 on test') ## ----separatecalandtrain------------------------------------------------------ dCal <- d[d$rgroup=='cal',,drop=FALSE] dTrain <- d[d$rgroup=='train',,drop=FALSE] dTest <- d[d$rgroup=='test',,drop=FALSE] # a nice heuristic, # expect only a constant number of noise variables to sneak past pruneSig <- 1/ncol(dTrain) treatments <- vtreat::designTreatmentsC(dCal, c('xBad1','xBad2','xBad3','xGood1','xGood2'), 'y',TRUE, rareCount=0 # Note: usually want rareCount>0, setting to zero to illustrate problem ) dTrainTreated <- vtreat::prepare(treatments,dTrain, pruneSig=pruneSig) newvars <- setdiff(colnames(dTrainTreated),'y') m1 <- glm(paste('y',paste(newvars,collapse=' + '),sep=' ~ '), data=dTrainTreated,family=binomial(link='logit')) print(summary(m1)) dTrain$predM1 <- predict(m1,newdata=dTrainTreated,type='response') plotRes(dTrain,'predM1','y','model1 on train') dTestTreated <- vtreat::prepare(treatments,dTest, pruneSig=pruneSig) dTest$predM1 <- predict(m1,newdata=dTestTreated,type='response') plotRes(dTest,'predM1','y','model1 on test') ## ----crossframes-------------------------------------------------------------- dTrain <- d[d$rgroup!='test',,drop=FALSE] dTest <- d[d$rgroup=='test',,drop=FALSE] prep <- vtreat::mkCrossFrameCExperiment(dTrain, c('xBad1','xBad2','xBad3','xGood1','xGood2'), 'y',TRUE, rareCount=0 # Note: usually want rareCount>0, setting to zero to illustrate problems ) treatments <- prep$treatments knitr::kable(treatments$scoreFrame[,c('varName','sig')]) colnames(prep$crossFrame) # vtreat::mkCrossFrameCExperiment 
doesn't take a pruneSig argument, but we can # prune on our own. print(pruneSig) newvars <- treatments$scoreFrame$varName[treatments$scoreFrame$sig<=pruneSig] # force in bad variables, to show we "belt and suspenders" deal with them # in that things go well in the cross-frame even if they sneak past pruning newvars <- sort(union(newvars,c("xBad1_catB","xBad2_catB","xBad3_catB"))) print(newvars) dTrainTreated <- prep$crossFrame ## ----xframemodel-------------------------------------------------------------- m1 <- glm(paste('y',paste(newvars,collapse=' + '),sep=' ~ '), data=dTrainTreated,family=binomial(link='logit')) print(summary(m1)) dTrain$predM1 <- predict(m1,newdata=dTrainTreated,type='response') plotRes(dTrain,'predM1','y','model1 on train') dTestTreated <- vtreat::prepare(treatments,dTest, pruneSig=c(),varRestriction=newvars) knitr::kable(head(dTestTreated)) dTest$predM1 <- predict(m1,newdata=dTestTreated,type='response') plotRes(dTest,'predM1','y','model1 on test')
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatCrossFrames.R
--- title: "vtreat cross frames" author: "John Mount, Nina Zumel" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{vtreat cross frames} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- Example demonstrating "cross validated training frames" (or "cross frames") in vtreat. Consider the following data frame. The outcome only depends on the "good" variables, not on the (high degree of freedom) "bad" variables. Modeling such a data set runs a high risk of over-fit. ```{r} set.seed(22626) mkData <- function(n) { d <- data.frame(xBad1=sample(paste('level',1:1000,sep=''),n,replace=TRUE), xBad2=sample(paste('level',1:1000,sep=''),n,replace=TRUE), xBad3=sample(paste('level',1:1000,sep=''),n,replace=TRUE), xGood1=rnorm(n), xGood2=rnorm(n)) # outcome only depends on "good" variables d$y <- rnorm(nrow(d))+0.2*d$xGood1 + 0.3*d$xGood2>0.5 # the random group used for splitting the data set, not a variable. d$rgroup <- sample(c("cal","train","test"),nrow(d),replace=TRUE) d } d <- mkData(2000) # devtools::install_github("WinVector/WVPlots") # library('WVPlots') plotRes <- function(d,predName,yName,title) { print(title) tab <- table(truth=d[[yName]],pred=d[[predName]]>0.5) print(tab) diag <- sum(vapply(seq_len(min(dim(tab))), function(i) tab[i,i],numeric(1))) acc <- diag/sum(tab) # if(requireNamespace("WVPlots",quietly=TRUE)) { # print(WVPlots::ROCPlot(d,predName,yName,title)) # } print(paste('accuracy',acc)) } ``` ## The Wrong Way Bad practice: use the same set of data to prepare variable encoding and train a model. ```{r badmixcalandtrain} dTrain <- d[d$rgroup!='test',,drop=FALSE] dTest <- d[d$rgroup=='test',,drop=FALSE] treatments <- vtreat::designTreatmentsC(dTrain,c('xBad1','xBad2','xBad3','xGood1','xGood2'), 'y',TRUE, rareCount=0 # Note: usually want rareCount>0, setting to zero to illustrate problem ) dTrainTreated <- vtreat::prepare(treatments,dTrain, pruneSig=c() # Note: usually want pruneSig to be a small fraction, setting to null to illustrate problems ) f <- wrapr::mk_formula("y", treatments$scoreFrame$varName) print(f) m1 <- glm(f, data=dTrainTreated,family=binomial(link='logit')) print(summary(m1)) # notice low residual deviance dTrain$predM1 <- predict(m1,newdata=dTrainTreated,type='response') plotRes(dTrain,'predM1','y','model1 on train') dTestTreated <- vtreat::prepare(treatments,dTest,pruneSig=c()) dTest$predM1 <- predict(m1,newdata=dTestTreated,type='response') plotRes(dTest,'predM1','y','model1 on test') ``` Notice above that we see a training accuracy of 98% and a test accuracy of 60%. Also notice the downstream model (the `glm`) erroneously thinks the `xBad?_cat` variables are significant (due to the large number of degrees of freedom hidden from the downstream model by the [impact/effect coding](https://win-vector.com/2012/07/23/modeling-trick-impact-coding-of-categorical-variables-with-many-levels/)). 
## The Right Way: A Calibration Set Now try a proper calibration/train/test split: ```{r separatecalandtrain} dCal <- d[d$rgroup=='cal',,drop=FALSE] dTrain <- d[d$rgroup=='train',,drop=FALSE] dTest <- d[d$rgroup=='test',,drop=FALSE] # a nice heuristic, # expect only a constant number of noise variables to sneak past pruneSig <- 1/ncol(dTrain) treatments <- vtreat::designTreatmentsC(dCal, c('xBad1','xBad2','xBad3','xGood1','xGood2'), 'y',TRUE, rareCount=0 # Note: usually want rareCount>0, setting to zero to illustrate problem ) dTrainTreated <- vtreat::prepare(treatments,dTrain, pruneSig=pruneSig) newvars <- setdiff(colnames(dTrainTreated),'y') m1 <- glm(paste('y',paste(newvars,collapse=' + '),sep=' ~ '), data=dTrainTreated,family=binomial(link='logit')) print(summary(m1)) dTrain$predM1 <- predict(m1,newdata=dTrainTreated,type='response') plotRes(dTrain,'predM1','y','model1 on train') dTestTreated <- vtreat::prepare(treatments,dTest, pruneSig=pruneSig) dTest$predM1 <- predict(m1,newdata=dTestTreated,type='response') plotRes(dTest,'predM1','y','model1 on test') ``` Notice above that we now see training and test accuracies of 70%. We have defeated over-fit in two ways: training performance is closer to test performance, and test performance is better. Also we see that the model now properly considers the "bad" variables to be insignificant. ## Another Right Way: Cross-Validation Below is a more statistically efficient practice: building a cross training frame. ### The intuition Consider any trained statistical model (in this case our treatment plan and variable selection plan) as a two-argument function _f(A,B)_. The first argument is the training data and the second argument is the application data. In our case _f(A,B)_ is: `designTreatmentsC(A) %>% prepare(B)`, and it produces a treated data frame. When we use the same data in both places to build our training frame, as in > _TrainTreated = f(TrainData,TrainData)_, we are not doing a good job simulating the future application of _f(,)_, which will be _f(TrainData,FutureData)_. To improve the quality of our simulation we can call > _TrainTreated = f(CalibrationData,TrainData)_ where _CalibrationData_ and _TrainData_ are disjoint datasets (as we did in the earlier example) and expect this to be a good imitation of future _f(CalibrationData,FutureData)_. ### Cross-Validation and vtreat: The cross-frame. Another approach is to build a "cross validated" version of _f_. We split _TrainData_ into a list of 3 disjoint row intervals: _Train1_,_Train2_,_Train3_. Instead of computing _f(TrainData,TrainData)_ compute: > _TrainTreated = f(Train2+Train3,Train1) + f(Train1+Train3,Train2) + f(Train1+Train2,Train3)_ (where + denotes `rbind()`). The idea is this looks a lot like _f(TrainData,TrainData)_ except it has the important property that no row in the right-hand side is ever worked on by a model built using that row (a key characteristic that future data will have) so we have a good imitation of _f(TrainData,FutureData)_. In other words: we use cross validation to simulate future data. The main thing we are doing differently is remembering that we can apply cross validation to *any* two argument function _f(A,B)_ and not only to functions of the form _f(A,B)_ = `buildModel(A) %>% scoreData(B)`. We can use this formulation in stacking or super-learning with _f(A,B)_ of the form `buildSubModels(A) %>% combineModels(B)` (to produce a stacked or ensemble model); the idea applies to improving ensemble methods in general. 
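To make the _f(A,B)_ notation concrete, here is a minimal schematic sketch of the cross-frame construction (illustrative only; `vtreat`'s actual implementation also handles stratification, grouping, and other details):

```{r}
# schematic sketch: cross-validated application of a generic two-argument
# procedure f(trainingData, applicationData); this is not vtreat's internal code
cross_frame_sketch <- function(f, trainData, k = 3) {
  fold <- sample(rep(seq_len(k), length.out = nrow(trainData)))
  pieces <- lapply(seq_len(k), function(i) {
    # each fold is scored by an f() built only from the other folds
    f(trainData[fold != i, , drop = FALSE],
      trainData[fold == i, , drop = FALSE])
  })
  # note: resulting rows are grouped by fold, not in the original order
  do.call(rbind, pieces)
}
```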
See: - "General oracle inequalities for model selection" Charles Mitchell and Sara van de Geer - "On Cross-Validation and Stacking: Building seemingly predictive models on random data" Claudia Perlich and Grzegorz Swirszcz - "Super Learner" Mark J. van der Laan, Eric C. Polley, and Alan E. Hubbard In fact you can think of vtreat as a super-learner. In super learning cross validation techniques are used to simulate having built sub-model predictions on novel data. The simulated out of sample-applications of these sub models (and not the sub models themselves) are then used as input data for the next stage learner. In future application the actual sub-models are applied and their immediate outputs is used by the super model. <img src="superX.png" width="600"> In vtreat the sub-models are single variable treatments and the outer model construction is left to the practitioner (using the cross-frames for simulation and not the treatmentplan). In application the treatment plan is used. <img src="vtreatX.png" width="600"> ### Example Below is the example cross-run. The function `mkCrossFrameCExperiment` returns a treatment plan for use in preparing future data, and a cross-frame for use in fitting a model. ```{r crossframes} dTrain <- d[d$rgroup!='test',,drop=FALSE] dTest <- d[d$rgroup=='test',,drop=FALSE] prep <- vtreat::mkCrossFrameCExperiment(dTrain, c('xBad1','xBad2','xBad3','xGood1','xGood2'), 'y',TRUE, rareCount=0 # Note: usually want rareCount>0, setting to zero to illustrate problems ) treatments <- prep$treatments knitr::kable(treatments$scoreFrame[,c('varName','sig')]) colnames(prep$crossFrame) # vtreat::mkCrossFrameCExperiment doesn't take a pruneSig argument, but we can # prune on our own. print(pruneSig) newvars <- treatments$scoreFrame$varName[treatments$scoreFrame$sig<=pruneSig] # force in bad variables, to show we "belt and suspenders" deal with them # in that things go well in the cross-frame even if they sneak past pruning newvars <- sort(union(newvars,c("xBad1_catB","xBad2_catB","xBad3_catB"))) print(newvars) dTrainTreated <- prep$crossFrame ``` We ensured the undesirable `xBad*_catB` variables back in to demonstrate that even if they sneak past a lose `pruneSig`, the crossframe lets the downstream model deal with them correctly. To ensure more consistent filtering of the complicated variables one can increase the `ncross` argument in `vtreat::mkCrossFrameCExperiment`/`vtreat::mkCrossFrameNExperiment`. Now we fit the model to *the cross-frame* rather than to `prepare(treatments, dTrain)` (the treated training data). ```{r xframemodel} m1 <- glm(paste('y',paste(newvars,collapse=' + '),sep=' ~ '), data=dTrainTreated,family=binomial(link='logit')) print(summary(m1)) dTrain$predM1 <- predict(m1,newdata=dTrainTreated,type='response') plotRes(dTrain,'predM1','y','model1 on train') dTestTreated <- vtreat::prepare(treatments,dTest, pruneSig=c(),varRestriction=newvars) knitr::kable(head(dTestTreated)) dTest$predM1 <- predict(m1,newdata=dTestTreated,type='response') plotRes(dTest,'predM1','y','model1 on test') ``` We again get the better 70% test accuracy. And this is a more statistically efficient technique as we didn't have to restrict some data to calibration. The model fit to the cross-frame behaves similarly to the model produced via the process _f(CalibrationData, TrainData)_. Notice that the `xBad*_catB` variables fail to achieve significance in the downstream `glm` model, allowing that model to give them small coefficients and even (if need be) prune them out. 
This is the point of using a cross frame: as we saw in the first example, the `xBad*_catB` variables are hard to remove once they make it into standard (non-cross) frames, because they hide a lot of degrees of freedom from downstream modeling procedures.
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatCrossFrames.Rmd
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(fig.width = 7) ## ----echo=FALSE, message=FALSE, warning=FALSE--------------------------------- library(vtreat) set.seed(23255) have_rqdatatable = requireNamespace("rqdatatable", quietly=TRUE) if(have_rqdatatable) { library(rqdatatable) } ## ----echo=FALSE, message=FALSE, warning=FALSE--------------------------------- # # takes the frame (d) and the outcome column (d$conc) # from the global environment # showGroupingBehavior = function(groupcol, title) { print(title) # display means of each group print("Group means:") means = tapply(d$conc, d[[groupcol]], mean) print(means) print(paste("Standard deviation of group means:", sd(means))) } ## ----data--------------------------------------------------------------------- # panel data for concentration in multiple subjects d <- datasets::Theoph head(d) summary(d) ## ----------------------------------------------------------------------------- # a somewhat arbitrary split of patients subnum = as.numeric(as.character(d$Subject)) d$modSplit = as.factor(subnum %% 3) ## ----------------------------------------------------------------------------- print(table(Subject=d$Subject, groupid=d$modSplit)) ## ----------------------------------------------------------------------------- # stratify by outcome only # forces concentration to be equivalent pStrat <- kWayStratifiedY(nrow(d),3,d,d$conc) attr(pStrat, "splitmethod") d$stratSplit <- vtreat::getSplitPlanAppLabels(nrow(d),pStrat) print(table(Subject=d$Subject, groupid=d$stratSplit)) ## ----------------------------------------------------------------------------- # stratify by patient and outcome # allows concentration to vary amoung individual patients splitter <- makekWayCrossValidationGroupedByColumn('Subject') split <- splitter(nrow(d),3,d,d$conc) attr(split, "splitmethod") d$subjectSplit <- vtreat::getSplitPlanAppLabels(nrow(d),split) print(table(Subject=d$Subject, groupid=d$subjectSplit)) ## ----echo=FALSE--------------------------------------------------------------- showGroupingBehavior("modSplit", "Arbitrary grouping") ## ----echo=FALSE--------------------------------------------------------------- showGroupingBehavior("subjectSplit", "Group by patient, stratify on y")
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatGrouping.R
--- title: "vtreat grouping example" author: "Nina Zumel, Nate Sutton" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{vtreat grouping example} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(fig.width = 7) ``` ```{r echo=FALSE, message=FALSE, warning=FALSE} library(vtreat) set.seed(23255) have_rqdatatable = requireNamespace("rqdatatable", quietly=TRUE) if(have_rqdatatable) { library(rqdatatable) } ``` ```{r echo=FALSE, message=FALSE, warning=FALSE} # # takes the frame (d) and the outcome column (d$conc) # from the global environment # showGroupingBehavior = function(groupcol, title) { print(title) # display means of each group print("Group means:") means = tapply(d$conc, d[[groupcol]], mean) print(means) print(paste("Standard deviation of group means:", sd(means))) } ``` This vignette shows an example use of _y_-stratified sampling with a grouping restriction in `vtreat`. For this example, we will use the `Theosph` dataset: data from an experiment on the pharmacokinetics of theophylline. We will demonstrate the desired effects of _y_-stratification while also respecting a grouping constraint. ## The Data First, let's look at the data. ```{r data} # panel data for concentration in multiple subjects d <- datasets::Theoph head(d) summary(d) ``` We have twelve subjects, who each received a dose of the anti-asthma drug theophylline. The theophylline concentration in the patients' blood was then measured at eleven points during the next 25 hours. Most of the patients got about the same dose, although the dose information reported in the dataset is normalized by weight. ## Partitioning the Data for Modeling Suppose we wanted to fit a model to analyze how a patient's weight affects how theophylline is metabolized, and validate that model with three-fold cross-validation. It would be important that all readings from a given patient stay in the same fold. We might also want the population in each fold to have similar distributions of theophylline concentrations curves. Recall that the goal of _y_-stratification is to insure that all samples from the data have as close to identical _y_ distributions as possible. This becomes more difficult when we also have to obey a grouping constraint. Let's look at three ways of splitting the data into folds. First, we will split the data arbitrarily into three groups, using the modulo of the Subject id to do the splitting. ```{r} # a somewhat arbitrary split of patients subnum = as.numeric(as.character(d$Subject)) d$modSplit = as.factor(subnum %% 3) ``` We can verify that this split preserves groups, by looking at the table of subject observations in each fold. Each subject should only appear in a single fold. ```{r} print(table(Subject=d$Subject, groupid=d$modSplit)) ``` Now let's try the standard _y_ stratification in `vtreat`. ```{r} # stratify by outcome only # forces concentration to be equivalent pStrat <- kWayStratifiedY(nrow(d),3,d,d$conc) attr(pStrat, "splitmethod") d$stratSplit <- vtreat::getSplitPlanAppLabels(nrow(d),pStrat) print(table(Subject=d$Subject, groupid=d$stratSplit)) ``` We can see this partition didn't preserve the `Subject` grouping. Finally, we can try `vtreat`'s group-preserving split, which also tries to _y_-stratify as much as possible (by stratifying on the mean *y* observation from each group). 
```{r} # stratify by patient and outcome # allows concentration to vary among individual patients splitter <- makekWayCrossValidationGroupedByColumn('Subject') split <- splitter(nrow(d),3,d,d$conc) attr(split, "splitmethod") d$subjectSplit <- vtreat::getSplitPlanAppLabels(nrow(d),split) print(table(Subject=d$Subject, groupid=d$subjectSplit)) ``` This is again a subject-preserving partition. We can compare the mean theophylline concentration and the average pharmacokinetic profile for each fold, for both of the subject-preserving partitions. We see that the stratification reduces some of the variation between folds. ### Arbitrary Partition ```{r echo=FALSE} showGroupingBehavior("modSplit", "Arbitrary grouping") ``` ### Group-preserving, _y_-stratified Partition ```{r echo=FALSE} showGroupingBehavior("subjectSplit", "Group by patient, stratify on y") ```
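The same subject-preserving splitter can also be handed to `vtreat`'s treatment design or cross-frame functions through their `splitFunction` argument, so that derived variables are cross-validated without splitting a subject across folds. A minimal sketch (not evaluated here; the variable list is illustrative):

```{r, eval=FALSE}
# sketch: use the grouped splitter when building a vtreat cross frame
cfe <- vtreat::mkCrossFrameNExperiment(
  d,
  varlist = c("Wt", "Dose", "Time"),
  outcomename = "conc",
  splitFunction = makekWayCrossValidationGroupedByColumn('Subject'),
  verbose = FALSE)
```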
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatGrouping.Rmd
## ----------------------------------------------------------------------------- set.seed(22626) d <- data.frame(x=sample(paste('level',1:1000,sep=''),2000,replace=TRUE)) # independent variable. d$y <- runif(nrow(d))>0.5 # the quantity to be predicted, notice: independent of variables. d$rgroup <- round(100*runif(nrow(d))) # the random group used for splitting the data set, not a variable. ## ----------------------------------------------------------------------------- dTrain <- d[d$rgroup<=80,,drop=FALSE] dTest <- d[d$rgroup>80,,drop=FALSE] library('vtreat') treatments <- vtreat::designTreatmentsC(dTrain,'x','y',TRUE, rareCount=0 # Note: usually want rareCount>0, setting to zero to illustrate problem ) dTrainTreated <- vtreat::prepare(treatments,dTrain, pruneSig=c() # Note: usually want pruneSig to be a small fraction, setting to null to illustrate problem ) m1 <- glm(y~x_catB,data=dTrainTreated,family=binomial(link='logit')) print(summary(m1)) # notice low residual deviance dTrain$predM1 <- predict(m1,newdata=dTrainTreated,type='response') # devtools::install_github("WinVector/WVPlots") # library('WVPlots') plotRes <- function(d,predName,yName,title) { print(title) tab <- table(truth=d[[yName]],pred=d[[predName]]>0.5) print(tab) diag <- sum(vapply(seq_len(min(dim(tab))), function(i) tab[i,i],numeric(1))) acc <- diag/sum(tab) # if(requireNamespace("WVPlots",quietly=TRUE)) { # print(WVPlots::ROCPlot(d,predName,yName,title)) # } print(paste('accuracy',acc)) } # evaluate model on training plotRes(dTrain,'predM1','y','model1 on train') # evaluate model on test dTestTreated <- vtreat::prepare(treatments,dTest,pruneSig=c()) dTest$predM1 <- predict(m1,newdata=dTestTreated,type='response') plotRes(dTest,'predM1','y','model1 on test') ## ----------------------------------------------------------------------------- print(treatments$scoreFrame) ## ----------------------------------------------------------------------------- dCode <- d[d$rgroup<=20,,drop=FALSE] dTrain <- d[(d$rgroup>20) & (d$rgroup<=80),,drop=FALSE] treatments <- vtreat::designTreatmentsC(dCode,'x','y',TRUE, rareCount=0, # Note set this to something larger, like 5 rareSig=c() # Note set this to something like 0.3 ) dTrainTreated <- vtreat::prepare(treatments,dTrain, pruneSig=c() # Note: set this to filter, like 0.05 or 1/nvars ) m2 <- glm(y~x_catB,data=dTrainTreated,family=binomial(link='logit')) print(summary(m2)) # notice high residual deviance dTrain$predM2 <- predict(m2,newdata=dTrainTreated,type='response') plotRes(dTrain,'predM2','y','model2 on train') # We do not advise creating dCodeTreated for any purpose other than # diagnostic plotting. You should not use the treated coding data # for anything (as that would undo the benefit of having a separate # coding data subset). 
dCodeTreated <- vtreat::prepare(treatments,dCode,pruneSig=c()) dCode$predM2 <- predict(m2,newdata=dCodeTreated,type='response') plotRes(dCode,'predM2','y','model2 on coding set') dTestTreated <- vtreat::prepare(treatments,dTest,pruneSig=c()) dTest$predM2 <- predict(m2,newdata=dTestTreated,type='response') plotRes(dTest,'predM2','y','model2 on test set') ## ----------------------------------------------------------------------------- dTrain <- d[d$rgroup<=80,,drop=FALSE] xdat <- vtreat::mkCrossFrameCExperiment(dTrain,'x','y',TRUE, rareCount=0, # Note set this to something larger, like 5 rareSig=c()) treatments <- xdat$treatments print(treatments$scoreFrame) dTrainTreated <- xdat$crossFrame m3 <- glm(y~x_catB,data=dTrainTreated,family=binomial(link='logit')) print(summary(m3)) # notice high residual deviance dTrainTreated$predM3 <- predict(m3,newdata=dTrainTreated,type='response') plotRes(dTrainTreated,'predM3','y','model3 on train') dTestTreated <- vtreat::prepare(treatments,dTest,pruneSig=c()) dTest$predM3 <- predict(m3,newdata=dTestTreated,type='response') plotRes(dTest,'predM3','y','model3 on test set')
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatOverfit.R
--- title: "vtreat overfit" author: "John Mount, Nina Zumel" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{vtreat overfit} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- Example showing safe "best practice" use of the ['vtreat'](https://cran.r-project.org/package=vtreat) variable preparation library. For more on `vtreat` see [here](https://github.com/WinVector/vtreat). Below we generate an example data frame with no relation between x and y. We are using a synthetic data set so we know what the "right answer is" (no signal). False fitting on no-signal variables is bad for several reasons: * It creates undesirable biases in variable quality estimates and in subsequent models. * It "hides degrees of freedom" from subsequent models. * It creates the false impression you have a good result (which you may fail to falsify). * Complex bad variables can starve out simple weak good variables. This example shows things we don't want to happen, and then the additional precautions that help prevent them. ```{r} set.seed(22626) d <- data.frame(x=sample(paste('level',1:1000,sep=''),2000,replace=TRUE)) # independent variable. d$y <- runif(nrow(d))>0.5 # the quantity to be predicted, notice: independent of variables. d$rgroup <- round(100*runif(nrow(d))) # the random group used for splitting the data set, not a variable. ``` ## Bad Practice: Using the same data to treat and to train Using the same set of data to prepare the variable encoding and train the model can lead to the false belief (derived from the training set) that the model fit well. This is largely due to the treated variable appearing to consume only one degree of freedom, when it in fact consumes many more. In many cases a reasonable setting of `pruneSig` (say 0.01) will help against a noise variable being considered desirable, but selected variables may still be mis-used by downstream modeling. ```{r} dTrain <- d[d$rgroup<=80,,drop=FALSE] dTest <- d[d$rgroup>80,,drop=FALSE] library('vtreat') treatments <- vtreat::designTreatmentsC(dTrain,'x','y',TRUE, rareCount=0 # Note: usually want rareCount>0, setting to zero to illustrate problem ) dTrainTreated <- vtreat::prepare(treatments,dTrain, pruneSig=c() # Note: usually want pruneSig to be a small fraction, setting to null to illustrate problem ) m1 <- glm(y~x_catB,data=dTrainTreated,family=binomial(link='logit')) print(summary(m1)) # notice low residual deviance dTrain$predM1 <- predict(m1,newdata=dTrainTreated,type='response') # devtools::install_github("WinVector/WVPlots") # library('WVPlots') plotRes <- function(d,predName,yName,title) { print(title) tab <- table(truth=d[[yName]],pred=d[[predName]]>0.5) print(tab) diag <- sum(vapply(seq_len(min(dim(tab))), function(i) tab[i,i],numeric(1))) acc <- diag/sum(tab) # if(requireNamespace("WVPlots",quietly=TRUE)) { # print(WVPlots::ROCPlot(d,predName,yName,title)) # } print(paste('accuracy',acc)) } # evaluate model on training plotRes(dTrain,'predM1','y','model1 on train') # evaluate model on test dTestTreated <- vtreat::prepare(treatments,dTest,pruneSig=c()) dTest$predM1 <- predict(m1,newdata=dTestTreated,type='response') plotRes(dTest,'predM1','y','model1 on test') ``` The above is bad: we saw a "significant" model fit on training data (even though there is no relation to be found). This means the treated training data can be confusing to machine learning techniques and to the analyst. 
The issue is that the training data is no longer exchangeable with the test data because the training data was used to build the variable encodings. One way to avoid this is to not use the training data for variable encoding construction, but instead use a third set for this task. ### What went wrong? Notice that vtreat did not think there was any usable signal, and did not want us to use the variables: the values in `treatments$scoreFrame$sig` are all much larger than a nominally acceptable significance level like 0.05. The variables stayed in our model because we did not prune them (_i.e._ we set `pruneSig=c()`). Also notice we set `rareCount=0`, which allows the use of very rare levels (which help drive the problem). ```{r} print(treatments$scoreFrame) ``` Subsequently, the downstream machine learning (in this case a standard logistic regression) used the variable incorrectly. The modeling algorithm gave the variable a non-negligible coefficient (around 3) that it thought was reliably bounded away from zero; it also believed that the resulting model almost halved deviance (when in fact it explained nothing). So any variables that do get through may have distributional issues (and misleadingly low apparent degrees of freedom). **Rare levels of a categorical variable** The biggest contributors to this distributional issue tend to be rare levels of categorical variables. Since the individual levels are rare, we have unreliable estimates for their effects, and if there are very many of them we may see quite a large effect in aggregate. To help combat this we have a control called `rareCount`. Any level that is observed no more than `rareCount` times during training is re-mapped to a new special level called _rare_ and not allowed to directly contribute (i.e. cannot generate unique indicator columns, and doesn't have a direct effect on `catB` or `catN` encodings). If all the rare levels have a distinct behavior after grouping, the _rare_ level can capture that. **Impact-coding of categorical variables with many levels** Another undesirable effect is over-estimating significance of derived variable fit for `catB` and `catN` impact-coded variables. To fight this, vtreat attempts to estimate out of sample or cross-validated effect significances (when it has enough data). With enough data, setting the `pruneSig` parameter during `prepare()` will help remove noise variables. One can set `pruneSig` to something like _1/number-of-columns_ to ensure that with high probability only a constant number of truly useless variables make it to later modeling. However, the significance of a given effect size for variables that actually have some signal (i.e. non-noise variables) can still be sensitive to in/out sample scoring and the hiding of degrees of freedom that occurs when a large categorical variable (that represents a large number of degrees of freedom) is re-coded as an impact or effect (which appears to have only a single degree of freedom). We next show how to avoid these undesirable illusory effects: better practice in partitioning and using training data. We are doing more with our data (essentially chaining models), so we have to take a bit more care with our data. ## Correct Practice 1/2: Use different data to treat and train Below is part of our suggested work pattern: coding/train/test split.
```{r} dCode <- d[d$rgroup<=20,,drop=FALSE] dTrain <- d[(d$rgroup>20) & (d$rgroup<=80),,drop=FALSE] treatments <- vtreat::designTreatmentsC(dCode,'x','y',TRUE, rareCount=0, # Note set this to something larger, like 5 rareSig=c() # Note set this to something like 0.3 ) dTrainTreated <- vtreat::prepare(treatments,dTrain, pruneSig=c() # Note: set this to filter, like 0.05 or 1/nvars ) m2 <- glm(y~x_catB,data=dTrainTreated,family=binomial(link='logit')) print(summary(m2)) # notice high residual deviance dTrain$predM2 <- predict(m2,newdata=dTrainTreated,type='response') plotRes(dTrain,'predM2','y','model2 on train') # We do not advise creating dCodeTreated for any purpose other than # diagnostic plotting. You should not use the treated coding data # for anything (as that would undo the benefit of having a separate # coding data subset). dCodeTreated <- vtreat::prepare(treatments,dCode,pruneSig=c()) dCode$predM2 <- predict(m2,newdata=dCodeTreated,type='response') plotRes(dCode,'predM2','y','model2 on coding set') dTestTreated <- vtreat::prepare(treatments,dTest,pruneSig=c()) dTest$predM2 <- predict(m2,newdata=dTestTreated,type='response') plotRes(dTest,'predM2','y','model2 on test set') ``` In the above example we saw training and test performance are similar -- and equally poor, as they should be since there is no signal. Though it didn't happen in this case, note the coding set can (falsely) show high performance. This is the bad behavior we wanted to isolate out of the training set. Remember, the goal isn't good performance on training- it is good performance on future data (simulated by test). So doing well on training and bad on test is worse than doing bad on both test and training. There are, of course, other methods to avoid the bias introduced in using the same data to both treat/encode the variables and to train the model. vtreat incorporates a number of these methods, including smoothing (controlled through `smFactor`) and pruning of rare levels (controlled through `rareSig`). ## Correct Practice 2/2: Use simulated out of sample methods (cross methods) Another effective technique: cross-constructed training frames can also be accessed by using `mkCrossFrameCExperiment` or `mkCrossFrameNExperiment`, which we demonstrate here. ```{r} dTrain <- d[d$rgroup<=80,,drop=FALSE] xdat <- vtreat::mkCrossFrameCExperiment(dTrain,'x','y',TRUE, rareCount=0, # Note set this to something larger, like 5 rareSig=c()) treatments <- xdat$treatments print(treatments$scoreFrame) dTrainTreated <- xdat$crossFrame m3 <- glm(y~x_catB,data=dTrainTreated,family=binomial(link='logit')) print(summary(m3)) # notice high residual deviance dTrainTreated$predM3 <- predict(m3,newdata=dTrainTreated,type='response') plotRes(dTrainTreated,'predM3','y','model3 on train') dTestTreated <- vtreat::prepare(treatments,dTest,pruneSig=c()) dTest$predM3 <- predict(m3,newdata=dTestTreated,type='response') plotRes(dTest,'predM3','y','model3 on test set') ``` Notice the glm significance is off, but the model quality is similar on train and test, and the scoreFrame significance is a correct indication.
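As a final note, the score frame significances shown above can be used directly for the _1/number-of-columns_ style pruning discussed earlier. A small sketch using the `treatments` object from the last experiment (here we take "number of variables" to be the number of derived variables in the score frame, a minor variation on the heuristic):

```{r}
# sketch: keep only derived variables passing the per-variable significance heuristic
sf <- treatments$scoreFrame
heuristic <- 1/nrow(sf)
sf$varName[sf$sig < heuristic]
```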
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatOverfit.Rmd
## ----------------------------------------------------------------------------- library('vtreat') set.seed(2325) populationFrame <- data.frame( popsize = round(rlnorm(100,meanlog=log(4000),sdlog=1)), stringsAsFactors = FALSE) populationFrame$code <- paste0('z',formatC(sample.int(100000, size=nrow(populationFrame), replace=FALSE),width=5,flag='0')) rareCodes <- populationFrame$code[populationFrame$popsize<1000] # Draw individuals from code-regions proportional to size of code region # (or uniformly over all individuals labeled by code region). # Also add the outcome which has altered conditional probability for rareCodes. drawIndividualsAndReturnCodes <- function(n) { ords <- sort(sample.int(sum(populationFrame$popsize),size=n,replace=TRUE)) cs <- cumsum(populationFrame$popsize) indexes <- findInterval(ords,cs)+1 indexes <- indexes[sample.int(n,size=n,replace=FALSE)] samp <- data.frame(code=populationFrame$code[indexes], stringsAsFactors = FALSE) samp$inClass <- runif(n) < ifelse(samp$code %in% rareCodes,0.3,0.01) samp } ## ----------------------------------------------------------------------------- testSet <- drawIndividualsAndReturnCodes(2000) table(generatedAsRare=testSet$code %in% rareCodes,inClass=testSet$inClass) ## ----------------------------------------------------------------------------- designSet <- drawIndividualsAndReturnCodes(2000) treatments <- vtreat::designTreatmentsC(designSet,'code','inClass',TRUE, rareCount=5,rareSig=NULL, verbose=FALSE) treatments$scoreFrame[,c('varName','sig'),drop=FALSE] ## ----------------------------------------------------------------------------- designSetTreated <- vtreat::prepare(treatments,designSet,pruneSig=0.5) designSetTreated$code <- designSet$code summary(as.numeric(table(designSetTreated$code[designSetTreated$code_lev_rare==1]))) summary(as.numeric(table(designSetTreated$code[designSetTreated$code_lev_rare!=1]))) ## ---- fig.width=6------------------------------------------------------------- testSetTreated <- vtreat::prepare(treatments,testSet,pruneSig=0.5) testSetTreated$code <- testSet$code testSetTreated$newCode <- !(testSetTreated$code %in% unique(designSet$code)) testSetTreated$generatedAsRareCode <- testSetTreated$code %in% rareCodes # Show code_lev_rare==1 corresponds to a subset of rows with elevated inClass==TRUE rate. table(code_lev_rare=testSetTreated$code_lev_rare, inClass=testSetTreated$inClass) # Show newCodes get coded with code_level_rare==1. table(newCode=testSetTreated$newCode,code_lev_rare=testSetTreated$code_lev_rare) # Show newCodes tend to come from defined rareCodes. table(newCode=testSetTreated$newCode, generatedAsRare=testSetTreated$generatedAsRareCode) ## ---- fig.width=6------------------------------------------------------------- # Show code_catP's behavior on rare and novel levels. summary(testSetTreated$code_catP) summary(testSetTreated$code_catP[testSetTreated$code_lev_rare==1]) summary(testSetTreated$code_catP[testSetTreated$newCode]) summary(testSetTreated$code_catP[testSetTreated$generatedAsRareCode])
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatRareLevels.R
---
title: "vtreat Rare Levels"
author: "John Mount"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{vtreat Rare Levels}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

For some categorical variables rarity can reflect structural features. For instance with United States Zip codes rare zip codes often represent low population density regions. When this is the case it can make sense to pool the rare levels into a new re-coded level called "rare." If this new level is statistically significant it can be a usable modeling feature. This sort of pooling is only potentially useful if levels below a given training count behave similarly.

This capability was more of an experimental demonstration of possible extensions of `vtreat` to have more inference capabilities about rare levels than a commonly useful feature. Most of this power has since been captured in the more useful `catP` feature (also demonstrated here). Even more power is found in using an interaction of `catN` or `catB` with `catP`.

An example of the rare level feature using `vtreat` is given below. First we set up some data by defining a set of population centers (`populationFrame`) and code to observe individuals (with replacement) uniformly from the combined population with a rare condition (`inClass`) that has elevated occurrence in observations coming from the small population centers (`rareCodes`).

```{r}
library('vtreat')
set.seed(2325)
populationFrame <- data.frame(
   popsize = round(rlnorm(100,meanlog=log(4000),sdlog=1)),
   stringsAsFactors = FALSE)
populationFrame$code <- paste0('z',formatC(sample.int(100000,
                                                      size=nrow(populationFrame),
                                                      replace=FALSE),width=5,flag='0'))
rareCodes <- populationFrame$code[populationFrame$popsize<1000]

# Draw individuals from code-regions proportional to size of code region
# (or uniformly over all individuals labeled by code region).
# Also add the outcome which has altered conditional probability for rareCodes.
drawIndividualsAndReturnCodes <- function(n) {
  ords <- sort(sample.int(sum(populationFrame$popsize),size=n,replace=TRUE))
  cs <- cumsum(populationFrame$popsize)
  indexes <- findInterval(ords,cs)+1
  indexes <- indexes[sample.int(n,size=n,replace=FALSE)]
  samp <- data.frame(code=populationFrame$code[indexes],
                     stringsAsFactors = FALSE)
  samp$inClass <- runif(n) < ifelse(samp$code %in% rareCodes,0.3,0.01)
  samp
}
```

We then draw a sample we want to make some observations on.

```{r}
testSet <- drawIndividualsAndReturnCodes(2000)
table(generatedAsRare=testSet$code %in% rareCodes,inClass=testSet$inClass)
```

Notice that in the sample we can observe the elevated rate of `inClass==TRUE` conditioned on coming from a `code` that is one of the `rareCodes`.

We could try to learn this relation using `vtreat`. To do this we set up another sample (`designSet`) to work on, so we are not inferring from `testSet` (where we will evaluate results).

```{r}
designSet <- drawIndividualsAndReturnCodes(2000)
treatments <- vtreat::designTreatmentsC(designSet,'code','inClass',TRUE,
                                        rareCount=5,rareSig=NULL,
                                        verbose=FALSE)
treatments$scoreFrame[,c('varName','sig'),drop=FALSE]
```

We see in `treatments$scoreFrame` we have a level called `code_lev_rare`, which is where a number of rare levels have been re-coded. We can also confirm levels that occur `rareCount` or fewer times are eligible to be coded to `code_lev_rare`.
```{r}
designSetTreated <- vtreat::prepare(treatments,designSet,pruneSig=0.5)
designSetTreated$code <- designSet$code
summary(as.numeric(table(designSetTreated$code[designSetTreated$code_lev_rare==1])))
summary(as.numeric(table(designSetTreated$code[designSetTreated$code_lev_rare!=1])))
```

We can now apply this treatment to `testSet` to see how this inferred rare level performs. Notice also that `code_catP`, which directly encodes the prevalence or frequency of the level during training, also gives a usable estimate of level size (likely a more useful one than the rare-level code itself). As we can see below the `code_lev_rare` indicator correlates with the condition, and usefully re-codes novel levels (levels in `testSet` that were not seen in `designSet`) to rare.

```{r, fig.width=6}
testSetTreated <- vtreat::prepare(treatments,testSet,pruneSig=0.5)
testSetTreated$code <- testSet$code
testSetTreated$newCode <- !(testSetTreated$code %in% unique(designSet$code))
testSetTreated$generatedAsRareCode <- testSetTreated$code %in% rareCodes

# Show code_lev_rare==1 corresponds to a subset of rows with elevated inClass==TRUE rate.
table(code_lev_rare=testSetTreated$code_lev_rare,
      inClass=testSetTreated$inClass)

# Show newCodes get coded with code_lev_rare==1.
table(newCode=testSetTreated$newCode,code_lev_rare=testSetTreated$code_lev_rare)

# Show newCodes tend to come from defined rareCodes.
table(newCode=testSetTreated$newCode,
      generatedAsRare=testSetTreated$generatedAsRareCode)
```

```{r, fig.width=6}
# Show code_catP's behavior on rare and novel levels.
summary(testSetTreated$code_catP)

summary(testSetTreated$code_catP[testSetTreated$code_lev_rare==1])

summary(testSetTreated$code_catP[testSetTreated$newCode])

summary(testSetTreated$code_catP[testSetTreated$generatedAsRareCode])
```
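As noted at the top of this note, more power can come from interacting the effect code with the prevalence code. The following is only a rough sketch of that idea (it assumes the default treatment design also supplies a `code_catB` column, and for serious work a cross-frame via `mkCrossFrameCExperiment` would be preferred to avoid nested-model bias):

```{r}
# Sketch: logistic regression on the interaction of the effect code (code_catB)
# with the prevalence code (code_catP).
treatmentsB <- vtreat::designTreatmentsC(designSet,'code','inClass',TRUE,
                                         verbose=FALSE)
designTreatedB <- vtreat::prepare(treatmentsB,designSet,pruneSig=NULL)
testTreatedB <- vtreat::prepare(treatmentsB,testSet,pruneSig=NULL)
mInt <- glm(inClass ~ code_catB*code_catP,
            data=designTreatedB,family=binomial)
testTreatedB$pred <- predict(mInt,newdata=testTreatedB,type='response')
summary(testTreatedB$pred[testTreatedB$inClass])
summary(testTreatedB$pred[!testTreatedB$inClass])
```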
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatRareLevels.Rmd
## ----exampledata-------------------------------------------------------------- library('vtreat') dTrainC <- data.frame(x=c('a','a','a','b','b',NA), y=c(FALSE,FALSE,TRUE,FALSE,TRUE,TRUE)) treatmentsC <- designTreatmentsC(dTrainC,colnames(dTrainC),'y',TRUE, catScaling=FALSE, verbose=FALSE) dTrainCTreatedUnscaled <- prepare(treatmentsC,dTrainC,pruneSig=c(),scale=FALSE) dTrainCTreatedScaled <- prepare(treatmentsC,dTrainC,pruneSig=c(),scale=TRUE) ## ----printorig---------------------------------------------------------------- print(dTrainC) ## ----printunscaled------------------------------------------------------------ print(dTrainCTreatedUnscaled) ## ----printscaled-------------------------------------------------------------- print(dTrainCTreatedScaled) ## ----check-------------------------------------------------------------------- slopeFrame <- data.frame(varName = treatmentsC$scoreFrame$varName, stringsAsFactors = FALSE) slopeFrame$mean <- vapply(dTrainCTreatedScaled[, slopeFrame$varName, drop = FALSE], mean, numeric(1)) slopeFrame$slope <- vapply(slopeFrame$varName, function(c) { lm(paste('y', c, sep = '~'), data = dTrainCTreatedScaled)$coefficients[[2]] }, numeric(1)) slopeFrame$sig <- vapply(slopeFrame$varName, function(c) { treatmentsC$scoreFrame[treatmentsC$scoreFrame$varName == c, 'sig'] }, numeric(1)) slopeFrame$badSlope <- ifelse(is.na(slopeFrame$slope), TRUE, abs(slopeFrame$slope - 1) > 1.e-8) print(slopeFrame) ## ----catscale----------------------------------------------------------------- treatmentsC2 <- designTreatmentsC(dTrainC,colnames(dTrainC),'y',TRUE, catScaling=TRUE, verbose=FALSE) dTrainCTreatedScaled2 <- prepare(treatmentsC2,dTrainC,pruneSig=c(),scale=TRUE) print(dTrainCTreatedScaled2) ## ----checks------------------------------------------------------------------- colMeans(dTrainCTreatedScaled2) lm(y~x_lev_NA,data=dTrainCTreatedScaled) lm(y~x_lev_NA,data=dTrainCTreatedScaled2) ## ----------------------------------------------------------------------------- vapply(slopeFrame$varName, function(c) { glm(paste('y', c, sep = '~'),family=binomial, data = dTrainCTreatedScaled2)$coefficients[[2]] }, numeric(1)) ## ----------------------------------------------------------------------------- set.seed(235235) dTrainN <- data.frame(x1=rnorm(100), x2=rnorm(100), x3=rnorm(100), stringsAsFactors=FALSE) dTrainN$y <- 1000*(dTrainN$x1 + dTrainN$x2) cEraw <- vtreat::mkCrossFrameNExperiment(dTrainN, c('x1','x2','x3'),'y', scale=TRUE) newvars <- cEraw$treatments$scoreFrame$varName print(newvars) dM1 <- as.matrix(cEraw$crossFrame[, newvars]) pCraw <- stats::prcomp(dM1, scale.=FALSE,center=TRUE) print(pCraw) dTrainN$yScaled <- scale(dTrainN$y,center=TRUE,scale=TRUE) cEscaled <- vtreat::mkCrossFrameNExperiment(dTrainN, c('x1','x2','x3'),'yScaled', scale=TRUE) newvars_s <- cEscaled$treatments$scoreFrame$varName print(newvars_s) dM2 <- as.matrix(cEscaled$crossFrame[, newvars_s]) pCscaled <- stats::prcomp(dM2, scale.=FALSE,center=TRUE) print(pCscaled)
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatScaleMode.R
--- title: "vtreat scale mode" author: "Win-Vector LLC" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{vtreat scale mode} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <code>vtreat::prepare(scale=TRUE)</code> is a variation of <code>vtreat::prepare()</code> intended to prepare data frames so all the derived input or independent (`x`) variables are fully in outcome or dependent variable (`y`) units. This is in the sense of a linear regression for numeric `y`'s (`vtreat::designTreatmentsN` and `vtreat::mkCrossFrameNExperiment`). For classification problems (or categorical `y`'s) as of version `0.5.26` and newer (available [here](https://github.com/WinVector/vtreat)) scaling is established through a a logistic regression ["in link units"](https://github.com/WinVector/Examples/blob/master/PCR/YAwarePCAclassification.md) or as 0/1 indicators depending on the setting of the `catScaling` argument in `vtreat::designTreatmentsC` or `vtreat::mkCrossFrameNExperiment`. Prior to this version classification the scaling calculation (and only the scaling calculation) was always handled as a linear regression against a 0/1 `y`-indicator. `catScaling=FALSE` can be a bit faster as the underlying regression can be a bit quicker than a logistic regression. This is the appropriate preparation before a geometry/metric sensitive modeling step such as principal components analysis or clustering (such as k-means clustering). Normally (with <code>vtreat::prepare(scale=FALSE)</code>) vtreat passes through a number of variables with minimal alteration (cleaned numeric), builds 0/1 indicator variables for various conditions (categorical levels, presence of NAs, and so on), and builds some "in y-units" variables (catN, catB) that are in fact sub-models. With <code>vtreat::prepare(scale=TRUE)</code> all of these numeric variables are then re-processed to have mean zero, and slope 1 (when possible) when appropriately regressed against the y-variable. This is easiest to illustrate with a concrete example. ```{r exampledata} library('vtreat') dTrainC <- data.frame(x=c('a','a','a','b','b',NA), y=c(FALSE,FALSE,TRUE,FALSE,TRUE,TRUE)) treatmentsC <- designTreatmentsC(dTrainC,colnames(dTrainC),'y',TRUE, catScaling=FALSE, verbose=FALSE) dTrainCTreatedUnscaled <- prepare(treatmentsC,dTrainC,pruneSig=c(),scale=FALSE) dTrainCTreatedScaled <- prepare(treatmentsC,dTrainC,pruneSig=c(),scale=TRUE) ``` Note we have set `catScaling=FALSE` to ask that we treat `y` as a 0/1 indicator and scale using linear regression. The standard vtreat treated frame converts the original data from this: ```{r printorig} print(dTrainC) ``` into this: ```{r printunscaled} print(dTrainCTreatedUnscaled) ``` This is the "standard way" to run vtreat -- with the exception that for this example we set <code>pruneSig</code> to <code>NULL</code> to suppress variable pruning, instead of setting it to a value in the interval <code>(0,1)</code>. The principle is: vtreat inflicts the minimal possible alterations on the data, leaving as much as possible to the downstream machine learning code. This does turn out to already be a lot of alteration. Mostly vtreat is taking only steps that are unsafe to leave for later: re-encoding of large categoricals, re-coding of aberrant values, and bulk pruning of variables. However some procedures, in particular principal components analysis or geometric clustering, assume all of the columns have been fully transformed. 
The usual assumption ("more honored in the breach than the observance") is that the columns are centered (mean zero) and scaled. The non y-aware meaning of "scaled" is unit variance. However, vtreat is designed to emphasize y-aware processing and we feel the y-aware sense of scaling should be: unit slope when regressed against y.

If you want standard scaling you can use the standard frame produced by vtreat and scale it yourself. If you want vtreat-style y-aware scaling (which we strongly think is the right thing to do) you can use <code>vtreat::prepare(scale=TRUE)</code> which produces a frame that looks like the following:

```{r printscaled}
print(dTrainCTreatedScaled)
```

First we can check the claims. Are the variables mean-zero and slope 1 when regressed against y?

```{r check}
slopeFrame <- data.frame(varName = treatmentsC$scoreFrame$varName,
                         stringsAsFactors = FALSE)
slopeFrame$mean <-
  vapply(dTrainCTreatedScaled[, slopeFrame$varName, drop = FALSE], mean,
         numeric(1))
slopeFrame$slope <- vapply(slopeFrame$varName,
                           function(c) {
                             lm(paste('y', c, sep = '~'),
                                data = dTrainCTreatedScaled)$coefficients[[2]]
                           },
                           numeric(1))
slopeFrame$sig <- vapply(slopeFrame$varName,
                         function(c) {
                           treatmentsC$scoreFrame[treatmentsC$scoreFrame$varName == c, 'sig']
                         },
                         numeric(1))
slopeFrame$badSlope <-
  ifelse(is.na(slopeFrame$slope), TRUE, abs(slopeFrame$slope - 1) > 1.e-8)

print(slopeFrame)
```

The above claims are true with the exception of the derived variable <code>x_lev_x.b</code>. This is because the outcome variable <code>y</code> has identical distribution when the original variable <code>x=='b'</code> and when <code>x!='b'</code> (<code>y</code> is true half the time in both cases). This means <code>y</code> is perfectly independent of <code>x=='b'</code> and the regression slope must be zero (thus, cannot be 1). vtreat now treats this as needing to scale by a multiplicative factor of zero. Note also that the significance level associated with <code>x_lev_x.b</code> is large, making this variable easy to prune. The <code>varMoves</code> and significance facts in <code>treatmentsC\$scoreFrame</code> are about the un-scaled frame (where <code>x_lev_x.b</code> does in fact move).

For a good discussion of the application of *y*-aware scaling to Principal Components Analysis please see [here](https://win-vector.com/2016/05/23/pcr_part2_yaware/).

Previous versions of vtreat (0.5.22 and earlier) would copy variables that could not be sensibly scaled into the treated frame unaltered. This was considered the "most faithful" thing to do. However we now feel that this practice was not safe for many downstream procedures, such as principal components analysis and geometric clustering.

### Categorical outcome mode "catScaling=TRUE"

As of version `0.5.26` `vtreat` also supports a "scaling mode for categorical outcomes." In this mode scaling is performed using the coefficient of a logistic regression fit on a categorical outcome instead of the coefficient of a linear fit (with the outcome encoded as a zero/one indicator). The idea is that with this mode on we are scaling as a logistic regression would -- so we are in logistic regression "link space" (where logistic regression assumes effects are additive). This mode may be well suited for principal components analysis or principal components regression where the target variable is categorical (i.e., classification tasks). To ensure this effect we set the argument `catScaling=TRUE` in `vtreat::designTreatmentsC` or `vtreat::mkCrossFrameCExperiment`. We demonstrate this below.
```{r catscale}
treatmentsC2 <- designTreatmentsC(dTrainC,colnames(dTrainC),'y',TRUE,
                                  catScaling=TRUE,
                                  verbose=FALSE)
dTrainCTreatedScaled2 <- prepare(treatmentsC2,dTrainC,pruneSig=c(),scale=TRUE)
print(dTrainCTreatedScaled2)
```

Notice the new scaled frame is in a different scale than the original scaled frame. Which scaling is more appropriate or useful likely depends on the problem domain. The new scaled columns are again mean-0 (so they are not exactly the logistic link values, which may not have been so shifted). The new scaled columns do not necessarily have linear model slope 1 as the original scaled columns did, as we see below:

```{r checks}
colMeans(dTrainCTreatedScaled2)
lm(y~x_lev_NA,data=dTrainCTreatedScaled)
lm(y~x_lev_NA,data=dTrainCTreatedScaled2)
```

The new scaled columns, however, are in good logistic link units.

```{r}
vapply(slopeFrame$varName,
       function(c) {
         glm(paste('y', c, sep = '~'),family=binomial,
             data = dTrainCTreatedScaled2)$coefficients[[2]]
       },
       numeric(1))
```

### PCA/PCR

The intended applications of scale mode include preparing data for metric sensitive applications such as KNN classification/regression and Principal Components Analysis/Regression. Please see [here](https://github.com/WinVector/Examples/tree/master/PCR) for an article series describing such applications.

Overall the advice is to first use the following pattern:

* Significance prune incoming variables.
* Use *y*-aware scaling.
* Significance prune resulting latent variables.

However, practitioners experienced in principal components analysis may be uncomfortable with the range of eigenvalues or singular values returned by *y*-aware analysis. If a more familiar scale is desired we suggest performing the *y*-aware scaling against an additional scaled and centered *y* to try to get ranges closer to the traditional unit ranges. This can be achieved as shown below.

```{r}
set.seed(235235)
dTrainN <- data.frame(x1=rnorm(100),
                      x2=rnorm(100),
                      x3=rnorm(100),
                      stringsAsFactors=FALSE)
dTrainN$y <- 1000*(dTrainN$x1 + dTrainN$x2)
cEraw <- vtreat::mkCrossFrameNExperiment(dTrainN,
                                         c('x1','x2','x3'),'y',
                                         scale=TRUE)
newvars <- cEraw$treatments$scoreFrame$varName
print(newvars)
dM1 <- as.matrix(cEraw$crossFrame[, newvars])
pCraw <- stats::prcomp(dM1,
                       scale.=FALSE,center=TRUE)
print(pCraw)

dTrainN$yScaled <- scale(dTrainN$y,center=TRUE,scale=TRUE)
cEscaled <- vtreat::mkCrossFrameNExperiment(dTrainN,
                                            c('x1','x2','x3'),'yScaled',
                                            scale=TRUE)
newvars_s <- cEscaled$treatments$scoreFrame$varName
print(newvars_s)
dM2 <- as.matrix(cEscaled$crossFrame[, newvars_s])
pCscaled <- stats::prcomp(dM2,
                          scale.=FALSE,center=TRUE)
print(pCscaled)
```

Notice the second application of `stats::prcomp` has more standard scaling of the reported standard deviations (though we still do not advise choosing latent variables based on mere comparisons to unit magnitude).
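As a small follow-on sketch, freshly prepared rows can be projected onto the *y*-aware principal components, keeping just the leading latent variables; this re-uses the `cEscaled` treatment plan and `pCscaled` decomposition from above together with the standard `predict()` method for `prcomp` objects.

```{r}
# Sketch: prepare application rows with the scaled treatment plan, then project
# onto the y-aware principal components and keep the first two latent variables.
dApp <- dTrainN[, c('x1','x2','x3'), drop=FALSE]
dAppTreated <- vtreat::prepare(cEscaled$treatments, dApp,
                               pruneSig=NULL, scale=TRUE)
latentVars <- predict(pCscaled, newdata=as.matrix(dAppTreated[, newvars_s]))
head(latentVars[, 1:2])
```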
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatScaleMode.Rmd
## ----------------------------------------------------------------------------- signk <- function(n,k) { sigTab <- data.frame(y=c(rep(TRUE,n/2),rep(FALSE,n/2)),v=FALSE) sigTab[seq_len(k),'v'] <- TRUE vtreat::designTreatmentsC(sigTab,'v','y',TRUE,verbose=FALSE)$scoreFrame[1,'sig'] } sigTab <- data.frame(k=c(1,2,3,4,5,10,20,50,100)) # If you want to see a rare but perfect indicator of positive class # that's only on k times out of 1000, this is the lower bound on pruneSig sigTab$sigEst = vapply(sigTab$k,function(k) signk(1000,k),numeric(1)) sigTab$minusLogSig = -log(sigTab$sigEst) # we expect this to be approximately k print(sigTab) ## ----------------------------------------------------------------------------- set.seed(3346) n <- 1000 k <- 4 d <- data.frame(y=rbinom(n,size=1,prob=0.5)>0) d$catVarNoise <- rep(paste0('lev',sprintf("%03d",1:floor(n/k))),(k+1))[1:n] d$catVarPerfect <- paste0(d$catVar,substr(as.character(d$y),1,1)) d <- d[order(d$catVarPerfect),] head(d) treatmentsC <- vtreat::designTreatmentsC(d,c('catVarNoise','catVarPerfect'),'y',TRUE) # Estimate effect significance (not coefficient significance). estSigGLM <- function(xVar,yVar,numberOfHiddenDegrees=0) { d <- data.frame(x=xVar,y=yVar,stringsAsFactors = FALSE) model <- stats::glm(stats::as.formula('y~x'), data=d, family=stats::binomial(link='logit')) delta_deviance <- model$null.deviance - model$deviance delta_df <- model$df.null - model$df.residual + numberOfHiddenDegrees pRsq <- 1.0 - model$deviance/model$null.deviance sig <- stats::pchisq(delta_deviance, delta_df, lower.tail=FALSE) sig } prepD <- vtreat::prepare(treatmentsC,d,pruneSig=c()) ## ----scoreframe--------------------------------------------------------------- print(treatmentsC$scoreFrame[,c('varName','rsq','sig','extraModelDegrees')]) ## ----scoresignal-------------------------------------------------------------- summary(glm(y~d$catVarPerfect=='lev001T',data=d,family=binomial)) estSigGLM(prepD$catVarPerfect_catB,prepD$y,0) # wrong est estSigGLM(prepD$catVarPerfect_catB,prepD$y, numberOfHiddenDegrees=length(unique(d$catVarPerfect))-1) ## ----scorenoise--------------------------------------------------------------- summary(glm(y~d$catVarNoise=='lev001',data=d,family=binomial)) estSigGLM(prepD$catVarNoise_catB,prepD$y,0) # wrong est estSigGLM(prepD$catVarNoise_catB,prepD$y, numberOfHiddenDegrees=length(unique(d$catVarNoise))-1)
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatSignificance.R
--- title: "vtreat significance" author: "John Mount, Nina Zumel" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{vtreat significance} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- `vtreat::prepare` includes a required argument `pruneSig` that (if not NULL) is used to prune variables. Obviously significance depends on training set size (so is not an intrinsic property of just the variables) and there are issues of bias in the estimate (which vtreat attempts to eliminate by estimating significance of complex sub-model variables on cross-validated or out of sample data). As always there is a question of what to set a significance control to. Our advice is the following pragmatic: Use variable filtering on wide datasets (datasets with many columns or variables). Most machine learning algorithms can not defend themselves against large numbers of noise variables (including those algorithms that have cross-validation procedures built in). Examples are given [here](https://win-vector.com/2014/02/01/bad-bayes-an-example-of-why-you-need-hold-out-testing/). As an upper bound think of setting `pruneSig` below _1/numberOfColumns_. Setting `pruneSig` to _1/numberOfColumns_ means that (in expectation) only a constant number of pure noise variables (variables with no actual relation to the outcome we are trying to predict) should create columns. This means (under some assumptions, and in expectation) we expect only a bounded number of noisy columns to be exposed to downstream statistical and machine learning algorithms (which they can presumably handle). As a lower bound think of what sort of good variables get thrown out at a given setting of `pruneSig`. For example suppose our problem is categorization in a data set with _n/2_ positive examples and _n/2_ negative examples. Consider the observed significance of a rare indicator variable that is on _k_ times in training and is only on for positive instances. A random variable that is on _k_ times would achieve this purity with probability $2^{-k}$, so we expect it to have a _-log(significance)_ in the ballpark of _k_. So a `pruneSig` of $2^{-k}$ will filter all such variables out (be they good or bad). Thus if you want levels or indicators that are on only a _z_ fraction of the time on a training set of size _n_ you want `pruneSig` >> $2^{-z*n}$. Example: ```{r} signk <- function(n,k) { sigTab <- data.frame(y=c(rep(TRUE,n/2),rep(FALSE,n/2)),v=FALSE) sigTab[seq_len(k),'v'] <- TRUE vtreat::designTreatmentsC(sigTab,'v','y',TRUE,verbose=FALSE)$scoreFrame[1,'sig'] } sigTab <- data.frame(k=c(1,2,3,4,5,10,20,50,100)) # If you want to see a rare but perfect indicator of positive class # that's only on k times out of 1000, this is the lower bound on pruneSig sigTab$sigEst = vapply(sigTab$k,function(k) signk(1000,k),numeric(1)) sigTab$minusLogSig = -log(sigTab$sigEst) # we expect this to be approximately k print(sigTab) ``` For a data set with 100 variables (and 1000 rows), you might want to set `pruneSig` <= 0.01 to limit the number of pure noise variables that enter the model. Note that this value is smaller than the lower bounds given above for $k < 5$. This means that in a data set of this width and length, you may not be able to detect rare but perfect indicators that occur fewer than 5 times. You would have a chance of using such rare indicators in a _catN_ or _catB_ effects coded variable. 
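As a quick sanity check of the heuristic, we can put the $2^{-k}$ bound next to the estimated significances; the two should be of broadly similar magnitude.

```{r}
# Compare the estimated significances to the 2^(-k) heuristic bound.
sigTab$twoToMinusK <- 2^(-sigTab$k)
print(sigTab[,c('k','sigEst','twoToMinusK')])
```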
Below we design a data frame with a perfect categorical variable (completely determines the outcome y) where each level occurs exactly 2 times. The individual levels are insignificant, but we can still extract a significant _catB_ effect coded variable. ```{r} set.seed(3346) n <- 1000 k <- 4 d <- data.frame(y=rbinom(n,size=1,prob=0.5)>0) d$catVarNoise <- rep(paste0('lev',sprintf("%03d",1:floor(n/k))),(k+1))[1:n] d$catVarPerfect <- paste0(d$catVar,substr(as.character(d$y),1,1)) d <- d[order(d$catVarPerfect),] head(d) treatmentsC <- vtreat::designTreatmentsC(d,c('catVarNoise','catVarPerfect'),'y',TRUE) # Estimate effect significance (not coefficient significance). estSigGLM <- function(xVar,yVar,numberOfHiddenDegrees=0) { d <- data.frame(x=xVar,y=yVar,stringsAsFactors = FALSE) model <- stats::glm(stats::as.formula('y~x'), data=d, family=stats::binomial(link='logit')) delta_deviance <- model$null.deviance - model$deviance delta_df <- model$df.null - model$df.residual + numberOfHiddenDegrees pRsq <- 1.0 - model$deviance/model$null.deviance sig <- stats::pchisq(delta_deviance, delta_df, lower.tail=FALSE) sig } prepD <- vtreat::prepare(treatmentsC,d,pruneSig=c()) ``` vtreat produces good variable significances using out of sample simulation (cross frames). ```{r scoreframe} print(treatmentsC$scoreFrame[,c('varName','rsq','sig','extraModelDegrees')]) ``` For categorical targets we have in the `scoreFrame` the `sig` column is the significance of the single variable logistic regression using the named variable (plus a constant term), and the `rsq` column is the "pseudo-r-squared" or portion of deviance explained (please see [here](https://win-vector.com/2011/09/14/the-simpler-derivation-of-logistic-regression/) for some notes). For numeric targets the `sig` column is the significance of the single variable linear regression using the named variable (plus a constant term), and the `rsq` column is the "r-squared" or portion of variance explained (please see [here](https://win-vector.com/2011/11/21/correlation-and-r-squared/)) for some notes). Signal carrying complex variables can score as significant, even those composed of rare levels. ```{r scoresignal} summary(glm(y~d$catVarPerfect=='lev001T',data=d,family=binomial)) estSigGLM(prepD$catVarPerfect_catB,prepD$y,0) # wrong est estSigGLM(prepD$catVarPerfect_catB,prepD$y, numberOfHiddenDegrees=length(unique(d$catVarPerfect))-1) ``` Noise variables (those without a relation to outcome) are also scored correctly as long was we account for the degrees of freedom. ```{r scorenoise} summary(glm(y~d$catVarNoise=='lev001',data=d,family=binomial)) estSigGLM(prepD$catVarNoise_catB,prepD$y,0) # wrong est estSigGLM(prepD$catVarNoise_catB,prepD$y, numberOfHiddenDegrees=length(unique(d$catVarNoise))-1) ```
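One way to put these score frame significances to work is to keep only the derived variables passing a threshold and prepare just those; the `1/numberOfVariables` style threshold below is simply the heuristic discussed earlier, and `varRestriction` is the `prepare()` argument for an explicit variable list. This is a sketch, not a tuning recommendation.

```{r}
# Sketch: prune on the cross-validated significances, then prepare only the
# surviving derived variables.
sf <- treatmentsC$scoreFrame
pruneSig <- 1/nrow(sf)
goodVars <- sf$varName[sf$sig < pruneSig]
print(goodVars)
prepPruned <- vtreat::prepare(treatmentsC, d, pruneSig=NULL,
                              varRestriction=goodVars)
head(prepPruned)
```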
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatSignificance.Rmd
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(echo = TRUE) knitr::opts_chunk$set(fig.width = 7) ## ----------------------------------------------------------------------------- vtreat::kWayStratifiedY(3,2,NULL,NULL) ## ----------------------------------------------------------------------------- # This method is not a great idea as the data could have structure that strides # in the same pattern as this split. # Such technically is possible for any split, but we typically use # pseudo-random structure (that is not the same across many potential # split calls) to try and make it unlikely such structures # match often. modularSplit <- function(nRows,nSplits,dframe,y) { group <- seq_len(nRows) %% nSplits lapply(unique(group), function(gi) { list(train=which(group!=gi), app=which(group==gi)) }) } ## ----------------------------------------------------------------------------- vtreat::buildEvalSets(nRows=25,nSplits=3,splitFunction=modularSplit) ## ----------------------------------------------------------------------------- badSplit <- function(nRows,nSplits,dframe,y) { list(list(train=seq_len(nRows),app=seq_len(nRows))) } vtreat::buildEvalSets(nRows=5,nSplits=3,splitFunction=badSplit) ## ----warning=FALSE------------------------------------------------------------ library('vtreat') ## ----------------------------------------------------------------------------- set.seed(23255) d <- data.frame(y=sin(1:100)) # stratified 5-fold cross validation pStrat <- kWayStratifiedY(nrow(d),5,d,d$y) # check if the split is a good partition check = vtreat::problemAppPlan(nrow(d),5,pStrat,TRUE) if(is.null(check)) { print("Plan is good") } else { print(paste0("Problem with plan: ", check)) } d$stratGroup <- vtreat::getSplitPlanAppLabels(nrow(d),pStrat) # unstratified 5-fold cross validation pSimple <- kWayCrossValidation(nrow(d),5,d,d$y) # check if the split is a good partition; return null if so check = vtreat::problemAppPlan(nrow(d),5,pSimple,TRUE) if(is.null(check)) { print("Plan is good") } else { print(paste0("Problem with plan: ", check)) } d$simpleGroup <- vtreat::getSplitPlanAppLabels(nrow(d),pSimple) # mean(y) for each fold, unstratified tapply(d$y,d$simpleGroup,mean) # standard error of mean(y) sd(tapply(d$y,d$simpleGroup,mean)) # mean(y) for each fold, unstratified tapply(d$y,d$stratGroup,mean) # standard error of mean(y) sd(tapply(d$y,d$stratGroup,mean))
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatSplitting.R
--- title: "vtreat data splitting" author: "John Mount, Nina Zumel" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{vtreat data splitting} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) knitr::opts_chunk$set(fig.width = 7) ``` ## vtreat data set splitting ### Motivation [`vtreat`](https://github.com/WinVector/vtreat) supplies a number of data set splitting or cross-validation planning facilities. Some services are implicit such as the simulated out of sample scoring of high degree of freedom derived variables (such as `catB`, `catN`,`catD`, and `catP`; see [here](https://winvector.github.io/vtreathtml/vtreatVariableTypes.html) for a list of variable types). Some services are explicit such as `vtreat::mkCrossFrameCExperiment` and `vtreat::mkCrossFrameNExperiment` (please see [here](https://winvector.github.io/vtreathtml/vtreatCrossFrames.html)). And there is even a user-facing cross-validation planner in `vtreat::buildEvalSets` (try `help(buildEvalSets)` for details). We (Nina Zumel and John Mount) have written a lot on structured cross-validation; the most relevant article being [Random Test/Train Split is not Always Enough](https://win-vector.com/2015/01/05/random-testtrain-split-is-not-always-enough/). The point is that in retrospective studies random test/train split is *at best* a simulation of how a model will be applied in the future. It is not an actual experimental design as in a [randomized control trial](https://en.wikipedia.org/wiki/Randomized_controlled_trial). To be an effective simulation you must work to preserve structure that will be true in future application. The overall idea is: a better splitting plan helps build a model that actually performs better in practice. And porting such a splitting plan back to your evaluation procedures gives you a better estimate of this future model performance. A random test/train split attempts to preserve the following: * Future application data is exchangeable with training data (prior to model construction). * Future application data remains exchangeable with test data (even after model construction, as test data is not used in model construction). Note if there is a concept change (also called issues of non-stationarity) then future data is already not statistically exchangeable with training data (so can't preserve a property you never had). However even if your future data starts exchangeable with training data there is at least one (often) un-modeled difference between training data and future application data: * Future application data tends to be formed after (or in the future of) training data. This is usually an unstated structure of your problem solving plan: use annotated data from the past to build a supervised model for future un-annotated data. ### Examples With the above discussion under our belt we get back to the problem at hand. When creating an appropriate test/train split, we may have to consider one or more of the following: * **Stratification:** Stratification preserves the distribution or prevalence of the outcome variable (or any other variable, but vtreat only stratifies on _y_). For example, for a classification problem with a target class prevalence of 15%, stratifying on _y_ insures that both the training and test sets have target class prevalence of precisely 15% (or as close to that as is possible), not just "around" 15%, as would happen with a simple randomized test/train split. 
This is especially important for modeling rare events.
* **Grouping:** By "grouping" we mean not splitting closely related events into test and train: if a set of rows constitutes a "group," then we want all those rows to go either into test or into train -- as a group. Typical examples are multiple events from a single customer (as you really want your model to predict behavior of new customers) or records close together in time (as latter application records will not be close in time to original training records).
* **Structured back testing:** Structured back testing preserves the order of time ordered events. In finance it is considered ridiculous to use data from a Monday and a Wednesday to build a model for prices on the intervening Tuesday -- but this is the kind of thing that can happen if the training and evaluation data are partitioned using a simple random split.

Our goal is for `vtreat` to be a domain agnostic, `y`-aware data conditioner. So `vtreat` should _y_-stratify its data splits throughout. Prior to version `0.5.26` `vtreat` used simple random splits. Now with version `0.5.26` (currently available from [Github](https://github.com/WinVector/vtreat)) `vtreat` defaults to stratified sampling throughout.

Respecting things like locality of record grouping or ordering of time is a domain issue and should be handled by the analyst. Any splitting or stratification plan requires domain knowledge and should represent a domain-sensitive trade-off between the competing goals of:

* Having a random split.
* Stability of distribution of outcome variable across splits.
* Not cutting into "atomic" groups of records.
* Not using data from the future to predict the past.
* Having a lot of data in each split.
* Having disjoint training and testing data.

As of version `0.5.26` `vtreat` supports this by allowing a user specified data splitting function where the analyst can encode their desired domain invariants. The user-implemented splitting function should have the signature `function(nRows,nSplits,dframe,y)` where

* `nRows` is the number of rows you are trying to split
* `nSplits` is the number of split groups you want
* `dframe` is the original data frame (which may contain grouping or order columns that you want),
* `y` is the outcome variable converted to numeric

The function should return a list of lists. The *i*th element should have slots `train` and `app`, where `[[i]]$train` designates the training data used to fit the model that evaluates the data designated by `[[i]]$app`. This is easiest to show through an example:

```{r}
vtreat::kWayStratifiedY(3,2,NULL,NULL)
```

As we can see, `vtreat::kWayStratifiedY(3,2,NULL,NULL)` builds a plan splitting three rows into cross-validation folds, where in each fold the "application data rows" (`app`) and the training rows (`train`) are complementary sets of row indexes, and together the `app` sets cover all of the rows. (For comparison, `vtreat::oneWayHoldout` builds a leave-one-out [cross validation plan](https://en.wikipedia.org/wiki/Cross-validation_(statistics)), where each `app` set is a single row index and the training rows are all the remaining indexes.)

`vtreat` supplies a number of cross validation split/plan implementations:

* `kWayStratifiedY`: k-way y-stratified cross-validation. This is the `vtreat` default splitting plan.
* `makekWayCrossValidationGroupedByColumn`: k-way y-stratified cross-validation that preserves grouping (for example, all rows corresponding to a single customer or patient, etc). This is a complex splitting plan, and only recommended when absolutely needed (a small usage sketch follows this list).
* `kWayCrossValidation`: k-way un-stratified cross-validation
* `oneWayHoldout`: jackknife, or leave-one-out cross-validation. Note that one-way holdout can leak target expectations, so it is not preferred for nested model situations.
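For instance, a grouped split (keeping all rows for a single customer or patient together) can be sketched as below; the grouping column `gp` is a synthetic stand-in, and calling the splitter returned by `makekWayCrossValidationGroupedByColumn` directly is shown only to inspect the plan structure (normally one would pass it as a `splitFunction` argument).

```{r}
# Sketch: a split plan that keeps all rows of a group together
# ('gp' is a synthetic grouping column used only for illustration).
dG <- data.frame(gp=rep(seq_len(20),each=5))
dG$y <- rnorm(nrow(dG)) + as.numeric(dG$gp %% 3)
groupedSplitter <- vtreat::makekWayCrossValidationGroupedByColumn('gp')
pGrouped <- groupedSplitter(nrow(dG),5,dG,dG$y)
# if no group is split, the total count of distinct groups across the
# application sets equals the number of groups
sum(vapply(pGrouped,function(si) length(unique(dG$gp[si$app])),numeric(1)))
length(unique(dG$gp))
```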
The function `buildEvalSets` takes one of the above splitting functions as input and returns a cross-validation plan that instantiates the desired splitting, while also guarding against corner cases. You can also explicitly specify the splitting plan when designing a vtreat variable treatment plan using `designTreatments[N\C]` or `mkCrossFrame[N\C]Experiment`.

For issues beyond stratification the user may want to supply their own splitting plan. For example, to use a user supplied splitting function we would write the following function definition.

```{r}
# This method is not a great idea as the data could have structure that strides
# in the same pattern as this split.
# Such technically is possible for any split, but we typically use
# pseudo-random structure (that is not the same across many potential
# split calls) to try and make it unlikely such structures
# match often.
modularSplit <- function(nRows,nSplits,dframe,y) {
  group <- seq_len(nRows) %% nSplits
  lapply(unique(group),
         function(gi) {
           list(train=which(group!=gi),
                app=which(group==gi))
         })
}
```

This function can then be passed into any `vtreat` operation that takes a `splitFunction` argument (such as `mkCrossFrameNExperiment`, `designTreatmentsN`, and many more). For example we can pass the user defined `modularSplit` into `vtreat::buildEvalSets` as follows:

```{r}
vtreat::buildEvalSets(nRows=25,nSplits=3,splitFunction=modularSplit)
```

As stated above, the vtreat library code will try to use the user function for splitting, but will fall back to an appropriate vtreat function in corner cases that the user function may not handle (for example, too few rows, too few groups, and so on). Thus the user code can assume it is in a reasonable situation (and even safely return NULL if it can't deal with the situation it is given). For example the following bad user split is detected and corrected:

```{r}
badSplit <- function(nRows,nSplits,dframe,y) {
  list(list(train=seq_len(nRows),app=seq_len(nRows)))
}
vtreat::buildEvalSets(nRows=5,nSplits=3,splitFunction=badSplit)
```

Notice above the returned split does not meet all of the original desiderata, but is guaranteed to be a useful data partition.

### Implementations

The file [outOfSample.R](https://github.com/WinVector/vtreat/blob/master/R/outOfSample.R) contains worked examples.
In particular we would suggest running the code displayed when you type any of:

* `help(kWayCrossValidation)`
* `help(kWayStratifiedY)`
* `help(makekWayCrossValidationGroupedByColumn)`
* `help(oneWayHoldout)`

For example from `help(kWayStratifiedY)` we can see that the distribution of `y` is much more similar in each fold when we stratify than when we don't:

```{r warning=FALSE}
library('vtreat')
```

```{r}
set.seed(23255)
d <- data.frame(y=sin(1:100))

# stratified 5-fold cross validation
pStrat <- kWayStratifiedY(nrow(d),5,d,d$y)
# check if the split is a good partition
check = vtreat::problemAppPlan(nrow(d),5,pStrat,TRUE)
if(is.null(check)) {
  print("Plan is good")
} else {
  print(paste0("Problem with plan: ", check))
}
d$stratGroup <- vtreat::getSplitPlanAppLabels(nrow(d),pStrat)

# unstratified 5-fold cross validation
pSimple <- kWayCrossValidation(nrow(d),5,d,d$y)
# check if the split is a good partition; return null if so
check = vtreat::problemAppPlan(nrow(d),5,pSimple,TRUE)
if(is.null(check)) {
  print("Plan is good")
} else {
  print(paste0("Problem with plan: ", check))
}
d$simpleGroup <- vtreat::getSplitPlanAppLabels(nrow(d),pSimple)

# mean(y) for each fold, unstratified
tapply(d$y,d$simpleGroup,mean)
# standard deviation of the fold means of y, unstratified
sd(tapply(d$y,d$simpleGroup,mean))

# mean(y) for each fold, stratified
tapply(d$y,d$stratGroup,mean)
# standard deviation of the fold means of y, stratified
sd(tapply(d$y,d$stratGroup,mean))
```

Notice the increased similarity of the per-fold `y` distributions when we stratify.

## Conclusion

Controlling the way data is split in cross-validation -- preserving y-distribution, groups, and even ordering -- can improve the real world performance of models trained on such data. Obviously this adds some complexity and "places to go wrong", but it is a topic worth learning about.
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatSplitting.Rmd
## ----categoricalexample, tidy=FALSE------------------------------------------- library(vtreat) dTrainC <- data.frame(x=c('a','a','a','b','b',NA), z=c(1,2,3,4,NA,6),y=c(FALSE,FALSE,TRUE,FALSE,TRUE,TRUE), stringsAsFactors = FALSE) treatmentsC <- designTreatmentsC(dTrainC,colnames(dTrainC),'y',TRUE) scoreColsToPrint <- c('origName','varName','code','rsq','sig','extraModelDegrees') print(treatmentsC$scoreFrame[,scoreColsToPrint]) ## ----map---------------------------------------------------------------------- # Build a map from vtreat names back to reasonable display names vmap <- as.list(treatmentsC$scoreFrame$origName) names(vmap) <- treatmentsC$scoreFrame$varName print(vmap['x_catB']) # Map significances back to original variables aggregate(sig~origName,data=treatmentsC$scoreFrame,FUN=min) ## ----numericexample, tidy=FALSE----------------------------------------------- library(vtreat) dTrainN <- data.frame(x=c('a','a','a','b','b',NA), z=c(1,2,3,4,NA,6),y=as.numeric(c(FALSE,FALSE,TRUE,FALSE,TRUE,TRUE)), stringsAsFactors = FALSE) treatmentsN <- designTreatmentsN(dTrainN,colnames(dTrainN),'y') print(treatmentsN$scoreFrame[,scoreColsToPrint]) ## ----notargetexample, tidy=FALSE---------------------------------------------- library(vtreat) dTrainZ <- data.frame(x=c('a','a','a','b','b',NA), z=c(1,2,3,4,NA,6), stringsAsFactors = FALSE) treatmentsZ <- designTreatmentsZ(dTrainZ,colnames(dTrainZ)) print(treatmentsZ$scoreFrame[, c('origName','varName','code','extraModelDegrees')]) ## ----restrict1---------------------------------------------------------------- dTrainN <- data.frame(x=c('a','a','a','b','b',NA), z=c(1,2,3,4,NA,6),y=as.numeric(c(FALSE,FALSE,TRUE,FALSE,TRUE,TRUE)), stringsAsFactors = FALSE) treatmentsN <- designTreatmentsN(dTrainN,colnames(dTrainN),'y', codeRestriction = c('lev', 'catN', 'clean', 'isBAD'), verbose=FALSE) # no catP or catD variables print(treatmentsN$scoreFrame[,scoreColsToPrint]) ## ----restrict2---------------------------------------------------------------- dTreated = prepare(treatmentsN, dTrainN, codeRestriction = c('lev','clean', 'isBAD')) # no catN variables head(dTreated) ## ----selectvars--------------------------------------------------------------- dTrainN <- data.frame(x=c('a','a','a','b','b',NA), z=c(1,2,3,4,NA,6),y=as.numeric(c(FALSE,FALSE,TRUE,FALSE,TRUE,TRUE)), stringsAsFactors = FALSE) treatmentsN <- designTreatmentsN(dTrainN,colnames(dTrainN),'y', codeRestriction = c('lev', 'catN', 'clean', 'isBAD'), verbose=FALSE) print(treatmentsN$scoreFrame[,scoreColsToPrint]) pruneSig <- 1.0 # don't filter on significance for this tiny example vScoreFrame <- treatmentsN$scoreFrame varsToUse <- vScoreFrame$varName[(vScoreFrame$sig<=pruneSig)] print(varsToUse) origVarNames <- sort(unique(vScoreFrame$origName[vScoreFrame$varName %in% varsToUse])) print(origVarNames) # prepare a treated data frame using only the "significant" variables dTreated = prepare(treatmentsN, dTrainN, varRestriction = varsToUse) head(dTreated) ## ----displayvars-------------------------------------------------------------- origVarNames <- sort(unique(vScoreFrame$origName[vScoreFrame$varName %in% varsToUse])) print(origVarNames) origVarSigs <- vScoreFrame[vScoreFrame$varName %in% varsToUse,] aggregate(sig~origName,data=origVarSigs,FUN=min)
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatVariableTypes.R
--- title: "Variable Types" author: "Win-Vector LLC" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Variable Types} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- 'vtreat' is a package that prepares arbitrary data frames into clean data frames that are ready for analysis (usually supervised learning). A clean data frame: - Only has numeric columns (other than the outcome). - Has no Infinite/NA/NaN values in the effective variable columns. To effect this encoding 'vtreat' replaces original variables or columns with new derived variables. In this note we will use variables and columns as interchangeable concepts. This note describes the current family of 'vtreat' derived variable types. 'vtreat' usage splits into three main cases: * When the target to predict is categorical. * When the target to predict is numeric. * When there is no supplied target to predict. In all cases vtreat variable names are built by appending a notation onto the original user supplied column name. In all cases the easiest way to examine the derived variables is to look at the `scoreFrame` component of the returned treatment plan. We will outline each of these situations below: ## When the target to predict is categorical An example categorical variable treatment is demonstrated below: ```{r categoricalexample, tidy=FALSE} library(vtreat) dTrainC <- data.frame(x=c('a','a','a','b','b',NA), z=c(1,2,3,4,NA,6),y=c(FALSE,FALSE,TRUE,FALSE,TRUE,TRUE), stringsAsFactors = FALSE) treatmentsC <- designTreatmentsC(dTrainC,colnames(dTrainC),'y',TRUE) scoreColsToPrint <- c('origName','varName','code','rsq','sig','extraModelDegrees') print(treatmentsC$scoreFrame[,scoreColsToPrint]) ``` For each user supplied variable or column (in this case `x` and `z`) 'vtreat' proposes derived or treated variables. The mapping from original variable name to derived variable name is given by comparing the columns `origName` and `varName`. One can map facts about the new variables back to the original variables as follows: ```{r map} # Build a map from vtreat names back to reasonable display names vmap <- as.list(treatmentsC$scoreFrame$origName) names(vmap) <- treatmentsC$scoreFrame$varName print(vmap['x_catB']) # Map significances back to original variables aggregate(sig~origName,data=treatmentsC$scoreFrame,FUN=min) ``` In the `scoreFrame` the `sig` column is the significance of the single variable logistic regression using the named variable (plus a constant term), and the `rsq` column is the "pseudo-r-squared" or portion of deviance explained (please see [here](https://win-vector.com/2011/09/14/the-simpler-derivation-of-logistic-regression/) for some notes). Essentially a derived variable name is built by concatenating an original variable name and a treatment type (also recorded in the `code` column for convenience). The codes give the different 'vtreat' variable types (or really meanings, as all derived variables are numeric). For categorical targets the possible variable types are as follows: * **clean** : a numeric variable passed through with all NA/NaN/infinite values replaced with either zero or mean value of the non-NA/NaN/infinite examples of the variable. * **is\_Bad** : a companion to the 'clean' treatment. 'is\_Bad' is an indicator that indicates a value replacement has occurred. For many noisy datasets this column can be more informative than the clean column! * **lev** : a 0/1 indicator indicating a particular value of a categorical variable was present. 
For example `x_lev_x.a` is 1 when the original `x` variable had a value of "a". These indicators are essentially variables representing explicit encoding of levels as dummy variables. In some cases a special level code is used to represent pooled rare values.
* **cat\_B** : a single variable Bayesian model of the change in logit-odds in outcome from mean distribution conditioned on the observed value of the original variable. In our example: `x_catB = logit(P[y==target|x]) - logit(P[y==target])`. This encoding is especially useful for categorical variables that have a large number of levels, but be aware it can obscure degrees of freedom if not used properly.
* **cat\_P** : a "prevalence fact" about a categorical level. Tells us if the original level was rare or common. Probably not good for direct use in a model, but possibly useful for meta-analysis on the variable.

## When the target to predict is numeric

An example numeric variable treatment is demonstrated below:

```{r numericexample, tidy=FALSE}
library(vtreat)
dTrainN <- data.frame(x=c('a','a','a','b','b',NA),
                      z=c(1,2,3,4,NA,6),y=as.numeric(c(FALSE,FALSE,TRUE,FALSE,TRUE,TRUE)),
                      stringsAsFactors = FALSE)
treatmentsN <- designTreatmentsN(dTrainN,colnames(dTrainN),'y')
print(treatmentsN$scoreFrame[,scoreColsToPrint])
```

The treatment of numeric targets is similar to that of categorical targets. In the numeric case the possible derived variable types are:

* **clean** : a numeric variable passed through with all NA/NaN/infinite values replaced with either zero or mean value of the non-NA/NaN/infinite examples of the variable.
* **is\_Bad** : a companion to the 'clean' treatment. 'is\_Bad' is an indicator that indicates a value replacement has occurred. For many noisy datasets this column can be more informative than the clean column!
* **lev** : a 0/1 indicator indicating a particular value of a categorical variable was present. For example `x_lev_x.a` is 1 when the original `x` variable had a value of "a". These indicators are essentially variables representing explicit encoding of levels as dummy variables. In some cases a special level code is used to represent pooled rare values.
* **cat\_N** : a single variable regression model of the difference in outcome expectation conditioned on the observed value of the original variable. In our example: `x_catN = E[y|x] - E[y]`. This encoding is especially useful for categorical variables that have a large number of levels, but be aware it can obscure degrees of freedom if not used properly.
* **cat\_P** : a "prevalence fact" about a categorical level. Tells us if the original level was rare or common. Probably not good for direct use in a model, but possibly useful for meta-analysis on the variable.
* **cat\_D** : a "deviation fact" about a categorical level. Tells us if 'y' is concentrated or diffuse when conditioned on the observed level of the original categorical variable. Probably not good for direct use in a model, but possibly useful for meta-analysis on the variable.

Note: for categorical targets we don't need `cat\_D` variables as this information is already encoded in `cat\_B` variables. In the `scoreFrame` the `sig` column is the significance of the single variable linear regression using the named variable (plus a constant term), and the `rsq` column is the "r-squared" or portion of variance explained (please see [here](https://win-vector.com/2011/11/21/correlation-and-r-squared/) for some notes).
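To make the `cat\_N` encoding concrete, we can peek at the derived `x_catN` column next to the outcome: each level of `x` is replaced by the difference between its estimated conditional mean outcome and the grand mean (the estimates may be smoothed, so they need not match a naive group-mean calculation exactly).

```{r}
# Peek at the catN effect coding produced by the treatment plan above.
dTrainNTreated <- vtreat::prepare(treatmentsN, dTrainN, pruneSig=NULL)
dTrainNTreated[, c('x_catN','y')]
```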
## When there is no supplied target to predict An example "no target" variable treatment is demonstrated below: ```{r notargetexample, tidy=FALSE} library(vtreat) dTrainZ <- data.frame(x=c('a','a','a','b','b',NA), z=c(1,2,3,4,NA,6), stringsAsFactors = FALSE) treatmentsZ <- designTreatmentsZ(dTrainZ,colnames(dTrainZ)) print(treatmentsZ$scoreFrame[, c('origName','varName','code','extraModelDegrees')]) ``` Note: because there is no user supplied target the `scoreFrame` significance columns are not meaningful, and are populated only for regularity of code interface. Also indicator variables are only formed by `designTreatmentsZ` for `vtreat` 0.5.28 or newer. Beyond that the no-target treatments are similar to the earlier treatments. Possible derived variable types in this case are: * **clean** : a numeric variable passed through with all NA/NaN/infinite values replaced with either zero or mean value of the non-NA/NaN/infinite examples of the variable. * **is\_Bad** : a companion to the 'clean' treatment. 'is\_Bad' is an indicator that indicates a value replacement has occurred. For many noisy datasets this column can be more informative than the clean column! * **lev** : a 0/1 indicator indicating a particular value of a categorical variable was present. For example `x_lev_x.a` is 1 when the original `x` variable had a value of "a". These indicators are essentially variables representing explicit encoding of levels as dummy variables. In some cases a special level code is used to represent pooled rare values. * **cat\_P** : a "prevalence fact" about a categorical level. Tells us if the original level was rare or common. Probably not good for direct use in a model, but possibly useful for meta-analysis on the variable. ## Restricting to Specific Variable Types Both `designTreatmentsX` and `prepare` functions take an argument called `codeRestriction` that restricts the type of variable that is created. For example, you may not want to create `catP` and `catD` variables for a regression problem. ```{r restrict1} dTrainN <- data.frame(x=c('a','a','a','b','b',NA), z=c(1,2,3,4,NA,6),y=as.numeric(c(FALSE,FALSE,TRUE,FALSE,TRUE,TRUE)), stringsAsFactors = FALSE) treatmentsN <- designTreatmentsN(dTrainN,colnames(dTrainN),'y', codeRestriction = c('lev', 'catN', 'clean', 'isBAD'), verbose=FALSE) # no catP or catD variables print(treatmentsN$scoreFrame[,scoreColsToPrint]) ``` Conversely, even if you have created a treatment plan for a particular type of variable, you may subsequently decide not to use it. For example, perhaps you only want to use indicator variables and not the `catN` variable for modeling. You can use `codeRestriction` in `prepare()`. ```{r restrict2} dTreated = prepare(treatmentsN, dTrainN, codeRestriction = c('lev','clean', 'isBAD')) # no catN variables head(dTreated) ``` `varRestriction` works similarly, only you must list the explicit variables to use. See the example below. ## Overall Variables that "do not move" (don't take on at least two values during treatment design) or don't achieve at least a minimal significance are suppressed. The `catB`/`catN` variables are essentially single variable models and are very useful for re-encoding categorical variables that take on a very large number of values (such as zip-codes). The intended use of 'vtreat' is as follows: * Data is split into three non-overlapping portions * One portion is used to "design treatments" (we sometime informally call this calibration). * Another portion is used to train a model. 
* The remaining portion is used to evaluate the model. 'vtreat' attempts to compute "out of sample" significances for each variable effect ( the `sig` column in `scoreFrame`) through cross-validation techniques. 'vtreat' is primarily intended to be "y-aware" processing. Of particular interest is using `vtreat::prepare()` with `scale=TRUE` which tries to put most columns in 'y-effect' units. This can be an important pre-processing step before attempting dimension reduction (such as principal components methods). The vtreat user should pick which sorts of variables they are want and also filter on estimated significance. Doing this looks like the following: ```{r selectvars} dTrainN <- data.frame(x=c('a','a','a','b','b',NA), z=c(1,2,3,4,NA,6),y=as.numeric(c(FALSE,FALSE,TRUE,FALSE,TRUE,TRUE)), stringsAsFactors = FALSE) treatmentsN <- designTreatmentsN(dTrainN,colnames(dTrainN),'y', codeRestriction = c('lev', 'catN', 'clean', 'isBAD'), verbose=FALSE) print(treatmentsN$scoreFrame[,scoreColsToPrint]) pruneSig <- 1.0 # don't filter on significance for this tiny example vScoreFrame <- treatmentsN$scoreFrame varsToUse <- vScoreFrame$varName[(vScoreFrame$sig<=pruneSig)] print(varsToUse) origVarNames <- sort(unique(vScoreFrame$origName[vScoreFrame$varName %in% varsToUse])) print(origVarNames) # prepare a treated data frame using only the "significant" variables dTreated = prepare(treatmentsN, dTrainN, varRestriction = varsToUse) head(dTreated) ``` We strongly suggest using the standard variables coded as 'lev', 'clean', and 'isBad'; and the "y aware" variables coded as 'catN' and 'catB'. The non sub-model variables ('catP' and 'catD') can be useful (possibly as interactions or guards on the corresponding 'catN' and 'catB' variables) but also encode distributional facts about the data that may or may not be appropriate depending on your problem domain. When displaying variables to end users we suggest using the original names and the min significance seen on any derived variable: ```{r displayvars} origVarNames <- sort(unique(vScoreFrame$origName[vScoreFrame$varName %in% varsToUse])) print(origVarNames) origVarSigs <- vScoreFrame[vScoreFrame$varName %in% varsToUse,] aggregate(sig~origName,data=origVarSigs,FUN=min) ``` ## Links * ['vtreat' on Github'](https://github.com/WinVector/vtreat) * ['vtreat' on CRAN](https://cran.r-project.org/package=vtreat)
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/doc/vtreatVariableTypes.Rmd
test_BO <- function() {
  suppressWarnings({
    # build the common column types we are likely to encounter
    synthFrame <- function(n, censorD) {
      stringReps = c(rep('a',100), rep('b',100), 'c', rep('d',20))
      d <- data.frame(xN1=runif(n),
                      xN2=runif(n),
                      xN3=0.0,
                      xL1=sample(c(TRUE,FALSE), size=n, replace=TRUE),
                      xL2=sample(c(TRUE,FALSE,NA), size=n, replace=TRUE),
                      xL3=FALSE,
                      xS1=sample(stringReps, size=n, replace=TRUE),
                      xS2=sample(c(stringReps,NA), size=n, replace=TRUE),
                      xS3='a',
                      xF1=sample(as.factor(c(100:200)), size=n, replace=TRUE),
                      xF2=sample(as.factor(c('a','b','c','d',NA)), size=n, replace=TRUE),
                      xF3=as.factor(c('a')),
                      xI1=sample(as.integer(c(1,2,3)), size=n, replace=TRUE),
                      xI2=sample(as.integer(c(1,2,3,NA)), size=n, replace=TRUE),
                      xI3=as.integer(c(1)),
                      xU1=NA,
                      stringsAsFactors=FALSE)
      now <- Sys.time()
      d$t1 <- as.POSIXct(now + ceiling(as.numeric(now)*runif(n)/10))
      d$t2 <- as.POSIXlt(now + ceiling(as.numeric(now)*runif(n)/10))
      d[sample(1:n,5,replace=T),'xN2'] <- NA
      d[sample(1:n,5,replace=T),'xN2'] <- NaN
      d[sample(1:n,5,replace=T),'t1'] <- NA
      d[sample(1:n,5,replace=T),'t2'] <- NA
      if(censorD) {
        dFree <- rowSums(as.matrix(sapply(d, function(c) {
          ifelse(is.na(c), 0, ifelse(as.character(c)=='d', 1.0, 0.0))
        }))) <= 0
        d <- d[dFree,]
        n <- dim(d)[[1]]
      }
      toNum <- function(v) {
        if(class(v)[[1]]=='character') {
          v <- as.factor(v)
        }
        v <- as.numeric(v)
        meanY <- mean(v, na.rm=TRUE)
        if(is.na(meanY)) {
          meanY <- 0.0
        }
        v[is.na(v)] <- meanY
        v <- v - meanY
        range <- max(1, max(v)-min(v))
        v <- v/range
        v
      }
      dN <- as.matrix(sapply(d, toNum))
      d$yN <- rowSums(dN) + runif(n)
      d$yC <- d$yN >= median(d$yN)
      d
    }

    set.seed(26236)
    dTrain <- synthFrame(200, TRUE)
    dTest <- synthFrame(20, FALSE)
    vars <- setdiff(colnames(dTrain), c('yN','yC'))
    verbose = FALSE

    for(smFactor in c(0.0, 0.5)) {
      for(scale in c(FALSE, TRUE)) {
        if(verbose) {
          print(paste('**********************', smFactor, scale))
          print('# numeric example')
        }
        treatmentsN <- designTreatmentsN(dTrain, vars, 'yN', smFactor=smFactor,
                                         rareCount=2, rareSig=0.5,
                                         verbose=verbose)
        dTrainNTreated <- prepare(treatmentsN, dTrain, pruneSig=0.99,
                                  scale=scale,
                                  check_for_duplicate_frames=FALSE)
        nvars <- setdiff(colnames(dTrainNTreated), 'yN')
        if(scale) {
          # all input variables should be mean 0 when scale is TRUE
          expect_true(max(abs(vapply(dTrainNTreated[,nvars], mean, numeric(1)))) < 1.0e-5)
          # all slopes should be 1 when scale is TRUE
          expect_true(max(abs(1 -
            vapply(nvars,
                   function(c) {
                     lm(paste('yN', c, sep='~'),
                        data=dTrainNTreated)$coefficients[[2]]
                   },
                   numeric(1)))) < 1.0e-5)
        }
        modelN <- lm(paste('yN', paste(nvars, collapse=' + '), sep=' ~ '),
                     data=dTrainNTreated)
        dTestNTreated <- prepare(treatmentsN, dTest, pruneSig=0.99, scale=scale,
                                 check_for_duplicate_frames=FALSE)
        dTestNTreated$pred <- predict(modelN, newdata=dTestNTreated)
        if(verbose) {
          print(summary(modelN))
        }

        if(verbose) {
          print('# categorical example')
        }
        treatmentsC <- designTreatmentsC(dTrain, vars, 'yC', TRUE, smFactor=smFactor,
                                         catScaling=TRUE,
                                         verbose=verbose)
        dTrainCTreated <- prepare(treatmentsC, dTrain, pruneSig=0.99, scale=scale,
                                  check_for_duplicate_frames=FALSE)
        cvars <- setdiff(colnames(dTrainCTreated), 'yC')
        if(scale) {
          # all input variables should be mean 0 when scale is TRUE
          expect_true(max(abs(vapply(dTrainCTreated[,cvars], mean, numeric(1)))) < 1.0e-5)
          # all slopes should be 1 when scale is TRUE
          expect_true(max(abs(1 -
            vapply(cvars,
                   function(c) {
                     glm(paste('yC', c, sep='~'), family=binomial,
                         data=dTrainCTreated)$coefficients[[2]]
                   },
                   numeric(1)))) < 1.0e-5)
        }
        modelC <- glm(paste('yC', paste(cvars, collapse=' + '), sep=' ~ '),
                      data=dTrainCTreated, family=binomial(link='logit'))
        dTestCTreated <- prepare(treatmentsC, dTest, pruneSig=0.99, scale=scale,
                                 check_for_duplicate_frames=FALSE)
        dTestCTreated$pred <- predict(modelC, newdata=dTestCTreated, type='response')
        if(verbose) {
          print(summary(modelC))
        }
      }
    }
  })

  invisible(NULL)
}

test_BO()
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/tinytest/test_BO.R
test_Car <- function() {
  dir <- system.file("tinytest", package = "vtreat", mustWork = TRUE)
  load(paste(dir, 'uci.car.data.Rdata', sep = "/"))
  set.seed(2352)

  # checking for non-significance is tricky, so repeat a few times.
  p_values <- numeric(0)
  for(rep in seq_len(5)) {
    uci.car.data$noise <- sample(paste0('v', 1:100), nrow(uci.car.data), replace=TRUE)
    dYName <- "rating"
    dYTarget <- 'vgood'
    pvars <- setdiff(colnames(uci.car.data), dYName)
    treatmentsC <- designTreatmentsC(uci.car.data,
                                     pvars, dYName, dYTarget, verbose=FALSE)
    dTrainCTreated <- prepare(treatmentsC,
                              uci.car.data,
                              pruneSig=0.5,
                              check_for_duplicate_frames=FALSE)
    cvars <- setdiff(colnames(dTrainCTreated), dYName)
    p_values <- c(p_values,
                  min(treatmentsC$scoreFrame$sig[treatmentsC$scoreFrame$origName=='noise']))
  }
  expect_true(max(p_values) > 0.05)

  invisible(NULL)
}

test_Car()
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/tinytest/test_Car.R
test_ExpmtDesign <- function() {
  set.seed(2325235)
  for(nrowd in c(1, 3, 5, 10, 100, 200, 1000)) {
    y = rnorm(nrowd)
    eSets <- buildEvalSets(nrowd, y=y)
    if(nrowd >= 100) {
      expect_true(attr(eSets, 'splitmethod') == 'kwaycrossystratified')
    }
    fullSeq <- seq_len(nrowd)
    expect_true(length(eSets) > 0)
    for(ei in eSets) {
      expect_true(length(ei$train) > 0)
      expect_true(length(ei$app) > 0)
      expect_true(all(ei$train %in% fullSeq))
      expect_true(all(ei$app %in% fullSeq))
    }
    if(nrowd > 1) {
      for(ei in eSets) {
        expect_true(length(intersect(ei$train, ei$app)) == 0)
      }
    }
    apps <- Reduce(c, lapply(eSets, function(ei) ei$app))
    expect_true(length(apps) == nrowd)
    expect_true(length(unique(apps)) == nrowd)
    problem <- problemAppPlan(nrowd, 3, eSets, nrowd >= 100)
    expect_true(is.null(problem))
  }

  invisible(NULL)
}

test_ExpmtDesign()
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/tinytest/test_ExpmtDesign.R
test_ExpmtDesign2 <- function() {
  set.seed(2325235)
  nrowd = 200
  y <- rnorm(nrowd)
  d <- data.frame(y=y)
  d$group = floor(seq_len(nrow(d))/5)
  splitFn <- makekWayCrossValidationGroupedByColumn('group')
  eSets <- buildEvalSets(nrowd, y=y, dframe=d,
                         splitFunction=splitFn)
  expect_true(attr(eSets, 'splitmethod') == 'kwaycrossystratifiedgrouped')
  fullSeq <- seq_len(nrowd)
  expect_true(length(eSets) > 0)
  for(ei in eSets) {
    expect_true(length(ei$train) > 0)
    expect_true(length(ei$app) > 0)
    expect_true(all(ei$train %in% fullSeq))
    expect_true(all(ei$app %in% fullSeq))
  }
  apps <- Reduce(c, lapply(eSets, function(ei) ei$app))
  expect_true(length(apps) == nrowd)
  expect_true(length(unique(apps)) == nrowd)
  problem <- problemAppPlan(nrowd, 3, eSets, TRUE)
  expect_true(is.null(problem))

  # check grouping property
  d$splitLabel <- vtreat::getSplitPlanAppLabels(nrow(d), eSets)
  rs <- rowSums(table(d$group, d$splitLabel) > 0)
  expect_true(max(rs) == 1)
  expect_true(min(rs) == 1)

  invisible(NULL)
}

test_ExpmtDesign2()
/scratch/gouwar.j/cran-all/cranData/vtreat/inst/tinytest/test_ExpmtDesign2.R