setNames <- function(x,nm){
names(x) <- nm
x
}
# warning and message helpers with sprintf-style formatting
warnf <- function(fmt,...){
warning(sprintf(fmt,...),call.=FALSE)
}
msgf <- function(fmt, ...){
message(sprintf(fmt, ...))
}
#' Select records (not) satisfying rules
#'
#' Apply validation rules or validation results to a data set and select only
#' those that satisfy all or violate at least one rule.
#'
#'
#' @param x A \code{data.frame}
#' @param y a \code{\link{validator}} object or a \code{\link{validation}} object.
#' @param include_missing Toggle: also select records that have \code{NA} output for a rule?
#' @param ... options passed to \code{\link{confront}}
#'
#' @note
#' An error is thrown if the rules or validation results in \code{y} cannot be
#' interpreted record by record (e.g. when one of the rules is of the form
#' \code{mean(foo)>0}).
#'
#'
#' @return For \code{satisfying}, the records in \code{x} satisfying all rules or
#' validation outcomes in \code{y}. For \code{violating}, the records in
#' \code{x} violating at least one of the rules or validation outcomes
#' in \code{y}. For \code{lacking}, the records in \code{x} with a missing
#' (\code{NA}) result for at least one rule or validation outcome in \code{y}.
#'
#' @examples
#' rules <- validator(speed >= 12, dist < 100)
#' satisfying(cars, rules)
#' violating(cars, rules)
#'
#' out <- confront(cars, rules)
#' summary(out)
#' satisfying(cars, out)
#' violating(cars, out)
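#'
#' # select records with a missing (NA) result for at least one rule;
#' # for 'cars' this yields zero rows since the data contain no missing values
#' lacking(cars, rules)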
#'
#' @family select-data
#' @export
satisfying <- function(x, y, include_missing=FALSE, ...){
UseMethod("satisfying")
}
#' @export
satisfying.data.frame <- function(x, y, include_missing=FALSE, ...){
stopifnot(inherits(y,"validator") | inherits(y,"validation"))
if (inherits(y,"validator")) y <- confront(x,y,...)
A <- values(y)
if (!is.array(A)|| nrow(A)!=nrow(x) ){
stop("Not all rules have record-wise output")
}
if (include_missing){
x[apply(A,1,function(d) all(d | is.na(d)) ),,drop=FALSE]
} else {
x[apply(A,1,function(d) all(d &!is.na(d)) ), , drop=FALSE]
}
}
#' @export
satisfying.default <- function(x,y,include_missing=FALSE, ...){
stop("Not implemented for ", class(x), call. = FALSE)
}
#' @rdname satisfying
#' @export
violating <- function(x, y, include_missing=FALSE, ...){
UseMethod("violating")
}
#' @export
violating.data.frame <- function(x, y, include_missing=FALSE, ...){
stopifnot(inherits(y,"validator") | inherits(y,"validation"))
if (inherits(y,"validator")) y <- confront(x,y,...)
A <- values(y)
if (!is.array(A)|| nrow(A)!=nrow(x) ){
stop("Not all rules have record-wise output")
}
if (include_missing){
x[apply(A, 1, function(d) any(!d | is.na(d))), , drop = FALSE]
} else {
x[apply(A,1,function(d) any(!d &!is.na(d))),,drop=FALSE]
}
}
#' @rdname satisfying
#' @export
violating.default <- function(x, y, include_missing=FALSE, ...){
stop("Not implemented for ", class(x), call. = TRUE)
}
#' @rdname satisfying
#' @export
lacking <- function(x,y, ...){
UseMethod("lacking")
}
#' @export
lacking.data.frame <- function(x, y, ...){
stopifnot(inherits(y,"validator") | inherits(y,"validation"))
if (inherits(y,"validator")) y <- confront(x,y,...)
A <- values(y)
if (!is.array(A)|| nrow(A)!=nrow(x) ){
stop("Not all rules have record-wise output")
}
x[apply(A,1,anyNA),,drop=FALSE]
}
#' @export
lacking.default <- function(x, y, ...){
stop("Not implemented for ", class(x), call. = TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/validate/R/utils.R
|
#' Data Validation Infrastructure
#'
#'
#' Data often suffer from errors and missing values. A necessary step before data
#' analysis is verifying and validating your data. Package \code{validate} is a
#' toolbox for creating validation rules and checking data against these rules.
#'
#'
#' @section Getting started:
#'
#' The easiest way to get started is through the examples given in \code{\link{check_that}}.
#'
#' The general workflow in \code{validate} follows the following pattern.
#' \itemize{
#' \item Define a set of rules or quality indicators using \code{\link{validator}} or \code{\link{indicator}}.
#' \item \code{\link{confront}} data with the rules or indicators,
#' \item Examine the results either graphically or by summary.
#' }
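#'
#' A minimal sketch of this workflow, using the built-in \code{cars} data:
#'
#' \preformatted{
#'   rules <- validator(speed >= 0, dist >= 0)
#'   out   <- confront(cars, rules)
#'   summary(out)
#'   plot(out)
#' }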
#'
#' There are several convenience functions that allow one to define rules from
#' the command line or from a (free-form or yaml) file, and to investigate and
#' maintain the rules themselves. Please have a look at the
#' \href{../doc/cookbook.html}{cookbook} for a comprehensive introduction.
#'
#' @references
#' An overview of this package, its underlying ideas and many examples
#' can be found in MPJ van der Loo and E. de Jonge (2018) \emph{Statistical
#' data cleaning with applications in R}. John Wiley & Sons.
#'
#' Please use \code{citation("validate")} to get a citation for (scientific)
#' publications.
#'
#' @aliases validate-package
#' @name validate
#' @useDynLib validate, .registration=TRUE
#' @aliases package-validate validate
#' @import methods
#' @importFrom graphics legend par text axis abline lines strwidth
#' @importFrom utils getFromNamespace
#' @import settings
#' @import yaml
#' @import grid
"_PACKAGE"
|
/scratch/gouwar.j/cran-all/cranData/validate/R/validate_pkg.R
|
#' @include expressionset.R
NULL
#' Define validation rules for data
#'
#'
#' @section Validating expressions:
#' Each validating expression should evaluate to a \code{logical}. Allowed syntax of
#' the expression is described in \code{\link{syntax}}.
#'
#' @param ... A comma-separated list of validating expressions
#' @param .file (optional) A character vector of file locations (see also the
#' section on file parsing in the \code{\link{syntax}} help file).
#' @param .data (optional) A \code{data.frame} with columns \code{"rule"},
#' \code{"name"}, and \code{"description"}
#'
#'
#'
#' @family validator-methods
#' @return An object of class \code{validator} (see \code{\link{validator-class}}).
#'
#'
#' @example ../examples/validator.R
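#' @examples
#' # A small additional sketch: rules can also be read from a data.frame
#' # with columns "rule", "name" and "description" via the .data argument.
#' df <- data.frame(
#'     rule        = c("speed >= 0", "dist >= 0")
#'   , name        = c("nonneg_speed", "nonneg_dist")
#'   , description = c("speed must be nonnegative", "distance must be nonnegative")
#'   , stringsAsFactors = FALSE
#' )
#' v <- validator(.data = df)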
#' @export
validator <- function(..., .file, .data) new('validator',...
, .file = .file, .data=.data)
#### VALIDATOR CLASS ----------------------------------------------------------
#' Store a set of rich validating rules.
#'
#' @section Details:
#' A validator stores a set of validation rules. It is a child class of
#' \code{\link{expressionset}} and
#' can be constructed with \code{\link{validator}}. \code{validator} contains
#' an extra slot \code{"language"} stating the language in which the validation
#' rule is expressed. The default, and currently only supported language is
#' the \code{validate} language implemented by this package.
#'
#' @section Exported S4 methods for \code{validator}:
#' \itemize{
#' \item{Methods inherited from \code{\link{expressionset}}}
#' \item{\code{\link{confront}}}
#' \item{\code{\link{compare}}}
#' }
#'
#'
#' @section See also:
#' \itemize{
#' \item{\code{\link{expressionset}}}
#' }
#'
#' @keywords internal
#'
setRefClass("validator"
, fields = list(._language = "character")
, contains = "expressionset"
, methods = list(
initialize = function(..., .file, .data){
ini_validator(.self,...,.file=.file, .data=.data)
}
, is_linear = function() linear(.self)
# extra argument: normalize=TRUE
, linear_coefficients = function(...) get_linear_coefficients(.self, ...)
)
)
ini_validator <- function(obj, ..., .file, .data){
check_primitives()
if (missing(.file) && missing(.data) ){
.ini_expressionset_cli(obj, ..., .prefix="V")
obj$._options <- .PKGOPT
} else if (!missing(.file)) {
.ini_expressionset_yml(obj, file=.file, .prefix="V")
} else if (!missing(.data)){
.ini_expressionset_df(obj, dat=.data, .prefix="V")
i <- validating(obj) | is_tran_assign(obj)
if (!all(i)){
r <- paste(which(!i),collapse=", ")
warning("Invalid syntax detected, ignoring rows ",r)
obj$rules <- obj$rules[i]
}
obj$._options <- .PKGOPT
}
# check rule validity
i <- validating(obj) | is_tran_assign(obj)
if ( !all(i) ){
invalid <- sapply(which(!i),function(k) deparse(expr(obj[[k]])))
wrn <- sprintf("\n[%03d] %s",which(!i), invalid)
warning(paste0(
"Invalid syntax detected, the following expressions have been ignored:"
, paste0(wrn,collapse="")
), call.=FALSE)
obj$rules <- obj$rules[i]
}
for ( r in seq_along(obj)){
if ( is.null( meta(obj[[r]])$language ) ) {
meta(obj[[r]],"language") <- paste("validate",utils::packageVersion("validate"))
}
if (is.null( meta(obj[[r]])$severity)) {
meta(obj[[r]],"severity") <- "error"
}
}
}
# note: for some reason this function is not testable from devtools::test('pkg')
check_primitives <- function(){
# todo: extract this from voptions()
prim <- c("<","<=","==","!=",">=",">","%in%")
for ( p in prim )
if (!identical(body(p),body(getFromNamespace(p,"base"))))
warning(sprintf("Using implementation of %s that differs from base::`%s`",p,p))
}
# Extract linear coefficients from linear expressions
#
# @section Details: Linear expressions are expressions of the form \eqn{\boldsymbol{Ay}} or
# \eqn{\boldsymbol{Ay}\odot\boldsymbol{b}}, where \eqn{\odot\in\{<,\leq,=,\geq,>\}}.
# This function uses \code{\link{is_linear}} to find linear expressions in \code{x} and returns
# the corresponding coefficients and possibly the operators.
#
# @param x An R object
# @param ... Arguments to be passed to other methods
#
# @return A list, containing matrix \eqn{\boldsymbol{A}}, and where possible matrix \eqn{\boldsymbol{b}}
# and a vector with comparison operators.
#
get_linear_coefficients <- function(x, normalize=TRUE,...){
x <- x[x$is_linear()]
calls <- .get_exprs(x,lin_eq_eps=0, lin_ineq_eps=0)
cols <- unique(unlist(lapply(calls, var_from_call)))
rows <- names(x)
bA <- matrix(0
, nrow = length(rows)
, ncol = length(cols) + 1
, dimnames = list(validator=rows, variable=c('CONSTANT',cols) )
)
lcoef <- lapply(calls, function(x) coefficients(left(x)))
rcoef <- lapply(calls, function(x) coefficients(right(x)))
for ( i in seq_along(lcoef) ){
cls <- names(lcoef[[i]])
bA[i,cls] <- lcoef[[i]]
cls <- names(rcoef[[i]])
bA[i,cls] <- bA[i,cls] - rcoef[[i]]
}
operators <- sapply(sapply(calls,`[[`,1),deparse)
if (normalize){
bA <- bA * operatorsign[operators]
operators <- normed_operators[operators]
}
list(A=bA[,-1,drop=FALSE],b = -1*bA[,1,drop=FALSE],operators=operators)
}
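#
# Illustration (not run at package load); the functionality is exposed as the
# `linear_coefficients()` method on validator objects:
#
#   v  <- validator(x + 2*y >= 3, x - y == 0)
#   cf <- v$linear_coefficients()
#   cf$A          # coefficient matrix, one row per linear rule
#   cf$b          # constant (right-hand side) terms
#   cf$operators  # normalized comparison operators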
#' Combine two validator objects
#'
#' Combine two \code{\link{validator}} objects by addition. A new \code{validator}
#' object is created with default (global) option values. Previously set options
#' are ignored.
#'
#' @param e1 a \code{\link{validator}}
#' @param e2 a \code{\link{validator}}
#'
#' @section Note:
#' The \code{names} of the resulting object are made unique using \code{\link[base]{make.names}}.
#'
#'
#' @examples
#' validator(x>0) + validator(x<=1)
#' @family validator-methods
#' @export
setMethod("+", c("validator","validator"), function(e1, e2){
v <- validator()
v$rules <- c(e1$rules, e2$rules)
names(v) <- make.names(names(v),unique=TRUE)
v
})
#' Plot a validator object
#'
#' The matrix of variables by rules is plotted, in which rules that are
#' recognized as linear (in)equations are colored differently.
#' The augmented matrix is returned, but can also be calculated using
#' \code{variables(x, as="matrix")}.
#'
#' @export
#' @param x validator object with rules
#' @param y not used
#' @param use_blocks \code{logical} if \code{TRUE} the matrix is sorted
#' according to the connected subsets of variables (a.k.a. blocks).
#' @param col \code{character} with color codes for plotting variables.
#' @param cex size of the variables plotted.
#' @param show_legend should a legend explaining the colors be drawn?
#' @param ... passed to image
#' @return (invisible) the matrix
#' @seealso \code{\link{variables}}
#' @family validator-methods
#' @family expressionset-methods
#' @example ../examples/plot.R
setMethod("plot","validator"
, function( x
, y
, use_blocks = TRUE
, col = c("#b2df8a", "#a6cee3") # Colorbrewer "Paired"
, cex = 1
, show_legend = TRUE
, ...
){
if (show_legend){
oldpar <- par(xpd=TRUE, mar=c(7,4,3,3))
on.exit(par(oldpar))
}
use_blocks <- isTRUE(use_blocks)
show_legend <- isTRUE(show_legend)
if (length(x) < 1){
message("No rules to be plotted")
return(invisible())
}
blocks <- if (use_blocks){
x$blocks()
}
A <- variables(x, as = "matrix")
Z <- A
Z[A] <- 2
Z[A & x$is_linear()] <- 1
is.na(Z) <- Z == 0
if (use_blocks){
# change row order, so blocks are identifiable
rule_order <- unlist(blocks)
var_order <- unlist(lapply(blocks, function(b){variables(x[b])}))
Z <- Z[rule_order, var_order, drop = FALSE]
}
Z <- t(Z)
ylim <- c(1, ncol(Z)) + c(-0.5, 0.5)
# if (show_legend){
# ylim[2] <- ylim[2] + 1 # needs extra space for legend
# }
graphics::image( x = seq_len(nrow(Z))
, y = seq_len(ncol(Z))
, z = Z
, col = col
, las = 1
, xlab = "variables"
, ylab= "rules"
, xaxt = "n"
, yaxt = "n"
# , ...
)
# label the y-axis with rule names
axis(2, at=seq_len(ncol(Z)), labels = colnames(Z), las=1)
var_text <- which(Z > 0, arr.ind = TRUE)
var_text <- data.frame(var_text)
var_text$labels <- colnames(A)[var_text$variable]
# variables
text( x = var_text$variable
, y = var_text$rule
, labels = var_text$labels
, cex = cex
)
if (use_blocks){
h <- sapply(x$blocks(), length)
h <- c(0,cumsum(h)) + 0.5
v <- lapply(blocks, function(b){variables(x[b])})
v <- sapply(v, length)
v <- c(0,cumsum(v)) + 0.5
graphics::rect( xleft = utils::head(v, -1)
, xright = utils::tail(v, -1)
, ybottom = utils::head(h, -1)
, ytop = utils::tail(h, -1)
, lty = 2
, border ="gray30")
}
if (show_legend){
legend( x = 0.5
, y = 0.2
, legend = c("linear rule", "other")
, fill=col
, bty="n"
)
}
F <- factor(Z, levels=c(1,2), labels = c("linear", "other"))
dim(F) <- dim(Z)
dimnames(F) <- dimnames(Z)
invisible(F)
})
|
/scratch/gouwar.j/cran-all/cranData/validate/R/validator.R
|
# parse yaml rule files
.readlines_utf8 <- function(file, encoding="unknown"){
lines <- readLines(con=file, encoding=encoding)
enc2utf8(lines)
}
filter_yrf_options <- function(lines){
index <- grep("^---[[:blank:]]*$",lines)
if (length(index) < 2 || index[1] == index[2] ){
NULL
} else {
if ( index[1]==1 ){
lines[(index[1]+1):(index[2]-1)]
} else {
NULL
}
}
}
# detect whether a string starts with a drive letter, a tilde (home), "\\" or
# "//", i.e. whether it is a full path
is_full_path <- function(string){
grepl("(^[[:alpha:]^.]+:(/|\\\\))|(^\\\\)|(^//)|(^~/)|(^~\\\\).+",string)
}
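# Illustration (expected results for the regular expression above):
#   is_full_path(c("C:/rules.yaml", "~/rules.yaml", "//server/rules.yaml"))
#   # TRUE TRUE TRUE
#   is_full_path("rules/check.yaml")
#   # FALSE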
# @rdname validate_extend
# @param lines lines read from a yaml file
# @export
# @keywords internal
.parse_yrf_options <- function(lines){
option_lines <- filter_yrf_options(lines)
L <- yaml::yaml.load(string = paste0(option_lines,collapse="\n"))
L$options
}
parse_yrf_include <- function(file){
lines <- .readlines_utf8(file)
option_lines <- filter_yrf_options(lines)
L <- yaml::yaml.load(string = paste0(option_lines,collapse="\n"))
paths <- L$include
rel_path <- !is_full_path(paths)
paths[rel_path] <- file.path(dirname(file),paths[rel_path])
paths
}
yrf_block_type <- function(block){
if ( is.null(block) ){
NULL
} else if ( any(c("options","include") %in% names(block)) ){
"options"
} else if ( identical(names(block),"rules") ){
"yrf"
} else {
"free"
}
}
valid_yaml <- function(string){
root <- names(yaml::yaml.load(string))
keys <- c("options","include","rules")
valid <- length(root) > 0 && all(root %in% keys)
if ( !valid & length(root) > 0 ){
warning(
sprintf("Found invalid keys: %s\n", paste0(root[!root %in% keys],collapse=", "))
)
}
valid
}
is_yaml <- function(string){
out <- tryCatch(yaml::yaml.load(string),error = function(e) FALSE)
!identical(out,FALSE)
}
is_r <- function(string){
out <- tryCatch(parse(text=string),error = function(e) FALSE)
!identical(out,FALSE)
}
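# Quick illustration (results indicative):
#   is_yaml("rules:\n- speed >= 0")   # TRUE: parses as yaml
#   is_r("speed >= 0; dist >= 0")     # TRUE: parses as R
#   is_r("speed >= ")                 # FALSE: R parse error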
# find yaml documents and parse them
yaml_blocks <- function(lines){
S <- strsplit(x = paste0(lines,collapse="\n"), split="---[[:blank:]]*\\n?")[[1]]
S <- Filter(function(x) nchar(x)>0,S)
lapply(S, function(s){
if ( is_yaml(s) && valid_yaml(s) ){
yaml::yaml.load(s)
} else if ( is_r(s) ){
s
} else {
cat(sprintf("\nThe following invalid block is skipped:\n %s\n",s))
warning("Blocks containing invalid yaml or R syntax detected")
NULL
}
})
}
# lines <- readlines_utf8("tmp/test.yml")
# blocks <- yaml_blocks(lines)
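#
# Sketch of the layout these helpers expect in a yaml rule file. Top-level
# keys must be 'options', 'include' or 'rules' (see valid_yaml above); the
# per-rule fields shown here are illustrative:
#
#   ---
#   options:
#     raise: none
#   include:
#     - general_rules.yaml
#   ---
#   rules:
#   - expr: speed >= 0
#     name: nonneg_speed
#   ---
#   # free-form blocks of R expressions are accepted as well
#   dist >= 0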
|
/scratch/gouwar.j/cran-all/cranData/validate/R/yaml.R
|
## ----include=FALSE------------------------------------------------------------
source("chunk_opts.R")
## ----echo=TRUE, eval=FALSE----------------------------------------------------
# install.packages("validate")
## ----echo=FALSE, include=!knitr::is_latex_output()----------------------------
#knitr::asis_output("
#[](https://creativecommons.org/licenses/by/4.0/)
#")
## ----include=FALSE------------------------------------------------------------
source("chunk_opts.R")
## ----include=FALSE, eval=knitr::is_latex_output()-----------------------------
# knitr::opts_chunk$set(comment=NA)
## -----------------------------------------------------------------------------
data(cars)
head(cars, 3)
## -----------------------------------------------------------------------------
library(validate)
rules <- validator(speed >= 0
, dist >= 0
, speed/dist <= 1.5
, cor(speed, dist)>=0.2)
## -----------------------------------------------------------------------------
out <- confront(cars, rules)
## -----------------------------------------------------------------------------
summary(out)
## ----fig.height=7, fig.width=7, echo=!knitr::is_latex_output(), eval=!knitr::is_latex_output()----
plot(out)
## ----label="validateplot", fig.height=5, fig.width=5, out.width="0.7\\textwidth", fig.align="center", echo=knitr::is_latex_output(), eval=knitr::is_latex_output(), fig.env="figure",fig.pos="!t", fig.cap="Plot of validation output."----
# plot(out)
## -----------------------------------------------------------------------------
violating(cars, out[1:3])
## -----------------------------------------------------------------------------
df_out <- as.data.frame(out)
head(df_out, 3)
## ----include=FALSE------------------------------------------------------------
source("chunk_opts.R")
## -----------------------------------------------------------------------------
library(validate)
data(SBS2000)
head(SBS2000, 3)
## -----------------------------------------------------------------------------
is.character("hihi")
is.character(3)
## -----------------------------------------------------------------------------
rules <- validator(
is.character(size)
, is.numeric(turnover)
)
out <- confront(SBS2000, rules)
summary(out)
## -----------------------------------------------------------------------------
rule <- validator(
!is.na(turnover)
, !is.na(other.rev)
, !is.na(profit)
)
out <- confront(SBS2000, rule)
summary(out)
## -----------------------------------------------------------------------------
rules <- validator(
!any(is.na(incl.prob))
, all(is.na(vat)) )
out <- confront(SBS2000, rules)
summary(out)
## -----------------------------------------------------------------------------
rules <- validator(
nchar(as.character(size)) >= 2
, field_length(id, n=5)
, field_length(size, min=2, max=3)
)
out <- confront(SBS2000, rules)
summary(out)
## -----------------------------------------------------------------------------
dat <- data.frame(x = c("2.54","2.66","8.142","23.53"))
## -----------------------------------------------------------------------------
rule <- validator( number_format(x, format="d.dd"))
values(confront(dat, rule))
## -----------------------------------------------------------------------------
x <- c("12.123","123.12345")
number_format(x, min_dig=4)
number_format(x, max_dig=3)
number_format(x, min_dig=2, max_dig=4)
number_format(x, min_dig=2, max_dig=10)
# specify the decimal separator.
number_format("12,123", min_dig=2, dec=",")
## -----------------------------------------------------------------------------
rule <- validator(field_format(id, "RET*")
, field_format(size, "sc?" ))
out <- confront(SBS2000, rule)
summary(out)
## -----------------------------------------------------------------------------
rule <- validator(
grepl("^sc[0-9]$", size)
, field_format(id, "^RET\\d{2}$" , type="regex") )
summary(confront(SBS2000, rule))
## -----------------------------------------------------------------------------
rules <- validator(TO = turnover >= 0
, TC = total.costs >= 0)
## -----------------------------------------------------------------------------
rules <- rules +
validator(PR = in_range(incl.prob, min=0, max=1))
## -----------------------------------------------------------------------------
out <- confront(SBS2000, rules, lin.ineq.eps=0)
## -----------------------------------------------------------------------------
summary(out)
## -----------------------------------------------------------------------------
period = sprintf("2018Q%d", 1:4)
period
## -----------------------------------------------------------------------------
in_range(period, min="2017Q2", max = "2018Q2")
## -----------------------------------------------------------------------------
rule <- validator(size %in% c("sc0","sc1","sc2","sc3"))
out <- confront(SBS2000, rule)
summary(out)
## -----------------------------------------------------------------------------
c(1, 3, NA) %in% c(1,2)
c(1, 3, NA) %vin% c(1,2)
## -----------------------------------------------------------------------------
rule <- validator(
x %in% read.csv("codelist.csv")$code
)
## Or, equivalently
rule <- validator(
valid_codes := read.csv("codelist.csv")$code
, x %in% valid_codes
)
## -----------------------------------------------------------------------------
codelist <- c("sc0","sc1","sc2","sc3")
rule <- validator(size %in% valid_codes)
# pass the codelist
out <- confront(SBS2000, rule
, ref=list(valid_codes=codelist))
summary(out)
## ----include=FALSE------------------------------------------------------------
source("chunk_opts.R")
## -----------------------------------------------------------------------------
library(validate)
data(samplonomy)
head(samplonomy, 3)
## -----------------------------------------------------------------------------
head(samplonomy,3)
## -----------------------------------------------------------------------------
rule <- validator(is_unique(region, period, measure))
out <- confront(samplonomy, rule)
# showing 7 columns of output for readability
summary(out)[1:7]
## -----------------------------------------------------------------------------
violating(samplonomy, out)
## -----------------------------------------------------------------------------
df <- data.frame(x = c(1,1), y = c("A",NA))
df
## -----------------------------------------------------------------------------
df <- data.frame(x=rep(1,3), y = c("A", NA, NA))
is_unique(df$x, df$y)
## -----------------------------------------------------------------------------
# y is unique, given x. But not by itself
df <- data.frame(x=rep(letters[1:2],each=3), y=rep(1:3,2))
# the split-apply-combine approach
unsplit(tapply(df$y, df$x, is_unique), df$x)
# the combined approach
is_unique(df$x, df$y)
## -----------------------------------------------------------------------------
rule <- validator(
contains_at_least(
keys = data.frame(period = as.character(2014:2019))
, by=list(region, measure) )
)
out <- confront(samplonomy, rule)
# showing 7 columns of output for readability
summary(out)[1:7]
## -----------------------------------------------------------------------------
head(violating(samplonomy, out))
## -----------------------------------------------------------------------------
years <- as.character(2014:2019)
quarters <- paste0("Q",1:4)
keyset <- expand.grid(
region = c("Agria", "Induston")
, period = sapply(years, paste0, quarters))
head(keyset)
## -----------------------------------------------------------------------------
rule <- validator(
contains_at_least(keys=minimal_keys, by=measure)
)
out <- confront(samplonomy, rule
, ref=list(minimal_keys=keyset))
# showing 7 columns of output for readability
summary(out)[1:7]
## -----------------------------------------------------------------------------
years <- as.character(2014:2019)
quarters <- paste0("Q",1:4)
keyset <- expand.grid(
region = c(
"Agria"
,"Crowdon"
,"Greenham"
,"Induston"
,"Mudwater"
,"Newbay"
,"Oakdale"
,"Samplonia"
,"Smokely"
,"Wheaton"
)
,period = c(years, sapply(years, paste0, quarters))
)
head(keyset)
## -----------------------------------------------------------------------------
rule <- validator(contains_exactly(all_keys, by=measure))
out <- confront(samplonomy, rule
, ref=list(all_keys=keyset))
# showing 7 columns of output for readability
summary(out)[1:7]
## -----------------------------------------------------------------------------
erroneous_records <- violating(samplonomy, out)
unique(erroneous_records$measure)
## -----------------------------------------------------------------------------
is_linear_sequence(c(1,2,3,4))
is_linear_sequence(c(8,6,4,2))
is_linear_sequence(c(2,4,8,16))
## -----------------------------------------------------------------------------
is_linear_sequence(c("2020Q1","2020Q2","2020Q3","2020Q4"))
## -----------------------------------------------------------------------------
is_linear_sequence(c("2020Q4","2020Q2","2020Q3","2020Q1"))
## -----------------------------------------------------------------------------
is_linear_sequence(c("2020Q4","2020Q2","2020Q3","2020Q1")
, begin = "2020Q2")
## -----------------------------------------------------------------------------
series <- c(1,2,3,4,1,2,3,3)
blocks <- rep(c("a","b"), each = 4)
is_linear_sequence(series, by = blocks)
## -----------------------------------------------------------------------------
in_linear_sequence(series, by = blocks)
## -----------------------------------------------------------------------------
is_linear_sequence(5)
## -----------------------------------------------------------------------------
blocks[8] <- "c"
data.frame(series = series, blocks = blocks)
in_linear_sequence(series, blocks)
## -----------------------------------------------------------------------------
in_linear_sequence(series, blocks, begin = 1, end = 4)
## -----------------------------------------------------------------------------
rule <- validator(
in_linear_sequence(period
, by = list(region, freq, measure))
)
out <- confront(samplonomy, rule)
summary(out)[1:7]
## ----results='hide'-----------------------------------------------------------
violating(samplonomy, out)
## ----include=FALSE------------------------------------------------------------
source("chunk_opts.R")
## -----------------------------------------------------------------------------
library(validate)
data(SBS2000)
head(SBS2000, 3)
## -----------------------------------------------------------------------------
rules <- validator(
is_complete(id)
, is_complete(id, turnover)
, is_complete(id, turnover, profit )
, all_complete(id)
)
out <- confront(SBS2000, rules)
# suppress last column for brevity
summary(out)[1:7]
## -----------------------------------------------------------------------------
rules <- validator(
total.rev - profit == total.costs
, turnover + other.rev == total.rev
, profit <= 0.6*total.rev
)
out <- confront(SBS2000, rules)
summary(out)
## -----------------------------------------------------------------------------
out <- confront(SBS2000, rules, lin.ineq.eps=0, lin.eq.eps=0.01)
summary(out)
## -----------------------------------------------------------------------------
rule <- validator(if (staff >= 1) staff.costs >= 1)
out <- confront(SBS2000, rule)
summary(out)
## -----------------------------------------------------------------------------
transactions <- data.frame(
sender = c("S21", "X34", "S45","Z22")
, receiver = c("FG0", "FG2", "DF1","KK2")
, value = sample(70:100,4)
)
## -----------------------------------------------------------------------------
forbidden <- data.frame(sender="S*",receiver = "FG*")
## -----------------------------------------------------------------------------
rule <- validator(does_not_contain(glob(forbidden_keys)))
out <- confront(transactions, rule, ref=list(forbidden_keys=forbidden))
## Suppress columns for brevity
summary(out)[1:7]
## -----------------------------------------------------------------------------
violating(transactions, out)
## ----include=FALSE------------------------------------------------------------
source("chunk_opts.R")
## -----------------------------------------------------------------------------
library(validate)
data(SBS2000)
head(SBS2000, 3)
## -----------------------------------------------------------------------------
data(samplonomy)
head(samplonomy, 3)
## -----------------------------------------------------------------------------
rule <- validator(
mean(profit, na.rm=TRUE) >= 1
, cor(turnover, staff, use="pairwise.complete.obs") > 0
)
out <- confront(SBS2000, rule)
# suppress some columns for brevity
summary(out)[1:7]
## -----------------------------------------------------------------------------
rule <- validator(
turnover <= 10*do_by(turnover, by=size, fun=median, na.rm=TRUE)
)
out <- confront(SBS2000, rule)
# suppress some columns for brevity
summary(out)[1:7]
## -----------------------------------------------------------------------------
medians <- with(SBS2000, do_by(turnover, by=size, fun=median, na.rm=TRUE))
head(data.frame(size = SBS2000$size, median=medians))
## -----------------------------------------------------------------------------
d <- data.frame(
hhid = c(1, 1, 2, 1, 2, 2, 3 )
, person = c(1, 2, 3, 4, 5, 6, 7 )
, hhrole = c("h","h","m","m","h","m","m")
)
d
## -----------------------------------------------------------------------------
rule <- validator(exists_one(hhrole == "h", by=hhid))
out <- confront(d, rule)
# suppress some columns for brevity
summary(out)
## -----------------------------------------------------------------------------
violating(d, out)
## -----------------------------------------------------------------------------
violating(d, validator(exists_any(hhrole=="h",by=hhid) ))
## -----------------------------------------------------------------------------
rule <- validator(exists_one(region=="Samplonia", by=list(period, measure)))
## -----------------------------------------------------------------------------
out <- confront(samplonomy, rule)
# suppress some columns for brevity
summary(out)[1:7]
## -----------------------------------------------------------------------------
violating(samplonomy, out)
## -----------------------------------------------------------------------------
data(nace_rev2)
head(nace_rev2[1:4])
## -----------------------------------------------------------------------------
dat <- data.frame(
nace = c("01","01.1","01.11","01.12", "01.2")
, volume = c(100 ,70 , 30 ,40 , 25 )
)
dat
## -----------------------------------------------------------------------------
dat$check <- hierarchy(dat$volume, dat$nace, nace_rev2[3:4])
dat
## -----------------------------------------------------------------------------
samplonia <- data.frame(
region = c("Agria", "Induston"
, "Wheaton", "Greenham"
, "Smokely", "Mudwater", "Newbay", "Crowdon")
, parent = c(rep("Samplonia",2), rep("Agria",2), rep("Induston",4))
)
samplonia
## -----------------------------------------------------------------------------
data(samplonomy)
head(samplonomy)
## -----------------------------------------------------------------------------
rule <- validator(
hierarchy(value, region, hierarchy=ref$codelist, by=list(period, measure))
)
out <- confront(samplonomy, rule, ref=list(codelist=samplonia))
summary(out)
## -----------------------------------------------------------------------------
warnings(out)
## -----------------------------------------------------------------------------
subset(samplonomy, region == "Induston" &
period == "2018Q2" &
measure == "export")
## -----------------------------------------------------------------------------
i <- !duplicated(samplonomy[c("region","period","measure")])
samplonomy2 <- samplonomy[i, ]
out <- confront(samplonomy2, rule, ref=list(codelist=samplonia))
# suppress some columns for brevity
summary(out)[1:7]
## -----------------------------------------------------------------------------
rules <- validator(
level0 = hierarchy(value, region, ref$level0, by=list(period, measure))
, level1 = hierarchy(value, region, ref$level1, by=list(period, measure))
)
out <- confront(samplonomy2, rules
, ref=list(level0=samplonia[1:2,], level1=samplonia[3:8,])
)
summary(out)
## -----------------------------------------------------------------------------
violating(samplonomy2, out["level0"])
## -----------------------------------------------------------------------------
rules <- validator(
part_whole_relation(value
, labels=region
, whole="Samplonia"
, part =c("Agria","Induston")
, by=list(measure, period)
)
)
## -----------------------------------------------------------------------------
out <- confront(samplonomy, rules)
# suppress some columns for brevity
summary(out)[1:7]
## -----------------------------------------------------------------------------
violating(samplonomy, out)
## -----------------------------------------------------------------------------
subset(samplonomy, region=="Agria" & period == "2015" & measure == "gdp")
## -----------------------------------------------------------------------------
subset(samplonomy, region=="Induston" & freq == "A" & measure=="export")
## -----------------------------------------------------------------------------
rules <- validator(part_whole_relation(value
, labels = period
, whole = rx("^\\d{4}$")
, by = list(region, substr(period,1,4), measure)
))
out <- confront(samplonomy, rules)
## -----------------------------------------------------------------------------
errors(out)
# suppress some columns for brevity
summary(out)[1:7]
## -----------------------------------------------------------------------------
lacking(samplonomy, out)
## -----------------------------------------------------------------------------
violating(samplonomy, out)
## ----include=FALSE------------------------------------------------------------
source("chunk_opts.R")
## -----------------------------------------------------------------------------
library(validate)
ii <- indicator(
BMI = (weight/2.2046)/(height*0.0254)^2
, mh = mean(height)
, mw = mean(weight))
out <- confront(women, ii)
## -----------------------------------------------------------------------------
out
## -----------------------------------------------------------------------------
summary(out)
## -----------------------------------------------------------------------------
head(add_indicators(women, out), 3)
## -----------------------------------------------------------------------------
women$id <- letters[1:15]
## -----------------------------------------------------------------------------
out <- confront(women, ii,key="id")
tail( as.data.frame(out) )
## ----include=FALSE------------------------------------------------------------
source("chunk_opts.R")
## ----echo=FALSE---------------------------------------------------------------
library(validate)
## -----------------------------------------------------------------------------
v <- validator(speed >= 0, dist>=0, speed/dist <= 1.5)
v
## -----------------------------------------------------------------------------
w <- v[c(1,3)]
## -----------------------------------------------------------------------------
w <- v[c("V1","V3")]
## -----------------------------------------------------------------------------
rules1 <- validator(speed>=0)
rules2 <- validator(dist >= 0)
all_rules <- rules1 + rules2
## -----------------------------------------------------------------------------
v[[3]]
## -----------------------------------------------------------------------------
rules <- validator(positive_speed = speed >= 0, ratio = speed/dist <= 1.5)
rules
## -----------------------------------------------------------------------------
names(rules)
names(rules)[1] <- "nonnegative_speed"
## -----------------------------------------------------------------------------
# add 'foo' to the first rule:
meta(rules[1],"foo") <- 1
# Add 'bar' to all rules
meta(rules,"bar") <- "baz"
## -----------------------------------------------------------------------------
v[[1]]
## -----------------------------------------------------------------------------
meta(v)
## -----------------------------------------------------------------------------
summary(v)
## -----------------------------------------------------------------------------
length(v)
## -----------------------------------------------------------------------------
variables(v)
variables(v,as="matrix")
## -----------------------------------------------------------------------------
rules <- validator(speed >= 0, dist >= 0, speed/dist <= 1.5)
df <- as.data.frame(rules)
## -----------------------------------------------------------------------------
rules <- validator(.data=df)
## ----eval=FALSE---------------------------------------------------------------
# ?syntax
## -----------------------------------------------------------------------------
sum_by(1:10, by = rep(c("a","b"), each=5) )
## -----------------------------------------------------------------------------
v <- validator(height>0, weight>0,height/weight < 0.5)
cf <- confront(women, v)
aggregate(cf)
## -----------------------------------------------------------------------------
head(aggregate(cf,by='record'))
## -----------------------------------------------------------------------------
# rules with most violations sorting first:
sort(cf)
## -----------------------------------------------------------------------------
v <- validator(hite > 0, weight>0)
summary(confront(women, v))
## ----eval=TRUE, error=TRUE----------------------------------------------------
# this gives an error
confront(women, v, raise='all')
## -----------------------------------------------------------------------------
women1 <- women
rules <- validator(height == women_reference$height)
cf <- confront(women, rules, ref = list(women_reference = women1))
summary(cf)
## -----------------------------------------------------------------------------
rules <- validator( fruit %in% codelist )
fruits <- c("apple", "banana", "orange")
dat <- data.frame(fruit = c("apple","broccoli","orange","banana"))
cf <- confront(dat, rules, ref = list(codelist = fruits))
summary(cf)
## ----include=FALSE------------------------------------------------------------
source("chunk_opts.R")
library(validate)
## ----eval=FALSE---------------------------------------------------------------
# # basic range checks
# speed >= 0
# dist >= 0
#
# # ratio check
# speed / dist <= 1.5
## -----------------------------------------------------------------------------
rules <- validator(.file="myrules.R")
## -----------------------------------------------------------------------------
rules <- validator(.file="myrules.yaml")
rules
## -----------------------------------------------------------------------------
rules1 <- rules[c(1,3)]
export_yaml(rules1, file="myrules2.yaml")
## ----eval=FALSE---------------------------------------------------------------
# v <- validator(height>0, weight> 0)
# export_yaml(v,file="my_rules.yaml")
## ----eval=FALSE---------------------------------------------------------------
# df <- as.data.frame(v)
## ----include=FALSE------------------------------------------------------------
source("chunk_opts.R")
library(validate)
## ----eval=FALSE---------------------------------------------------------------
# sdmx_endpoint()
## ----eval=TRUE----------------------------------------------------------------
sdmx_endpoint(registry="global")
## ----eval=FALSE---------------------------------------------------------------
# codelist <- sdmx_codelist(
# endpoint = sdmx_endpoint("global")
# , agency_id = "ESTAT"
# , resource_id = "CL_ACTIVITY")
#
# head(codelist)
# [1] "_T" "_X" "_Z" "A" "A_B" "A01"
## ----eval=FALSE---------------------------------------------------------------
# Activity %in% global_codelist(agency_id="ESTAT", resource_id="CL_ACTIVITY")
## ----eval=FALSE---------------------------------------------------------------
# rules <- validator_from_dsd(endpoint = sdmx_endpoint("ESTAT")
# , agency_id = "ESTAT", resource_id = "STSALL", version="latest")
#
# length(rules)
# [1] 13
# rules[1]
# Object of class 'validator' with 1 elements:
# CL_FREQ: FREQ %in% sdmx_codelist(endpoint = "https://ec.europa.eu/tools/cspa_services_global/sdmxregistry/rest", agency_id = "SDMX", resource_id = "CL_FREQ", version = "2.0")
# Rules are evaluated using locally defined options
## ----eval=FALSE---------------------------------------------------------------
# rule[[1]]
## ----include=FALSE------------------------------------------------------------
source("chunk_opts.R")
## ----echo=FALSE---------------------------------------------------------------
library(validate)
## -----------------------------------------------------------------------------
library(validate)
data(SBS2000)
original <- SBS2000
version2 <- original
version2$other.rev <- abs(version2$other.rev)
version3 <- version2
version3$turnover[is.na(version3$turnover)] <- version3$vat[is.na(version3$turnover)]
## -----------------------------------------------------------------------------
cells(input = original, cleaned = version2, imputed = version3)
## -----------------------------------------------------------------------------
cells(input = original, cleaned = version2, imputed = version3
, compare="sequential")
## -----------------------------------------------------------------------------
version4 <- version3
version4$turnover[is.na(version4$turnover)] <- median(version4$turnover, na.rm=TRUE)
# from kEUR to EUR
version5 <- version4
version5$staff.costs <- version5$staff.costs * 1000
## -----------------------------------------------------------------------------
out <- cells(input = original
, cleaned = version2
, vat_imp = version3
, med_imp = version4
, units = version5)
par(mfrow=c(2,1))
barplot(out)
plot(out)
## -----------------------------------------------------------------------------
rules <- validator(other.rev >= 0
, turnover >= 0
, turnover + other.rev == total.rev
)
comparison <- compare(rules
, input = original
, cleaned = version2
, vat_imp = version3
, med_imp = version4
, units = version5)
comparison
## -----------------------------------------------------------------------------
par(mfrow=c(2,1))
barplot(comparison)
plot(comparison)
## ----eval=FALSE---------------------------------------------------------------
# ## Contents of clean_supermarkets.R
# library(validate)
#
# # 1. simulate reading data
# data(SBS2000)
# spm <- SBS2000[c("id","staff","turnover","other.rev","total.rev")]
#
# # 2. add a logger from 'validate'
# start_log(spm, logger=lbj_cells())
#
# # 3. assume empty values should be filled with 0
# spm <- transform(spm, other.rev = ifelse(is.na(other.rev),0,other.rev))
#
# # 4. assume that negative amounts have only a sign error
# spm <- transform(spm, other.rev = abs(other.rev))
#
# # 5a. ratio estimator for staff conditional on turnover
# Rhat <- with(spm, mean(staff,na.rm=TRUE)/mean(turnover,na.rm=TRUE))
#
# # 5b. impute 'staff' variable where possible using ratio estimator
# spm <- transform(spm, staff = ifelse(is.na(staff), Rhat * turnover, staff))
#
# # 6. write output
# write.csv(spm, "supermarkets_treated.csv", row.names = FALSE)
## -----------------------------------------------------------------------------
library(lumberjack)
run_file('clean_supermarkets.R')
## -----------------------------------------------------------------------------
logfile <- read.csv("spm_lbj_cells.csv")
## -----------------------------------------------------------------------------
logfile[3:4,]
## ----eval=FALSE---------------------------------------------------------------
# ## Contents of clean_supermarkets2.R
# library(validate)
#
# #1.a simulate reading data
# data(SBS2000, package="validate")
# spm <- SBS2000[c("id","staff","other.rev","turnover","total.rev")]
#
# # 1.b Create rule set
# rules <- validator(staff >= 0, other.rev>=0, turnover>=0
# , other.rev + turnover == total.rev)
#
#
# # 2. add two loggers
# start_log(spm, logger=lbj_cells())
# start_log(spm, logger=lbj_rules(rules))
#
# ## The rest is the same as above ...
## -----------------------------------------------------------------------------
run_file("clean_supermarkets2.R")
## -----------------------------------------------------------------------------
read.csv("spm_lbj_rules.csv")[3:4,]
## ----eval=FALSE---------------------------------------------------------------
# stop_log(spm, logger="lbj_rules",file="my_output.csv")
## ----include=FALSE------------------------------------------------------------
source("chunk_opts.R")
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/doc/cookbook.R
|
---
title: "The Data Validation Cookbook"
author: "Mark P.J. van der Loo"
date: "`r Sys.Date()`"
output:
bookdown::html_document2:
theme: paper
toc: true
toc_depth: 3
toc_float:
collapsed: false
smooth_scroll: false
code_folding: none
code_download: false
vignette: >
%\VignetteIndexEntry{The Data Validation Cookbook}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r, include=FALSE}
source("chunk_opts.R")
```
# Preface {-}
This book is about checking data with the
[validate](https://cran.r-project.org/package=validate) package for
[R](https://www.r-project.org).
This version of the book was rendered with `validate` version
`r packageVersion("validate")`. The latest release of `validate` can be installed
from [CRAN](https://cran.r-project.org) as follows.
```{r, echo=TRUE, eval=FALSE}
install.packages("validate")
```
The purposes of this book include demonstrating the main tools and workflows of
the `validate` package, giving examples of common data validation tasks, and
showing how to analyze data validation results.
The book is organized as follows. Chapter \@ref(sect-intro) discusses the bare
necessities to be able to follow the rest of the book. Chapters
\@ref(sect-varlevelchecks) to \@ref(sect-statisticalchecks) form the 'cookbook'
part of the book and discuss many different ways to check your data by example.
Chapter \@ref(sect-indicators) is devoted to deriving plausibility measures
with the `validate` package. Chapters \@ref(sect-work) and
\@ref(sect-rulefiles) treat working with validate in-depth. Chapter
\@ref(sect-comparing) discusses how to compare two or more versions of a
dataset, possibly automated through the
[lumberjack](https://cran.r-project.org/package=lumberjack) package. The
section with Bibliographical Notes lists some references and points out some
literature for further reading.
## Prerequisites {-}
Readers of this book are expected to have some knowledge of R. In particular,
you should know how to import data into R and know a little about working with
data frames and vectors.
## Citing this work {-}
To cite the `validate` package please use the following citation.
> MPJ van der Loo and E de Jonge (2021). Data Validation Infrastructure for
> R. _Journal of Statistical Software_, 97(10) [paper](https://www.jstatsoft.org/article/view/v097i10).
To cite this cookbook, please use the following citation.
> MPJ van der Loo (`r substr(as.Date(Sys.time()),1,4)`) _The Data Validation Cookbook_
> version `r packageVersion("validate")`. [https://data-cleaning.github.io/validate](http://data-cleaning.github.io/validate/)
## Acknowledgements {-}
This work was partially funded by European Grant Agreement 88287--NL-VALIDATION
of the European Statistical System.
## Contributing {-}
If you find a mistake, or have some suggestions, please file an issue or a pull
request on the github page of the package:
[https://github.com/data-cleaning/validate](https://github.com/data-cleaning/validate).
If you do not have or want a github account, you can contact the author via the
e-mail address that is listed with the package.
## License {-}
```{r, echo=FALSE, include=!knitr::is_latex_output()}
#knitr::asis_output("
#[](https://creativecommons.org/licenses/by/4.0/)
#")
```
This work is licensed under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/).
# Introduction to validate {#sect-intro}
```{r, include=FALSE}
source("chunk_opts.R")
```
> Data Validation is an activity verifying whether or not a combination of
> values is a member of a set of acceptable combinations [(Di Zio et al , 2015)](https://www.markvanderloo.eu/files/share/zio2015methodology.pdf)
The validate package is intended to make checking your data easy, maintainable, and reproducible.
It does this by allowing you to
- test data against a reusable set of data validation rules;
- investigate, summarise, and visualise data validation results;
- import and export rule sets from and to various formats;
- filter, select and otherwise manipulate data validation rules;
- investigate, summarise, and visualise rule sets.
For advanced rule manipulation there is the
[validatetools](https://cran.r-project.org/package=validatetools) package.
```{r, include=FALSE, eval=knitr::is_latex_output()}
knitr::opts_chunk$set(comment=NA)
```
## A quick example
Here's an example demonstrating the typical workflow. We'll use the built-in
`cars` data set, which contains 50 cases of speed and stopping distances of
cars.
```{r }
data(cars)
head(cars, 3)
```
Validating data is all about checking whether a data set meets presumptions or
expectations you have about it, and the validate package makes it easy for you
to define those expectations. Let's do a quick check on variables in the
`cars` data set. We first load the package, and create a list
of data quality demands with the `validator()` function.
```{r}
library(validate)
rules <- validator(speed >= 0
, dist >= 0
, speed/dist <= 1.5
, cor(speed, dist)>=0.2)
```
Here, the first three rules are record-wise checks: each record will yield one
answer. In the last rule we check whether speed and distance are positively
correlated; this will yield a single `TRUE` or `FALSE` for the whole data set.
We now confront the data with those rules and save the output into a variable
called `out`.
```{r}
out <- confront(cars, rules)
```
The easiest way to check the results is with `summary()`.
```{r}
summary(out)
```
This returns a data frame with one line of information for each rule `V1`,
`V2`, `V3` and `V4`. To be precise:
- How many data items were checked against each rule.
- How many items passed, failed or resulted in `NA`.
- Whether the check resulted in an error (could not be performed) or gave a warning.
- The expression that was actually evaluated to perform the check.
The same information can be summarized graphically as follows
`r if( knitr::is_latex_output()) knitr::asis_output("(see Figure \\ref{fig:validateplot})")`.
```{r,fig.height=7, fig.width=7, echo=!knitr::is_latex_output(), eval=!knitr::is_latex_output()}
plot(out)
```
```{r, label="validateplot", fig.height=5, fig.width=5, out.width="0.7\\textwidth", fig.align="center", echo=knitr::is_latex_output(), eval=knitr::is_latex_output(), fig.env="figure",fig.pos="!t", fig.cap="Plot of validation output."}
plot(out)
```
In this plot each horizontal bar indicates the percentage of Failing, Passing,
and Missing cases. The table in the legend lists the total number of Fails,
Passes and Missings, summed over all checks. Here, we have 4 rules. The first
three rules yield 50 results each, while the last rule yields a single result.
Hence there are 151 validation results in total.
Using the function `violating` we can select the records that violate one or
more rules. We select only the first three results because the last rule
cannot be interpreted record by record.
```{r}
violating(cars, out[1:3])
```
We can extract all individual results using, for example, `as.data.frame`.
```{r}
df_out <- as.data.frame(out)
head(df_out, 3)
```
We see that in record `1`, rule `V1` was satisfied (the result is `TRUE`), and
that validate left a bit of slack when executing the rule, to avoid false
negatives caused by machine rounding issues.
Summarizing, the basic workflow in `validate` is to create a rule set, confront
a data set with the rules in the rule set, and then analyze or use the results
further. To understand which checks you can perform with `validate` you only
need to remember the following.
> Any R expression that results in a `logical` is accepted by `validate` as a
> validation rule.
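For instance, both conditional and aggregate expressions qualify. A small
sketch (not evaluated here):
```{r, eval=FALSE}
rules <- validator(
    if (speed > 20) dist > 50    # conditional (record-wise) check
  , mean(dist, na.rm=TRUE) > 0   # aggregate check on a whole column
)
confront(cars, rules)
```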
You are now ready to start validating your data, and navigate Chapters
\@ref(sect-availableunique)-\@ref(sect-statisticalchecks) to learn how to
define specific types of checks. Chapter \@ref(sect-work) discusses working
with `validate` in more detail.
# Variable checks {#sect-varlevelchecks}
```{r, include=FALSE}
source("chunk_opts.R")
```
Variable checks are checks that can be performed on a field-by-field basis. An
example is checking that a variable called `Age` is nonnegative, or of integer
type. Variable checks are among the simplest checks.
**Data**
In this section we will use the `SBS2000` dataset, that is included with `validate`.
```{r}
library(validate)
data(SBS2000)
head(SBS2000, 3)
```
See `?SBS2000` for a description.
## Variable type
In `R`, one can test the type of a variable using built-in functions such as
`is.numeric` or `is.character`.
```{r}
is.character("hihi")
is.character(3)
```
In `validate`, any function starting with `is.` ('is' followed by a dot) is
considered a validation function.
```{r}
rules <- validator(
is.character(size)
, is.numeric(turnover)
)
out <- confront(SBS2000, rules)
summary(out)
```
We see that each rule checks a single item, namely one column of data. The
first rule is violated (it is in fact a `factor` variable). The second rule
is satisfied.
## Missingness {#sect-missingness}
Use R's standard `is.na()` to check missing items in individual variables. Negate
it to check that values are available.
```{r}
rule <- validator(
!is.na(turnover)
, !is.na(other.rev)
, !is.na(profit)
)
out <- confront(SBS2000, rule)
summary(out)
```
We see that in `r summary(out)$fails[1]` cases the variable `turnover` is missing,
while `other.rev` and `profit` are missing respectively in `r summary(out)$fails[2]`
and `r summary(out)$fails[3]` occasions.
To demand that all items must be present or absent for a certain variable,
use R's quantifiers: `any()` or `all()`, possibly negated.
```{r}
rules <- validator(
!any(is.na(incl.prob))
, all(is.na(vat)) )
out <- confront(SBS2000, rules)
summary(out)
```
- To check whether records or parts thereof are completed, see \@ref(sect-iscomplete).
- To check whether records are available at all, see \@ref(sect-completeness).
## Field length
The number of characters in text fields can be tested using either R's standard
`nchar()` function or the convenience function `field_length`.
```{r}
rules <- validator(
nchar(as.character(size)) >= 2
, field_length(id, n=5)
, field_length(size, min=2, max=3)
)
out <- confront(SBS2000, rules)
summary(out)
```
One advantage of `field_length` is that its argument is converted to
character (recall that `size` is a `factor` variable). The function
`field_length` can be used to either test for exact field lengths or to
check whether the number of characters is within a certain range.
The field length is measured as the number of [code
points](https://en.wikipedia.org/wiki/Code_point). Use `type="width"` to
measure the printed width (nr of columns) or `type="bytes"` to count the number
of bytes.
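For example, a sketch combining these options (not evaluated here):
```{r, eval=FALSE}
rules <- validator(
    field_length(id, n=5, type="bytes")
  , field_length(size, min=2, max=3, type="width")
)
```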
## Format of numeric fields
For numbers that are stored in `character` type, there is a convenience
function called `number_format()` that accepts a variable name
and a format specification.
```{r}
dat <- data.frame(x = c("2.54","2.66","8.142","23.53"))
```
To check that the numbers are formatted with one figure before, and two figures
after the decimal point, we perform the following check.
```{r}
rule <- validator( number_format(x, format="d.dd"))
values(confront(dat, rule))
```
Here, the specification `format="d.dd"` describes the allowed numeric formats.
In this specification the `"d"` stands for a digit, any other character except
the asterisk (`*`) stands for itself. The asterisk is interpreted as 'zero or
more digits'. Here are some examples of how to define number formats.
|format | match | non-match |
|-------------|-------------------------------|-------------------------------------|
|`0.dddd` | `"0.4321"` | `"0.123"`,`"1.4563"` |
|`d.ddEdd` | `"3.14E00"` | `"31.14E00"` |
|`d.*Edd` | `"0.314E01"`,`"3.1415297E00"` | `"3.1415230"` |
|`d.dd*` | `"1.23"`, `"1.234"`,$\ldots$ | `"1.2"` |
The last example shows how to check for a minimal number of digits behind the decimal
point.
There are special arguments to check the number of decimal figures
after the decimal separator.
```{r}
x <- c("12.123","123.12345")
number_format(x, min_dig=4)
number_format(x, max_dig=3)
number_format(x, min_dig=2, max_dig=4)
number_format(x, min_dig=2, max_dig=10)
# specify the decimal separator.
number_format("12,123", min_dig=2, dec=",")
```
The arguments `min_dig`, `max_dig` and `dec` are ignored when `format` is
specified.
This function is convenient only for fairly simple number formats. Generic
pattern matching in strings is discussed in the next section.
## General field format
A simple way to check for more general format is to use [globbing
patterns](https://en.wikipedia.org/wiki/Glob_(programming)). In such patterns,
the asterisk wildcard character (`*`) is interpreted as 'zero or more
characters' and the question mark (`?`) is interpreted as 'any character'.
For example, to check that the `id` variable in `SBS2000` starts with `"RET"`,
and that the `size` variable consists of `"sc"` followed by precisely one
character, we can do the following.
```{r}
rule <- validator(field_format(id, "RET*")
, field_format(size, "sc?" ))
out <- confront(SBS2000, rule)
summary(out)
```
Here, the globbing pattern `"RET*"` is understood as 'a string starting with
`"RET"`, followed by zero or more characters'. The pattern `"sc?"` means 'a
string starting with `"sc"`, followed by a single character'.
The most general way to check whether a field conforms to a pattern is to use a
[regular expression](https://en.wikipedia.org/wiki/Regular_expression). The
treatment of regular expressions is out of scope for this book, but we will
give a few examples. A good introduction to regular expressions is given by
> J. Friedl (2006) _Mastering regular expressions._ O'Reilly Media.
In `validate` one can use `grepl()` or `field_format()` with the argument `type="regex"`.
```{r}
rule <- validator(
grepl("^sc[0-9]$", size)
, field_format(id, "^RET\\d{2}$" , type="regex") )
summary(confront(SBS2000, rule))
```
Here, the expression `"^sc[0-9]$"` is a regular expression that should be read
as: the string starts (`"^"`) with `"sc"`, is followed by a digit between 0
and 9 (`"[0-9]"`) and then ends (`"$"`). The regular expression `"^RET\\d{2}$"`
indicates that a string must start (`"^"`) with `"RET"`, followed by two
digits (`"\\d{2}"`), after which the string must end (`"$"`).
Globbing patterns are easier to develop and easier to understand than regular
expressions, while regular expressions offer far more flexibility but are
harder to read. Complex and long regular expressions may have subtle matching
behaviour that is not immediately obvious to inexperienced users. It is
therefore advisable to test regular expressions with a small dataset
representing realistic cases that contains both matches and non-matches. As a
rule of thumb we would advise to use globbing patterns unless those offer
insufficient flexibility.
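For example, before adding a pattern to a rule set one can check it against a few strings with known expected outcomes (the test strings below are made up for illustration):
```{r, eval=FALSE}
test_ids <- c("RET01", "RET1", "ret01", "RET012")
grepl("^RET\\d{2}$", test_ids)
# expected: TRUE FALSE FALSE FALSE
```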
## Numeric ranges
Numerical variables may have natural limits from below and/or
above. For one-sided ranges, you can use the standard comparison operators.
```{r}
rules <- validator(TO = turnover >= 0
, TC = total.costs >= 0)
```
If a variable is bounded both from above and below one can use two rules,
or use the convenience function `in_range`.
```{r}
rules <- rules +
validator(PR = in_range(incl.prob, min=0, max=1))
```
By default, `in_range` includes the boundaries of the range, so the above rule
is equivalent to `incl.prob >= 0` and `incl.prob <= 1`.
```{r}
out <- confront(SBS2000, rules, lin.ineq.eps=0)
```
Here we set `lin.ineq.eps=0` to keep `validate` from building in a
margin for machine rounding errors.
```{r}
summary(out)
```
For numeric ranges it is often a better idea to work with inclusive
inequalities ($\leq$, $\geq$) than with strict inequalities ($<$, $>$). Take
as an example the strict inequality demand `income > 0`. This means that _any_
income larger than zero is acceptable, including numbers such as $0.01$,
$0.000001$ and $10^{-\textrm{Googol}}$. In practice there is almost always a
natural minimal acceptable value that is usually dictated by the unit of
measurement. For example, if we measure income in whole Euros, a better demand
would be `income >= 1`.
## Ranges for times and periods
For objects of class `Date` and objects of class `POSIXct` one can use comparison
operators and `in_range` in the same way as for numerical data. The `in_range` function
has a convenience feature for period data that is coded in character data, as in
`"2018Q1"` for quarterly data.
We first generate some example data.
```{r}
period = sprintf("2018Q%d", 1:4)
period
```
The `in_range` function is capable of recognizing certain date or period
formats.
```{r}
in_range(period, min="2017Q2", max = "2018Q2")
```
It is possible to specify your own date-time format using `strftime` notation.
See `?in_range` and `?strptime` for specifications.
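A minimal sketch for `Date` values (the dates are made up for illustration):
```{r, eval=FALSE}
d <- as.Date(c("2018-01-15", "2021-06-01"))
in_range(d, min = as.Date("2018-01-01"), max = as.Date("2019-12-31"))
# expected: TRUE FALSE
```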
## Code lists
A code list is a set of values that a variable is allowed to assume. For small
code lists, one can use the `%in%` operator.
```{r}
rule <- validator(size %in% c("sc0","sc1","sc2","sc3"))
out <- confront(SBS2000, rule)
summary(out)
```
Notice that `validate` replaces `%in%` with `%vin%`. The reason is that `%vin%` has more consistent
behavior in the case of missing data. In particular,
```{r}
c(1, 3, NA) %in% c(1,2)
c(1, 3, NA) %vin% c(1,2)
```
For longer code lists it is convenient to refer to an externally provided list.
There are two ways of doing this: reading the list in the right-hand side of `%in%`,
or passing a code list to `confront` as reference data.
Suppose we have a file called `codelist.csv` with a column `code`. We can define
a rule as follows.
```{r}
rule <- validator(
x %in% read.csv("codelist.csv")$code
)
## Or, equivalently
rule <- validator(
valid_codes := read.csv("codelist.csv")$code
, x %in% valid_codes
)
```
The disadvantage is that the rule now depends on a path that may or may not be
available at runtime.
The second option is to assume that a variable, say `valid_codes` exists at
runtime, and pass this with `confront`.
```{r}
codelist <- c("sc0","sc1","sc2","sc3")
rule <- validator(size %in% valid_codes)
# pass the codelist
out <- confront(SBS2000, rule
, ref=list(valid_codes=codelist))
summary(out)
```
This way, (very) large code lists can be used, but note that it does
require a 'contract' between variable names used in the rule set
and variables passed as reference data.
# Availability and uniqueness {#sect-availableunique}
```{r, include=FALSE}
source("chunk_opts.R")
```
In this Chapter it is demonstrated how to check whether records are available
and/or complete with respect to a set of keys, and whether they are unique.
The checks described here are typically useful for data in 'long' format, where
one column holds a value and all the other columns identify the value.
- To test for missing values in individual variables, see also \@ref(sect-missingness).
- To check whether records or parts thereof are completed, see \@ref(sect-iscomplete).
**Data**
In this Chapter the `samplonomy` dataset is used that comes with the `validate`
package.
```{r}
library(validate)
data(samplonomy)
head(samplonomy, 3)
```
## Long data
The samplonomy data set is structured in 'long form'. This means that each
record has a single `value` column, and one or more columns containing
character values that together describe what the value means.
```{r}
head(samplonomy,3)
```
The data set contains several time series for multiple measures
of the fictional country 'Samplonia'. There are time series for several
subregions of Samplonia.
Long format data is typically used as a transport format: it may be used to
bulk-load data into SQL-based data base systems, or to transfer data between
organisations in an unambiguous way.
Data in long form is in general much harder to check and process for
statistical purposes than data in wide format, where each variable is stored in
a separate column. The reason is that in long format relations between
different variables are spread out across records, and those records are not
necessarily ordered in any particular way prior to processing. This makes
interpretation of validation fails intrinsically harder for long-form data than
for wide-form data.
The `samplonomy` data set has a particularly nasty structure. It contains both
annual and quarterly time series for GDP, Import, Export and the Balance of
Trade (export less import). The period column therefore contains both quarterly
and annual labels. Furthermore, there are time series for the whole of
Samplonia (region Samplonia), for each of its two provinces (regions Agria and
Induston) and for each of its districts within Agria (Wheaton and Greenham) and
Induston (Smokely, Mudwater, Newbay and Oakdale).
Naturally, we expect that the key combinations are unique, that all time series
are gapless and complete, that the Balance of trade equals Export less Import
everywhere, that district values add up to the provinces', and that province
values add up to the total of Samplonia. Finally, the quarterly time series
must add up to the annual values.
## Uniqueness {#sect-uniqueness}
The function `is_unique()` checks whether combinations of variables (usually
key variables) uniquely identify a record. It accepts any positive number of
variable names and returns `FALSE` for each record that is duplicated with
respect to the designated variables.
Here, we test whether region, period, and measure uniquely identify a value in
the `samplonomy` data set.
```{r}
rule <- validator(is_unique(region, period, measure))
out <- confront(samplonomy, rule)
# showing 7 columns of output for readability
summary(out)[1:7]
```
There are `r summary(out)$fails` fails. After extracting the individual
values for each record we can find the duplicated ones using a
convenience function from `validate`.
```{r}
violating(samplonomy, out)
```
There are two subtleties to keep in mind when interpreting uniqueness. The
first has to do with missing values, and the second has to do with grouping.
To start with the missing value problem, take a look at the following two-record
data frame.
```{r}
df <- data.frame(x = c(1,1), y = c("A",NA))
df
```
How should we judge whether these two records are unique? A tempting option is
to say that the first record is unique, and to return `NA` for the second record
since it contains a missing value: R has the habit of returning `NA` from
calculations when an input value is `NA`. This choice is not invalid, but it
would have consequences for determining whether the first record is unique as
well. After all, it is possible to fill in a value in the missing field such
that the two records are duplicates. Therefore, if one would return `NA` for
the second record, the correct thing to do is to also return `NA` for the first
record. In R, the choice is made to treat `NA` as an actual value when checking
for duplicates or unique records (see `?duplicated` from base R). To see this
inspect the following code and output.
```{r}
df <- data.frame(x=rep(1,3), y = c("A", NA, NA))
is_unique(df$x, df$y)
```
The second subtlety has to do with grouping. You may want to test whether a
column is unique, given one or more other variables. It is tempting to think
that this requires a split-apply-combine approach where the dataset is first
split according to one or more grouping variables, check for uniqueness of the
column in each group, and then combine the results. However, such an approach
is not necessary as you can simply add the grouping variables to the list of
variables that _together_ must be unique.
As an example, consider the output of the following two approaches.
```{r}
# y is unique, given x. But not by itself
df <- data.frame(x=rep(letters[1:2],each=3), y=rep(1:3,2))
# the split-apply-combine approach
unsplit(tapply(df$y, df$x, is_unique), df$x)
# the combined approach
is_unique(df$x, df$y)
```
## Availability of records {#sect-completeness}
This section is on testing for availability of whole records. Testing for individual
missing values (`NA`) is treated in \@ref(sect-missingness).
We wish to ensure that for each region, and each variable, the periods 2014,
2015, $\ldots$, 2019 are present. Using `contains_at_least` we can establish
this.
```{r}
rule <- validator(
contains_at_least(
keys = data.frame(period = as.character(2014:2019))
, by=list(region, measure) )
)
out <- confront(samplonomy, rule)
# showing 7 columns of output for readability
summary(out)[1:7]
```
The function `contains_at_least` splits the `samplonomy` dataset into blocks
according to values of `region` and `measure`. Next, it checks that in each
block the variable `period` contains at least the values 2014--2019.
The return value is a logical vector where the number of elements equals the
number of rows in the dataset under scrutiny. It is `TRUE` for each block
where all years are present, and `FALSE` for each block where one or more of the
years is missing. In this case 29 records are labeled as FALSE. These
can be found as follows.
```{r}
head(violating(samplonomy, out))
```
Inspection of these records shows that in this block, for `Agria` the GDP
for `"2015"` is missing.
We can perform a stricter check, and test whether for each `measure`, all
quarters `"2014Q1"` $\ldots$ `"2019Q4"` are present for each province (`Agria`
and `Induston`). First create a key set to test against.
```{r}
years <- as.character(2014:2019)
quarters <- paste0("Q",1:4)
keyset <- expand.grid(
region = c("Agria", "Induston")
, period = sapply(years, paste0, quarters))
head(keyset)
```
This key set will be referenced in the rule, and passed to `confront` as reference
data.
```{r}
rule <- validator(
contains_at_least(keys=minimal_keys, by=measure)
)
out <- confront(samplonomy, rule
, ref=list(minimal_keys=keyset))
# showing 7 columns of output for readability
summary(out)[1:7]
```
There are `r summary(out)$fails` fails. Inspecting the data set as above, we
see that for Induston, the `export` is missing in `"2018Q3"`.
Finally, we do a strict test, to check that for each `measure` all periods and
all regions are reported, and that there are no more and no fewer records than
those defined by the key set. For this, the function
`contains_exactly` can be used.
First create a keyset.
```{r}
years <- as.character(2014:2019)
quarters <- paste0("Q",1:4)
keyset <- expand.grid(
region = c(
"Agria"
,"Crowdon"
,"Greenham"
,"Induston"
,"Mudwater"
,"Newbay"
,"Oakdale"
,"Samplonia"
,"Smokely"
,"Wheaton"
)
,period = c(years, sapply(years, paste0, quarters))
)
head(keyset)
```
The keyset is passed as reference data to the rule using `confront`.
```{r}
rule <- validator(contains_exactly(all_keys, by=measure))
out <- confront(samplonomy, rule
, ref=list(all_keys=keyset))
# showing 7 columns of output for readability
summary(out)[1:7]
```
To find where the errors reside, we first select the records with an error and
then find the unique measures that occur in those records.
```{r}
erroneous_records <- violating(samplonomy, out)
unique(erroneous_records$measure)
```
So here, blocks containing GDP and Export have entire records missing.
## Gaps in (time) series
For time series, or possibly other series, it is often desirable that
there is a constant distance between consecutive elements of the series.
The mathematical term for such a series is a _linear sequence_.
Here are some examples of linear sequences.
- The natural numbers: $1,2,3,\ldots$
- The even natural numbers $2, 4, 6, \ldots$
- Quarterly periods: `"2020Q1"`, `"2020Q2"`, $\ldots$
- Years (these are just natural numbers): $2019, 2020, \ldots$
The `validate` functions `is_linear_sequence` and `in_linear_sequence` check
whether a variable represents a linear series, possibly in blocks defined by
categorical variables. They can be used interactively or as a rule in a
validator object. We first demonstrate how these functions work, and then give
an example with the `samplonomy` dataset.
```{r}
is_linear_sequence(c(1,2,3,4))
is_linear_sequence(c(8,6,4,2))
is_linear_sequence(c(2,4,8,16))
```
For character data, the function is capable of recognizing certain formats
for time periods.
```{r}
is_linear_sequence(c("2020Q1","2020Q2","2020Q3","2020Q4"))
```
See `?is_linear_sequence` for a full specification of supported
date-time formats.
It is not necessary for data to be sorted in order to be recognized as a
linear sequence.
```{r}
is_linear_sequence(c("2020Q4","2020Q2","2020Q3","2020Q1"))
```
One can force a begin and/or end point for the sequence as well.
```{r}
is_linear_sequence(c("2020Q4","2020Q2","2020Q3","2020Q1")
, begin = "2020Q2")
```
Finally it is possible to split a variable by one or more other columns and
check whether each block represents a linear sequence.
```{r}
series <- c(1,2,3,4,1,2,3,3)
blocks <- rep(c("a","b"), each = 4)
is_linear_sequence(series, by = blocks)
```
This result is not very useful by itself, since it is unknown which block
is not a linear sequence. This is where the function `in_linear_sequence` comes in.
```{r}
in_linear_sequence(series, by = blocks)
```
There are some subtleties. A single element is also a linear sequence (of length 1).
```{r}
is_linear_sequence(5)
```
This can yield surprises in cases of blocks of length 1.
```{r}
blocks[8] <- "c"
data.frame(series = series, blocks = blocks)
in_linear_sequence(series, blocks)
```
We now have three linear series, namely
- For `"a"`: `1,2,3,4`
- For `"b"`: `1,2,3`
- For `"c"`: `3`.
We can circumvent this by giving explicit bounds.
```{r}
in_linear_sequence(series, blocks, begin = 1, end = 4)
```
We now return to the `samplonomy` dataset. We wish to check that for
each measure and each area, the time series are linear series. Since there
are time series of different frequencies, we need to split the data by frequency
as well.
```{r}
rule <- validator(
in_linear_sequence(period
, by = list(region, freq, measure))
)
out <- confront(samplonomy, rule)
summary(out)[1:7]
```
We can find the blocks where records are not in sequence as follows (output not
printed here for brevity).
```{r, results='hide'}
violating(samplonomy, out)
```
Inspection of the selected records shows that for Agria the GDP for 2015 is
missing, and that for Induston the Export for 2018Q3 is missing while Export
for 2018Q2 occurs twice (but with different values).
# Multivariate checks
```{r, include=FALSE}
source("chunk_opts.R")
```
In this Chapter we treat tests that involve relationships between variables.
**Data**
In this Chapter we will use the `SBS2000` dataset that comes with `validate`.
```{r}
library(validate)
data(SBS2000)
head(SBS2000, 3)
```
## Completeness of records {#sect-iscomplete}
The functions `is_complete()` and `all_complete()` are convenience functions
that test for missing values or combinations thereof in records.
```{r}
rules <- validator(
is_complete(id)
, is_complete(id, turnover)
, is_complete(id, turnover, profit )
, all_complete(id)
)
out <- confront(SBS2000, rules)
# suppress last column for brevity
summary(out)[1:7]
```
Here, the first rule checks for missing data in the `id` variable, the second
rule checks whether subrecords with `id` and `turnover` are complete, and the
third rule checks whether subrecords with `id`, `turnover` and `profit` are
complete. The output is one logical value (`TRUE` or `FALSE`) for each record.
The fourth rule tests whether _all_ values are present in the `id` column, and
it results in a single `TRUE` or `FALSE`.
- To test for missing values in individual variables, see also \@ref(sect-missingness).
- To check whether records are available at all, see \@ref(sect-completeness).
## Balance equalities and inequalities
Balance restrictions occur for example in economic microdata, where financial
balances must be met.
```{r}
rules <- validator(
total.rev - profit == total.costs
, turnover + other.rev == total.rev
, profit <= 0.6*total.rev
)
out <- confront(SBS2000, rules)
summary(out)
```
Here, the first rule checks a balance between income, costs, and profit; the
second rule checks a sub-balance, and the third rule is a plausibility check
where we do not expect profit to exceed 60 per cent of the total revenue.
Observe that the expressions have been altered by `validate` to account for
possible machine rounding differences. Rather than testing whether variable $x$
equals variable $y$, `validate` will check $|x-y|\leq \epsilon$, where the
default value of $\epsilon$ is $10^{-8}$. The value of this tolerance can be
controlled for linear equalities and inequalities using respectively
`lin.eq.eps` and `lin.ineq.eps`.
```{r}
out <- confront(SBS2000, rules, lin.ineq.eps=0, lin.eq.eps=0.01)
summary(out)
```
See \@ref(sect-options) for more information on setting and resetting options.
## Conditional restrictions
Conditional restrictions are all about demanding certain value combinations.
In the following example we check that a business with staff also has staff
costs.
```{r}
rule <- validator(if (staff >= 1) staff.costs >= 1)
out <- confront(SBS2000, rule)
summary(out)
```
Here, combinations where there is a positive number of staff must be
accompanied by a positive staff cost.
Validate translates the rule `if ( P ) Q` to an expression of the form `!P |
Q`. The reason for this is that the latter can be evaluated faster
(vectorised).
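As a small illustration (a sketch with made-up values, not the `SBS2000` data), the vectorised form can be evaluated directly:
```{r, eval=FALSE}
staff       <- c(0, 5, 5)
staff.costs <- c(0, 0, 3)
# the rule 'if (staff >= 1) staff.costs >= 1' is evaluated as:
!(staff >= 1) | (staff.costs >= 1)
# expected: TRUE FALSE TRUE
```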
The results are to be interpreted as follows. For each record, `validate` will
check that cases where `staff>=1` are accompanied by `staff.costs >= 1`. In
cases where this test results in `FALSE` this means that either the staff
number is too high, or the staff costs are too low. To be precise, the results
of a conditional restriction match those of an implication in first-order
logic as shown in the truth table below.
$$
\begin{array}{ll|c}
P & Q & P\Rightarrow Q\\
\hline
T & T & T\\
T & F & F\\
F & T & T\\
F & F & T\\
\end{array}
$$
## Forbidden value combinations
In some cases it is more convenient to have a list of forbidden (key) value
combinations than specifying such combinations individually. The function
`does_not_contain()` supports such situations.
As an example, let's first create some transaction data.
```{r}
transactions <- data.frame(
sender = c("S21", "X34", "S45","Z22")
, receiver = c("FG0", "FG2", "DF1","KK2")
, value = sample(70:100,4)
)
```
We assume that it is not possible for senders with codes starting with an `"S"`
to send something to receivers starting with `FG`. A convenient way to encode
such demands is to use
[globbing patterns](https://en.wikipedia.org/wiki/Glob_(programming)).
We create a data frame that lists forbidden combinations (here: one combination
of two key patterns).
```{r}
forbidden <- data.frame(sender="S*",receiver = "FG*")
```
Note that the column names of this data frame correspond to the columns in the transactions
data frame. We are now ready to check our transactions data frame.
```{r}
rule <- validator(does_not_contain(glob(forbidden_keys)))
out <- confront(transactions, rule, ref=list(forbidden_keys=forbidden))
## Suppress columns for brevity
summary(out)[1:7]
```
Observe that we use `glob(forbidden_keys)` to tell `does_not_contain` that the
key combinations in the `forbidden_keys` must be interpreted as globbing
patterns.
The records containing forbidden keys can be selected as follows.
```{r}
violating(transactions, out)
```
It is also possible to use regular expression patterns, by labeling the
forbidden key set with `rx()`. If no labeling is used, the key sets are
interpreted as string literals.
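As a sketch, the same check can be expressed with regular expressions (the patterns below are our own translation of the globbing patterns above):
```{r, eval=FALSE}
forbidden_rx <- data.frame(sender = "^S", receiver = "^FG")
rule <- validator(does_not_contain(rx(forbidden_keys)))
out  <- confront(transactions, rule, ref = list(forbidden_keys = forbidden_rx))
violating(transactions, out)
```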
# Statistical checks {#sect-statisticalchecks}
```{r, include=FALSE}
source("chunk_opts.R")
```
Statistical checks involve group properties such as the means of columns. These
characteristics can be checked for whole columns or grouped by one or more
categorical variables. It is also possible to use group-wise computed
statistics in validation rules. For example if you want to compare individual
values with a mean within a group.
For long-form data it is possible to compare aggregate values with underlying
details. For example to test whether quarterly time series add up to annual
totals. It is also possible to check properties of groups, for example whether
in every household (a group of persons) there is exactly one head of household.
**Data**
In this Chapter we will use the `SBS2000` dataset that comes with `validate`.
```{r}
library(validate)
data(SBS2000)
head(SBS2000, 3)
```
We shall also use the `samplonomy` dataset that also comes with `validate`. See also
\@ref(long-data).
```{r}
data(samplonomy)
head(samplonomy, 3)
```
## Statistical and groupwise characteristics {#sect-groupwise}
Any R expression that ultimately is an equality or inequality check is
interpreted as a validation rule by validate. This means that any statistical
calculation can be input to a rule.
Here we check the mean profit and the correlation between turnover and the
number of staff.
```{r}
rule <- validator(
mean(profit, na.rm=TRUE) >= 1
, cor(turnover, staff, use="pairwise.complete.obs") > 0
)
out <- confront(SBS2000, rule)
# suppress some columns for brevity
summary(out)[1:7]
```
There are a few helper functions to compute group-wise statistics, and to make
comparing values with group aggregates possible.
For example, here we check whether each turnover is less than ten times
the group-wise median.
```{r}
rule <- validator(
turnover <= 10*do_by(turnover, by=size, fun=median, na.rm=TRUE)
)
out <- confront(SBS2000, rule)
# suppress some columns for brevity
summary(out)[1:7]
```
Here, in the right-hand side of the rule the group-wise median of turnover is
computed. The function `do_by` is very similar to functions such as `tapply`
in base R. The difference is that `do_by` works on vectors only (not on data
frames) and always repeats the group-wise values computed by `fun`, so that the
length of the output equals the length of the input.
```{r}
medians <- with(SBS2000, do_by(turnover, by=size, fun=median, na.rm=TRUE))
head(data.frame(size = SBS2000$size, median=medians))
```
There are also some convenience functions, including `sum_by`, `mean_by`, `min_by`, and `max_by`.
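A minimal sketch with `mean_by()`, analogous to the `do_by()` call above:
```{r, eval=FALSE}
means <- with(SBS2000, mean_by(incl.prob, by = size))
head(data.frame(size = SBS2000$size, mean_incl_prob = means))
```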
## Group properties
In this section, we group data by one or more categorical variables and
check for each group whether a rule is satisfied. In particular we are
going to check whether each household in a small dataset has a unique
'head of household'.
We first create some data with a household id (`hhid`), a person id (`person`),
and that person's role in the household (`hhrole`).
```{r}
d <- data.frame(
hhid = c(1, 1, 2, 1, 2, 2, 3 )
, person = c(1, 2, 3, 4, 5, 6, 7 )
, hhrole = c("h","h","m","m","h","m","m")
)
d
```
With `exists_one()` we can check that there is exactly one person
with the role `"h"` (head) in each household, by grouping on household id.
```{r}
rule <- validator(exists_one(hhrole == "h", by=hhid))
out <- confront(d, rule)
# suppress some columns for brevity
summary(out)
```
We can inspect the results by selecting the violating record groups.
```{r}
violating(d, out)
```
We see that household 1 has two heads of household, while household 3 has no head
of household.
To test whether _at least one_ head of household exists, one can use
`exists_any`:
```{r}
violating(d, validator(exists_any(hhrole=="h",by=hhid) ))
```
In the following example we check whether there is exactly one region called Samplonia
for each period and each measure in the `samplonomy` dataset.
```{r}
rule <- validator(exists_one(region=="Samplonia", by=list(period, measure)))
```
The first argument of `exists_one()` is a rule that has to be checked in every group
indicated by the `by` argument. The output is a logical vector with an element for
each record in the dataset under scrutiny. If a group of data fails the test, each record
in that group is indicated as wrong (`FALSE`).
```{r}
out <- confront(samplonomy, rule)
# suppress some columns for brevity
summary(out)[1:7]
```
Here, there are no groups that violate this assumption.
```{r}
violating(samplonomy, out)
```
## Code hierarchies and aggregation
Classifications and ontologies often have a hierarchical structure.
A well-known example is the
[NACE](https://en.wikipedia.org/wiki/Statistical_Classification_of_Economic_Activities_in_the_European_Community) classification of economic activities. In the NACE classification,
the economy is divided into 10 basic types of activities such as 'Agriculture'
or 'Mining and Quarrying', and each activity is again divided into subclasses,
such as 'Growing of rice' and 'Growing of Grapes' under 'Agriculture'. The
subdividing can go on for several levels. For statistics that describe an
economy according to the NACE classification, it is desirable that the
statistics of subclasses add up to their parent classes. This is what the
function `hierarchy()` does in `validate`.
The `validate` package comes with a version of the NACE classification
(Revision 2, 2008) so we will use that as an example.
```{r}
data(nace_rev2)
head(nace_rev2[1:4])
```
The second and third column contain the necessary information: they list the
parent for each NACE code (where each parent is also a NACE code). To demonstrate
how `hierarchy()` works, we first create some example data.
```{r}
dat <- data.frame(
nace = c("01","01.1","01.11","01.12", "01.2")
, volume = c(100 ,70 , 30 ,40 , 25 )
)
dat
```
We see that the volumes for subclasses `"01.11"` and `"01.12"` add up to
`"01.1"` ( $30+40=70$ ). However, the volumes for `"01.1"` and `"01.2"` do not
add up to the volume for `"01"` ($70+25\not=100$). The `hierarchy()` function
checks all these relations.
Before using `hierarchy` in the setting of a `validator` object, we can examine
it directly.
```{r}
dat$check <- hierarchy(dat$volume, dat$nace, nace_rev2[3:4])
dat
```
We see that `hierarchy()` returns a `logical` vector with one element for each
record in the data. Each record that is involved in one or more aggregation
checks that fail is labeled `FALSE`. Here, this concerns the records with
labels `"01"`, `"01.1"` and `"01.2"`.
We will next look at a more complicated example, but first note the following.
The `hierarchy()` function
- can handle any statistical aggregate, `sum()` is just the default;
- supports globbing and regular expressions in the child values;
- has an adjustable tolerance value for comparing observed with computed aggregates;
- has configurable behaviour for cases of missing data;
- can be applied per-group, defined by one or more grouping variables (see next example).
See the help file `?hierarchy` for specification and examples.
**A more complicated example**
Samplonia is divided into two provinces, each of which is divided into several
districts. Let us define the hierarchical code list.
```{r}
samplonia <- data.frame(
region = c("Agria", "Induston"
, "Wheaton", "Greenham"
, "Smokely", "Mudwater", "Newbay", "Crowdon")
, parent = c(rep("Samplonia",2), rep("Agria",2), rep("Induston",4))
)
samplonia
```
Recall the structure of the `samplonomy` dataset.
```{r}
data(samplonomy)
head(samplonomy)
```
We will check whether regions sum to their parent regions, for each period
and for each measure.
```{r}
rule <- validator(
hierarchy(value, region, hierarchy=ref$codelist, by=list(period, measure))
)
out <- confront(samplonomy, rule, ref=list(codelist=samplonia))
summary(out)
```
We see that some aggregates add up correctly, and some don't. There is also
a warning which we should investigate.
```{r}
warnings(out)
```
If one of the groups contains a parent more than once it is not possible
to check whether child values add up to the aggregate. For this reason
the duplicated parent and all its children are marked `FALSE`. Indeed we
find a duplicated record.
```{r}
subset(samplonomy, region == "Induston" &
period == "2018Q2" &
measure == "export")
```
Just to see if we can remove the warning, let us remove the duplicate
and re-run the check.
```{r}
i <- !duplicated(samplonomy[c("region","period","measure")])
samplonomy2 <- samplonomy[i, ]
out <- confront(samplonomy2, rule, ref=list(codelist=samplonia))
# suppress some columns for brevity
summary(out)[1:7]
```
The `hierarchy()` function marks every record `FALSE` that is involved
in any failing check. This may make it hard to figure out which check it failed.
One can get more detailed information, by checking different parts
of the hierarchy in separate rules.
```{r}
rules <- validator(
level0 = hierarchy(value, region, ref$level0, by=list(period, measure))
, level1 = hierarchy(value, region, ref$level1, by=list(period, measure))
)
out <- confront(samplonomy2, rules
, ref=list(level0=samplonia[1:2,], level1=samplonia[3:8,])
)
summary(out)
```
We can now select records involved in violating the highest level
rules separately.
```{r}
violating(samplonomy2, out["level0"])
```
From this it appears that in 2015, the GDP for Agria is missing, and in
2018Q3 there is no value for the export of Induston.
## General aggregates in long-form data
Checking aggregations in long-form format is more involved than for
data in wide format (as in Section \@ref(balance-equalities-and-inequalities)).
Here, we check in the `samplonomy` dataset that for each measure and each
period, the subregional data adds up to the regional data.
```{r}
rules <- validator(
part_whole_relation(value
, labels=region
, whole="Samplonia"
, part =c("Agria","Induston")
, by=list(measure, period)
)
)
```
The first argument of `part_whole_relation()` is the name of the variable
containing the values. Here, the column `value` from the samplonomy dataset.
The argument `labels` indicates the variable that labels parts and wholes.
Next, we define the label value that indicates a total. Here, a record with
region label `"Samplonia"` indicates a total. Under argument `part` we specify
the labels that have to add up to Samplonia, here the provinces Agria and
Induston. Note that there are more subregions in the dataset, for example the
district of Wheaton (a subregion of Agria). Since we do not specify them, these
are ignored. In the `by` argument we specify that the dataset must be split
into measure and period prior to checking the regional aggregates.
The output is one boolean value per record. For each block, defined by values
of `measure` and `period` either all values are `TRUE`, `FALSE`, or `NA`. The
latter indicates that the aggregate could not be computed because one of the
values is missing, or the computed aggregate could not be compared with the
aggregate in the data because it is missing (either the whole record may be
missing, or the value may be `NA`).
```{r}
out <- confront(samplonomy, rules)
# suppress some columns for brevity
summary(out)[1:7]
```
We can extract the truth values and then inspect the blocks with erroneous values
using standard R functionality.
```{r}
violating(samplonomy, out)
```
Recall that the rule was executed per block defined by `measure` and `period`.
Thus, the result indicates three errors: one in the block of records defined
by `measure=="gdp"` and `period=="2015"`, and two in the blocks defined by
`measure=="export"` and `period=="2018Q2"` or `period=="2018Q3"`.
First, it seems that the 2015 GDP of Agria
is missing from the data set. This turns out indeed to be the case.
```{r}
subset(samplonomy, region=="Agria" & period == "2015" & measure == "gdp")
```
Second, it can be seen that for Induston, there are two export values for
`"2018Q2"` while the export value for `"2018Q3"` is missing.
### Notes {-}
Specifying (group-wise) aggregates is a fairly detailed job in the case of long
data. There are a few things to keep in mind when using this function.
- The argument `part` is optional. If not specified, every record not matching
with `whole` will be considered a detail that is to be used to compute the total.
In the current example this was not possible because besides Agria and Induston,
we have other subregions.
- In the example we used literal values to specify the keys that define parts
  and wholes. It is also possible to recognize patterns, for example any year
  (4 digits) as a whole and any quarter as a part; see the next example.
  Supported patterns include regular expressions (used there) and globbing (see the help file).
- It is important that the variables listed in `by` (if any) uniquely specify a
single aggregate. So here, for each measure and period, the label `"Samplonia"`
should occur at most once (if it does not occur the result will be `NA`).
- The default way to aggregate is to take the sum. You can specify other ways
to aggregate by passing an `aggregator` argument. For example `aggregator=mean`.
- By default, the aggregate in the data is compared with the computed aggregate
  up to a tolerance of $10^{-8}$. This tolerance can be set using the `tol`
  argument. For example, for integer data you may want to set `tol=0`. A sketch
  showing where these arguments go is given directly after this list.
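The following sketch repeats the rule from the beginning of this section and shows where the `aggregator` and `tol` arguments go (the values are chosen purely for illustration):
```{r, eval=FALSE}
rules <- validator(
  part_whole_relation(value
    , labels = region
    , whole  = "Samplonia"
    , part   = c("Agria", "Induston")
    , by     = list(measure, period)
    , aggregator = sum   # the default; pass e.g. 'mean' to compare averages
    , tol = 0            # require an exact match between aggregate and parts
  )
)
```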
## Aggregates of time series in long format
We are going to check whether quarterly time series add up to the annual time
series. This is more complicated because of two subtleties.
First there is not one fixed aggregate key, like `"Samplonia"`. Rather, we
have a key _pattern_. Each total is defined by a period label that consists of
precisely four digits. So rather than recognizing a specific year we want to
recognize that a key represents any year. This can be done using a regular
expression of the form `"^\\d{4}$"`, where the `^` indicates 'start of string',
the `\\d{4}` indicates 'four times a digit' and `$` indicates 'end of string'.
Second, we wish to check annual totals against the sum over quarters for each region and each
measure. However, a value-combination of measure and region does not single out
a single value for `year`. For example, for the Induston export we have the following
annual data.
```{r}
subset(samplonomy, region=="Induston" & freq == "A" & measure=="export")
```
So in fact, we need to do the check _by year_ as well as by measure and region.
Fortunately, in this case it is easy to derive a variable that indicates the year
by selecting the first four characters from `period`.
```{r}
rules <- validator(part_whole_relation(value
, labels = period
, whole = rx("^\\d{4}$")
, by = list(region, substr(period,1,4), measure)
))
out <- confront(samplonomy, rules)
```
We use `rx("^\\d{4}$")` to tell `part_whole_relation` that this string must be
interpreted as a regular expression. Here, we do not indicate `part` labels
explicitly: by default any record not matching `whole` will be treated as a
detail that must be used to compute the total.
```{r}
errors(out)
# suppress some columns for brevity
summary(out)[1:7]
```
We now get 9 fails and 10 missing values. We can filter out records that
have `NA` (lacking) results.
```{r}
lacking(samplonomy, out)
```
There are two blocks where the annual total could not be compared with
the sum over quarterly series. The balance value of Crowdon is missing
for `"2014Q1"` as well as the import value of Wheaton for `"2019Q2"`.
Similarly, we can inspect the failing blocks.
```{r}
violating(samplonomy, out)
```
# Indicators {#sect-indicators}
```{r, include=FALSE}
source("chunk_opts.R")
```
Until now we have discussed various types of data validation rules: decisions
that assign True or False values to a data frame. In some cases it is
convenient to have a continuous value that can then be used in further
assessing the data.
A practical example is the so-called selective editing approach to data
cleaning. Here, each record in a data set is assigned a number that expresses
the risk a record poses for inferring a faulty conclusion. Records are then
ordered from high risk (records that both have suspicious values and large
influence on the final result) to low risk (records with unsuspicious values and
little influence on the final result). Records with the highest risk are then
scrutinized by domain experts.
In `validate`, an indicator is a rule that returns a numerical value. Just
like `validator` objects are lists of validation rules, `indicator` objects
are lists of indicator rules. Indicator values can be computed by confronting
data with an `indicator`, and using `add_indicators()` the computed values can
be added to the dataset. You can import, export, select, and combine `indicator` objects
in the same way as `validator` objects.
## A first example
Here is a simple example of the workflow.
```{r}
library(validate)
ii <- indicator(
BMI = (weight/2.2046)/(height*0.0254)^2
, mh = mean(height)
, mw = mean(weight))
out <- confront(women, ii)
```
In the first statement we define an `indicator` object storing indicator
expressions. Next, we confront a dataset with these indicators. The result is
an object of class `indication`. It prints as follows.
```{r}
out
```
To study the results, the object can be summarized.
```{r}
summary(out)
```
Observe that the first indicator results in one value per record
while the second and third indicators (`mh`, `mw`) each return a single
value. The single values are repeated when indicator values are added
to the data.
```{r}
head(add_indicators(women, out), 3)
```
The result is a data frame with indicators attached.
The columns `error` and `warning` indicate whether calculation of the
indicators was problematic, for example because the output of an indicator rule
is not numeric, or because it uses variables that do not occur in the data. Use
`warnings(out)` or `errors(out)` to obtain the warning and error messages per
rule.
## Getting indicator values
Values can be obtained with the `values` function, or by converting to a
`data.frame`. In this example we add a unique identifier (this is optional) to
make it easier to identify the results with data afterwards.
```{r}
women$id <- letters[1:15]
```
Compute indicators and convert to `data.frame`.
```{r}
out <- confront(women, ii,key="id")
tail( as.data.frame(out) )
```
Observe that there is no key for indicators `mh` and `mw` since these are
constructed from multiple records.
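As noted at the start of this chapter, `indicator` objects support the same selection and import/export operations as `validator` objects; a minimal sketch (the file name is just an example):
```{r, eval=FALSE}
ii[c("mh", "mw")]                            # select indicators by name
export_yaml(ii, file = "my_indicators.yaml") # export, as for validator objects
```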
# Working with validate {#sect-work}
```{r, include=FALSE}
source("chunk_opts.R")
```
```{r, echo=FALSE}
library(validate)
```
In this section we dive deeper into the central object types used in the
package: the `validator` object type for storing lists of rules, and the
`confrontation` object type for storing the results of a validation.
## Manipulating rule sets
Validate stores rulesets into something called a `validator` object. The
`validator()` function creates such an object.
```{r}
v <- validator(speed >= 0, dist>=0, speed/dist <= 1.5)
v
```
Validator objects behave a lot like lists. For example, you can select items
to get a new `validator`. Here, we select the first and third element.
```{r}
w <- v[c(1,3)]
```
Here `w` is a new validator object holding only the first and third rule from
`v`. If not specified by the user, rules are given the default names `"V1"`,
`"V2"`, and so on. Those names can also be used for selecting rules.
```{r}
w <- v[c("V1","V3")]
```
Validator objects are reference objects. This means that if you do
```
w <- v
```
then `w` is not a copy of `v`. It is just another name for the same physical
object as `v`. To make an actual copy, you can select everything.
```
w <- v[]
```
It is also possible to concatenate two validator objects. For example when you
read two rule sets from two files (See \@ref(sect-readfromfile)). This is done
by adding them together with `+`.
```{r}
rules1 <- validator(speed>=0)
rules2 <- validator(dist >= 0)
all_rules <- rules1 + rules2
```
An empty validator object is created with `validator()`.
If you select a single element of a validator object, an object of class 'rule'
is returned. This is the validating expression entered by the user, plus some
(optional) metadata.
```{r}
v[[3]]
```
Users never need to manipulate rule objects, but it can be convenient to
inspect them. As you see, the rules have some automatically created metadata.
In the next section we demonstrate how to retrieve and set the metadata.
## Rule metadata
Validator objects behave a lot like lists. The only metadata in an R
list are the `names` of its elements. You can get and set names of a list
using the `names<-` function. Similarly, there are getter/setter functions
for rule metadata.
- `origin()` : Where was a rule defined?
- `names()` : The name per rule
- `created()` : when were the rules created?
- `label()` : Short description of the rule
- `description()`: Long description of the rule
- `meta()` : Set or get generic metadata
Names can be set on the command line, just as you would do for an R list.
```{r}
rules <- validator(positive_speed = speed >= 0, ratio = speed/dist <= 1.5)
rules
```
Getting and setting names works the same as for lists.
```{r}
names(rules)
names(rules)[1] <- "nonnegative_speed"
```
The functions `origin()`, `created()`, `label()`, and `description()` work in
the same way. It is also possible to add generic key-value pairs as metadata.
Getting and setting follows the usual recycling rules of R.
```{r}
# add 'foo' to the first rule:
meta(rules[1],"foo") <- 1
# Add 'bar' to all rules
meta(rules,"bar") <- "baz"
```
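The `label()` and `description()` setters work analogously; a minimal sketch (the texts are chosen for illustration):
```{r, eval=FALSE}
label(rules) <- c("nonnegative speed", "speed-distance ratio")
description(rules) <- c(
    "Speed must be zero or positive."
  , "The speed to distance ratio must not exceed 1.5."
)
```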
Metadata can be made visible by selecting a single rule:
```{r}
rules[[1]]
```
Or by extracting it to a `data.frame`
```{r}
meta(rules)
```
Some general information is obtained with `summary`,
```{r}
summary(v)
```
Here, some properties are given per _block_ of rules. Two rules occur in the same
block if they share a variable. In this case, all rules occur in the same
block.
The number of rules can be requested with `length`
```{r}
length(v)
```
With `variables()`, the variables occurring per rule or over all rules can be requested.
```{r}
variables(v)
variables(v,as="matrix")
```
## Rules in data frames
You can read and write rules and their metadata from and to data frames. This
is convenient, for example in cases where rules are retrieved from a central
rule repository in a data base.
Exporting rules and their metadata can be done with `as.data.frame`.
```{r}
rules <- validator(speed >= 0, dist >= 0, speed/dist <= 1.5)
df <- as.data.frame(rules)
```
Reading from a data frame is done through the `.data` argument.
```{r}
rules <- validator(.data=df)
```
It is not necessary to define all possible metadata in the data frame. It is
sufficient to have three character columns, named `rule`, `name` and
`description` in any order.
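For example, a minimal rule data frame could look as follows (the names and descriptions are just illustrations):
```{r, eval=FALSE}
df <- data.frame(
    rule        = c("speed >= 0", "dist >= 0")
  , name        = c("nonneg_speed", "nonneg_dist")
  , description = c("speed must be nonnegative", "distance must be nonnegative")
)
rules <- validator(.data = df)
```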
## Validation rule syntax {#sect-syntax}
Conceptually, any R statement that will evaluate to a `logical` is considered a
validating statement. The validate package checks this when the user defines a
rule set, so for example calling `validator( mean(height) )` will result in a
warning since just computing a mean does not validate anything.
You will find a concise description of the syntax in the `syntax` help file.
```{r,eval=FALSE}
?syntax
```
In short, you can use
- Type checks: any function starting with `is.`
- Binary comparisons: `<, <=, ==, !=, >=, >` and `%in%`
- Unary logical operators: `!, all(), any()`
- Binary logical operators: `&, &&, |, ||` and logical implication, e.g. `if (staff > 0) staff.costs > 0`
- Pattern matching `grepl`
- [Functional dependency](https://en.wikipedia.org/wiki/Functional_dependency): $X\to Y + Z$ is represented by `X ~ Y + Z`.
There are some extra syntax elements that help in defining complex rules.
- Inspect the whole data set using `.`, e.g. `validator( nrow(.) > 10)`.
- Reuse a variable using `:=`, e.g. `validator(m := mean(x), x < 2*m )`.
- Apply the same rule to multiple groups with `var_group`. For example
`validator(G:=var_group(x,y), G > 0)` is equivalent to `validator(x>0, y>0)`.
A few helper functions are available to compute groupwise values on
variables (vectors). They differ from functions like `aggregate` or `tapply`
in that their result is always of the same length as the input.
```{r}
sum_by(1:10, by = rep(c("a","b"), each=5) )
```
This is useful for rules where you want to compare individual values with
group aggregates.
|function | computes |
|---------------------|----------------------------------|
| `do_by` | generic groupwise calculation |
| `sum_by` | groupwise sum |
| `min_by`, `max_by` | groupwise min, max |
| `mean_by` | groupwise mean |
| `median_by` | groupwise median |
See also Section \@ref(sect-groupwise).
There are a number of functions that perform a particular validation task that
would be hard to express with basic syntax. These are treated extensively
in Chapters \@ref(sect-varlevelchecks) to \@ref(sect-statisticalchecks), but
here is a quick overview.
|function | checks |
|---------------------|----------------------------------------------------------------|
|`in_range` | Numeric variable range |
|`is_unique` | Uniqueness of variable combinations |
|`all_unique` | Equivalent to `all(is_unique())` |
|`is_complete` | Completeness of records |
|`all_complete` | Equivalent to `all(is_complete())` |
|`exists_any` | For each group, check if any record satisfies a rule |
|`exists_one` | For each group, check if exactly one record satisfies a rule |
|`is_linear_sequence` | Linearity of numeric or date/time/period series |
|`in_linear_sequence` | Linearity of numeric of date/time/period series |
|`hierarchy` | Hierarchical aggregations |
|`part_whole_relation`| Generic part-whole relations |
|`field_length` | Field length |
|`number_format` | Numeric format in text fields |
|`field_format` | Field format |
|`contains_exactly` | Availability of records |
|`contains_at_least` | Availability of records |
|`contains_at_most` | Availability of records |
|`does_not_contain` | Correctness of key combinations |
## Confrontation objects
The outcome of confronting a validator object with a data set is an object of
class `confrontation`. There are several ways to extract information from a
`confrontation` object.
- `summary`: summarize output; returns a `data.frame`
- `aggregate`: aggregate validation in several ways
- `sort` : aggregate and sort in several ways
- `values`: Get the values in an array, or a list of arrays if rules have different output dimension structure
- `errors`: Retrieve error messages caught during the confrontation
- `warnings`: Retrieve warning messages caught during the confrontation.
By default aggregates are produced by rule.
```{r}
v <- validator(height>0, weight>0,height/weight < 0.5)
cf <- confront(women, v)
aggregate(cf)
```
To aggregate by record, use `by='record'`
```{r}
head(aggregate(cf,by='record'))
```
Aggregated results can be sorted automatically, so that records with the most
violations, or rules that are violated most often, come first.
```{r}
# rules with most violations sorting first:
sort(cf)
```
Confrontation objects can be subsetted with single bracket operators (like
vectors), to obtain a sub-object pertaining only to the selected rules.
```
summary(cf[c(1,3)])
```
## Confrontation options {#sect-options}
By default, all errors and warnings are caught when validation rules are confronted with data. This can be switched off by setting the `raise` option to `"errors"` or `"all"`. The following
example contains a specification error: `hite` should be `height` and therefore the rule errors
on the `women` data.frame because it does not contain a column `hite`. The error is caught
(not resulting in an R error) and shown in the summary.
```{r}
v <- validator(hite > 0, weight>0)
summary(confront(women, v))
```
Setting `raise` to `"all"` results in an R error:
```{r eval=TRUE, error=TRUE}
# this gives an error
confront(women, v, raise='all')
```
Linear equalities form an important class of validation rules. To prevent
equalities from being tested strictly, there is an option called `lin.eq.eps` (with
default value $10^{-8}$) that allows one to add some slack to these tests. The
amount of slack is intended to prevent false negatives (unnecessary failures)
caused by machine rounding. If you want to check whether a sum-rule is
satisfied to within one or two units of measurement, it is cleaner to define
two inequalities for that.
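A sketch of that approach, reusing the balance check on `SBS2000` from Section \@ref(balance-equalities-and-inequalities) and allowing the balance to be off by at most two units:
```{r, eval=FALSE}
rules <- validator(
    total.rev - profit - total.costs <=  2
  , total.rev - profit - total.costs >= -2
)
out <- confront(SBS2000, rules)
```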
## Using reference data
For some checks it is convenient to compare the data under scrutiny with
other data artifacts. Two common examples include:
- Data is checked against an earlier version of the same dataset.
- We wish to check the contents of a column against a code list,
and we do not want to put the code list hard-coded into the
rule set.
For this, we can use the `ref` option in confront. Here is how
to compare columns from two data frames row-by-row. The user
has to make sure that the rows of the data set under scrutiny
(`women`) matches row-wise with the reference data set (`women1`).
```{r}
women1 <- women
rules <- validator(height == women_reference$height)
cf <- confront(women, rules, ref = list(women_reference = women1))
summary(cf)
```
Here is how to make a code list available.
```{r}
rules <- validator( fruit %in% codelist )
fruits <- c("apple", "banana", "orange")
dat <- data.frame(fruit = c("apple","broccoli","orange","banana"))
cf <- confront(dat, rules, ref = list(codelist = fruits))
summary(cf)
```
# Rules in text files {#sect-rulefiles}
```{r, include=FALSE}
source("chunk_opts.R")
library(validate)
```
This Chapter is about importing and exporting rules from and to file, both in
free-form text and in YAML. We also discuss some more advanced features like
how to have one rule file include another file.
## Reading rules from file {#sect-readfromfile}
It is a very good idea to store and maintain rule sets outside of your R
script. Validate supports two file formats: simple text files and `yaml` files.
Here we only discuss simple text files, yaml files are treated in \@ref(sect-yamlfiles).
To try this, copy the following rules into a new text file and store it in a
file called `myrules.R`, in the current working directory of your R session.
```{r, eval=FALSE}
# basic range checks
speed >= 0
dist >= 0
# ratio check
speed / dist <= 1.5
```
Note that you are allowed to annotate the rules as you would with
regular R code. Reading these rules can be done as follows.
```{r}
rules <- validator(.file="myrules.R")
```
## Metadata in text files: `YAML` {#sect-yamlfiles}
[YAML](https://yaml.org) is a data format that aims to be easy to learn and
human-readable. The name 'YAML' is a [recursive
acronym](https://en.wikipedia.org/wiki/Recursive_acronym) that stands for
> YAML Ain't Markup Language.
Validate can read and write rule sets from and to YAML files. For example,
paste the following code into a file called `myrules.yaml`.
```
rules:
- expr: speed >= 0
name: 'speed'
label: 'speed positivity'
description: |
speed can not be negative
created: 2020-11-02 11:15:11
meta:
language: validate 0.9.3.36
severity: error
- expr: dist >= 0
name: 'dist'
label: 'distance positivity'
description: |
distance cannot be negative.
created: 2020-11-02 11:15:11
meta:
language: validate 0.9.3.36
severity: error
- expr: speed/dist <= 1.5
name: 'ratio'
label: 'ratio limit'
description: |
The speed to distance ratio can
not exceed 1.5.
created: 2020-11-02 11:15:11
meta:
language: validate 0.9.3.36
severity: error
```
We can read this file using `validator(.file=)` as before.
```{r}
rules <- validator(.file="myrules.yaml")
rules
```
Observe that the labels are printed between brackets. There are a few things
to note about these YAML files.
1. `rules:` starts a list of rules.
2. Each new rule starts with a dash (`-`).
3. Each element of a rule is denoted `name: <content>`. The only obligatory
element is `expr`: the rule expression.
4. Spaces matter. Each element of a rule must be preceded by a newline and two spaces.
Subelements (as in `meta`) are indented again.
A full tutorial on YAML can be found at
[W3Cschools.io](https://www.w3schools.io/file/yaml-introduction/).
To export a rule set to yaml, use the `export_yaml()` function.
```{r}
rules1 <- rules[c(1,3)]
export_yaml(rules1, file="myrules2.yaml")
```
## Setting options
Both free-form and YAML files can optionally start with a header section where
options or file inclusions can be set. The header section is enclosed by lines
that contain three dashes (`---`) at the beginning of the line.
For example, in the following rule file we make sure that errors are not caught
but raised to run-time level, and we set the tolerance for checking linear equalities and
inequalities to zero.
```
---
options:
raise: errors
lin.eq.eps: 0
lin.ineq.eps: 0
---
turnover >= 0
staff >= 0
total.rev - profit == total.costs
```
The options you set here will be part of the `validator` object, that is
created once you read in the file. The options are valid for every
confrontation you use this validator for, unless they are overwritten during
the call to `confront()`.
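For example (a sketch; the file name is hypothetical and stands for a rule file with a header like the one above):
```{r, eval=FALSE}
rules <- validator(.file = "rules_with_options.R")
# override the file-level tolerance for this confrontation only
out <- confront(SBS2000, rules, lin.eq.eps = 0.01)
```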
The header section is interpreted as a block of YAML, so options and file
inclusions must be specified in that format.
## Including other rule files
In validate, rule files can include each other recursively. So file A can
include file B, which may include file C. This is useful for example in surveys
where the first part of the questionnaire goes to all respondents, and for the
second part, the contents of the questionnaire (and hence its variables) depend
on the respondent type. One could create files with specific rules for the
second part: one for each respondent group, and have each specific rule file
include the general rules that must hold for every respondent.
It can also be useful when different persons are responsible for different rule
sets.
File inclusion can be set through the `include` option in the YAML header.
```
---
include:
- petes_rules.yaml
- nancys_rules.yaml
options:
raise: errors
---
# start rule definitions here
```
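As a sketch (assuming the file above is saved as `all_rules.yaml` and that
`petes_rules.yaml` and `nancys_rules.yaml` exist in the same directory),
reading the top file pulls in the rules from all included files. The
`origin()` function shows which file each rule was read from.
```{r, eval=FALSE}
rules <- validator(.file="all_rules.yaml")
# every rule records the file it originates from
origin(rules)
```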
## Exporting validator objects
There are three ways to export a validator object. You can either write to a
`yaml` file immediately as follows
```{r, eval=FALSE}
v <- validator(height>0, weight> 0)
export_yaml(v,file="my_rules.yaml")
```
or you can get the `yaml` text string using `as_yaml`
```
cat(as_yaml(v))
```
Finally, you can convert a rule set to a data frame and then export it
to a database.
```{r, eval=FALSE}
df <- as.data.frame(v)
```
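As a minimal sketch (assuming the `DBI` and `RSQLite` packages are available),
the data frame representation can then be written to a database table.
```{r, eval=FALSE}
library(DBI)
con <- dbConnect(RSQLite::SQLite(), "rules.sqlite")
dbWriteTable(con, "rules", df)
dbDisconnect(con)
```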
# Rules from SDMX {#sect-sdmxrules}
**Note** This functionality is available for `validate` versions `1.1.0` or higher.
In this Chapter we first demonstrate how to use SDMX with the `validate`
package. In Section \@ref(moresdmx) we provide some more general information on the
SDMX landscape, registries, and their APIs.
```{r, include=FALSE}
source("chunk_opts.R")
library(validate)
```
## SDMX and `validate`
Statistical Data and Metadata eXchange, or SDMX, is a standard for storing data
and the description of its structure, meaning, and content. The standard is
developed by the SDMX consortium (`https://sdmx.org`). It is used, amongst
others, in the [Official Statistics](https://en.wikipedia.org/wiki/Official_statistics) community to
exchange data in a standardized way.
A key aspect of SDMX is a standardized way to describe variables, data
structure (how is it stored), and code lists. This metadata is defined in an
_SDMX registry_ where data producers can download or query the necessary
metadata. Alternatively, metadata is distributed in a so-called Data Structure
Definition (DSD) file, which is usually in
[XML](https://en.wikipedia.org/wiki/XML) format.
For data validation, some aspects of the metadata are of interest. In
particular, code lists are interesting objects to test against. In validate
there are two ways to use SDMX codelists. The first is by referring to a
specific code list for a specific variable in an SDMX registry. The second way
is to derive a rule set from a DSD file that can be retrieved from a registry.
Below we discuss the following functions.
|function | what it does |
|--------------------|--------------------------------------------------|
|`sdmx_endpoint` | retrieve URL for SDMX endpoint |
|`sdmx_codelist` | retrieve sdmx codelist |
|`estat_codelist` | retrieve codelist from Eurostat SDMX registry |
|`global_codelist` | retrieve codelist from Global SDMX registry |
|`validator_from_dsd`| derive validation rules from DSD in SDMX registry|
## SDMX and API locations
SDMX metadata is typically exposed through a standardized REST API.
To query an SDMX registry, one needs to supply at least the following
information:
- The registry's API entry point. This is the base URL for the online registry.
You can specify it literally, or use one of the helper functions that
  are aware of certain known SDMX registries.
- Agency ID: the ID of the agency that is responsible for the code list
- Resource ID: the name of the SDMX resource. This is usually the name of
a type of statistic, like STS (short term statistics).
- Version: the code list version.
Some API endpoints are stored with the package. The function `sdmx_endpoint()`
returns endpoint URLs for several SDMX registries. Use
```{r,eval=FALSE}
sdmx_endpoint()
```
to get a list of valid endpoints. As an example, to retrieve the endpoint for the global
SDMX registry, use the following.
```{r, eval=TRUE}
sdmx_endpoint(registry="global")
```
## Code lists from SDMX registries
Code lists can be retrieved on-the-fly from one of the online SDMX registries.
In the following example we retrieve the codelist of economic activities from the
[global SDMX registry](https://registry.sdmx.org/overview.html).
```{r, eval=FALSE}
codelist <- sdmx_codelist(
endpoint = sdmx_endpoint("global")
, agency_id = "ESTAT"
, resource_id = "CL_ACTIVITY")
head(codelist)
[1] "_T" "_X" "_Z" "A" "A_B" "A01"
```
Equivalently, and as a convenience, you could use `global_codelist()` to avoid
specifying the API endpoint explicitly. The output can be used in a rule.
```{r, eval=FALSE}
Activity %in% global_codelist(agency_id="ESTAT", resource_id="CL_ACTIVITY")
```
Since downloading codelists can take some time, any function that accesses online
SDMX registries will store the download in memory for the duration of the R session.
There is also an `estat_codelist()` function for downloading codelists from
the Eurostat SDMX registry.
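As a sketch (assuming a data frame `dat` with a column named `Activity`),
such a rule is used like any other rule in a `validator` object.
```{r, eval=FALSE}
rules <- validator(
  Activity %in% global_codelist(agency_id="ESTAT", resource_id="CL_ACTIVITY")
)
out <- confront(dat, rules)
summary(out)
```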
## Derive rules from DSD
The functions described in the previous subsection allow you to check
variables against a particular SDMX code list. It is also possible to
download a complete Data Structure Definition and generate all checks
implied by the DSD.
```{r, eval=FALSE}
rules <- validator_from_dsd(endpoint = sdmx_endpoint("ESTAT")
, agency_id = "ESTAT", resource_id = "STSALL", version="latest")
length(rules)
[1] 13
rules[1]
Object of class 'validator' with 1 elements:
CL_FREQ: FREQ %in% sdmx_codelist(endpoint = "https://ec.europa.eu/tools/cspa_services_global/sdmxregistry/rest", agency_id = "SDMX", resource_id = "CL_FREQ", version = "2.0")
Rules are evaluated using locally defined options
```
There are 13 rules in total. For brevity, we only show the first rule here.
Observe that the first rule checks the variable `FREQ` against the code list
`CL_FREQ`, which is retrieved from the global SDMX registry. This demonstrates
that a DSD does not have to be fully self-contained and can refer to
metadata in other standard registries. If a data set is checked against this
rule, `validate` will download the codelist from the global registry and
compare each value in column `FREQ` against the codelist.
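As a sketch (assuming a data frame `sts_data` with the columns defined by the
DSD), checking the data then amounts to a regular confrontation with the
derived rule set.
```{r, eval=FALSE}
out <- confront(sts_data, rules)
summary(out)
```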
Note that the `validator_from_dsd` function adds relevant metadata such as a
rule name, the origin of the rule and a short description. Try
```{r, eval=FALSE}
rules[[1]]
```
to see all information.
## More on `SDMX` {#moresdmx}
The Statistical Data and Metadata eXchange (SDMX) standard
is an ISO standard designed to facilitate the exchange or dissemination of
[Official
Statistics](https://en.wikipedia.org/wiki/Official_statistics#:~:text=Official%20statistics%20are%20statistics%20published,organizations%20as%20a%20public%20good.).
At the core it has a logical information model describing the key
characteristics of statistical data and metadata, which can be applied to any
statistical domain. Various data formats have been defined based on this
information model, such as SDMX-[CSV](https://www.rfc-editor.org/rfc/rfc4180),
SDMX-[JSON](https://www.json.org/json-en.html), and - by far the most widely
known - SDMX-ML (data in [XML](https://www.w3.org/XML/)). A key aspect of the
SDMX standard is that one defines the metadata, including data structure,
variables, and code lists beforehand in order to describe what data is shared
or published. This metadata is defined in an *SDMX registry* where data
producers can download or query the necessary metadata. Alternatively metadata
is distributed in a so-called *Data Structure Definition* (DSD) file, which is
usually an XML file. Both routes should result in exactly the same
metadata.
SDMX registries can be accessed through a [REST
API](https://en.wikipedia.org/wiki/Representational_state_transfer), using a
standardized set of parameters. We can distinguish between registries
that provide metadata and registries that provide the actual data.
For the validate package, the metadata registries are of interest. Some of
the widely used metadata registries include the following.
- [Global SDMX Registry](https://registry.sdmx.org/): for global metadata,
hosted by the SDMX consortium. The central place for ESS-wide metadata. This
registry hosts important statistical metadata such as for CPI/HICP, National
Accounts (NA), Environmental accounting (SEEA), BOP, GFS, FDI and many more.
Unfortunately not all ESS metadata is present in this registry.
- [Eurostat SDMX Registry](https://webgate.ec.europa.eu/sdmxregistry/): for
Eurostat-wide metadata, hosted by Eurostat. This registry contains statistical
metadata for all other official statistics in the European Statistical System
(ESS). Access is offered via SDMX 2.1 REST API.
- [IMF SDMX Central](https://sdmxcentral.imf.org/overview.html): Registry by
the IMF.
- [UNICEF](https://sdmx.data.unicef.org/): Registry by UNICEF
Organisations that at the time of writing (spring 2023) actively offer
automated access to their data (not just metadata) via an SDMX API include (but
are not limited to) the European Central Bank
([ECB](https://sdw-wsrest.ecb.europa.eu/help/)),
the [OECD](https://data.oecd.org/api/) (in
[SDMX-JSON](https://data.oecd.org/api/sdmx-json-documentation/) or
[SDMX-ML](https://data.oecd.org/api/sdmx-ml-documentation/) format),
[Eurostat](https://ec.europa.eu/eurostat/web/sdmx-infospace),
the International Labour Organisation [ILO (`https://www.ilo.org/sdmx/index.html`)],
the [Worldbank](https://datahelpdesk.worldbank.org/knowledgebase/articles/1886701-sdmx-api-queries),
the Bank for International Settlements
([BIS](https://www.bis.org/statistics/sdmx_techspec.htm?accordion1=1&m=6%7C346%7C718)),
and the Italian Office of National Statistics (ISTAT).
The SDMX consortium does not maintain a list of active SDMX endpoints. The
[rsdmx R package](https://cran.r-project.org/package=rsdmx) maintains such a
list based on an earlier inventory of Data Sources, but at the time of writing
not all those links appear to be active.
Ideally, all SDMX providers would have implemented SDMX in a coordinated way so
that a client looking for SDMX metadata to validate its data before sending
could query the respective sources using one and the same API. The latest
version of the REST API is 2.1, which is described very well in the easy-to-use
[SDMX API cheat sheet](https://github.com/sdmx-twg/sdmx-rest/raw/master/doc/rest_cheat_sheet.pdf).
Inspecting the endpoints shows that not all providers implement the same set
of resources. Depending on the provider, an organization may decide which
elements of the API are exposed. For example, the API standard defines methods
to retrieve code lists from a DSD, but this functionality may or may not be
offered by an API instance. If it is not offered, this means the client
software needs to retrieve this metadata via other resource requests or
alternatively extract them locally from a DSD file. Finally, we note that on
a technical level the API of the various institutes may differ considerably and
that not all SDMX services implement the same version of SDMX.
This means that users should typically familiarize themselves somewhat with the
specific API they try to access (e.g. from `validate`).
# Comparing data sets {#sect-comparing}
```{r, include=FALSE}
source("chunk_opts.R")
```
```{r, echo=FALSE}
library(validate)
```
When processing data step by step, it is useful to gather information on the
contribution of each step to the final result. This way the whole process can
be monitored and the contribution of each step can be evaluated. Schematically,
a data processing step can be visualised as follows.
{width=50%}
Here, some input data is processed by some procedure that is parameterized,
usually by domain experts. The output data is again input for a next step.
In the following two sections we discuss two methods to compare two or more
versions of a data set. In the last section we demonstrate how `validate` can
be combined with the
[lumberjack](https://cran.r-project.org/package=lumberjack) package to automate
monitoring in an R script.
## Cell counts
One of the simplest ways to compare different versions of a data set is to
count how many cells have changed. In this setting it can be useful to
distinguish between changes from available to missing data (and _vice versa_)
and changes where one available value is replaced with another. When comparing two
data sets, say the input and the output data, the total number of cells
can be decomposed according to the following schema.
{width=70%}
The total number of cells (fields) in the output data can be decomposed into
those cells that are filled (available) and those that are empty (missing).
The missing ones are decomposed into those that were already missing in the
input data and those that are still missing. Similarly, the available values
can be decomposed into those that were missing before and have been imputed.
Those that were already available can be decomposed into those that are the
same as before (unadapted) and those that have been changed (adapted).
With the `validate` package, these numbers can be computed for two or more
datasets using `cells()`. As an example, we first create three versions of the
`SBS2000` dataset. The first version is just the unaltered data. In the
second version we replace a revenue column with its absolute value to 'repair'
cases with negative revenues. In the third version, we impute cases where
`turnover` is missing with the `vat` (value added tax) value, when available.
```{r}
library(validate)
data(SBS2000)
original <- SBS2000
version2 <- original
version2$other.rev <- abs(version2$other.rev)
version3 <- version2
version3$turnover[is.na(version3$turnover)] <- version3$vat[is.na(version3$turnover)]
```
We can now compare `version2` and `version3` to the original data set as follows.
```{r}
cells(input = original, cleaned = version2, imputed = version3)
```
The `cells` function accepts an arbitrary number of `name=data frame` arguments. The
names provided by the user are used as column names in the output. From the output we see
that both the `cleaned` data set (`version2`) and the `imputed` data set (`version3`) have
one adapted value compared to the original data. Similarly, no imputations took place in
preparing the `cleaned` data set, but a single value was imputed in the `imputed` dataset.
Since each data frame is compared to the first data frame, the last column can be considered
a 'cumulative' record of all changes that took place from beginning to end. It is also possible
to print differential changes, where each data set is compared with the previous one.
```{r}
cells(input = original, cleaned = version2, imputed = version3
, compare="sequential")
```
The output of `cells()` is an array of class `cellComparison`. What is most
interesting about this is that `validate` comes with two plot methods for such
objects. To demonstrate this, we will create two more versions of the
`SBS2000` dataset.
```{r}
version4 <- version3
version4$turnover[is.na(version4$turnover)] <- median(version4$turnover, na.rm=TRUE)
# from kEUR to EUR
version5 <- version4
version5$staff.costs <- version5$staff.costs * 1000
```
```{r}
out <- cells(input = original
, cleaned = version2
, vat_imp = version3
, med_imp = version4
, units = version5)
par(mfrow=c(2,1))
barplot(out)
plot(out)
```
The bar plot and line plot convey the same information. The line plot is better
when the data sets are instances resulting from a sequential process. The bar
plot can be used more generally since it does not suggest a particular order.
## Comparing rule violations
When processing data it is interesting to compare how many data validations
are violated before and after a processing step. Comparing output data with
input data, we can decompose the total number of validation results of
the output data as follows.
{width=70%}
The total number of validation results in the output data can be split into
those that are verifiable (`TRUE` or `FALSE`) and those that are unverifiable
(`NA`). The unverifiable cases can be split into those that were also
unverifiable in the input data (still) and those that were verifiable in the
input data but can now not be verified, because certain fields have been
emptied. The verifiable cases can be split into those that yielded `FALSE`
(violated) and those that yielded `TRUE` (satisfied). Each can be split into
cases that stayed the same or changed with respect to the input data.
With `validate` the complete decomposition can be computed with `compare()`.
It takes as first argument a `validator` object and two or more data sets
to compare. We will use the data sets developed in the previous section.
```{r}
rules <- validator(other.rev >= 0
, turnover >= 0
, turnover + other.rev == total.rev
)
comparison <- compare(rules
, input = original
, cleaned = version2
, vat_imp = version3
, med_imp = version4
, units = version5)
comparison
```
By default each data set is compared to the first dataset (`input=original`).
Hence the last column represents the cumulative change of all processing steps
since the first data set. It is possible to investigate local differences by
setting `how='sequential'`.
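For example, the following reuses the objects defined above to compute the
same comparison step by step (a minimal sketch; output not shown).
```{r, eval=FALSE}
compare(rules
  , input   = original
  , cleaned = version2
  , vat_imp = version3
  , med_imp = version4
  , units   = version5
  , how = "sequential")
```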
It is possible to plot the output for a graphical overview in two different
ways: a bar plot and a line plot.
```{r}
par(mfrow=c(2,1))
barplot(comparison)
plot(comparison)
```
## `validate` and `lumberjack`
The [lumberjack](https://cran.r-project.org/package=lumberjack) package makes
it easy to track changes in data in a user-defined way. The following example
is slightly adapted from the [JSS paper](https://www.jstatsoft.org/article/view/v098i01).
We create a script that reads data, performs a few data cleaning steps
and then writes the output. The script is stored in `clean_supermarkets.R` and
has the following code.
```{r, eval=FALSE}
## Contents of clean_supermarkets.R
library(validate)
# 1. simulate reading data
data(SBS2000)
spm <- SBS2000[c("id","staff","turnover","other.rev","total.rev")]
# 2. add a logger from 'validate'
start_log(spm, logger=lbj_cells())
# 3. assume empty values should be filled with 0
spm <- transform(spm, other.rev = ifelse(is.na(other.rev),0,other.rev))
# 4. assume that negative amounts have only a sign error
spm <- transform(spm, other.rev = abs(other.rev))
# 5a. ratio estimator for staff conditional on turnover
Rhat <- with(spm, mean(staff,na.rm=TRUE)/mean(turnover,na.rm=TRUE))
# 5b. impute 'staff' variable where possible using ratio estimator
spm <- transform(spm, staff = ifelse(is.na(staff), Rhat * turnover, staff))
# 6. write output
write.csv(spm, "supermarkets_treated.csv", row.names = FALSE)
```
In the first section we do not actually read data from a data source but take a
few columns from the SBS2000 data set that comes with the validate package.
The data to be processed is stored in a variable called `spm`. Next, in
section two, we use the `lumberjack` function `start_log()` to attach a logging
object of type `lbj_cells()` to the data under scrutiny. Two things are of
note here:
1. The call to `library(validate)` is necessary to be able to use `lbj_cells()`.
Alternatively you can use `validate::lbj_cells()`.
2. It is not necessary to load the `lumberjack` package in this script (although
it is no problem if you do).
In sections three and four, values for other revenue are imputed and then forced to
be nonnegative. In section 5 a ratio model is used to impute missing staff numbers.
In section 6 the output is written.
The purpose of the `lbj_cells()` logger is to record the output of `cells()`
after each step. To make sure this happens, run this file using `run_file()`
from the `lumberjack` package.
```{r}
library(lumberjack)
run_file('clean_supermarkets.R')
```
This command executed all code in `clean_supermarkets.R`, but `run_file()` also ensured
that all changes in the `spm` variable were recorded and logged using `lbj_cells()`.
The output is written to a `csv` file which we can read.
```{r}
logfile <- read.csv("spm_lbj_cells.csv")
```
The logfile variable has quite a lot of columns, so here we show just two rows.
```{r}
logfile[3:4,]
```
Each row in the output lists the step number, a time stamp, the expression used
to alter the contents of the variable under scrutiny, and all columns computed
by `cells()`. Since the logger always compares two consecutive steps, these
numbers are comparable to using `cells(compare='sequential')`. For example, we
see that after step four, one value was adapted compared to the state after
step three. And in step three, 36 values were imputed compared to the state
created by step 2. In step four, no values were imputed.
It is also interesting to follow the progression of rule violations as the
`spm` dataset gets processed. This can be done with the `lbj_rules()` logger
that is exported by `validate`. Since `lumberjack` allows for multiple loggers
to be attached to an R object, we alter the first part of the above script as
follows, and store it in `clean_supermarkets2.R`
```{r, eval=FALSE}
## Contents of clean_supermarkets2.R
library(validate)
#1.a simulate reading data
data(SBS2000, package="validate")
spm <- SBS2000[c("id","staff","other.rev","turnover","total.rev")]
# 1.b Create rule set
rules <- validator(staff >= 0, other.rev>=0, turnover>=0
, other.rev + turnover == total.rev)
# 2. add two loggers
start_log(spm, logger=lbj_cells())
start_log(spm, logger=lbj_rules(rules))
## The rest is the same as above ...
```
Running the file again using lumberjack, we now get two log files.
```{r}
run_file("clean_supermarkets2.R")
```
Let's read the log file from `spm_lbj_rules.csv` and print rows three and four.
```{r}
read.csv("spm_lbj_rules.csv")[3:4,]
```
We get the full output created by `validate::compare()`. For example we
see that after step three, 66 new cases satisfy one of the checks while two new
violations were introduced. The fourth step adds two new satisfied cases and no
new violations. The total number of violations after four steps equals five.
Until now the logging data was written to files that were determined automatically
by `lumberjack`. This is because `lumberjack` automatically dumps logging data
after executing the file, if the user has not done so explicitly.
You can determine where to write the logging data by adding a `stop_log()`
statement anywhere in your code (but at the end would usually make most sense).
For example, add the following line of code at the end of
`clean_supermarkets2.R` to write the output of the `lbj_rules` logger to
`my_output.csv`.
```{r, eval=FALSE}
stop_log(spm, logger="lbj_rules",file="my_output.csv")
```
The format and way in which logging data is exported is fixed by the logger. So
`lbj_rules()` and `lbj_cells()` can only export to csv, and only the data we've
seen so far. The good news is that the `lumberjack` package itself contains
other loggers that may be of interest, and it is also possible to develop your
own logger. So it is possible to develop loggers that export data to a
database. See the [lumberjack paper](https://www.jstatsoft.org/article/view/v098i01) for a
short tutorial on how to write your own logger.
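A rough sketch is given below, assuming lumberjack's convention that a logger
is a reference object exposing `add(meta, input, output)` and `dump(file)`
methods, as the built-in `lbj_cells()` and `lbj_rules()` do; see the
lumberjack paper for the authoritative interface. This hypothetical logger
uses an R6 class and simply counts changed cells per step.
```{r, eval=FALSE}
library(R6)
# Assumption: lumberjack calls add(meta, input, output) after each step and
# dump(file) at the end; this is an illustrative sketch only.
count_logger <- R6Class("count_logger",
  public = list(
    n = integer(0),
    add = function(meta, input, output){
      # count how many cells differ between the old and the new state
      self$n <- c(self$n, sum(input != output, na.rm=TRUE))
    },
    dump = function(file="count_log.csv", ...){
      write.csv(data.frame(step=seq_along(self$n), changed=self$n)
        , file=file, row.names=FALSE)
    }
  )
)
```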
# Bibliographical notes {-}
```{r, include=FALSE}
source("chunk_opts.R")
```
More background on the validate package can be found in the paper published
in the Journal of Statistical Software.
> MPJ van der Loo and E de Jonge (2020). [Data Validation Infrastructure for R](https://www.jstatsoft.org/article/view/v097i10). _Journal of Statistical Software_ 97(10)
The theory of data validation is described in the following paper.
> MPJ van der Loo, and E de Jonge (2020). [Data Validation](https://arxiv.org/abs/1912.09759). _In Wiley StatsRef: Statistics Reference Online (eds N. Balakrishnan, T. Colton, B. Everitt, W. Piegorsch, F. Ruggeri and J.L. Teugels)_.
Data validation is described in the wider context of data cleaning, in Chapter 6 of
the following book.
> MPJ van der Loo and E de Jonge (2018) [Statistical Data Cleaning With Applications in R](https://www.wiley.com/en-us/Statistical+Data+Cleaning+with+Applications+in+R-p-9781118897157). _John Wiley & Sons, NY_.
The following document describes data validation in the context of European
Official Statistics. It includes issues such as lifecycle management,
complexity analyses and examples from practice.
> M. Zio, N. Fursova, T. Gelsema, S. Giessing, U Guarnera, J. Ptrauskiene, Q. L. Kalben, M. Scanu, K. ten Bosch, M. van der Loo, and K. Walsdorfe (2015) [Methodology for data validation](https://www.markvanderloo.eu/files/share/zio2015methodology.pdf)
The `lumberjack` package discussed in Chapter \@ref(sect-comparing) is described in the following
paper.
> MPJ van der Loo (2020). [Monitoring Data in R with the lumberjack package](https://www.jstatsoft.org/article/view/v098i01). _Journal of Statistical Software_, 98(1)
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/doc/cookbook.Rmd
|
# File to be run by run_validations
# Three rules
rules <- validator(height >= 0, weight >= 0, weight >= height)
checks <- confront(women, rules)
# programming over confrontations
if ( all(checks) ){ # should be TRUE
# Four rules
check_that(women, height/weight >= 0.4)
}
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/run_validation/validations.R
|
## validation object contents ----
cf <- check_that(women, height > 0, ape > 0,weight / height > 2 )
expect_equal(length(cf),3)
expect_equal(names(summary(cf))
, c("name","items","passes","fails","nNA","error","warning","expression"))
vl <- rep(TRUE,30)
vl[16:18] <- FALSE
expect_equivalent(values(cf), array(vl,dim=c(15,2)))
expect_equal(errors(cf)[[1]],"object 'ape' not found")
expect_equivalent(warnings(cf),list())
agg <- data.frame(
npass = c(V1=15,V3=12)
, nfail = c(0,3)
, nNA = c(0,0)
, rel.pass = c(1.0,0.8)
, rel.fail = c(0.0,0.2)
, rel.NA = c(0,0)
)
expect_equal(aggregate(cf),agg)
expect_equal(sort(cf),agg[2:1,])
expect_equivalent(class(cf[1]),"validation")
expect_equal(length(cf[1]),1)
# order of aggregated output
cf <- check_that(women, height<0, mean(height) < 0, height > 0, mean(height)>0)
expect_equal(rownames(aggregate(cf,by="rule")), names(cf))
# order of aggregated output, in presence of errors
#
r <- validator(height <0 # V1
, mean(height)<0 # V2
, ape>0 # V3 (gives error)
, height>0 # V4
, mean(weight)>0 # V5
, mean(height)<0 # V6
, weight <= 0 # V7
, var(weight) <= 0) # V8
v <- confront(women, r)
expect_equal(rownames(aggregate(v,by="rule"))
, sprintf("V%d",1:8)[-3])
# aggregation when keys are present ----
rules <- validator(turnover >= 0, other.rev>=0)
data(SBS2000)
out <- confront(SBS2000, rules, key="id")
agg <- aggregate(out, by="record")
expect_true("id" %in% colnames(agg))
srt <- sort(out, by="record")
expect_true("id" %in% colnames(srt))
## validation logical quantifiers ----
expect_false(all(confront(women, validator(height < 60, weight>0))))
expect_true(all(confront(women, validator(height > 0, weight>0))))
w <- women
w[1,1] <- NA
# there is at least one FALSE already, so the conclusion is that not
# all are TRUE.
expect_false(all(confront(w, validator(height < 60, weight>0))))
expect_true(is.na(
all(confront(w, validator(height>height[1], weight>0)))
))
## validation objects can be plotted ----
v <- validator(x>0,y>0,if(x >0 ) y> 0)
Z <- plot(v)
expect_equal(ncol(Z), length(v))
expect_equal(nrow(Z), length(variables(v)))
v <- validator(x>0,z>0)
cf <- confront(data.frame(z=1), v)
expect_message(plot(cf), pattern = "not included")
## indication object contents ----
ind <- indicator(mean(height),sd(weight), sum(foo))
cf <- confront(women, ind)
expect_equal(length(cf),3)
expect_equivalent(round(values(cf),3),array(c(65,15.499),dim=c(1,2)))
expect_equal(errors(cf)[[1]],"object 'foo' not found")
expect_equal(names(summary(cf)),
c("name"
,"items"
,"min"
,"mean"
,"max"
,"nNA"
,"error"
,"warning"
,"expression")
)
expect_equal(dim(summary(cf)),c(3,9))
ind <- indicator(x={"A"}) # returns character value
expect_true(validate:::has_warning(confront(women,ind)))
## confrontation method with custom na.values ----
v <- validator(x > 0)
d <- data.frame(x=c(1,-1,NA))
expect_equivalent(values(confront(d,v)), matrix(c(TRUE,FALSE,NA)) )
expect_equivalent(values(confront(d,v,na.value=FALSE)), matrix(c(TRUE,FALSE,FALSE)) )
expect_equivalent(values(confront(d,v,na.value=TRUE)), matrix(c(TRUE,FALSE,TRUE)) )
## Confrontation methods with reference data ----
v1 <- validator(height > 0, weight / height > 0, height == ref$height)
cf1 <- confront(women,v1,ref = women)
v2 <- validator(height > 0, weight / height > 0, height == w1$height)
cf2 <- confront(women,v2,ref=list(w1=women))
e <- new.env()
e$w1 <- women
cf3 <- confront(women, v2, ref=e)
expect_equal(summary(cf1)[1:7],summary(cf2)[1:7])
expect_equal(summary(cf2)[1:7],summary(cf3)[1:7])
# reference data can be anything
cf <- confront(data.frame(x = c(1,4,2))
, validator(x %in% codelist)
, ref = list(codelist = 1:3)
)
expect_equivalent(as.logical(values(cf)), c(TRUE, FALSE, TRUE))
# warning when the data-carrying environment has variables with the
# same name as the parent.
expect_warning(confront(
dat = data.frame(test=10)
, x = validator(test==test$aap)
, ref = list(test=data.frame(aap=7)))
)
# self-reference on data set.
expect_true(values(check_that(women,nrow(.)==15))[1,1])
# indicators with reference data
ref <- mean(women$height)/mean(iris$Sepal.Length)
e <- new.env()
e$ir <- iris
i <- indicator( mean(height)/mean(ir$Sepal.Length) )
expect_equivalent(values(confront(women,i,ref=e))[1],ref)
L <- as.list(e)
expect_equivalent(values(confront(women,i,ref=L))[1],ref)
i <- indicator( mean(height)/mean(ref$Sepal.Length) )
expect_equivalent(values(confront(women,i, ref=iris))[1], ref)
## confrontations with transient variables ----
v <- validator(rat := weight/height, rat >0)
expect_equivalent(values(confront(women,v)), array(TRUE,dim=c(15,1)))
## check_that works with simple example ----
dat <- data.frame(x=1:2, y=3:2)
cf <- check_that(dat, x >= y)
expect_equal(length(cf),1)
## Confrontations with slack on linear equalities ----
v <- validator(x == 10)
d <- data.frame(x=9)
expect_false(values(confront(d,v)))
expect_true(values(confront(d,v,lin.eq.eps=2)))
# setting slack on equalities should not matter for inequalities
w <- validator(x > 10)
expect_false(values(confront(d,w)))
expect_false(values(confront(d,w,lin.eq.eps=2)))
# should also work in linear subexpressions
u <- validator( if (x == 10) y > 0)
d <- data.frame(x=9,y=-1)
expect_true(values(confront(d,u)))
expect_false(values(confront(d,u,lin.eq.eps=2)))
## Confrontations with slack on linear inequalities ----
v <- validator(x >= 0)
d <- data.frame(x = -1e-14)
expect_true( all(values(confront(d,v))) )
v <- validator(x <= 0)
d <- data.frame(x=1e-14)
expect_true( all(values(confront(d,v))) )
## coerce confrontations to data.frame ----
i <- indicator(mean(height),sd(weight))
v <- validator(height > 0, sd(weight)>0)
women$id <- letters[1:15]
women$id2 <- LETTERS[1:15]
expect_equal(nrow(as.data.frame(confront(women,i))),2)
expect_equal(ncol(as.data.frame(confront(women,i))),3)
expect_equal(ncol(as.data.frame(confront(women,i,key="id"))),4)
# multiple keys
d <- as.data.frame(confront(women, i, key=c("id","id2")))
expect_equal(ncol(d),5)
expect_equal(nrow(d), 2)
expect_equal(nrow(as.data.frame(confront(women,v))),16)
expect_equal(ncol(as.data.frame(confront(women,v))),3)
expect_equal(ncol(as.data.frame(confront(women,v,key="id"))),4)
# multiple keys
d <- as.data.frame(confront(women, v, key=c("id","id2")))
expect_equal(ncol(d),5)
expect_equal(nrow(d), 16)
v <- validator(hite>0,weight>0)
d <- confront(women,v)
expect_warning(as.data.frame(d))
#i <- indicator(mean(hite))
#expect_warning(as.data.frame(confront(women,i)))
## Printing a confrontation against an empty validator does not cause an error ----
d <- confront(data.frame("A" = 1:5), validator())
expect_silent(d$show())
# as.data.frame with no records should result in a zero-row data.frame
cf <- check_that(data.frame(y=10),x>0)
expect_equal(suppressWarnings(as.data.frame(cf))
, data.frame( name=character(0), value=logical(0), expression=character(0)) )
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_confrontation.R
|
# numbers
expect_true( is_linear_sequence(numeric(0)))
expect_true( is_linear_sequence(0) )
expect_true( is_linear_sequence(c(0,1)) )
expect_false( is_linear_sequence(c(pi, exp(1),7)) )
expect_false( is_linear_sequence(c(3,4,2,1,5), sort=FALSE) )
expect_true( is_linear_sequence(c(3,4,2,1,5), sort=TRUE) )
expect_true( is.na(is_linear_sequence(c(1,NA,2))) )
expect_true( is_linear_sequence(NA_integer_) )
expect_true( is_linear_sequence(rep(NA_integer_,2)) )
expect_false( is_linear_sequence(1:5, begin=2))
expect_false( is_linear_sequence(1:5, end=7))
expect_false( is_linear_sequence(1:5, begin=1, end=6))
expect_false( is_linear_sequence(1:5, begin=2, end=5))
# dates
expect_true( is_linear_sequence(as.Date("2015-12-17")) )
expect_true( is_linear_sequence( as.Date(c("2015-12-17","2015-12-19")) ) )
expect_false( is_linear_sequence(as.Date(c("2015-12-17","2015-12-19","2015-12-20"))) )
expect_true(
is_linear_sequence(
as.Date(c("2015-12-17","2015-12-19","2015-12-21"))
, begin = as.Date("2015-12-17")
, end = as.Date("2015-12-21")
) )
expect_true(is_linear_sequence(rep(1:5, each=2), by=rep(letters[1:2],5)))
# POSIXct
expect_true( is_linear_sequence( as.POSIXct("2015-12-17")) )
expect_true( is_linear_sequence( as.POSIXct(c("2015-12-17","2015-12-19")) ) )
expect_false( is_linear_sequence(as.POSIXct(c("2015-12-17","2015-12-19","2015-12-20")) ) )
# conversion of start/end?
expect_true(
is_linear_sequence(
as.POSIXct(c("2015-12-17","2015-12-19","2015-12-21"))
, begin= as.POSIXct("2015-12-17")
, end = as.POSIXct("2015-12-21")
) )
# character: auto-recognized formats
expect_true( is_linear_sequence(c("2012", "2013","2014")) )
expect_true( is_linear_sequence(c("2012M01", "2012M02", "2012M03")) )
expect_true( is_linear_sequence(c("2012Q1", "2012Q2", "2012Q3")) )
# conversion of start/end?
expect_true( is_linear_sequence(c("2012Q1", "2012Q2", "2012Q3"), begin="2012Q1") )
expect_false( is_linear_sequence(c("2012Q1", "2012Q2", "2012Q3"), end="2012Q4") )
# in validator context
d <- data.frame(
number = c(pi, exp(1), 7)
, date = as.Date(c("2015-12-17","2015-12-19","2015-12-20"))
, time = as.POSIXct(as.Date(c("2015-12-17","2015-12-19","2015-12-20")))
)
rules <- validator(
is_linear_sequence(number)
, is_linear_sequence(date)
, is_linear_sequence(time)
)
# nothing works
expect_false(any(confront(d,rules)))
## Groupwise series in long format
dat <- data.frame(
time = c(2012,2013,2012,2013,2015)
, type = c("hi","hi","ha","ha","ha")
)
expect_false(all(check_that(dat, is_linear_sequence(time))))
expect_equivalent(
values( check_that(dat, in_linear_sequence(time, type)) )[,1]
, c(TRUE,TRUE, FALSE, FALSE, FALSE)
)
# testing in_range
expect_true(in_range(1, min=0, max=1))
expect_false(in_range(1, min=0, max=1, strict=TRUE))
expect_true(in_range(as.Date("2018-03-01")
, min=as.Date("2012-01-01")
, max=as.Date("2018-03-01"))
)
expect_false(in_range(as.Date("2018-03-01")
, min=as.Date("2012-01-01")
, max=as.Date("2018-03-01"), strict=TRUE)
)
# testing part-whole relation checks
labels <- c("2018Q1", "2018Q2", "2018Q3", "2018Q4","2018")
values <- c(1,2,3,4, 10)
expect_equal(
part_whole_relation(values, labels, whole=rx("^\\d{4}$"))
, rep(TRUE, 5)
)
values[1] <- 2
expect_equal(
part_whole_relation(values, labels, whole=rx("^\\d{4}$"))
, rep(FALSE, 5)
)
values <- rep(values, 2)
values[1] <- 1
labels <- rep(labels, 2)
direction <- rep(c("import", "export"), each=5)
expect_equal(
part_whole_relation(values, labels, whole=rx("^\\d{4}$"), by=direction)
, c(rep(TRUE, 5), rep(FALSE, 5))
)
values[1] <- NA
expect_equal(
part_whole_relation(values, labels, whole=rx("^\\d{4}$"), by=direction)
, c(rep(NA, 5), rep(FALSE, 5))
)
expect_equal(
part_whole_relation(values, labels, whole=rx("^\\d{4}$"), by=direction, na.rm=TRUE)
, c(rep(FALSE, 5), rep(FALSE, 5))
)
# with string literals
local({
region <- c("foo", "bar","baz","bur","boo","fu")
amount <- c(10, 4:1, 25)
expect_equal(
part_whole_relation(amount, region, whole="foo", part=c("bar","bur","baz","boo"))
, rep(TRUE, length(region))
)
})
## testing do_by
x <- 1:10
y <- rep(letters[1:2],5)
expect_equal(do_by(x,y,sum), rep(c(25,30), 5))
x[1] <- NA
expect_equal(do_by(x,y,max), rep(c(NA,10),5))
expect_equal(sum_by(c(1,2),letters[1:2]), c(1,2))
expect_equal(min_by(c(1,2),letters[1:2]), c(1,2))
expect_equal(max_by(c(1,2),letters[1:2]), c(1,2))
expect_equal(mean_by(c(1,2),letters[1:2]), c(1,2))
# field length
expect_true(field_length("abc",3))
expect_false(field_length("abc",2))
expect_true(field_length("abc",min=1, max=3))
## number format
expect_true(number_format("12.34","dd.dd"))
expect_false(number_format("12.345","dd.dd"))
expect_true(number_format("0.123E45","0.d*Edd"))
expect_false(number_format("0.12x", "0.d*"))
expect_true(number_format("0.12x", "0.d*x"))
expect_true(number_format("12.34",min_dig=0))
expect_true(number_format("12.34",min_dig=1))
expect_true(number_format("12.34",min_dig=2))
expect_false(number_format("12.34",min_dig=3))
expect_true(number_format("12.34",max_dig=3))
expect_true(number_format("12.34",max_dig=2))
expect_false(number_format("12.34",max_dig=1))
expect_true(number_format("12.34",min_dig=1,max_dig=2))
expect_false(number_format("12.34",min_dig=3,max_dig=5))
expect_true(number_format("12,34",min_dig=1,max_dig=2, dec=","))
## Checking data against a fixed set of key-combinations
dat <- data.frame(
year = rep(c("2018","2019"),each=4)
, quarter = rep(sprintf("Q%d",1:4), 2)
, value = sample(20:50,8)
)
# explicit case
rule <- validator(contains_exactly(
expand.grid(year=c("2018","2019"), quarter=c("Q1","Q2","Q3","Q4"))
)
)
expect_equivalent(values(confront(dat, rule)), matrix(TRUE,nrow=8))
dat1 <- dat
dat2 <- dat[-1,]
dat1$foo <- "A"
dat2$foo <- "B"
rule <- validator(contains_exactly(
expand.grid(year=c("2018","2019"), quarter=c("Q1","Q2","Q3","Q4")), by=foo)
)
expect_equivalent(values(confront(rbind(dat1,dat2), rule))
, matrix(c(rep(TRUE,8), rep(FALSE,7)), nrow=15))
# cases using a reference keyset
keyset <- expand.grid(year=c("2018","2019"), quarter=c("Q1","Q2","Q3","Q4"))
keyset1 <- keyset[-1,]
rule <- validator(contains_exactly(all_keys))
expect_equivalent( as.logical(values(confront(dat, rule, ref=list(all_keys = keyset)))), rep(TRUE,8) )
expect_equivalent( as.logical(values(confront(dat, rule, ref=list(all_keys = keyset1)))), rep(FALSE,8))
dat1 <- dat[-1,]
rule <- validator(contains_at_most(all_keys))
expect_equivalent(
as.logical(values(confront(dat, rule, ref=list(all_keys = keyset))))
, rep(TRUE,8))
expect_equivalent(
as.logical(values(confront(dat, rule, ref=list(all_keys = keyset1))))
, c(FALSE, rep(TRUE,7))
)
rule <- validator(contains_at_least(all_keys))
expect_true(all(confront(dat, rule, ref=list(all_keys=keyset))))
expect_false(all(confront(dat1, rule, ref=list(all_keys=keyset))))
rule <- validator(does_not_contain(forbidden_keys))
expect_equivalent(
as.logical(values(confront(dat, rule, ref=list(forbidden_keys=keyset))))
, rep(FALSE, 8))
## Globbing and Regex ---------------------------------------------------------
transactions <- data.frame(
sender = c("S1","S2", "S3", "R1")
, receiver = c("R1","S1", "R1", "S1")
)
# a sender 'S*' cannot send to a sender
rule <- validator(does_not_contain(glob(data.frame(sender = "S*", receiver="S*"))))
expect_equal(as.logical(values(confront(transactions, rule))), c(TRUE, FALSE, TRUE, TRUE)
,info="globbing in does_not_contain" )
# Avoid failure on apple/darwin oldrel on CRAN that I cannot reproduce
# on any other platform.
mac_or_windows <- grepl("darwin", R.version$os) | .Platform$OS.type == "windows"
if (!(mac_or_windows & R.version.string <= "3.6.2")){
rule <- validator(does_not_contain(rx(data.frame(sender = "^S", receiver="^S"))))
expect_equal(as.logical(values(confront(transactions, rule))), c(TRUE, FALSE, TRUE, TRUE)
,info="regex in does_not_contain" )
# sender ending with a 2 cannot send to receiver ending with 1
rule <- validator(does_not_contain(rx(data.frame(sender = "2$", receiver="1$"))))
expect_equal(as.logical(values(confront(transactions, rule))), c(TRUE, FALSE, TRUE, TRUE)
,info="regex in does_not_contain" )
}
## Grouping -------------------------------------------------------------------
# data in 'long' format
dat <- expand.grid(
year = c("2018","2019")
, quarter = c("Q1","Q2","Q3","Q4")
, variable = c("import","export")
)
dat$value <- sample(50:100,nrow(dat))
periods <- expand.grid(
year = c("2018","2019")
, quarter = c("Q1","Q2","Q3","Q4")
)
rule <- validator(contains_exactly(all_periods, by=variable))
out <- confront(dat, rule, ref=list(all_periods=periods))
expect_equivalent(as.logical(values(out)), rep(TRUE,nrow(dat)))
# remove one export record
dat1 <- dat[-15,]
out1 <- confront(dat1, rule, ref=list(all_periods=periods))
values(out1)
expect_equivalent(as.logical(values(out1)), c(rep(TRUE,8),rep(FALSE, 7)) )
## Field format
expect_equal(field_format(c("X0Y","X12"), "^X\\dY",type="regex"), c(TRUE, FALSE))
expect_equal(field_format(c("X0Y","Y12"), "X*",type="glob"), c(TRUE, FALSE))
## hierarchy ------------------------------------------------------------------
#
d <- data.frame(
nace = c("01","01.1","01.11","01.12", "01.2")
, volume = c(100 ,70 , 30 ,40 , 25)
)
data(nace_rev2)
expect_equal(hierarchy(d$volume, labels=d$nace, hierarchy=nace_rev2[3:4])
, c(FALSE, FALSE, TRUE, TRUE, FALSE))
d <- data.frame(
nace = c("01","01.1","01.11","01.12", "01.2","foo")
, volume = c(100 ,70 , 30 ,40 , 25 , 60)
)
expect_equal(hierarchy(d$volume, labels=d$nace, hierarchy=nace_rev2[3:4])
, c(FALSE, FALSE, TRUE, TRUE, FALSE, TRUE))
expect_equal(hierarchy(d$volume, labels=d$nace, hierarchy=nace_rev2[3:4], na_value=NA)
, c(FALSE, FALSE, TRUE, TRUE, FALSE, NA))
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_genericrules.R
|
## issue #91 is solved ----
data <- data.frame(A = 1)
rule <- validator(A > 0)
cf <- confront(data, rule)
expect_silent(plot(rule))
expect_silent(plot(cf))
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_gh_issue_091.R
|
# all() on the result of checking a 0-row data frame must be TRUE
# (every statement about elements of the empty set is TRUE)
# similarly, any() should give FALSE: there are not any elements
# in the empty set for which a statement is TRUE.
cf <- check_that(data.frame(a = integer(0)), a == 1)
expect_true(all(cf))
expect_false(any(cf))
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_gh_issue_092.R
|
v <- validator(
other.rev >= 0
, total.rev >= 0
, staff.costs >= 0
, total.costs >= 0
, turnover + other.rev == total.rev
, total.rev - total.costs == profit
)
blocks <- .blocks_expressionset(v)
expect_equal(length(blocks), 2)
expect_equal( length(intersect(blocks[[1]], blocks[[2]] )), 0 )
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_gh_issue_098.R
|
rulefile <- tempfile(fileext=".R")
writeLines("rules:\n-\n expr:\n name:", con=rulefile)
expect_warning(validator(.file = rulefile))
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_gh_issue_109.R
|
expect_silent(
as.data.frame(validator(var_group(A, B)>0, C>0))
)
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_gh_issue_112.R
|
# Reported by Matthias Gomolka
# Subsetting confrontation objects does not work within lapply
df <- data.frame(a = 1:5, b = 3)
vl <- validator(a_gt_b = a > b, a_eq_b = a == b)
cf <- confront(df, vl)
names <- names(vl)
expect_silent(lapply(names, function(i) cf[i]))
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_gh_issue_116.R
|
expect_silent(
add_indicators(women
, confront(women, indicator(mn=mean(height) )))
)
ii <- indicator(ratio=height/weight, mnw =mean(weight), mht = mean(height))
out <- confront(women, ii)
d <- add_indicators(women, out)
expect_equal(d$mnw, rep(mean(women$weight), nrow(women)) )
expect_equal(d$mht, rep(mean(women$height), nrow(women)) )
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_gh_issue_128.R
|
df <- data.frame(rule = c("x>0","x<1"), name=c(NA,"bla"))
expect_equal(names(validator(.data=df)),c("V1","bla"))
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_gh_issue_148.R
|
df <- data.frame(x = integer(0L))
vl <- validator(x > 0L)
cf <- confront(df, vl)
expect_silent(aggregate(cf))
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_gh_issue_149.R
|
rules_df <- data.frame( rule = c("height > 0", "weight > 0", "height < 1.5 * weight")
, name = c(1,1,2)
)
rules <- validator(.data = rules_df)
nms <- names(rules)
expect_equal(nms, unique(nms))
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_gh_issue_151.R
|
## indicators ----
i <- indicator(height/weight, mean(height))
cf <- confront(women, i)
expect_equal(length(cf),2)
i <- indicator(as.character(height))
voptions(i, raise="all")
expect_warning(confront(women,i))
expect_equal(length(indicator(mean(x)) + indicator(mean(x)/sd(x))),2)
ii <- indicator(mean(x)) + indicator(mean(y))
expect_true(!any(duplicated(names(ii))))
# add indicators directly to data frame
ii <- indicator(
hihi = 2*sqrt(height)
, haha = log10(weight)
, lulz = mean(height)
, wo0t = median(weight)
)
out <- confront(women, ii)
expect_equal(ncol(add_indicators(women, ii)), ncol(women) + length(ii))
expect_equal(ncol(add_indicators(women, out)), ncol(women) + length(ii))
ii <- indicator(
BMI = (weight/2.2046)/(height*0.0254)^2
, mh = mean(height)
, mw = mean(weight))
out <- confront(women, ii)
expect_equal(names(add_indicators(women, out))
, c(names(women), names(ii)) )
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_indicator.R
|
## linear coefficients can be derived ----
v <- validator(x>0)
expect_equivalent(v$linear_coefficients()$A , matrix(-1,1,1))
expect_equivalent(v$linear_coefficients()$b, matrix(0,1,1))
w <- validator(2*x > 0)
expect_equivalent(w$linear_coefficients()$A,matrix(-2,1,1))
expect_equivalent(w$linear_coefficients()$b,matrix(0,1,1))
x <- validator(2*x + y > 0)
expect_equivalent(x$linear_coefficients()$A,matrix(c(-2,-1),1,2))
expect_equivalent(x$linear_coefficients()$b,matrix(0,1,1))
y <- validator(2*x + y > 3*z + w)
expect_equivalent(y$linear_coefficients()$A,matrix(c(-2,-1,3,1),1,4))
expect_equivalent(y$linear_coefficients()$b,matrix(0,1,1))
z <- validator(2*x + 1 + y -2 > 4 + z - 8)
expect_equivalent(z$linear_coefficients()$A,matrix(c(-2,-1,1),1,3))
expect_equivalent(z$linear_coefficients()$b,matrix(3,1,1))
## linear equalities are detected ----
for ( op in c("<", "<=", "==", ">=", ">") ){
expect_false(validate:::linear_call( parse(text=paste("x",op,'"a"'))[[1]] ))
}
expect_false(validate:::linear_call(expression( "a"*x < 3)[[1]]))
expect_false(validate:::linear_call(expression( x < -"a")[[1]]))
## normalisation can be switched off ----
# check normalisation
z <- validator(2*x + 1 + y -2 > 4 + z - 8)
expect_equivalent(z$linear_coefficients(normalize=FALSE)$A, -1*matrix(c(-2,-1,1),1,3))
expect_equivalent(z$linear_coefficients(normalize=FALSE)$b, -1*matrix(3,1,1))
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_linear_coefficients.R
|
## lumberjack loggers
## lbj_cells ----
fl <- tempfile()
lbj <- lbj_cells(verbose=FALSE)
meta <- list(expr = expression(foo()), src = "foo()")
w1 <- women
w1[1,1] <- 2*w1[1,1]
lbj$add(meta,women,w1)
lbj$cells
expect_silent(lbj$dump(file=fl))
d <- read.csv(fl)
expect_equal(nrow(d),2)
expect_equal(d$adapted,c(0,1))
expect_warning( lbj$add(meta, d, data.frame(x=1:2)) )
## lbj_rules ----
fl <- tempfile()
v <- validator(x>0)
lbj <- lbj_rules(rules=v, verbose=FALSE)
meta <- list(expr = expression(foo()), src = "foo()")
d1 <- data.frame(x =-1)
d2 <- data.frame(x = 1)
lbj$add(meta,d1,d2)
expect_silent(lbj$dump(file=fl))
d <- read.csv(fl)
expect_equal(nrow(d),2)
expect_equal(d$satisfied,c(0,1))
expect_warning( lbj$add(meta,d1,data.frame(x=1:2)) )
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_lumberjack.R
|
# a number of methods not otherwise tested in testOptions or testParse
## Expressionset extraction ----
v <- validator(x > 0, y>0)
expect_equivalent(class(v[[1]]),"rule")
expect_equivalent(class(v[["V1"]]),"rule")
expect_equivalent(class(v[1]),"validator")
expect_equal(length(v[1]),1)
expect_equal(length(v[1:2]),2)
expect_equal(length(v["V1"]),1)
expect_equal(length(v[c("V1","V2")]),2)
expect_equivalent(class(summary(v)),"data.frame")
expect_true(all(c("block","nvar","rules") %in% names(summary(v))) )
## name setter ----
v <- validator(x>0,y>0,z>0)
expect_warning(names(v) <- c("A","B"))
expect_true(!any(duplicated(names(v))))
## Variables can be retrieved ----
expect_equal( variables(validator(x > 0)),'x')
expect_equal( sort(variables(validator(x > 0, y > 0))) , c('x','y') )
expect_equal( variables(validator(x>0, x<1 )), 'x')
expect_equal( sort(variables(validator(x +y > 0, y < 1))), c('x','y') )
expect_equal( variables(validator(x := 2*y, x>1)),'y')
expect_equal( sort(variables(validator(x := 2*y, x>1),dummy=TRUE)), c('x','y'))
v <- validator(
root = y := sqrt(x)
, average = mean(x) > 3
, sum = x + y == z
)
expect_equivalent(
variables(v,as='matrix')
, matrix(c(TRUE, TRUE, FALSE, TRUE), ncol=2, nrow=2)
)
expect_equivalent(
variables(v,as='matrix',dummy=TRUE)
, matrix(c(TRUE,FALSE,TRUE,TRUE,TRUE,TRUE,FALSE,FALSE,TRUE), ncol=3, nrow=3)
)
v <- validator(x + y > 0, z>0)
expect_equal(sort(variables(v[[1]])), c('x','y'))
# test reuse of dummy variables to define other dummies.
# this also tests expand_assignments
v <- validator( dummy_x:=1, dummy_y:= dummy_x + 1, z > dummy_y)
expect_equal(variables(v, dummy=FALSE),"z")
## metadata ----
v <- validator(x>0,y>0)
meta(v,"foo") <-1
expect_equal(meta(v)$foo,c(1,1))
meta(v[2],"bar") <- "bla"
expect_equal(meta(v)$bar,c(NA,"bla"))
meta(v,"baz") <- 1:2
expect_equal(meta(v)$baz, 1:2)
## Confrontation extraction ----
cf <- check_that(women,height > weight, height > 0)
expect_equal(length(cf),2)
expect_equal(length(cf[1]),1)
# just a simple test to check consistency between barplot and confrontation objects.
## barplot doesn't crash ----
nullplot <- function(...){
pdf(NULL)
on.exit(dev.off())
barplot(check_that(women, height>0, weight/height > 2),...)
}
expect_warning(nullplot(), "deprecated")
expect_warning(nullplot(add_exprs=TRUE), "deprecated")
expect_warning(nullplot(add_legend=FALSE), "deprecated")
expect_warning(nullplot(topn=5),"deprecated")
## show methods do not crash ----
x <- capture.output(validator(x + y == z))
expect_true(any(nchar(x)>0))
# this gives a false positive in testthat 2.0.0 :/
# x <- capture.output(validator(x + y == z)[[1]])
x <- capture.output(check_that(women,height>0))
expect_true(any(nchar(x)>0))
## yaml export ----
# smoke test
as_yaml(validator(x>0))
export_yaml(x=validator(x>0), file=tempfile())
# test that options are included, only when provided
v <- validator(x>0)
expect_false(grepl("options:",as_yaml(v)))
voptions(v,raise="all")
expect_true(grepl("options:",as_yaml(v)))
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_methods.R
|
## Options can be set and reset locally ----
v <- validator()
voptions(v, raise='all')
expect_false(voptions()$raise == voptions(v)$raise)
reset(v)
expect_equal(voptions(v,'raise'),"none")
## Options can be executed locally without side effects ----
v <- validator(x > 0)
d <- data.frame(y=1)
opt <- voptions()
# this should run normally
expect_true(inherits(confront(d,v),'confrontation'))
expect_error(confront(d,v,raise='all'))
# the above statement should not yield side effects
expect_true(inherits(confront(d,v),'confrontation'))
voptions(v, raise='all')
expect_error(confront(d,v))
expect_true(inherits(confront(d,v,raise='none'), 'confrontation'))
expect_error(confront(d,v))
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_options.R
|
## file paths are interpreted correctly ----
expect_true(validate:::is_full_path("C:/hello"))
expect_true(validate:::is_full_path("//server/hello"))
expect_true(validate:::is_full_path("~/hello"))
expect_true(validate:::is_full_path("http://hello"))
expect_false(validate:::is_full_path("./hello"))
expect_false(validate:::is_full_path("reldir/hello"))
# windoze flavor
expect_true(validate:::is_full_path("C:\\hello"))
expect_true(validate:::is_full_path("\\\\server\\hello"))
expect_true(validate:::is_full_path("~\\hello"))
expect_false(validate:::is_full_path("reldir\\hello"))
# TODO: add file parsing tests
#setwd("pkg/tests/testthat/")
## Parsing freeform ----
expect_equal( length( validator(.file="yamltests/freeform.yaml") ) , 2)
expect_equal( length( indicator(.file="yamltests/indicator.yaml") ) , 2)
expect_equal( length( indicator(.file="yamltests/indicator2.yaml") ) , 2)
## Parsing yrf format ----
now <- Sys.time()
v <- validator(.file="yamltests/yamlrules.yaml")
expect_equal(length(v),2)
expect_equal(names(v),c("sumrule","conditional"))
expect_equivalent(origin(v),c("yamltests/yamlrules.yaml","yamltests/yamlrules.yaml"))
expect_equivalent(label(v),c("sum of x and y","if x positive then y also"))
expect_equivalent(description(v),c("a looong description here","a looong description here\n"))
expect_true(all(created(v)-now < 10))
expect_warning(validator(.file="yamltests/invalid.yaml"))
out <- capture.output(expect_warning(validator(.file="yamltests/invalidR.yaml")))
expect_true(any(nchar(out)>0))
## Duplicate names ----
v <- validator(.file="yamltests/duplicate_name.yaml")
nms <- names(v)
expect_equal(nms, unique(nms))
## Parsing options ----
v <- validator(.file="yamltests/yamloptions.yaml")
expect_equal(voptions(v,"raise"),"all")
expect_equal(length(v),1)
## Parsing metadata ----
v <- validator(.file="yamltests/yaml_with_meta.yaml")
expect_equal(meta(v)$foo,c("1",NA))
expect_equal(meta(v)$bar,c(NA,"2"))
## Parsing included files ----
v <- validator(.file="yamltests/top.yaml")
expect_equal(length(v),6)
expect_equivalent(origin(v)
, c( "yamltests/child1.yaml"
, "yamltests/child1.yaml"
, "yamltests/child3.yaml"
, "yamltests/child2.yaml"
, "yamltests/child2.yaml"
, "yamltests/top.yaml")
, info = "file inclusion order"
)
## validation from data.frames ----
d <- data.frame(
rule = c("x>0", "a + b == c")
, name = c("foo", "bar")
, description = c("hello world","Ola, mundo")
, stringsAsFactors=FALSE
)
expect_equal(length(validator(.data=d)),2)
expect_equal(length( validator(.data=d[-3]) ),2)
expect_error(validator(.data=d[-1]))
d$rule[2] <- "a+b"
expect_warning(validator(.data=d))
#
## var_from_call ----
# regular case, concerning two variables
expect_equal(
validate:::var_from_call(expression(x > y)[[1]])
, c("x","y")
)
# case of no variables at all
expect_equal(
validate:::var_from_call(expression(1 > 0)[[1]])
, NULL
)
## validating_call ----
expect_true(validate:::validating_call(expression(x > y)[[1]]))
expect_true(validate:::validating_call(expression(x >= y)[[1]]))
expect_true(validate:::validating_call(expression(x == y)[[1]]))
expect_true(validate:::validating_call(expression(x != y)[[1]]))
expect_true(validate:::validating_call(expression(x <= y)[[1]]))
expect_true(validate:::validating_call(expression(x < y)[[1]]))
expect_true(validate:::validating_call(expression(identical(x,y))[[1]]))
expect_true(validate:::validating_call(expression(!(x > y))[[1]]))
expect_true(validate:::validating_call(expression(all(x > y))[[1]]))
expect_true(validate:::validating_call(expression(any(x > y))[[1]]))
expect_true(validate:::validating_call(expression(grepl('hello',x))[[1]]))
# Removed test (2020-09-08): can be done with "hihi" %in% names(.)
# expect_true(validate:::validating_call(expression(exists("hihi")==TRUE)[[1]]))
expect_true(validate:::validating_call(expression(if(x == 1) y == 1)[[1]]))
expect_true(validate:::validating_call(expression(xor(x == 1, y == 1))[[1]]))
expect_false(validate:::validating_call(expression(x)[[1]]))
## vectorizing if-statements ----
a <- validate:::vectorize( expression( if (P) Q )[[1]] )
b <- expression(!(P) |(Q))[[1]]
expect_identical(a,b)
a <- validate:::vectorize( expression( (if (P) Q) )[[1]] )
b <- expression( (!(P)|(Q)) )[[1]]
expect_identical(a,b)
a <- validate:::vectorize( expression( (if (P) Q) | Z )[[1]] )
b <- expression((!(P)|(Q)) | Z)[[1]]
expect_identical(a,b)
a <- expression(sapply(x,function(y) 2*y))[[1]]
b <- a
expect_identical(validate:::vectorize(a),b)
a <- validate:::vectorize( expression( (if (P) Q) | (if(A) B) )[[1]] )
b <- expression((!(P)|(Q))|(!(A)|(B)))[[1]]
expect_identical(a,b)
# nested if's. For some reason, identical gives FALSE
a <- validate:::vectorize(expression( if (P) Q | if(A) B )[[1]])
b <- expression( !(P) | (Q | (!(A) | (B))) )[[1]]
expect_true(a == b)
e <- expression( if (P) Q else R)[[1]]
a <- validate:::vectorize(e)
b <- expression(
(!(P)|(Q)) & ((P)|(R))
)[[1]]
expect_identical(a,b)
## translation of rules to data.frame ----
v <- validator(x > y, 2*y-1==z)
expect_equal(nrow(as.data.frame(v)),2)
i <- indicator(mean(x), sd(y))
expect_equal(nrow(as.data.frame(i)),2)
## replacing %in% operator ----
e <- expression( x %in% y)[[1]]
expect_identical(validate:::replace_in(e)
, expression(x %vin% y)[[1]])
e <- expression( x %in% y | x %in% z)[[1]]
expect_identical(validate:::replace_in(e)
, expression(x %vin% y | x %vin% z)[[1]])
## negating numerical expressions
e <- expression(x > 1, x >=1, x < 1, x <=1, x == 1, x != 1, !(x == 1))
ne <- expression(x <= 1, x < 1, x >= 1, x > 1, x != 1, x == 1, (x == 1))
expect_identical( as.expression(lapply(e, validate:::negate))
, ne
)
## injecting eps
e <- quote(x >= 1)
expect_identical( validate:::replace_lin(e, dat=data.frame(x=1), eps_ineq = 0.1)
, quote(x - 1 >= -0.1)
)
e <- quote(!x>0)
expect_identical( validate:::replace_lin(e, dat=data.frame(x=1), eps_ineq = 0.1)
, quote(x <= 0
)
)
e <- quote(!(x>0))
expect_identical( validate:::replace_lin(e, dat=data.frame(x=1), eps_ineq = 0.1)
, quote(x <= 0
)
)
e <- quote(if (x > 1) y == 1 else z > 1)
expect_identical( validate:::replace_lin(e, dat=data.frame(x=1, y=1,z=1),eps_ineq = 0.1, eps_eq = 0.2)
, quote(if (x > 1) abs(y - 1) <= 0.2 else z > 1)
)
e <- quote(if (x > 1) y == 1 else z > 1)
e <- validate:::replace_if(e)
e <- validate:::replace_lin(e, dat=data.frame(x=1,y=1,z=1))
expect_identical( e
, quote( (x <= 1 | (abs(y - 1) <= 0.1))
& ((x > 1) | (z > 1))
)
)
e <- quote(a == b)
e <- validate:::replace_lin(e, dat=data.frame(a = 1, b=2))
expect_identical( e, quote(abs(a - b) <= 0.1))
e <- quote(a == b)
e <- validate:::replace_lin(e, dat=data.frame(a = "A", b="B"))
expect_identical( e, quote(a == b))
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_parse.R
|
### Rules from PoC ESSnet on validation
## Rule 01 poc ----
dat <- read.csv("pocdata/Rule_01.csv")
v <- validator(.file="pocrules/rule_01.txt")
expect_equivalent(values(confront(dat,v)),matrix(c(TRUE,FALSE,NA),nrow=3))
## Rule 02 poc ----
dat <- read.csv("pocdata/Rule_02.csv")
v <- validator(.file="pocrules/rule_02.txt")
expect_equivalent(values(confront(dat,v)), matrix(c(TRUE,FALSE, TRUE,NA),nrow=4))
## Rule 03 poc ----
v <- validator(.file="pocrules/rule_03.txt")
dat <- read.csv("pocdata/Rule_03_valid.csv")
expect_equivalent(values(confront(dat,v)),matrix(TRUE))
dat <- read.csv("pocdata/Rule_03_invalid.csv")
expect_equivalent(values(confront(dat,v)),matrix(FALSE))
dat <- read.csv("pocdata/Rule_03_invalid_with_missings.csv")
expect_equivalent(values(confront(dat,v)),matrix(FALSE))
## Rule 04 poc ----
v <- validator(.file="pocrules/rule_04.txt")
dat <- read.csv("pocdata/Rule_04.csv")
expect_equivalent(values(confront(dat,v,na.value=FALSE)),matrix(c(TRUE,FALSE,FALSE,FALSE),nrow=4))
## Rule 05 poc ----
v <- validator(.file="pocrules/rule_05.txt")
dat <- read.csv("pocdata/Rule_05.csv")
expect_equivalent(
values(confront(dat,v))
, matrix(c(TRUE, FALSE, NA, NA, FALSE, NA),nrow=6)
)
## Rule 06 poc ----
v <- validator(.file="pocrules/rule_06.txt")
dat <- read.csv("pocdata/Rule_06.csv")
expect_equivalent(
values(confront(dat,v))
, matrix(c(TRUE,TRUE,FALSE,FALSE,NA),nrow=5)
)
## Rule 07 poc ----
v <- validator(.file="pocrules/rule_07.txt")
dat <- read.csv("pocdata/Rule_07.csv")
expect_equivalent(
values(confront(dat,v))
, matrix(c(FALSE,TRUE,FALSE,TRUE),nrow=4)
)
## Rule 08 poc ----
v <- validator(.file="pocrules/rule_08.txt")
dat <- read.csv("pocdata/Rule_08HH.csv")
ref <- read.csv("pocdata/Rule_08PERSON.csv")
expect_equivalent(
values(confront(dat,v,ref=list(persons=ref)))
, matrix(c(TRUE,FALSE,TRUE,TRUE,NA,FALSE),nrow=6)
)
## Rule 09 poc ----
v <- validator(.file="pocrules/rule_09.txt")
dat <- read.csv("pocdata/Rule_09_undecided.csv")
expect_equivalent(values(confront(dat,v)), matrix(NA))
dat <- read.csv("pocdata/Rule_09_valid.csv")
expect_equivalent(values(confront(dat,v)), matrix(TRUE))
dat <- read.csv("pocdata/Rule_09_invalid.csv")
expect_equivalent(values(confront(dat,v)), matrix(FALSE))
## Rule 10 poc ----
v <- validator(.file="pocrules/rule_10.txt")
dat <- read.csv("pocdata/Rule_10_invalid1.csv")
expect_equivalent(values(confront(dat,v)), matrix(FALSE))
dat <- read.csv("pocdata/Rule_10_invalid2.csv")
expect_equivalent(values(confront(dat,v)), matrix(FALSE))
dat <- read.csv("pocdata/Rule_10_valid.csv")
expect_equivalent(values(confront(dat,v)), matrix(TRUE))
## Rule 11 poc ----
v <- validator(.file="pocrules/rule_11.txt")
dat <- read.csv("pocdata/Rule_11_invalid1.csv")
expect_equivalent(values(confront(dat,v)),matrix(FALSE))
dat <- read.csv("pocdata/Rule_11_invalid2.csv")
expect_equivalent(values(confront(dat,v)),matrix(FALSE))
dat <- read.csv("pocdata/Rule_11_undecided.csv")
expect_equivalent(values(confront(dat,v)),matrix(NA))
dat <- read.csv("pocdata/Rule_11_valid1.csv")
expect_equivalent(values(confront(dat,v)),matrix(TRUE))
dat <- read.csv("pocdata/Rule_11_valid2.csv")
expect_equivalent(values(confront(dat,v)),matrix(TRUE))
## Rule 12 poc ----
v <- validator(.file="pocrules/rule_12.txt")
dat <- read.csv("pocdata/Rule_12_invalid1.csv")
expect_equivalent(values(confront(dat,v)),matrix(FALSE))
dat <- read.csv("pocdata/Rule_12_invalid2.csv")
expect_equivalent(values(confront(dat,v)),matrix(FALSE))
dat <- read.csv("pocdata/Rule_12_valid.csv")
expect_equivalent(values(confront(dat,v)),matrix(TRUE))
## Rule 13 poc ----
v <- validator(.file="pocrules/rule_13.txt")
dat <- read.csv("pocdata/Rule_13_invalid.csv")
expect_equivalent(values(confront(dat,v)),matrix(FALSE))
dat <- read.csv("pocdata/Rule_13_valid.csv")
expect_equivalent(values(confront(dat,v)),matrix(TRUE))
## Rule 14 poc ----
v <- validator(.file="pocrules/rule_14.txt")
dat <- read.csv("pocdata/Rule_14_invalid.csv")
expect_equivalent(values(confront(dat,v)),matrix(FALSE))
dat <- read.csv("pocdata/Rule_14_valid.csv")
expect_equivalent(values(confront(dat,v)),matrix(TRUE))
## Rule 15 poc ----
v <- validator(.file="pocrules/rule_15.txt")
dat <- read.csv("pocdata/Rule_15.csv")
expect_equivalent(
values(confront(dat,v))
, matrix(c(TRUE,TRUE,TRUE,FALSE,TRUE),nrow=5)
)
## Rule 16 poc ----
v <- validator(.file="pocrules/rule_16.txt")
dat <- read.csv("pocdata/Rule_16_invalid.csv")
expect_equivalent(values(confront(dat,v)),matrix(FALSE))
dat <- read.csv("pocdata/Rule_16_valid.csv")
expect_equivalent(values(confront(dat,v)),matrix(TRUE))
## Rule 17 poc ----
v <- validator(.file="pocrules/rule_17.txt")
dat <- read.csv("pocdata/Rule_17HOUSEHOLDS.csv")
dat1 <- read.csv("pocdata/Rule_17PERSONS.csv")
expect_equivalent(
values( confront(dat,v,ref=list(person=dat1) ) )
, matrix(c(TRUE,FALSE,FALSE,TRUE,NA),nrow=5)
)
## Rule 18 poc ----
v <- validator(.file="pocrules/rule_18.txt")
dat <- read.csv("pocdata/Rule_18HOUSEHOLDS.csv")
dat1 <- read.csv("pocdata/Rule_18PERSONS_invalid.csv")
expect_equivalent(
values(confront(dat, v, ref=list(persons=dat1)))
,matrix(FALSE)
)
dat1 <- read.csv("pocdata/Rule_18PERSONS_valid.csv")
expect_equivalent(
values(confront(dat, v, ref=list(persons=dat1)))
, matrix(TRUE)
)
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_poc.R
|
expect_silent(out <- run_validation_file("run_validation/validations.R", verbose=FALSE))
expect_equal(length(out), 2)
expect_equal(length(out[[1]]),3)
expect_equal(length(out[[2]]),1)
# Methods
s <- summary(out)
s1 <- summary(out[[1]])
# the summary of a 'validations' object has 4 extra columns:
# file, call, first line nr, last line nr.
expect_equal(ncol(s), ncol(s1) + 4)
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_run_validation.R
|
if (!require('rsdmx', quietly=TRUE)) exit_file("rsdmx not installed")
if (!at_home()) exit_file("skipping API-calling sdmx tests")
# check if the global registry is up, if so: run test
if (ignore(expect_silent)(global_codelist("CL_FREQ", version="2.0"))){
expect_equal(length(global_codelist("CL_FREQ", version="2.0")), 9)
}
# check if the estat registry is up, if so: run test
if (ignore(expect_silent)(estat_codelist("CL_FREQ", version="2.0"))){
expect_equal(length(estat_codelist("CL_FREQ", version="2.0")), 9)
}
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_sdmx.R
|
## validation syntax is recognized ----
# fiets(x) is not a validation rule
expect_warning(validator(fiets(x)))
# these are not validating statements
expect_warning(validator(x?y))
expect_warning(validator(1>0))
## Exception handling can be switched
voptions(raise='none')
expect_equal(validate:::factory(function()stop('aap'), voptions)()$err, 'aap')
expect_equal(validate:::factory(function(){ warning('aap');7}, voptions)()$warn, 'aap')
voptions(raise='errors')
expect_error(validate:::factory(function() stop(), voptions)())
voptions(raise = 'all')
expect_error(validate:::factory(function() stop(),voptions)())
expect_warning(validate:::factory(function() warning(),voptions)())
# voptions('reset')
validate::reset()
## Functional dependencies
v1 <- validator(stad + straat ~ postcode)
dat <- data.frame(
straat = c('kerkstraat','kerkstraat','kerkstraat','kerkstraat')
,stad = c('DH','DH','H','DH')
,postcode = c('2495','2496','8888','2495')
)
cf <- confront(dat,v1)
expect_equivalent(values(cf),array(c(TRUE,FALSE,TRUE,TRUE),dim=c(4,1)))
## group_expansion ----
L <- list(expression(var_group(a,b)>0)[[1]])
expect_equal(length(validate:::expand_groups(L)),2)
# one expression not containing group
L <- list(expression(var_group(a,b)>0)[[1]],expression(x>0)[[1]])
expect_equal(length(validate:::expand_groups(L)),3)
# two groups (cartesian product)
L <- list(expression(var_group(a,b)>var_group(b,c))[[1]])
expect_equal(length(validate:::expand_groups(L)),4)
## Testing for uniqueness and completeness
expect_equal(is_unique(x=1:3), rep(TRUE,3))
expect_equal(is_unique(x=rep(1,3),y=rep(1,3)), rep(FALSE,3))
expect_true(all_unique(x=1:3))
expect_equal(is_complete(women$height, women$weight),rep(TRUE,15))
expect_true(all_complete(women$height, women$weight))
#expect_equal(occurs(c(1:3,2:0)), c(2,2,1,2,2,1))
w1 <- women
w1[1,1] <- NA
expect_equal(is_complete(w1$height, w1$weight), c(FALSE, rep(TRUE, 14)) )
# make sure these functions are recognized as validating syntax
expect_silent( v <- validator(
is_unique(x,y), all_unique(x,y), is_complete(x,y), all_complete(x,y)
))
expect_equal(length(v), 4)
## testing existence rules
# Persons and household. In each household, one can be
# 'h'ead of household.
# Household 1 has two heads, household 3 has no heads.
dd <- data.frame(
hhid = c(1, 1, 2, 1, 2, 2, 3 )
, person = c(1, 2, 3, 4, 5, 6, 7 )
, hhrole = c("h","h","m","m","h","m","m")
)
v <- validator(exists_one(hhrole=="h", hhid))
expect_equivalent(
values(confront(dd, v))
, matrix(c(FALSE, FALSE, TRUE, FALSE, TRUE ,TRUE, FALSE), nrow=7)
)
# Household 1 has an NA, household 3 has one member who is the head.
dd <- data.frame(
hhid = c(1, 1, 2, 1, 2, 2, 3 )
, person = c(1, 2, 3, 4, 5, 6, 7 )
, hhrole = c("h",NA,"m","m","h","m","h")
)
v <- validator(exists_one(hhrole=="h", hhid))
expect_equivalent(
values(confront(dd, v))
, matrix(c(NA, NA, TRUE, NA, TRUE ,TRUE, TRUE), nrow=7)
)
# again, but with na.rm=TRUE
v <- validator(exists_one(hhrole=="h", hhid, na.rm=TRUE))
expect_equivalent(
values(confront(dd, v))
, matrix(c(TRUE, TRUE, TRUE, TRUE, TRUE ,TRUE, TRUE), nrow=7)
)
# Households must have at least one member.
v <- validator(exists_any(hhrole == "m", by=hhid))
expect_equivalent(
values(confront(dd,v))
, matrix(c(NA, NA, TRUE, NA, TRUE, TRUE, FALSE), nrow=7)
)
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_syntax.R
|
## Options can be set
# warning on nonexistent option
expect_warning(voptions(fiets=3))
# invalid 'raise' value -- not implemented yet
#expect_error(voptions(raise='aap'))
# this should run without problems
reset(voptions)
expect_equal(voptions('raise')[[1]],'none')
## match_cells
d1 <- data.frame(id=paste(1:3),x=1:3,y=4:6)
d2 <- data.frame(id=paste(4:1),y=4:7,x=1:4)
expect_equal(
names(match_cells(d1,d2,id='id')[[1]])
,names(match_cells(d1,d2,id='id')[[2]])
)
expect_equal(
as.character(match_cells(d1,d2,id='id')[[1]][,'id'])
, as.character(match_cells(d1,d2,id='id')[[2]][,'id'])
)
## validating/indicating expressions can be named
expect_equal(names(validator(aap=x>3)),'aap')
expect_equal(names(indicator(fiets=mean(x))),'fiets')
## cells works
cls <- cells(women, women)
expect_equivalent(cls[,1], cls[,2])
d <- as.data.frame(cls)
expect_true(inherits(d, "data.frame"))
expect_equal(nrow(d), 9*2)
expect_equal(ncol(d), 3)
# code for these methods in confrontation.R
## other methods for 'variables'
expect_equal(variables(women),c("height","weight"))
expect_equal(variables(as.list(women)),c("height","weight"))
expect_equal(variables(as.environment(women)),c("height","weight"))
## compare works
d1 <- data.frame(x=1:3,y=4:6)
d2 <- data.frame(x=c(NA,2,NA),y=c(4,5,NA))
v <- validator(x>0,y<5)
a <- array(
c( 6,6,0,0,0,4,4,0,2,2,0
,6,3,3,0,3,2,2,0,1,1,0 ),dim=c(11,2)
)
expect_equivalent(unclass(compare(v,d1,d2)),a)
d <- as.data.frame(compare(v,d1,d2))
expect_true(inherits(d,"data.frame"))
expect_equal(ncol(d),3)
expect_equal(nrow(d),11*2)
## comparison objects can be plotted
d1 <- data.frame(x=1:3,y=4:6)
d2 <- data.frame(x=c(NA,2,NA),y=c(4,5,NA))
rules <- validator(x>0,y<5)
expect_silent(plot(compare(rules, d1,d2)))
expect_silent(plot(cells(d1,d2)))
expect_equal(length(barplot(cells(d1,d2))),2)
expect_equal(length(barplot(compare(rules, d1,d2))),2)
## blocks works
v <- validator(x + y > z, q > 0, z + x == 3)
expect_equivalent(v$blocks()[[1]],c(1,3))
expect_equivalent(v$blocks()[[2]],2)
v <- validator(
x > 0
, y > 0
, x + y == z
, u + v == w
, u > 0)
expect_equal(length(v$blocks()),2)
v <- validator(x +y ==z, x+z>0)
expect_equal(length(v$blocks()),1)
## %vin% ----
expect_identical(
c("a","b") %vin% integer(0)
, logical(2)
)
expect_identical(
c("a","b") %vin% c("a","c","d")
, c(TRUE, FALSE)
)
expect_identical(
c("a",NA) %vin% c("a","c","d")
, c(TRUE, NA)
)
expect_identical(
c(NA,"b") %vin% c("a","c","d")
, c(NA,FALSE)
)
expect_identical(
c("a","b") %vin% c(NA,"c","d")
, c(NA,NA)
)
expect_identical(
c("a","b") %vin% c("a",NA,"d")
, c(TRUE,NA)
)
## utility record selector functions
expect_equal(
satisfying(women, validator(height>60))
, subset(women, height>60)
)
expect_equal(
satisfying(women, check_that(women, height>60))
, subset(women, height>60)
)
expect_equal(
violating(women, validator(height<=60))
, subset(women, height>60)
)
expect_equal(
violating(women, check_that(women, height<=60))
, subset(women, height>60)
)
local({
women[1,1] <- NA
expect_equal(
lacking(women, validator(height<=60))
, women[1,,drop=FALSE]
)
expect_equal(
lacking(women, check_that(women, height<=60))
, women[1,,drop=FALSE]
)
})
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_utils.R
|
## setting properties ----
v <- validator(x>0,y+x==1)
names(v)[1] <- "foo"
expect_equal(names(v),c("foo","V2"))
origin(v)[1] <- "faa"
expect_equivalent(origin(v),c("faa","command-line"))
label(v)[1] <- "fee"
expect_equivalent(label(v),c("fee",""))
description(v)[1] <- "foobar"
expect_equivalent(description(v),c("foobar",""))
# a name, label, origin or description should be a single 'character' element
expect_warning(origin(v)[1] <- c("fu","bar"))
expect_warning(names(v)[1] <- c("fu","bar"))
expect_warning(label(v)[1] <- c("fu","bar"))
expect_warning(description(v)[1] <- c("fu","bar"))
expect_true(
all(
c("language","severity") %in% names( meta(validator(x>0)) )
)
)
## composing validators ----
v <- validator(x>0) + validator(x<1)
expect_equal(length(v),2)
expect_true(!any(duplicated(names(v))))
## regression tests ----
# Issue #65 reported by Andrew R Gibson
# used to crash
v <- validator(weight<150, Fred < Jim)
created(v) <- rep(as.POSIXct('2015-01-01'), length(v))
# Issue #67 reported by Kevin Kuo
dat <- data.frame(A = c("X","Y"),B=c("Y","Y"),stringsAsFactors=FALSE)
expect_equivalent(values(check_that(dat,A == B)),array(c(FALSE,TRUE),dim=c(2,1)))
v <- validator(x>0) + validator(y>0)
# Issue #82 reported by Masafumi Okada
df <- data.frame(x=c("a","b"))
# used to crash 'which.call' because of bad comparison (using == crashes)
out <- check_that(df, x %in% c("a","b",NA))
# Issue #83 reported by Anne Petersen
v1 <- validator(sex == "Male")
# this should create a new copy but gave an error.
v2 <- v1[]
# Issue #82 reported by Anne Petersen
out <- capture.output(str(v1))
## plot validator works ----
v <- validator(x > 1, y + x > 1)
F <- plot(v)
v <- validator()
expect_message(plot(v),"No rules to be plotted")
v <- validator(x + y > 0)
F <- plot(v)
## rules are checked when reading from file
expect_warning(r <- validator(.file="txttests/rules.R"))
expect_equal(length(r),1)
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/test_validator.R
|
# one rule parses, another not
#
#
x > 10 # parses
mean(x) # should not parse
|
/scratch/gouwar.j/cran-all/cranData/validate/inst/tinytest/txttests/rules.R
|
if (knitr::is_latex_output()){
# note, the 'size' option does not work for some
# obscure reason not documented in the manuals.
# https://github.com/rstudio/rmarkdown/issues/388
knitr::opts_chunk$set(comment=NA)
options(width=60)
}
|
/scratch/gouwar.j/cran-all/cranData/validate/vignettes/chunk_opts.R
|
## Contents of clean_supermarkets.R
library(validate)
# 1. simulate reading data
data(SBS2000)
spm <- SBS2000[c("id","staff","turnover","other.rev","total.rev")]
# 2. add a logger from 'validate'
start_log(spm, logger=lbj_cells())
# 3. assume empty values should be filled with 0
spm <- transform(spm, other.rev = ifelse(is.na(other.rev),0,other.rev))
# 4. assume that negative amounts have only a sign error
spm <- transform(spm, other.rev = abs(other.rev))
# 5a. ratio estimator for staff conditional on turnover
Rhat <- with(spm, mean(staff,na.rm=TRUE)/mean(turnover,na.rm=TRUE))
# 5b. impute 'staff' variable where possible using ratio estimator
spm <- transform(spm, staff = ifelse(is.na(staff), Rhat * turnover, staff))
# 6. write output
write.csv(spm, "supermarkets_treated.csv", row.names = FALSE)
|
/scratch/gouwar.j/cran-all/cranData/validate/vignettes/clean_supermarkets.R
|
## Contents of clean_supermarkets2.R
library(validate)
#1.a simulate reading data
data(SBS2000, package="validate")
spm <- SBS2000[c("id","staff","other.rev","turnover","total.rev")]
# 1.b Create rule set
rules <- validator(staff >= 0, other.rev>=0, turnover>=0
, other.rev + turnover == total.rev)
# 2. add two loggers
start_log(spm, logger=lbj_cells())
start_log(spm, logger=lbj_rules(rules))
# 3. assume empty values should be filled with 0
spm <- transform(spm, other.rev = ifelse(is.na(other.rev),0,other.rev))
# 4. assume that negative amounts have only a sign error
spm <- transform(spm, other.rev = abs(other.rev))
# 5a. ratio estimator for staff conditional on turnover
Rhat <- with(spm, mean(staff,na.rm=TRUE)/mean(turnover,na.rm=TRUE))
# 5b. impute 'staff' variable where possible using ratio estimator
spm <- transform(spm, staff = ifelse(is.na(staff), Rhat * turnover, staff))
# 6. write output
write.csv(spm, "supermarkets_treated.csv", row.names = FALSE)
|
/scratch/gouwar.j/cran-all/cranData/validate/vignettes/clean_supermarkets2.R
|
---
title: "The Data Validation Cookbook"
author: "Mark P.J. van der Loo"
date: "`r Sys.Date()`"
output:
bookdown::html_document2:
theme: paper
toc: true
toc_depth: 3
toc_float:
collapsed: false
smooth_scroll: false
code_folding: none
code_download: false
vignette: >
%\VignetteIndexEntry{The Data Validation Cookbook}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r, include=FALSE}
source("chunk_opts.R")
```
# Preface {-}
This book is about checking data with the
[validate](https://cran.r-project.org/package=validate) package for
[R](https://www.r-project.org).
This version of the book was rendered with `validate` version
`r packageVersion("validate")`. The latest release of `validate` can be installed
from [CRAN](https://cran.r-project.org) as follows.
```{r, echo=TRUE, eval=FALSE}
install.packages("validate")
```
The purposes of this book include demonstrating the main tools and workflows of
the `validate` package, giving examples of common data validation tasks, and
showing how to analyze data validation results.
The book is organized as follows. Chapter \@ref(sect-intro) discusses the bare
necessities to be able to follow the rest of the book. Chapters
\@ref(sect-varlevelchecks) to \@ref(sect-statisticalchecks) form the 'cookbook'
part of the book and discuss many different ways to check your data by example.
Chapter \@ref(sect-indicators) is devoted to deriving plausibility measures
with the `validate` package. Chapters \@ref(sect-work) and
\@ref(sect-rulefiles) treat working with validate in-depth. Chapter
\@ref(sect-comparing) discusses how to compare two or more versions of a
dataset, possibly automated through the
[lumberjack](https://cran.r-project.org/package=lumberjack) package. The
section with Bibliographical Notes lists some references and points out some
literature for further reading.
## Prerequisites {-}
Readers of this book are expected to have some knowledge of R. In particular,
you should know how to import data into R and know a little about working with
data frames and vectors.
## Citing this work {-}
To cite the `validate` package please use the following citation.
> MPJ van der Loo and E de Jonge (2021). Data Validation Infrastructure for
> R. _Journal of Statistical Software_, 97(10) [paper](https://www.jstatsoft.org/article/view/v097i10).
To cite this cookbook, please use the following citation.
> MPJ van der Loo (`r substr(as.Date(Sys.time()),1,4)`) _The Data Validation Cookbook_
> version `r packageVersion("validate")`. [https://data-cleaning.github.io/validate](https://data-cleaning.github.io/validate/)
## Acknowledgements {-}
This work was partially funded by European Grant Agreement 88287--NL-VALIDATION
of the European Statistical System.
## Contributing {-}
If you find a mistake, or have some suggestions, please file an issue or a pull
request on the github page of the package:
[https://github.com/data-cleaning/validate](https://github.com/data-cleaning/validate).
If you do not have or want a github account, you can contact the author via the
e-mail address that is listed with the package.
## License {-}
```{r, echo=FALSE, include=!knitr::is_latex_output()}
#knitr::asis_output("
#[](https://creativecommons.org/licenses/by/4.0/)
#")
```
This work is licensed under the [Creative Commons Attribution-NonCommercial 4.0 International License](https://creativecommons.org/licenses/by-nc/4.0/).
# Introduction to validate {#sect-intro}
```{r, include=FALSE}
source("chunk_opts.R")
```
> Data Validation is an activity verifying whether or not a combination of
> values is a member of a set of acceptable combinations [(Di Zio et al , 2015)](https://www.markvanderloo.eu/files/share/zio2015methodology.pdf)
The validate package is intended to make checking your data easy, maintainable, and reproducible.
It does this by allowing you to
- test data against a reusable set of data validation rules;
- investigate, summarise, and visualise data validation results;
- import and export rule sets from and to various formats;
- filter, select and otherwise manipulate data validation rules;
- investigate, summarise, and visualise rule sets.
For advanced rule manipulation there is the
[validatetools](https://cran.r-project.org/package=validatetools) package.
```{r, include=FALSE, eval=knitr::is_latex_output()}
knitr::opts_chunk$set(comment=NA)
```
## A quick example
Here's an example demonstrating the typical workflow. We'll use the built-in
`cars` data set, which contains 50 cases of speed and stopping distances of
cars.
```{r }
data(cars)
head(cars, 3)
```
Validating data is all about checking whether a data set meets presumptions or
expectations you have about it, and the validate package makes it easy for you
to define those expectations. Let's do a quick check on variables in the
`cars` data set. We first load the package, and create a list
of data quality demands with the `validator()` function.
```{r}
library(validate)
rules <- validator(speed >= 0
, dist >= 0
, speed/dist <= 1.5
, cor(speed, dist)>=0.2)
```
Here, the first three rules are record-wise checks: each record will yield one
answer. In the last rule we check whether speed and distance are positively
correlated; this will yield a single `TRUE` or `FALSE` for the whole data set.
We now confront the data with those rules and save the output into a variable
called `out`.
```{r}
out <- confront(cars, rules)
```
The easiest way to check the results is with `summary()`.
```{r}
summary(out)
```
This returns a data frame with one line of information for each rule `V1`,
`V2`, `V3` and `V4`. To be precise:
- How many data items were checked against each rule.
- How many items passed, failed or resulted in `NA`.
- Whether the check resulted in an error (could not be performed) or gave a warning.
- The expression that was actually evaluated to perform the check.
The same information can be summarized graphically as follows
`r if( knitr::is_latex_output()) knitr::asis_output("(see Figure \\ref{fig:validateplot})")`.
```{r,fig.height=7, fig.width=7, echo=!knitr::is_latex_output(), eval=!knitr::is_latex_output()}
plot(out)
```
```{r, label="validateplot", fig.height=5, fig.width=5, out.width="0.7\\textwidth", fig.align="center", echo=knitr::is_latex_output(), eval=knitr::is_latex_output(), fig.env="figure",fig.pos="!t", fig.cap="Plot of validation output."}
plot(out)
```
In this plot each horizontal bar indicates the percentage of Failing, Passing,
and Missing cases. The table in the legend lists the total number of Fails,
Passes and Missings, summed over all checks. Here, we have 4 rules. The first
three rules yield 50 results each, while the last rule yields a single result.
Hence there are 151 validation results in total.
Using the function `violating` we can select the records that violate one or
more rules. We select only the first three results because the last rule can
not be interpreted record by record.
```{r}
violating(cars, out[1:3])
```
We can extract all individual results using, for example, `as.data.frame`.
```{r}
df_out <- as.data.frame(out)
head(df_out, 3)
```
We see that in record `1`, rule `V1`, was satisfied (the result is `TRUE`), and
that validate left a bit of slack when executing the rule, to avoid false
negatives caused by machine rounding issues.
Summarizing, the basic workflow in `validate` is to create a rule set, confront
a data set with the rules in the rule set, and then analyze or use the results
further. To understand which checks you can perform with `validate` you only
need to remember the following.
> Any R expression that results in a `logical` is accepted by `validate` as a
> validation rule.
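
For instance, both a pattern match and a check on a summary statistic produce
logicals, so both are accepted as rules. Here is a small additional sketch (not
part of the original example) using the same `cars` data.

```{r}
rules <- validator(
    grepl("^[0-9]+$", as.character(speed)) # one logical per record
  , sd(dist) > 0                           # one logical for the whole column
)
# suppress last column for brevity
summary(confront(cars, rules))[1:7]
```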
You are now ready to start validating your data, and navigate Chapters
\@ref(sect-availableunique)-\@ref(sect-statisticalchecks) to learn how to
define specific types of checks. Chapter \@ref(sect-work) discusses more
details about working with `validate`.
# Variable checks {#sect-varlevelchecks}
```{r, include=FALSE}
source("chunk_opts.R")
```
Variable checks are checks that can be performed on a field-by-field basis. An
example is checking that a variable called `Age` is nonnegative, or of integer
type. Variable checks are among the simplest checks.
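
As a small sketch (with made-up data, not part of the original text), such
checks could look as follows.

```{r}
d <- data.frame(Age = c(23L, -1L, 40L))
rules <- validator(Age >= 0, is.integer(Age))
# suppress last column for brevity
summary(confront(d, rules))[1:7]
```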
**Data**
In this section we will use the `SBS2000` dataset, that is included with `validate`.
```{r}
library(validate)
data(SBS2000)
head(SBS2000, 3)
```
See `?SBS2000` for a description.
## Variable type
In `R`, one can test the type of a variable using built-in functions such as
`is.numeric` or `is.character`.
```{r}
is.character("hihi")
is.character(3)
```
In `validate`, any function starting with `is.` ('is' followed by a dot) is
considered a validation function.
```{r}
rules <- validator(
is.character(size)
, is.numeric(turnover)
)
out <- confront(SBS2000, rules)
summary(out)
```
We see that each rule checks a single item, namely one column of data. The
first rule is violated (it is in fact a `factor` variable). The second rule
is satisfied.
## Missingness {#sect-missingness}
Use R's standard `is.na()` to check missing items in individual variables. Negate
it to check that values are available.
```{r}
rule <- validator(
!is.na(turnover)
, !is.na(other.rev)
, !is.na(profit)
)
out <- confront(SBS2000, rule)
summary(out)
```
We see that in `r summary(out)$fails[1]` cases the variable `turnover` is missing,
while `other.rev` and `profit` are missing respectively in `r summary(out)$fails[2]`
and `r summary(out)$fails[3]` occasions.
To demand that all items must be present or absent for a certain variable,
use R's quantifiers: `any()` or `all()`, possibly negated.
```{r}
rules <- validator(
!any(is.na(incl.prob))
, all(is.na(vat)) )
out <- confront(SBS2000, rules)
summary(out)
```
- To check whether records or parts thereof are completed, see \@ref(sect-iscomplete).
- To check whether records are available at all, see \@ref(sect-completeness).
## Field length
The number of characters in text fields can be tested using either R's standard
`nchar()` function, or with the convenience function `field_length`.
```{r}
rules <- validator(
nchar(as.character(size)) >= 2
, field_length(id, n=5)
, field_length(size, min=2, max=3)
)
out <- confront(SBS2000, rules)
summary(out)
```
One advantage of `field_length` is that its argument is converted to
character (recall that `size` is a `factor` variable). The function
`field_length` can be used to either test for exact field lengths or to
check whether the number of characters is within a certain range.
The field length is measured as the number of [code
points](https://en.wikipedia.org/wiki/Code_point). Use `type="width"` to
measure the printed width (nr of columns) or `type="bytes"` to count the number
of bytes.
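
The following is a brief sketch of the difference. The example string is our
own and we assume a UTF-8 locale, where `"é"` occupies one code point but two
bytes.

```{r}
x <- "h\u00e9llo"                       # "héllo": 5 code points, 6 bytes in UTF-8
field_length(x, n = 5)                  # measured in code points (the default)
field_length(x, n = 6, type = "bytes")  # measured in bytes
```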
## Format of numeric fields
For numbers that are stored in `character` type, there is a convenience
function called `number_format()` that accepts a variable name
and a format specification.
```{r}
dat <- data.frame(x = c("2.54","2.66","8.142","23.53"))
```
To check that the numbers are formatted with one figure before, and two figures
after the decimal point, we perform the following check.
```{r}
rule <- validator( number_format(x, format="d.dd"))
values(confront(dat, rule))
```
Here, the specification `format="d.dd"` describes the allowed numeric formats.
In this specification the `"d"` stands for a digit, any other character except
the asterisk (`*`) stands for itself. The asterisk is interpreted as 'zero or
more digits'. Here are some examples of how to define number formats.
|format | match | non-match |
|-------------|-------------------------------|-------------------------------------|
|`0.dddd` | `"0.4321"` | `"0.123"`,`"1.4563"` |
|`d.ddEdd` | `"3.14E00"` | `"31.14E00"` |
|`d.*Edd` | `"0.314E01"`,`"3.1415297E00"` | `"3.1415230"` |
|`d.dd*` | `"1.23"`, `"1.234"`,$\ldots$ | `"1.2"` |
The last example shows how to check for a minimal number of digits behind the decimal
point.
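
As a quick illustration we check two of the formats from the table against
some example values of our own.

```{r}
number_format(c("0.4321", "0.123"), format = "0.dddd")
number_format(c("1.23", "1.234", "1.2"), format = "d.dd*")
```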
There are special arguments to check the number of decimal figures
after the decimal separator.
```{r}
x <- c("12.123","123.12345")
number_format(x, min_dig=4)
number_format(x, max_dig=3)
number_format(x, min_dig=2, max_dig=4)
number_format(x, min_dig=2, max_dig=10)
# specify the decimal separator.
number_format("12,123", min_dig=2, dec=",")
```
The arguments `min_dig`, `max_dig` and `dec` are ignored when `format` is
specified.
This function is convenient only for fairly simple number formats. Generic
pattern matching in strings is discussed in the next section.
## General field format
A simple way to check for more general format is to use [globbing
patterns](https://en.wikipedia.org/wiki/Glob_(programming)). In such patterns,
the asterisk wildcard character (`*`) is interpreted as 'zero or more
characters' and the question mark (`?`) is interpreted as 'any character'.
For example, to check that the `id` variable in `SBS2000` starts with `"RET"`,
and that the `size` variable has consists of `"sc"` followed by precisely one
character, we can do the following.
```{r}
rule <- validator(field_format(id, "RET*")
, field_format(size, "sc?" ))
out <- confront(SBS2000, rule)
summary(out)
```
Here, the globbing pattern `"RET*"` is understood as 'a string starting with
`"RET"`, followed by zero or more characters'. The pattern `"sc?"` means 'a
string starting with `"sc"`, followed by a single character'.
The most general way to check whether a field conforms to a pattern is to use a
[regular expression](https://en.wikipedia.org/wiki/Regular_expression). The
treatment of regular expressions is out of scope for this book, but we will
give a few examples. A good introduction to regular expressions is given by
> J. Friedl (2006) _Mastering regular expressions._ O'Reilley Media.
In `validate` one can use `grepl` or `field_format`, with the argument `type="regex"`
```{r}
rule <- validator(
grepl("^sc[0-9]$", size)
, field_format(id, "^RET\\d{2}$" , type="regex") )
summary(confront(SBS2000, rule))
```
Here, the expression `"^sc[0-9]$"` is a regular expression that should be read
as: the string starts (`"^"`) with `"sc"`, is followed by a number between 0
and 9 (`"[0-9]"`) and then ends (`"$"`). The regular expression `"^RET\\d{2}$"`
indicates that a string must start (`"^"`) with `"RET"`, followed by two
digits (`"\\d{2}"`), after which the string must end (`"$"`).
Globbing patterns are easier to develop and easier to understand than regular
expressions, while regular expressions offer far more flexibility but are
harder to read. Complex and long regular expressions may have subtle matching
behaviour that is not immediately obvious to inexperienced users. It is
therefore advisable to test regular expressions with a small dataset
representing realistic cases that contains both matches and non-matches. As a
rule of thumb we would advise to use globbing patterns unless those offer
insufficient flexibility.
## Numeric ranges
Numerical variables may have natural limits from below and/or
above. For one-sided ranges, you can use the standard comparison operators.
```{r}
rules <- validator(TO = turnover >= 0
, TC = total.costs >= 0)
```
If a variable is bounded both from above and below one can use two rules,
or use the convenience function `in_range`.
```{r}
rules <- rules +
validator(PR = in_range(incl.prob, min=0, max=1))
```
By default, `in_range` includes the boundaries of the range, so the above rule
is equivalent to `incl.prob >= 0` and `incl.prob <= 1`.
```{r}
out <- confront(SBS2000, rules, lin.ineq.eps=0)
```
Here we set `lin.ineq.eps=0` to keep `validate` from building in a
margin for machine rounding errors.
```{r}
summary(out)
```
For numeric ranges it is often a better idea to work with inclusive
inequalities ($\leq$, $\geq$) than with strict inequalities ($<$, $>$). Take
as an example the strict inequality demand `income > 0`. This means that _any_
income larger than zero is acceptable, including numbers such as $0.01$,
$0.000001$ and $10^{-\textrm{Googol}}$. In practice there is almost always a
natural minimal acceptable value that is usually dictated by the unit of
measurement. For example, if we measure income in whole Euros, a better demand
would be `income >= 1`.
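
The following sketch (with made-up income values, not part of the original
text) shows the difference between the two formulations. We switch off the
margin for machine rounding to keep the comparison exact.

```{r}
dat <- data.frame(income = c(0, 0.01, 250))
rules <- validator(strict = income > 0, inclusive = income >= 1)
values(confront(dat, rules, lin.ineq.eps = 0))
```

The value $0.01$ passes the strict rule but fails the inclusive one.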
## Ranges for times and periods
For objects of class `Date` and objects of class `POSIXct` one can use comparison
operators and `in_range` in the same way as for numerical data. The `in_range` function
has a convenience feature for period data that is coded in character data, as in
`"2018Q1"` for quarterly data.
We first generate some example data.
```{r}
period = sprintf("2018Q%d", 1:4)
period
```
The `in_range` function is capable of recognizing certain date or period
formats.
```{r}
in_range(period, min="2017Q2", max = "2018Q2")
```
It is possible to specify your own date-time format using `strftime` notation.
See `?in_range` and `?strptime` for specifications.
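
For `Date` objects no format specification is needed. A minimal sketch with
made-up dates:

```{r}
d <- as.Date(c("2017-12-31", "2018-06-01"))
in_range(d, min = as.Date("2018-01-01"), max = as.Date("2018-12-31"))
```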
## Code lists
A code list is a set of values that a variable is allowed to assume. For small
code lists, one can use the `%in%` operator.
```{r}
rule <- validator(size %in% c("sc0","sc1","sc2","sc3"))
out <- confront(SBS2000, rule)
summary(out)
```
Notice that `validate` replaces `%in%` with `%vin%`. The reason is that `%vin%` has more consistent
behavior in the case of missing data. In particular,
```{r}
c(1, 3, NA) %in% c(1,2)
c(1, 3, NA) %vin% c(1,2)
```
For longer code lists it is convenient to refer to an externally provided list.
There are two ways of doing this: reading the list in the right-hand side of `%in%`,
or passing a code list to `confront` as reference data.
Suppose we have a file called `codelist.csv` with a column `code`. We can define
a rule as follows.
```{r}
rule <- validator(
x %in% read.csv("codelist.csv")$code
)
## Or, equivalently
rule <- validator(
valid_codes := read.csv("codelist.csv")$code
, x %in% valid_codes
)
```
The disadvantage is that the rule now depends on a path that may or may not be
available at runtime.
The second option is to assume that a variable, say `valid_codes` exists at
runtime, and pass this with `confront`.
```{r}
codelist <- c("sc0","sc1","sc2","sc3")
rule <- validator(size %in% valid_codes)
# pass the codelist
out <- confront(SBS2000, rule
, ref=list(valid_codes=codelist))
summary(out)
```
This way, (very) large code lists can be used, but note that it does
require a 'contract' between variable names used in the rule set
and variables passed as reference data.
# Availability and uniqueness {#sect-availableunique}
```{r, include=FALSE}
source("chunk_opts.R")
```
In this Chapter it is demonstrated how to check whether records are available
and/or complete with respect to a set of keys, and whether they are unique.
The checks described here are typically useful for data in 'long' format, where
one column holds a value and all the other columns identify the value.
- To test for missing values in individual variables, see also \@ref(sect-missingness).
- To check whether records or parts thereof are completed, see \@ref(sect-iscomplete).
**Data**
In this Chapter the `samplonomy` dataset is used that comes with the `validate`
package.
```{r}
library(validate)
data(samplonomy)
head(samplonomy, 3)
```
## Long data
The samplonomy data set is structured in 'long form'. This means that each
record has a single `value` column, and one or more columns containing
character values that together describe what the value means.
```{r}
head(samplonomy,3)
```
The data set contains several time series for multiple measures
of the fictional country 'Samplonia'. There are time series for several
subregions of Samplonia.
Long format data is typically used as a transport format: it may be used to
bulk-load data into SQL-based data base systems, or to transfer data between
organisations in an unambiguous way.
Data in long form is in general much harder to check and process for
statistical purpose than data in wide format, where each variable is stored in
a separate column. The reason is that in long format relations between
different variables are spread out across records, and those records are not
necessarily ordered in any particular way prior to processing. This makes
interpretation of validation fails intrinsically harder for long-form data than
for wide-form data.
The `samplonomy` data set has a particularly nasty structure. It contains both
annual and quarterly time series for GDP, Import, Export and the Balance of
Trade (export less import). The period column therefore contains both quarterly
and annual labels. Furthermore, there are time series for the whole of
Samplonia (region Samplonia), for each of its two provinces (regions Agria and
Induston) and for each of its districts within Agria (Wheaton and Greenham) and
Induston (Smokely, Mudwater, Newbay and Oakdale).
Naturally, we expect that the key combinations are unique, that all time series
are gapless and complete, that the Balance of trade equals Export less Import
everywhere, that district values add up to the provinces', and that province
values add up to the total of Samplonia. Finally, the quarterly time series
must add up to the annual values.
## Uniqueness {#sect-uniqueness}
The function `is_unique()` checks whether combinations of variables (usually
key variables) uniquely identify a record. It accepts any positive number of
variable names and returns `FALSE` for each record that is duplicated with
respect to the designated variables.
Here, we test whether region, period, and measure uniquely identify a value in
the `samplonomy` data set.
```{r}
rule <- validator(is_unique(region, period, measure))
out <- confront(samplonomy, rule)
# showing 7 columns of output for readability
summary(out)[1:7]
```
There are `r summary(out)$fails` fails. After extracting the individual
values for each record we can find the duplicated ones using a
convenience function from `validate`.
```{r}
violating(samplonomy, out)
```
There are two subtleties to keep in mind when interpreting uniqueness. The
first has to do with missing values, and the second has to do with grouping.
To start with the missing value problem, take a look at the following two-record
data frame.
```{r}
df <- data.frame(x = c(1,1), y = c("A",NA))
df
```
How should we judge whether these two records are unique? A tempting option is
to say that the first record is unique, and to return `NA` for the second record
since it contains a missing value: R has the habit of returning `NA` from
calculations when an input value is `NA`. This choice is not invalid, but it
would have consequences for determining whether the first record is unique as
well. After all, it is possible to fill in a value in the missing field such
that the two records are duplicates. Therefore, if one would return `NA` for
the second record, the correct thing to do is to also return `NA` for the first
record. In R, the choice is made to treat `NA` as an actual value when checking
for duplicates or unique records (see `?duplicated` from base R). To see this
inspect the following code and output.
```{r}
df <- data.frame(x=rep(1,3), y = c("A", NA, NA))
is_unique(df$x, df$y)
```
The second subtlety has to do with grouping. You may want to test whether a
column is unique, given one or more other variables. It is tempting to think
that this requires a split-apply-combine approach where the dataset is first
split according to one or more grouping variables, check for uniqueness of the
column in each group, and then combine the results. However, such an approach
is not necessary as you can simply add the grouping variables to the list of
variables that _together_ must be unique.
As an example, consider the output of the following two approaches.
```{r}
# y is unique, given x. But not by itself
df <- data.frame(x=rep(letters[1:2],each=3), y=rep(1:3,2))
# the split-apply-combine approach
unsplit(tapply(df$y, df$x, is_unique), df$x)
# the combined approach
is_unique(df$x, df$y)
```
## Availability of records {#sect-completeness}
This section is on testing for availability of whole records. Testing for individual
missing values (`NA`) is treated in \@ref(sect-missingness).
We wish to ensure that for each region, and each variable, the periods 2014,
2015, $\ldots$, 2019 are present. Using `contains_at_least` we can establish
this.
```{r}
rule <- validator(
contains_at_least(
keys = data.frame(period = as.character(2014:2019))
, by=list(region, measure) )
)
out <- confront(samplonomy, rule)
# showing 7 columns of output for readability
summary(out)[1:7]
```
The function `contains_at_least` splits the `samplonomy` dataset into blocks
according to values of `region` and `measure`. Next, it checks that in each
block the variable `period` contains at least the values 2014--2019.
The return value is a logical vector where the number of elements equals the
number of rows in the dataset under scrutiny. It is `TRUE` for each block
where all years are present, and `FALSE` for each block where one or more of the
years is missing. In this case 29 records are labeled as FALSE. These
can be found as follows.
```{r}
head(violating(samplonomy, out))
```
Inspection of these records shows that in this block, for `Agria` the GDP
for `"2015"` is missing.
We can perform a stricter check, and test whether for each `measure`, all
quarters `"2014Q1"` $\ldots$ `"2019Q4"` are present for each province (`Agria`
and `Induston`). First create a key set to test against.
```{r}
years <- as.character(2014:2019)
quarters <- paste0("Q",1:4)
keyset <- expand.grid(
region = c("Agria", "Induston")
, period = sapply(years, paste0, quarters))
head(keyset)
```
This key set will be referenced in the rule, and passed to `confront` as reference
data.
```{r}
rule <- validator(
contains_at_least(keys=minimal_keys, by=measure)
)
out <- confront(samplonomy, rule
, ref=list(minimal_keys=keyset))
# showing 7 columns of output for readability
summary(out)[1:7]
```
There are `r summary(out)$fails` fails. Inspecting the data set as above, we
see that for Induston, the `export` is missing in `"2018Q3"`.
Finally, we do a strict test, to check that for each `measure` all periods and
all regions are reported. We also demand that there are no more and no less
records than for each individual measure. For this, the function
`contains_exactly` can be used.
First create a keyset.
```{r}
years <- as.character(2014:2019)
quarters <- paste0("Q",1:4)
keyset <- expand.grid(
region = c(
"Agria"
,"Crowdon"
,"Greenham"
,"Induston"
,"Mudwater"
,"Newbay"
,"Oakdale"
,"Samplonia"
,"Smokely"
,"Wheaton"
)
,period = c(years, sapply(years, paste0, quarters))
)
head(keyset)
```
The keyset is passed as reference data to the rule using `confront`.
```{r}
rule <- validator(contains_exactly(all_keys, by=measure))
out <- confront(samplonomy, rule
, ref=list(all_keys=keyset))
# showing 7 columns of output for readability
summary(out)[1:7]
```
To find where the errors reside, we first select the records with an error and
then find the unique measures that occur in those records.
```{r}
erroneous_records <- violating(samplonomy, out)
unique(erroneous_records$measure)
```
So here, blocks containing GDP and Export have entire records missing.
## Gaps in (time) series
For time series, or possibly other series, it is desirable that there is a
constant distance between two consecutive elements of the series. Such a
series is called a _linear sequence_.
Here are some examples of linear series.
- The natural numbers: $1,2,3,\ldots$
- The even natural numbers $2, 4, 6, \ldots$
- Quarters periods: `"2020Q1"`, `"2020Q2"`, $\ldots$
- Years (these are just natural numbers): $2019, 2020, \ldots$
The `validate` functions `is_linear_sequence` and `in_linear_sequence` check
whether a variable represents a linear series, possibly in blocks defined by
categorical variables. They can be used interactively or as a rule in a
validator object. We first demonstrate how these functions work, and then give
an example with the `samplonomy` dataset.
```{r}
is_linear_sequence(c(1,2,3,4))
is_linear_sequence(c(8,6,4,2))
is_linear_sequence(c(2,4,8,16))
```
For character data, the function is capable of recognizing certain formats
for time periods.
```{r}
is_linear_sequence(c("2020Q1","2020Q2","2020Q3","2020Q4"))
```
See `?is_linear_sequence` for a full specification of supported
date-time formats.
It is not necessary for data to be sorted in order to be recognized as a
linear sequence.
```{r}
is_linear_sequence(c("2020Q4","2020Q2","2020Q3","2020Q1"))
```
One can force a begin and/or end point for the sequence as well.
```{r}
is_linear_sequence(c("2020Q4","2020Q2","2020Q3","2020Q1")
, begin = "2020Q2")
```
Finally it is possible to split a variable by one or more other columns and
check whether each block represents a linear sequence.
```{r}
series <- c(1,2,3,4,1,2,3,3)
blocks <- rep(c("a","b"), each = 4)
is_linear_sequence(series, by = blocks)
```
Now, this result is not very useful since it is unknown which block
is not a linear series. This is where the function `in_linear_sequence` comes in.
```{r}
in_linear_sequence(series, by = blocks)
```
There are some subtleties. A single element is also a linear sequence (of length 1).
```{r}
is_linear_sequence(5)
```
This can yield surprises in cases of blocks of length 1.
```{r}
blocks[8] <- "c"
data.frame(series = series, blocks = blocks)
in_linear_sequence(series, blocks)
```
We now have three linear series, namely
- For `"a"`: `1,2,3,4`
- For `"b"`: `1,2,3`
- For `"c"`: `3`.
We can circumvent this by giving explicit bounds.
```{r}
in_linear_sequence(series, blocks, begin = 1, end = 4)
```
We now return to the `samplonomy` dataset. We wish to check that for
each measure and each area, the time series are linear series. Since there
are time series of different frequencies, we need to split the data by frequency
as well.
```{r}
rule <- validator(
in_linear_sequence(period
, by = list(region, freq, measure))
)
out <- confront(samplonomy, rule)
summary(out)[1:7]
```
We can find the blocks where records are not in sequence as follows (output not
printed here for brevity).
```{r, results='hide'}
violating(samplonomy, out)
```
Inspection of the selected records shows that for Agria the GDP for 2015 is
missing, and that for Induston the Export for 2018Q3 is missing while Export
for 2018Q2 occurs twice (but with different values).
# Multivariate checks
```{r, include=FALSE}
source("chunk_opts.R")
```
In this Chapter we treat tests that involve relationships between variables.
**Data**
In this Chapter we will use the `SBS2000` dataset that comes with `validate`.
```{r}
library(validate)
data(SBS2000)
head(SBS2000, 3)
```
## Completeness of records {#sect-iscomplete}
The functions `is_complete()` and `all_complete()` are convenience functions
that test for missing values or combinations thereof in records.
```{r}
rules <- validator(
is_complete(id)
, is_complete(id, turnover)
, is_complete(id, turnover, profit )
, all_complete(id)
)
out <- confront(SBS2000, rules)
# suppress last column for brevity
summary(out)[1:7]
```
Here, the first rule checks for missing data in the `id` variable, the second
rule checks whether subrecords with `id` and `turnover` are complete, and the
third rule checks whether subrecords with `id`, `turnover` and `profit` are
complete. The output is one logical value (`TRUE` or `FALSE`) for each record.
The fourth rule tests whether _all_ values are present in the `id` column, and
it results in a single `TRUE` or `FALSE`.
- To test for missing values in individual variables, see also \@ref(sect-missingness).
- To check whether records are available at all, see \@ref(sect-completeness).
## Balance equalities and inequalities
Balance restrictions occur for example in economic microdata, where financial
balances must be met.
```{r}
rules <- validator(
total.rev - profit == total.costs
, turnover + other.rev == total.rev
, profit <= 0.6*total.rev
)
out <- confront(SBS2000, rules)
summary(out)
```
Here, the first rule checks a balance between income, costs, and profit; the
second rule checks a sub-balance, and the third rule is a plausibility check
where we do not expect profit to exceed 60 per cent of the total revenue.
Observe that the expressions have been altered by `validate` to account for
possible machine rounding differences. Rather than testing whether variable $x$
equals variable $y$, `validate` will check $|x-y|\leq \epsilon$, where the
default value of $\epsilon$ is $10^{-8}$. The value of this tolerance can be
controlled for linear equalities and inequalities using respectively
`lin.eq.eps` and `lin.ineq.eps`.
```{r}
out <- confront(SBS2000, rules, lin.ineq.eps=0, lin.eq.eps=0.01)
summary(out)
```
See \@ref(sect-options) for more information on setting and resetting options.
## Conditional restrictions
Conditional restrictions are all about demanding certain value combinations.
In the following example we check that a business with staff also has staff
costs.
```{r}
rule <- validator(if (staff >= 1) staff.costs >= 1)
out <- confront(SBS2000, rule)
summary(out)
```
Here, combinations where there is a positive number of staff must be
accompanied with a positive staff cost.
Validate translates the rule `if ( P ) Q` to an expression of the form `!P |
Q`. The reason for this is that the latter can be evaluated faster
(vectorised).
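
A tiny illustration of this translation (with example vectors of our own):
evaluating `!P | Q` directly yields one result per record.

```{r}
P <- c(TRUE, TRUE, FALSE, FALSE)
Q <- c(TRUE, FALSE, TRUE, FALSE)
!P | Q
```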
The results are to be interpreted as follows. For each record, `validate` will
check that cases where `staff>=1` are accompanied by `staff.costs >= 1`. In
cases where this test results in `FALSE` this means that either the staff
number is too high, or the staff costs are too low. To be precise, the results
of a conditional restriction match those of an implication in first-order
logic as shown in the truth table below.
$$
\begin{array}{ll|c}
P & Q & P\Rightarrow Q\\
\hline
T & T & T\\
T & F & F\\
F & T & T\\
F & F & T\\
\end{array}
$$
## Forbidden value combinations
In some cases it is more convenient to have a list of forbidden (key) value
combinations than specifying such combinations individually. The function
`does_not_contain()` supports such situations.
As an example, let's first create some transaction data.
```{r}
transactions <- data.frame(
sender = c("S21", "X34", "S45","Z22")
, receiver = c("FG0", "FG2", "DF1","KK2")
, value = sample(70:100,4)
)
```
We assume that it is not possible for senders with codes starting with an `"S"`
to send something to receivers starting with `FG`. A convenient way to encode
such demands is to use
[globbing patterns](https://en.wikipedia.org/wiki/Glob_(programming)).
We create a data frame that lists forbidden combinations (here: one combination
of two key patterns).
```{r}
forbidden <- data.frame(sender="S*",receiver = "FG*")
```
Note that the column names of this data frame correspond to the columns in the transactions
data frame. We are now ready to check our transactions data frame.
```{r}
rule <- validator(does_not_contain(glob(forbidden_keys)))
out <- confront(transactions, rule, ref=list(forbidden_keys=forbidden))
## Suppress columns for brevity
summary(out)[1:7]
```
Observe that we use `glob(forbidden_keys)` to tell `does_not_contain` that the
key combinations in the `forbidden_keys` must be interpreted as globbing
patterns.
The records containing forbidden keys can be selected as follows.
```{r}
violating(transactions, out)
```
It is also possible to use regular expression patterns, by labeling the
forbidden key set with `rx()`. If no labeling is used, the key sets are
interpreted as string literals.
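
The following sketch re-uses the transactions data from above; the regular
expression patterns are our own translation of the globbing example.

```{r}
forbidden_rx <- data.frame(sender = "^S", receiver = "^FG")
rule <- validator(does_not_contain(rx(forbidden_keys)))
out <- confront(transactions, rule, ref = list(forbidden_keys = forbidden_rx))
violating(transactions, out)
```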
# Statistical checks {#sect-statisticalchecks}
```{r, include=FALSE}
source("chunk_opts.R")
```
Statistical checks involve group properties such as the means of columns. These
characteristics can be checked for whole columns or grouped by one or more
categorical variables. It is also possible to use group-wise computed
statistics in validation rules, for example to compare individual values with
a mean within their group.
For long-form data it is possible to compare aggregate values with underlying
details, for example to test whether quarterly time series add up to annual
totals. It is also possible to check properties of groups, for example whether
in every household (a group of persons) there is exactly one head of household.
**Data**
In this Chapter we will use the `SBS2000` dataset that comes with `validate`.
```{r}
library(validate)
data(SBS2000)
head(SBS2000, 3)
```
We shall also use the `samplonomy` dataset that comes with `validate`. See also
\@ref(long-data).
```{r}
data(samplonomy)
head(samplonomy, 3)
```
## Statistical and groupwise characteristics {#sect-groupwise}
Any R expression that ultimately is an equality or inequality check is
interpreted as a validation rule by validate. This means that any statistical
calculation can be input to a rule.
Here we check the mean profit and correlation coefficient between profit and
turnover.
```{r}
rule <- validator(
mean(profit, na.rm=TRUE) >= 1
, cor(turnover, staff, use="pairwise.complete.obs") > 0
)
out <- confront(SBS2000, rule)
# suppress some columns for brevity
summary(out)[1:7]
```
There are a few helper functions to compute group-wise statistics, and to make
comparing values with group aggregates possible.
For example, here we check whether each turnover is less than ten times
the group-wise median.
```{r}
rule <- validator(
turnover <= 10*do_by(turnover, by=size, fun=median, na.rm=TRUE)
)
out <- confront(SBS2000, rule)
# suppress some columns for brevity
summary(out)[1:7]
```
Here, in the right-hand side of the rule the group-wise median of turnover is
computed. The function `do_by` is very similar to functions such as `tapply`
in base R. The difference is that `do_by` works on vectors only (not on data
frames) and always repeats the values of `fun` so that the length of the output is
equal to the length of the input.
```{r}
medians <- with(SBS2000, do_by(turnover, by=size, fun=median, na.rm=TRUE))
head(data.frame(size = SBS2000$size, median=medians))
```
There are also some convenience functions, including `sum_by`, `mean_by`, `min_by`, and `max_by`.
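
A short sketch, assuming these convenience functions follow the same interface
as `do_by()`: a vector, a `by` argument, and further arguments passed on to the
underlying function.

```{r, eval=FALSE}
# assumed interface: mean_by(x, by, ...), analogous to do_by(x, by, fun, ...)
means <- with(SBS2000, mean_by(turnover, by = size, na.rm = TRUE))
head(data.frame(size = SBS2000$size, mean = means))
```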
## Group properties
In this section, we group data by one or more categorical variables and
check for each group whether a rule is satisfied. In particular we are
going to check whether each household in a small dataset has a unique
'head of household'.
We first create some data with household id (`hhid`) a person id (`person`) and
that person's role in the household (`hhrole`).
```{r}
d <- data.frame(
hhid = c(1, 1, 2, 1, 2, 2, 3 )
, person = c(1, 2, 3, 4, 5, 6, 7 )
, hhrole = c("h","h","m","m","h","m","m")
)
d
```
With `exists_one()` we can check that there is exactly one person
with the role `"h"` (head) in each household, by grouping on household id.
```{r}
rule <- validator(exists_one(hhrole == "h", by=hhid))
out <- confront(d, rule)
# suppress some columns for brevity
summary(out)
```
We can inspect the results by selecting the violating record groups.
```{r}
violating(d, out)
```
We see that household 1 has two heads of household, while household 3 has no head
of household.
To test whether _at least one_ head of household exists, one can use
`exists_any`:
```{r}
violating(d, validator(exists_any(hhrole=="h",by=hhid) ))
```
In the following example we check whether there is exactly one region called Samplonia
for each period and each measure in the `samplonomy` dataset.
```{r}
rule <- validator(exists_one(region=="Samplonia", by=list(period, measure)))
```
The first argument of `exists_one()` is a rule that has to be checked in every group
indicated by the `by` argument. The output is a logical vector with an element for
each record in the dataset under scrutiny. If a group of data fails the test, each record
in that group is indicated as wrong (`FALSE`).
```{r}
out <- confront(samplonomy, rule)
# suppress some columns for brevity
summary(out)[1:7]
```
Here, there are no groups that violate this assumption.
```{r}
violating(samplonomy, out)
```
## Code hierarchies and aggregation
Classifications and ontologies often have a hierarchical structure.
A well-known example is the
[NACE](https://en.wikipedia.org/wiki/Statistical_Classification_of_Economic_Activities_in_the_European_Community) classification of economic activities. In the NACE classification,
the economy is divided into broad types of activities such as 'Agriculture'
or 'Mining and Quarrying', and each activity is again divided into subclasses,
such as 'Growing of rice' and 'Growing of grapes' under 'Agriculture'. The
subdividing can go on for several levels. For statistics that describe an
economy according to the NACE classification, it is desirable that the
statistics of subclasses add up to those of their parent classes. This is what the
function `hierarchy()` in `validate` checks.
The `validate` package comes with a version of the NACE classification
(Revision 2, 2008) so we will use that as an example.
```{r}
data(nace_rev2)
head(nace_rev2[1:4])
```
The second and third columns contain the necessary information: they list the
parent for each NACE code (where each parent is also a NACE code). To demonstrate
how `hierarchy()` works, we first create some example data.
```{r}
dat <- data.frame(
nace = c("01","01.1","01.11","01.12", "01.2")
, volume = c(100 ,70 , 30 ,40 , 25 )
)
dat
```
We see that the volumes for subclasses `"01.11"` and `"01.12"` add up to
`"01.1"` ( $30+40=70$ ). However, the volumes for `"01.1"` and `"01.2"` do not
add up to the volume for `"01"` ($70+25\not=100$). The `hierarchy()` function
checks all these relations.
Before using `hierarchy` in the setting of a `validator` object, we can examine
it directly.
```{r}
dat$check <- hierarchy(dat$volume, dat$nace, nace_rev2[3:4])
dat
```
We see that `hierarchy()` returns a `logical` vector with one element for each
record in the data. Each record that is involved in one or more aggregation
checks that fail is labeled `FALSE`. Here, this concerns the records with
labels `"01"`, `"01.1"` and `"01.2"`.
We will next look at a more complicated example, but first note the following.
The `hierarchy()` function
- can handle any statistical aggregate, `sum()` is just the default;
- supports globbing and regular expressions in the child values;
- has an adjustable tolerance value for comparing observed with computed aggregates;
- has configurable behaviour for cases of missing data;
- can be applied per-group, defined by one or more grouping variables (see next example).
See the help file `?hierarchy` for specification and examples.
**A more complicated example**
Samplonia is divided in two districts, each of which is divided into several
provinces. Let us define the hierarchical code list.
```{r}
samplonia <- data.frame(
region = c("Agria", "Induston"
, "Wheaton", "Greenham"
, "Smokely", "Mudwater", "Newbay", "Crowdon")
, parent = c(rep("Samplonia",2), rep("Agria",2), rep("Induston",4))
)
samplonia
```
Recall the structure of the `samplonomy` dataset.
```{r}
data(samplonomy)
head(samplonomy)
```
We will check whether regions sum to their parent regions, for each period
and for each measure.
```{r}
rule <- validator(
hierarchy(value, region, hierarchy=ref$codelist, by=list(period, measure))
)
out <- confront(samplonomy, rule, ref=list(codelist=samplonia))
summary(out)
```
We see that some aggregates add up correctly, and some don't. There is also
a warning which we should investigate.
```{r}
warnings(out)
```
If one of the groups contains a parent more than once, it is not possible
to check whether the child values add up to the aggregate. For this reason
the duplicated parent and all of its children are marked `FALSE`. Indeed, we
find a duplicated record.
```{r}
subset(samplonomy, region == "Induston" &
period == "2018Q2" &
measure == "export")
```
Just to see if we can remove the warning, let us remove the duplicate
and re-run the check.
```{r}
i <- !duplicated(samplonomy[c("region","period","measure")])
samplonomy2 <- samplonomy[i, ]
out <- confront(samplonomy2, rule, ref=list(codelist=samplonia))
# suppress some columns for brevity
summary(out)[1:7]
```
The `hierarchy()` function marks every record that is involved in a failing
check as `FALSE`. This may make it hard to figure out which check a record failed.
One can get more detailed information by checking different parts
of the hierarchy in separate rules.
```{r}
rules <- validator(
level0 = hierarchy(value, region, ref$level0, by=list(period, measure))
, level1 = hierarchy(value, region, ref$level1, by=list(period, measure))
)
out <- confront(samplonomy2, rules
, ref=list(level0=samplonia[1:2,], level1=samplonia[3:8,])
)
summary(out)
```
We can now select records involved in violating the highest level
rules separately.
```{r}
violating(samplonomy2, out["level0"])
```
From this it appears that in 2015, the GDP for Agria is missing, and in
2018Q3 there is no value for the export of Induston.
## General aggregates in long-form data
Checking aggregations in long-form format is more involved than for
data in wide format (as in Section \@ref(balance-equalities-and-inequalities)).
Here, we check in the `samplonomy` dataset that for each measure and each
period, the subregional data adds up to the regional data.
```{r}
rules <- validator(
part_whole_relation(value
, labels=region
, whole="Samplonia"
, part =c("Agria","Induston")
, by=list(measure, period)
)
)
```
The first argument of `part_whole_relation()` is the name of the variable
containing the values. Here, the column `value` from the samplonomy dataset.
The argument `labels` indicates the variable that labels parts and wholes.
Next, we define the label value that indicates a total. Here, a record with
region label `"Samplonia"` indicates a total. Under argument `part` we specify
the labels that have to add up to Samplonia, here the provinces Agria and
Induston. Note that there are more subregions in the dataset, for example the
district of Wheaton (a subregion of Agria). Since we do not specify them, these
are ignored. In the `by` argument we specify that the dataset must be split
into measure and period prior to checking the regional aggregates.
The output is one boolean value per record. For each block, defined by the values
of `measure` and `period`, all values are either `TRUE`, `FALSE`, or `NA`. The
latter indicates that the aggregate could not be computed because one of the
values is missing, or that the computed aggregate could not be compared with the
aggregate in the data because the latter is missing (either the whole record may be
missing, or the value may be `NA`).
```{r}
out <- confront(samplonomy, rules)
# suppress some columns for brevity
summary(out)[1:7]
```
We can now inspect the records in the blocks with erroneous values.
```{r}
violating(samplonomy, out)
```
Recall that the rule was executed per block defined by `measure` and `period`.
Thus, the result indicates three errors: one in the block of records defined
by `measure=="gdp"` and `period=="2015"`, and two in the blocks defined by
`measure=="export"` with `period=="2018Q2"` or `period=="2018Q3"`.
First, it seems that the 2015 GDP of Agria
is missing from the data set. This turns out indeed to be the case.
```{r}
subset(samplonomy, region=="Agria" & period == "2015" & measure == "gdp")
```
Second, it can be seen that for Induston, there are two export values for
`"2018Q2"` while the export value for `"2018Q3"` is missing.
### Notes {-}
Specifying (group-wise) aggregates is a fairly detailed job in the case of long
data. There are a few things to keep in mind when using this function.
- The argument `part` is optional. If not specified, every record not matching
with `whole` will be considered a detail that is to be used to compute the total.
In the current example this was not possible because besides Agria and Induston,
we have other subregions.
- In the example we used literal values to specify the keys that define parts
and wholes. It is also possible to recognize them by pattern, for example to
treat any year (four digits) as a whole and any quarter as a part; see the next
section. Supported patterns include regular expressions and globbing (see the help file).
- It is important that the variables listed in `by` (if any) uniquely specify a
single aggregate. So here, for each measure and period, the label `"Samplonia"`
should occur at most once (if it does not occur the result will be `NA`).
- The default way to aggregate is to take the sum. You can specify other ways
to aggregate by passing an `aggregator` argument, for example `aggregator=mean`.
- By default, the aggregate in the data is compared with the computed aggregate
up to a tolerance of $10^{-8}$. This tolerance can be set using the `tol`
argument; e.g. for integer data you may want to set `tol=0`. Both options are
illustrated in the sketch below.
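A sketch that combines these last two options with the rule from above (the values chosen for `aggregator` and `tol` only illustrate how the arguments are passed; they are not necessarily sensible for the `samplonomy` data):
```{r, eval=FALSE}
rules <- validator(
  part_whole_relation(value
    , labels = region
    , whole  = "Samplonia"
    , part   = c("Agria", "Induston")
    , by     = list(measure, period)
    , aggregator = mean  # compare the whole with the mean over the parts
    , tol = 0            # require exact equality, e.g. for integer data
  )
)
```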
## Aggregates of time series in long format
We are going to check whether quarterly time series add up to the annual time
series. This is more complicated because of two subtleties.
First there is not one fixed aggregate key, like `"Samplonia"`. Rather, we
have a key _pattern_. Each total is defined by a period label that consists of
precisely four digits. So rather than recognizing a specific year we want to
recognize that a key represents any year. This can be done using a regular
expression of the form `"^\\d{4}$"`, where the `^` indicates 'start of string',
the `\\d{4}` indicates 'four times a digit' and `$` indicates 'end of string'.
Second, we wish to check annual totals against the sum over quarters for each region and each
measure. However, a value-combination of measure and region does not single out
a single value for `year`. For example, for the Induston export we have the following
annual data.
```{r}
subset(samplonomy, region=="Induston" & freq == "A" & measure=="export")
```
So in fact, we need to do the check _by year_ as well as by measure and region.
Fortunately, in this case it is easy to derive a variable that indicates the year
by selecting the first four characters from `period`.
```{r}
rules <- validator(part_whole_relation(value
, labels = period
, whole = rx("^\\d{4}$")
, by = list(region, substr(period,1,4), measure)
))
out <- confront(samplonomy, rules)
```
We use `rx("^\\d{4}$")` to tell `part_whole_relation` that this string must be
interpreted as a regular expression. Here, we do not indicate `part` labels
explicitly: by default any record not matching `whole` will be treated as a
detail that must be used to compute the total.
```{r}
errors(out)
# suppress some columns for brevity
summary(out)[1:7]
```
We now get 9 fails and 10 missing values. We can select the records that
have `NA` (lacking) results as follows.
```{r}
lacking(samplonomy, out)
```
There are two blocks where the annual total could not be compared with
the sum over quarterly series. The balance value of Crowdon is missing
for `"2014Q1"` as well as the import value of Wheaton for `"2019Q2"`.
Similarly, we can inspect the failing blocks.
```{r}
violating(samplonomy, out)
```
# Indicators {#sect-indicators}
```{r, include=FALSE}
source("chunk_opts.R")
```
Until now we have discussed various types of data validation rules: decisions
that assign `TRUE` or `FALSE` values to a data frame. In some cases it is
convenient to have a continuous value that can then be used in further
assessing the data.
A practical example is the so-called selective editing approach to data
cleaning. Here, each record in a data set is assigned a number that expresses
the risk a record poses for inferring a faulty conclusion. Records are then
ordered from high risk (records that have both suspicious values and a large
influence on the final result) to low risk (records with unsuspicious values and
little influence on the final result). Records with the highest risk are then
scrutinized by domain experts.
In `validate`, an indicator is a rule that returns a numerical value. Just
like `validator` objects are lists of validation rules, `indicator` objects
are lists of indicator rules. Indicator values can be computed by confronting data with
an `indicator`, and using `add_indicators()`, the computed values can be added to
the dataset. You can import, export, select, and combine `indicator` objects
in the same way as `validator` objects.
## A first example
Here is a simple example of the workflow.
```{r}
library(validate)
ii <- indicator(
BMI = (weight/2.2046)/(height*0.0254)^2
, mh = mean(height)
, mw = mean(weight))
out <- confront(women, ii)
```
In the first statement we define an `indicator` object storing indicator
expressions. Next, we confront a dataset with these indicators. The result is
an object of class `indication`. It prints as follows.
```{r}
out
```
To study the results, the object can be summarized.
```{r}
summary(out)
```
Observe that the first indicator results in one value per record
while the second and third indicators (`mh`, `mw`) each return a single
value. The single values are repeated when indicator values are added
to the data.
```{r}
head(add_indicators(women, out), 3)
```
The result is a data frame with indicators attached.
The columns `error` and `warning` indicate whether calculation of the
indicators was problematic. For example because the output of an indicator rule
is not numeric, or when it uses variables that do not occur in the data. Use
`warnings(out)` or `errors(out)` to obtain the warning and error messages per
rule.
## Getting indicator values
Values can be obtained with the `values` function, or by converting to a
`data.frame`. In this example we add a unique identifier (this is optional) to
make it easier to match the results with the data afterwards.
```{r}
women$id <- letters[1:15]
```
Compute indicators and convert to `data.frame`.
```{r}
out <- confront(women, ii,key="id")
tail( as.data.frame(out) )
```
Observe that there is no key for indicators `mh` and `mw` since these are
constructed from multiple records.
# Working with validate {#sect-work}
```{r, include=FALSE}
source("chunk_opts.R")
```
```{r, echo=FALSE}
library(validate)
```
In this section we dive deeper into the central object types used in the
package: the `validator` object type for storing lists of rules, and the
`confrontation` object type for storing the results of a validation.
## Manipulating rule sets
Validate stores rule sets in so-called `validator` objects. The
`validator()` function creates such an object.
```{r}
v <- validator(speed >= 0, dist>=0, speed/dist <= 1.5)
v
```
Validator objects behave a lot like lists. For example, you can select items
to get a new `validator`. Here, we select the first and third element.
```{r}
w <- v[c(1,3)]
```
Here `w` is a new validator object holding only the first and third rule from
`v`. If not specified by the user, rules are given the default names `"V1"`,
`"V2"`, and so on. Those names can also be used for selecting rules.
```{r}
w <- v[c("V1","V3")]
```
Validator objects are reference objects. This means that if you do
```
w <- v
```
then `w` is not a copy of `v`. It is just another name for the same physical
object as `v`. To make an actual copy, you can select everything.
```
w <- v[]
```
It is also possible to concatenate two validator objects. For example when you
read two rule sets from two files (See \@ref(sect-readfromfile)). This is done
by adding them together with `+`.
```{r}
rules1 <- validator(speed>=0)
rules2 <- validator(dist >= 0)
all_rules <- rules1 + rules2
```
An empty validator object is created with `validator()`.
If you select a single element of a validator object, an object of class 'rule'
is returned. This is the validating expression entered by the user, plus some
(optional) metadata.
```{r}
v[[3]]
```
Users never need to manipulate rule objects, but it can be convenient to
inspect them. As you see, the rules have some automatically created metadata.
In the next section we demonstrate how to retrieve and set the metadata.
## Rule metadata
Validator objects behave a lot like lists. The only metadata in an R
list are the `names` of its elements. You can get and set names of a list
using the `names<-` function. Similarly, there are getter/setter functions
for rule metadata.
- `origin()` : Where was a rule defined?
- `names()` : The name per rule
- `created()` : when were the rules created?
- `label()` : Short description of the rule
- `description()`: Long description of the rule
- `meta()` : Set or get generic metadata
Names can be set on the command line, just as you would do for
an R list.
```{r}
rules <- validator(positive_speed = speed >= 0, ratio = speed/dist <= 1.5)
rules
```
Getting and setting names works the same as for lists.
```{r}
names(rules)
names(rules)[1] <- "nonnegative_speed"
```
The functions `origin()`, `created()`, `label()`, and `description()` work in
the same way. It is also possible to add generic key-value pairs as metadata.
Getting and setting follows the usual recycling rules of R.
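Before turning to generic metadata, here is a minimal sketch of setting labels and descriptions for the rule set defined above (the texts themselves are made up for illustration):
```{r, eval=FALSE}
# set a short label and a longer description for each rule
label(rules)       <- c("nonnegative speed", "speed-distance ratio")
description(rules) <- c("speed cannot be negative"
                      , "the speed to distance ratio must not exceed 1.5")
# retrieve automatically created metadata
origin(rules)
created(rules)
```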
```{r}
# add 'foo' to the first rule:
meta(rules[1],"foo") <- 1
# Add 'bar' to all rules
meta(rules,"bar") <- "baz"
```
Metadata can be made visible by selecting a single rule:
```{r}
rules[[1]]
```
Or by extracting it to a `data.frame`
```{r}
meta(rules)
```
Some general information is obtained with `summary`,
```{r}
summary(v)
```
Here, some properties per _block_ of rules are given. Two rules occur in the same
block if they share a variable. In this case, all rules occur in the same
block.
The number of rules can be requested with `length`
```{r}
length(v)
```
With `variables`, the variables occurring per rule, or over all rules combined, can be requested.
```{r}
variables(v)
variables(v,as="matrix")
```
## Rules in data frames
You can read and write rules and their metadata from and to data frames. This
is convenient, for example in cases where rules are retrieved from a central
rule repository in a database.
Exporting rules and their metadata can be done with `as.data.frame`.
```{r}
rules <- validator(speed >= 0, dist >= 0, speed/dist <= 1.5)
df <- as.data.frame(rules)
```
Reading from a data frame is done through the `.data` argument.
```{r}
rules <- validator(.data=df)
```
It is not necessary to define all possible metadata in the data frame. It is
sufficient to have three character columns, named `rule`, `name` and
`description` in any order.
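For example, a minimal sketch of defining a rule set in a data frame by hand (the names and descriptions are made up):
```{r, eval=FALSE}
df <- data.frame(
    rule        = c("speed >= 0", "dist >= 0")
  , name        = c("nonneg_speed", "nonneg_dist")
  , description = c("speed cannot be negative", "distance cannot be negative")
  , stringsAsFactors = FALSE
)
rules <- validator(.data = df)
```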
## Validation rule syntax {#sect-syntax}
Conceptually, any R statement that will evaluate to a `logical` is considered a
validating statement. The validate package checks this when the user defines a
rule set, so for example calling `validator( mean(height) )` will result in a
warning since just computing a mean does not validate anything.
You will find a concise description of the syntax in the `syntax` help file.
```{r,eval=FALSE}
?syntax
```
In short, you can use
- Type checks: any function starting with `is.`
- Binary comparisons: `<, <=, ==, !=, >=, >` and `%in%`
- Unary logical operators: `!, all(), any()`
- Binary logical operators: `&, &&, |, ||` and logical implication, e.g. `if (staff > 0) staff.costs > 0`
- Pattern matching `grepl`
- [Functional dependency](https://en.wikipedia.org/wiki/Functional_dependency): $X\to Y + Z$ is represented by `X ~ Y + Z`.
There are some extra syntax elements that help in defining complex rules.
- Inspect the whole data set using `.`, e.g. `validator( nrow(.) > 10)`.
- Reuse a variable using `:=`, e.g. `validator(m := mean(x), x < 2*m )`.
- Apply the same rule to multiple groups with `var_group`. For example
`validator(G:=var_group(x,y), G > 0)` is equivalent to `validator(x>0, y>0)`.
A few helper functions are available to compute groupwise values on
variables (vectors). They differ from functions like `aggregate` or `tapply`
in that their result is always of the same length as the input.
```{r}
sum_by(1:10, by = rep(c("a","b"), each=5) )
```
This is useful for rules where you want to compare individual values with
group aggregates.
|function | computes |
|---------------------|----------------------------------|
| `do_by` | generic groupwise calculation |
| `sum_by` | groupwise sum |
| `min_by`, `max_by` | groupwise min, max |
| `mean_by` | groupwise mean |
| `median_by` | groupwise median |
See also Section \@ref(sect-groupwise).
There are a number of functions that perform a particular validation task that
would be hard to express with basic syntax. These are treated extensively
in Chapters \@ref(sect-varlevelchecks) to \@ref(sect-statisticalchecks), but
here is a quick overview.
|function | checks |
|---------------------|----------------------------------------------------------------|
|`in_range` | Numeric variable range |
|`is_unique` | Uniqueness of variable combinations |
|`all_unique` | Equivalent to `all(is_unique())` |
|`is_complete` | Completeness of records |
|`all_complete` | Equivalent to `all(is_complete())` |
|`exists_any` | For each group, check if any record satisfies a rule |
|`exists_one` | For each group, check if exactly one record satisfies a rule |
|`is_linear_sequence` | Linearity of numeric or date/time/period series |
|`in_linear_sequence` | Linearity of numeric or date/time/period series |
|`hierarchy` | Hierarchical aggregations |
|`part_whole_relation`| Generic part-whole relations |
|`field_length` | Field length |
|`number_format` | Numeric format in text fields |
|`field_format` | Field format |
|`contains_exactly` | Availability of records |
|`contains_at_least` | Availability of records |
|`contains_at_most` | Availability of records |
|`does_not_contain` | Correctness of key combinations |
## Confrontation objects
The outcome of confronting a validator object with a data set is an object of
class `confrontation`. There are several ways to extract information from a
`confrontation` object.
- `summary`: summarize output; returns a `data.frame`
- `aggregate`: aggregate validation in several ways
- `sort` : aggregate and sort in several ways
- `values`: Get the values in an array, or a list of arrays if rules have different output dimension structure
- `errors`: Retrieve error messages caught during the confrontation
- `warnings`: Retrieve warning messages caught during the confrontation.
By default aggregates are produced by rule.
```{r}
v <- validator(height > 0, weight > 0, height/weight < 0.5)
cf <- confront(women, v)
aggregate(cf)
```
To aggregate by record, use `by='record'`
```{r}
head(aggregate(cf,by='record'))
```
Aggregated results can be automatically sorted, so that records with the most violations or
rules that are violated most often come first.
```{r}
# rules with most violations sorting first:
sort(cf)
```
Confrontation objects can be subsetted with single bracket operators (like
vectors), to obtain a sub-object pertaining only to the selected rules.
```
summary(cf[c(1,3)])
```
## Confrontation options {#sect-options}
By default, all errors and warnings are caught when validation rules are confronted with data. This can be switched off by setting the `raise` option to `"errors"` or `"all"`. The following
example contains a specification error: `hite` should be `height`, and therefore the rule errors
on the `women` data.frame because it does not contain a column `hite`. The error is caught
(not resulting in an R error) and shown in the summary.
```{r}
v <- validator(hite > 0, weight>0)
summary(confront(women, v))
```
Setting `raise` to `"all"` results in an R error:
```{r eval=TRUE, error=TRUE}
# this gives an error
confront(women, v, raise='all')
```
Linear equalities form an important class of validation rules. To prevent
equalities from being tested strictly, there is an option called `lin.eq.eps` (with
default value $10^{-8}$) that allows one to add some slack to these tests. The
amount of slack is intended to prevent false negatives (unnecessary failures)
caused by machine rounding. If you want to check whether a sum-rule is
satisfied up to one or two units of measurement, it is cleaner to define
two inequalities for that.
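For example, a sketch of both approaches for the balance check `total.rev - profit == total.costs` on the `SBS2000` data (the slack of two units is arbitrary):
```{r, eval=FALSE}
# widen the tolerance for all linear equalities in a single confrontation
rule <- validator(total.rev - profit == total.costs)
out  <- confront(SBS2000, rule, lin.eq.eps = 2)

# or, more explicitly, express the allowed slack as two inequalities
rules <- validator(
    total.rev - profit <= total.costs + 2
  , total.rev - profit >= total.costs - 2
)
```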
## Using reference data
For some checks it is convenient to compare the data under scrutiny with
other data artifacts. Two common examples include:
- Data is checked against an earlier version of the same dataset.
- We wish to check the contents of a column against a code list,
and we do not want to put the code list hard-coded into the
rule set.
For this, we can use the `ref` argument of `confront()`. Here is how
to compare columns from two data frames row by row. The user
has to make sure that the rows of the data set under scrutiny
(`women`) match row-wise with the reference data set (`women1`).
```{r}
women1 <- women
rules <- validator(height == women_reference$height)
cf <- confront(women, rules, ref = list(women_reference = women1))
summary(cf)
```
Here is how to make a code list available.
```{r}
rules <- validator( fruit %in% codelist )
fruits <- c("apple", "banana", "orange")
dat <- data.frame(fruit = c("apple","broccoli","orange","banana"))
cf <- confront(dat, rules, ref = list(codelist = fruits))
summary(cf)
```
# Rules in text files {#sect-rulefiles}
```{r, include=FALSE}
source("chunk_opts.R")
library(validate)
```
This Chapter is about importing and exporting rules from and to file, both in
free-form text and in YAML. We also discuss some more advanced features like
how to have one rule file include another file.
## Reading rules from file {#sect-readfromfile}
It is a very good idea to store and maintain rule sets outside of your R
script. Validate supports two file formats: simple text files and `yaml` files.
Here we only discuss simple text files; `yaml` files are treated in \@ref(sect-yamlfiles).
To try this, copy the following rules into a new text file and store it in a
file called `myrules.R`, in the current working directory of your R session.
```{r, eval=FALSE}
# basic range checks
speed >= 0
dist >= 0
# ratio check
speed / dist <= 1.5
```
Note that you are allowed to annotate the rules as you would with
regular R code. Reading these rules can be done as follows.
```{r}
rules <- validator(.file="myrules.R")
```
## Metadata in text files: `YAML` {#sect-yamlfiles}
[YAML](https://yaml.org) is a data format that aims to be easy to learn and
human-readable. The name 'YAML' is a [recursive
acronym](https://en.wikipedia.org/wiki/Recursive_acronym) that stands for
> YAML Ain't Markup Language.
Validate can read and write rule sets from and to YAML files. For example,
paste the following code into a file called `myrules.yaml`.
```
rules:
- expr: speed >= 0
name: 'speed'
label: 'speed positivity'
description: |
speed can not be negative
created: 2020-11-02 11:15:11
meta:
language: validate 0.9.3.36
severity: error
- expr: dist >= 0
name: 'dist'
label: 'distance positivity'
description: |
distance cannot be negative.
created: 2020-11-02 11:15:11
meta:
language: validate 0.9.3.36
severity: error
- expr: speed/dist <= 1.5
name: 'ratio'
label: 'ratio limit'
description: |
The speed to distance ratio can
not exceed 1.5.
created: 2020-11-02 11:15:11
meta:
language: validate 0.9.3.36
severity: error
```
We can read this file using `validator(.file=)` as before.
```{r}
rules <- validator(.file="myrules.yaml")
rules
```
Observe that the labels are printed between brackets. There are a few things
to note about these YAML files.
1. `rules:` starts a list of rules.
2. Each new rule starts with a dash (`-`)
3. Each element of a rule is denoted `name: <content>`. The only required
element is `expr`: the rule expression.
4. Spaces matter. Each element of a rule must be preceded by a newline and two spaces.
Subelements (as in `meta`) are indented again.
A full tutorial on YAML can be found at
[W3Cschools.io](https://www.w3schools.io/file/yaml-introduction/).
To export a rule set to yaml, use the `export_yaml()` function.
```{r}
rules1 <- rules[c(1,3)]
export_yaml(rules1, file="myrules2.yaml")
```
## Setting options
Both free-form and YAML files can optionally start with a header section where
options or file inclusions can be set. The header section is enclosed by lines
that contain three dashes (`---`) at the beginning of the line.
For example, in the following rule file we make sure that errors are not caught
but raised to run-time level, and we set the tolerance for checking linear equalities and
inequalities to zero.
```
---
options:
raise: errors
lin.eq.eps: 0
lin.ineq.eps: 0
---
turnover >= 0
staff >= 0
total.rev - profit == total.costs
```
The options you set here will become part of the `validator` object that is
created once you read in the file. The options are valid for every
confrontation you use this validator for, unless they are overwritten during
the call to `confront()`.
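For example, assuming the rules above were saved to a file and read into an object `rules`, a sketch of overriding one of the header options for a single confrontation:
```{r, eval=FALSE}
# the header options apply by default; here we override the equality
# tolerance for this confrontation only
out <- confront(SBS2000, rules, lin.eq.eps = 1e-8)
```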
The header section is interpreted as a block of YAML, so options and file
inclusions must be specified in that format.
## Including other rule files
In validate, rule files can include each other recursively. So file A can
include file B, which may include file C. This is useful for example in surveys
where the first part of the questionnaire goes to all respondents, and for the
second part, the contents of the questionnaire (and hence its variables) depend
on the respondent type. One could create files with specific rules for the
second part: one for each respondent group, and have each specific rule file
include the general rules that must hold for every respondent.
It can also be useful when different persons are responsible for different rule
sets.
File inclusion can be set through the `include` option in the YAML header.
```
---
include:
- petes_rules.yaml
- nancys_rules.yaml
options:
raise: errors
---
# start rule definitions here
```
## Exporting validator objects
There are three ways to export a `validator` object. You can write to a `yaml` file
directly, as follows
```{r, eval=FALSE}
v <- validator(height>0, weight> 0)
export_yaml(v,file="my_rules.yaml")
```
or you can get the `yaml` text string using `as_yaml`
```
cat(as_yaml(v))
```
Finally, you can convert a rule set to a data frame and then export it
to a database.
```{r, eval=FALSE}
df <- as.data.frame(v)
```
# Rules from SDMX {#sect-sdmxrules}
**Note** This functionality is available for `validate` versions `1.1.0` or higher.
In this Chapter we first demonstrate how to use SDMX with the `validate`
package. In \@ref(moresdmx) we provide a bit more general information on the
SDMX landscape, registries, and their APIs.
```{r, include=FALSE}
source("chunk_opts.R")
library(validate)
```
## SDMX and `validate`
Statistical Data and Metadata eXchange, or SDMX is a standard for storing data
and the description of its structure, meaning, and content. The standard is
developed by the SDMX consortium (`https://sdmx.org`). It is used, amongst
others, in the [Official Statistics](https://en.wikipedia.org/wiki/Official_statistics) community to
exchange data in a standardized way.
A key aspect of SDMX is a standardized way to describe variables, data
structure (how is it stored), and code lists. This metadata is defined in an
_SDMX registry_ where data producers can download or query the necessary
metadata. Alternatively, metadata is distributed in a so-called Data Structure
Definition (DSD) file, which is usually in
[XML](https://en.wikipedia.org/wiki/XML) format.
For data validation, some aspects of the metadata are of interest. In
particular, code lists are interesting objects to test against. In validate
there are two ways to use SDMX codelists. The first is by referring to a
specific code list for a specific variable in an SDMX registry. The second way
is to derive a rule set from a DSD file that can be retrieved from a registry.
Below we discuss the following functions.
|function | what it does |
|--------------------|--------------------------------------------------|
|`sdmx_endpoint` | retrieve URL for SDMX endpoint |
|`sdmx_codelist` | retrieve sdmx codelist |
|`estat_codelist` | retrieve codelist from Eurostat SDMX registry |
|`global_codelist` | retrieve codelist from Global SDMX registry |
|`validator_from_dsd`| derive validation rules from DSD in SDMX registry|
## SDMX and API locations
SDMX metadata is typically exposed through a standardized REST API.
To query an SDMX registry, one needs to supply at least the following
information:
- The registry's API entry point. This is the base URL for the online registry.
You can specify it literally, or use one of the helper functions that
are aware of certain known SDMX registries.
- Agency ID: the ID of the agency that is responsible for the code list
- Resource ID: the name of the SDMX resource. This is usually the name of
a type of statistic, like STS (short term statistics).
- version: the code list version.
Some API endpoints are stored with the package. The function `sdmx_endpoint()`
returns endpoint URLs for several SDMX registries. Use
```{r,eval=FALSE}
sdmx_endpoint()
```
to get a list of valid endpoints. As an example, to retrieve the endpoint for the global
SDMX registry, use the following.
```{r, eval=TRUE}
sdmx_endpoint(registry="global")
```
## Code lists from SDMX registries
Code lists can be retrieved on-the-fly from one of the online SDMX registries.
In the following rule we retrieve the codelist of economic activities from the
[global SDMX registry](https://registry.sdmx.org/overview.html).
```{r, eval=FALSE}
codelist <- sdmx_codelist(
endpoint = sdmx_endpoint("global")
, agency_id = "ESTAT"
, resource_id = "CL_ACTIVITY")
head(codelist)
[1] "_T" "_X" "_Z" "A" "A_B" "A01"
```
Equivalently, and as a convenience, you could use `global_codelist()` to avoid
specifying the API endpoint explicitly. The output can be used in a rule.
```{r, eval=FALSE}
Activity %in% global_codelist(agency_id="ESTAT", resource_id="CL_ACTIVITY")
```
Since downloading codelists can take some time, any function that accesses online
SDMX registries will store the download in memory for the duration of the R session.
There is also an `estat_codelist()` function for downloading code lists from
the Eurostat SDMX registry.
## Derive rules from DSD
The functions described in the previous subsection allow you to check
variables against a particular SDMX code list. It is also possible to
download a complete Data Structure Definition and generate all checks
implied by the DSD.
```{r, eval=FALSE}
rules <- validator_from_dsd(endpoint = sdmx_endpoint("ESTAT")
, agency_id = "ESTAT", resource_id = "STSALL", version="latest")
length(rules)
[1] 13
rules[1]
Object of class 'validator' with 1 elements:
CL_FREQ: FREQ %in% sdmx_codelist(endpoint = "https://ec.europa.eu/tools/cspa_services_global/sdmxregistry/rest", agency_id = "SDMX", resource_id = "CL_FREQ", version = "2.0")
Rules are evaluated using locally defined options
```
There are 13 rules in total. For brevity, we only show the first rule here.
Observe that the first rule checks the variable `FREQ` against the code list
`CL_FREQ`, which is retrieved from the global SDMX registry. This demonstrates
that a DSD does not have to be fully self-contained: it can refer to
metadata in other standard registries. If a data set is checked against this
rule, `validate` will download the code list from the global registry and
compare each value in column `FREQ` against it.
Note that the `validator_from_dsd` function adds relevant metadata such as a
rule name, the origin of the rule and a short description. Try
```{r, eval=FALSE}
rules[[1]]
```
to see all information.
## More on `SDMX` {#moresdmx}
The Statistical Data and Metadata eXchange (SDMX) standard
is an ISO standard designed to facilitate the exchange or dissemination of
[Official
Statistics](https://en.wikipedia.org/wiki/Official_statistics#:~:text=Official%20statistics%20are%20statistics%20published,organizations%20as%20a%20public%20good.).
At the core it has a logical information model describing the key
characteristics of statistical data and metadata, which can be applied to any
statistical domain. Various data formats have been defined based on this
information model, such as SDMX-[CSV](https://www.rfc-editor.org/rfc/rfc4180),
SDMX-[JSON](https://www.json.org/json-en.html), and - by far the most widely
known - SDMX-ML (data in [XML](https://www.w3.org/XML/)). A key aspect of the
SDMX standard is that one defines the metadata, including data structure,
variables, and code lists beforehand in order to describe what data is shared
or published. This metadata is defined in an *SDMX registry* where data
producers can download or query the necessary metadata. Alternatively metadata
is distributed in a so-called *Data Structure Definition* (DSD) file, which is
usually in XML format. Both modes should result in exactly the same
metadata agreements.
SDMX registries can be accessed through a [REST
API](https://en.wikipedia.org/wiki/Representational_state_transfer), using a
standardized set of parameters. We can distinguish between registries
that provide metadata and registries that provide the actual data.
For the validate package, the metadata registries are of interest. Some
widely used metadata registries include the following.
- [Global SDMX Registry](https://registry.sdmx.org/): for global metadata,
hosted by the SDMX consortium. The central place for ESS-wide metadata. This
registry hosts important statistical metadata such as for CPI/HICP, National
Accounts (NA), Environmental accounting (SEEA), BOP, GFS, FDI and many more.
Unfortunately not all ESS metadata is present in this registry.
- [Eurostat SDMX Registry](https://webgate.ec.europa.eu/sdmxregistry/): for
Eurostat-wide metadata, hosted by Eurostat. This registry contains statistical
metadata for all other official statistics in the European Statistical System
(ESS). Access is offered via SDMX 2.1 REST API.
- [IMF SDMX Central](https://sdmxcentral.imf.org/overview.html): Registry by
the IMF.
- [UNICEF](https://sdmx.data.unicef.org/): Registry by UNICEF
Organisations that at the time of writing (spring 2023) actively offer
automated access to their data (not just metadata) via an SDMX API include (but
are not limited to) the European Central Bank
([ECB](https://sdw-wsrest.ecb.europa.eu/help/)),
the [OECD](https://data.oecd.org/api/) (in
[SDMX-JSON](https://data.oecd.org/api/sdmx-json-documentation/) or
[SDMX-ML](https://data.oecd.org/api/sdmx-ml-documentation/) format),
[Eurostat](https://ec.europa.eu/eurostat/web/sdmx-infospace),
the International Labour Organisation (ILO, `https://www.ilo.org/sdmx/index.html`),
the [Worldbank](https://datahelpdesk.worldbank.org/knowledgebase/articles/1886701-sdmx-api-queries),
the Bank for International Settlements
([BIS](https://www.bis.org/statistics/sdmx_techspec.htm?accordion1=1&m=6%7C346%7C718)),
and the Italian Office of National Statistics (ISTAT).
The SDMX consortium does not maintain a list of active SDMX endpoints. The
[rsdmx R package](https://cran.r-project.org/package=rsdmx) maintains such a
list based on an earlier inventory of Data Sources, but at the time of writing
not all those links appear to be active.
Ideally, all SDMX providers would have implemented SDMX in a coordinated way so
that a client looking for SDMX metadata to validate its data before sending
could query the respective sources using one and the same API. The latest
version of the REST API is 2.1, which is described very well in the easy-to-use
[SDMX API cheat sheet](https://github.com/sdmx-twg/sdmx-rest/raw/master/doc/rest_cheat_sheet.pdf).
Inspecting the endpoints shows that not all providers implement the same
resource values. Depending on the provider an organization may decide which
elements of the API are exposed. For example, the API standard defines methods
to retrieve code lists from a DSD, but this functionality may or may not be
offered by an API instance. If it is not offered, this means the client
software needs to retrieve this metadata via other resource requests or
alternatively extract them locally from a DSD file. Finally we signal that on
a technical level the API of the various institutes may differ considerably and
that not all SDMX services implement the same version of SDMX.
This means that users should typically familiarize themselves somewhat with the
specific API they try to access (e.g. from `validate`).
# Comparing data sets {#sect-comparing}
```{r, include=FALSE}
source("chunk_opts.R")
```
```{r, echo=FALSE}
library(validate)
```
When processing data step by step, it is useful to gather information on the
contribution of each step to the final result. This way the whole process can
be monitored and the contribution of each step can be evaluated. Schematically,
a data processing step can be visualised as follows.
{width=50%}
Here, some input data is processed by some procedure that is parameterized,
usually by domain experts. The output data is again input for a next step.
In the following two sections we discuss two methods to compare two or more
versions of a data set. In the last section we demonstrate how `validate` can
be combined with the
[lumberjack](https://cran.r-project.org/package=lumberjack) package to automate
monitoring in an R script.
## Cell counts
One of the simplest ways to compare different versions of a data set is to
count how many cells have changed. In this setting it can be useful to
distinguish between changes from available to missing data (and _vice versa_)
and changes between data where the values change. When comparing two
data sets, say the input and the output data, the total number of cells
can be decomposed according to the following schema.
{width=70%}
The total number of cells (fields) in the output data can be decomposed into
those cells that are filled (available) and those that are empty (missing).
The missing ones can be decomposed into those that were already missing in the
input data (still missing) and those that were available in the input data but
have since been emptied. Similarly, the available values can be decomposed into
those that were missing before and have been imputed, and those that were already
available. The latter can be decomposed into values that are the same as before
(unadapted) and values that have been changed (adapted).
With the `validate` package, these numbers can be computed for two or more
datasets using `cells()`. As an example, we first create three versions of the
`SBS2000` dataset. The first version is just the unaltered data. In the
second version we replace a revenue column with its absolute value to 'repair'
cases with negative revenues. In the third version, we impute cases where
`turnover` is missing with the `vat` (value added tax) value, when available.
```{r}
library(validate)
data(SBS2000)
original <- SBS2000
version2 <- original
version2$other.rev <- abs(version2$other.rev)
version3 <- version2
version3$turnover[is.na(version3$turnover)] <- version3$vat[is.na(version3$turnover)]
```
We can now compare `version2` and `version3` to the original data set as follows.
```{r}
cells(input = original, cleaned = version2, imputed = version3)
```
The `cells` function accepts an arbitrary number of `name = data frame` arguments. The
names provided by the user are used as column names in the output. From the output we see
that the `cleaned` data set (`version2`) and the `imputed` data set (`version3`) each have
one adapted value compared to the original data. Similarly, no imputations took place in
preparing the `cleaned` data set, but a single value was imputed in the `imputed` dataset.
Since each data frame is compared to the first data frame, the last column can be considered
a 'cumulative' record of all changes that took place from beginning to end. It is also possible
to print differential changes, where each data set is compared with the previous one.
```{r}
cells(input = original, cleaned = version2, imputed = version3
, compare="sequential")
```
The output of `cells()` is an array of class `cellComparison`. What makes this
especially useful is that `validate` comes with two plot methods for such
objects. To demonstrate this, we will create two more versions of the
`SBS2000` dataset.
```{r}
version4 <- version3
version4$turnover[is.na(version4$turnover)] <- median(version4$turnover, na.rm=TRUE)
# from kEUR to EUR
version5 <- version4
version5$staff.costs <- version5$staff.costs * 1000
```
```{r}
out <- cells(input = original
, cleaned = version2
, vat_imp = version3
, med_imp = version4
, units = version5)
par(mfrow=c(2,1))
barplot(out)
plot(out)
```
The bar plot and line plot convey the same information. The line plot is better
when the data sets are instances resulting from a sequential process. The bar
plot can be used more generally since it does not suggest a particular order.
## Comparing rule violations
When processing data it is interesting to compare how many data validations
are violated before and after a processing step. Comparing output data with
input data, we can decompose the total number of validation results of
the output data as follows.
{width=70%}
The total number of validation results in the output data can be split into
those that are verifiable (`TRUE` or `FALSE`) and those that are unverifiable
(`NA`). The unverifiable cases can be split into those that were also
unverifiable in the input data (still) and those that were verifiable in the
input data but can now not be verified, because certain fields have been
emptied. The verifiable cases can be split into those that yielded `FALSE`
(violated) and those that yielded `TRUE` (satisfied). Each can be split into
cases that stayed the same or changed with respect to the input data.
With `validate` the complete decomposition can be computed with `compare()`.
It takes as first argument a `validator` object and two or more data sets
to compare. We will use the data sets developed in the previous section.
```{r}
rules <- validator(other.rev >= 0
, turnover >= 0
, turnover + other.rev == total.rev
)
comparison <- compare(rules
, input = original
, cleaned = version2
, vat_imp = version3
, med_imp = version4
, units = version5)
comparison
```
By default each data set is compared to the first dataset (`input=original`).
Hence the last column represents the cumulative change of all processing steps
since the first data set. It is possible to investigate local differences by
setting `how='sequential'`.
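For example, a sketch where each data set is compared with its predecessor instead of with `input`:
```{r, eval=FALSE}
compare(rules
  , input   = original
  , cleaned = version2
  , vat_imp = version3
  , med_imp = version4
  , units   = version5
  , how = "sequential")
```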
It is possible to plot the output for a graphical overview in two different
ways: a bar plot and a line plot.
```{r}
par(mfrow=c(2,1))
barplot(comparison)
plot(comparison)
```
## `validate` and `lumberjack`
The [lumberjack](https://cran.r-project.org/package=lumberjack) package makes
it easy to track changes in data in a user-defined way. The following example
is slightly adapted from the [JSS paper](https://www.jstatsoft.org/article/view/v098i01).
We create a script that reads data, performs a few data cleaning steps
and then writes the output. The script is stored in `clean_supermarkets.R` and
has the following code.
```{r, eval=FALSE}
## Contents of clean_supermarkets.R
library(validate)
# 1. simulate reading data
data(SBS2000)
spm <- SBS2000[c("id","staff","turnover","other.rev","total.rev")]
# 2. add a logger from 'validate'
start_log(spm, logger=lbj_cells())
# 3. assume empty values should be filled with 0
spm <- transform(spm, other.rev = ifelse(is.na(other.rev),0,other.rev))
# 4. assume that negative amounts have only a sign error
spm <- transform(spm, other.rev = abs(other.rev))
# 5a. ratio estimator for staff conditional on turnover
Rhat <- with(spm, mean(staff,na.rm=TRUE)/mean(turnover,na.rm=TRUE))
# 5b. impute 'staff' variable where possible using ratio estimator
spm <- transform(spm, staff = ifelse(is.na(staff), Rhat * turnover, staff))
# 6. write output
write.csv(spm, "supermarkets_treated.csv", row.names = FALSE)
```
In the first section we do not actually read data from a data source but take a
few columns from the SBS2000 data set that comes with the validate package.
The data to be processed is stored in a variable called `spm`. Next, in
section two, we use the `lumberjack` function `start_log()` to attach a logging
object of type `lbj_cells()` to the data under scrutiny. Two things are of
note here:
1. The call to `library(validate)` is necessary to be able to use `lbj_cells()`.
Alternatively you can use `validate::lbj_cells()`.
2. It is not necessary to load the `lumberjack` package in this script (although
it is no problem if you do).
In sections three and four, values for other revenue are imputed and then forced to
be nonnegative. In section 5 a ratio model is used to impute missing staff numbers.
In section 6 the output is written.
The purpose of the `lbj_cells()` logger is to record the output of `cells()`
after each step. To make sure this happens, run this file using `run_file()`
from the `lumberjack` package.
```{r}
library(lumberjack)
run_file('clean_supermarkets.R')
```
This command executed all code in `clean_supermarkets.R`, but `run_file()` also ensured
that all changes in the `spm` variable were recorded and logged using `lbj_cells()`.
The output is written to a `csv` file which we can read.
```{r}
logfile <- read.csv("spm_lbj_cells.csv")
```
The logfile variable has quite a lot of columns, so here we show just two rows.
```{r}
logfile[3:4,]
```
Each row in the output lists the step number, a time stamp, the expression used
to alter the contents of the variable under scrutiny, and all columns computed
by `cells()`. Since the logger always compares two consecutive steps, these
numbers are comparable to using `cells(compare='sequential')`. For example, we
see that after step four, one value was adapted compared to the state after
step three. In step three, 36 values were imputed compared to the state
created by step two. In step four, no values were imputed.
It is also interesting to follow the progression of rule violations as the
`spm` dataset gets processed. This can be done with the `lbj_rules()` logger
that is exported by `validate`. Since `lumberjack` allows for multiple loggers
to be attached to an R object, we alter the first part of the above script as
follows, and store it in `clean_supermarkets2.R`
```{r, eval=FALSE}
## Contents of clean_supermarkets2.R
library(validate)
#1.a simulate reading data
data(SBS2000, package="validate")
spm <- SBS2000[c("id","staff","other.rev","turnover","total.rev")]
# 1.b Create rule set
rules <- validator(staff >= 0, other.rev>=0, turnover>=0
, other.rev + turnover == total.rev)
# 2. add two loggers
start_log(spm, logger=lbj_cells())
start_log(spm, logger=lbj_rules(rules))
## The rest is the same as above ...
```
Running the file again using lumberjack, we now get two log files.
```{r}
run_file("clean_supermarkets2.R")
```
Let's read the log file from `spm_lbj_rules.csv` and print row three and four.
```{r}
read.csv("spm_lbj_rules.csv")[3:4,]
```
We get the full output created by `validate::compare()`. For example we
see that after step three, 66 new cases satisfy one of the checks while two new
violations were introduced. The fourth step adds two new satisfied cases and no
new violations. The total number of violations after four steps equals five.
Until now the logging data was written to files whose names were determined automatically
by `lumberjack`. This is because `lumberjack` automatically dumps the logging data
after executing the file when the user has not done so explicitly.
You can determine where to write the logging data by adding a `stop_log()`
statement anywhere in your code (but at the end would usually make most sense).
For example, add the following line of code at the end of
`clean_supermarkets2.R` to write the output of the `lbj_rules` logger to
`my_output.csv`.
```{r, eval=FALSE}
stop_log(spm, logger="lbj_rules",file="my_output.csv")
```
The format and way in which logging data is exported is fixed by the logger. So
`lbj_rules()` and `lbj_cells()` can only export to csv, and only the data we've
seen so far. The good news is that the `lumberjack` package itself contains
other loggers that may be of interest, and it is also possible to develop your
own logger. So it is possible to develop loggers that export data to a
database. See the [lumberjack paper](https://www.jstatsoft.org/article/view/v098i01) for a
short tutorial on how to write your own logger.
# Bibliographical notes {-}
```{r, include=FALSE}
source("chunk_opts.R")
```
More background on the validate package can be found in the paper
published in the Journal of Statistical Software.
> MPJ van der Loo and E de Jonge (2020). [Data Validation Infrastructure for R](https://www.jstatsoft.org/article/view/v097i10). _Journal of Statistical Software_ 97(10)
The theory of data validation is described in the following paper.
> MPJ van der Loo, and E de Jonge (2020). [Data Validation](https://arxiv.org/abs/1912.09759). _In Wiley StatsRef: Statistics Reference Online (eds N. Balakrishnan, T. Colton, B. Everitt, W. Piegorsch, F. Ruggeri and J.L. Teugels)_.
Data validation is described in the wider context of data cleaning, in Chapter 6 of
the following book.
> MPJ van der Loo and E de Jonge (2018) [Statistical Data Cleaning With Applications in R](https://www.wiley.com/en-us/Statistical+Data+Cleaning+with+Applications+in+R-p-9781118897157). _John Wiley & Sons, NY_.
The following document describes data validation in the context of European
Official Statistics. It includes issues such as lifecycle management,
complexity analyses and examples from practice.
> M. Di Zio, N. Fursova, T. Gelsema, S. Giessing, U. Guarnera, J. Petrauskienė, L. Quensel-von Kalben, M. Scanu, K. ten Bosch, M. van der Loo, and K. Walsdorfer (2015) [Methodology for data validation](https://www.markvanderloo.eu/files/share/zio2015methodology.pdf)
The `lumberjack` package discussed in Chapter \@ref(sect-comparing) is described in the following
paper.
> MPJ van der Loo (2020). [Monitoring Data in R with the lumberjack package](https://www.jstatsoft.org/article/view/v098i01). _Journal of Statistical Software_, 98(1)
|
/scratch/gouwar.j/cran-all/cranData/validate/vignettes/cookbook.Rmd
|
# basic range checks
speed >= 0
dist >= 0
# ratio check
speed / dist <= 1.5
|
/scratch/gouwar.j/cran-all/cranData/validate/vignettes/myrules.R
|
#' Check Agreement Rate between Identical Trials
#'
#' @details
#' Evaluate workers' performance by the agreement rate between identical trials
#' (note that the two inputs, results1 and results2, must come from the same set of tasks);
#' Return 1) the exact agreement rate, where both workers agree on the exact same choice, and
#' 2) the binary agreement rate, where both workers get the task either right or wrong simultaneously
#'
#' @param results1 first batch of results; outputs from getResults()
#' @param results2 second batch of results; outputs from getResults()
#' @param key the local task record; outputs from recordTasks()
#' @param type Task structures to be specified. Must be one of "WI" (word intrusion),
#' "T8WSI" (top 8 word set intrusion), "R4WSI" (random 4 word set intrusion),
#' "LI" (Label Intrusion), and "OL" (Optimal Label)
#' @returns A named list with two elements: the exact agreement rate and the binary agreement rate.
#' @export
checkAgree <- function(results1, results2, key, type = NULL){
if(sum(!(key[[1]]$id %in% results1$local_task_id)) != 0){
key[[2]] <- key[[2]][key[[1]]$id %in% results1$local_task_id,]
key[[1]] <- key[[1]][key[[1]]$id %in% results1$local_task_id,]
}
# message(paste0(sum(results1[,5] != 0), ' / ', nrow(key[[2]]), ' results will be evaluated'))
# Remove the gold-standard HITs
results1 <- results1[key[[1]][,1] != "gold",]
results2 <- results2[key[[1]][,1] != "gold",]
key[[2]] <- key[[2]][key[[1]][,1] != "gold",]
key[[1]] <- key[[1]][key[[1]][,1] != "gold",]
# Agreement rate on picking up the EXACT same choice
AgreeExact <- sum(results1$result[results1$result != 0] == results2$result[results2$result != 0])/length(results1$result[results1$result != 0])
results1 <- as.matrix(results1)
results2 <- as.matrix(results2)
# Calculate the "correct" vector
indicator1 <- NULL
indicator2 <- NULL
if(type == "R4WSI0" | type == "T8WSI" | type == "LI" | type == "OL"){
for(i in 1:nrow(results1)){
if (results1[i,6] != 0){
correct <- as.vector(key[[2]][i,-1])[as.numeric(results1[i,6])] == as.character(key[[1]][i, (ncol(key[[1]])-1)])
} else {
correct <- NA
}
indicator1 <- c(indicator1, correct)
}
for(i in 1:nrow(results2)){
if (results2[i,6] != 0){
correct <- as.vector(key[[2]][i,-1])[as.numeric(results2[i,6])] == as.character(key[[1]][i, (ncol(key[[1]])-1)])
} else {
correct <- NA
}
indicator2 <- c(indicator2, correct)
}
} else if (type == "R4WSI"){
for(i in 1:nrow(results1)){
if (results1[i,6] != 0){
correct <- as.vector(key[[2]][i,])[as.numeric(results1[i,6])] == as.character(key[[1]][i, (ncol(key[[1]])-1)])
} else {
correct <- NA
}
indicator1 <- c(indicator1, correct)
}
for(i in 1:nrow(results2)){
if (results2[i,6] != 0){
correct <- as.vector(key[[2]][i,])[as.numeric(results2[i,6])] == as.character(key[[1]][i, (ncol(key[[1]])-1)])
} else {
correct <- NA
}
indicator2 <- c(indicator2, correct)
}
} else if (type == "WI"){
for(i in 1:nrow(results1)){
if (results1[i,6] != 0){
correct <- as.vector(key[[2]][i,])[as.numeric(results1[i,6])] == as.character(key[[1]][i, (ncol(key[[1]])-1)])
} else {
correct <- NA
}
indicator1 <- c(indicator1, correct)
}
for(i in 1:nrow(results2)){
if (results2[i,6] != 0){
correct <- as.vector(key[[2]][i,])[as.numeric(results2[i,6])] == as.character(key[[1]][i, (ncol(key[[1]])-1)])
} else {
correct <- NA
}
indicator2 <- c(indicator2, correct)
}
} else {
stop("Please specify a valid task type.")
}
  # Agreement rate: both workers answer correctly or both answer wrongly
AgreeBinary <- sum(indicator1[!is.na(indicator1)] == indicator2[!is.na(indicator1)])/length(indicator1[!is.na(indicator1)])
output <- list(AgreeExact, AgreeBinary)
names(output) <- c("Both workers agree on the exact same choice",
"Both workers answer correctly or wrongly")
return(output)
}
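# Hedged usage sketch (not run): `res_a` and `res_b` stand for two getResults()
# batches collected on the same set of tasks, and `rec` for the matching
# recordTasks() output; all three names are placeholders, not package objects.
if (FALSE) {
  agree <- checkAgree(results1 = res_a, results2 = res_b, key = rec, type = "R4WSI0")
  agree[["Both workers agree on the exact same choice"]]
  agree[["Both workers answer correctly or wrongly"]]
}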
|
/scratch/gouwar.j/cran-all/cranData/validateIt/R/checkAgree.R
|
#' Combine the mass of words with the same root
#'
#' @details
#' Use as a preparatory step for validating unstemmed topic models.
#'
#' @param mod Fitted structural topic models.
#' @param vocab A character vector specifying the words in the corpus. Usually, it
#' can be found in topic model output.
#' @param beta A matrix of word probabilities for each topic. Each row represents a
#' topic and each column represents a word. Note this should not be in the logged form.
#'
#' @import tm
#' @import SnowballC
#'
#' @return A list with two elements:
#' \item{newvocab}{A matrix of new vocabulary. Each row represents a topic and each column represents a unique stemmed word.}
#' \item{newbeta}{A matrix of new beta. Each row represents a topic and each column represents the sum of the probabilities of the words with the same root.}
#'
#' @export
combMass <- function(mod = NULL, vocab = NULL, beta = NULL){
if(inherits(mod, "STM")){
vocab <- mod$vocab
rawbeta <- exp(mod$beta$logbeta[[1]])
} else {
if(is.null(vocab) | is.null(beta)){
stop("\"vocab\" and \"beta\" must be specified for topic models that are not STM.")
}
rawbeta <- beta
}
# test: rowSums(rawbeta) == rep(1, nrow(beta))
stemmed_vocab <- stemDocument(vocab)
# colnames(rawbeta) <- stemmed_vocab
# newbeta <- t(rowsum(t(rawbeta), colnames(rawbeta)))
newvocab <- matrix(NA, nrow = nrow(rawbeta), ncol = length(unique(stemmed_vocab)))
newbeta <- matrix(NA, nrow = nrow(rawbeta), ncol = length(unique(stemmed_vocab)))
for (i in 1:nrow(rawbeta)){
mapping <- cbind.data.frame(vocab, stemmed_vocab, rawbeta[i,], stringsAsFactors = F)
colnames(mapping)[3] <- "prob"
maxtable <- aggregate(mapping$prob, by = list(mapping$stemmed_vocab), max)
sumtable <- aggregate(mapping$prob, by = list(mapping$stemmed_vocab), sum)
grouptable <- cbind.data.frame(maxtable, sumtable[,2])
colnames(grouptable) <- c("stemmed_vocab", "prob", "sum.prob")
mapping <- merge(mapping, grouptable,
by = c("stemmed_vocab", "prob"),
all.x = T, all.y = F)
mapping <- mapping[!is.na(mapping$sum.prob) & !duplicated(mapping[,c("stemmed_vocab", "prob")]),]
newvocab[i,] <- mapping$vocab
newbeta[i,] <- mapping$sum.prob
# # super slow
# for(j in 1:length(unique(stemmed_vocab))){
# group <- mapping[mapping$stemmed_vocab == unique(stemmed_vocab)[j],]
# newvocab[i, j] <- as.character(group$vocab[which.max(group[,3])])
# newbeta[i, j] <- sum(group[,3])
# }
}
return(list(newvocab, newbeta))
}
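# Hedged usage sketch (not run): `modtest` is the example STM object shipped
# with the package; for non-STM models pass `vocab` and an unlogged `beta`.
if (FALSE) {
  data(modtest)
  cm <- combMass(mod = modtest)
  newvocab <- cm[[1]]  # most frequent unstemmed form per stemmed root, by topic
  newbeta  <- cm[[2]]  # summed word probabilities per stemmed root, by topic
}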
|
/scratch/gouwar.j/cran-all/cranData/validateIt/R/combMass.R
|
#' An Example Topic Model
#'
#' A structural topic model (STM) object generated from the \code{stm} package using a random
#' sample of US senators' Facebook posts.
#'
#' @format A STM object.
#'
#' @references
#' Roberts, Margaret E., Brandon M. Stewart, and Dustin Tingley. "Stm: An R package for structural
#' topic models." Journal of Statistical Software 91 (2019): 1-40.
#'
#' @source
#' See \url{https://CRAN.R-project.org/package=stm} for more details.
#'
#' @docType data
#' @name modtest
#' @usage data(modtest)
NULL
#' An Example of the Combined Mass for Words with the Same Roots
#'
#' A list of two with the words (the most frequent form in each topic) and the corresponding word
#' probabilities.
#'
#' \describe{
#' \item{\code{vocab}}{A matrix of words for each topic. Each row represents a
#' topic and each column represents the words. Words with the same roots are
#' only represented by the most common form in that topic.}
#' \item{\code{beta}}{A matrix of combined word probabilities for each topic.
#' Each row represents a topic and each column represents a combined word.}
#' }
#'
#' @format A list of two.
#'
#' @docType data
#' @name masstest
#' @usage data(masstest)
NULL
#' An Example Object of Prepared Documents
#'
#' An output from the \code{prepDocuments} function of the \code{stm} package.
#'
#' @format A list containing a documents and vocab object.
#'
#' @references
#' Roberts, Margaret E., Brandon M. Stewart, and Dustin Tingley. "Stm: An R package for structural
#' topic models." Journal of Statistical Software 91 (2019): 1-40.
#'
#' @source
#' See \url{https://CRAN.R-project.org/package=stm} for more details.
#'
#' @docType data
#' @name stmPreptest
#' @usage data(stmPreptest)
NULL
#' An Example Heldout Test Set
#'
#' An output from the \code{make.heldout} function of the \code{stm} package.
#'
#' @format A list of the heldout documents, vocab, and missing.
#'
#' @references
#' Roberts, Margaret E., Brandon M. Stewart, and Dustin Tingley. "Stm: An R package for structural
#' topic models." Journal of Statistical Software 91 (2019): 1-40.
#'
#' @source
#' See \url{https://CRAN.R-project.org/package=stm} for more details.
#'
#' @docType data
#' @name heldouttest
#' @usage data(heldouttest)
NULL
#' Example R4WSI0 Tasks
#'
#' Data of 15 example R4WSI0 Tasks structured as a matrix.
#'
#' Please note that the difference between the R4WSI0 examples used here and the R4WSI tasks
#' is that the R4WSI tasks do not present any documents.
#'
#' @format A matrix with 15 rows and 6 columns.
#' \describe{
#' \item{\code{topic}}{Index of topics}
#' \item{\code{doc}}{Example documents associated with each topic}
#' \item{\code{opt1}}{Words set option 1}
#' \item{\code{opt2}}{Words set option 2}
#' \item{\code{opt3}}{Words set option 3}
#' \item{\code{optcrt}}{Words set option 4, also the correct choice}
#' }
#'
#' @docType data
#' @keywords datasets
#' @name R4WSItasktest
#' @usage data(R4WSItasktest)
NULL
#' Example Gold-Standard R4WSI0 Tasks
#'
#' Data frame of 5 example gold-standard R4WSI0 Tasks.
#'
#' @format A data frame of 5 rows and 6 columns.
#' \describe{
#' \item{\code{topic}}{Index of topics}
#' \item{\code{doc}}{Example documents associated with each topic}
#' \item{\code{opt1}}{Words set option 1}
#' \item{\code{opt2}}{Words set option 2}
#' \item{\code{opt3}}{Words set option 3}
#' \item{\code{optcrt}}{Words set option 4, also the correct choice}
#' }
#'
#' @docType data
#' @keywords datasets
#' @name goldR4WSItest
#' @usage data(goldR4WSItest)
NULL
#' Example R4WSI Tasks with Regular and Gold-Standard Tasks
#'
#' Data frame of 20 example R4WSI0 Tasks, with 5 of them being gold-standard and 15 of them not.
#'
#' @format A data frame of 20 rows and 7 columns.
#' \describe{
#' \item{\code{topic}}{Index of topics}
#'   \item{\code{id}}{Local task id added when gold-standard tasks are mixed in}
#' \item{\code{doc}}{Example documents associated with each topic}
#' \item{\code{opt1}}{Words set option 1}
#' \item{\code{opt2}}{Words set option 2}
#' \item{\code{opt3}}{Words set option 3}
#' \item{\code{optcrt}}{Words set option 4, also the correct choice}
#' }
#'
#' @docType data
#' @keywords datasets
#' @name allR4WSItasktest
#' @usage data(allR4WSItasktest)
NULL
#' Example Local Record of the R4WSI Tasks
#'
#' Local record generated by the \code{recordTasks} function.
#'
#' To be compared with the answers from the online workers to evaluate the topic model performance.
#'
#' @format A list of two data frames.
#' \describe{
#' \item{\code{data.frame1}}{A data frame of tasks with the \code{optcrt} indicating the
#'   machine predicted choice.}
#' \item{\code{data.frame2}}{A data frame of tasks with randomized choices.
#' Exactly the same with what would be sent online.}
#' }
#'
#' @docType data
#' @name recordtest
#' @usage data(recordtest)
NULL
#' Example Results Retrieved from Mturk
#'
#' @format A data frame of ten example tasks retrieved from the Mturk with or without
#' online workers' answers.
#'
#' \describe{
#' \item{\code{assignment_id}}{Assignment id. Mturk assigned. If 0, then the task hasn't been completed.}
#' \item{\code{batch_id}}{User specified batch id.}
#' \item{\code{completed_at}}{Timestamp when the task was completed. If 0, then the task hasn't been completed.}
#' \item{\code{local_task_id}}{Local task id.}
#' \item{\code{mturk_hit_id}}{Mturk HIT id. Mturk assigned.}
#' \item{\code{result}}{Choice made by the worker. 1-4. If 0, then the task hasn't been completed.}
#' \item{\code{worker_id}}{Mturk worker id. If 0, then the task hasn't been completed.}
#' }
#'
#' @docType data
#' @keywords datasets
#' @name resultstest
#' @usage data(resultstest)
NULL
#' Example Answer Keys
#'
#' @format A list of two data frames. Similar to \code{recordtest}.
#'
#' \describe{
#' \item{\code{data.frame1}}{A data frame of tasks with the \code{optcrt} indicating the
#' machine predicted choice.}
#' \item{\code{data.frame2}}{A data frame of tasks with randomized choices.
#' Exactly the same with what would be sent online.}
#' }
#'
#' @docType data
#' @name keypostedtest
#' @usage data(keypostedtest)
NULL
|
/scratch/gouwar.j/cran-all/cranData/validateIt/R/data.R
|
#' Evaluate results
#'
#' @details
#' Evaluate worker performance by gold-standard HITs;
#' Return the accuracy rate (proportion correct) for a specified batch
#'
#' @param results results of human choice; outputs from getResults()
#' @param key the local task record; outputs from recordTasks()
#' @param type Task structures to be specified. Must be one of "WI" (word intrusion),
#' "T8WSI" (top 8 word set intrusion), "R4WSI0" (random 4 word set intrusion with document),
#' "R4WSI" (random 4 word set intrusion), "LI" (Label Intrusion), and "OL" (Optimal Label)
#' @returns A list containing the gold-standard HIT correct rate, gold-standard HIT correct rate by workers, and non-gold-standard HIT correct rate
#' @export
evalResults <- function(results, key, type = NULL){
if(sum(!(key[[1]]$id %in% results$local_task_id)) != 0){
key[[2]] <- key[[2]][key[[1]]$id %in% results$local_task_id,]
key[[1]] <- key[[1]][key[[1]]$id %in% results$local_task_id,]
}
message(paste0(sum(results[,5] != 0), ' / ', nrow(key[[2]]), ' results will be evaluated'))
results <- as.matrix(results)
indicator <- NULL
if(type == "R4WSI0" | type == "T8WSI" | type == "LI" | type == "OL"){
for(i in 1:nrow(results)){
if (results[i,6] != 0){
correct <- as.vector(key[[2]][i,-1])[as.numeric(results[i,6])] == as.character(key[[1]][i, (ncol(key[[1]])-1)])
} else {
correct <- NA
}
indicator <- c(indicator, correct)
}
} else if (type == "R4WSI"){
for(i in 1:nrow(results)){
if (results[i,6] != 0){
correct <- as.vector(key[[2]][i,])[as.numeric(results[i,6])] == as.character(key[[1]][i, (ncol(key[[1]])-1)])
} else {
correct <- NA
}
indicator <- c(indicator, correct)
}
} else if (type == "WI"){
for(i in 1:nrow(results)){
if (results[i,6] != 0){
correct <- as.vector(key[[2]][i,])[as.numeric(results[i,6])] == as.character(key[[1]][i, (ncol(key[[1]])-1)])
} else {
correct <- NA
}
indicator <- c(indicator, correct)
}
} else {
stop("Please specify a valid task type.")
}
# overall gold-standard hit correct rate
num <- sum(indicator[key[[1]][,1] == "gold"], na.rm = T)
denum <- sum(key[[1]][,1] == "gold" & results[,6] != 0)
goldcorrect <- c(num/denum, paste0(num, ' / ', denum))
message(paste0(num, ' / ', denum, ' gold-standard HITs are answered correct'))
# gold-standard hit correct rate by workers
goldcorrectbyworker <- table(results[key[[1]][,1] == "gold",5], indicator[key[[1]][,1] == "gold"])
# non-gold-standard HITs correct rate
num <- sum(indicator[key[[1]][,1] != "gold"], na.rm = T)
denum <- sum(key[[1]][,1] != "gold" & results[,6] != 0)
nongoldcorrect <- c(num/denum, paste0(num, ' / ', denum))
message(paste0(num, ' / ', denum, ' non-gold-standard HITs are answered correct'))
output <- list(goldcorrect, goldcorrectbyworker, nongoldcorrect)
names(output) <- c("Gold-standard HIT Correct Rate",
"Gold-standard HIT Correct Rate by Workers",
"Non-gold-standard HIT Correct Rate")
return(output)
}
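# Hedged usage sketch (not run): assumes `resultstest` (example results) and
# `keypostedtest` (example answer key) refer to the same batch of tasks.
if (FALSE) {
  data(resultstest); data(keypostedtest)
  ev <- evalResults(results = resultstest, key = keypostedtest, type = "R4WSI0")
  ev[["Gold-standard HIT Correct Rate"]]
}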
|
/scratch/gouwar.j/cran-all/cranData/validateIt/R/evalResults.R
|
#' Get results from Mturk
#'
#' @details
#' This function works for complete or incomplete batches.
#'
#' @param batch_id any number or string to annotate the batch
#' @param hit_ids hit ids returned from the MTurk API, i.e., output of sendTasks()
#' @param retry if TRUE, keep retrying to retrieve results from the MTurk API until all HITs are returned; default to TRUE
#' @param retry_in_seconds default to 60 seconds
#' @param AWS_id AWS_ACCESS_KEY_ID
#' @param AWS_secret AWS_SECRET_ACCESS_KEY
#' @param sandbox sandbox setting
#'
#' @import pyMTurkR
#' @return a data frame with columns:
#' \item{batch_id}{an annotation for the batch}
#' \item{local_task_id}{an identifier for the task in the batch}
#' \item{mturk_hit_id}{the ID of the HIT in MTurk}
#' \item{assignment_id}{the ID of the assignment in MTurk}
#' \item{worker_id}{the ID of the worker who completed the assignment}
#' \item{result}{the worker's response to the task}
#' \item{completed_at}{the time when the worker submitted the assignment}
#'
#' @export
getResults <- function(batch_id = "unspecified",
hit_ids,
retry = TRUE,
retry_in_seconds = 60,
AWS_id = Sys.getenv("AWS_ACCESS_KEY_ID"),
AWS_secret = Sys.getenv("AWS_SECRET_ACCESS_KEY"),
sandbox = getOption("pyMTurkR.sandbox", TRUE)){
# check that amazon credentials & sandbox settings apply
if(nchar(Sys.getenv("AWS_ACCESS_KEY_ID")) == 0){
Sys.setenv(AWS_ACCESS_KEY_ID = AWS_id)
Sys.setenv(AWS_SECRET_ACCESS_KEY = AWS_secret)
}
options(pyMTurkR.sandbox = sandbox)
# convert all hit ids to character
task_ids <- as.character(hit_ids[[2]][,1])
mturk_ids <- as.character(hit_ids[[2]][,2])
# retrieve results from mturk
raw_results <- data.frame(stringsAsFactors = FALSE)
message('Start getting HITs...')
for(i in 1:length(mturk_ids)){
turk_data <- suppressMessages(GetAssignment(hit = mturk_ids[i],
get.answers = T))
if(nrow(turk_data$Answers) == 0){
this_hit_result <- as.data.frame(cbind(task_ids[i], mturk_ids[i], 0, 0, 0, 0),
stringsAsFactors = FALSE)
} else {
this_hit_result <- as.data.frame(cbind(task_ids[i], mturk_ids[i],
turk_data$Answers$AssignmentId,
turk_data$Answers$WorkerId,
as.numeric(turk_data$Answers$FreeText),
as.character(turk_data$Assignments$SubmitTime)),
stringsAsFactors = FALSE)
}
raw_results <- rbind(raw_results, this_hit_result)
}
results <- cbind(batch_id, raw_results, stringsAsFactors = FALSE)
colnames(results) <- c("batch_id", "local_task_id", "mturk_hit_id", "assignment_id", "worker_id", "result", "completed_at")
n_results <- sum(results$result != 0)
if(n_results == length(mturk_ids)){
message(paste0('All ', n_results, ' HITs retrieved'))
} else {
message(paste0(n_results, ' / ', length(mturk_ids), ' results retrieved'))
if(retry == T){
Sys.sleep(retry_in_seconds)
return(getResults(batch_id,
hit_ids,
retry,
retry_in_seconds,
AWS_id,
AWS_secret,
sandbox))
}
}
return(results)
}
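# Hedged usage sketch (not run): requires valid AWS credentials; `hitids` is a
# placeholder for the list returned by sendTasks().
if (FALSE) {
  res <- getResults(batch_id = "pilot1", hit_ids = hitids,
                    retry = FALSE, sandbox = TRUE)
  table(res$result)  # 0 marks HITs that have not been completed yet
}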
|
/scratch/gouwar.j/cran-all/cranData/validateIt/R/getResults.R
|
#' Mix the gold-standard tasks with the tasks need to be validated
#'
#' @param tasks All tasks need to be validated
#' @param golds Gold standard tasks with the same structure
#' @return A data frame with the same structure as the input, where gold-standard tasks are randomly inserted
#' @export
mixGold <- function(tasks, golds){
if (nrow(tasks)/nrow(golds) != round(nrow(tasks)/nrow(golds))){
stop("The number of tasks needs to be a multiple of the number of golds.")
}
tasks <- as.data.frame(tasks, stringsAsFactors = F)
placeholder <- seq(0, (nrow(tasks)+nrow(golds) - 1), by = (nrow(tasks)/nrow(golds)+1))
indices <- sample(1:(nrow(tasks)/nrow(golds)+1), nrow(golds), replace = T)
if(indices[1] == 1){
indices[1] <- 2
}
for(i in 1:nrow(golds)){
tasks <- rbind(tasks[0:(placeholder[i]+indices[i]-1),],
golds[i,],
tasks[-(0:(placeholder[i]+indices[i]-1)),])
}
tasks <- cbind(tasks, 1:nrow(tasks))
row.names(tasks) <- 1:nrow(tasks)
colnames(tasks)[ncol(tasks)] <- "id"
return(tasks)
}
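# Hedged usage sketch (not run): uses the example task sets documented in
# data.R; nrow(tasks) must be a multiple of nrow(golds) (here 15 and 5).
if (FALSE) {
  data(R4WSItasktest); data(goldR4WSItest)
  alltasks <- mixGold(tasks = R4WSItasktest, golds = goldR4WSItest)
  nrow(alltasks)  # 20 rows, with an added "id" column
}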
|
/scratch/gouwar.j/cran-all/cranData/validateIt/R/mixGold.R
|
#' Pick the optimal label from candidate labels
#'
#' @details
#' Users need to specify four plausible labels for each topic
#'
#' @param n The number of desired tasks
#' @param text.predict A data frame or matrix containing both the text and the indicator(s)
#' of the model predicted topic(s).
#' @param text.name variable name in `text.predict` that indicates the text
#' @param top1.name variable name in `text.predict` that indicates the top1 model predicted topic
#' @param labels.index The topic index in correspondence with the labels, e.g., c(10, 12, 15).
#' @param candidate.labels A list of vectors containing the user-defined candidate labels for each topic;
#' must be of the same length and in the same order as `labels.index`. The fourth label in each vector fills optcrt and is treated as the correct choice.
#' @return A matrix with n rows and 6 columns (topic, doc, opt1, opt2, opt3, optcrt) where optcrt is the correct label that was picked.
#'
#' @export
pickLabel <- function(n, text.predict = NULL, text.name = "text",
top1.name = "top1",
labels.index = NULL, candidate.labels = NULL){
if(!(is.data.frame(text.predict) | is.matrix(text.predict))){
stop("\"text.predict\" needs to be a data.frame or matrix")
}
if(!(text.name %in% colnames(text.predict))){
stop("Please specify the correct variable name for text.")
}
if(!(top1.name %in% colnames(text.predict))){
stop("Please specify the correct variable name for the model predicted most likely topic.")
}
text <- text.predict[,text.name]
top1 <- text.predict[,top1.name]
if(!all(labels.index %in% unique(top1))){
stop("Some topic (labels.index) does not have any corresponding representative text.
Consider removing that topic.")
}
if(length(candidate.labels) != length(labels.index)){
stop("\"candidate.labels\" and \"labels.index\" have to be of the same length and in the exact same order.")
}
topic <- rep(labels.index, length.out = n)
# define the output
out <- matrix(NA, ncol = 6, nrow = n)
colnames(out) <- c('topic', 'doc', 'opt1', 'opt2', 'opt3', 'optcrt')
for(i in 1:n){
# sample document by topic
k <- topic[i]
doc <- gsub('\n', '<br>', sample(text[top1 == k], 1))
# put together a task
out[i,] <- c(k, doc, unlist(candidate.labels[labels.index == k]))
}
return(out)
}
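# Hedged usage sketch (not run): `pred` is a hypothetical data frame with a
# "text" column and a "top1" column; the fourth label in each candidate vector
# fills optcrt and is treated as the correct choice.
if (FALSE) {
  tasks <- pickLabel(n = 9, text.predict = pred,
                     text.name = "text", top1.name = "top1",
                     labels.index = c(1, 2, 3),
                     candidate.labels = list(c("war", "health", "sports", "economy"),
                                             c("war", "economy", "sports", "health"),
                                             c("health", "economy", "sports", "war")))
}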
|
/scratch/gouwar.j/cran-all/cranData/validateIt/R/pickLabel.R
|
#' Plot results
#'
#' @details
#' Visualize the accuracy rate (proportion correct) for a specified batch
#'
#' @param path path to store the plot
#' @param x a vector of counts of successes; could be obtained from getResults()
#' @param n a vector of counts of trials
#' @param taskname the name of the task for labeling, e.g., Word Intrusion, Optimal Label.
#' @param ... additional arguments to be passed to plot function
#'
#' @return Nothing is returned; a plot is created and saved as a pdf file.
#'
#' @export
plotResults <- function(path, x, n, taskname, ...){
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
pdf(path, width = 3, height = 7)
par(mgp = c(1.5, 0, 0), mar = c(2, 3, .7, .7))
plot(NULL,
main = NA,
ylim=c(0, 1.02),
xlim = c(0.8, 1.2),
ylab = "Proportion Correct",
xlab = NA,
cex.lab = 1.2,
axes = F, ...)
axis(side = 2, at = seq(0, 1, by = 0.2), col.ticks = NA, cex.axis = 1.2)
axis(side = 1, at = 1,
labels = taskname,
las = 1,
col = NA,
col.ticks = NA,
cex.axis = 1.2)
# legend(0.55, 1.03,
# c("Model"),
# col = c(blue),
# lty = 1,
# lwd = 3,
# cex = 1.2,
# bty = 'n')
# ----------------------------------------------------------------
# abline(h = 0.25, col = "gray", lty = 1, lwd = 2)
abline(h = 0.5, col = "gray", lty = 1, lwd = 2)
# -------------------------------- bars ---------------------------------
first <- x[1]/n[1]
second <- x[2]/n[2]
pool <- (first + second)/2
points(x = 1, y = first, pch = 20, col = "blue", cex = 1.5)
segments(x0 = 1, y0 = first-1.96*sqrt(first*(1-first)/n[1]), x1 = 1, y1 = first+1.96*sqrt(first*(1-first)/n[1]), col = "blue", lty = 2, lwd = 2)
points(x = 1.02, y = second, pch = 20, col = "blue", lty = 2, cex = 1.5)
segments(x0 = 1.02, y0 = second-1.96*sqrt(second*(1-second)/n[2]), x1 = 1.02, y1 = second+1.96*sqrt(second*(1-second)/n[2]), col = "blue", lty = 2, lwd = 2)
points(x = 1.05, y = pool, pch = 20, col = "blue", cex = 2)
segments(x0 = 1.05, y0 = pool-1.96*sqrt(pool*(1-pool)/(n[1]+n[2])), x1 = 1.05, y1 = pool+1.96*sqrt(pool*(1-pool)/(n[1]+n[2])), col = "blue", lwd = 3)
dev.off()
}
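# Hedged usage sketch (not run): x and n give the number of correct answers and
# of evaluated tasks for two batches; the plot is written to `path` as a PDF.
if (FALSE) {
  plotResults(path = "agreement.pdf", x = c(40, 42), n = c(50, 50),
              taskname = "Word Intrusion")
}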
|
/scratch/gouwar.j/cran-all/cranData/validateIt/R/plotResults.R
|
#' Reform tasks to facilitate sending to Mturk
#'
#' @details
#' Randomize the order of options and record the tasks in a specified local directory
#'
#' @param type (character) one of WI, T8WSI, R4WSI
#' @param tasks (data.frame) outputs from validateTopic(), validateLabel(), or mixGold() if users mix in gold-standard HITs
#' @param path (character) path to record the tasks (with meta-information)
#' @return A list of two data frames, containing the original tasks and the randomized options respectively.
#' @export
recordTasks <- function(type, tasks, path){
if(type == "WI"){
optionidx <- 2:6
optRandom <- tasks[,optionidx]
optRandom <- as.data.frame(t(apply(optRandom, 1, function(x) x[sample(length(x))])),
stringsAsFactors = F)
colnames(optRandom) <- paste0("word", 1:length(optionidx))
} else if (type == "R4WSI0"|type == "T8WSI"|type == "LI"|type == "OL"){
docindix <- 2
optionidx <- 3:6
optRandom <- tasks[,optionidx]
optRandom <- as.data.frame(t(apply(optRandom, 1, function(x) x[sample(length(x))])),
stringsAsFactors = F)
optRandom <- cbind.data.frame(tasks[,docindix], optRandom,
stringsAsFactors = F)
colnames(optRandom) <- c("passage", paste0("word", 1:length(optionidx)))
} else if (type == "R4WSI"){
optionidx <- 2:5
optRandom <- tasks[,optionidx]
optRandom <- as.data.frame(t(apply(optRandom, 1, function(x) x[sample(length(x))])),
stringsAsFactors = F)
colnames(optRandom) <- paste0("word", 1:length(optionidx))
} else {
stop("Please specify a validate task type.")
}
record <- list(tasks, optRandom)
save(record, file = path)
message(paste("Record saved to", path))
return(record)
}
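# Hedged usage sketch (not run): `alltasks` is a placeholder for the output of
# validateTopic()/validateLabel(), optionally passed through mixGold().
if (FALSE) {
  rec <- recordTasks(type = "R4WSI0", tasks = alltasks, path = "record.RData")
  rec[[1]]  # original tasks; optcrt marks the model-predicted choice
  rec[[2]]  # randomized options, exactly as they will be sent to MTurk
}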
|
/scratch/gouwar.j/cran-all/cranData/validateIt/R/recordTasks.R
|
#' Send prepared task to Mturk and record the API-returned HIT ids.
#'
#' @details
#' Pairs the local ids with Mturk ids and save them to specified paths
#'
#' @param hit_type find from the Mturk requester's dashboard
#' @param hit_layout find from the Mturk requester's dashboard
#' @param type one of WI, T8WSI, R4WSI
#' @param tasksrecord output of recordTasks()
#' @param tasksids ids of tasks to send in numeric form. If left unspecified, the whole batch will be posted
#' @param HITidspath path to record the returned HITids
#' @param n_assignments number of assignments per task. For the validation tasks, people almost always want 1
#' @param expire_in_seconds default 8 hours
#' @param batch_annotation add if needed
#'
#'@return A list containing two elements:
#'\itemize{
#'\item{current_HIT_ids: a vector of the HIT ids returned by the MTurk API.}
#'\item{map_ids: a data frame that maps the tasksids to their corresponding MTurk HIT ids.}
#'}
#'
#' @import pyMTurkR
#'
#' @export
sendTasks <- function(hit_type = NULL,
hit_layout = NULL,
type = NULL,
tasksrecord = NULL,
tasksids = NULL,
HITidspath = NULL,
n_assignments = '1',
expire_in_seconds = as.character(60 * 60 * 8),
batch_annotation = NULL){
if(is.null(tasksids)){
tasksids <- tasksrecord[[1]][,"id"]
}
tasksids <- sort(tasksids)
tosend <- tasksrecord[[2]][tasksrecord[[1]][,"id"] %in% tasksids,]
if(type == "R4WSI0" | type == "T8WSI" | type == "LI" | type == "OL"){
hit_param_names <- c('passage', 'word1', 'word2', 'word3', 'word4')
} else if (type == "WI"){
hit_param_names <- c('word1', 'word2', 'word3', 'word4', 'word5')
} else if (type == "R4WSI"){
hit_param_names <- c('word1', 'word2', 'word3', 'word4')
} else {
stop("Invalid task types")
}
current_HIT_ids <- rep(NA, nrow(tosend))
map_ids <- as.data.frame(matrix(NA, nrow = nrow(tosend), ncol = 2))
colnames(map_ids) <- c("tasksids", "Mturkids")
message('Sending task to MTurk')
for(i in 1:nrow(tosend)){
hit_params <- list()
for(j in 1:length(hit_param_names)){
hit_params[[j]] <- list(Name = hit_param_names[j],
Value = tosend[i, j])
}
current_HIT_ids[i] <- suppressMessages(CreateHIT(hit.type = hit_type,
hitlayoutid = hit_layout,
hitlayoutparameters = hit_params,
assignments = n_assignments,
expiration = expire_in_seconds,
annotation = batch_annotation,
verbose = FALSE))$HITId
map_ids[i,] <- cbind(tasksids[i], current_HIT_ids[i])
}
HITids <- list(current_HIT_ids, map_ids)
save(HITids, file = HITidspath)
message(paste("HITids saved to", HITidspath))
return(HITids)
}
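# Hedged usage sketch (not run): posting HITs requires a HIT type and layout
# created beforehand in the MTurk requester dashboard; the ids below are
# placeholders, and `rec` stands for the recordTasks() output.
if (FALSE) {
  hitids <- sendTasks(hit_type = "YOUR_HIT_TYPE_ID",
                      hit_layout = "YOUR_HIT_LAYOUT_ID",
                      type = "R4WSI0",
                      tasksrecord = rec,
                      HITidspath = "hitids.RData",
                      batch_annotation = "pilot1")
}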
|
/scratch/gouwar.j/cran-all/cranData/validateIt/R/sendTasks.R
|
#' Tidy eval helpers
#'
#' @description
#' This page lists the tidy eval tools reexported in this package from
#' rlang. To learn about using tidy eval in scripts and packages at a
#' high level, see the [dplyr programming
#' vignette](https://dplyr.tidyverse.org/articles/programming.html)
#' and the [ggplot2 in packages
#' vignette](https://ggplot2.tidyverse.org/articles/ggplot2-in-packages.html).
#' The [Metaprogramming
#' section](https://adv-r.hadley.nz/metaprogramming.html) of [Advanced
#' R](https://adv-r.hadley.nz) may also be useful for a deeper dive.
#'
#' * The tidy eval operators `{{`, `!!`, and `!!!` are syntactic
#' constructs which are specially interpreted by tidy eval functions.
#' You will mostly need `{{`, as `!!` and `!!!` are more advanced
#' operators which you should not have to use in simple cases.
#'
#' The curly-curly operator `{{` allows you to tunnel data-variables
#' passed from function arguments inside other tidy eval functions.
#' `{{` is designed for individual arguments. To pass multiple
#' arguments contained in dots, use `...` in the normal way.
#'
#' ```
#' my_function <- function(data, var, ...) {
#' data %>%
#' group_by(...) %>%
#' summarise(mean = mean({{ var }}))
#' }
#' ```
#'
#' * [enquo()] and [enquos()] delay the execution of one or several
#' function arguments. The former returns a single expression, the
#' latter returns a list of expressions. Once defused, expressions
#' will no longer evaluate on their own. They must be injected back
#' into an evaluation context with `!!` (for a single expression) and
#' `!!!` (for a list of expressions).
#'
#' ```
#' my_function <- function(data, var, ...) {
#' # Defuse
#' var <- enquo(var)
#' dots <- enquos(...)
#'
#' # Inject
#' data %>%
#' group_by(!!!dots) %>%
#' summarise(mean = mean(!!var))
#' }
#' ```
#'
#' In this simple case, the code is equivalent to the usage of `{{`
#' and `...` above. Defusing with `enquo()` or `enquos()` is only
#' needed in more complex cases, for instance if you need to inspect
#' or modify the expressions in some way.
#'
#' * The `.data` pronoun is an object that represents the current
#' slice of data. If you have a variable name in a string, use the
#' `.data` pronoun to subset that variable with `[[`.
#'
#' ```
#' my_var <- "disp"
#' mtcars %>% summarise(mean = mean(.data[[my_var]]))
#' ```
#'
#' * Another tidy eval operator is `:=`. It makes it possible to use
#' glue and curly-curly syntax on the LHS of `=`. For technical
#' reasons, the R language doesn't support complex expressions on
#' the left of `=`, so we use `:=` as a workaround.
#'
#' ```
#' my_function <- function(data, var, suffix = "foo") {
#' # Use `{{` to tunnel function arguments and the usual glue
#' # operator `{` to interpolate plain strings.
#' data %>%
#' summarise("{{ var }}_mean_{suffix}" := mean({{ var }}))
#' }
#' ```
#'
#' * Many tidy eval functions like `dplyr::mutate()` or
#' `dplyr::summarise()` give an automatic name to unnamed inputs. If
#' you need to create the same sort of automatic names by yourself,
#' use `as_label()`. For instance, the glue-tunnelling syntax above
#' can be reproduced manually with:
#'
#' ```
#' my_function <- function(data, var, suffix = "foo") {
#' var <- enquo(var)
#' prefix <- as_label(var)
#' data %>%
#' summarise("{prefix}_mean_{suffix}" := mean(!!var))
#' }
#' ```
#'
#' Expressions defused with `enquo()` (or tunnelled with `{{`) need
#' not be simple column names, they can be arbitrarily complex.
#' `as_label()` handles those cases gracefully. If your code assumes
#' a simple column name, use `as_name()` instead. This is safer
#' because it throws an error if the input is not a name as expected.
#'
#' @return
#' This function does not return any value (NULL). It only serves to document
#' the tidy eval tools reexported in this package from rlang.
#'
#' @md
#' @name tidyeval
#' @keywords internal
#' @importFrom rlang enquo enquos .data := as_name as_label
#' @importFrom grDevices dev.off pdf
#' @importFrom graphics abline axis par points segments
#' @importFrom stats aggregate
#' @importFrom here here
#' @aliases enquo enquos .data := as_name as_label
#' @export enquo enquos .data := as_name as_label
NULL
|
/scratch/gouwar.j/cran-all/cranData/validateIt/R/utils-tidy-eval.R
|
#' Create validation tasks for labels assigned to the topics in the topic model of choice.
#'
#' @details
#' Users need to pick a topic model that they deem to be good and label the topics
#' they later would like to use as measures.
#'
#' @param type Task structures to be specified. Must be one of "LI" (Label Intrusion)
#' and "OL" (Optimal Label).
#' @param n The number of desired tasks
#' @param text.predict A data frame or matrix containing both the text and the indicator(s)
#' of the model predicted topic(s).
#' @param text.name variable name in `text.predict` that indicates the text
#' @param top1.name variable name in `text.predict` that indicates the top1 model predicted topic
#' @param top2.name variable name in `text.predict` that indicates the top2 model predicted topic
#' @param top3.name variable name in `text.predict` that indicates the top3 model predicted topic
#' @param labels The user-defined labels assigned to the topics
#' @param labels.index The topic index in correspondence with the labels, e.g., c(10, 12, 15).
#' Must be of the same length and in the same order as `labels`.
#' @param labels.add Labels from other broad categories. Default to NULL. Users could
#' specify them to evaluate how well different broad categories are distinguished from
#' one another.
#'
#' @return A matrix containing the validation tasks. The matrix has six columns:
#' \describe{
#' \item{topic}{The topic index associated with the document.}
#' \item{doc}{The text of the document.}
#' \item{opt1}{The first option label presented to the user.}
#' \item{opt2}{The second option label presented to the user.}
#' \item{opt3}{The third option label presented to the user.}
#' \item{optcrt}{The correct label for the document.}
#' }
#'
#'
#' @export
validateLabel <- function(type, n, text.predict = NULL, text.name = "text",
top1.name = "top1", top2.name = "top2", top3.name = "top3",
labels = NULL, labels.index = NULL, labels.add = NULL){
if(!(is.data.frame(text.predict) | is.matrix(text.predict))){
stop("\"text.predict\" needs to be a data.frame or matrix")
}
if(type == "OL"){
if(!(text.name %in% colnames(text.predict))){
stop("Please specify the correct variable name for text.")
}
if(!(top1.name %in% colnames(text.predict))){
stop("Please specify the correct variable name for the model predicted most likely topic.")
}
text <- text.predict[,text.name]
top1 <- text.predict[,top1.name]
if(!all(labels.index %in% unique(top1))){
stop("Some topic (labels.index) does not have any corresponding representative text.
Consider removing that topic.")
}
if(length(labels) != length(labels.index)){
stop("\"labels\" and \"labels.index\" have to be of the same length and in the exact same order.")
}
topic <- rep(labels.index, length.out = n)
# define the output
out <- matrix(NA, ncol = 6, nrow = n)
colnames(out) <- c('topic', 'doc', 'opt1', 'opt2', 'opt3', 'optcrt')
for(i in 1:n){
# sample document by topic
k <- topic[i]
doc <- gsub('\n', '<br>', sample(text[top1 == k], 1))
# prepare labels
best.label <- labels[labels.index == k]
if (is.null(labels.add)){
intr.labels <- sample(labels[labels.index != k], 3)
} else {
intr.labels <- sample(c(labels[labels.index != k], labels.add), 3)
}
# put together a task
out[i,] <- c(k, doc, intr.labels[1], intr.labels[2], intr.labels[3], best.label)
}
} else if (type == "LI"){
if(!(text.name %in% colnames(text.predict))){
stop("Please specify the correct variable name for text.")
}
if(!(top1.name %in% colnames(text.predict) &
top2.name %in% colnames(text.predict) &
top3.name %in% colnames(text.predict))){
stop("Please specify the correct variable names for the model predicted top3 topics.")
}
if(!all(labels.index %in% unique(c(text.predict[,c(top1.name)],
text.predict[,c(top2.name)],
text.predict[,c(top3.name)])))){
warning("Some topic (labels.index) does not have any corresponding representative text.
Consider removing that topic.")
}
if(!all(unique(c(text.predict[,c(top1.name)],
text.predict[,c(top2.name)],
text.predict[,c(top3.name)])) %in% labels.index)){
stop("The top3 topics associated with some text are not all relevant.
Consider refining the text pool.")
}
if(length(labels) != length(labels.index)){
stop("\"labels\" and \"labels.index\" have to be of the same length and in the exact same order.")
}
# define the output
out <- matrix(NA, ncol = 6, nrow = n)
colnames(out) <- c('topic', 'doc', 'opt1', 'opt2', 'opt3', 'optcrt')
for(i in 1:n){
# randomly sample a row from the pool
doc.idx <- sample(1:nrow(text.predict), 1)
# prepare doc and labels
doc <- gsub('\n', '<br>', text.predict[doc.idx, text.name])
label1 <- labels[labels.index == text.predict[doc.idx, top1.name]]
label2 <- labels[labels.index == text.predict[doc.idx, top2.name]]
label3 <- labels[labels.index == text.predict[doc.idx, top3.name]]
pred3 <- unlist(text.predict[doc.idx, c(top1.name, top2.name, top3.name)])
if (is.null(labels.add)){
intr.label <- sample(labels[!(labels.index %in% pred3)], 1)
} else {
intr.label <- sample(c(labels[!(labels.index %in% pred3)], labels.add), 1)
}
# put together the question
out[i,] <- c(toString(pred3), doc, label1, label2, label3, intr.label)
}
} else {
stop("Please specify a valid task structure.")
}
return(out)
}
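# Hedged usage sketch (not run): `pred` is a hypothetical data frame holding the
# text plus top1/top2/top3 predicted topics; labels and labels.index must align.
if (FALSE) {
  ol_tasks <- validateLabel(type = "OL", n = 20, text.predict = pred,
                            labels = c("economy", "health", "war"),
                            labels.index = c(1, 2, 3))
  li_tasks <- validateLabel(type = "LI", n = 20, text.predict = pred,
                            labels = c("economy", "health", "war"),
                            labels.index = c(1, 2, 3))
}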
|
/scratch/gouwar.j/cran-all/cranData/validateIt/R/validateLabel.R
|
#' Create validation tasks for topic model selection
#'
#' @details
#' Users need to fit their own topic models.
#'
#' @param type Task structures to be specified. Must be one of "WI" (word intrusion),
#' "T8WSI" (top 8 word set intrusion), and "R4WSI" (random 4 word set intrusion).
#' @param n The number of desired tasks
#' @param text The pool of documents to be shown to the Mturk workers
#' @param vocab A character vector specifying the words in the corpus. Usually, it
#' can be found in topic model output.
#' @param beta A matrix of word probabilities for each topic. Each row represents a
#' topic and each column represents a word. Note this should not be in the logged form.
#' @param theta A matrix of topic proportions. Each row represents a document and each
#' column represents a topic. Must be specified if type = "T8WSI" or "R4WSI0".
#' @param thres the threshold to draw words from, default to the top 20 words.
#'
#' @return A matrix of validation tasks. Each row represents a task and each column
#' represents an aspect of a task, such as the topic index, the document text (for
#' "T8WSI" and "R4WSI0"), and the answer options: non-intrusive and intrusive words
#' (for "WI") or word sets (for the other task types).
#'
#' @export
validateTopic <- function(type, n, text = NULL, vocab, beta, theta = NULL, thres = 20){
if (type == "WI"){
if (is.vector(vocab)){
vocab <- matrix(vocab, nrow = nrow(beta), ncol = length(vocab), byrow = T)
}
if (ncol(vocab) != ncol(beta)){
stop("beta matrix does not correspond with the vocabulary.")
}
orderbeta <- t(apply(beta, 1, order, decreasing = TRUE))
topic <- rep(1:nrow(beta), length.out = n)
out <- matrix(NA, ncol = 6, nrow = n)
colnames(out) <- c('topic', 'opt1', 'opt2', 'opt3', 'opt4', 'optcrt')
for(i in 1:n){
k <- topic[i]
non.intr <- as.character(sample(vocab[k, orderbeta[k, 1:thres]], 4,
prob = beta[k, orderbeta[k, 1:thres]]))
intr.k <- sample((1:nrow(beta))[-k], 1)
intr <- as.character(sample(vocab[intr.k, orderbeta[intr.k, 1:thres]], 1,
prob = beta[intr.k, orderbeta[intr.k, 1:thres]]))
out[i,] <- c(k, non.intr, intr)
}
} else if (type == "T8WSI"){
if (length(text) != nrow(theta)){
stop("theta matrix does not correspond with the documents.")
}
out <- matrix(NA, ncol = 6, nrow = n)
colnames(out) <- c('topic', 'doc', 'opt1', 'opt2', 'opt3', 'optcrt')
topwords <- lapply(1:nrow(beta),
function(x) toString(vocab[x, order(beta[x,], decreasing = T)][1:8]))
for(i in 1:n){
doc.idx <- sample(1:length(text), 1)
# doc <- paste('<p align="left">', gsub('\n', '<br>', text[doc.idx]), '</p>')
doc <- gsub('\n', '<br>', text[doc.idx])
pred3 <- order(theta[doc.idx,], decreasing = T)[1:3]
intr <- sample(order(theta[doc.idx,], decreasing = T)[-(1:3)], 1)
out[i,] <- c(toString(pred3), doc,
topwords[[pred3[1]]], topwords[[pred3[2]]],
topwords[[pred3[3]]], topwords[[intr]])
}
} else if (type == "R4WSI0"){
if (ncol(vocab) != ncol(beta)){
stop("beta matrix does not correspond with the vocabulary.")
}
if (length(text) != nrow(theta)){
stop("theta matrix does not correspond with the documents.")
}
pred1 <- t(apply(theta, 1, order, decreasing = T))[,1] # predict top 1 topic for each doc
topic <- rep(sort(unique(pred1)), length.out = n)
orderbeta <- t(apply(beta, 1, order, decreasing = TRUE))
out <- matrix(NA, ncol = 6, nrow = n)
colnames(out) <- c('topic', 'doc', 'opt1', 'opt2', 'opt3', 'optcrt')
for(i in 1:n){
k <- topic[i]
# doc <- paste('<p align="left">', gsub('\n', '<br>', sample(text[pred1 == k], 1)), '</p>')
doc <- gsub('\n', '<br>', sample(text[pred1 == k], 1))
non.intr <- as.character(sample(vocab[k, orderbeta[k, 1:thres]], 12,
prob = beta[k, orderbeta[k, 1:thres]]))
intr.k <- sample((1:nrow(beta))[-k], 1)
intr <- as.character(sample(vocab[intr.k, orderbeta[intr.k, 1:thres]], 4,
prob = beta[intr.k, orderbeta[intr.k, 1:thres]]))
asgn.n.intr <- sample(c(rep(1:3, 4)))
out[i,] <- c(k, doc,
toString(non.intr[asgn.n.intr==1]),
toString(non.intr[asgn.n.intr==2]),
toString(non.intr[asgn.n.intr==3]),
toString(intr))
}
} else if (type == "R4WSI"){
if (is.vector(vocab)){
vocab <- matrix(vocab, nrow = nrow(beta), ncol = length(vocab), byrow = T)
}
if (ncol(vocab) != ncol(beta)){
stop("beta matrix does not correspond with the vocabulary.")
}
orderbeta <- t(apply(beta, 1, order, decreasing = TRUE))
topic <- rep(1:nrow(beta), length.out = n)
out <- matrix(NA, ncol = 5, nrow = n)
colnames(out) <- c('topic', 'opt1', 'opt2', 'opt3', 'optcrt')
for(i in 1:n){
k <- topic[i]
non.intr <- as.character(sample(vocab[k, orderbeta[k, 1:thres]], 12,
prob = beta[k, orderbeta[k, 1:thres]]))
intr.k <- sample((1:nrow(beta))[-k], 1)
intr <- as.character(sample(vocab[intr.k, orderbeta[intr.k, 1:thres]], 4,
prob = beta[intr.k, orderbeta[intr.k, 1:thres]]))
asgn.n.intr <- sample(c(rep(1:3, 4)))
out[i,] <- c(k,
toString(non.intr[asgn.n.intr==1]),
toString(non.intr[asgn.n.intr==2]),
toString(non.intr[asgn.n.intr==3]),
toString(intr))
}
} else {
stop("Please specify a valid task structure.")
}
return(out)
}
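# Hedged usage sketch (not run): `modtest` is the example STM object; beta must
# be on the probability scale (exp of logbeta) and theta is the document-topic matrix.
if (FALSE) {
  data(modtest)
  wi_tasks <- validateTopic(type = "WI", n = 20,
                            vocab = modtest$vocab,
                            beta = exp(modtest$beta$logbeta[[1]]))
}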
|
/scratch/gouwar.j/cran-all/cranData/validateIt/R/validateTopic.R
|
COND_CHECK <-
"
# Conditional checks
{{#pairs}}
if ({{{cond}}}) {{{cons}}}
{{/pairs}}
"
derive_arule <- function(vars, d, fraction=0, debug = FALSE){
tab <- table(d[vars])
if (debug){
print(tab)
}
# strict checking, TODO relax this restriction with fraction
i <- which(tab == 0, arr.ind = TRUE)
rn <- rownames(tab)
cn <- colnames(tab)
lapply(seq_len(nrow(i)), function(r){
list( cond = atomic_check_expr( name = vars[1]
, value = rn[i[r,1]]
, is_logical = is.logical(d[[vars[1]]])
, negate = FALSE
),
cons = atomic_check_expr( name = vars[2]
, value = cn[i[r,2]]
, is_logical = is.logical(d[[vars[2]]])
, negate = TRUE
)
)
})
}
POS_CHECK_VAR <- "^\\.pos\\."
atomic_check_expr <- function(name, value, is_logical = FALSE, negate=FALSE){
if (is_logical){
value <- as.logical(value)
}
v <- as.symbol(name)
expr <- if (negate){
if (is.logical(value)){
value <- !value
bquote(.(v) == .(value))
} else {
bquote(.(v) != .(value))
}
} else {
bquote(.(v) == .(value))
}
if (isTRUE(grepl(POS_CHECK_VAR, name)) && is.logical(value)){
    # note: when negate = TRUE the logical value was already flipped above
v <- as.symbol(sub(POS_CHECK_VAR, "", name))
expr <- if (value) bquote(.(v) > 0) else bquote(.(v) <= 0)
}
deparse(expr)
}
#' @export
#' @rdname suggest_cond_rule
write_cond_rule <- function(d, vars=names(d), file = stdout()){
is_numeric <- sapply(d[vars], is.numeric)
for (v in vars[is_numeric]){
vc <- paste0(".pos.", v)
d[[vc]] <- d[[v]] > 0
vars <- c(vars, vc)
}
vars <- Filter(function(v){
is.logical(d[[v]])
# !is.numeric(d[[v]])
}, vars)
pairs <- do.call(c, combn(vars, 2, derive_arule, d = d, simplify = FALSE))
writeLines(
whisker::whisker.render(COND_CHECK, data = list(pairs=pairs)),
file
)
invisible(pairs)
}
#' Suggest a conditional rule
#'
#' Suggest a conditional rule based on an association rule.
#' This function derives conditional rules based on the non-existence
#' of combinations of categories in pairs of variables.
#' For each numerical variable a logical variable is derived that tests for
#' positivity. It generates IF THEN rules based on two variables.
#' @export
#' @example example/na_check.R
#' @importFrom utils combn
#' @example example/conditional_rule.R
#' @inheritParams suggest_type_check
#' @returns `suggest_cond_rule` returns [validate::validator()] object with the suggested rules.
#' `write_cond_rule` writes the rules to file and returns invisibly a list of condition/consequence pairs.
suggest_cond_rule <- function(d, vars = names(d)){
tf <- tempfile()
pairs <- write_cond_rule(d, vars = vars, file = tf)
if (length(pairs) == 0){
return(validate::validator())
}
rules <- validate::validator(.file=tf)
validate::description(rules) <-
sprintf("conditional rule")
validate::origin(rules) <-
sprintf("validatesuggest %s"
, packageVersion("validatesuggest")
)
names(rules) <- paste0("CR", seq_len(length(rules)))
rules
}
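# Hedged usage sketch: `car_owner` is the example data set shipped with this
# package; the derived IF/THEN rules encode category combinations never observed.
if (FALSE) {
  data(car_owner)
  rules <- suggest_cond_rule(car_owner)
  rules
  write_cond_rule(car_owner)  # print the generated rules to stdout instead
}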
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/R/assoc_rule.R
|
#' Car owners data set (fictitious).
#'
#' A constructed data set useful for detecting conditional dependencies.
#'
#'
#' @format A data frame with 200 rows and 5 variables. Each
#' row is a person with:
#' \describe{
#' \item{age}{age of person}
#'   \item{driver_license}{has a driver license, only persons older than 17 can have a license
#' in this data set}
#' \item{income}{monthly income}
#' \item{owns_car}{only persons with a drivers license
#' , and a monthly income > 1500 can own a car}
#' \item{car_color}{NA when there is no car}
#' }
#' @example example/conditional_rule.R
"car_owner"
#' task2 dataset
#'
#' Fictitious test data set from the European (ESSnet) project on validation 2017.
#' @references European (ESSnet) project on validation 2017
#'
#' @format
#' \describe{
#' \item{ID}{ID}
#' \item{Age}{Age of person}
#' \item{Married}{Marital status}
#' \item{Employed}{Employed or not}
#' \item{Working_hours}{Working hours}
#' }
"task2"
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/R/data.R
|
DOMAIN_CHECK <-
"# check the domain of variables
{{#vars}}
{{#isnumeric}}
{{{name}}} >= 0
{{/isnumeric}}
{{#islogical}}
{{{name}}} %in% c(TRUE, FALSE)
{{/islogical}}
{{#ischaracter}}
{{{name}}} %in% {{{values}}}
{{/ischaracter}}
{{/vars}}
"
#' @export
#' @rdname suggest_domain_check
write_domain_check <- function(d, vars=names(d), only_positive=TRUE, file=stdout()){
vars <- lapply(vars, function(name){
x <- d[[name]]
if (is.numeric(x)){
if (only_positive && any(x < 0, na.rm=TRUE)){
return(NULL)
}
return(list(name = name, isnumeric=TRUE))
} else if (is.logical(x)){
list(name = name, islogical=TRUE)
} else {
#TODO date and so on
x <- as.character(x)
values <- unique(x)
if (length(values) == length(x)){
warning("Skipped domain check '",name,"'", ", as it is unique for each record"
, call. = FALSE
)
return(NULL)
}
list( name = name
, ischaracter = TRUE
, values = deparse(values)
)
}
})
vars <- Filter(function(v) {!is.null(v)}, vars)
writeLines(
whisker::whisker.render(DOMAIN_CHECK, data = list(vars=vars)),
file
)
invisible(vars)
}
#' Suggest a domain check
#'
#' @export
#' @inheritParams suggest_type_check
#' @param only_positive if `TRUE`, a non-negativity rule is generated only for numeric columns that contain no negative values
#' @example example/range_check.R
#' @returns `suggest_domain_check` returns [validate::validator()] object with the suggested rules.
#' `write_domain_check` returns invisibly a named list of checks for each variable.
suggest_domain_check <- function(d, vars = names(d), only_positive=TRUE){
tf <- tempfile()
vars <- write_domain_check(d, vars, file = tf)
if (length(vars) == 0){
return(validate::validator())
}
rules <- validate::validator(.file = tf)
validate::description(rules) <-
sprintf("domain check")
validate::origin(rules) <-
sprintf("validatesuggest %s"
, packageVersion("validatesuggest")
)
names(rules) <- paste0("DC", seq_len(length(rules)))
rules
}
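# Hedged usage sketch: character columns yield membership checks, numeric
# columns a non-negativity check (iris is used purely as an illustration).
if (FALSE) {
  rules <- suggest_domain_check(iris)
  summary(validate::confront(iris, rules))
}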
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/R/domain_check.R
|
NA_CHECK <-
"# check the type of variables
{{#vars}}
is.complete({{{name}}})
{{/vars}}
"
#' @export
#' @rdname suggest_na_check
write_na_check <- function(d, vars=names(d), file=stdout()){
# only columns that are complete in d or use a fraction?
vars <- Filter(function(name){
!anyNA(d[[name]])
}, vars)
vars <- lapply(vars, function(name){
x <- d[[name]]
list(name = name, type = class(x)[1])
})
writeLines(
whisker::whisker.render(NA_CHECK, data = list(vars=vars)),
file
)
invisible(vars)
}
#' Suggest a check for completeness.
#'
#' Suggest a check for completeness.
#' @export
#' @example example/na_check.R
#' @inheritParams suggest_type_check
#' @returns `suggest_na_check` returns [validate::validator()] object with the suggested rules.
#' `write_na_check` writes the rules to file and returns invisibly a named list of the complete variables.
suggest_na_check <- function(d, vars = names(d)){
tf <- tempfile()
vars <- write_na_check(d, vars = vars, file = tf)
if (length(vars) == 0){
return(validate::validator())
}
rules <- validate::validator(.file=tf)
validate::description(rules) <-
sprintf("type check")
validate::origin(rules) <-
sprintf("validatesuggest %s"
, packageVersion("validatesuggest")
)
names(rules) <- paste0("NA", seq_len(length(rules)))
rules
}
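# Hedged usage sketch: only columns without any NA in `d` get an is.complete()
# rule; `task2` is the example data set shipped with this package.
if (FALSE) {
  data(task2)
  suggest_na_check(task2)
}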
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/R/na_check.R
|
POS_CHECK <-
"# check for positivity
{{#vars}}
{{{name}}} >= 0
{{/vars}}
"
#' @export
#' @rdname suggest_pos_check
write_pos_check <- function(d, vars=names(d), only_positive=TRUE, file=stdout()){
vars <- Filter(function(name){is.numeric(d[[name]])}, vars)
vars <- lapply(vars, function(name){
x <- d[[name]]
if (only_positive && any(x < 0, na.rm=TRUE)){
return(NULL)
}
return(list(name = name))
})
vars <- Filter(function(v){!is.null(v)}, vars)
writeLines(
whisker::whisker.render(POS_CHECK, data = list(vars=vars)),
file
)
invisible(vars)
}
#' Suggest a positivity check
#'
#' @export
#' @inheritParams suggest_type_check
#' @param only_positive if `TRUE`, a non-negativity rule is generated only for numeric columns that contain no negative values
#' @example example/range_check.R
#' @returns `suggest_pos_check` returns [validate::validator()] object with the suggested rules.
#' `write_pos_check` writes the rules to file and returns invisibly a named list of checks for each variable.
suggest_pos_check <- function(d, vars = names(d), only_positive=TRUE){
tf <- tempfile()
vars <- write_pos_check(d, vars, file = tf, only_positive = only_positive)
if (length(vars) == 0){
return(validate::validator())
}
rules <- validate::validator(.file = tf)
validate::description(rules) <-
sprintf("positivity check")
validate::origin(rules) <-
sprintf("validatesuggest %s"
, packageVersion("validatesuggest")
)
names(rules) <- paste0("PC", seq_len(length(rules)))
rules
}
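# Hedged usage sketch: with only_positive = TRUE a `>= 0` rule is generated only
# for numeric columns that contain no negative values.
if (FALSE) {
  data(task2)
  suggest_pos_check(task2)
}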
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/R/pos_check.R
|
RANGE_CHECK <-
"# check the range of variables
{{#vars}}
{{#isnumeric}}
{{#in_range}}
in_range({{{name}}}, {{min}}, {{max}})
{{/in_range}}
{{^in_range}}
{{#min}}
{{{name}}} >= {{min}}
{{/min}}
{{#max}}
{{{name}}} <= {{max}}
{{/max}}
{{/in_range}}
{{/isnumeric}}
{{#islogical}}
{{{name}}} %in% c(TRUE, FALSE)
{{/islogical}}
{{#ischaracter}}
{{{name}}} %in% {{{values}}}
{{/ischaracter}}
{{/vars}}
"
#' @export
#' @rdname suggest_range_check
write_range_check <- function(d, vars=names(d), min=TRUE, max=FALSE, file=stdout()){
vars <- lapply(vars, function(name){
x <- d[[name]]
if (is.numeric(x)){
l <- list(name = name, isnumeric=TRUE)
if (isTRUE(min)){
l$min <- min(x, na.rm = TRUE)
}
if (isTRUE(max)){
l$max <- max(x, na.rm = TRUE)
}
l$in_range <- isTRUE(min && max)
l
} else if (is.logical(x)){
list(name = name, islogical=TRUE)
} else {
#TODO date and so on
x <- as.character(x)
values <- unique(x)
if (length(values) == length(x)){
warning("Skipped range check '",name,"'", ", as it is unique for each record"
, call. = FALSE
)
return(NULL)
}
list( name = name
, ischaracter = TRUE
, values = deparse(values)
)
}
})
writeLines(
whisker::whisker.render(RANGE_CHECK, data = list(vars=vars)),
file
)
invisible(vars)
}
#' Suggest a range check
#'
#' @export
#' @inheritParams suggest_type_check
#' @example example/range_check.R
#' @param min `TRUE` or `FALSE`, should the minimum value be checked?
#' @param max `TRUE` or `FALSE`, should the maximum value be checked?
#' @returns `suggest_range_check` returns [validate::validator()] object with the suggested rules.
#' `write_range_check` writes the rules to file and returns invisibly a named list of ranges for each variable.
suggest_range_check <- function(d, vars = names(d), min=TRUE, max=FALSE){
tf <- tempfile()
vars <- write_range_check(d, vars, min=min, max=max, file = tf)
if (length(vars) == 0){
return(validate::validator())
}
rules <- validate::validator(.file = tf)
validate::description(rules) <-
sprintf("range check")
validate::origin(rules) <-
sprintf("validatesuggest %s"
, packageVersion("validatesuggest")
)
names(rules) <- paste0("RC", seq_len(length(rules)))
rules
}
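# Hedged usage sketch: the min/max switches control whether observed minima,
# maxima, or both (via in_range) are turned into rules.
if (FALSE) {
  rules <- suggest_range_check(iris, min = TRUE, max = TRUE)
  rules
}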
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/R/range_check.R
|
RATIO_CHECK <-
"# check the ratio of highly correlated variables
{{#pairs}}
{{{var1}}} >= {{min}} * {{{var2}}}
{{{var1}}} <= {{max}} * {{{var2}}}
{{/pairs}}
"
#' @export
#' @rdname suggest_ratio_check
write_ratio_check <- function(d, vars=names(d), file=stdout(), lin_cor=0.95, digits=2){
vars <- Filter(function(v){
is.numeric(d[[v]])
}, vars)
cd <- stats::cor(d[vars], d[vars], "pairwise.complete.obs")
cdl <- which(abs(cd) >= lin_cor, arr.ind = TRUE)
cdl <- cdl[cdl[,1] < cdl[,2],]
cdl <- matrix(vars[cdl], ncol=2)
pairs <- lapply(seq_len(nrow(cdl)), function(r){
ratio_check(d, cdl[r,1], cdl[r,2], digits = digits)
})
pairs
writeLines(
whisker::whisker.render(RATIO_CHECK, data = list(pairs=pairs)),
file
)
invisible(pairs)
}
ratio_check <- function(d, var1, var2, digits = 2){
ratio <- d[[var1]]/d[[var2]]
  ratio <- ratio[is.finite(ratio)] # drop NA and divide-by-zero results
list( var1 = var1
, var2 = var2
, min = round(min(ratio), digits = digits)
, max = round(max(ratio), digits = digits)
)
}
# write_ratio_check(retailers)
# write_ratio_check(SBS2000)
#' Suggest ratio checks
#'
#' Suggest ratio checks
#' @export
#' @example example/ratio_check.R
#' @inheritParams suggest_type_check
#' @param lin_cor threshold for the absolute pairwise correlation; variable pairs at or above this threshold get a ratio check
#' @param digits number of digits for rounding
#' @returns `suggest_ratio_check` returns [validate::validator()] object with the suggested rules.
#' `write_ratio_check` writes the rules to file and returns invisibly a list of checks for each variable pair.
suggest_ratio_check <- function(d, vars = names(d), lin_cor=0.95, digits=2){
tf <- tempfile()
pairs <- write_ratio_check(d, vars, lin_cor = lin_cor, file = tf, digits = digits)
if (length(pairs) == 0){
return(validate::validator())
}
rules <- validate::validator(.file = tf)
validate::description(rules) <-
sprintf("ratio check")
validate::origin(rules) <-
sprintf("validatesuggest %s"
, packageVersion("validatesuggest")
)
names(rules) <- paste0("RA", seq_len(length(rules)))
rules
}
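# Hedged usage sketch: for numeric pairs whose absolute correlation is at least
# lin_cor, the observed min/max ratio is turned into two bounding rules.
if (FALSE) {
  rules <- suggest_ratio_check(iris, lin_cor = 0.9)
  rules
}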
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/R/ratio_check.R
|
#' @importFrom rpart rpart
get_tree <- function(d){
  # internal, experimental helper: fit a classification tree (note that the
  # response is currently hard-coded to car_color) and derive the ancestors of
  # each tree node
  m <- rpart(car_color ~ ., data = d)
  node <- as.numeric(row.names(m$frame))
  lab <- labels(m)
  parent_node <- get_parents(node)
}
get_parents <- function(node){
  # for every node except the root, repeatedly halve the rpart node number to
  # obtain the chain of ancestor nodes in the binary node numbering
  l <- lapply(node[-1], function(n){
    floor(n * cumprod(rep(0.5, log2(n) - 1)))
  })
}
SPLIT <- "[<>=]=? ?(.+)"
LEVS <- ".+=([a-z]+)$"
LOGICAL <- "(.+)(<)"
get_q <- function(m, d){
split_lab <- labels(m)
vars <- labels(m$terms)
split_vars <- sub(SPLIT, "", split_lab)
#TODO fix logical, factor and character
types <- attr(m$terms, "dataClasses")[split_vars]
expr <- vector(mode="expression", length = length(split_lab))
is_numeric <- which(types == "numeric")
expr[is_numeric] <- sapply(split_lab[is_numeric], str2lang)
is_character <- which(types == "character")
expr[is_character] <- sapply(split_lab[is_character], function(e){
let <- sub(LEVS, "\\1", e)
v <- sub(SPLIT, "", e)
f <- factor(d[[v]])
idx <- match(strsplit(let, "")[[1]], letters)
levs <- levels(f)[idx]
substitute(v %in% levs, list(v = as.symbol(v), levs=levs))
})
is_logical <- which(types == "logical")
expr[is_logical] <- sapply(split_lab[is_logical], function(e){
let <- sub(LEVS, "\\1", e)
v <- sub(SPLIT, "", e)
f <- factor(d[[v]])
idx <- match(strsplit(let, "")[[1]], letters)
levs <- levels(f)[idx]
substitute(v %in% levs, list(v = as.symbol(v), levs=levs))
})
expr
}
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/R/rpart.R
|
#' Suggest rules
#'
#' Suggests rules using the various suggestion checks.
#' Use the more specific `suggest` functions for more control.
#' @inheritParams suggest_type_check
#' @param domain_check if `TRUE` include domain_check
#' @param range_check if `TRUE` include range_check
#' @param type_check if `TRUE` include type_check
#' @param pos_check if `TRUE` include pos_check
#' @param na_check if `TRUE` include na_check
#' @param ratio_check if `TRUE` include ratio_check
#' @param unique_check if `TRUE` include unique_check
#' @param conditional_rule if `TRUE` include cond_rule
#' @export
#' @returns returns [validate::validator()] object with the suggested rules.
#' `write_all_suggestions` writes the suggested rules to `file`.
suggest_rules <- function( d
, vars = names(d)
, domain_check = TRUE
, range_check = TRUE
, pos_check = TRUE
, type_check = TRUE
, na_check = TRUE
, unique_check = TRUE
, ratio_check = TRUE
, conditional_rule = TRUE
){
rules <- validate::validator()
if (domain_check){
rules <- rules + suggest_domain_check(d, vars=vars)
}
if (range_check){
rules <- rules + suggest_range_check(d, vars=vars)
}
if (pos_check){
rules <- rules + suggest_pos_check(d, vars = vars)
}
if (type_check){
rules <- rules + suggest_type_check(d, vars= vars)
}
if (na_check){
rules <- rules + suggest_na_check(d, vars= vars)
}
if (unique_check){
rules <- rules + suggest_unique_check(d, vars= vars)
}
if (ratio_check){
rules <- rules + suggest_ratio_check(d, vars= vars)
}
if (conditional_rule){
rules <- rules + suggest_cond_rule(d, vars= vars)
}
rules
}
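# Hedged usage sketch: generate the full rule set for the example data and
# confront the data with it; individual checks can be switched off via arguments.
if (FALSE) {
  data(car_owner)
  rules <- suggest_rules(car_owner)
  summary(validate::confront(car_owner, rules))
}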
#' @export
#' @rdname suggest_rules
#' @aliases suggest_rules
suggest_all <- suggest_rules
# (rules <- suggest_rules(iris))
#' @export
#' @rdname suggest_rules
#' @param file file to which the checks will be written.
write_all_suggestions <- function( d
, vars=names(d)
, file=stdout()
, domain_check = TRUE
, range_check = TRUE
, type_check = TRUE
, pos_check = TRUE
, na_check = TRUE
, unique_check = TRUE
, ratio_check = TRUE
, conditional_rule = TRUE
){
text <-
"#Generated with `validatesuggest`
"
writeLines(text, file)
if (domain_check){
write_domain_check(d, vars = vars, file = file)
}
if (range_check){
write_range_check(d, vars = vars, file = file)
}
if (pos_check){
write_pos_check(d, vars = vars, file = file)
}
if (type_check){
write_type_check(d, vars = vars, file = file)
}
if (na_check){
write_na_check(d, vars = vars, file = file)
}
  if (unique_check){
    write_unique_check(d, vars = vars, file = file)
  }
  if (ratio_check){
    write_ratio_check(d, vars = vars, file = file)
  }
if (conditional_rule){
write_cond_rule(d, vars = vars, file = file)
}
}
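# A hypothetical usage sketch (not taken from the package examples): write the
# suggestions to a file and read them back as a validator object.
# write_all_suggestions(iris, file = "suggested_rules.R")
# rules <- validate::validator(.file = "suggested_rules.R")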
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/R/suggest_rules.R
|
TYPE_CHECK <-
"# check the type of variables
{{#vars}}
is.{{{type}}}({{{name}}})
{{/vars}}
"
#' @export
#' @rdname suggest_type_check
#' @param file file to which the checks will be written.
write_type_check <- function(d, vars=names(d), file=stdout()){
vars <- lapply(vars, function(name){
x <- d[[name]]
list(name = name, type = class(x)[1])
})
writeLines(
whisker::whisker.render(TYPE_CHECK, data = list(vars=vars)),
file
)
invisible(vars)
}
#' suggest type check
#' @export
#' @param d `data.frame`, used to generate the checks
#' @param vars `character` optionally the subset of variables to be used.
#' @returns `suggest_type_check` returns a [validate::validator()] object with the suggested rules.
#' `write_type_check` writes the rules to file and returns invisibly a named list of types for each variable.
suggest_type_check <- function(d, vars = names(d)){
tf <- tempfile()
vars <- write_type_check(d, vars = vars, file = tf)
if (length(vars) == 0){
return(validate::validator())
}
rules <- validate::validator(.file=tf)
validate::description(rules) <-
sprintf("type check")
validate::origin(rules) <-
sprintf("validatesuggest %s"
, packageVersion("validatesuggest")
)
names(rules) <- paste0("TC", seq_len(length(rules)))
rules
}
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/R/type_check.R
|
UNIQUE_CHECK <-
"# check if these columns are unique
{{#vars}}
all_unique({{{name}}})
{{/vars}}
"
#' Suggest unique checks
#' @export
#' @rdname suggest_unique_check
write_unique_check <- function(d, vars=names(d), file=stdout(), fraction=0.95){
vars <- lapply(vars, function(name){
x <- d[[name]]
if (is.character(x) || is.factor(x)){ #|| is.integer(x)){
u <- unique(x)
if (length(u)/length(x) >= fraction){
list(name=name)
}
}
})
vars <- Filter(function(v){!is.null(v)}, vars)
writeLines(
whisker::whisker.render(UNIQUE_CHECK, data = list(vars=vars)),
file
)
invisible(vars)
}
#' @export
#' @inheritParams suggest_type_check
#' @param fraction if the fraction of unique values in a column is at least `fraction`,
#' the check will be generated.
#' @returns `suggest_unique_check` returns a [validate::validator()] object with the suggested rules.
#' `write_unique_check` writes the rules to file and returns invisibly a named list of checks for each variable.
suggest_unique_check <- function(d, vars = names(d), fraction=0.95){
tf <- tempfile()
  vars <- write_unique_check(d, vars, fraction = fraction, file = tf)
if (length(vars) == 0){
return(validate::validator())
}
rules <- validate::validator(.file = tf)
  validate::description(rules) <-
    sprintf("unique check")
validate::origin(rules) <-
sprintf("validatesuggest %s"
, packageVersion("validatesuggest")
)
names(rules) <- paste0("UN", seq_len(length(rules)))
rules
}
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/R/unique_check.R
|
#' @keywords internal
#' @importFrom utils packageVersion
#' @section validatesuggest:
#' The goal of validatesuggest is to generate suggestions for
#' validation rules from a supplied
#' dataset. These can be used as a starting point for a rule set and are to be
#' adjusted by domain experts.
"_PACKAGE"
# The following block is used by usethis to automatically manage
# roxygen namespace tags. Modify with care!
## usethis namespace: start
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/R/validatesuggest-package.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(validatesuggest)
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/inst/doc/validatesuggest.R
|
---
title: "Data-driven data validation"
subtitle: "Introducing ‘validatesuggest’"
output: rmarkdown::html_vignette
vignette: >
%\VignetteEncoding{UTF-8}
%\VignetteIndexEntry{Data-driven data validation: ‘validatesuggest’}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
markdown:
wrap: 72
bibliography: references.bib
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Intro
Data validation is a cornerstone in data-intensive industries, such as the
art of making official statistics. To create and maintain high quality
statistical output, data needs to be checked before it is used in
statistical processes. A number of projects have been carried out to
streamline and optimize data validation processes, both within
organisations and between organisations. An important success factor for
effective data validation is the design and maintenance of validation
rules that cover the dynamics of the data to be checked. This has led to
the definition of standardized validation rules that cover the most
common use cases in official statistics. Examples are the internationally
agreed 'main types of validation rules' defined by Eurostat, and the set
of recipes and standard functions offered in the well-known R package
validate [@validate], which are documented in the online cookbook [3].

Here we take another approach to rule maintenance. In addition to the
knowledge of the domain specialist, we let the data speak. Properties of
the data, such as type, range, distribution and correlation, can be used
to derive rules that capture the essentials of the data. Since the number
of rules that could potentially be derived from data is in principle
endless, we use the existing international and national standardized
validation rule systems to decide what types of rules make sense. A
refinement of the concept is to also take the time dimension of time
series data into consideration; that way time-dependent validation rules
come within reach.

The suggested rules are expressed in a human-readable form, so that the
domain specialist / rule maintainer can inspect and understand them. The
data-driven concept is intended as a suggestion to the rule maintainers:
suggested rules should always be checked and interpreted before being put
into production. The types of rules currently implemented in the
experimental R package 'validatesuggest' are domain checks, range checks,
positivity checks, type checks, missing-value (NA) checks, uniqueness
checks, ratio checks and conditional rules.
## Usage
```{r setup}
library(validatesuggest)
```
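As a minimal sketch (using the built-in `iris` data set, which is not part of
this package), the main entry point `suggest_rules()` derives a first rule set
from the data; `write_all_suggestions()` writes the same suggestions to a file
for manual editing by a domain expert:
```{r suggest, eval = FALSE}
# suggest a starting rule set from the data; always review before use
rules <- suggest_rules(iris)
rules
# or write the suggestions to an R file that can be edited by hand
write_all_suggestions(iris, file = "suggested_rules.R")
```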
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/inst/doc/validatesuggest.Rmd
|
atomic_check_expr <- validatesuggest:::atomic_check_expr
a <- atomic_check_expr("a", "FALSE", is_logical = TRUE)
expect_equal(a, "a == FALSE")
a <- atomic_check_expr("a", "hi", is_logical = FALSE)
expect_equal(a, "a == \"hi\"")
a <- atomic_check_expr(".pos.a", "TRUE", is_logical = TRUE)
expect_equal(a, "a > 0")
a <- atomic_check_expr(".pos.a", "FALSE", is_logical = TRUE)
expect_equal(a, "a <= 0")
a <- atomic_check_expr(".pos.a", "TRUE", is_logical = TRUE, negate = TRUE)
expect_equal(a, "a <= 0")
a <- atomic_check_expr(".pos.a", "FALSE", is_logical = TRUE, negate = TRUE)
expect_equal(a, "a > 0")
r <- suggest_cond_rule(car_owner)
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/inst/tinytest/test_assoc_rule.R
|
data(task2)
v <- suggest_cond_rule(task2)
expect_equal(length(v), 0)
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/inst/tinytest/test_cond_check.R
|
data("retailers", package="validate")
suggest_domain_check(retailers)
rules <- suggest_pos_check(retailers)
expect_equal(length(rules), 7)
rules <- suggest_pos_check(retailers, only_positive = FALSE)
expect_equal(length(rules), 9)
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/inst/tinytest/test_domain_check.R
|
data("car_owner")
v <- suggest_ratio_check(car_owner)
expect_equal(length(v), 0)
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/inst/tinytest/test_ratio_check.R
|
data("retailers", package="validate")
v <- suggest_unique_check(retailers)
expect_equal(length(v), 0)
data("SBS2000", package="validate")
v <- suggest_unique_check(SBS2000)
expect_equal(v[[1]]@expr, quote(all_unique(id)))
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/inst/tinytest/test_unique.R
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/inst/tinytest/test_validatesuggest.R
|
|
---
title: "Data-driven data validation"
subtitle: "Introducing ‘validatesuggest’"
output: rmarkdown::html_vignette
vignette: >
%\VignetteEncoding{UTF-8}
%\VignetteIndexEntry{Data-driven data validation: ‘validatesuggest’}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
markdown:
wrap: 72
bibliography: references.bib
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Intro
Data validation is a cornerstone in data-intensive industries, such as the
art of making official statistics. To create and maintain high quality
statistical output, data needs to be checked before it is used in
statistical processes. A number of projects have been carried out to
streamline and optimize data validation processes, both within
organisations and between organisations. An important success factor for
effective data validation is the design and maintenance of validation
rules that cover the dynamics of the data to be checked. This has led to
the definition of standardized validation rules that cover the most
common use cases in official statistics. Examples are the internationally
agreed 'main types of validation rules' defined by Eurostat, and the set
of recipes and standard functions offered in the well-known R package
validate [@validate], which are documented in the online cookbook [3].

Here we take another approach to rule maintenance. In addition to the
knowledge of the domain specialist, we let the data speak. Properties of
the data, such as type, range, distribution and correlation, can be used
to derive rules that capture the essentials of the data. Since the number
of rules that could potentially be derived from data is in principle
endless, we use the existing international and national standardized
validation rule systems to decide what types of rules make sense. A
refinement of the concept is to also take the time dimension of time
series data into consideration; that way time-dependent validation rules
come within reach.

The suggested rules are expressed in a human-readable form, so that the
domain specialist / rule maintainer can inspect and understand them. The
data-driven concept is intended as a suggestion to the rule maintainers:
suggested rules should always be checked and interpreted before being put
into production. The types of rules currently implemented in the
experimental R package 'validatesuggest' are domain checks, range checks,
positivity checks, type checks, missing-value (NA) checks, uniqueness
checks, ratio checks and conditional rules.
## Usage
```{r setup}
library(validatesuggest)
```
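As a minimal sketch (using the built-in `iris` data set, which is not part of
this package), the main entry point `suggest_rules()` derives a first rule set
from the data; `write_all_suggestions()` writes the same suggestions to a file
for manual editing by a domain expert:
```{r suggest, eval = FALSE}
# suggest a starting rule set from the data; always review before use
rules <- suggest_rules(iris)
rules
# or write the suggestions to an R file that can be edited by hand
write_all_suggestions(iris, file = "suggested_rules.R")
```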
|
/scratch/gouwar.j/cran-all/cranData/validatesuggest/vignettes/validatesuggest.Rmd
|
INFIX_CAT_NAME <- ":"
# TODO maybe change the code below to directly generate mip_rules
# determine if a rule is categorical
is_cat_ <- function(expr, or=TRUE, ...){
# this allows for logicals such as "if (A) B"
if (is.symbol(expr)){
return(TRUE)
}
if(is.atomic(expr) || is.null(expr)){
return(is.logical(expr))
}
op = op_to_s(expr)
l <- left(expr)
r <- right(expr)
switch (op,
"%in%" = TRUE, # allow all literals (should check for character and logical)
"%vin%" = TRUE, # Added to comply with validate >= 0.2.2
"(" = is_cat_(l, or),
"!" = is_cat_(l, !or),
"==" = is.character(r) || is.logical(r),
"!=" = is.character(r) || is.logical(r),
"if" = is_cat_(l, !or) && is_cat_(r, or),
"|" = or && is_cat_(l, or) && is_cat_(r, or),
"||" = or && is_cat_(l, or) && is_cat_(r, or),
"&" = !or && is_cat_(l, or) && is_cat_(r, or),
"&&" = !or && is_cat_(l, or) && is_cat_(r, or),
FALSE
)
}
# cat var info, utility function for collecting info with get_catvar
cvi <- function(var, value, not){
list(list(
var = deparse(var),
value = eval(value), # we might want to evaluate in higher frame!
not = not)) # this indicates if "var %in% value" or "!(var %in% value)"
}
# collect variable information within a rule, assumes that is_cat_ has been used
# to check whether it is categorical
get_catvar <- function(expr, not = FALSE){
if (is.symbol(expr)){
return(cvi(expr, TRUE, not))
}
op = op_to_s(expr)
l <- left(expr)
r <- right(expr)
switch ( op,
"%in%" = cvi(l, r, not),
"%vin%" = cvi(l,r,not),
"==" = cvi(l, r, not),
"!=" = cvi(l, r, !not),
"if" = c( get_catvar(l, !not), get_catvar(r, not)),
"(" = get_catvar(l, not),
"!" = get_catvar(l, !not),
"|" = c( get_catvar(l, not), get_catvar(r, not)),
"||" = c( get_catvar(l, not), get_catvar(r, not)),
"&" = c( get_catvar(l, not), get_catvar(r, not)),
"&&" = c( get_catvar(l, not), get_catvar(r, not)),
NULL
)
}
# generate binary variable names from vars and their values.
bin_var_name <- function(x, infix=INFIX_CAT_NAME){
if (is.logical(x$value)){
x$var
} else {
if (is.numeric(x$value)){
warning("'", x$var, "' seems a categorical variable, please recode it as a factor in the data.
Only use character or logical values in %in% statements to prevent this warning.",
call. = FALSE)
}
paste0(x$var, infix, x$value)
}
}
# input is mip_rule, results is character vector with infix names
cat_var_name <- function(x, infix=INFIX_CAT_NAME){
suffix <- paste0(infix, ".*$")
gsub(suffix,"",names(x$a))
}
#' Check if rules are categorical
#'
#' Check if rules are categorical
#' @export
#' @param x validator object
#' @param ... not used
#' @return logical indicating which rules are purely categorical/logical
#' @example examples/categorical.R
is_categorical <- function(x, ...){
sapply(x$rules, function(rule){
is_cat_(rule@expr)
})
}
#' Get coefficient matrix from categorical rules
#'
#' Get coefficient matrix from categorical edits, similar to
#' linear_coefficients.
#'
#' @param x validator object
#' @param ... not used
#' @keywords internal
cat_coefficients <- function(x, ...){
stopifnot(inherits(x, "expressionset"))
mr <- cat_as_mip_rules(x, ...)
get_mr_matrix(mr)
}
#' get categorical rules as mip_rules
#'
#' @param x expressionset object
#' @param ... not used
#' @return list of mip_rule
#' @keywords internal
cat_as_mip_rules <- function(x, ...){
cat_rules <- x[is_categorical(x)]
lapply(cat_rules$rules, function(rule){
cat_mip_rule_(rule@expr, name=rule@name)
})
}
cat_mip_rule_ <- function(e, name, ...){
rule_l <- get_catvar(e)
a <- unlist(lapply(rule_l, function(x){
vars <- bin_var_name(x)
# if (x %in% set) +1, if (!(x %in% set)) -1
coef <- rep(if(x$not || all(x$value == FALSE)) -1L else 1L, length(vars))
names(coef) <- vars
coef
})
)
# sum(a_pos) + sum(1-a_neg) >= 1
# condition is that at least one of the variable is true, extract the negated memberships
b <- 1 - sum(sapply(rule_l, function(x){
x$not || all(x$value == FALSE)
}))
if ( length(rule_l) == 1){
if ( isTRUE(length(a) > 1)
|| op(e) == "=="
|| is.character(rule_l[[1]]$value)
){ # this is a strict(er) version and allows for some optimization
mip_rule(a = a, op = "==", b = b, rule = name, type=sapply(a, function(x) 'binary'))
} else {
mip_rule(a = -a, op = "<=", b = -b, rule = name, type=sapply(a, function(x) 'binary')) # needed for logical variables
}
} else {
mip_rule(a = -a, op = "<=", b = -b, rule = name, type=sapply(a, function(x) 'binary')) # normalized version of a*x >= b
}
}
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/categorical.R
|
is_condition_ <- function(expr, or=TRUE, top=TRUE, ...){
op <- op_to_s(expr)
l <- left(expr)
r <- right(expr)
if (op == 'if' && !top){
return(FALSE)
}
if (is_lin_(expr) || is_cat_(expr)){
return(!top) # this prohibits that a pure categorical or linear rule is detected as conditional
}
switch (op,
'if' = is_condition_(l, !or, FALSE) && is_condition_(r, or, FALSE),
"|" = or && is_condition_(l, or, FALSE) && is_condition_(r, or, FALSE),
"||" = or && is_condition_(l, or, FALSE) && is_condition_(r, or, FALSE),
"&" = !or && is_condition_(l, or, FALSE) && is_condition_(r, or, FALSE),
"&&" = !or && is_condition_(l, or, FALSE) && is_condition_(r, or, FALSE),
"!" = is_condition_(l, !or, FALSE),
"(" = is_condition_(l, or, FALSE),
FALSE
)
}
#' Check if rules are conditional rules
#'
#' Check if rules are conditional rules
#' @export
#' @param rules validator object containing validation rules
#' @param ... not used
#' @return logical indicating which rules are conditional
#' @example examples/conditional.R
is_conditional <- function(rules, ...){
stopifnot(inherits(rules, "validator"))
sapply(rules$rules, function(rule){
is_condition_(rule@expr)
})
}
cond_as_mip_rules <- function(x, ...){
cond_rules <- x[is_conditional(x)]
mr <- lapply(cond_rules$rules, function(rule){
#browser()
prefix <- paste0(rule@name, "._lin")
rl <- replace_linear(rule@expr, prefix=prefix)
mr_cat <- cat_mip_rule_(rl$cat, rule@name)
# convert linear expressions to linear mip_rules
mr_lin <- mapply( lin_mip_rule_, rl$linear, name=names(rl$linear),
SIMPLIFY = FALSE, USE.NAMES = FALSE
)
# normalize them (">", ">=" into "<", "<=")
mr_lin <- lapply(mr_lin, rewrite_mip_rule)
# make them soft/conditional on the variable <v>._<count> used in mr_cat
mr_lin <- lapply(mr_lin, soft_lin_rule, prefix="")
append(list(mr_cat), mr_lin)
})
unlist(mr, recursive = FALSE)
}
# replaces linear subexpressions with a binary variable
# assumes that the expression is conditional
replace_linear <- function(e, prefix=".v"){
h <- new.env()
h$prefix <- prefix
cat <- rep_lin_(e, h=h)
list( cat = cat,
linear = h$expr
)
}
rep_lin_ <- function(e, or=TRUE, h=new.env()){
#browser()
op <- op_to_s(e)
l <- left(e)
r <- right(e)
if (is.atomic(e) || is.symbol(e) || is.null(e)){
return(e)
}
if (is_lin_(e)){
if (!or){
e <- invert_(e)
}
h$expr <- append(h$expr, e)
prefix <- if (is.null(h$prefix)) ".v" else h$prefix
name <- paste0(prefix, length(h$expr))
names(h$expr)[length(h$expr)] <- name
if (or){
return(substitute(!name, list(name=as.symbol(name))))
} else {
return(substitute(name, list(name=as.symbol(name))))
}
}
switch (op,
"if" = substitute( if (l) r,
list( l=rep_lin_(l, !or, h), r=rep_lin_(r, or, h))),
"|" = substitute( l | r,
list( l=rep_lin_(l, or, h), r=rep_lin_(r, or, h))),
"&&" = substitute( l && r,
list( l=rep_lin_(l, or, h), r=rep_lin_(r, or, h))),
"&" = substitute( l & r,
list( l=rep_lin_(l, or, h), r=rep_lin_(r, or, h))),
"||" = substitute( l || r,
list( l=rep_lin_(l, or, h), r=rep_lin_(r, or, h))),
"!" = substitute( !l,
list(l=rep_lin_(l, !or, h))),
"(" = substitute( (l),
list(l=rep_lin_(l, or, h))),
e
)
}
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/conditional.R
|
#' Find out which rules are conflicting
#'
#' Find out for a contradicting rule which rules are conflicting. This helps in determining and assessing conflicts in rule sets. Which
#' of the rules should stay and which should go?
#' @export
#' @example ./examples/feasible.R
#' @param x \code{\link{validator}} object with rules.
#' @param rule_name \code{character} with the names of the rules that are causing infeasibility.
#' @family feasibility
#' @return \code{character} with conflicting rules.
is_contradicted_by <- function(x, rule_name){
rn <- rule_name %in% names(x)
if (any(!rn)){
nms <- paste0('"',rule_name[!rn], '"', collapse = ", ")
warning("Rule(s) ", nms, " not found in rule set 'x'.", call. = FALSE)
}
N <- length(x)
weight <- rep(N, length(rule_name))
names(weight) <- rule_name
res <- character()
contra <- detect_infeasible_rules(x, weight = weight)
while (length(contra) && !any(contra %in% names(weight))){
res <- c(res, contra)
weight[contra] <- N
contra <- detect_infeasible_rules(x, weight = weight)
}
res
}
# x <- validator( x > 1, r2 = x < 0, x > 2)
# is_contradicted_by(x, "r2")
# make_feasible(x, weight = c(r2=10))
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/contradicted.R
|
#' Detect the range for numerical variables
#'
#' Detect for each numerical variable in a validation rule set, what its maximum and minimum values are.
#' This allows for manual rule set checking: does rule set \code{x} overly constrain numerical values?
#'
#' This procedure only finds minimum and maximum values, but misses gaps.
#'
#' @seealso \code{\link{detect_fixed_variables}}
#' @references Statistical Data Cleaning with R (2017), Chapter 8, M. van der Loo, E. de Jonge
#' @references Simplifying constraints in data editing (2015). Technical Report 2015|18, Statistics Netherlands, J. Daalmans
#' @example ./examples/detect_boundary.R
#' @export
#' @param x \code{\link{validator}} object, rule set to be checked
#' @param eps detected fixed values will have this precission.
#' @param ... currently not used
#' @family feasibility
#' @return \code{\link{data.frame}} with columns "variable", "lowerbound", "upperbound".
detect_boundary_num <- function(x, eps = 1e-8, ...){
x <- check_validator(x)
prec <- -log(eps, 10)
bounds <- sapply(get_variables_num(x), function(v){
bounds <- c(lower=-Inf, upper=Inf)
objective <- setNames(1, v)
lp <- to_lp(x, objective = objective)
lpSolveAPI::lp.control(lp, presolve="none")
res <- solve(lp)
    if (res %in% c(0,1,4,12)){ # successful, TODO warn if failure...
i <- match(v, colnames(lp))
bounds[1] <- lpSolveAPI::get.variables(lp)[i]
}
objective <- setNames(-1, v)
lp <- to_lp(x, objective = objective)
lpSolveAPI::lp.control(lp, presolve="none")
# TODO check if was succesfull
res <- solve(lp)
if (res %in% c(0,1,4,12)){
i <- match(v, colnames(lp))
bounds[2] <- lpSolveAPI::get.variables(lp)[i]
    } else if (!(res %in% c(3,13))){
      # any other solver status: leave the upper bound at Inf
    }
return(bounds)
}, simplify = TRUE)
if (length(bounds) == 0){ # when there are no numeric variables..
bounds <- matrix(ncol=0, nrow=2)
}
data.frame( variable = colnames(bounds)
, lowerbound = round(bounds[1,], prec)
, upperbound = round(bounds[2,], prec)
, stringsAsFactors = FALSE
)
}
#' Detect viable domains for categorical variables
#'
#' Detect viable domains for categorical variables
#' @example ./examples/detect_boundary.R
#' @param x \code{\link{validator}} object with rules
#' @param as_df return result as data.frame (before 0.4.5)
#' @param ... not used
#' @family feasibility
#' @return \code{data.frame} with columns \code{$variable}, \code{$value}, \code{$min}, \code{$max}. Each row is a
#' category/value of a categorical variable.
#' @export
detect_boundary_cat <- function(x, ..., as_df = FALSE){
var_cat <- get_variables_cat(x)
bounds <- sapply(seq_len(nrow(var_cat)), function(i){
bounds <- c(min=0L, max=1L)
v <- var_cat$bin_variable[i]
objective <- setNames(1, v)
lp <- to_lp(x, objective = objective)
lpSolveAPI::lp.control(lp, presolve="none")
res <- solve(lp)
    if (res %in% c(0,1,4,12)){ # successful, TODO warn if failure...
i <- match(v, colnames(lp))
bounds[1] <- lpSolveAPI::get.variables(lp)[i]
}
objective <- setNames(-1, v)
lp <- to_lp(x, objective = objective)
lpSolveAPI::lp.control(lp, presolve="none")
res <- solve(lp)
    if (res %in% c(0,1,4,12)){ # successful, TODO warn if failure...
i <- match(v, colnames(lp))
bounds[2] <- lpSolveAPI::get.variables(lp)[i]
}
bounds
})
#stop("To be implemented")
# for each category detect bound
if (length(bounds)){
bounds = cbind(var_cat[-1], t(bounds))
if (isTRUE(as_df)){
return(bounds)
}
vals <- subset(bounds, max == 1)
vals <- tapply(vals$value, vals$variable, c, simplify = FALSE)
lapply(vals, function(x) x) # trick make an list array into a named list
} else{
NULL
}
}
# rules <- x <- validator( x > 1
# , if (x > 0) A == 'a1'
# , B %in% c("b1", "b2")
# )
# detect_boundary_cat(rules)
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/detect_boundary.R
|
#' Detect fixed variables
#'
#' Detects variables that have a fixed value in the rule set.
#' To simplify a rule set, these variables can be substituted with their value.
#' @example ./examples/detect_fixed_variables.R
#' @seealso \code{\link{simplify_fixed_variables}}
#' @param x \code{\link{validator}} object with the validation rules.
#' @param eps detected fixed values will have this precision.
#' @param ... not used.
#' @family redundancy
#' @export
detect_fixed_variables <- function(x, eps = x$options("lin.eq.eps"), ...){
x <- check_validator(x)
bounds_num <- detect_boundary_num(x, eps = eps)
is_fixed_num <- (bounds_num$upperbound - bounds_num$lowerbound <= eps)
fixed_num <- NULL
if (any(is_fixed_num)){
fixed_num <- setNames(bounds_num$lowerbound, bounds_num$variable)[is_fixed_num]
fixed_num <- as.list(fixed_num)
}
fixed_cat <- NULL
bounds_cat <- detect_boundary_cat(x, as_df = TRUE)
if (NROW(bounds_cat)){
bounds_cat <- subset(bounds_cat, min == 1)
if (nrow(bounds_cat)){
fixed_cat <- as.list(setNames(bounds_cat$value, bounds_cat$variable))
}
}
c(fixed_num, fixed_cat)
}
#' Simplify fixed variables
#'
#' Detect variables of which the values are restricted to a single value by the
#' rule set. Simplify the rule set by replacing fixed variables with these values.
#'
#'
#' @export
#' @example ./examples/detect_fixed_variables.R
#' @param x \code{\link{validator}} object with validation rules
#' @param eps detected fixed values will have this precision.
#' @param ... passed to \code{\link{substitute_values}}.
#' @family redundancy
#' @return \code{\link{validator}} object in which the fixed variables have been substituted with their values.
simplify_fixed_variables <- function(x, eps = 1e-8, ...){
x <- check_validator(x)
fv <- detect_fixed_variables(x, eps = eps, ...)
if (length(fv)) {
substitute_values(x, .values = fv, ...)
} else {
message("No fixed values found.")
x
}
}
# rules <- x <- validator( x > 1
# , if (x > 0) A == 'a1'
# , B %in% c("b1", "b2")
# )
# detect_fixed_variables(x)
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/detect_fixed_variables.R
|
op <- function(e){
if (is.call(e)) { e[[1]]
} else { e }
}
node <- op
op_to_s <- function(e){
deparse(op(e))
}
left <- function(e){
if (length(e) >= 2) e[[2]]
}
right <- function(e){
if (length(e) >= 3) e[[3]]
}
# look ahead
la <- function(e){
op_to_s(left(e))
}
# consume tokens, typically handy for brackets
consume <- function(e, token = "("){
while(op_to_s(e) == token){
e = left(e)
}
e
}
is_lin_eq <- function(e){
is_lin_(e) && op_to_s(e) == "=="
}
invert_or_negate <- function(e){
if (is_lin_(e)){
if (is_lin_eq(e)){
# Dirty Hack but it works for now. Ideally this should be split in two statements
substitute( l < r | l > r, list(l = left(e), r = right(e)))
} else {
invert_(e)
}
} else {
negate_(e)
}
}
# convert an expression to its disjunctive normal form
as_dnf <- function(expr, ...){
  # assumes that the expression has been tested with is_conditional()
clauses <- list()
# remove "("
expr <- consume(expr)
op_if <- op_to_s(expr)
cond <- NULL
cons <- expr
if (op_if == "if") {
cond <- left(expr)
cons <- right(expr)
} else if (op_if %in% c("|", "||")){
if (la(expr) == "!"){ # this is a rewritten if statement
cons <- right(expr)
cond <- left(left(expr))
}
} else if(op_if == "!"){
cond <- left(expr)
cons <- NULL
} else if (is_cat_(expr) || is_lin_(expr)){
return(structure(list(expr), class="dnf"))
} else {
stop("Invalid expression")
}
# build condition clauses
if (!is.null(cond)){
cond <- consume(cond)
op_and <- op_to_s(cond)
while(op_and %in% c("&", "&&")){
clauses[[length(clauses) + 1]] <- invert_or_negate(consume(right(cond)))
cond <- consume(left(cond))
op_and <- op_to_s(cond)
}
clauses[[length(clauses) + 1]] <- invert_or_negate(cond)
clauses <- rev(clauses)
}
# build consequent clauses
if (!is.null(cons)){
cons <- consume(cons)
op_or <- op_to_s(cons)
while(op_or %in% c("|", "||")){
clauses[[length(clauses) + 1]] <- consume(left(cons))
cons <- consume(right(cons))
op_or <- op_to_s(cons)
}
clauses[[length(clauses) + 1]] <- cons
}
# the nasty case of negating equalities...
clauses <- unlist(lapply(clauses, function(clause){
if (op_to_s(clause) == "|"){
as_dnf(clause)
} else if (op_to_s(clause) == "!"){
invert_or_negate(consume(left(clause)))
} else{
clause
}
}))
# unroll <- FALSE
# for (i in seq_along(clauses)){
# clause <- clauses[[i]]
# if (op_to_s(clause) == "|") { # got-ya
# clauses[[i]] <- as_dnf(clause)
# unroll <- TRUE
# }
# }
# if (unroll){
# clauses <- unlist(clauses)
# }
# forget about it
structure(clauses, class="dnf")
}
#as_clause <- as_dnf
deparse_all <- function(x, width.cutoff = 500L, ...){
text <- deparse(x, width.cutoff = width.cutoff, ...)
if (length(text) == 1){
return(text)
}
text <- sub("^\\s+", "", text)
paste0(text, collapse = "")
}
#' @export
as.character.dnf <- function(x, as_if = FALSE, ...){
x <- x[] # removes NULL entries
x_s <- sapply(x, deparse_all)
if (as_if && length(x) > 1){
x_i <- sapply(x, invert_or_negate)
x_i_s <- sapply(x_i, deparse_all)
s <- paste(utils::head(x_i_s, -1), collapse = " & ")
paste0("if (",s,") ", utils::tail(x_s, 1))
} else {
paste(x_s, collapse = ' | ')
}
}
#' @export
print.dnf <- function(x, as_if = FALSE, ...){
cat(as.character(x, as_if = as_if, ...))
}
#' @export
as.expression.dnf <- function(x, as_if = FALSE, ...){
parse(text=as.character(x, as_if = as_if, ...))
}
#' @export
`[.dnf` <- function(x, ...){
xs <- unclass(x)[...]
class(xs) <- class(x)
xs
}
dnf_to_mip_rule <- function(d, name = "", ...){
islin <- sapply(d, is_lin_)
d_l <- d[islin]
if (any(islin)){
if (length(d) == 1){ # pure numerical
return(list(lin_mip_rule_(d[[1]], name = name)))
}
names(d_l) <- paste0(name, "._lin", seq_along(d_l))
# replace linear parts with a negated symbol.
d[islin] <- sapply(names(d_l), function(n){
substitute(!V, list(V=as.name(n)))
})
# turn into mip_rules
d_l <- lapply(names(d_l), function(name){
e <- d_l[[name]]
mr <- lin_mip_rule_(e = e, name = name)
})
# replace "==" with two statements
is_eq <- sapply(d_l, function(mr) mr$op == "==")
d_l[is_eq] <- lapply(d_l[is_eq], function(mr){
mr$op = "<="
mr
})
d_l2 <- lapply(d_l[is_eq], function(mr){
mr$op = ">="
mr
})
##
# turn all linear subclauses into soft constraints.
d_l <- lapply(c(d_l, d_l2), function(mr){
mr <- rewrite_mip_rule(mr)
mr <- soft_lin_rule(mr, prefix = "")
mr
})
##
}
c( list(cat_mip_rule_(as.expression(d)[[1]], name = name))
, d_l # for pure categorical this is list()
)
}
# translates the validator rules into mip rules
to_miprules <- function(x, ...){
check_validator(x, check_infeasible = FALSE)
can_translate <- is_linear(x) | is_categorical(x) | is_conditional(x) | is_local_variable(x)
if (!all(can_translate)){
warning("Ignoring rules: ", paste(names(x)[!can_translate], collapse = ", "))
}
x <- x[can_translate]
exprs <- to_exprs(x)
mr <- lapply(names(exprs), function(name){
e <- exprs[[name]]
d <- as_dnf(e)
lapply( dnf_to_mip_rule(d, name = name)
, rewrite_mip_rule
)
})
  unlist(mr, recursive = FALSE)
}
to_lp <- function(x, objective = NULL, eps = 0.001){
check_validator(x, check_infeasible = FALSE)
rules <- to_miprules(x)
translate_mip_lp(rules = rules, objective = objective, eps = eps)
}
# as_dnf(quote(!(gender == "male") | x > 6))
# as_dnf(quote(if (y == 1) x > 6))
# as_dnf(quote( !(gender %in% "male" & y > 3) | x > 6))
# e <- quote( x == 1)
# invert_or_negate(e)
#' @export
print.mip_rule <- function(x, ...){
s <- paste(x$a, "*", names(x$a), collapse = " + ")
cat(paste0("[", x$rule, "]: ", s, " ", x$op, " ",x$b))
}
# e <- quote(if (A %in% "a" && x >= 0) y == 0)
# d <- as_dnf(e)
# dnf_to_mip_rule(d, name="rule1")
#
# e <- quote(if (A == "a") B == "b")
# d <- as_dnf(e)
# dnf_to_mip_rule(d, name="rule2")
#
# e <- quote(x + 2*y > 1 - x)
# d <- as_dnf(e)
# dnf_to_mip_rule(d, name="rule3")
# rules <- validator( rule1 = x + 2*y > 1, rule2 = A %in% c("a1", "a2"), rule3 = if(A == "a1") x > 2)
# rules
# lp <- to_lp(rules)
# solve(lp)
# lp
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/dnf.R
|
negate_ <- function(e, ...){
# don't do double negation: that complicates analysis of expressions
op <- node(e)
if (op == '!'){
return(consume(e[[2]]))
}
expr <- if (is.call(e) && op != '('){
if (op == "!="){
substitute( l == r, list(l = left(e), r = right(e)))
} else if (op == "=="){
if (is.logical(right(e))){
substitute( l == r, list(l = left(e), r = !right(e)))
} else {
substitute( l != r, list(l = left(e), r = right(e)))
}
}
else {
substitute( !(e), list(e=e) )
}
} else {
substitute( !e, list(e=e))
}
expr
}
invert_ <- function(e, ...){
op <- op_to_s(e)
s <- switch (op,
"<" = ">=",
">" = "<=",
"<=" = ">",
">=" = "<",
# "==" = "!=",
# "!=" = "==",
stop(op, " not supported")
)
substitute(a %op% b, list(a=left(e), b=right(e), "%op%"=as.symbol(s)))
}
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/expr_manip.R
|
#' Check the feasibility of a rule set
#'
#' An infeasible rule set cannot be satisfied by any data because of internal
#' contradictions. This function checks whether the record-wise linear,
#' categorical and conditional rules in a rule set are consistent.
#'
#' @example ./examples/feasible.R
#' @param x \code{validator} object with validation rules.
#' @param ... not used
#' @family feasibility
#' @return TRUE or FALSE
#' @export
is_infeasible <- function(x, ...){
lp <- to_lp(x) # TODO find out how to treat eps for linear inequalities...
lpSolveAPI::lp.control(lp, presolve="rows", break.at.first = TRUE)
res <- solve(lp)
# any of the following means that there is a solution found by lpSolveAPI:
# TODO generate errors if the lpSolveAPI gives other return values...
!(res %in% c(0,1,4,12))
}
is_feasible <- function(x, ...){
!is_infeasible(x, ...)
}
#' Make an infeasible system feasible.
#'
#' Make an infeasible system feasible, by removing the minimum (weighted) number of rules, such that the remaining
#' rules are not conflicting.
#' This function uses \code{\link{detect_infeasible_rules}} for determining the rules to be removed.
#' @export
#' @param x \code{\link{validator}} object with the validation rules.
#' @param ... passed to \code{\link{detect_infeasible_rules}}
#' @family feasibility
#' @example ./examples/feasible.R
#' @return \code{\link{validator}} object with feasible rules.
make_feasible <- function(x, ...){
dropping <- detect_infeasible_rules(x, ...)
if (length(dropping) == 0){
message("No infeasibility found, returning original rule set")
return(x)
}
message("Dropping rule(s): ", paste0('"', dropping, '"', collapse=", "))
x[-match(dropping, names(x))]
}
#' Detect which rules cause infeasibility
#'
#' Detect which rules cause infeasibility. This methods tries to remove the minimum number of rules to make the system
#' mathematically feasible. Note that this may not result in your desired system, because some rules may be more important
#' to you than others. This can be mitigated by supplying weights for the rules. Default weight is 1.
#' @export
#' @example ./examples/feasible.R
#' @param x \code{\link{validator}} object with rules
#' @param weight optional named \code{\link{numeric}} with weights. Unnamed variables in the weight are given the default
#' weight \code{1}.
#' @family feasibility
#' @param ... not used
#' @return \code{character} with the names of the rules that are causing infeasibility.
detect_infeasible_rules <- function(x, weight = numeric(), ...){
if (!is_infeasible(x)){
return(character())
}
mr <- to_miprules(x)
is_equality <- sapply(mr, function(m){
m$op == "==" && all(m$type == "double")
})
# replace each equality with two inequalities
if (any(is_equality)){
mr[is_equality] <- lapply(mr[is_equality], function(m){
m$op <- "<="
m
})
mr <- c(mr, lapply(mr[is_equality], function(m){
m$a <- -m$a
m$b <- -m$b
m
}))
}
# make all rules soft rules
objective <- numeric()
mr <- lapply(mr , function(r){
is_lin <- all(r$type == "double")
is_cat <- all(r$type == "binary")
if (is_lin){
r <- soft_lin_rule(r, prefix = ".delta_")
} else if (is_cat){
r <- soft_cat_rule(r, prefix = ".delta_")
} else {
return(r)
}
r$weight <- 1
objective[[paste0(".delta_", r$rule)]] <<- r$weight
r
})
# set the weights to the weights supplied by the user
if (!is.null(names(weight))){
names(weight) <- paste0(".delta_", names(weight))
objective[names(weight)] <- weight
}
lp <- translate_mip_lp(mr, objective = objective) #TODO figure out "eps" param
lpSolveAPI::lp.control( lp
#, verbose="full"
, presolve="none"
)
res <- solve(lp)
if (res %in% c(0,1,4,12)){
vars <- lpSolveAPI::get.variables(lp)
names(vars) <- colnames(lp)
idx <- grep("^\\.delta_", names(vars))
rules <- vars[idx]
names(rules) <- sub("^\\.delta_", "", names(rules))
dropping <- names(rules)[rules == 1]
dropping
} else {
stop("No solution found to make system feasible.", call. = FALSE)
}
}
# x <- validator( x > 1, r2 = x < 0, x > 2)
# detect_infeasible_rules(x, weight = c(r2=10))
# make_feasible(x, weight = c(r2=10))
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/feasible.R
|
#' Find which rule(s) make rule_name redundant
#'
#' Find out which rules are causing rule_name(s) to be redundant.
#' @example ./examples/redundancy.R
#' @export
#' @param x \code{\link{validator}} object with rule
#' @param rule_name \code{character} with the names of the rules to be checked
#' @param ... not used
#' @family redundancy
#' @return \code{character} with the names of the rule that cause the implication.
is_implied_by <- function(x, rule_name, ...){
check_validator(x)
idx <- match(rule_name, names(x), 0)
if (any(idx == 0L)){
nms <- paste0('"', rule_name[idx==0L],'"', collapse = ", ")
warning("Rule(s) ", nms, " not found in 'x'", call. = FALSE)
}
x_org <- x[-idx]
x_r <- x[idx] # contains the set of rules that are redundant
if (length(x_r) == 0){
return(character())
}
exprs_org <- to_exprs(x_org)
# TODO check if x_r can be transformed into mixed integer problem
exprs_r <- to_exprs(x_r)
negated_rules <- lapply(exprs_r, function(e){
dnf <- as_dnf(e)
neg_dnf <- lapply(dnf, invert_or_negate)
#neg_expr <- lapply(neg_dnf, as.expression)
neg_dnf
})
negated_rules <- unlist(negated_rules, recursive = FALSE)
names(negated_rules) <- paste0(".negated_", names(negated_rules))
test_rules <- do.call(validate::validator, c(exprs_org, negated_rules))
# set the weights for the negated rules to a large enough value
# weight <- rep(length(x), length(negated_rules))
# names(weight) <- names(negated_rules)
# detect_infeasible_rules(test_rules, weight)
is_contradicted_by(test_rules, names(negated_rules))
}
# rules <- x <- validator(r1 = x > 1, r2 = x > 2)
# rule_name <- "r1"
# is_implied_by(rules, rule_name = rule_name)
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/implied_by.R
|
# code is mainly copied from validate, but needed for linear sub expressions in
# conditional statements.
#' Check which rules are linear rules.
#' @export
#' @param x \code{\link{validator}} object containing data validation rules
#' @param ... not used
#' @return \code{logical} indicating which rules are (purely) linear.
is_linear <- function(x, ...){
stopifnot(inherits(x, "validator"))
sapply(x$rules, function(rule){
is_lin_(rule@expr)
})
}
# HACK
lin_as_mip_rules <- function(x, ...){
lin_rules <- x[is_linear(x)]
lapply(lin_rules$rules, function(rule){
rewrite_mip_rule(lin_mip_rule_(rule@expr, name=rule@name), eps=0)
})
}
# check if a (sub) expression is linear
is_lin_ <- function(expr, top=TRUE, ...){
op <- op_to_s(expr)
l <- left(expr)
r <- right(expr)
if (top){
if (!(op %in% c("==", ">", ">=", "<=", "<"))){ return(FALSE) }
return(is_lin_(l, FALSE) && is_lin_(r, FALSE))
}
if (is.null(expr)){
return(TRUE)
}
if (is.atomic(expr)){
return(is.numeric(expr))
}
if (is.symbol(expr)){ return(TRUE) }
if (op %in% c("+","-")){
return( is_lin_(l, FALSE) && is_lin_(r, FALSE))
}
if (op == "*"){
if (is.numeric(l) || is.numeric(left(l))){ return(is_lin_(r, FALSE)) }
if (is.numeric(r) || is.numeric(left(r))){ return(is_lin_(l, FALSE)) }
}
FALSE
}
#
# create a linear mip_rule from a linear expression.
# assumes that it is checked with is_lin_
lin_mip_rule_ <- function(e, sign=1, name, ...){
if (is.symbol(e)){
return(setNames(sign, deparse(e)))
}
if (is.numeric(e)){
return(c(.b=sign*e))
}
if (is.null(e)){ # catches unary operators +-
return(NULL)
}
op <- op_to_s(e)
l <- left(e)
r <- right(e)
if (op %in% c("==", ">", ">=", "<=", "<")){
coef <- c(lin_mip_rule_(l, sign), lin_mip_rule_(r, -sign), .b=0) # makes sure that .b exists
coef <- tapply(coef, names(coef), sum) # sum up coefficients
b <- names(coef) == ".b"
return(mip_rule(coef[!b], op = op, b = -coef[b], rule = name))
}
if (op == '-'){
if (is.null(r)){ # unary "-l"
return(lin_mip_rule_(l, -sign))
} # else binary "l-r"
return(c(lin_mip_rule_(l, sign), lin_mip_rule_(r, -sign)))
}
if (op == '+'){
if (is.null(r)){ # unary "+l"
return(lin_mip_rule_(l, sign))
} # else binary "l+r"
return(c(lin_mip_rule_(l, sign), lin_mip_rule_(r, sign)))
}
if (op == '*'){
if (is.numeric(left(l))){
l <- eval(l) # to deal with negative coefficients
}
if (is.numeric(l)){ return(lin_mip_rule_(r, sign*l)) }
if (is.numeric(left(r))){
r <- eval(r) # to deal with negative coefficients
}
if (is.numeric(r)){ return(lin_mip_rule_(l, sign*r)) }
}
stop("Invalid linear statement")
}
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/linear.R
|
is_local_var_ <- function(e){
return(op_to_s(e) == ":=")
}
is_local_variable <- function(rules, ...){
if (is.expression(rules)){
return(sapply(rules, is_local_var_))
}
stopifnot(inherits(rules, "validator"))
sapply(rules$rules, function(rule){
is_local_var_(rule@expr)
})
}
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/local_variable.R
|
# provides an interface to mip solvers.
# currently only lpSolveAPI, should be workable for glpk
#' translate linear rules into an lp problem
#' @importFrom lpSolveAPI dimnames<-.lpExtPtr
#' @param rules mip rules
#' @param objective function
#' @param eps accuracy for equality/inequality
translate_mip_lp <- function( rules
, objective=NULL
, eps = 1e-3
){
#browser()
lc <- get_mr_matrix(rules)
type <- get_mr_type(rules)
A <- lc$A
nvar <- ncol(A)
lps <- lpSolveAPI::make.lp( nrow = nrow(A)
, ncol = nvar
)
lpSolveAPI::lp.control( lps,
presolve = "rows",
epsint = 1e-15,
epspivot = 1e-15
)
dimnames(lps) <- dimnames(A)
for (v in 1:ncol(A)){
lpSolveAPI::set.column(lps, v, A[,v])
}
ops <- lc$operator
ops[ops=="=="] <- "="
ops[strict <- ops=="<"] <- "<="
is_binary <- type == "binary"
if (any(is_binary)){
columns <- type[is_binary]
columns <- match(names(columns), colnames(A))
lpSolveAPI::set.type(lps, columns, "binary")
}
is_double <- !is_binary
if (any(is_double)){
columns <- type[is_double]
columns <- match(names(columns), colnames(A))
lpSolveAPI::set.type(lps, columns, "real")
lpSolveAPI::set.bounds(lps, lower=rep(-Inf, length(columns)), columns=columns)
}
if (length(objective)){
obj <- objective[objective != 0]
columns <- match(names(obj), colnames(A))
if (any(is.na(columns))){
stop("Invalid objective function")
}
lpSolveAPI::set.objfn(lps, unname(obj), columns)
}
lpSolveAPI::set.constr.type(lps,types=ops)
b <- ifelse(strict, lc$b - eps, lc$b)
lpSolveAPI::set.constr.value(lps, b)
lps
}
### testing
# v <- validator( a>1, b+4 > c-z, A %in% "a")
# rules <- lin_as_mip_rules(v)
# translate_mip_lp(c(rules, cat_as_mip_rules(v)))
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/mip_lpsolve.R
|
#' Create a rule used by mip
#'
#' Create a rule used by mip
#' @param a named vector with coefficients
#' @param op operator in ("<=", "==", ">=", ">", "<")
#' @keywords internal
mip_rule <- function(a, op, b, rule, type, weight=Inf, ...){
if (missing(type)){
type <- rep("double", length(a))
names(type) <- names(a)
}
structure( list( a=a, op=op, b=unname(b)
, rule = rule
, type = type
, weight = weight
)
, class="mip_rule"
)
}
as.character.mip_rule <- function(x, ...){
a <- paste0(x$a, "*", names(x$a), collapse= ' + ')
  # do some simplification
a <- gsub("\\b1\\*", "", a) # "1*" => ""
a <- gsub("\\+ -", "- ", a) # "+ -" => "- "
paste0(a, " ",x$op, " ", x$b, sep = "")
}
print.mip_rule <- function(x, ...){
a <- paste0(x$a, "*", names(x$a), collapse= ' + ')
# do some simplication
a <- gsub("\\b1\\*", "", a) # "1*" => ""
a <- gsub("\\+ -", "- ", a) # "+ -" => "- "
cat(x$rule, ": ", a, " ",x$op, " ", x$b, sep = "")
}
rewrite_mip_rule <- function(x, ...){
if (x$op == '>='){
x$a <- -x$a
x$op <- '<='
x$b <- -x$b
} else if (x$op == ">"){
x$a <- -x$a
x$op <- '<'
x$b <- -x$b
}
x
}
# get variables from a list of mip_rule objects
get_mr_vars <- function(x, ...){
unique(unlist(lapply(x, function(r) names(r$a))))
}
# get rules names from a list of mip_rule objects
get_mr_rules <- function(x, ...){
sapply(x, function(r){r$rule})
}
# get a coefficient matrix from a list of mip_rule objects
get_mr_matrix <- function(x, ...){
variable <- get_mr_vars(x, ...)
rule <- get_mr_rules(x, ...)
n_rule <- length(rule)
n_variable <- length(variable)
A <- matrix( 0
, nrow=n_rule, ncol=n_variable
, dimnames = list(rule=rule, variable=variable)
)
for (i in seq_len(n_rule)){
a <- x[[i]]$a
A[i, names(a)] <- a
}
op <- sapply(x, `[[`, 'op')
b <- unname(sapply(x, `[[`, 'b'))
list(A=A, operator=op, b=b)
}
get_mr_type <- function(x, ...){
type <- unlist(sapply(x, function(mr){
mr$type
}, simplify = FALSE))
vars <- names(type)
df <- unique(data.frame(vars=vars, type=type, stringsAsFactors = FALSE))
setNames(df$type, df$vars)
}
get_mr_expression <- function(x, ...){
expr <- parse(text=sapply(x, as.character))
names(expr) <- get_mr_rules(x, ...)
expr
}
get_mr_weights <- function(x, ...){
weight <- sapply(x, function(r){r$weight})
names(weight) <- get_mr_rules(x)
weight
}
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/mip_rule.R
|
#' Detect redundant rules without removing.
#'
#' Detect redundancies in a rule set.
#'
#' @note For removal of redundant or duplicate rules, see \code{\link{remove_redundancy}}.
#' @example ./examples/redundancy.R
#' @param x \code{\link{validator}} object with the validation rules.
#' @param ... not used.
#'
#' @family redundancy
#'
#' @export
detect_redundancy <- function(x, ...){
x <- check_validator(x)
can_be_checked <- is_linear(x) | is_categorical(x) | is_conditional(x)
vals <- to_exprs(x)
dnf_set <- lapply(vals[can_be_checked], as_dnf)
are_redundant <- sapply(seq_along(dnf_set), function(i){
is_redundant(dnf_set, i)
})
idx <- which(can_be_checked)[are_redundant]
ret <- logical(length = length(vals))
names(ret) <- names(vals)
ret[idx] <- TRUE
ret
}
#' Remove redundant rules
#'
#' Simplify a rule set by removing redundant rules
#' @export
#' @example ./examples/redundancy.R
#' @param x \code{\link{validator}} object with validation rules.
#' @param ... not used
#'
#' @family redundancy
#'
#' @return simplified \code{\link{validator}} object, in which redundant rules are removed.
remove_redundancy <- function(x, ...){
x <- check_validator(x)
can_be_checked <- is_linear(x) | is_categorical(x) | is_conditional(x)
vals <- to_exprs(x)
dnf_set <- lapply(vals[can_be_checked], as_dnf)
for (i in rev(seq_along(dnf_set))){ # remove later rules before older rules
if (is_redundant(dnf_set, i)){
dnf_set[[i]] <- list()
}
}
vals[can_be_checked] <- lapply(dnf_set, as.expression)
vals <- unlist(vals) # this removes empty expressions
do.call(validate::validator, vals)
}
# utility function for checking if rule i is redundant.
is_redundant <- function(dnf_set, i, ...){
dnf <- dnf_set[[i]]
negated_rules <- lapply(dnf, invert_or_negate)
# We allow the injection of multiple rules (a negation of a disjunction are multiple rules!)
dnf_set <- c(dnf_set[-i], negated_rules)
#names(dnf_set) <- make.unique(names(dnf_set))
exprs <- unlist(lapply(dnf_set, as.expression))
test_rules <- do.call(validate::validator, exprs)
# if (i == 2){
# for (n in ls()){
# cat(n, ': \n')
# print(get(n))
# }
# }
is_infeasible(test_rules)
}
# x <- validator( rule1 = x > 1
# , rule2 = x > 2
# )
# remove_redundancy(x)
# detect_redundancy(x)
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/redundancy.R
|
#' Simplify conditional statements
#'
#' Conditional rules may be constrained by the other rules in a validation rule set.
#' This procedure tries to simplify conditional statements.
#'
#' @references TODO non-constraining, non-relaxing
#' @example ./examples/simplify_conditional.R
#' @export
#' @param x \code{\link{validator}} object with the validation rules.
#' @param ... not used.
#' @return \code{\link{validator}} simplified rule set.
simplify_conditional <- function(x, ...){
x <- check_validator(x)
is_cond <- is_conditional(x) | is_categorical(x)
vals <- to_exprs(x)
for (i in which(is_cond)){
cond <- vals[[i]]
cond <- simplify_non_constraining(cond, vals)
vals[[i]] <- cond
cond <- simplify_non_relaxing(cond, vals)
vals[[i]] <- cond
}
# TODO set meta data correctly for the resulting rule set
do.call(validate::validator, vals)
}
simplify_non_relaxing <- function(cond_expr, vals){
clauses <- as_dnf(cond_expr)
clauses[] <- lapply(clauses, function(clause){
test_rules <- do.call(validate::validator, c(vals, clause))
if (is_infeasible(test_rules)){
return(NULL)
}
clause
})
is_null <- sapply(clauses, is.null)
as.expression(clauses[!is_null], as_if = TRUE)[[1]]
}
simplify_non_constraining <- function(cond_expr, vals){
clauses <- as_dnf(cond_expr)
for (clause in clauses){
clause_neg <- invert_or_negate(clause)
test_rules <- do.call(validate::validator, c(vals, clause_neg))
if (is_infeasible(test_rules)){
return(clause)
}
}
cond_expr
}
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/simplify_conditional.R
|
#' Simplify a rule set
#'
#' Simplifies a rule set by applying different simplification methods. This is a convenience function that
#' works in common cases. The following simplification methods are executed:
#' \itemize{
#' \item \code{\link{substitute_values}}: filling in any parameters that are supplied via \code{.values} or \code{...}.
#' \item \code{\link{simplify_fixed_variables}}: find out if there are fixed values. If this is the case, they are substituted.
#' \item \code{\link{simplify_conditional}}: Simplify conditional statements, by removing clauses that are superfluous.
#' \item \code{\link{remove_redundancy}}: remove redundant rules.
#' }
#' For more control, these methods can be called separately.
#' @example ./examples/simplify_rules.R
#' @export
#' @param .x \code{\link{validator}} object with the rules to be simplified.
#' @param .values optional named list with values that will be substituted.
#' @param ... parameters that will be used to substitute values.
#' @family redundancy
simplify_rules <- function(.x, .values = list(...), ...){
.x <- substitute_values(.x, .values)
.x <- simplify_fixed_variables(.x)
.x <- simplify_conditional(.x)
.x <- remove_redundancy(.x)
.x
}
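# a minimal usage sketch (hypothetical, not the packaged example file):
# rules <- validator( x > 0
#                   , if (x > 0) y == 1
#                   , A %in% c("a1", "a2")
#                   )
# simplify_rules(rules, A = "a1")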
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/simplify_rules.R
|
#TODO rename to mip_*
# convert statements of A == '1' into A + .delta_A == '1'
soft_cat_rule <- function(x, prefix=".delta_", name = x$rule, ...){
stopifnot(inherits(x, "mip_rule"))
nm <- paste0(prefix, name, collapse = "")
delta <- setNames(1L, nm)
x$a <- c(x$a, delta)
x$type <- c(x$type, setNames("binary", nm))
x
}
soft_lin_rule <- function( x, prefix=".delta_", name = x$rule, ...
, M = 1e7
){
# assumes a mip_rule abides a*x <= b
# so a soft rules is of form a*x <= b + M*delta
stopifnot(inherits(x, "mip_rule"))
nms <- c(names(x$a), paste0(prefix, name, collapse = ""))
x$a <- c(x$a, -M)
x$type <- c(x$type, "binary")
names(x$a) <- nms
names(x$type) <- nms
x
}
suffix <- function(suffix){
function(x){
paste0(x, suffix)
}
}
eps_plus <- suffix("_eps_plus")
eps_min <- suffix("_eps_min")
#' expect values
#'
#' @param values named list of values.
#' @param weights named numeric of equal length as values.
#' @param ... not used
expect_values <- function(values, weights, ...){
if (missing(weights)){
weights <- rep(1, length(values))
names(weights) <- names(values)
}
stopifnot(
length(values) == length(weights),
all(names(values) %in% names(weights))
)
# assure that weights have same order as values
#weights <- weights[names(values)]
is_numeric <- vapply(values, is.numeric, TRUE)
lin_values <- values[is_numeric]
lin_is_na <- vapply(lin_values, is.na, TRUE)
lin_values[lin_is_na] <- -1
lin_rules1 <- lapply(names(lin_values), function(n){
a <- setNames(1, n)
b <- lin_values[[n]]
w <- weights[n]
if (is.finite(w)){
soft_lin_rule(mip_rule(a = a, op = "<=", b = b, rule = n, weight = w))
} else {
mip_rule(a = a, op = "==", b = b, rule = n, weight = Inf)
}
})
# set all NA values to 1 to create contradictory statement
lin_values[lin_is_na] <- 1
lin_rules2 <- lapply(names(lin_values), function(n){
a <- setNames(1, n)
b <- lin_values[[n]]
w <- weights[n]
if (is.finite(w)){
soft_lin_rule(mip_rule(a = -a, op = "<=", b = -b, rule = n, weight = w))
} else {
NULL
}
})
cat_values <- values[!is_numeric]
cat_rules <- lapply(names(cat_values), function(n){
value <- cat_values[[n]]
a <- setNames(1, paste0(n, INFIX_CAT_NAME, value))
b <- 1
if (is.logical(value)){
names(a) <- n
if (!value){
a <- -a
b <- 0
}
}
soft_cat_rule(mip_rule( a = a, op = "==", b = b, rule = n
, weight = weights[n], type=sapply(a, function(x) "binary")
)
)
})
c(lin_rules1, lin_rules2, cat_rules)
}
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/soft-rule.R
|
#' @importFrom utils packageVersion
NULL
#' Substitute values in a rule set
#'
#' Substitute values into the rule expressions, thereby simplifying the rule set.
#' Rules that evaluate to TRUE because of the substitution are removed.
#' @example ./examples/substitute_values.R
#' @param .x \code{validator} object with rules
#' @param .values (optional) named list with values for variables to substitute
#' @param .add_constraints \code{logical}, should values be added as constraints to the resulting validator object?
#' @param ... alternative way of supplying values for variables (see examples).
#' @export
substitute_values <- function (.x, .values = list(...), ..., .add_constraints = TRUE){
.x <- check_validator(.x)
if (length(.values) == 0){
return(.x)
}
vals <- lapply(to_exprs(.x), function(e){
e <- substituteDirect(e, .values)
tryCatch({
# Workaround to make upgrade possible
if (packageVersion("validate") < "0.2.2"){
r <- eval(e, envir = list(), enclos = NULL)
} else {
# we need to explicitly include this '%in%'-replacement from 'validate'
r <- eval(e, envir = list(`%vin%`=validate::`%vin%`), enclos = NULL)
}
# to deal with non-mip translatable rules
if (!is.logical(r)){
r <- e
}
r
}, error = function(x){
e
})
})
is_cond <- is_conditional(.x) | is_categorical(.x)
vals[is_cond] <- lapply(vals[is_cond], function(cond){
if (is.null(cond)){
return(TRUE)
}
clauses <- as_dnf(cond)
# try to simplify clauses
s_clauses <- lapply(clauses, function(clause){
tryCatch( r <- eval(clause, envir = list(), enclos = NULL)
, error = function(x) {
clause
})
})
class(s_clauses) <- class(clauses)
is_logi_clause <- sapply(s_clauses, is.logical)
if (any(unlist(s_clauses[is_logi_clause]))){
# one of the clause is TRUE so the whole statement is TRUE
TRUE
} else if (any(is_logi_clause)){
# remove parts that are FALSE
s_clauses <- s_clauses[!is_logi_clause]
as.expression(s_clauses, as_if = TRUE)[[1]] # turn into an expression
} else {
cond
}
})
is_logical <- sapply(vals, is.logical)
if (any(is_logical)) {
is_true <- unlist(vals[is_logical])
if (!all(is_true)) {
broken <- names(is_true)[!is_true]
warning("Invalid substition/rule set: rule(s) '", to_exprs(.x[broken]), "' evaluate to FALSE", call. = FALSE)
}
}
vals <- vals[!is_logical]
if (isTRUE(.add_constraints)){
eq_ <- lapply(names(.values), function(v){
substitute(v == value, list(v=as.symbol(v), value=.values[[v]]))
})
names(eq_) <- paste0(".const_", names(.values))
vals <- c(vals, eq_)
}
# TODO improve the metadata of the resulting validator object!
do.call(validate::validator, vals)
}
# library(validate)
# rules <- validator(rule1 = x > 1, rule2 = y > x)
# substitute_values(rules, list(x=2))
# #
# #
# rules <- validator(gender %in% c("male","female"), if (gender == "male") x > 6)
# substitute_values(rules, gender="female")
# x <- rules
# x
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/substitute_values.R
|
# makes a copy of the validation object
check_validator <- function(x, copy = TRUE, check_infeasible = TRUE){
if (!inherits(x, "validator")){
stop("This method needs a 'validator' object, but was given a '", class(x), "'.",call. = FALSE)
}
if (isTRUE(check_infeasible) && is_infeasible(x)){
stop("This rule set is infeasible. Please fix and repair the rule set with `make_feasible` before continuing.", call. = FALSE)
}
invisible(x)
}
to_exprs <- function(x, ...){
x$exprs( lin_eq_eps = 0
, lin_ineq_eps = 0
, replace_in = FALSE
, vectorize = FALSE
)
}
get_variables_num <- function(x){
var_num <- sapply(to_miprules(x), function(mr){
names(mr$type)[mr$type == "double"]
})
unique(unlist(var_num))
}
get_variables_cat <- function(x){
var_cat <- sapply(to_miprules(x), function(mr){
nms <- names(mr$type)
nms[mr$type == "binary" & grepl(":", nms)]
})
var_cat <- unique(unlist(var_cat))
if (length(var_cat) == 0){
return(
data.frame( bin_variable = character(0)
, variable = character(0)
, value = character(0)
, stringsAsFactors = FALSE
)
)
}
data.frame( bin_variable = var_cat
, variable = sub(":.*$", "", var_cat)
, value = sub("^.*:", "", var_cat)
, stringsAsFactors = FALSE
)
}
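# Illustrative sketch of the helpers above (hedged): the "variable:value"
# naming of the binary MIP variables is inferred from the grepl()/sub() calls
# in get_variables_cat(); the exact rows depend on the MIP translation of the
# rules and are shown here as an assumption, not verified output.
#
# rules <- validate::validator(age >= 18, gender %in% c("male", "female"))
# get_variables_num(rules)
# # [1] "age"
# get_variables_cat(rules)
# #    bin_variable variable  value
# # 1   gender:male   gender   male
# # 2 gender:female   gender female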
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/utils.R
|
#' Tools for validation rules
#'
#' \code{validatetools} is a utility package for managing validation rule sets
#' that are defined with \code{\link{validate}}. In production systems
#' validation rule sets tend to grow organically and accumulate redundant or
#' (partially) contradictory rules. \code{validatetools} helps to identify problems
#' with large rule sets and includes simplification methods for resolving
#' issues.
#'
#' @section Problem detection:
#'
#' The following methods allow for problem detection:
#'
#' \itemize{
#' \item \code{\link{is_infeasible}} checks a rule set for feasibility. An infeasible system must be corrected to be useful.
#' \item \code{\link{detect_boundary_num}} shows for each numerical variable the allowed range of values.
#' \item \code{\link{detect_boundary_cat}} shows for each categorical variable the allowed set of values.
#' \item \code{\link{detect_fixed_variables}} shows variables whose value is fixed by the rule set.
#' \item \code{\link{detect_redundancy}} shows which rules are already implied by other rules.
#' }
#'
#' @section Simplifying rule set:
#'
#' The following methods detect possible simplifications and apply them to a rule set.
#'
#' \itemize{
#' \item \code{\link{substitute_values}}: replace variables with constants.
#' \item \code{\link{simplify_fixed_variables}}: substitute the fixed variables with their values in a rule set.
#' \item \code{\link{simplify_conditional}}: remove redundant (parts of) conditional rules.
#' \item \code{\link{remove_redundancy}}: remove redundant rules.
#' }
#'
#' @references Statistical Data Cleaning with Applications in R, Mark van der Loo and Edwin de Jonge, ISBN: 978-1-118-89715-7
#'
#' @name validatetools
#' @importFrom methods substituteDirect
#' @importFrom stats setNames
#' @importFrom utils head tail
#' @import validate
#' @docType package
NULL
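# A minimal workflow sketch (hedged): the commented outcomes below follow from
# the rule semantics but are assumptions, not verified output.
#
# rules <- validate::validator(x > 0, x > 1, if (x > 1) y > 0)
# is_infeasible(rules)     # FALSE: the rule set admits solutions
# detect_redundancy(rules) # should flag 'x > 0', which is implied by 'x > 1'
# remove_redundancy(rules) # drops the redundant rule and keeps the others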
|
/scratch/gouwar.j/cran-all/cranData/validatetools/R/validatetools-package.R
|
#' @title ac
#' @description Calculates the Agreement coefficient (AC) from observed and
#' predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Agreement coefficient (AC).
#' @details Interpretation: larger is better.
#' @inherit mae return references
#'
#' @references Piikki K., Wetterlind J., Soderstrom M., Stenberg B. (2021). Perspectives
#' on validation in digital soil mapping of continuous attributes. A review.
#' Soil Use and Management. \doi{10.1111/sum.12694}
#'
#' @references Willmott, C. J. (1984). On the evaluation of model performance in
#' physical geography. In Spatial statistics and models. Springer, Dordrecht,
#' Netherlands.
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1 ,3, 2, 4, 5, 6, 8, 7, 10)
#' ac(o=obs, p=pred)
#' @export
ac<-function(o, p){
a<-abs(p-mean(o))
b<-abs(o-mean(o))
1-(sse(o,p)/sum(a+b))
}
|
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/ac.R
|
#' @title adjr2
#' @description Calculates the Adjusted R2 (adjr2) from observed
#' values, predicted values and the number of model parameters.
#' @inherit mae return author
#' @inheritParams mae
#' @param k A number. The number of parameters in the model. Note that k includes
#' the intercept, so for example, k is 2 for a linear regression model.
#' @return Adjusted R2 (adjr2)
#' @details Interpretation: larger is better. Adjusted R2 (adjr2) penalizes model complexity;
#' a larger number of parameters (k) means a smaller adjr2 value.
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1 ,3, 2, 4, 5, 6, 8, 7, 10)
#' adjr2(o=obs, p=pred, k=2)
#'
#' @export
adjr2<-function(o, p, k){
n<-length(o)
1 - (n-1)/(n-k)*(sse(o, p)/sst(o))
}
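# Hedged note: with k = 1 the penalty factor (n - 1)/(n - k) equals 1, so the
# expression above reduces to 1 - sse(o, p)/sst(o), the same value as the
# Nash-Sutcliffe efficiency returned by e().
# adjr2(o = obs, p = pred, k = 1)  # equal to e(o = obs, p = pred)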
|
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/adjr2.R
|
#' @title aic
#' @description Calculates the Akaike information criterion (AIC) from observed
#' values, predicted values, the number of observations and the number of
#' model parameters.
#' @inherit mae return author
#' @inheritParams mae
#' @param k A number. The number of parameters in the model. Note that k includes
#' the intercept, so for example, k is 2 for a linear regression model.
#' @return Akaike information criterion (AIC)
#' @details Interpretation: smaller is better. The Akaike information criterion (AIC) penalizes model complexity;
#' a larger number of parameters (k) means a larger AIC value. As it is sensitive to the number of samples, AIC cannot easily be
#' compared between datasets of different sizes.
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1 ,3, 2, 4, 5, 6, 8, 7, 10)
#' aic(o=obs, p=pred, k=2)
#'
#' @export
aic<-function(o, p, k){
length(o)*log(mse(o,p))+2*k
}
|
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/aic.R
|
#' @title allmetrics
#' @description Calculates 31 different validation metrics from observed
#' values and predicted values. For the calculation of some metrics, the
#' number of model parameters is also used.
#' @inherit mae return author
#' @inheritParams aic
#' @return A data.frame with all validation metrics for which functions are defined in
#' this package.
#' @details See respective functions.
#' @inherit mae return references
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1 ,3, 2, 4, 5, 6, 8, 7, 10)
#' allmetrics(o=obs, p=pred, k=2)
#' @export
allmetrics<-function(o, p, k){
l<-list(
ac=ac(o, p),
adjr2=adjr2(o, p, k),
aic=aic(o, p, k),
e=e(o, p),
iqr=iqr(o),
lc=lc(o, p),
lccc=lccc(o, p),
mad=mad(o, p),
mae=mae(o, p),
mape=mape(o, p),
mare=mare(o, p),
mde=mde(o, p),
mdse=mdse(o, p),
me=me(o, p),
msdr=msdr(o, p),
mse=mse(o, p),
nmse=nmse(o, p),
nrmse=nrmse(o, p),
nu=nu(o, p),
precision = precision(o, p),
r=r(o, p),
r2=r2(o, p),
rmdse=rmdse(o, p),
rmse=rmse(o, p),
rpd=rpd(o, p),
rpiq=rpiq(o, p),
skew=skew(o, p),
smape=smape(o, p),
sse=sse(o, p),
sst=sst(o)
  )
  data.frame(l)
}
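# Hedged usage note (building on the example above, and assuming the function
# returns the one-row data.frame documented in @return): the result can be
# reshaped into a long metric/value table.
# m <- allmetrics(o = obs, p = pred, k = 2)
# data.frame(metric = names(m), value = unlist(m), row.names = NULL)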
|
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/allmetrics.R
|
#' @title e
#' @description Calculates the Nash-Sutcliffe modelling efficiency (E) from observed and
#' predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Nash-Sutcliffe modelling efficiency (E).
#' @details Interpretation: a value of 1 means that all predicted values are
#' equal to the observed values. A value of 0 means that the predictions explain
#' as much of the variation in the observed values as the mean of the observed
#' values does. A negative value means that the predictions are less accurate
#' than the mean of the observed values.
#' @references Nash, J. E., & Sutcliffe, J. V. (1970). River flow forecasting
#' through conceptual models part I. A discussion of principles. Journal of
#' hydrology, 10(3), 282-290.
#' @references Piikki K., Wetterlind J., Soderstrom M., Stenberg B. (2021). Perspectives
#' on validation in digital soil mapping of continuous attributes. A review.
#' Soil Use and Management. \doi{10.1111/sum.12694}
#'
#' @references Wilks D. S. (2011) Statistical Methods in the Atmospheric
#' Sciences, Academic Press, Oxford, UK.
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1 ,3, 2, 4, 5, 6, 8, 7, 10)
#' e(o=obs, p=pred)
#'
#' @export
e<-function(o, p){
1 - (sse(o, p)/sst(o))
}
|
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/e.R
|
#' @title iqr
#' @description Calculates the Inter-quartile range (IQR) from a vector of
#' observed values.
#' @inherit mae return author
#'
#' @param o A numeric vector. Observed values.
#' @return Inter-quartile range (IQR).
#'
#' @details The inter-quartile range (IQR) is the difference between the
#' 75th percentile and the 25th percentile of the observed values.
#' @examples
#' obs<-c(1:10)
#' iqr(o=obs)
#' @export
iqr<-function(o){
  q75<-stats::quantile(x=o, probs = 0.75, na.rm = TRUE)
  q25<-stats::quantile(x=o, probs = 0.25, na.rm = TRUE)
as.numeric(q75-q25)
}
|
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/iqr.R
|
#' @title lc
#' @description Calculates the Lack of correlation (LC) from observed
#' and predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Lack of correlation (LC)
#' @details Interpretation: smaller is better.
#' @references Gauch H. G., Hwang J. G., & Fick G. W. 2003. Model evaluation
#' by comparison of model based predictions and measured values. Agronomy Journal,
#' 95(6), 1442-1446.
#' @references Piikki K., Wetterlind J., Soderstrom M., Stenberg B. (2021). Perspectives
#' on validation in digital soil mapping of continuous attributes. A review.
#' Soil Use and Management. \doi{10.1111/sum.12694}
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1 ,3, 2, 4, 5, 6, 8, 7, 10)
#' lc(o=obs, p=pred)
#'
#' @export
lc<-function(o, p){
(1-r2(o,p))*sum((p^2)/length(p))
}
|
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/lc.R
|
#' @title lccc
#' @description Calculates Lin's concordance correlation coefficient (LCCC) from observed and
#' predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Lin's concordance correlation coefficient (LCCC).
#' @details Interpretation: Lin's concordance correlation coefficient (LCCC) can take values
#' between -1 and 1. LCCC-values close to 1 indicate a strong concordance between
#' predicted and observed values, while LCCC-values near -1 indicate a strong
#' discordance. LCCC-values close to 0 indicate no concordance. In a plot of
#' predicted values versus observed values, an LCCC-value of 1 means that
#' all data points are on the 1:1 line.
#' @references Lawrence, I., & Lin, K. (1989). A concordance correlation
#' coefficient to evaluate reproducibility. Biometrics, 255-268.
#' @references Piikki K., Wetterlind J., Soderstrom M., Stenberg B. (2021). Perspectives
#' on validation in digital soil mapping of continuous attributes. A review.
#' Soil Use and Management. \doi{10.1111/sum.12694}
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1 ,3, 2, 4, 5, 6, 8, 7, 10)
#' lccc(o=obs, p=pred)
#'
#' @export
lccc<-function(o, p){
  r<-stats::cor(o,p)        # Pearson correlation between observed and predicted
  sdo<-stats::sd(o)         # standard deviation of the observed values
  sdp<-stats::sd(p)         # standard deviation of the predicted values
  bias<-mean(o)-mean(p)     # mean bias between observed and predicted values
  a<-2*r*sdo*sdp            # numerator: twice the covariance of o and p
  b<-(sdo^2+sdp^2+bias^2)   # denominator: sum of variances plus squared bias
  a/b
}
|
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/lccc.R
|
#' @title mad
#' @description Calculates the Median absolute deviation (MAD) from observed and
#' predicted values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Median absolute deviation (MAD)
#'
#' @details Interpretation: smaller is better.
#'
#' @inherit mae return references
#'
#' @examples
#' obs<-c(1:10)
#' pred<-c(1, 1 ,3, 2, 4, 5, 6, 8, 7, 10)
#' mad(o=obs, p=pred)
#'
#' @export
mad<-function(o, p){
stats::median(abs(o-p))
}
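# Hedged note: this mad() is the median of the absolute prediction errors
# |o - p|, so when the package is attached it will typically mask stats::mad(),
# which instead computes the (scaled) median absolute deviation of a single
# vector around its own median.
# stats::median(abs(obs - pred))  # same value as mad(o = obs, p = pred)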
|
/scratch/gouwar.j/cran-all/cranData/valmetrics/R/mad.R
|