# calculate sentiment score for each word and return compound scores for string
polarity_scores <- function(text){
#split text into vector of single words
wpe <- wordsPlusEmo(text)
sentiments <- NULL
for(i in seq_along(wpe)) {
if(neu_set == TRUE) {valence <- 0} else {valence <- NA}
item <- tolower(wpe[i])
#check if item is in booster dictionary (single words)
if(item %in% names(BOOSTER_DICT)){
sentiments[i] <- valence
# sentiments <- c(sentiments, valence)
}
#check if item is in booster dictionary (compound words)
else if(i < length(wpe) && item == "kind" && tolower(wpe[i+1]) == "of"){
sentiments[i] <- valence
}
#check if item is in Vader dictionary
else if(item %in% vaderLexicon$V1){
sentiments[i] <-senti_valence(wpe, i, item) ### leaves NA for scoreless words
}
#check if item is in idiom dictionary (non-vader section)
else if(item %in% unlist(strsplit(tm::removeWords(names(IDIOMS), stopwords("en")), " "))){
sentiments[i] <- idioms_check(wpe, i, valence, non_dic = T)
}
############ ADDED THIS LINE TO COUNT NEUTRAL WORDS (WHICH DON'T EXIST IN VADER LEXICON)
else {
sentiments[i] <- valence
}
}
# modify results if "but" appears in text
but_results <- but_check(wpe, sentiments)
sentiments <- unlist(but_results[1])
names(sentiments) <- NULL
but_count <- unlist(but_results[2])
#return list of scores for each word in text
word_scores <- paste0("{",paste(sentiments,collapse = ", "), "}")
names(word_scores) <- "word_scores"
# send sentiment vector and text to function to analyze compound and individual affect (positive, negative, and neutral) scores
valence_dict <- score_val(sentiments, text)
# return results
results <- c(word_scores, valence_dict, but_count)
return(results)
}
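##################################
# Usage sketch (assumption: get_vader() is the package's exported wrapper,
# which sets the incl_nt/neu_set options and then calls polarity_scores();
# commented out so nothing runs at package load):
# library(vader)
# get_vader("I really LOVE my dog, but he is not friendly.")
# # returns a named vector with word_scores, compound, pos, neu, neg, but_count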
|
/scratch/gouwar.j/cran-all/cranData/vader/R/vader_sentiment_vector.R
|
# Return a float for sentiment strength based on the input text for each word.
# Positive values are positive valence, negative values are negative valence.
##################################
# senti_valence helper functions #
##################################
#look up the raw vader lexicon score (the ALL CAPS adjustment for mixed-case strings is applied separately by all_caps())
get_vader_score <- function(item){
# get raw vader score
vrow <- which(vaderLexicon$V1==item)
valence <- vaderLexicon[vrow,2]
return(valence)
}
##################################
#check for "no" as modifier
no_check <- function(wpe, i, item, valence){
if(item == "no" && (i < length(wpe) && tolower(wpe[i+1]) %in% vaderLexicon$V1) ||
item == "no" && (i < length(wpe)-1 && tolower(wpe[i+2]) %in% vaderLexicon$V1) ||
item == "no" && (i < length(wpe)-2 && tolower(wpe[i+3]) %in% vaderLexicon$V1 && tolower(wpe[i+2]) %in% c("or", "nor")))
{
valence <- 0
}
if((i > 1 && tolower(wpe[i-1]) == "no") ||
(i > 2 && tolower(wpe[i-2]) == "no") ||
(i > 3 && tolower(wpe[i-3]) == "no" && tolower(wpe[i-1]) == "or")) { # nor is already a negation modifier
valence <- get_vader_score(item) * N_SCALAR
}
return(valence)
}
##################################
#idiom dictionary check
dic_check <- function(idiomLength, checkMe, checkIdiom){
dicCheck <- paste0(idiomLength, collapse = '$|^')
dicCheck <- paste0('^', dicCheck, '$') #add word boundary to start and end of vector
dicCheck <- grep(dicCheck, vaderLexicon$V1)
dicCheck <- vaderLexicon[dicCheck, 1]
for(d in 1:length(dicCheck)) {names(dicCheck)[d] <- grep(dicCheck[d], idiomLength)}
if(min(names(dicCheck))==names(dicCheck[grep(checkMe, dicCheck)])) {valence <- IDIOMS[grep(checkIdiom, names(IDIOMS))]}
else {valence <- NA}
return(valence)
}
dic_check_nd <- function(idiomLength, checkIdiom) {
div <- tm::removeWords(idiomLength, stopwords("en"))
div <- length(div[div != ""])
valence <- IDIOMS[grep(checkIdiom, names(IDIOMS))] / div
return(valence)
}
#check idiom words
idioms_check <- function(wpe, i, valence, non_dic = F){
myStrSplit <- unlist(strsplit(tolower(wpe), " "))
# if text is only 1 word, then we shouldn't check idioms
if(length(myStrSplit) > 1){
if(length(grep("[[:punct:]]", myStrSplit)) == 0){
checkMe <- paste0("\\b", myStrSplit[i], "\\b")
if(length(grep(checkMe, names(IDIOMS))) > 0) {
checkIdiom <- NULL
strIndex <- i
found <- grep(checkMe, names(IDIOMS))
for(f in 1:length(found)){
idiomLength <- unlist(strsplit(names(IDIOMS)[found[f]], " "))
if(length(idiomLength) < 3) {
checkIdiom <- paste0(paste(myStrSplit[strIndex], myStrSplit[strIndex+1]),"|",paste(myStrSplit[strIndex-1], myStrSplit[strIndex]))
} else {
checkIdiom <- paste0(paste(myStrSplit[strIndex], myStrSplit[strIndex+1], myStrSplit[strIndex+2]), "|",
paste(myStrSplit[strIndex-1], myStrSplit[strIndex], myStrSplit[strIndex+1]))
if(i > 1) {checkIdiom <- paste0(checkIdiom, "|", paste(myStrSplit[strIndex-2], myStrSplit[strIndex-1], myStrSplit[strIndex]))}
}
if(length(grep(checkIdiom, names(IDIOMS))) > 0) {
if(non_dic == F) {valence <- dic_check(idiomLength, checkMe, checkIdiom)}
else if(!any(idiomLength %in% vaderLexicon$V1)) {valence <- dic_check_nd(idiomLength, checkIdiom)}
break
}
}
}
}
}
return(valence)
}
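# Worked example of the alternation built above: for wpe = c("that", "was",
# "the", "bomb") and i = 4, checkMe is "\\bbomb\\b"; the matched idiom
# "the bomb" has two words, so checkIdiom becomes "bomb NA|the bomb", the
# second alternative hits names(IDIOMS), and dic_check() returns valence 3.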
##################################
# Check whether ALL words or just SOME words in the input are ALL CAPS
# n.b. ^ = start of str, +$ = until end of str (otherwise checks if ANY letter is cap, and not whole word)
allcap_diff <- function(words){
is_upper <- 0
incl_abc <- 0 ### ADDED this b/c "I WANT MY 100" would:
for(i in 1:length(words)){ ### be mixed if comparing is_upper to length(words) BUT
if(grepl("[[:alpha:]]", words[i])) { ### be all caps if comparing is_upper to length(words with letters only)
incl_abc <- incl_abc + 1
if(grepl("^[^a-z]+$", words[i])) {is_upper <- is_upper + 1}
}
}
if(is_upper > 0 && is_upper < incl_abc) {TRUE} else {FALSE}
}
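# Commented check of the detector above:
# allcap_diff(c("I", "WANT", "MY", "100"))   # FALSE: all alphabetic tokens are caps
# allcap_diff(c("I", "want", "MY", "dog"))   # TRUE: caps are mixed with lower case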
# check if sentiment laden word is in ALL CAPS (while others aren't)
# CAN'T use item because item has been lowered
all_caps <- function(wpe, i, is_cap_diff, valence){
if(is_cap_diff && grepl("^[[:upper:]]+$", wpe[i])){
if(valence > 0) {valence <- valence + C_INCR}
else{valence <- valence - C_INCR}
}
return(valence)
}
##################################
# Check if the preceding words increase, decrease, or negate/nullify the valence
# helper function modifies the direction of the scalar if valence < 0
# and modifies intensity of scalar if word is all caps in mixed case str
scalar_helper <- function(word, valence, is_cap_diff, scalar){
if (valence < 0){scalar <- (-scalar)}
# check if booster/dampener word is in ALLCAPS (while some words aren't)
if(is_cap_diff && grepl("^[^a-z]+$", word)) {
if(valence > 0) {scalar <- scalar + C_INCR}
else {scalar <- scalar - C_INCR}
}
return(scalar)
}
# function checks if scalar word is in dictionary and calls helper function
scalar_inc_dec <- function(wpe, i, start_i, is_cap_diff, valence){
scalar <- 0.0
w <- wpe[i-start_i]
w_lower <- tolower(wpe[i-start_i])
is_bigram <- "n"
#check if item is in booster dictionary (single words)
if(w_lower %in% names(BOOSTER_DICT)){
scalar <- BOOSTER_DICT[w_lower]
scalar <- scalar_helper(w, valence, is_cap_diff, scalar)
}
# check if item is in booster dictionary (compound words)
else if(start_i < length(wpe) && w_lower %in% c("kind", "sort", "just")) {
bigram <- paste(w, wpe[i-start_i+1])
if(tolower(bigram) %in% names(BOOSTER_DICT)) {
scalar <- BOOSTER_DICT[tolower(bigram)]
scalar <- scalar_helper(bigram, valence, is_cap_diff, scalar)
is_bigram <- "y"
}
}
return(c(scalar, is_bigram))
}
#get scalar
get_scalar <- function(wpe, i, start_i, is_cap_diff, valence){
booster_results <- scalar_inc_dec(wpe, i, start_i, is_cap_diff, valence)
s <- as.numeric(booster_results[1])
is_bigram <- booster_results[2]
if(is_bigram == "n"){
if(start_i==2 && s!=0) {s <- s*0.95}
if(start_i==3 && s!=0) {s <- s*0.9}
if(start_i==4 && s!=0) {s <- s*0}
} else {
if(start_i==3 && s!=0) {s <- s*0.95}
if(start_i==4 && s!=0) {s <- s*0.9}
}
return(s)
}
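# Commented example of the distance damping above (uses BOOSTER_DICT and
# B_INCR from vader_setup.R, which appears further down in this dump):
# wpe <- c("really", "very", "good")
# get_scalar(wpe, i = 3, start_i = 1, is_cap_diff = FALSE, valence = 1.9)  # 0.293
# get_scalar(wpe, i = 3, start_i = 2, is_cap_diff = FALSE, valence = 1.9)  # 0.293 * 0.95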
# Determine if input contains negation words
negated <- function(input_words) { ### incl_nt set in POLARITY_SCORES fx (set to TRUE unless changed by user)
w <- tolower(input_words)
if(any(w %in% NEGATE)) {TRUE} ### probably don't need (any) since only 1 word passed at a time
else if (isTRUE(incl_nt) && any(grepl("n't", w))) {TRUE}
else {FALSE}
# else if("least" %in% w) { ### removing this code section because
# i <- which(w == "least") ### a.) won't work since only 1 word passed at a time in code below
# if(i > 1 && w[i-1] != "at") {TRUE} ### b.) separate least check fx written
# }
}
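# Commented example (NEGATE comes from vader_setup.R; incl_nt is a flag set
# in the calling environment before negated() is reached):
# incl_nt <- TRUE
# negated("never")   # TRUE: listed in NEGATE
# negated("didn't")  # TRUE: listed in NEGATE, and the "n't" check also fires
# negated("happy")   # FALSE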
#check negation words
negation_check <- function(wpe, start_i, i, valence){
wpe_lower <- tolower(wpe)
if(start_i==1){
if(negated(wpe_lower[i-1])) {valence <- valence * N_SCALAR}
}
if(start_i==2){
if(wpe_lower[i-2]=="never" && wpe_lower[i-1] %in% c("so", "this")) {valence <- valence*1.25}
else if(wpe_lower[i-2]=="without" && wpe_lower[i-1] =="doubt") {valence <- valence} #WITHOUT in NEGATE dict, so cancelling effect
else if(negated(wpe_lower[i-2])) {valence <- valence * N_SCALAR}
}
if(start_i==3){
if(wpe_lower[i-3]=="never" && (wpe_lower[i-2] %in% c("so", "this") || wpe_lower[i-1] %in% c("so", "this"))) {valence <- valence*1.25}
else if(wpe_lower[i-3]=="without" && (wpe_lower[i-2]=="doubt" || wpe_lower[i-1]=="doubt")) {valence <- valence}
else if(negated(wpe_lower[i-3])) {valence <- valence * N_SCALAR}
}
return(valence)
}
# dampen/amplify the scalar modifier of preceding words and emoticons (excluding the ones that immediately precede the item)
# dampening/amplifying is based on their distance from the current item
modify <- function(wpe, i, is_cap_diff, valence){
for(start_i in c(1:4)){
# booster
if(i > start_i && (tolower(wpe[i-start_i]) == "kind" || !(tolower(wpe[i-start_i]) %in% vaderLexicon$V1))){
s <- get_scalar(wpe, i, start_i, is_cap_diff, valence)
valence <- valence + s
}
}
for(start_i in c(1:3)) {
if(i > start_i) {
# negation
valence <- negation_check(wpe, start_i, i, valence)
# idiom
# if(start_i == 3) {valence <- idioms_check(wpe, i, valence)} #this is from the original script, but not implementing idioms here
}
}
return(valence)
}
##################################
#check least modifier
least_check <- function(wpe, i, valence){
if(i > 2 && tolower(wpe[i-1]) == "least"){
if(!(tolower(wpe[i-2]) %in% c("at", "very"))) {valence <- valence * N_SCALAR}
} else if(i > 1 && tolower(wpe[i-1]) == "least") {valence <- valence * N_SCALAR}
return(valence)
}
#############################################################
# main function to calculate sentiment scores for each word #
#############################################################
senti_valence <- function(wpe, i, item){
is_cap_diff <- allcap_diff(wpe)
valence <- mean(get_vader_score(item)) ### getting mean of score because sometimes word is duped in dictionary
valence <- no_check(wpe, i, item, valence)
valence <- idioms_check(wpe, i, valence) ### moved idioms above modify (and all_caps)
valence <- all_caps(wpe, i, is_cap_diff, valence)
valence <- modify(wpe, i, is_cap_diff, valence)
valence <- least_check(wpe, i, valence)
return(valence)
}
|
/scratch/gouwar.j/cran-all/cranData/vader/R/vader_sentiment_word.R
|
#import vader lexicon
# vaderLexicon <- read.delim("vaderLexicon.txt", header = F, stringsAsFactors = F, quote = "")
#create vader constants
# empirically derived mean sentiment intensity rating increase/decrease for booster words
B_INCR = 0.293
B_DECR = -0.293
# empirically derived mean sentiment intensity rating increase for using ALLCAPs to emphasize a word
C_INCR = 0.733
N_SCALAR = -0.74
######################################################################################################################
NEGATE = c("aint",
"arent",
"cannot",
"cant",
"couldnt",
"darent",
"didnt",
"doesnt",
"ain't",
"aren't",
"can't",
"couldn't",
"daren't",
"didn't",
"doesn't",
"dont",
"hadnt",
"hasnt",
"havent",
"isnt",
"mightnt",
"mustnt",
"neither",
"don't",
"hadn't",
"hasn't",
"haven't",
"isn't",
"mightn't",
"mustn't",
"neednt",
"needn't",
"never",
"none",
"nope",
"nor",
"not",
"nothing",
"nowhere",
"oughtnt",
"shant",
"shouldnt",
"uhuh",
"wasnt",
"werent",
"oughtn't",
"shan't",
"shouldn't",
"uh-uh",
"wasn't",
"weren't",
"without",
"wont",
"wouldnt",
"won't",
"wouldn't",
"rarely",
"seldom",
"despite")
# booster/dampener 'intensifiers' or 'degree adverbs'
# http://en.wiktionary.org/wiki/Category:English_degree_adverbs
BOOSTER_POS_NAMES = c("absolutely",
"amazingly",
"awfully",
"completely",
"considerable",
"considerably",
"decidedly",
"deeply",
"effing",
"enormous",
"enormously",
"entirely",
"especially",
"exceptional",
"exceptionally",
"extreme",
"extremely",
"fabulously",
"flipping",
"flippin",
"frackin",
"fracking",
"fricking",
"frickin",
"frigging",
"friggin",
"fully",
"fuckin",
"fucking",
"fuggin",
"fugging",
"greatly",
"hella",
"highly",
"hugely",
"incredible",
"incredibly",
"intensely",
"major",
"majorly",
"more",
"most",
"particularly",
"purely",
"quite",
"really",
"remarkably",
"so",
"substantially",
"thoroughly",
"total",
"totally",
"tremendous",
"tremendously",
"uber",
"unbelievably",
"unusually",
"utter",
"utterly",
"very")
BOOSTER_POS <- rep(B_INCR, length(BOOSTER_POS_NAMES))
names(BOOSTER_POS) <- BOOSTER_POS_NAMES
BOOSTER_NEG_NAMES = c("almost",
"barely",
"hardly",
"just enough",
"kind of",
"kinda",
"kindof",
"kind-of",
"less",
"little",
"marginal",
"marginally",
"ocassional",
"occasionally",
"partly",
"scarce",
"scarcely",
"slight",
"slightly",
"somewhat",
"sort of",
"sorta",
"sortof",
"sort-of")
BOOSTER_NEG<- rep(B_DECR, length(BOOSTER_NEG_NAMES))
names(BOOSTER_NEG) <- BOOSTER_NEG_NAMES
BOOSTER_DICT = c(BOOSTER_POS, BOOSTER_NEG)
# check for special case idioms using a sentiment-laden keyword known to SAGE
IDIOMS = c("the shit" = 3,
"the bomb" = 3,
"bad ass" = 1.5, ### BOTH WORDS IN DICT
# "badass" = 1.5, ### removing because in dictionary as single word
"yeah right" = -2, ### right not in dic
"kiss of death" = -1.5, ### BOTH WORDS IN DICT
"to die for" = 3,
"cut the mustard" = 2, ### mustard not in dic
"hand to mouth" = -2, ### mouth not in dic
"upper hand" = 1, ### upper not in dic
################################################# IDIOMS NOT IN VADER LEXICON
"back handed" = -2,
"blow smoke" = -2,
"blowing smoke" = -2,
"break a leg" = 2,
"cooking with gas" = 2,
"in the black" = 2,
"in the red" = -2,
"on the ball" = 2,
"under the weather" = -2)
|
/scratch/gouwar.j/cran-all/cranData/vader/R/vader_setup.R
|
#' Non-Domestic Vaccine Adverse Event Reporting System (VAERS) vaccine data for
#' the Present
#'
#' A table containing the "remaining vaccine information (e.g., vaccine name,
#' manufacturer, lot number, route, site, and number of previous doses
#' administered), for each of the vaccines listed in Box 13 of the VAERS form.
#' There is a matching record in this file with the VAERSDATA file identified
#' by VAERS_ID."
#'
#'
#'
#' @format A data.table data frame with 77,784 rows and 8 variables:
#' \describe{
#' \item{VAERS_ID}{VAERS Identification Number}
#' \item{VAX_TYPE}{Administered Vaccine Type}
#' \item{VAX_MANU}{Vaccine Manufacturer}
#' \item{VAX_LOT}{Manufacturer's Vaccine Lot}
#' \item{VAX_DOSE}{Number of previous doses administered}
#' \item{VAX_ROUTE}{Vaccination Route}
#' \item{VAX_SITE}{Vaccination Site}
#' \item{VAX_NAME}{Vaccination Name}
#' }
#'
#'
#' @references
#' US Centers for Disease Control and Prevention (CDC) and the US Food and Drug Administration (FDA) Vaccine Adverse Event Reporting System (VAERS) \url{https://vaers.hhs.gov/index} and \url{https://vaers.hhs.gov/data/READMEJanuary2015.pdf}.
#'
#'
#'
#' @docType data
#' @name vaersNDvax
#' @usage vaersNDvax
#' @examples
#' library(data.table)
#' vaersNDvax
NULL
|
/scratch/gouwar.j/cran-all/cranData/vaersNDvax/R/data-vaersND_vax.R
|
#' vaersNDvax: Non-Domestic Vaccine Adverse Event Reporting System (VAERS)
#' Vaccine Data
#'
#' vaersNDvax provides the Non-Domestic Vaccine Adverse Event Reporting System
#' (VAERS) vaccine data for the Present. "VAERS is a national vaccine safety
#' surveillance program co-sponsored by the US Centers for Disease Control
#' and Prevention (CDC) and the US Food and Drug Administration (FDA). VAERS
#' is a post-marketing safety surveillance program, collecting information
#' about adverse events (possible side effects) that occur after the
#' administration of vaccines licensed for use in the United States."
#'
#' @source VAERS \url{https://vaers.hhs.gov/index}
#'
#'
#' @docType package
#' @name vaersNDvax
NULL
|
/scratch/gouwar.j/cran-all/cranData/vaersNDvax/R/vaersNDvax.R
|
#' US Vaccine Adverse Event Reporting System (VAERS) vaccine data for the Present
#'
#' A table containing the "remaining vaccine information (e.g., vaccine name,
#' manufacturer, lot number, route, site, and number of previous doses
#' administered), for each of the vaccines listed in Box 13 of the VAERS form.
#' There is a matching record in this file with the VAERSDATA file identified
#' by VAERS_ID."
#'
#'
#'
#' @format A data.table data frame with 19,016 rows and 8 variables:
#' \describe{
#' \item{VAERS_ID}{VAERS Identification Number}
#' \item{VAX_TYPE}{Administered Vaccine Type}
#' \item{VAX_MANU}{Vaccine Manufacturer}
#' \item{VAX_LOT}{Manufacturer's Vaccine Lot}
#' \item{VAX_DOSE}{Number of previous doses administered}
#' \item{VAX_ROUTE}{Vaccination Route}
#' \item{VAX_SITE}{Vaccination Site}
#' \item{VAX_NAME}{Vaccination Name}
#' }
#'
#'
#' @references
#' US Centers for Disease Control and Prevention (CDC) and the US Food and Drug Administration (FDA) Vaccine Adverse Event Reporting System (VAERS) \url{https://vaers.hhs.gov/} and \url{https://vaers.hhs.gov/docs/VAERSDataUseGuide_October2017.pdf}.
#'
#'
#'
#' @docType data
#' @name vaersvax
#' @usage vaersvax
#' @examples
#' library("data.table")
#' vaersvax
NULL
|
/scratch/gouwar.j/cran-all/cranData/vaersvax/R/data-vaers_vax.R
|
#' vaersvax: US Vaccine Adverse Event Reporting System (VAERS) Vaccine Data
#'
#' vaersvax provides the US Vaccine Adverse Event Reporting System
#' (VAERS) vaccine data for the Present. "The Vaccine Adverse Event Reporting
#' System (VAERS) is a national early warning system to detect possible safety
#' problems in U.S.-licensed vaccines. VAERS is co-managed by the Centers for
#' Disease Control and Prevention (CDC) and the U.S. Food and Drug
#' Administration (FDA)." For more information about the data, visit
#' <https://vaers.hhs.gov/>. For information about vaccination/immunization
#' hazards, visit <http://www.questionuniverse.com/rethink.html#vaccine>.
#'
#' @source VAERS \url{https://vaers.hhs.gov/}
#'
#'
#' @docType package
#' @name vaersvax
NULL
|
/scratch/gouwar.j/cran-all/cranData/vaersvax/R/vaers_vax.R
|
#' @name albums
#' @author Bruna Wundervald, \email{[email protected]}.
#' @export
#' @title An artist's music albums.
#' @description Gives information about the albums of an artist/band.
#' @param name The name of the artist/band.
#' @param message Should the function print something if the
#' required data is not found?
#' @return \code{albums} returns a data.frame with information
#' about the albums, as the id, name and year of release.
#' @details The variables returned by the function are extracted with
#' the Vagalume API.
#' @examples
#'
#' \dontrun{
#' albums("the-beatles")
#' albums("chico-buarque")
#' }
albums <- function (name, message = TRUE) {
name <- stringr::str_to_lower(name)
cont <- paste0("https://www.vagalume.com.br/", name, "/index.js") %>%
jsonlite::fromJSON()
if (!is.null(cont)) {
albums <- data.frame(cont$artist$album$item)[,-3]
names(albums)[2] <- "title"
}
else {
albums <- NULL
if (message)
print("Artist not found.")
}
return(albums)
}
|
/scratch/gouwar.j/cran-all/cranData/vagalumeR/R/albums.R
|
#' @name artistInfo
#' @author Bruna Wundervald, \email{[email protected]}.
#' @export
#' @title Artist Information
#' @description Gives some information about a given artist/band.
#' @param name The name of the artist/band.
#' @param message Should the function print something if the
#' required data is not found?
#' @return \code{artistInfo} returns a data.frame with the information.
#' @details The variables returned by the function are extracted with
#' the Vagalume API.
#' @examples
#'
#' \dontrun{
#' artistInfo("the-beatles")
#' artistInfo("chico-buarque")
#' }
#'
artistInfo <- function(name, message = TRUE) {
name <- stringr::str_to_lower(name)
cont <- paste0("https://www.vagalume.com.br/",name,"/index.js") %>%
jsonlite::fromJSON()
if(!is.null(cont)){
artist <- data.frame(id = cont$artist$id,
name = cont$artist$desc,
views = cont$artist$rank$views,
pos = cont$artist$rank$pos,
period = cont$artist$rank$period,
uniques = cont$artist$rank$uniques,
points = cont$artist$rank$points)
} else{
artist <- NULL
if(message)
print("Artist not found.")}
return(artist)
}
|
/scratch/gouwar.j/cran-all/cranData/vagalumeR/R/artistInfo.R
|
#' @name genre
#' @author Bruna Wundervald, \email{[email protected]}.
#' @export
#' @title An artist's musical genre(s)
#' @description Gives information about the genre (or multiple genres)
#' of an artist/band.
#' @param name The name of the artist/band.
#' @param message Should the function print something if the
#' required data is not found?
#' @return \code{genre} returns a data.frame with information
#' about the genre(s).
#' @details The variables returned by the function are extracted with
#' the Vagalume API.
#' @examples
#'
#' \dontrun{
#' genre("the-beatles")
#' genre("chico-buarque")
#' }
genre <- function (name, message = TRUE) {
name <- stringr::str_to_lower(name)
cont <- paste0("https://www.vagalume.com.br/", name, "/index.js") %>%
jsonlite::fromJSON()
if (!is.null(cont)) {
genre <- data.frame(genre = cont$artist$genre$name)
}
else {
genre <- NULL
if (message)
print("Artist not found.")
}
return(genre)
}
|
/scratch/gouwar.j/cran-all/cranData/vagalumeR/R/genre.R
|
#' @name lyrics
#' @author Bruna Wundervald, \email{[email protected]}.
#' @export
#' @title Lyrics of a song.
#' @description Gives the lyrics text of a song and the translation text,
#' when the language of the song is not Portuguese.
#' @param identifier The identifier of the song.
#' @param type The type of identifier of the song ("name" or "id").
#' @param artist The name of the artist/band.
#' @param key The apikey.
#' @param message Should the function print something if the
#' required data is not found?
#' @return \code{lyrics} returns a data.frame with information
#' about the artist, the song and the texts.
#' @details The variables returned by the function are extracted with
#' the Vagalume API.
#' @examples
#'
#' \dontrun{
#' identifier <- "A-Day-In-The-Life"
#' key <- "your token"
#' artist <- "the-beatles"
#' type <- "name"
#' lyrics(identifier, type, artist, key)
#'
#' key <- "your token"
#' identifier <- "3ade68b4gdc96eda3"
#' type <- "id"
#' lyrics(identifier = identifier, type = type, key = key)
#' }
lyrics <- function(identifier, type, artist, key, message = TRUE){
artist <- stringr::str_to_lower(artist)
if(type == "id"){
req <- httr::GET(paste0("https://api.vagalume.com.br/search.php?apikey=",
key, "&musid=", identifier))
}
if(type == "name"){
req <- httr::GET(paste0("https://api.vagalume.com.br/search.php?art=",
artist,"&mus=",identifier,"&extra=relmus&apikey=", key))
}
cont <- httr::content(req, encoding = "UTF-8")
if(!is.null(cont$mus)){
mus <- cont$mus[[1]][c(1, 2, 4, 5)] %>%
purrr::transpose() %>%
purrr::map_df(data.frame) %>%
dplyr::mutate(id = identifier,
name = artist,
song.id = id,
song = name,
text = stringr::str_replace_all(
as.character(text), "[\n]" , " ")) %>%
`if`(!is.null(cont$mus[[1]]$translate[[1]]$lang),
dplyr::mutate(.,
language = cont$mus[[1]]$translate[[1]]$lang), .)
if(!is.null(cont$mus[[1]]$lang) && cont$mus[[1]]$lang > 1){
if(!is.null(cont$mus[[1]]$translate)){
mus <- mus %>%
dplyr::mutate(
translation =
stringr::str_replace_all(
cont$mus[[1]]$translate[[1]]$text,
"[\n]" , " "))
}
}
}
else{ mus <- NULL
if(message) print("Lyrics not found") }
Sys.sleep(2)
return(mus)
}
|
/scratch/gouwar.j/cran-all/cranData/vagalumeR/R/lyrics.R
|
#' @name relatedInfo
#' @author Bruna Wundervald, \email{[email protected]}.
#' @export
#' @title Artist's Related
#' @description Gives information about what artists/bands are
#' related to a specific artist/band.
#' @param name The name of the artist/band.
#' @param message Should the function print something if the
#' required data is not found?
#' @return \code{relatedInfo} returns a data.frame with information
#' about the related artists.
#' @details The variables returned by the function are extracted with
#' the Vagalume API.
#' @examples
#'
#' \dontrun{
#' relatedInfo("the-beatles")
#' relatedInfo("chico-buarque")
#' }
relatedInfo <- function(name, message = TRUE){
name <- stringr::str_to_lower(name)
cont <- paste0("https://www.vagalume.com.br/",name,"/index.js") %>%
jsonlite::fromJSON()
if(!is.null(cont$artist$related)){
rel <- data.frame(id = cont$artist$id,
name = cont$artist$desc,
rel.id = cont$artist$related$id,
related = cont$artist$related$name)
} else {
rel <- NULL
if(message) print("No related artists available.")
}
return(rel)
}
|
/scratch/gouwar.j/cran-all/cranData/vagalumeR/R/relatedInfo.R
|
#' @name songNames
#' @author Bruna Wundervald, \email{[email protected]}.
#' @export
#' @title Song names of an artist/band.
#' @description Gives information about the song names of a specific
#' artist/band.
#' @param name The name of the artist/band.
#' @param message Should the function print something if the
#' required data is not found?
#' @return \code{songNames} returns a data.frame with information
#' about song names.
#' @details The variables returned by the function are extracted with
#' the Vagalume API.
#' @examples
#'
#' \dontrun{
#' songNames("the-beatles")
#' songNames("chico-buarque")
#' }
#'
songNames <- function(name, message = TRUE){
name <- stringr::str_to_lower(name)
cont <- paste0("https://www.vagalume.com.br/",name,"/index.js") %>%
jsonlite::fromJSON()
if(!is.null(cont)){
mus <- data.frame(id = cont$artist$id,
name = cont$artist$desc,
song.id = cont$artist$lyrics$item$id,
song = cont$artist$lyrics$item$desc)
} else{
mus <- NULL
if(message) print("No song names found.")
}
return(mus)
}
|
/scratch/gouwar.j/cran-all/cranData/vagalumeR/R/songNames.R
|
#' @name topLyrics
#' @author Bruna Wundervald, \email{[email protected]}.
#' @export
#' @title Top lyrics of an artist/band
#' @description Gives information about the top lyrics (most viewed)
#' of a specific artist/band.
#' @param name The name of the artist/band.
#' @param message Should the function print something if the
#' required data is not found?
#' @return \code{topLyrics} returns a data.frame with information
#' about the top lyrics.
#' @details The variables returned by the function are extracted with
#' the Vagalume API.
#' @examples
#'
#' \dontrun{
#' topLyrics("the-beatles")
#' topLyrics("chico-buarque")
#' }
#'
topLyrics <- function(name, message = TRUE){
name <- stringr::str_to_lower(name)
cont <- paste0("https://www.vagalume.com.br/",name,"/index.js") %>%
jsonlite::fromJSON()
if(!is.null(cont)){
top <- data.frame(id = cont$artist$id,
name = cont$artist$desc,
id.top = cont$artist$toplyrics$item$id,
song = cont$artist$toplyrics$item$desc)
} else{
top <- NULL
if(message) print("No top lyrics found.")
}
return(top)
}
|
/scratch/gouwar.j/cran-all/cranData/vagalumeR/R/topLyrics.R
|
#' Pipe operator
#'
#' See \code{\link[magrittr]{\%>\%}} for more details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @examples{
#'
#' iris %>% as.matrix()
#'}
NULL
# Get rid of NOTES
globalVariables(c(".", "name", "id", "text"))
|
/scratch/gouwar.j/cran-all/cranData/vagalumeR/R/utils.R
|
calc.VAlogL <-
function(y, Z, para.X, family, lambda, kappa, a, A, phi, S, d, offset = NULL, index.cov, eta = NULL)
{
if(is.null(offset))
offset <- numeric(length(y))
if(is.null(eta))
eta <- para.X %*% kappa + Z %*% a + offset
if(family$family == "gaussian")
{
out <- sum(dnorm(y, mean = eta, sd = sqrt(phi), log = TRUE)) - (0.5/phi) * sum((Z %*% A) * Z) + 0.5 * determinant(A)$mod + 0.5 * sum(d * log(lambda[lambda > 0]))
}
if(family$family == "binomialp")
{
ZAZ <- (Z %*% A) * Z
out <- sum(dbinom(y, 1, prob = binomial(link = "probit")$linkinv(eta + 0.5 * rowSums(ZAZ)), log = TRUE)) - (0.5/phi) * sum(ZAZ) + 0.5 * determinant(A)$mod + 0.5 * sum(d * log(lambda[lambda > 0]))
}
if(family$family == "binomial")
{
ZAZ <- rowSums((Z %*% A) * Z)
out <- sum(dbinom(y, 1, prob = binomial()$linkinv(eta + 0.5 * ZAZ), log = TRUE)) - 0.5 * sum(y * ZAZ) + 0.5 * determinant(A)$mod + 0.5 * sum(d * log(lambda[lambda > 0]))
}
if(family$family == "poisson")
{
ZAZ <- rowSums((Z %*% A) * Z)
out <- sum(dpois(y, lambda = exp(eta + 0.5 * ZAZ), log = TRUE)) - 0.5 * sum(y * ZAZ) + 0.5 * determinant(A)$mod + 0.5 * sum(d * log(lambda[lambda > 0]))
}
for(k2 in 1:length(lambda))
{
out <- out - 0.5 * lambda[k2] * sum(diag(crossprod(S[[k2]], A[index.cov == k2, index.cov == k2]))) - 0.5 * lambda[k2] * t(a[index.cov == k2]) %*% S[[k2]] %*% a[index.cov == k2]
}
return(as.numeric(out))
}
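# A toy Gaussian call illustrating the argument shapes (hypothetical inputs,
# not from the package: one smooth term with a 3-column basis and penalty):
# set.seed(1); n <- 20
# Z <- matrix(rnorm(n * 3), n, 3); para.X <- cbind(1, rnorm(n))
# kappa <- c(0.5, -1); a <- rnorm(3); A <- diag(0.1, 3)
# S <- list(diag(3)); d <- 3; lambda <- 2; phi <- 1
# y <- as.vector(para.X %*% kappa + Z %*% a + rnorm(n))
# calc.VAlogL(y, Z, para.X, gaussian(), lambda, kappa, a, A, phi, S, d,
#             index.cov = rep(1, 3))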
|
/scratch/gouwar.j/cran-all/cranData/vagam/R/calc.VAlogL.R
|
gamsim <- function(n = 400, extra.X = NULL, beta = NULL, dist = "normal", scale = 1, offset = NULL)
{
if(is.null(offset))
offset <- numeric(n)
x0 <- runif(n, 0, 1)
x1 <- x0 * 0.7 + runif(n, 0, 0.3)
x2 <- runif(n, 0, 1)
x3 <- x2 * 0.9 + runif(n, 0, 0.1)
f0 <- function(x)
{
2 * sin(pi * x)
}
f1 <- function(x)
{
exp(2 * x)
}
f2 <- function(x)
{
0.2 * x^11 * (10 * (1 - x))^6 + 10 * (10 * x)^3 * (1 - x)^10
}
f3 <- function(x)
{
0 * x
}
# nonparametric component (centered or not)
f_noncenter <- f0(x0) + f1(x1) + f2(x2)
f_smooth <- f_noncenter - mean(f_noncenter)
if(!is.null(extra.X))
{
if(is.null(colnames(extra.X))) colnames(extra.X) <- paste("Para.X", 1:ncol(extra.X), sep = "")
f_all <- f_smooth + as.matrix(extra.X) %*% beta + offset
}
else
{
f_all <- f_smooth + offset
}
if(dist == "normal")
{
y <- rnorm(n, f_all, scale)
}
else if(dist == "poisson")
{
y <- rpois(n, exp(f_all))
}
else if(dist == "binomialp")
{
g <- binomial(link = "probit")$linkinv(f_all)
y <- rbinom(n, 1, g)
}
else if(dist == "binomial")
{
g <- binomial()$linkinv(f_all)
y <- rbinom(n, 1, g)
}
else
{
stop("dist not recognised")
}
if(!is.null(extra.X))
{
## f is linear predictor just for the smoothed part; linear.predictor includes parametric components if required
data <- data.frame(y = y, x0 = x0, x1 = x1, x2 = x2, x3 = x3, f = f_smooth, f0 = f0(x0), f1 = f1(x1), f2 = f2(x2), f3 = x3 * 0, linear.predictor = f_all, extra.X)
}
else
{
data <- data.frame(y = y, x0 = x0, x1 = x1, x2 = x2, x3 = x3, f = f_smooth, f0 = f0(x0), f1 = f1(x1), f2 = f2(x2), f3 = x3 * 0, linear.predictor = f_all)
}
return(data)
}
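# Example: simulate a Poisson GAM dataset with a parametric treatment effect,
# mirroring the simrun_* drivers further down:
# dat <- gamsim(n = 200, dist = "poisson",
#               extra.X = data.frame(intercept = 1, treatment = rep(0:1, each = 100)),
#               beta = c(-1, 0.5))
# head(dat[, c("y", "x0", "f", "linear.predictor")])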
|
/scratch/gouwar.j/cran-all/cranData/vagam/R/gamsim.R
|
info.valouis <- function(y, para.X, Z, kappa, phi = 1, lambda, a, A, S, d, index.cov, mc.samps = 2000, family = gaussian(), offset = NULL, seed = NULL)
{
if(!is.null(seed))
{
old <- .Random.seed
on.exit( { .Random.seed <<- old } )
set.seed(seed)
}
if(is.null(offset))
offset <- numeric(length(y))
diminfo <- ncol(para.X) + length(lambda) + as.numeric(family$family == "gaussian")
n <- length(y)
mc_betas <- rmvnorm(mc.samps, mean = a, sigma = A)
mc_betasTSmc_betas <- matrix(0, nrow = mc.samps, ncol = length(lambda))
for(k in 1:length(lambda)) ## beta^T_j%*%S_j%*%beta_j, across the MC samples
{
mc_betasTSmc_betas[,k] <- rowSums((mc_betas[,index.cov==k] %*% S[[k]]) * mc_betas[,index.cov == k])
}
score.lambda <- matrix(0.5 * d/lambda, nrow = mc.samps, ncol = length(lambda), byrow = TRUE) - 0.5 * mc_betasTSmc_betas
rm(mc_betasTSmc_betas)
scorescoreT_out <- matrix(0, nrow = diminfo, ncol = diminfo) ## \sum\limits_{i=1}^n E{\partial \ell_{com}(y_i, beta) \partial \ell_{com}(y_i, beta)^T}
neghess_out <- matrix(0, nrow = diminfo, ncol = diminfo) ## \sum\limits_{i=1}^n E{\partial^2 \ell_{com}(y_i, beta)}
eta.para <- para.X %*% kappa
if(family$family == "gaussian")
{
for(i in 1:mc.samps)
{
res <- y - eta.para - as.vector(Z %*% mc_betas[i,]) - offset
cw_score <- c(crossprod(para.X,res)/phi, -0.5 * n/phi + 0.5/phi^2 * sum(res^2))
cw_score <- c(cw_score, score.lambda[i,])
scorescoreT_out <- scorescoreT_out + tcrossprod(cw_score)
}
scorescoreT_out <- scorescoreT_out/mc.samps
res <- y - eta.para - Z %*% a - offset
neghess_out[1:ncol(para.X),1:ncol(para.X)] <- crossprod(para.X)/phi
neghess_out[1:ncol(para.X),ncol(para.X)+1] <- crossprod(para.X, res)/phi^2
neghess_out[ncol(para.X)+1,1:ncol(para.X)] <- t(neghess_out[1:ncol(para.X),ncol(para.X)+1])
neghess_out[ncol(para.X)+1,ncol(para.X)+1] <- -0.5 * n/phi^2 + 1/phi^3 * (sum(res^2) + sum((Z %*% A) * Z))
diag(neghess_out)[(ncol(para.X)+2):diminfo] <- 0.5 * d/lambda^2
}
if(family$family == "poisson")
{
for(i in 1:mc.samps)
{
res <- y - exp(eta.para + as.vector(Z %*% mc_betas[i,]) + offset)
cw_score <- c(crossprod(para.X, res), score.lambda[i,])
scorescoreT_out <- scorescoreT_out + tcrossprod(cw_score)
}
scorescoreT_out <- scorescoreT_out/mc.samps
res <- exp(eta.para + Z %*% a + 0.5 * rowSums((Z %*% A) * Z) + offset)
neghess_out[1:ncol(para.X),1:ncol(para.X)] <- t(para.X) %*% (para.X * as.vector(res))
diag(neghess_out)[(ncol(para.X)+1):diminfo] <- 0.5 * d/lambda^2
}
if(family$family == "binomialp")
{
all.eta <- eta.para + Z %*% a + offset
seqa <- -Inf * (y == 0)
seqa[is.nan(seqa)] <- 0
seqb <- Inf * (y == 1)
seqb[is.nan(seqb)] <- 0
for(i in 1:mc.samps)
{
mc.u <- rtruncnorm(n, a = seqa, b = seqb, mean = all.eta)
res <- (mc.u - eta.para - as.vector(Z %*% mc_betas[i,]) - offset)
cw_score <- c(crossprod(para.X, res), score.lambda[i,])
scorescoreT_out <- scorescoreT_out + tcrossprod(cw_score)
}
scorescoreT_out <- scorescoreT_out/mc.samps
neghess_out[1:ncol(para.X),1:ncol(para.X)] <- crossprod(para.X)
diag(neghess_out)[(ncol(para.X)+1):diminfo] <- 0.5 * d/lambda^2
}
if(family$family == "binomial")
{
meanw <- numeric(n)
for(i in 1:mc.samps)
{
res <- binomial()$linkinv(eta.para + as.vector(Z %*% mc_betas[i,]) + offset)
cw_score <- c(crossprod(para.X, y-res), score.lambda[i,])
scorescoreT_out <- scorescoreT_out + tcrossprod(cw_score)
meanw <- meanw + binomial()$variance(res)
}
scorescoreT_out <- scorescoreT_out/mc.samps
meanw <- meanw/mc.samps
neghess_out[1:ncol(para.X),1:ncol(para.X)] <- crossprod(para.X, para.X * as.vector(meanw))
diag(neghess_out)[(ncol(para.X)+1):diminfo] <- 0.5 * d/lambda^2
}
rm(cw_score, score.lambda, res)
get.info <- neghess_out - scorescoreT_out
return(get.info)
}
|
/scratch/gouwar.j/cran-all/cranData/vagam/R/info.valouis_internal.R
|
plot.vagam <- function(x, n = 100, alpha = 0.05, rug = TRUE, se = TRUE, xlim = NULL, ylim = NULL, xlab = NULL, ylab = NULL, main = NULL, select = NULL, ...)
{
if(!inherits(x, "vagam"))
{
stop("x should be an object of class vagam. Thanks!")
}
if(is.null(x$smooth.X))
{
stop("Fitted GAM must contain the data. Thanks!")
}
if(is.null(select))
sel_plots <- 1:ncol(x$smooth.X)
if(!is.null(select))
sel_plots <- select
for(k2 in sel_plots)
{
calc_pred <- predict.vagam(object = x, new.smoothX = seq(min(x$smooth.X[,k2]), max(x$smooth.X[,k2]),
length = n), terms = k2, alpha = alpha)
plot(calc_pred$new.smoothX, calc_pred$prediction, type = "l", xlim = xlim, ylim = ylim, main = main,
xlab = ifelse(is.null(xlab), colnames(x$smooth.X)[k2], xlab),
ylab = ifelse(is.null(ylab), paste("Smooth of", colnames(x$smooth.X)[k2]), ylab), ...)
if(se)
{
lines(calc_pred$new.smoothX, calc_pred$lower, lty = 2, ...)
lines(calc_pred$new.smoothX, calc_pred$upper, lty = 2, ...)
}
if(rug)
{
rug(x$smooth.X[,k2], ...)
}
}
}
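# Commented usage sketch (arguments mirror the vagam() calls in the simrun_*
# functions below; plotting needs a fit saved with save.data = TRUE):
# dat <- gamsim(n = 100, dist = "normal",
#               extra.X = data.frame(intercept = 1, treatment = rep(0:1, each = 50)),
#               beta = c(-1, 0.5))
# fit <- vagam(y = dat$y, smooth.X = dat[, 2:5],
#              para.X = data.frame(treatment = dat$treatment),
#              int.knots = 8, save.data = TRUE, family = gaussian())
# plot(fit, select = 2)  # smooth estimate for x1 with pointwise intervals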
|
/scratch/gouwar.j/cran-all/cranData/vagam/R/plot.vagam.R
|
predict.vagam <- function(object, new.smoothX, new.paraX = NULL, terms = NULL, alpha = 0.05, type = "link", ...)
{
if(!inherits(object, "vagam"))
{
stop("object should be an object of class vagam. Thanks!")
}
x <- object
if(alpha > 1 || alpha < 0)
{
stop("alpha should be a number between 0 and 1, for (1-alpha)% pointwise confidence intervals. Thanks!")
}
type <- match.arg(type, c("link", "response"))
if(!is.null(terms))
{
if(length(terms) != 1)
stop("terms should either be NULL, in which case the prediction is made on all terms, or should be a single number indicating which smoothing term is to be used for prediction. Thanks!")
if(!is.vector(new.smoothX))
stop("If terms is a single number indicating which smoothing term is to be used for prediction, then new.smoothX should be a vector corresponding to the covariate values for which predictions are to be calculated for that term. Thanks!")
}
if(is.null(terms))
{
if(ncol(new.smoothX) != length(x$no.knots))
stop("If terms is NULL, in which case the prediction is made on all terms, then new.smoothX should be a matrix with the name number of columns as the number of smoothing covariates fitted in x. Thanks!")
}
if(!is.null(new.paraX))
{
if(any(apply(new.paraX, 2, function(x) all(x == 1))))
stop("No intercept terms should be included in new.paraX, as this is included by default. Thanks!")
new.paraX <- cbind(1, new.paraX) ## Intercept included by default
if(is.null(x$para.stat))
stop("If new.paraX is supplied, in which case the prediction involves parametric terms, then x should include the relevant standard errors i.e., x$para.stat. Thanks!")
}
if(!is.null(terms)) ## Predictions made for a single smoothing covariates
{
# calculate total variance for a smoothing covariate
newZ <- Predict.matrix(x$basis.info[[terms]], data = data.frame(x0 = (new.smoothX)))
newZ <- newZ %*% x$basis.info[[terms]]$transform_mat
new_eta <- newZ %*% x$a[x$index.cov == terms]
sub.A <- x$A[x$index.cov == terms, x$index.cov == terms]
variance <- rowSums((newZ %*% sub.A) * newZ)
# construct pointwise prediction interval based on normality assumption
lbound <- new_eta - qnorm(alpha/2, lower.tail = FALSE) * sqrt(variance)
ubound <- new_eta + qnorm(alpha/2, lower.tail = FALSE) * sqrt(variance)
}
if(is.null(terms)) ## Predictions made across the smoothing (and parametric if supplied) covariates. Note the predictions will account for the intercept ONLY if new.paraX is supplied
{
# calculate total variance for smoothing covariates
newZ <- NULL
for(k2 in 1:length(x$no.knots))
{
tmpZ <- Predict.matrix(x$basis.info[[k2]], data = data.frame(x0 = (new.smoothX[,k2])))
newZ <- cbind(newZ, tmpZ %*% x$basis.info[[k2]]$transform.mat)
}
new_eta <- newZ %*% x$a
variance <- rowSums((newZ %*% x$A) * newZ)
if(!is.null(new.paraX))
{
new_eta <- new_eta + as.matrix(new.paraX) %*% x$kappa
variance <- variance + rowSums((new.paraX %*% solve(x$obs.info)[1:length(x$kappa),1:length(x$kappa)]) * new.paraX)
}
if(!is.null(x$offset))
new_eta <- new_eta + x$offset
# construct pointwise prediction interval based on normality assumption
lbound <- new_eta - qnorm(alpha/2, lower.tail = FALSE) * sqrt(variance)
ubound <- new_eta + qnorm(alpha/2, lower.tail = FALSE) * sqrt(variance)
}
if(type == "response")
{
new_eta <- x$family$linkinv(new_eta)
lbound <- x$family$linkinv(lbound)
ubound <- x$family$linkinv(ubound)
}
out <- data.frame(prediction = new_eta, se = sqrt(variance), lower.bound = lbound, upper.bound = ubound,
new.smoothX = new.smoothX)
if(!is.null(new.paraX))
out$new.paraX <- new.paraX
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/vagam/R/predict.vagam.R
|
print.vagam <- function(x, ...)
{
cat("Variational approximation for GAMs\n")
cat(paste("\nCall:", deparse(x$call, 200), "\n"))
cat(paste("\nEstimated regression coefficients for parametric component:"), x$kappa)
cat(paste("\nEstimated smoothing coefficients for nonparametric component:"), x$a)
cat(paste("\nEstimated smoothing parameters (or fixed if lambda was supplied):"), x$lambda)
cat(paste("\nNumber of interior knots used:"), x$no.knots)
cat(paste("\nMaximized value of the variational log-likelihood:"), x$logL)
cat("\n")
}
|
/scratch/gouwar.j/cran-all/cranData/vagam/R/print.vagam.R
|
ridge.iwls <- function(x, y, family, penalty = NULL, initial.beta = NULL, max.steps = 1000, conv.eps = 1e-3, offset = NULL, gamma = 1, extra = FALSE)
{
y <- as.vector(y)
x <- as.matrix(x)
p <- ncol(x)
nobs <- nrow(x)
if(is.null(penalty))
penalty <- matrix(0, p, p)
if(is.null(offset))
offset <- numeric(length(y))
if(!is.null(penalty))
{
if(nrow(penalty) != p || ncol(penalty) != p)
stop("penalty could a square matrix with dimension ncol(x) by ncol(x). Thanks!")
}
converged <- FALSE
stop_at <- max.steps
beta_mat <- matrix(0, nrow = max.steps, ncol = p)
if(is.null(initial.beta))
initial.beta <- rep(0.01, p)
eta.new <- x %*% initial.beta + offset
for(i in 1:max.steps)
{
beta_mat[i,] <- initial.beta
mu.new <- family$linkinv(eta.new)
d.new <- family$mu.eta(eta.new)
v.new <- family$variance(mu.new)
weights <- c(d.new/sqrt(v.new))
x.star <- weights * x
y.tilde.star <- weights * (eta.new - offset + (y - mu.new)/d.new)
p.imat.new <- crossprod(x.star) + penalty
# print(p.imat.new)
inv.pimat.new <- chol2inv(chol(p.imat.new))
beta_new <- gamma * inv.pimat.new %*% crossprod(x.star, y.tilde.star) + (1 - gamma) * beta_mat[i,]
if((sum(abs(beta_new - initial.beta))/sum(abs(initial.beta)) <= conv.eps))
{
converged <- TRUE
stop_at <- i
if(i < max.steps)
break;
}
else
{
initial.beta <- beta_new
eta.new <- x %*% beta_new + offset
}
# print(c(beta_new))
}
out <- list(coefficients = as.vector(beta_new), family = family, converged = converged,
stop_at = stop_at, linear.predictors = eta.new)
if(extra) {
Infmat <- inv.pimat.new %*% crossprod(x.star)
tr.Infmat <- sum(diag(Infmat))
out$tr.Inf <- tr.Infmat
}
return(out)
}
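# Small self-contained check of the penalized IWLS routine above
# (hypothetical data; ridge penalty 0.1 * identity):
# set.seed(1)
# x <- cbind(1, rnorm(100))
# y <- rpois(100, exp(0.3 + 0.7 * x[, 2]))
# fit <- ridge.iwls(x, y, family = poisson(), penalty = diag(0.1, 2))
# fit$coefficients; fit$converged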
|
/scratch/gouwar.j/cran-all/cranData/vagam/R/ridge.iwls_internal.R
|
simrun_binomial <- function(t, band_const, choose.n = 100,
num.holdout.points = 10, true.beta = c(-1, 0.5),
CI = 0.95)
{
set.seed(123 + t)
choose.k <- band_const * ceiling(choose.n^0.2)
all.results <- matrix(NA, nrow = 9, ncol = 5)
rownames(all.results) = c("comptime", "mse", "bias-para", "mse-para", "confint-para", "width-para", "width-smooth", "intervalscore-smooth","mse-meanresp")
colnames(all.results) = c("mgcv-default", "mgcv-psplines", "gamm4", "vagams-unstructuredA", "vagams-bdiagA")
sel.holdout.points <- sample(1:choose.n, num.holdout.points)
binomial_dat <- gamsim(n = choose.n, dist = "binomial", extra.X = data.frame(intercept = rep(1,choose.n), treatment = rep(c(0,1), each = choose.n/2)), beta = true.beta)
#############################
## 1) GAM mgcv with default
#############################
tic <- proc.time()
fit.mgcv1 <- gam(y~treatment + s(x0) + s(x1) + s(x2) + s(x3), data = binomial_dat, family = binomial(link = "logit"))
all.results[1,1] <- (proc.time() - tic)[3]
all.results[2,1] <- mean((fit.mgcv1$linear.predictors - binomial_dat$linear.predictor)^2)
all.results[3,1] <- fit.mgcv1$coefficients[2] - true.beta[2]
all.results[4,1] <- (fit.mgcv1$coefficients[2] - true.beta[2])^2
make.ci <- c(summary(fit.mgcv1)$p.coeff[2] - qnorm(0.5 + CI/2) *summary(fit.mgcv1)$se[2],
summary(fit.mgcv1)$p.coeff[2] + qnorm(0.5 + CI/2) * summary(fit.mgcv1)$se[2])
all.results[5,1] <- findInterval(true.beta[2], make.ci) == 1
all.results[6,1] <- diff(make.ci)
holdout.fit <- function(data, holdout.points)
{
new.dat <- data[-holdout.points,]
fit1 <- gam(y ~ treatment + s(x0) + s(x1) + s(x2) + s(x3), family = binomial(link = "logit"), data = new.dat)
get.pred <- predict.gam(fit1, newdata = data[holdout.points,], se.fit = TRUE)
get.pred <- list(fit = c(get.pred$fit) - cbind(1,data[holdout.points,"treatment"])%*%fit1$coefficients[1:2], se.fit = c(get.pred$se.fit)) ## Only want smooth fit, so subtract the parametric component out (although se does not remove this!)
make.ci <- cbind(get.pred$fit - qnorm(0.5 + CI/2) * get.pred$se.fit, get.pred$fit + qnorm(0.5 + CI/2) * get.pred$se.fit)
all.widths <- apply(make.ci,1,diff)
all.coverage <- sapply(1:length(holdout.points), function(x) findInterval(data$f[x], make.ci[x,]) != 1)
all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
return(cbind(all.widths, all.coverage, all.interval.score))
}
do.holdfits <- holdout.fit(data = binomial_dat, holdout.points = sel.holdout.points)
all.results[7,1] <- colMeans(do.holdfits)[1]
all.results[8,1] <- colMeans(do.holdfits)[3]
all.results[9,1] <- mean((binomial()$linkinv(fit.mgcv1$linear.predictors) - binomial()$linkinv(binomial_dat$linear.predictor))^2)
rm(holdout.fit)
###############################################
## 2) GAM mgcv with P splines and preset knots
###############################################
tic <- proc.time()
fit.mgcv2 <- gam(y ~ treatment + s(x0, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), data = binomial_dat, family = binomial(link = "logit"))
all.results[1,2] <- (proc.time() - tic)[3]
all.results[2,2] <- mean((fit.mgcv2$linear.predictors - binomial_dat$linear.predictor)^2) ## Averaged across choose.n points in dataset
all.results[3,2] <- fit.mgcv2$coefficients[2] - true.beta[2]
all.results[4,2] <- (fit.mgcv2$coefficients[2] - true.beta[2])^2
make.ci <- c(summary(fit.mgcv2)$p.coeff[2] - qnorm(0.5 + CI/2) * summary(fit.mgcv2)$se[2],
summary(fit.mgcv2)$p.coeff[2] + qnorm(0.5 + CI/2) * summary(fit.mgcv2)$se[2])
all.results[5,2] <- findInterval(true.beta[2], make.ci) == 1
all.results[6,2] <- diff(make.ci)
holdout.fit <- function(data, holdout.points) ## Not sure if you are holding out each point at a time, or you are holding out all chosen points in one go!
{
new.dat <- data[-holdout.points,]
fit1 <- gam(y ~ treatment + s(x0, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), family = binomial(link = "logit"), data = new.dat)
get.pred <- predict.gam(fit1, newdata = data[holdout.points,], se.fit = TRUE)
get.pred <- list(fit = c(get.pred$fit) - cbind(1,data[holdout.points,"treatment"])%*%fit1$coefficients[1:2], se.fit = c(get.pred$se.fit)) ## Only want smooth fit, so subtract the parametric component out (although se does not remove this!)
make.ci <- cbind(get.pred$fit - qnorm(0.5 + CI/2) * get.pred$se.fit, get.pred$fit + qnorm(0.5 + CI/2) * get.pred$se.fit)
all.widths <- apply(make.ci,1,diff)
all.coverage <- sapply(1:length(holdout.points), function(x) findInterval(data$f[x], make.ci[x,]) != 1)
all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
return(cbind(all.widths, all.coverage, all.interval.score))
}
do.holdfits <- holdout.fit(data = binomial_dat, holdout.points = sel.holdout.points)
all.results[7,2] <- colMeans(do.holdfits)[1]
all.results[8,2] <- colMeans(do.holdfits)[3]
all.results[9,2] <- mean((binomial()$linkinv(fit.mgcv2$linear.predictors) - binomial()$linkinv(binomial_dat$linear.predictor))^2)
rm(holdout.fit)
###############################################
## 3) GAMM4 using mixed model parameterization
###############################################
tic <- proc.time()
fit.gamm4 <- gamm4(y ~ treatment + s(x0, bs = "ps", k = choose.k+2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), data = binomial_dat, family = binomial(link = "logit"))
all.results[1,3] <- (proc.time() - tic)[3]
all.results[2,3] <- mean((fit.gamm4$gam$linear.predictors - binomial_dat$linear.predictor)^2) ## Averaged across choose.n points in dataset
all.results[3,3] <- fit.gamm4$gam$coefficients[2] - true.beta[2]
all.results[4,3] <- (fit.gamm4$gam$coefficients[2] - true.beta[2])^2
make.ci <- c(summary(fit.gamm4$gam)$p.coeff[2] - qnorm(0.5 + CI/2) * summary(fit.gamm4$gam)$se[2],
summary(fit.gamm4$gam)$p.coeff[2] + qnorm(0.5 + CI/2) * summary(fit.gamm4$gam)$se[2])
all.results[5,3] <- findInterval(true.beta[2], make.ci) == 1
all.results[6,3] <- diff(make.ci)
holdout.fit <- function(data, holdout.points)
{
new.dat <- data[-holdout.points,]
fit1 <- gamm4(y ~ treatment + s(x0, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), family = binomial(link = "logit"), data = new.dat)
get.pred <- predict.gam(fit1$gam, newdata = data[holdout.points,], se.fit = TRUE)
get.pred <- list(fit = c(get.pred$fit) - cbind(1,data[holdout.points,"treatment"])%*%fit1$gam$coefficients[1:2], se.fit = c(get.pred$se.fit)) ## Only want smooth fit, so subtract the parametric component out (although se does not remove this!)
make.ci <- cbind(get.pred$fit - qnorm(0.5 + CI/2) * get.pred$se.fit, get.pred$fit + qnorm(0.5 + CI/2) * get.pred$se.fit)
all.widths <- apply(make.ci,1,diff)
all.coverage <- sapply(1:length(holdout.points), function(x) findInterval(data$f[x], make.ci[x,]) != 1)
all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
return(cbind(all.widths, all.coverage, all.interval.score))
}
do.holdfits <- try(holdout.fit(data = binomial_dat, holdout.points = sel.holdout.points), silent = TRUE)
if(!inherits(do.holdfits, "try-error"))
{
all.results[7,3] <- colMeans(do.holdfits)[1]
all.results[8,3] <- colMeans(do.holdfits)[3]
rm(holdout.fit)
}
all.results[9,3] <- mean((binomial()$linkinv(fit.gamm4$gam$linear.predictors) - binomial()$linkinv(binomial_dat$linear.predictor))^2)
#######################################################
## 4) VA with unstructured variational covariance matrix
#######################################################
tic <- proc.time()
fit.va1 <- vagam(y = binomial_dat$y, smooth.X = binomial_dat[,c(2:5)], para.X = data.frame(treatment = binomial_dat$treatment), int.knots = choose.k, A.struct = "unstructured", save.data = TRUE, family = binomial(), para.se = TRUE, control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed_number = t, mc.samps = 4000, pois.step.size = 0.01))
all.results[1,4] <- (proc.time() - tic)[3]
all.results[2,4] <- mean((fit.va1$linear.predictors - binomial_dat$linear.predictor)^2) ## Averaged across choose.n points in dataset
all.results[3,4] <- fit.va1$kappa[2] - true.beta[2]
all.results[4,4] <- (fit.va1$kappa[2] - true.beta[2])^2
make.ci <- c(fit.va1$para.stat[1,2] - qnorm(0.5 + CI/2) * fit.va1$para.stat[2,2],
fit.va1$para.stat[1,2] + qnorm(0.5 + CI/2) * fit.va1$para.stat[2,2])
all.results[5,4] <- findInterval(true.beta[2], make.ci) == 1
all.results[6,4] <- diff(make.ci)
holdout.fit <- function(data, holdout.points)
{
new.dat <- data[-holdout.points,]
fit1 <- vagam(y = new.dat$y, smooth.X = new.dat[,2:5], para.X = data.frame(treatment = new.dat$treatment), int.knots = choose.k, A.struct = "unstructured", save.data = TRUE, para.se = FALSE, family = binomial(), control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed_number = t, mc.samps = 4000, pois.step.size = 0.01))
get.pred <- predict.vagam(fit1, new.smoothX = data[holdout.points,2:5])
make.ci <- cbind(get.pred$lower.bound, get.pred$upper.bound)
all.widths <- apply(make.ci,1,diff)
all.coverage <- sapply(1:length(holdout.points), function(x) findInterval(data$f[x], make.ci[x,]) != 1)
all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
return(cbind(all.widths, all.coverage, all.interval.score))
}
do.holdfits <- holdout.fit(data = binomial_dat, holdout.points = sel.holdout.points)
all.results[7,4] <- colMeans(do.holdfits)[1]
all.results[8,4] <- colMeans(do.holdfits)[3]
all.results[9,4] <- mean((binomial()$linkinv(fit.va1$linear.predictors) - binomial()$linkinv(binomial_dat$linear.predictor))^2)
rm(holdout.fit)
###########################################################
## 5) VA with block diagonal variational covariance matrix
###########################################################
tic <- proc.time()
fit.va2 <- vagam(y = binomial_dat$y, smooth.X = binomial_dat[,2:5], para.X = data.frame(treatment = binomial_dat$treatment), int.knots = choose.k, A.struct = "block", save.data = TRUE, family = binomial(), para.se = TRUE, control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed_number = t, mc.samps = 4000, pois.step.size = 0.01))
all.results[1,5] <- (proc.time() - tic)[3]
all.results[2,5] <- mean((fit.va2$linear.predictors - binomial_dat$linear.predictor)^2) ## Averaged across choose.n points in dataset
all.results[3,5] <- fit.va2$kappa[2] - true.beta[2]
all.results[4,5] <- (fit.va2$kappa[2] - true.beta[2])^2
make.ci <- c(fit.va2$para.stat[1,2] - qnorm(0.5 + CI/2) * fit.va2$para.stat[2,2],
fit.va2$para.stat[1,2] + qnorm(0.5 + CI/2) * fit.va2$para.stat[2,2])
all.results[5,5] <- findInterval(true.beta[2], make.ci) == 1
all.results[6,5] <- diff(make.ci)
holdout.fit <- function(data, holdout.points)
{
new.dat <- data[-holdout.points,]
fit1 <- vagam(y = new.dat$y, smooth.X = new.dat[,2:5], para.X = data.frame(treatment = new.dat$treatment), int.knots = choose.k,
A.struct = "block", save.data = TRUE, para.se = FALSE, family = binomial(), control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed_number = t, mc.samps = 4000, pois.step.size = 0.01))
get.pred <- predict.vagam(fit1, new.smoothX = data[holdout.points,2:5])
make.ci <- cbind(get.pred$lower.bound, get.pred$upper.bound)
all.widths <- apply(make.ci,1,diff)
all.coverage <- sapply(1:length(holdout.points), function(x) findInterval(data$f[x], make.ci[x,]) != 1)
all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
return(cbind(all.widths, all.coverage, all.interval.score))
}
do.holdfits <- holdout.fit(data = binomial_dat, holdout.points = sel.holdout.points)
all.results[7,5] <- colMeans(do.holdfits)[1]
all.results[8,5] <- colMeans(do.holdfits)[3]
all.results[9,5] <- mean((binomial()$linkinv(fit.va2$linear.predictors) - binomial()$linkinv(binomial_dat$linear.predictor))^2)
return(all.results)
}
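# Single replicate of the binomial simulation (assumes mgcv, gamm4, and the
# vagam fitting functions are loaded; one run can take a while):
# one.run <- simrun_binomial(t = 1, band_const = 2, choose.n = 100)
# round(one.run, 3)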
|
/scratch/gouwar.j/cran-all/cranData/vagam/R/simrun_binomial_internal.R
|
simrun_normal <- function(t, band_const, choose.n = 100, num.holdout.points = 10, true.beta = c(-1, 0.5), CI = 0.95)
{
set.seed(123 + t)
choose.k <- band_const * ceiling(choose.n^0.2) ## Don't know about this?
all.results <- matrix(NA, nrow = 9, ncol = 5)
rownames(all.results) = c("comptime", "mse", "bias-para", "mse-para", "confint-para", "width-para", "width-smooth", "intervalscore-smooth","mse-meanresp")
colnames(all.results) = c("mgcv-default", "mgcv-psplines", "gamm4", "vagams-unstructuredA", "vagams-bdiagA")
sel.holdout.points <- sample(1:choose.n, num.holdout.points)
normal_dat <- gamsim(n = choose.n, dist = "normal", extra.X = data.frame(intercept = rep(1,choose.n), treatment = rep(c(0,1), each = choose.n/2)),
beta = true.beta)
## 1) GAM mgcv with default
tic <- proc.time()
fit.mgcv1 <- gam(y ~ treatment + s(x0) + s(x1) + s(x2) + s(x3), data = normal_dat)
all.results[1,1] <- (proc.time() - tic)[3]
all.results[2,1] <- mean((fit.mgcv1$linear.predictors - normal_dat$linear.predictor)^2) ## Averaged across choose.n points in dataset
all.results[3,1] <- fit.mgcv1$coefficients[2] - true.beta[2]
all.results[4,1] <- (fit.mgcv1$coefficients[2] - true.beta[2])^2
make.ci <- c(summary(fit.mgcv1)$p.coeff[2] - qnorm(0.5 + CI/2) *summary(fit.mgcv1)$se[2],
summary(fit.mgcv1)$p.coeff[2] + qnorm(0.5 + CI/2) * summary(fit.mgcv1)$se[2])
all.results[5,1] <- findInterval(true.beta[2], make.ci) == 1
all.results[6,1] <- diff(make.ci)
holdout.fit <- function(data, holdout.points)
{
new.dat <- data[-holdout.points,]
fit1 <- gam(y ~ treatment + s(x0) + s(x1) + s(x2) + s(x3), data = new.dat)
get.pred <- predict.gam(fit1, newdata = data[holdout.points,], se.fit = TRUE)
get.pred <- list(fit = c(get.pred$fit) - cbind(1,data[holdout.points,"treatment"])%*%fit1$coefficients[1:2], se.fit = c(get.pred$se.fit)) ## Only want smooth fit, so subtract the parametric component out (although se does not remove this!)
make.ci <- cbind(get.pred$fit - qnorm(0.5 + CI/2) * get.pred$se.fit, get.pred$fit + qnorm(0.5 + CI/2) * get.pred$se.fit)
all.widths <- apply(make.ci,1,diff)
all.coverage <- sapply(1:length(holdout.points), function(x) findInterval(data$f[x], make.ci[x,]) != 1)
all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
return(cbind(all.widths, all.coverage, all.interval.score))
}
do.holdfits <- holdout.fit(data = normal_dat, holdout.points = sel.holdout.points)
all.results[7,1] <- colMeans(do.holdfits)[1]
all.results[8,1] <- colMeans(do.holdfits)[3]
all.results[9,1] <- mean((fit.mgcv1$linear.predictors - normal_dat$linear.predictor)^2)
## 2) GAM mgcv with P splines and preset knots
tic <- proc.time()
fit.mgcv2 <- gam(y ~ treatment + s(x0, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), data = normal_dat)
all.results[1,2] <- (proc.time() - tic)[3]
all.results[2,2] <- mean((fit.mgcv2$linear.predictors - normal_dat$linear.predictor)^2) ## Averaged across choose.n points in dataset
all.results[3,2] <- fit.mgcv2$coefficients[2] - true.beta[2]
all.results[4,2] <- (fit.mgcv2$coefficients[2] - true.beta[2])^2
make.ci <- c(summary(fit.mgcv2)$p.coeff[2] - qnorm(0.5 + CI/2) * summary(fit.mgcv2)$se[2],
summary(fit.mgcv2)$p.coeff[2] + qnorm(0.5 + CI/2) * summary(fit.mgcv2)$se[2])
all.results[5,2] <- findInterval(true.beta[2], make.ci) == 1
all.results[6,2] <- diff(make.ci)
holdout.fit <- function(data, holdout.points) ## All chosen holdout points are held out in one go, then the model is refitted once
{
new.dat <- data[-holdout.points,]
fit1 <- gam(y ~ treatment + s(x0, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), data = new.dat)
get.pred <- predict.gam(fit1, newdata = data[holdout.points,], se.fit = TRUE)
get.pred <- list(fit = c(get.pred$fit) - cbind(1,data[holdout.points,"treatment"])%*%fit1$coefficients[1:2], se.fit = c(get.pred$se.fit)) ## Only want smooth fit, so subtract the parametric component out (although se does not remove this!)
make.ci <- cbind(get.pred$fit - qnorm(0.5 + CI/2) * get.pred$se.fit, get.pred$fit + qnorm(0.5 + CI/2) * get.pred$se.fit)
all.widths <- apply(make.ci,1,diff)
all.coverage <- sapply(seq_along(holdout.points), function(x) findInterval(data$f[holdout.points[x]], make.ci[x,]) != 1) ## TRUE if the true smooth at the holdout point lies outside the CI
all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
return(cbind(all.widths, all.coverage, all.interval.score))
}
do.holdfits <- holdout.fit(data = normal_dat, holdout.points = sel.holdout.points)
all.results[7,2] <- colMeans(do.holdfits)[1]
all.results[8,2] <- colMeans(do.holdfits)[3]
all.results[9,2] <- mean((fit.mgcv2$linear.predictors - normal_dat$linear.predictor)^2)
## 3) GAMM4 using mixed model parameterization
tic <- proc.time()
fit.gamm4 <- gamm4(y ~ treatment + s(x0, bs = "ps", k = choose.k+2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), data = normal_dat)
all.results[1,3] <- (proc.time() - tic)[3]
all.results[2,3] <- mean((fit.gamm4$gam$linear.predictors - normal_dat$linear.predictor)^2) ## Averaged across choose.n points in dataset
all.results[3,3] <- fit.gamm4$gam$coefficients[2] - true.beta[2]
all.results[4,3] <- (fit.gamm4$gam$coefficients[2] - true.beta[2])^2
make.ci <- c(summary(fit.gamm4$gam)$p.coeff[2] - qnorm(0.5 + CI/2) * summary(fit.gamm4$gam)$se[2],
summary(fit.gamm4$gam)$p.coeff[2] + qnorm(0.5 + CI/2) * summary(fit.gamm4$gam)$se[2])
all.results[5,3] <- findInterval(true.beta[2], make.ci) == 1
all.results[6,3] <- diff(make.ci)
holdout.fit <- function(data, holdout.points)
{
new.dat <- data[-holdout.points,]
fit1 <- gamm4(y ~ treatment + s(x0, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), data = new.dat)
get.pred <- predict.gam(fit1$gam, newdata = data[holdout.points,], se.fit = TRUE)
get.pred <- list(fit = c(get.pred$fit) - cbind(1,data[holdout.points,"treatment"])%*%fit1$gam$coefficients[1:2], se.fit = c(get.pred$se.fit)) ## Only want smooth fit, so subtract the parametric component out (although se does not remove this!)
make.ci <- cbind(get.pred$fit - qnorm(0.5 + CI/2) * get.pred$se.fit, get.pred$fit + qnorm(0.5 + CI/2) * get.pred$se.fit)
all.widths <- apply(make.ci,1,diff)
all.coverage <- sapply(seq_along(holdout.points), function(x) findInterval(data$f[holdout.points[x]], make.ci[x,]) != 1) ## TRUE if the true smooth at the holdout point lies outside the CI
all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
return(cbind(all.widths, all.coverage, all.interval.score))
}
do.holdfits <- holdout.fit(data = normal_dat, holdout.points = sel.holdout.points)
all.results[7,3] <- colMeans(do.holdfits)[1]
all.results[8,3] <- colMeans(do.holdfits)[3]
all.results[9,3] <- mean((fit.gamm4$gam$linear.predictors - normal_dat$linear.predictor)^2)
## 4) VA with unstructured variational covariance matrix
tic <- proc.time()
fit.va1 <- vagam(y = normal_dat$y, smooth.X = normal_dat[,2:5], para.X = data.frame(treatment = normal_dat$treatment), int.knots = choose.k, A.struct = "unstructured", save.data = TRUE, para.se = TRUE, control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed.number = t, mc.samps = 4000, pois.step.size = 0.01))
all.results[1,4] <- (proc.time() - tic)[3]
all.results[2,4] <- mean((fit.va1$linear.predictors - normal_dat$linear.predictor)^2) ## Averaged across choose.n points in dataset
all.results[3,4] <- fit.va1$kappa[2] - true.beta[2]
all.results[4,4] <- (fit.va1$kappa[2] - true.beta[2])^2
make.ci <- c(fit.va1$para.stat[1,2] - qnorm(0.5 + CI/2) * fit.va1$para.stat[2,2],
fit.va1$para.stat[1,2] + qnorm(0.5 + CI/2) * fit.va1$para.stat[2,2])
all.results[5,4] <- findInterval(true.beta[2], make.ci) == 1
all.results[6,4] <- diff(make.ci)
holdout.fit <- function(data, holdout.points)
{
new.dat <- data[-holdout.points,]
fit1 <- vagam(y = new.dat$y, smooth.X = new.dat[,2:5], para.X = data.frame(treatment = new.dat$treatment), int.knots = choose.k, A.struct = "unstructured", save.data = TRUE, para.se = FALSE, control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed.number = t, mc.samps = 4000, pois.step.size = 0.01))
get.pred <- predict.vagam(fit1, new.smoothX = data[holdout.points,2:5])
make.ci <- cbind(get.pred$lower.bound, get.pred$upper.bound)
all.widths <- apply(make.ci,1,diff)
all.coverage <- sapply(seq_along(holdout.points), function(x) findInterval(data$f[holdout.points[x]], make.ci[x,]) != 1) ## TRUE if the true smooth at the holdout point lies outside the CI
all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
return(cbind(all.widths, all.coverage, all.interval.score))
}
do.holdfits <- holdout.fit(data = normal_dat, holdout.points = sel.holdout.points)
all.results[7,4] <- colMeans(do.holdfits)[1]
all.results[8,4] <- colMeans(do.holdfits)[3]
all.results[9,4] <- mean((fit.va1$linear.predictors - normal_dat$linear.predictor)^2)
## 5) VA with block diagonal variational covariance matrix
tic <- proc.time()
fit.va2 <- vagam(y = normal_dat$y, smooth.X = normal_dat[,2:5], para.X = data.frame(treatment = normal_dat$treatment), int.knots = choose.k, A.struct = "block", save.data = TRUE, para.se = TRUE, control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed.number = t, mc.samps = 4000, pois.step.size = 0.01))
all.results[1,5] <- (proc.time() - tic)[3]
all.results[2,5] <- mean((fit.va2$linear.predictors - normal_dat$linear.predictor)^2) ## Averaged across choose.n points in dataset
all.results[3,5] <- fit.va2$kappa[2] - true.beta[2]
all.results[4,5] <- (fit.va2$kappa[2] - true.beta[2])^2
make.ci <- c(fit.va2$para.stat[1,2] - qnorm(0.5 + CI/2) * fit.va2$para.stat[2,2],
fit.va2$para.stat[1,2] + qnorm(0.5 + CI/2) * fit.va2$para.stat[2,2])
all.results[5,5] <- findInterval(true.beta[2], make.ci) == 1
all.results[6,5] <- diff(make.ci)
holdout.fit <- function(data, holdout.points)
{
new.dat <- data[-holdout.points,]
fit1 <- vagam(y = new.dat$y, smooth.X = new.dat[,2:5], para.X = data.frame(treatment = new.dat$treatment), int.knots = choose.k,
A.struct = "block", save.data = TRUE, para.se = FALSE, control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed_number = t, mc.samps = 4000, pois.step.size = 0.01))
get.pred <- predict.vagam(fit1, new.smoothX = data[holdout.points,2:5])
make.ci <- cbind(get.pred$lower.bound, get.pred$upper.bound)
all.widths <- apply(make.ci,1,diff)
all.coverage <- sapply(seq_along(holdout.points), function(x) findInterval(data$f[holdout.points[x]], make.ci[x,]) != 1) ## TRUE if the true smooth at the holdout point lies outside the CI
all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
return(cbind(all.widths, all.coverage, all.interval.score))
}
do.holdfits <- holdout.fit(data = normal_dat, holdout.points = sel.holdout.points)
all.results[7,5] <- colMeans(do.holdfits)[1]
all.results[8,5] <- colMeans(do.holdfits)[3]
all.results[9,5] <- mean((fit.va2$linear.predictors - normal_dat$linear.predictor)^2)
return(all.results)
}
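## Illustrative usage sketch (added; not part of the original file): one
## replicate of the Gaussian simulation, assuming mgcv, gamm4, and vagam are
## loaded. Guarded by if (FALSE) so it never runs at package load.
if (FALSE) {
    res <- simrun_normal(t = 1, band_const = 2, choose.n = 100)
    res["mse", ]           # in-sample MSE of the linear predictor, per method
    res["confint-para", ]  # did each 95% CI cover the true treatment effect?
}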
|
/scratch/gouwar.j/cran-all/cranData/vagam/R/simrun_normal_internal.R
|
simrun_poisson <- function(t, band_const, choose.n = 100, num.holdout.points = 10, true.beta = c(-1, 0.5),
CI = 0.95)
{
set.seed(123 + t)
choose.k <- band_const * ceiling(choose.n^0.2)
all.results <- matrix(NA, nrow = 9, ncol = 5)
rownames(all.results) = c("comptime", "mse", "bias-para", "mse-para", "confint-para", "width-para", "width-smooth", "intervalscore-smooth","mse-meanresp")
colnames(all.results) = c("mgcv-default", "mgcv-psplines", "gamm4", "vagams-unstructuredA", "vagams-bdiagA")
sel.holdout.points <- sample(1:choose.n, num.holdout.points)
poisson_dat <- gamsim(n = choose.n, dist = "poisson", extra.X = data.frame(intercept = rep(1,choose.n), treatment = rep(c(0,1), each = choose.n/2)), beta = true.beta)
############################
## 1) GAM mgcv with default
############################
tic <- proc.time()
fit.mgcv1 <- gam(y~treatment + s(x0) + s(x1) + s(x2) + s(x3), data = poisson_dat, family = poisson())
all.results[1,1] <- (proc.time() - tic)[3]
all.results[2,1] <- mean((fit.mgcv1$linear.predictors - poisson_dat$linear.predictor)^2)
all.results[3,1] <- fit.mgcv1$coefficients[2] - true.beta[2]
all.results[4,1] <- (fit.mgcv1$coefficients[2] - true.beta[2])^2
make.ci <- c(summary(fit.mgcv1)$p.coeff[2] - qnorm(0.5 + CI/2) *summary(fit.mgcv1)$se[2],
summary(fit.mgcv1)$p.coeff[2] + qnorm(0.5 + CI/2) * summary(fit.mgcv1)$se[2])
all.results[5,1] <- findInterval(true.beta[2], make.ci) == 1
all.results[6,1] <- diff(make.ci)
holdout.fit <- function(data, holdout.points)
{
new.dat <- data[-holdout.points,]
fit1 <- gam(y ~ treatment + s(x0) + s(x1) + s(x2) + s(x3), family = poisson(), data = new.dat)
get.pred <- predict.gam(fit1, newdata = data[holdout.points,], se.fit = TRUE)
get.pred <- list(fit = c(get.pred$fit) - cbind(1,data[holdout.points,"treatment"])%*%fit1$coefficients[1:2], se.fit = c(get.pred$se.fit)) ## Only want smooth fit, so subtract the parametric component out (although se does not remove this!)
make.ci <- cbind(get.pred$fit - qnorm(0.5 + CI/2) * get.pred$se.fit, get.pred$fit + qnorm(0.5 + CI/2) * get.pred$se.fit)
all.widths <- apply(make.ci,1,diff)
all.coverage <- sapply(seq_along(holdout.points), function(x) findInterval(data$f[holdout.points[x]], make.ci[x,]) != 1) ## TRUE if the true smooth at the holdout point lies outside the CI
all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
return(cbind(all.widths, all.coverage, all.interval.score))
}
do.holdfits <- holdout.fit(data = poisson_dat, holdout.points = sel.holdout.points)
all.results[7,1] <- colMeans(do.holdfits)[1]
all.results[8,1] <- colMeans(do.holdfits)[3]
all.results[9,1] <- mean((exp(fit.mgcv1$linear.predictors) - exp(poisson_dat$linear.predictor))^2)
rm(holdout.fit)
###############################################
## 2) GAM mgcv with P splines and preset knots
###############################################
tic <- proc.time()
fit.mgcv2 <- gam(y ~ treatment + s(x0, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), data = poisson_dat, family = poisson())
all.results[1,2] <- (proc.time() - tic)[3]
all.results[2,2] <- mean((fit.mgcv2$linear.predictors - poisson_dat$linear.predictor)^2) ## Averaged across choose.n points in dataset
all.results[3,2] <- fit.mgcv2$coefficients[2] - true.beta[2]
all.results[4,2] <- (fit.mgcv2$coefficients[2] - true.beta[2])^2
make.ci <- c(summary(fit.mgcv2)$p.coeff[2] - qnorm(0.5 + CI/2) * summary(fit.mgcv2)$se[2], summary(fit.mgcv2)$p.coeff[2] + qnorm(0.5 + CI/2) * summary(fit.mgcv2)$se[2])
all.results[5,2] <- findInterval(true.beta[2], make.ci) == 1
all.results[6,2] <- diff(make.ci)
holdout.fit <- function(data, holdout.points) ## All chosen holdout points are held out in one go, then the model is refitted once
{
new.dat <- data[-holdout.points,]
fit1 <- gam(y ~ treatment + s(x0, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), family = poisson(), data = new.dat)
get.pred <- predict.gam(fit1, newdata = data[holdout.points,], se.fit = TRUE)
get.pred <- list(fit = c(get.pred$fit) - cbind(1,data[holdout.points,"treatment"])%*%fit1$coefficients[1:2], se.fit = c(get.pred$se.fit)) ## Only want smooth fit, so subtract the parametric component out (although se does not remove this!)
make.ci <- cbind(get.pred$fit - qnorm(0.5 + CI/2) * get.pred$se.fit, get.pred$fit + qnorm(0.5 + CI/2) * get.pred$se.fit)
all.widths <- apply(make.ci,1,diff)
all.coverage <- sapply(seq_along(holdout.points), function(x) findInterval(data$f[holdout.points[x]], make.ci[x,]) != 1) ## TRUE if the true smooth at the holdout point lies outside the CI
all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
return(cbind(all.widths, all.coverage, all.interval.score))
}
do.holdfits <- holdout.fit(data = poisson_dat, holdout.points = sel.holdout.points)
all.results[7,2] <- colMeans(do.holdfits)[1]
all.results[8,2] <- colMeans(do.holdfits)[3]
all.results[9,2] <- mean((exp(fit.mgcv2$linear.predictors) - exp(poisson_dat$linear.predictor))^2)
rm(holdout.fit)
###############################################
## 3) GAMM4 using mixed model parameterization
###############################################
tic <- proc.time()
fit.gamm4 <- gamm4(y ~ treatment + s(x0, bs = "ps", k = choose.k+2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), data = poisson_dat, family = poisson())
all.results[1,3] <- (proc.time() - tic)[3]
all.results[2,3] <- mean((fit.gamm4$gam$linear.predictors - poisson_dat$linear.predictor)^2) ## Averaged across choose.n points in dataset
all.results[3,3] <- fit.gamm4$gam$coefficients[2] - true.beta[2]
all.results[4,3] <- (fit.gamm4$gam$coefficients[2] - true.beta[2])^2
make.ci <- c(summary(fit.gamm4$gam)$p.coeff[2] - qnorm(0.5 + CI/2) * summary(fit.gamm4$gam)$se[2],
summary(fit.gamm4$gam)$p.coeff[2] + qnorm(0.5 + CI/2) * summary(fit.gamm4$gam)$se[2])
all.results[5,3] <- findInterval(true.beta[2], make.ci) == 1
all.results[6,3] <- diff(make.ci)
holdout.fit <- function(data, holdout.points)
{
new.dat <- data[-holdout.points,]
fit1 <- gamm4(y ~ treatment + s(x0, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), family = poisson(), data = new.dat)
get.pred <- predict.gam(fit1$gam, newdata = data[holdout.points,], se.fit = TRUE)
get.pred <- list(fit = c(get.pred$fit) - cbind(1,data[holdout.points,"treatment"])%*%fit1$gam$coefficients[1:2], se.fit = c(get.pred$se.fit)) ## Only want smooth fit, so subtract the parametric component out (although se does not remove this!)
make.ci <- cbind(get.pred$fit - qnorm(0.5 + CI/2) * get.pred$se.fit, get.pred$fit + qnorm(0.5 + CI/2) * get.pred$se.fit)
all.widths <- apply(make.ci,1,diff)
all.coverage <- sapply(seq_along(holdout.points), function(x) findInterval(data$f[holdout.points[x]], make.ci[x,]) != 1) ## TRUE if the true smooth at the holdout point lies outside the CI
all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
return(cbind(all.widths, all.coverage, all.interval.score))
}
do.holdfits <- holdout.fit(data = poisson_dat, holdout.points = sel.holdout.points)
all.results[7,3] <- colMeans(do.holdfits)[1]
all.results[8,3] <- colMeans(do.holdfits)[3]
all.results[9,3] <- mean((exp(fit.gamm4$gam$linear.predictors) - exp(poisson_dat$linear.predictor))^2)
rm(holdout.fit)
#######################################################
## 4) VA with unstructured variational covariance matrix
#######################################################
tic <- proc.time()
fit.va1 <- vagam(y = poisson_dat$y, smooth.X = poisson_dat[,2:5], para.X = data.frame(treatment = poisson_dat$treatment), int.knots = choose.k, A.struct = "unstructured", save.data = TRUE, family = poisson(), para.se = TRUE, control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed.number = t, mc.samps = 4000, pois.step.size = 0.01))
all.results[1,4] <- (proc.time() - tic)[3]
all.results[2,4] <- mean((fit.va1$linear.predictors - poisson_dat$linear.predictor)^2) ## Averaged across choose.n points in dataset
all.results[3,4] <- fit.va1$kappa[2] - true.beta[2]
all.results[4,4] <- (fit.va1$kappa[2] - true.beta[2])^2
make.ci <- c(fit.va1$para.stat[1,2] - qnorm(0.5 + CI/2) * fit.va1$para.stat[2,2],
fit.va1$para.stat[1,2] + qnorm(0.5 + CI/2) * fit.va1$para.stat[2,2])
all.results[5,4] <- findInterval(true.beta[2], make.ci) == 1
all.results[6,4] <- diff(make.ci)
holdout.fit <- function(data, holdout.points)
{
new.dat <- data[-holdout.points,]
fit1 <- vagam(y = new.dat$y, smooth.X = new.dat[,2:5], para.X = data.frame(treatment = new.dat$treatment), int.knots = choose.k, A.struct = "unstructured", save.data = TRUE, family = poisson(), para.se = FALSE, control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed.number = t, mc.samps = 4000, pois.step.size = 0.01))
get.pred <- predict.vagam(fit1, new.smoothX = data[holdout.points,2:5])
make.ci <- cbind(get.pred$lower.bound, get.pred$upper.bound)
all.widths <- apply(make.ci,1,diff)
all.coverage <- sapply(seq_along(holdout.points), function(x) findInterval(data$f[holdout.points[x]], make.ci[x,]) != 1) ## TRUE if the true smooth at the holdout point lies outside the CI
all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
return(cbind(all.widths, all.coverage, all.interval.score))
}
do.holdfits <- holdout.fit(data = poisson_dat, holdout.points = sel.holdout.points)
all.results[7,4] <- colMeans(do.holdfits)[1]
all.results[8,4] <- colMeans(do.holdfits)[3]
all.results[9,4] <- mean((exp(fit.va1$linear.predictors) - exp(poisson_dat$linear.predictor))^2)
rm(holdout.fit)
###########################################################
## 5) VA with block diagonal variational covariance matrix
###########################################################
tic <- proc.time()
fit.va2 <- vagam(y = poisson_dat$y, smooth.X = poisson_dat[,2:5], para.X = data.frame(treatment = poisson_dat$treatment), int.knots = choose.k, A.struct = "block", save.data = TRUE, family = poisson(), para.se = TRUE, control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed.number = t, mc.samps = 4000, pois.step.size = 0.01))
all.results[1,5] <- (proc.time() - tic)[3]
all.results[2,5] <- mean((fit.va2$linear.predictors - poisson_dat$linear.predictor)^2) ## Averaged across choose.n points in dataset
all.results[3,5] <- fit.va2$kappa[2] - true.beta[2]
all.results[4,5] <- (fit.va2$kappa[2] - true.beta[2])^2
make.ci <- c(fit.va2$para.stat[1,2] - qnorm(0.5 + CI/2) * fit.va2$para.stat[2,2],
fit.va2$para.stat[1,2] + qnorm(0.5 + CI/2) * fit.va2$para.stat[2,2])
all.results[5,5] <- findInterval(true.beta[2], make.ci) == 1
all.results[6,5] <- diff(make.ci)
holdout.fit <- function(data, holdout.points)
{
new.dat <- data[-holdout.points,]
fit1 <- vagam(y = new.dat$y, smooth.X = new.dat[,2:5], para.X = data.frame(treatment = new.dat$treatment), int.knots = choose.k, A.struct = "block", save.data = TRUE, family = poisson(), para.se = FALSE, control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed.number = t, mc.samps = 4000, pois.step.size = 0.01))
get.pred <- predict.vagam(fit1, new.smoothX = data[holdout.points,2:5])
make.ci <- cbind(get.pred$lower.bound, get.pred$upper.bound)
all.widths <- apply(make.ci,1,diff)
all.coverage <- sapply(seq_along(holdout.points), function(x) findInterval(data$f[holdout.points[x]], make.ci[x,]) != 1) ## TRUE if the true smooth at the holdout point lies outside the CI
all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
return(cbind(all.widths, all.coverage, all.interval.score))
}
do.holdfits <- holdout.fit(data = poisson_dat, holdout.points = sel.holdout.points)
all.results[7,5] <- colMeans(do.holdfits)[1]
all.results[8,5] <- colMeans(do.holdfits)[3]
all.results[9,5] <- mean((exp(fit.va2$linear.predictors) - exp(poisson_dat$linear.predictor))^2)
return(all.results)
}
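## Illustrative sketch (added; not part of the original file): averaging the
## result matrices over several replicates. Each replicate fits five models,
## so this can be slow. Guarded by if (FALSE) so it never runs at package load.
if (FALSE) {
    reps <- lapply(1:5, simrun_poisson, band_const = 2)
    Reduce(`+`, reps) / length(reps)  # mean of each statistic across replicates
}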
|
/scratch/gouwar.j/cran-all/cranData/vagam/R/simrun_poisson_internal.R
|
summary.vagam <- function(object, ...)
{
print(object)
cat(paste("\nSummary statistics for nonparametric component:\n"))
print(object$smooth.stat)
cat(paste("\nSummary statistics for parametric component (if para.se = TRUE):\n"))
print(object$para.stat)
#cat(paste("\nSummary statistics for linear predictors:\n"))
#print(summary(object$linear.predictors))
cat("\n")
out <- list(call = object$call, para.coeff = object$kappa, smooth.coeff = object$a, smooth.param = object$lambda, phi = object$phi, logLik = object$logL, family = object$family, smooth.stat = object$smooth.stat, para.stat = object$para.stat)
class(out) <- "summary.vagam"
return(out)
}
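## Illustrative sketch (added; not part of the original file): summary()
## dispatches to the method above for any fitted "vagam" object. Assumes
## gamsim() from this package; the column names x0-x3 follow the simrun_*
## helpers. Guarded by if (FALSE) so it never runs at package load.
if (FALSE) {
    dat <- gamsim(n = 100, dist = "normal")
    fit <- vagam(y = dat$y, smooth.X = dat[, c("x0", "x1", "x2", "x3")],
                 int.knots = 8, para.se = TRUE)
    out <- summary(fit)  # prints the fit plus smooth/parametric Wald tables
    out$smooth.stat
}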
|
/scratch/gouwar.j/cran-all/cranData/vagam/R/summary.vagam.R
|
######################
## TODO
## 2) Redo calculation of information matrix to do sandwich formula
## 3) Allow more spline types, specifically more straight additive splines options and then possibly tensor products?
## 4) Port ridge.iwla to use C++
## 5) Allow subset, and na.action options
vagam <-
function(y, smooth.X, para.X = NULL, lambda = NULL, int.knots, family = gaussian(), A.struct = c("unstructured", "block"), offset = NULL, save.data = FALSE, para.se = FALSE, doIC = FALSE, control = list(eps = 1e-3, maxit = 1000, trace = TRUE, seed.number = 123, mc.samps = 4000, pois.step.size = 0.01))
{
if(!(family$family %in% c("gaussian", "poisson", "binomial"))) #"binomialp"
stop("Specified family currently no supported. Sorry!")
if(family$family == "gaussian")
family <- gaussian(link = "identity")
if(family$family == "poisson")
family <- poisson(link = "log")
if(family$family == "binomial")
family <- binomial(link = "logit")
if(family$family == "binomialp")
family <- binomial(link = "probit")
A.struct <- match.arg(A.struct, c("unstructured", "block"))
ncov_smooth_X <- ncol(smooth.X)
if(is.null(offset))
offset <- numeric(length(y))
if(!(length(int.knots) %in% c(1,ncov_smooth_X)))
stop("int.knots should either be a single number of a vector with length equal to ncol(smooth.X). Thanks!")
if(length(int.knots) == 1)
actual_int_knots <- rep(int.knots, ncov_smooth_X)
if(length(int.knots) > 1)
actual_int_knots <- int.knots
if(is.null(colnames(smooth.X)))
colnames(smooth.X) <- paste("smoothX", 1:ncov_smooth_X, sep = "")
if(!is.null(para.X))
{
para.X <- as.matrix(para.X)
if(is.null(colnames(para.X)))
colnames(para.X) <- paste("paraX", 1:ncol(para.X), sep = "")
if(any(apply(para.X, 2, function(x) all(x == 1))))
stop("No intercept terms should be included in para.X, as this is included by default. Thanks!")
para.X <- cbind(1, para.X) ## Intercept included by default
colnames(para.X)[1] <- "Intercept"
}
if(is.null(para.X))
{
para.X <- matrix(1, nrow = length(y), ncol = 1)
colnames(para.X) <- "Intercept"
}
if(is.null(lambda))
{
cw_lambda <- new_lambda <- rep(2, ncov_smooth_X)
message("Lambda updated as part of VA estimation. Yeah baby!")
}
if(!is.null(lambda))
{
if(length(lambda) != ncov_smooth_X)
stop("lambda should a vector with length equal to ncol(smooth.X). Thanks!")
cw_lambda <- new_lambda <- lambda
message("Lambda given. Thanks!")
}
## Construct B-splines basis, penalty matrix, and some other quantities for smooth.X, by extracting attributes from a mgcv gam fit
## no. of total knots = no. of interior knots[1] + 2*degree + 1, but then -1 to remove the intercept term
## basis dimension d_j = no. of interior knots[1] + degree + 1, where the degree of the spline is typically set to 3 for cubic B-splines. We then subtract 1 to remove the intercept term and, to ensure the centering constraint is satisfied, reduce by one more to give d_j = no. of interior knots[1] + degree - 1
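## Worked example (added for clarity): with 8 interior knots and cubic B-splines
## (degree = 3), the total number of knots is 8 + 2*3 + 1 = 15 and the raw basis
## dimension is 8 + 3 + 1 = 12; dropping the intercept column gives 11, and
## absorbing the centering constraint gives d_j = 8 + 3 - 1 = 10.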
S <- basis_info <- vector("list", ncov_smooth_X)
d <- numeric(ncov_smooth_X) ## rank of Sj
index_cov <- NULL ## Indexes which covariate each column in Z belongs to
Z <- NULL
for(k in 1:ncov_smooth_X)
{
x0 <- smooth.X[,k]
get_basis <- smooth.construct(s(x0, bs = "ps", k = actual_int_knots[k]+3, m = c(2,1)), data = data.frame(x0), knots = NULL)
Z_k <- get_basis$X
transform_mat <- qr.Q(qr(as.matrix(colSums(Z_k))), complete = TRUE)[,-1] ## Centering constraint
Z_k <- Z_k %*% transform_mat
index_cov <- c(index_cov, rep(k, ncol(Z_k)))
colnames(Z_k) <- paste("smooth.X", k, 1:sum(index_cov == k), sep = "")
Z <- cbind(Z, Z_k)
S[[k]] <- crossprod(transform_mat, get_basis$S[[1]]) %*% transform_mat
d[k] <- get_basis$rank
get_basis$X <- NULL;
get_basis$transform_mat <- transform_mat;
basis_info[[k]] <- get_basis
rm(transform_mat, get_basis, x0)
}
cw_kappa <- new_kappa <- numeric(ncol(para.X)) ## coefficients for parametric part
names(new_kappa) <- names(para.X)
cw_a <- new_a <- numeric(ncol(Z)) ## coefficients for smooth part, also VA means
if(family$family == "poisson")
{
old <- .Random.seed
on.exit(assign(".Random.seed", old, envir = globalenv())) ## Restore the global RNG state on exit
set.seed(control$seed.number)
cw_a <- new_a <- rnorm(ncol(Z)) ## Use random coefficients as it tends to work better for Poisson
}
names(new_a) <- colnames(Z)
cw_A <- new_A <- matrix(0, nrow = ncol(Z), ncol = ncol(Z)) ## VA covariance
rownames(new_A) <- colnames(new_A) <- colnames(Z)
cw_phi <- new_phi <- 1 ## variance
cw_logL <- -Inf
new_logL <- 10
diff_logL <- 10
diff_lambda <- 0
counter <- 1
if(family$family %in% c("gaussian","binomialp"))
ZZt <- crossprod(Z)
tic <- proc.time()
## VA...Let it rip!
while(diff_logL > control$eps)
{
if(counter > control$maxit)
break;
## Update kappa and a and lambda if required
## Calculation for Poisson somewhat unstable
ZA <- Z %*% cw_A
new_offset <- Z %*% cw_a + as.numeric(family$family %in% c("poisson","binomial")) * 0.5 * rowSums(ZA * Z) + offset
fit0 <- suppressWarnings(glm.fit(x = para.X, y = y, family = family, offset = new_offset, intercept = FALSE))
new_kappa <- fit0$coefficients
Q <- matrix(0, ncol(Z), ncol(Z))
for(k2 in 1:ncov_smooth_X)
Q[index_cov == k2,index_cov == k2] <- S[[k2]]*cw_lambda[k2]
new_offset <- para.X %*% new_kappa + as.numeric(family$family %in% c("poisson","binomial")) * 0.5 * rowSums(ZA * Z) + offset
if(family$family == "poisson")
{
fit0 <- ridge.iwls(x = Z, y = y, family = family, penalty = Q, initial.beta = cw_a, offset = new_offset, gamma = 0.1 + min(counter * control$pois.step.size, 0.9)) ## Gradually increase step size with counter
}
if(family$family != "poisson")
{
fit0 <- ridge.iwls(x = Z, y = y, family = family, penalty = cw_phi*Q, initial.beta = cw_a, offset = new_offset)
}
new_a <- fit0$coefficients
rm(new_offset)
## Update phi for Gaussian
new.eta <- para.X %*% new_kappa + Z %*% new_a + offset
if(family$family == "gaussian")
{
new_phi <- (sum((y - new.eta)^2) + sum(ZA * Z))/length(y)
}
## Update A
if(A.struct == "unstructured")
{
if(family$family %in% c("gaussian","binomialp"))
new_A <- chol2inv(chol(Q + (1/new_phi)*ZZt))
if(family$family == "poisson")
{
err2 <- 10;
cw_A <- new_A
while(err2 > 0.01)
{
denom <- Z * matrix(sqrt(exp(new.eta + 0.5 * rowSums(ZA * Z))), nrow = length(y), ncol = ncol(Z), byrow = FALSE)
denom <- Q + crossprod(denom)
new_A <- chol2inv(chol(denom))
err2 <- sum((cw_A - new_A)^2)/2
cw_A <- new_A
}
}
if(family$family == "binomial")
{
err2 <- 10;
cw_A <- new_A
while(err2 > 0.01)
{
denom <- Z * matrix(sqrt(binomial()$linkinv(new.eta + 0.5 * rowSums(ZA * Z))), nrow = length(y), ncol = ncol(Z), byrow = FALSE)
denom <- Q + crossprod(denom)
new_A <- chol2inv(chol(denom))
err2 <- sum((cw_A - new_A)^2)/2
cw_A <- new_A
}
}
}
if(A.struct == "block") ## This might actually be slower than doing the full thing!
{
for(k2 in 1:ncov_smooth_X)
{
if(family$family %in% c("gaussian", "binomialp"))
{
new_A[index_cov == k2, index_cov == k2] <- chol2inv(chol(Q[index_cov == k2,index_cov == k2] + (1/new_phi) * crossprod(Z[,index_cov == k2])))
}
if(family$family == "poisson")
{
err2 <- 10;
while(err2 > 0.01)
{
denom <- Z[,index_cov == k2] * matrix(sqrt(exp(new.eta + 0.5 * rowSums(ZA * Z))), nrow = length(y), ncol = sum(index_cov==k2), byrow = FALSE)
denom <- Q[index_cov == k2, index_cov == k2] + crossprod(denom)
new_A[index_cov == k2, index_cov == k2] <- chol2inv(chol(denom))
err2 <- sum((cw_A - new_A)^2)/2
cw_A <- new_A
}
}
if(family$family == "binomial")
{
err2 <- 10;
while(err2 > 0.01)
{
denom <- Z[,index_cov == k2] * matrix(sqrt(binomial()$linkinv(new.eta + 0.5 * rowSums(ZA * Z))), nrow = length(y), ncol = sum(index_cov==k2), byrow = FALSE)
denom <- Q[index_cov == k2, index_cov == k2] + crossprod(denom)
new_A[index_cov == k2, index_cov == k2] <- chol2inv(chol(denom))
err2 <- sum((cw_A - new_A)^2)/2
cw_A <- new_A
}
}
}
}
if(is.null(lambda))
{
for(k2 in 1:ncov_smooth_X)
{
new_lambda[k2] <- d[k2]/(sum(diag(crossprod(S[[k2]], new_A[index_cov == k2, index_cov == k2]))) + tcrossprod(new_a[index_cov == k2], S[[k2]]) %*% new_a[index_cov == k2])
}
}
## Calculate new VA logL
new_logL <- calc.VAlogL(y = y, Z = Z, para.X = para.X, family = family, lambda = new_lambda, kappa = new_kappa, a = new_a, A = new_A, phi = new_phi, S = S, d = d, index.cov = index_cov, eta = new.eta)
diff_logL <- new_logL - cw_logL
diff_lambda <- sum((new_lambda - cw_lambda)^2)
if(control$trace)
cat("Iteration:", counter, "\t Current VA logL:", cw_logL, " | New VA logL:", new_logL, " | Difference:", new_logL - cw_logL, "\n")
counter <- counter + 1
cw_logL <- new_logL
cw_lambda <- new_lambda
cw_kappa <- new_kappa
cw_a <- new_a
cw_A <- new_A
cw_phi <- new_phi
}
toc <- proc.time()
## Calculate stuff related to external smoothing parameter selection
ic_out <- rep(NA,2)
names(ic_out) <- c("AIC", "BIC")
if(doIC)
{
ic_out <- -2 * new_logL - new_phi + c(2 * fit0$tr.Inf * new_phi, log(length(y)) * fit0$tr.Inf * new_phi)
}
out <- list(kappa = new_kappa, a = new_a, A = new_A, lambda = new_lambda, IC = ic_out, phi = new_phi, linear.predictors = new.eta, offset = offset, logL = new_logL, no.knots = actual_int_knots, index.cov = index_cov, basis.info = basis_info, family = family, time.taken = toc-tic)
if(save.data)
{
out$y <- y
out$para.X <- para.X
out$smooth.X <- smooth.X
out$Z <- Z
}
## Wald test for each smooth curve
smooth.X.waldstat <- smooth.X.waldpval <- numeric(ncov_smooth_X)
new_A.inv <- chol2inv(chol(new_A))
for(k2 in 1:ncov_smooth_X)
{
smooth.X.waldstat[k2] <- crossprod(new_a[index_cov == k2], new_A.inv[index_cov == k2,index_cov == k2]) %*% new_a[index_cov == k2]
smooth.X.waldpval[k2] <- pchisq(smooth.X.waldstat[k2], df = sum(index_cov==k2), lower.tail = FALSE)
}
smooth.X.wald <- round(rbind(smooth.X.waldstat,smooth.X.waldpval),5)
rownames(smooth.X.wald) <- c("Wald Statistic", "p-value")
colnames(smooth.X.wald) <- colnames(smooth.X)
rm(smooth.X.waldstat,smooth.X.waldpval)
out$smooth.stat <- smooth.X.wald
## Variational observed information matrix for model parameters and lambdas
if(para.se)
{
message("Calculating information matrix for model parameters...")
obs_info <- info.valouis(y = y, para.X = para.X, Z = Z, kappa = new_kappa, phi = new_phi, lambda = new_lambda, a = new_a, A = new_A, S = S, d = d, index.cov = index_cov, mc.samps = control$mc.samps, family = family)
## If the information matrix is not positive definite, then do a one-step update to produce an unstructured A and recalculate the info matrix; hopefully this helps!
if(any(diag(obs_info) < 0) && family$family == "poisson")
{
message("Redoing...")
err2 <- 10;
cw_A <- new_A
#new.eta <- para.X %*% new_kappa + Z %*% new_a
while(err2 > 0.01)
{
denom <- Z * matrix(sqrt(exp(new.eta + 0.5 * rowSums((Z %*% cw_A) * Z))), nrow = length(y), ncol = ncol(Z), byrow = FALSE)
denom <- Q + crossprod(denom)
new_A <- chol2inv(chol(denom))
err2 <- sum((cw_A - new_A)^2)/2
cw_A <- new_A
}
obs_info <- info.valouis(y = y, para.X = para.X, Z = Z, kappa = new_kappa, phi = new_phi, lambda = new_lambda, a = new_a, A = cw_A, S = S, d = d, index.cov = index_cov, mc.samps = control$mc.samps, family = family)
}
para_sderr <- sqrt(diag(solve(obs_info)))[1:ncol(para.X)]
para_X_stat <- round(rbind(new_kappa, para_sderr, new_kappa/para_sderr, 2 * pnorm(abs(new_kappa/para_sderr), lower.tail = FALSE)), 5)
rownames(para_X_stat) <- c("Estimate", "Std. Error", "Wald Statistic", "p-value")
colnames(para_X_stat) <- colnames(para.X)
out$para.stat <- para_X_stat
out$obs.info <- obs_info
}
if(!para.se)
{
out$para.stat <- NULL
}
class(out) <- "vagam"
out$call <- match.call()
return(out)
}
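## Minimal usage sketch (added; not part of the original file), assuming
## gamsim() from this package to simulate data; the column names x0-x3 follow
## the simrun_* helpers. Guarded by if (FALSE) so it never runs at package load.
if (FALSE) {
    dat <- gamsim(n = 100, dist = "normal")
    fit <- vagam(y = dat$y, smooth.X = dat[, c("x0", "x1", "x2", "x3")],
                 int.knots = 8, family = gaussian())
    fit$kappa   # parametric coefficients (here, intercept only)
    fit$lambda  # estimated smoothing parameters, one per smooth covariate
}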
|
/scratch/gouwar.j/cran-all/cranData/vagam/R/vagam.R
|
express_lambda <- function(body) {
call("function", as.pairlist(alist(. = )), body)
}
deparse_call <- function(chk_item, fn_expr) {
call <- substitute(f(x), list(f = fn_expr, x = lazyeval::f_rhs(chk_item)))
deparse_collapse(call)
}
# Rewire the argument signature of a function
with_sig <- function(.f, .sig, .attrs) {
formals(.f) <- .sig
attributes(.f) <- .attrs
.f
}
|
/scratch/gouwar.j/cran-all/cranData/valaddin/R/call.R
|
localize_nm <- function(nm, what_is, ns, env) {
msg <- paste("Not", what_is(nm))
p <- getExportedValue(ns, nm)
localize(lazyeval::f_new(p, msg, env = env))
}
replace <- function(x, pattern, with, ...) {
gsub(pattern, with, x, ...)
}
delete <- function(x, pattern, ...) {
replace(x, pattern, "", ...)
}
scrub <- function(pattern_rm, pattern_sep) {
function(x) {
replace(delete(x, pattern_rm), pattern_sep, with = " ")
}
}
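## e.g. (illustrative): scrub("^is\\.", "\\.")("is.data.frame") returns
## "data frame", so localize_nm() above yields the message "Not data frame".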
make_vld_chkrs <- function(nms, pattern, sep, ns, env = parent.frame()) {
what_is <- scrub(pattern, sep)
chkrs <- lapply(nms, localize_nm, ns = ns, what_is = what_is, env = env)
names(chkrs) <- nms %>%
delete(pattern) %>%
replace("\\.", with = "_") %>%
paste("vld", ., sep = "_")
chkrs[sort(names(chkrs))]
}
chkrs <- make_vld_chkrs(
nms = c(
"is.array", "is.call", "is.complex", "is.data.frame",
"is.environment", "is.expression", "is.factor", "is.language",
"is.matrix", "is.name", "is.null", "is.numeric",
"is.ordered", "is.pairlist", "is.primitive", "is.raw",
"is.recursive", "is.symbol", "is.table", "is.unsorted",
"is.function", "is.atomic", "is.list", "is.vector",
"is.double", "is.character", "is.integer", "is.logical"
),
pattern = "^is\\.",
sep = "\\.",
ns = "base"
)
chkrs$vld_true <- localize(lazyeval::f_new(is_true, "Not TRUE"))
chkrs$vld_false <- localize(lazyeval::f_new(is_false, "Not FALSE"))
chkrs$vld_all <- localize(lazyeval::f_new(all, "Not all TRUE"))
chkrs$vld_any <- localize(lazyeval::f_new(any, "None TRUE"))
chkrs$vld_empty <- localize(lazyeval::f_new(
quote({length(.) == 0L}), "Not empty"))
chkrs$vld_singleton <- localize(lazyeval::f_new(
quote({length(.) == 1L}), "Not singleton"))
chkrs$vld_closure <- localize(lazyeval::f_new(
quote({typeof(.) == "closure"}), "Not closure"))
chkrs$vld_formula <- localize(lazyeval::f_new(
quote({typeof(.) == "language" && inherits(., "formula")}), "Not formula"))
chkrs$vld_na <- localize(lazyeval::f_new(quote({isTRUE(is.na(.))}), "Not NA"))
chkrs$vld_nan <- localize(lazyeval::f_new(quote({isTRUE(is.nan(.))}), "Not NaN"))
chkrs$vld_scalar_numeric <- localize(lazyeval::f_new(
quote({is.numeric(.) && length(.) == 1L}), "Not scalar numeric"))
chkrs$vld_scalar_logical <- localize(lazyeval::f_new(
quote({is.logical(.) && length(.) == 1L}), "Not scalar logical"))
chkrs$vld_scalar_integer <- localize(lazyeval::f_new(
quote({is.integer(.) && length(.) == 1L}), "Not scalar integer"))
chkrs$vld_scalar_vector <- localize(lazyeval::f_new(
quote({is.vector(.) && length(.) == 1L}), "Not scalar vector"))
chkrs$vld_scalar_atomic <- localize(lazyeval::f_new(
quote({is.atomic(.) && length(.) == 1L}), "Not scalar atomic"))
chkrs$vld_scalar_list <- localize(lazyeval::f_new(
quote({is.list(.) && length(.) == 1L}), "Not scalar list"))
chkrs$vld_scalar_double <- localize(lazyeval::f_new(
quote({is.double(.) && length(.) == 1L}), "Not scalar double"))
chkrs$vld_scalar_complex <- localize(lazyeval::f_new(
quote({is.complex(.) && length(.) == 1L}), "Not scalar complex"))
chkrs$vld_scalar_character <- localize(lazyeval::f_new(
quote({is.character(.) && length(.) == 1L}), "Not scalar character"))
chkrs$vld_scalar_raw <- localize(lazyeval::f_new(
quote({is.raw(.) && length(.) == 1L}), "Not scalar raw"))
# Aliases
replace_msg <- function(chkr, msg) {
f <- globalize(chkr)
lazyeval::f_lhs(f) <- msg
localize(f)
}
chkrs_alias <- list(
"Not boolean" = chkrs$vld_scalar_logical,
"Not number" = chkrs$vld_scalar_numeric,
"Not string" = chkrs$vld_scalar_character
) %>%
Map(replace_msg, ., names(.)) %>%
setNames(replace(names(.), "^Not ", "vld_"))
chkrs <- c(chkrs, chkrs_alias)
for (nm in names(chkrs))
assign(nm, chkrs[[nm]])
#' @rawNamespace exportPattern("^vld_.*$")
NULL
# Documentation -----------------------------------------------------------
# Aliases, "Usage"
nms <- list()
nms$type <- paste0("vld_", c(
"logical", "integer", "double", "complex", "character", "raw"
))
nms$scalar <- c(
"vld_singleton", "vld_boolean", "vld_number", "vld_string",
paste0("vld_scalar_", c(
"logical", "integer", "double", "complex", "character", "raw",
"numeric", "atomic", "vector", "list"
))
)
nms$misc <- setdiff(names(chkrs), c(nms$type, nms$scalar))
nms <- lapply(nms, sort)
# Ensure that all checkers are accounted for
stopifnot(setequal(names(chkrs), unlist(nms, use.names = FALSE)))
# "See Also" (required because @family would overwrite our custom "See Also")
trim <- local({
gather <- function(x) {
gsub("\n([^\n])", " \\1", x)
}
tidy <- function(x) {
gsub(" \n|\n ", "\n", gsub(" +", " ", x))
}
function(x) {
trimws(tidy(gather(x)), which = "both")
}
})
prefix_with <- function(x, text) {
as.list(setNames(paste(text, x), names(x)))
}
base_predicate <- function(x, prefix = "^vld") {
sub(prefix, "is", gsub("_", ".", x))
}
join <- function(x) paste(x, collapse = ", ")
link <- list(
bare = "\\link{%s}",
ext = "\\code{\\link[%s]{%s}}"
)
misc_predicates <- nms$misc %>%
setdiff(c(
"vld_all", "vld_any", "vld_empty", "vld_formula", "vld_closure",
"vld_true", "vld_false"
)) %>%
base_predicate %>%
c("all", "any")
scalar_predicates <- nms$scalar %>%
setdiff(c("vld_boolean", "vld_number", "vld_singleton", "vld_string")) %>%
base_predicate("^vld.scalar")
predicates <- list(
type = sprintf(link$ext, "base", base_predicate(nms$type)),
scalar = sprintf(link$ext, "base", sort(scalar_predicates)),
misc = sprintf(link$ext, "base", sort(misc_predicates))
) %>%
lapply(join) %>%
prefix_with("Corresponding predicates:")
other <- trim("
\\code{\\link{globalize}} recovers the underlying check formula of global
scope.
\n\n
The notions of \\dQuote{scope} and \\dQuote{check item} are explained in the
\\emph{Check Formulae} section of \\link{firmly}.
")
family <- c("type", "scalar", "misc") %>%
setNames(paste(., "checkers", sep = "-"), .) %>%
lapply(function(nm) {
other <- unname(.[. != nm])
join(sprintf(link$bare, other))
}) %>%
prefix_with("Other checkers:")
ref <- Map(c, predicates, other, family)
#' Miscellaneous checkers
#'
#' @description
#' These functions make check formulae of local scope based on the
#' correspondingly named \pkg{base} R predicates \code{is.*} (e.g.,
#' \code{vld_data_frame} corresponds to the predicate
#' \code{\link[base]{is.data.frame}}), with the following exceptions:
#'
#' - `vld_empty` is based on the predicate `length(.) == 0`
#'
#' - `vld_formula` is based on the predicate
#' `typeof(.) == "language" && inherits(., "formula")`
#'
#' - `vld_closure` is based on the predicate `typeof(.) == "closure"`
#'
#' - `vld_true` and `vld_false` are based on the predicates
#' `identical(., TRUE)` and `identical(., FALSE)`, resp.
#'
#' The checkers \code{vld_true} and \code{vld_false} are all-purpose checkers to
#' specify \emph{arbitrary} input validation checks.
#'
#' @evalRd rd_alias(nms$misc)
#' @evalRd rd_usage(nms$misc)
#' @param \dots Check items, i.e., formulae that are one-sided or have a string
#' as left-hand side (see \emph{Check Formulae of Local Scope} in the
#' documentation page \link{firmly}). These are the expressions to check.
#' @return Check formula of local scope.
#' @details Each function \code{vld_*} is a function of class
#' \code{"check_maker"}, generated by \code{\link{localize}}.
#' @evalRd rd_seealso(ref$misc)
#' @examples
#' \dontrun{
#'
#' f <- function(x, y) "Pass"
#'
#' # Impose the condition that x is a formula
#' g <- firmly(f, vld_formula(~x))
#' g(z ~ a + b, 0) # [1] "Pass"
#' g(0, 0) # Error: "Not formula: x"
#'
#' # Impose the condition that x and y are disjoint (assuming they are vectors)
#' h <- firmly(f, vld_empty(~intersect(x, y)))
#' h(letters[1:3], letters[4:5]) # [1] "Pass"
#' h(letters[1:3], letters[3:5]) # Error: "Not empty: intersect(x, y)"
#'
#' # Use a custom error message
#' h <- firmly(f, vld_empty("x, y must be disjoint" ~ intersect(x, y)))
#' h(letters[1:3], letters[3:5]) # Error: "x, y must be disjoint"
#'
#' # vld_true can be used to implement any kind of input validation
#' ifelse_f <- firmly(ifelse, vld_true(~typeof(yes) == typeof(no)))
#' (w <- {set.seed(1); rnorm(5)})
#' # [1] -0.6264538 0.1836433 -0.8356286 1.5952808 0.3295078
#' ifelse_f(w > 0, 0, "1") # Error: "Not TRUE: typeof(yes) == typeof(no)"
#' ifelse_f(w > 0, 0, 1) # [1] 1 0 1 0 0
#' }
#'
#' @name misc-checkers
NULL
#' Type checkers
#'
#' These functions make check formulae of local scope based on the
#' correspondingly named (atomic) type predicate from \pkg{base} R.
#'
#' @evalRd rd_alias(nms$type)
#' @evalRd rd_usage(nms$type)
#' @param \dots Check items, i.e., formulae that are one-sided or have a string
#' as left-hand side (see \emph{Check Formulae of Local Scope} in the
#' documentation page \link{firmly}). These are the expressions to check.
#' @inherit misc-checkers
#' @evalRd rd_seealso(ref$type)
#' @examples
#' \dontrun{
#'
#' f <- function(x, y) "Pass"
#'
#' # Impose a check on x: ensure it's of type "logical"
#' f_firm <- firmly(f, vld_logical(~x))
#' f_firm(TRUE, 0) # [1] "Pass"
#' f_firm(1, 0) # Error: "Not logical: x"
#'
#' # Use a custom error message
#' f_firm <- firmly(f, vld_logical("x should be a logical vector" ~ x))
#' f_firm(1, 0) # Error: "x should be a logical vector"
#'
#' # To impose the same check on all arguments, apply globalize()
#' f_firmer <- firmly(f, globalize(vld_logical))
#' f_firmer(TRUE, FALSE) # [1] "Pass"
#' f_firmer(TRUE, 0) # Error: "Not logical: `y`"
#' f_firmer(1, 0) # Errors: "Not logical: `x`", "Not logical: `y`"
#' }
#' @name type-checkers
NULL
#' Scalar checkers
#'
#' @description These functions make check formulae of local scope based on the
#' correspondingly named scalar type predicate from \pkg{base} R. For example,
#' `vld_scalar_logical` creates check formulae (of local scope) for the
#' predicate `is.logical(.) && length(.) == 1`. The function `vld_singleton` is
#' based on the predicate `length(.) == 1`.
#'
#' The functions `vld_boolean`, `vld_number`, `vld_string` are aliases for
#' `vld_scalar_logical`, `vld_scalar_numeric`, `vld_scalar_character`, resp.
#' (with appropriately modified error messages).
#'
#' @evalRd rd_alias(nms$scalar)
#' @evalRd rd_usage(nms$scalar)
#' @param \dots Check items, i.e., formulae that are one-sided or have a string
#' as left-hand side (see \emph{Check Formulae of Local Scope} in the
#' documentation page \link{firmly}). These are the expressions to check.
#' @inherit misc-checkers
#' @evalRd rd_seealso(ref$scalar)
#' @examples
#' \dontrun{
#'
#' f <- function(x, y) "Pass"
#'
#' # Impose a check on x: ensure it's boolean (i.e., a scalar logical vector)
#' f_firm <- firmly(f, vld_boolean(~x))
#' f_firm(TRUE, 0) # [1] "Pass"
#' f_firm(c(TRUE, TRUE), 0) # Error: "Not boolean: x"
#'
#' # Use a custom error message
#' f_firm <- firmly(f, vld_boolean("x is not TRUE/FALSE/NA" ~ x))
#' f_firm(c(TRUE, TRUE), 0) # Error: "x is not TRUE/FALSE/NA"
#'
#' # To impose the same check on all arguments, apply globalize
#' f_firmer <- firmly(f, globalize(vld_boolean))
#' f_firmer(TRUE, FALSE) # [1] "Pass"
#' f_firmer(TRUE, 0) # Error: "Not boolean: `y`"
#' f_firmer(logical(0), 0) # Errors: "Not boolean: `x`", "Not boolean: `y`"
#' }
#' @name scalar-checkers
NULL
|
/scratch/gouwar.j/cran-all/cranData/valaddin/R/checkers.R
|
#' Is a formula a check formula?
#'
#' \code{is_check_formula(x)} checks whether \code{x} is a check formula, while
#' \code{is_checklist(x)} checks whether \code{x} is a \emph{checklist}, i.e., a
#' list of check formulae. (Neither function verifies logical consistency of the
#' implied checks.)
#'
#' @param x Object to test.
#' @return \code{is_check_formula}, resp. \code{is_checklist}, returns
#' \code{TRUE} or \code{FALSE}, according to whether \code{x} is or is not a
#' check formula, resp. checklist.
#' @seealso \link{firmly} (on the specification and use of check formulae)
#' @examples
#' is_check_formula(list(~x, ~y) ~ is.numeric) # [1] TRUE
#' is_check_formula("Not positive" ~ {. > 0}) # [1] TRUE
#'
#' is_checklist(list(list(~x, ~y) ~ is.numeric, "Not positive" ~ {. > 0}))
#' # [1] TRUE
#'
#' # Invalid checklists
#' is_checklist("Not positive" ~ {. > 0}) # [1] FALSE (not a list)
#' is_checklist(list(is.numeric ~ list(~ x))) # [1] FALSE (backwards)
#' is_checklist(list(list(log ~ x) ~ is.character)) # [1] FALSE (invalid check item)
#'
#' @name checklist
NULL
#' @rdname checklist
#' @export
is_check_formula <- function(x) {
inherits(x, "formula") && is_rhs_function(x) && is_lhs_checkitem(x)
}
#' @rdname checklist
#' @export
is_checklist <- function(x) {
is.list(x) && all(vapply(x, is_check_formula, logical(1)))
}
is_string <- function(x) {
typeof(x) == "character" && length(x) == 1L && !isTRUE(is.na(x))
}
is_gbl_check_formula <- function(x) {
inherits(x, "formula") && is_rhs_function(x) && is_string(f_eval_lhs(x))
}
is_rhs_function <- function(x) {
is_lambda(lazyeval::f_rhs(x)) || is.function(f_eval_rhs(x))
}
# Like magrittr, capture '{...}' as anonymous function
is_lambda <- function(x) {
is.call(x) && identical(x[[1L]], as.symbol("{"))
}
# To check that a formula is onesided, it is not enough to check
# is.null(f_eval_lhs(x)), for both NULL ~ x and ~x have NULL lhs.
is_onesided <- function(x) {
length(x) == 2L
}
is_f_onesided <- function(x) {
inherits(x, "formula") && is_onesided(x)
}
is_lhs_checkitem <- function(x) {
is_onesided(x) || {
lhs <- f_eval_lhs(x)
is_string(lhs) || is_flist(lhs)
}
}
is_check_expr <- function(x) {
inherits(x, "formula") && (is_onesided(x) || is_string(f_eval_lhs(x)))
}
is_flist <- function(x) {
is.list(x) && length(x) && all(vapply(x, is_check_expr, logical(1)))
}
|
/scratch/gouwar.j/cran-all/cranData/valaddin/R/checklist.R
|
#' Decompose a firmly applied function
#'
#' Decompose a firmly applied function (i.e., a function created by
#' \code{\link{firmly}}):
#' \itemize{
#' \item \code{firm_core} extracts the underlying \dQuote{core}
#' function—the function that is called when all arguments are valid.
#' \item \code{firm_checks} extracts the checks.
#' \item \code{firm_error} extracts the subclass of the error condition that
#' is signaled when an input validation error occurs.
#' \item \code{firm_args} extracts the names of arguments whose presence is to
#' be checked, i.e., those specified by the \code{.warn_missing} switch of
#' \code{\link{firmly}}.
#' }
#'
#' @param x Object to decompose.
#' @return If \code{x} is a firmly applied function:
#' \itemize{
#' \item \code{firm_core} returns a function.
#' \item \code{firm_checks} returns a data frame with components \code{expr}
#' (language), \code{env} (environment), \code{string} (character),
#' \code{msg} (character).
#' \item \code{firm_error} returns a character vector.
#' \item \code{firm_args} returns a character vector.
#' }
#' In the absence of the component to be extracted, these functions return
#' \code{NULL}.
#'
#' @seealso \code{\link{firmly}}
#' @examples
#' f <- function(x, y, ...) NULL
#' f_fm <- firmly(f, ~is.numeric, list(~x, ~y - x) ~ {. > 0})
#'
#' identical(firm_core(f_fm), f) # [1] TRUE
#' firm_checks(f_fm) # 4 x 4 data frame
#' firm_error(f_fm) # [1] "simpleError"
#' firm_args(f_fm) # NULL
#' firm_args(firmly(f_fm, .warn_missing = "y")) # [1] "y"
#'
#' @name components
NULL
#' @rdname components
#' @export
firm_core <- function(x) {
.subset2(environment(x), ".fn")
}
#' @rdname components
#' @export
firm_checks <- function(x) {
.subset2(environment(x), ".chks")
}
#' @rdname components
#' @export
firm_error <- function(x) {
.subset2(environment(x), ".error_class")
}
#' @rdname components
#' @export
firm_args <- function(x) {
.subset2(environment(.subset2(environment(x), ".warn")), ".args")
}
|
/scratch/gouwar.j/cran-all/cranData/valaddin/R/components.R
|
# Checking infrastructure -------------------------------------------------
# Make a list of check items
unfurl <- function(.symb, .nm, .msg, .env) {
chk_items <- lapply(.symb, lazyeval::f_new, env = .env)
if (is.null(.msg)) {
names(chk_items) <- character(length(chk_items))
} else {
names(chk_items) <- paste(.msg, encodeString(.nm, quote = "`"), sep = ": ")
}
chk_items
}
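## e.g. (illustrative): unfurl(list(quote(x), quote(y)), c("x", "y"),
## "Not numeric", env) returns a list of the formulas ~x and ~y (scoped to
## env), named "Not numeric: `x`" and "Not numeric: `y`".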
checks_df <- function(pred, items, env, string) {
n <- length(items)
x <- list(
expr = lapply(items, function(.) as.call(c(pred, lazyeval::f_rhs(.)))),
env = `[<-`(vector("list", n), list(env)),
string = string,
msg = names(items)
)
class(x) <- "data.frame"
attr(x, "row.names") <- .set_row_names(n)
x
}
# Assemble a data frame of checks from a check formula
assemble <- function(.chk, .nm, .symb, .env = lazyeval::f_env(.chk)) {
p <- lazyeval::f_rhs(.chk)
p_expr <- if (is_lambda(p)) express_lambda(p) else p
predicate <- eval(p_expr, .env)
lhs <- f_eval_lhs(.chk)
chk_items <- if (is.list(lhs)) {
# .chk: local scope
do.call(lazyeval::f_list, lhs)
} else {
# .chk: global scope (lhs: string/NULL)
unfurl(.symb, .nm, lhs, .env)
}
string <- vapply(chk_items, deparse_call, character(1), fn_expr = p_expr)
is_blank <- !nzchar(names(chk_items))
names(chk_items)[is_blank] <- sprintf("FALSE: %s", string[is_blank])
checks_df(predicate, chk_items, .env, string)
}
# Warning apparatus -------------------------------------------------------
warn <- function(.args) {
if (!length(.args)) {
return(NULL)
}
function(.call) {
missing <- setdiff(.args, names(.call[-1L]))
if (length(missing)) {
msg <- paste(
sprintf("Argument(s) expected but not specified in call %s:",
deparse_collapse(.call)),
quote_collapse(missing)
)
warning_wo_call(msg)
}
invisible(.call)
}
}
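## e.g. (illustrative): warn("y") returns a call inspector; warn("y")(quote(f(x = 1)))
## warns that argument `y` was expected but not specified, via warning_wo_call()
## (defined elsewhere in this package).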
warning_closure <- function(.fn, .warn) {
force(.fn)
force(.warn)
function() {
call <- match.call()
encl <- parent.env(environment())
encl$.warn(call)
eval.parent(`[[<-`(call, 1L, encl$.fn))
}
}
# Validation apparatus ----------------------------------------------------
problems <- function(chks, verdict) {
vapply(seq_along(verdict), function(i) {
x <- verdict[[i]]
if (is_false(x)) {
chks$msg[[i]]
} else if (inherits(x, "error")) {
sprintf("Error evaluating check %s: %s", chks$string[[i]], x$message)
} else {
sprintf("Predicate value %s not TRUE/FALSE: %s",
chks$string[[i]], deparse_collapse(x))
}
}, character(1))
}
#' @importFrom stats runif setNames
validating_closure <- function(.chks, .sig, .nm, .fn, .warn, .error_class) {
force(.fn)
force(.warn)
force(.error_class)
# Input-validation environment
PROM.ENV <- sprintf("__PROM.ENV__%.12f", runif(1L))
make_promises <- eval(call("function", .sig, quote(environment())))
ve <- new.env(parent = emptyenv())
promises <- function(call, env_call) {
ve[[PROM.ENV]] <- eval(`[[<-`(call, 1L, make_promises), env_call)
parent.env(ve[[PROM.ENV]]) <- environment(.fn)
ve
}
# Ensure that promises in validation expressions are from ve[[PROM.ENV]]
subs <- lapply(setNames(nm = .nm), function(.)
substitute(get(., e), list(. = ., e = as.name(PROM.ENV)))
)
exprs <- lapply(seq_len(nrow(.chks)), function(i) {
expr <- .chks$expr[[i]]
# 'expr' is a call, so its second component is the call arguments
expr[[2L]] <- eval(substitute(substitute(., subs), list(. = expr[[2L]])))
list(expr = expr, env = .chks$env[[i]])
})
deparse_w_defval <- function(call) {
.sig[names(call[-1L])] <- call[-1L]
.sig <- .sig[!vapply(.sig, identical, logical(1), quote(expr = ))]
deparse_collapse(as.call(c(call[[1L]], .sig)))
}
error <- function(message) {
structure(
list(message = message, call = NULL),
class = c(.error_class, "error", "condition")
)
}
# Local bindings to avoid (unlikely) clashes with formal arguments
enumerate_many <- match.fun("enumerate_many")
problems <- match.fun("problems")
function() {
call <- match.call()
encl <- parent.env(environment())
encl$.warn(call)
env <- encl$promises(call, parent.frame())
verdict <- suppressWarnings(lapply(encl$exprs, function(.)
tryCatch(eval(.$expr, `parent.env<-`(env, .$env)), error = identity)
))
pass <- vapply(verdict, isTRUE, logical(1))
if (all(pass)) {
eval.parent(`[[<-`(call, 1L, encl$.fn))
} else {
fail <- !pass
msg_call <- encl$deparse_w_defval(call)
msg_error <- encl$enumerate_many(
encl$problems(encl$.chks[fail, ], verdict[fail])
)
stop(encl$error(paste(msg_call, msg_error, sep = "\n")))
}
}
}
# Functional operators ----------------------------------------------------
# Represent non-dot arguments by name and symbol
# sig: pairlist
nomen <- function(sig) {
nm <- setdiff(names(sig), "...") %||% character(0)
list(nm = nm, symb = lapply(nm, as.symbol))
}
skip <- function(...) invisible()
firmly_ <- function(.f, ..., .checklist = list(),
.warn_missing = character(), .error_class = character()) {
chks <- unname(c(list(...), .checklist))
error_class_inapplicable <- is.null(firm_checks(.f))
if (!length(chks) && !length(.warn_missing) && error_class_inapplicable) {
return(.f)
}
if (!is_checklist(chks)) {
stop_wo_call("Invalid check formula(e)")
}
sig <- formals(.f)
arg <- nomen(sig)
if (!length(arg$nm)) {
if (length(.warn_missing)) {
stop_wo_call("Invalid `.warn_missing`: `.f` has no named argument")
}
# No arguments, so assume .error_class inapplicable, hence chks non-empty
warning_wo_call("Check formula(e) not applied: `.f` has no named argument")
return(.f)
}
arg_unknown <- !(.warn_missing %in% arg$nm)
if (any(arg_unknown)) {
stop_wo_call(sprintf("Invalid `.warn_missing`: %s not argument(s) of `.f`",
quote_collapse(.warn_missing[arg_unknown])))
}
fn <- if (is_firm(.f)) firm_core(.f) else .f
pre_chks <- firm_checks(.f)
maybe_warn <- warn(.warn_missing) %||% warn(firm_args(.f)) %||% skip
error_class <- .error_class %||% firm_error(.f) %||% "simpleError"
if (length(chks)) {
asm_chks <- unique(
do.call("rbind",
c(list(pre_chks),
lapply(chks, assemble, .nm = arg$nm, .symb = arg$symb)))
)
f <- validating_closure(asm_chks, sig, arg$nm, fn, maybe_warn, error_class)
} else {
# .warn_missing or .error_class is non-empty
f <- if (is.null(pre_chks)) {
warning_closure(fn, maybe_warn)
} else {
validating_closure(pre_chks, sig, arg$nm, fn, maybe_warn, error_class)
}
}
firm_closure(with_sig(f, sig, .attrs = attributes(.f)))
}
is_closure <- function(x) typeof(x) == "closure"
#' @export
is_firm <- function(x) {
is_closure(x) && inherits(x, "firm_closure")
}
firm_closure <- function(.f) {
.f <- match.fun(.f)
if (!inherits(.f, "firm_closure")) {
class(.f) <- c("firm_closure", class(.f))
}
.f
}
#' @export
firmly <- firmly_(
firmly_,
list("`.f` not an interpreted function" ~ .f) ~ is_closure,
list("`.checklist` not a list" ~ .checklist) ~ is.list,
list(
"`.warn_missing` not a character vector" ~ .warn_missing,
"`.error_class` not a character vector" ~ .error_class
) ~ {is.character(.) && !anyNA(.)}
)
#' @export
`%checkin%` <- function(.checks, .f) {
nms <- names(.checks) %||% character(length(.checks))
firmly(
.f,
.checklist = .checks[!nms %in% c(".warn_missing", ".error_class")],
.warn_missing = .checks[[".warn_missing"]] %||% character(),
.error_class = .checks[[".error_class"]] %||% character()
)
}
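## Illustrative sketch (added; not part of the original file): applying checks
## with %checkin%. Guarded by if (FALSE) so it never runs at package load.
if (FALSE) {
    vlog <- list(
        "`x` must be numeric" ~ is.numeric,
        .error_class = "inputError"
    ) %checkin% (function(x) log(x))
    vlog(10)   # [1] 2.302585
    vlog("a")  # signals an error of class "inputError"
}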
#' @export
loosely <- function(.f, .keep_check = FALSE, .keep_warning = FALSE) {
if (!inherits(.f, "firm_closure") || .keep_check && .keep_warning) {
return(.f)
}
f_chks <- if (.keep_check) firm_checks(.f) else NULL
f_args <- if (.keep_warning) firm_args(.f) else NULL
if (is.null(f_chks) && is.null(f_args)) {
return(firm_core(.f))
}
sig <- formals(.f)
f <- if (is.null(f_chks)) {
warning_closure(firm_core(.f), warn(f_args))
} else {
validating_closure(f_chks, sig, nomen(sig)$nm,
firm_core(.f), skip, firm_error(.f))
}
firm_closure(with_sig(f, sig, .attrs = attributes(.f)))
}
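## Illustrative sketch (added; not part of the original file): loosely() strips
## the checks, recovering the core function. `vlog` refers to the sketch after
## %checkin% above. Guarded by if (FALSE) so it never runs at package load.
if (FALSE) {
    vlog_raw <- loosely(vlog)
    identical(vlog_raw, firm_core(vlog))  # [1] TRUE
}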
# Printing ----------------------------------------------------------------
#' @export
print.firm_closure <- function(x, ...) {
cat("<firm_closure>\n")
cat("\n* Core function:\n")
print.default(firm_core(x))
cat("\n* Checks (<predicate>:<error message>):\n")
calls <- firm_checks(x)
if (length(calls)) {
labels <- paste0(calls$string, ":\n", encodeString(calls$msg, quote = "\""))
cat(enumerate_many(labels))
} else {
cat("None\n")
}
cat("\n* Error subclass for check errors:\n")
subclass <- firm_error(x)
if (!is.null(subclass)) {
cat(paste(subclass, collapse = ", "), "\n")
} else {
cat("None\n")
}
cat("\n* Check for missing arguments:\n")
args <- firm_args(x)
if (!is.null(args) && length(args)) {
cat(quote_collapse(args), "\n")
} else {
cat("Not checked\n")
}
}
# Documentation -----------------------------------------------------------
#' Apply a function firmly
#'
#' \code{firmly} transforms a function into a function with input validation
#' checks. \code{loosely} undoes the application of \code{firmly}, by returning
#' the original function (without checks). \code{is_firm} is a predicate
#' function that checks whether an object is a firmly applied function, i.e.,
#' a function created by \code{firmly}.
#' \cr\cr
#' Use \code{\%checkin\%} to apply \code{firmly} as an operator. Since this
#' allows you to keep checks and arguments adjacent, it is the preferred way to
#' use \code{firmly} in scripts and packages.
#'
#' @aliases firmly %checkin% loosely is_firm
#' @evalRd rd_usage(c("firmly", "%checkin%", "loosely", "is_firm"))
#'
#' @param .f Interpreted function, i.e., closure.
#' @param \dots Input-validation check formula(e).
#' @param .checklist List of check formulae. (These are combined with check
#' formulae provided via \code{\dots}.)
#' @param .warn_missing Arguments of \code{.f} whose absence should raise a
#' warning (character).
#' @param .error_class Subclass of the error condition to be raised when an
#' input validation error occurs (character).
#' @param .checks List of check formulae, optionally containing character
#' vectors named \code{.warn_missing}, \code{.error_class}, corresponding to
#' the similarly named arguments.
#' @param .keep_check,.keep_warning Should existing checks, resp.
#' missing-argument warnings, be kept?
#' @param x Object to test.
#'
#' @section Check Formulae:
#' An \strong{input validation check} is specified by a \strong{check
#' formula}, a special \link[stats]{formula} of the form
#' \preformatted{<scope> ~ <predicate>}
#' where the right-hand side expresses \emph{what} to check, and the left-hand
#' side expresses \emph{where} to check it.
#'
#' The right-hand side \code{<predicate>} is a \strong{predicate} function,
#' i.e, a one-argument function that returns either \code{TRUE} or
#' \code{FALSE}. It is the condition to check/enforce. The left-hand side
#' \code{<scope>} is an expression specifying what the condition is to be
#' applied to: whether the condition is to be applied to all
#' (non-\code{\dots}) arguments of \code{.f} (the case of \dQuote{global
#' scope}), or whether the condition is to be selectively applied to certain
#' expressions of the arguments (the case of \dQuote{local scope}).
#'
#' According to \strong{scope}, there are two classes of check formulae:
#' \itemize{
#' \item \strong{Check formulae of global scope}
#' \preformatted{<string> ~ <predicate>}
#' \preformatted{~<predicate>}
#'
#' \item \strong{Check formulae of local scope}
#' \preformatted{list(<check_item>, <check_item>, ...) ~ <predicate>}
#' }
#'
#' \subsection{Check Formulae of Global Scope}{
#' A \strong{global check formula} is a succinct way of asserting that the
#' function \code{<predicate>} returns \code{TRUE} when called on each
#' (non-\code{\dots}) argument of \code{.f}. Each argument for which
#' \code{<predicate>} \emph{fails}—returns \code{FALSE} or is itself not
#' evaluable—produces an error message, which is auto-generated unless a
#' custom error message is supplied by specifying the string
#' \code{<string>}.
#'
#' \subsection{Example}{
#' The condition that all (non-\code{\dots}) arguments of a function must
#' be numerical can be enforced by the check formula
#' \preformatted{~is.numeric}
#' or
#' \preformatted{"Not numeric" ~ is.numeric}
#' if the custom error message \code{"Not numeric"} is to be used (in lieu
#' of an auto-generated error message).
#' }
#' }
#'
#' \subsection{Check Formulae of Local Scope}{
#' A \strong{local check formula} imposes argument-specific conditions. Each
#' \strong{check item} \code{<check_item>} is a formula of the form \code{~
#' <expression>} (one-sided) or \code{<string> ~ <expression>}; it imposes
#' the condition that the function \code{<predicate>} is \code{TRUE} for the
#' expression \code{<expression>}. As for global check formulae, each check
#' item for which \code{<predicate>} fails produces an error message, which
#' is auto-generated unless a custom error message is supplied by a string
#' as part of the left-hand side of the check item (formula).
#'
#' \subsection{Example}{
#' The condition that \code{x} and \code{y} must differ for the function
#' \code{function(x, y) \{1 / (x - y)\}} can be enforced by the local
#' check formula
#' \preformatted{list(~x - y) ~ function(.) abs(.) > 0}
#' or
#' \preformatted{list("x, y must differ" ~ x - y) ~ function(.) abs(.) > 0}
#' if the custom error message \code{"x, y must differ"} is to be used (in
#' lieu of an auto-generated error message).
#' }
#' }
#'
#' \subsection{Anonymous Predicate Functions}{
#' Following the
#' \href{https://cran.r-project.org/package=magrittr}{\pkg{magrittr}}
#' package, an anonymous (predicate) function of a single argument \code{.}
#' can be concisely expressed by enclosing the body of such a function
#' within curly braces \code{\{ \}}.
#'
#' \subsection{Example}{
#'     The (one-sided, global) check formula
#' \preformatted{~{. > 0}}
#' is equivalent to the check formula \code{~function(.) {. > 0}}
#' }
#' }
#'
#' @section Value:
#' \subsection{\code{firmly}}{
#' \code{firmly} does nothing when there is nothing to do: \code{.f} is
#' returned, unaltered, when both \code{.checklist} and \code{.warn_missing}
#' are empty, or when \code{.f} has no named argument and
#' \code{.warn_missing} is empty.
#'
#' Otherwise, \code{firmly} again returns a function that behaves
#' \emph{identically} to \code{.f}, but also performs input validation:
#' before a call to \code{.f} is attempted, its inputs are checked, and if
#' any check fails, an error halts further execution with a message
#' tabulating every failing check. (If all checks pass, the call to
#' \code{.f} respects lazy evaluation, as usual.)
#'
#' \subsection{Subclass of the input-validation error object}{
#' The subclass of the error object is \code{.error_class}, unless
#' \code{.error_class} is \code{character()}. In the latter case, the
#' subclass of the error object is that of the existing error object, if
#' \code{.f} is itself a firmly applied function, or it is
#' \code{"simpleError"}, otherwise.
#' }
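#'
#'     For example (a minimal sketch of the documented behavior; the subclass
#'     name is illustrative), a custom subclass can be caught selectively:
#'     \preformatted{f <- firmly(function(x) x, ~is.numeric, .error_class = "myError")
#' tryCatch(f("a"), myError = function(e) "caught")  # [1] "caught"}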
#'
#' \subsection{Formal Arguments and Attributes}{
#' \code{firmly} preserves the attributes and formal arguments of
#' \code{.f} (except that the \code{"class"} attribute gains the component
#' \code{"firm_closure"}, unless it already contains it).
#' }
#' }
#' \subsection{\code{\%checkin\%}}{
#' \code{\%checkin\%} applies the check formula(e) in the list \code{.checks}
#' to \code{.f}, using \code{firmly}. The \code{.warn_missing} and
#' \code{.error_class} arguments of \code{firmly} may be specified as named
#' components of \code{.checks}.
#' }
#' \subsection{\code{loosely}}{
#' \code{loosely} returns \code{.f}, unaltered, when \code{.f} is not a
#' firmly applied function, or both \code{.keep_check} and
#' \code{.keep_warning} are \code{TRUE}.
#'
#' Otherwise, \code{loosely} returns the underlying (original) function,
#' stripped of any input validation checks imposed by \code{firmly}, unless
#' one of the flags \code{.keep_check}, \code{.keep_warning} is switched on:
#' if \code{.keep_check}, resp. \code{.keep_warning}, is \code{TRUE},
#' \code{loosely} retains any existing checks, resp. missing-argument
#' warnings, of \code{.f}.
#' }
#' \subsection{\code{is_firm}}{
#' \code{is_firm} returns \code{TRUE} if \code{x} is a firmly applied
#' function (i.e., has class \code{"firm_closure"}), and \code{FALSE},
#' otherwise.
#' }
#'
#' @seealso \code{firmly} is enhanced by a number of helper functions:
#' \itemize{
#' \item To verify that a check formula is syntactically correct, use the
#' predicates \code{\link{is_check_formula}}, \code{\link{is_checklist}}.
#' \item To make custom check-formula generators, use
#' \code{\link{localize}}.
#' \item Pre-made check-formula generators are provided to facilitate
#' argument checks for \link[=type-checkers]{types},
#' \link[=scalar-checkers]{scalar objects}, and
#' \link[=misc-checkers]{other} common data structures and input
#' assumptions. These functions are prefixed by \code{vld_}, for
#' convenient browsing and look-up in editors and IDE's that support name
#' completion.
#' \item To access the components of a firmly applied function, use
#' \code{\link{firm_core}}, \code{\link{firm_checks}},
#' \code{\link{firm_error}}, \code{\link{firm_args}}, (or simply
#' \code{\link[base]{print}} the function to display its components).
#' }
#'
#' @examples
#' \dontrun{
#'
#' dlog <- function(x, h) (log(x + h) - log(x)) / h
#'
#' # Require all arguments to be numeric (auto-generated error message)
#' dlog_fm <- firmly(dlog, ~is.numeric)
#' dlog_fm(1, .1) # [1] 0.9531018
#' dlog_fm("1", .1) # Error: "FALSE: is.numeric(x)"
#'
#' # Require all arguments to be numeric (custom error message)
#' dlog_fm <- firmly(dlog, "Not numeric" ~ is.numeric)
#' dlog_fm("1", .1) # Error: "Not numeric: `x`"
#'
#' # Alternatively, "globalize" a localized checker (see ?localize, ?globalize)
#' dlog_fm <- firmly(dlog, globalize(vld_numeric))
#' dlog_fm("1", .1) # Error: "Not double/integer: `x`"
#'
#' # Predicate functions can be specified anonymously or by name
#' dlog_fm <- firmly(dlog, list(~x, ~x + h, ~abs(h)) ~ function(x) x > 0)
#' dlog_fm <- firmly(dlog, list(~x, ~x + h, ~abs(h)) ~ {. > 0})
#' is_positive <- function(x) x > 0
#' dlog_fm <- firmly(dlog, list(~x, ~x + h, ~abs(h)) ~ is_positive)
#' dlog_fm(1, 0) # Error: "FALSE: is_positive(abs(h))"
#'
#' # Describe checks individually using custom error messages
#' dlog_fm <-
#' firmly(dlog,
#' list("x not positive" ~ x, ~x + h, "Division by 0 (=h)" ~ abs(h)) ~
#' is_positive)
#' dlog_fm(-1, 0)
#' # Errors: "x not positive", "FALSE: is_positive(x + h)", "Division by 0 (=h)"
#'
#' # Specify checks more succinctly by using a (localized) custom checker
#' req_positive <- localize("Not positive" ~ is_positive)
#' dlog_fm <- firmly(dlog, req_positive(~x, ~x + h, ~abs(h)))
#' dlog_fm(1, 0) # Error: "Not positive: abs(h)"
#'
#' # Combine multiple checks
#' dlog_fm <- firmly(dlog,
#' "Not numeric" ~ is.numeric,
#' list(~x, ~x + h, "Division by 0" ~ abs(h)) ~ {. > 0})
#' dlog_fm("1", 0) # Errors: "Not numeric: `x`", check-eval error, "Division by 0"
#'
#' # Any check can be expressed using isTRUE
#' err_msg <- "x, h differ in length"
#' dlog_fm <- firmly(dlog, list(err_msg ~ length(x) - length(h)) ~ {. == 0L})
#' dlog_fm(1:2, 0:2) # Error: "x, h differ in length"
#' dlog_fm <- firmly(dlog, list(err_msg ~ length(x) == length(h)) ~ isTRUE)
#' dlog_fm(1:2, 0:2) # Error: "x, h differ in length"
#'
#' # More succinctly, use vld_true
#' dlog_fm <- firmly(dlog, vld_true(~length(x) == length(h), ~all(abs(h) > 0)))
#' dlog_fm(1:2, 0:2)
#' # Errors: "Not TRUE: length(x) == length(h)", "Not TRUE: all(abs(h) > 0)"
#'
#' dlog_fm(1:2, 1:2) # [1] 0.6931472 0.3465736
#'
#' # loosely recovers the underlying function
#' identical(loosely(dlog_fm), dlog) # [1] TRUE
#'
#' # Use .warn_missing when you want to ensure an argument is explicitly given
#' # (see vignette("valaddin") for an elaboration of this particular example)
#' as_POSIXct <- firmly(as.POSIXct, .warn_missing = "tz")
#' Sys.setenv(TZ = "EST")
#' as_POSIXct("2017-01-01 03:14:16") # [1] "2017-01-01 03:14:16 EST"
#' # Warning: "Argument(s) expected ... `tz`"
#' as_POSIXct("2017-01-01 03:14:16", tz = "UTC") # [1] "2017-01-01 03:14:16 UTC"
#' loosely(as_POSIXct)("2017-01-01 03:14:16") # [1] "2017-01-01 03:14:16 EST"
#'
#' # Use firmly to constrain undesirable behavior, e.g., long-running computations
#' fib <- function(n) {
#' if (n <= 1L) return(1L)
#' Recall(n - 1) + Recall(n - 2)
#' }
#' fib <- firmly(fib, list("`n` capped at 30" ~ ceiling(n)) ~ {. <= 30L})
#' fib(21) # [1] 17711 (NB: Validation done only once, not for every recursive call)
#' fib(31) # Error: `n` capped at 30
#'
#' # Apply fib unrestricted
#' loosely(fib)(31) # [1] 2178309 (may take several seconds to finish)
#'
#' # firmly won't force an argument that's not involved in checks
#' g <- firmly(function(x, y) "Pass", list(~x) ~ is.character)
#' g(c("a", "b"), stop("Not signaled")) # [1] "Pass"
#'
#' # In scripts and packages, it is recommended to use the operator %checkin%
#' vec_add <- list(
#' ~is.numeric,
#' list(~length(x) == length(y)) ~ isTRUE,
#' .error_class = "inputError"
#' ) %checkin%
#' function(x, y) {
#' x + y
#' }
#'
#' # Or call firmly with .f explicitly assigned to the function
#' vec_add2 <- firmly(
#' ~is.numeric,
#' list(~length(x) == length(y)) ~ isTRUE,
#' .f = function(x, y) {
#' x + y
#' },
#' .error_class = "inputError"
#' )
#'
#' all.equal(vec_add, vec_add2) # [1] TRUE
#' }
#'
#' @name firmly
NULL
|
/scratch/gouwar.j/cran-all/cranData/valaddin/R/firmly.R
|
f_evaluator <- function(fexpr) {
force(fexpr)
function(f) {
if (!lazyeval::is_formula(f)) {
stop_wo_call("`f` is not a formula")
}
eval(fexpr(f), lazyeval::f_env(f))
}
}
# Simplified f_lhs, f_rhs, independent of lazyeval::f_new
f_eval_lhs <- f_evaluator(lazyeval::f_lhs)
f_eval_rhs <- f_evaluator(lazyeval::f_rhs)
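# Illustrative usage (a sketch, not part of the original source):
#   f_eval_rhs(~is.numeric)                 # the function is.numeric
#   f_eval_lhs("Not numeric" ~ is.numeric)  # the string "Not numeric"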
|
/scratch/gouwar.j/cran-all/cranData/valaddin/R/formulas.R
|
`%>%` <- local({
as_curried_fn <- function(expr, env) {
if (!is.call(expr))
return(match.fun(expr))
if (no_dot_arg(expr))
expr <- curry_args(expr)
lambda(body = expr, env)
}
lambda <- function(body, env) {
expr_fn <- bquote(function(.) .(body))
f <- eval(expr_fn)
environment(f) <- env
f
}
no_dot_arg <- function(call) {
all(args(call) != quote(.))
}
curry_args <- function(call) {
call_dot <- c(call[[1]], quote(.), args(call))
as.call(call_dot)
}
args <- function(call) {
as.list(call)[-1]
}
function(lhs, rhs) {
expr <- substitute(rhs)
f <- as_curried_fn(expr, parent.frame())
f(lhs)
}
})
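# Illustrative usage of this local pipe (a sketch, not from the original source):
#   c(1, 4, 9) %>% sqrt    # function by name:         sqrt(c(1, 4, 9))
#   4 %>% {. + 1}          # anonymous body in braces: 5
#   2 %>% log(base = 2)    # call without `.` curries: log(2, base = 2)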
|
/scratch/gouwar.j/cran-all/cranData/valaddin/R/pipe.R
|
# Call signature of a function (specified by name)
call_sig <- function(x, width) {
stopifnot(is.character(x))
is_op <- grepl("^%.+%$", x)
x[ is_op] <- vapply(x[ is_op], call_sig_op, character(1), USE.NAMES = FALSE)
x[!is_op] <- vapply(x[!is_op], call_sig_fn, character(1), USE.NAMES = FALSE,
width = width)
x
}
call_sig_fn <- function(nm, width) {
# Allowed range of values for width.cutoff parameter of deparse()
stopifnot(width >= 20L, width <= 500L)
sig <- formals(get(nm, mode = "function"))
expr <- deparse(call("function", sig, quote(expr = ))) %>%
paste(collapse = "") %>%
sub("^function", nm, .) %>%
{parse(text = ., keep.source = FALSE)[[1L]]}
indent <- paste(rep(" ", nchar(nm)), collapse = "")
paste(deparse_lines(expr, indent, width), collapse = "\n")
}
# The inaptly named "width.cutoff" of deparse() is a _lower_ bound for lengths
deparse_lines <- function(expr, indent, width) {
w <- width
exceed_width <- TRUE
while (exceed_width) {
x <- deparse_reindent(expr, indent, w)
exceed_width <- any(vapply(x, nchar, integer(1)) > width)
w <- w - 1L
}
x
}
deparse_reindent <- function(expr, indent, width) {
expr %>%
deparse(width.cutoff = width) %>%
trimws(which = "both") %>%
{`[<-`(., -1L, value = paste(indent, .[-1L]))}
}
call_sig_op <- function(nm) {
op <- get(nm, mode = "function")
args <- names(formals(op))
nm_esc_pct <- gsub("%", "\\\\%", nm)
paste(args[1L], nm_esc_pct, args[2L])
}
# Convert a string-valued function into a vectorized function that joins strings
vec_strjoin <- function(f_str, join = "\n") {
force(f_str)
force(join)
function(x)
paste(vapply(x, f_str, character(1)), collapse = join)
}
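# Illustrative usage (a sketch, not from the original source):
#   vec_strjoin(toupper)(c("a", "b"))  # "A\nB"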
# Make a function that makes raw Rd markup
rd_markup <- function(cmd, join = "", sep = "") {
force(join)
force(sep)
cmd_opening <- paste0("\\", cmd, "{")
function(x) {
stopifnot(is.character(x))
paste(cmd_opening, paste(x, collapse = join), "}", sep = sep)
}
}
#' Make raw Rd markup
#'
#' @param x Character vector of object names.
#' @name rd_markup
#' @noRd
NULL
#' @rdname rd_markup
#' @examples
#' rd_alias(c("firmly", "loosely"))
#' @noRd
rd_alias <- vec_strjoin(rd_markup("alias"))
#' @rdname rd_markup
#' @param width Width cutoff attempt, cf. \code{\link{deparse}}.
#' @examples
#' rd_usage("ls")
#' rd_usage(c("firmly", "loosely"))
#' @noRd
rd_usage <- function(x, width = 80L) {
rd_markup("usage", join = "\n\n", sep = "\n")(call_sig(x, width))
}
#' @rdname rd_markup
#' @examples
#' rd_seealso("Logarithm: \\code{\\link{log}}, \\code{\\link[utils]{head}}")
#' @noRd
rd_seealso <- rd_markup("seealso", join = "\n\n", sep = "\n")
|
/scratch/gouwar.j/cran-all/cranData/valaddin/R/rawrd.R
|
#' Generate input-validation checks
#'
#' \code{localize} derives a function that \emph{generates} check formulae of
#' local scope from a check formula of global scope. \code{globalize} takes such
#' a check-formula generator and returns the underlying global check formula.
#' These operations are mutually invertible.
#'
#' @seealso The notion of \dQuote{scope} is explained in the \emph{Check
#' Formulae} section of \link{firmly}.
#'
#' Ready-made checkers for \link[=type-checkers]{types},
#' \link[=scalar-checkers]{scalar objects}, and
#' \link[=misc-checkers]{miscellaneous predicates} are provided as a
#' convenience, and as a model for creating families of check makers.
#' @examples
#' chk_pos_gbl <- "Not positive" ~ {. > 0}
#' chk_pos_lcl <- localize(chk_pos_gbl)
#' chk_pos_lcl(~x, "y not greater than x" ~ x - y)
#' # list("Not positive: x" ~ x, "y not greater than x" ~ x - y) ~ {. > 0}
#'
#' # localize and globalize are mutual inverses
#' identical(globalize(localize(chk_pos_gbl)), chk_pos_gbl) # [1] TRUE
#' all.equal(localize(globalize(chk_pos_lcl)), chk_pos_lcl) # [1] TRUE
#'
#' \dontrun{
#'
#' pass <- function(x, y) "Pass"
#'
#' # Impose local positivity checks
#' f <- firmly(pass, chk_pos_lcl(~x, "y not greater than x" ~ x - y))
#' f(2, 1) # [1] "Pass"
#' f(2, 2) # Error: "y not greater than x"
#' f(0, 1) # Errors: "Not positive: x", "y not greater than x"
#'
#' # Or just check positivity of x
#' g <- firmly(pass, chk_pos_lcl(~x))
#' g(1, 0) # [1] "Pass"
#' g(0, 0) # Error: "Not positive: x"
#'
#' # In contrast, chk_pos_gbl checks positivity for all arguments
#' h <- firmly(pass, chk_pos_gbl)
#' h(2, 2) # [1] "Pass"
#' h(1, 0) # Error: "Not positive: `y`"
#' h(0, 0) # Errors: "Not positive: `x`", "Not positive: `y`"
#'
#' # Alternatively, globalize the localized checker
#' h2 <- firmly(pass, globalize(chk_pos_lcl))
#' all.equal(h, h2) # [1] TRUE
#'
#' # Use localize to make parameterized checkers
#' chk_lte <- function(n, ...) {
#' err_msg <- paste("Not <=", as.character(n))
#' localize(err_msg ~ {. <= n})(...)
#' }
#' fib <- function(n) {
#' if (n <= 1L) return(1L)
#' Recall(n - 1) + Recall(n - 2)
#' }
#' capped_fib <- firmly(fib, chk_lte(30, ~ ceiling(n)))
#' capped_fib(19) # [1] 6765
#' capped_fib(31) # Error: "Not <= 30: ceiling(n)"
#' }
#'
#' @name input-validators
NULL
is_check_maker <- function(x) {
is_closure(x) && inherits(x, "check_maker")
}
#' @rdname input-validators
#' @export
#' @param chk Check formula of global scope \emph{with} custom error message,
#' i.e., a formula of the form \code{<string> ~ <predicate>}.
#' @return \code{localize} returns a function of class \code{"check_maker"} and
#' call signature \code{function(...)}:
#' \itemize{
#' \item The \code{\dots} are \strong{check items} (see \emph{Check Formulae
#' of Local Scope} in the documentation page \link{firmly}).
#' \item The return value is the check formula of local scope whose scope is
#' comprised of these check items, and whose predicate function is that of
#' \code{chk} (i.e., the right-hand side of \code{chk}). Unless a check
#' item has its own error message, the error message is derived from that
#' of \code{chk} (i.e., the left-hand side of \code{chk}).
#' }
localize <- list(
list("`chk` must be a formula of the form <string> ~ <predicate>" ~ chk) ~
is_gbl_check_formula
) %checkin%
function(chk) {
.msg <- f_eval_lhs(chk)
.rhs <- lazyeval::f_rhs(chk)
.env <- lazyeval::f_env(chk)
chkr <- function(...) {
fs <- list(...)
not_check_expr <- vapply(fs, Negate(is_check_expr), logical(1))
if (any(not_check_expr)) {
args <- paste(
vapply(fs[not_check_expr], deparse_collapse, character(1)),
collapse = ", "
)
stop_wo_call("LHS of formula(e) not empty or string: ", args)
}
wo_msg <- vapply(fs, is_onesided, logical(1))
fs[wo_msg] <- lapply(fs[wo_msg], function(f) {
lazyeval::f_lhs(f) <- paste(.msg, deparse_collapse(lazyeval::f_rhs(f)), sep = ": ")
f
})
lazyeval::f_new(.rhs, fs, .env)
}
structure(chkr, class = c("check_maker", class(chkr)))
}
#' @rdname input-validators
#' @export
#' @param chkr Function of class \code{"check_maker"}, i.e., a function created
#' by \code{localize}.
#' @return \code{globalize} returns the global-scope check formula from which
#' the function \code{chkr} is derived.
globalize <- list(
list("`chkr` must be a local checker function (see ?localize)" ~ chkr) ~
is_check_maker
) %checkin%
function(chkr) {
environment(chkr)$chk
}
#' @export
print.check_maker <- function(x, ...) {
env <- environment(x)
p <- env$.rhs
cat("<check_maker>\n")
cat("\n* Predicate function:\n")
if (is_lambda(p)) {
cat(deparse_collapse(express_lambda(p)), "\n")
} else {
print.default(p)
}
cat("\n* Error message:\n")
cat(encodeString(env$.msg, quote = "\""), "\n")
}
|
/scratch/gouwar.j/cran-all/cranData/valaddin/R/scope.R
|
# Empty-default operator
`%||%` <- function(x, y) {
if (is.null(x) || length(x) == 0L) y else x
}
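# e.g., character(0) %||% "none" yields "none"; "x" %||% "none" yields "x"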
# Hush `R CMD check` note (confused by `.` in pipes)
. <- NULL
is_true <- isTRUE
is_false <- function(x) identical(FALSE, x)
stop_wo_call <- function(...) stop(..., call. = FALSE)
warning_wo_call <- function(...) warning(..., call. = FALSE)
# Deparse a language object as a single string
deparse_collapse <- function(x) {
d <- deparse(x)
if (length(d) > 1L) {
paste(trimws(gsub("\\s+", " ", d), which = "left"), collapse = "")
} else {
d
}
}
# Typically used to list symbols, such as function argument names
quote_collapse <- function(xs) {
paste(encodeString(xs, quote = "`"), collapse = ", ")
}
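# e.g., quote_collapse(c("x", "y")) yields "`x`, `y`"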
# Collapse a character vector into an enumerated string
enumerate_many <- function(x, many = 2L) {
if (length(x) >= many) {
paste(
vapply(seq_along(x), function(i) sprintf("%d) %s\n", i, x[[i]]),
character(1)),
collapse = ""
)
} else {
paste0(x, "\n")
}
}
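# Illustrative usage (a sketch, not from the original source):
#   enumerate_many("a")          # "a\n" (a single item is not numbered)
#   enumerate_many(c("a", "b"))  # "1) a\n2) b\n"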
|
/scratch/gouwar.j/cran-all/cranData/valaddin/R/utils.R
|
#' valaddin: Functional Input Validation
#'
#' \emph{valaddin} provides a functional operator, \code{\link{firmly}}, that
#' enhances functions with input validation. You supply a function \code{f}
#' along with input validation requirements, and \code{firmly} returns a
#' function that applies \code{f} \dQuote{firmly}: before a call to \code{f} is
#' attempted, its inputs are checked, and if any check fails, an error halts
#' further execution with a message tabulating every failing check. Because
#' \code{firmly} implements input validation by operating on whole functions
#' rather than values, it is suitable for both programming and interactive use.
#' \cr\cr
#' Using \code{firmly} to add input validation to your functions improves the
#' legibility, reusability, and reliability of your code:
#' \itemize{
#' \item Emphasize the core logic of your functions by excising validation
#' boilerplate.
#' \item Reduce duplication by reusing common checks across functions with
#' common input requirements.
#' \item Make function outputs more predictable by constraining their inputs.
#' \item Vary the strictness of a function according to need and circumstance.
#' }
#'
#' @details For an example-oriented overview of valaddin, see
#' \code{vignette("valaddin")}.
#'
#' @keywords internal
"_PACKAGE"
## usethis namespace: start
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/valaddin/R/valaddin-package.R
|
#' Validate objects
#'
#' @name validate
#' @examples
#' \dontrun{
#' library(magrittr)
#'
#' # Valid assertions: data frame returned (invisibly)
#' mtcars %>%
#' validate(
#' vld_all(~sapply(., is.numeric)),
#' ~{nrow(.) > 10},
#' vld_all(~c("mpg", "cyl") %in% names(.))
#' )
#'
#' # Invalid assertions: error raised
#' mtcars %>%
#' validate(
#' vld_all(~sapply(., is.numeric)),
#' ~{nrow(.) > 1000},
#' vld_all(~c("mpg", "cylinders") %in% names(.))
#' )
#' }
NULL
validator <- function(..., .checklist = list(), .error_class = character()) {
firmly_(function(.) invisible(.),
..., .checklist = .checklist, .error_class = .error_class)
}
#' @rdname validate
#' @param . Object to validate.
#' @param \dots Input-validation check formula(e).
#' @param .checklist List of check formulae. (These are combined with check
#' formulae provided via \code{\dots}.)
#' @param .error_class Subclass of the error condition to be raised when an
#' input validation error occurs (character).
#' @export
validate <- function(., ..., .checklist = list(),
.error_class = "validationError") {
# Assign validator to `validate` to get appropriate call in error message
validate <-
validator(..., .checklist = .checklist, .error_class = .error_class)
validate(.)
}
#' @rdname validate
#' @param .f Interpreted function, i.e., closure.
#' @param .checks List of check formulae, optionally containing a character
#' vector named \code{.error_class}, corresponding to the similarly named
#' argument.
#' @export
`%checkout%` <- function(.f, .checks) {
force(.f)
validate_out <- local({
nms <- names(.checks) %||% character(length(.checks))
chk <- .checks[nms != ".error_class"]
err <- .checks[[".error_class"]] %||% character()
validator(.checklist = chk, .error_class = err)
})
`formals<-`(
function () {
encl <- parent.env(environment())
encl$validate_out(eval.parent(`[[<-`(match.call(), 1L, encl$.f)))
},
value = formals(.f)
)
}
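# Illustrative sketch of %checkout% (hypothetical names, not from the source):
#   sgn <- function(x) if (x >= 0) "non-negative" else -1
#   sgn_chk <- sgn %checkout% list("Output not character" ~ is.character)
#   sgn_chk(1)   # "non-negative" -- the output passes the check
#   sgn_chk(-1)  # validation error: the output (-1) is not a character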
|
/scratch/gouwar.j/cran-all/cranData/valaddin/R/validate.R
|
## ---- include = FALSE---------------------------------------------------------
library(valaddin)
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
## -----------------------------------------------------------------------------
f <- function(x, h) (sin(x + h) - sin(x)) / h
## -----------------------------------------------------------------------------
ff <- firmly(f, ~is.numeric)
## ---- include = FALSE---------------------------------------------------------
tz_original <- Sys.getenv("TZ", unset = NA)
## -----------------------------------------------------------------------------
Sys.setenv(TZ = "CET")
(d <- as.POSIXct("2017-01-01 09:30:00"))
## -----------------------------------------------------------------------------
as.POSIXlt(d, tz = "EST")$hour
## -----------------------------------------------------------------------------
Sys.setenv(TZ = "EST")
d <- as.POSIXct("2017-01-01 09:30:00")
as.POSIXlt(d, tz = "EST")$hour
## ---- include = FALSE---------------------------------------------------------
if (isTRUE(is.na(tz_original))) {
Sys.unsetenv("TZ")
} else {
Sys.setenv(TZ = tz_original)
}
## -----------------------------------------------------------------------------
as.POSIXct <- firmly(as.POSIXct, .warn_missing = "tz")
## -----------------------------------------------------------------------------
as.POSIXct("2017-01-01 09:30:00")
as.POSIXct("2017-01-01 09:30:00", tz = "CET")
## -----------------------------------------------------------------------------
loosely(as.POSIXct)("2017-01-01 09:30:00")
identical(loosely(as.POSIXct), base::as.POSIXct)
## -----------------------------------------------------------------------------
w <- {set.seed(1); rnorm(5)}
ifelse(w > 0, w, 0)
## -----------------------------------------------------------------------------
z <- rep(1, 6)
pos <- 1:5
neg <- -6:-1
ifelse(z > 0, pos, neg)
## -----------------------------------------------------------------------------
chk_length_type <- list(
"'yes', 'no' differ in length" ~ length(yes) == length(no),
"'yes', 'no' differ in type" ~ typeof(yes) == typeof(no)
) ~ isTRUE
ifelse_f <- firmly(ifelse, chk_length_type)
## -----------------------------------------------------------------------------
deposit <- function(account, value) {
if (is_student(account)) {
account$fees <- 0
}
account$balance <- account$balance + value
account
}
is_student <- function(account) {
if (isTRUE(account$is_student)) TRUE else FALSE
}
## -----------------------------------------------------------------------------
bobs_acct <- list(balance = 10, fees = 3, is_student = FALSE)
## -----------------------------------------------------------------------------
deposit(bobs_acct, bobs_acct$fees)$balance
## -----------------------------------------------------------------------------
bobs_acct$is_student <- TRUE
## -----------------------------------------------------------------------------
bobs_acct <- list2env(bobs_acct)
## -----------------------------------------------------------------------------
deposit(bobs_acct, bobs_acct$fees)$balance
## -----------------------------------------------------------------------------
err_msg <- "`account` should not be an environment"
deposit <- firmly(deposit, list(err_msg ~ account) ~ Negate(is.environment))
## ---- eval = FALSE------------------------------------------------------------
# x <- "An expensive object"
# save(x, file = "my-precious.rda")
#
# x <- "Oops! A bug or lapse has tarnished your expensive object"
#
# # Many computations later, you again save x, oblivious to the accident ...
# save(x, file = "my-precious.rda")
## -----------------------------------------------------------------------------
# Argument `gear` is a list with components:
# fun: Function name
# ns : Namespace of `fun`
# chk: Formula that specifies input checks
hardhat <- function(gear, env = .GlobalEnv) {
for (. in gear) {
safe_fun <- firmly(getFromNamespace(.$fun, .$ns), .$chk)
assign(.$fun, safe_fun, envir = env)
}
}
## -----------------------------------------------------------------------------
protection <- list(
list(
fun = "save",
ns = "base",
chk = list("Won't overwrite `file`" ~ file) ~ Negate(file.exists)
),
list(
fun = "load",
ns = "base",
chk = list("Won't load objects into current environment" ~ envir) ~
{!identical(., parent.frame(2))}
)
)
## -----------------------------------------------------------------------------
hardhat(protection)
## ---- eval = FALSE------------------------------------------------------------
# x <- "An expensive object"
# save(x, file = "my-precious.rda")
#
# x <- "Oops! A bug or lapse has tarnished your expensive object"
# #> Error: save(x, file = "my-precious.rda")
# #> Won't overwrite `file`
#
# save(x, file = "my-precious.rda")
#
# # Inspecting x, you notice it's changed, so you try to retrieve the original ...
# x
# #> [1] "Oops! A bug or lapse has tarnished your expensive object"
# load("my-precious.rda")
# #> Error: load(file = "my-precious.rda")
# #> Won't load objects into current environment
#
# # Keep calm and carry on
# loosely(load)("my-precious.rda")
#
# x
# #> [1] "An expensive object"
|
/scratch/gouwar.j/cran-all/cranData/valaddin/inst/doc/valaddin.R
|
---
title: "Using valaddin"
author: "Eugene Ha"
date: "2017-08-10"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Using valaddin}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
library(valaddin)
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
```
_valaddin_ is a lightweight R package that enables you to transform an existing
function into a function with input validation checks. It does so without
requiring you to modify the body of the function, in contrast to doing input
validation using `stop` or `stopifnot`, and is therefore suitable for both
programmatic and interactive use.
This document illustrates the use of valaddin, by example. For usage details,
see the main documentation page, `?firmly`.
## Use cases
The workhorse of valaddin is the function `firmly`, which applies input
validation to a function, _in situ_. It can be used to:
### Enforce types for arguments
For example, to require that all arguments of the function
```{r}
f <- function(x, h) (sin(x + h) - sin(x)) / h
```
are numerical, apply `firmly` with the check formula `~is.numeric`[^1]:
```{r}
ff <- firmly(f, ~is.numeric)
```
`ff` behaves just like `f`, but with a constraint on the type of its arguments:
```{r, error = TRUE, purl = FALSE}
ff(0.0, 0.1)
ff("0.0", 0.1)
```
[^1]: The inspiration to use `~` as a quoting operator came from the vignette
[Non-standard
evaluation](https://cran.r-project.org/package=lazyeval/vignettes/lazyeval.html),
by Hadley Wickham.
### Enforce constraints on argument values
For example, use `firmly` to put a cap on potentially long-running computations:
```{r, error = TRUE, purl = FALSE}
fib <- function(n) {
if (n <= 1L) return(1L)
Recall(n - 1L) + Recall(n - 2L)
}
capped_fib <- firmly(fib, list("n capped at 30" ~ ceiling(n)) ~ {. <= 30L})
capped_fib(10)
capped_fib(50)
```
The role of each part of the value-constraining formula is evident:
- The right-hand side `{. <= 30L}` is the constraint itself, expressed as a
condition on `.`, a placeholder argument.
- The left-hand side `list("n capped at 30" ~ ceiling(n))` specifies the
expression for the placeholder, namely `ceiling(n)`, along with a message to
produce if the constraint is violated.
### Warn about pitfalls
If the default behavior of a function is problematic, or unexpected, you can use
`firmly` to warn you. Consider the function `as.POSIXct`, which creates a
date-time object:
```{r, include = FALSE}
tz_original <- Sys.getenv("TZ", unset = NA)
```
```{r}
Sys.setenv(TZ = "CET")
(d <- as.POSIXct("2017-01-01 09:30:00"))
```
The problem is that `d` is a potentially _ambiguous_ object (with hidden state),
because it's not assigned a time zone, explicitly. If you compute the local hour
of `d` using `as.POSIXlt`, you get an answer that interprets `d` according to
your current time zone; another user—or you, in another country, in the
future—may get a different result.
- If you're in CET time zone:
```{r}
as.POSIXlt(d, tz = "EST")$hour
```
- If you were to change to EST time zone and rerun the code:
```{r}
Sys.setenv(TZ = "EST")
d <- as.POSIXct("2017-01-01 09:30:00")
as.POSIXlt(d, tz = "EST")$hour
```
```{r, include = FALSE}
if (isTRUE(is.na(tz_original))) {
Sys.unsetenv("TZ")
} else {
Sys.setenv(TZ = tz_original)
}
```
To warn yourself about this pitfall, you can modify `as.POSIXct` to complain
when you've forgotten to specify a time zone:
```{r}
as.POSIXct <- firmly(as.POSIXct, .warn_missing = "tz")
```
Now when you call `as.POSIXct`, you get a cautionary reminder:
```{r}
as.POSIXct("2017-01-01 09:30:00")
as.POSIXct("2017-01-01 09:30:00", tz = "CET")
```
**NB**: The missing-argument warning is implemented by wrapping functions. The
underlying function `base::as.POSIXct` is called _unmodified_.
#### Use `loosely` to access the original function
Though reassigning `as.POSIXct` may seem risky, it is not, for the behavior is
unchanged (aside from the extra precaution), and the original `as.POSIXct`
remains accessible:
- With a namespace prefix: `base::as.POSIXct`
- By applying `loosely` to strip input validation: `loosely(as.POSIXct)`
```{r}
loosely(as.POSIXct)("2017-01-01 09:30:00")
identical(loosely(as.POSIXct), base::as.POSIXct)
```
### Decline handouts
R tries to help you express your ideas as concisely as possible. Suppose you
want to truncate negative values of a vector `w`:
```{r}
w <- {set.seed(1); rnorm(5)}
ifelse(w > 0, w, 0)
```
`ifelse` assumes (correctly) that you intend the `0` to be repeated `r length(w)`
times, and does that for you, automatically.
Nonetheless, R's good intentions have a darker side:
```{r}
z <- rep(1, 6)
pos <- 1:5
neg <- -6:-1
ifelse(z > 0, pos, neg)
```
This smells like a coding error. Instead of complaining that `pos` is too short,
`ifelse` recycles it to line it up with `z`. The result is probably not what you
wanted.
In this case, you don't need a helping hand, but rather a firm one:
```{r}
chk_length_type <- list(
"'yes', 'no' differ in length" ~ length(yes) == length(no),
"'yes', 'no' differ in type" ~ typeof(yes) == typeof(no)
) ~ isTRUE
ifelse_f <- firmly(ifelse, chk_length_type)
```
`ifelse_f` is more pedantic than `ifelse`. But it also spares you the
consequences of invalid inputs:
```{r, error = TRUE, purl = FALSE}
ifelse_f(w > 0, w, 0)
ifelse_f(w > 0, w, rep(0, length(w)))
ifelse(z > 0, pos, neg)
ifelse_f(z > 0, pos, neg)
ifelse(z > 0, as.character(pos), neg)
ifelse_f(z > 0, as.character(pos), neg)
```
### Reduce the risks of lazy evaluation
When R makes a function call, say, `f(a)`, the _value_ of the argument `a` is not
materialized in the body of `f` until it is actually needed. Usually, you can
safely ignore this as a technicality of R's evaluation model; but in some
situations, it can be problematic if you're not mindful of it.
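For instance, an argument that is never used is never evaluated at all, so even
an error lurking inside it never surfaces (a minimal illustration; the function
name is made up):
```{r, purl = FALSE}
ignore_arg <- function(a) "a is never touched"
ignore_arg(stop("Boom"))
```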
Consider a bank that waives fees for students. A function to make deposits
might look like this[^2]:
```{r}
deposit <- function(account, value) {
if (is_student(account)) {
account$fees <- 0
}
account$balance <- account$balance + value
account
}
is_student <- function(account) {
if (isTRUE(account$is_student)) TRUE else FALSE
}
```
Suppose Bob is an account holder, currently not in school:
```{r}
bobs_acct <- list(balance = 10, fees = 3, is_student = FALSE)
```
If Bob were to deposit an amount to cover a future fee payment, his account
balance would be updated to:
```{r}
deposit(bobs_acct, bobs_acct$fees)$balance
```
Bob goes back to school and informs the bank, so that his fees will be waived:
```{r}
bobs_acct$is_student <- TRUE
```
But now suppose that, somewhere in the bowels of the bank's software, the type
of Bob's account object is converted from a list to an environment:
```{r}
bobs_acct <- list2env(bobs_acct)
```
If Bob were to deposit an amount to cover a future fee payment, his account
balance would now be updated to:
```{r}
deposit(bobs_acct, bobs_acct$fees)$balance
```
Becoming a student has cost Bob money. What happened to the amount deposited?
The culprit is lazy evaluation and the modify-in-place semantics of
environments. In the call `deposit(account = bobs_acct, value = bobs_acct$fees)`,
the value of the argument `value` is only set when it's used, which comes after
the object `fees` in the environment `bobs_acct` has already been zeroed out.
To minimize such risks, forbid `account` from being an environment:
```{r}
err_msg <- "`account` should not be an environment"
deposit <- firmly(deposit, list(err_msg ~ account) ~ Negate(is.environment))
```
This makes Bob a happy customer, and reduces the bank's liability:
```{r, error = TRUE, purl = FALSE}
bobs_acct <- list2env(list(balance = 10, fees = 3, is_student = TRUE))
deposit(bobs_acct, bobs_acct$fees)$balance
deposit(as.list(bobs_acct), bobs_acct$fees)$balance
```
[^2]: Adapted from an example in Section 6.3 of Chambers, _Extending R_, CRC
Press, 2016. For the sake of the example, ignore the fact that logic to handle
fees does not belong in a function for deposits!
### Prevent self-inflicted wounds
You don't mean to shoot yourself, but sometimes it happens, nonetheless:
```{r, eval = FALSE}
x <- "An expensive object"
save(x, file = "my-precious.rda")
x <- "Oops! A bug or lapse has tarnished your expensive object"
# Many computations later, you again save x, oblivious to the accident ...
save(x, file = "my-precious.rda")
```
`firmly` can safeguard you from such mishaps: implement a safety procedure
```{r}
# Argument `gear` is a list with components:
# fun: Function name
# ns : Namespace of `fun`
# chk: Formula that specifies input checks
hardhat <- function(gear, env = .GlobalEnv) {
for (. in gear) {
safe_fun <- firmly(getFromNamespace(.$fun, .$ns), .$chk)
assign(.$fun, safe_fun, envir = env)
}
}
```
gather your safety gear
```{r}
protection <- list(
list(
fun = "save",
ns = "base",
chk = list("Won't overwrite `file`" ~ file) ~ Negate(file.exists)
),
list(
fun = "load",
ns = "base",
chk = list("Won't load objects into current environment" ~ envir) ~
{!identical(., parent.frame(2))}
)
)
```
then put it on
```{r}
hardhat(protection)
```
Now `save` and `load` engage safety features that prevent you from inadvertently
destroying your data:
```{r, eval = FALSE}
x <- "An expensive object"
save(x, file = "my-precious.rda")
x <- "Oops! A bug or lapse has tarnished your expensive object"
#> Error: save(x, file = "my-precious.rda")
#> Won't overwrite `file`
save(x, file = "my-precious.rda")
# Inspecting x, you notice it's changed, so you try to retrieve the original ...
x
#> [1] "Oops! A bug or lapse has tarnished your expensive object"
load("my-precious.rda")
#> Error: load(file = "my-precious.rda")
#> Won't load objects into current environment
# Keep calm and carry on
loosely(load)("my-precious.rda")
x
#> [1] "An expensive object"
```
**NB**: Input validation is implemented by wrapping functions; thus, if the
arguments are valid, the underlying functions `base::save`, `base::load` are
called _unmodified_.
## Toolbox of input checkers
_valaddin_ provides a collection of over 50 pre-made input checkers to
facilitate typical kinds of argument checks. These checkers are prefixed by
`vld_`, for convenient browsing and look-up in editors and IDE's that support
name completion.
For example, to create a type-checked version of the function `upper.tri`, which
returns an upper-triangular logical matrix, apply the checkers `vld_matrix`,
`vld_boolean` (here "boolean" is shorthand for "logical vector of length 1"):
```{r, error = TRUE, purl = FALSE}
upper_tri <- firmly(upper.tri, vld_matrix(~x), vld_boolean(~diag))
# upper.tri assumes you mean a vector to be a column matrix
upper.tri(1:2)
upper_tri(1:2)
# But say you actually meant (1, 2) to be a diagonal matrix
upper_tri(diag(1:2))
upper_tri(diag(1:2), diag = "true")
upper_tri(diag(1:2), TRUE)
```
### Check anything with `vld_true`
Any input validation can be expressed as an assertion that "such and such must
be true"; to apply it as such, use `vld_true` (or its complement, `vld_false`).
For example, the above hardening of `ifelse` can be redone as:
```{r, error = TRUE, purl = FALSE}
chk_length_type <- vld_true(
"'yes', 'no' differ in length" ~ length(yes) == length(no),
"'yes', 'no' differ in type" ~ typeof(yes) == typeof(no)
)
ifelse_f <- firmly(ifelse, chk_length_type)
z <- rep(1, 6)
pos <- 1:5
neg <- -6:-1
ifelse_f(z > 0, as.character(pos), neg)
ifelse_f(z > 0, c(pos, 6), neg)
ifelse_f(z > 0, c(pos, 6L), neg)
```
### Make your own input checker with `localize`
A check formula such as `~ is.numeric` (or `"Not number" ~ is.numeric`, if you
want a custom error message) imposes its condition "globally":
```{r, error = TRUE, purl = FALSE}
difference <- firmly(function(x, y) x - y, ~ is.numeric)
difference(3, 1)
difference(as.POSIXct("2017-01-01", "UTC"), as.POSIXct("2016-01-01", "UTC"))
```
With `localize`, you can concentrate a globally applied check formula to
specific expressions. The result is a _reusable_ custom checker:
```{r, error = TRUE, purl = FALSE}
chk_numeric <- localize("Not numeric" ~ is.numeric)
secant <- firmly(function(f, x, h) (f(x + h) - f(x)) / h, chk_numeric(~x, ~h))
secant(sin, 0, .1)
secant(sin, "0", .1)
```
(In fact, `chk_numeric` is equivalent to the pre-built checker `vld_numeric`.)
Conversely, apply `globalize` to impose your localized checker globally:
```{r, error = TRUE, purl = FALSE}
difference <- firmly(function(x, y) x - y, globalize(chk_numeric))
difference(3, 1)
difference(as.POSIXct("2017-01-01", "UTC"), as.POSIXct("2016-01-01", "UTC"))
```
## Related packages
### Packages that enhance valaddin
- [assertive](https://bitbucket.org/richierocks/assertive),
[assertthat](https://github.com/hadley/assertthat), and
[checkmate](https://github.com/mllg/checkmate) provide extensive collections
of predicate functions that you can use in conjunction with `firmly` and
`localize`.
- [ensurer](https://github.com/smbache/ensurer) and
[assertr](https://github.com/ropensci/assertr) provide ways to validate
function _values_.
### Other approaches to input validation
- [argufy](https://github.com/gaborcsardi/argufy) takes a different approach
to input validation, using [roxygen](https://github.com/r-lib/roxygen2)
comments to specify checks.
- [ensurer](https://github.com/smbache/ensurer) provides an experimental
replacement for `function` that builds functions with type-validated
arguments.
- [typeCheck](https://github.com/jimhester/typeCheck), together with
  [Types for R](https://github.com/jimhester/types), enables the creation of
functions with type-validated arguments by means of special type
annotations. This approach is orthogonal to that of valaddin: whereas
valaddin specifies input checks as _predicate functions with scope_
(predicates are primary), typeCheck specifies input checks as _arguments
with type_ (arguments are primary).
|
/scratch/gouwar.j/cran-all/cranData/valaddin/inst/doc/valaddin.Rmd
|
---
title: "Using valaddin"
author: "Eugene Ha"
date: "2017-08-10"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Using valaddin}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
library(valaddin)
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
```
_valaddin_ is a lightweight R package that enables you to transform an existing
function into a function with input validation checks. It does so without
requiring you to modify the body of the function, in contrast to doing input
validation using `stop` or `stopifnot`, and is therefore suitable for both
programmatic and interactive use.
This document illustrates the use of valaddin, by example. For usage details,
see the main documentation page, `?firmly`.
## Use cases
The workhorse of valaddin is the function `firmly`, which applies input
validation to a function, _in situ_. It can be used to:
### Enforce types for arguments
For example, to require that all arguments of the function
```{r}
f <- function(x, h) (sin(x + h) - sin(x)) / h
```
are numerical, apply `firmly` with the check formula `~is.numeric`[^1]:
```{r}
ff <- firmly(f, ~is.numeric)
```
`ff` behaves just like `f`, but with a constraint on the type of its arguments:
```{r, error = TRUE, purl = FALSE}
ff(0.0, 0.1)
ff("0.0", 0.1)
```
[^1]: The inspiration to use `~` as a quoting operator came from the vignette
[Non-standard
evaluation](https://cran.r-project.org/package=lazyeval/vignettes/lazyeval.html),
by Hadley Wickham.
### Enforce constraints on argument values
For example, use `firmly` to put a cap on potentially long-running computations:
```{r, error = TRUE, purl = FALSE}
fib <- function(n) {
if (n <= 1L) return(1L)
Recall(n - 1L) + Recall(n - 2L)
}
capped_fib <- firmly(fib, list("n capped at 30" ~ ceiling(n)) ~ {. <= 30L})
capped_fib(10)
capped_fib(50)
```
The role of each part of the value-constraining formula is evident:
- The right-hand side `{. <= 30L}` is the constraint itself, expressed as a
condition on `.`, a placeholder argument.
- The left-hand side `list("n capped at 30" ~ ceiling(n))` specifies the
expression for the placeholder, namely `ceiling(n)`, along with a message to
produce if the constraint is violated.
### Warn about pitfalls
If the default behavior of a function is problematic, or unexpected, you can use
`firmly` to warn you. Consider the function `as.POSIXct`, which creates a
date-time object:
```{r, include = FALSE}
tz_original <- Sys.getenv("TZ", unset = NA)
```
```{r}
Sys.setenv(TZ = "CET")
(d <- as.POSIXct("2017-01-01 09:30:00"))
```
The problem is that `d` is a potentially _ambiguous_ object (with hidden state),
because it's not assigned a time zone, explicitly. If you compute the local hour
of `d` using `as.POSIXlt`, you get an answer that interprets `d` according to
your current time zone; another user—or you, in another country, in the
future—may get a different result.
- If you're in CET time zone:
```{r}
as.POSIXlt(d, tz = "EST")$hour
```
- If you were to change to EST time zone and rerun the code:
```{r}
Sys.setenv(TZ = "EST")
d <- as.POSIXct("2017-01-01 09:30:00")
as.POSIXlt(d, tz = "EST")$hour
```
```{r, include = FALSE}
if (isTRUE(is.na(tz_original))) {
Sys.unsetenv("TZ")
} else {
Sys.setenv(TZ = tz_original)
}
```
To warn yourself about this pitfall, you can modify `as.POSIXct` to complain
when you've forgotten to specify a time zone:
```{r}
as.POSIXct <- firmly(as.POSIXct, .warn_missing = "tz")
```
Now when you call `as.POSIXct`, you get a cautionary reminder:
```{r}
as.POSIXct("2017-01-01 09:30:00")
as.POSIXct("2017-01-01 09:30:00", tz = "CET")
```
**NB**: The missing-argument warning is implemented by wrapping functions. The
underlying function `base::as.POSIXct` is called _unmodified_.
#### Use `loosely` to access the original function
Though reassigning `as.POSIXct` may seem risky, it is not, for the behavior is
unchanged (aside from the extra precaution), and the original `as.POSIXct`
remains accessible:
- With a namespace prefix: `base::as.POSIXct`
- By applying `loosely` to strip input validation: `loosely(as.POSIXct)`
```{r}
loosely(as.POSIXct)("2017-01-01 09:30:00")
identical(loosely(as.POSIXct), base::as.POSIXct)
```
### Decline handouts
R tries to help you express your ideas as concisely as possible. Suppose you
want to truncate negative values of a vector `w`:
```{r}
w <- {set.seed(1); rnorm(5)}
ifelse(w > 0, w, 0)
```
`ifelse` assumes (correctly) that you intend the `0` to be repeated `r length(w)`
times, and does that for you, automatically.
Nonetheless, R's good intentions have a darker side:
```{r}
z <- rep(1, 6)
pos <- 1:5
neg <- -6:-1
ifelse(z > 0, pos, neg)
```
This smells like a coding error. Instead of complaining that `pos` is too short,
`ifelse` recycles it to line it up with `z`. The result is probably not what you
wanted.
In this case, you don't need a helping hand, but rather a firm one:
```{r}
chk_length_type <- list(
"'yes', 'no' differ in length" ~ length(yes) == length(no),
"'yes', 'no' differ in type" ~ typeof(yes) == typeof(no)
) ~ isTRUE
ifelse_f <- firmly(ifelse, chk_length_type)
```
`ifelse_f` is more pedantic than `ifelse`. But it also spares you the
consequences of invalid inputs:
```{r, error = TRUE, purl = FALSE}
ifelse_f(w > 0, w, 0)
ifelse_f(w > 0, w, rep(0, length(w)))
ifelse(z > 0, pos, neg)
ifelse_f(z > 0, pos, neg)
ifelse(z > 0, as.character(pos), neg)
ifelse_f(z > 0, as.character(pos), neg)
```
### Reduce the risks of lazy evaluation
When R makes a function call, say, `f(a)`, the _value_ of the argument `a` is not
materialized in the body of `f` until it is actually needed. Usually, you can
safely ignore this as a technicality of R's evaluation model; but in some
situations, it can be problematic if you're not mindful of it.
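For instance, an argument that is never used is never evaluated at all, so even
an error lurking inside it never surfaces (a minimal illustration; the function
name is made up):
```{r, purl = FALSE}
ignore_arg <- function(a) "a is never touched"
ignore_arg(stop("Boom"))
```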
Consider a bank that waives fees for students. A function to make deposits
might look like this[^2]:
```{r}
deposit <- function(account, value) {
if (is_student(account)) {
account$fees <- 0
}
account$balance <- account$balance + value
account
}
is_student <- function(account) {
if (isTRUE(account$is_student)) TRUE else FALSE
}
```
Suppose Bob is an account holder, currently not in school:
```{r}
bobs_acct <- list(balance = 10, fees = 3, is_student = FALSE)
```
If Bob were to deposit an amount to cover a future fee payment, his account
balance would be updated to:
```{r}
deposit(bobs_acct, bobs_acct$fees)$balance
```
Bob goes back to school and informs the bank, so that his fees will be waived:
```{r}
bobs_acct$is_student <- TRUE
```
But now suppose that, somewhere in the bowels of the bank's software, the type
of Bob's account object is converted from a list to an environment:
```{r}
bobs_acct <- list2env(bobs_acct)
```
If Bob were to deposit an amount to cover a future fee payment, his account
balance would now be updated to:
```{r}
deposit(bobs_acct, bobs_acct$fees)$balance
```
Becoming a student has cost Bob money. What happened to the amount deposited?
The culprit is lazy evaluation and the modify-in-place semantics of
environments. In the call `deposit(account = bobs_acct, value = bobs_acct$fees)`,
the value of the argument `value` is only set when it's used, which comes after
the object `fees` in the environment `bobs_acct` has already been zeroed out.
To minimize such risks, forbid `account` from being an environment:
```{r}
err_msg <- "`account` should not be an environment"
deposit <- firmly(deposit, list(err_msg ~ account) ~ Negate(is.environment))
```
This makes Bob a happy customer, and reduces the bank's liability:
```{r, error = TRUE, purl = FALSE}
bobs_acct <- list2env(list(balance = 10, fees = 3, is_student = TRUE))
deposit(bobs_acct, bobs_acct$fees)$balance
deposit(as.list(bobs_acct), bobs_acct$fees)$balance
```
[^2]: Adapted from an example in Section 6.3 of Chambers, _Extending R_, CRC
Press, 2016. For the sake of the example, ignore the fact that logic to handle
fees does not belong in a function for deposits!
### Prevent self-inflicted wounds
You don't mean to shoot yourself, but sometimes it happens, nonetheless:
```{r, eval = FALSE}
x <- "An expensive object"
save(x, file = "my-precious.rda")
x <- "Oops! A bug or lapse has tarnished your expensive object"
# Many computations later, you again save x, oblivious to the accident ...
save(x, file = "my-precious.rda")
```
`firmly` can safeguard you from such mishaps: implement a safety procedure
```{r}
# Argument `gear` is a list with components:
# fun: Function name
# ns : Namespace of `fun`
# chk: Formula that specifies input checks
hardhat <- function(gear, env = .GlobalEnv) {
for (. in gear) {
safe_fun <- firmly(getFromNamespace(.$fun, .$ns), .$chk)
assign(.$fun, safe_fun, envir = env)
}
}
```
gather your safety gear
```{r}
protection <- list(
list(
fun = "save",
ns = "base",
chk = list("Won't overwrite `file`" ~ file) ~ Negate(file.exists)
),
list(
fun = "load",
ns = "base",
chk = list("Won't load objects into current environment" ~ envir) ~
{!identical(., parent.frame(2))}
)
)
```
then put it on
```{r}
hardhat(protection)
```
Now `save` and `load` engage safety features that prevent you from inadvertently
destroying your data:
```{r, eval = FALSE}
x <- "An expensive object"
save(x, file = "my-precious.rda")
x <- "Oops! A bug or lapse has tarnished your expensive object"
#> Error: save(x, file = "my-precious.rda")
#> Won't overwrite `file`
save(x, file = "my-precious.rda")
# Inspecting x, you notice it's changed, so you try to retrieve the original ...
x
#> [1] "Oops! A bug or lapse has tarnished your expensive object"
load("my-precious.rda")
#> Error: load(file = "my-precious.rda")
#> Won't load objects into current environment
# Keep calm and carry on
loosely(load)("my-precious.rda")
x
#> [1] "An expensive object"
```
**NB**: Input validation is implemented by wrapping functions; thus, if the
arguments are valid, the underlying functions `base::save`, `base::load` are
called _unmodified_.
## Toolbox of input checkers
_valaddin_ provides a collection of over 50 pre-made input checkers to
facilitate typical kinds of argument checks. These checkers are prefixed by
`vld_`, for convenient browsing and look-up in editors and IDE's that support
name completion.
For example, to create a type-checked version of the function `upper.tri`, which
returns an upper-triangular logical matrix, apply the checkers `vld_matrix`,
`vld_boolean` (here "boolean" is shorthand for "logical vector of length 1"):
```{r, error = TRUE, purl = FALSE}
upper_tri <- firmly(upper.tri, vld_matrix(~x), vld_boolean(~diag))
# upper.tri assumes you mean a vector to be a column matrix
upper.tri(1:2)
upper_tri(1:2)
# But say you actually meant (1, 2) to be a diagonal matrix
upper_tri(diag(1:2))
upper_tri(diag(1:2), diag = "true")
upper_tri(diag(1:2), TRUE)
```
### Check anything with `vld_true`
Any input validation can be expressed as an assertion that "such and such must
be true"; to apply it as such, use `vld_true` (or its complement, `vld_false`).
For example, the above hardening of `ifelse` can be redone as:
```{r, error = TRUE, purl = FALSE}
chk_length_type <- vld_true(
"'yes', 'no' differ in length" ~ length(yes) == length(no),
"'yes', 'no' differ in type" ~ typeof(yes) == typeof(no)
)
ifelse_f <- firmly(ifelse, chk_length_type)
z <- rep(1, 6)
pos <- 1:5
neg <- -6:-1
ifelse_f(z > 0, as.character(pos), neg)
ifelse_f(z > 0, c(pos, 6), neg)
ifelse_f(z > 0, c(pos, 6L), neg)
```
### Make your own input checker with `localize`
A check formula such as `~ is.numeric` (or `"Not number" ~ is.numeric`, if you
want a custom error message) imposes its condition "globally":
```{r, error = TRUE, purl = FALSE}
difference <- firmly(function(x, y) x - y, ~ is.numeric)
difference(3, 1)
difference(as.POSIXct("2017-01-01", "UTC"), as.POSIXct("2016-01-01", "UTC"))
```
With `localize`, you can concentrate a globally applied check formula to
specific expressions. The result is a _reusable_ custom checker:
```{r, error = TRUE, purl = FALSE}
chk_numeric <- localize("Not numeric" ~ is.numeric)
secant <- firmly(function(f, x, h) (f(x + h) - f(x)) / h, chk_numeric(~x, ~h))
secant(sin, 0, .1)
secant(sin, "0", .1)
```
(In fact, `chk_numeric` is equivalent to the pre-built checker `vld_numeric`.)
Conversely, apply `globalize` to impose your localized checker globally:
```{r, error = TRUE, purl = FALSE}
difference <- firmly(function(x, y) x - y, globalize(chk_numeric))
difference(3, 1)
difference(as.POSIXct("2017-01-01", "UTC"), as.POSIXct("2016-01-01", "UTC"))
```
## Related packages
### Packages that enhance valaddin
- [assertive](https://bitbucket.org/richierocks/assertive),
[assertthat](https://github.com/hadley/assertthat), and
[checkmate](https://github.com/mllg/checkmate) provide extensive collections
of predicate functions that you can use in conjunction with `firmly` and
`localize`.
- [ensurer](https://github.com/smbache/ensurer) and
[assertr](https://github.com/ropensci/assertr) provide ways to validate
function _values_.
### Other approaches to input validation
- [argufy](https://github.com/gaborcsardi/argufy) takes a different approach
to input validation, using [roxygen](https://github.com/r-lib/roxygen2)
comments to specify checks.
- [ensurer](https://github.com/smbache/ensurer) provides an experimental
replacement for `function` that builds functions with type-validated
arguments.
- [typeCheck](https://github.com/jimhester/typeCheck), together with
[Types for R](https://github.com/jimhester/types), enable the creation of
functions with type-validated arguments by means of special type
annotations. This approach is orthogonal to that of valaddin: whereas
valaddin specifies input checks as _predicate functions with scope_
(predicates are primary), typeCheck specifies input checks as _arguments
with type_ (arguments are primary).
|
/scratch/gouwar.j/cran-all/cranData/valaddin/vignettes/valaddin.Rmd
|
check.for.library <- function(lib.name = 'valection') {
return.flag <- TRUE;
if ('' == Sys.which(lib.name)) {
return.flag <- FALSE;
}
return(return.flag);
}
|
/scratch/gouwar.j/cran-all/cranData/valection/R/check.for.library.R
|
handle.missing.library <- function(lib.name = 'valection') {
lib.url <- '';
if (lib.name == 'valection') {
lib.url <- paste(
' It can be downloaded from',
'http://labs.oicr.on.ca/boutros-lab/software/valection'
);
}
stop(
paste0(
"You must install the ", lib.name, " library before using this package.",
lib.url
),
call. = FALSE
);
}
|
/scratch/gouwar.j/cran-all/cranData/valection/R/handle.missing.library.R
|
.onAttach <- function(libname, pkgname) {
packageStartupMessage(
paste0("\n\n##########################\n"),
paste0("#### valection v", utils::packageVersion("valection"), " ####\n"),
paste0("##########################\n\n"),
paste0("checking C library availability..."),
paste0(ifelse(valection::check.for.library(), 'PASS', 'FAIL'))
);
}
|
/scratch/gouwar.j/cran-all/cranData/valection/R/onAttach.R
|
quoted <- function(string) {
return(paste("'", string, "'", sep = ""));
}
|
/scratch/gouwar.j/cran-all/cranData/valection/R/quoted.R
|
run.decreasing.with.overlap <- function(budget, infile, outfile, seed = NULL) {
if (valection::check.for.library()) {
command <- paste(
"valection",
budget,
"d",
valection::quoted(infile),
valection::quoted(outfile),
seed,
sep = " "
);
system(command);
} else {
valection::handle.missing.library();
}
}
|
/scratch/gouwar.j/cran-all/cranData/valection/R/run.decreasing.with.overlap.R
|
run.directed.sampling <- function(budget, infile, outfile, seed = NULL) {
if (valection::check.for.library()) {
command <- paste(
"valection",
budget,
"v",
valection::quoted(infile),
valection::quoted(outfile),
seed,
sep = " "
);
system(command);
} else {
valection::handle.missing.library();
}
}
|
/scratch/gouwar.j/cran-all/cranData/valection/R/run.directed.sampling.R
|
run.equal.per.caller <- function(budget, infile, outfile, seed = NULL) {
if (valection::check.for.library()) {
command <- paste(
"valection",
budget,
"e",
valection::quoted(infile),
valection::quoted(outfile),
seed,
sep = " "
);
system(command);
} else {
valection::handle.missing.library();
}
}
|
/scratch/gouwar.j/cran-all/cranData/valection/R/run.equal.per.caller.R
|
run.equal.per.overlap <- function(budget, infile, outfile, seed = NULL) {
if (valection::check.for.library()) {
command <- paste(
"valection",
budget,
"o",
valection::quoted(infile),
valection::quoted(outfile),
seed,
sep = " "
);
system(command);
} else {
valection::handle.missing.library();
}
}
|
/scratch/gouwar.j/cran-all/cranData/valection/R/run.equal.per.overlap.R
|
run.increasing.with.overlap <- function(budget, infile, outfile, seed = NULL) {
if (valection::check.for.library()) {
command <- paste(
"valection",
budget,
"i",
valection::quoted(infile),
valection::quoted(outfile),
seed,
sep = " "
);
system(command);
} else {
valection::handle.missing.library();
}
}
|
/scratch/gouwar.j/cran-all/cranData/valection/R/run.increasing.with.overlap.R
|
run.random.sampling <- function(budget, infile, outfile, seed = NULL) {
if (valection::check.for.library()) {
command <- paste(
"valection",
budget,
"t",
valection::quoted(infile),
valection::quoted(outfile),
seed,
sep = " "
);
system(command);
} else {
valection::handle.missing.library();
}
}
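# Usage sketch (hypothetical file paths; assumes the 'valection' binary is on
# the PATH). Each run.* wrapper passes a one-letter mode flag to the binary:
#   "d" = decreasing with overlap,  "v" = directed sampling,
#   "e" = equal per caller,         "o" = equal per overlap,
#   "i" = increasing with overlap,  "t" = random sampling.
# run.random.sampling(
#     budget = 10,
#     infile = "calls.valec",
#     outfile = "selections.txt",
#     seed = 42
#     );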
|
/scratch/gouwar.j/cran-all/cranData/valection/R/run.random.sampling.R
|
### R code from vignette source 'Valection_Introduction.Rnw'
###################################################
### code chunk number 1: setup
###################################################
options(width=100, signif=3, digits=3)
set.seed(0xdada)
###################################################
### code chunk number 2: eg.run.valection (eval = FALSE)
###################################################
## require('valection');
##
## # run the sampling to select 10 candidates
## run.equal.per.caller(
## budget = 10,
## infile = "/home/me/calls.valec",
## outfile = "/home/me/selections.txt",
## seed = 50
## );
|
/scratch/gouwar.j/cran-all/cranData/valection/inst/doc/Valection_Introduction.R
|
#' Make a roses are red poem
#'
#' Make a "roses are red ..." poem
#' about an R package.
#'
#' @param pkg A package
#' @param hint extra information to add to the prompt
#' @param emoji Should the poem include emojis ?
#' @param ... Passed to [openai::create_chat_completion()]
#'
#' @return A poem generated by ChatGPT via [openai::create_chat_completion()]
#'
#' @importFrom glue glue
#' @importFrom openai create_chat_completion
#' @examples
#' prompt("dplyr")
#'
#' \dontrun{
#' # this needs the OPENAI_API_KEY environment variable
#' # to be set. See https://irudnyts.github.io/openai/
#' roses("dplyr")
#' }
#' @export
roses <- function(pkg, hint = "", emoji = TRUE, ...) {
result <- create_chat_completion(
model = "gpt-3.5-turbo",
messages = list(
list(
"role" = "system",
"content" = "You are helpful assistant"
),
list(
"role" = "user",
"content" = prompt(pkg, hint, emoji)
)
),
...
)$choices$message.content
writeLines(result)
invisible(result)
}
#' @rdname roses
#' @export
prompt <- function(pkg, hint = "", emoji = TRUE) {
emoji_prompt <- if (isTRUE(emoji)) {
"Include a bunch of emojis"
} else {
"Don't include emojis"
}
glue('Make a 4 lines "roses are red ..." poem about the R package "{pkg}". {emoji_prompt}. {hint}')
}
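# Illustration of the prompt text prompt() builds (reconstructed from the glue
# template above; the actual poem depends on the OpenAI response):
# prompt("dplyr")
# #> Make a 4 lines "roses are red ..." poem about the R package "dplyr". Include a bunch of emojis.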
|
/scratch/gouwar.j/cran-all/cranData/valentine/R/roses.R
|
#' Get series or series-group details
#'
#' \code{get_details} returns metadata from a Bank of Canada series or series
#' group.
#'
#' @param name A character of length 1 indicating the series or series group for
#' which information should be retrieved.
#' @param group A Boolean indicating whether the \code{name} is a series or a
#' series group.
#'
#' @return A list of series or group details.
#'
#' @examples
#' get_details("CES_C4E_LOSE_JOB_SK")
#' \dontrun{
#' get_details("BAPF_TRANSACTION_DATA")
#' }
#' get_details("BAPF_TRANSACTION_DATA", group = TRUE)
#'
#' @import httr
#' @importFrom jsonlite fromJSON
#'
#' @export
get_details <- function(name = NULL, group = FALSE) {
url <- modify_url("https://www.bankofcanada.ca/", path = paste0("valet/", ifelse(group, "groups/", "series/"), name, "/json"))
resp <- GET(url, user_agent("https://github.com/runkelcorey/valet"))
fromJSON(content(resp, "text"))[[2]]
}
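# Sketch of the request get_details() issues: get_details("FXCADAUD") fetches
# https://www.bankofcanada.ca/valet/series/FXCADAUD/json and returns the second
# element of the parsed JSON (the details list); with group = TRUE the path
# segment "series/" becomes "groups/".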
|
/scratch/gouwar.j/cran-all/cranData/valet/R/get_details.R
|
#' Get series-group observations
#'
#' \code{get_group} returns observations from a Bank of Canada series group.
#'
#' @param name A \code{character} of length 1 indicating the series group to
#' retrieve.
#' @param ... Additional query parameters. Possible values are \code{start_date}
#' and/or \code{end_date} (both character), or one of \code{recent},
#' \code{recent_weeks}, \code{recent_months}, or \code{recent_years} (all
#' numeric).
#'
#' @return A \code{tibble}.
#'
#' @details Valet, the server-side API, does not always return observations
#' filtered by \strong{...} arguments for series groups, even if it will accept
#' the request.
#'
#' @examples
#' get_group("BAPF_TRANSACTION_DATA")
#' get_group("gbpp")
#'
#' \dontrun{
#' #this is a series
#' get_group("FXCADAUD")
#' }
#'
#' @importFrom purrr map_dfc map_chr
#' @importFrom dplyr mutate select rename_with
#' @importFrom readr type_convert
#'
#' @export
get_group <- function(name = NULL, ...) {
df <- valet(name = name, group = TRUE, ...)[["content"]]
results <- df[["observations"]][-1] %>%
map_dfc(~ .x[["v"]]) %>%
select(names(df[["seriesDetail"]])) %>%
type_convert() %>%
suppressMessages()
for (x in names(results)) {
attr(results[[x]], "label") <- df[["seriesDetail"]][[x]][["label"]]
}
# strip a shared "<prefix>_" from the column names (repeatedly, in case of
# nested prefixes) and lowercase what remains
while (all(grepl(sub("_.*", "", names(results[1])), names(results)))) {
results <- rename_with(results, .fn = ~ tolower(sub(paste0(sub("_.*", "", names(results[1])), "_"), "", .x, fixed = T)))
}
if (names(df[["observations"]][1]) == "d") {
results <- mutate(results, date = as.Date.character(df[["observations"]][[1]]), .before = 1)
} else {
results <- mutate(results, id = df[["observations"]][[1]], .before = 1)
}
results
}
|
/scratch/gouwar.j/cran-all/cranData/valet/R/get_group.R
|
#' List possible series or groups.
#'
#' \code{get_list} returns metadata about all Bank of Canada series or series
#' groups.
#'
#' @param type Either \code{series} or \code{groups}.
#'
#' @return A \code{tibble} of series or group information.
#'
#' @importFrom httr modify_url
#' @importFrom readr read_csv cols
#'
#' @export
get_list <- function(type = c("series", "groups")) {
url <- modify_url("https://www.bankofcanada.ca/", path = paste0("valet/lists/", match.arg(type, c("series", "groups")), "/csv"))
readr::read_csv(url, skip = 4, col_types = cols())
}
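# Sketch of the request get_list() issues: get_list("series") downloads
# https://www.bankofcanada.ca/valet/lists/series/csv and skips the four
# preamble lines before parsing the CSV.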
|
/scratch/gouwar.j/cran-all/cranData/valet/R/get_list.R
|
#' Get series observations
#'
#' \code{get_series} returns observations from one or more Bank of Canada
#' series, subject to some date filtering.
#'
#' @param name A \code{character} of at least length 1 indicating the series to
#' retrieve.
#' @param ... Additional query parameters. Possible values are \code{start_date}
#' and/or \code{end_date} (both character), or one of \code{recent},
#' \code{recent_weeks}, \code{recent_months}, or \code{recent_years} (all
#' numeric).
#'
#' @return A \code{tibble} of size \eqn{length(name) + 1}.
#'
#' @examples
#' get_series("FXCADAUD")
#'
#' \dontrun{
#' #this is a group
#' get_series("BAPF")
#' }
#'
#' @importFrom purrr map_dfc map_chr
#' @importFrom dplyr mutate select
#' @importFrom readr type_convert
#'
#' @export
get_series <- function(name = NULL, ...) {
results <- valet(name = paste(name, collapse = ","), group = FALSE, ...)[["content"]]
df <- results[["observations"]][-1] %>%
map_dfc(~ .x[["v"]]) %>%
mutate(date = results[["observations"]][["d"]]) %>%
type_convert() %>%
suppressMessages() %>%
select(date, names(results[["seriesDetail"]]))
for (x in names(df)) {
attr(df[[x]], "label") <- results[["seriesDetail"]][[x]][["label"]]
}
df
}
|
/scratch/gouwar.j/cran-all/cranData/valet/R/get_series.R
|
#' @importFrom utils str
print.valet <- function(x, ...) {
cat("Valet", x$series, "\n", sep = "")
str(x$content)
invisible(x)
}
|
/scratch/gouwar.j/cran-all/cranData/valet/R/print.valet.R
|
#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
#' @param lhs A value or the magrittr placeholder.
#' @param rhs A function call using the magrittr semantics.
#' @return The result of calling `rhs(lhs)`.
NULL
|
/scratch/gouwar.j/cran-all/cranData/valet/R/utils-pipe.R
|
#' Retrieve Valet response
#'
#' \code{valet} is the core back-end to get responses from the Bank of Canada
#' API.
#'
#' @param name A character of at least length 1 indicating the series or series
#' group to retrieve.
#' @param group Boolean indicating whether the \code{name} is a series or a
#' series group.
#' @param ... Query parameters from other methods.
#'
#' @return A \code{valet} object.
#'
#' @examples
#' valet(name = "GBPP", group = TRUE, recent_weeks = 2)
#'
#' @import httr
#' @importFrom jsonlite fromJSON
#'
#' @export
valet <- function(name = NULL, group = FALSE, ...) {
url <- modify_url("https://www.bankofcanada.ca/", path = paste0("valet/observations/", ifelse(group, "group/", ""), name, "/json"), query = list(...))
resp <- GET(url, user_agent("https://github.com/runkelcorey/valet"))
parsed <- fromJSON(content(resp, "text"))
if (http_error(resp)) {
stop(
sprintf(
"Valet request failed [%s]\n%s\nSee %s",
status_code(resp),
parsed$message,
parsed$docs
),
call. = FALSE
)
}
structure(
list(
content = parsed,
path = name,
response = resp[-which(names(resp) == "content")]
),
class = "valet"
)
}
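# Sketch of the URL valet() constructs (reconstructed from modify_url() above):
# valet("GBPP", group = TRUE, recent_weeks = 2) requests
# https://www.bankofcanada.ca/valet/observations/group/GBPP/json?recent_weeks=2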
|
/scratch/gouwar.j/cran-all/cranData/valet/R/valet.R
|
#' Get Lat/Lon Coordinates for Testing
#'
#' This function gives quick access to lat/lon coordinates for a few points
#' around Ontario for testing and benchmarking purposes.
#'
#' @param dataset The name of a test dataset. By default, and if an unknown input
#' is given, it returns all values.
#'
#' @importFrom magrittr %>%
#' @return A tibble with one or more location names, latitudes, and longitudes.
#' @export
test_data <- function(dataset = NA){
name <- NULL
datasets <- tibble::tribble(~name, ~lat, ~lon,
"uottawa", 45.423382, -75.683170,
"parliament", 45.424774, -75.699473,
"cntower", 43.642748, -79.386602,
"portagestore", 45.534769, -78.707470,
"cdntirecentre", 45.297533, -75.927875,
"zwicksisland", 44.153853, -77.387684,
"bignickel", 46.473435, -81.033971,
"kenora", 49.765876, -94.487444,
"killarney", 46.012289, -81.401437)
result <- dplyr::filter(datasets, name == dataset)
if (nrow(result) == 0) result <- datasets
return(result)
}
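# Example: test_data("uottawa") returns a one-row tibble with
# name = "uottawa", lat = 45.423382, lon = -75.683170; an unrecognized
# (or missing) name returns all nine test locations.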
#' Point-to-Point Routing with Valhalla
#'
#' This function calls Valhalla's `route` API to return turn-by-turn directions from one
#' origin to one destination. Several costing methods are supported, and there are
#' parameters that let you give custom options to Valhalla. **Please note that this
#' function requires access to a running instance of Valhalla.**
#'
#' For more details, please check the Valhalla API documentation here:
#'
#' * [https://valhalla.readthedocs.io/en/latest/api/turn-by-turn/api-reference/](https://valhalla.readthedocs.io/en/latest/api/turn-by-turn/api-reference/)
#'
#' @param from A tibble containing one origin location in columns named `lat` and
#' `lon`.
#' @param to A tibble containing one destination location in columns named `lat` and
#' `lon`.
#' @param costing The travel costing method. Values "auto", "bicycle", and "pedestrian"
#' all work.
#' @param unit Distance measurement units. Defaults to "kilometres".
#' @param minimum_reachability The minimum number of nodes a candidate network
#' needs to have before it is included. Try increasing this value (e.g. to
#' 500) if Valhalla is getting stuck in small disconnected road networks.
#' @param from_search_filter A named list of options provided to Valhalla API. Defaults set a
#' maximum road class ("motorway", the highest) and minimum road class ("residential",
#' which is one above the lowest, "service_other"). See API documentation for details.
#' @param to_search_filter A named list of options provided to Valhalla API. Defaults set a
#' maximum road class ("motorway", the highest) and minimum road class ("residential",
#' which is one above the lowest, "service_other"). See API documentation for details.
#' @param costing_options A named list of options provided to the Valhalla API that affect route costing,
#' e.g. willingness to travel on highways or through alleys. See API documentation for details.
#' @param hostname Hostname or IP address of your Valhalla instance. Defaults to "localhost".
#' @param port The port your Valhalla instance is monitoring. Defaults to 8002.
#' @return A trip object.
#'
#' @examples
#' \dontrun{
#' library(valhallr)
#' # set up origin and destination data
#' from <- test_data("uottawa")
#' to <- test_data("cdntirecentre")
#'
#' # calculate the trip
#' trip <- route(from = from, to = to)
#'
#' # show overall trip information
#' print_trip(trip, all_details = FALSE)
#'
#' # make an interactive map of the trip using the leaflet package
#' map_trip(trip, method = "leaflet")
#'}
#' @export
route <- function(from = NA, to = NA, costing = "auto", unit = "kilometers",
                  from_search_filter = list(max_road_class = "motorway", min_road_class = "residential"),
                  to_search_filter = list(max_road_class = "motorway", min_road_class = "residential"),
                  minimum_reachability = 50, costing_options = list(),
                  hostname = "localhost", port = 8002){
# see API reference here
#https://valhalla.readthedocs.io/en/latest/api/turn-by-turn/api-reference/
if ((nrow(from) > 1) || (nrow(to) > 1)) stop("Either `from` or `to` has more than one row. Please supply one-row tibbles with `lat` and `lon` columns.")
post_data <- list()
post_data$locations <- from %>%
dplyr::select("lat", "lon") %>%
dplyr::bind_rows({
to %>%
dplyr::select("lat", "lon")}
) %>%
dplyr::bind_cols(tibble::tibble(search_filter = list(from_search_filter, to_search_filter))) %>%
dplyr::bind_cols(tibble::tibble(minimum_reachability = rep(minimum_reachability, 2) ))
post_data$costing = costing
if (costing == "auto") post_data$costing_options$auto = costing_options
if (costing == "pedestrian") post_data$costing_options$pedestrian = costing_options
if (costing == "bicycle") post_data$costing_options$bicycle = costing_options
if (costing == "truck") post_data$costing_options$truck = costing_options
post_data$directions_options$units = unit
post_json <- jsonlite::toJSON(post_data, auto_unbox = TRUE)
url <- paste0("http://",hostname,":",port,"/route")
resp <- httr::POST(url = url,
body = post_json,
httr::user_agent("https://github.com/chris31415926535/valhallr"))
if (httr::http_type(resp) != "application/json") stop ("API did not return json.", call. = FALSE)
if (httr::http_error(resp)){
message("Error: API call returned error. Returning API response for debugging.")
return(resp)
}
resp_data <- jsonlite::fromJSON(httr::content(resp, type = "text", encoding = "UTF-8"))
return(resp_data[[1]])
}
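# Sketch of the JSON body route() posts (abridged; default options shown,
# empty costing_options omitted):
# {
#   "locations": [
#     {"lat": ..., "lon": ..., "search_filter": {...}, "minimum_reachability": 50},
#     {"lat": ..., "lon": ..., "search_filter": {...}, "minimum_reachability": 50}
#   ],
#   "costing": "auto",
#   "directions_options": {"units": "kilometers"}
# }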
#' Decode Valhalla Route Shape
#'
#' For point-to-point routing, Valhalla's API provides a route shapefile in a
#' special ASCII-encoded format. This function takes an encoded string, decodes
#' it, and returns the lat/lon coordinates as a tibble.
#'
#' To map the results, see also `valhallr::map_trip()`.
#'
#' @param encoded An encoded shapefile in ASCII format from Valhalla's API.
#'
#' @return A tibble containing point locations in `lat` and `lon` columns.
#' @export
decode <- function(encoded) {
# polyline decoding algorithm from https://valhalla.readthedocs.io/en/latest/decoding/ (ported to R here)
chars <- stringr::str_split(encoded, "")[[1]]
lats <- vector(mode = "integer", length = 1)
lons <- vector(mode = "integer", length = 1)
i <- 0
while (i < length(chars)){
shift <- 0
result <- 0
byte <- 0x20L
while (byte >= 0x20) {
i <- i + 1
byte <- chars[[i]] %>% utf8ToInt() - 63
result <- bitwOr(result, bitwAnd(byte, 0x1f) %>% bitwShiftL(shift))
shift <- shift + 5
if (byte < 0x20) break
}
if (bitwAnd(result, 1)) {
result <- result %>% bitwShiftR(1) %>% bitwNot()
} else {
result <- result %>% bitwShiftR(1)
}
lats <- c(lats, (lats[[length(lats)]] + result))
shift <- 0
result <- 0
byte <- 0x20L  # any value >= 0x20 enters the loop, matching the sentinel above
while (byte >= 0x20) {
i <- i + 1
byte <- chars[[i]] %>% utf8ToInt() - 63
result <- bitwOr(result, bitwAnd(byte, 0x1f) %>% bitwShiftL(shift))
shift <- shift + 5
if (byte < 0x20) break
}
if (bitwAnd(result, 1)) {
result <- result %>% bitwShiftR(1) %>% bitwNot()
} else {
result <- result %>% bitwShiftR(1)
}
lons <- c(lons, (lons[[length(lons)]] + result))
}
decoded <- tibble::tibble(lat = lats[2:length(lats)]/1000000,
lng = lons[2:length(lons)]/1000000)
return (decoded)
}
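# Usage sketch: decode the encoded shape from a route() trip object and
# inspect the first few coordinates.
# shp <- decode(trip$legs$shape)
# head(shp)  # tibble with `lat` and `lng` columns at 1e-6 precision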
#' Source-to-Targets Origin/Destination Matrices with Valhalla
#'
#' @description This function creates a tidy (i.e. long) table of
#' origin-destination trip data using the Valhalla routing engine. For a set
#' of o origins and d destinations, it returns a tibble with (o x d) rows with
#' the travel distance and time between each pair. It can handle several
#' different travel modes and routing options. **Please note that this
#' function requires access to a running instance of Valhalla.**
#'
#' This function provides fine-grained control over Valhalla's API options.
#'
#' * For a user-friendly function, see the function `valhallr::od_table()`.
#' * For details about the API, see Valhalla's documentation here: [https://valhalla.readthedocs.io/en/latest/api/matrix/api-reference/](https://valhalla.readthedocs.io/en/latest/api/matrix/api-reference/)
#'
#'
#' @param froms A tibble containing origin locations in columns named `lat` and
#' `lon`.
#' @param tos A tibble containing destination locations in columns named `lat` and
#' `lon`.
#' @param costing The travel costing method: at present "auto", "bicycle", and "pedestrian"
#' are supported.
#' @param minimum_reachability The minimum number of nodes a candidate network
#' needs to have before it is included. Try increasing this value (e.g. to
#' 500) if Valhalla is getting stuck in small disconnected road networks.
#' @param from_search_filter A named list of options provided to Valhalla API. Defaults set a
#' maximum road class ("motorway", the highest) and minimum road class ("residential",
#' which is one above the lowest, "service_other"). See API documentation for details.
#' @param to_search_filter A named list of options provided to Valhalla API. Defaults set a
#' maximum road class ("motorway", the highest) and minimum road class ("residential",
#' which is one above the lowest, "service_other"). See API documentation for details.
#' @param costing_options A named list of options provided to the Valhalla API that affect route costing,
#' e.g. willingness to travel on highways or through alleys. See API documentation for details.
#' @param hostname Hostname or IP address of your Valhalla instance. Defaults to "localhost".
#' @param port The port your Valhalla instance is monitoring. Defaults to 8002.
#' @return A tibble showing the trip distances and times from each origin to each destination.
#'
#' @examples
#' \dontrun{
#' # NOTE: Assumes an instance of Valhalla is running on localhost:8002.
#' library(dplyr)
#' library(valhallr)
#' froms <- bind_rows(test_data("parliament"), test_data("uottawa"))
#' tos <- bind_rows(test_data("cdntirecentre"), test_data("parliament"))
#' st <- sources_to_targets(froms, tos)
#' }
#' @export
sources_to_targets <- function(froms, tos, costing = "auto",
                               from_search_filter = list(max_road_class = "motorway", min_road_class = "residential"),
                               to_search_filter = list(max_road_class = "motorway", min_road_class = "residential"),
                               minimum_reachability = 50, costing_options = list(),
                               hostname = "localhost", port = 8002){
post_data <- list()
post_data$sources = froms %>%
dplyr::bind_cols(tibble::tibble(search_filter = rep(list(from_search_filter), nrow(froms)))) %>%
dplyr::bind_cols(tibble::tibble(minimum_reachability = rep(minimum_reachability, nrow(froms))))
post_data$targets = tos %>%
dplyr::bind_cols(tibble::tibble(search_filter = rep(list(to_search_filter), nrow(tos)))) %>%
dplyr::bind_cols(tibble::tibble(minimum_reachability = rep(minimum_reachability, nrow(tos))))
post_data$costing = costing
if (costing == "auto") post_data$costing_options$auto = costing_options
if (costing == "pedestrian") post_data$costing_options$pedestrian = costing_options
if (costing == "bicycle") post_data$costing_options$bicycle = costing_options
if (costing == "truck") post_data$costing_options$truck = costing_options
post_json <- jsonlite::toJSON(post_data, auto_unbox = TRUE)
url <- paste0("http://",hostname,":",port,"/sources_to_targets")
resp <- httr::POST(url = url,
body = post_json,
httr::user_agent("https://github.com/chris31415926535/valhallr"))
if (httr::http_type(resp) != "application/json") stop ("API did not return json.", call. = FALSE)
if (httr::http_error(resp)){
message("Error: API call returned error. Returning API response for debugging.")
return(resp)
}
matrix <- jsonlite::fromJSON(httr::content(resp, type = "text", encoding = "UTF-8"))
mat_tibble <- matrix$sources_to_targets %>%
tibble::enframe() %>%
dplyr::select(-"name") %>%
tidyr::unnest(cols = "value")
mat_tibble
}
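# The API's sources_to_targets response arrives as a nested list-matrix;
# enframe() plus unnest() above flatten it into one row per origin/destination
# pair, with columns distance, time, to_index, and from_index (zero-indexed).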
#' Generate Tidy Origin-Destination Data using Valhalla
#'
#' @description This function creates a tidy (i.e. long) table of
#' origin-destination trip data using the Valhalla routing engine. For a set
#' of o origins and d destinations, it returns a tibble with (o x d) rows with
#' the travel distance and time between each pair. It can handle several
#' different travel modes and routing options.
#'
#' This function is a user-friendly wrapper around `valhallr::sources_to_targets()`,
#' which calls the Valhalla API directly. `sources_to_targets()` offers finer-
#' grained control over API options, and so that function may be more useful
#' for advanced users.
#'
#' Notable features of `od_table()`:
#'
#' * You can specify human-readable indices with `from_id_col` and
#' `to_id_col`. (Valhalla's API only returns zero-indexed integer
#' identifiers.)
#' * You can specify a `batch_size` to break computation into
#' several smaller API calls, to prevent your Valhalla instance from running
#' out of memory. This seems especially important for pedestrian routing,
#' where I've sometimes needed to use a batch size as small as 5.
#'
#'
#' @param froms A tibble containing origin locations in columns named `lat` and
#' `lon`, and an optional column with human-readable names.
#' @param from_id_col The name of the column in `froms` that contains
#' human-readable names.
#' @param tos A tibble containing destination locations in columns named `lat`
#' and `lon`, and an optional column with human-readable names.
#' @param to_id_col The name of the column in `tos` that contains human-readable
#' names.
#' @param costing The travel costing method: at present "auto", "bicycle", and
#' "pedestrian" are supported.
#' @param batch_size The number of origin points to process per API call.
#' @param minimum_reachability The minimum number of nodes a candidate network
#' needs to have before it is included. Try increasing this value (e.g. to
#' 500) if Valhalla is getting stuck in small disconnected road networks.
#' @param verbose Boolean. Defaults to FALSE. If TRUE, it will provide updates
#'   on the batching process (if applicable).
#' @param hostname Hostname or IP address of your Valhalla instance. Defaults to "localhost".
#' @param port The port your Valhalla instance is monitoring. Defaults to 8002.
#'
#' @return A tibble showing the trip distances and times from each origin to each named destination.
#' @importFrom rlang :=
#' @examples
#' \dontrun{
#' library(dplyr)
#' library(valhallr)
#' # set up our inputs
#' origins <- bind_rows(test_data("parliament"), test_data("uottawa"), test_data("cntower"))
#' destinations <- bind_rows(test_data("cdntirecentre"), test_data("parliament"))
#'
#' # generate a tidy origin-destination table
#' od <- od_table (froms = origins,
#' from_id_col = "name",
#' tos = destinations,
#' to_id_col = "name",
#' costing = "auto",
#' batch_size = 100,
#' minimum_reachability = 500)
#' }
#' @export
od_table <- function(froms, from_id_col, tos, to_id_col, costing = "auto", batch_size = 100, minimum_reachability = 500, verbose = FALSE, hostname = "localhost", port = 8002){
# note: got importFrom rlang trick here: https://stackoverflow.com/questions/58026637/no-visible-global-function-definition-for
from_index <- to_index <- NULL
# FIXME TODO: do input validation!!
# get the human-readable names of the from- and to-data
from_names <- froms %>%
dplyr::select(from_id_col) %>%
tibble::rowid_to_column(var = "from_index")
to_names <- tos %>%
dplyr::select(to_id_col) %>%
tibble::rowid_to_column(var = "to_index")
# if human-readable column names are identical, append "_from" and "_to" so they differ
if (from_id_col == to_id_col) {
new_from <- paste0(from_id_col,"_from")
from_names <- dplyr::rename(from_names, !!(new_from) := from_id_col)
from_id_col <- new_from
new_to <- paste0(to_id_col, "_to")
to_names <- dplyr::rename(to_names, !!(new_to) := to_id_col)
to_id_col <- new_to
}
# set up our batching: enough iterations to cover every origin row
n_iters <- ceiling(nrow(froms) / batch_size)
results <- vector("list", n_iters)
# do each batch
for (i in 1:n_iters){
if (verbose) message(paste0(i,"/",n_iters))
start_index <- (i-1)*batch_size + 1
end_index <- min( (i*batch_size), nrow(froms))
froms_iter = froms[start_index:end_index, ] %>%
tidyr::drop_na()
od <- valhallr::sources_to_targets(froms = froms_iter,
tos = tos,
costing = costing,
minimum_reachability = minimum_reachability,
hostname = hostname,
port = port)
# FIXME TODO: confirm that sources_to_targets gave us meaningful data!
# make start_index match the original DB row number and doc row number
od <- od %>%
dplyr::mutate(from_index = from_index + start_index,
to_index = to_index + 1) %>%
dplyr::left_join(from_names, by = "from_index") %>%
dplyr::left_join(to_names, by = "to_index") %>%
dplyr::select(-to_index, -from_index)
# add results to our pre-built list
results[[i]] <- od
}
# get results back into a tibble
output <- results %>%
tibble::enframe() %>%
tidyr::unnest("value") %>%
dplyr::select(from_id_col, to_id_col, "distance", "time")
return(output)
}
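# Batching arithmetic (per the loop above): with 250 origins and
# batch_size = 100, ceiling(250 / 100) = 3 calls are made to
# sources_to_targets(), covering origin rows 1-100, 101-200, and 201-250.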
#' Print Trip Summary and Turn-By-Turn Directions
#'
#' @param trip A trip response from `valhallr::route()`.
#' @param all_details Boolean. Should we print each turn-by-turn instruction
#' along with an overall summary?
#'
#' @return The input `trip` object, invisibly.
#' @inherit route examples
#' @export
print_trip <- function(trip, all_details = FALSE) {
cat (paste0("From lat/lng: ", trip$locations$lat[[1]], ", ", trip$locations$lon[[1]]))
cat (paste0("\nTo lat/lng: ", trip$locations$lat[[2]], ", ", trip$locations$lon[[2]]))
cat (paste0("\nTime: ", round(trip$summary$time/60, digits = 1), " minutes"))
cat (paste0("\nDist: ", trip$summary$length, " km\n"))
if (all_details){
maneuvers <- trip$legs$maneuvers[[1]]
for (i in 1:nrow(maneuvers)) {
sprintf("Step %d: %s\n", i, maneuvers[i,]$instruction) %>% cat()
sprintf(" Dist: %3.2f km\n", maneuvers[i,]$length ) %>% cat()
sprintf(" Time: %3.2f minutes\n", maneuvers[i,]$time/60) %>% cat()
}
}
invisible(trip)
}
#' Make a Map from a Trip
#'
#' @param trip A trip response from `valhallr::route()`.
#' @param method Which mapping service to use. Defaults to leaflet; also can use ggplot.
#'
#' @return A map object, either leaflet or ggplot.
#' @inherit route examples
#' @export
map_trip <- function(trip, method = "leaflet"){
## decode and turn into a sf line
trip_shp <- valhallr::decode(trip$legs$shape) %>%
sf::st_as_sf(coords = c("lng", "lat"), crs = "WGS84") %>%
dplyr::summarise(do_union = FALSE) %>%
sf::st_cast("LINESTRING")
# then plot with leaflet
if (method == "leaflet"){
trip_plot <- trip_shp %>%
leaflet::leaflet() %>%
leaflet::addTiles() %>%
leaflet::addPolylines()
}
if (method == "ggplot"){
trip_plot <- trip_shp %>%
ggplot2::ggplot() +
ggspatial::annotation_map_tile(progress = "none",
zoomin = -1,
cachedir = tempdir()) +
ggplot2::geom_sf(colour = "blue", size = 2)
}
trip_plot
}
#' Generate Isochrones
#'
#' An isochrone, also known as a service area, is a polygon that shows the
#' area reachable from a starting point by traveling along a road network
#' for a certain distance or time. This function provides an interface to
#' the Valhalla routing engine's isochrone API. It lets you provide a starting
#' point's latitude and longitude, a distance or time metric, and a vector
#' of distances/times, and if it's successful it returns an sf-class tibble of
#' polygons.
#'
#' For more information, please see Valhalla's API documentation:
#'
#' * [https://valhalla.readthedocs.io/en/latest/api/isochrone/api-reference/](https://valhalla.readthedocs.io/en/latest/api/isochrone/api-reference/)
#'
#' @param from A tibble containing one origin location in columns named `lat` and
#' `lon`.
#' @param costing The travel costing method: at present "auto", "bicycle", and "pedestrian"
#' are supported.
#' @param contours A numeric vector of values at which to produce the isochrones.
#' @param metric Distance or time. Accepts "min" (time in minutes) or "km" (distance in kilometres).
#' @param min_road_class The minimum road classification Valhalla will consider. Defaults to `residential`.
#' @param minimum_reachability The minimum number of nodes a candidate network
#' needs to have before it is included.
#' @param hostname Hostname or IP address of your Valhalla instance. Defaults to "localhost".
#' @param port The port your Valhalla instance is monitoring. Defaults to 8002.
#'
#' @return An sf/tibble object containing isochrone polygons.
#' @examples
#' \dontrun{
#' library(valhallr)
#' # set up our departure point: the University of Ottawa
#' from <- test_data("uottawa")
#'
#' # generate a set of isochrones for travel by bicycle
#' i <- valhallr::isochrone(from, costing = "bicycle")
#'
#' # map the isochrones
#' map_isochrone(i)
#' }
#' @export
isochrone <- function(from, costing = "pedestrian", contours = c(5, 10, 15), metric = "min", min_road_class = "residential", minimum_reachability = 500, hostname = "localhost", port = 8002){
# see API reference here
# https://valhalla.readthedocs.io/en/latest/api/isochrone/api-reference/
# validating input
if (nrow(from) > 1) stop ("More than one location supplied. Please supply a one-row input tibble with `lat` and `lon` columns.")
if (! (("lat" %in% names(from)) & ("lon" %in% names(from))) ) stop ("From tibble must inclide one column named `lat` and one named `lon`.")
if (!metric %in% c("min", "km")) stop ("Invalid metric. Please use `min` for time in minutes or `km` for distance in kilometres.")
post_data <- list()
post_data$locations <- dplyr::select(from, "lat", "lon")
post_data$costing <- costing
if (metric == "min") post_data$contours <- tibble::tibble(time = contours)
if (metric == "km") post_data$contours <- tibble::tibble(distance = contours)
post_data$polygons <- TRUE
post_json <- jsonlite::toJSON(post_data, auto_unbox = TRUE)
url <- paste0("http://",hostname,":",port,"/isochrone")
resp <- httr::POST(url = url,
body = post_json,
httr::user_agent("https://github.com/chris31415926535/valhallr"))
if (httr::http_type(resp) != "application/json") stop ("API did not return json.", call. = FALSE)
if (httr::http_error(resp)){
message("Error: API call returned error. Returning API response for debugging.")
return(resp)
}
resp_data <- httr::content(resp, type = "text", encoding = "UTF-8") %>%
geojsonio::geojson_sf() %>%
tibble::as_tibble() %>%
sf::st_as_sf()
resp_data$costing <- costing
return (resp_data)
}
#' Generate maps of isochrones
#'
#' This is a convenience function that takes the output of `valhallr::isochrone()`
#' and generates either a static or interactive map.
#'
#' @param isochrone An isochrone sf object generated by `valhallr::isochrone()`.
#' @param method The method used to map it. Two methods are supported:
#' * "leaflet" produces an interactive HTML map using the Leaflet package.
#' * "ggplot" produces a static map.
#'
#' @return A plot of the isochrones, either a leaflet object or a ggplot object.
#' @inherit isochrone examples
#' @export
map_isochrone <- function(isochrone, method = "leaflet") {
contour <- NULL
if (!method %in% c("leaflet", "ggplot")) stop ("Invalid map method. Please specify `leaflet` or `ggplot`.")
metric_name <- "ERROR: METRIC NOT DETECTED"
costing_name <- "ERROR: COSTING NOT DETECTED"
if (isochrone$metric[[1]] == "time") metric_name <- "Minutes"
if (isochrone$metric[[1]] == "distance") metric_name <- "Kilometres"
if (isochrone$costing[[1]] == "auto") costing_name <- "Driving"
if (isochrone$costing[[1]] == "pedestrian") costing_name <- "Walking"
if (isochrone$costing[[1]] == "bicycle") costing_name <- "Cycling"
if (method == "leaflet"){
iso_labels <- paste0(isochrone$contour, " ", metric_name, " ", costing_name) %>%
purrr::map(htmltools::HTML)
output <- isochrone %>%
leaflet::leaflet() %>%
leaflet::addTiles() %>%
leaflet::addPolygons(fillColor = ~ color,
label = iso_labels)
}
if (method == "ggplot"){
output <- isochrone %>%
tibble::as_tibble() %>%
sf::st_as_sf() %>%
ggplot2::ggplot() +
ggspatial::annotation_map_tile(progress = "none",
zoomin = -1,
cachedir = tempdir()) +
ggplot2::geom_sf(ggplot2::aes(fill = contour),
alpha = 0.3) +
ggplot2::labs(fill = metric_name)
}
return(output)
}
|
/scratch/gouwar.j/cran-all/cranData/valhallr/R/valhalla.R
|
---
title: "valhallr: A Tidy R Interface to the Valhalla Routing Engine"
author: Christopher Belanger
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{valhallr: A Tidy R Interface to the Valhalla Routing Engine}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
# Step 0: Install and run Valhalla
This package provides an R-native interface to the Valhalla routing engine, and assumes that you have access to a running Valhalla instance. If you don't, you won't be able to use any of the functions described in this vignette! Installing and configuring Valhalla is far out of scope for this vignette, but Valhalla is open-source and there are several free resources that you might find helpful.
Here are some example resources to help you get Valhalla up and running:
* Valhalla's GitHub page has all of the source code and discussions and issue trackers: [https://github.com/valhalla/](https://github.com/valhalla/)
* Valhalla's API documentation has a lot of detailed information: [https://valhalla.readthedocs.io/en/latest/](https://valhalla.readthedocs.io/en/latest/)
* GIS-OPS has a helpful guide for installing and running Valhalla: [https://gis-ops.com/valhalla-part-1-how-to-install-on-ubuntu/](https://gis-ops.com/valhalla-part-1-how-to-install-on-ubuntu/)
The rest of this vignette assumes a running Valhalla instance on localhost at the default port 8002.
# Load the package and point it to Valhalla
Once you have access to Valhalla, you're ready to start. The package lets you specify any hostname/IP address and port, but as a default it looks at port 8002 on "localhost". Here we'll set the hostname and port manually to make it
clear.
```r
library(valhallr)
host <- "localhost"
port <- 8002
```
# Turn-by-turn routing
The function `valhallr::route()` uses Valhalla to generate detailed turn-by-turn routing from an origin to a destination. It's straightforward to use: you provide origin and destination coordinates in tibbles with `lat` and `lon` columns, along with any optional parameters, and the API returns an object containing the resulting trip.
This example shows how to generate driving directions between the University of Ottawa and the Canadian Tire Centre, a stadium in Ottawa. It gets these coordinates from the function `valhallr::test_data()`, which can return coordinates for several points of interest around Ontario. It then calls `valhallr::route()` between these two locations with all default options, and then passes the result to `valhallr::print_trip()`.
```r
origin <- test_data("uottawa")
destination <- test_data("cdntirecentre")
t <- route(from = origin,
to = destination,
hostname = host,
port = port)
print_trip(t)
#> From lat/lng: 45.4234, -75.6832
#> To lat/lng: 45.2975, -75.9279
#> Time: 19.9 minutes
#> Dist: 28.693 km
```
We can see that Valhalla has generated a trip from uOttawa to the Canadian Tire Centre that's 28.693km long and would take 19.9 minutes to drive. But what does the trip look like?
We can answer this question with the function `valhallr::map_trip()`, which takes the Valhalla trip object, decodes an embedded shapefile, and plots the result on a map. (You can use `valhallr::decode()` to extract the shapefile yourself for other purposes.) The `map_trip()` function supports an interactive map using **leaflet** or a static map using **ggplot2**. Here we'll generate a static map.
```r
map_trip(t, method = "ggplot")
```
<img src="valhallr-map_ottawa_auto-1.jpeg" title="plot of chunk map_ottawa_auto" alt="plot of chunk map_ottawa_auto" width="100%" />
What if we wanted to travel by bicycle instead? We can change our travel method from the default, "auto", using the `costing` parameter. Here we set it to "bicycle" and re-run the command:
```r
t <- route(from = origin,
to = destination,
costing = "bicycle",
hostname = host,
port = port)
print_trip(t)
#> From lat/lng: 45.4234, -75.6832
#> To lat/lng: 45.2975, -75.9279
#> Time: 108 minutes
#> Dist: 30.028 km
```
This new trip is slightly longer at 30.028 km, but would take considerably more time at 108 minutes. When we map it, we can see that Valhalla has given us a plausible cycling trip that takes a scenic route along the riverside path and avoids major highways:
```r
map_trip(t, method = "ggplot")
```
<img src="valhallr-map_ottawa_bicycle-1.jpeg" title="plot of chunk map_ottawa_bicycle" alt="plot of chunk map_ottawa_bicycle" width="100%" />
# Origin-destination analyses
Many analyses require the shortest distance or time between a large number of locations without needing to know the specific routes taken. Sometimes this information is presented in origin-destination (OD) matrices or OD tables, which simply show the shortest travel distances/times between source locations and target locations. Valhalla has an API called "sources_to_targets" to generate this information. The **valhallr** package has two functions that call this API: `valhallr::sources_to_targets()` calls it directly and provides fine-grained access to configuration options, and `valhallr::od_table()` provides a higher-level interface with several user-friendly features. We'll look at each function in turn.
In this example, we need to find the shortest distances and times between three source locations (the Canadian parliament buildings, the University of Ottawa, and the CN Tower) and two destination locations (the Canadian Tire Centre in Ottawa, and Zwicks Island Park in Belleville).
To create an OD table, we set up our sources in a tibble called `froms`, our targets in a tibble called `tos`, and then pass them to `sources_to_targets()` using all default options.
```r
library(dplyr)
origins <- bind_rows(test_data("parliament"),
test_data("uottawa"),
test_data("cntower"))
destinations <- bind_rows(test_data("cdntirecentre"),
test_data("zwicksisland"))
st <- sources_to_targets(froms = origins,
tos = destinations,
hostname = host,
port = port)
st %>%
knitr::kable()
```
| distance| time| to_index| from_index|
|--------:|-----:|--------:|----------:|
| 29.498| 1232| 0| 0|
| 273.969| 10170| 1| 0|
| 28.693| 1194| 0| 1|
| 273.164| 10131| 1| 1|
| 389.276| 15963| 0| 2|
| 190.912| 7189| 1| 2|
`sources_to_targets()` returns results as they come from Valhalla, which has two disadvantages. First, it strips all human-readable names from the inputs and only returns zero-indexed identifiers. And second, the API call can fail for large requests with hundreds or thousands of locations if Valhalla runs out of memory.
The `valhallr::od_table()` function addresses these two problems by letting you specify human-readable names for each location, and by letting you send origin rows to Valhalla in batches. The trade-off is that `od_table()` doesn't give as fine-grained access to the underlying API, but it's easier and faster for many purposes.
Here we can see the results of calling `od_table()` with the same inputs as before, this time specifying the names of the human-readable id columns in each input tibble:
```r
od <- od_table (froms = origins,
from_id_col = "name",
tos = destinations,
to_id_col = "name",
hostname = host,
port = port)
od %>%
knitr::kable()
```
|name_from |name_to | distance| time|
|:----------|:-------------|--------:|-----:|
|parliament |cdntirecentre | 29.498| 1232|
|parliament |zwicksisland | 273.969| 10170|
|uottawa |cdntirecentre | 28.693| 1194|
|uottawa |zwicksisland | 273.164| 10131|
|cntower |cdntirecentre | 389.276| 15963|
|cntower |zwicksisland | 190.912| 7189|
The results are much easier to read, and would be simpler to feed forward into a further analysis (e.g. by left-joining with the original inputs to get the lat/lon information for mapping).
Although this example didn't use batching, note that this can be essential for larger analyses and seems especially important when using "pedestrian" costing. For some analyses I've been able to use a batch size of 100 for "auto" costing but have had to scale down to a batch size of 5 for "pedestrian" costing.
# Isochrones
Finally, **valhallr** provides access to Valhalla's isochrone API through the function `valhallr::isochrone()`. An isochrone, also known as a service area, is a polygon that shows the area reachable from a starting point by traveling along a road network for a certain distance or time. This function lets you provide a starting point's latitude and longitude, a distance or time metric, and a vector of distances/times, and if it's successful it returns an sf-class tibble of polygons.
For example, how far can you get from downtown Kenora on a bicycle using the default values of 5, 10, and 15 minutes?
```r
# set up our departure point: downtown Kenora
origin <- test_data("kenora")
# generate an isochrone for travel by bicycle
i <- valhallr::isochrone(from = origin,
costing = "bicycle",
hostname = host,
port = port)
# map the isochrone
map_isochrone(i, method = "ggplot")
```
<img src="valhallr-map_isochrone_kenora-1.jpeg" title="plot of chunk map_isochrone_kenora" alt="plot of chunk map_isochrone_kenora" width="100%" />
Pretty far, by the looks of it! You can see how the isochrones follow the road network, and so give a reasonably realistic estimate of how far you could travel.
For another example, how far can you drive from Sudbury's Big Nickel in 30, 60, and 90 minutes?
```r
origin <- test_data("bignickel")
i <- valhallr::isochrone(from = origin,
costing = "auto",
contours = c(30,60,90),
metric = "min",
hostname = host,
port = port)
map_isochrone(i, method = "ggplot")
```
<img src="valhallr-map_isochrone_sudbury-1.jpeg" title="plot of chunk map_isochrone_sudbury" alt="plot of chunk map_isochrone_sudbury" width="100%" />
Again, quite far! You can see how the algorithm takes the road network and speed limits into account: once you get onto a major highway, the distances increase rapidly.
|
/scratch/gouwar.j/cran-all/cranData/valhallr/inst/doc/valhallr.Rmd
|
#--------------------------------------------------
#' @title Fit Artificial Neural Networks.
#'
#' @description
#' Fits a single hidden layer ANN model to input data \code{x} and output data
#' \code{y}.
#'
#' @param x matrix, data frame or vector of numeric input values, with
#' \code{ncol(x)} equal to the number of inputs/predictors and \code{nrow(x)}
#' equal to the number of examples. A vector is considered to comprise examples
#' of a single input or predictor variable.
#' @param y matrix, data frame or vector of target values for examples.
#' @param size number of hidden layer nodes. Can be zero.
#' @param act_hid activation function to be used at the hidden layer.
#' See `Details'.
#' @param act_out activation function to be used at the output layer.
#' See `Details'.
#' @param Wts initial weight vector. If \code{NULL} chosen at random.
#' @param rang initial random weights on [-rang,rang].
#' Default value is 0.5.
#' @param objfn objective function to be minimised when fitting
#' weights. This function may be user-defined with the first two arguments
#' corresponding to \code{y} (the observed target data) and \code{y_hat}
#' (the ANN output). If this function has additional parameters which require
#' optimizing, these must be defined in argument \code{par_of}
#' (see AR(1) case in `Examples'). Default is \code{sse} (internal
#' function to compute sum squared error, with error given by
#' \code{y - y_hat}) when \code{objfn = NULL}.
#' @param method the method to be used by \code{\link[stats]{optim}}
#' for minimising the objective function. May be ``Nelder-Mead'', ``BFGS'',
#' ``CG'', ``L-BFGS-B'', ``SANN'' or ``Brent''. Can be abbreviated.
#' Default is ``BFGS''.
#' @param maxit maximum number of iterations used by \code{\link[stats]{optim}}.
#' Default value is 1000.
#' @param abstol absolute convergence tolerance (stopping criterion)
#' used by \code{\link[stats]{optim}}. Default is \code{1e-4}.
#' @param reltol relative convergence tolerance (stopping criterion)
#' used by \code{\link[stats]{optim}}. Optimization stops if the value
#' returned by \code{objfn} cannot be reduced by a factor of
#' \code{reltol * (abs(val) + reltol)} at a step. Default is \code{1e-8}.
#' @param trace logical. Should optimization be traced?
#' Default = TRUE.
#' @param \dots arguments to be passed to user-defined \code{objfn}. Initial
#' values of any parameters (in addition to the ANN weights) requiring
#' optimization must be supplied in argument \code{par_of} (see AR(1) case
#' in `Examples').
#' @return object of class `ann' with components describing the ANN structure
#' and the following output components:
#' \item{wts}{best set of weights found.}
#' \item{par_of}{best values of additional \code{objfn} parameters. This
#' component will only be returned if a user-defined \code{objfn} is supplied
#' and argument \code{par_of} is included in the function call (see AR(1)
#' case in `Examples').}
#' \item{value}{value of objective function.}
#' \item{fitted.values}{fitted values for the training data.}
#' \item{residuals}{residuals for the training data.}
#' \item{convergence}{integer code returned by \code{\link[stats]{optim}}.
#' 0 indicates successful completion, see \code{\link[stats]{optim}} for
#' possible error codes.}
#' \item{derivs}{matrix of derivatives of hidden (columns \code{1:size})
#' and output (final column) nodes.}
#' @details
#' The ``linear'' activation, or transfer, function is the
#' identity function where the output of a node is equal to its input
#' \eqn{f(x) = x}.
#'
#' The ``sigmoid'' function is the standard logistic sigmoid function given
#' by \eqn{f(x) = \frac{1}{1+e^{-x}}}{f(x) = 1 / (1 + exp(-x))}.
#'
#' The ``tanh'' function is the hyperbolic tangent function given by
#' \eqn{f(x) = \frac{e^{x}-e^{-x}}{e^{x}+e^{-x}}}{
#' f(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))}.
#'
#' The ``exp'' function is the exponential function given by
#' \eqn{f(x) = e^{x}}{f(x) = exp(x)}.
#'
#' The default configuration of activation functions is
#' \code{act_hid = "tanh"} and \code{act_out = "linear"}.
#'
#' Optimization (minimization) of the objective function (\code{objfn}) is
#' performed by \code{\link[stats]{optim}} using the method specified.
#'
#' Derivatives returned are first-order partial derivatives of the hidden and
#' output nodes with respect to their inputs. These may be useful for
#' sensitivity analyses.
#'
#' @seealso \code{\link{predict.ann}}, \code{\link{validann}}
#'
#' @examples
#' ## fit 1-hidden node ann model with tanh activation at the hidden layer and
#' ## linear activation at the output layer.
#' ## Use 200 random samples from ar9 dataset.
#' ## ---
#' data("ar9")
#' samp <- sample(1:1000, 200)
#' y <- ar9[samp, ncol(ar9)]
#' x <- ar9[samp, -ncol(ar9)]
#' x <- x[, c(1,4,9)]
#' fit <- ann(x, y, size = 1, act_hid = "tanh", act_out = "linear", rang = 0.1)
#'
#' ## fit 3-hidden node ann model to ar9 data with user-defined AR(1) objective
#' ## function
#' ## ---
#' ar1_sse <- function(y, y_hat, par_of) {
#' err <- y - y_hat
#' err[-1] <- err[-1] - par_of * err[-length(y)]
#' sum(err ^ 2)
#' }
#' fit <- ann(x, y, size = 3, act_hid = "tanh", act_out = "linear", rang = 0.1,
#' objfn = ar1_sse, par_of = 0.7)
#'
#' @export
#' @importFrom stats optim
#' @importFrom stats predict
#' @importFrom stats residuals
#' @importFrom stats runif
#--------------------------------------------------
ann <- function(x, y, size, act_hid = c("tanh", "sigmoid", "linear", "exp"),
act_out = c("linear", "sigmoid", "tanh", "exp"), Wts = NULL,
rang = 0.5, objfn = NULL, method = "BFGS", maxit = 1000,
abstol = 1.0e-4, reltol = 1.0e-8, trace = TRUE, ...) {
# --------
sse <- function(y, y_hat) {
err <- y - y_hat
sum(err ^ 2)
}
if(is.null(objfn)) objfn <- sse
# --------
m <- match.call(expand.dots = TRUE)
parof_present <- FALSE
if("par_of" %in% names(m)) {
parof_present <- TRUE
par_of <- eval(m$par_of)
}
obj_fn <- function(y, y_hat) {
objfn(y, y_hat, ...)
}
act_hid <- match.arg(NULL, act_hid)
act_out <- match.arg(NULL, act_out)
if (is.vector(x)) {
x <- matrix(x, ncol = 1)
}
if (is.vector(y)) {
y <- matrix(y, ncol = 1)
}
if (any(is.na(x))) stop("missing values in 'x'")
if (any(is.na(y))) stop("missing values in 'y'")
if (dim(x)[1] != dim(y)[1]) stop("nrows of 'x' and 'y' must match")
n_patterns <- dim(x)[1]
n_inputs <- dim(x)[2]
n_outputs <- dim(y)[2]
# Set up network
net <- list()
class(net) <- "ann"
if (size == 0) {
net$layers <- 2
net$nodes <- c(n_inputs, n_outputs)
net$act_fn <- c("linear", act_out)
net$nwts <- as.integer( (net$nodes[1] + 1) * net$nodes[2])
} else {
net$layers <- 3
net$nodes <- c(n_inputs, size, n_outputs)
net$act_fn <- c("linear", act_hid, act_out)
net$nwts <- as.integer( (net$nodes[1] + 1) * net$nodes[2] +
(net$nodes[2] + 1) * net$nodes[3])
}
net$bias <- rep(1, net$layers)
if (is.null(Wts)) {
if (rang > 0) {
wts <- runif(net$nwts, -rang, rang)
} else {
wts <- rep(0, net$nwts)
}
} else {
wts <- Wts
}
if (length(wts) != net$nwts) stop("weights vector of incorrect length")
# ----------
objfn_1 <- function(par, y, obj_fn, ...) {
y_hat <- array(0, dim = c(n_patterns, n_outputs))
    Zin <- Z <- vector("list", net$layers)
# Forward propagation of info through network ---
# For each layer determine input and output to/from nodes
Zin[[1]] <- as.matrix(x)
Z[[1]] <- as.matrix(x)
par_ind <- 0
for (i in 2:net$layers) {
i_ind <- rep(1:net$nodes[i], each = net$nodes[i - 1])
j_ind <- rep(1:net$nodes[i - 1], net$nodes[i])
par_ind <- max(par_ind) + net$nodes[i] * (j_ind - 1) + i_ind
Zin[[i]] <- Z[[i - 1]] %*% t(matrix(par[par_ind],
ncol = net$nodes[i - 1],
byrow = TRUE))
# add bias terms
par_ind <- max(par_ind) + 1:net$nodes[i]
Zin[[i]] <- Zin[[i]] + matrix(rep(net$bias[i] * par[par_ind],
nrow(Zin[[i]])),
ncol = net$nodes[i],
byrow = TRUE)
Z[[i]] <- actfn(Zin[[i]], net$act_fn[i])
}
# Calculate model output
y_hat <- Z[[net$layers]]
    # Evaluation of objective function: when additional objfn parameters are
    # present, pass their current estimates from the optimised parameter
    # vector so they are fitted along with the weights (assumes par_of is
    # the only extra argument, as in the AR(1) example)
    if (parof_present) {
      par_of <- par[(net$nwts + 1):length(par)]
      objfn(y, y_hat, par_of)
    } else {
      obj_fn(y, y_hat)
    }
}
# ----------
if(parof_present) {
par <- c(wts, par_of)
} else {
par <- wts
}
tmp <- optim(par = par, fn = objfn_1, y = y, obj_fn = obj_fn,
method = method,
control = list(maxit = maxit, abstol = abstol, reltol = reltol,
trace = trace, REPORT = 20))
net$value <- tmp$value
net$wts <- tmp$par[1:(net$nwts)]
if(parof_present) net$par_of <- tmp$par[(net$nwts + 1):length(tmp$par)]
net$convergence <- tmp$convergence
tmp <- predict(net, x, derivs = TRUE)
net$fitted.values <- tmp$values
net$derivs <- tmp$derivs
tmp <- as.matrix(y - tmp$values)
dimnames(tmp) <- list(rownames(x), colnames(y))
net$residuals <- tmp
net$call <- match.call()
net
}
#-------------------------------------------------------------------------------
#' @title Predict new examples using a trained neural network.
#'
#' @description Predict new examples using a trained neural network.
#' @param object an object of class `ann' as returned by function \code{ann}.
#' @param newdata matrix, data frame or vector of input data.
#' A vector is considered to comprise examples of a single input or
#' predictor variable. If \code{newdata} is \code{NULL}, fitted outputs
#' derived from \code{object} will be returned.
#' @param derivs logical; should derivatives of hidden and output nodes be
#' returned? Default is \code{FALSE}.
#' @param \dots additional arguments affecting the predictions produced (not
#' currently used).
#' @return if \code{derivs = FALSE}, a vector of predictions is returned.
#'
#' Otherwise, a list with the following components is returned:
#' \item{values}{matrix of values returned by the trained ANN.}
#' \item{derivs}{matrix of derivatives of hidden (columns \code{1:object$size})
#' and output (final column) nodes.}
#' @details This function is a method for the generic function \code{predict()}
#' for class `ann'. It can be invoked by calling \code{predict(x)} for an
#' object \code{x} of class `ann'.
#'
#' \code{predict.ann} produces predicted values, obtained by evaluating the
#' `ann' model given \code{newdata}, which contains the inputs to be used
#' for prediction. If \code{newdata} is omitted, the
#' predictions are based on the data used for the fit.
#'
#' Derivatives may be returned for sensitivity analyses, for example.
#'
#' @seealso \code{\link{ann}}
#'
#' @examples
#' ## fit 1-hidden node `ann' model to ar9 data
#' data("ar9")
#' samp <- sample(1:1000, 200)
#' y <- ar9[samp, ncol(ar9)]
#' x <- ar9[samp, -ncol(ar9)]
#' x <- x[, c(1,4,9)]
#'
#' fit <- ann(x, y, size = 1, act_hid = "tanh", act_out = "linear", rang = 0.1)
#'
#' ## get model predictions based on a new sample of ar9 data.
#' samp <- sample(1:1000, 200)
#' y <- ar9[samp, ncol(ar9)]
#' x <- ar9[samp, -ncol(ar9)]
#' x <- x[, c(1,4,9)]
#'
#' sim <- predict(fit, newdata = x)
#'
#' ## if derivatives are required...
#' tmp <- predict(fit, newdata = x, derivs = TRUE)
#' sim <- tmp$values
#' derivs <- tmp$derivs
#' @export
# ----
predict.ann <- function(object, newdata = NULL, derivs = FALSE, ...) {
  # Description: This function propagates newdata forward through the
  #              fitted network to produce predictions and, optionally,
  #              node derivatives
  #-----
if (!inherits(object, "ann"))
stop("object not of class \"ann\"")
if (is.null(newdata)) {
y_hat <- fitted(object)
return(y_hat)
} else {
x <- newdata
if (is.vector(x)) {
x <- matrix(x, ncol = 1)
}
n_patterns <- dim(x)[1]
n_inputs <- dim(x)[2]
x <- matrix(unlist(x), ncol = n_inputs, nrow = n_patterns)
if (any(is.na(x))) stop("missing values in 'x'")
n_outputs <- object$nodes[object$layers]
y_hat <- array(0, dim = c(n_patterns, n_outputs))
    Zin <- Z <- vector("list", object$layers)
# Forward propagation of info through network ---
# For each layer determine input and output to/from nodes
Zin[[1]] <- x
Z[[1]] <- x
par_ind <- 0
for (i in 2:object$layers) {
i_ind <- rep(1:object$nodes[i], each = object$nodes[i - 1])
j_ind <- rep(1:object$nodes[i - 1], object$nodes[i])
par_ind <- max(par_ind) + object$nodes[i] * (j_ind - 1) + i_ind
Zin[[i]] <- Z[[i - 1]] %*% t(matrix(object$wts[par_ind],
ncol = object$nodes[i - 1],
byrow = TRUE))
# add bias terms
par_ind <- max(par_ind) + 1:object$nodes[i]
Zin[[i]] <- Zin[[i]] + matrix(rep(object$bias[i] * object$wts[par_ind],
nrow(Zin[[i]])),
ncol = object$nodes[i],
byrow = TRUE)
Z[[i]] <- actfn(Zin[[i]], object$act_fn[i])
}
# Calculate model output and error
y_hat <- Z[[object$layers]]
if(derivs) {
z_hat <- NULL
o_hat <- NULL
if (object$layers == 3)
z_hat <- der_actfn(Zin[[2]], object$act_fn[2])
if (object$layers == 3) {
o_hat <- der_actfn(Zin[[3]], object$act_fn[3])
} else {
o_hat <- der_actfn(Zin[[2]], object$act_fn[2])
}
return(list(values = y_hat, derivs = cbind(z_hat, o_hat)))
} else {
return(y_hat)
}
}
}
# -------------------------------------------------------------------------------
#' @title Return observed target values.
#' @description
#' Return observed target values used for fitting `ann' or `nnet' ANN models.
#' @param object an object of class `ann' as returned by \code{\link{ann}} or
#' of class `nnet' as returned by \code{\link[nnet]{nnet}}.
#' @return a 1-column matrix of observed target values.
#' @details This function can be invoked by calling \code{observed(x)} for an
#' object \code{x} of class `ann' or `nnet'.
#'
#' @examples
#' # Get observed values of y used to train ann object `fit'.
#' # ---
#' data("ar9")
#' samp <- sample(1:1000, 200)
#' y <- ar9[samp, ncol(ar9)]
#' x <- ar9[samp, -ncol(ar9)]
#' x <- x[, c(1,4,9)]
#' fit <- ann(x, y, size = 1, act_hid = "tanh", act_out = "linear", rang = 0.1)
#' y_obs <- observed(fit)
#' @export
# --------
observed <- function(object) {
UseMethod("observed")
}
# -----------
#' @export
observed.ann <- function(object) {
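  # reconstruct the targets: residuals are stored as obs - fitted, so
  # obs = fitted + residuals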
fitted(object) + residuals(object)
}
# -----------
#' @export
observed.nnet <- function(object) {
fitted(object) + residuals(object)
}
#-------------------------------------------------------------------------------
print.ann <- function(x, ...) {
if (!inherits(x, "ann")) stop("not a legitimate neural net fit")
if (x$layers == 3) {
cat("a ", x$nodes[1L], "-", x$nodes[2L], "-", x$nodes[3L],
" network", sep = "")
cat(" with", length(x$wts), "weights\n")
cat(x$act_fn[2L], "hidden units\n")
cat(x$act_fn[3L], "output units\n")
} else {
cat("a ", x$nodes[1L], "-", x$nodes[2L],
" network (no hidden layer)", sep = "")
cat(" with", length(x$wts), "weights\n")
cat(x$act_fn[2L], "output units\n")
}
cat("\n")
invisible(x)
}
#-------------------------------------------------------------------------------
actfn <- function(x, method = c("tanh", "sigmoid", "linear", "exp")) {
if (method == "tanh") {
val <- tanh(x)
} else if (method == "sigmoid") {
val <- 1 / (1 + exp(-x))
} else if (method == "linear") {
val <- x
} else if (method == "exp") {
val <- exp(x)
} else {
stop("Invalid activation fn : Must be \"tanh\", \"sigmoid\", \"linear\" or \"exp\".")
}
return(val)
}
#-------------------------------------------------------------------------------
der_actfn <- function(x, method = c("tanh", "sigmoid", "linear", "exp")) {
if (method == "tanh") {
val <- (1 / cosh(x)) ^ 2
} else if (method == "sigmoid") {
val <- actfn(x, "sigmoid") * (1 - actfn(x, "sigmoid"))
} else if (method == "linear") {
val <- matrix(rep(1, dim(x)[1] * dim(x)[2]), ncol = ncol(x))
} else if (method == "exp") {
val <- exp(x)
} else {
stop("Invalid activation fn : Must be \"tanh\", \"sigmoid\", \"linear\" or \"exp\".")
}
return(val)
}
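#-------------------------------------------------------------------------------
# Hypothetical helper (not part of the original package): sanity-check the
# analytical derivatives returned by der_actfn() against central finite
# differences of actfn(). Useful when adding a new activation function.
check_actfn_derivs <- function(x = matrix(seq(-2, 2, by = 0.5), ncol = 1),
                               h = 1e-6, tol = 1e-6) {
  for (fn in c("tanh", "sigmoid", "linear", "exp")) {
    analytic <- der_actfn(x, fn)
    numeric <- (actfn(x + h, fn) - actfn(x - h, fn)) / (2 * h)
    if (max(abs(analytic - numeric)) > tol) {
      stop("derivative check failed for activation: ", fn)
    }
  }
  invisible(TRUE)
}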
#-------------------------------------------------------------------------------
|
/scratch/gouwar.j/cran-all/cranData/validann/R/ann.R
|
#' @title Data generated by autoregressive AR9 model.
#'
#' @description
#' Synthetically generated dataset containing values of dependent variable
#' \code{x_t} given values of \cr
#' \code{x_t-1, x_t-2, ..., x_t-15}.
#'
#' @details
#' This dataset was generated using the AR9 model first described in
#' Sharma (2000) and given by:
#'
#' \eqn{x_{t} = 0.3x_{t-1} - 0.6x_{t-4} - 0.5x_{t-9} + \epsilon_{t}}{%
#' x_t = 0.3x_t-1 - 0.6x_t-4 - 0.5x_t-9 + \epsilon_t}
#'
#' where \eqn{\epsilon_{t}}{\epsilon_t} is a normally distributed random
#' error term.
#'
#' @format A data frame with 1000 rows and 16 variables:
#' \describe{
#'   \item{x_t-1, x_t-2, ..., x_t-15}{lagged values of x_t in columns 1:15}
#' \item{x_t}{dependent variable in column 16}
#' }
#'
#' @references Sharma, A. (2000), Seasonal to interannual rainfall
#' probabilistic forecasts for improved water supply management: Part 1 -
#' a strategy for system predictor identification, Journal of Hydrology,
#' 239(1-4), 232-239, \url{http://dx.doi.org/10.1016/S0022-1694(00)00346-2}.
"ar9"
|
/scratch/gouwar.j/cran-all/cranData/validann/R/ar9.R
|
#--------------------------------------------------
#' @title Plot ANN validation results.
#'
#' @description Plot method for objects of class `validann'. Produces a series
#' of plots used for validating and assessing ANN models based on results
#' returned by \code{\link{validann}}.
#'
#' @param x object of class `validann' as returned
#' by \code{\link{validann}}. This is a list comprising metrics and
#' statistics that can be used for validating ANN models.
#' @param obs,sim vectors comprising observed (\code{obs}) and simulated
#' (\code{sim}) examples of a single response variable that were used to
#' compute the \code{x} object.
#' @param gof logical; should goodness-of-fit plots be produced?
#' Default = TRUE.
#' @param resid logical; should residual analysis plots be produced?
#' Default = TRUE.
#' @param sa logical; should input sensitivity analysis plots be
#' produced? Default = TRUE.
#' @param display character string defining how plots should be
#' displayed. The default is ``multi'' where multiple plots are displayed
#' together according to whether they are goodness-of-fit, residual analysis
#' or sensitivity analysis plots. For ``single'', each plot is displayed on
#' its own. If the session is interactive, the user will be asked to confirm
#' each new page, regardless of whether \code{display} is ``single'' or
#' ``multi''.
#' @param profile character string defining which outputs of the `Profile'
#' structural validity method should be plotted. The default is ``all'',
#' where outputs corresponding to 5 summary statistics are plotted together
#' with the median predicted response for each input value.
#' For ``median'', only the median response is plotted.
#' @param \dots Arguments to be passed to plot (not currently used).
#' @details This function can be invoked by calling
#' \code{plot(x, obs, sim)} for an object \code{x} of class
#' `validann'.
#'
#' To produce plots for all types of validation metrics and statistics,
#' \code{gof}, \code{resid} and \code{sa} must be
#' \code{TRUE} and corresponding results must have been successfully
#' computed by \code{\link{validann}} and returned in object \code{x}.
#'
#' If \code{gof} is \code{TRUE}, a scatter plot, Q-Q plot and
#' time/sample plot of observed (\code{obs}) versus predicted (\code{sim})
#' data are produced.
#'
#' If \code{resid} is \code{TRUE} and \code{x$residuals}
#' is not \code{NULL}, plots of the model residuals are produced including
#' histogram, Q-Q plot (standardized residuals compared to standard normal),
#' autocorrelation (acf), partial autocorrelation (pacf), standardized
#' residual versus predicted output (i.e. \code{sim}) and standardized
#' residual versus time/order of the data.
#'
#' If \code{sa} is \code{TRUE} and \code{x$y_hat} is not
#' \code{NULL}, model response values resulting from the Profile
#' sensitivity analysis are plotted against percentiles of each
#' input. If \code{x$rs} is not \code{NULL}, the relative sensitivities of
#' each input, as computed by the partial derivative (PaD) sensitivity
#' analysis, are plotted against predicted output.
#'
#' Setting \code{gof}, \code{resid} and/or \code{sa} to \code{FALSE}
#' will `turn off' the respective validation plots.
#'
#' @seealso \code{\link{validann}}
#' @examples
#' ## Build ANN model and compute replicative and structural validation results
#' data("ar9")
#' samp <- sample(1:1000, 200)
#' y <- ar9[samp, ncol(ar9)]
#' x <- ar9[samp, -ncol(ar9)]
#' x <- x[, c(1,4,9)]
#' fit <- ann(x, y, size = 1, act_hid = "tanh", act_out = "linear", rang = 0.1)
#' results <- validann(fit, x = x)
#' obs <- observed(fit)
#' sim <- fitted(fit)
#'
#' ## Plot replicative and structural validation results to the current device
#' ## - a single page for each type of validation
#' plot(results, obs, sim)
#'
#' ## Plot results to the current device - a single page for each plot
#' plot(results, obs, sim, display = "single")
#'
#' ## Plot replicative and structural validation results to single file
#' pdf("RepStructValidationPlots.pdf")
#' plot(results, obs, sim)
#' dev.off()
#'
#' ## Get predictive validation results for above model based on a new sample
#' ## of ar9 data.
#' samp <- sample(1:1000, 200)
#' y <- ar9[samp, ncol(ar9)]
#' x <- ar9[samp, -ncol(ar9)]
#' x <- x[, c(1,4,9)]
#' obs <- y
#' sim <- predict(fit, newdata = x)
#' results <- validann(fit, obs = obs, sim = sim, x = x)
#'
#' ## Plot predictive results only to file
#' pdf("PredValidationPlots.pdf")
#' plot(results, obs, sim, resid = FALSE, sa = FALSE)
#' dev.off()
#'
#' @export
#' @importFrom grDevices colors
#' @importFrom grDevices dev.interactive
#' @importFrom grDevices devAskNewPage
#' @importFrom graphics abline
#' @importFrom graphics hist
#' @importFrom graphics layout
#' @importFrom graphics legend
#' @importFrom graphics lines
#' @importFrom graphics par
#' @importFrom graphics plot
#' @importFrom graphics points
#' @importFrom graphics title
#' @importFrom stats acf
#' @importFrom stats pacf
#' @importFrom stats dnorm
#' @importFrom stats qnorm
#' @importFrom stats qqnorm
#' @importFrom stats qqplot
#--------------------------------------------------
plot.validann <- function(x, obs, sim, gof = TRUE, resid = TRUE, sa = TRUE,
display = c("multi", "single"),
profile = c("all", "median"), ...) {
# ask <- devAskNewPage(TRUE)
# on.exit(devAskNewPage(ask))
if (is.null(obs) & is.null(sim)) {
stop("'obs' and 'sim' objects required.")
}
display <- match.arg(NULL, display)
profile <- match.arg(NULL, profile)
if (display == "single" && dev.interactive()) devAskNewPage(ask = TRUE)
# Goodness-of-fit plots
# ----
if (gof == TRUE) {
if (is.null(obs)) {
message1 <- "'obs' data missing :"
message2 <- "Goodness-of-fit plots will not be produced."
warning(message1, message2, call. = FALSE, immediate. = FALSE)
} else if (is.null(sim)) {
message1 <- "'sim' data missing :"
message2 <- "Goodness-of-fit plots will not be produced."
warning(message1, message2, call. = FALSE, immediate. = FALSE)
} else {
if (display == "multi") {
m <- rbind(c(1, 2), c(3, 3))
layout(m)
} else {
m <- c(1, 1)
layout(m)
}
par(oma = c(0, 0, 1, 0), mar = c(4, 4, 3, 0.3))
# scatterplot - obs vs sim
min_plot <- min(obs, sim)
max_plot <- max(obs, sim)
plot(x = obs, y = sim, type = "p", pch = 21, col = "black",
bg = colors()[240], xlim = c(min_plot, max_plot),
ylim = c(min_plot, max_plot), xlab = "Observed", ylab = "Predicted",
main = "Scatter Plot")
abline(a = 0, b = 1, col = "red", lty = "dashed")
# qq plot of obs v sim
qqplot(obs, sim, pch = 21, col = "black", bg = colors()[240],
xlim = c(min_plot, max_plot), ylim = c(min_plot, max_plot),
xlab = "Observed", ylab = "Predicted",
main = "Q-Q Plot")
abline(a = 0, b = 1, col = "red", lty = "dashed")
nsamps <- length(obs)
plot(x = 1:nsamps, y = obs, type = "p", pch = 23, col = "black",
bg = "black", ylim = c(min_plot, max_plot),
xlab = "Sample", ylab = "Value")
points(x = 1:nsamps, y = sim, pch = 23, col = "black",
bg = colors()[240], cex = 0.8)
title(main = "Observed Vs Predicted", line = 2)
# add legend
par(oma = c(0, 0, 1, 0), mar = c(0, 0, 1, 0), new = TRUE)
plot(0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n")
legend(x = "top", legend = c("Observed", "Predicted"),
pch = c(23, 23), col = c("black", "black"),
pt.bg = c("black", colors()[240]), pt.cex = c(1, 0.8),
horiz = TRUE, bty = "n", inset = c(0, 0), xpd = TRUE)
if (display == "multi") {
if (dev.interactive()) devAskNewPage(ask = TRUE)
title(main = "Goodness-of-fit", outer = TRUE)
}
}
}
# Residual analysis plots
# ----
if(resid == TRUE) {
if (is.null(sim)) {
message1 <- "'sim' data missing :"
message2 <- "Residual analysis plots will not be produced."
warning(message1, message2, call. = FALSE, immediate. = FALSE)
} else {
if(display == "multi") {
m <- rbind(c(1, 2), c(3, 4), c(5, 6))
layout(m)
} else {
m <- c(1, 1)
layout(m)
}
par(oma = c(0, 0, 1, 0), mar = c(4, 4, 3, 0.3))
# residuals histogram
tmp_hist <- hist(x$residuals, plot = FALSE)
tmp_norm <- dnorm(x$residuals, mean = 0,
sd = x$resid_stats$sd)
ymax <- max(tmp_hist$density, tmp_norm) * 1.2
plot(x = tmp_hist$mids, y = tmp_hist$density, type = "h", lwd = 30,
lend = 2, col = colors()[240], ylim = c(0, ymax), yaxs = "i",
xlab = "Residual", ylab = "Density", main = "Residuals Histogram")
lines(x = sort(x$residuals), y = tmp_norm[order(x$residuals)],
col = "red", lty = "dashed")
if (display == "multi") devAskNewPage(ask = FALSE)
# qq plot of residual vs normal distribution
sd_err <- (x$residuals - x$resid_stats$mean) /
x$resid_stats$sd
qqnorm(sd_err, pch = 21, col = "black", bg = colors()[240],
xlab = "Standard Normal Quantiles", ylab = "Standardized Residual",
main = "Residual Q-Q Plot")
abline(a = 0, b = 1, col = "red", lty = "dashed")
# residual autocorrelation plots
acf_tmp <- acf(x$residuals, plot = FALSE)
pacf_tmp <- pacf(x$residuals, plot = FALSE)
clim <- qnorm((1 + 0.95) / 2) / sqrt(acf_tmp$n.used)
ylim <- range(c(-clim, clim, acf_tmp$acf[, 1, 1]))
plot(acf_tmp$lag[, 1, 1], acf_tmp$acf[, 1, 1], type = "h", ylim = ylim,
xlab = "Lag", ylab = "ACF", main = "Residual Autocorrelation")
abline(h = 0)
abline(h = c(clim, -clim), col = "blue", lty = 2)
ylim <- range(c(-clim, clim, pacf_tmp$acf[, 1, 1]))
plot(pacf_tmp$lag[, 1, 1], pacf_tmp$acf[, 1, 1], type = "h", ylim = ylim,
xlab = "Lag", ylab = "Partial ACF",
main = "Residual Partial-Autocorrelation")
abline(h = 0)
abline(h = c(clim, -clim), col = "blue", lty = 2)
# Standardised residuals vs simulated
plot(x = sim, y = sd_err, type = "p", pch = 21, col = "black",
cex = 0.8, bg = colors()[240], xlab = "Predicted Value",
ylab = "Standardized Residual",
main = "Residuals Vs Simulated")
abline(h = 0, lty = "dashed", col = "red")
abline(h = -1.96, lty = "dashed", col = "blue")
abline(h = 1.96, lty = "dashed", col = "blue")
# Standardised residuals vs 'time'
plot(x = 1:length(sd_err), y = sd_err, type = "p", pch = 21,
col = "black", cex = 0.8, bg = colors()[240],
xlab = "Order", ylab = "Standardized Residual",
main = "Residuals Vs Order/Time")
abline(h = 0, lty = "dashed", col = "red")
abline(h = -1.96, lty = "dashed", col = "blue")
abline(h = 1.96, lty = "dashed", col = "blue")
if (display == "multi") {
if (dev.interactive()) devAskNewPage(ask = TRUE)
title(main = "Residual analysis", outer = TRUE)
}
}
}
# Sensitivity analysis plots
# ----
if(sa == TRUE) {
if (is.null(x$y_hat) && is.null(x$rs)) {
message1 <-
"Sensitivity analysis results missing : "
message2 <- "Sensitivity analysis plots will not be produced."
warning(message1, message2, call. = FALSE, immediate. = FALSE)
    } else {
      # ninputs: the Profile results in y_hat have 6 columns per input; fall
      # back to the PaD results in rs if Profile results are unavailable
      if (!is.null(x$y_hat)) {
        ninputs <- ncol(x$y_hat) / 6
      } else {
        ninputs <- ncol(x$rs)
      }
    }
if (!is.null(x$y_hat)) {
if (display == "multi") {
rem <- ninputs %% 2
rep <- ninputs %/% 2
if (rem > 0) rep <- rep + 1
rep <- min(rep, 3)
m <- matrix(1:(rep * 2), ncol = 2, byrow = TRUE)
layout(m)
} else {
m <- c(1, 1)
layout(m)
rep <- 1
}
par(oma = c(0, 0, 1, 3), mar = c(4, 4, 3, 0.3))
cols <- c("blue", "red", "gold", "magenta", "turquoise", "black")
for (i in 1:ninputs) {
y_hat <- x$y_hat[, (i - 1) * 6 + 1:6]
miny <- min(y_hat)
maxy <- max(y_hat) + 0.3 * (max(y_hat) - miny)
p_name <- colnames(y_hat)[1]
p_name <- substr(p_name, 1, nchar(p_name) - 2)
plot(x = seq(0, 100, by = 1), y = y_hat[, 6], type = "l",
col = "black",
ylim = c(miny, maxy), ylab = "Predicted Response",
xlab = paste("Percentile of Input:", p_name),
main = "")
title(main = p_name, line = 1)
if(profile == "all") {
for(j in 1:6) {
lines(x = seq(0, 100, by = 1), y = y_hat[, j], col = cols[j])
}
legend("top",
legend = c("Min.", "25%", "50%",
"75%", "Max.", "Median"),
cex = 0.6 + rep * 0.1, pch = 20, col = cols,
horiz = TRUE, bty = "n", xpd = NA)
}
if (dev.interactive() && (i %% (rep * 2) == 0)) {
devAskNewPage(ask = TRUE)
} else if (dev.interactive() && i == ninputs) {
devAskNewPage(ask = TRUE)
} else if (dev.interactive() && display == "single") {
devAskNewPage(ask = TRUE)
} else {
devAskNewPage(ask = FALSE)
}
title(main = "Sensitivity analysis - Profile", outer = TRUE)
}
}
if (!is.null(x$rs)) {
if (display == "multi") {
rem <- ninputs %% 2
rep <- ninputs %/% 2
if (rem > 0) rep <- rep + 1
rep <- min(rep, 3)
m <- matrix(1:(rep * 2), ncol = 2, byrow = TRUE)
layout(m)
      } else {
        m <- c(1, 1)
        layout(m)
        rep <- 1
      }
par(oma = c(0, 0, 1, 0), mar = c(4, 4, 3, 0.3))
yrange <- c(min(x$rs), max(x$rs))
for (i in 1:ninputs) {
plot(x = obs, y = x$rs[, i], type = "p", ylim = yrange,
xlab = "Observed Response Value", ylab = "Relative Sensitivity",
main = colnames(x$rs)[i])
if (dev.interactive() && (i %% (rep * 2) == 0)) {
devAskNewPage(ask = TRUE)
} else if (dev.interactive() && i == ninputs) {
devAskNewPage(ask = TRUE)
} else if (dev.interactive() && display == "single") {
devAskNewPage(ask = TRUE)
} else {
devAskNewPage(ask = FALSE)
}
title(main = "Sensitivity analysis - PaD - Relative", outer = TRUE)
}
par(oma = c(0, 0, 1, 0), mar = c(4, 4, 3, 0.3))
yrange <- c(min(x$as), max(x$as))
for (i in 1:ninputs) {
plot(x = obs, y = x$as[, i], type = "p", ylim = yrange,
xlab = "Observed Response Value", ylab = "Absolute Sensitivity",
main = colnames(x$as)[i])
if (dev.interactive() && (i %% (rep * 2) == 0)) {
devAskNewPage(ask = TRUE)
} else if (dev.interactive() && i == ninputs) {
devAskNewPage(ask = TRUE)
} else if (dev.interactive() && display == "single") {
devAskNewPage(ask = TRUE)
} else {
devAskNewPage(ask = FALSE)
}
title(main = "Sensitivity analysis - PaD - Absolute", outer = TRUE)
}
}
}
devAskNewPage(ask = FALSE)
}
#-------------------------------------------------------------------------------
|
/scratch/gouwar.j/cran-all/cranData/validann/R/plot.validann.R
|
#--------------------------------------------------
#' @title Validate Artificial Neural Networks.
#'
#' @description Compute metrics and statistics for predictive, replicative
#' and/or structural validation of artificial neural networks (ANNs).
#'
#' @param net an object of class `ann' (as returned by function
#' \code{\link{ann}}) or `nnet' (as returned using \code{\link[nnet]{nnet}}).
#' This is a list object comprising information about the fitted ANN model,
#' including values of weights, fitted target values, number of layers and
#' numbers of nodes in each layer, for example.
#' @param obs,sim vectors comprising observed (\code{obs}) and simulated
#' (\code{sim}) examples of a single response variable. These vectors are
#' used to compute model fit statistics. Optional if \code{net} is supplied
#' (see `Details').
#' @param x matrix, data frame or vector of input data used for
#' fitting \code{net} object. A vector is considered to comprise examples of
#' a single input or predictor variable. While \code{x} is optional,
#' sensitivity analyses useful for structural validation cannot be performed
#' if it is not supplied.
#' @param wts vector of ANN weights used to compute input
#' `relative importance' measures if \code{net} object is not supplied. Must
#' be supplied together with \code{nodes} in order to compute such metrics.
#' See `Details' for ordering of \code{wts} vector.
#' @param nodes vector indicating the number of nodes in each layer
#' of the ANN model. This vector should have 3 elements: nodes in input
#' layer, nodes in hidden layer (can be 0), and nodes in output layer.
#' If \code{net} object is not supplied, \code{nodes} must be supplied
#' together with \code{wts} if any structural validation metrics are to be
#' computed.
#' @param na.rm logical; should missing values (including NaN)
#' be removed from calculations? Default = TRUE.
#' @param \dots arguments to be passed to different validann methods,
#' see specific formulations for details.
#' @return list object of class `validann' with components dependent on
#' arguments passed to \code{validann} function:
#'
#' \item{metrics}{a data frame consisting of metrics:
#'
#' AME, PDIFF, MAE, ME, RMSE, R4MS4E, AIC, BIC, NSC, RAE, PEP, MARE,
#' MdAPE, MRE, MSRE, RVE, RSqr, IoAd, CE, PI, MSLE, MSDE, IRMSE, VE,
#' KGE, SSE and R.
#'
#' See Dawson et al. (2007) for definitions.}
#' \item{obs_stats}{a data frame consisting of summary statistics about the
#' \code{obs} dataset including mean, minimum, maximum, variance,
#' standard deviation, skewness and kurtosis.}
#' \item{sim_stats}{a data frame consisting of summary statistics about the
#' \code{sim} dataset including mean, minimum, maximum, variance,
#' standard deviation, skewness and kurtosis.}
#' \item{residuals}{a 1-column matrix of model residuals (\code{obs - sim}).}
#' \item{resid_stats}{a data frame consisting of summary statistics about the
#' model \code{residuals} including mean, minimum, maximum, variance,
#' standard deviation, skewness and kurtosis.}
#' \item{ri}{a data frame consisting of `relative importance' values for each
#' input. Only returned if \code{net} or \code{wts} and \code{nodes} are
#' supplied.
#'
#' If \code{net} is supplied, relative importance values computed using the
#' following 4 methods are returned:
#'
#' Garson's (Garson); connection weight (CW); Profile sensitivity
#' analysis (Profile); and partial derivative sensitivity analysis (PaD).
#'
#' In addition, if \code{net} is of class `ann' (as returned by function
#' \code{\link{ann}}) and the activation function used at the hidden
#' layer (\code{act_hid}) is "tanh", relative importance
#' values computed using the modified CW (MCW) are also returned.
#' This method requires that the hidden layer activation function be
#' symmetric about the origin.
#'
#' If \code{wts} and \code{nodes} are supplied, only relative importance
#' values computed using the Garson and CW methods are returned.
#'
#' See Gevrey et al. (2003), Olden et al. (2004) and Kingston et al. (2006)
#' for details of the relative importance methods.}
#' \item{y_hat}{a matrix of dimension \code{c(101, ncol(x) * 6)} of model
#' response values indicating the local sensitivity of the model to each
#' input in \code{x}. Only returned if \code{net} and \code{x} are supplied.
#'
#' The response values returned in \code{y_hat} are calculated using the
#' `Profile' sensitivity analysis method described in Gevrey et al. (2003).
#' Using this method, the local sensitivity of each input in \code{x} is
#' considered successively. For each input \code{x[,i]}, 5 synthetic data
#' sets are generated where inputs \code{x[,-i]} are successively fixed at
#' their minimum, 1st quartile, median, 3rd quartile and maximum values
#' (as calculated from \code{x}), while input \code{x[,i]} is varied between
#' its minimum and maximum value, increasing in increments of 1\% (giving
#' 101 synthetic values of \code{x[,i]} for each of the 5 sets of fixed
#' \code{x[,-i]}). These data are input into \code{net} and model response
#' values corresponding to the 5 summary statistics are computed.
#' These 5 sets of response values, together with a set of computed median
#' responses, are returned as y_hat[,(i - 1) * 6 + 1:6]. This process is
#' repeated for each input variable in \code{x}. See Gevrey et al. (2003)
#' for further details.}
#' \item{as}{a matrix of dimension \code{dim(x)} of `absolute sensitivity'
#' values for each input in \code{x} given the model output values
#' (i.e. \code{sim}). Only returned if \code{net} and \code{x} are
#' supplied and \code{net} is of class `ann'.
#'
#' The values in \code{as} are calculated according to the partial
#' derivative (PaD) sensitivity analysis method described in Gevrey et al.
#' (2003), which involves computing the first-order partial derivatives of
#' the ANN output with respect to each input. \code{net} must be of class
#' `ann' in order to access partial derivatives of the hidden layer nodes as
#' returned by \code{\link{ann}}.}
#' \item{rs}{a matrix of dimension \code{dim(x)} of `relative sensitivity'
#' values for each input in \code{x} given the model output values
#' (i.e. \code{sim}). Only returned if \code{net} and \code{x} are
#' supplied and \code{net} is of class `ann'.
#'
#' To compute the values in \code{rs}, the \code{as} values are normalised
#' by multiplying by \code{x[,i]}/\code{sim} as described in Mount et al.
#' (2013). As for \code{as}, \code{net} must be of class
#' `ann' in order to access partial derivatives of the hidden layer nodes as
#' returned by \code{\link{ann}}.}
#' @details To compute all possible validation metrics and statistics,
#' \code{net} must be supplied and must be of class `ann' (as returned by
#' \code{\link{ann}}) or `nnet' (as returned by \code{\link[nnet]{nnet}}).
#' However, a partial derivative (PaD) sensitivity analysis (useful for
#' structural validation) will only be carried out if \code{net} is of class
#' `ann'.
#'
#' If \code{obs} and \code{sim} data are supplied in addition to \code{net},
#' validation metrics are computed based on these. Otherwise, metrics and
#' statistics are computed based on \code{obs} and \code{sim} datasets
#' derived from the \code{net} object (i.e. the data used to fit \code{net}
#' and the fitted values). As such, both \code{obs} and \code{sim} must be
#' supplied if validation is to be based either on data not used for
#' training or on unprocessed training data (if training data were
#' preprocessed). If either \code{obs} or \code{sim} is specified but the
#' other isn't, both \code{obs} and \code{sim} will be derived from
#' \code{net} if supplied (and a warning will be given). Similarly, this
#' will occur if \code{obs} and \code{sim} are of different lengths.
#'
#' If \code{net} is not supplied, both \code{obs} and \code{sim} are
#' required. This may be necessary if validating an ANN model not built
#' using either the \code{\link[nnet]{nnet}} or \code{\link{ann}} functions.
#' In this case, both \code{wts} and \code{nodes} are also required if any
#' structural validation metrics are to be returned. If an ANN model has
#' \emph{K} input nodes, \emph{J} hidden nodes and a single output \emph{O},
#' with a bias node for both the hidden and output layers, the \code{wts} vector must be ordered
#' as follows:
#'
#' \code{c(Wi1h1,Wi1h2,...Wi1hJ,Wi2h1,...Wi2hJ,...,WiKh1,...,WiKhJ,Wi0h1,...,Wi0hJ,}\cr
#' \code{ Wh1O,...,WhJO,Wh0O)}
#'
#' where \code{Wikhj} is the weight between the \emph{k}th input and the
#' \emph{j}th hidden node and \code{WhjO} is the weight between the
#' \emph{j}th hidden node and the output. The bias weight on the \emph{j}th
#' hidden layer node is labelled \code{Wi0hj} while the bias weight on the
#' output is labelled \code{Wh0O}. The \code{wts} vector assumes the network
#' is fully connected; however, missing connections may be substituted by
#' zero weights. Skip-layer connections are not allowed.
#'
#' @references
#' Dawson, C.W., Abrahart, R.J., See, L.M., 2007. HydroTest: A web-based
#' toolbox of evaluation metrics for the standardised assessment of
#' hydrological forecasts. Environmental Modelling & Software, 22(7),
#' 1034-1052. \url{http://dx.doi.org/10.1016/j.envsoft.2006.06.008}.
#'
#' Olden, J.D., Joy, M.K., Death, R.G., 2004. An accurate comparison of
#' methods for quantifying variable importance in artificial neural networks
#' using simulated data. Ecological Modelling 178, 389-397.
#' \url{http://dx.doi.org/10.1016/j.ecolmodel.2004.03.013}.
#'
#' Gevrey, M., Dimopoulos, I., Lek, S., 2003. Review and comparison of methods
#' to study the contribution of variables in artificial neural network
#' models. Ecological Modelling 160, 249-264.
#' \url{http://dx.doi.org/10.1016/S0304-3800(02)00257-0}.
#'
#' Kingston, G.B., Maier, H.R., Lambert, M.F., 2006. Forecasting cyanobacteria
#' with Bayesian and deterministic artificial neural networks, in: IJCNN '06.
#' International Joint Conference on Neural Networks, 2006., IEEE.
#' pp. 4870-4877. \url{http://dx.doi.org/10.1109/ijcnn.2006.247166}.
#'
#' Mount, N.J., Dawson, C.W., Abrahart, R.J., 2013. Legitimising
#' data-driven models: exemplification of a new data-driven mechanistic
#' modelling framework. Hydrology and Earth System Sciences 17, 2827-2843.
#' \url{http://dx.doi.org/10.5194/hess-17-2827-2013}.
#'
#' @seealso \code{\link{ann}}, \code{\link{plot.validann}},
#' \code{\link{predict.ann}}
#' @examples
#' # get validation results for 1-hidden node `ann' model fitted to ar9 data
#' # based on training data.
#' # ---
#' data("ar9")
#' samp <- sample(1:1000, 200)
#' y <- ar9[samp, ncol(ar9)]
#' x <- ar9[samp, -ncol(ar9)]
#' x <- x[, c(1,4,9)]
#'
#' fit <- ann(x, y, size = 1, act_hid = "tanh", act_out = "linear", rang = 0.1)
#' results <- validann(fit, x = x)
#'
#' # get validation results for above model based on a new sample of ar9 data.
#' # ---
#' samp <- sample(1:1000, 200)
#' y <- ar9[samp, ncol(ar9)]
#' x <- ar9[samp, -ncol(ar9)]
#' x <- x[, c(1,4,9)]
#'
#' obs <- y
#' sim <- predict(fit, newdata = x)
#' results <- validann(fit, obs = obs, sim = sim, x = x)
#'
#' # get validation results for `obs' and `sim' data without ANN model.
#' # In this example `sim' is generated using a linear model. No structural
#' # validation of the model is possible, but `wts' are provided to compute the
#' # number of model parameters needed for the calculation of certain
#' # goodness-of-fit metrics.
#' # ---
#' samp <- sample(1:1000, 200)
#' y <- ar9[samp, ncol(ar9)]
#' x <- ar9[samp, -ncol(ar9)]
#' x <- as.matrix(x[, c(1,4,9)])
#' lmfit <- lm.fit(x, y)
#' sim <- lmfit$fitted.values
#' obs <- y
#' results <- validann(obs = obs, sim = sim, wts = lmfit$coefficients)
#'
#' # validann would be called in the same way if the ANN model used to generate
#' # `sim' was not available or was not of class `ann' or `nnet'. Ideally in
#' # this case, however, both `wts' and `nodes' should be supplied such that
#' # some structural validation metrics may be computed.
#' # ---
#' obs <- c(0.257, -0.891, -1.710, -0.575, -1.668, 0.851, -0.350, -1.313,
#' -2.469, 0.486)
#' sim <- c(-1.463, 0.027, -2.053, -1.091, -1.602, 2.018, 0.723, -0.776,
#' -2.351, 1.054)
#' wts <- c(-0.05217, 0.08363, 0.07840, -0.00753, -7.35675, -0.00066)
#' nodes <- c(3, 1, 1)
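#' # for nodes = c(3, 1, 1), the six weights above follow the ordering given
#' # in `Details': Wi1h1, Wi2h1, Wi3h1, Wi0h1 (hidden bias), Wh1O, Wh0O
#' # (output bias)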
#' results <- validann(obs = obs, sim = sim, wts = wts, nodes = nodes)
#'
#' @export
#' @importFrom stats cor
#' @importFrom stats fitted
#' @importFrom stats median
#' @importFrom stats predict
#' @importFrom stats quantile
#' @importFrom stats sd
#' @importFrom stats var
#--------------------------------------------------
validann <- function(...) {
UseMethod("validann")
}
# -------------------------------------------------------------------------------
#' @describeIn validann Compute validation metrics when \code{net}
#' is of class `ann'.
#' @export
validann.ann <- function(net, obs = NULL, sim = NULL, x = NULL,
na.rm = TRUE, ...) {
results <- list()
if (is.null(obs) & is.null(sim)) {
obs <- observed(net)
sim <- fitted(net)
} else if (is.null(obs)) {
obs <- observed(net)
sim <- fitted(net)
message1 <- "'obs' data missing : 'obs' and 'sim' both derived from 'net'"
warning(message1, call. = FALSE, immediate. = FALSE)
} else if (is.null(sim)) {
obs <- observed(net)
sim <- fitted(net)
message1 <- "'sim' data missing : 'obs' and 'sim' both derived from 'net'"
warning(message1, call. = FALSE, immediate. = FALSE)
} else if (length(obs) != length(sim)) {
message1 <- "'obs' and 'sim' must be the same length : "
message2 <- "'obs' and 'sim' both derived from 'net'"
warning(message1, message2, call. = FALSE, immediate. = FALSE)
}
# Goodness-of-fit.
#----
npar <- length(net$wts)
packageStartupMessage("Computing goodness-of-fit...")
results_pred <- predictive_valid(obs, sim, npar, na.rm)
results <- append(results, results_pred)
packageStartupMessage("Done.")
# Residuals analysis.
#----
packageStartupMessage("Residuals analysis...")
results_rep <- replicative_valid(obs, sim, na.rm)
results <- append(results, results_rep)
packageStartupMessage("Done.")
# Structural validation.
#----
if (is.vector(x)) {
x <- matrix(x, ncol = 1)
}
packageStartupMessage("Computing structural validation metrics...")
results_struct <- structural_valid(net, x = x)
results <- append(results, results_struct)
packageStartupMessage("Done.")
class(results) <- "validann"
return(results)
}
# -------------------------------------------------------------------------------
#' @describeIn validann Compute validation metrics when \code{net}
#' is of class `nnet'.
#' @export
validann.nnet <- function(net, obs = NULL, sim = NULL, x = NULL,
na.rm = TRUE, ...) {
results <- list()
if (is.null(obs) & is.null(sim)) {
obs <- observed(net)
sim <- fitted(net)
} else if (is.null(obs)) {
obs <- observed(net)
sim <- fitted(net)
message1 <- "'obs' data missing : 'obs' and 'sim' both derived from 'net'"
warning(message1, call. = FALSE, immediate. = FALSE)
} else if (is.null(sim)) {
obs <- observed(net)
sim <- fitted(net)
message1 <- "'sim' data missing : 'obs' and 'sim' both derived from 'net'"
warning(message1, call. = FALSE, immediate. = FALSE)
} else if (length(obs) != length(sim)) {
message1 <- "'obs' and 'sim' must be the same length : "
message2 <- "'obs' and 'sim' both derived from 'net'"
warning(message1, message2, call. = FALSE, immediate. = FALSE)
}
# Goodness-of-fit.
#----
npar <- length(net$wts)
packageStartupMessage("Computing goodness-of-fit...")
results_pred <- predictive_valid(obs, sim, npar, na.rm)
results <- append(results, results_pred)
packageStartupMessage("Done.")
# Residuals analysis.
#----
packageStartupMessage("Residuals analysis...")
results_rep <- replicative_valid(obs, sim, na.rm)
results <- append(results, results_rep)
packageStartupMessage("Done.")
# Structural validation.
#----
if (is.vector(x)) {
x <- matrix(x, ncol = 1)
}
packageStartupMessage("Computing structural validation metrics...")
results_struct <- structural_valid(net, x = x)
results <- append(results, results_struct)
packageStartupMessage("Done.")
class(results) <- "validann"
return(results)
}
# -------------------------------------------------------------------------------
#' @describeIn validann Useful for predictive validation only or when ANN model
#' has not been developed using either \code{\link{ann}} or
#' \code{\link[nnet]{nnet}}. Limited structural validation metrics may be
#' computed and only if \code{wts} and \code{nodes} are supplied.
#' @export
validann.default <- function(obs, sim, wts = NULL, nodes = NULL,
na.rm = TRUE, ...) {
results <- list()
if (missing(obs) | missing(sim)) {
stop("Required 'obs' or 'sim' data missing")
} else if (length(obs) != length(sim)) {
stop("'obs' and 'sim' must be the same length")
}
if (is.null(wts)) {
if (!is.null(nodes)) {
npar <- (nodes[1] + 1) * nodes[2] + (nodes[2] + 1) * nodes[3]
} else {
npar <- NULL
}
} else {
npar <- length(wts)
}
# Goodness-of-fit.
#----
packageStartupMessage("Computing goodness-of-fit...")
results_pred <- predictive_valid(obs, sim, npar, na.rm)
results <- append(results, results_pred)
packageStartupMessage("Done.")
# Residuals analysis.
#----
packageStartupMessage("Residuals analysis...")
results_rep <- replicative_valid(obs, sim, na.rm)
results <- append(results, results_rep)
packageStartupMessage("Done.")
# Structural validation.
#----
if(!is.null(wts) && !is.null(nodes)) {
packageStartupMessage("Computing structural validation metrics...")
results_struct <- structural_valid(wts = wts, nodes = nodes)
results <- append(results, results_struct)
packageStartupMessage("Done.")
} else {
message1 <- "'wts' and/or 'nodes' not supplied : "
message2 <- "structural validity metrics not computed."
warning(message1, message2, call. = FALSE, immediate. = FALSE)
}
class(results) <- "validann"
return(results)
}
#-------------------------------------------------------------------------------
predictive_valid <- function(obs, sim, npar = NULL, na.rm = TRUE) {
if (is.null(npar)) {
message1 <- "No information on model dimension provided : "
}
rem <- vector()
  if (length(obs) != length(sim)) {
    stop("'obs' and 'sim' must be the same length")
  }
  # check 'obs' and 'sim' independently so that missing values in both
  # vectors are detected
  if (any(is.na(obs))) {
    warning("missing values in 'obs'", call. = FALSE, immediate. = FALSE)
    rem <- which(is.na(obs))
  }
  if (any(is.na(sim))) {
    warning("missing values in 'sim'", call. = FALSE, immediate. = FALSE)
    rem <- unique(c(rem, which(is.na(sim))))
  }
# Remove NA values if na.rm = TRUE
if(na.rm & length(rem) > 0) {
obs <- obs[-rem]
sim <- sim[-rem]
}
  resid <- obs - sim  # residuals defined as observed minus predicted
# Compute metrics from HydroTest
nsamps <- length(obs)
ame <- max(abs(resid))
pdiff <- max(obs) - max(sim)
mae <- sum(abs(resid)) / nsamps
me <- sum(resid) / nsamps
rmse <- sqrt( sum( (resid) ^ 2) / nsamps)
r4ms4e <- ( sum( (resid) ^ 4) / nsamps) ^ (1 / 4)
if (!is.null(npar)) {
aic <- nsamps * log(rmse) + 2 * npar
bic <- nsamps * log(rmse) + npar * log(nsamps)
} else {
aic <- NA
bic <- NA
message3 <- "AIC and BIC not computed."
warning(message1, message3, call. = FALSE, immediate. = FALSE)
}
#-----
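  # number of sign changes (NSC): diff(sign(resid)) is non-zero wherever the
  # sign of consecutive residuals changes, so summing the 0/1 indicator
  # counts zero-crossings of the residual series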
nsc_calc <- function(resid) {
sc <- diff(sign(resid))
sc[sc != 0] <- 1
sum(sc)
}
#-----
nsc <- nsc_calc(resid)
rae <- sum( abs(resid) ) / sum( abs(obs - mean(obs)) ) * 100
pep <- (max(obs) - max(sim)) / max(obs) * 100
mare <- mean(abs(resid) / obs) * 100
mdape <- median(abs( (resid) / obs)) * 100
mre <- mean( (resid) / obs) * 100
msre <- mean( ( (resid) / obs) ^ 2) * 100
rve <- sum(resid) / sum(obs) * 100
rsqr <- (sum( (obs - mean(obs)) * (sim - mean(sim)) ) /
sqrt( sum( (obs - mean(obs)) ^ 2) *
sum( (sim - mean(sim)) ^ 2))) ^ 2
ioad <- 1 - sum( (resid) ^ 2) /
sum((abs(obs - mean(obs)) +
abs(sim - mean(obs))) ^ 2)
ce <- 1 - sum( (resid) ^ 2) /
sum( (obs - mean(obs)) ^ 2)
pi <- 1 - sum( (resid[-1]) ^ 2) /
sum( (obs[-1] - obs[-nsamps]) ^ 2)
msle <- (log(obs + 1e-08) - log(sim + 1e-08)) ^ 2
msle <- sum(msle, na.rm = TRUE) / (nsamps - sum(is.na(msle)))
msde <- sum( ( (obs[-1] - obs[-nsamps]) -
(sim[-1] - sim[-nsamps])) ^ 2) / (nsamps - 1)
delta <- obs[-1] - obs[-nsamps]
irmse <- sqrt(sum( (delta - mean(delta)) ^ 2) / (nsamps - 1))
irmse <- rmse / irmse
ve <- 1 - sum(abs(resid)) / sum(obs)
alpha <- sd(sim) / sd(obs)
beta <- mean(sim) / mean(obs)
r <- cor(sim, obs)
kge <- 1 - sqrt( (r - 1) ^ 2 + (alpha - 1) ^ 2 + (beta - 1) ^ 2)
sse <- sum( (resid) ^ 2)
metrics <- data.frame(AME = ame, PDIFF = pdiff, MAE = mae, ME = me,
RMSE = rmse, R4MS4E = r4ms4e, AIC = aic,
BIC = bic, NSC = nsc, RAE = rae, PEP = pep,
MARE = mare, MdAPE = mdape, MRE = mre,
MSRE = msre, RVE = rve, RSqr = rsqr,
IoAd = ioad, CE = ce, PI = pi, MSLE = msle,
MSDE = msde, IRMSE = irmse, VE = ve,
KGE = kge, SSE = sse, R = r)
obs_stats <- stats(obs)
sim_stats <- stats(sim)
return(list(metrics = metrics, obs_stats = obs_stats, sim_stats = sim_stats))
}
# ------------------------------------------------------------------------------
replicative_valid <- function(obs, sim, na.rm = TRUE) {
rem <- vector()
  if (length(obs) != length(sim)) {
    stop("'obs' and 'sim' must be the same length")
  }
  # check 'obs' and 'sim' independently so that missing values in both
  # vectors are detected
  if (any(is.na(obs))) {
    warning("missing values in 'obs'", call. = FALSE, immediate. = FALSE)
    rem <- which(is.na(obs))
  }
  if (any(is.na(sim))) {
    warning("missing values in 'sim'", call. = FALSE, immediate. = FALSE)
    rem <- unique(c(rem, which(is.na(sim))))
  }
# Remove NA values if na.rm = TRUE
if(na.rm & length(rem) > 0) {
obs <- obs[-rem]
sim <- sim[-rem]
}
resid <- (obs - sim)
resid_stats <- stats(resid)
return(list(residuals = resid, resid_stats = resid_stats))
}
# -------------------------------------------------------------------------------
structural_valid <- function(net, wts = NULL, nodes = NULL, x = NULL) {
results <- list()
# Compute relative importance of inputs via ocw, modified ocw and Garson's
# methods
if(missing(net)) {
net <- NULL
}
if(!is.null(net)) {
ninputs <- ifelse(is.null(net$n), net$nodes[1], net$n[1])
ri_gars <- garson_fn(net)
ri_cw <- cw_fn(net)
ri <- as.data.frame(matrix(c(unlist(ri_gars), unlist(ri_cw)),
ncol = ninputs, byrow = TRUE))
if(!is.null(x)) {
colnames(ri) <- colnames(x)
} else {
colnames(ri) <- paste("inp_", 1:ninputs, sep = "")
}
row.names(ri) <- c(names(ri_gars), names(ri_cw))
if(!is.null(x)) {
tmp <- profile_sa(net, x)
y_hat <- tmp$y_hat
      ri_sa <- tmp$ri_sa
results$y_hat <- y_hat
ri_old <- ri
ri <- rbind(ri, ri_sa)
row.names(ri) <- c(row.names(ri_old), "ri_Profile")
      # If 'net' is of class "ann", perform PaD sensitivity analysis
if (inherits(net, "ann")) {
tmp <- PaD_sa(net, x)
results$as <- tmp$as
results$rs <- tmp$rs
ri_pdsa <- tmp$ri_pdsa
ri_old <- ri
if(ncol(x) == 1) {
ri <- rbind(ri, ri_pdsa[1,])
} else {
ri <- rbind(ri, ri_pdsa)
}
row.names(ri) <- c(row.names(ri_old), "ri_PaD")
} else {
        message3 <- "'net' not of class \"ann\" : "
        message4 <- "PaD sensitivity analysis not performed."
warning(message3, message4, call. = FALSE, immediate. = FALSE)
}
} else {
message1 <- "Input data (x) missing : "
message2 <- "No sensitivity analyses performed."
warning(message1, message2, call. = FALSE, immediate. = FALSE)
}
results$ri <- ri
} else if (!is.null(wts) && !is.null(nodes)) {
ninputs <- nodes[1]
ri_gars <- garson_fn(wts = wts, nodes = nodes)
ri_cw <- cw_fn(wts = wts, nodes = nodes)
ri <- as.data.frame(matrix(c(unlist(ri_gars), unlist(ri_cw)),
ncol = ninputs, byrow = TRUE))
if(!is.null(x)) {
colnames(ri) <- colnames(x)
} else {
colnames(ri) <- paste("inp_", 1:ninputs, sep = "")
}
row.names(ri) <- c(names(ri_gars), names(ri_cw))
results$ri <- ri
}
return(results)
}
# ------------------------------------------------------------------------------
stats <- function(x) {
stats_mean <- mean(x)
stats_min <- min(x)
stats_max <- max(x)
stats_var <- var(x)
stats_sd <- sd(x)
stats_skew <- moments::skewness(x)
stats_kurt <- moments::kurtosis(x)
return(data.frame(mean = stats_mean, min = stats_min, max = stats_max,
var = stats_var, sd = stats_sd, skewness = stats_skew,
kurtosis = stats_kurt))
}
#-------------------------------------------------------------------------------
profile_sa <- function(net, x) {
if (!(inherits(net, "nnet") | inherits(net, "ann"))) {
stop("'net' must be of class \"nnet\" or \"ann\" to perform local
sensitivity analysis.")
}
quarts_x <- apply(x, 2, quantile, probs = seq(0, 1, 0.25))
y_hat <- vector()
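  # Profile method: for each input k, hold all other inputs fixed at each of
  # the five quartile values in turn, sweep input k across its 0th-100th
  # percentiles, and record the five response curves plus their pointwise
  # median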
for (k in 1:ncol(x)) {
y_hat_0 <- vector()
for(q in 1:5) {
x_fix <- matrix(rep(quarts_x[q,], 101), ncol = ncol(x), byrow = TRUE)
x_tmp <- x_fix
x_tmp[, k] <- as.vector(quantile(x[, k], prob = seq(0, 1, by = 0.01)))
if (inherits(net, "nnet")) {
y_hat_0 <- cbind(y_hat_0, predict(net, newdata = x_tmp,
type = "raw"))
} else if (inherits(net, "ann")) {
y_hat_0 <- cbind(y_hat_0, predict(net, newdata = x_tmp))
}
}
y_hat_1 <- apply(y_hat_0, 1, median)
y_hat <- cbind(y_hat, y_hat_0, y_hat_1)
}
colnames(y_hat) <- paste(rep(colnames(x), each = 6),
c(seq(0, 1, 0.25)*100, "med"), sep = "_")
y_hat_meds <- y_hat[,seq(6, 6*ncol(x), 6)]
y_hat_rng <- apply(y_hat_meds, 2, range)
ri <- y_hat_rng[2,] - y_hat_rng[1,]
ri <- ri / sum(abs(ri)) * 100
return(list(y_hat = y_hat, ri_sa = ri))
}
#-------------------------------------------------------------------------------
PaD_sa <- function(net, x) {
if (inherits(net, "nnet")) {
stop("'net' must be of class \"ann\" to perform PD sensitivity analysis.")
}
if (!inherits(net, "ann")) stop("'net' not of class \"ann\"")
if (is.vector(x)) {
x <- matrix(x, ncol = 1)
}
npatterns <- dim(x)[1]
ninputs <- dim(x)[2]
O <- net$fitted.values
if(ncol(net$derivs) == 1) {
nhn <- 0
out_derivs <- matrix(net$derivs[, 1], ncol = 1)
hid_derivs <- NULL
} else {
out_derivs <- matrix(net$derivs[, ncol(net$derivs)], ncol = 1)
hid_derivs <- matrix(net$derivs[, -ncol(net$derivs)],
ncol = ncol(net$derivs) - 1)
nhn <- ncol(hid_derivs)
}
relsens <- abssens <- vector()
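  # PaD: by the chain rule, dO/dx_i = sum_j w_ij * g'(z_j) * w_jO * f'(z_O),
  # where g' and f' are the stored hidden- and output-node derivatives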
if(nhn > 0) {
for (i in 1:ninputs) {
sum1 <- rep(0, npatterns)
for (j in 1:net$nodes[2]) {
sum1 <- sum1 +
net$wts[(i - 1) * net$nodes[2] + j] * hid_derivs[, j] *
net$wts[net$nodes[1] * net$nodes[2] + net$nodes[2] + j] *
out_derivs[, 1]
}
relsens <- cbind(relsens, x[, i] / O * sum1)
abssens <- cbind(abssens, sum1)
}
} else {
for (i in 1:ninputs) {
sum1 <- net$wts[i] * out_derivs[, 1]
relsens <- cbind(relsens, x[, i] / O * sum1)
abssens <- cbind(abssens, sum1)
}
}
ssd <- matrix(colSums(abssens^2), nrow = 1)
rmsd <- sqrt(ssd/dim(abssens)[1])
rmsd <- rmsd / sum(rmsd) * 100
  colnames(relsens) <- colnames(abssens) <- colnames(rmsd) <- colnames(x)
return(list(rs = relsens, as = abssens, ri_pdsa = rmsd))
}
#-------------------------------------------------------------------------------
cw_fn <- function(net, wts = NULL, nodes = NULL) {
# -------
# function for calculating overall connection weight (OCW) and
# relative importance (RI) of inputs
# -------
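  # For each input i, OCW_i = sum over hidden nodes j of w_ij * v_j, the
  # signed product of the input-hidden and hidden-output weights; RI_i is
  # OCW_i divided by the sum of absolute OCWs, scaled to a percentage. For
  # "ann" networks with tanh hidden units, a modified variant (ri_MCW) is
  # also returned, with the input-hidden weights passed through the
  # activation function first.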
if (missing(net)) {
net <- NULL
}
if (!is.null(net)) {
if (inherits(net, "nnet")) {
nodes <- net$n
nhn <- nodes[2]
ninp <- nodes[1]
wts <- net$wts
act_fn <- "sigmoid"
indices <- matrix(seq(1, nodes[1] * nodes[2] + nodes[2]),
ncol = nodes[2])
out_ls <- list()
for (i in 1:ncol(indices)) {
out_ls[[paste("hidden", i)]] <- wts[indices[, i]]
}
out_ls[["out 1"]] <- wts[(max(indices) + 1):length(wts)]
ocw <- ocw_mod <- vector()
for (i in 1:nhn) {
ocw <- rbind(ocw, out_ls[[i]][2:(ninp + 1)] * out_ls[[nhn + 1]][i + 1])
}
ri <- colSums(ocw)
ri_denom <- sum(abs(ri))
ri <- ri / ri_denom
ri <- ri * 100
return(list(ri_CW = ri))
} else if (inherits(net, "ann")) {
nodes <- net$nodes
nhn <- ifelse(length(nodes) == 3, nodes[2], 0)
ninp <- nodes[1]
wts <- net$wts
act_fn <- net$act_fn[2]
out_ls <- list()
if(nhn > 0) {
indices <- matrix(seq(1, nodes[1] * nodes[2]), ncol = nodes[2],
byrow = TRUE)
for (i in 1:ncol(indices)) {
out_ls[[paste("hidden", i)]] <- wts[indices[, i]]
}
out_ls[["out 1"]] <- wts[(max(indices) + nodes[2] + 1):length(wts)]
ocw <- ocw_mod <- vector()
for (i in 1:nhn) {
ocw_mod <- rbind(ocw_mod, actfn(out_ls[[i]][1:ninp], method = act_fn) *
out_ls[[nhn + 1]][i])
ocw <- rbind(ocw, out_ls[[i]][1:ninp] * out_ls[[nhn + 1]][i])
}
ri <- colSums(ocw)
ri_mod <- colSums(ocw_mod)
} else {
out_ls[["out 1"]] <- wts
ocw_mod <- actfn(out_ls[[1]][1:ninp], method = act_fn)
ocw <- out_ls[[1]][1:ninp]
ri <- ocw
ri_mod <- ocw_mod
}
ri_denom <- sum(abs(ri))
ri <- ri / ri_denom
ri <- ri * 100
ri_denom <- sum(abs(ri_mod))
ri_mod <- ri_mod / ri_denom
ri_mod <- ri_mod * 100
if(act_fn == "tanh") {
return(list(ri_CW = ri, ri_MCW = ri_mod))
} else {
return(list(ri_CW = ri))
}
} else {
stop("'net' must be of class \"nnet\" or \"ann\"")
}
} else if (!is.null(wts) && !is.null(nodes)) {
nhn <- nodes[2]
ninp <- nodes[1]
out_ls <- list()
if(nhn > 0) {
indices <- matrix(seq(1, nodes[1] * nodes[2]), ncol = nodes[2],
byrow = TRUE)
for (i in 1:ncol(indices)) {
out_ls[[paste("hidden", i)]] <- wts[indices[, i]]
}
out_ls[["out 1"]] <- wts[(max(indices) + nodes[2] + 1):length(wts)]
ocw <- ocw_mod <- vector()
for (i in 1:nhn) {
ocw <- rbind(ocw, out_ls[[i]][1:ninp] * out_ls[[nhn + 1]][i])
}
ri <- colSums(ocw)
} else {
out_ls[["out 1"]] <- wts
ocw <- out_ls[[1]][1:ninp]
ri <- ocw
}
ri_denom <- sum(abs(ri))
ri <- ri / ri_denom
ri <- ri * 100
return(list(ri_CW = ri))
} else {
stop("either 'net' or 'wts' and 'nodes' must be supplied")
}
}
# ------------------------------------------------------------------------------
garson_fn <- function(net, wts = NULL, nodes = NULL) {
# -------
# Function to calculate Garson's measure of relative importance
# -------
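  # Garson's algorithm: the contribution of input i through hidden node j is
  # |w_ij| / sum_k |w_kj|, weighted by |v_j| / sum_j |v_j|; summing over the
  # hidden nodes and scaling by 100 gives the relative importance of input i.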
if (missing(net)) {
net <- NULL
}
if (!is.null(net)) {
if (inherits(net, "nnet")) {
nodes <- net$n
nhn <- ifelse(length(nodes) == 3, nodes[2], 0)
ninp <- nodes[1]
wts <- net$wts
out_ls <- list()
if(nhn > 0) {
indices <- matrix(seq(1, nodes[1] * nodes[2] + nodes[2]),
ncol = nodes[2])
for (i in 1:ncol(indices)) {
out_ls[[paste("hidden", i)]] <- wts[indices[, i]]
}
out_ls[["out 1"]] <- wts[(max(indices) + 1):length(wts)]
ri <- vector()
for (i in 1:nhn) {
sum_wi <- sum(abs(out_ls[[i]][2:(ninp + 1)]))
sum_wo <- sum(abs(out_ls[[nhn + 1]][2:(nhn + 1)]))
ri <- rbind(ri, abs(out_ls[[i]][2:(ninp + 1)]) / sum_wi *
abs(out_ls[[nhn + 1]][i + 1]) / sum_wo)
}
ri <- colSums(ri) * 100
} else {
out_ls[["out 1"]] <- wts
sum_wi <- sum(abs(out_ls[[1]][1:ninp]))
ri <- abs(out_ls[[1]][1:ninp]) / sum_wi * 100
}
} else if (inherits(net, "ann")) {
nodes <- net$nodes
nhn <- ifelse(length(nodes) == 3, nodes[2], 0)
ninp <- nodes[1]
wts <- net$wts
out_ls <- list()
if(nhn > 0) {
indices <- matrix(seq(1, nodes[1] * nodes[2]), ncol = nodes[2],
byrow = TRUE)
for (i in 1:ncol(indices)) {
out_ls[[paste("hidden", i)]] <- wts[indices[, i]]
}
out_ls[["out 1"]] <- wts[(max(indices) + nodes[2] + 1):length(wts)]
ri <- vector()
for (i in 1:nhn) {
sum_wi <- sum(abs(out_ls[[i]][1:ninp]))
sum_wo <- sum(abs(out_ls[[nhn + 1]][1:nhn]))
ri <- rbind(ri, abs(out_ls[[i]][1:ninp]) / sum_wi *
abs(out_ls[[nhn + 1]][i]) / sum_wo)
}
ri <- colSums(ri) * 100
} else {
out_ls[["out 1"]] <- wts
sum_wi <- sum(abs(out_ls[[1]][1:ninp]))
ri <- abs(out_ls[[1]][1:ninp]) / sum_wi * 100
}
} else {
stop("'net' must be of class \"nnet\" or \"ann\"")
}
} else if (!is.null(wts) && !is.null(nodes)) {
nhn <- nodes[2]
ninp <- nodes[1]
out_ls <- list()
if(nhn > 0) {
indices <- matrix(seq(1, nodes[1] * nodes[2]), ncol = nodes[2],
byrow = TRUE)
for (i in 1:ncol(indices)) {
out_ls[[paste("hidden", i)]] <- wts[indices[, i]]
}
out_ls[["out 1"]] <- wts[(max(indices) + nodes[2] + 1):length(wts)]
ri <- vector()
for (i in 1:nhn) {
sum_wi <- sum(abs(out_ls[[i]][1:ninp]))
sum_wo <- sum(abs(out_ls[[nhn + 1]][1:nhn]))
ri <- rbind(ri, abs(out_ls[[i]][1:ninp]) / sum_wi *
abs(out_ls[[nhn + 1]][i]) / sum_wo)
}
ri <- colSums(ri) * 100
} else {
out_ls[["out 1"]] <- wts
sum_wi <- sum(abs(out_ls[[1]][1:ninp]))
ri <- abs(out_ls[[1]][1:ninp]) / sum_wi * 100
}
} else {
stop("either 'net' or 'wts' and 'nodes' must be supplied")
}
return(list(ri_Garson = ri))
}
# ------------------------------------------------------------------------------
|
/scratch/gouwar.j/cran-all/cranData/validann/R/validann.R
|
#' n_dupes
#'
#' @param x a df
#'
#' @return an integer; number of dupe rows
#' @keywords internal
#'
n_dupes <- function(x) {
  nrow(x) - nrow(dplyr::distinct(x))
}
#' Confirm Distinct
#'
#' Confirm whether the rows of a data frame can be uniquely identified by the keys in the selected columns.
#' Also reports whether the dataframe has duplicates. If so, it is best to remove duplicates and re-run the function.
#'
#' @param .data A dataframe
#' @param ... (ID) columns
#'
#' @return a Logical value invisibly with description printed to console
#' @export
#'
#' @examples iris %>% confirm_distinct(Species, Sepal.Width)
confirm_distinct <- function(.data, ...) {
.data %>%
dplyr::ungroup() %>%
select_otherwise(..., return_type = "df") -> .data1
.data1 %>% names() %>% rlang::syms(.) -> cols
n_dupes(.data) -> d_rows
if(d_rows > 0) {
print(stringr::str_glue("database has {d_rows} duplicate rows"))
.data <- dplyr::distinct(.data)}
.data1 %>% dplyr::distinct(.) -> new_df
nrow(new_df) -> new_rows
names(new_df) %>% stringr::str_c( collapse = ", ") -> col_names
diff <- nrow(.data) - new_rows
if(diff == 0){
print(stringr::str_glue("database is distinct at {col_names}"))
invisible(TRUE)
}
else {
print(stringr::str_glue("database has {diff} duplicates at {col_names}"))
invisible(FALSE)
}
}
|
/scratch/gouwar.j/cran-all/cranData/validata/R/confirm_distinct.R
|
#' Confirm structural mapping between 2 columns
#'
#' The mapping between elements of 2 columns can have 4 different relationships: one - one, one - many, many - one, many - many.
#' This function returns a view of the mappings by row, and prints a summary to the console.
#'
#' @param .data a data frame
#' @param col1 column 1
#' @param col2 column 2
#' @param view View results?
#'
#' @return A view of mappings. Also returns the view as a data frame invisibly.
#' @export
#'
#' @examples iris %>% confirm_mapping(Species, Sepal.Width, view = FALSE)
confirm_mapping <- function(.data, col1, col2, view = T){
dupe_count <- one_to_many <- many_to_one <- NULL
.data <- dplyr::ungroup(.data)
.data %>%
dplyr::distinct({{ col1 }}, {{ col2 }}) %>%
janitor::get_dupes({{col1}}) %>%
dplyr::mutate(one_to_many = 1) %>%
dplyr::arrange({{col1}}, {{col2}}) -> .data1
.data %>%
dplyr::distinct({{ col1 }}, {{ col2 }}) %>%
janitor::get_dupes({{col2}}) %>%
dplyr::mutate(many_to_one = 1) %>%
    dplyr::arrange({{col2}}, {{col1}}) -> .data2
  .data1 %>% nrow() %>% `>`(0) -> one2m
  .data2 %>% nrow() %>% `>`(0) -> m2one
mapping_bools <- c(! (one2m | m2one), one2m & !m2one, m2one & !one2m, one2m & m2one)
switch(which(mapping_bools),
"1 - 1 mapping",
"1 - many mapping",
"many - 1 mapping",
"many - many mapping") -> mapping_desc
print(stringr::str_glue("{mapping_desc} between {rlang::as_name(rlang::ensym(col1))} and {rlang::as_name(rlang::ensym(col2))}"))
.data3 <-
dplyr::bind_rows(.data1, .data2) %>%
dplyr::select({{col1}}, {{col2}}, dupe_count, tidyselect::everything()) %>%
dplyr::mutate(dplyr::across(c(one_to_many, many_to_one), as.logical))
if(nrow(.data3) > 0 & view == T) {utils::View(.data3)}
invisible(.data3)
}
|
/scratch/gouwar.j/cran-all/cranData/validata/R/confirm_mapping.R
|
#' Confirm Overlap
#'
#' Prints a venn-diagram style summary of the unique value
#' overlap between two columns and also invisibly returns a dataframe that can be assigned to a variable
#' and queried with the overlap helpers. The helpers can return values that appear only in the first col, second col,
#' or both cols.
#'
#' @param vec1 vector 1
#' @param vec2 vector 2
#' @param return_tibble logical. If TRUE, returns the summary as a tibble. Otherwise (the default), prints the summary and invisibly returns the overlap table to be queried by the helper functions.
#'
#' @return tibble. overlap summary or overlap table
#' @export
#'
#' @examples
#'
#' confirm_overlap(iris$Sepal.Width, iris$Sepal.Length) -> iris_overlap
#'
#' iris_overlap
#'
#' iris_overlap %>%
#' co_find_only_in_1()
#'
#' iris_overlap %>%
#' co_find_only_in_2()
#'
#' iris_overlap %>%
#' co_find_in_both()
confirm_overlap <- function(vec1, vec2, return_tibble = F){
x <- flag2 <- flag1 <- both_flags <- shared_names <- total_names <- NULL
rlang::enexpr(vec1) %>% deparse %>% stringr::str_replace(stringr::fixed("$"), "_") -> str_col1
rlang::enexpr(vec2) %>% deparse %>% stringr::str_replace(stringr::fixed("$"), "_") -> str_col2
stringr::str_glue("only_in_{str_col1}") -> nm_col1
stringr::str_glue("only_in_{str_col2}") -> nm_col2
stopifnot(typeof(vec1) == typeof(vec2) )
db1 <- tibble::tibble(x = vec1)
db2 <- tibble::tibble(x = vec2)
db1 %>% dplyr::distinct(x) %>% dplyr::filter(!is.na(x)) %>% dplyr::mutate(flag1 = 1) -> db1
db2 %>% dplyr::distinct(x) %>% dplyr::filter(!is.na(x)) %>% dplyr::mutate(flag2 = 1) -> db2
suppressMessages({dplyr::full_join(db1, db2) -> jdb})
jdb %>%
tidyr::replace_na(list(
flag1 = 0,
flag2 = 0)) %>%
dplyr::mutate(both_flags = flag1 + flag2) -> jdb
jdb %>%
dplyr::summarize(
!!nm_col1 := sum(flag1 == 1 & flag2 == 0),
!!nm_col2 := sum(flag1 == 0 & flag2 == 1),
shared_names = sum(both_flags == 2),
total_names = jdb %>% nrow,
pct_shared = scales::percent(shared_names/ total_names)) -> jdb_sum
jdb %>%
dplyr::rename("{str_col1}" := flag1,
"{str_col2}" := flag2) -> jdb
if(return_tibble){
jdb_sum
} else{
print(jdb_sum)
invisible(jdb)
}
}
#' Confirm Overlap internal
#'
#' A venn style summary of the overlap in unique values of 2 vectors
#'
#' @param vec1 vector 1
#' @param vec2 vector 2
#'
#' @return 1 row tibble
#' @keywords internal
#'
#' @examples confirm_overlap(iris$Sepal.Width, iris$Sepal.Length)
confirm_overlap_internal <- function(vec1, vec2){
x <- flag2 <- flag1 <- both_flags <- NULL
rlang::enexpr(vec1) %>% deparse %>% stringr::str_replace(stringr::fixed("$"), "_") -> str_col1
rlang::enexpr(vec2) %>% deparse %>% stringr::str_replace(stringr::fixed("$"), "_") -> str_col2
stringr::str_glue("only_in_{str_col1}") -> nm_col1
stringr::str_glue("only_in_{str_col2}") -> nm_col2
# stopifnot(typeof(vec1) == typeof(vec2) )
db1 <- tibble::tibble(x = vec1)
db2 <- tibble::tibble(x = vec2)
db1 %>% dplyr::distinct(x) %>% dplyr::filter(!is.na(x)) %>% dplyr::mutate(flag1 = 1) -> db1
db2 %>% dplyr::distinct(x) %>% dplyr::filter(!is.na(x)) %>% dplyr::mutate(flag2 = 1) -> db2
suppressMessages({dplyr::full_join(db1, db2) -> jdb})
jdb %>%
tidyr::replace_na(list(
flag1 = 0,
flag2 = 0)) %>%
dplyr::mutate(both_flags = flag1 + flag2) -> jdb
jdb %>%
dplyr::summarize(
!!nm_col1 := sum(flag1 == 1 & flag2 == 0),
!!nm_col2 := sum(flag1 == 0 & flag2 == 1),
shared_names = sum(both_flags == 2),
total_names = jdb %>% nrow) -> jdb_sum
shared_pct <- (jdb_sum$shared_names / jdb_sum$total_names * 100) %>% round
jdb_sum %>%
dplyr::mutate(shared_pct_names = stringr::str_c(shared_pct, "%")) -> jdb_sum1
jdb_sum1
}
|
/scratch/gouwar.j/cran-all/cranData/validata/R/confirm_overlap.R
|
#' confirm overlap - find entries only in df 1
#'
#'
#' @rdname confirm_overlap
#' @param co_output dataframe output from confirm_overlap
#'
#' @export
co_find_only_in_1 <- function(co_output){
names(co_output) -> nms
co_output %>%
dplyr::filter(.[[nms[3]]] == 0 & .[[nms[2]]] == 1) %>%
dplyr::select("{nms[2]}" := 1)
}
#' confirm overlap - find entries only in df 2
#'
#'
#' @rdname confirm_overlap
#' @param co_output dataframe output from confirm_overlap
#'
#' @export
co_find_only_in_2 <- function(co_output){
names(co_output) -> nms
co_output %>%
dplyr::filter(.[[nms[3]]] == 1 & .[[nms[2]]] == 0) %>%
dplyr::select("{nms[3]}" := 1)
}
#' confirm overlap - find entries in both dfs
#'
#'
#' @rdname confirm_overlap
#' @param co_output dataframe output from confirm_overlap
#'
#' @export
co_find_in_both <- function(co_output){
both_flags <- NULL
co_output %>%
dplyr::filter(both_flags == 2) %>%
dplyr::select(1)
}
|
/scratch/gouwar.j/cran-all/cranData/validata/R/confirm_overlap_helpers.R
|
#' confirm string length
#'
#' Returns a count table of string lengths for a character column. The helper function `choose_strlen`
#' filters the dataframe for rows whose values in the specified column have a given string length.
#'
#' @param mdb dataframe
#' @param col unquoted column
#'
#' @return prints a summary and returns a dataframe invisibly
#' @export
#'
#' @examples
#'
#' iris %>%
#' tibble::as_tibble() %>%
#' confirm_strlen(Species) -> iris_cs_output
#'
#' iris_cs_output
#'
#' iris_cs_output %>%
#' choose_strlen(6)
confirm_strlen <- function(mdb, col){
mdb %>% dplyr::ungroup() -> mdb
nm <- rlang::as_string(rlang::ensym(col))
col_nm <- rlang::as_string(stringr::str_glue("{nm}_chr_len"))
mdb %>%
dplyr::mutate(!!col_nm := stringr::str_length({{col}})) -> tmp_db
tmp_db %>% janitor::tabyl(!!col_nm) %>% janitor::adorn_pct_formatting() -> rt
print(rt)
invisible(tmp_db)
}
#' choose string length
#'
#'
#' @rdname confirm_strlen
#' @param cs_output dataframe. output from `confirm_strlen`
#' @param len integer vector.
#'
#' @return dataframe with original columns, filtered to the specific string length
#' @export
#'
choose_strlen <- function(cs_output, len) {
cs_output %>% names() %>% stringr::str_subset("_chr_len") -> my_col
cs_output %>%
dplyr::filter(.[[my_col]] %in% len)
}
|
/scratch/gouwar.j/cran-all/cranData/validata/R/confirm_strlength.R
|
#' Sample Data
#'
#' @keywords internal
#'
#' sample data with 125 rows
#'
#' @format 6 columns. 3 id and 3 values
#' \describe{
#' \item{ID_COL1}{4-5 distinct codes}
#' }
"sample_data1"
|
/scratch/gouwar.j/cran-all/cranData/validata/R/data.R
|
#' @keywords internal
"_PACKAGE"
# The following block is used by usethis to automatically manage
# roxygen namespace tags. Modify with care!
## usethis namespace: start
## usethis namespace: end
NULL
utils::globalVariables(names = c("!!", " %>% ", ":=", "."))
|
/scratch/gouwar.j/cran-all/cranData/validata/R/dataValidation-package.R
|
#' Names List
#'
#' @param df a df
#' @param len how many elements in combination
#'
#' @return a list of name combinations
#' @keywords internal
#'
names_list <- function(df, len){
df %>%
names %>%
gtools::combinations(n = length(.), r = len, v = . ) %>%
as.data.frame() %>%
as.list()
}
#' Make Distinct
#'
#' @param df a df
#' @param ... cols
#'
#' @return a list of name lists
#' @keywords internal
#'
make_distincts <- function(df, ...){
df %>%
dplyr::select(...) -> id_cols
nms_list <- list()
for(i in seq_along(id_cols)) {
nms_list %>% append(list(names_list(id_cols, i))) -> nms_list
}
nms_list
}
#' Automatically determine primary key
#'
#' Uses \code{confirm_distinct} in an iterative fashion to determine the primary keys.
#'
#' The goal of this function is to automatically determine which columns uniquely identify the rows of a dataframe.
#' The output is a printed description of the combination of columns that form unique identifiers at each level.
#' At level 1, the function tests if individual columns are primary keys
#' At level 2, the function tests n C 2 combinations of columns to see if they form primary keys.
#' The final level is testing all columns at once.
#'
#' * For completely unique columns, they are recorded in level 1, but then dropped from the data frame to facilitate
#' the determination of multi-column primary keys.
#' * If the dataset contains duplicated rows, they are eliminated before proceeding.
#'
#' @param df a data frame
#' @param ... columns or a tidyselect specification. defaults to everything
#' @param listviewer logical. defaults to TRUE to view output using the listviewer package
#'
#' @return list
#' @export
#'
#' @examples
#'
#' sample_data1 %>%
#' head
#'
#'
#'## On level 1, each column is tested as a unique identifier. The VAL columns have no
#'## duplicates and hence qualify, even though they normally would not be considered IDs.
#'## On level 3, combinations of 3 columns are tested, implying that ID_COLs 1, 2 and 3 form a unique key.
#'## Level 2 does not appear, implying that combinations of any 2 ID_COLs do not form a unique key.
#'
#' sample_data1 %>%
#' determine_distinct(listviewer = FALSE)
determine_distinct <- function(df, ..., listviewer = TRUE){
n_dupes(df) -> d_rows
if(d_rows > 0) {
print(stringr::str_glue("database has {d_rows} duplicate rows, and will eliminate them"))
df <- dplyr::distinct(df)}
get_unique_col_names(df) -> unique_names
df %>% framecleaner::select_otherwise(..., otherwise = tidyselect::everything(), return_type = "names") %>% setdiff(unique_names) -> db_names
df %>% dplyr::select(-tidyselect::any_of(unique_names)) -> df
make_distincts(df, tidyselect::any_of(db_names)) -> dst_list
distinct_combos <- list()
new_list <- list()
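  # Level j tests every j-column combination as a candidate key; combinations
  # that contain an already confirmed key are pruned first, since any
  # superset of a key is trivially a key itself.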
for(j in seq_along(dst_list)){
stringr::str_c("LEVEL ", j) -> col_nm
dst_list %>%
purrr::pluck(j) -> the_lev
filter_list(smaller_list = distinct_combos,
bigger_list = data.table::transpose(the_lev)) %>%
data.table::transpose() -> the_lev
utils::capture.output(
the_lev %>%
purrr::pmap_lgl(., ~confirm_distinct(df, ...)) -> dst_nms)
the_lev %>%
as.data.frame() %>%
dplyr::filter(dst_nms) -> d1
d1 %>%
rows_to_list() -> l1
l1 %>%
append(distinct_combos) -> distinct_combos
if(nrow(d1) != 0){
d1 %>%
tidyr::unite(col = !!col_nm, sep = ", ") %>%
append(new_list) -> new_list}
}
new_list[["LEVEL 1"]] <- as.list(unique_names)
new_list %>%
purrr::map(~if(rlang::is_empty(.)) {. <- 'no primary keys'} else{.}) -> output
if(listviewer){
output %>%
listviewer::jsonedit(.)
} else{
output
}
}
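# Transpose a one-row-per-group summary: the grouping columns in ... are
# united into a single identifier, the table is transposed so summary
# columns become rows, and the original column mode is restored on the
# value columns.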
pivot_summary <- function(sumr, ...){
column <- rowname <- NULL
if (!missing(..1)) {
sumr %>%
tidyr::unite(col = "column", ..., remove = T) %>% dplyr::relocate(column) -> sumr1
sumr1 %>%
dplyr::select(-1) %>% as.matrix() %>% mode -> output_mode
}
else{
sumr -> sumr1
}
sumr1 %>%
t %>%
as.data.frame() %>%
tibble::rownames_to_column() %>%
tibble::as_tibble() %>%
dplyr::rename(column = rowname) %>%
dplyr::arrange(column) -> sumr2
if (!missing(..1)) {
sumr2 %>%
janitor::row_to_names(row_number = 1) %>%
dplyr::mutate(dplyr::across(-1, ~as(., output_mode)))-> sumr3
}
else{
sumr2 -> sumr3
}
sumr3
}
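# Names of the columns whose number of distinct values equals the row
# count, i.e. columns that are primary keys on their own.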
get_unique_col_names <- function(df){
nrow(df) -> rws
V1 <- column <- NULL
df %>%
dplyr::summarize(dplyr::across(.fns = ~dplyr::n_distinct(.) == rws)) %>%
pivot_summary() %>%
dplyr::filter(V1) %>%
dplyr::pull(column)
}
rows_to_list <- function(df){
df %>% t() %>% as.data.frame() %>% lapply(unlist)}
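# is_subset_list: TRUE if any character set in chr_list is fully contained
# in chr. filter_list: drops candidate combinations from bigger_list that
# already contain a confirmed key from smaller_list.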
is_subset_list <- function(chr, chr_list){
any(purrr::map_lgl(chr_list, ~all(is.element(el = ., set = chr))))
}
filter_list <- function(smaller_list, bigger_list){
bigger_list %>% purrr::map_lgl(~is_subset_list(chr = ., chr_list = smaller_list)) -> logical_vec
purrr::discard(bigger_list, logical_vec)
}
|
/scratch/gouwar.j/cran-all/cranData/validata/R/determine_distinct.R
|
#' Determine pairwise structural mappings
#'
#' @param df a data frame
#' @param ... columns or a tidyselect specification
#' @param listviewer logical. defaults to TRUE to view output using the listviewer package
#'
#' @return description of mappings
#' @export
#'
#' @examples
#'
#' iris %>%
#' determine_mapping(listviewer = FALSE)
determine_mapping <- function(df, ..., listviewer = TRUE){
df %>%
framecleaner::select_otherwise(..., otherwise = tidyselect::everything(), return_type = "df") %>%
dplyr::ungroup(.) -> df1
if(ncol(df1) < 2){
rlang::abort("insufficient non-unique columns")
}
cnf_output <- list()
for(i in 2:ncol(df1)){
for(j in 1:(i-1)){
nm1 <- rlang::sym(names(df1)[i])
nm2 <- rlang::sym(names(df1)[j])
cnf_output <- append(cnf_output, utils::capture.output(suppressMessages(confirm_mapping(df1, !!nm1, !!nm2, view = F))) )
}
}
cnf_output %>% unlist -> cnf_output1
list_output <- list()
list_output[["1 - 1 mapping"]] <- stringr::str_subset(cnf_output1, "1 - 1 mapping") %>% stringr::str_remove(., "1 - 1 mapping between ")
list_output[["1 - many mapping"]] <- stringr::str_subset(cnf_output1, "1 - many mapping") %>% stringr::str_remove(., "1 - many mapping between ")
list_output[["many - 1 mapping"]] <- stringr::str_subset(cnf_output1, "many - 1 mapping") %>% stringr::str_remove(., "many - 1 mapping between ")
list_output[["many - many mapping"]] <- stringr::str_subset(cnf_output1, "many - many mapping") %>% stringr::str_remove(., "many - many mapping between ")
if(listviewer){
listviewer::jsonedit(list_output)} else {
list_output
}
}
|
/scratch/gouwar.j/cran-all/cranData/validata/R/determine_mapping.R
|
#' Determine Overlap
#'
#' Uses \code{confirm_overlap} in a pairwise fashion to produce a venn-style comparison of unique values between
#' the columns chosen by a tidyselect specification.
#'
#' @param db a data frame
#' @param ... tidyselect specification. Default being everything.
#'
#' @return tibble
#' @export
#'
#' @examples
#'
#' iris %>%
#' determine_overlap()
#'
determine_overlap <- function(db, ...) {
db %>%
framecleaner::select_otherwise(..., otherwise = tidyselect::everything(), return_type = "df") -> db1
names_list(db1, 2) -> db_names_list
  for (i in seq_along(db_names_list$V1)) {
testit::has_error(confirm_overlap_internal(db[[db_names_list$V1[i]]], db[[db_names_list$V2[i]]]),
silent = T) -> fails_type_check
if(fails_type_check) {next}
else{
tibble::tibble(col1 = db_names_list$V1[i], col2 = db_names_list$V2[i]) %>%
dplyr::bind_cols(confirm_overlap_internal(db[[db_names_list$V1[i]]], db[[db_names_list$V2[i]]]) %>%
rlang::set_names(
c(
"names_only_col_1",
"names_only_col_2",
"shared_names",
"total_names",
"pct_shared_names"
)
)) -> tib_row
if (!exists("tib1")) {
tib_row -> tib1
} else{
tib1 %>%
dplyr::bind_rows(tib_row) -> tib1
}
}
}
tib1
}
|
/scratch/gouwar.j/cran-all/cranData/validata/R/determine_overlap.R
|
#' diagnose
#'
#' This function is inspired by the excellent `dlookr` package. It takes a dataframe and returns
#' a summary of unique and missing values of the columns.
#'
#' @param df dataframe
#' @param ... tidyselect
#' @importFrom framecleaner select_otherwise
#'
#' @return dataframe summary
#' @export
#'
#' @examples iris %>% diagnose()
diagnose <- function(df, ...) {
df <- select_otherwise(df, ..., otherwise = tidyselect::everything(), return_type = "df")
vars <- names(df)
variable_type <- purrr::map_chr(df, ~class(.)[1])
missing_count <- purrr::map_int(df, count_missing)
unique_count <- purrr::map_int(df, dplyr::n_distinct)
data_count <- nrow(df)
if(data_count == 0){
return(print("data frame is empty") )
}
tibble::tibble(variables = vars, types = variable_type,
missing_count = missing_count,
missing_percent = missing_count / data_count * 100,
unique_count = unique_count,
unique_rate = unique_count / data_count)
}
count_missing <- function(x){
sum(is.na(x))
}
#' diagnose_missing
#'
#' Faster than `diagnose` when the emphasis is on diagnosing missing values. Also, shows only the columns with
#' any missing values.
#'
#' @param df dataframe
#' @param ... optional tidyselect
#'
#' @return tibble summary
#' @export
#'
#' @examples
#'
#' iris %>%
#' framecleaner::make_na(Species, vec = "setosa") %>%
#' diagnose_missing()
diagnose_missing <- function(df, ...){
missings <- NULL
nrow(df) -> total_rows
df <- select_otherwise(df,
...,
otherwise = tidyselect::everything(),
return_type = "df")
missing_count <- purrr::map_df(df, count_missing)
missing_count %>%
t() %>%
as.data.frame() %>%
tibble::rownames_to_column() %>%
tibble::as_tibble() %>%
rlang::set_names(c("column", "missings")) %>%
dplyr::arrange(dplyr::desc(missings )) %>%
dplyr::filter(missings > 0) %>%
dplyr::mutate(missing_ratio = missings / total_rows) -> missing_count
missing_count %>%
unlist() %>%
rlang::is_empty() -> misscond
if(misscond){
print("no missings")} else{
missing_count
}
}
#' view_missing
#'
#' View rows of the dataframe where columns in the tidyselect specification contain missing values;
#' by default, missings in any column are detected. The result is by default displayed in the viewer pane.
#' Can be returned as a tibble optionally.
#'
#' @param df dataframe
#' @param ... tidyselect
#' @param view logical. if false, returns tibble
#'
#' @return tibble
#' @export
#'
#' @examples
#'
#' iris %>%
#' framecleaner::make_na(Species, vec = "setosa") %>%
#' view_missing(view = FALSE)
view_missing <- function(df, ..., view = TRUE){
df %>% select_otherwise(..., otherwise = tidyselect::everything()) -> col_indx
df %>%
dplyr::filter(dplyr::if_any(tidyselect::any_of(col_indx), .fns = is.na)) -> missings
if(view){
utils::View(missings)
} else{
missings
}
}
#' diagnose category
#'
#' Counts the distinct entries of categorical variables. The `max_distinct` argument limits the scope to
#' categorical variables with fewer than `max_distinct` unique entries, to prevent overly long output.
#'
#' @param .data dataframe
#' @param ... tidyselect
#' @param max_distinct integer
#'
#' @return dataframe
#' @export
#'
#' @examples
#'
#' iris %>%
#' diagnose_category()
diagnose_category <- function(.data, ..., max_distinct = 5){
n <- NULL
nrow(.data) -> total_rows
.data %>%
purrr::map_int(dplyr::n_distinct) %>%
subset(. < max_distinct) %>%
names() -> nms
.data %>%
select_otherwise(..., otherwise = where(is.character) | where(is.factor), return_type = "names") -> nms1
intersect(nms, nms1) -> nms2
purrr::map(nms2,
function(x) {.data %>%
dplyr::count(!!rlang::sym(x)) %>%
dplyr::mutate(column = names(.)[1], .before = 1) %>%
dplyr::rename(level = 2) %>%
dplyr::arrange(dplyr::desc(n))}) %>%
rlist::list.rbind() %>%
tibble::as_tibble() %>%
dplyr::mutate(ratio = n / total_rows)
}
#' data_mode
#'
#' @param x vector
#' @param prop show frequency as ratio? default T
#'
#' @return named double of length 1
#' @keywords internal
#'
data_mode <- function(x, prop = TRUE){
x %>%
table() -> xt
xt[which.max(xt)] -> xt_mode
if(prop){
xt_mode / length(x) -> xt_mode
}
xt_mode
}
#' diagnose_numeric
#'
#' Inputs a dataframe and returns various summary statistics of the numeric columns. For example `zeros` returns the number
#' of 0 values in that column. `minus` counts negative values and `infs` counts Inf values. Other rarer metrics
#' are also returned that may be helpful for quick diagnosis or understanding of numeric data. `mode` returns the most common
#' value in the column (chooses at random in case of tie) , and `mode_ratio` returns its frequency as a ratio of the total rows
#'
#' @param .data dataframe
#' @param ... tidyselect
#'
#' @return dataframe
#' @export
#'
#'
#' @examples
#'
#' library(framecleaner)
#'
#' iris %>%
#' diagnose_numeric
diagnose_numeric <- function(.data, ...){
.data %>%
    select_otherwise(..., otherwise = where(is.numeric), return_type = "df") -> df
fns <- list(zeros = ~sum(. == 0, na.rm = T),
minus = ~sum(. < 0, na.rm = T),
infs = ~sum(is.infinite(.), na.rm = T),
min = ~min(., na.rm = T),
mean = ~mean(., na.rm = T),
max = ~max(., na.rm = T),
`|x|<1 (ratio)` = ~mean( -1 < . & . < 1, na.rm =T) ,
integer_ratio = ~mean(as.integer(.) == ., na.rm =T),
mode = ~as.double(names(data_mode(.))),
mode_ratio = data_mode)
col_list <- list()
for(fun in seq_along(fns)){
purrr::map_dbl(df, fns[[fun]]) %>%
tibble::enframe(name = NULL, value = names(fns[fun])) -> alist
rlist::list.append(col_list, alist) -> col_list
}
message(stringr::str_c(nrow(.data), " rows"))
tibble::tibble(variables = names(df)) %>%
dplyr::bind_cols(
rlist::list.cbind(col_list)
) %>%
framecleaner::set_int()
}
|
/scratch/gouwar.j/cran-all/cranData/validata/R/diagnose.R
|
utils::globalVariables(".", package = "validata")
utils::globalVariables("where", package = "validata")
utils::globalVariables(":=", package = "validata")
|
/scratch/gouwar.j/cran-all/cranData/validata/R/globals.R
|
# guess_id_col <- function(x, min_distinct = 3L){
#
#
# if(!initial_test(x, min_distinct)) return(FALSE)
# if(!test_integer(x)) return(FALSE)
# if(!test_character(x)) return(FALSE)
#
# TRUE
#
# }
#
#
#
# guess_distribution <- function(x){
#
# d1 <- fitdistrplus::fitdist(x, "pois") %>% fitdistrplus::gofstat() %>% `$`(chisqpvalue)
# d2 <- fitdistrplus::fitdist(x, "norm") %>% fitdistrplus::gofstat() %>% `$`(chisqpvalue)
# d3 <- fitdistrplus::fitdist(x, "unif") %>% fitdistrplus::gofstat() %>% `$`(chisqpvalue)
#
# pvals <- c(pois = d1, norm = d2, unif = d3)
# names(pvals)[which.max(pvals)]
# }
#
#
#
#
# test_integer <- function(x){if(rlang::is_bare_integer(x)){
#
# clist <- list()
#
# rlist::list.append(clist, function(x) !any(x < 0, na.rm = T) ) -> clist
# rlist::list.append(clist, function(x) all(is.finite(x), na.rm = T)) -> clist
# rlist::list.append(clist, function(x) !all(dplyr::between(x, 0, 10), na.rm = T)) -> clist
# rlist::list.append(clist, function(x) BBmisc::computeMode(x) != 0) -> clist
# rlist::list.append(clist, function(x) guess_distribution(x) == "unif") -> clist
#
# for(fn in clist){
#
# if(!rlang::exec(fn, x)){
# return(F)
# }
# }
#
# }
# TRUE
# }
#
#
# test_character <- function(x){if(is.character(x)){
#
# clist <- list()
#
# rlist::list.append(clist, function(x) !any(stringr::str_detect(x, "http|www|:|yes|no"), na.rm = T) ) -> clist
# rlist::list.append(clist, function(x) !all(stringr::str_length(x) <= 1, na.rm = T)) -> clist
#
# for(fn in clist){
#
# if(!rlang::exec(fn, x)){
# return(F)
# }
# }
#
# }
#
# TRUE
# }
#
# initial_test <- function(x, min_distinct = 3){
#
# clist <- list()
#
# rlist::list.append(clist, function(x) !rlang::is_bare_double(x) ) -> clist
# rlist::list.append(clist, function(x) !is.logical(x)) -> clist
# rlist::list.append(clist, function(x) dplyr::n_distinct(x) >= min_distinct) -> clist
#
#
# for(fn in clist){
#
# if(!rlang::exec(fn, x)){
# return(F)
# }
# }
#
# TRUE
# }
#
#
|
/scratch/gouwar.j/cran-all/cranData/validata/R/guess_id_col.R
|
#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
NULL
|
/scratch/gouwar.j/cran-all/cranData/validata/R/utils-pipe.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
iris <- tibble::as_tibble(iris)
## ----setup--------------------------------------------------------------------
library(validata)
library(tidyselect)
## -----------------------------------------------------------------------------
head(sample_data1)
## -----------------------------------------------------------------------------
sample_data1 %>%
confirm_distinct(ID_COL1)
## -----------------------------------------------------------------------------
sample_data1 %>%
confirm_distinct(ID_COL1, ID_COL2)
## -----------------------------------------------------------------------------
sample_data1 %>%
confirm_distinct(ID_COL1, ID_COL2, ID_COL3)
## -----------------------------------------------------------------------------
sample_data1 %>%
determine_distinct(matches("ID"))
## -----------------------------------------------------------------------------
sample_data1 %>%
confirm_mapping(ID_COL1, ID_COL2, view = F)
## -----------------------------------------------------------------------------
sample_data1 %>%
determine_mapping(everything())
## -----------------------------------------------------------------------------
confirm_overlap(iris$Sepal.Width, iris$Petal.Length) -> iris_overlap
## -----------------------------------------------------------------------------
print(iris_overlap)
## -----------------------------------------------------------------------------
iris_overlap %>%
co_find_only_in_1() %>%
head()
## -----------------------------------------------------------------------------
iris_overlap %>%
co_find_only_in_2() %>%
head()
## -----------------------------------------------------------------------------
iris_overlap %>%
co_find_in_both() %>%
head()
## ----eval=FALSE, include=FALSE------------------------------------------------
# iris %>%
# determine_overlap(everything())
## -----------------------------------------------------------------------------
iris %>%
confirm_strlen(Species) -> species_len
## -----------------------------------------------------------------------------
head(species_len)
## -----------------------------------------------------------------------------
species_len %>%
choose_strlen(len = 6) %>%
head()
## -----------------------------------------------------------------------------
iris %>%
diagnose()
|
/scratch/gouwar.j/cran-all/cranData/validata/inst/doc/PackageIntroduction.R
|
---
title: "PackageIntroduction"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{PackageIntroduction}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
iris <- tibble::as_tibble(iris)
```
```{r setup}
library(validata)
library(tidyselect)
```
# Distinct
## Confirm Distinct
In data analysis tasks we often have data sets with multiple possible ID columns, but it's not always clear which combination uniquely identifies each row.
sample_data1 has 125 rows with 3 ID-type columns and 3 value columns.
```{r}
head(sample_data1)
```
Let's use `confirm_distinct` iteratively to find the uniquely identifying columns of sample_data1.
```{r}
sample_data1 %>%
confirm_distinct(ID_COL1)
```
```{r}
sample_data1 %>%
confirm_distinct(ID_COL1, ID_COL2)
```
```{r}
sample_data1 %>%
confirm_distinct(ID_COL1, ID_COL2, ID_COL3)
```
Here we can conclude that the combination of 3 ID columns is the primary key for the data.
## Determine Distinct
These steps can be automated with the wrapper function `determine_distinct`.
```{r}
sample_data1 %>%
determine_distinct(matches("ID"))
```
# Mapping
`confirm_mapping` tells you the mapping between two columns in a data frame:
- 1 - 1 mapping
- 1 - many mapping
- many - 1 mapping
- many - many mapping
## Confirm mapping
`confirm_mapping` gives the option to view which type of mapping is associated with each individual row.
```{r}
sample_data1 %>%
confirm_mapping(ID_COL1, ID_COL2, view = F)
```
## Determine mapping
```{r}
sample_data1 %>%
determine_mapping(everything())
```
# Overlap
The `overlap` functions give a venn-style description of the values in 2 columns. This is especially useful before performing a `join`, when you want to confirm that the dataframes have matching keys.
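As a quick sketch (using hypothetical tables `orders` and `customers` that share a `customer_id` key), the check slots in naturally right before the join:
```{r, eval = FALSE}
# hypothetical tables: check key overlap before joining
confirm_overlap(orders$customer_id, customers$customer_id)
orders %>%
  dplyr::left_join(customers, by = "customer_id")
```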
## Confirm Overlap
`confirm_overlap` is different from the other `confirm` functions in that it takes 2 vectors as arguments, instead of a data frame. This allows the user to test overlap between different dataframes, or arbitrary vectors if necessary.
```{r}
confirm_overlap(iris$Sepal.Width, iris$Petal.Length) -> iris_overlap
```
`confirm_overlap` returns a summary data frame invisibly allowing you to access individual elements using the helper functions.
```{r}
print(iris_overlap)
```
Find the elements unique to the first column
```{r}
iris_overlap %>%
co_find_only_in_1() %>%
head()
```
Find the elements unique to the second column
```{r}
iris_overlap %>%
co_find_only_in_2() %>%
head()
```
Find the elements shared by both columns
```{r}
iris_overlap %>%
co_find_in_both() %>%
head()
```
## Determine Overlap
`determine_overlap` takes a dataframe and a tidyselect specification, and returns a tibble summarizing all of the pairwise overlaps. Only pairs with matching types are tested.
```{r eval=FALSE, include=FALSE}
iris %>%
determine_overlap(everything())
```
Note that the `overlap` functions only test pairwise overlaps. For multi-column and large-scale overlap testing, see [Complex Upset Plots](https://krassowski.github.io/complex-upset/)
# string length
## confirm string length
Get a frequency table of string lengths in a character column.
The table is printed, while the original df is returned invisibly with an added column indicating the string lengths.
```{r}
iris %>%
confirm_strlen(Species) -> species_len
```
output is a dataframe
```{r}
head(species_len)
```
## choose string length
A helper function for the output of `confirm_strlen` that filters the dataframe for chosen string lengths.
```{r}
species_len %>%
choose_strlen(len = 6) %>%
head()
```
# diagnose
Reproduction of diagnose from the dlookr package. Usually a good choice for first analyzing a data set.
```{r}
iris %>%
diagnose()
```
|
/scratch/gouwar.j/cran-all/cranData/validata/inst/doc/PackageIntroduction.Rmd
|
#' @include validator.R
#' @include indicator.R
#' @include confrontation.R
NULL
setClassUnion('callOrNull',list('call','NULL'))
### COMPARE ----
# Mark an array as comparison so we can overload the 'show' and plot' function.
setClass('comparison', contains='array'
, slots=list(call = 'callOrNull')
, prototype = prototype(array(0,dim=c(0,0)), call=NULL)
)
setClass('validatorComparison',contains='comparison')
setMethod('show',signature('comparison'),function(object){
cat(sprintf('Object of class %s:\n',class(object)))
cat(sprintf('\n %s\n\n',call2text(object@call)))
print(object[,])
})
#' Compare similar data sets
#'
#' Compare versions of a data set by comparing their performance against a
#' set of rules or other quality indicators. This function takes two or
#' more data sets and compares the perfomance of data set \eqn{2,3,\ldots}
#' against that of the first data set (default) or to the previous one
#' (by setting \code{how='sequential'}).
#'
#' @param x An R object
#' @param ... data frames, comma separated. Names become column names in
#' the output.
#'
#' @example ../examples/compare.R
#' @export
setGeneric('compare', def = function(x,...) standardGeneric('compare'))
#' @section Comparing datasets by performance against validator objects:
#'
#' Suppose we have a current and a previous version of a data set. Both
#' can be inspected by \code{\link{confront}}ing them with a rule set.
#' The status changes in rule violations can be partitioned as shown in the
#' following figure.
#' \if{html}{\figure{rulesplit.png}{options: width=80\% alt="rulewise splitting"}}
#' \if{latex}{\figure{rulesplit.pdf}{options: width=13cm}}
#' This function computes the partition for two or more
#' datasets, comparing the current set to the first (default) or to the
#' previous (by setting \code{how='sequential'}).
#'
#' @references
#' The figure is reproduced from MPJ van der Loo and E. De Jonge (2018)
#' \emph{Statistical Data Cleaning with applications in R} (John Wiley & Sons).
#'
#' @param how [\code{character}] Compare each dataset to the first
#'   (\code{"to_first"}, the default) or to its predecessor (\code{"sequential"}).
#' @param .list Optional list of data sets, will be concatenated with \code{...}.
#' @rdname compare
#'
#'
#' @return
#' For \code{validator}: An array where each column represents
#' one dataset.
#' The rows count the following attributes:
#' \itemize{
#' \item{Number of validations performed}
#' \item{Number of validations that evaluate to \code{NA} (unverifiable)}
#' \item{Number of validations that evaluate to a logical (verifiable)}
#' \item{Number of validations that evaluate to \code{TRUE}}
#' \item{Number of validations that evaluate to \code{FALSE}}
#' \item{Number of extra validations that evaluate to \code{NA} (new unverifiable)}
#' \item{Number of validations that still evaluate to \code{NA} (still unverifiable)}
#' \item{Number of validations that still evaluate to \code{TRUE}}
#' \item{Number of extra validations that evaluate to \code{TRUE} }
#' \item{Number of validations that still evaluate to \code{FALSE}}
#' \item{Number of extra validations that evaluate to \code{FALSE}}
#' }
#'
#' @family validation-methods
#' @family comparing
#' @export
setMethod("compare", "validator",
function(x,... , .list=list(), how=c("to_first","sequential")){
L <- c(list(...),.list)
# An explicit check so the code also works for tibble objects
# who disguise as data frames but are really something else.
if (any(sapply(L, inherits, "tbl"))){
L <- lapply(L, function(d) if (inherits(d,"tbl")) as.data.frame(d) else d )
}
names(L) <- make_listnames(L)
how <- match.arg(how)
out <- if (how == "to_first"){
cbind(
rules_diff(x, L[[1]])
, vapply( seq_len( length(L) - 1 )
, function(i) rules_diff(x, L[[i+1]], L[[1]])
, FUN.VALUE = numeric(11) )
)
} else {
cbind(
rules_diff(x, L[[1]])
, vapply( seq_len( length(L) - 1 )
, function(i) rules_diff(x, L[[i+1]], L[[i]])
, FUN.VALUE = numeric(11) )
)
}
colnames(out) <- names(L)
names(dimnames(out)) <- c("Status","Version")
new('validatorComparison'
, out
, call = match.call(definition=compare, sys.call(sys.parent(1L)))
)
})
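# Count the status partition for a single data set: totals, (un)verifiable,
# satisfied and violated checks. When a reference set 'old' is given, each
# status is further split into its 'still' and 'new' components.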
rules_diff <- function(rules, new, old=NULL){
cf_new <- values(confront(new,rules),simplify=FALSE)
validations = sum( sapply(cf_new, length) )
verifiable = sum( sapply(cf_new, function(x) sum(!is.na(x))) )
unverifiable = sum( sapply(cf_new, function(x) sum( is.na(x))) )
violated = sum(sapply(cf_new, function(x) sum(!x, na.rm=TRUE)))
satisfied = sum( sapply(cf_new, sum, na.rm=TRUE) )
if ( is.null(old) ){
still_unverifiable = unverifiable
new_unverifiable = 0
still_satisfied = satisfied
new_satisfied = 0
still_violated = violated
new_violated = 0
} else {
cf_old <- values(confront(old,rules),simplify=FALSE)
still_unverifiable = local({
s <- 0
for ( i in seq_len(length(cf_new)) ) {
s <- s + sum( is.na(cf_new[[i]]) & is.na(cf_old[[i]]) )
}
s
})
new_unverifiable = local({
s <- 0
for ( i in seq_len(length(cf_new)) ){
s <- s + sum(!is.na(cf_old[[i]]) & is.na(cf_new[[i]]))
}
s
})
still_satisfied = local({
s <- 0
for ( i in seq_len(length(cf_new)) ){
s <- s + sum(cf_old[[i]] & cf_new[[i]], na.rm=TRUE)
}
s
})
new_satisfied = local({
s <- 0
for ( i in seq_len(length(cf_new)) ){
s <- s + sum((!cf_old[[i]]|is.na(cf_old[[i]])) & cf_new[[i]], na.rm=TRUE)
}
s
})
still_violated = local({
s <- 0
for ( i in seq_len(length(cf_new)) ){
s <- s + sum(!cf_old[[i]] & !cf_new[[i]], na.rm=TRUE)
}
s
})
new_violated = local({
s <- 0
for ( i in seq_len(length(cf_new)) ){
s <- s + sum((cf_old[[i]]|is.na(cf_old[[i]])) & !cf_new[[i]], na.rm=TRUE)
}
s
})
} # end else
# output
c(validations = validations
, verifiable = verifiable
, unverifiable = unverifiable
, still_unverifiable = still_unverifiable
, new_unverifiable = new_unverifiable
, satisfied = satisfied
, still_satisfied = still_satisfied
, new_satisfied = new_satisfied
, violated = violated
, still_violated = still_violated
, new_violated = new_violated)
}
make_listnames <- function( L, base=sprintf("D%04d",seq_along(L)) ){
nm <- names(L)
if (is.null(nm)) return(base)
nm[nm==""] <- base[nm==""]
nm
}
#' Translate a validatorComparison object to data frame
#'
#' The performance of versions of a data set with regard to rule-based quality
#' requirements can be compared using \code{\link{compare}}. The result is a
#' \code{validatorComparison} object, which can usefully be translated into a data
#' frame.
#'
#' @inheritParams as.data.frame
#'
#' @return A data frame with the following columns.
#' \itemize{
#' \item{\code{status}: Row names of the \code{validatorComparison} object.}
#' \item{\code{version}: Column names of the \code{validatorComparison} object.}
#' \item{\code{count}: Contents of the \code{validatorComparison} object.}
#' }
#'
#'
#' @example ../examples/compare.R
#' @family comparing
#' @export
setMethod("as.data.frame","validatorComparison", function(x,...){
x <- x[,]
class(x) <- "table"
setNames(as.data.frame(x,...),c("status","version","count"))
})
#' Line graph of validatorComparison object
#'
#' The performance of versions of a data set with regard to rule-based quality
#' requirements can be compared using \code{\link{compare}}. The result is a
#' \code{validatorComparison} object. This method creates a line graph, thus
#' suggesting that an ordered sequence of data sets has been compared. See
#' also \code{\link{barplot,validatorComparison-method}} for an unordered version.
#'
#'
#' @param x Object of class \code{validatorComparison}.
#' @param xlab [\code{character}] label for x axis (default none)
#' @param ylab [\code{character}] label for y axis (default none)
#' @param las [\code{numeric}] in \code{{0,1,2,3}} determining axis label rotation
#' @param cex.axis [\code{numeric}] Magnification with respect to the current
#' setting of \code{cex} for axis annotation.
#' @param cex.legend [\code{numeric}] Magnification with respect to the current
#' setting of \code{cex} for legend annotation and title.
#' @param ... Graphical parameters, passed to \code{plot}. See \code{\link[graphics]{par}}.
#'
#' @family comparing
#' @export
setMethod("plot", "validatorComparison"
, function(x
, xlab=""
, ylab=""
, las=2
, cex.axis=0.8
, cex.legend=0.8,...){
oldpar <- par(mar=c(3,3,3,8),cex=1)
on.exit(par(oldpar))
status <- rownames(x)
version <- colnames(x)
dat <- as.data.frame(x)
# set color palettes, line style and line width mappings
  cl_map <- lw_map <- lt_map <- setNames(vector(mode = "integer", length = 11), status)
lt_map[1:11] <- 1
lt_map[grepl("new",names(lt_map))] <- 2
lw_map[1:11] <- 1
lw_map[!grepl("(new)|(still)",names(lw_map))] <- 2
# Colors taken from brewer.pal(6,"Paired")
cl_map[1:11] <- "black"
cl_map[grepl("unverifiable", names(cl_map))] <- "#A6CEE3"
cl_map[grepl("satisfied", names(cl_map))] <- "#33A02C"
cl_map[grepl("violated", names(cl_map))] <- "#E31A1C"
cl_map["verifiable"] <- "#1F78B4"
# setup plot
n <- length(version)
plot(0,0
, col = 'white'
, xlim = c(1,length(version))
, ylim = c(0,max(x))
, las = las
, xaxt = 'n'
, xlab = xlab
, ylab = ylab
, cex.axis= cex.axis
, ...)
# vertical grid
abline(v = seq_along(version), col = "grey", lty = 3)
# graph the main lines
for (stat in status){
d <- dat[dat$status == stat, ]
    lines( as.integer(d$version), d$count
      , type='b', col=cl_map[stat]
      , lwd=lw_map[stat], lty=lt_map[stat], pch=16)
}
axis(side=1, labels=version, at=seq_along(version)
, las=1, padj=c(0,1)
, cex.axis=cex.axis)
# add legend
oldpar <- c(oldpar,par(xpd=TRUE))
legend(x=1.04*n,y=max(x)
, legend = gsub("_"," ",status)
, lwd = lw_map[status]
, lty = lt_map[status]
, col = cl_map[status]
, cex = cex.legend
, bty = "n"
, title = "Count"
)
invisible(NULL)
})
#' Barplot of validatorComparison object
#'
#' The performance of versions of a data set with regard to rule-based quality
#' requirements can be compared using \code{\link{compare}}. The result is a
#' \code{validatorComparison} object. This method creates a stacked bar plot of
#' the results. See also \code{\link{plot,validatorComparison-method}} for a line
#' chart.
#'
#' @param height object of class \code{validatorComparison}
#' @param las [\code{numeric}] in \code{{0,1,2,3}} determining axis label rotation
#' @param cex.axis [\code{numeric}] Magnification with respect to the current
#' setting of \code{cex} for axis annotation.
#' @param cex.legend [\code{numeric}] Magnification with respect to the current
#' setting of \code{cex} for legend annotation and title.
#' @param wrap [\code{logical}] Toggle wrapping of x-axis labels when their width
#' exceeds the width of the column.
#' @param ... Graphical parameters passed to \code{\link[graphics]{barplot.default}}.
#'
#' @note Before plotting, underscores (\code{_}) and dots (\code{.}) in x-axis labels
#' are replaced with spaces.
#'
#' @example ../examples/compare.R
#' @family comparing
#' @export
setMethod("barplot", "validatorComparison", function(height
, las = 1
, cex.axis = 0.8
, cex.legend = cex.axis
, wrap = TRUE
, ...){
oldpar <- par(mar=c(3,3,3,8), xpd=TRUE)
on.exit(par(oldpar))
# turn into array
a <- height[,,drop=FALSE]
# Colors taken from RColorBrewer::brewer.pal(8,"Paired")
cl_map <- c(
"still_satisfied" = "#33A02C" # dark green
, "new_satisfied" = "#B2DF8A" # light green
, "still_unverifiable" = "#FF7F00" # dark yellow/orange
, "new_unverifiable" = "#FDBF6F" # light yellow
, "still_violated" = "#E31A1C" # dark red
, "new_violated" = "#FB9A99" # light red
)
a <- a[names(cl_map),,drop=FALSE]
x <- barplot(a
, col=cl_map
, las=las
, xaxt="n"
, cex.axis=cex.axis
, ...
)
xlabs <- colnames(a)
# simple wrapping heuristic
if (wrap){
# replace punctuation with spaces
xlabs <- trimws(gsub("[_.]"," ", colnames(a)))
# determine column width
colw <- if ( length(x) == 1) 1.0 else x[length(x)] - x[length(x)-1] - 0.2
i <- strwidth(xlabs) > colw
ncol <- ceiling(colw/strwidth("m"))
# wrap and fold
xlabs[i] <- sapply(xlabs[i]
, function(s) paste(strwrap(s, width=ncol),collapse="\n")
)
}
axis(side=1, labels=xlabs, at=x, cex.axis=cex.axis,lwd=0)
# compute legend position
leg_pos <- bp_leg_pos(x)
legend(x = leg_pos, y = sum(a[,1])
, legend= rev(sub("_"," ", names(cl_map)))
, fill=rev(cl_map),bty="n"
, title="Count", cex=cex.legend)
invisible(x)
})
bp_leg_pos <- function(x){
n <- length(x)
# the factor 1.04 is the default location of the
# box after the end of the scale.
  if (n == 1){
    # 1.2 is default width for n==1
    1.04 * 1.2
  } else {
    # bar width + 0.2 (default) separation
    1.04 * (x[n] + (x[n] - x[n-1] - 0.2)/2)
  }
}
setClass('indicatorComparison',contains='comparison')
#' @return For \code{indicator}: A list with the following components:
#' \itemize{
#' \item{\code{numeric}: An array collecting results of scalar indicator (e.g. \code{mean(x)}).}
#' \item{\code{nonnumeric}: An array collecting results of nonnumeric scalar indicators (e.g. names(which.max(table(x))))}
#' \item{\code{array}: A list of arrays, collecting results of vector-indicators (e.g. x/mean(x))}
#' }
#'
#' @rdname compare
setMethod('compare','indicator',
function(x, ...,.list=NULL){
L <- c( list(...), .list)
if ( length(L) < 2 ) stop('you need at least two datasets')
names(L) <- make_listnames(L)
for ( i in seq_along(L) ){
if ( !matches(L[[1]],L[[i]]) )
      stop('dataset ', names(L)[i], ' does not match with dataset ', names(L)[1])
}
n <- names(x)
v <- setNames( lapply(n, function(i) sapply(L, function(y) values(confront(x[i],y))[[1]] )), n)
# simplify where possible
is_array <- sapply(v,is.array)
is_numeric <- sapply(v,is.numeric)
w <- if (any(is_array) ){
lapply(v[is_array],function(x)new('indicatorComparison',x,call=sys.call(2)))
} else {
NULL
}
u <- if(any(is_numeric & !is_array)){
new('indicatorComparison',sapply(v[ is_numeric & !is_array],Id),call=sys.call(2))
} else {
NULL
}
v <- if (any(!is_numeric &!is_array)){
new('indicatorComparison',sapply(v[!is_numeric & !is_array],Id),call=sys.call(2))
} else {
NULL
}
out <- list(numeric=u,nonnumeric=v,array=w)
out[!sapply(out,is.null)]
})
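# Two data sets match when they have equal dimensions, identical column
# names and, when 'id' is given, identical values in the id column(s).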
matches <- function(x,y,id=NULL){
all(dim(x)==dim(y)) &&
all(names(x) == names(y)) &&
ifelse(is.null(id),TRUE, all(x[,id]==y[,id]))
}
### CELLS ----
setClass('cellComparison',contains='comparison')
#' Cell counts and differences for a series of datasets
#'
#' @section Comparing datasets cell by cell:
#'
#' When comparing the contents of two data sets, the total number of cells
#' in the current data set can be partitioned as in the following figure.
#'
#' \if{html}{\figure{cellsplit.png}{options: width=80\% alt="cellwise splitting"}}
#' \if{latex}{\figure{cellsplit.pdf}{options: width=13cm}}
#'
#' This function computes the partition for two or more
#' datasets, comparing the current set to the first (default) or to the
#' previous (by setting \code{compare='sequential'}).
#'
#' @section Details:
#' This function assumes that the datasets have the same dimensions and that both
#' rows and columns are ordered similarly.
#'
#' @references
#' The figure is reproduced from MPJ van der Loo and E. De Jonge (2018)
#' \emph{Statistical Data Cleaning with applications in R} (John Wiley & Sons).
#'
#' @param ... For \code{cells}: data frames, comma separated. Names will become
#' column names in the output. For \code{plot} or \code{barplot}: graphical parameters
#' (see \code{\link[graphics]{par}}).
#' @param .list A \code{list} of data frames; will be concatenated with
#' objects in \code{...}
#' @param compare How to compare the datasets.
#'
#'
#' @return An object of class \code{cellComparison}, which is really an array
#' with a few extra attributes. It counts the total number of cells, the number of
#' missings, the number of altered values and changes therein as compared to
#' the reference defined in \code{compare}.
#'
#' @family comparing
#' @example ../examples/cells.R
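#' @examples
#' # Minimal sketch with hypothetical toy data: compare a raw and an
#' # edited version of the same data set (equal dimensions and ordering).
#' raw <- data.frame(x = c(1, NA, 3), y = c(2, 2, NA))
#' fixed <- data.frame(x = c(1, 2, 3), y = c(2, 2, 0))
#' cells(raw = raw, fixed = fixed)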
#' @export
cells <- function(..., .list = NULL, compare=c("to_first","sequential")){
how <- match.arg(compare)
L <- c( list(...), .list )
names(L) <- make_listnames(L)
out <- if ( how == "to_first" ){
cbind(
cell_diff(L[[1]])
, vapply( seq_len(length(L)-1)
, function(i) cell_diff(L[[i+1]], L[[1]])
, FUN.VALUE = numeric(9))
)
} else {
cbind(
cell_diff( L[[1]] )
, vapply( seq_len(length(L)-1)
, function(i) cell_diff(L[[i+1]], L[[i]])
, FUN.VALUE = numeric(9) )
)
}
colnames(out) <- names(L)
new("cellComparison"
, out
, call = match.call( definition=cells, sys.call(sys.parent(1L)) )
)
}
cell_diff <- function(new, old=NULL){
n_cells <- prod(dim(new))
n_avail <- sum(!is.na(new))
n_miss <- sum( is.na(new))
if (is.null(old)){
c(
cells = n_cells
, available = n_avail
, still_available = sum(!is.na(new))
, unadapted = sum(!is.na(new))
, adapted = 0
, imputed = 0
, missing = n_miss
, still_missing = sum(is.na(new))
, removed = 0
)
} else {
c(
cells = n_cells
, available = n_avail
, still_available = sum(!is.na(new) & !is.na(old) )
, unadapted = sum( old == new, na.rm=TRUE )
, adapted = sum( old != new, na.rm=TRUE )
, imputed = sum( is.na(old) & !is.na(new) )
, missing = n_miss
, still_missing = sum( is.na(old) & is.na(new) )
, removed = sum(!is.na(old) & is.na(new) )
)
}
}
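# Illustrative sketch (toy input, not run): with
#   old <- data.frame(x = c(1, NA, 3))
#   new <- data.frame(x = c(1, 2, NA))
# cell_diff(new, old) gives cells=3, available=2, still_available=1,
# unadapted=1, adapted=0, imputed=1, missing=1, still_missing=0, removed=1.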
#' Translate cellComparison objects to data frame
#'
#' Versions of a data set can be cellwise compared using
#' \code{\link{cells}}. The result is a \code{cellComparison} object,
#' which can usefully be translated into a data frame.
#'
#' @inheritParams as.data.frame
#'
#' @return A data frame with the following columns.
#' \itemize{
#' \item{\code{status}: Row names of the \code{cellComparison} object.}
#' \item{\code{version}: Column names of the \code{cellComparison} object.}
#' \item{\code{count}: Contents of the \code{cellComparison} object.}
#' }
#'
#' @example ../examples/cells.R
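#' @examples
#' # Additional sketch with hypothetical toy data: counts in long format
#' raw <- data.frame(x = c(1, NA))
#' fixed <- data.frame(x = c(1, 2))
#' as.data.frame(cells(raw = raw, fixed = fixed))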
#' @family comparing
#' @export
setMethod("as.data.frame","cellComparison", function(x,...){
x <- x[,]
class(x) <- "table"
setNames(as.data.frame(x,...),c("status","version","count"))
})
#' Line graph of a cellComparison object.
#'
#' Versions of a data set can be compared cell by cell
#' using \code{\link{cells}}. The result is a \code{cellComparison}
#' object. This method creates a line graph, suggesting that an
#' ordered sequence of data sets has been compared.
#' See also \code{\link{barplot,cellComparison-method}} for an
#' unordered version.
#'
#' @param x a \code{cellComparison} object.
#' @inheritParams plot,validatorComparison-method
#' @family comparing
#' @export
setMethod("plot","cellComparison"
, function(x
, xlab=""
, ylab=""
, las=2
, cex.axis=0.8
, cex.legend=0.8,...){
oldpar <- par(mar=c(3,3,3,8))
on.exit(par(oldpar))
status <- rownames(x)
version <- colnames(x)
dat <- as.data.frame(x)
cl_map <- lw_map <- lt_map <- setNames(rep(1,9), rownames(x))
lt_map[c("adapted","imputed","removed")] <- 2
lw_map[1:9] <- 1
lw_map[c("cells","available","missing","imputed")] <- 2
# Colors taken from RColorBrewer::brewer.pal(8,"Paired")
cl_map["cells"] <- "black"
cl_map[c("available","still_available")] <- "#1F78B4" # dark blue
cl_map[c("unadapted","adapted","imputed")] <- "#A6CEE3" # light blue
cl_map["missing"] <- "#FF7F00" # dark yellow
cl_map[c("removed","still_missing")] <- "#FDBF6F" # light yellow
n <- length(version)
plot(0,0,col='white'
, xlim=c(1,length(version))
, ylim=c(0,max(x))
, las=las
, xaxt='n'
, xlab=xlab
, ylab=ylab
, cex.axis=cex.axis,...)
abline(v=seq_along(version),col="grey",lty=3)
for (stat in status){
d <- dat[dat$status == stat, ]
lines( as.integer(d$version), d$count, type='b', col=cl_map[stat]
,lwd = lw_map[stat], lty = lt_map[stat], pch = 16)
}
axis(side=1,labels=version,at=seq_along(version),
las=1,padj=rep(c(0,1),times=n),cex.axis=cex.axis)
oldpar <- c(oldpar,par(xpd=TRUE))
legend(x=1.04*n,y=max(x)
, legend = gsub("_"," ",status)
, lwd = lw_map[status]
, lty = lt_map[status]
, col = cl_map[status]
, cex = cex.legend
, bty = "n"
, title="Count"
)
invisible(NULL)
})
#' Barplot of cellComparison object
#'
#' Versions of a data set can be compared cell by cell using \code{\link{cells}}.
#' The result is a \code{cellComparison} object. This method creates a stacked bar
#' plot of the results. See also \code{\link{plot,cellComparison-method}} for a
#' line chart.
#'
#' @param height object of class \code{cellComparison}
#' @param las [\code{numeric}] in \code{{0,1,2,3}} determining axis label rotation
#' @param cex.axis [\code{numeric}] Magnification with respect to the current
#' setting of \code{cex} for axis annotation.
#' @param cex.legend [\code{numeric}] Magnification with respect to the current
#' setting of \code{cex} for legend annotation and title.
#' @param wrap [\code{logical}] Toggle wrapping of x-axis labels when their width
#' exceeds the width of the column.
#' @param ... Graphical parameters passed to \code{\link[graphics]{barplot.default}}.
#'
#' @note Before plotting, underscores (\code{_}) and dots (\code{.}) in x-axis
#' labels are replaced with spaces.
#'
#' @family comparing
#' @export
setMethod("barplot", "cellComparison", function(height
, las = 1
, cex.axis = 0.8
, cex.legend = cex.axis
, wrap = TRUE
, ...){
oldpar <- par(mar=c(3,3,3,8), xpd=TRUE)
on.exit(par(oldpar))
# turn into array
a <- height[,,drop=FALSE]
a <- a[c("unadapted","adapted","imputed","still_missing","removed"),,drop=FALSE]
# Colors taken from RColorBrewer's "RdYlBu" palettes
cl_map <- c(
unadapted = "#4575B4" # dark blue
, adapted = "#91BFDB" # blue
, imputed = "#E0F3F8" # light blue
, still_missing = "#FC8D59" # orange
, removed = "#FEE090" # yellow
)
x <- barplot(a
, col=cl_map[rownames(a)]
, las=las
, xaxt="n"
, cex.axis=cex.axis
, ...
)
# replace punctuation with spaces
xlabs <- trimws(gsub("[_.]+"," ", colnames(a)))
# simple wrapping heuristic
if ( isTRUE(wrap) ){
barwidth <- if ( length(x) == 1) 1.0 else x[length(x)] - x[length(x)-1] - 0.2
i <- strwidth(xlabs) > barwidth
ncol <- ceiling(barwidth/strwidth("m"))
# wrap and fold
xlabs[i] <- sapply(xlabs[i]
, function(s) paste(strwrap(s, width=ncol),collapse="\n")
)
}
axis(side=1, labels=xlabs, at=x, cex.axis=cex.axis,lwd=0)
# compute legend position
leg_pos <- bp_leg_pos(x)
legend(x = leg_pos, y = sum(a[,1])
, legend= rev(sub("_"," ", rownames(a)))
, fill=rev(cl_map[rownames(a)]),bty="n"
, title="Count", cex=0.8)
invisible(x)
})
# helper function: compute reasonable location of
# legend in barplot.
bp_leg_pos <- function(x){
n <- length(x)
# the factor 1.04 is the default location of the
# box after the end of the scale.
leg_pos <- if (n ==1){
# 1.2 is default width for n==1
1.04 * 1.2
} else {
# bar width + 0.2 (default) separation
1.04 * (x[n] + (x[n] - x[n-1]-0.2)/2)
}
leg_pos
}
#' Create matching subsets of a sequence of data
#'
#' @param ... A sequence of \code{data.frame}s, possibly in the form of \code{<name>=<value>} pairs.
#' @param .list A list of \code{data.frame}s; will be concatenated with \code{...}.
#' @param id Names or indices of columns to use as index.
#'
#' @return A list of \code{data.frames}, subsetted and sorted so that all cells correspond.
#' @export
#' @family comparing
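#' @examples
#' # Minimal sketch with hypothetical toy data: align columns, then rows by key
#' d1 <- data.frame(id = 1:3, x = c(10, 20, 30))
#' d2 <- data.frame(id = c(3, 1), x = c(31, 10), y = c("a", "b"))
#' match_cells(d1, d2, id = "id")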
match_cells <- function(...,.list=NULL,id=NULL){
L <- c(list(...), .list)
# match columns
nm <- Reduce(intersect,lapply(L,names))
L <- lapply(L,`[`,nm)
if (!is.null(id)){ # match rows
ID <- lapply(L, function(d) do.call(paste0,as.list(d[id])))
ids <- Reduce(intersect, ID)
L <- lapply(seq_along(L), function(i) L[[i]][match(ids, ID[[i]],nomatch=0),])
}
L
}
|
/scratch/gouwar.j/cran-all/cranData/validate/R/compare.R
|
#' @include validator.R
#' @include indicator.R
NULL
# CONFRONTATION OBJECT --------------------------------------------------------
#' Superclass storing results of confronting data with rules
#'
#' @section Details:
#' This class is aimed at developers of this package or packages depending on
#' it. It is the parent of classes \code{\link{indication}} and
#' \code{\link{validation}} which are user-facing.
#'
#' Using \code{\link{confront}}, a set of rules can be executed in the context
#' of one or more (nested) environments holding data. The results of such evaluations
#' are stored in a \code{confrontation} object along with metadata.
#'
#' We strongly advise against accessing the data fields or methods internal to
#' this object directly, as we may change or remove them without notice. Use
#' the exported methods listed below instead.
#'
#' @family confrontation-methods
#'
#' @aliases confrontation
#' @keywords internal
setRefClass("confrontation"
,fields = list(
._call = "call" # (user's) call that generated the object
, ._value = "list" # results of confrontation
, ._calls = "list" # calls executed during confrontation
, ._warn = "list" # list of 'warning' objects
, ._error = "list" # list of 'error' objects
, ._keys = "list" # list with at least 'keyset': an object containing the identifying variables in dat.
, ._event = "character" # Metadata identifying the confrontation event.
)
, methods=list(
show = function() .show_confrontation(.self)
)
)
confrontation_nwarn <- function(x) sum(vapply(x$._warn, function(w)!is.null(w), FUN.VALUE = logical(1)))
confrontation_nerrs <- function(x) sum(vapply(x$._error, function(w)!is.null(w), FUN.VALUE = logical(1)))
confrontation_nmiss <- function(x) sum(vapply(x$._value, anyNA, FUN.VALUE=logical(1)))
.show_confrontation <- function(.self){
cat(sprintf("Object of class '%s'\n",class(.self)))
cat(sprintf("Call:\n ")); print(.self$._call); cat('\n')
cat(sprintf('Rules confronted: %d\n', length(.self$._calls)))
cat(sprintf(' With missings: %d\n', confrontation_nmiss(.self) ))
cat(sprintf(' Threw warning: %d\n', confrontation_nwarn(.self) ))
cat(sprintf(' Threw errors : %d\n', confrontation_nerrs(.self) ))
}
# S4 GENERICS -----------------------------------------------------------------
#' Confront data with a (set of) expressionset(s)
#'
#' An expressionset is a general class storing rich expressions (basically
#' expressions and some meta data) which we call 'rules'. Examples of
#' expressionset implementations are \code{\link{validator}} objects, storing
#' validation rules and \code{\link{indicator}} objects, storing data quality
#' indicators. The \code{confront} function evaluates the expressions one by one
#' on a dataset while recording some process meta data. All results are stored in
#' a (subclass of a) \code{confrontation} object.
#'
#'
#' @param dat An R object carrying data
#' @param x An R object carrying \code{\link{rule}}s.
#' @param ref Optionally, an R object carrying reference data. See examples for usage.
#' @param ... Options used at execution time (especially \code{'raise'}).
#' See \code{\link{voptions}}.
#'
#' @section Reference data:
#'
#' Reference data is typically a \code{list} with items such as
#' a code list, or a data frame whose rows match the rows of the
#' data under scrutiny.
#'
#'
#' @seealso \code{\link{voptions}}
#'
#' @family confrontation-methods
#' @family validation-methods
#' @family indication-methods
#'
#' @example ../examples/confront.R
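#' @examples
#' # Sketch of the 'ref' argument, using a hypothetical code list
#' dat <- data.frame(status = c("married", "single", "widowed"))
#' codes <- list(valid_status = c("married", "single", "divorced"))
#' summary(confront(dat, validator(status %in% valid_status), ref = codes))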
#' @export
setGeneric("confront",
def = function(dat, x, ref, ...) standardGeneric("confront")
)
#' Get or set event information metadata from a 'confrontation' object.
#'
#' The purpose of event information is to store information that allows for
#' identification of the confronting event.
#'
#'
#' @param x an object of class \code{confrontation}
#'
#' @return A character vector with elements
#' \code{"agent"}, which defaults to the R version and platform returned by
#' \code{R.version}, a timestamp (\code{"time"}) in ISO 8601 format and a
#' \code{"actor"} which is the user name returned by \code{Sys.info()}. The
#' last element is called \code{"trigger"} (default \code{NA_character_}), which
#' can be used to record the event that triggered the confrontation.
#'
#' @references
#' Mark van der Loo and Olav ten Bosch (2017)
#' \href{https://markvanderloo.eu/files/share/loo2017design.pdf}{Design of a generic machine-readable validation report structure},
#' version 1.0.0.
#'
#' @examples
#' data(retailers)
#' rules <- validator(turnover >= 0, staff >=0)
#' cf <- confront(retailers, rules)
#' event(cf)
#'
#' # adapt event information
#' u <- event(cf)
#' u["trigger"] <- "spontaneous validation"
#' event(cf) <- u
#' event(cf)
#'
#' @family confrontation-methods
#' @family validation-methods
#' @family indication-methods
#'
#' @export
setGeneric("event", def = function(x) standardGeneric("event"))
#' @rdname event
setGeneric("event<-", def=function(x, value) standardGeneric("event<-"))
## syntactic sugar function
#' Simple data validation interface
#'
#' @section Details:
#' Creates an object of class \code{\link{validator}} and \code{\link{confront}}s it with the data.
#' This function is easy to use in combination with the \pkg{magrittr} pipe operator.
#'
#' @param dat an R object carrying data
#' @param ... a comma-separated set of validating expressions.
#'
#' @return An object of class \code{\link{validation}}
#' @example ../examples/check_that.R
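#' @examples
#' # One-liner on a built-in data set
#' summary(check_that(women, height > 60, weight > 0))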
#' @family validation-methods
#' @export
check_that <- function(dat,...){
cf <- confront(dat,validator(...))
cf$._call <- sys.call()
cf
}
#' Get key set stored with a confrontation
#'
#' @inheritParams event
#'
#' @return If a confrontation is created with the \code{key=} option
#' set, this function returns the key set, otherwise \code{NULL}
#'
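#' @examples
#' # Sketch with hypothetical toy data: the key column travels with the result
#' dat <- data.frame(id = letters[1:3], x = c(1, -1, 2))
#' cf <- confront(dat, validator(x >= 0), key = "id")
#' keyset(cf)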
#' @export
setGeneric("keyset", def=function(x) standardGeneric("keyset"))
#' @rdname keyset
#' @family confrontation-methods
#' @export
setMethod("keyset", "confrontation", function(x){
x$._keys$keyset
})
#' Get values from object
#'
#'
#' @param x an R object
#' @param ... Arguments to pass to or from other methods
#'
#' @export
setGeneric('values',def=function(x,...) standardGeneric('values'))
#' Get messages from a confrontation object
#' @param x An object of class \code{\link{confrontation}}
#' @param ... Arguments to be passed to other methods.
#'
#'
#' @example ../examples/exceptions.R
#' @family confrontation-methods
#' @export
setGeneric("errors",def = function(x,...) standardGeneric("errors"))
# retrieve warnings from a confrontation object
setGeneric("warnings")
# useful ways to aggregate confrontations
setGeneric('aggregate')
# useful ways to sort confrontations
setGeneric('sort')
# S4 METHODS ------------------------------------------------------------------
## The below function is a worker that assumes all relevant data is present in
## an environment, possibly with a parent containing reference data. Most, if not
## all R-based 'confront' methods will convert to this form and call the worker.
##
## x a validator object
## dat an environment
## key a character indicating a key.
##
confront_work <- function(x, dat, key=NULL, class='confrontation', ...){
opts <- x$clone_options(...)
lin_eq_eps <- opts('lin.eq.eps')
lin_ineq_eps <- opts('lin.ineq.eps')
calls <- x$exprs(expand_assignments=TRUE
, lin_eq_eps=lin_eq_eps
, lin_ineq_eps=lin_ineq_eps
, dat=dat)
L <- execute(calls,dat,opts)
new(class,
._call = match.call(definition=confront,call=sys.call(sys.parent(2)))
, ._calls = calls
, ._value = lapply(L,"[[",1)
, ._warn = lapply(L,"[[",2)
, ._error = lapply(L,"[[",3)
, ._keys = list(keyset = key)
, ._event = c(
agent = sprintf("%s > %s %s.%s > validate %s"
, R.version[["platform"]]
, R.version[["language"]], R.version[["major"]], R.version[["minor"]]
, utils::packageVersion("validate") )
, time = format(Sys.time(),"%Y%m%dT%H%M%S%z")
, actor = Sys.info()[["user"]]
, trigger = NA_character_ )
)
}
#' @rdname select
#' @aliases [,confrontation-method
#' @family confrontation-methods
#' @export
setMethod("[","confrontation",function(x,i,j,...,drop=TRUE){
# this trycatch mechanism protects against an error
# occurring when confrontation objects are indexed
# within lapply. See GH issue #116.
call <- tryCatch(match.call(call=sys.call(sys.parent()))
, error = function(e) NULL)
if (is.null(call)){
call <- match.call()
}
new(class(x)
, ._call = call
, ._calls = x$._calls[i]
, ._value = x$._value[i]
, ._warn = x$._warn[i]
, ._error = x$._error[i]
, ._keys = x$._keys
, ._event = x$._event # retain event metadata of the original confrontation
)
})
#' @rdname event
#' @export
setMethod("event", signature = "confrontation", definition = function(x){
x$._event
})
#' @rdname event
#' @param value \code{[character]} vector of length 4 with event identifiers.
#' @export
setMethod("event<-","confrontation", function(x, value){
stopifnot(is.character(value))
stopifnot(all( names(value) == c("agent","time","actor","trigger") ) )
x$._event <- value
invisible(x)
})
#' @rdname length
#' @aliases length,confrontation-method
#' @family confrontation-methods
#' @export
setMethod("length","confrontation",function(x) length(x$._value))
# indicators serve a different purpose than validations.
#' Store results of evaluating indicators
#'
#' \bold{This feature is currently experimental and may change in the future}
#'
#' @section Details:
#' An \code{indication} stores a set of results generated by evaluating
#' an \code{\link{indicator}} in the context of data along with some metadata.
#'
#'
#' @section Exported S4 methods for \code{indication}:
#' \itemize{
#' \item{Methods exported for objects of class \code{\link{confrontation}}}
#' \item{\code{\link{summary,indication-method}}}
#' \item{\code{\link{values,indication-method}}}
#' }
#'
#' @keywords internal
#'
#' @section See also:
#' \itemize{
#' \item{\code{\link{confront}}}
#' \item{\code{\link{validation-class}}}
#' }
#' @aliases indication
#' @family indication-methods
setRefClass("indication", contains = "confrontation")
#' @rdname confront
setMethod("confront", signature("data.frame","indicator"), function(dat, x, key=NULL,...){
data_env <- list2env(dat)
data_env$. <- dat
confront_work(x, data_env, dat[key], class = "indication",...)
})
#' @rdname confront
setMethod("confront",signature("data.frame","indicator","environment"), function(dat, x, ref, key=NULL, ...){
data_env <- namecheck(list2env(dat,parent=ref))
data_env$. <- dat
confront_work(x,data_env, dat[key], class="indication",...)
})
#' @rdname confront
setMethod("confront",signature("data.frame","indicator","data.frame"),function(dat, x,ref, key=NULL,...){
env <- new.env()
env$ref <- ref
data_env <- namecheck(list2env(dat, parent=env))
data_env$. <- dat
confront_work(x, data_env, dat[key], class="indication", ...)
})
#' @rdname confront
setMethod("confront",signature("data.frame","indicator","list"),function(dat, x,ref,key=NULL,...){
env <- list2env(ref)
data_env <- namecheck(list2env(dat,parent=env))
data_env$. <- dat
confront_work(x, data_env, dat[key], class="indication",...)
})
#' @rdname validate-summary
#' @param object An R object
#' @param ... Currently unused
#'
#' @aliases validate-summary summary,indication-method
#' @section Indication:
#' Some basic information per evaluated indicator is reported: the number
#' of items to which the indicator was applied, the output \code{class},
#' some statistics (min, max, mean, number of NA)
#' and whether an exception occurred (warnings or errors). The evaluated
#' expression is reported as well.
#'
#' @family indication-methods
#' @export
setMethod('summary',signature('indication'), function(object,...){
data.frame(
name = names(object$._value)
, items = sapply(object$._value,length)
, min = get_stat(object,min,na.rm=TRUE)
, mean = get_stat(object,mean,na.rm=TRUE)
, max = get_stat(object,max,na.rm=TRUE)
, nNA = nas(object)
, error = has_error(object)
, warning = has_warning(object)
, expression = sapply(object$._calls,call2text)
, row.names=NULL
, stringsAsFactors=FALSE
)
})
# helper function: x is a confrontation object
get_stat <- function(x,what,...){
out <- rep(NA,length(x$._value))
i <- !is_null(x)
out[i] <- tryCatch(
sapply(x$._value[i],what,...)
, error = function(e) NA
, warning = function(e) NA
)
out
}
#' Store results of evaluating validating expressions
#'
#' @section Details:
#' A object of class \code{validation} stores a set of results generated by
#' evaluating an \code{\link{validator}} in the context of data along with some
#' metadata.
#'
#'
#' @aliases validation
#' @family validation-methods
setRefClass("validation", contains = "confrontation")
setMethod("show","validation",function(object){
cat(sprintf("Object of class '%s'\n",class(object)))
cat(sprintf("Call:\n ")); print(object$._call); cat('\n')
cat(sprintf('Rules confronted: %d\n', length(object$._calls)))
cat(sprintf(' With fails : %d\n', failed_confrontations(object)))
cat(sprintf(' With missings: %d\n', confrontation_nmiss(object)))
cat(sprintf(' Threw warning: %d\n', confrontation_nwarn(object)))
cat(sprintf(' Threw error : %d\n', confrontation_nerrs(object)))
})
#' @rdname confront
#' @param key (optional) name of identifying variable in x.
setMethod("confront", signature("data.frame","validator"), function(dat, x, key=NULL, ...){
data_env <- list2env(dat)
data_env$. <- dat
confront_work(x, data_env, dat[key],'validation',...)
})
namecheck <- function(x){
n1 <- ls(x)
n2 <- ls(parent.env(x))
i <- n1 %in% n2
if (any(i)){
n <- paste(paste0("'",n1[i],"'"),collapse=", ")
w <- sprintf("Possible reference ambiguity: both current data set and reference data have variables named %s.",n)
warning(w)
}
x
}
#' @rdname confront
setMethod("confront",signature("data.frame","validator","environment"), function(dat, x, ref, key=NULL, ...){
data_env <- namecheck(list2env(dat,parent=ref))
data_env$. <- dat
confront_work(x, data_env, dat[key], class="validation",...)
})
#' @rdname confront
setMethod("confront",signature("data.frame","validator","data.frame"),function(dat, x,ref, key=NULL,...){
env <- new.env()
env$ref <- ref
data_env <- namecheck(list2env(dat, parent=env))
data_env$. <- dat
confront_work(x, data_env, dat[key], class="validation", ...)
})
#' @rdname confront
setMethod("confront",signature("data.frame","validator","list"),function(dat, x,ref,key=NULL,...){
env <- list2env(ref)
data_env <- namecheck(list2env(dat,parent=env))
data_env$. <- dat
confront_work(x, data_env, dat[key], class="validation",...)
})
# match rows; prepare for 'left join'.
# of : an environment containing data.frames
# against: a reference data.frame to match against.
# using : a key (character)
#match_rows <- function(of, against, using){
# key1 <- against[,using]
# for ( nm in ls(of) ){
# i <- match(key1, of[[nm]][,using], nomatch = nrow(of) + 1)
# of[[nm]] <- of[[nm]][i,,drop=FALSE]
# }
#}
#add_names <- function(L,x,y,key){
# keys <- y[[key]]
# nkey <- length(keys)
# L <- lapply(L,function(v){
# if ( length(v[[1]]) == nkey )
# v[[1]] <- setNames(v[[1]], keys)
# v
# })
#}
# execute calls.
# - Assignments are stored in a separate environment and forgotten afterwards.
# - Failed assignments yield a warning.
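# e.g. for rules defined as validator(m := mean(x), x > m), the assignment
# 'm := mean(x)' is evaluated into 'env' and dropped afterwards; only the
# check 'x > m' contributes to the confrontation output.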
execute <- function(calls,env,opts){
lapply(calls, function(g){
if ( g[[1]] == ":=" ){
var <- as.character(left(g))
if ( var %in% variables(env) )
warning(sprintf("Locally overwriting variable '%s'",var))
assign(var, tryCatch( eval(right(g), env), error=warning), envir=env)
} else {
val <- factory(eval,opts)(g, env)
if ( !is.na(opts('na.value')) ){
val[[1]] <- ifelse(is.na(val[[1]]), opts('na.value'), val[[1]])
}
val
}
}
)[!is.assignment(calls)]
}
# x inherits from 'confrontation'
has_error <- function(x) !sapply(x$._error,is.null)
has_warning <- function(x) !sapply(x$._warn, is.null)
has_value <- function(x) sapply(x$._value, function(a) !is.null(a))
is_null <- function(x) sapply(x$._value, is.null)
passes <- function(x){
sapply(x$._value, function(a)
if ( is.null(a) ) 0 else sum(a,na.rm=TRUE)
)
}
# return confrontation that failed
failed_confrontations <- function(x){
sum(fails(x) > 0)
}
fails <- function(x){
sapply(x$._value, function(a)
if ( is.null(a) ) 0 else sum(!a,na.rm=TRUE)
)
}
nas <- function(x){
sapply(x$._value, function(a)
if ( is.null(a) ) 0 else sum(is.na(a))
)
}
#' @rdname validate-summary
#' @section Validation:
#' Some basic information per evaluated validation rule is reported: the number of
#' items to which the rule was applied, the output \code{class}, some statistics
#' (passes, fails, number of NA) and whether an exception occurred (warnings or
#' errors). The evaluated expression is reported as well.
#' @family validation-methods
setMethod('summary',signature('validation'),function(object,...){
data.frame(
name = names(object$._value)
, items = sapply(object$._value,length)
, passes = passes(object)
, fails = fails(object)
, nNA = nas(object)
, error = has_error(object)
, warning = has_warning(object)
, expression = sapply(object$._calls, call2text)
, row.names=NULL
, stringsAsFactors=FALSE
)
})
#' @rdname values
setMethod('values',signature('confrontation'),function(x,...){
x$._value
})
#' @rdname values
#' @aliases values,validation-method
#' @param simplify Combine results with similar dimension structure into arrays?
#' @param drop if a single vector or array results, drop 'list' attribute?
#' @family confrontation-methods
setMethod('values',signature('validation'),function(x,simplify=TRUE,drop=TRUE,...){
int_values(x,simplify,drop,...)
})
#' @rdname values
#' @aliases values,indication-method
#' @family validation-methods
setMethod('values',signature('indication'),function(x,simplify=TRUE,drop=TRUE,...){
int_values(x,simplify,drop,...)
})
int_values <- function(x,simplify,drop,...){
out <- if ( simplify ){
simplify_list(x$._value[!is_null(x)])
} else {
getMethod(values,signature='confrontation')(x,...)
}
if (drop && length(out) == 1) out[[1]] else out
}
simplify_list <- function(L){
len <- sapply(L,num_result)
lapply(unique(len), function(l){
m <- sapply(L[len==l], get_result)
if ( l == 1 )
m <- matrix(m,nrow=1,dimnames=list(NULL,names(m)))
m
})
}
#' @rdname errors
setMethod("errors","confrontation",function(x,...){
i <- has_error(x)
x$._error[i]
})
#' @rdname errors
#' @export
setMethod("warnings","confrontation",function(x,...){
i <- has_warning(x)
x$._warn[i]
})
#' Get names from \code{confrontation} object
#'
#' @rdname names
#' @family validation-methods
#' @export
setMethod("names", "confrontation", function(x){
names(x$._value)
})
#' Plot validation results
#'
#' Creates a barplot of validation result. For each validation rule, a stacked bar
#' is plotted with percentages of failing, passing, and missing results.
#'
#' @param x a confrontation object.
#' @param fill \code{[character]} vector of length 3. Colors representing fails, passes, and missings
#' @param col Edge colors for the bars.
#' @param rulenames \code{[character]} vector of size \code{length(x)}. If not specified, names
#' are taken from \code{x}.
#' @param labels \code{[character]} vector of length 4. Replace legend annotation.
#' @param title \code{[character]} Change the default title.
#' @param xlab \code{[character]} Change the title
#' @param y not used
#' @param ... not used
#'
#' @details
#' The plot function tries to be smart about placing labels on the y axis. When
#' the number of bars becomes too large, no y axis annotation will be shown and the
#' bars will become space-filling.
#'
#'
#' @export
#' @family validation-methods
#' @example ../examples/plot.R
setMethod("plot","validation", function(x, y
, fill=c("#FE2712","#66B032","#dddddd")
, col=fill
, rulenames = names(x)
, labels=c("Fails","Passing","Missing","Total")
, title = NULL
, xlab = NULL
, ...)
{
stopifnot(length(rulenames) == length(x))
if(length(errors(x))>=1){
errs <- paste(names(errors(x)), collapse=", ")
msgf("Rules %s not included in plot since they could not be executed. See ?errors"
, errs)
}
m <- aggregate(x, by="rule")
rulenames <- rulenames[!( names(x) %in% names(errors(x)) )]
if (is.null(m)){
msgf("Noting to plot")
return(NULL)
}
plot_validation(as.matrix(m[, c("nfail","npass","nNA"), drop=FALSE])
, fill = fill
, col = col
, rulenames = rulenames
, labels = labels
, title = title
, xlab = xlab )
})
#' Aggregate validation results
#'
#' Aggregate results of a validation.
#'
#' @param x An object of class \code{\link{validation}}
#' @param by Report on violations per rule (default) or per record?
#' @param drop drop list attribute if the result is list of length 1
#' @param ... Arguments to be passed to or from other methods.
#'
#' @return By default, a \code{data.frame} with the following columns.
#' \tabular{ll}{
#' keys \tab If confront was called with \code{key=}\cr
#' \code{npass} \tab Number of items passed\cr
#' \code{nfail} \tab Number of items failing\cr
#' \code{nNA} \tab Number of items resulting in \code{NA}\cr
#' \code{rel.pass} \tab Relative number of items passed\cr
#' \code{rel.fail} \tab Relative number of items failing\cr
#' \code{rel.NA} \tab Relative number of items resulting in \code{NA}
#' }
#' If \code{by='rule'} the relative numbers are computed with respect to the number
#' of records for which the rule was evaluated. If \code{by='record'} the relative numbers
#' are computed with respect to the number of rules the record was tested against.
#'
#' When \code{by='record'} and not all validation results have the same dimension structure,
#' a list of \code{data.frames} is returned.
#'
#' @family validation-methods
#' @aliases aggregate,validation-method
#' @example ../examples/aggregate.R
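#' @examples
#' # Sketch: fails per rule versus fails per record, on a built-in data set
#' cf <- check_that(women, height > 60, weight > 120)
#' aggregate(cf, by = "rule")
#' head(aggregate(cf, by = "record"))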
#' @export
setMethod('aggregate',signature('validation'), function(x,by=c('rule','record'), drop=TRUE,...){
v <- values(x, drop=FALSE)
by <- match.arg(by)
aggr <- if ( by == 'rule') colSums else rowSums
ntot <- if ( by == 'rule') nrow else ncol
L <- lapply(v, function(y){
s <- if(is.null(dim(y))) 0 else aggr(y,na.rm=TRUE)
na <- if(is.null(dim(y))) 0 else aggr(is.na(y))
N <- if (is.null(dim(y))) 0 else ntot(y)
nfail = N - s - na
out <- data.frame(
npass = s
, nfail = nfail
, nNA = na
, rel.pass = s/N
, rel.fail = nfail/N
, rel.NA = na/N
)
keys <- x$._keys$keyset
if (by=="record" && nrow(out)==nrow(keys)) cbind(keys,out) else out
})
if ( length(L) == 1 && drop ) L <- L[[1]]
if ( by == 'rule' && !is.data.frame(L) ){
L <- do.call(rbind,L)
# errors do not yield values, so rules that raised an error are not
# included by 'values'. We also put rules in the same order as in the
# 'validator' object.
ii <- match(names(x)[!names(x) %in% names(errors(x))], rownames(L))
L <- L[ii,, drop=FALSE]
}
L
})
#' Aggregate and sort the results of a validation.
#'
#' @param x An object of class \code{\link{validation}}
#' @param by Report on violations per rule (default) or per record?
#' @param drop drop list attribute if the result has a single argument.
#' @param decreasing Sort by decreasing number of passes?
#' @param ... Arguments to be passed to or from other methods.
#' @return A \code{data.frame} with the following columns.
#' \tabular{ll}{
#' keys \tab If confront was called with \code{key=}\cr
#' \code{npass} \tab Number of items passed\cr
#' \code{nfail} \tab Number of items failing\cr
#' \code{nNA} \tab Number of items resulting in \code{NA}\cr
#' \code{rel.pass} \tab Relative number of items passed\cr
#' \code{rel.fail} \tab Relative number of items failing\cr
#' \code{rel.NA} \tab Relative number of items resulting in \code{NA}
#' }
#' If \code{by='rule'} the relative numbers are computed with respect to the number
#' of records for which the rule was evaluated. If \code{by='record'} the relative numbers
#' are computed with respect to the number of rules the record was tested against. By default
#' the most failed validations and records with the most fails are on the top.
#'
#' When \code{by='record'} and not all validation results have the same dimension structure,
#' a list of \code{data.frames} is returned.
#'
#' @family validation-methods
#' @aliases sort,validation-method
#' @example ../examples/aggregate.R
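#' @examples
#' # Sketch: records with the most failed checks end up on top
#' cf <- check_that(women, height > 60, weight > 130)
#' head(sort(cf, by = "record"))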
#' @export
setMethod('sort',signature('validation'),function(x, decreasing=FALSE, by=c('rule','record'), drop=TRUE,...){
v <- values(x, drop=FALSE)
by <- match.arg(by)
aggr <- if ( by == 'rule') colSums else rowSums
ntot <- if ( by == 'rule') nrow else ncol
L <- lapply(v, function(y){
s <- aggr(y,na.rm=TRUE)
i <- order(s,decreasing=decreasing)
s <- s[i]
na <- aggr(is.na(y))[i]
N <- ntot(y)
nfail = N - s - na
out <- data.frame(
npass = s
, nfail = nfail
, nNA = na
, rel.pass = s/N
, rel.fail = nfail/N
, rel.NA = na/N
)
if (by=="record"){
keys <- x$._keys$keyset
if (nrow(out)==nrow(keys)) cbind(keys[i,,drop=FALSE],out) else out
} else {
out
}
})
if ( length(L) == 1 && drop ) L <- L[[1]]
if ( by== 'rule' && !is.data.frame(L) ) L <- do.call(rbind,L)
L
})
#' Coerce a confrontation object to data frame
#'
#' Results of confronting data with validation rules or indicators
#' are created by a \code{\link{confront}}ation. The result is an
#' object (inheriting from) \code{confrontation}.
#'
#' @inheritParams as.data.frame
#'
#' @return A \code{data.frame} with columns
#' \itemize{
#' \item{\code{key} Where relevant, and only if \code{key} was specified
#' in the call to \code{\link{confront}}}
#' \item{\code{name} Name of the rule}
#' \item{\code{value} Value after evaluation}
#' \item{\code{expression} evaluated expression}
#' }
#'
#' @example ../examples/as.data.frame.R
#'
#' @export
#' @family confrontation-methods
setMethod("as.data.frame","confrontation", function(x,...){
ierr <- has_error(x)
if (any(ierr)){
warnf("Found %d rules that threw an error. These are omitted from data frame.", sum(ierr))
x <- x[!ierr]
}
v <- values(x, simplify=FALSE, drop=FALSE)
expr <- sapply(x$._calls, call2text)
nam <- names(x$._calls)
key_proto <- lapply(x$._keys$keyset, function(x) x[0])
nrec <- nrow(x$._keys$keyset)
L <- lapply(seq_along(v), function(i){
df <- data.frame(name=nam[i]
, value=v[[i]]
, expression=expr[i]
, row.names=NULL
, stringsAsFactors=FALSE)
if ( nrow(df) == nrow(x$._keys$keyset) ){
cbind(x$._keys$keyset, df)
} else if ( length(key_proto) > 0){
nana <- lapply(key_proto, function(d){
as(rep(NA, length(v[[i]])) , if (inherits(d, "factor")) "character" else class(d))
})
cbind(nana, df)
} else {
df
}
})
out <- do.call(rbind, L)
if ( is.null(out) ){
out <- data.frame( name=character(0)
, value=logical(0)
, expression=character(0))
}
out
})
#getkey <- function(x){
# k <- names(x)
# if (is.null(k)) NA_character_ else k
#}
#' Test if all validations resulted in TRUE
#'
#' @param x \code{validation} object (see \code{confront}).
#' @param ... ignored
#' @param na.rm [\code{logical}] If \code{TRUE}, \code{NA} values
#' are removed before the result is computed.
#' @family validation-methods
#' @export
#'
#' @examples
#' val <- check_that(women, height>60, weight>0)
#' all(val)
setMethod("all","validation",function(x,...,na.rm=FALSE){
res <- values(x, simplify=FALSE, drop=FALSE)
if (length(res) == 0) return(TRUE)
all(sapply(res, all, na.rm=na.rm), na.rm=na.rm)
})
#' Test if any validation resulted in TRUE
#'
#' @param x \code{validation} object (see \code{confront}).
#' @param ... ignored
#' @param na.rm [\code{logical}] If \code{TRUE}, \code{NA} values
#' are removed before the result is computed.
#'
#' @family validation-methods
#' @export
#'
#' @examples
#' val <- check_that(women, height>60, weight>0)
#' any(val)
setMethod("any","validation",function(x,...,na.rm=FALSE){
res <- values(x, simplify=FALSE, drop=FALSE)
if (length(res) == 0) return(FALSE)
any(sapply(res, any, na.rm=na.rm), na.rm=na.rm)
})
|
/scratch/gouwar.j/cran-all/cranData/validate/R/confrontation.R
|
#' @include parse.R
#' @include sugar.R
#' @include rule.R
NULL
#### EXPRESSIONSET OBJECT -----------------------------------------------------
#' Superclass for storing a set of rich expressions.
#'
#' @section Details:
#' This class is aimed at developers of this package or packages depending on
#' it, not at users. It is the parent object of both the \code{\link{validator}}
#' and the \code{\link{indicator}} class.
#'
#'
#' An \code{expressionset} is a reference class storing a list of
#' \code{\link{rule}}s. It contains a number of methods that are not exported
#' and may change or disappear without notice. We strongly encourage developers
#' to use the exported S4 generics to set or extract variables.
#'
#' @section Exported S4 methods for \code{expressionset}:
#' \itemize{
#' \item{\code{\link{variables}}}
#' \item{\code{\link{names}}}
#' \item{\code{\link{length,expressionset-method}}}
#' \item{\code{\link{created}}}
#' \item{\code{\link{origin}}}
#' \item{\code{\link{labels}}}
#' \item{\code{\link{description}}}
#' \item{\code{\link{[,expressionset-method}}}
#' \item{\code{\link{[[,expressionset-method}}}
#' \item{\code{\link{summary,expressionset-method}}}
#' }
#'
#'
#' @section Private S4 methods for \code{expressionset}:
#' \itemize{
#' \item{validating}
#' \item{linear}
#' \item{is_tran_assign}
#' }
#'
#'
#' @section See also:
#' \itemize{
#' \item{\code{\link{rule}}}
#' \item{\code{\link{validator}}}
#' \item{\code{\link{indicator}}}
#' }
#'
#'
#' @keywords internal
expressionset <- setRefClass("expressionset"
, fields = list(
rules = "list"
, ._options = "function"
)
, methods= list(
show = function() .show_expressionset(.self)
, exprs = function(...) .get_exprs(.self,...)
, blocks = function() .blocks_expressionset(.self)
, options = function(...) .self$._options(...)
, clone_options = function(...) settings::clone_and_merge(.self$._options,...)
)
)
#' Service for filling an expressionset from commandline
#'
#' @section Details:
#' This function is aimed at developers importing the package and
#' not at direct users of \pkg{validate}.
#'
#' @param obj An expressionset object (or an object inheriting from expressionset).
#' @param ... Comma-separated list of expressions
#' @param .prefix Prefix to use in default names.
#'
#' @export
#' @rdname validate_extend
#' @keywords internal
.ini_expressionset_cli <- function(obj, ..., .prefix="R"){
L <- as.list(substitute(list(...))[-1])
nm <- extract_names(L, prefix = .prefix)
cr <- Sys.time()
R <- vector(length(L), mode="list")
# note: we cannot set the description or the label when constructing
# from the commandline.
for ( i in seq_along(L) ){
R[[i]] <- rule(
expr = L[[i]]
, name = nm[i]
, origin="command-line"
, created = cr
)
}
obj$rules <- R
}
#' @param obj An expressionset object (or an object inheriting from expressionset).
#' @param dat a data.frame
#'
#' @export
#' @rdname validate_extend
#' @keywords internal
.ini_expressionset_df <- function(obj, dat, .prefix="R"){
n <- nrow(dat)
R <- vector(n, mode="list")
cr = Sys.time()
if ( is.null(dat[["name"]]) ){
npos <- npos(nrow(dat))
fmt <- paste0("%s%",npos,"d")
dat$name <- sprintf(fmt, .prefix, seq_len(nrow(dat)))
}
if (is.null(dat[["description"]])){
dat$description <- ""
}
if (is.null(dat[["label"]])){
dat$label <- ""
}
if (is.null(dat[["origin"]])){
dat$origin <- ""
}
if (is.null(dat[["rule"]])){
stop("No column called 'rule' found")
}
L <- setNames(vector(mode="list", length=nrow(dat)), dat$name)
dat$name <- extract_names(L)
dat$label <- as.character(dat$label)
dat$rule <- as.character(dat$rule)
dat$description <- as.character(dat$description)
for ( i in seq_len(n)){
R[[i]] <- rule(
expr = parse(text=dat$rule[i])[[1]]
, name = dat$name[i]
, origin = dat$origin[i]
, label = dat$label[i]
, description = dat$description[i]
, created = cr
)
}
obj$rules <- R
# make names unique (in the identical way as the other creation methods)
names(obj) <- names(obj)
}
#' @param obj An expressionset object (or an object inheriting from expressionset).
#' @param file a filename
#' @param .prefix Prefix to use in default names.
#'
#' @export
#' @rdname validate_extend
#' @keywords internal
.ini_expressionset_yml <- function(obj, file, .prefix="R"){
S <- get_filestack_yml(file)
R <- list()
for ( fl in S )
R <- c(R, rules_from_yrf_file(fl,prefix=.prefix))
obj$rules <- R
# make names unique (in the identical way as the other creation methods)
names(obj) <- names(obj)
obj$._options <- .PKGOPT
# options only from the 'including' file (not from included)
local_opt <- options_from_yml(file)
if ( length(local_opt) > 0 )
do.call(obj$options, local_opt)
}
rules_from_block <- function(block, origin){
# helper functions.
rules_from_freeform <- function(string, origin){
S <- tryCatch(parse(text=string), error = function(e){
stop(sprintf("parsing freeform block. Parser returned:\n %s", e$msg))
})
lapply(S,function(s) rule(expr=s, origin=origin, created=now))
}
rules_from_yrf <- function(block, origin){
rules <- Filter(function(x) !is.null(x$expr), block$rules)
if (length(rules)<length(block$rules)){
warnf("skipped %d rules with empty expressions"
, length(block$rules)-length(rules))
}
lapply(rules, function(x){
rule(
expr = parse(text=x$expr)[[1]]
, name = as.character(x$name)
, label = as.character(x$label)
, description = as.character(x$description)
, origin = origin
, created = now
, meta = as.list(x$meta)
)
})
}
now <- Sys.time()
type <- yrf_block_type(block)
if ( identical(type,"free") ){
rules_from_freeform(block, origin=origin)
} else if (identical(type, "yrf")){
rules_from_yrf(block, origin=origin)
}
}
rules_from_yrf_file <- function(file,prefix="V"){
lines <- .readlines_utf8(file)
blocks <- yaml_blocks(lines)
rules <- unlist(lapply(blocks, rules_from_block, origin=file))
# set generic name if needed.
npos <- max(1,ceiling(log10(length(rules)+1)))
fmt <- paste0("%s%0",npos,"d")
generic <- sprintf(fmt,prefix,seq_along(rules))
for ( i in seq_along(rules) ){
if ( identical(rules[[i]]@name,character(0)) ) {
rules[[i]]@name <- generic[i]
}
}
rules
}
options_from_yml <- function(file){
lines <- .readlines_utf8(file)
.parse_yrf_options(lines)
}
# Get sequence of files to be processed from include statements.
# the filestack is returned reversely depth-first, e.g.
#
# ROOT
# - CHILD1
# - CHILD2
# - CHILD3
# is returned in the order CHILD1 CHILD3 CHILD2 CHILD1 ROOT
#
get_filestack_yml <- function(file){
f <- function(fl, det=character(0)){
det <- c(fl,det)
if ( fl %in% det[-1])
stop(sprintf("Cyclic dependency detected in %s\n%s\n",fl,paste(rev(det),collapse="\n -> ")))
L <- parse_yrf_include(fl)
for ( x in L )
f(x,det)
filestack <<- c(filestack,fl)
}
filestack <- character(0)
f(file)
filestack
}
#' @param obj an expressionset object
#' @rdname validate_extend
#' @export
#' @keywords internal
.show_expressionset <- function(obj){
nr <- length(obj)
cat(sprintf(
"Object of class '%s' with %s elements:\n",class(obj)[1], nr
))
if (nr == 0) return(invisible(NULL))
nam <- names(obj)
lab <- label(obj)
lab <- paste0(nam,ifelse(nchar(lab)>0,paste0(" [",lab,"]"),lab))
n <- max(nchar(lab))
lab <- paste0(" ",format(lab,width=n),": "
, sapply(obj$exprs(
expand_groups=FALSE
, replace_in = FALSE
, lin_eq_eps=0
, lin_ineq_eps=0), call2text)
)
cat(noquote(paste(lab,collapse="\n")))
cat("\n")
optstr <- "Rules are evaluated using locally defined options\n"
cat(optstr[!identical(obj$._options,.PKGOPT)])
}
# from call to oneliner text
call2text <- function(x){
gsub("[[:blank:]]+"," ",paste(deparse(x),collapse=" "))
}
npos <- function(n) max(1,ceiling(log10(n+1)))
# get names from a list, replacing empty names values with numbers
extract_names <- function(L,prefix="V"){
npos <- npos(length(L))
fmt <- paste0("%s%0",npos,"d")
generic <- sprintf(fmt,prefix,seq_along(L))
given <- names(L)
if (is.null(given)) return(generic)
igen <- given %in% c("", NA)
given[igen] <- generic[igen]
make.names(given, unique=TRUE)
}
#' @rdname validate_extend
#' @param expand_assignments Substitute assignments?
#' @param expand_groups Expand groups?
#' @param vectorize Vectorize if-statements?
#' @param replace_dollar Replace dollar with bracket index?
#' @param dat Optionally, a \code{data.frame} containing the data to which the
#' expressions will be applied. When provided, the only equalities \code{A==B}
#' that will be translated to \code{abs(A-B)<lin.eq.eps} are those where all
#' occurring variables are numeric in \code{dat}.
#' @export
#' @keywords internal
.get_exprs <- function(x, ...
, expand_assignments=FALSE
, expand_groups=TRUE
, vectorize=TRUE
, replace_dollar=TRUE
, replace_in = TRUE
, lin_eq_eps = x$options('lin.eq.eps')
, lin_ineq_eps = x$options('lin.ineq.eps')
, dat=NULL
){
exprs <- setNames(lapply(x$rules, expr ),names(x))
exprs <- set_ref(exprs)
if ( expand_assignments ) exprs <- expand_assignments(exprs)
if ( expand_groups ) exprs <- expand_groups(exprs)
ref <- get_ref(exprs)
if ( vectorize ) exprs <- lapply(exprs, vectorize)
if ( replace_dollar ) exprs <- lapply(exprs, replace_dollar)
if ( replace_in ) exprs <- lapply(exprs, replace_in)
exprs <- lapply(exprs, dat=dat, replace_lin, eps_eq = lin_eq_eps, eps_ineq = lin_ineq_eps)
# if (lin_eq_eps > 0) exprs <- lapply(exprs, replace_linear_restriction, eps=lin_eq_eps, dat=dat, op="==")
# if (lin_ineq_eps > 0) exprs <- lapply(exprs, replace_linear_restriction, eps=lin_ineq_eps, dat=dat, op="<=")
# if (lin_ineq_eps > 0) exprs <- lapply(exprs, replace_linear_restriction, eps=lin_ineq_eps, dat=dat, op=">=")
#
set_ref(exprs, ref)
}
# get or set reference attribute to list of expressions.
set_ref <- function(exprs, ref=seq_along(exprs)){
for (i in seq_along(exprs)) attr(exprs[[i]],"reference") <- ref[i]
exprs
}
get_ref <- function(exprs){
if (length(exprs)==0) return(numeric())
else sapply(exprs, function(d) attr(d,"reference"))
}
#' @rdname validate_extend
#' @param x An expressionset object
#' @export
#' @keywords internal
.blocks_expressionset <- function(x){
# variable x rule matrix
V <- variables(x,as="matrix")
# all connections
M <- V %*% t(V) > 0
# Algorithm: merge overlapping sets.
# B := {}
# A := {a1, a2,...,an}
# while ( A != {} )
# a := some a in A
# for b in B
# if ( a intersects b )
# b := a + b
# A := A - a
# break
# if ( a still in A)
# B := B + a
# A := A - a
B <- list()
A <- lapply(seq_len(nrow(M)),function(i) which(M[i,]))
while( length(A) > 0 ){
nL <- length(A)
a <- A[[1]]
for ( i in seq_along(B) ){
b <- B[[i]]
if ( any(a %in% b) ){
B[[i]] = unique(c(a,b))
A <- A[-1]
break
}
}
if (nL == length(A)){ # a still in L
B[[length(B)+1]] <- a
A <- A[-1]
}
}
B
}
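# e.g. for rules x > 0; x < y; z == 1 this returns list(c(1,2), 3):
# the first two rules share variable 'x', the third touches only 'z'.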
# S4 GENERICS -----------------------------------------------------------------
#' Create a summary
#' @rdname validate-summary
#' @example ../examples/summary.R
setGeneric('summary')
#' Export to yaml file
#'
#' Translate an object to yaml format and write to file.
#'
#' Both \code{\link{validator}} and \code{\link{indicator}} objects can be
#' exported.
#'
#' @param x An R object
#' @param file A file location or connection (passed to \code{base::\link[base]{write}}).
#' @param ... Options passed to \code{yaml::\link[yaml]{as.yaml}}
#'
#'
#' @example ../examples/export_yaml.R
#'
#' @export
setGeneric("export_yaml",function(x,file,...) standardGeneric("export_yaml"))
#' @rdname export_yaml
#' @export
setGeneric("as_yaml", function(x,...) standardGeneric("as_yaml"))
# S4 IMPLEMENTATIONS ----------------------------------------------------------
#' @describeIn variables Variables occurring in \code{x} either as a single list, or per rule.
#' @param as how to return variables:
#' \itemize{
#' \item{\code{'vector'}} Return the unique vector of variables occurring in \code{x}.
#' \item{\code{'matrix'}} Return a boolean matrix, each row representing a rule, each column representing a variable.
#' \item{\code{'list'}} Return a named \code{list}, each entry containing a character vector with variable names.
#' }
#' @param dummy Also retrieve transient variables set with the \code{:=} operator.
#'
#' @family expressionset-methods
#' @example ../examples/variables.R
setMethod("variables", "expressionset", function(x, as=c('vector','matrix','list'), dummy=FALSE, ...){
as <- match.arg(as)
vars <- lapply(x$exprs(replace_dollar=FALSE, expand_assignments=!dummy),var_from_call)
u <- unique(unlist(vars))
switch(as
, 'vector' = u
, 'list' = vars
, 'matrix' = {
a <- array(FALSE,dim=c(length(vars),length(u)),dimnames=list(rule=names(vars),variable=u) )
for (i in seq_along(vars)) a[i,vars[[i]]] <- TRUE
a
})
})
#' @rdname voptions
#' @family expressionset-methods
setMethod('voptions','expressionset',function(x=NULL,...){
if (settings::is_setting(...)){
x$._options <- clone_and_merge(x$._options,...)
} else {
x$._options(...)
}
})
#' @rdname voptions
#' @family expressionset-methods
setMethod('reset','expressionset',function(x=NULL){
settings::reset(x$._options)
})
#' @rdname origin
#' @family expressionset-methods
setMethod("origin", "expressionset", function(x,...) sapply(x$rules,origin))
#' @rdname label
#' @family expressionset-methods
setMethod("label","expressionset",function(x,...) unlist(sapply(x$rules, label)))
#' @rdname description
#' @family expressionset-methods
setMethod("description", "expressionset", function(x,...) unlist(sapply(x$rules, description)))
#' @rdname meta
#' @param simplify Gather all metadata into a dataframe?
#' @family expressionset-methods
setMethod("meta","expressionset", function(x, simplify=TRUE,...){
L <- lapply(x$rules, function(r){
list(name=r@name
, label = label(r)
, description=description(r)
, origin = origin(r)
, created = created(r)
, meta = meta(r)
)
})
if (!simplify){
L
} else {
K <- lapply(L, function(m){
c(m[1:5], m[[6]])
})
cols <- Reduce(union, lapply(K,names))
U <- matrix(NA,nrow=length(K),ncol=length(cols),dimnames=list(NULL,cols))
U <- as.data.frame(U)
for (i in seq_along(K)){
k <- K[[i]]
U[i,names(k)] <- k
}
U$created <- .POSIXct(U$created)
U
}
})
#' @rdname created
#' @family expressionset-methods
setMethod("created", "expressionset", function(x,...){
# obj. of class POSIXct; sapply strips the POSIXct class attribute
cr <- rep(Sys.time(),length(x))
for ( i in seq_along(x)){
cr[i] <- created(x$rules[[i]])
}
cr
})
#' Extract or set names
#'
#' @param x An R object
#'
#' @return A \code{character} vector
#' @rdname names
#' @family expressionset-methods
#' @export
#' @example ../examples/properties.R
setMethod("names","expressionset",function(x){
sapply(x$rules, function(rule) rule@name)
})
# recycle x over y
recycle <- function(x,y){
m <- length(x)
n <- length(y)
remainder <- n %% m
times <- n %/% m
if (remainder > 0){
warning(gettext("longer object length is not a multiple of shorter object length"))
times <- times + 1
}
rep(x,times=times)[seq_len(n)]
}
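# e.g. recycle(c("a","b"), 1:5) yields c("a","b","a","b","a"), with a
# warning that 5 is not a multiple of 2.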
#' @rdname meta
setReplaceMethod("meta",c("expressionset","character"),function(x,name,value){
values <- rep(value, times = (length(x) %/% length(value)+1))[seq_along(x)]
for ( i in seq_along(x$rules)){
rule <- x[[i]]
meta(rule, name) <- values[i]
x$rules[[i]] <- rule
}
x
})
#'
#'
#' When setting names, \code{value}s are recycled and made unique with
#' \code{\link{make.names}}
#'
#' @rdname names
#' @param value Value to set
#' @example ../examples/properties.R
#' @export
setReplaceMethod("names",c("expressionset","character"),function(x,value){
value <- make.names(recycle(value,x),unique=TRUE)
for ( i in seq_len(length(x))){
names(x$rules[[i]]) <- value[i]
}
x
})
#' @rdname origin
setReplaceMethod("origin",c("expressionset","character"), function(x,value){
value <- recycle(value, x)
for ( i in seq_len(length(x))){
origin(x$rules[[i]]) <- value[i]
}
x
})
#' @rdname label
#' @export
setReplaceMethod("label",c("expressionset","character"),function(x,value){
value <- recycle(value,x)
for ( i in seq_len(length(x))){
label(x$rules[[i]]) <- value[i]
}
x
})
#' @rdname description
#' @export
setReplaceMethod("description",c("expressionset","character"),function(x,value){
value <- recycle(value,x)
for ( i in seq_len(length(x))){
description(x$rules[[i]]) <- value[i]
}
x
})
#' @rdname created
#' @export
setReplaceMethod("created",c("expressionset","POSIXct"),function(x,value){
value <- recycle(value, x)
for ( i in seq_len(length(x))){
created(x$rules[[i]]) <- value[i]
}
x
})
setMethod("validating", "expressionset", function(x,...){
if (length(x) == 0) return(logical(0))
sapply(x$rules, validating)
})
setMethod("linear","expressionset", function(x,...){
if(length(x)==0) return(logical(0))
sapply(x$rules, linear)
})
#' @section Validator and indicator objects:
#' For these objects, the ruleset is split into subsets (blocks) that are disjunct in the
#' sense that they do not share any variables. For each block the number of variables,
#' the number of rules and the number of rules that are linear are reported.
#'
#' @return A \code{data.frame} with the information mentioned below is returned.
#'
#' @rdname validate-summary
#' @seealso \code{\link{plot,validator-method}}
#' @family expressionset-methods
setMethod('summary',signature('expressionset'),function(object,...){
b <- object$blocks()
data.frame(
block = seq_along(b)
, nvar = sapply(b,function(i) length(variables(object[i])))
, rules = sapply(b,length)
, linear = sapply(b,function(i) sum(object[i]$is_linear()))
, row.names=NULL
)
})
#' Determine the number of elements in an object.
#'
#' @param x An R object
#' @rdname length
#' @aliases length,expressionset-method
#' @export
setMethod("length","expressionset",function(x) length(x$rules))
#' Select a subset
#'
#' @section Details:
#' The \code{options} attribute will be cloned
#'
#' @param x An R object
#' @param i an index (numeric, boolean, character)
#' @param j not implemented
#' @param drop not implemented
#' @param ... Arguments to be passed to other methods
#'
#' @return An new object, of the same class as \code{x} subsetted according to \code{i}.
#' @rdname select
#' @aliases [,expressionset-method
#'
#' @export
#' @keywords internal
setMethod("[",signature("expressionset"), function(x,i,j,...,drop=TRUE){
if (missing(i)){
i <- seq_len(length(x))
} else if (is.character(i)){
i <- match(i,names(x))
}
out <- new(class(x))
out$rules <- x$rules[i]
out$._options = clone_and_merge(x$._options)
out
})
#' Replace a rule in a ruleset
#'
#' @param x an R object
#' @param i index of length 1
#' @param value object of class \code{\link{rule}}
#' @export
#' @keywords internal
setMethod("[[<-",signature("expressionset"),function(x,i,value){
stopifnot(inherits(value,"rule"))
stopifnot(length(i)==1)
x$rules[[i]] <- value
x
})
#' Replace a subset of an expressionset with another expressionset
#'
#' @param x an R object inheriting from \code{expressionset}
#' @param i a \code{logical}, \code{character}, or \code{numeric} index
#' @param value an R object of the same class as \code{x}
#' @export
#' @keywords internal
setMethod("[<-",signature("expressionset"),function(x,i,value){
stopifnot(inherits(value,class(x)))
if (is.character(i)){
i <- match(i,names(x),nomatch=0)
}
x$rules[i] <- value$rules
x
})
#' @param exact Not implemented
#' @rdname select
#' @aliases [[,expressionset-method
#' @keywords internal
setMethod("[[",signature("expressionset"), function(x,i,j,...,exact=TRUE){
if ( is.character(i) ){
i <- which(i %in% names(x))
# workaround so default 'str' doesn't crash (see comments in issue #82)
if (length(i)==0) return(NULL)
}
x$rules[[i]]
})
setMethod("is_tran_assign","expressionset",function(x,...){
if (length(x)==0) return(logical(0))
sapply(x$rules,is_tran_assign)
})
#' @rdname export_yaml
setMethod("export_yaml","expressionset", function(x, file,...){
write(x = as_yaml(x,...), file=file)
})
#' @rdname export_yaml
setMethod("as_yaml","expressionset",function(x,...){
option_string <- ""
if (!identical(x$._options,.PKGOPT)){ # export options when set.
option_string <- paste0("---\n",yaml::as.yaml(list(options=x$options()),...),"---\n")
}
rule_string <- yaml::as.yaml(rapply(expressionset_to_list(x), f=function(y) paste0("",y),how="replace"),...)
paste0(option_string,rule_string)
})
expressionset_to_list <- function(x, expr_as_text=TRUE, ...){
list(
rules = lapply(x$rules, rule_to_list, expr_as_text = expr_as_text, ...)
)
}
#' Coerce to \code{data.frame}
#'
#' @param x Object to coerce
#' @param ... arguments passed to other methods
#' @param optional ignored
#' @param row.names ignored
#'
#' @export
#' @family expressionset-methods
#' @keywords internal
setGeneric("as.data.frame")
#' Translate an expressionset to data.frame
#'
#' Expressions are deparsed and combined in a \code{data.frame} with (some
#' of) their metadata. Observe that some information may be lost (e.g. options
#' local to the object).
#'
#'
#' @inheritParams as.data.frame
#' @param expand_assignments Toggle substitution of `:=` assignments.
#'
#'
#' @return A \code{data.frame} with elements \code{rule}, \code{name},
#' \code{label}, \code{origin}, \code{description}, and \code{created}.
#' @export
#' @family expressionset-methods
setMethod("as.data.frame","expressionset", function(x, expand_assignments=TRUE, ...){
rules <- x$exprs(expand_assignments=expand_assignments,...)
i_ref <- if (expand_assignments) sapply(rules, function(d) attr(d,"reference"))
else seq_along(rules)
rules <- sapply(rules, call2text)
dat <- cbind(meta(x,simplify=TRUE)[i_ref,,drop=FALSE],rule=rules)
# expanding assignments may add numbering to expressions
dat$name <- names(rules)
dat
})
|
/scratch/gouwar.j/cran-all/cranData/validate/R/expressionset.R
|
# factory function. Evaluate expressions, catch errors and warnings silently (per option).
factory <- function(fun,opts){
switch(opts('raise')
, 'none' = function(...) { # both errors and warnings are caught
warn <- err <- NULL
res <- withCallingHandlers(
tryCatch(outcheck(fun)(...), error=function(e) {
err <<- conditionMessage(e)
NULL
}), warning=function(w) {
warn <<- append(warn, conditionMessage(w))
invokeRestart("muffleWarning")
})
list(res, warn=warn, err=err)
}
, 'errors' = function(...) { # warnings are caught; errors are raised.
warn <- err <- NULL
res <- withCallingHandlers( outcheck(fun)(...)
, warning=function(w) {
warn <<- append(warn, conditionMessage(w))
#invokeRestart("muffleWarning")
})
list(res, warn=warn, err=err)
}
, 'all' = function(...){
warn <- err <- NULL
res <- outcheck(fun)(...) # errors and warnings are raised.
list(res,warn=warn,err=err)
}
)
}
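# Illustrative sketch (not run). 'factory' is demonstrated here with a plain
# one-argument function standing in for the package's options manager:
# f <- factory(log, function(key) "none")
# f(10)    # list(2.302585, warn = NULL, err = NULL)
# f(-1)    # list(NaN, warn = "NaNs produced", err = NULL)
# f("a")   # list(NULL, warn = NULL, err = "non-numeric argument to mathematical function")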
outcheck <- function(fun){
function(...){
out <- fun(...)
if (!(is.numeric(out) | is.logical(out))){
warning("Expression did not evaluate to numeric or logical, returning NULL"
, call.=FALSE)
return(NULL)
} else {
return(out)
}
}
}
Id <- function(x) x
num_result <- function(x) if (is.list(x)) length(x$result) else length(x)
get_result <- function(x) if (is.list(x)) x$result else x
|
/scratch/gouwar.j/cran-all/cranData/validate/R/factory.R
|
#' Check whether a variable represents a linear sequence
#'
#' A variable \eqn{X = (x_1, x_2,\ldots, x_n)} (\eqn{n\geq 0}) represents a
#' \emph{linear sequence} when \eqn{x_{j+1} - x_j} is constant for all
#' \eqn{j\geq 1}. That is, elements in the series are equidistant and without
#' gaps.
#'
#' @details
#'
#' Presence of a missing value (\code{NA}) in \code{x} will result in \code{NA},
#' except when \code{length(x) <= 2} and \code{begin} and \code{end} are
#' \code{NULL}. Any sequence of length \eqn{\leq 2} is a linear sequence.
#'
#'
#'
#' @param x An R vector.
#' @param sort \code{[logical]}. When set to \code{TRUE}, \code{x}
#' is sorted within each group before testing.
#' @param begin Optionally, a value that should equal \code{min(x)}
#' @param end Optionally, a value that should equal \code{max(x)}
#' @param by bare (unquoted) variable name or a list of unquoted variable names,
#' used to split \code{x} into groups. The check is executed for each group.
#' @param ... Arguments passed to other methods.
#'
#' @return For \code{is_linear_sequence}: a single \code{TRUE}, \code{FALSE},
#' or \code{NA}, equal to \code{all(in_linear_sequence)}.
#'
#' @examples
#'
#' is_linear_sequence(1:5) # TRUE
#' is_linear_sequence(c(1,3,5,4,2)) # FALSE
#' is_linear_sequence(c(1,3,5,4,2), sort=TRUE) # TRUE
#' is_linear_sequence(NA_integer_) # TRUE
#' is_linear_sequence(NA_integer_, begin=4) # FALSE
#' is_linear_sequence(c(1, NA, 3))            # NA (missing value in a longer sequence)
#'
#'
#' d <- data.frame(
#' number = c(pi, exp(1), 7)
#' , date = as.Date(c("2015-12-17","2015-12-19","2015-12-21"))
#' , time = as.POSIXct(c("2015-12-17","2015-12-19","2015-12-20"))
#' )
#'
#' rules <- validator(
#' is_linear_sequence(number) # fails
#' , is_linear_sequence(date) # passes
#' , is_linear_sequence(time) # fails
#' )
#' summary(confront(d,rules))
#'
#' ## check groupwise data
#' dat <- data.frame(
#' time = c(2012, 2013, 2012, 2013, 2015)
#' , type = c("hi", "hi", "ha", "ha", "ha")
#' )
#' rule <- validator(in_linear_sequence(time, by=type))
#' values(confront(dat, rule)) ## 2xT, 3xF
#'
#'
#' rule <- validator(in_linear_sequence(time, type))
#' values( confront(dat, rule) )
#'
#' @family cross-record-helpers
#'
#' @export
is_linear_sequence <- function(x, by=NULL,...) UseMethod("is_linear_sequence")
# workhorse function
is_lin_num_seq <- function(x, begin=NULL, end=NULL, sort=TRUE, tol=1e-8,...){
  # Edge cases: the empty sequence, and sequences of length <= 2 that consist
  # solely of missing values. In those cases, return FALSE when begin or end
  # is checked, otherwise return TRUE.
if ( length(x) <= 2 && all(is.na(x)) )
return(is.null(begin) && is.null(end))
if (anyNA(x)) return(NA)
# the regular case
!anyNA(x) &&
(is.null(begin) || abs(begin - min(x)) <= tol) &&
(is.null(end) || abs(end - max(x)) <= tol) &&
( length(x) <= 1 || { if(sort) x <- sort(x)
d <- diff(x)
all(abs(d - d[1]) <= tol)
})
}
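# Illustrative sketch (not run) of the workhorse on plain numeric input:
# is_lin_num_seq(c(1, 2, 3, 4))            # TRUE: constant increments
# is_lin_num_seq(c(1, 2, 4))               # FALSE: gap between 2 and 4
# is_lin_num_seq(c(3, 1, 2))               # TRUE: sorted before testing
# is_lin_num_seq(1:3, begin = 0)           # FALSE: min(x) is 1, not 0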
as_int <- function(x){
if( is.null(x)) NULL else as.integer(x)
}
as_num <- function(x){
if (is.null(x)) NULL else as.numeric(x)
}
all_lin_num_seq <- function(x, by=NULL, begin=NULL, end=NULL, sort=TRUE, tol=1e-8){
if (length(by) == 0){
is_lin_num_seq(x, begin=begin, end=end, sort=sort, tol=tol)
} else {
all(tapply(x, INDEX=by, FUN=is_lin_num_seq, begin=begin, end=end, sort=sort, tol=tol))
}
}
#' @rdname is_linear_sequence
#' @param tol numerical tolerance for gaps.
#' @export
is_linear_sequence.numeric <- function(x, by=NULL, begin=NULL, end=NULL, sort=TRUE, tol = 1e-8,...){
  all_lin_num_seq(x, by=by, begin=begin, end=end, sort=sort, tol=tol)
}
#' @rdname is_linear_sequence
#' @export
is_linear_sequence.Date <- function(x, by=NULL, begin=NULL, end=NULL, sort=TRUE,...){
all_lin_num_seq(as.integer(x), by=by, begin=as_int(begin), end=as_int(end), sort=sort, tol=0)
}
#' @rdname is_linear_sequence
#' @export
is_linear_sequence.POSIXct <- function(x, by=NULL , begin=NULL, end=NULL, sort = TRUE, tol=1e-6,...){
# Note. POSIXct can express fractions of a second. Conversion from and to
# POSIXlt is accurate to better than a microsecond, so one microsecond is
# used as the default tolerance.
all_lin_num_seq(as.numeric(x), by=by, begin=as_num(begin), end=as_num(end), sort=sort, tol=tol)
}
#' @rdname is_linear_sequence
#'
#' @param format \code{[character]}. How to interpret \code{x} as a time period.
#' Either \code{"auto"} for automatic detection or a specification passed to
#' \code{\link{strptime}}. Automatically detected periods are of the form year:
#' \code{"2020"}, yearMmonth: \code{"2020M01"}, yearQquarter: \code{"2020Q3"},
#' or year-Qquarter: \code{"2020-Q3"}.
#'
#' @export
is_linear_sequence.character <- function(x, by=NULL, begin=NULL, end=NULL, sort=TRUE, format="auto",...){
if ( format == "auto" ){
y <- period_to_int(x, by=by)
begin <- period_to_int(begin)
end <- period_to_int(end)
is_linear_sequence.numeric(y, by=by, begin=begin, end=end, sort=sort, tol=0,...)
} else {
y <- strptime(x, format=format)
begin <- strptime(begin, format=format)
end <- strptime(end, format=format)
is_linear_sequence.POSIXct(y, by=by, begin=begin, end=end, sort=sort, tol=1e-6,...)
}
}
#' @rdname is_linear_sequence
#'
#' @return For \code{in_linear_sequence}: a \code{logical} vector with the same length as \code{x}.
#' @export
in_linear_sequence <- function(x, ...) UseMethod("in_linear_sequence")
in_lin_num_seq <- function(x, by=NULL, begin=NULL, end=NULL, sort=TRUE, tol=1e-8,...){
rep(is_lin_num_seq(x, begin=begin, end=end, sort=sort, tol=tol), length(x))
}
## TODO: postpone conversion to integer to inside the split-apply-combine loop.
#' @rdname is_linear_sequence
#' @export
in_linear_sequence.character <- function(x, by=NULL, begin=NULL, end=NULL, sort=TRUE, format="auto",...){
if ( format == "auto" ){
y <- period_to_int(x,by=by)
begin <- period_to_int(begin)
end <- period_to_int(end)
in_linear_sequence.numeric(y, by=by, begin=begin, end=end, sort=sort, tol=0,...)
} else {
y <- strptime(x, format=format)
begin <- strptime(begin, format=format)
end <- strptime(end, format=format)
in_linear_sequence.POSIXct(y, by=by, begin=begin, end=end, sort=sort, tol=1e-6,...)
}
}
#' @rdname is_linear_sequence
#' @export
in_linear_sequence.numeric <- function(x, by=NULL, begin=NULL, end=NULL, sort=TRUE, tol=1e-8,...){
if (is.null(by)){
in_lin_num_seq(as.integer(x), begin=as_int(begin), end=as_int(end), sort=sort, tol=tol)
} else {
result <- tapply(as.integer(x), by, in_lin_num_seq, begin=as_int(begin), end=as_int(end), sort=sort, tol=tol)
unsplit(result, by)
}
}
#' @rdname is_linear_sequence
#' @export
in_linear_sequence.Date <- function(x, by=NULL, begin=NULL, end=NULL, sort=TRUE,...){
  in_linear_sequence.numeric(as.integer(x), by=by, begin=as_int(begin), end=as_int(end), sort=sort, tol=0)
}
#' @rdname is_linear_sequence
#' @export
in_linear_sequence.POSIXct <- function(x, by=NULL, begin=NULL, end=NULL, sort=TRUE, tol=1e-6,...){
  in_linear_sequence.numeric(as.numeric(x), by=by, begin=as_num(begin), end=as_num(end), sort=sort, tol=tol)
}
period_type <- function(x, undefined=NA_character_){
if ( all( grepl("^[12][0-9]{3}$",x) ) ) return("annual")
if ( all( grepl("^[12][0-9]{3}-?Q[1-4]$",x) ) ) return("quarterly")
if ( all( grepl("^[12][0-9]{3}M[01][0-9]$",x) ) ) return("monthly")
warning("Cannot detect period notation: undefined period type or different period types in single column.", call.=FALSE)
undefined
}
# Turn a period into an integer
#
# Annual periods are turned into the integer year. Quarterly and monthly
# periods are turned into the quarter and month number respectively, counted
# from the year zero, so quarters and months have consecutive numbers
# across years.
#
# @param x a \code{character} vector.
# @param by \code{character} split x into groups before coercion
#
#
#
#
period_to_int <- function(x, by=NULL){
if (is.null(x)) return(NULL)
f <- function(xx){
from <- period_type(xx)
if (is.na(from)) return(rep(NA, length(xx)))
if (from == "annual"){
res <- as.numeric(xx)
}
if (from == "quarterly" ){
L <- strsplit(xx,"-?Q")
year <- as.numeric(sapply(L, `[[`,1))
quarter <- as.numeric(sapply(L, `[[`, 2))
res <- 4*year + quarter-1
}
if ( from == "monthly" ){
L <- strsplit(xx, "M")
year <- as.numeric( sapply(L,`[[`,1) )
month <- as.numeric( sapply(L, `[[`, 2) )
      res <- 12*year + month - 1
}
res
}
if (is.null(by)) by <- character(length(x))
unsplit(lapply(split(x, f=by), f), f=by)
}
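# Illustrative sketch (not run): consecutive periods map to consecutive
# integers, also across year boundaries, so gaps show up as non-unit steps.
# period_to_int(c("2019Q4", "2020Q1"))   # 8079 8080
# period_to_int(c("2020M12", "2021M01")) # 24251 24252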
#' Check variable range
#'
#' Test whether a variable falls within a range.
#'
#' @param x A bare (unquoted) variable name.
#' @param min lower bound
#' @param max upper bound
#' @param ... arguments passed to other methods
#'
#'
#' @examples
#'
#' d <- data.frame(
#' number = c(3,-2,6)
#' , time = as.Date(c("2018-02-01", "2018-03-01", "2018-04-01"))
#' , period = c("2020Q1", "2021Q2", "2020Q3")
#' )
#'
#' rules <- validator(
#' in_range(number, min=-2, max=7, strict=TRUE)
#' , in_range(time, min=as.Date("2017-01-01"), max=as.Date("2018-12-31"))
#' , in_range(period, min="2020Q1", max="2020Q4")
#' )
#'
#' result <- confront(d, rules)
#' values(result)
#'
#'
#' @export
in_range <- function(x, min, max,...) UseMethod("in_range")
#' @rdname in_range
#' @param strict \code{[logical]} Toggle between including the range boundaries
#' (default) or not including them (when strict=TRUE).
#'
#' @export
in_range.default <- function(x, min, max, strict=FALSE, ...){
if (strict) x > min & x < max
else x >= min & x <= max
}
#' @rdname in_range
#'
#' @param format \code{[character]} or \code{NULL}. If \code{format=NULL} the
#' character vector is interpreted as is, and whether a character lies
#' within a character range is determined by the collation order set by the
#' current locale. See the details of "\code{\link{<}}". If \code{format} is
#' not \code{NULL}, it specifies how to interpret the character vector as a
#' time period. It can take the value \code{"auto"} for automatic detection or
#' a specification passed to \code{\link{strptime}}. Automatically detected
#' periods are of the form year: \code{"2020"}, yearMmonth: \code{"2020M01"},
#' yearQquarter: \code{"2020Q3"}, or year-Qquarter: \code{"2020-Q3"}.
#'
#'
#' @export
in_range.character <- function(x, min, max, strict=FALSE, format = "auto",...){
if (is.null(format))
in_range.default(x=x, min=min, max=max, strict=strict, ...)
else if ( format == "auto" ){
y <- period_to_int(x, by=NULL)
ymin <- period_to_int(min)
ymax <- period_to_int(max)
in_range.default(y, min=ymin, max=ymax, strict=strict, ...)
} else {
y <- strptime(x, format=format)
ymin <- strptime(min, format=format)
ymax <- strptime(max, format=format)
in_range(y, min=ymin, max=ymax, strict=strict, ...)
}
}
#' Test whether details combine to a chosen aggregate
#'
#' Data in 'long' format often contain records representing totals
#' (or other aggregates) as well as records that contain details
#' that add up to the total. This function facilitates checking the
#' part-whole relation in such cases.
#'
#'
#' @param values A bare (unquoted) variable name holding the values to aggregate
#' @param labels A bare (unquoted) variable name holding the labels indicating
#' whether a value is an aggregate or a detail.
#' @param whole \code{[character]} literal label or pattern recognizing a whole
#' in \code{labels}. Use \code{\link{glob}} or \code{\link{rx}} to label
#' as a globbing or regular expression pattern (see examples).
#' @param part \code{[character]} vector of label values or pattern recognizing
#' a part in \code{labels}. Use \code{\link{glob}} or \code{\link{rx}}
#' to label as a globbing or regular expression pattern. When labeled
#' with \code{glob} or \code{rx}, it must be a single string. If `part` is
#' left unspecified, all values not recognized as an aggregate are
#' interpreted as details that must be aggregated to the whole.
#' @param aggregator \code{[function]} used to aggregate subsets of \code{x}. It should
#' accept a \code{numeric} vector and return a single number.
#' @param tol \code{[numeric]} tolerance for equality checking
#' @param by Name of variable, or \code{list} of bare variable names, used to
#' split the values and labels before computing the aggregates.
#' @param ... Extra arguments passed to aggregator (for example \code{na.rm=TRUE}).
#'
#'
#' @return A \code{logical} vector of size \code{length(value)}.
#'
#' @examples
#' df <- data.frame(
#' id = 10011:10020
#' , period = rep(c("2018Q1", "2018Q2", "2018Q3", "2018Q4","2018"),2)
#' , direction = c(rep("import",5), rep("export", 5))
#' , value = c(1,2,3,4,10, 3,3,3,3,13)
#' )
#' ## use 'rx' to interpret 'whole' as a regular expression.
#' rules <- validator(
#' part_whole_relation(value, period, whole=rx("^\\d{4}$")
#' , by=direction)
#' )
#'
#' out <- confront(df, rules, key="id")
#' as.data.frame(out)
#' @export
part_whole_relation <- function(values, labels, whole, part = NULL
, aggregator = sum, tol=1e-8, by = NULL, ...){
df <- data.frame(values=values, labels=labels)
f <- function(d, ...){
i_aggregate <- igrepl(whole, d$labels)
aggregate <- d$values[i_aggregate]
if (length(aggregate)>1){
stop(
sprintf("Multiple labels matching aggregate: %s. Expecting one"
            , paste(d$labels[i_aggregate], collapse=", "))
, call.=FALSE
)
}
i_details <- if (is.null(part)) !i_aggregate
else igrepl(part, d$labels)
details <- d$values[i_details]
out <- if (length(aggregate)==0){
FALSE
} else {
abs(aggregator(details, ...) - aggregate) < tol
}
values <- !logical(length(d$labels))
values[i_details | i_aggregate] <- out
values
}
if (is.null(by)){
return( f(df, ...) )
} else {
unsplit(lapply(split(df, by), f, ...),by)
}
}
#' Label objects for interpretation as pattern
#'
#' Label objects (typically strings or data frames containing keys combinations)
#' to be interpreted as regular expression or globbing pattern.
#'
#'
#' @param x Object to label as regular expression (\code{rx(x)}) or globbing
#' (\code{glob(x)}) pattern.
#'
#'
#'
#' @export
rx <- function(x){
structure(x, class=c("regex",class(x)))
}
#' @rdname rx
#' @export
glob <- function(x){
structure(x, class=c("glob",class(x)))
}
igrepl <- function(pattern, x,...){
if (inherits(pattern, "glob")){
Reduce(`|`, lapply(utils::glob2rx(pattern), grepl,x,...))
  } else if (inherits(pattern, "regex")){
Reduce(`|`, lapply(pattern, grepl, x, ...))
} else {
x %in% pattern
}
}
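# Illustrative sketch (not run): how matching depends on the pattern's label.
# igrepl("S*", c("S21", "X34"))        # FALSE FALSE: literal comparison
# igrepl(glob("S*"), c("S21", "X34"))  # TRUE FALSE: globbing
# igrepl(rx("^S"), c("S21", "X34"))    # TRUE FALSE: regular expression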
#' Split-apply-combine for vectors, with equal-length output
#'
#' Group \code{x} by one or more categorical variables, compute
#' an aggregate, repeat that aggregate to match the size of the
#' group, and combine results. The functions \code{sum_by} and
#' so on are convenience wrappers that call \code{do_by} internally.
#'
#' @param x A bare variable name
#' @param by a bare variable name, or a list of bare variable names, used to
#' split \code{x} into groups.
#' @param fun \code{[function]} A function that aggregates \code{x} to a single value.
#' @param ... passed as extra arguments to \code{fun} (e.g. \code{na.rm=TRUE})
#' @param na.rm Toggle ignoring \code{NA}
#'
#' @examples
#' x <- 1:10
#' y <- rep(letters[1:2], 5)
#' do_by(x, by=y, fun=max)
#' do_by(x, by=y, fun=sum)
#'
#' @family cross-record-helpers
#' @export
do_by <- function(x, by, fun, ...){
unsplit( lapply(split(x,by), function(d) rep(fun(d,...), length(d))),by)
}
#' @rdname do_by
#' @export
sum_by <- function(x, by, na.rm=FALSE) do_by(x,by,sum, na.rm=na.rm)
#' @rdname do_by
#' @export
mean_by <- function(x, by, na.rm=FALSE) do_by(x,by,mean, na.rm=na.rm)
#' @rdname do_by
#' @export
min_by <- function(x, by, na.rm=FALSE) do_by(x,by,min, na.rm=na.rm)
#' @rdname do_by
#' @export
max_by <- function(x, by, na.rm=FALSE) do_by(x,by,max, na.rm=na.rm)
#' Hiridoglu-Berthelot function
#'
#' A function to measure `outlierness' for skew distributed data with long
#' right tails. The method works by measuring deviation from a reference
#' value, by default the median. Deviation from above is measured as the
#' ratio between observed and reference values. Deviation from below is
#' measured as the inverse: the ratio between reference value and
#' observed values.
#'
#' @param x \code{[numeric]}
#' @param ref \code{[function]} or \code{[numeric]}
#' @param ... arguments passed to \code{ref} after \code{x}
#'
#' @return \eqn{\max\{x/ref(x), ref(x)/x\}-1} if \code{ref} is a function,
#' otherwise \eqn{\max\{x/ref, ref/x\}-1}
#'
#' @references
#' Hidiroglou, M. A., & Berthelot, J. M. (1986). Statistical editing and
#' imputation for periodic business surveys. Survey methodology, 12(1), 73-83.
#'
#' @export
#'
#' @family cross-record-helpers
#' @examples
#' x <- seq(1,20,by=0.1)
#' plot(x,hb(x), 'l')
#'
hb <- function(x, ref=stats::median,...){
refval <- if(is.numeric(ref)) ref else ref(x,...)
pmax(x/refval, refval/x) -1
}
#' Check number of code points
#'
#' A convenience function testing for field length.
#'
#'
#' @param x Bare (unquoted) name of a variable.
#' Otherwise a vector of class \code{character}. Coerced to character as
#' necessary.
#' @param n Number of code points required.
#' @param min Minimum number of code points
#' @param max Maximum number of code points
#' @param ... passed to \code{nchar} (for example \code{type="width"})
#' @section Details:
#'
#' The number of code points (string length) may depend on current locale
#' settings or encoding issues, including those caused by inconsistent choices
#' of \code{UTF} normalization.
#'
#' @return A \code{[logical]} of size \code{length(x)}.
#'
#' @examples
#'
#' df <- data.frame(id = 11001:11003, year = c("2018","2019","2020"), value = 1:3)
#' rule <- validator(field_length(year, 4), field_length(id, 5))
#' out <- confront(df, rule)
#' as.data.frame(out)
#'
#' @family format-checkers
#'
#' @export
field_length <- function(x, n=NULL, min=NULL, max=NULL,...){
len <- nchar(as.character(x),...)
if (!is.null(n) & is.null(min) & is.null(max)){
len == n
} else if (!is.null(min) & !is.null(max) & is.null(n) ){
len >= min & len <= max
} else {
stop("Ill-specified check: either n, or min and max must be not-NULL")
}
}
#' Check whether a field conforms to a regular expression
#'
#' A convenience wrapper around \code{grepl} to make rule sets more readable.
#'
#' @param x Bare (unquoted) name of a variable.
#' Otherwise a vector of class \code{character}. Coerced to character as
#' necessary.
#' @param pattern \code{[character]} a regular expression
#' @param type \code{[character]} How to interpret \code{pattern}. In globbing,
#' the asterisk (`*`) is used as a wildcard that stands for 'zero or more
#' characters'.
#' @param ... passed to grepl
#'
#' @family format-checkers
#' @export
field_format <- function(x, pattern, type=c("glob","regex"), ...){
type <- match.arg(type)
if (type == "glob") pattern <- utils::glob2rx(pattern)
grepl(pattern, x=as.character(x),...)
}
#' Check the layouts of numbers.
#'
#' Convenience function to check layout of numbers stored as
#' a character vector.
#'
#'
#' @param x \code{[character]} vector. If \code{x} is not of type
#' \code{character} it will be converted.
#' @param format \code{[character]} denoting the number format (see below).
#' @param min_dig \code{[numeric]} minimal number of digits after decimal separator.
#' @param max_dig \code{[numeric]} maximum number of digits after decimal separator.
#' @param dec \code{[character]} decimal separator.
#'
#'
#' @details
#' If \code{format} is specified, then \code{min_dig}, \code{max_dig} and \code{dec}
#' are ignored.
#'
#' Numerical formats can be specified as a sequence of characters. There are a few
#' special characters:
#' \itemize{
#' \item{\code{d}} Stands for digit.
#' \item{\code{*}} (digit globbing) zero or more digits
#' }
#'
#' Here are some examples.
#' \tabular{ll}{
#' \code{"d.dd"} \tab One digit, a decimal point followed by two digits.\cr
#' \code{"d.ddddddddEdd"}\tab Scientific notation with eight digits behind the decimal point.\cr
#' \code{"0.ddddddddEdd"}\tab Same, but starting with a zero.\cr
#' \code{"d,dd*"} \tab one digit before the comma and at least two behind it.\cr
#' }
#'
#'
#' @examples
#' df <- data.frame(number = c("12.34","0.23E55","0.98765E12"))
#' rules <- validator(
#' number_format(number, format="dd.dd")
#' , number_format(number, "0.ddEdd")
#' , number_format(number, "0.*Edd")
#' )
#'
#' out <- confront(df, rules)
#' values(out)
#'
#' # a few examples, without 'validator'
#' number_format("12.345", min_dig=2) # TRUE
#' number_format("12.345", min_dig=4) # FALSE
#' number_format("12.345", max_dig=2) # FALSE
#' number_format("12.345", max_dig=5) # TRUE
#' number_format("12,345", min_dig=2, max_dig=3, dec=",") # TRUE
#'
#' @family format-checkers
#' @export
number_format <- function(x, format=NULL, min_dig=NULL, max_dig=NULL, dec="."){
if ( !is.null(format) ){
rx <- utils::glob2rx(format, trim.tail=FALSE)
rx <- gsub("d", "\\d", rx, fixed=TRUE)
rx <- gsub(".*", "\\d*", rx, fixed=TRUE)
return( grepl(rx, as.character(x)) )
}
rx <- if (dec == ".") "^.*\\." else sprintf("^.*\\%s",dec)
decimal_digits <- sub(rx, "", x)
min_dig <- if (is.null(min_dig)) "0" else as.character(min_dig)
max_dig <- if (is.null(max_dig)) "" else as.character(max_dig)
rx <- sprintf("^\\d{%s,%s}$",min_dig,max_dig)
grepl(rx,decimal_digits)
}
#' Check records using a predefined table of (im)possible values
#'
#' Given a set of keys or key combinations, check whether all those combinations
#' occur, or check that they do not occur. Supports globbing and regular
#' expressions.
#'
#'
#' @param keys A data frame or bare (unquoted) name of a data
#' frame passed as a reference to \code{confront} (see examples).
#'   The column names of \code{keys} must also occur in the columns
#' of the data under scrutiny.
#' @param by A bare (unquoted) variable or list of variable names that occur in
#' the data under scrutiny. The data will be split into groups according to
#' these variables and the check is performed on each group.
#' @param allow_duplicates \code{[logical]} toggle whether key combinations can occur
#' more than once.
#'
#' @details
#'
#' \tabular{ll}{
#' \code{contains_exactly} \tab dataset contains exactly the key set, no more, no less. \cr
#' \code{contains_at_least}\tab dataset contains at least the given keys. \cr
#' \code{contains_at_most} \tab all keys in the data set are contained in the given keys. \cr
#' \code{does_not_contain} \tab The keys are interpreted as forbidden key combinations. \cr
#' }
#'
#'
#' @section Globbing:
#' Globbing is a simple method of defining string patterns where the asterisk
#' (\code{*}) is used as a wildcard. For example, the globbing pattern
#' \code{"abc*"} stands for any string starting with \code{"abc"}.
#'
#'
#' @return
#' For \code{contains_exactly}, \code{contains_at_least}, and
#' \code{contains_at_most} a \code{logical} vector with one entry for each
#' record in the dataset. Any group not conforming to the test keys will have
#' \code{FALSE} assigned to each record in the group (see examples).
#'
#' @family cross-record-helpers
#' @family key-checkers
#'
#' @examples
#'
#' ## Check that data is present for all quarters in 2018-2019
#' dat <- data.frame(
#' year = rep(c("2018","2019"),each=4)
#' , quarter = rep(sprintf("Q%d",1:4), 2)
#' , value = sample(20:50,8)
#' )
#'
#' # Method 1: creating a data frame in-place (only for simple cases)
#' rule <- validator(contains_exactly(
#' expand.grid(year=c("2018","2019"), quarter=c("Q1","Q2","Q3","Q4"))
#' )
#' )
#' out <- confront(dat, rule)
#' values(out)
#'
#' # Method 2: pass the keyset to 'confront', and reference it in the rule.
#' # this scales to larger key sets but it needs a 'contract' between the
#' # rule definition and how 'confront' is called.
#'
#' keyset <- expand.grid(year=c("2018","2019"), quarter=c("Q1","Q2","Q3","Q4"))
#' rule <- validator(contains_exactly(all_keys))
#' out <- confront(dat, rule, ref=list(all_keys = keyset))
#' values(out)
#'
#' ## Globbing (use * as a wildcard)
#'
#' # transaction data
#' transactions <- data.frame(
#' sender = c("S21", "X34", "S45","Z22")
#' , receiver = c("FG0", "FG2", "DF1","KK2")
#' , value = sample(70:100,4)
#' )
#'
#' # forbidden combinations: if the sender starts with "S",
#' # the receiver can not start "FG"
#' forbidden <- data.frame(sender="S*",receiver = "FG*")
#'
#' rule <- validator(does_not_contain(glob(forbidden_keys)))
#' out <- confront(transactions, rule, ref=list(forbidden_keys=forbidden))
#' values(out)
#'
#'
#' ## Quick interactive testing
#' # use 'with':
#' with(transactions, does_not_contain(forbidden))
#'
#'
#'
#' ## Grouping
#'
#' # data in 'long' format
#' dat <- expand.grid(
#' year = c("2018","2019")
#' , quarter = c("Q1","Q2","Q3","Q4")
#' , variable = c("import","export")
#' )
#' dat$value <- sample(50:100,nrow(dat))
#'
#'
#' periods <- expand.grid(
#' year = c("2018","2019")
#' , quarter = c("Q1","Q2","Q3","Q4")
#' )
#'
#' rule <- validator(contains_exactly(all_periods, by=variable))
#'
#' out <- confront(dat, rule, ref=list(all_periods=periods))
#' values(out)
#'
#' # remove one export record
#'
#' dat1 <- dat[-15,]
#' out1 <- confront(dat1, rule, ref=list(all_periods=periods))
#' values(out1)
#'
#' @export
contains_exactly <- function(keys, by=NULL, allow_duplicates=FALSE){
given_keys <- do.call(paste, keys)
L <- list()
for ( keyname in names(keys) ) L[[keyname]] <- dynGet(keyname)
found_keys <- do.call(paste, L)
if (is.null(by)) by <- character(length(found_keys))
unsplit(lapply(split(found_keys, f=by), function(fk){
out <- all(fk %in% given_keys) && all(given_keys %in% fk)
if (!allow_duplicates) out <- out && !any(duplicated(fk))
rep(out, length(fk))
}), by)
}
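# Illustrative sketch (not run). The loop above uses dynGet() to fetch, for
# each column name in 'keys', the variable of that name from a calling frame;
# 'confront' (or 'with', as below) provides those variables.
# dat  <- expand.grid(year = c("2018","2019"), quarter = c("Q1","Q2"))
# keys <- expand.grid(year = c("2018","2019"), quarter = c("Q1","Q2"))
# with(dat, contains_exactly(keys))   # TRUE TRUE TRUE TRUE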
#' @rdname contains_exactly
#' @export
contains_at_least <- function(keys, by=NULL){
L <- list()
for ( keyname in names(keys) ) L[[keyname]] <- dynGet(keyname)
given_keys <- do.call(paste, keys)
found_keys <- do.call(paste, L)
if (is.null(by)) by <- character(length(found_keys))
unsplit(lapply(split(found_keys, f=by), function(fk){
rep(all(given_keys %in% fk), length(fk))
}), by)
}
#' @rdname contains_exactly
#' @return
#' For \code{contains_at_most}: a \code{logical} vector with size equal to the
#' number of records under scrutiny. It is \code{FALSE} for records whose key
#' combination does not match any of the combinations in \code{keys}.
#' @export
contains_at_most <- function(keys, by=NULL){
L <- list()
for ( keyname in names(keys) ) L[[keyname]] <- dynGet(keyname)
contains(L, keys, by=by)
}
#' @rdname contains_exactly
#'
#'
#' @return
#' For \code{does_not_contain}: a \code{logical} vector with size equal to the
#' number of records under scrutiny. It is \code{FALSE} for records whose key
#' combination matches one of the (forbidden) combinations in \code{keys}.
#' @export
does_not_contain <- function(keys){
L <- list()
for ( keyname in names(keys) ) L[[keyname]] <- dynGet(keyname)
!contains(L, keys, by=NULL)
}
# for each 'x' see if it matches any regular expression in 'pattern'
rxin <- function(x, pattern){
A <- sapply(pattern, grepl, x=x)
if (!is.array(A)) A <- matrix(A,ncol=length(pattern))
apply(A, 1, any)
}
# for each 'x' see if it matches any globbing pattern in 'pattern'
glin <- function(x, pattern){
pattern <- utils::glob2rx(pattern)
rxin(x, pattern)
}
get_keytype <- function(keys){
out <- grep("^(regex)|(glob)$", class(keys), value=TRUE)
if (length(out) < 1) out <- "fixed"
out
}
contains <- function(dat, keys, by){
keytype <- get_keytype(keys)
if (isTRUE(keytype=="regex") && length(keys) > 1){
# some preparations before pasting
for (keyname in names(keys)[-1]){
key <- keys[[keyname]]
keys[[keyname]] <- ifelse( substr(key,1,1) == "^"
, sub("^\\^", "", keys[[keyname]])
, paste0(".*", key) )
}
for (keyname in names(keys)[-length(keys)]){
key <- keys[[keyname]]
keys[[keyname]] <- ifelse( substr(key, nchar(key), nchar(key)) == "$"
, sub("\\$$", "", key)
, paste0(key, ".*"))
}
}
# note: globbing patterns may be pasted before transformation
# to regex.
given_keys <- do.call(paste, keys)
found_keys <- do.call(paste, dat)
if (is.null(by)) by <- character(length(found_keys))
unsplit(lapply(split(found_keys, f=by), function(fk){
switch(keytype
, "fixed" = fk %in% given_keys
, "glob" = glin(fk, given_keys)
, "regex" = rxin(fk, given_keys)
)
}), by)
}
#' Check aggregates defined by a hierarchical code list
#'
#' Check all aggregates defined by a code hierarchy.
#'
#'
#' @param values bare (unquoted) name of a variable that holds values that
#' must aggregate according to the \code{hierarchy}.
#' @param labels bare (unquoted) name of variable holding a grouping variable (a code
#' from a hierarchical code list)
#' @param hierarchy \code{[data.frame]} defining a hierarchical code list. The
#' first column must contain (child) codes, and the second column contains their
#' corresponding parents.
#' @param by A bare (unquoted) variable or list of variable names that occur in
#' the data under scrutiny. The data will be split into groups according
#' to these variables and the check is performed on each group.
#' @param na_value \code{[logical]} or \code{NA}. Value assigned to values that
#' do not occur in checks.
#' @param aggregator \code{[function]} that aggregates children to their parents.
#' @param tol \code{[numeric]} tolerance for equality checking
#' @param ... arguments passed to \code{aggregator} (e.g. \code{na.rm=TRUE}).
#'
#'
#' @return A \code{logical} vector with the size of \code{length(values)}. Every
#' element involved in an aggregation error is labeled \code{FALSE} (aggregate
#' plus aggregated elements). Elements that are involved in correct
#' aggregations are set to \code{TRUE}, elements that are not involved in
#' any check get the value \code{na_value} (by default: \code{TRUE}).
#'
#'
#'
#' @family cross-record-helpers
#' @export
#' @examples
#' # We check some data against the built-in NACE revision 2 classification.
#' data(nace_rev2)
#' head(nace_rev2[1:4]) # columns 3 and 4 contain the child-parent relations.
#'
#' d <- data.frame(
#' nace = c("01","01.1","01.11","01.12", "01.2")
#' , volume = c(100 ,70 , 30 ,40 , 25 )
#' )
#' # It is possible to perform checks interactively
#' d$nacecheck <- hierarchy(d$volume, labels = d$nace, hierarchy=nace_rev2[3:4])
#' # we have that "01.1" == "01.11" + "01.12", but not "01" == "01.1" + "01.2"
#' print(d)
#'
#' # Usage as a validation rule is as follows
#' rules <- validator(hierarchy(volume, labels = nace, hierarchy=validate::nace_rev2[3:4]))
#' confront(d, rules)
#'
#' # you can also pass a hierarchy as a reference, for example.
#'
#' rules <- validator(hierarchy(volume, labels = nace, hierarchy=ref$nacecodes))
#' out <- confront(d, rules, ref=list(nacecodes=nace_rev2[3:4]))
#' summary(out)
#'
#' # set the output to NA when a code does not occur in the code list.
#' d <- data.frame(
#' nace = c("01","01.1","01.11","01.12", "01.2", "foo")
#' , volume = c(100 ,70 , 30 ,40 , 25 , 60)
#' )
#'
#' d$nacecheck <- hierarchy(d$volume, labels = d$nace, hierarchy=nace_rev2[3:4]
#' , na_value = NA)
#' # we have that "01.1" == "01.11" + "01.12", but not "01" == "01.1" + "01.2"
#' print(d)
#'
hierarchy <- function(values, labels, hierarchy, by=NULL, tol=1e-8, na_value=TRUE, aggregator = sum, ...){
if (is.null(by)) by <- character(length(values))
dat <- cbind(data.frame(values=values, labels=labels), by)
unsplit(lapply(split(dat, f=by)
, check_hagg, h=hierarchy, na_value = na_value, tol=tol, fun=aggregator,...)
, f=by)
}
check_hagg <- function(dat, h, na_value, tol, fun,...){
parents <- unique(h[,2])
keytype <- get_keytype(h)
out <- rep(na_value, nrow(dat))
for (parent in parents){
J <- dat$labels %in% parent
children <- h[,1][h[,2] == parent]
I <- switch(keytype
, "glob" = glin(dat$labels, children)
, "regex" = rxin(dat$labels, children)
, dat$labels %in% children)
# found 'parent' too often, so we can't check aggregate
if (sum(J) > 1){
grp <- paste0("(",paste(t(dat[1,-(1:2)]), collapse=", "),")")
msg <- "Parent '%s' occurs more than once (%d times) in group %s"
warning(sprintf(msg, parent, sum(J), grp), call.=FALSE)
out[I|J] <- FALSE
next
}
if (!any(J) && !any(I)) next
if (!any(J) && any(I)) out[I] <- FALSE # no parent but children present
if ( any(J) && !any(I)) out[J] <- FALSE # no children but parent present
ii <- I|J
    test <- abs(dat$values[J] - fun(dat$values[I],...)) <= tol
if (any(J) && any(I)){
# equivalent, but slower statement:
# if ( any(J) && any(I)) out[ii] <- ifelse(is.na(out[ii]), test, out[ii] & test)
out[ii] <- (is.na(out[ii]) & test) | (!is.na(out[ii]) & out[ii] & test)
}
}
out
}
|
/scratch/gouwar.j/cran-all/cranData/validate/R/genericrules.R
|
#' @include expressionset.R
NULL
# The 'indicator' class holds indicator definitions
#' Define indicators for data
#'
#' An indicator maps a data frame, or each record in a data frame to a number.
#' The purpose of this class is to store and apply expressions that define
#' indicators.
#'
#'
#' @param ... A comma-separated list of indicator definitions
#' @param .file (optional) A character vector of file locations
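#' @param .data (optional) A \code{data.frame} with rule definitions, in the
#'   same form as accepted by \code{\link{validator}}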
#'
#' @seealso \code{\link{syntax}}, \code{\link{add_indicators}}
#'
#' @export
#' @keywords internal
#' @example ../examples/indicator.R
indicator <- function(..., .file, .data) new('indicator',..., .file=.file, .data=.data)
#### INDICATOR CLASS ----------------------------------------------------------
#' Store a set of rich indicator expressions
#'
#' \bold{This feature is currently experimental and may change in future versions}
#'
#' @section Details:
#' An indicator stores a set of indicators. It is a child class of \code{\link{expressionset}} and
#' can be constructed with \code{\link{indicator}}.
#'
#' @section Exported S4 methods for \code{validator}:
#' \itemize{
#' \item{Methods inherited from \code{\link{expressionset}}}
#' \item{\code{\link{confront}}}
#' \item{\code{\link{compare}}}
#' }
#'
#'
#' @section See also:
#' \itemize{
#' \item{\code{\link{expressionset}}}
#' }
#'
#' @keywords internal
#'
setRefClass("indicator", contains='expressionset',
methods = list(
initialize = function(..., .file, .data) ini_indicator(.self
, ..., .file = .file, .data=.data)
)
)
ini_indicator <- function(obj, ..., .file, .data){
if (missing(.file) && missing(.data)){
.ini_expressionset_cli(obj, ..., .prefix="I")
obj$._options <- .PKGOPT
} else if (!missing(.file)) {
.ini_expressionset_yml(obj, .file, .prefix="I")
} else if (!missing(.data)){
.ini_expressionset_df(obj, dat=.data, .prefix="I")
obj$._options <- .PKGOPT
}
}
#' Combine two indicator objects
#'
#' Combine two \code{\link{indicator}} objects by addition. A new \code{indicator}
#' object is created with default (global) option values. Previously set options
#' are ignored.
#'
#' @param e1 an \code{\link{indicator}}
#' @param e2 an \code{\link{indicator}}
#'
#'
#' @family indicator-methods
#' @examples
#' indicator(mean(x)) + indicator(x/median(x))
#' @export
setMethod("+", c("indicator","indicator"), function(e1, e2){
ii <- indicator()
ii$rules <- c(e1$rules, e2$rules)
names(ii) <- make.names(names(ii),unique=TRUE)
ii
})
#' Add indicator values as columns to a data frame
#'
#' Compute and add externally defined indicators to data frame.
#' If necessary, values are recycled over records.
#'
#' @param dat \code{[data.frame]}
#' @param x \code{[indicator]} or \code{[indication]} object. See examples.
#'
#' @return \code{dat} with extra columns defined by \code{x} attached.
#'
#' @examples
#' ii <- indicator(
#' hihi = 2*sqrt(height)
#' , haha = log10(weight)
#' , lulz = mean(height)
#' , wo0t = median(weight)
#' )
#'
#' # note: mean and median are repeated
#' add_indicators(women, ii)
#'
#' # compute indicators first, then add
#' out <- confront(women, ii)
#' add_indicators(women, out)
#'
#' @family indicators
#'
#' @export
add_indicators <- function(dat, x){
if (inherits(x,"indicator")) x <- confront(dat, x)
vals <- values(x, simplify=FALSE)
n <- nrow(dat)
L <- lapply(vals, function(d){
if (length(d) == n){
d
} else if (length(d)==1) {
rep(d,n)
} else {
warnf("Skipping output that does not fit in data frame")
NULL
}
})
cbind(dat, do.call("cbind", L))
}
|
/scratch/gouwar.j/cran-all/cranData/validate/R/indicator.R
|
#' Logging object to use with the lumberjack package
#'
#' @section Details:
#'
#' This object can be used with the function composition ('pipe') operator of the
#' \code{\link[lumberjack]{lumberjack}} package. The logging is based on
#' validate's \code{\link{cells}} function. The output is written to a
#' \code{csv} file which contains the following columns.
#' \tabular{lll}{
#' \code{step} \tab\code{integer} \tab Step number \cr
#' \code{time} \tab\code{POSIXct} \tab Timestamp \cr
#' \code{expr} \tab\code{character}\tab Expression used on data \cr
#' \code{cells} \tab\code{integer} \tab Total nr of cells in dataset\cr
#' \code{available} \tab\code{integer} \tab Nr of non-NA cells\cr
#' \code{missing} \tab\code{integer} \tab Nr of empty (NA) cells\cr
#' \code{still_available}\tab\code{integer} \tab Nr of cells still available after expr\cr
#' \code{unadapted} \tab\code{integer} \tab Nr of cells still available and unaltered\cr
#' \code{adapted}         \tab\code{integer}  \tab Nr of cells still available and altered\cr
#' \code{imputed} \tab\code{integer} \tab Nr of cells not missing anymore\cr
#' }
#'
#'
#' @section Note:
#' This logger is suited only for operations that do not change the dimensions
#' of the dataset.
#'
#' @docType class
#' @format A reference class object
#' @family loggers
#'
#' @export lbj_cells
#' @exportClass lbj_cells
lbj_cells <- setRefClass("lbj_cells"
, fields = list(
cells = "array"
, n = "numeric"
, t = "POSIXct"
, expr = "character"
, verbose = "logical"
, label = "character"
)
, methods = list(
initialize = function(..., verbose=TRUE, label=""){
"Create object. Optionally toggle verbosity."
.self$n <- 0
.self$t <- .POSIXct(numeric(0))
.self$verbose = verbose
.self$label = label
}
, add = function(meta, input, output){
"Add logging info based on in- and output"
if (!identical(dim(input),dim(output))){
warnf("dimensions changed, not logging %s",meta$src)
return()
}
cl <- cells(from = input, to = output)
tm <- as.POSIXct(Sys.time())
if ( .self$n == 0 ){
.self$cells <- cl[,1,drop=FALSE]
.self$t <- c(.self$t,tm)
.self$expr <- ""
}
.self$n <- .self$n+1
.self$t <- c(.self$t, tm)
.self$expr <- c(.self$expr, meta$src)
.self$cells <- cbind(.self$cells, cl[,2,drop=FALSE])
}
, dump = function(file=NULL,verbose=TRUE,...){
"Dump logging info to csv file.
All arguments in '...' except row.names are passed to 'write.csv'"
out <- .self$log_data()
outf <- if( !is.null(file) ) file
else if (.self$label == "" ) "lbj_cells.csv"
else paste0(.self$label, "_lbj_cells.csv")
write.csv(out, file=outf, row.names=FALSE,...)
.self$fmsg("Dumped a log at %s", normalizePath(outf))
}
, log_data = function(){
"Return logged data as a data.frame"
out <- data.frame(
step = if(.self$n > 0 ) 0:.self$n else integer(0)
, time = .self$t
, expression = .self$expr
)
cl <- t(.self$cells)
row.names(cl) <- NULL
cbind(out, cl)
}
, show = function(){
"Print method"
cat("Logging object of class lbj_cells with the following logging info\n")
print(.self$log_data())
}
, fmsg = function(fmt,...){
if (.self$verbose){
message(sprintf(fmt,...))
}
}
)
)
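# Illustrative sketch (not run), assuming the 'lumberjack' package is
# attached; '%L>%', 'start_log' and 'dump_log' are lumberjack functions.
# library(lumberjack)
# women %L>%
#   start_log(lbj_cells()) %L>%
#   transform(height = height * 2.54) %L>%
#   dump_log()   # writes lbj_cells.csv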
#' Logging object to use with the lumberjack package
#'
#' @family loggers
#' @export lbj_rules
#' @exportClass lbj_rules
lbj_rules <- setRefClass("lbj_rules",
fields = list(
compare = "array"
, rules = "validator"
, n = "numeric"
, t = "POSIXct"
, expr = "character"
, verbose = "logical"
, label = "character"
)
, methods = list(
initialize = function(rules, verbose=TRUE, label=""){
"Create object. Optionally toggle verbosity."
.self$n <- 0
.self$t <- .POSIXct(numeric(0))
.self$verbose <- verbose
.self$rules <- rules$copy()
.self$label <- label
}
, add = function(meta, input, output){
if (!identical(dim(input),dim(output))){
warnf("dimensions changed, not logging %s",meta$src)
return()
}
tm <- as.POSIXct(Sys.time())
comp <- cbind(compare(.self$rules, input, output))
if ( .self$n == 0 ){
.self$compare <- comp[,1,drop=FALSE]
.self$t <- c(.self$t,tm)
.self$expr <- ""
}
.self$n <- .self$n+1
.self$t <- c(.self$t, tm)
.self$expr <- c(.self$expr, meta$src)
.self$compare <- cbind(.self$compare, comp[,2,drop=FALSE])
}
, dump = function(file=NULL,...){
"Dump logging info to csv file.
All arguments in '...' except row.names are passed to 'write.csv'"
out <- .self$log_data()
outf <- if( !is.null(file) ) file
else if (.self$label == "" ) "lbj_rules.csv"
else paste0(.self$label, "_lbj_rules.csv")
write.csv(out, file=outf, row.names=FALSE,...)
.self$fmsg("Dumped a log at %s", normalizePath(outf))
}
, log_data = function(){
"Return logged data as a data.frame"
out <- data.frame(
step = if(.self$n > 0 ) 0:.self$n else integer(0)
, time = .self$t
, expression = .self$expr
)
cm <- t(.self$compare)
row.names(cm) <- NULL
cbind(out, cm)
}
, show = function(){
"Print method"
cat("Logging object of class lbj_rules with the following logging info\n")
print(.self$log_data())
}
, plot = function(){
"plot rule comparisons"
pl <- getMethod("plot","validatorComparison")
log <- .self$log_data()
cmp <- t(log[-(1:3)])
class(cmp) <- c("validatorComparison", "array")
x <- gsub("\\(.*","",log$expression)
colnames(cmp) <- x
pl(cmp)
}
, fmsg = function(fmt,...){
if (.self$verbose){
message(sprintf(fmt,...))
}
}
)
)
|
/scratch/gouwar.j/cran-all/cranData/validate/R/lumberjack.R
|
#' @include validate_pkg.R
NULL
# File parsing and functions computing on the language
#' Services for extending 'validate'
#'
#' Functions exported silently to allow for cross-package inheritance
#' of the \code{\link{expressionset}} object. These functions are never
#' needed in scripts or statistical production code.
#'
#' @rdname validate_extend
#' @param .__defaults toggle default options
#' @param .__reset toggle reset options
#' @export
#' @keywords internal
.PKGOPT <- settings::options_manager(
# all: warnings and errors are raised. 'errors': raise errors. 'none': warnings and errors are caught.
raise = 'none'
, lin.eq.eps = 1e-8
, lin.ineq.eps= 1e-8
, na.value = NA
, sequential = TRUE # option for the 'dcmodify' package
, na.condition = FALSE # option for the 'dcmodify' package
)
#' Set or get options globally or per object.
#'
#'
#' There are three ways to specify options for this package.
#' \itemize{
#' \item{Globally. Setting \code{voptions(option1=value1,option2=value2,...)}
#' sets global options.}
#' \item{Per object. Setting \code{voptions(x=<object>, option1=value1,...)},
#' causes all relevant functions that use that object (e.g.
#' \code{\link{confront}}) to use those local settings.}
#' \item{At execution time. Relevant functions (e.g. \code{\link{confront}}) take
#' optional arguments allowing one
#' to define options to be used during the current function call}
#' }
#'
#'
#' @section Options for the validate package:
#' Currently the following options are supported.
#'
#' \itemize{
#' \item{\code{na.value} (\code{NA},\code{TRUE},\code{FALSE}; \code{NA}) Value
#' to return when a validating statement results in \code{NA}.}
#' \item{\code{raise} (\code{"none"},\code{"errors"},\code{"all"};
#' \code{"none"}) Control whether the \code{\link{confront}} methods catch or raise
#' exceptions.
#' The 'all' setting is useful when debugging validation scripts.}
#' \item{\code{lin.eq.eps} ('numeric'; 1e-8) The precision used when evaluating
#' linear equalities. To be used to control for machine rounding.}
#' \item{\code{"reset"} Reset to factory settings.}
#' }
#'
#'
#' @return When requesting option settings: a \code{list}. When setting options,
#' the whole options list is returned silently.
#'
#' @param x (optional) an object inheriting from \code{expressionset} such as \code{\link{validator}} or \code{\link{indicator}}.
#' @param ... Name of an option (character) to retrieve options or \code{option = value} pairs to set options.
#'
#'
#' @export
#' @examples
#'
#' # set an option, local to a validator object:
#' v <- validator(x + y > z)
#' voptions(v,raise='all')
#' # check that local option was set:
#' voptions(v,'raise')
#' # check that global options have not changed:
#' voptions('raise')
setGeneric('voptions',def = function(x=NULL,...) standardGeneric('voptions'))
#' @rdname voptions
setMethod('voptions','ANY',function(x=NULL,...){
do.call(.PKGOPT,c(x,list(...)))
})
#' @rdname voptions
#' @export
validate_options <- function(...){
.Deprecated(new="voptions")
voptions(...)
}
#' @rdname voptions
#' @export
setGeneric('reset',def=function(x=NULL) standardGeneric('reset'))
#' @rdname voptions
setMethod('reset','ANY',function(x=NULL){
settings::reset(.PKGOPT)
})
# get variables from call.
# x : a call object
# output:
# - character vector with variable names
# - NULL : call contains no variables (e.g. 1 == 1)
var_from_call <- function(x){
vars <- all.vars(x)
# Statement containing only literals
if ( identical(vars, character(0)) ){
return(NULL)
} else {
vars
}
}
# find a symbol in a call. Returns a list of multi-indices.
# occurrences of variable names in a function signature are skipped.
which.call <- function(x, what, I=1, e=as.environment(list(n=0))){
# is.symbol filters constants such as NA
if ( is.symbol(x) && x == what ){
e[[paste0('x',e$n)]] <- I
e$n <- e$n + 1
}
if ( is.call(x) ){
for (i in seq_along(x)) which.call(x[[i]],what,c(I,i),e)
}
L <- lapply(as.list(e),function(x) if ( length(x) == 1 ) x else x[-1])
L$n <- NULL
L
}
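# Illustrative sketch (not run): multi-indices of both occurrences of 'x'.
# which.call(quote(x + sin(x)), "x")   # list(2, c(3, 2)) (names and order may vary)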
negate <- function(x){
if (is.call(x)){
ne <- switch(as.character(x[[1]])
, "!" = x[[2]]
, "(" = negate(x[[2]])
, "==" = bquote(.(x[[2]]) != .(x[[3]]))
, "!=" = bquote(.(x[[2]]) == .(x[[3]]))
, ">=" = bquote(.(x[[2]]) < .(x[[3]]))
, ">" = bquote(.(x[[2]]) <= .(x[[3]]))
, "<=" = bquote(.(x[[2]]) > .(x[[3]]))
, "<" = bquote(.(x[[2]]) >= .(x[[3]]))
, "&" = substitute(P | Q, list(P=negate(x[[2]]), Q=negate(x[[3]])))
, "|" = substitute(P & Q, list(P=negate(x[[2]]), Q=negate(x[[3]])))
, bquote(!.(x))
)
return(ne)
}
bquote(!.(x))
}
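# Illustrative sketch (not run): logical negation by rewriting the call tree.
# negate(quote(x > 0))            # x <= 0
# negate(quote(x > 0 & y == 1))   # x <= 0 | y != 1   (De Morgan)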
replace_lin <- function(x, dat, eps_eq=0.1, eps_ineq = 0.01){
if (!is.call(x)){
x
} else if (linear_call(x) && all_numeric(x, dat)){
op <- as.character(x[[1]])
if (op == "==" && eps_eq > 0){
bquote(abs(.(x[[2]]) - .(x[[3]])) <= .(eps_eq))
} else if (eps_ineq > 0){
switch( op
, ">=" = bquote(.(x[[2]]) - .(x[[3]]) >= -.(eps_ineq))
#, ">" = bquote(.(x[[2]]) - .(x[[3]]) > -.(eps_ineq))
, "<=" = bquote(.(x[[2]]) - .(x[[3]]) <= .(eps_ineq))
#, "<" = bquote(.(x[[2]]) - .(x[[3]]) < .(eps_ineq))
, x
)
} else {
x
}
} else if (x[[1]] == "!"){
negate(replace_lin(x[[2]], dat = dat, eps_eq = eps_eq, eps_ineq = eps_ineq))
} else if (x[[1]] == "!="){
negate(replace_lin( bquote(.(x[[2]]) == .(x[[3]]))
, dat = dat
, eps_eq = eps_eq
, eps_ineq = eps_ineq
)
)
} else {
x[-1] <- lapply(x[-1], replace_lin, dat= dat, eps_eq = eps_eq, eps_ineq=eps_ineq)
x
}
}
#
# replace_linear_restriction <- function(x,eps,dat, op="=="){
# repl <- function(x,eps,op){
# # by replacing nodes in the call tree
# # we need not concern about brackets
# if (x[[1]] != op ) return(x)
# m <- quote(e1-e2)
# a <- switch(op
# , "==" = quote(abs(x))
# , "<=" = quote(x)
# , ">=" = quote(x)
# )
# lt <- switch(op
# , "==" = quote(e1 < e2)
# , "<=" = quote(e1 <= e2)
# , ">=" = quote(e1 >= e2)
# )
# if (op == ">=") eps <- -eps
# m[[2]] <- left(x)
# m[[3]] <- right(x)
# a[[2]] <- m
# lt[[2]] <- a
# lt[[3]] <- eps
# lt
# }
#
# if (length(x) == 3 && linear_call(x) && all_numeric(x,dat)){
# return(repl(x,eps,op))
# } else if (length(x) > 1) {
# for ( i in 2:length(x) ){
# x[[i]] <- replace_linear_restriction(x[[i]],eps,dat)
# }
# }
# x
# }
all_numeric <- function(x,dat){
if (is.null(dat)) return(TRUE)
vr <- var_from_call(x)
vr <- vr[vr %in% variables(dat)]
all(sapply(vr,function(u) is.numeric(dat[[u]])))
}
# e <- expression(if (x + y == 3) z>0)[[1]]
# e <- expression(aap / noot > z)[[1]]
# replace_lin(e, dat=NULL)
# replace occurrences of x$y --> x[["y"]]
replace_dollar <- function(x){
L <- which.call(x,'$')
for ( I in L ){
if (length(I)==1){
x <- parse(text=paste0(left(x),'[[ "',deparse(right(x)),'" ]]'))[[1]]
} else {
I <- I[-length(I)]
p <- paste0(left(x[[I]]),'[[ "',deparse(right(x[[I]])),'" ]]')
x[[I]] <- parse(text=p)[[1]]
}
}
x
}
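# Illustrative sketch (not run):
# replace_dollar(quote(women$height > 0))   # women[["height"]] > 0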
# replace occurrences of 'x %in% y' with 'x %vin% y'
replace_in <- function(x){
L <- which.call(x,"%in%")
for ( k in L ){
m <- expression(e1 %vin% e2)[[1]]
if (length(k) == 1){
m[[2]] <- left(x)
m[[3]] <- right(x)
x <- m
} else {
i <- k[-length(k)]
e <- x[[i]]
m[[2]] <- left(e)
m[[3]] <- right(e)
x[[i]] <- m
}
}
x
}
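# Illustrative sketch (not run):
# replace_in(quote(x %in% c("a", "b")))   # x %vin% c("a", "b")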
# test if a call defines a variable group
defines_var_group <- function(x){
length(x) == 3 && x[[1]] == ':=' && is.name(x[[2]]) && x[[3]][[1]] == 'var_group'
}
# functions to vectorize validation calls ----
not <- function(x){
f <- expression(!(dummy))[[1]]
f[[2]][[2]] <- x
f
}
`%or%` <- function(x,y){
f <- expression((A)|(B))[[1]]
f[[2]][[2]] <- x
f[[3]][[2]] <- y
f
}
# replace_if <- function(x){
# f <- expression(!(P) | (Q) )[[1]]
# f[[c(2,2,2)]] <- x[[2]]
# f[[c(3,2)]] <- x[[3]]
# f
# }
# logical implication if (P) Q
# P -> Q ==> !(P) | (Q)
#
# if-then-else (length 4)
# if (P) Q else R
# (P -> Q) & (!P -> R)
#
replace_if <- function(x){
if ( length(x) == 3 ){
f <- expression(!(P) | (Q) )[[1]]
f[[c(2,2,2)]] <- x[[2]]
f[[c(3,2)]] <- x[[3]]
f
} else { # length (x) == 4 (there is an 'else')
f <- expression( (!(P) | (Q)) & ((P) | (R)) )[[1]]
P <- which.call(f,"P")
f[[P[[1]]]] <- x[[2]]
f[[P[[2]]]] <- x[[2]]
Q <- which.call(f,"Q")
f[[Q[[1]]]] <- x[[3]]
R <- which.call(f,"R")
f[[R[[1]]]] <- x[[4]]
f
}
}
vectorize <- function(x){
# we are at an end, or we enter a function, which we will not
# modify.
if ( length(x) == 1 || x[[1]] == "function") return(x)
for ( i in seq_along(x) ){
if ( is.symbol(x[[i]]) && x[[i]] == "if" ){
return(vectorize(replace_if(x)))
} else {
x[[i]] <- vectorize(x[[i]])
}
}
x
}
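# Illustrative sketch (not run): 'if' statements become vectorized implications.
# vectorize(quote(if (x > 0) y > 0))            # !(x > 0) | (y > 0)
# vectorize(quote(if (x > 0) y > 0 else y < 0)) # (!(x > 0) | (y > 0)) & ((x > 0) | (y < 0))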
# x: a validation call
#vectorize <- function(x) if ( x[[1]] == 'if' ) not(x[[2]]) %or% x[[3]] else x
# Determine wether a call object represents a linear operation. ----
# the 'length' conditions ensure that for unary operators, the postfix argument is treated as 'right'
node <- function(x) if ( is.call(x) ) x[[1]] else NULL
left <- function(x) if ( is.call(x) && length(x)>2) x[[2]] else NULL
right <- function(x) if ( is.call(x) ) x[[min(length(x),3)]] else NULL
linear_call <- function(x){
if (is.character(x)) return(FALSE)
if (is.logical(x)) return(FALSE)
if ( is.null(node(x)) ) return(TRUE)
n <- deparse(node(x))
if ( !n %in% c("+","-","*","<","<=","==",">=",">" ) ) return(FALSE)
if ( n == "*" && !( is.numeric(left(x)) || is.numeric(right(x)) ) ) return(FALSE)
linear_call(left(x)) & linear_call(right(x))
}
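# Illustrative sketch (not run):
# linear_call(quote(2*x + y <= 3))   # TRUE
# linear_call(quote(x * y == 1))     # FALSE: product of two variables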
validating_call <- function(cl){
pure <- c("<", "<=", "==", "!=", ">=", ">", "%in%", "%vin%", "identical", "~" ,"%->%"
, "grepl" , "is_unique", "all_unique", "is_complete", "all_complete"
, "exists_any", "exists_one", "is_linear_sequence","in_linear_sequence"
, "part_whole_relation","hierarchy"
, "field_length", "number_format", "field_format"
, "contains_exactly", "contains_at_least", "contains_at_most", "does_not_contain"
, "in_range", "var_group")
unary <- c("!", "(", "all", "any" )
binary <- c("|","||","&","&&","if","xor")
variables_ok <- !is.null(var_from_call(cl)) || # validating call must have variables
grepl("contain", deparse(cl[[1]])) # or be one of the 'contains' functions
# push-button semantic changer. See issue #41
assume_logical <- FALSE
vc <- function(x){
if (is.symbol(x)) return(assume_logical)
node <- deparse(x[[1]])
if ( node %in% pure || grepl("^is\\.",node) ) return(TRUE)
if ( node %in% unary && vc(x[[2]])) return(TRUE)
if ( node %in% binary && vc(x[[2]]) && vc(x[[3]]) ) return(TRUE)
FALSE
}
variables_ok && vc(cl)
}
# coefficients for normalized linear expressions (constant after the comparison operator)
nodesign <- c('+' = 1, '-' = -1)
operatorsign <- c('<'= 1, '<=' = 1, '==' = 1, '>=' = -1, '>' = -1)
normed_operators <- c('<' = '<', '<=' = '<=', '==' = '==', '>=' = '<=', '>' = '<')
addcoef <- function(x,value,env) assign(x,mget(x,envir=env,ifnotfound=0)[[1]]+value,env)
# coefficients of an expression of the form sum_i a_i*x_i (so no comparison operators)
coefficients <- function(x, sign=1, coef=new.env()){
# added constant
if ( is.numeric(x) ) addcoef('CONSTANT',sign*x,coef)
# end node without explicit coefficient.
if ( is.name(x) ) addcoef(deparse(x),sign,coef)
  # we're at a leaf
if ( is.null(node(x)) ){
addcoef('CONSTANT',0,coef)
return(unlist(as.list(coef)))
}
n <- deparse(node(x))
if (n %in% c("+","-") ){
sign <- nodesign[n][[1]] # the extra [[1]] gets rid of the 'name' attribute.
}
if ( n == '*' ){
val <- if ( is.numeric(left(x)) ) left(x) else right(x)
var <- if ( is.name(left(x)) ) left(x) else right(x)
addcoef(deparse(var), sign*val, coef)
} else {
coefficients(left(x),1,coef)
coefficients(right(x),sign,coef)
}
addcoef('CONSTANT',0,coef)
return(unlist(as.list(coef)))
}
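# Illustrative sketch (not run): coefficients are collected in an environment,
# so the order of the output vector is not guaranteed.
# coefficients(quote(2*x + y - 3))   # c(CONSTANT = -3, x = 2, y = 1)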
|
/scratch/gouwar.j/cran-all/cranData/validate/R/parse.R
|
#' @include confrontation.R
NULL
# helper to make possiby large numbers more readable.
# x: nonnegative integer.
humanize <- function(x){
out <- character(length(x))
i <- x < 1000
out[i] <- sprintf("%3.0f",x[i])
i <- x>= 1000 & x < 1e5
out[i] <- sprintf("%2.1fk", x[i]/1000)
i <- x >= 1e5 & x < 1e6
out[i] <- sprintf("%3.0fk", x[i]/1000)
i <- x >= 1e6 & x < 1e8
out[i] <- sprintf("%2.1fM", x[i]/1e6)
  i <- x >= 1e8 & x < 1e9
out[i] <- sprintf("%3.0fM", x[i]/1e6)
i <- x >= 1e9 & x < 1e11
out[i] <- sprintf("%2.1fMM", x[i]/1e9)
  i <- x >= 1e11 & x < 1e12
out[i] <- sprintf("%3.0fMM", x[i]/1e9)
i <- x >= 1e12
out[i] <- sprintf("%g",x[i])
trimws(out)
}
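# Illustrative sketch (not run):
# humanize(c(999, 12345, 2.5e6))   # "999" "12.3k" "2.5M"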
# Add bar plot to current viewport.
# depending on the number of bars, white lines are drawn between them
# and the axes are annotated.
# m: a matrix with columns representing [fails, passes, NA]
fill_bars <- function(m, fill, col=fill, rulenames){
m <- prop.table(m,1)
n <- nrow(m)
h <- 1/n
y <- seq(0,n-1)*h
ii <- order(m[,1])
m <- m[ii,,drop=FALSE]
rulenames <- rulenames[ii]
grid::grid.rect(x=0, y= y, height=h, width=m[,1]
, just=c("left","bottom")
, gp=gpar(fill=fill[1],col=col[1]))
grid::grid.rect(x=m[,1],y=y, height=h, width=m[,2]
, just=c("left","bottom")
, gp=gpar(fill=fill[2], col=col[2]))
grid::grid.rect(x=1-m[,3],y=y, height=h, width=m[,3]
, just=c("left","bottom")
, gp=gpar(fill=fill[3],col=col[3]))
grid::grid.xaxis(at=c(0:5)/5, label=paste0(seq(0,100,by=20),"%"))
if (nrow(m)<50){
grid::grid.text(rulenames,x=unit(-0.3,"lines"), y=unit(y+h/2,"npc")
, just=c("right","center"))
for ( i in seq(2,length(y))){
grid::grid.abline(intercept=y[i], slope=0, gp=grid::gpar(col="white",lwd=0.5))
}
}
}
# Add legend to current viewport.
# m: a matrix with columns representing [fails, passes, NA]
add_vlegend <- function(m, width, labels, fill, color=fill){
ns <- humanize(colSums(m))
ns <- c(ns, humanize(sum(m)))
perc <- colSums(m)/sum(m)*100
perc <- c(perc,100)
for ( j in 1:4){
grid::pushViewport(grid::viewport(layout.pos.row=1, layout.pos.col=j))
# first row: boxes + labels
grid::grid.rect(x=grid::unit(0,"char")
, y=grid::unit(1.5,"lines") - grid::unit(1,"char")
, width=grid::unit(1,"char"), height=grid::unit(1,"char")
, just = c("left","bottom")
, gp = grid::gpar(fill=fill[j], col = color[j]))
grid::grid.text(labels[j]
, x = grid::unit(1.2,"char")
, y = grid::unit(0.9,"char")
, just=c("left","bottom"))
grid::upViewport()
# second row: numbers of items
grid::pushViewport(grid::viewport(layout.pos.row=2, layout.pos.col=j))
grid::grid.text(sprintf("%s",ns[j]),x=unit(1.2,"char"), just=c("left","bottom"))
grid::upViewport()
# third row: percentages
grid::pushViewport(grid::viewport(layout.pos.row=3, layout.pos.col=j))
grid.text(trimws(sprintf("%3.0f%%",perc[j]))
, x = grid::unit(1.2,"char"), just=c("left","bottom"))
upViewport()
}
}
# plot a validation object
# m: a matrix with columns representing [fails, passes, NA]
plot_validation <- function(m
, fill
, col=fill
, rulenames
, labels
, title
, xlab)
{
grid.newpage()
  # empirically, we can put about 2 characters in one line height.
left_margin = max(nchar(rulenames))/2
if (length(rulenames)>50) left_margin <- 1
main <- grid::plotViewport(c(5,left_margin,1,1), gp=gpar(fontsize=8))
grid::pushViewport(main)
fill_bars(m, fill=fill, col=col,rulenames=rulenames)
ii <- which.max(nchar(labels))
box_width <- grid::stringWidth(labels[ii])
grid::upViewport()
# add title
if (is.null(title)) title <- "Validation results by rule"
vptitle <- grid::viewport(x = main$x
, y = main$y + main$height
, width = grid::stringWidth(title)
, height = grid::unit(1,"lines")
, just = c("left","bottom"))
grid::pushViewport(vptitle)
grid::grid.text(title
, x = grid::unit(0,"npc")
, y = grid::unit(0.2,"char")
, just = c("left","bottom"))
grid::upViewport()
# add xlabel
if (is.null(xlab)) xlab <- "Items"
vpxlab <- grid::viewport(x = main$x
, y = main$y -grid::unit(2,"lines")
, width = grid::stringWidth(xlab)
, height = grid::unit(1,"lines")
, just = c("left","top"))
grid::pushViewport(vpxlab)
grid::grid.text(xlab
, x = grid::unit(0,"npc")
, y = grid::unit(0,"npc")
, just = c("left","bottom"))
grid::upViewport()
# add legend
ncols <- length(labels)
lgd <- grid::viewport(x = main$x+main$width
, y = grid::unit(0,"char")
, width = ncols*box_width
, height = grid::unit(3,"lines")
, just = c("right","bottom")
, gp = grid::gpar(fontsize=8)
, layout = grid::grid.layout(3, ncols
, widths = rep(grid::unit(1/ncols,"null"), ncols)
      , heights = rep(grid::unit(1/3,"null"), 3)) # one height per legend row
)
grid::pushViewport(lgd)
add_vlegend(m, width=box_width, labels=labels, fill=fill, color=col)
}
setGeneric("barplot")
#' Plot number of violations
#'
#' @param height an R object defining height of bars (here, a \code{validation} object)
#' @param ... parameters to be passed to \code{\link[graphics]{barplot}} but not
#' \code{height}, \code{horiz}, \code{border}, \code{las}, and \code{col}.
#' @param add_legend Display legend?
#' @param add_exprs Display rules?
#' @param colors Bar colors for validations yielding NA or a violation
#' @param topn If specified, plot only the top n most violated calls
#' @param order_by (single \code{character}) order bars decreasingly from top to bottom by the
#' number of fails, passes or \code{NA}'s.
#' @param stack_by (3-vector of \code{characters}) Stacking order for bar chart (left to right)
#'
#' @section Credits:
#' The default colors were generated with the \code{RColorBrewer} package of Erich Neuwirth.
#'
#' @return A list, containing the bar locations as in \code{\link[graphics]{barplot}}
#'
#' @aliases barplot,validation-method
#' @example ../examples/barplot.R
#' @export
#' @family validation-methods
setMethod('barplot',signature('validation'),
function(height, ..., order_by = c("fails","passes","nNA")
, stack_by = c("fails","passes","nNA")
, topn=Inf, add_legend=TRUE, add_exprs=TRUE
, colors=c(fails = "#FB9A99",passes = "#B2DF8A", nNA = "#FDBF6F")
){
warning("The 'barplot' method for confrontation objects is deprecated. Use 'plot' instead"
,call.=FALSE)
add_legend <- isTRUE(add_legend)
add_exprs <- isTRUE(add_exprs)
order_by <- match.arg(order_by)
stopifnot(topn>0,is.character(order_by),is.logical(add_legend),is.logical(add_exprs))
# get calls & values from confrontation object
calls <- sapply(height$._calls, deparse)
names(calls) <- names(height$._value)
val <- values(height,drop=FALSE)
# reorder colors to match stacking order
colors <- colors[stack_by]
# defaults for some optional parameters
args <- list(...)
argn <- names(args)
xlab <- args$xlab
if (is.null(xlab)) xlab <- "Items"
args <- args[argn != "xlab"]
if ( !'main' %in% argn ) args$main <- deparse(height$._call)
# values with different dimensionality are plotted in different rows.
# we turn xpd off so the legend can be placed outside
# narrow the margins for more efficient use of plotting region.
oldpar <- par(mar=c(4,4.1,3,1),xpd=TRUE, mfrow=c(length(val),1))
on.exit(par(oldpar))
# create plots, one row for each dimension structure
out <- lapply(seq_along(val), function(i){
y <- val[[i]]
count <- cbind(
nNA = colSums(is.na(y))
, fails = colSums(!y,na.rm=TRUE)
, passes = colSums(y,na.rm=TRUE)
)
labels <- calls[colnames(y)]
# how to order
I <- order(count[,order_by])
count <- count[I,,drop=FALSE]
labels <- labels[I]
if ( topn < Inf ){
I <- order(count[,order_by],decreasing=TRUE)
I <- 1:nrow(count) %in% I[1:min(topn,length(I))]
count <- count[I,,drop=FALSE]
labels <- labels[I]
}
if ( !'names.arg' %in% argn ) args$names.arg <- abbreviate(rownames(count))
arglist <- list(
height = t(count[,stack_by,drop=FALSE])
, horiz=TRUE
, border=NA
, las=1
, col=c(colors)
, xlab=""
)
# actual plot
p = do.call(barplot,c(arglist,args))[seq_along(labels)]
# Add labels & legend
if (add_exprs) text(0.1,p,labels,pos=4)
n <- length(labels)
bw <- if ( n > 1) (p[n]-p[n-1] - 0.2) else 0.2
ht <- if ( n > 1) p[n]+0.5*bw else 1.197
m <- sum(arglist$height[,1])
text(m-strwidth(xlab)/2, 0.2-0.16*ht,xlab)
if(add_legend){
legend(x=-0.04*m, y=0.2-0.12*ht
, legend = stack_by
, fill=colors
, border='black'
, bty='n'
, horiz=TRUE
)
}
p
})
par(xpd=FALSE)
invisible(out)
})
|
/scratch/gouwar.j/cran-all/cranData/validate/R/plot.R
|
#' @name retailers
#' @aliases SBS2000
#' @title data on Dutch supermarkets
#' @description
#' Anonymized and distorted data on revenue and cost structure for
#' 60 retailers. Currency is in thousands of Euros. There are two
#' data sets. The \code{SBS2000} dataset is equal to the \code{retailers}
#' data set except that it has a record identifier (called \code{id}) column.
#'
#' \itemize{
#' \item id: A unique identifier (only in SBS2000)
#' \item size: Size class (0=undetermined)
#' \item incl.prob: Probability of inclusion in the sample
#' \item staff: Number of staff
#' \item turnover: Amount of turnover
#' \item other.rev: Amount of other revenue
#' \item total.rev: Total revenue
#' \item staff.costs: Costs associated with staff
#' \item total.costs: Total costs made
#' \item profit: Amount of profit
#' \item vat: Turnover reported for Value Added Tax
#'}
#'
#' @family datasets
#'
#' @docType data
#' @format A csv file, one retailer per row.
NULL
#' @name SBS2000
#' @rdname retailers
NULL
#' @name samplonomy
#' @title Economic data on Samplonia
#' @description
#' Simulated economic time series representing GDP, Import, Export and
#' Balance of Trade (BOT) of Samplonia. Samplonia is a fictional island
#' invented by Jelke Bethlehem (2009). The country has 10 000 inhabitants.
#' It consists of two provinces: Agria and Induston. Agria is a rural
#' province consisting of the mostly fruit and vegetable producing district
#' of Wheaton and the mostly cattle producing Greenham. Induston has four
#' districts. Two districts with heavy industry named Smokeley and Mudwater.
#' Newbay is a young, developing district while Crowdon is where the rich
#' Samplonians retire. The current data set contains several time series
#' from Samplonia's national accounts system in long format.
#'
#' There are annual and quarterly time series on GDP, Import, Export and
#' Balance of Trade, for Samplonia as a whole, for each province and each
#' district. BOT is defined as Export-Import for each region and period;
#' quarterly figures are expected to add up to annual figures for each
#' region and measure, and subregions are expected to add up to their
#' super-regions.
#'
#' \itemize{
#' \item region: Region (Samplonia, one of its 2 provinces, or one of its 6 districts)
#' \item freq: Frequency of the time series
#' \item period: Period (year or quarter)
#' \item measure: The economic variable (gdp, import, export, balance)
#' \item value: The value
#' }
#'
#' The data set has been endowed with the following errors.
#' \itemize{
#' \item For Agria, the 2015 GDP record is not present.
#' \item For Induston, the 2018Q3 export value is missing (\code{NA})
#' \item For Induston, there are two different values for the 2018Q2 Export
#' \item For Crowdon, the 2015Q1 balance value is missing (\code{NA}).
#' \item For Wheaton, the 2019Q2 import is missing (\code{NA}).
#' }
#'
#' @family datasets
#'
#' @references
#' J. Bethlehem (2009), Applied Survey Methods: A Statistical Perspective. John
#' Wiley & Sons, Hoboken, NJ.
#'
#' @docType data
#' @format An RData file.
NULL
#' @name nace_rev2
#' @title NACE classification code table
#' @description
#' Statistical Classification of Economic Activities.
#'
#'
#'
#'\itemize{
#' \item Order \code{[integer]}
#' \item Level \code{[integer]} NACE level
#' \item Code \code{[character]} NACE code
#' \item Parent \code{[character]} parent code of \code{"Code"}
#' \item Description \code{[character]}
#' \item This_item_includes \code{[character]}
#' \item This_item_also_includes \code{[character]}
#' \item Rulings \code{[character]}
#' \item This_item_excludes \code{[character]}
#' \item Reference_to_ISIC_Rev._4 \code{[character]}
#'}
#'
#'
#' @family datasets
#' @seealso \code{\link{hierarchy}}
#' @docType data
#' @references
#' This codelist was downloaded on 2020-10-21 from
#' \href{https://ec.europa.eu/eurostat/ramon/nomenclatures/index.cfm?TargetUrl=LST_CLS_DLD&StrNom=NACE_REV2&StrLanguageCode=EN&StrLayoutCode=HIERARCHIC#}{Eurostat}
#'
#'
#' @format A csv file, one NACE code per row.
NULL
|
/scratch/gouwar.j/cran-all/cranData/validate/R/retailers.R
|
# RULE OBJECT -----------------------------------------------------------------
#' A rich expression
#'
#' @section Details:
#' Technically, \code{rule} is a \code{call} object endowed with extra
#' attributes such as a name, a label and a description description, creation
#' time and a reference to its origin. Rule objects are not for direct use by
#' users of the package, but may be of interest for developers of this package,
#' or packages depending on it.
#'
#' @section Exported S4 methods for \code{rule}:
#' \itemize{
#' \item{\code{show}}
#' \item{\code{\link{origin}}}
#' \item{\code{\link{label}}}
#' \item{\code{\link{description}}}
#' \item{\code{\link{created}}}
#' }
#'
#' @section Private S4 methods for \code{rule}:
#' \itemize{
#' \item{validating}
#' \item{linear}
#' \item{expr}
#' \item{is_tran_assign}
#' }
#'
#' @section See also:
#' \itemize{
#' \item{\code{\link{expressionset-class}}}
#' }
#'
#' @keywords internal
rule <- setClass("rule",
slots = c(
expr = "language" # MUST be a 'call'[*]
, name = "character"
, label = "character" # short description
, description = "character" # longer description
, origin = "character"
, created = "POSIXct"
, meta = "list"
)
, prototype = list(
expr = NULL
, name = character(0)
, label = character(0)
, description = character(0)
, origin = character(0)
, created = as.POSIXct(NA)
, meta = vector(mode = "list", length = 0)
)
)
#[*] Peeling off an expression always yields an object of 'mode' call, but not
# of 'type' call. For example:
#
# p <- parse(text="x + y")[[1]]
# class(p)
# [1] "call"
#
# p <- parse(text="if (A) B")[[1]]
# class(p)
# [1] "if"
# is.call(p)
# [1] TRUE
#
#
# S4 GENERICS -----------------------------------------------------------------
#' Get or set rule metadata
#'
#' Rule metadata are key-value pairs where the value is a simple (atomic)
#' string or number.
#'
#' @param x an R object
#' @param ... Arguments to be passed to other methods
#'
#' @name meta
#' @export
#' @examples
#'
#' v <- validator(x > 0, y > 0)
#'
#' # metadata is recycled over rules
#' meta(v,"foo") <- "bar"
#'
#' # assign metadata to a selection of rules
#' meta(v[1],"fu") <- 2
#'
#' # retrieve metadata as data.frame
#' meta(v)
#'
#' # retrieve metadata as list
#' meta(v,simplify=TRUE)
#'
setGeneric("meta",function(x,...) standardGeneric("meta"))
#' Get variable names
#'
#' Generic function that extracts names of variables occurring
#' in R objects.
#'
#'
#' @param x An R object
#' @param ... Arguments to be passed to other methods.
#'
#' @family expressionset-methods
#' @name variables
#' @export
setGeneric("variables", function(x,...) standardGeneric("variables"))
setGeneric("validating",function(x,...) standardGeneric('validating'))
setGeneric("linear",function(x,...) standardGeneric("linear"))
#' Get expressions
#'
#' @param x Object
#' @param ... options to be passed to other functions
#' @keywords internal
#' @export
#' @rdname expr
setGeneric("expr",def=function(x,...) standardGeneric("expr"))
# check for transient assignments (:=)
setGeneric("is_tran_assign", function(x,...) standardGeneric("is_tran_assign"))
#' Origin of rules
#'
#' A slot to store where the rule originated, e.g. a filename
#' or \code{"command-line"} for interactively defined rules.
#'
#'
#'
#' @param x an R object
#' @param ... Arguments to be passed to other methods
#' @return A \code{character} vector.
#' @example ../examples/properties.R
#' @export
setGeneric("origin",def=function(x,...) standardGeneric("origin"))
#' Rule label
#'
#' A short (typically two or three word) description of a rule.
#'
#' @param x an R object
#' @param ... Arguments to be passed to other methods
#' @return A \code{character} vector.
#' @example ../examples/properties.R
#' @export
setGeneric("label", function(x,...) standardGeneric("label"))
#' Rule description
#'
#' A longer (typically one-paragraph) description of a rule.
#'
#' @param x an R object
#' @param ... Arguments to be passed to other methods
#' @return A \code{character} vector.
#' @example ../examples/properties.R
#' @export
setGeneric("description", function(x,...) standardGeneric("description"))
#' Creation timestamp
#'
#'
#' @param x an R object
#' @param ... Arguments to be passed to other methods
#' @return A \code{POSIXct} vector.
#'
#' @example ../examples/properties.R
#' @export
setGeneric("created", function(x,...) standardGeneric("created"))
#' @rdname meta
#' @param name \code{[character]} metadata key
#' @param value Value to set
#' @export
setGeneric("meta<-", function(x, name, value) standardGeneric("meta<-"))
#' @rdname origin
#' @param value Value to set
#' @example ../examples/properties.R
#' @export
setGeneric("origin<-",function(x,value) standardGeneric("origin<-"))
#' @rdname label
#' @param value Value to set
#' @example ../examples/properties.R
#' @export
setGeneric("label<-",function(x,value) standardGeneric("label<-"))
#' @rdname description
#' @param value Value to set
#' @example ../examples/properties.R
#' @export
setGeneric("description<-",function(x,value) standardGeneric("description<-"))
#' @rdname created
#' @param value Value to set
#' @example ../examples/properties.R
#' @export
setGeneric("created<-",function(x,value) standardGeneric("created<-"))
# S4 METHODS ------------------------------------------------------------------
#' @rdname meta
setMethod("meta","rule", function(x,...){
x@meta
})
#' @rdname expr
setMethod("expr","rule",function(x,...) x@expr)
#' @describeIn variables Retrieve unique variable names
setMethod("variables","rule", function(x,...){
var_from_call(x@expr)
})
#' @describeIn variables Alias to \code{names.list}
setMethod('variables',signature('list'), function(x,...) names(x))
#' @describeIn variables Alias to \code{names.data.frame}
setMethod('variables',signature('data.frame'), function(x,...) names(x))
#' @describeIn variables Alias to \code{ls}
setMethod('variables',signature('environment'), function(x,...) ls(x))
# full print method for rules
setMethod("show", "rule", function(object){
cat(sprintf("\nObject of class %s.",class(object)))
nm <- slotNames(object)
n <- max(nchar(nm))
nm <- nm[nm != "meta"]
vl <- sapply(nm,function(x) paste0("",format(slot(object,x))))
fmt <- paste0("\n %-",n,"s: %s")
cat(sprintf(fmt,nm,vl))
# meta names and abbreviated type.
tp <- abbreviate(sapply(meta(object),class),3)
nm <- names(meta(object))
meta_str <- paste(sprintf("%s<%s>",nm,tp), collapse=", ")
cat(sprintf(fmt,"meta",meta_str))
})
setMethod("validating","rule", function(x,...){
validating_call(x@expr)
})
setMethod("linear","rule",function(x,...){
linear_call(x@expr)
})
#' @rdname origin
setMethod("origin","rule",function(x,...){
setNames(x@origin, x@name)
})
#' @rdname label
setMethod("label","rule",function(x,...) setNames(paste0("",x@label),x@name) )
#' @rdname description
setMethod("description", "rule", function(x,...) setNames( paste0("",x@description), x@name) )
#' @rdname created
setMethod("created", "rule", function(x,...) setNames( x@created,x@name) )
setMethod("is_tran_assign","rule", function(x){
x@expr[[1]] == ":="
})
#' @rdname meta
#' @export
setReplaceMethod("meta", c("rule","character"), function(x, name, value){
x@meta[[name]] <- value
x
})
#' @rdname names
#' @export
setReplaceMethod("names",c("rule","character"),function(x,value){
if (length(value) > 1){
stop("name must be 'character' of length 1")
}
x@name <- value
x
})
#' @rdname origin
#' @export
setReplaceMethod("origin",c("rule","character"),function(x,value){
if (length(value) > 1){
stop("origin must be 'character' of length 1")
}
x@origin <- value
x
})
#' @rdname label
#' @export
setReplaceMethod("label",c("rule","character"),function(x,value){
if (length(value) > 1){
stop("label must be 'character' of length 1")
}
x@label <- value
x
})
#' @rdname description
#' @export
setReplaceMethod("description",c("rule","character"),function(x,value){
if (length(value) > 1){
stop("description must be 'character' of length 1")
}
x@description <- value
x
})
#' @rdname created
#' @export
setReplaceMethod("created",c("rule","POSIXct"),function(x,value){
if (length(value) > 1){
stop("timestamp must be 'POSIXct' of length 1")
}
x@created <- value
x
})
# handy for rule to yaml/json
rule_to_list <- function(x, expr_as_text = TRUE, ...){
expr <- x@expr
if (expr_as_text){
expr <- deparse(expr, width.cutoff = 500L)
}
list(
expr=expr,
name = x@name,
label = x@label,
description = x@description,
created = x@created,
origin = x@origin,
meta = x@meta
)
}
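# Hedged usage sketch (values illustrative): build a minimal 'rule' object and
# convert it to a plain list, e.g. as a precursor to YAML/JSON serialization.
# r <- rule(expr = quote(x > 0), name = "r1", origin = "command-line",
#           created = Sys.time())
# rule_to_list(r)$expr
# # [1] "x > 0"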
|
/scratch/gouwar.j/cran-all/cranData/validate/R/rule.R
|
#' @importFrom utils capture.output
{}
# create reference object to store or ignore output
# of validation functions
output <- function(){
e <- new.env(parent=globalenv())
e$n <- 0 # validating calls
e$nrule <- 0 # rules executed
e$nwarn <- 0 # warnings thrown
e$nerrs <- 0 # errors thrown
e$nfail <- 0 # items with fails
e$nNA <- 0 # items with NA
re <- "^V[0-9]+" # regex to retrieve validations
e$add <- function(x){
e$n <<- e$n + 1
e[[sprintf("V%04d",e$n)]] <- x
# Administration of some of the content
e$nrule <- e$nrule + length(x)
e$nwarn <- e$nwarn + confrontation_nwarn(x)
e$nerrs <- e$nerrs + confrontation_nerrs(x)
s <- summary(x)
e$nfail <- e$nfail + sum(s[,"fails"])
e$nNA <- e$nNA + sum(s[,"nNA"])
}
e$gimme <- function(){
vr <- ls(e, pattern = re)
lapply(vr, function(i) e[[i]])
}
e
}
capture <- function(fun, env){
function(...){
out <- fun(...)
# Reconstruct the call that was captured
out$._call <- match.call(fun)
attr(out, "file") <- env$file
attr(out, "lines") <- c(fst = env$fst, lst=env$lst)
env$add(out)
out
}
}
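# Hedged sketch of how 'output' and 'capture' cooperate in
# run_validation_file() below: 'capture' wraps a confronting function so that
# every call is also recorded in the collector environment.
# o  <- output()
# cf <- capture(confront, o)  # cf() behaves like confront() ...
# # cf(mydata, myrules)       # ... but also stores its result via o$add()
# # o$gimme()                 # list of captured 'validation' objects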
#' Run a file with confrontations. Capture results
#'
#' A validation script is a regular R script, interspersed with \code{confront}
#' or \code{check_that} statements. This function will run the script file
#' and capture all output from calls to these functions.
#'
#'
#' @param file \code{[character]} location of an R file.
#' @param verbose \code{[logical]} toggle verbose output.
#'
#' @return \code{run_validation_file}: An object of class \code{validations}. This is
#' a \code{list} of objects of class \code{\link{validation}}.
#'
#' @family validations validation-files
#' @export
run_validation_file <- function(file, verbose=TRUE){
catf <- function(fmt, ...) if (verbose) cat(sprintf(fmt,...))
dir <- dirname(file)
oldwd <- getwd()
on.exit(setwd(oldwd))
setwd(dir)
# environment to capture confrontations
o <- output()
e <- new.env()
e$confront <- capture(confront, o)
e$check_that <- capture(check_that, o)
fname <- basename(file)
parsed <- parse(fname, keep.source=TRUE)
src <- attr(parsed, "srcref")
prfile <- substr(fname, 1,24)
prfile <- gsub(" ",".", x=sprintf("%-26s", prfile), fixed=TRUE)
o$file <- file
for ( i in seq_along(parsed) ){
expr <- parsed[[i]]
o$fst <- src[[i]][1]
o$lst <- src[[i]][3]
o$expr <- expr
out <- eval(expr, envir=e)
# print status for quick overview.
prefix <- sprintf("\rRunning %s %3d calls %3d rules", prfile,o$n, o$nrule)
postfix <- ""
if (o$nfail == 0 & o$nNA == 0){
postfix <- "OK"
} else {
if (o$nfail > 0) postfix <- sprintf("%d FAILS", o$nfail)
if (o$nNA > 0) postfix <- sprintf("%s %d NA", postfix, o$nNA)
if (o$nerrs > 0) postfix <- sprintf("%s %d ERRORS",postfix, o$nerrs)
if (o$nwarn > 0) postfix <- sprintf("%s %d WARNINGS",postfix, o$nwarn)
}
catf("%s %s",prefix,postfix)
}
catf("\n")
structure(o$gimme()
, class=c("validations", "list")
, call=match.call()
)
}
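# Hedged usage sketch (file name and rules are illustrative):
# writeLines(c(
#     'data(women)'
#   , 'confront(women, validator(height > 60, weight > 0))'
# ), "validate_women.R")
# v <- run_validation_file("validate_women.R")
# summary(v)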
#' @param dir \code{[character]} path to directory.
#' @param pattern \code{[character]} regular expression that selects validation files to run.
#'
#'
#'
#' @rdname run_validation_file
#' @return \code{run_validation_dir}: An object of class \code{validations}. This is
#' a \code{list} of objects of class \code{\link{validation}}.
#' @export
#' @family validation validation-files
run_validation_dir <- function(dir="./", pattern="^validate.+[rR]", verbose=TRUE){
if (!dir.exists(dir)) stop(sprintf("'%s' not found or is not a directory", dir))
files <- dir(path=dir, pattern=pattern, full.names=TRUE)
out <- list()
for ( f in files ){
  # accumulate the results of each validation file; pass 'verbose' along
  out <- c(out, run_validation_file(f, verbose=verbose))
}
structure(out
, class=c("validations", "list")
, call=match.call()
)
}
#' @param x An R object
#' @param ... Unused
#' @export
#' @return \code{print}: \code{NULL}, invisibly.
#' @rdname run_validation_file
print.validations <- function(x,...){
cat(sprintf("Object of class 'validations'\nCall:\n "))
print(attr(x,"call"))
cat("\n")
cat(sprintf("Confrontations: %d\n",sum(sapply(x,length))))
cat(sprintf("With fails : %d\n",sum(sapply(x, failed_confrontations)) ))
cat(sprintf("Warnings : %d\n", sum(sapply(x, function(y) length(warnings(y)) )) ))
cat(sprintf("Errors : %d\n", sum(sapply(x, function(y) length(errors(y)) )) ))
invisible(NULL)
}
#' @rdname run_validation_file
#' @param object An R object
#' @return \code{summary}: A data frame similar to the data frame returned
#' when summarizing a \code{\link{validation}} object. There are extra columns listing
#' each call, file and first and last line where the code occurred.
#'
#' @method summary validations
#' @export
summary.validations <- function(object, ...){
L <- lapply(object, function(x){
s <- summary(x)
s$call <- capture.output(print(x$._call))
s$file <- attr(x,"file")
s$fst <- attr(x,"lines")[1]
s$lst <- attr(x, "lines")[2]
s
})
do.call(rbind,L)
}
|
/scratch/gouwar.j/cran-all/cranData/validate/R/run_validation.R
|
# do we have rsdmx installed?
check_rsdmx <- function(){
if (!requireNamespace('rsdmx', quietly=TRUE)){
stop("You need to install 'rsdmx' to use SDMX functionality", call.=FALSE)
}
}
# download something from an SDMX registry and cache it.
download_sdmx <- local({
store <- new.env()
function(endpoint, resource, agency_id, resource_id, version="latest", ...){
check_rsdmx()
url <- file.path(endpoint, resource, agency_id, resource_id, version)
if (!url %in% ls(store)){
store[[url]] <- tryCatch(rsdmx::readSDMX(url, ...), error=function(e){
msg <- sprintf(
"retrieving data from \n'%s'\n failed with '%s'\n", url, e$message)
stop(msg, call.=FALSE)
})
}
out <- store[[url]]
attr(out, "url") <- url
out
}
})
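# Hedged usage sketch (requires 'rsdmx' and network access; identifiers are
# illustrative):
# cl <- download_sdmx(sdmx_endpoint("GLOBAL"), "codelist", "SDMX", "CL_FREQ")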
#' Get code list from an SDMX REST API endpoint.
#'
#' \code{sdmx_codelist} constructs a URL for \code{rsdmx::readSDMX} and
#' extracts the code IDs. Code lists are downloaded once and cached for the
#' duration of the R session.
#'
#' @inheritParams validator_from_dsd
#' @param what \code{[character]} Return a \code{character} with code id's, or
#' a data frame with all information.
#'
#' @family sdmx
#' @export
#'
#' @examples
#'
#'
#' # here we download the CL_ACTIVITY codelist from the ESTAT registry.
#' \dontrun{
#' codelist <- sdmx_codelist(
#' endpoint = "https://registry.sdmx.org/ws/public/sdmxapi/rest/"
#' , agency_id = "ESTAT"
#'  , resource_id = "CL_ACTIVITY"
#' )
#' }
#'
sdmx_codelist <- function(endpoint, agency_id, resource_id, version="latest", what=c("id","all")){
check_rsdmx()
what <- match.arg(what)
dl <- download_sdmx(endpoint, "codelist", agency_id, resource_id, version)
df <- as.data.frame(dl)
if (what=="all") df else df[,1]
}
#' Get code list from Eurostat SDMX repository
#'
#' \code{estat_codelist} gets a code list from the REST API provided at
#' \code{ec.europa.eu/tools/cspa_services_global/sdmxregistry}. It is a
#' convenience wrapper that calls \code{sdmx_codelist}.
#'
#' @rdname sdmx_codelist
#'
#' @export
#'
#' @examples
#' \dontrun{
#' estat_codelist("CL_ACTIVITY")
#' }
estat_codelist <- function(resource_id, agency_id = "ESTAT", version="latest"){
sdmx_codelist(
endpoint = sdmx_endpoint("ESTAT")
, agency_id = agency_id
, resource_id = resource_id
, version = version
)
}
#' Get code list from global SDMX repository
#'
#' \code{global_codelist} gets a code list from the REST API provided at
#' \code{https://registry.sdmx.org/webservice/data.html}. It is a convenience
#' wrapper that calls \code{sdmx_codelist}.
#'
#' @rdname sdmx_codelist
#' @export
#' @family sdmx
#' @examples
#' \dontrun{
#' global_codelist("CL_AGE")
#' global_codelist("CL_CONF_STATUS")
#' global_codelist("CL_SEX")
#' }
#' # An example of using SDMX information, downloaded from the SDMX global
#' # registry
#' \dontrun{
#' # economic data from the country of Samplonia
#' data(samplonomy)
#' head(samplonomy)
#'
#' rules <- validator(
#'   freq %in% global_codelist("CL_FREQ")
#' , value >= 0
#' )
#' cf <- confront(samplonomy, rules)
#' summary(cf)
#'
#' }
global_codelist <- function(resource_id, agency_id = "SDMX", version="latest"){
sdmx_codelist(
endpoint = sdmx_endpoint("GLOBAL")
, agency_id = agency_id
, resource_id = resource_id
, version = version
)
}
#' Get URL for known SDMX registry endpoints
#'
#' Convenience function storing URLs for SDMX endpoints.
#'
#' @param registry \code{[character]} name of the endpoint (case insensitive). If \code{registry}
#' is \code{NULL} (the default), the list of supported endpoints is returned.
#'
#' @family sdmx
#'
#' @examples
#' sdmx_endpoint()
#' sdmx_endpoint("ESTAT")
#' sdmx_endpoint("global")
#'
#' @export
sdmx_endpoint <- function(registry=NULL){
ENDPOINTS <- c(
"ESTAT" = "https://ec.europa.eu/tools/cspa_services_global/sdmxregistry/rest"
, "GLOBAL" = "https://registry.sdmx.org/ws/public/sdmxapi/rest"
)
if (is.null(registry)) return(ENDPOINTS)
registry <- toupper(registry)
out <- ENDPOINTS[registry]
if (is.na(out)) stop(sprintf("Unrecognized registry, use one of %s"
, paste(names(ENDPOINTS), collapse=", ")))
else out
}
#' Extract a rule set from an SDMX DSD file
#'
#' Data Structure Definitions contain references to code lists.
#' This function extracts those references and generates rules
#' that check data against code lists in an SDMX registry.
#'
#' @param endpoint \code{[character]} REST API endpoint of the SDMX registry
#' @param agency_id \code{[character]} Agency ID (e.g. \code{"ESTAT"})
#' @param resource_id \code{[character]} Resource ID (e.g. \code{"CL_ACTIVITY"})
#' @param version \code{[character]} Version of the code list.
#'
#' @return An object of class \code{\link{validator}}.
#' @family sdmx
#' @export
validator_from_dsd <- function(endpoint, agency_id, resource_id, version="latest"){
dsd <- download_sdmx(endpoint, "datastructure", agency_id, resource_id, version)
dimensions <- slot(slot(dsd, "datastructures")[[1]], "Components")
df <- as.data.frame(dimensions)
cl_vars <- !is.na(df$codelist)
template <- '%s %%in%% sdmx_codelist(endpoint = "%s", agency_id = "%s", resource_id = "%s", version = "%s")'
df1 <- df[cl_vars,]
rules <- data.frame(
rule = sprintf(template
, df1$conceptRef
, endpoint
, df1$codelistAgency
, df1$codelist
, version=df1$codelistVersion)
, name = paste0("CL_", df1$conceptRef)
, origin = attr(dsd,"url")
, description = sprintf("Code list from %s::%s %s", resource_id, df1$conceptRef, version)
)
validator(.data=rules)
}
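# Hedged usage sketch (the resource id is hypothetical; requires network access):
# rules <- validator_from_dsd(
#     endpoint    = sdmx_endpoint("GLOBAL")
#   , agency_id   = "ESTAT"
#   , resource_id = "SOME_DSD_ID"  # hypothetical DSD identifier
# )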
|
/scratch/gouwar.j/cran-all/cranData/validate/R/sdmx.R
|
#### GROUP DEFINITION SYNTAX --------------------------------------------------
## USER-FACING
# user-defined variable group:
# Returns a function that expands a call in which a group name is used
# to a list of calls.
#
# name: (possibly unquoted) name of the group.
# ...
var_group <- function(...){
L <- as.list(substitute(list(...))[-1])
function(){
if (length(L)==0) return(NULL)
out <- L[[1]]
L <<- L[-1]
out
}
}
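# Illustration (hedged): the returned closure hands out one group member per
# call, and NULL once the group is exhausted.
# f <- var_group(a, b)
# f()  # the name 'a'
# f()  # the name 'b'
# f()  # NULL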
## UNDER THE HOOD
# Replace occurrences of 'var_group(v1,v2,...,vn)'
# calls: 'list' of calls
# output: the same list of calls, but occurrences of 'var_group' have been expanded
#
expand_groups <- function(calls){
L <- list()
for (k in seq_along(calls)){
# this copies the name.
U <- calls[k]
# get reference
ref <- get_ref(U)
# find var groups, if any.
cl <- calls[[k]]
I <- which.call(cl,'var_group')
if (length(I) > 0){
i <- I[[1]]
i <- i[-length(i)]
f <- eval(cl[[i]])
U <- list()
while(!is.null(x <- f())){
u <- cl
u[[i]] <- x
U <- c(U,u)
}
names(U) <- paste0(names(calls)[k],".",seq_along(U))
U <- set_ref(U, rep(ref, length(U)))
}
L <- c(L,U)
}
L <- unlist(L)
# recurse to check if groups are still present.
if (length(L) > length(calls))
expand_groups(L)
else
L
}
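# Illustration (hedged sketch; names illustrative): a single rule over a
# variable group expands into one rule per group member, so
#   list(r = quote(var_group(a, b) > 0))
# becomes the equivalent of
#   list(r.1 = quote(a > 0), r.2 = quote(b > 0))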
#### ASSIGNMENT SUBSTITUTION --------------------------------------------------
## All under the hood
## Substitute assignments in subsequent calls
expand_assignments <- function(calls){
e <- new.env()
i <- 1
lapply(calls, function(x){
x <- substitute_assignments(x,e)
# add index into original list of calls.
attr(x,"reference") <- i
i <<- i+1
if(x[[1]] == ':=')
add_assignment(x,e)
x
}
)[!is.assignment(calls)]
}
substitute_assignments <- function(call,assignments){
for ( lhs in ls(assignments) ){
i <- which.call(call, lhs)
for ( j in i ){
call[[j]] <- assignments[[lhs]]
}
}
call
}
# add named assignment to environment
# - rhs is 'embraced' for substitution
add_assignment <- function(assignment, e){
e[[as.character(left(assignment))]] <- right(assignment)
assignment
}
# check whether a call is an assignment
is.assignment <- function(x) sapply(x,function(y) y[[1]] == ":=")
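# Illustration (hedged): a transient assignment is substituted into subsequent
# calls and then dropped, so the pair of calls
#   list(quote(m := mean(x)), quote(x > m))
# reduces to the single call  x > mean(x).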
|
/scratch/gouwar.j/cran-all/cranData/validate/R/sugar.R
|
#' @importFrom stats complete.cases
{}
#' Syntax to define validation or indicator rules
#'
#' A concise overview of the \code{validate} syntax.
#'
#' @name syntax
#'
#' @section Basic syntax:
#'
#' The basic rule is that an R-statement that evaluates to a \code{logical} is a
#' validating statement. This is established by static code inspection when
#' \code{validator} reads a (set of) user-defined validation rule(s).
#'
#' @section Comparisons:
#'
#' All basic comparisons, including \code{>, >=, ==, !=, <=, <}, \code{\%in\%}
#' are validating statements. When executing a validating statement, the
#' \code{\%in\%} operator is replaced with \code{\link[validate:vin]{\%vin\%}}.
#'
#' @section Logical operations:
#'
#' Unary logical operators `\code{!}', \code{all()} and \code{any()} define
#' validating statements. Binary logical operations including \code{&, &&, |,
#' ||} are validating when \code{P} and \code{Q} in e.g. \code{P & Q} are
#' validating. (Note that the short-circuit operators \code{&&} and \code{||}
#' only return the first logical value when, for \code{P && Q}, \code{P}
#' and/or \code{Q} are vectors.) Binary logical implication \eqn{P\Rightarrow Q}
#' (P implies Q) is implemented as \code{if ( P ) Q}. The latter is interpreted
#' as \code{!(P) | Q}.
#'
#' @section Type checking:
#'
#' Any function starting with \code{is.} (e.g. \code{is.numeric}) is a
#' validating expression.
#'
#' @section Text search:
#'
#' \code{grepl} is a validating expression.
#'
#' @section Functional dependencies:
#'
#' Armstrong's functional dependencies, of the form \eqn{A + B \to C + D} are
#' represented using the \code{~}, e.g. \code{A + B ~ C + D}. For example
#' \code{postcode ~ city} means, that when two records have the same value for
#' \code{postcode}, they must have the same value for \code{city}.
#'
#'
#' @section Reference the dataset as a whole:
#'
#' Metadata such as number of rows, columns, column names and so on can be
#' tested by referencing the whole data set with the '\code{.}'. For example,
#' the rule \code{nrow(.) == 15} checks whether there are 15 rows in the
#' dataset at hand.
#'
#' @section Uniqueness, completeness:
#'
#' These can be tested in principle with the 'dot' syntax. However, there are
#' some convenience functions: \code{\link{is_complete}}, \code{\link{all_complete}},
#' \code{\link{is_unique}}, and \code{\link{all_unique}}.
#'
#'
#' @section Local, transient assignment:
#' The operator `\code{:=}' can be used to set up local variables (during, for
#' example, validation) to save time (the rhs of an assignment is computed only
#' once) or to make your validation code more maintainable. Assignments work more
#' or less like common R assignments: they are only valid for statements coming
#' after the assignment and they may be overwritten. The result of computing the
#' rhs is not part of a \code{\link{confront}}ation with data.
#'
#'
#' @section Groups:
#' Often the same constraints/rules are valid for groups of variables.
#' \code{validate} allows for compact notation. Variable groups can be used
#' in-statement or by defining them with the \code{:=} operator.
#'
#' \code{validator( var_group(a,b) > 0 )}
#'
#' is equivalent to
#'
#' \code{validator(G := var_group(a,b), G > 0)}
#'
#' is equivalent to
#'
#' \code{validator(a>0,b>0)}.
#'
#' Using two groups results in the cartesian product of checks. So the statement
#'
#' \code{validator( f=var_group(c,d), g=var_group(a,b), g > f)}
#'
#' is equivalent to
#'
#' \code{validator(a > c, b > c, a > d, b > d)}
#'
#' @section File parsing:
#' Please see the cookbook on how to read rules from and write rules to file:
#'
#' \code{vignette("cookbook",package="validate")}
#'
#'
NULL
### CONSISTENT SET MEMBERSHIP --------------------------------------------------
#' A consistent set membership operator
#'
#' A set membership operator like \code{\link[base:match]{\%in\%}} that handles
#' \code{NA} more consistently with R's other logical comparison operators.
#'
#'
#' @details
#' R's basic comparison operators (almost) always return \code{NA} when one
#' of the operands is \code{NA}. The \code{\%in\%} operator is an exception.
#' Compare for example \code{NA \%in\% NA} with \code{NA == NA}: the first
#' results in \code{TRUE}, while the latter results in \code{NA} as expected.
#' The \code{\%vin\%} operator acts consistent with operators such as \code{==}.
#' Specifically, \code{NA} results in the following cases.
#' \itemize{
#' \item{For each position where \code{x} is \code{NA}, the result is \code{NA}.}
#' \item{When \code{table} contains an \code{NA}, each non-matched value in
#' \code{x} results in \code{NA}.}
#' }
#'
#'
#'
#' @param x vector or \code{NULL}: the values to be matched
#' @param table vector or \code{NULL}: the values to be matched against.
#'
#'
#' @examples
#' # we cannot be sure about the first element:
#' c(NA, "a") %vin% c("a","b")
#'
#' # we cannot be sure about the 2nd and 3rd element (but note that they
#' # cannot both be TRUE):
#' c("a","b","c") %vin% c("a",NA)
#'
#' # we can be sure about all elements:
#' c("a","b") %vin% character(0)
#'
#' @rdname vin
#' @export
"%vin%" <- function(x, table){
out <- match(x, table, nomatch=0) > 0
if (anyNA(table)){
out[!out] <- NA
}
out[is.na(x)] <- NA
out
}
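# Quick check of the documented NA semantics (hedged):
# c(NA, "a") %vin% c("a", "b")       # NA TRUE
# c("a", "b", "c") %vin% c("a", NA)  # TRUE NA NA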
#### FUNCTIONAL DEPENDENCIES --------------------------------------------------
# Internal function that tests for functional dependencies
`~` <- function(lhs, rhs){
Lvars <- all.vars(substitute(lhs))
Rvars <- all.vars(substitute(rhs))
condition <- do.call(paste, c(mget(Lvars, parent.frame()), sep="|"))
# use 'paste' (not 'paste0') so that the 'sep' argument is honored
consequent <- do.call(paste, c(mget(Rvars, parent.frame()), sep="|"))
cf <- .Call("R_fdcheck", condition, consequent)
cf == seq_along(cf)
}
# synonym of `~`, may be more understandable
`%->%` <- `~`
# returns a character vector of variables specified in L, matched in env.
# regexps are switched off until we can analyze the relation with literal variables better.
matchvars <- function(L,env){
if( length(L) == 0 ){
TRUE
} else {
# if (is.character(L[[1]])) {
# grep(pattern = L[[1]], x = ls(env), value = TRUE)
# } else {
sapply(L,as.character)
# }
}
}
#### UNIQUENESS ---------------------------------------------------------------
#' Test for uniqueness of records
#'
#' Test for uniqueness of columns or combinations of columns.
#'
#'
#' @param ... When used in a validation rule: a bare (unquoted) list of variable names.
#' When used directly, a comma-separated list of vectors of equal length.
#'
#' @return
#' For \code{is_unique} A logical vector that is \code{FALSE} for each record
#' that has a duplicate.
#'
#' @family cross-record-helpers
#'
#' @examples
#'
#' d <- data.frame(X = c('a','b','c','b'), Y = c('banana','apple','banana','apple'), Z=1:4)
#' v <- validator(is_unique(X, Y))
#' values(confront(d, v))
#'
#' # example with groupwise test
#' df <- data.frame(x=c(rep("a",3), rep("b",3)),y=c(1,1,2,1:3))
#' v <- validator(is_unique(y, by=x))
#' values(confront(df, v))
#'
#' @export
is_unique <- function(...){
d <- data.frame(...)
!duplicated(d) & !duplicated(d, fromLast=TRUE)
}
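# Direct-call illustration (hedged): each duplicated entry is flagged FALSE.
# is_unique(c(1, 2, 2, 3))  # TRUE FALSE FALSE TRUE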
#' @rdname is_unique
#' @return For \code{all_unique} a single \code{TRUE} or \code{FALSE}.
#' @export
all_unique <- function(...){
!anyDuplicated(data.frame(...))
}
## TODO: work out subtleties regarding NA.
# Count how often a value of value combination occurs
#
# For each row in a data frame, count how many similar rows there are. The
# \code{...} argument is used to specify which variables are taken into
# account. Missing values are counted as a unique value.
#
# @param ... When used in a validation rule: a bare (unquoted) list of variable names.
# When used directly, a comma-separated list of vectors of equal length.
#
#
# @return For each record it indicates how often the value or value
# combination in the arguments occur.
# @export
#
# @examples
#
# # for each element i the sequence (a,b,c,a,c,a)
# # compute how often it occurrs in the sequence.
# occurs(c("a","b","c","a","c","a"))
#
# # for each record in 'iris' how often do the same
# # (Sepal.Length, Species) combinations occur?
# with(iris, occurs(Sepal.Length, Species))
#
# # in the context of a validation. Check whether
# # each individual Species occurs at least 10 times.
#
# rules <- validator(occurs(Species) > 10)
# cf <- confront(iris, rules)
# summary(cf)
#
#occurs <- function(...){
# keys <- data.frame(...)
# tab <- as.data.frame(table(keys,useNA="ifany"))
# names(tab)[1:(ncol(tab)-1)] <- names(keys)
# keys$index <- seq_len(nrow(keys))
# out <- merge(x=keys, y=tab, all.x=TRUE, all.y=FALSE)
# out$Freq[order(out$index)]
#}
#' @rdname is_unique
#' @return For \code{n_unique} a single number representing the number
#' of unique values or value combinations in the arguments.
#' @export
n_unique <- function(...){
nrow(unique(data.frame(...)))
}
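# Direct-call illustration (hedged):
# n_unique(c("a", "a", "b"))  # 2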
#### MISSING DATA -------------------------------------------------------------
#' Test for completeness of records
#'
#' Utility function to make common tests easier.
#'
#' @inheritParams is_unique
#' @return
#' For \code{is_complete} A logical vector that is \code{FALSE} for each record
#' that has at least one missing value.
#'
#' @family cross-record-helpers
#' @examples
#' d <- data.frame(X = c('a','b',NA,'b'), Y = c(NA,'apple','banana','apple'), Z=1:4)
#' v <- validator(is_complete(X, Y))
#' values(confront(d, v))
#'
#' @export
is_complete <- function(...){
stats::complete.cases(data.frame(...))
}
#' @rdname is_complete
#' @return For \code{all_complete} a single \code{TRUE} or \code{FALSE}.
#' @export
all_complete <- function(...){
all(stats::complete.cases(data.frame(...)))
}
#### EXISTENCE ----------------------------------------------------------------
#' Test for (unique) existence
#'
#' Group records according to (zero or more) classifying variables. Test for
#' each group whether at least one (\code{exists}) or precisely one
#' (\code{exists_one}) record satisfies a condition.
#'
#' @param rule \code{[expression]} A validation rule
#' @param by A bare (unquoted) variable name or a list of bare variable
#' names, that will be used to group the data.
#' @param na.rm \code{[logical]} Toggle to ignore results that yield \code{NA}.
#'
#' @return A \code{logical} vector, with the same number of entries as there
#' are rows in the entire data under scrutiny. If a test fails, all records in
#' the group are labeled with \code{FALSE}.
#'
#' @family cross-record-helpers
#'
#' @examples
#' # Test whether each household has exactly one 'head of household'
#'
#' dd <- data.frame(
#' hhid = c(1, 1, 2, 1, 2, 2, 3 )
#' , person = c(1, 2, 3, 4, 5, 6, 7 )
#' , hhrole = c("h","h","m","m","h","m","m")
#' )
#' v <- validator(exists_one(hhrole=="h", hhid))
#' values(confront(dd, v))
#'
#' # same, but now with missing value in the data
#' dd <- data.frame(
#' hhid = c(1, 1, 2, 1, 2, 2, 3 )
#' , person = c(1, 2, 3, 4, 5, 6, 7 )
#' , hhrole = c("h",NA,"m","m","h","m","h")
#' )
#' values(confront(dd, v))
#'
#' # same, but now we ignore the missing values
#' v <- validator(exists_one(hhrole=="h", hhid, na.rm=TRUE))
#' values(confront(dd, v))
#'
#' @export
exists_any <- function(rule, by = NULL, na.rm=FALSE){
parent <- parent.frame()
# get the whole data set from the environment provided
# by 'confront'
. <- get(".", parent)
if (is.null(by)) by <- character(nrow(.))
rule <- as.expression(substitute(rule))
unsplit(lapply(split(., f=by), function(d){
res <- eval(rule, envir=d, enclos=parent)
ntrue <- sum(res, na.rm=na.rm)
rep(ntrue >= 1, nrow(d))
}), by)
}
#' @rdname exists_any
#' @export
exists_one <- function(rule, by=NULL, na.rm=FALSE){
parent <- parent.frame()
# get the whole data set from the environment provided
# by 'confront'
. <- get(".", parent)
if (is.null(by)) by <- character(nrow(.))
rule <- as.expression(substitute(rule))
unsplit(lapply(split(., f=by), function(d){
res <- eval(rule, envir=d, enclos=parent)
ntrue <- sum(res, na.rm=na.rm)
rep(ntrue == 1, nrow(d))
}), by)
}
|
/scratch/gouwar.j/cran-all/cranData/validate/R/syntax.R
|