#' Coerce Yamlet to Data Frame
#'
#' Coerces yamlet to data.frame. Columns are constructed in the order that
#' attributes are encountered, beginning with top-level 'item' (default).
#' Cell contents are calculated using
#' \code{getOption('yamlet_cell_value', yamlet::cell_value)} to which
#' is passed the cell-specific metadata as well as \code{sep} and \code{def}.
#'
#' @param x yamlet; see \code{\link{decorations}} and \code{\link{read_yamlet}}
#' @param row.names a name for a column to hold top-level names, or NULL to represent these as row.names
#' @param optional if TRUE and row.names is NULL, row.names will not be set
#' @param sep separator for multiple items within an attribute
#' @param def definition string: separator between items and their (preceding) names, if any
#' @param ... ignored
#' @export
#' @keywords internal
#' @return data.frame
#' @examples
#'
#' library(magrittr)
#'
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.yaml')
#' file %>% read_yamlet %>% explicit_guide %>% as.data.frame
#' file <- system.file(package = 'yamlet', 'extdata','phenobarb.csv')
#'
#' # phenobarb.yaml has conditional metadata that benefits
#' # from interpretation in the context of the data itself.
#' # Thus, we:
#' # * read the whole 'decorated' object (not just yaml),
#' # * resolve the 'guide' ambiguity,
#' # * extract the best-guess decorations, and
#' # * convert to data.frame.
#'
#' file %>% io_csv %>% resolve %>% decorations %>% as.data.frame
#'
as.data.frame.yamlet <- function(
x,
row.names = 'item',
optional = FALSE,
sep = '\n',
def = ': ',
...
){
x <- unclass(x)
nms <- names(x)
stopifnot(length(row.names) <= 1)
stopifnot(length(sep) %in% c(1, length(nms)))
stopifnot(length(def) %in% c(1, length(nms)))
if(length(sep) == 1)sep <- rep(sep, length(nms))
if(length(def) == 1)def <- rep(def, length(nms))
cols <- lapply(x, names)
cols <- unlist(cols)
cols <- unique(cols)
mat <- matrix(NA, nrow = length(nms), ncol = length(cols))
fun <- match.fun(getOption('yamlet_cell_value', yamlet::cell_value))
for(i in seq_along(nms)){
for(j in seq_along(cols)){
val <- NA_character_
nm <- nms[[i]]
col <- cols[[j]]
item <- x[[nm]]
if(col %in% names(item)){
val <- fun(item[[col]], sep = sep[[i]], def = def[[i]], ...)
}
mat[i,j] <- val
}
}
dat <- data.frame(mat)
names(dat) <- cols
if(length(row.names)){
dat[[row.names]] <- nms
if(ncol(dat) > 1) dat <- dat[, c(ncol(dat), 1:(ncol(dat)-1)),drop = FALSE]
} else {
if(!optional) row.names(dat) <- nms
}
dat
}
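# A hedged sketch of the customization point described above: any replacement
# for cell_value() must accept sep and def. The upper-casing renderer below
# is hypothetical, for illustration only.
# options(yamlet_cell_value = function(x, sep, def, ...){
#   toupper(cell_value(x, sep = sep, def = def))
# })
# file <- system.file(package = 'yamlet', 'extdata', 'quinidine.yaml')
# as.data.frame(read_yamlet(file))  # cells now rendered in upper case
# options(yamlet_cell_value = NULL) # restore the default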
#' Calculate a Cell Value
#'
#' Calculates a cell value.
#'
#' @param x list of character, possibly named
#' @param sep separator for multiple items within an attribute
#' @param def definition string: separator between items and their (preceding) names, if any
#' @export
#' @keywords internal
#'
cell_value <- function(x, sep = '\n', def = ': '){
nms <- names(x)
  y <- sapply(x, paste, collapse = '') # guarantee length-one character
if(!is.null(nms)) y <- paste0(nms, def, y)
z <- paste(y, collapse = sep)
z
}
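# Commented illustrations of cell_value(), traced against the logic above:
# cell_value(list(a = '1', b = '2'))  # "a: 1\nb: 2"
# cell_value(c('x', 'y'), sep = '; ') # "x; y"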
# ---- yamlet/R/data.frame.R ----
#' Redecorate a List-like Object
#'
#' Redecorates a list-like object.
#' Equivalent to \code{decorate( ..., overwrite = TRUE)}.
#' If \code{meta} is not supplied, an attempt will be made
#' to redecorate with existing decorations, if any.
#'
#' @param x object
#' @param meta file path for corresponding yamlet metadata, or a yamlet object
#' @param ... passed arguments
#' @param overwrite passed to \code{\link{decorate}}
#' @export
#' @keywords internal
#' @family decorate
#' @return a list-like object, typically data.frame
#' @examples
#' library(dplyr)
#' library(magrittr)
#' library(csv)
#' library(haven)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(as.csv(file))
#' x %>% select(Subject) %>% decorations
#' x %<>% redecorate('Subject: Patient Identifier')
#' x %>% select(Subject) %>% decorations
#'
#' # xpt may already have labels:
#'
#' dm <- 'extdata/dm.xpt.gz' %>%
#' system.file(package = 'yamlet') %>%
#' gzfile %>%
#' read_xpt
#'
#' dm %>% class
#' dm %>% decorations(AGE, SEX, RACE)
#'
#' # but technically not decorated, and labels persist poorly:
#' bind_rows(dm, dm) %>% decorations(AGE, SEX, RACE)
#'
#' # self-redecorating helps:
#' dm %<>% redecorate
#' bind_rows(dm, dm) %>% decorations(AGE, SEX, RACE)
redecorate <- function(x, meta = NULL, ..., overwrite = TRUE){
if(is.null(meta)){
alt <- try(decorations(x))
if(inherits(alt,'yamlet')) meta <- alt
}
decorate(x, meta = meta, ..., overwrite = overwrite)
}
#' Decorate a List-like Object
#'
#' Decorates a list-like object. Generic.
#' See \code{\link{decorate.character}}.
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family decorate
#' @return a list-like object, typically data.frame
#' @examples
#' library(csv)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(as.csv(file))
#' identical(decorate(as.csv(file)), decorate(file))
#' decorations(x)
#'
#'
decorate <- function(x,...)UseMethod('decorate')
#' Decorate Character
#'
#' Treats \code{x} as a file path. By default,
#' metadata is sought from a file with the same
#' base but the 'yaml' extension.
#'
#' @param x file path for table data
#' @param meta file path for corresponding yamlet metadata, or a yamlet object
#' @param read function or function name for reading x
#' @param ext file extension for metadata file, if relevant
#' @param ... passed to \code{read} (if accepted) and to \code{\link{as_yamlet.character}}
#' @return class 'decorated' 'data.frame'
#' @importFrom csv as.csv
#' @export
#' @family decorate
#' @family interface
#' @examples
#'
#' # find data file
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' file
#'
#' # find metadata file
#' meta <- system.file(package = 'yamlet', 'extdata','quinidine.yaml')
#' meta
#'
#' # decorate with explicit metadata reference
#' a <- decorate(file, meta)
#'
#' # rely on default metadata path
#' b <- decorate(file)
#'
#' # in this case: same
#' stopifnot(identical(a, b))
decorate.character <- function(
x,
meta = NULL,
...,
read = getOption('yamlet_import', as.csv),
ext = getOption('yamlet_extension', '.yaml')
# coerce = getOption('yamlet_coerce',FALSE),
){
stopifnot(length(x) == 1)
if(!file.exists(x))stop('could not find file ', x)
read <- match.fun(read)
args <- list(...)
# args <- args[names(args) %in% names(formals(read))] # debilitating
args <- c(list(x),args)
y <- do.call(read, args)
if(is.null(meta)){
meta <- sub('\\.[^.]*$','',x) # remove last dot and any trailing chars
meta <- paste0(meta, ext)
}
if(is.character(meta) & length(meta) == 1){
meta <- try(as_yamlet(meta,...))
}
if(!inherits(meta, 'yamlet')) stop('could not interpret meta: ', meta)
decorate(
y,
meta = meta,
# coerce = coerce,
...
)
}
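# A minimal sketch of supplying a non-default reader: read.csv is base R, and
# any function whose first argument is a file path should work here.
# file <- system.file(package = 'yamlet', 'extdata', 'quinidine.csv')
# d <- decorate(file, read = read.csv)
# class(d) # c('decorated', 'data.frame')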
#' Decorate List
#'
#' Decorates a list-like object. Takes metadata
#' in yamlet format and loads it onto corresponding
#' list elements as attributes.
#'
#' As of v0.8.8, attribute persistence is supported
#' by optionally coercing decorated items to class 'dvec'
#' where suitable methods exist. \code{persistence}
#' is false by default for the list method
#' but true by default for the data.frame method.
#' See also \code{\link{decorate.data.frame}}.
#'
#' @param x object inheriting from \code{list}
#' @param meta file path for corresponding yaml metadata, or a yamlet or something coercible to yamlet; an attempt will be made to guess the file path if x has a 'source' attribute (as for \code{\link[csv]{as.csv}})
#' @param ... passed to \code{\link{as_yamlet.character}} (by method dispatch)
#' @param ext file extension for metadata file, if relevant
#' @param persistence whether to coerce decorated items to 'dvec' where suitable method exists
#' @param overwrite whether to overwrite attributes that are already present (else give warning)
#' @return like x but with 'decorated' as first class element
#' @export
#' @keywords internal
#' @family decorate
#' @examples
#' example(decorate.data.frame)
#'
decorate.list <- function(
x,
meta = NULL,
...,
ext = getOption('yamlet_extension', '.yaml'),
persistence = getOption('yamlet_persistence', FALSE),
overwrite = getOption('yamlet_overwrite', FALSE)
){
if(is.null(meta)) meta <- attr(x, 'source')
if(is.null(meta)) stop('could not guess metadata location; supply meta')
m <- try(silent = TRUE, as_yamlet(meta))
if(inherits(m, 'yamlet')) meta <- m
if(is.character(meta) & length(meta) == 1){
meta <- sub('\\.[^.]*$','',meta) # remove last dot and any trailing chars
meta <- paste0(meta, ext)
meta <- try(as_yamlet(meta, ...))
}
if(!inherits(meta, 'yamlet')) stop('could not interpret meta: ', meta)
for(item in names(x)){ # if list has no names, nothing happens
if(item %in% names(meta)){ # if list has names, name '' should not be reached
val <- meta[[item]]
for(attrb in names(val)){
if(attrb == ''){ # warn if name is ''
warning('ignoring anonymous attribute for ', item)
next
}
if(attrb %in% names(attributes(x[[item]]))){
if(!overwrite){
if(
!identical( # avoid moot warnings
attr(x[[item]], attrb), # current
val[[attrb]] # proposed
)
){
warning('not overwriting ', attrb, ' attribute of ', item)
}
next # avoid all overwrites, moot or otherwise
}
}
attr(x[[item]], attrb) <- val[[attrb]]
# since this is really the only place we
# assign an attribute, it is a good place
# to coerce to dvec. A bit redundant
# if more than one attribute,
# but safer and perhaps not too expensive.
if(persistence){
try(silent = TRUE, x[[item]] <- as_dvec(x[[item]]))
}
}
}
}
# as of 0.6.2, this is the only constructor for 'decorated'
class(x) <- union('decorated', class(x))
x
}
#' Decorate Data Frame
#'
#' Decorates a data.frame. Expects metadata in yamlet
#' format, and loads it onto columns as attributes.
#'
#' As of v0.8.8, the data.frame method for decorate()
#' coerces affected columns using \code{\link{as_dvec}}
#' if \code{persistence} is true and a suitable method
#' exists. 'vctrs' methods are implemented for class
#' \code{dvec} to help attributes persist during
#' tidyverse operations. Details are described in
#' \code{\link{c.dvec}}. Disable this functionality
#' with \code{options(yamlet_persistence = FALSE)}.
#' @param x data.frame
#' @param meta file path for corresponding yaml metadata, or a yamlet; an attempt will be made to guess the file path if x has a 'source' attribute
#' @param ... passed to \code{\link{decorate.list}}
#' @param persistence whether to coerce decorated columns to 'dvec' where suitable method exists
#' @return class 'decorated' 'data.frame'
#' @export
#' @family interface
#' @family decorate
#' @seealso decorate.list
#' @examples
#'
#' # find data path
#' library(csv)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' file
#' dat <- as.csv(file) # dat now has 'source' attribute
#'
#' # use source attribute to find metadata
#' a <- decorate(as.csv(file))
#'
#' # supply metadata path (or something close) explicitly
#' b <- decorate(dat, meta = file)
#'
#' # these are equivalent
#' stopifnot(identical(a, b))
decorate.data.frame <- function(
x,
meta = NULL,
...,
persistence = getOption('yamlet_persistence', TRUE)
)decorate.list(
x,
meta = meta,
...,
persistence = persistence
)
#' Retrieve Decorations
#'
#' Retrieve the decorations of something.
#' Generic, with method \code{\link{decorations.data.frame}}.
#'
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family decorate
#' @return see methods
#' @examples
#' library(csv)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(as.csv(file))
#' decorations(x)
decorations <- function(x,...)UseMethod('decorations')
#' Retrieve Decorations for Data Frame
#'
#' Retrieve the decorations of a data.frame; i.e., the metadata
#' used to decorate it. Returns a list with same names as the data.frame.
#' By default, 'class' and 'level' attributes are excluded from the result,
#' as you likely don't want to manipulate these independently.
# As of 0.6.1, dropping coerce argument because of conflicts with classified().
# former help:
# Consider carefully whether the default handling of factor levels
# (see \code{coerce} argument) is appropriate for your application.
#'
#' @param x data.frame
#' @param ... optional unquoted column names to limit output (passed to \code{\link[dplyr]{select}})
# @param coerce logical: whether to coerce factor levels to guide; alternatively, a key for the levels
#' @param exclude_attr attributes to remove from the result
#' @export
#' @family decorate
#' @return named list of class 'yamlet'
#' @examples
#' # prepare a decorated data.frame
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(file)
#'
#' # retrieve the decorations
#' decorations(x, Subject, time, conc)
decorations.data.frame <- function(
x,
...,
# coerce = getOption('yamlet_coerce_decorations', FALSE),
exclude_attr = getOption('yamlet_exclude_attr', c('class','levels'))
# and possibly comment, dim, dimnames, names, row.names, and tsp
# see help for attributes
){
# coerce <- FALSE
stopifnot(length(exclude_attr) == 0 || is.character(exclude_attr))
nms <- selected(x, ...)
x <- x[, as.character(nms), drop = FALSE] # selected may have incompatible class path
out <- lapply(x, attributes)
levs_key <- 'guide'
# if(!is.logical(coerce)){
# if(is.character(coerce))
# if(length(coerce) == 1){
# levs_key <- coerce
# coerce <- TRUE
# }
# }
# if(!is.logical(coerce)){
# warning('coerce value not logical')
# }else{
# if(coerce){
# for(i in seq_along(out)){
# if('class' %in% names(out[[i]])){
# if(any(out[[i]]$class == 'factor')){ # factor or ordered factor
# out[[i]]$class <- NULL
# names(out[[i]])[names(out[[i]]) == 'levels'] <- levs_key
# }
# }
# }
# }
# }
for(i in exclude_attr){
for(j in names(out)){
if(i %in% names(out[[j]])) out[[j]][[i]] <- NULL
}
}
class(out) <- 'yamlet'
out
}
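# A brief sketch of widening the result (x as in the example above): clearing
# exclude_attr retains even 'class' and 'levels', which the default suppresses.
# decorations(x, Race, exclude_attr = character(0))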
# Print Decorations
#
# Prints decorations. Coerces to yamlet and prints result.
#
# @param x decorations, i.e. a named list of class 'decorations'
# @param ... ignored
# @export
# @family decorate
# @keywords internal
# @return invisible x (yamlet)
# @examples
# example(decorations.data.frame)
# print.decorations <- function(x, ...){
# x <- as_yamlet(x)
# print(x)
# }
# there is no actual class 'decorations' so methods unnecessary at 0.6.2.
#' Coerce to Decorated
#'
#' Coerces to class 'decorated'. Generic, with method \code{\link{as_decorated.default}}.
#'
#' @param x object
#' @param ... passed arguments
#' @export
#' @family decorate
#' @keywords internal
#' @return decorated
#' @examples
#' class(Puromycin)
#' class(as_decorated(Puromycin))
as_decorated <- function(x, ...)UseMethod('as_decorated')
#' Coerce to Decorated by Default
#'
#' Coerces to class 'decorated' by decorating (by default) with an empty list.
#'
#' @param x object
#' @param meta see \code{\link{decorate.list}}
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family decorate
#' @return decorated
#' @examples
#' class(Puromycin)
#' class(as_decorated(Puromycin))
as_decorated.default <- function(x, meta = '-', ...){
decorate(x, meta = meta, ...)
}
# @aliases decorations.data.frame
# @keywords internal
#decorations.data.frame
# ---- yamlet/R/decorate.R ----
# See singularity.R for [.decorated
# Subset Decorated
#
# Subsets 'decorated', retaining attributes.
# @param x decorated
# @param ... passed to next method
# @export
# @keywords internal
# @family decorated
# @return decorated
# @examples
# a <- as_decorated(as.list(setNames(letters[1:3], LETTERS[1:3])))
# attr(a$B, 'label') <- 'foo'
# a <- a[1:3]
# attributes(a)
# `[.decorated` <- function(x, ...)as_decorated(NextMethod())
#' Assign Subset of Decorated
#'
#' Assigns subset of decorated, retaining attributes.
#' @param x decorated
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family decorated
#' @return decorated
#' @examples
#' a <- as_decorated(as.list(setNames(letters[1:3], LETTERS[1:3])))
#' a[2:3] <- 'a'
#' str(a)
#' class(a)
`[<-.decorated` <- function(x, ..., value)as_decorated(NextMethod())
#' Element-select Decorated
#'
#' Selects element of decorated, retaining attributes.
#' @param x decorated
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family decorated
#' @return decorated
#' @examples
#' a <- as_decorated(as.list(setNames(letters[1:3], LETTERS[1:3])))
#' a[[2]]
`[[.decorated` <- function(x, ...)NextMethod()
#' Assign Element of Decorated
#'
#' Assigns element of decorated, retaining attributes.
#' @param x decorated
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family decorated
#' @return decorated
#' @examples
#' a <- as_decorated(as.list(setNames(letters[1:3], LETTERS[1:3])))
#' a[[2]]
#' a[[2]] <- 'c'
#' class(a)
#'
`[[<-.decorated` <- function(x, ..., value)as_decorated(NextMethod())
#' Assign Names of Decorated
#'
#' Assigns names of decorated, retaining attributes.
#' @param x decorated
#' @param value passed to next method
#' @export
#' @keywords internal
#' @family decorated
#' @return decorated
#' @examples
#' a <- as_decorated(as.list(setNames(letters[1:3], LETTERS[1:3])))
#' a[[2]]
#' names(a[[2]]) <- 'c'
#' class(a)
#'
`names<-.decorated` <- function(x, value)as_decorated(NextMethod())
# ---- yamlet/R/decorated.R ----
#' Desolve Guide
#'
#' Un-resolves explicit versions of 'guide' to implicit usage.
#' Generic, with methods
#' \code{\link{desolve.decorated}},
#' \code{\link{desolve.classified}}, and
#' \code{\link{desolve.dvec}}.
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return see methods
#' @family resolve
#' @examples
#' example(resolve.decorated)
desolve <- function(x, ...)UseMethod('desolve')
#' Desolve Guide for Decorated
#'
#' Un-resolves explicit usage of default key 'guide' to
#' implicit usage for 'decorated' class.
#' Simply calls
#' \code{\link{drop_title}},
#' \code{\link{unclassified}},
#' and \code{\link{implicit_guide}}.
#' @param x decorated
#' @param ... passed to \code{\link{drop_title}}, \code{\link{unclassified}}, and \code{\link{implicit_guide}}
#' @export
#' @return decorated
#' @family resolve
#' @family interface
#' @examples
#' library(magrittr)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(file)
#'
#' # this is how Age, glyco, Race look when resolved
#' x %>% resolve %>% decorations(Age, glyco, Race)
#'
#' # we can resolve two of them and then 'unresolve' all of them
#' x %>% resolve(glyco, Race) %>% desolve %>% decorations(Age, glyco, Race)
desolve.decorated <- function(x, ...){
x <- drop_title(x, ...)
x <- unclassified(x, ...)
x <- implicit_guide(x, ...)
x
}
#' Desolve Guide for Classified
#'
#' Un-resolves explicit usage of default key 'guide' to
#' implicit usage for class 'classified'.
#' Calls
#' \code{\link{drop_title}} (a non-action by default),
#' \code{\link{unclassified}},
#' followed by \code{\link{implicit_guide}}.
#' @param x classified
#' @param ... passed to \code{\link{drop_title}}, \code{\link{unclassified}}, and \code{\link{implicit_guide}}
#' @export
#' @return dvec
#' @family resolve
#' @family classified
#' @examples
#' library(magrittr)
#' x <- as_dvec(
#' 4:6,
#' guide = list(a = 4L, b = 5L, c = 6L)
#' )
#'
#' # untouched
#' x %>% str
#'
#' # resolved
#' x %>% resolve %>% str
#'
#' # resolved and desolved
#' x %>% resolve %>% desolve %>% str
desolve.classified <- function(x, ...){
x <- drop_title(x, ...)
x <- unclassified(x, ...)
x <- implicit_guide(x, ...)
x
}
#' Desolve Guide for Decorated Vector
#'
#' Un-resolves explicit usage of default key 'guide' to
#' implicit usage for class dvec.
#' Calls
#' \code{\link{drop_title}},
#' \code{\link{unclassified}},
#' and \code{\link{implicit_guide}}.
#' @param x dvec
#' @param ... passed to \code{\link{drop_title}}, \code{\link{unclassified}}, and \code{\link{implicit_guide}}
#' @export
#' @return dvec
#' @family resolve
#' @family dvec
#' @keywords internal
#' @examples
#' library(magrittr)
#' x <- as_dvec(4:6)
#' attr(x, 'guide') <- 'kg'
#' x %>% str
#' x %>% resolve %>% str
#' x %>% resolve %>% desolve %>% str
desolve.dvec <- function(x, ...){
x <- drop_title(x, ...)
x <- unclassified(x, ...)
x <- implicit_guide(x, ...)
x
}
#' Desolve Data Frame
#'
#' Desolves data.frame.
#' Coerces first using as_decorated().
#'
#' @param x data.frame
#' @param ... ignored
#' @export
#' @keywords internal
#' @return decorated
#' @family resolve
#' @examples
#' head(desolve(Theoph))
desolve.data.frame <- function(x, ...){
#desolve(as_decorated(x, ...), ...)
# @ 1.0.3: above, first use of dots
# can pass anonymous args to decorate.list,
# which may understand one of them
# as 'meta' and issue an error
desolve(as_decorated(x), ...)
}
# ---- yamlet/R/desolve.R ----
# Coerce to Decorated
#
# Coerces to decorated by subclassing.
# @param x object of dispatch
# @param ... passed arguments
# @family dplyr
# @export
# @keywords internal
# as_decorated <- function(x, ...){
# class(x) <- union('decorated', class(x))
# x
# }
#' Slice Decorated
#'
#' Preserves class when slicing decorated.
#' @importFrom dplyr slice
#' @export
#' @keywords internal
#' @family dplyr
#' @inheritParams dplyr::slice
slice.decorated <- function(.data, ..., .preserve = FALSE){
as_decorated(NextMethod())
}
# For filter.decorated, see singularity.R
# Filter Decorated
#
# Preserves class when filtering decorated.
# @param .data see \code{\link[dplyr]{filter}}
# @param ... see \code{\link[dplyr]{filter}}
# @param preserve see \code{\link[dplyr]{filter}}
# @importFrom dplyr filter
# @export
# @keywords internal
# @family dplyr
# filter.decorated <- function(.data, ..., .preserve = FALSE){
# as_decorated(NextMethod())
# }
#' Select Decorated
#'
#' Preserves class when selecting decorated.
#' @importFrom dplyr select
#' @export
#' @keywords internal
#' @family dplyr
#' @inheritParams dplyr::select
select.decorated <- function(.data, ...){
as_decorated(NextMethod())
}
#' Arrange Decorated
#'
#' Preserves class when arranging decorated.
#' @importFrom dplyr arrange
#' @export
#' @keywords internal
#' @family dplyr
#' @inheritParams dplyr::arrange
arrange.decorated <- function(.data, ...){
as_decorated(NextMethod())
}
#' Group_by Decorated
#'
#' Preserves class when grouping decorated.
#' @importFrom dplyr group_by
#' @importFrom dplyr group_by_drop_default
#' @export
#' @keywords internal
#' @family dplyr
#' @inheritParams dplyr::group_by
group_by.decorated <- function(
.data, ...,
add = FALSE, .drop = group_by_drop_default(.data)){
as_decorated(NextMethod())
}
#' Ungroup Decorated
#'
#' Preserves class when ungrouping decorated.
#' @importFrom dplyr ungroup
#' @export
#' @keywords internal
#' @family dplyr
#' @inheritParams dplyr::ungroup
ungroup.decorated <- function(
x, ...
){
as_decorated(NextMethod())
}
#' Mutate Decorated
#'
#' Preserves class when mutating decorated.
#' @importFrom dplyr mutate
#' @export
#' @keywords internal
#' @family dplyr
#' @inheritParams dplyr::mutate
mutate.decorated <- function(.data, ...){
as_decorated(NextMethod())
}
#' Summarize Decorated
#'
#' Preserves class when summarizing decorated.
#' @importFrom dplyr summarize
#' @export
#' @keywords internal
#' @family dplyr
#' @inheritParams dplyr::summarize
summarize.decorated <- function(.data, ...){
as_decorated(NextMethod())
}
#' Summarise Decorated
#'
#' Preserves class when summarising decorated.
#' @importFrom dplyr summarise
#' @export
#' @keywords internal
#' @family dplyr
#' @inheritParams dplyr::summarise
summarise.decorated <- function(.data, ...){
as_decorated(NextMethod())
}
#' Semi_join Decorated
#'
#' Preserves class when joining decorated.
#' @importFrom dplyr semi_join
#' @export
#' @keywords internal
#' @family dplyr
#' @inheritParams dplyr::semi_join
semi_join.decorated <- function(x, y, by = NULL, copy = FALSE, ...){
as_decorated(NextMethod())
}
#' Anti_join Decorated
#'
#' Preserves class when joining decorated.
#' @importFrom dplyr anti_join
#' @export
#' @keywords internal
#' @family dplyr
#' @inheritParams dplyr::anti_join
anti_join.decorated <- function(x, y, by = NULL, copy = FALSE, ...){
as_decorated(NextMethod())
}
#' Full_join Decorated
#'
#' Preserves class when joining decorated.
#' @importFrom dplyr full_join
#' @export
#' @keywords internal
#' @family dplyr
#' @inheritParams dplyr::full_join
full_join.decorated <- function(
x, y, by = NULL,
copy = FALSE, suffix = c(".x", ".y"), ...){
as_decorated(NextMethod())
}
#' Inner_join Decorated
#'
#' Preserves class when joining decorated.
#' @importFrom dplyr inner_join
#' @export
#' @keywords internal
#' @family dplyr
#' @inheritParams dplyr::inner_join
inner_join.decorated <- function(
x, y, by = NULL,
copy = FALSE, suffix = c(".x", ".y"), ...){
as_decorated(NextMethod())
}
#' Left_join Decorated
#'
#' Preserves class when joining decorated.
#' @importFrom dplyr left_join
#' @export
#' @keywords internal
#' @family dplyr
#' @inheritParams dplyr::left_join
left_join.decorated <- function(x, y, by = NULL,
copy = FALSE, suffix = c(".x", ".y"), ...){
as_decorated(NextMethod())
}
#' Right_join Decorated
#'
#' Preserves class when joining decorated.
#' @importFrom dplyr right_join
#' @export
#' @keywords internal
#' @family dplyr
#' @inheritParams dplyr::right_join
right_join.decorated <- function(x, y, by = NULL,
copy = FALSE, suffix = c(".x", ".y"), ...){
as_decorated(NextMethod())
}
#' @export
#' @keywords internal
dplyr::filter
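# A minimal sketch showing the point of these methods: the 'decorated' class
# (and thus column attributes) survives a typical dplyr pipeline.
# library(dplyr)
# library(magrittr)
# file <- system.file(package = 'yamlet', 'extdata', 'quinidine.csv')
# x <- decorate(file)
# x %>% filter(!is.na(conc)) %>% select(Subject, conc) %>% class
# # c('decorated', 'data.frame')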
# ---- yamlet/R/dplyr.R ----
#' Drop Title
#'
#' Drop title attribute.
#' Generic, with methods
#' \code{\link{drop_title.default}},
#' \code{\link{drop_title.decorated}}, and
#' \code{\link{drop_title.dvec}}.
#'
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family deprecated
#' @family labels
#' @return see methods
#' @examples
#' # see methods
drop_title <- function(x, ...)UseMethod('drop_title')
#' Drop Title for Decorated
#'
#' Drops title for 'decorated' class.
#' Limits scope to requested variables, and then calls
#' class-specific methods for each.
#'
#' @param x object
#' @param ... optional names of variables to limit scope
#' @export
#' @keywords internal
#' @family labels
#' @return decorated
#' @examples
#' library(magrittr)
#' x <- data.frame(length = 1:10)
#' x %>%
#' decorate('length: [ Length, mm ]') %>%
#' resolve %>%
#' desolve %>%
#' decorations
#'
drop_title.decorated <- function(
x,
...
){
vars <- selected(x, ...)
args <- named(...)
for(var in vars){
# pass only named arguments
x[[var]] <- do.call(drop_title, c(list(x[[var]]),args))
}
x
}
#' Drop Title by Default
#'
#' Drops title by default.
#' To be specific: this is the default method
#' for the generic function \code{\link{drop_title}},
#' and it actually does nothing. Individual methods
#' are written for those classes where 'drop title'
#' behavior is expected.
#'
#' @param x object
#' @param ... ignored
#' @export
#' @keywords internal
#' @family labels
#' @return same as x
drop_title.default <- function(
x,
...
){
return(x)
}
#' Drop Title for Decorated Vector
#'
#' Drops title for decorated vectors.
#' If option \code{with_title} is TRUE
#' and x has a 'units' attribute,
#' it removes the title attribute. See also
#' \code{\link{make_title}} for coordinated use.
#' @param x dvec
#' @param ... ignored arguments
#' @param with_title whether to drop title
#' @export
#' @keywords internal
#' @family labels
#' @return dvec
#' @examples
#' library(magrittr)
#' 1 %>%
#' as_dvec(label = 'length', guide = 'mm') %>%
#' resolve %>%
#' desolve
drop_title.dvec <- function(
x,
...,
with_title = getOption('yamlet_with_title', TRUE)
){
stopifnot(length(with_title) == 1)
with_title <- as.logical(with_title)
if(with_title & 'units' %in% names(attributes(x))){
attr(x, 'title') <- NULL
}
x
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/drop_title.R |
#' Coerce to Decorated Vector
#'
#' Coerces to Decorated Vector. Generic, with methods
#' \code{\link{as_dvec.logical}},
#' \code{\link{as_dvec.integer}},
#' \code{\link{as_dvec.numeric}},
#' \code{\link{as_dvec.complex}}, and
#' \code{\link{as_dvec.character}}.
#' @param x object of dispatch
#' @param ... ignored arguments
#' @export
#' @keywords internal
#' @family dvec
#' @return dvec
#' @examples
#' as_dvec(0)
as_dvec <- function(x, ...)UseMethod('as_dvec')
#' Coerce Logical to Decorated Vector
#'
#' Coerces logical to decorated vector.
#' Assigns class 'dvec' and any named attributes in dots.
#' @param x logical
#' @param ... attributes to assign
#' @export
#' @keywords internal
#' @family dvec
#' @return dvec
#' @examples
#' as_dvec(c(FALSE, TRUE))
as_dvec.logical <- function(
x,
...
){
at <- list(...)
nms <- names(at)
nms <- nms[nms != '']
for(nm in nms)attr(x, nm) <- at[[nm]]
class(x) <- 'dvec'
x
}
#' Coerce Integer to Decorated Vector
#'
#' Coerces integer to decorated vector. Assigns class 'dvec' and any named attributes in dots.
#' @param x integer
#' @param ... attributes to assign
#' @export
#' @keywords internal
#' @family dvec
#' @return dvec
#' @examples
#' as_dvec(1:3)
as_dvec.integer <- function(x, ...){
at <- list(...)
nms <- names(at)
nms <- nms[nms != '']
for(nm in nms)attr(x, nm) <- at[[nm]]
class(x) <- 'dvec'
x
}
#' Coerce Numeric to Decorated Vector
#'
#' Coerces numeric to decorated vector. Assigns class 'dvec' and any named attributes in dots.
#' @param x numeric
#' @param ... attributes to assign
#' @export
#' @keywords internal
#' @family dvec
#' @return dvec
#' @examples
#' as_dvec(c(10.3, 1.2))
#' as_dvec(1, label = 'yin')
#' as_dvec(structure(1, label = 'yin'))
#' as_dvec(structure(1, label = 'yin'), label = 'yang')
#'
as_dvec.numeric <- function(x, ...){
at <- list(...)
nms <- names(at)
nms <- nms[nms != '']
for(nm in nms)attr(x, nm) <- at[[nm]]
class(x) <- 'dvec'
x
}
#' Coerce Complex to Decorated Vector
#'
#' Coerces complex to decorated vector. Assigns class 'dvec' and any named attributes in dots.
#' @param x complex
#' @param ... attributes to assign
#' @export
#' @keywords internal
#' @family dvec
#' @return dvec
#' @examples
#' as_dvec(c(complex(1), complex(2)))
as_dvec.complex <- function(x, ...){
at <- list(...)
nms <- names(at)
nms <- nms[nms != '']
for(nm in nms)attr(x, nm) <- at[[nm]]
class(x) <- 'dvec'
x
}
#' Coerce Character to Decorated Vector
#'
#' Coerces character to decorated vector. Assigns class 'dvec' and any named attributes in dots.
#' @param x character
#' @param ... attributes to assign
#' @export
#' @keywords internal
#' @family dvec
#' @return dvec
#' @examples
#' as_dvec(letters)
as_dvec.character <- function(x, ...){
at <- list(...)
nms <- names(at)
nms <- nms[nms != '']
for(nm in nms)attr(x, nm) <- at[[nm]]
class(x) <- 'dvec'
x
}
#' Coerce Decorated Vector to Decorated Vector
#'
#' Coerces decorated vector to decorated vector.
#' Assigns any named attributes in dots.
#' @param x character
#' @param ... attributes to assign
#' @export
#' @keywords internal
#' @family dvec
#' @return dvec
#' @examples
#' as_dvec(as_dvec(letters[1:3]), label = 'Letters')
#' as_dvec(as_dvec(letters[1:3], label = 'Letters'))
as_dvec.dvec <- function(x, ...){
at <- list(...)
nms <- names(at)
nms <- nms[nms != '']
for(nm in nms)attr(x, nm) <- at[[nm]]
class(x) <- 'dvec'
x
}
# http://adv-r.had.co.nz/S3.html
# When implementing a vector class, you should implement these methods:
# length, [, [<-, [[, [[<-, c.
#' Get Length of a Decorated Vector
#'
#' Gets the length of a decorated vector. Simply calls next method.
#'
#' @param x decorated vector
#' @export
#' @keywords internal
#' @family dvec
#' @return integer
#' @examples
#' length(as_dvec(1:3))
length.dvec <- function(x)NextMethod()
#' Subset Decorated Vector
#'
#' Subsets decorated vector, retaining attributes.
#' @param x decorated vector
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family dvec
#' @return dvec
#' @examples
#' a <- as_dvec(letters, label = 'foo')
#' a <- a[1:3]
#' attributes(a)
#' names(a) <- a
#' a[1:2]
`[.dvec` <- function(x, ...){
y <- NextMethod()
dropped <- setdiff(names(attributes(x)), names(attributes(y)))
attributes(y)[dropped] <- attributes(x)[dropped]
y
}
#' Element-select Decorated Vector
#'
#' Selects element of decorated vector, retaining attributes.
#' @param x decorated vector
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family dvec
#' @return dvec
#' @examples
#' a <- as_dvec(letters[1:3], label = 'foo')
#' a <- a[[2]]
#' attributes(a)
`[[.dvec` <- function(x, ...){
y <- NextMethod()
dropped <- setdiff(names(attributes(x)), names(attributes(y)))
attributes(y)[dropped] <- attributes(x)[dropped]
y
}
#' Assign Subset of Decorated Vector
#'
#' Assigns subset of decorated vector, retaining attributes.
#' @param x decorated vector
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family dvec
#' @return dvec
#' @examples
#' a <- as_dvec(letters[1:3], label = 'foo')
#' a[2:3] <- 'a'
#' str(a)
#' class(a)
`[<-.dvec` <- function(x, ..., value){
y <- NextMethod() # preserves attributes, including class!
y <- unclass(y)
class(y) <- 'dvec'
y
}
#' Assign Element of Decorated Vector
#'
#' Assigns element of decorated vector, retaining attributes.
#' @param x decorated vector
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family dvec
#' @return dvec
#' @examples
#' a <- as_dvec(letters[1:3], label = 'foo')
#' a[[3]] <- 'a'
#' str(a)
#' class(a)
`[[<-.dvec` <- function(x, ..., value){
y <- NextMethod() # seems to preserve attributes, including class!
y <- unclass(y)
class(y) <- 'dvec'
y
}
#' Combine Decorated Vector
#'
#' Combines decorated vectors. Tries to preserve
#' attributes by resolving pairwise conflicts intelligently.
#' The attributes of the first item are reconciled
#' with those of the second, then those of the third, etc.
#' the \code{class} attribute is untouched.
#'
#' By default, the first version of any attribute is
#' preserved, with warning if the alternative differs.
#' NULLs are largely ignored. If either attribute
#' is a list the other is coerced to a list with as.list().
#' If either of two lists has names, then names
#' are enforced for the other (blank names if necessary).
#' Lists are combined in forward order, and
#' elements that are duplicates in value and name (if present)
#' are removed. If the result has only blank names,
#' these are removed as well.
#'
#' @param ... items to be combined, presumably all vectors
#' @export
#' @keywords internal
#' @family dvec
#' @return dvec
#' @examples
#' a <- as_dvec(letters[1:3], label = 'foo')
#' b <- as_dvec(letters[3:5], label = 'foo')
#' c <- c(a,b)
#' c
#' class(c)
#'
`c.dvec` <- function( ... ){
all <- list(...)
for(a in all){
if(!(is.atomic(a)))warning('expecting only atomic elements to combine')
if(is.factor(a))stop('cannot combine dvec with factor')
}
y <- NextMethod()
rec <- reconcile(all)
rec$class <- NULL # never assign class
attributes(y) <- rec
class(y) <- 'dvec'
y
}
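# A short sketch of the conflict handling described above: the first label
# wins, and arbitrate() warns about the mismatch.
# a <- as_dvec(1:3, label = 'weight')
# b <- as_dvec(4:6, label = 'mass')
# c(a, b) # warning: mismatched label attributes; result keeps label 'weight'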
#' Reconcile Attributes
#'
#' Reconciles attributes. Generic, with method \code{\link{reconcile.list}}.
#' @param x object of dispatch
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return list (of attributes)
reconcile <- function(x, ...)UseMethod('reconcile')
#' Reconcile Attributes of List Members
#'
#' Reconciles attributes of list members. Recursively arbitrates
#' list members pairwise, returning the accumulated attributes.
#' @param x list
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return list (of attributes)
#' @examples
#' library(magrittr)
#' library(dplyr)
#' a <- data.frame(study = 1) %>% decorate('study: [Study, [A: 1]]')
#' b <- data.frame(study = 2) %>% decorate('study: [Study, [B: 2]]')
#' bind_rows(a, b) %>% decorations
#' c(a$study, b$study)
#' reconcile(list(a$study, b$study))
reconcile.list <- function(x, ...){
if(length(x) == 1) return(attributes(x[[1]]))
# If we got this far, the list has length two or more.
# Reconcile the last member with the reconciliation
# of whatever came earlier.
left <- x[-length(x)] # length one or more
right <- x[[length(x)]] # just the tail
left_attr <- reconcile(left)
right_attr <- attributes(right)
nms <- union(names(left_attr), names(right_attr))
nms <- setdiff(nms, 'class') # never assign class
best <- sapply(
simplify = FALSE,
USE.NAMES = TRUE,
nms,
function(nm)arbitrate(
left_attr[[nm]],
right_attr[[nm]],
tag = nm
)
)
best
}
#' Arbitrate Two Attributes
#'
#' Arbitrates two attribute sets. Generic, with methods
#' \code{\link{arbitrate.NULL}},
#' \code{\link{arbitrate.namedList}},
#' \code{\link{arbitrate.list}},
#' \code{\link{arbitrate.default}}.
#' @param x left attribute set
#' @param y right attribute set
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return list (of attributes)
arbitrate <- function(x, y, ...)UseMethod('arbitrate')
#' Arbitrate Null
#'
#' Arbitrates two attributes, the first of which is NULL.
#' Simply returns the second.
#' @param x left attribute
#' @param y right attribute
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return class of y
arbitrate.NULL <- function(x, y, ...){
if(is.null(y))warning('unexpected NULL')
y
}
#' Arbitrate List
#'
#' Arbitrates two attributes, the first of which is a list.
#' If x has names, it is cast as namedList and arbitrated thus.
#' @param x left attribute
#' @param y right attribute
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return list
arbitrate.list <- function(x, y, ...){
if(!is.null(names(x))){
class(x) <- 'namedList'
res <- arbitrate(x, y, ...)
if(inherits(res, 'namedList')){
res <- unclass(res)
}
return(res)
}
# if we got here, x is an un-named list
if(is.null(y)) return(x)
# Not sure what y is, but probably should coerce to list
y <- as.list(y)
# combine these two lists
z <- c(x, y)
# x did not have names, but if y had names, now z does also.
# we'll put in temp names if necessary and strip them later.
if(is.null(names(z))) names(z) <- rep('', length(z))
# if ever both name and content are duplicated, we'll drop those.
bad <- duplicated(names(z)) & duplicated(z)
z <- z[!bad]
if(all(names(z) == '')) names(z) <- NULL # probably created by us.
z
}
#' Arbitrate Named List
#'
#' Arbitrates two attributes, the first of which is a named list.
#' @param x left attribute
#' @param y right attribute
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return list
arbitrate.namedList <- function(x, y, ...){
if(is.null(y)) return(x)
# Not sure what y is, but probably should coerce to list
y <- as.list(y)
# combine these two lists
z <- c(x, y)
# since x did have names, now z does also.
# if ever both name and content are duplicated, we'll drop those.
# bad <- duplicated(names(z)) & duplicated(z) # error for White: 1, Asian: 2, White: 2, Asian: 1.
# z <- list(White = 1, Asian = 2, White = 2, Asian = 1)
# z <- z[!bad]
classes <- unique(sapply(z, function(i)class(i)[[1]]))
  if(length(classes) > 1)warning('mixed classes, e.g. ', paste(collapse = ', ', classes[1:2]))
# https://www.r-bloggers.com/2016/07/populating-data-frame-cells-with-more-than-one-value/
codes <- data.frame(levels = I(structure(z, names = NULL)), labels = names(z))
# TTB 0.10.22 the line above must be useful in some situations.
# but in the simple case that each element of z is length one,
# unlist coerces type for better duplicate detection.
# e.g. a mix of matching int and num in two lists won't flag as duplicates.
# consider also:
# codes <- data.frame(levels = unlist(z), labels = names(z))
if(any(duplicated(codes))){
duplicated <- anyDuplicated(codes)
# in this context, unlike classified.default, some duplication is normal
# warning(
# 'dropping duplicated levels, e.g.: ',
# codes$levels[[duplicated]],
# ' (',
# codes$labels[[duplicated]],
# ')'
# )
codes <- unique(codes)
}
if(any(duplicated(codes$levels))){
duplicated <- anyDuplicated(codes$levels)
warning(
'level(s) cross-labelled, e.g.: ',
paste(
collapse = ', ',
unlist(# in case level is itself a list
codes$levels[[duplicated]]
)
),
': ',
paste(
collapse = ', ',
codes$labels[codes$levels == codes$levels[[duplicated]]]
)
)
}
if(any(duplicated(codes$labels))){
duplicated <- anyDuplicated(codes$labels)
warning(
'levels like-labelled, e.g.: ',
paste(
collapse = ', ',
codes$levels[codes$labels == codes$labels[[duplicated]]][[1]]
),
', ',
paste(
collapse = ', ',
codes$levels[codes$labels == codes$labels[[duplicated]]][[2]]
),
': ',
codes$labels[[duplicated]]
)
}
# having dropped any duplicates, we unpack codes
z <- as.list(codes$levels)
names(z) <- codes$labels
# now elements are unique, but could be like-labelled or cross-labelled.
class(z) <- 'list'
z
}
#' Arbitrate Default
#'
#' Arbitrates two attributes, the first of which is non-NULL and non-list.
#' If y is list, x is promoted to list and re-arbitrated thus.
#' Otherwise, x is returned, with warning if y not identical and not NULL.
#' @param x left attribute
#' @param y right attribute
#' @param tag attribute name, used in the warning message
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return list if y is list, else x
arbitrate.default <- function(x, y, tag = '', ...){
if(is.null(y)) return(x)
# so y is not NULL. if list, promote x and re-evaluate.
if(is.list(y)) return(arbitrate(as.list(x), y))
# so y is not null and not list. Presumably vector. Hopefully scalar.
if(identical(x, y)) return(x)
warning(
call. = FALSE,
immediate. = TRUE,
'mismatched ', tag, ' attributes: ignoring \'',
paste(y, collapse = ', '),
'\' in favor of \'',
paste(x, collapse = ', '),
'\''
)
return(x)
}
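# Commented illustrations of the dispatch rules above (mirroring, not
# extending, the documented behavior):
# arbitrate(NULL, 'kg')               # NULL method: returns 'kg'
# arbitrate('kg', 'g', tag = 'units') # default method: warns, returns 'kg'
# arbitrate(list(a = 1), list(b = 2)) # list methods: list(a = 1, b = 2)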
#' Format Decorated Vector
#'
#' Formats a decorated vector.
#' @param x dvec
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family dvec
#' @return character
format.dvec <- function(x, ...){
x <- unclass(x)
x <- NextMethod()
x
}
#' Print Decorated Vector
#'
#' Prints a decorated vector.
#' @param x dvec
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family dvec
#' @return character
print.dvec <- function(x, ...){
x <- unclass(x)
x <- NextMethod()
x
}
#' Coerce Decorated Vector to Data Frame
#'
#' Coerces decorated vector to data.frame.
#'
#' @param x dvec
#' @param row.names passed to next method
#' @param optional passed to next method
#' @param ... passed to next method
#' @param nm name for new column
#' @export
#' @keywords internal
#' @family dvec
#' @return data.frame
#' @examples
#' as.data.frame(as_dvec(letters[1:3]))
#' L <- as_dvec(letters[1:3], label = 'My Letters')
#' d <- data.frame(letters = L )
#' str(d)
as.data.frame.dvec <- function (x, row.names = NULL, optional = FALSE, ..., nm = deparse1(substitute(x)))
{
force(nm)
nrows <- length(x)
if (!(is.null(row.names) || (is.character(row.names) && length(row.names) ==
nrows))) {
warning(gettextf("'row.names' is not a character vector of length %d -- omitting it. Will be an error!",
nrows), domain = NA)
row.names <- NULL
}
if (is.null(row.names)) {
if (nrows == 0L)
row.names <- character()
else if (length(row.names <- names(x)) != nrows || anyDuplicated(row.names))
row.names <- .set_row_names(nrows)
}
if (!is.null(names(x)))
names(x) <- NULL
value <- list(x)
if (!optional)
names(value) <- nm
attributes(value[[1]]) <- attributes(x) # sole difference from as.data.frame.vector
structure(value, row.names = row.names, class = "data.frame")
}
#' Coerce Decorated Vector to Units
#'
#' Coerces dvec to units. If x has a units attribute,
#' it is used to create class 'units'. It is an error if
#' x has no units attribute.
#' @importFrom units as_units
#' @method as_units dvec
#' @param x dvec
#' @param ... ignored
#' @param preserve attributes to preserve; just label by default (class and units are handled implicitly)
#' @export
#' @examples
#' library(magrittr)
#' a <- data.frame(id = 1:4, wt = c(70, 80, 70, 80), sex = c(0,1,0,1))
#' a %<>% decorate('wt: [ body weight, kg ]')
#' a %<>% decorate('sex: [ sex, [ female: 0, male: 1]]')
#' a %<>% decorate('id: identifier')
#' a %<>% resolve
#' a$wt %>% as_units
as_units.dvec <- function(x, ..., preserve = getOption('yamlet_as_units_preserve', 'label')){
value <- attr(x, 'units')
if(is.null(value))stop('x must have non-null value of attribute: units')
# attr(x, 'units') <- NULL
drop <- names(attributes(x))
drop <- setdiff(drop, preserve)
for(nm in drop){
attr(x, nm) <- NULL
}
x <- unclass(x)
units(x) <- value
x
}
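# A brief sketch of the preserve argument (a$wt as in the example above):
# as_units(a$wt, preserve = character(0)) # drop the label attribute as well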
#' @export
units::as_units
#' Coerce Units to Decorated Vector
#'
#' Coerces units to dvec.
#' @param x units
#' @param ... passed arguments
#' @export
#' @importFrom units drop_units
#' @examples
#' library(magrittr)
#' library(dplyr)
#' a <- data.frame(id = 1:4, wt = c(70, 80, 70, 80), sex = c(0,1,0,1))
#' a %<>% decorate('wt: [ body weight, kg ]')
#' a %<>% decorate('sex: [ sex, [ female: 0, male: 1]]')
#' a %<>% decorate('id: identifier')
#' a %<>% resolve
#' a %<>% mutate(wt = as_units(wt))
#' a %<>% mutate(wt = as_dvec(wt))
#' str(a$wt)
as_dvec.units <- function(x, ...){
units <- deparse_unit(x)
x <- drop_units(x)
attr(x, 'units') <- units
x <- as_dvec(x, ...)
x
}
#' Abbreviate Decorated Vector
#'
#' Abbreviated class name for dvec.
#'
#' @export
#' @importFrom vctrs vec_ptype_abbr
#' @method vec_ptype_abbr dvec
#' @return character
#' @keywords internal
#' @param x dvec
#' @param ... ignored
#' @examples
#' cat(vec_ptype_abbr(as_dvec(0)))
vec_ptype_abbr.dvec <- function(x, ...) {
"dvec"
}
# # https://vctrs.r-lib.org/articles/s3-vector.html
#' @importFrom vctrs vec_ptype_abbr
#' @export
vctrs::vec_ptype_abbr
#' Test if Class is dvec
#'
#' Tests whether x inherits 'dvec'.
#' @param x object
#' @export
#' @return logical
#' @examples
#' is_dvec(1L)
#' is_dvec(as_dvec(1L))
is_dvec <- function(x){
inherits(x, 'dvec')
}
# ---- yamlet/R/dvec.R ----
#' Coerce Guide to Something More Explicit
#'
#' Coerces 'guide' to something more explicit. Generic, with methods for
#' data.frame and yamlet. The key 'guide' generally suggests a guide
#' to interpretation of a data item, such as units, formats, codelists,
#' and encodings. The idea here is to replace 'guide' with something
#' explicit in case required downstream.
#'
#' @param x object of dispatch
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return see methods
#' @family explicit_guide
#' @md
explicit_guide <- function(x,...)UseMethod('explicit_guide')
#' Coerce Yamlet Guide to Something More Explicit
#'
#' Coerces yamlet 'guide' keys to something more explicit.
#' The key 'guide' generally suggests a guide
#' to interpretation of a data item, such as units, formats, codelists,
#' and encodings. The idea here is to replace 'guide' with something
#' explicit in case required downstream.
#'
#' If \code{data} is supplied, guides that are lists
#' are checked to see if they evaluate to conditions in data context
#' (see \code{\link{isConditional.list}}).
#' If so, inferences are based on the first guide element rather
#' than the guide as a whole.
#'
#' This method iterates across the guide elements, renaming them
#' as specified by the value of \code{test}. (default: \code{\link{infer_guide}}).
#' \code{test} should be a function (or name of one) that accepts x, data, and dots.
#' If a data.frame is passed to explicit_guide(), the relevant column will
#' be passed as data to \code{test}.
#' @param x yamlet
#' @param ... passed to \code{\link[dplyr]{select}} to limit scope
#' @param test function or function name; supply non-default or globally set \code{options(yamlet_infer_guide = )}.
#' @param data optional data.frame for testing guides with length > 1
#' @export
#' @keywords internal
#' @importFrom dplyr case_when
#' @importFrom encode encoded
#' @return yamlet
#' @family explicit_guide
#' @examples
#' library(magrittr)
#' 'CONC: [ concentration, µg/mL ]' %>% as_yamlet %>% explicit_guide
#' 'RACE: [ subject race, [ Caucasian: 0, Latin: 1, Black: 2 ]]' %>% as_yamlet %>% explicit_guide
#' 'RACE: [ subject race, [ Caucasian, Latin, Black ]]' %>% as_yamlet %>% explicit_guide
#' 'RACE: [ subject race, //0/Caucasian//1/Latin//2/Black// ]' %>% as_yamlet %>% explicit_guide
#' 'DATE: [ date, "%Y-%m-%d" ]' %>% as_yamlet %>% explicit_guide
#' 'PRSE: [ standard error, "%" ]' %>% as_yamlet %>% explicit_guide
#'
explicit_guide.yamlet <- function(
x, ...,
test = getOption('yamlet_infer_guide', yamlet::infer_guide),
data = NULL
){
if(!is.null(data))stopifnot(is.list(data))
### don't do all, just selected names
# for(i in seq_along(x)){
for(i in selected(x, ...)){
nms <- names(x[[i]])
for(j in seq_along(x[[i]])){
if(length(nms) >= j){
if(nms[[j]] == 'guide'){
val <- x[[i]][[j]]
if(length(val) > 1){ # may be a conditional
if(!is.null(data)){
if(isConditional(val, data)){
val <- val[[1]] # just test the first element
}
}
}
explicit <- match.fun(test)(val, data = data[[i]], token = i, ...)
stopifnot(is.character(explicit), length(explicit) == 1)
# explicit <- unique(explicit)
names(x[[i]])[[j]] <- explicit
}
}
}
}
x
}
#' Infer Type of Guide
#'
#' Infers type of guide.
#' Default mapping function for \code{\link{explicit_guide.yamlet}}
#' where it replaces the key 'guide' with the return value.
#'
#' * If x is a list, the result is 'codelist'.
#'
#' * If x otherwise has length greater than 1, result is the default value.
#'
#' * If x \code{\link{is_parseable}}, result is 'units'.
#' Use \code{\link[units]{install_unit}} to register a non-default unit.
#'
#' * If x contains two or more percent signs, result is 'format'
#' (i.e. a 'format' string for a date or time class).
#'
#' * If x is (\code{\link[encode]{encoded}}), result is 'encoding'.
#'
#' * A length-one value of x not otherwise recognized is
#' assumed to be an attempt to provide a length-one 'codelist'.
#'
#' If data is supplied (not NULL), a warning is issued for
#' a codelist with elements not present.
#'
#' @param x character or list
#' @param data atomic
#' @param default value for unrecognized guides
#' @param token character: descriptive term for 'data' used in warning
#' @param ... ignored
#' @return length-one character
#' @export
#' @keywords internal
#' @family explicit_guide
#' @examples
#' infer_guide('a') # recognized unit
#' infer_guide('z') # unrecognized as unit, evaluates to guide
#' \dontrun{
#' # evaluates to codelist but data suggests otherwise (warning)
#' infer_guide(as.list(letters), data = LETTERS)
#' }
#' infer_guide(c(1,2,3)) # guide
#' infer_guide(list('a','b','c')) # codelist
#' infer_guide(list(a = 1, b = 2, c = 3)) # codelist
#' infer_guide(list(a = 1)) # codelist
#' infer_guide('kg/m^2') # units
#' infer_guide('%') # units
#' infer_guide('%Y-%m-%d') # format
#' infer_guide('//a/1//b/2//c/3//') # encoding
#'
infer_guide <- function(
x,
data = NULL,
default = 'guide',
token = 'data',
...
){
stopifnot(is.atomic(data)||is.null(data))
stopifnot(length(token) == 1)
token <- as.character(token)
res <- case_when(
# a list is clearly an attempt to supply a codelist
is.list(x) ~ 'codelist',
# all codelist now parsed as list @ 0.8.2
# not expecting any further plurality
length(x) > 1 ~ default,
# now everything is length one
# registered unit? (units::install_unit())
all(is_parseable(x)) ~ 'units',
# two percent signs?
length(gregexpr(pattern = '%', x)[[1]]) > 1 ~ 'format',
# qualifies as encoding?
all(encoded(x)) ~ 'encoding',
# x is length-one, none of the above
TRUE ~ default
)
stopifnot(length(res) == 1)
undescribed <- setdiff(data, x)
if(length(x) > 10){
x[[11]] <- '...'
x <- x[1:11]
}
msg <- paste(collapse = ', ', unlist(x))
if(identical(res, 'codelist') & length(undescribed))warning(
token, ' has values not in ', msg, ': e.g. ', undescribed[[1]]
)
res
}
#' Coerce Data Frame Guide to Something More Explicit
#'
#' Coerces data.frame 'guide' attributes to something more explicit.
#' The attribute 'guide' generally suggests a guide
#' to interpretation of a data item, such as units, formats, codelists,
#' and encodings. The idea here is to replace 'guide' with something
#' explicit in case required downstream.
#'
#' This method pulls the 'decorations' off of the data.frame,
#' converts to yamlet, applies \code{\link{explicit_guide.yamlet}},
#' purges 'guide' attributes from the data.frame,
#' and then re-decorates using \code{overwrite = TRUE}.
#'
#' @param x data.frame
#' @param ... named arguments passed to \code{\link{as_yamlet}}, \code{\link{explicit_guide}}, and \code{\link{decorate}}; un-named arguments limit scope
#' @param overwrite passed as TRUE
#' @param simplify whether to remove guide attribute
#' @export
#' @keywords internal
#' @importFrom dplyr case_when
#' @importFrom encode encoded
#' @return data.frame
#' @family explicit_guide
#' @family dvec
#' @examples
#' library(magrittr)
#' x <- data.frame(
#' ID = 1,
#' CONC = 1,
#' RACE = 1,
#' SEX = 1,
#' DATE = 1
#' )
#' x %<>% modify(ID, label = 'subject identifier')
#' x %<>% modify(CONC, label = 'concentration', guide = 'ng/mL')
#' x %<>% modify(RACE, label = 'race', guide = list(white = 0, black = 1, asian = 2))
#' x %<>% modify(SEX, label = 'sex', guide = list(female = 0, male = 1))
#' x %<>% modify(DATE, label = 'date', guide = '%Y-%m-%d')
#' x %>% decorations
#' x %>% explicit_guide %>% decorations
#' x %>% explicit_guide(DATE) %>% decorations # limit scope
explicit_guide.data.frame <- function(
x,
...,
overwrite = getOption('yamlet_explicit_guide_overwrite',TRUE),
simplify = getOption('yamlet_explicit_guide_simplify', TRUE)
){
y <- do.call(as_yamlet, c(list(x), named(...)))
nms <- selected(x, ...)
y <- y[as.character(nms)] # selected may have incompatible class path
y <- do.call(explicit_guide, c(list(y, data = x), named(...)))
if(simplify){
for(nm in nms){
attr(x[[nm]], 'guide') <- NULL
}
}
x <- do.call(decorate, c(list(x, meta = y, overwrite = TRUE), named(...)))
x
}
#' Coerce Decorated Vector Guide to Something More Explicit
#'
#' Coerces dvec 'guide' attribute to something more explicit.
#' The attribute 'guide' generally suggests a guide
#' to interpretation of a data item, such as units, formats, codelists,
#' and encodings. The idea here is to replace 'guide' with something
#' explicit in case required downstream.
#'
#' @param x dvec
#' @param ... named arguments passed to \code{\link{as_yamlet}}, \code{\link{explicit_guide}}, and \code{\link{decorate}}; un-named arguments ignored
#' @param overwrite whether to overwrite attributes
#' @param simplify whether to remove guide attribute
#' @export
#' @keywords internal
#' @importFrom dplyr case_when
#' @importFrom encode encoded
#' @return dvec
#' @family explicit_guide
#' @examples
#' library(magrittr)
#' x <- data.frame(
#' ID = as_dvec(1),
#' CONC = as_dvec(1),
#' RACE = as_dvec(1),
#' SEX = as_dvec(1),
#' DATE = as_dvec(1)
#' )
#' x %<>% modify(ID, label = 'subject identifier')
#' x %<>% modify(CONC, label = 'concentration', guide = 'ng/mL')
#' x %<>% modify(RACE, label = 'race', guide = list(white = 0, black = 1, asian = 2))
#' x %<>% modify(SEX, label = 'sex', guide = list(female = 0, male = 1))
#' x %<>% modify(DATE, label = 'date', guide = '%Y-%m-%d')
#' x %>% decorations
#' x %>% explicit_guide %>% decorations
#' x %>% explicit_guide(DATE) %>% decorations # limit scope
#' x %$% DATE %>% explicit_guide
explicit_guide.dvec <- function(
x,
...,
  overwrite = getOption('yamlet_explicit_guide_overwrite', TRUE),
  simplify = getOption('yamlet_explicit_guide_simplify', TRUE)
){
y <- data.frame(x = x)
y <- do.call(
explicit_guide,
c(
list(
x = y,
overwrite = overwrite,
simplify = simplify
),
named(...)
)
)
y <- y$x
y
}
#' Coerce Guide to Something More Implicit
#'
#' Coerces 'guide' to something more implicit. Generic, with methods for
#' data.frame. The key 'guide' generally suggests a guide
#' to interpretation of a data item, such as units, formats, codelists,
#' and encodings. The idea here is to replace these with 'guide': i.e.,
#' to undo the effects of \code{\link{explicit_guide}}.
#'
#' @param x object of dispatch
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return see methods
#' @family explicit_guide
#' @md
implicit_guide <- function(x,...)UseMethod('implicit_guide')
#' Coerce Data Frame Guide to Something More Implicit
#'
#' Coerces data.frame guide-like attributes to 'guide'.
#' The attribute 'guide' generally suggests a guide
#' to interpretation of a data item, such as units, formats, codelists,
#' and encodings. The idea here is to replace these with 'guide':
#' i.e., to undo the effects of \code{\link{explicit_guide.data.frame}}.
#' If guide attribute is still present, the explicit attribute is removed.
#' Otherwise the explicit element is renamed.
#'
#'
#' @param x data.frame
#' @param ... named arguments ignored; un-named arguments limit scope
#' @export
#' @keywords internal
#' @importFrom dplyr case_when
#' @importFrom encode encoded
#' @return data.frame
#' @family explicit_guide
#' @examples
#' library(magrittr)
#' x <- data.frame(
#' ID = 1,
#' CONC = 1,
#' RACE = 1,
#' SEX = 1,
#' DATE = 1
#' )
#' x %<>% modify(ID, label = 'subject identifier')
#' x %<>% modify(CONC, label = 'concentration', guide = 'ng/mL')
#' x %<>% modify(RACE, label = 'race', guide = list(white = 0, black = 1, asian = 2))
#' x %<>% modify(SEX, label = 'sex', guide = list(female = 0, male = 1))
#' x %<>% modify(DATE, label = 'date', guide = '%Y-%m-%d')
#' x %>% decorations
#' x %>% explicit_guide %>% decorations
#' x %>% explicit_guide %>% implicit_guide %>% decorations
#' x %>% explicit_guide %>% implicit_guide(DATE) %>% decorations # limit scope
#' x %>% explicit_guide(simplify = FALSE) %>% decorations
#' x %>% explicit_guide(simplify = FALSE) %>% implicit_guide %>% decorations
implicit_guide.data.frame <- function(
x,
...
){
nms <- selected(x, ...)
for(nm in nms){
attr <- attributes(x[[nm]])
anms <- names(attr)
anms <- intersect(anms, c('units', 'format', 'codelist', 'encoding'))
for(anm in anms){
if('guide' %in% anms){
attributes(x[[nm]][[anm]]) <- NULL
} else {
names(attributes(x[[nm]]))[names(attributes(x[[nm]])) == anm] <- 'guide'
}
}
}
x
}
#' Coerce Decorated Vector Guide to Something More Implicit
#'
#' Coerces dvec guide-like attributes to 'guide'.
#' The attribute 'guide' generally suggests a guide
#' to interpretation of a data item, such as units, formats, codelists,
#' and encodings. The idea here is to replace these with 'guide':
#' i.e., to undo the effects of \code{\link{explicit_guide.dvec}}.
#' If guide attribute is still present, the explicit attribute is removed.
#' Otherwise the explicit element is renamed.
#'
#'
#' @param x dvec
#' @param ... ignored
#' @export
#' @keywords internal
#' @importFrom dplyr case_when
#' @importFrom encode encoded
#' @return dvec
#' @family explicit_guide
#' @family dvec
#' @examples
#' library(magrittr)
#' x <- data.frame(
#' ID = as_dvec(1),
#' CONC = as_dvec(1),
#' RACE = as_dvec(1),
#' SEX = as_dvec(1),
#' DATE = as_dvec(1)
#' )
#' x %<>% modify(ID, label = 'subject identifier')
#' x %<>% modify(CONC, label = 'concentration', guide = 'ng/mL')
#' x %<>% modify(RACE, label = 'race', guide = list(white = 0, black = 1, asian = 2))
#' x %<>% modify(SEX, label = 'sex', guide = list(female = 0, male = 1))
#' x %<>% modify(DATE, label = 'date', guide = '%Y-%m-%d')
#' x %>% decorations
#' x %>% explicit_guide %>% decorations
#' x %>% explicit_guide %>% implicit_guide %>% decorations
#' x %>% explicit_guide %>% implicit_guide(DATE) %>% decorations # limit scope
#' x %>% explicit_guide(simplify = FALSE) %>% decorations
#' x %>% explicit_guide(simplify = FALSE) %>% implicit_guide %>% decorations
#' x %<>% explicit_guide
#' a <- x$DATE
#' str(a)
#' str(a %>% implicit_guide)
implicit_guide.dvec <- function(
x,
...
){
y <- data.frame(x = x)
y <- implicit_guide(y)
y <- y$x
y
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/explicit_guide.R |
# Coerce Codelist to Factor
#
# Coerces codelist to factor.
# Generic, with default and data.frame methods.
# Returns 'classified' 'factor' which as an attribute-preserving
# subset method.
#
# @param x object
# @param ... passed arguments
# @export
# @keywords internal
# @return class 'classified' 'factor'
# @family factorize_codelist
# factorize_codelist <- function(x,...){UseMethod('factorize_codelist')}
# Coerce Codelist to Factor by Default
#
# Coerces Codelist to Factor by Default. Coerces to character and calls next method.
#
# @param x presumably vector-like
# @param ... passed arguments
# @export
# @keywords internal
# @return class 'classified' 'factor'
# @family factorize_codelist
# factorize_codelist.default <- function(x,...){
# y <- as.character(x)
# attributes(y) <- attributes(x)
# factorize_codelist(y,...)
# }
# Coerce Codelist to Factor for Factor
#
# Coerces Codelist to Factor for Factors.
# Coerces to character and calls next method.
#
# @param x factor
# @param ... passed arguments
# @export
# @keywords internal
# @return class 'classified' 'factor'
# @family factorize_codelist
# factorize_codelist.factor <- function(x,...){
# y <- as.character(x)
# attr(x, 'levels') <- NULL
# attr(x, 'class') <- NULL
# attributes(y) <- attributes(x) # non-factor attributes
# factorize_codelist(y,...)
# }
# Coerce Character with Codelist to Factor
#
# Coerces character with codelist attribute to factor.
# If attribute 'codelist' is missing, unique values of
# x are supplied.
#
# @param x character
# @param ... ignored
# @export
# @keywords internal
# @return class 'classified' 'factor'
# @family factorize_codelist
# @examples
# example(factorize_codelist.data.frame)
# factorize_codelist.character <- function(x,...){
# guide <- attr(x,'codelist')
# if(is.null(guide)) guide <- as.list(unique(x))
# if(any(sapply(guide,function(i)is.null(i)))){
# warning('codelist contains NULL')
# }else{
# labs <- names(guide)
# if(is.null(labs))labs <- rep('',length(guide))
# levs <- unlist(guide)
# if(any(labs == '')){
# # warning('guide for ',item,' contains unlabeled level(s); using level itself')
# labs[labs == ''] <- levs[labs == '']
# }
# codelist <- as.list(levs)
# names(codelist) <- labs
# reserve <- attributes(x)
# reserve$codelist <- NULL
# # proposed:
# # reserve$codelist <- codelist # fully-specified. This was <- NULL before 0.6.0
# try(x <- factor(x, levels = levs, labels = labs))
# if(is.factor(x)){
# attributes(x) <- c(reserve, attributes(x))
# x <- as_classified(x)
# }else{
# warning('could not coerce to factor, returning character')
# }
# x
# }
# }
# Coerce Data Frame Items with Codelists to Factor
#
# Coerces items in data.frame with codelist attribute to 'classified':
# a factor with a codelist attribute
#
# @param x data.frame
# @param ... passed to \code{\link[dplyr]{select}} to limit scope
# @export
# @keywords internal
# @return data.frame
# @family factorize_codelist
# @examples
# library(magrittr)
# file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
# x <- decorate(file)
# x %>% explicit_guide %>% decorations(Age, Race, Heart:glyco)
# x %>% explicit_guide %>% factorize_codelist %>% decorations(Age, Race, Heart:glyco)
# x %>% explicit_guide %>% factorize_codelist(Heart:glyco) %>% decorations(Age, Race, Heart:glyco)
# factorize_codelist.data.frame <- function(x,...){
# my_class <- class(x)
# for(nm in selected(x,...)){
# if('codelist' %in% names(attributes(x[[nm]]))){
# x[[nm]] <- factorize_codelist(x[[nm]]) # grouped_df can drop subclass!
# }
# }
# class(x) <- my_class
# x
# }
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/factorize_codelist.R |
#' Gather a Decorated Data Frame
#'
#' Gathers a decorated data.frame.
#' I.e. a gather method for class 'decorated'.
#' Invokes tidyr::gather(), converting gathered
#' labels to the guide attribute of \code{key}, and
#' converting gathered guides, if all the same,
#' to the guide attribute of \code{value}.
#' Somewhat experimental!
#'
#' @param data see \code{\link[tidyr]{gather}}
#' @param key see \code{\link[tidyr]{gather}}
#' @param value see \code{\link[tidyr]{gather}}
#' @param ... see \code{\link[tidyr]{gather}}
#' @param na.rm see \code{\link[tidyr]{gather}}
#' @param convert see \code{\link[tidyr]{gather}}
#' @param factor_key see \code{\link[tidyr]{gather}}
#' @export
#' @importFrom dplyr select
#' @importFrom tidyr gather
#' @importFrom rlang ensym sym enquo as_label
#' @return decorated
#' @keywords internal
#' @examples
#' library(magrittr)
#' library(tidyr)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(file)
#' x %>% gather('key', 'value', time, interval) %>% decorations
#'
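#' # A hedged sketch of what is captured: 'key' maps gathered labels to
#' # column names, and 'value' inherits the guide if all gathered guides agree.
#' y <- x %>% gather('key', 'value', time, interval)
#' attr(y$key, 'guide')
#' attr(y$value, 'guide')
#'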
gather.decorated <- function(
data,
key = 'key',
value = 'value',
...,
na.rm = FALSE,
convert = FALSE,
factor_key = FALSE
){
# NextMethod()
  # @ tidyr 1.1.2, NextMethod() i.e. gather.data.frame does not respect key/value.
args <- quos(...)
args <- args[names(args) == ""]
if(length(args) == 0) return(data)
#if(length(args[[1]] == 0)) return(data)
class(data) <- setdiff(class(data), 'decorated')
# probably need to quote key and value before using.
# https://tidyeval.tidyverse.org/sec-up-to-speed.html#writing-functions
key <- enquo(key)
value <- enquo(value)
x <- gather(
data = data,
key = !!key,
value = !!value,
...,
na.rm = na.rm,
convert = convert,
factor_key = factor_key
)
#x <- ungroup(x) # @0.8.4, to select only one column without autoselection of groups
if(as_label(key) %in% names(x)){
token <- names(select(ungroup(x), !!key))
    val <- names(select(ungroup(x), !!value))
nms <- unique(x[[token]])
labs <- sapply(nms, function(nm)attr(data[[nm]],'label'))
names(nms) <- labs
attr(x[[token]], 'guide') <- nms
guides <- lapply(nms, function(nm)attr(data[[nm]],'guide'))
guides <- unique(guides)
if(length(guides) == 1)attr(x[[val]], 'guide') <- guides[[1]]
}
as_decorated(x)
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/gather.R |
#' Create a New ggplot for a Decorated Data Frame
#'
#' Creates a new ggplot object for a decorated data.frame.
#' This is the ggplot() method for class 'decorated'.
#' It creates a ggplot object using the default method,
#' but reclassifies it as 'decorated_ggplot' so that a custom print method
#' is invoked; see \code{\link{print.decorated_ggplot}}.
#'
#' This approach is similar to but more flexible than
#' the method for \code{\link{ggready}}.
# Currently,
# there is only one method for resolve() (\code{\link{resolve.decorated}})
# with the result that all 'resolved' objects inherit 'decorated'
# and thus can use \code{\link{ggplot.decorated}}.
#' For fine control, you can switch between 'data.frame'
#' and 'decorated' using \code{\link{as_decorated}}
#' (supplies null decorations) and \code{\link{as.data.frame}}
#' (preserves decorations).
#'
#' @param data decorated, see \code{\link{decorate}}
#' @param ... passed to \code{\link[ggplot2]{ggplot}}
#' @return return value like \code{\link[ggplot2]{ggplot}} but inheriting 'decorated_ggplot'
#' @export
#' @importFrom ggplot2 ggplot
#' @family decorated_ggplot
#' @family interface
#' @seealso decorate resolve ggready
#' @examples
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' library(ggplot2)
#' library(dplyr)
#' library(magrittr)
#' # par(ask = FALSE)
#'
#' x <- decorate(file)
#' x %<>% filter(!is.na(conc))
#'
#' # Manipulate class to switch among ggplot methods.
#' class(x)
#' class(data.frame(x))
#' class(as_decorated(data.frame(x)))
#'
#' # The bare data.frame gives boring labels and un-ordered groups.
#' map <- aes(x = time, y = conc, color = Heart)
#' data.frame(x) %>% ggplot(map) + geom_point()
#'
#' # Decorated data.frame uses supplied labels.
#' # Notice CHF levels are still not ordered. (Moderate first.)
#' x %>% ggplot(map) + geom_point()
#'
#' # If we resolve Heart, CHF levels are ordered.
#' x %<>% resolve(Heart)
#' x %>% ggplot(map) + geom_point()
#'
#' # We can map aesthetics as decorations.
#' x %<>% decorate('Heart: [ color: [gold, purple, green]]')
#' x %>% ggplot(map) + geom_point()
#'
#' # Colors are matched to particular levels. Purple drops out here:
#' x %>% filter(Heart != 'Moderate') %>% ggplot(map) + geom_point()
#'
#' # We can resolve other columns for a chance to enrich the output with units.
#' x %<>% resolve
#' suppressWarnings( # because this complains for columns with no units
#' x <- modify(x, title = paste0(label, '\n(', units, ')'))
#' )
#' x %>% ggplot(map) + geom_point()
#'
#' # Or something fancier.
#' x %<>% modify(conc, title = 'conc_serum. (mg*L^-1.)')
#' x %>% ggplot(map) + geom_point()
#'
#' # The y-axis title is deliberately given in spork syntax for elegant coercion:
#' library(spork)
#' x %<>% modify(conc, expression = as.expression(as_plotmath(as_spork(title))))
#' x %>% ggplot(map) + geom_point()
#' # Add a fancier label for Heart, and facet by a factor:
#' x %<>% modify(Heart, expression = as.expression(as_plotmath(as_spork('CHF^\\*'))))
#' x %>% ggplot(map) + geom_point() + facet_wrap(~Creatinine)
#'
#' # ggready handles the units and plotmath implicitly for a 'standard' display:
#' x %>% ggready %>% ggplot(map) + geom_point() + facet_wrap(~Creatinine)
#'
#' # Notice that instead of over-writing the label
#' # attribute, we are creating a stack of label
#' # substitutes (title, expression) so that
#' # label is still available as an argument
#' # if we want to try something else. The
#' # print method by default looks for all of these.
#' # Precedence is expression, title, label, column name.
#' # Precedence can be controlled using
#' # options(decorated_ggplot_search = c(a, b, ...) ).
#'
#' # Here we try a dataset with conditional labels and units.
#'
#' file <- system.file(package = 'yamlet', 'extdata','phenobarb.csv')
#' x <- file %>% decorate %>% resolve
#' # Note that value has two elements for label and guide.
#' x %>% decorations(value)
#'
#' # The print method defaults to the first, with warning.
#' map <- aes(x = time, y = value, color = event)
#' \donttest{
#' x %>% ggplot(map) + geom_point()
#' }
#'
#' # If we subset appropriately, the relevant value is substituted.
#' x %>% filter(event == 'conc') %>% ggplot(map) + geom_point()
#'
#' x %>% filter(event == 'conc') %>%
#' ggplot(aes(x = time, y = value, color = ApgarInd)) + geom_point()
#'
#' x %>% filter(event == 'dose') %>%
#' ggplot(aes(x = time, y = value, color = Wt)) +
#' geom_point() +
#' scale_y_log10() +
#' scale_color_gradientn(colours = rainbow(4))
#'
#' # print.decorated_ggplot will attempt to honor coordinated aesthetics.
#' x <- data.frame(x = c(1:6, 3:8), y = c(1:6,1:6), z = letters[c(1:6,1:6)])
#' x %<>% decorate('z: [color: ["red", "blue", "green", "gold", "black", "magenta"]]')
#' x %<>% decorate('z: [fill: ["red", "blue", "green", "gold", "black", "magenta"]]')
#' x %<>% decorate('z: [shape: [20, 21, 22, 23, 24, 25]]')
#' x %<>% decorate('z: [linetype: [6, 5, 4, 3, 2, 1]]')
#' x %<>% decorate('z: [alpha: [ .9, .8, .7, .6, .5, .4]]')
#' x %<>% decorate('z: [size: [1, 1.5, 2, 2.5, 3, 3.5]]')
#' x %>% ggplot(aes(
#' x, y,
#' color = z,
#' fill = z,
#' shape = z,
#' linetype = z,
#' alpha = z,
#'   size = z
#' )) +
#' geom_point() +
#' geom_line(size = 1)
ggplot.decorated <- function(data, ...){
p <- NextMethod()
class(p) <- c('decorated_ggplot',class(p))
p
}
#' Substitute Expressions, Titles, Labels and Aesthetics in ggplots
#'
#' Default labels (e.g. mappings for \code{x}, \code{y}, etc.)
#' will be used to search \code{data} for more meaningful
#' labels, taking first available from attributes
#' with names in \code{search}. Likewise, if mappings for
#' colour (color), fill, size, etc. (see defaults for \code{discrete})
#' indicate columns that have these defined as attributes,
#' an attempt is made to add a corresponding discrete scale if
#' one does not exist already. Values are recycled if necessary
#' and are specific by ordinal position to the corresponding
#' level of the corresponding variable. Levels are defined
#' in increasing priority by
#' \code{sort(unique(x))},
#' any guide attribute,
#' any factor levels,
#' any codelist attribute, or
#' any plotmath attribute.
#'
#'
#'
#' @param x class 'decorated_ggplot' from \code{\link{ggplot.decorated}}
#' @param ... ignored
#' @param search attribute names from which to seek label substitutes
#' @param discrete discrete aesthetics to map from data decorations where available
#' @param drop should unused factor levels be omitted from data-driven discrete scales?
#' @return see \code{\link[ggplot2]{print.ggplot}}
#' @importFrom ggplot2 scale_discrete_manual waiver
#' @export
#' @family decorated_ggplot
#' @examples
#' example(ggplot.decorated)
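#'
#' # A minimal sketch of level precedence, using a hypothetical column 'z':
#' # a codelist attribute outranks factor levels and sort(unique(x)),
#' # so 'b' (listed first in the codelist) takes the first color.
#' library(magrittr)
#' library(ggplot2)
#' x <- data.frame(x = 1:4, y = 1:4, z = c('a','b','a','b'))
#' x %<>% decorate('z: [codelist: [b, a], color: [red, blue]]')
#' x %>% ggplot(aes(x, y, color = z)) + geom_point() # 'b' red, 'a' blue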
print.decorated_ggplot <- function(
x,
...,
search = getOption(
'yamlet_decorated_ggplot_search',
c('expression', 'title', 'label')
),
discrete = getOption(
'yamlet_decorated_ggplot_discrete',
c('colour', 'fill', 'size', 'shape', 'linetype', 'linewidth', 'alpha')
),
drop = getOption('yamlet_decorated_ggplot_drop', TRUE)
){
# # support for plotmath levels
# parseable <- character(0)
# for(col in x$labels){
# plotmath <- attr(x$data[[col]], 'plotmath')
# if(is.null())
# if(!is.null(plotmath)){
# parseable <- c(parseable, col) # accumulate to inform scales below
# # number of levels should exactly match length of plotmath
# levels(x$data[[col]]) <- plotmath
# }
# }
# support for discrete manual scales
labelnames <- names(x$labels)
aesthetics <- intersect(discrete, labelnames)
scaletypes <- sapply(x$scales$scales, `[[`, 'aesthetics')
# don't redefine existing scales:
aesthetics <- setdiff(aesthetics, scaletypes)
for(a in aesthetics){ # color, fill, size, etc
src <- x$labels[[a]] # the corresponding label
if(length(src) == 1){ # needs to be singular
if(src %in% names(x$data)){ # and present in data
      col <- x$data[[src]] # the column itself
atr <- attributes(col)
nms <- names(atr)
if('color' %in% nms & !'colour' %in% nms){
atr$colour <- atr$color
}
# now we want to make one new scale
# for this aesthetic
# if the column has a matching attribute
# or if the column is parseable.
need_scale <- (a %in% names(atr)) | ('plotmath' %in% names(atr))
if(!need_scale) next
# calculate values
this <- atr[[a]]
# preserve correspondence with guides
# increasing precedence:
levels <- sort(unique(col))
if('guide' %in% names(atr)) levels <- atr$guide
if(is.factor(col)) levels <- levels(col)
if('codelist' %in% names(atr)) levels <- atr$codelist # ignore names
# support plotmath
if('plotmath' %in% names(atr) & inherits(col, 'factor')){
plotmath <- attr(col, 'plotmath')
levels(x$data[[src]]) <- plotmath # implement in data
levels(col) <- plotmath # implement locally
levels <- plotmath # implement here
}
if(!is.null(this)){this <- rep(this, length.out = length(levels))}
if(!is.null(this)){names(this) <- levels}
this <- unlist(this)
# calculate breaks
breaks <- waiver()
if(drop) breaks <- sort(unique(col))
# create a new scale using the stored values
# scale_discrete_manual('color'...) must have values and aesthetics
# scale_color_discrete must not have values nor aesthetics
# both accept labels and breaks
args <- list(breaks = breaks)
if(!is.null(this)){
args <- list(aesthetics = a, values = this, breaks = breaks)
}
if('plotmath' %in% names(atr)){
args <- c(args, list(labels = scales::label_parse()))
}
# arg list complete
# calculate function name
fun <- match.fun('scale_discrete_manual')
if(is.null(this)){
fun <- match.fun(paste(sep = '_', 'scale', a, 'discrete'))
}
theScale <- do.call(fun, args)
x <- x + theScale
# x <- x + scale_discrete_manual(
# aesthetics = a,
# values = this,
# breaks = breaks,
# labels = scales::label_parse()
# )
}
}
}
for(i in seq_along(x$labels)){ # x (gg object) stores names of used columns as $labels
lab <- x$labels[[i]] # handle one label
if(length(lab)){ # i.e. not null or empty expression
if(length(lab) == 1){
if(lab %in% names(x$data)){ # if this is just a bare column name
col <- x$data[[lab]]
atr <- attributes(col)
for( s in rev(search)){ # end with first
label <- atr[[s]] # retrieve label
if(!is.null(label)){
x$labels[[i]] <- label # overwrite default label with one from data attributes
}
}
}
}
# done with search. Plural labels? Note x$labels unchanged, lab unchanged
if(length(lab) > 1){
if(length(names(lab)))lab = paste(
paste0(
'(',
names(lab),
')'
),
lab
)
lab <- paste(lab, collapse = '\n')
msg <- paste('using first of', lab, sep = '\n')
warning(msg)
x$labels[[i]] <- x$labels[[i]][[1]]
}
}
}
NextMethod()
}
#' Determine Scale Type for dvec
#'
#' Determines scale type for dvec.
#' @param x dvec
#' @export
#' @keywords internal
#' @importFrom ggplot2 scale_type
#' @method scale_type dvec
scale_type.dvec <- function(x)scale_type(unclass(x))
#' Rescale dvec
#'
#' Rescales dvec
#' @param x dvec
#' @param to numeric
#' @param from numeric
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @importFrom scales rescale
#' @method rescale dvec
rescale.dvec <- function(
x,
to = c(0, 1),
from = range(x, na.rm = TRUE, finite = TRUE),
...
){
rescale(unclass(x), to = to, from = from, ...)
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/ggplot.R |
#' Enable Automatic Labels and Units for ggplot
#'
#' Enable automatic labels and units for ggplot.
#' Substitutes column label, if present, for default.
#' Supports arrangements of ggplot objects.
#' Defined similarly to \code{\link{print.decorated_ggplot}}
#' and respects global options
#' yamlet_decorated_ggplot_search,
#' yamlet_decorated_ggplot_discrete, and
#' yamlet_decorated_ggplot_drop.
#'
#' @param plot class 'decorated_ggplot' from \code{\link{ggplot.decorated}}
#' @return see \code{\link[ggplot2]{ggplot_build}}
#' @export
#' @method ggplot_build decorated_ggplot
#' @importFrom ggplot2 ggplot_build
#' @keywords internal
#' @family decorated_ggplot
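#' @examples
#' # A minimal sketch: ggplot_build() is called by grid-arrangement machinery,
#' # so decorations reach labels even without an explicit print().
#' library(ggplot2)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(file)
#' p <- ggplot(x, aes(time, conc)) + geom_point()
#' ggplot_build(p)$plot$labels$x # substituted from decorations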
ggplot_build.decorated_ggplot <- function(plot){
search = getOption(
'yamlet_decorated_ggplot_search',
c('expression', 'title', 'label')
)
discrete = getOption(
'yamlet_decorated_ggplot_discrete',
c('colour', 'fill', 'size', 'shape', 'linetype', 'linewidth', 'alpha')
)
drop = getOption('yamlet_decorated_ggplot_drop', TRUE)
x <- plot
# # support for plotmath levels
# parseable <- character(0)
# for(col in x$labels){
# plotmath <- attr(x$data[[col]], 'plotmath')
# if(is.null())
# if(!is.null(plotmath)){
# parseable <- c(parseable, col) # accumulate to inform scales below
# # number of levels should exactly match length of plotmath
# levels(x$data[[col]]) <- plotmath
# }
# }
# support for discrete manual scales
labelnames <- names(x$labels)
aesthetics <- intersect(discrete, labelnames)
scaletypes <- sapply(x$scales$scales, `[[`, 'aesthetics')
# don't redefine existing scales:
aesthetics <- setdiff(aesthetics, scaletypes)
for(a in aesthetics){ # color, fill, size, etc
src <- x$labels[[a]] # the corresponding label
if(length(src) == 1){ # needs to be singular
if(src %in% names(x$data)){ # and present in data
      col <- x$data[[src]] # the column itself
atr <- attributes(col)
nms <- names(atr)
if('color' %in% nms & !'colour' %in% nms){
atr$colour <- atr$color
}
# now we want to make one new scale
# for this aesthetic
# if the column has a matching attribute
# or if the column is parseable.
need_scale <- (a %in% names(atr)) | ('plotmath' %in% names(atr))
if(!need_scale) next
# calculate values
this <- atr[[a]]
# preserve correspondence with guides
# increasing precedence:
levels <- sort(unique(col))
if('guide' %in% names(atr)) levels <- atr$guide
if(is.factor(col)) levels <- levels(col)
if('codelist' %in% names(atr)) levels <- atr$codelist # ignore names
# support plotmath
if('plotmath' %in% names(atr) & inherits(col, 'factor')){
plotmath <- attr(col, 'plotmath')
levels(x$data[[src]]) <- plotmath # implement in data
levels(col) <- plotmath # implement locally
levels <- plotmath # implement here
}
      if(!is.null(this)){this <- rep(this, length.out = length(levels))}
      if(!is.null(this)){names(this) <- levels}
this <- unlist(this)
# calculate breaks
breaks <- waiver()
if(drop) breaks <- sort(unique(col))
# create a new scale using the stored values
# scale_discrete_manual('color'...) must have values and aesthetics
# scale_color_discrete must not have values nor aesthetics
# both accept labels and breaks
args <- list(breaks = breaks)
if(!is.null(this)){
args <- list(aesthetics = a, values = this, breaks = breaks)
}
if('plotmath' %in% names(atr)){
args <- c(args, list(labels = scales::label_parse()))
}
# arg list complete
# calculate function name
fun <- match.fun('scale_discrete_manual')
if(is.null(this)){
fun <- match.fun(paste(sep = '_', 'scale', a, 'discrete'))
}
theScale <- do.call(fun, args)
x <- x + theScale
}
}
}
for(i in seq_along(x$labels)){ # x (gg object) stores names of used columns as $labels
lab <- x$labels[[i]] # handle one label
if(length(lab)){ # i.e. not null or empty expression
if(length(lab) == 1){
if(lab %in% names(x$data)){ # if this is just a bare column name
col <- x$data[[lab]]
atr <- attributes(col)
for( s in rev(search)){ # end with first
label <- atr[[s]] # retrieve label
if(!is.null(label)){
x$labels[[i]] <- label # overwrite default label with one from data attributes
}
}
}
}
# done with search. Plural labels? Note x$labels unchanged, lab unchanged
if(length(lab) > 1){
if(length(names(lab)))lab = paste(
paste0(
'(',
names(lab),
')'
),
lab
)
lab <- paste(lab, collapse = '\n')
msg <- paste('using first of', lab, sep = '\n')
warning(msg)
x$labels[[i]] <- x$labels[[i]][[1]]
}
}
}
plot <- x
NextMethod()
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/ggplot_build.R |
# as of 0.9.4, printing/plotting ggready redirects to printing decorated.
# Create a New ggplot for a GGready Data Frame
#
# Creates a new ggplot object for a ggready data.frame.
# This is the ggplot() method for class 'ggready';
# it tries to implement automatic labels and units in axes and legends
# in association with \code{\link{print.ggready_ggplot}}.
# This approach is deprecated in favor of \code{\link{ggplot.decorated}}.
#
# @param data data.frame or similar
# @param ... passed to \code{\link[ggplot2]{ggplot}}
# @return return value like \code{\link[ggplot2]{ggplot}}
# @export
# @importFrom ggplot2 ggplot
# @family ggready
# @keywords internal
# @examples
# example(ggready)
# ggplot.ggready <- function(data, ...){
# class(data) <- setdiff(class(data), 'ggready')
# p <- ggplot(data = data, ...)
# class(p) <- c('ggready_ggplot',class(p))
# p
# }
# Print Automatic Labels and Units for ggplot
#
# Prints automatic labels and units for ggplot.
# Substitutes column label, if present, for default.
#
# @param x class 'ggready_ggplot' from \code{\link{ggplot.ggready}}
# @param ... passed arguments
# @return see \code{\link[ggplot2]{print.ggplot}}
# @export
# @keywords internal
# @family ggready
# @examples
# example(ggready)
# print.ggready_ggplot <- function(x, ...){
# for(i in seq_along(x$labels)){ # x (gg object) stores names of used columns as $labels
# lab <- x$labels[[i]] # handle one label
# if(length(lab)){ # not null or empty expression
# if(lab %in% names(x$data)){ # if this is just a bare column name
# col <- x$data[[lab]]
# atr <- attributes(col)
# label <- atr$label # retrieve label
# if(!is.null(label)){
# x$labels[[i]] <- label # replace default label with one from data attributes
# }
# }
# }
# }
# # for(col in names(x$data)){
# # if(inherits(x$data[[col]],'dvec')){
# # x$data[[col]] <- unclass(x$data[[col]]) # class dvec confuses ggplot: "Don't know how to automatically pick scale for object of type dvec. Defaulting to continuous."
# # }
# # }
# NextMethod()
# }
# Enable Automatic Labels and Units for ggplot
#
# Enable automatic labels and units for ggplot.
# Substitutes column label, if present, for default.
# Supports arrangements of ggplot objects.
#
# @param x class 'ggready_ggplot' from \code{\link{ggplot.ggready}}
# @param ... passed arguments
# @return see \code{\link[ggplot2]{ggplot_build}}
# @export
# @importFrom ggplot2 ggplot_build
# @method ggplot_build ggready_ggplot
# @keywords internal
# @family ggready
# ggplot_build.ggready_ggplot <- print.ggready_ggplot
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/ggplot_ggready.R |
#' Prepare Data for GGplot
#'
#' Prepares data for ggplot.
#' Generic, with methods for data.frame, and decorated.
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return see methods
#' @family ggready
#' @examples
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(file)
#' decorations(x, Weight)
#' decorations(as.data.frame(x), Weight) # downgrade still has attributes
#' class(x)
#' class(ggready(as.data.frame(x)))
#' class(ggready(x))
#' class(ggready(resolve(x)))
#' x <- ggready(x)
#' library(magrittr)
#' library(ggplot2)
#'
#' # Here we filter on-the-fly
#' # without loss of attributes.
#' # Notice mg/L rendering; this is actually part of an expression.
#' file %>%
#' decorate %>%
#' filter(!is.na(conc)) %>%
#' ggready %>%
#' ggplot(aes(x = time, y = conc, color = Heart)) +
#' geom_point()
#'
#' # By default ggready resolves everything that is decorated.
#' # But we can intervene to resolve selectively,
#' # And further intervene to 'ggready' selectively.
#' #
#' x <- file %>% decorate %>% filter(!is.na(conc))
#' x %>%
#' resolve(conc, time) %>% # Heart left unresolved!
#' ggready(conc, Heart, resolve = FALSE) %>% # time left unreadied!
#' ggplot(aes(x = time, y = conc, color = Heart)) + geom_point()
#'
#' # Still, all the labels were actually expressions:
#' x %>%
#' resolve(conc, time) %>%
#' ggready(conc, Heart, resolve = FALSE) %>%
#' decorations(conc, time, Heart)
ggready <- function(x, ...)UseMethod('ggready')
#' Prepare Data Frame for GGplot
#'
#' Prepares data.frame for ggplot. Appends
#' units to label using \code{\link{append_units}}
#' (passing \code{style = 'plotmath'} if \code{parse}
#' is true, else \code{style = 'plain'}).
#' Enforces classes 'decorated' and 'ggready'.
#'
#' @param x object
#' @param ... passed to \code{\link{append_units}}; may include unquoted column names
#' @param parse passed to \code{\link{append_units}}
#' @export
#' @importFrom spork as_spork
#' @importFrom spork plotmathToken
#' @return ggready decorated
#' @keywords internal
#' @family ggready
#' @examples
#' example(ggready)
ggready.data.frame <- function(
x, ... ,
parse = getOption('yamlet_ggready_parse',TRUE)
){
stopifnot(is.logical(parse), length(parse) == 1)
x <- append_units(x, ..., style = if(parse) 'plotmath' else 'plain')
class(x) <- union(c('ggready', 'decorated'), class(x))
x
}
#' Prepare Decorated Data Frame for GGplot
#'
#' Prepares decorated data.frame for ggplot. Calls
#' \code{\link{resolve}} and appends
#' units to label using \code{\link{append_units}}
#' (passing \code{style = 'plotmath'} if \code{parse}
#' is true, else \code{style = 'plain'}).
#' Enforces classes 'decorated' and 'ggready'.
#'
#' @param x object
#' @param ... passed to \code{\link{append_units}} and \code{\link{resolve}}; may include unquoted column names
#' @param parse passed to \code{\link{append_units}}
#' @param resolve whether to resolve guide attributes
#' @export
#' @importFrom spork as_spork
#' @importFrom spork plotmathToken
#' @return ggready decorated
#' @keywords internal
#' @family ggready
#' @examples
#' example(ggready)
ggready.decorated <- function(
x, ... ,
parse = getOption('yamlet_ggready_parse',TRUE),
resolve = TRUE
){
if(resolve) x <- resolve(x, ...)
stopifnot(is.logical(parse), length(parse) == 1)
x <- append_units(x, ..., style = if(parse) 'plotmath' else 'plain')
class(x) <- union('decorated', class(x))
# class(x) <- union('resolved', class(x))
class(x) <- union('ggready', class(x))
x
}
# As of 0.6.1, deconstructing class resolved.
# Prepare Resolved Data Frame for GGplot
#
# Prepares resolved data.frame for ggplot. Appends
# units to label using \code{\link{append_units}}
# (passing \code{style = 'plotmath'} if \code{parse}
# is true, else \code{style = 'plain'}).
# Enforces classes 'decorated','resolved', and 'ggready'.
# Unlike \code{\link{ggready.decorated}}, the
# method for class resolved does NOT call resolve(),
# and so does not second-guess any particular
# resolutions you may have already made.
#
# @param x object
# @param ... passed to \code{\link{append_units}} and \code{\link{resolve}}; may include unquoted column names
# @param parse passed to \code{\link{append_units}}
# @export
# @importFrom spork as_spork
# @importFrom spork plotmathToken
# @return ggready
# @keywords internal
# @family ggready
# @examples
# example(ggready)
# ggready.resolved <- function(
# x, ... ,
# parse = getOption('ggready_parse',TRUE)
# ){
# stopifnot(is.logical(parse), length(parse) == 1)
# x <- append_units(x, ..., style = if(parse) 'plotmath' else 'plain')
# class(x) <- union('decorated', class(x))
# class(x) <- union('resolved', class(x))
# class(x) <- union('ggready', class(x))
# x
# }
#' @export
spork::plotmathToken
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/ggready.R |
#' Capture Groups as Decorations
#'
#' Captures groups as decorations. Generic,
#' with method \code{\link{decorate_groups.data.frame}}
#'
#' @export
#' @keywords internal
#' @family decorate
#' @param x object of dispatch
#' @param ... passed
decorate_groups <- function(x, ...)UseMethod('decorate_groups')
#' Capture Groups as Decorations for Data Frame
#'
#' Captures groups as decorations for class 'data.frame'.
#' Creates a sequentially-valued integer attribute
#' with name 'groups' for each corresponding column
#' (after clearing all such existing designations).
#' It is an error if not all such columns are present.
#' Defaults to \code{groups(x)}. If no columns are
#' specified and x has no groups, x is returned
#' with any existing column-level 'groups' attributes
#' removed.
#'
#' @export
#' @family decorate
#' @importFrom dplyr group_vars groups
#' @param x data.frame
#' @param ... unquoted names of columns to assign as groups; defaults to \code{groups(x)}
#' @return same class as x
#' @examples
#' library(magrittr)
#' library(dplyr)
#' Theoph %>% decorate_groups(Subject, Time) %>% groups # nothing!
#' Theoph %>% decorate_groups(Subject, Time) %>% decorations # note well
#' Theoph %>% group_by(Subject, Time) %>% decorate_groups %>% decorations # same
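#' # A hedged sketch: calling decorate_groups() with no columns and no groups
#' # clears any existing column-level 'groups' attributes.
#' Theoph %>% decorate_groups(Subject, Time) %>% decorate_groups %>% decorations_groups # character(0)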
#'
decorate_groups.data.frame <- function(x, ...){
vars <- selected(x, ..., expand = FALSE)
if(!(length(vars))){vars <- group_vars(x)}
stopifnot(all(vars %in% names(x)))
x <- modify(x, groups = NULL)
index <- 0L
for(var in vars){
index <- index + 1L
attr(x[[var]], 'groups') <- index
}
x
}
#' Recover Groups Decorations
#'
#' Recovers groups decorations. Generic,
#' with method \code{\link{decorations_groups.data.frame}}
#'
#' @export
#' @keywords internal
#' @family decorate
#' @param x object of dispatch
#' @param ... passed
decorations_groups <- function(x, ...)UseMethod('decorations_groups')
#' Recover Groups Decorations for Data Frame
#'
#' Recovers groups decorations for class 'data.frame'.
#' Seeks a sequentially-valued integer attribute
#' with name 'groups' for each column, sorts these,
#' and returns a character vector like \code{group_vars(x)}.
#'
#' @export
#' @family decorate
#' @importFrom dplyr group_vars groups
#' @param x data.frame
#' @param ... ignored
#' @return character: names of groups columns
#' @examples
#' library(magrittr)
#' library(dplyr)
#' Theoph %<>% group_by(Subject, Time)
#' Theoph %>% group_vars
#' Theoph %>% decorations_groups # nothing!
#' Theoph %<>% decorate_groups
#' Theoph %>% decorations_groups # something!
#' Theoph %<>% ungroup
#' Theoph %>% group_vars # gone!
#' Theoph %<>% group_by(across(all_of(decorations_groups(.))))
#' Theoph %>% group_vars # recovered!
#' Theoph %<>% group_by_decorations
#' Theoph %>% group_vars # same
#' rm(Theoph)
#'
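#' # A hedged sketch: integer indices, not column position, set the order.
#' x <- data.frame(a = 1, b = 2)
#' attr(x$b, 'groups') <- 1L
#' attr(x$a, 'groups') <- 2L
#' decorations_groups(x) # 'b' before 'a'
#'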
decorations_groups.data.frame <- function(x, ...){
for(name in names(x)){
if(is.null(attr(x[[name]], 'groups'))){
x[[name]] <- NULL
}
}
nms <- names(x)
vals <- lapply(nms, function(nm)attr(x[[nm]], 'groups'))
stopifnot(length(nms) == length(vals))
for(index in seq_along(nms)){
nm <- nms[[index]]
val <- vals[[index]]
val <- type.convert(val, as.is = TRUE)
if(!(length(val) == 1))stop(nm, ': expecting length-one value but found length ', length(val))
if(!is.integer(val))stop(nm, ': expecting integer but found ', val)
    if(!is.finite(val))stop(nm, ': expecting finite value but found ', val)
    if(!(val > 0))stop(nm, ': expecting positive value but found ', val)
}
if(length(vals) == 0) return(as.character(vals))
# all length one, safe to unlist
vals <- unlist(vals)
stopifnot(length(vals) == length(nms))
stopifnot(is.integer(vals))
stopifnot(all(vals > 0))
names(vals) <- nms
vals <- sort(vals)
max <- max(vals)
missing <- setdiff(seq_len(max), vals)
if(length(missing))warning('missing indices: ', paste(missing, collapse = ', '))
dup <- anyDuplicated(vals)
if(dup)warning('duplicated indices, e.g. ', vals[[dup]], ': ', names(vals)[[dup]])
nms <- names(vals) # now sorted
#nms <- lapply(nms, sym)
nms
}
#' Group by Decorations
#'
#' Groups according to decorations. Generic,
#' with method \code{\link{group_by_decorations.data.frame}}
#'
#' @export
#' @keywords internal
#' @family decorate
#' @param x object of dispatch
#' @param ... passed
group_by_decorations <- function(x, ...)UseMethod('group_by_decorations')
#' Groups by Decorations for Data Frame
#'
#' Invokes \code{\link[dplyr]{group_by}}
#' using whatever groups are recovered by
#' \code{\link{decorations_groups}}.
#'
#' @export
#' @family decorate
#' @importFrom dplyr group_by across all_of
#' @importFrom rlang sym
#' @param x data.frame
#' @param ... ignored
#' @return grouped_df
#' @examples
#' library(magrittr)
#' library(dplyr)
#' Theoph %>% group_vars # nothing!
#' Theoph %<>% decorate_groups(Subject, Time)
#' Theoph %<>% group_by_decorations
#' Theoph %>% group_vars # something
#' rm(Theoph)
#'
group_by_decorations.data.frame <- function(x, ...){
gr <- decorations_groups(x)
x <- group_by(x, across(all_of(gr)))
x
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/groups.R |
#' Import and Export Documented Tables as CSV
#'
#' Imports or exports documented tables as comma-separated variable.
#' Generic, with methods that extend \code{\link[csv]{as.csv}}.
#' @param x object
#' @param ... passed arguments
#' @export
#' @return See methods.
#' @family io
#' @examples
#' # generate some decorated data
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(file)
#'
#' # get a temporary filepath
#' out <- file.path(tempdir(), 'out.csv')
#'
#' # save file using io_csv (returns filepath)
#' foo <- io_csv(x, out)
#' stopifnot(identical(out, foo))
#'
#' # read using this filepath
#' y <- io_csv(foo)
#'
#' # lossless round-trip (ignoring source attribute)
#' attr(x, 'source') <- NULL
#' attr(y, 'source') <- NULL
#' stopifnot(identical(x, y))
io_csv <- function(x, ...)UseMethod('io_csv')
#' Import Documented Table as CSV
#'
#' Imports a documented table as comma-separated variable.
#' A wrapper for \code{\link[csv]{as.csv.character}} that also
#' reads associated yamlet metadata, if present, and applies it
#' as attributes.
#'
#' @param x character file path; passed to \code{\link[csv]{as.csv.character}} (by method dispatch)
#' @param ext extension for metadata equivalent of x
#' @param meta explicit file path for metadata; if null, \code{ext} is appended to x after removing (final) extension, if any
#' @param gz logical; guessed by default from \code{x}; if TRUE, '.gz' extension enforced present for \code{x} and absent for default \code{meta}
# @param coerce whether to coerce to factor where guide is a list; passed to \code{\link{decorate.data.frame}}
#' @param ... passed to \code{\link[csv]{as.csv.character}} and to \code{\link{decorate}}
#' @export
#' @keywords internal
#' @importFrom csv as.csv
#' @family io
#' @family interface
#' @return data.frame
#' @examples
#' example(io_csv)
io_csv.character <- function(
x,
ext = getOption('yamlet_extension', '.yaml'),
meta = NULL,
gz = NULL,
#coerce = getOption('yamlet_coerce', FALSE),
...
){
hasGZ <- grepl(ignore.case = TRUE, pattern = '\\.gz$', x)
if(is.null(gz)) gz <- hasGZ
stopifnot(length(gz) == 1, is.logical(gz))
if(gz & !hasGZ) x <- paste0(x, '.gz')
d <- csv::as.csv(x, ...)
if(is.null(meta)){
meta <- x
if(gz) meta <- sub('\\.gz$', '', meta, ignore.case = TRUE)
meta <- sub('\\.[^.]*$', '', meta) # remove last dot and any trailing chars
meta <- paste0(meta, ext)
}
if(!file.exists(meta)){
message('did not find ', meta)
}else{
d <- decorate(
d,
meta = meta,
#coerce = coerce,
...)
}
d
}
#' Export Documented Table as CSV
#'
#' Exports a data.frame as comma-separated variable,
#' as well as a yamlet version of its decorations.
#' A wrapper for \code{\link[csv]{as.csv.data.frame}}.
#'
#' You should be able to supply exactly the connections you want for
#' \code{file} (the data file) and \code{meta} (the metadata file)
#' if \code{gz} is FALSE. If \code{gz} is NULL, it will be
#' guessed from file (TRUE for character ending with '.gz' or '.GZ').
#' If TRUE, character \code{file} will have '.gz' extension enforced,
#' but any '.gz' ('.GZ') will be stripped when calculating \code{meta}.
#'
#'
#' @param x data.frame
#' @param file passed to \code{\link[csv]{as.csv.data.frame}} (by method dispatch)
#' @param ext = extension for metadata equivalent of x
# coerce was passed to io_yamlet, which does not implement it
# @param coerce logical; whether to coerce factor levels to guide; alternatively, a key for the levels
#' @param meta passed as \code{con} to \code{\link{io_yamlet}}
#' @param gz logical; guessed by default from \code{x}; if TRUE, '.gz' extension enforced present for (character) \code{file} and absent for default \code{meta}
#' @param useBytes passed to \code{\link{io_yamlet}}
#' @param default_keys passed to \code{\link{io_yamlet}}
#' @param ... passed to \code{\link{as.csv}} and to \code{\link{io_yamlet}}
#' @export
#' @keywords internal
#' @importFrom csv as.csv
#' @family io
#' @family interface
#' @return invisible(file)
#' @examples
#' example(io_csv)
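#'
#' # A hedged sketch of gzip handling: a '.gz' data path is compressed,
#' # and the default metadata path drops '.gz' before adding the extension.
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' zip <- file.path(tempdir(), 'out.csv.gz')
#' io_csv(decorate(file), zip)
#' file.exists(file.path(tempdir(), 'out.yaml'))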
io_csv.data.frame <- function(
x,
file = '',
ext = getOption('yamlet_extension', '.yaml'),
# coerce = getOption("yamlet_coerce_decorations", FALSE),
meta = stdout(),
gz = NULL,
useBytes = FALSE,
default_keys = getOption(
'yamlet_default_keys',
list('label','guide')
),
...
){
hasGZ <- grepl(ignore.case = TRUE, pattern = '\\.gz$', file)
isSTDOUT <- identical(summary(meta), summary(stdout()))
if(is.null(gz)) gz <- hasGZ
stopifnot(length(gz) == 1, is.logical(gz))
stopifnot(length(file) == 1)
# Now gz is true or false.
# calculate file
if(is.character(file) & file != '' & gz & !hasGZ) file <- paste0(file, '.gz')
# calculate meta
if(is.character(file) & file != '' & isSTDOUT){
# i.e., user has supplied file as character, but has not supplied meta
meta <- file
if(gz) meta <- sub('\\.gz$', '', meta, ignore.case = TRUE)
meta <- sub('\\.[^.]*$', '', meta) # remove last dot and any trailing chars
meta <- paste0(meta, ext)
}
# if file is character and gz is true, convert it to connection
was <- file
if(is.character(file) & file != '' & gz) file <- gzfile(file)
csv::as.csv(x, file = file, ...)
io_yamlet(
x,
con = meta,
useBytes = useBytes,
default_keys = default_keys,
# coerce = coerce,
...
)
invisible(was)
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/io_csv.R |
#' Import and Export Resolved Tables
#'
#' Inter-converts between tables as comma-separated variable
#' and fully resolved data frames.
#' Generic, with character and data.frame methods that extend \code{\link{io_csv}}.
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return See methods.
#' @family io
#' @examples
#' example(io_res.character)
io_res <- function(x, ...)UseMethod('io_res')
#' Import Documented Table as Resolved
#'
#' Imports a documented table and resolves ambiguous guide elements.
#' A wrapper for \code{\link{io_csv.character}} that also
#' reads associated yamlet metadata, if present, and applies it
#' as attributes. Invokes \code{\link{resolve}} to resolve
#' ambiguity of 'guide' attribute, if possible. A short-cut
#' for \code{resolve(io_csv(x))}.
#'
#' @param x character file path; passed to \code{\link{io_csv.character}}
#' @param ext extension for metadata equivalent of x
#' @param ... passed to \code{\link{io_csv.character}}
#' @export
#' @keywords internal
#' @family io
#' @family interface
#' @return decorated
#' @examples
#' library(magrittr)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- io_csv(file) %>% resolve
#' y <- io_res(file)
#' identical(x, y)
io_res.character <- function(
x,
ext = getOption('yamlet_extension', '.yaml'),
...
){
d <- io_csv(x = x, ext = ext, ...)
d <- resolve(d)
d
}
#' Export Resolved Table
#'
#' Exports a documented table. "Desolves" attributes
#' to standard form, then writes data and metadata to storage.
#' A short-cut for \code{io_csv(desolve(x))}.
#'
#' @param x decorated; passed to \code{\link{io_csv.data.frame}}
#' @param file passed to \code{\link{io_csv.data.frame}}
#' @param ... passed to \code{\link{io_csv.character}} and \code{\link{desolve.decorated}}
#' @export
#' @keywords internal
#' @family io
#' @family interface
#' @return decorated (invisible)
#' @examples
#' library(magrittr)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- io_res(file)
#' tmp <- tempfile(fileext = '.csv')
#' io_res(x, tmp)
#' a <- io_csv(tmp, source = FALSE)
#' b <- io_csv(file, source = FALSE)
#' stopifnot(identical(a, b))
io_res.decorated <- function(
x,
file = '',
...
){
d <- desolve(x, ...)
d <- io_csv(d, file = file, ...)
invisible(d)
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/io_res.R |
#' Import and Export Documented Tables
#'
#' Imports or exports documented tables. Generic, with methods
#' that extend \code{\link{read.table}} and \code{\link{write.table}}.
#'@param x object
#'@param ... passed arguments
#'@export
#'@return See methods.
#'@family io
#'@examples
#' # generate some decorated data
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(file)
#'
#' # get a temporary filepath
#' out <- file.path(tempdir(), 'out.tab')
#'
#' # save file using io_table (returns filepath)
#' foo <- io_table(x, out)
#' stopifnot(identical(out, foo))
#'
#' # read using this filepath
#' y <- io_table(foo, as.is = TRUE)
#'
#' # lossless round-trip
#' attr(x, 'source') <- NULL
#' rownames(x) <- NULL
#' rownames(y) <- NULL
#' stopifnot(identical(x, y))
io_table <- function(x, ...)UseMethod('io_table')
#' Import Documented Table
#'
#' Imports a documented table.
#' A wrapper for read.table() that also
#' reads associated yamlet metadata, if present, and applies it
#' as attributes.
#'
#' @param x character file path; passed to \code{\link{read.table}}
#' @param ext extension for metadata equivalent of x
#' @param meta explicit file path for metadata
# @param coerce whether to coerce to factor where guide is a list; passed to \code{\link{decorate.data.frame}}
#' @param ... passed to \code{\link{read.table}} (if accepted) and to \code{\link{decorate}}
#' @export
#' @keywords internal
#' @family io
#' @family interface
#' @return data.frame
#' @examples
#' example(io_table)
io_table.character <- function(
x,
ext = getOption('yamlet_extension', '.yaml'),
meta = NULL,
#coerce = getOption('yamlet_coerce', FALSE),
...
){
args <- list(...)
args <- args[names(args) %in% names(formals(utils::read.table))]
args <- c(list(file = x), args)
d <- do.call(utils::read.table, args)
if(is.null(meta)){
meta <- sub('\\.[^.]*$','',x) # remove last dot and any trailing chars
meta <- paste0(meta, ext)
}
if(!file.exists(meta)){
message('did not find ', meta)
}else{
d <- decorate(
d,
meta = meta,
# coerce = coerce,
...
)
}
d
}
#' Export Documented Table
#'
#' Exports a data.frame and a yamlet version of its decorations.
#' A wrapper for \code{\link{write.table}}.
#'
#' @param x data.frame
#' @param file passed to \code{\link{write.table}}
#' @param ext = extension for metadata equivalent of x
# @ 0.6.1, dropping coerce (passed to io_yamlet, which does not implement)
# @param coerce logical; whether to coerce factor levels to guide; alternatively, a key for the levels
#' @param meta passed as \code{con} to \code{\link{io_yamlet}}
#' @param useBytes passed to \code{\link{io_yamlet}}
#' @param default_keys passed to \code{\link{io_yamlet}}
#' @param ... passed to \code{\link{write.table}} (if accepted) and to \code{\link{io_yamlet}}
#' @export
#' @keywords internal
#' @family io
#' @family interface
#' @return invisible(file)
#' @examples
#' example(io_table)
io_table.data.frame <- function(
x,
file = '',
ext = getOption('yamlet_extension', '.yaml'),
# coerce = getOption("yamlet_coerce_decorations", FALSE),
meta = stdout(),
useBytes = FALSE,
default_keys = getOption(
'yamlet_default_keys',
list('label','guide')
),
...
){
args <- list(...)
args <- args[names(args) %in% names(formals(utils::write.table))]
args <- c(list(x = x, file = file),args)
do.call(utils::write.table, args)
if(is.character(file)){
if(file != ''){
meta <- sub('\\.[^.]*$','',file) # remove last dot and any trailing chars
meta <- paste0(meta, ext)
}
}
io_yamlet(
x,
con = meta,
useBytes = useBytes,
default_keys = default_keys,
# coerce = coerce,
...
)
invisible(file)
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/io_table.R |
#' Import and Export Yamlet
#'
#' Imports and exports yamlet.
#' Generic, with a read method \code{\link{io_yamlet.character}}
#' for character and a write method \code{\link{io_yamlet.data.frame}}
#' for data.frame. See also \code{\link{io_yamlet.yamlet}}.
#'
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return see methods
#' @family io
#' @examples
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.yaml')
#' x <- io_yamlet(file)
#' tmp <- tempdir()
#' out <- file.path(tmp, 'tmp.yaml')
#'
#' # we can losslessly 'round-trip' x using to generic calls
#' identical(x, io_yamlet(io_yamlet(x, out)))
io_yamlet <- function(x, ...)UseMethod('io_yamlet')
#' Import Yamlet
#'
#' Imports yamlet.
#' Character method for \code{\link{io_yamlet}}.
#' Similar to \code{\link{read_yamlet}}, but only reads files.
#'
#' @param x file path for yamlet
#' @param default_keys character: default keys for the first n anonymous members of each element
#' @param ... passed to \code{\link{as_yamlet}}
#' @export
#' @keywords internal
#' @family io
#' @family interface
#' @seealso \code{\link{decorate.list}}
#' @return yamlet: a named list with default keys applied
#' @examples
#' example(io_yamlet)
io_yamlet.character <- function(
x,
default_keys = getOption(
'yamlet_default_keys',
list('label','guide')
),
...
){
stopifnot(file.exists(x))
as_yamlet(x, default_keys = default_keys, ...)
}
#' Export Yamlet
#'
#' Exports yamlet.
#' The archetype method for \code{\link{io_yamlet}}.
#' Similar to \code{\link{write_yamlet}} but returns (description of) \code{con}.
#'
#' @param x yamlet
#' @param con passed to \code{\link{writeLines}}
#' @param eol end-of-line; passed to \code{\link{writeLines}} as \code{sep}
#' @param useBytes passed to \code{\link{writeLines}}
#' @param default_keys character: default keys for the first n anonymous members of each element
#' @param fileEncoding if \code{con} is character, passed to \code{\link{file}} as \code{encoding}
#' @param ... passed to \code{\link{as.character.yamlet}}
#' @export
#' @keywords internal
#' @family io
#' @return invisible description of con: i.e., a file path
#' @examples
#' library(csv)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(file)
#' out <- file.path(tempdir(), 'out.yamlet')
#' io_yamlet(as_yamlet(x), out)
#' io_yamlet(out)
#'
io_yamlet.yamlet <- function(
x,
con = stdout(),
eol = "\n",
useBytes = FALSE,
default_keys = getOption(
'yamlet_default_keys',
list('label','guide')
),
fileEncoding = getOption('encoding'),
...
){
y <- as.character(x, default_keys = default_keys, ...)
if(is.character(con)){
con <- file(con, 'w', encoding = fileEncoding)
on.exit(close(con))
}
writeLines(text = y, con = con, sep = eol, useBytes = useBytes)
invisible(summary(con)$description)
}
#' Export Data Frame Attributes as Yamlet
#'
#' Writes data.frame attributes as yamlet.
#' The data.frame method for \code{\link{io_yamlet}}.
#' Similar to \code{\link{write_yamlet}}, but returns (description of) \code{con}.
#'
#' @param x data.frame
#' @param con passed to \code{\link{writeLines}}
#' @param eol end-of-line; passed to \code{\link{writeLines}} as \code{sep}
#' @param useBytes passed to \code{\link{writeLines}}
#' @param default_keys character: default keys for the first n anonymous members of each element
#' @param fileEncoding if \code{con} is character, passed to \code{\link{file}} as \code{encoding}
#' @param ... passed to \code{\link{as_yamlet}}
#' @export
#' @keywords internal
#' @family interface
#' @family io
#' @return invisible description of con: i.e., a file path
#' @examples
#' library(csv)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(file)
#' out <- file.path(tempdir(), 'out.yamlet')
#' io_yamlet(x, out)
#' io_yamlet(out)
#'
io_yamlet.data.frame <- function(
x,
con = stdout(),
eol = "\n",
useBytes = FALSE,
default_keys = getOption(
'yamlet_default_keys',
list('label','guide')
),
fileEncoding = getOption('encoding'),
...
){
x <- as_yamlet(x, ...)
y <- as.character(x, default_keys = default_keys, ...)
if(is.character(con)){
con <- file(con, 'w', encoding = fileEncoding)
on.exit(close(con))
}
writeLines(text = y, con = con, sep = eol, useBytes = useBytes)
invisible(summary(con)$description)
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/io_yamlet.R |
globalVariables(c('_yamlet_ymin','_yamlet_ymax','_yamlet_xmin','_yamlet_xmax'))
#' Enforce Isometry
#'
#' Enforces isometric plot design: aspect ratio of 1, identical
#' ranges for x and y axes. Can be used meaningfully with
#' \code{+ facet_wrap(scales = 'free' ...)}.
#' @return ggplot_isometric
#' @seealso ggplot_add.ggplot_isometric
#' @export
#' @keywords internal
#' @family isometric
#' @examples
#' library(magrittr)
#' library(ggplot2)
#' data.frame(x = 1:5, y = 3:7) %>%
#' ggplot(aes(x, y)) + geom_point() + isometric()
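#' # A hedged sketch with free-scale facets: each panel is made square
#' # with equal x and y ranges, computed per panel.
#' data.frame(
#'   x = c(1:5, 2:6),
#'   y = c(3:7, 0:4),
#'   g = rep(c('a','b'), each = 5)
#' ) %>%
#'   ggplot(aes(x, y)) +
#'   geom_point() +
#'   facet_wrap(~g, scales = 'free') +
#'   isometric()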
isometric <- function()structure(list(), class = 'ggplot_isometric')
#' Add Isometry to Plot Object
#'
#' Adds isometry to plot object.
#' @return gg
#' @seealso isometric
#' @export
#' @keywords internal
#' @importFrom ggplot2 ggplot_add theme geom_blank aes
#' @importFrom rlang sym
#' @importFrom dplyr group_by mutate
#' @method ggplot_add ggplot_isometric
#' @family isometric
#' @examples
#' example(isometric)
ggplot_add.ggplot_isometric <- function(object, plot, object_name){
# https://stackoverflow.com/questions/42588238/setting-individual-y-axis-limits-with-facet-wrap-not-with-scales-free-y
stopifnot('x' %in% names(plot$labels))
stopifnot('y' %in% names(plot$labels))
  wrap_facet <- plot$facet$params$facets
  grid_facet_rows <- names(plot$facet$params$rows)
  grid_facet_cols <- names(plot$facet$params$cols)
  grid_facets <- c(grid_facet_rows, grid_facet_cols)
  if(!is.null(wrap_facet)){
    plot$data <- group_by(plot$data, !!!wrap_facet)
  }
  if(!is.null(grid_facets)){
    plot$data <- group_by(plot$data, !!!lapply(grid_facets, sym))
  }
# calculate x,y min,max by group if any
# https://stackoverflow.com/questions/46131829/unquote-the-variable-name-on-the-right-side-of-mutate-function-in-dplyr
plot$data <- mutate(plot$data, `_yamlet_ymin` = min(na.rm = TRUE, !!rlang::sym(plot$labels$y)))
plot$data <- mutate(plot$data, `_yamlet_ymax` = max(na.rm = TRUE, !!rlang::sym(plot$labels$y)))
plot$data <- mutate(plot$data, `_yamlet_xmin` = min(na.rm = TRUE, !!rlang::sym(plot$labels$x)))
plot$data <- mutate(plot$data, `_yamlet_xmax` = max(na.rm = TRUE, !!rlang::sym(plot$labels$x)))
plot <- plot + geom_blank(aes(y = `_yamlet_xmin`))
plot <- plot + geom_blank(aes(y = `_yamlet_xmax`))
plot <- plot + geom_blank(aes(x = `_yamlet_ymin`))
plot <- plot + geom_blank(aes(x = `_yamlet_ymax`))
plot <- plot + theme(aspect.ratio = 1)
plot
}
#' Enforce Symmetry
#'
#' Enforces symmetric plot design: y axis includes opposites of the range of the data.
#' @return ggplot_symmetric
#' @seealso ggplot_add.ggplot_symmetric
#' @export
#' @keywords internal
#' @family isometric
#' @examples
#' library(magrittr)
#' library(ggplot2)
#' data.frame(x = 1:10, y = c(-2, 5, 0, -1, 4, 0, 1, -3, 3, 0)) %>%
#' ggplot(aes(x, y)) + geom_point() + symmetric()
#'
symmetric <- function()structure(list(), class = 'ggplot_symmetric')
#' Add Symmetry to Plot Object
#'
#' Adds y axis symmetry to plot object.
#' @return gg
#' @seealso symmetric
#' @export
#' @keywords internal
#' @importFrom ggplot2 ggplot_add expand_limits
#' @method ggplot_add ggplot_symmetric
#' @family isometric
#' @examples
#' example(symmetric)
ggplot_add.ggplot_symmetric <- function(object, plot, object_name){
nms <- names(plot$labels)
stopifnot('y' %in% nms)
yrange <- range(na.rm = TRUE, plot$data[,plot$labels$y])
plot <- plot + expand_limits(y = -yrange)
plot
}
#' @export
ggplot2::ggplot_add
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/isometric.R |
#' Make Title
#'
#' Make title attribute.
#' Generic, with methods
#' \code{\link{make_title.default}},
#' \code{\link{make_title.decorated}}, and
#' \code{\link{make_title.dvec}}.
#'
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family deprecated
#' @family labels
#' @return see methods
#' @examples
#' # see methods
make_title <- function(x, ...)UseMethod('make_title')
#' Make Title for Decorated
#'
#' Make title for class 'decorated'.
#' Limits scope to requested variables, and then calls
#' class-specific methods for each.
#'
#' @param x object
#' @param ... optional names of variables to limit scope
#' @export
#' @keywords internal
#' @family labels
#' @return decorated
#' @examples
#' library(magrittr)
#' x <- data.frame(length = 1:10)
#' x %>%
#' decorate('length: [ Length, mm ]') %>%
#' resolve %>%
#' decorations
#'
make_title.decorated <- function(
x,
...
){
vars <- selected(x, ...)
args <- named(...)
for(var in vars){
# pass only named arguments
x[[var]] <- do.call(make_title, c(list(x[[var]]),args))
}
x
}
#' Make Title by Default
#'
#' Make title by default.
#' To be specific: this is the default method
#' for the generic function \code{\link{make_title}},
#' and it actually does nothing. Individual methods
#' are written for those classes where 'make title'
#' behavior is expected.
#'
#' @param x object
#' @param ... ignored
#' @export
#' @keywords internal
#' @family labels
#' @return same as x
make_title.default <- function(
x,
...
){
return(x)
}
#' Make Title for Decorated Vector
#'
#' Makes title for decorated vectors.
#' If \code{with_title} is TRUE (the default, per option
#' \code{yamlet_with_title}) and x has a 'units' attribute,
#' a 'title' attribute is added. See also
#' \code{\link{drop_title}} for coordinated use.
#' @param x dvec
#' @param ... passed to \code{\link{append_units}}
#' @param with_title whether to add a title attribute
#' @export
#' @keywords internal
#' @family labels
#' @return dvec
#' @examples
#' library(magrittr)
#' 1 %>%
#' as_dvec(label = 'length', guide = 'mm') %>%
#' resolve
make_title.dvec <- function(
x,
...,
with_title = getOption('yamlet_with_title', TRUE)
){
stopifnot(length(with_title) == 1)
with_title <- as.logical(with_title)
  if(with_title && 'units' %in% names(attributes(x))){
x <- append_units(x, ..., target = 'title')
}
x
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/make_title.R |
#' Merge Decorated
#'
#' Preserves class for 'decorated' during merge().
#'
#'
#' @param x decorated
#' @param y passed to \code{\link{merge}}
#' @param ... passed to \code{\link{merge}}
#' @return class 'decorated' 'data.frame'
#' @export
#' @keywords internal
#' @family decorated
#' @examples
#' library(magrittr)
#' library(dplyr)
#' x <- data.frame(foo = 1, bar = 2)
#' x %<>% decorate('foo: [distance, mm]')
#' x %<>% decorate('bar: [height, mm]')
#' class(merge(x,x))
merge.decorated <- function(x, y, ...){
z <- NextMethod()
z <- as_decorated(z)
z
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/merge.R |
#' Try To Look Like Something Else
#'
#' Tries to make an object look like something else.
#' Generic, with method \code{\link{mimic.default}}
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return see methods
#' @family mimic
#' @examples
#' example(mimic.default)
mimic <- function(x, ...)UseMethod('mimic')
#' Try To Look Like Another Equal-length Variable
#'
#' Tries to mimic another vector or factor.
#' If meaningful and possible, x acquires
#' a guide attribute with labels from
#' corresponding values in y. Any codelist
#' attribute is removed. No guide is created
#' for zero-length x. If x is a factor,
#' unused levels are removed.
#' @param x vector-like
#' @param y vector-like, same length as x
#' @param ... passed to \code{\link{factor}}
#' @export
#' @importFrom stats setNames
#' @return same class as x
#' @family mimic
#' @family interface
#' @examples
#' library(magrittr)
#' library(dplyr)
#' let <- letters[1:5]
#' LET <- LETTERS[1:5]
#' int <- 0L:4L
#' num <- as.numeric(int)
#' fac <- factor(let)
#' css <- classified(let)
#'
#' # any of these can mimic any other
#' str(mimic(LET, let))
#' str(mimic(num, let))
#' str(mimic(let, num))
#'
#' # factors get a guide and classifieds get a named codelist
#' str(mimic(fac, int))
#' str(mimic(css, int))
#'
#' # int can 'pick up' the factor levels as guide names
#' str(mimic(int, css))
#'
#' # if two variables mean essentially the same thing,
#' # mimic lets you save space
#' x <- data.frame(id = 1:2, ID = c('A','B'))
#' x
#' x %<>% mutate(id = mimic(id, ID)) %>% select(-ID)
#' x
#' # ID still available, in principle:
#' x %>% as_decorated %>% resolve
mimic.default <- function(x, y = x, ...){
# clear targets
at <- attributes(x)
nms <- names(at)
at <- at[!nms %in% c('guide','codelist')]
attributes(x) <- at
# native-type levels
z <- factor(x, ...) # not as.factor(x), which retains unused levels if x is factor.
ind <- match(levels(z), z)
lev <- x[ind]
if(is.factor(x)) lev <- as.character(lev)
lev <- as.list(lev)
# y-type names
nms <- proxy(z, y)
# reduce
# i.e. if nms effectively the same as lev,
# don't use the names
# since comparison may contain NA,
# check NA match and character equality separately
# example: mimic(factor(NA, levels = NA, exclude = NULL), 1, exclude = NULL)
if(
all(
is.na(nms) == is.na(unlist(lev)) &
paste(nms) == paste(unlist(lev))
)
){
lev <- unlist(lev)
} else {
lev <- setNames(lev, nms)
}
attr(x, 'guide') <- lev
x
}
#' Try To Make Classified Look Like Another Equal-length Variable
#'
#' Tries to mimic another vector or factor for 'classified'.
#' See \code{\link{classified.default}}.
#' If meaningful and possible, x updates its
#' codelist attribute with labels from
#' corresponding values in y. Codes that don't occur
#' (i.e. unused levels) are removed from the codelist.
#'
#' @param x classified
#' @param y vector-like, same length as x
#' @param ... ignored arguments
#' @export
#' @keywords internal
#' @return classified
#' @family mimic
#' @examples
#' let <- letters[1:5]
#' LET <- LETTERS[1:5]
#' int <- 0L:4L
#' num <- as.numeric(int)
#' fac <- factor(let)
#' css <- classified(let)
#'
#' mimic(LET, let)
#' mimic(let, let)
#' mimic(num, let)
#' mimic(int, let)
#' mimic(fac, let)
#' mimic(css, let)
#' mimic(character(0))
#' mimic(numeric(0))
#' mimic(let, num)
#' mimic(fac, num)
#' mimic(css, num)
#' mimic(num, css)
#' mimic(let, css)
#'
#' util <- c('knife','fork','spoon')
#' util
#' factor(util)
#' classified(util)
#' mimic(util)
#' mimic(factor(util))
#' mimic(classified(util))
#'
#' x <- data.frame(let, LET)
#' library(dplyr)
#' library(magrittr)
#' x %<>% mutate(let = mimic(let, LET), LET = mimic(LET))
#' str(x)
#'
mimic.classified <- function(x, y = x, ...){
z <- NextMethod()
at <- attributes(z)
nms <- names(at)
nms[nms == 'guide'] <- 'codelist'
names(at) <- nms
attributes(z) <- at
z
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/mimic.R |
#' Modify Attributes of Indicated Components
#'
#' Modifies the attributes of indicated components.
#' Generic, with method \code{\link{modify.default}}.
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return see methods
#' @family modify
#' @examples
#' example(modify.default)
modify <- function(x, ...)UseMethod('modify')
#' Modify Attributes of Indicated Components by Default
#'
#' Modifies the attributes of each indicated element
#' (all elements by default). Tries to assign the value of an expression
#' to the supplied label, with existing attributes
#' and the object itself (.) available as arguments.
#' Gives a warning if the supplied label is considered reserved.
#' Intends to support anything with one or more non-empty names.
#'
#' The name of the component itself is available during assignments as
#' attribute 'name' (any pre-existing attribute 'name' is temporarily masked).
#' After all assignments are complete, the value of 'name' is enforced at the object level.
#' Thus, \code{modify} expressions can modify component names.
#'
#' As currently implemented, the expression is evaluated by
#' \code{\link[rlang]{eval_tidy}}, with attributes supplied as
#' the \code{data} argument. Thus, names in the expression
#' may be disambiguated, e.g. with \code{.data}. See examples.
#' @param x object
#' @param ... indicated columns, or name-value pairs
#' @param .reserved reserved labels that warn on assignment
#' @export
#' @importFrom rlang f_rhs eval_tidy quo_set_env quos new_data_mask
#' @return same class as x
#' @family modify
#' @family interface
#' @examples
#' library(magrittr)
#' library(dplyr)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(file)
#'
#' # modify selected columns
#' x %<>% modify(title = paste(label, '(', guide, ')'), time)
#' x %>% select(time, conc) %>% decorations
#'
#' # modify (almost) all columns
#' x %<>% modify(title = paste(label, '(', guide, ')'), -Subject)
#' x %>% select(time, conc) %>% decorations
#'
#' # use column itself
#' x %<>% modify(`defined values` = sum(!is.na(.)))
#' x %>% select(time) %>% decorations
#'
#' # rename column
#' x %<>% modify(time, name = label)
#' names(x)
#'
#' # warn if assignment fails
#' \dontrun{
#' \donttest{
#' x %<>% modify(title = foo, time)
#' }}
#'
#' # support lists
#' list(a = 1, b = 1:10, c = letters) %>%
#' modify(length = length(.), b:c)
#'
#' x %<>% select(Subject) %>% modify(label = NULL, `defined values` = NULL)
#'
#' # distinguish data and environment
#' location <- 'environment'
#' x %>% modify(where = location) %>% decorations
#' x %>% modify(where = .env$location) %>% decorations
#' \dontrun{
#' \donttest{
#' x %>% modify(where = .data$location) %>% decorations
#' }}
#' x %>% modify(location = 'attributes', where = location) %>% decorations
#' x %>% modify(location = 'attributes', where = .data$location) %>% decorations
#'
modify.default <- function(
x,
...,
.reserved = getOption(
'yamlet_modify_reserved',
c('class','levels','labels','names')
)
){
stopifnot(is.character(.reserved))
vars <- selected(x, ...)
mods <- quos(...)
mods <- mods[names(mods) != '']
reserved <- intersect(names(mods), .reserved)
if(length(reserved))warning('reserved: ', paste(reserved, collapse = ', '))
for(var in vars){
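    # temporarily expose the element's name as attribute 'name'
    # so that modify expressions can read or reassign it (see Details)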
was <- attr(x[[var]], 'name', exact = TRUE)
attr(x[[var]], 'name') <- var
for(mod in names(mods)){
expr <- mods[[mod]] # a quosure
attr <- attributes(x[[var]])
attr <- attr[names(attr) != ''] # see ?list2env
attr <- c(attr, list(. = x[[var]]))
# env <- list2env(attr)
# expr <- rlang::quo_set_env(quo = expr, env = env)
# mask <- new_data_mask(env)
tryCatch(
attr(x[[var]], mod) <- rlang::eval_tidy(expr, data = attr),
error = function(e)warning(var, ': ', e)
)
}
i <- match(var, names(x), nomatch = 0) # singular
stopifnot(i > 0)
names(x)[[i]] <- attr(x[[i]],'name')
attr(x[[i]], 'name') <- was
}
x
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/modify.R |
#' Evaluate Named Arguments
#'
#' Evaluates named arguments in \code{...}.
#'
#' @param ... possibly a mix of named and un-named arguments.
#' @export
#' @importFrom rlang eval_tidy quos
#' @importFrom dplyr select
#' @return named list
#' @family modify
#' @keywords internal
#' @examples
#' named(a = 1, b = 2 + 3, 4, 'd')
#'
named <- function(...){
args <- quos(...)
args <- args[names(args) != ""]
args <- lapply(args, rlang::eval_tidy)
args
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/named.R |
#' yamlet: Versatile Curation of Table Metadata
#'
#' The \pkg{yamlet} package supports storage and retrieval of table
#' metadata in yaml format. The most important function
#' is \code{\link{decorate.character}}: it lets you 'decorate'
#' your data by attaching attributes retrieved from a file in
#' yaml format. Typically your data will be of class
#' 'data.frame', but it could be anything that is essentially
#' a named list.
#'
#' @section Storage Format:
#'
#' Storage format for 'yamlet' is a text file containing
#' well-formed yaml. Technically, it is a map of sequences.
#' Though well formed, it need not be complete: attributes
#' or their names may be missing.
#'
#' In the simplest case, the data specification consists
#' of a list of column (item) names, followed by colons.
#' Perhaps you only have one column:
#'
#' \code{mpg:}
#'
#' or maybe several:
#'
#' \preformatted{
#' mpg:
#' cyl:
#' disp:
#' }
#'
#' If you know descriptive labels for your columns, provide them
#' (leave a space after the colon).
#'
#' \preformatted{
#' mpg: fuel economy
#' cyl: number of cylinders
#' disp: displacement
#' }
#'
#' If you know units, create a sequence with square brackets.
#'
#' \preformatted{
#' mpg: [ fuel economy, miles/gallon ]
#' cyl: number of cylinders
#' disp: [ displacement , in^3 ]
#' }
#'
#' If you are going to give units, you probably should give a
#' key first, since the first anonymous element is 'label'
#' by default, and the second is 'guide'. (A guide can be
#' units for numeric variables, factor levels/labels for
#' categorical variables, or a format string for dates, times,
#' and datetimes.) You could give just the units
#' but you would have to be specific:
#'
#' \code{mpg: [units: miles/gallon]}
#'
#' You can over-ride default keys by providing them in your data:
#'
#' \preformatted{
#' mpg: [units: miles/gallon]
#' _keys: [label, units]
#' }
#'
#' Notice that stored yamlet can be informationally defective
#' while syntactically correct. If you don't know an item
#' key at the time of data authoring, you can omit it:
#'
#' \code{race: [race, [white: 0, black: 1, 2, asian: 3 ]]}
#'
#' Or perhaps you know the key but not the value:
#'
#' \code{race: [race, [white: 0, black: 1, asian: 2, ? other ]]}
#'
#' Notice that \code{race} is factor-like; the factor sequence
#' is nested within the attribute sequence. Equivalently:
#'
#' \code{race: [label: race, guide: [white: 0, black: 1, asian: 2, ? other ]]}
#'
#' If you have a codelist of length one, you should still enclose it in brackets:
#'
#' \code{sex: [Sex, [ M ]]}
#'
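#' As a quick check, metadata like the above can be parsed directly.
#' A minimal sketch (assuming \code{as_yamlet} accepts literal yaml
#' text as well as file paths):
#'
#' \preformatted{
#' as_yamlet('mpg: [ fuel economy, miles/gallon ]')
#' }
#'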
#' To get started using yamlet, see \code{?as_yamlet.character} and
#' examples there. See also \code{?decorate} which adds yamlet
#' values to corresponding items in your data. See also \code{?print.decorated}
#' which uses label attributes, if present, as axis labels.
#'
#' Note: the quinidine and phenobarb datasets in the examples
#' are borrowed from \pkg{nlme} (\code{?Quinidine}, \code{?Phenobarb}),
#' with some reorganization.
#'
#' @docType package
#' @name yamlet
"_PACKAGE"
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/package.R |
#' Check Parseable as Units
#'
#' Checks if something is parseable as units.
#' Generic, with default method.
#'
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return see methods
#' @family parseable
#' @examples
#' example(is_parseable.character)
is_parseable <- function(x,...)UseMethod('is_parseable')
#' Check Something is Parseable as Units by Default
#'
#' Checks if something is parseable as units.
#' Tests against the udunits library in \pkg{units}.
#' See \code{\link[units]{as_units}}.
#' See also \code{\link[units]{install_unit}}
#' for finer control.
#'
#' @param x character
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @importFrom units as_units
#' @return logical
#' @family parseable
#' @family interface
#' @examples
#' is_parseable(c('kg/m2','kg/m^2','kg.m/s2','µg/L'))
#' is_parseable('foo')
#' library(units)
#' install_unit('foo')
#' is_parseable('foo')
#'
is_parseable.default <- function(x,...){
res <- sapply(x, .is_parseable.default, ..., USE.NAMES = FALSE)
res
}
.is_parseable.default <- function(x,...){
stopifnot(length(x) == 1)
res <- try(
silent = TRUE,
suppressWarnings(
suppressMessages(
as_units(x,...)
)
)
)
res <- !inherits(res, 'try-error')
res
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/parseable.R |
#' Reduce Something to its Simplest Sufficient Version
#'
#' Reduces something to its simplest sufficient version.
#' Generic, with method \code{\link{parsimonious.list}}.
#'
#' @param x object of dispatch
#' @param ... ignored
#' @keywords internal
#' @export
#' @family parsimonious
#' @examples
#' example(parsimonious.list)
parsimonious <- function(x, ...)UseMethod('parsimonious')
#' Reduce A List to its Simplest Sufficient Version
#'
#' Reduces a list to its simplest sufficient version.
#' Used internally with \code{\link[yaml]{yaml.load}}
#' as a custom handler for objects of type 'seq'.
#' Consider: \code{str(yaml.load('[a: 1, b: 2]'))}.
#' The result is technically correct. By default,
#' the parser returns a sequence of two maps.
#' Not reducible to a base type,
#' The sequence is an anonymous list.
#' The maps themselves are named lists.
#' In the special case that all elements are of length one,
#' this structure can be collapsed without semantic loss
#' to a named list.
#' More generally, if an anonymous list consists entirely
#' of length one members, those members which are
#' lists (but not already parsimonious lists)
#' can be replaced with their first elements;
#' the list becomes named if any of those elements
#' has a name. In that case, any elements without
#' names get the name '' (empty string).
#'
#' @param x object of dispatch
#' @param ... passed arguments
#' @keywords internal
#' @export
#' @family parsimonious
#' @return list
#' @examples
#' library(magrittr)
#' library(yaml)
#' # Parsimonious:
#' '[a: 1, b: 2]' %>% yaml.load
#' '[a: 1, b: 2]' %>% yaml.load(handlers = list(seq = parsimonious))
#'
#' # No effect on vector types:
#' '[1, 2]' %>% yaml.load
#' '[1, 2]' %>% yaml.load(handlers = list(seq = parsimonious))
#'
#' # Respects mixed-length vector types:
#' 'RACE: [ race, [white, black, asian ]]' %>% yaml.load
#' 'RACE: [ race, [white, black, asian ]]' %>% yaml.load(handlers = list(seq = parsimonious))
#'
#' # Anonymous elements get a blank name:
#' '[a: 1, 2]' %>% yaml.load %>% sapply(names)
#' '[a: 1, 2]' %>% yaml.load(handlers = list(seq = parsimonious)) %>% names
#'
#' # Also works for sequence of length one:
#' '[a: 1]' %>% yaml.load
#' '[a: 1]' %>% yaml.load(handlers = list(seq = parsimonious))
#'
#' # Works for NULL:
#' yaml.load('-')
#' yaml.load('-', handlers = list(seq = parsimonious))
#'
#' # Limited to first (most deeply nested) encounter:
#' '[[[a: 1]]]' %>% yaml.load
#' '[[[a: 1]]]' %>% yaml.load(handlers = list(seq = parsimonious))
#'
#' # Works for mixed-depth nesting:
#' 'ITEM: [ label: item, [ foo: bar, hey: baz ]]' %>% yaml.load
#' 'ITEM: [ label: item, [ foo: bar, hey: baz ]]' %>% yaml.load(handlers = list(seq = parsimonious))
parsimonious.list <- function(x, ...){
# are any of these lists parsimonious?
parsimonious <- sapply(x, inherits, 'parsimonious')
# are any members longer than one element?
plural <- sapply(x, length) > 1
# are any non-parsimonious members plural?
extensive <- any(!parsimonious & plural)
# is this list anonymous?
# anonymous <- is.null(names(x))
# do we have any lists?
isList <- sapply(x, is.list)
# do we have any NULL?
isNull <- sapply(x, is.null)
# targets are non-parsimonious lists
targets <- isList & !parsimonious
# unconditionally unclass parsimonious lists
x[parsimonious] <- lapply(x[parsimonious], unclass)
# reduce if any targets, and not extensive
  if(!extensive && any(targets)){
y <- list()
for(i in seq_along(x)){
if(targets[[i]]) {
y[[i]] <- x[[i]]
} else {
y[[i]] <- x[i]
}
}
x <- do.call(c, y)
}
# if there were no lists or null, then convert to vector
# 2022-02-21 @v8.2, this simplification seems not to support
# length-one un-named codelists, such as 'sex: [ Sex, [M] ]'
# if(!any(isList) & !any(isNull) & !any(parsimonious)) x <- unlist(x)
class(x) <- union('parsimonious', class(x))
x
}
#' Reduce by Default to Simplest Sufficient Version
#'
#' Reduces by default to simplest sufficient version.
#' This is a companion to \code{\link{parsimonious.list}}
#' and currently returns an unmodified object.
#'
#' @param x object for which no specific parsimonious method exists
#' @param ... ignored
#' @export
#' @keywords internal
#' @family parsimonious
parsimonious.default <- function(x, ...){
x
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/parsimonious.R |
#' Calculate Substitute Values
#'
#' Calculates substitute values.
#' Generic, with method \code{\link{proxy.factor}}
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family proxy
#' @return see methods
#' @examples
#' example(proxy.factor)
proxy <- function(x,...)UseMethod('proxy')
#' Calculate Substitute Values for Factor Levels
#'
#' Calculates substitute values for factor levels.
#' If x and y have same length and there is a
#' one-to-one correspondence of their elements,
#' then unique elements of y are returned in
#' an order corresponding to levels(x).
#' @param x factor
#' @param y factor or vector
#' @param ... ignored
#' @export
#' @keywords internal
#' @importFrom dplyr distinct
#' @family proxy
#' @return same class as y
#' @examples
#' proxy(factor(1:3), letters[1:3])
#' proxy(factor(1:3), factor(letters[1:3]))
#' proxy(factor(letters[4:6]))
#' foo <- classified(letters[1:5])
#' as_yamlet(attributes(foo))
#' foo <- classified(foo, labels = proxy(foo))
#' as_yamlet(attributes(foo))
proxy.factor <- function(x, y = as.numeric(x), ...){
if(length(x) != length(y))stop('x and y must have same length')
# if(!(is.factor(y)|is.vector(y)))stop('y must be vector or factor')
if(!(is.atomic(y)))stop('y must be atomic')
if(!(nrow(distinct(data.frame(x,y))) == length(unique(x)))){
stop('one-to-one correspondence of x and y not detected')
}
vals <- y[match(levels(x), x)]
vals
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/proxy.R |
#' Resolve Guide
#'
#' Resolves implicit usage of default key 'guide' to explicit usage.
#' Generic, with methods
#' \code{\link{resolve.decorated}} and
#' \code{\link{resolve.dvec}}.
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return see methods
#' @family resolve
#' @examples
#' example(resolve.decorated)
resolve <- function(x, ...)UseMethod('resolve')
#' Resolve Guide for Decorated
#'
#' Resolves implicit usage of default key 'guide' to
#' explicit usage for decorated class.
#' Calls
#' \code{\link{explicit_guide}},
#' \code{\link{classified}}, and
#' \code{\link{make_title}}.
#' @param x decorated
#' @param ... passed to \code{\link{explicit_guide}}, \code{\link{classified}}, and \code{\link{make_title}}
#' @export
#' @return decorated
#' @family resolve
#' @family interface
#' @examples
#' # generate some decorated data
#' library(magrittr)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(file)
#' x %>% decorations(Age, glyco)
#'
#' # resolve everything, and show selected decorations
#' x %>% resolve %>% decorations(Age, glyco)
#'
#' # resolve selectively, and show selected decorations
#' x %>% resolve(glyco) %>% decorations(Age, glyco)
resolve.decorated <- function(x, ...){
x <- explicit_guide(x, ...)
x <- classified(x, ...)
x <- make_title(x, ...)
x
}
#' Resolve Guide for Decorated Vector
#'
#' Resolves implicit usage of default key 'guide' to
#' explicit usage for class dvec.
#' Simply calls \code{\link{explicit_guide}}
#' followed by \code{\link{classified}} if x has a codelist attribute.
#' If option \code{yamlet_with_title} is not NULL, and if 'units'
#' present, label and units will be concatenated by default to create
#' a title attribute.
#' @param x dvec
#' @param ... passed to \code{\link{explicit_guide}}, \code{\link{classified}}, and \code{\link{make_title}}
#' @export
#' @keywords internal
#' @return dvec or classified
#' @family resolve
#' @family dvec
#' @examples
#' library(magrittr)
#' x <- as_dvec(1:3, guide = list(a = 1, b = 2, c = 3))
#' x %>% str
#' x %>% classified %>% str
#' x %>% explicit_guide %>% classified %>% str
#' x %>% resolve %>% str
resolve.dvec <- function(x, ...){
x <- explicit_guide(x, ...)
if('codelist' %in% names(attributes(x))){
x <- classified(x, ...)
}
if('units' %in% names(attributes(x))){
x <- make_title(x, ...)
}
x
}
#' Resolve Classified
#'
#' Resolves classified.
#' A non-operation since classified already has an explicit guide.
#' Useful to make resolve() idempotent for 'dvec'.
#' @param x classified
#' @param ... ignored
#' @export
#' @keywords internal
#' @return classified
#' @family resolve
#' @family dvec
#' @examples
#' library(magrittr)
#' x <- as_dvec(1:3, guide = list(a = 1, b = 2, c = 3))
#' x %>% str
#' x %>% classified %>% str
#' x %>% explicit_guide %>% classified %>% str
#' x %>% resolve %>% str
#' # idempotent:
#' x %>% resolve %>% resolve %>% str
resolve.classified <- function(x, ...){
return(x)
}
#' Resolve Data Frame
#'
#' Resolves data.frame.
#' Coerces first using as_decorated().
#'
#' @param x data.frame
#' @param ... ignored
#' @export
#' @keywords internal
#' @return decorated
#' @family resolve
#' @examples
#' head(resolve(Theoph))
resolve.data.frame <- function(x, ...){
#resolve(as_decorated(x, ...), ...)
# @ 1.0.3: above, first use of dots
# can pass anonymous args to decorate.list,
# which apparently understands one of them
# as 'meta' and issues an error
resolve(as_decorated(x), ...)
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/resolve.R |
#' Render Scripted Attributes of Indicated Components
#'
#' Renders the scripted attributes of indicated components.
#' Generic, with method \code{\link{scripted.default}}.
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return see methods
#' @family scripted
#' @seealso modify.default
#' @seealso as_spork
#' @examples
#' example(scripted.default)
scripted <- function(x, ...)UseMethod('scripted')
#' Render Scripted Attributes of Indicated Components by Default
#'
#' Modifies specific attributes of each indicated element
#' (all elements by default).
#'
#' The goal here is to render labels and units (where present)
#' in a way that supports subscripts and superscripts
#' for both plots and tables in either html or latex contexts.
#'
#' The current implementation writes an 'expression' attribute
#' to support figure labels and a 'title' attribute to support
#' tables. \code{\link{print.decorated_ggplot}} will attempt
#' to honor the expression attribute if it exists.
#' \code{\link[tablet]{tablet.data.frame}} will attempt to honor
#' the title attribute if it exists (see Details there).
#' An attempt is made to guess the output format (html or latex).
#'
#' In addition to the 'title' and 'expression' attributes, scripted() writes
#' a 'plotmath' attribute to store plotmath versions of factor levels,
#' where present. \code{\link{print.decorated_ggplot}} should prefer
#' these over their latex and html counterparts. Furthermore,
#' factor levels (and codelists, where present) are converted
#' to their latex or html equivalents. None of this happens
#' if a 'plotmath' attribute already exists, thus preventing
#' the same variable from being accidentally transformed twice.
#'
#' To flexibly support latex, html, and plotmath, this function
#' expects column labels and units to be encoded in "spork" syntax.
#' See \code{\link[spork]{as_spork}} for details and examples.
#' Briefly, "_" precedes a subscript, "^" precedes a superscript,
#' and "." is used to force the termination of either a
#' superscript or a subscript where necessary. For best results,
#' units should be written using *, /, and ^; e.g. "kg*m^2/s^2"
#' not "kg m2 s-2" (although both are valid:
#' see \code{\link{is_parseable}}). A literal backslash followed by "n"
#' represents a newline. Greek letters are represented by their names,
#' except where names are enclosed in backticks.
#'
#'
#' \code{scripted()} always calls \code{resolve()} for the indicated
#' columns, to make units present where appropriate.
#'
#' @param x object
#' @param ... indicated columns, or name-value pairs; passed to \code{\link{resolve}} and \code{\link{selected}}
#' @param open character to precede units
#' @param close character to follow units
#' @param format one of 'latex' or 'html'
#' @export
#' @importFrom knitr is_latex_output
#' @importFrom spork as_html as_latex as_plotmath concatenate htmlToken as_spork
#' @return 'scripted', a superclass of x
#' @family scripted
#' @family interface
#' @examples
#' library(magrittr)
#' library(ggplot2)
#' x <- data.frame(time = 1:10, work = (1:10)^1.5)
#' x %<>% decorate('
#' time: [ Time_elapsed, h ]
#' work: [ Work_total_observed, kg*m^2/s^2 ]
#' ')
#'
#' x %>% decorations
#' x %>% ggplot(aes(time, work)) + geom_point()
#' x %>% scripted %>% ggplot(aes(time, work)) + geom_point()
#' x %>% scripted(format = 'html') %$% work %>% attr('title')
#' testthat::expect_equal(scripted(x), scripted(scripted(x)))
scripted.default <- function(
x,
...,
open = getOption("yamlet_append_units_open", " ("),
close = getOption("yamlet_append_units_close", ")"),
format = getOption("yamlet_format", ifelse(knitr::is_latex_output(), 'latex','html'))
){
stopifnot(
is.character(format),
length(format) == 1,
format %in% c('latex','html')
)
stopifnot(is.character(open) | is.null(open))
stopifnot(is.character(close) | is.null(close))
stopifnot(length(open) %in% 0:1)
stopifnot(length(close) %in% 0:1)
x <- resolve(x, ...) # removes html or latex tail where present
vars <- selected(x, ...)
for(var in vars){
label <- attr(x[[var]], 'label')
units <- attr(x[[var]], 'units')
# render factor levels where present and untouched
# for idempotency, use presence of plotmath attribute
# as evidence of prior rendering
if(
!is.null(levels(x[[var]])) &
is.null(attr(x[[var]], 'plotmath'))
){
attr(x[[var]], 'plotmath') <- as_plotmath(as_spork(levels(x[[var]])))
if(format == 'latex'){
levels(x[[var]]) <- as_latex(as_spork(levels(x[[var]])))
class(x[[var]]) <- union(class(x[[var]]), 'latex')
}
if(format == 'html'){
levels(x[[var]]) <- as_html(as_spork(levels(x[[var]])))
class(x[[var]]) <- union(class(x[[var]]), 'html')
}
# need to maintain internal consistency of 'classified'
# ideally, there should be a method for this
if(inherits(x[[var]], 'classified')){
for(i in seq_along(levels(x[[var]]))){
attr(x[[var]], 'codelist')[[i]] <- levels(x[[var]])[[i]]
}
}
}
# for idempotency, ensure tail is restored for factor-like vars
if(!is.null(levels(x[[var]]))){
if(format == 'latex'){
class(x[[var]]) <- union(class(x[[var]]), 'latex')
} else {
class(x[[var]]) <- union(class(x[[var]]), 'html')
}
}
# explicitly spork-terminate all sub, super in label
if(!is.null(label)){
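    # count unescaped '.', '_', and '^' in the spork label:
    # '_' and '^' each open a script, and an unescaped '.' closes one,
    # so append enough trailing dots to close any that remain open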
dots <- gsub('\\\\.','',label)
dots <- gsub('[^.]', '', dots)
dots <- nchar(dots)
subs <- gsub('\\\\_','',label)
subs <- gsub('[^_]', '', subs)
subs <- nchar(subs)
sups <- gsub('\\\\^','',label)
sups <- gsub('[^^]', '', sups)
sups <- nchar(sups)
need <- subs + sups - dots
need <- max(0, need)
tail <- rep('.', need)
tail <- paste(tail, collapse = '')
label <- paste0(label, tail)
}
# https://github.com/r-quantities/units/issues/221
# explicitly spork-terminate all superscripts
# immediately following the integer
if(!is.null(units)){
units <- gsub('(\\^[-]?[0-9]+)','\\1.', units)
}
if(!is.null(units)) units <- c(open, units, close) # nulls disappear!
result <- c(label, units) # nulls disappear!
usable <- result
if(is.null(result)) usable <- ''
usable <- paste(usable, collapse = '') # new
usable <- as_spork(usable)
if(format == 'latex'){
# title = concatenate(spork::as_latex(usable))
title = spork::as_latex(usable)
}
if(format == 'html'){
# title = concatenate(spork::as_html(usable))
title = spork::as_html(usable)
}
# plotm = concatenate(as_plotmath(usable))
plotm = as_plotmath(usable)
# token not null, title not null, plotm not null
if(!is.null(result)) attr(x[[var]], 'title') <- title # ready to use
plotm <- as.expression(plotm)
attr(plotm, 'srcref') <- NULL
attr(plotm, 'srcfile') <- NULL
attr(plotm, 'wholeSrcref') <- NULL
if(!is.null(result)) attr(x[[var]], 'expression') <- plotm #as.expression(plotm)
}
#class(x) <- c('scripted', class(x))
x
}
# Render Scripted Attributes of Scripted Object
#
# Renders the scripted attributes of indicated components.
# As currently implemented, scripted() intends to be idempotent.
# To call scripted() on a scripted object is a non-operation.
# @param x object
# @param ... passed arguments
# @export
# @keywords internal
# @return scripted
# @family scripted
# scripted.scripted <- function(x, ...)x
# @importFrom spork htmlToken
# @export
# spork::htmlToken
# @importFrom spork as_html
# @export
# spork::as_html
# @importFrom spork as_latex
# @export
# spork::as_latex
# @importFrom spork as_plotmath
# @export
# spork::as_plotmath
# @importFrom spork concatenate
# @export
# spork::concatenate
# @importFrom spork as_spork
# @export
# spork::as_spork
# @importFrom dplyr group_vars
# @export
# dplyr::group_vars
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/scripted.R |
#' Identify Selected Names
#'
#' Identifies Selected Names.
#' Generic, with default method.
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return see methods
#' @family modify
#' @examples
#' example(modify.data.frame)
selected <- function(x, ...)UseMethod('selected')
#' Identify Selected Names by Default
#'
#' Evaluates un-named arguments in \code{...} using
#' \code{\link[dplyr]{select}} rules, returning
#' explicit names in x. Returns all (non-empty) names
#' by default (if no dots supplied).
#'
#' @param x object
#' @param ... to \code{\link[dplyr]{select}}
#' @param expand if true, selecting nothing expands to selecting everything
#' @export
#' @keywords internal
#' @importFrom rlang f_rhs eval_tidy quo_set_env quos
#' @importFrom dplyr select
#' @return character: names in x
#' @family modify
#' @family interface
#' @examples
#' library(magrittr)
#' list(a = 1, b = 1:10, c = letters) %>%
#' selected(b:c)
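#'
#' # with no selection, all non-empty names are returned
#' list(a = 1, b = 1:10, c = letters) %>% selected()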
#'
selected.default <- function(
x,
...,
expand = TRUE
){
stopifnot(length(expand) == 1, is.logical(expand))
args <- quos(...)
vars <- args[names(args) == ""]
y <- names(x) # should work if x has names
y <- y[y != ''] # ignore empty names
d <- lapply(y, function(i)character())
names(d) <- y # reuse names
d <- data.frame(
d,
check.names = FALSE,
fix.empty.names = FALSE
)# dummy data.frame for dplyr
vars <- names(select(d,!!!vars))
if(length(vars) == 0 && expand) vars <- y
# vars <- intersect(vars, names(x))
class(vars) <- union('selected', class(vars))
return(vars)
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/selected.R |
#' Choose Singular Expression
#'
#' For a list of expressions evaluated within data,
#' this returns the index of the one expression that evaluates
#' to an all-true vector (after coercing NA to FALSE).
#' Returns 0 if no expressions succeed, and NA_integer_ if
#' more than one succeed. Returns -1 if the list is empty.
#' Expressions that fail to evaluate, or that do not evaluate
#' to logical, are treated as all-FALSE.
#'
#' @param x list of expressions
#' @param data something coercible to a data environment (typically data.frame)
#' @param ... ignored
#' @export
#' @keywords internal
#' @return integer, possibly NA
#' @family promote
#' @examples
#' meta <- system.file(package = 'yamlet', 'extdata','phenobarb.csv')
#' x <- read.csv(meta)
#' singularity(
#' data = x,
#' list(
#' "event == 'conc'",
#' "event == 'dose'",
#' "event == 'metabolite'"
#' )
#' )
#' singularity(
#' data = x[x$event == 'dose',],
#' list(
#' "event == 'conc'",
#' "event == 'dose'",
#' "event == 'metabolite'"
#' )
#' )
#' singularity(
#' data = x[x$event == 'dose',],
#' list(
#' "time >= 0",
#' "event == 'dose'"
#' )
#' )
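#'
#' # an empty list of expressions is flagged with -1:
#' singularity(data = x, list())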
singularity <- function(x, data, ...){
if(!length(x))return(-1)
#exprs <- lapply(x, function(i)parse(text = i))
#vals <- lapply(exprs, function(i)try(eval(i, envir = data, enclos = NULL)))
vals <- lapply(
x, function(i)try(
silent = TRUE,
eval(
parse(text = i),
envir = data,
enclos = NULL
)
)
)
defined <- lapply(vals, function(i){ # must return logical
if(inherits(i, 'try-error')) return(FALSE) # i <- FALSE
if(!is.logical(i)) return(FALSE) # i <- as.logical(i)
i[is.na(i)] <- FALSE
i
})
condensed <- sapply(defined, all)
res <- sum(condensed)
if(res == 0) return(as.integer(res))
if(res > 1) return(NA_integer_)
# res = 1
res <- seq_along(condensed)[condensed]
stopifnot(length(res) == 1)
res
}
#' Promote Something
#'
#' Promotes something. Generic, with default method.
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @return see methods
#' @family promote
#' @examples
#' example(promote.data.frame)
promote <- function(x, ...)UseMethod('promote')
#' Promote by Default
#'
#' Promotes by default. Currently a non-operation.
#'
#' @param x object
#' @param ... other arguments
#' @export
#' @keywords internal
promote.default <- function(x, ...)x
#' Promote a List
#'
#' Promotes attributes of list-like objects.
#' For the plural attributes of each element,
#' any singularity is promoted to the sole attribute.
#' Reserved attributes are untouched.
#' Methods \code{\link{filter.decorated}} and \code{\link{[.decorated}}
#' automatically attempt to promote attributes for all elements.
#' @param x list, or list-like
#' @param ... indicated elements
#' @param .reserved attributes to leave untouched
#' @export
#' @keywords internal
#' @importFrom rlang f_rhs eval_tidy quo_set_env quos
#' @return same class as x
#' @family promote
#' @family interface
#' @seealso filter.decorated [.decorated
#' @examples
#' library(magrittr)
#' file <- system.file(package = 'yamlet', 'extdata','phenobarb.csv')
#' x <- file %>% decorate
#'
#' # Note that there are two elements each for value label and value guide.
#' x %>% decorations(event, value)
#'
#' # After filtering, only one set is relevant.
#' # promote() identifies and retains such.
#' x %>% dplyr:::filter.data.frame(event == 'dose') %>% decorations(value)
#' x %>% dplyr:::filter.data.frame(event == 'dose') %>% promote %>% decorations(value)
#'
#' # If for some reason we do a partial promote, value attributes are unaffected.
#' # Nonsense example:
#' x %>% dplyr:::filter.data.frame(event == 'dose') %>% promote(event) %>% decorations(value)
#'
#' # However, the 'decorated' method for filter() calls promote() internally.
#' x %>% filter(event == 'dose') %>% decorations(value)
#'
promote.list <- function(
x,
...,
.reserved = getOption(
'yamlet_promote_reserved',
c('class','levels','labels','names')
)
){
stopifnot(is.character(.reserved))
vars <- selected(x, ...)
for(var in vars){
attr <- attributes(x[[var]])
nms <- names(attr)
nms <- nms[nms != '']
nms <- setdiff(nms, .reserved)
for(nm in nms){
this <- attr[[nm]]
cond <- names(this)
if(
!is.null(cond) & # only meaningful for attributes whose values have names
!any(cond %in% c('TRUE','FALSE')) # TRUE and FALSE are unconditional; @1.0.1 to ignore yaml::yaml.load("x: [answer, [ No: 0, Yes: 1]]")
){
verdict <- singularity(cond, x)
if(!is.na(verdict)){
if(verdict > 0){
# verdict is now the index of the singularity
# promote that element to attribute, removing names
this <- this[[verdict]]
names(this) <- NULL
# restore this value to x
attr(x[[var]], nm) <- this
}
}
}
}
}
x
}
#' Promote Data Frame
#'
#' Promotes a data.frame by calling \code{\link{promote.list}}.
#'
#' @param x data.frame
#' @param ... passed arguments
#' @export
#' @return same class as x
#' @keywords internal
#' @family promote
#' @examples
#' example(promote.list)
promote.data.frame <- function(x, ...)promote.list(x, ...)
#' Filter Decorated
#'
#' Filters a decorated data.frame.
#' After a filter operation, promote()
#' is called to see if ambiguous conditional
#' attributes can be improved.
#' @param .data passed to \code{\link[dplyr]{filter}}
#' @param ... passed to \code{\link[dplyr]{filter}}
#' @param .preserve passed to \code{\link[dplyr]{filter}}
#' @param .promote whether to auto-promote plural attributes
#' @importFrom dplyr filter
#' @export
#' @return decorated
#' @family promote
#' @keywords internal
#' @examples
#' library(magrittr)
#' file <- system.file(package = 'yamlet', 'extdata','phenobarb.csv')
#' x <- file %>% decorate
#'
#' # Note that there are two elements each for value label and value guide.
#' x %>% decorations(event, value)
#'
#' # Filtering promotes the relevant conditional attributes automatically.
#' x %>% filter(event == 'dose') %>% decorations(value)
#' x %>% filter(event == 'conc') %>% decorations(value)
#'
filter.decorated <- function(
.data,
...,
.preserve = FALSE,
.promote = getOption('yamlet_promote', TRUE)
){
y <- NextMethod()
if(.promote) y <- promote(y)
y <- as_decorated(y)
y
}
#' Subset Decorated
#'
#' Subsets decorated. Calls \code{\link{promote}}
#' internally to improve ambiguous conditional
#' attributes where possible.
#'
#' @param x decorated
#' @param ... passed to next method
#' @param .promote whether to auto-promote plural attributes
#' @return decorated (unless dimension is dropped)
#' @export
#' @keywords internal
#' @family promote
#' @family decorated
#' @examples
#' library(magrittr)
#' file <- system.file(package = 'yamlet', 'extdata','phenobarb.csv')
#' x <- file %>% decorate
#' x %>% decorations(event, value)
#'
#' # Subsetting promotes automatically.
#' x[x$event == 'dose',] %>% decorations(event, value)
#' x[x$event == 'conc',] %>% decorations(event, value)
#'
#' # Dimension may be dropped
#' x[1,1]
#'
#' # Conventional subsetting
#' a <- as_decorated(as.list(setNames(letters[1:3], LETTERS[1:3])))
#' attr(a$B, 'label') <- 'foo'
#' a <- a[1:3]
#' attributes(a)
#'
`[.decorated` <- function(
x,
...,
.promote = getOption('yamlet_promote', TRUE)
){
# stopifnot(inherits(x, 'data.frame'))
y <- NextMethod()
# y <- decorate(y, data.frame(x))
# decorate(y, data.frame(x)) is
# problematic, since decorate calls
# decorations, which calls this method
# decorate -> decorate.list -> as_yamlet -> decorations
# At this point, we may have lost, rows, columns, both,
# or been 'dropped' to one column.
# We wish to restore column-level attributes dropped by subset.
nms <- intersect(names(x), names(y))
for(nm in nms){
xat <- attributes(x[[nm]])
if('names' %in% names(xat)){
have <- length(xat[['names']])
need <- length(y[[nm]])
if (have != need){
warning('not updating names')
xat[['names']] <- NULL
}
}
attributes(y[[nm]]) <- xat
}
if(.promote) y <- promote(y)
if(is.list(y)) y <- as_decorated(y)
y
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/singularity.R |
#' Coerce Symbolic Units to Spork
#'
#' Coerces symbolic units to spork by coercing first
#' to unit_string.
#' @param x symbolic_units; see \code{\link[units]{as_units}}
#' @param canonical whether to return the form having all positive exponents
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family spork
#' @importFrom spork as_spork
#' @return spork
#' @examples
#' library(units)
#' library(spork)
#' x <- as_units('kg.m/s^2')
#' names(attributes(x))
#' y <- attr(x,'units')
#' class(y)
#' as.character(y)
#' as.character(attr(x, 'units'))
#' as_spork(y)
#' library(magrittr)
#' 'kg.m^2/s^2' %>% as_units %>% attr('units') %>% as_spork
#' 'kg.m2 s-2' %>% as_units %>% attr('units') %>% as_spork
#' 'kg.m^2/s^2' %>% as_units %>% attr('units') %>% as_spork(FALSE)
#' 'kg.m2 s-2' %>% as_units %>% attr('units') %>% as_spork(FALSE)
as_spork.symbolic_units <- function(x, canonical = TRUE, ...){
y <- as_unit_string(x, canonical = canonical, ...)
y <- as_spork(y, ...)
y
}
#' Coerce Units to Spork
#'
#' Coerces units to spork by coercing first
#' to unit_string.
#' @param x units; see \code{\link[units]{as_units}}
#' @param canonical whether to return the form having all positive exponents
#' @param ... passed arguments
#' @export
#' @importFrom spork as_spork
#' @keywords internal
#' @family spork
#' @return spork
#' @examples
#' library(units)
#' library(magrittr)
#' library(spork)
#' 'kg.m^2/s^2' %>% as_units %>% as_spork
#' 'kg.m2 s-2' %>% as_units %>% as_spork
#' 'kg.m^2/s^2' %>% as_units %>% as_spork(FALSE)
#' 'kg.m2 s-2' %>% as_units %>% as_spork(FALSE)
as_spork.units <- function(x, canonical = TRUE, ...){
y <- as_unit_string(x, canonical = canonical, ...)
y <- as_spork(y, ...)
y
}
#' Coerce Unit String to Spork
#'
#' Coerces unit string to spork. A literal dot
#' means different things in spork vs. units,
#' and there may be some other subtleties as well.
#' Unit string is character that \code{\link{is_parseable}}.
#' @param x unit_string
#' @param ... ignored arguments
#' @export
#' @keywords internal
#' @importFrom spork as_spork
#' @family spork
#' @return spork
#' @examples
#' library(magrittr)
#' library(spork)
#' 'kg.m^2/s^2' %>% as_unit_string %>% as_spork
#' 'kg.m2 s-2' %>% as_unit_string %>% as_spork
as_spork.unit_string <- function(x, ...){
stopifnot(all(is_parseable(x)))
y <- gsub('\\.','*',x) # \u22c5 https://en.wikipedia.org/wiki/Interpunct
  y <- gsub('\\^([0-9]+)','^\\1.',y) # canonical, all pos num follow ^; capture the whole exponent
y <- gsub('([a-zA-Z])([-0-9]+)', '\\1^\\2.',y) # non-canonical, unsigned or neg num follow char
y <- as_spork(as.character(y))
y
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/spork.R |
#' Place Units Under Label
#'
#' Places units attribute below label attribute.
#' Makes the most sense for figures (\code{style = 'plotmath'})
#' and useful for tables (\code{style = 'latex'}) in combination
#' with \code{\link{alias}}. See also \code{\link{append_units}}.
#'
#' @param x object
#' @param ... passed to \code{\link{append_units}}
#' @param open character to precede units
#' @param close character to follow units
#' @param style one of 'plain', 'latex', or 'plotmath'
#' @param math_open,math_close,label_open,label_close,newline passed to \code{\link{as_latex.spar}} if style = 'latex'
#' @export
#' @keywords internal
#' @family labels
#' @return see methods for \code{\link{append_units}}
#' @examples
#' library(units)
#' library(magrittr)
#' library(dplyr)
#' library(ggplot2)
#' x <- 1:10
#' attr(x, 'label') <- 'acceleration'
#' units(x) <- 'm/s^2'
#' y <- as_units('kg')
#' x %>% attr('label')
#' x %>% sub_units %>% attr('label')
#' x %>% sub_units(style = 'plotmath') %>% attr('label')
#' x %>% sub_units(style = 'plain') %>% attr('label') %>% writeLines
#' y %>% attr('label')
#' y %>% sub_units(style = 'plain') %>% attr('label')
#' x %>% sub_units(style = 'plotmath')
#' x %>% sub_units(style = 'latex')
#'
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' file %>% decorate %>% resolve %>%
#' sub_units(style = 'plotmath') %>%
#' ggplot(data = ., aes(x = time, y = conc, color = Heart)) %>%
#' add(geom_point())
sub_units <- function(
x,
...,
open = if(style == 'plain') '\n(' else '\\n(',
close = ')',
style = 'latex',
math_open = "",
math_close = "",
label_open = "$\\begin{gathered}",
label_close = "\\end{gathered}$",
newline = '\\\\'
)append_units(
x,
open = open,
close = close,
style = style,
math_open = math_open,
math_close = math_close,
label_open = label_open,
label_close = label_close,
newline = newline,
...
)
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/sub_units.R |
#' Unclassify Something
#'
#' Unclassify something.
#' Generic, with method \code{\link{unclassified.classified}}.
#' See also \code{\link{classified}}.
#'
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family classified
#' @return see methods
#' @examples
#' example(unclassified.classified)
unclassified <- function(x, ...)UseMethod('unclassified')
#' Unclassify Classified
#'
#' Unclassifies classified. Uses codelist attribute
#' to restore original values, preserving other attributes
#' (and rebuilding codelist).
#' @param x classified
#' @param ... ignored
#' @param persistence whether to reclass as dvec
#' @export
#' @importFrom utils type.convert
#' @keywords internal
#' @family classified
#' @return vector
#' @examples
#' example(unclassified.data.frame)
unclassified.classified <- function(x, ..., persistence = getOption('yamlet_persistence', TRUE)){
codelist <- attr(x, 'codelist')
levels <- unlist(codelist)
labels <- names(codelist)
if(is.null(labels))labels <- as.character(codelist)
# as of 0.8.9 ...
labels <- type.convert(labels, as.is = TRUE)
y <- labels[match(as.character(x), levels)]
#y <- type.convert(y, as.is = TRUE)
nms <- names(attributes(x))
nms <- setdiff(nms, c('class','levels','contrasts','codelist'))
for(nm in nms){
attr(y, nm) <- attr(x, nm)
}
names(labels) <- NULL # clean
names(levels) <- NULL # clean
codelist <- structure(as.list(labels), names = levels)
# names codelist or unlist codelist may contain NA
if(
all(
is.na(names(codelist)) == is.na(unlist(codelist)) &
paste(names(codelist)) == paste(unlist(codelist))
)
){
names(codelist) <- NULL
# codelist <- unlist(codelist) # @ 0.8.2 codelist remains list
}
attr(y, 'codelist') <- codelist
if(persistence) y <- as_dvec(y)
y
}
#' Unclassify Data Frame
#'
#' Unclassifies data.frame.
#' Coerces 'classified' items to original values,
#' rebuilding codelist attribute.
#'
#' @param x data.frame
#' @param ... passed to \code{\link[dplyr]{select}} to limit scope
#' @export
#' @keywords internal
#' @return data.frame
#' @family classified
#' @examples
#' library(magrittr)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(file)
#' x %>% explicit_guide %>% decorations(Age, Race, Heart:glyco)
#' x %>% explicit_guide %>% classified %>% unclassified %>% decorations(Age, Race, Heart:glyco)
unclassified.data.frame <- function(x,...){
my_class <- class(x)
for(nm in selected(x,...)){
if(inherits(x[[nm]], 'classified')){
x[[nm]] <- unclassified(x[[nm]])
}
}
class(x) <- my_class
x
}
#' Unclassify Decorated Vector
#'
#' Unclassifies dvec. A non-operation, since dvec is not classified.
#' Needed for completness by \code{\link{resolve.dvec}}.
#'
#' @param x dvec
#' @param ... ignored
#' @export
#' @keywords internal
#' @return dvec
#' @family dvec
#' @examples
#' library(magrittr)
#' x <- structure(as_dvec(1), guide = 'misc')
#' unclassified(x)
unclassified.dvec <- function(x,...)x
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/unclassified.R |
#' Coerce to Unit String
#'
#' Coerces to class 'unit_string'. Generic,
#' with method \code{\link{as_unit_string.character}}.
#' A unit string is character text suitable
#' as input for \code{\link[units]{as_units}}.
#' See also \code{\link{is_parseable}}.
#'
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family unit_string
#' @return unit_string
#' @md
#' @examples
#' as_unit_string('kg.m^2/s^2')
#' as_unit_string('kg.m2 s-2')
as_unit_string <- function(x, ...)UseMethod('as_unit_string')
#' Coerce Character to Unit String
#'
#' Coerces character to class 'unit_string'.
#' See description for \code{\link{as_unit_string}}.
#'
#' @param x character
#' @param ... ignored arguments
#' @export
#' @keywords internal
#' @family unit_string
#' @return unit_string
#' @examples
#' as_unit_string('kg m2 s-2')
as_unit_string.character <- function(x, ...){
class(x) <- union('unit_string', class(x))
x
}
#' Coerce Factor to Unit String
#'
#' Coerces factor to class 'unit_string'
#' by converting to character and calling
#' \code{\link{as_unit_string}}.
#'
#' @param x factor
#' @param ... ignored arguments
#' @export
#' @keywords internal
#' @family unit_string
#' @return unit_string
#' @examples
#' as_unit_string(as.factor('kg m2 s-2'))
as_unit_string.factor <- function(x, ...)as_unit_string(as.character(x), ...)
#' Subset Unit String
#'
#' Subsets unit_string, retaining class.
#' @param x unit_string
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family unit_string
#' @return unit_string
#' @examples
#' x <- c(
#' 'm',
#' 's',
#' 'ng/mL'
#' )
#' x <- as_unit_string(x)
#' class(x)
#' class(x[1])
`[.unit_string` <- function(x, ...){
y <- NextMethod()
  # NextMethod() drops the unit_string class; restore it
class(y) <- union('unit_string', class(y))
y
}
#' Element-select Unit String
#'
#' Element-selects unit_string, retaining class.
#' @param x unit_string
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family unit_string
#' @return unit_string
#' @examples
#' x <- c(
#' 'm',
#' 's',
#' 'ng/mL'
#' )
#' x <- as_unit_string(x)
#' class(x)
#' class(x[[1]])
`[[.unit_string` <- function(x, ...){
y <- NextMethod()
  # NextMethod() drops the unit_string class; restore it
class(y) <- union('unit_string', class(y))
y
}
#' Coerce Symbolic Units to Unit String.
#'
#' Coerces symbolic units to unit_string.
#' @param x symbolic_units; see \code{\link[units]{as_units}}
#' @param canonical whether to return the form having all positive exponents
#' @param ... ignored arguments
#' @export
#' @keywords internal
#' @importFrom units as_units
#' @importFrom units deparse_unit
#' @family unit_string
#' @return unit_string
#' @examples
#' library(units)
#' x <- as_units('kg.m/s^2')
#' names(attributes(x))
#' y <- attr(x,'units')
#' class(y)
#' as.character(y)
#' as.character(attr(x, 'units'))
#' as_unit_string(y)
#' library(magrittr)
#' 'kg.m^2/s^2' %>% as_units %>% attr('units') %>% as_unit_string
#' 'kg.m2 s-2' %>% as_units %>% attr('units') %>% as_unit_string
#' 'kg.m^2/s^2' %>% as_units %>% attr('units') %>% as_unit_string(FALSE)
#' 'kg.m2 s-2' %>% as_units %>% attr('units') %>% as_unit_string(FALSE)
as_unit_string.symbolic_units <- function(x, canonical = TRUE, ...){
y <- as.character(x)
if(!canonical)y <- deparse_unit(as_units(y))
y <- as_unit_string(y, ...)
y
}
#' Coerce Units to Unit String.
#'
#' Coerces units to unit_string. Extracts units
#' attribute (of class(symbolic_units)) and converts.
#' @param x units; see \code{\link[units]{as_units}}
#' @param ... ignored arguments
#' @export
#' @keywords internal
#' @importFrom units as_units
#' @importFrom units deparse_unit
#' @family unit_string
#' @return unit_string
#' @examples
#' library(units)
#' x <- as_units('kg.m/s^2')
#' as_unit_string(x)
#' as_unit_string(x, canonical = FALSE)
#'
as_unit_string.units <- function(x, canonical = TRUE, ...){
y <- attr(x, 'units')
y <- as_unit_string(y, canonical = canonical, ...)
y
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/unit_string.R |
# https://vctrs.r-lib.org/reference/howto-faq-coercion.html
#' @importFrom vctrs vec_cast
NULL
#' Cast to dvec from dvec
#'
#' Cast to dvec from dvec.
#' @param to dvec
#' @param x dvec
#' @param ... ignored
#' @keywords internal
#' @export
#' @importFrom vctrs vec_data
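#' @examples
#' # a minimal sketch: the data are cast to the type of 'to',
#' # while the attributes of 'x' are retained
#' str(vctrs::vec_cast(as_dvec(1L, label = 'dose'), as_dvec(1)))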
vec_cast.dvec.dvec <- function(x, to, ...) {
# https://github.com/r-lib/rlang/issues/1432
# as_dvec(x, ...) # during join, adds x_arg, to_arg, and 'call' attributes
# logical, integer, double, complex, character
# at <- attributes(x) # save these
# y <- x
# if(is.logical(to)) y <- as.logical(x)
# if(is.integer(to)) y <- as.integer(x)
# if(is.double(to)) y <- as.double(x)
# if(is.complex(to)) y <- as.complex(x)
# if(is.character(to)) y <- as.character(x)
# attributes(y) <- at
# y <- as_dvec(y)
# y
out <- vec_cast(vec_data(x), vec_data(to), ...)
attributes(out) <- attributes(x)
as_dvec(out)
}
#' Cast to dvec from logical
#'
#' Cast to dvec from logical
#' @param to dvec
#' @param x logical
#' @param ... ignored
#' @keywords internal
#' @export
vec_cast.dvec.logical <- function(x, to, ...){
y <- vec_cast(x, unclass(to))
y <- as_dvec(y)
y
}
#' Cast to logical from dvec
#'
#' Cast to logical from dvec.
#' @param to logical
#' @param x dvec
#' @param ... ignored
#' @keywords internal
#' @export
vec_cast.logical.dvec <- function(x, to, ...){
as.logical(x)
}
#' Cast to dvec from integer
#'
#' Cast to dvec from integer.
#' @param to dvec
#' @param x integer
#' @param ... ignored
#' @keywords internal
#' @export
vec_cast.dvec.integer <- function(x, to, ...){
y <- vec_cast(x, unclass(to))
y <- as_dvec(y)
y
}
#' Cast to integer from dvec
#'
#' Cast to integer from dvec.
#' @param to integer
#' @param x dvec
#' @param ... ignored
#' @keywords internal
#' @export
vec_cast.integer.dvec <- function(x, to, ...){
as.integer(x)
}
#' Cast to dvec from double
#'
#' Cast to dvec from double.
#' @param to dvec
#' @param x double
#' @param ... ignored
#' @keywords internal
#' @export
vec_cast.dvec.double <- function(x, to, ...){
y <- vec_cast(x, unclass(to))
y <- as_dvec(y)
y
}
#' Cast to double from dvec
#'
#' Cast to double from dvec.
#' @param to double
#' @param x dvec
#' @param ... ignored
#' @keywords internal
#' @export
vec_cast.double.dvec <- function(x, to, ...){
as.double(x)
}
#' Cast to dvec from character
#'
#' Cast to dvec from character.
#' @param to dvec
#' @param x character
#' @param ... ignored
#' @keywords internal
#' @export
vec_cast.dvec.character <- function(x, to, ...){
y <- vec_cast(x, unclass(to))
y <- as_dvec(y)
y
}
#' Cast to character from dvec
#'
#' Cast to character from dvec.
#' @param to character
#' @param x dvec
#' @param ... ignored
#' @keywords internal
#' @export
vec_cast.character.dvec <- function(x, to, ...){
as.character(x)
}
#' Cast to dvec from complex
#'
#' Cast to dvec from complex.
#' @param to dvec
#' @param x complex
#' @param ... ignored
#' @keywords internal
#' @export
vec_cast.dvec.complex <- function(x, to, ...){
y <- vec_cast(x, unclass(to))
y <- as_dvec(y)
y
}
#' Cast to complex from dvec
#'
#' Cast to complex from dvec.
#' @param to complex
#' @param x dvec
#' @param ... ignored
#' @keywords internal
#' @export
vec_cast.complex.dvec <- function(x, to, ...){
as.complex(x)
}
| /scratch/gouwar.j/cran-all/cranData/yamlet/R/vec_cast.R |
# https://vctrs.r-lib.org/reference/howto-faq-coercion-data-frame.html
#' Coerce Common Type to Decorated
#'
#' Coerces common type to decorated. Wrapper for \code{\link[vctrs]{df_ptype2}}.
#' @export
#' @keywords internal
#' @return decorated
#' @param x subclass of data.frame
#' @param y subclass of data.frame
#' @param ... passed arguments
#' @importFrom vctrs df_ptype2
dd_ptype2 <- function(x, y, ...) {
as_decorated(df_ptype2(x, y, ...))
}
#' Coerce Data.frame to Decorated
#'
#' Coerces data.frame to decorated. Wrapper for \code{\link[vctrs]{df_cast}}.
#' @export
#' @keywords internal
#' @return decorated
#' @param x subclass of data.frame
#' @param to subclass of data.frame
#' @param ... passed arguments
#' @importFrom vctrs df_cast
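#' @examples
#' # minimal sketch, assuming as_decorated() coerces a data.frame:
#' str(dd_cast(data.frame(x = 1L), to = as_decorated(data.frame(x = 2.5))))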
dd_cast <- function(x, to, ...) {
as_decorated(df_cast(x, to, ...))
}
#' Determine Common Type for Decorated
#'
#' Determines common type for decorated.
#' @return decorated
#' @param x decorated
#' @param y decorated
#' @param ... passed arguments
#' @export
#' @keywords internal
vec_ptype2.decorated.decorated <- function(x, y, ...) {
dd_ptype2(x, y, ...)
}
#' Determine Common Type for Decorated and Data.frame
#'
#' Determines common type for decorated and data.frame.
#' @return decorated
#' @param x decorated
#' @param y data.frame
#' @param ... passed arguments
#' @export
#' @keywords internal
vec_ptype2.decorated.data.frame <- function(x, y, ...) {
dd_ptype2(x, y, ...)
}
#' Determine Common Type for Data.frame and Decorated
#'
#' Determines common type for data.frame and decorated.
#' @return decorated
#' @param x data.frame
#' @param y decorated
#' @param ... passed arguments
#' @export
#' @keywords internal
vec_ptype2.data.frame.decorated <- function(x, y, ...) {
dd_ptype2(x, y, ...)
}
#' Cast to Decorated from Decorated
#'
#' Casts to decorated from decorated.
#' @export
#' @return decorated
#' @param x decorated
#' @param to decorated
#' @param ... passed arguments
#' @keywords internal
vec_cast.decorated.decorated <- function(x, to, ...) {
dd_cast(x, to, ...)
}
#' Cast to Decorated from Data.frame
#'
#' Casts to decorated from data.frame.
#' @export
#' @return decorated
#' @param x data.frame
#' @param to decorated
#' @param ... passed arguments
#' @keywords internal
vec_cast.decorated.data.frame <- function(x, to, ...) {
# `x` is a data.frame to be converted to a decorated
dd_cast(x, to, ...)
}
#' Cast to Data.frame from Decorated
#'
#' Casts to data.frame from decorated.
#' @export
#' @return data.frame
#' @param x decorated
#' @param to data.frame
#' @param ... passed arguments
#' @keywords internal
vec_cast.data.frame.decorated <- function(x, to, ...) {
# `x` is a decorated to be converted to a data.frame
df_cast(x, to, ...)
}
# -- end of file: R/vec_decorated.R --
# https://vctrs.r-lib.org/reference/howto-faq-coercion.html
#' @importFrom vctrs vec_ptype2
NULL
#' Find Common Type for dvec, dvec
#'
#' Find common type for dvec, dvec.
#' @param x dvec
#' @param y dvec
#' @param ... ignored
#' @keywords internal
#' @export
#' @examples
#' str(vctrs::vec_ptype2(as_dvec(1L), as_dvec(1)))
#' str(vctrs::vec_ptype2(as_dvec(1), as_dvec(1L)))
#' str(vctrs::vec_ptype2(as_dvec(TRUE), as_dvec(1L)))
#' str(vctrs::vec_ptype2(as_dvec(TRUE), as_dvec(1)))
#' str(vctrs::vec_ptype2(as_dvec(1), as_dvec(1+0i)))
#'
#' vctrs::vec_ptype2(
#' structure(as_dvec(1), guide = 'mg'),
#' structure(as_dvec(1), guide = 'kg')
#' )
vec_ptype2.dvec.dvec <- function(x, y, ...) {
z <- c(x, y)
z <- unclass(z) # restored below
p <- vec_ptype2(unclass(x), unclass(y))
attributes(p) <- attributes(z)
p <- as_dvec(p)
p
}
#' Find Common Type for dvec, logical
#'
#' Find common type for dvec, logical.
#' @param x dvec
#' @param y logical
#' @param ... ignored
#' @keywords internal
#' @export
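#' @examples
#' # minimal sketch: logical combines with dvec integer as dvec integer
#' str(vctrs::vec_ptype2(as_dvec(1L), TRUE))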
vec_ptype2.dvec.logical <- function(x, y, ...){
z <- c(x, y)
z <- unclass(z) # restored below
p <- vec_ptype2(unclass(x), y)
attributes(p) <- attributes(z)
p <- as_dvec(p)
p
}
#' Find Common Type for logical, dvec
#'
#' Find common type for logical, dvec.
#' @param x logical
#' @param y dvec
#' @param ... ignored
#' @keywords internal
#' @export
vec_ptype2.logical.dvec <- function(x, y, ...){
z <- c(as_dvec(x), y)
z <- unclass(z) # restored below
p <- vec_ptype2(x, unclass(y))
attributes(p) <- attributes(z)
p <- as_dvec(p)
p
}
#' Find Common Type for dvec, integer
#'
#' Find common type for dvec, integer.
#' @param x dvec
#' @param y integer
#' @param ... ignored
#' @keywords internal
#' @export
vec_ptype2.dvec.integer <- function(x, y, ...){
z <- c(x, y)
z <- unclass(z) # restored below
p <- vec_ptype2(unclass(x), y)
attributes(p) <- attributes(z)
p <- as_dvec(p)
p
}
#' Find Common Type for integer, dvec
#'
#' Find common type for integer, dvec.
#' @param x integer
#' @param y dvec
#' @param ... ignored
#' @keywords internal
#' @export
vec_ptype2.integer.dvec <- function(x, y, ...){
z <- c(as_dvec(x), y)
z <- unclass(z) # restored below
p <- vec_ptype2(x, unclass(y))
attributes(p) <- attributes(z)
p <- as_dvec(p)
p
}
#' Find Common Type for dvec, double
#'
#' Find common type for dvec, double.
#' @param x dvec
#' @param y double
#' @param ... ignored
#' @keywords internal
#' @export
#' @examples
#' str(vctrs::vec_ptype2(as_dvec(1), 1))
#' str(vctrs::vec_ptype2(1, as_dvec(1)))
#'
#' str(vctrs::vec_ptype2(as_dvec(1, label = 'x'), 1))
#' str(vctrs::vec_ptype2(1, as_dvec(1, label= 'x')))
#' str(vctrs::vec_ptype2(as_dvec(1), structure(1, label = 'x')))
#'
vec_ptype2.dvec.double <- function(x, y, ...){
z <- c(x, y)
z <- unclass(z) # restored below
p <- vec_ptype2(unclass(x), y)
attributes(p) <- attributes(z)
p <- as_dvec(p)
p
}
#' Find Common Type for double, dvec
#'
#' Find common type for double, dvec.
#' @param x double
#' @param y dvec
#' @param ... ignored
#' @keywords internal
#' @export
vec_ptype2.double.dvec <- function(x, y, ...){
z <- c(as_dvec(x), y)
z <- unclass(z) # restored below
p <- vec_ptype2(x, unclass(y))
attributes(p) <- attributes(z)
p <- as_dvec(p)
p
}
#' Find Common Type for dvec, character
#'
#' Find common type for dvec, character.
#' @param x dvec
#' @param y character
#' @param ... ignored
#' @keywords internal
#' @export
vec_ptype2.dvec.character <- function(x, y, ...){
z <- c(x, y)
z <- unclass(z) # restored below
p <- vec_ptype2(unclass(x), y)
attributes(p) <- attributes(z)
p <- as_dvec(p)
p
}
#' Find Common Type for character, dvec
#'
#' Find common type for character, dvec.
#' @param x character
#' @param y dvec
#' @param ... ignored
#' @keywords internal
#' @export
vec_ptype2.character.dvec <- function(x, y, ...){
z <- c(as_dvec(x), y)
z <- unclass(z) # restored below
p <- vec_ptype2(x, unclass(y))
attributes(p) <- attributes(z)
p <- as_dvec(p)
p
}
#' Find Common Type for dvec, complex
#'
#' Find common type for dvec, complex.
#' @param x dvec
#' @param y complex
#' @param ... ignored
#' @keywords internal
#' @export
vec_ptype2.dvec.complex <- function(x, y, ...){
z <- c(x, y)
z <- unclass(z) # restored below
p <- vec_ptype2(unclass(x), y)
attributes(p) <- attributes(z)
p <- as_dvec(p)
p
}
#' Find Common Type for complex, dvec
#'
#' Find common type for complex, dvec.
#' @param x complex
#' @param y dvec
#' @param ... ignored
#' @keywords internal
#' @export
vec_ptype2.complex.dvec <- function(x, y, ...){
  z <- c(as_dvec(x), y)
  z <- unclass(z) # restored below
  p <- vec_ptype2(x, unclass(y))
attributes(p) <- attributes(z)
p <- as_dvec(p)
p
}
# -- end of file: R/vec_ptype2.R --
#' Footnote Something
#'
#' Footnotes something.
#' Generic, with method \code{\link{footnote.decorated}}.
#' @param x object
#' @param ... passed arguments
#' @family footnote
#' @keywords internal
#' @export
#' @return see methods
#' @examples
#' # see methods
footnote <- function(x, ...)UseMethod('footnote')
#' Footnote Decorated
#'
#' Footnotes a decorated data.frame.
#' Generates a text string that defines
#' column names using label and unit attributes.
#' @param x decorated
#' @param ... passed to \code{\link{append_units}}
#' @param equal character: a symbol suggesting equality between a name and its note
#' @param collapse used to \code{\link{paste}} column-wise footnotes
#' @family footnote
#' @export
#' @keywords internal
#' @return character
#' @examples
#' library(magrittr)
#' set.seed(0)
#' x <- data.frame(
#' auc = rnorm(100, mean = 2400, sd = 200),
#' bmi = rnorm(100, mean = 20, sd = 5),
#' gen = 0:1
#' )
#' x %<>% decorate('auc: [AUC_0-24, ng*h/mL]')
#' x %<>% decorate('bmi: [Body Mass Index, kg/m^2]')
#' x %<>% decorate('gen: [Gender, [Male: 1, Female: 0]]')
#' x %<>% resolve
#' footnote(x)
#' footnote(x, auc)
footnote.decorated <- function(x, ..., equal = ':', collapse = '; '){
x <- append_units(x, ...) # safe
nms <- selected(x,...)
y <- sapply(select(x,!!!nms), attr, 'label')
y <- paste0(nms, equal, y)
y <- paste(y, collapse = collapse)
y
}
#' Create Export Table for Decorated
#'
#' Creates an export table for decorated data.frame
#' by adding a footnote attribute.
#'
#' @param x decorated
#' @param ... passed to \code{\link{footnote}} and (if named) \code{\link[xtable]{xtable}}
#' @param label passed to \code{\link[xtable]{xtable}}
#' @param style passed to \code{\link{footnote}}
#' @export
#' @keywords internal
#' @importFrom xtable xtable
#' @return class 'decorated_xtable','xtable', 'data.frame'
#' @examples
#' library(magrittr)
#' library(xtable)
#' set.seed(0)
#' x <- data.frame(
#' auc = rnorm(100, mean = 2400, sd = 200),
#' bmi = rnorm(100, mean = 20, sd = 5),
#' gen = 0:1
#' )
#' x %<>% decorate('auc: [AUC_0-24, ng*h/mL]')
#' x %<>% decorate('bmi: [Body Mass Index, kg/m^2]')
#' x %<>% decorate('gen: [Gender, [Male: 1, Female: 0]]')
#' y <- xtable(x)
#' attr(y, 'footnote')
#' y <- xtable(x, auc:bmi)
#' attr(y, 'footnote')
#'
xtable.decorated <- function(x, ..., label = NULL, style = 'latex'){
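  # named() (apparently a package helper) forwards only the *named*
  # arguments in ... to xtable(), per the '@param ...' note above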
y <- do.call(xtable,c(list(data.frame(x), label = label),named(...)))
class(y) <- c('decorated_xtable', 'xtable', 'data.frame')
z <- footnote(x, style = style, ...)
attr(y, 'footnote') <- z
y
}
#' Print Decorated Xtable
#'
#' Prints a decorated xtable.
#' Supplies a footnote.
#' Experimental.
#'
#' @export
#' @importFrom xtable xtable
#' @importFrom xtable print.xtable
#' @keywords internal
#' @return character
#' @param x decorated
#' @param ... passed to other methods
#' @examples
#' library(magrittr)
#' library(xtable)
#' set.seed(0)
#' x <- data.frame(
#' auc = rnorm(4, mean = 2400, sd = 200),
#' bmi = rnorm(4, mean = 20, sd = 5),
#' gen = 0:1
#' )
#' x %<>% decorate('auc: [AUC_0-24, ng*h/mL]')
#' x %<>% decorate('bmi: [Body Mass Index, kg/m^2]')
#' x %<>% decorate('gen: [Gender, [Male: 1, Female: 0]]')
#' x %>% resolve
#' x %>% resolve %>% xtable
#'
#'
print.decorated_xtable <- function(x, ...){
y <- NextMethod(print.results=FALSE, comment = FALSE, ...)
note <- attr(x,'footnote')
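  # splice the footnote in just before LaTeX's closing \end{table}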
y <- sub(
fixed = TRUE,
'\\end{table}',
paste(sep = '\n','\n', note, '\\end{table}'),
y
)
cat(y)
return(invisible())
}
# -- end of file: R/xtable.R --
#' Coerce to Yam
#'
#' Coerce to yam, a precursor to yamlet. Generic, with character
#' method: \code{\link{as_yam.character}}.
#' @param x object
#' @param ... passed arguments
#' @return list
#' @family yam
#' @export
#' @keywords internal
as_yam <- function(x, ...)UseMethod('as_yam')
#' Coerce Character to Yam
#'
#' Coerces character to yam. Length-one character can be
#' a filepath, otherwise treated as data. Proceeds by
#' importing the data and determining the default keys.
#'
#' @param x length-one filepath or actual data
#' @param as.named.list enforced as TRUE
#' @param handlers named list of parse handlers passed to \code{\link[yaml]{yaml.load}}; the defaults support parsimonious nesting and literal NA
#' @param ... passed to \code{\link[yaml]{yaml.load}} if supported
#' @export
#' @keywords internal
#' @importFrom yaml read_yaml yaml.load
#' @family yam
#' @return a named list
#' @examples
#'
#' # Read sample data from file.
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.yaml')
#' file
#' as_yam(file)
#'
#' # Read yamlet directly from character vector.
#' as_yam(c('ID:','TIME:'))
#'
#' # Read from length-one character (same result).
#' as_yam('ID:\nTIME:')
#'
as_yam.character <- function(
x,
as.named.list,
handlers = list(
seq = parsimonious,
map = function(x)lapply(x, unclass),
str = function(x){
if(identical(x, 'yamlet_NA_literal_')){
return('NA')
}
if(identical(x, 'NA')){
return(NA_character_)
}
return(x)
}
),
...
){
  if(length(x) == 1 && file.exists(x[[1]])){ # && avoids a vector condition when x has length > 1
x <- readLines(x)
}
  # A read_yaml() branch for files formerly lived here; files are now read
  # with readLines() above, so yaml.load() below is the single parser for
  # files and text fragments alike.
  allowed <- names(formals(yaml.load))
args <- list(...)
args <- args[names(args) %in% allowed ]
x <- paste(x, collapse = '\n')
if(any(grepl('yamlet_NA_literal_', x))){
stop('yamlet_NA_literal_ is reserved')
}
x <- gsub('"NA"','yamlet_NA_literal_', x)
x <- gsub("'NA'",'yamlet_NA_literal_', x)
args <- c(
list(
string = x,
handlers = handlers,
as.named.list = TRUE
),
args
)
y <- do.call(yaml.load, args) ### sole parsing!
# should just be a bare list
y <- unclass(y)
# its members should be bare lists
y[] <- lapply(y, unclass)
if(!inherits(y, 'list')){
if(length(x) == 1){
stop('x is not YAML or path to YAML')
}else{
stop('x is not YAML')
}
}
# ? each member of y must be a list
# for(m in seq_along(y)) y[[m]] <- as.list(y[[m]])
# un-nesting is now applied at parsing using 'parsimonious'
# y[] <- lapply(y, unnest)
# but list coercion still important ...
y[] <- lapply(y, as.list)
if('_keys' %in% names(y)){
k <- y$`_keys`
y <- y[names(y) != '_keys']
attr(y,'keys') <- k
}
class(y) <- 'yam'
y
}
#' Collapse Uninformative Levels
#'
#' Collapses uninformative levels of nested lists.
#' Each element of a list that is itself a list,
#' has no name, and contains exactly one element
#' that DOES have a name becomes that element
#' and takes that name (applied recursively, from the deepest level up).
#' Formerly used in as_yam; now superseded by \code{\link{parsimonious.list}}.
#'
#' @param x object
#' @param ... passed to methods
#' @return named list
#' @export
#' @keywords internal
#' @family unnest
#' @examples
#'
#' # yaml.load reads this as a list of two un-named lists whose elements are named.
#' str(yaml::yaml.load('[foo: 1, bar: 3]'))
#'
#' # yamlet treats it as a list of two named integers.
#' str(unnest(yaml::yaml.load('[foo: 1, bar: 3]')))
unnest <- function(x,...)UseMethod('unnest')
#' Collapse Uninformative Levels by Default
#'
#' The default unnest() method returns the unmodified object.
#'
#' @param x object
#' @param ... ignored
#' @return list
#' @export
#' @keywords internal
#' @family unnest
#' @examples
#' unnest(yaml::yaml.load('ITEM:'))
unnest.default <- function(x, ...)x
#' Collapse Uninformative Levels of a List
#'
#' The list method for unnest() recursively
#' ascends a nested list, removing uninformative levels.
#'
#' @param x list
#' @param ... ignored
#' @export
#' @keywords internal
#' @family unnest
#' @return list
#' @examples
#'
#' a <- 'ITEM: [ label: sunshine, [foo: 1, bar: 3]]'
#'
#' # yaml.load() sees label nested one-deep, and foo nested two-deep:
#' yaml::yaml.load(a)
#'
#' # unnest() sees label nested zero-deep, and foo nested one-deep:
#' unnest(yaml::yaml.load(a))
#'
#' # as_yamlet() provides explicit name (default key) for second element of ITEM:
#' as_yamlet(a)
unnest.list <- function(x,...){
# make names explicit
if(is.null(names(x))) names(x) <- rep('',length(x))
# unnest members
x[] <- lapply(x, unnest) # for atomics
# if I reached here, I am a list
# and all my members are unnested.
# process each element:
for(i in seq_along(x)){
this <- x[[i]] # element i
nm <- names(x)[[i]] # name of element i
len <- length(this) # length one?
isList <- is.list(this) # list?
nms <- setdiff(names(this),'') # good names
if(
isList & # each element that is a list
nm == '' & # and does not have a name
len == 1 & # but has exactly one element
length(nms) # that is named
){
x <- append(x, this, after = i)[-i] # should be that element and have that name
}
}
x
}
#' Coerce to Yamlet
#'
#' Coerces something to yamlet format. If the object
#' or user specifies default keys, these are applied.
#' See \code{\link{as_yamlet.character}}.
#'
#' @param x object
#' @param ... passed arguments
#' @return a named list
#' @export
#' @keywords internal
#' @family yamlet
#' @examples
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.yaml')
#' file
#' identical(as_yamlet(as_yam(file)), as_yamlet(file))
#'
#' # Read yamlet from storage and apply default keys.
#' as_yamlet(file)
#'
as_yamlet <- function(x, ...)UseMethod('as_yamlet')
#' Coerce Yam To Yamlet Format
#'
#' Coerces yam to yamlet format. If the object
#' or user specifies default keys, these are applied
#' See \code{\link{as_yamlet.character}}.
#'
#' @param x a yam object; see \code{\link{as_yam}}
#' @param default_keys character: default keys for the first n anonymous members of each element
#' @param ... ignored
#' @export
#' @family yamlet
#' @keywords internal
#' @return yamlet: a named list with default keys applied
#' @examples
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.yaml')
#' file
#' as_yamlet(as_yam(file))
#'
as_yamlet.yam <- function(x, default_keys = getOption('yamlet_default_keys',list('label','guide')), ...){
default_keys <- as.list(default_keys)
k <- attr(x,'keys')
if(is.null(k))k <- as.list(default_keys)
stopifnot(length(k) == length(unlist(k)))
if(!is.character(unlist(k)) & !identical(k,list())){
warning('default keys do not appear to be character: ignoring')
k <- list()
}
attr(x,'keys') <- NULL
x[] <- lapply(x, .resolve, keys = k)
unresolved <- which(sapply(x, function(i)any(names(i) == '')))
if(length(unresolved))warning('missing key(s) for element(s) ', paste(unresolved, collapse = ', '))
class(x) <- 'yamlet'
x
}
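
# Applies default keys to the anonymous members of a single yamlet item,
# e.g. .resolve(list('subject', 'mg'), keys = list('label', 'guide'))
# yields list(label = 'subject', guide = 'mg').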
.resolve <- function(x, keys){ # an item
nms <- names(x)
if(is.null(nms)) nms <- rep('',length(x))
for(i in seq_along(nms)){
if(length(keys)){ # if we have unused defaults
if(nms[[i]] == ''){
nms[[i]] <- keys[[1]]
keys[[1]] <- NULL
}else{
keys <- setdiff(keys, nms[[i]]) # if a default is given, it is used
}
}
}
names(x) <- nms
x
}
#' Coerce Character To Yamlet Format
#'
#' Coerces character to yamlet format.
#' Length-one character is understood as a file path
#' if the file exists. Otherwise, it is treated as data.
#' The file is a mapping of (nested) sequences,
#' where map keys are data item names, and
#' sequences represent data item attributes.
#' Attributes may be named or anonymous.
#'
#' If an attribute is anonymous, an attempt
#' is made to name it using available defaults.
#' A special item named '_keys', if present, identifies a sequence of
#' key names that over-ride \code{default_keys}.
#' Attribute names are sought first in the explicit yaml,
#' then in the special item named '_keys',
#' then in the \code{default_keys} argument passed to \code{\link{as_yamlet}},
#' then in \code{options()$yamlet_default_keys},
#' then in the defaults for argument \code{default_keys}.
#'
#'
#' @param x length-one filepath or actual data
#' @param default_keys character: default keys for the first n anonymous members of each element
#' @param ... passed to \code{\link{as_yam.character}} and \code{\link{as_yamlet.yam}}
#' @export
#' @keywords internal
#' @family yamlet
#' @return yamlet: a named list with default keys applied
#' @examples
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.yaml')
#' as_yamlet(file)
#' as_yamlet('ID: subject identifier')
#' as_yamlet(c('id: subject','amt: dose'))
#' as_yamlet(c('id: subject\namt: dose'))
#'
as_yamlet.character <- function(x, default_keys = getOption('yamlet_default_keys', list('label','guide')), ...){
as_yamlet(as_yam(x, ...), default_keys = default_keys, ...)
}
#' Coerce Data Frame to Yamlet
#'
#' Coerces data.frame to yamlet by calling \code{\link{decorations}}.
#'
#' @param x data.frame
#' @param ... passed to \code{\link{decorations}}
#' @family yamlet
#' @export
#' @keywords internal
#' @return yamlet
#' @examples
#' library(csv)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(as.csv(file))
#' as_yamlet(x)
as_yamlet.data.frame <- function(x, ...){
out <- decorations(x,...)
# class(out) <- 'yamlet' # as of 0.6.2 decorations() returns yamlet
out
}
#' Coerce List to Yamlet
#'
#' Coerces list to yamlet. Assigns class 'yamlet'.
#' Checks that list has names.
#'
#' @param x list
#' @param ... ignored
#' @family yamlet
#' @export
#' @keywords internal
#' @return yamlet
#' @examples
#' x <- list(a = 2, b = 3)
#' as_yamlet(x)
as_yamlet.list <- function(x, ...){
if(is.null(names(x)))stop('list must have names for conversion to yamlet')
class(x) <- 'yamlet'
x
}
#' Coerce Yamlet to Yamlet
#'
#' Coerces yamlet to yamlet. Currently a non-operation.
#'
#' @param x yamlet
#' @param ... ignored
#' @family yamlet
#' @export
#' @keywords internal
#' @return yamlet
#' @examples
#' meta <- system.file(package = 'yamlet', 'extdata','quinidine.yaml')
#' x <- as_yamlet(meta)
#' as_yamlet(x)
as_yamlet.yamlet <- function(x, ...)x
#' Coerce Yamlet to Yam
#'
#' Coerces class yamlet to yam, negotiating the default keys.
#' For each member of x, names of sub-members will be dropped
#' if all previous such have been dropped. I.e., attribute
#' order is preserved, and 'guide' (by default) will not be
#' made implicit unless 'label' has already been encountered
#' (and made implicit). Default keys are attached as the 'keys'
#' attribute of the result.
#'
#' @param x yamlet
#' @param ... ignored
#' @param default_keys names that may be omitted in left subsets
#' @export
#' @keywords internal
#' @return yam
#' @family yam
#' @examples
#' as_yam(as_yamlet(c('id: subject','amt: dose')))
#' as_yam(as_yamlet(c('amt: [ dose, mg ]')))
#' as_yam(as_yamlet(c('amt: [ guide: mg, label: dose ]')))
#'
as_yam.yamlet <- function(
x,
...,
default_keys = getOption('yamlet_default_keys', list('label','guide'))
){
default_keys <- as.list(default_keys)
for(nm in names(x)){
candidates <- unlist(default_keys)
nms <- names(x[[nm]])
for(i in seq_along(nms)){
if(length(candidates)){
if(identical(nms[[i]], candidates[[1]])){
names(x[[nm]])[[i]] <- ''
candidates <- candidates[-1]
}
}
}
}
attr(x, 'keys') <- default_keys
class(x) <- 'yam'
x
}
#' Coerce Yam to Character
#'
#' Coerces class yam to character. Forms the basis for a
#' yamlet emitter.
#'
#' @param x yam
#' @param ... ignored; keys is an attribute of yam
#' @export
#' @keywords internal
#' @family yam
#' @return character
#' @examples
#' foo <- as_yamlet(c('id: subject','amt: dose'))
#' class(foo)
#' bar <- as_yam(foo)
#' class(bar)
#' as.character(bar)
#' as.character(
#' as_yam(
#' as_yamlet(
#' "race: [label: race, guide: [ white: 0, black: 1, asian: 2 ], multiple: ['yes': 1, 'no': 0]]"
#' )))
#'
as.character.yam <- function(x, ...){
k <- attr(x, 'keys')
if(!identical(k, list('label','guide'))){
x <- c(x, list(`_keys` = k))
}
  out <- paste0(
    sapply(names(x), to_yamlet, ...),
    ': ',
    sapply(x, to_yamlet, ...)
  )
  out
}
#' Coerce to Yamlet Storage Format
#'
#' Coerces to yamlet storage format. Generic, with methods
#' for default, null, character and list which together
#' implement the yamlet storage syntax.
#' Always returns length-one character, possibly the empty string.
#'
#' @param x object
#' @param ... ignored
#' @export
#' @keywords internal
#' @return length-one character
#' @family to_yamlet
to_yamlet <- function(x, ...)UseMethod('to_yamlet')
#' Coerce Default to Yamlet Storage Format
#'
#' Coerces to yamlet storage format by default conversion to character.
#' @param x object
#' @param ... ignored
#' @export
#' @keywords internal
#' @return length-one character
#' @family to_yamlet
#' @examples
#' to_yamlet(3)
#' to_yamlet(c(a = '4',b = '5.8'))
#' to_yamlet(c(a = 4,b = 5.8))
#' to_yamlet(TRUE)
to_yamlet.default <- function(x,...)to_yamlet(sapply(x, as.character, ...))
#' Coerce Numeric to Yamlet Storage Format
#'
#' Coerces numeric to yamlet storage format.
#' By default, numeric values would be processed as character.
#' But character values with a leading dash are ordinarily
#' quoted, since in character context a leading dash could
#' be mistaken for a yaml metacharacter.
#' In the case of a numeric value, however, we can be
#' fairly certain that, despite appearances, the dash
#' if any is actually a negative sign. This method
#' intends to leave negative numerics unquoted, like their
#' positive counterparts.
#'
#' @param x object
#' @param ... ignored
#' @export
#' @keywords internal
#' @return length-one character
#' @family to_yamlet
#' @examples
#' library(magrittr)
#' 'a: [[d: [-1, 0, 1, 2]]]' %>% as_yamlet %>% to_yamlet
#' to_yamlet(c(a = 4, b = 5.8))
#' to_yamlet(list(a = 4, b = 5.8))
#' to_yamlet(numeric(0))
to_yamlet.numeric <- function(x,...){
if(length(x) > 1) return(to_yamlet(as.list(x), ...))
sign <- sign(x)
x <- abs(x)
x <- to_yamlet(as.character(x), ...)
x[sign == -1] <- paste0('-', x[sign == -1])
x
}
#' Coerce Yamlet to Yamlet Storage Format
#'
#' Coerces yamlet to yamlet storage format by unclassing to list.
#' @param x object
#' @param ... ignored
#' @export
#' @keywords internal
#' @return length-one character
#' @family to_yamlet
#' @examples
#' library(magrittr)
#' 'a: [[d: [-1, 0, 1, 2]]]' %>% as_yamlet %>% to_yamlet
to_yamlet.yamlet <- function(x,...)to_yamlet(unclass(x), ...)
#' Coerce Character to Yamlet Storage Format
#'
#' Coerces character to yamlet storage format.
#' Named character is processed as a named list.
#' NA_character_ is stored as unquoted NA
#' and read back as NA by \code{\link{as_yam.character}}.
#' Use quoted "NA" or 'NA' to store the literal string,
#' which will be read back as a string by \code{\link{as_yam.character}}.
#'
#' If block is TRUE, an attempt will be made
#' to represent character strings as literal
#' block scalars if they contain
#' newlines (experimental in yamlet >= 0.8).
#'
#' @param x character
#' @param block whether to write block scalars
#' @param ... ignored
#' @export
#' @keywords internal
#' @return length-one character, never NA, no names
#' @family to_yamlet
#' @examples
#' to_yamlet('foo')
#' to_yamlet(c('a','b'))
#' to_yamlet(c(a = 'a',b = 'b'))
#' to_yamlet(c(no = 'n', yes = 'y'))
#' to_yamlet(NA)
#' to_yamlet("NA")
to_yamlet.character <- function(x, block = FALSE, ...){
if(!is.null(names(x))){
x <- as.list(x)
return(to_yamlet(x))
}
# treat NA_character_ as 'NA'.
# x <- paste(x) # dropping this behavior @ 0.10.12
# quote strings beginning with ' " [] {} > | * & ! % # ` @ ,. ? : -
index <- grepl("^'", x) # starts with '
x[index] <- paste0('"',x[index], '"') # wrapped in "
# leading single quote has been double-quoted
# test conditions for single quoting.
quotable <- rep(FALSE, length(x))
index <- grepl('^[][{}>|*&!%#`@,.?:-]', x) # starts with special
quotable[index] <- TRUE # paste0("'",x[index],"'")
# quote yes, no, y, n
# index <- x %in% c('yes','no','y','n')
# https://yaml.org/type/bool.html
# TTB 2022-10-26 @ 0.9.8 extended set
index <- x %in% c(
'y', 'Y', 'yes', 'Yes', 'YES', 'n', 'N', 'no', 'No', 'NO',
'true', 'True', 'TRUE', 'false', 'False', 'FALSE',
'on', 'On', 'ON', 'off', 'Off', 'OFF',
'NA' # @ 0.10.12
)
quotable[index] <- TRUE
# must quote existing ][, to disambiguate
index <- grepl('[],[]', x) # contains collapse meta
quotable[index] <- TRUE
# leading colon trapped above
# must quote embedded colon-space (multi-char syntactical element)
index <- grepl(': +', x) # contains collapse meta
quotable[index] <- TRUE
# implement one instance of single-quotes
x[quotable] <- paste0("'", x[quotable], "'")
if(block){
has_newline <- grepl('\n', x)
x[has_newline] <- gsub('\n','\n ', x[has_newline])
x[has_newline] <- paste0('|\n ', x[has_newline])
}
# @0.10.12, true NA has now survived the
# quoting process, but must be emitted as
# character. Formerly, we used 'paste' to
# convert NA to 'NA'.
# Here, we conserve code by striking
# the early return of length one result.
# That allows the ensuing paste to have
# effect. We do however conditionalize
# the imposition of brackets.
#if(length(x) == 1) return(x)
# multiples get [,,]
# collapse multiple
multiples <- length(x) > 1
# converts NA to character, no effect on other singlets:
x <- paste(x, collapse = ', ')
if(multiples) x <- paste('[', x, ']')
names(x) <- NULL # should not have names
x
}
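
# Block-scalar sketch (comment only; block output is experimental per the
# roxygen notes above):
# to_yamlet('line one\nline two', block = TRUE)
# # yields "|\n line one\n line two"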
#' Coerce Null to Yamlet Storage Format
#'
#' Coerces null to yamlet storage format (returns empty string).
#' @param x object
#' @param ... ignored
#' @export
#' @keywords internal
#' @return length-one character
#' @family to_yamlet
#' @examples
#' to_yamlet(NULL)
to_yamlet.NULL <- function(x, ...)''
#' Coerce list to yamlet Storage Format
#'
#' Coerces list to yamlet storage format. Operates recursively on list members.
#' @param x object
#' @param ... ignored
#' @param bracket_all FALSE at top level, but TRUE recursively downward; supports 'ITEM: definition' which would otherwise be bracketted
#' @export
#' @keywords internal
#' @return length-one character
#' @family to_yamlet
#' @examples
#' to_yamlet(list())
#' to_yamlet(list(a = 1, b = 2, c = NULL))
#' to_yamlet(list(a = 1, b = list(c = 3, d = list(e = 4, f = 'g', 'h'))))
#' to_yamlet(setNames(1:3, c('a','b',NA)))
#' to_yamlet(setNames(c(1,2,NA), c('a','b','c')))
#'
to_yamlet.list <- function(x, ..., bracket_all = FALSE){
# convert each member to yamlet
if(length(x) == 0) x <- list(NULL)
nms <- names(x)
nms <- sapply(nms, to_yamlet) # assures individual treatment
out <- lapply(x, to_yamlet, ..., bracket_all = TRUE)
# if member not null (''), attach name using colon-space,
# else using ? name
# if name is '', do not attach
for(i in seq_along(nms)){
if(nms[[i]] != ''){
if(out[[i]] != ''){
out[[i]] <- paste(nms[[i]], out[[i]], sep = ': ')
}else{
out[[i]] <- paste0('? ', nms[[i]])
}
}
}
# separate members with commas
if(
length(unlist(x)) == length(unlist(out)) |
identical(x, list(NULL))
){
out <- unlist(out) # converts empty list to NULL
if(is.null(out)) out <- ''
}
if(length(out) == 1){ # a singlet
# maybe *all* singlets need brackets.
# bracket_all <- length(x) > 1 # i.e. not for 'sex: Sex' but perhaps for 'sex: [Sex, M ]'
has_name <- as.logical(length(names(out))) # not all singlets have names
if(has_name){
if(is.na(names(out)) | names(out) == ''){ # a singlet may have an empty name
has_name <- FALSE
}
}
if(
bracket_all
| has_name
| is.list(out)
){
      out <- paste0('[ ', out, ' ]') # named singlets may need brackets
}
}
if(length(out) > 1){ # sequences need brackets
out <- paste(out, collapse = ', ')
out <- paste0('[ ', out, ' ]')
}
out <- gsub('] ',']', out)
out <- gsub('\\[ \\[','[[', out)
names(out) <- NULL # should not have names
out
}
#' Coerce Yamlet to Character
#'
#' Coerces yamlet to character. See also \code{\link{as_yamlet.character}}.
#'
#' @param x yamlet
#' @param sort whether to coerce attribute order using \code{\link{canonical.yamlet}}
#' @param ... passed to \code{\link{as.character.yam}} and \code{\link{as_yam.yamlet}}
#' @export
#' @keywords internal
#' @family yamlet
#' @return character
#' @examples
#'
#' as.character(as_yamlet('ID: subject identifier'))
#' as.character(as_yamlet(c('id: subject','amt: dose')))
#' as.character(as_yamlet(c('id: subject\namt: dose')))
#' foo <- as_yamlet(system.file(package = 'yamlet', 'extdata','quinidine.yaml'))
#' class(foo)
#' writeLines(as.character(foo))
#' identical(foo, as_yamlet(as.character(foo)))
#' identical(as.character(foo), as.character(as_yamlet(as.character(foo))))
#' file <- system.file(package = 'yamlet','extdata','quinidine.csv')
#' file
#' foo <- resolve(decorate(file))
#' as.character(as_yamlet(foo))
#' as.character(as_yamlet(foo, exclude_attr = 'class'))
#'
as.character.yamlet <- function(
x,
sort = TRUE,
...
){
if(sort) x <- canonical(x, ...)
y <- as_yam(x, ...)
z <- as.character(y, ...)
z
}
#' Coerce List to Encoding
#'
#' Tries to coerce a list to an encoding. Names are
#' understood as decodes, and list values as codes.
#' On failure, the list is returned unchanged.
#'
#' @param x list
#' @param ... ignored
#' @return an encoding (length-one character), or original list if error occurs
#' @export
#' @keywords internal
#' @family encode
#' @importFrom encode encoded
#' @importFrom encode encode
#' @examples
#' meta <- system.file(package = 'yamlet', 'extdata','quinidine.yaml')
#' meta <- as_yamlet(meta)
#' list2encoding(meta$Creatinine$guide)
list2encoding <- function(x, ...){
# empty strings are effectively zero-length character
for(i in seq_along(x)){
    if(length(x[[i]]) == 1){
if(x[[i]] == ''){
x[[i]] <- character(0)
}
}
}
  drop <- x[sapply(x, length) == 0]
if(length(drop)){
warning(
'dropping ',length(drop),
' zero-length level(s) including labels: ',
paste(names(drop), collapse = ', ')
)
    x <- x[sapply(x, length) != 0]
}
out <- unlist(x)
nms <- names(out)
out <- try(encode(out, labels = nms))
if(length(out) != 1) return(x)
if(inherits(out, 'try-error')) return(x)
if(!encoded(out)) return(x)
out
}
#' Encode Yamlet
#'
#' Encodes yamlet. Each 'guide' element with length > 1
#' is converted to an encoding, if possible. If \code{data}
#' is supplied, conditional guides will be ignored.
#'
#' @param x yamlet
#' @param ... ignored
#' @param target attribute to encode
#' @param data optional data.frame for guide context
#' @return yamlet, with guide elements possibly transformed to encodings
#' @export
#' @keywords internal
#' @family encode
#' @examples
#' meta <- system.file(package = 'yamlet', 'extdata','quinidine.yaml')
#' meta <- as_yamlet(meta)
#' meta <- encode(meta)
encode.yamlet <- function(x, ..., target = 'guide', data = NULL){
for(i in seq_along(x)){
t <- x[[i]][[target]]
if(!is.null(t)){
if(length(t) > 1){ # prime criterion
if(!is.null(data)){
if(isConditional(t,data)){
next
}
}
try <- list2encoding(as.list(t))
if(inherits(try, 'character')){
if(length(try) == 1){
if(encoded(try)){
x[[i]][[target]] <- try
}
}
}
}
}
}
x
}
#' @importFrom encode encode
#' @export
encode::encode
#' Subset Yamlet
#'
#' Subsets yamlet. Preserves class, since a subset of yamlet is still yamlet.
#'
#' @param x object to subset
#' @param ... passed to next method
#' @return yamlet
#' @export
#' @keywords internal
#' @family yamlet
#' @examples
#' meta <- system.file(package = 'yamlet', 'extdata','quinidine.yaml')
#' meta <- as_yamlet(meta)
#' class(meta)
#' stopifnot(inherits(meta[1:2],'yamlet'))
`[.yamlet` <- function(x, ...){
x <- NextMethod()
class(x) <- 'yamlet'
x
}
#' Print a Yamlet
#'
#' Prints a yamlet object for interactive inspection.
#' By default, lists with no names (recursively) whose
#' length is unchanged by unlist() are displayed on one
#' line for compactness. If this seems misleading,
#' you can turn it off with \code{options(yamlet_print_simplify = FALSE)}.
#'
#' @param x yamlet
#' @param ... ignored
#' @param simplify whether to collapse the display of very simple lists into one line of output
#' @export
#' @keywords internal
#' @method print yamlet
#' @family yamlet
#' @return invisible(x)
#' @examples
#' as_yamlet('mpg: [efficiency, mi/gallon]\nvs: [Engine, [V-shaped: 0, straight: 1]]')
print.yamlet <- function(x, ..., simplify = getOption('yamlet_print_simplify', TRUE)){
stopifnot(length(simplify) == 1, is.logical(simplify), !is.na(simplify))
render <- function(x, ...)UseMethod('render')
render.list <- function(x, indent = 0, name = NULL, ...){
stopifnot(length(name) <= 1)
simplified <- unlist(x)
if(
simplify &&
length(x) == length(simplified) &&
is.null(names(simplified))
)return(render(simplified, indent = indent, name = name))
margin <- paste(rep(' ',indent), collapse = '')
leader <- paste0(margin, '- ',name)
writeLines(leader)
for(i in seq_along(x)){
render(x[[i]], indent = indent + 1, name = names(x)[[i]])
}
}
render.default <- function(x, indent = 0, name = NULL, ...){
# @0.10.12, adding to_yamlet() calls to distinguish variants of NA
stopifnot(length(name) <= 1)
x <- sapply(x, to_yamlet) # avoids the brackets of a global to_yamlet()
name <- to_yamlet(name)
margin <- paste(rep(' ',indent), collapse = '')
leader <- paste0(margin, '- ',name)
#data <- paste(format(x), collapse = ', ')
data <- paste(x, collapse = ', ') # no brackets
# don't print colon if name was null (now '')
if(name != '') leader = paste0(leader, ': ')
#msg <- paste0(leader,': ', data)
# if(is.null(name)) ...
msg <- paste0(leader, data)
writeLines(msg)
}
  # a render.function method (writing block scalars for function bodies)
  # is intentionally left disabled
  if(!length(x)){
    writeLines('0 length object of class yamlet')
    return(invisible(x))
  }
# x has length
nms <- names(x)
if(!length(nms))stop('yamlet must have names')
# x has names
lapply(seq_along(x), function(i)render(x[[i]], name = nms[[i]]))
invisible(x)
}
#' Read Yamlet
#'
#' Reads yamlet from file.
#' Similar to \code{\link{io_yamlet.character}}
#' but also reads text fragments.
#'
#' @param x file path for yamlet, or vector of yamlet in storage syntax
#' @param ... passed to \code{\link{as_yamlet}}
#' @param default_keys character: default keys for the first n anonymous members of each element
#' @export
#' @family interface
#' @seealso \code{\link{decorate.data.frame}}
#' @return yamlet: a named list with default keys applied
#' @examples
#' library(csv)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' meta <- system.file(package = 'yamlet', 'extdata','quinidine.yaml')
#' x <- as.csv(file)
#' y <- read_yamlet(meta)
#' x <- decorate(x, meta = y)
#' stopifnot(identical(x, decorate(file)))
read_yamlet <- function(
x,
...,
default_keys = getOption(
'yamlet_default_keys',
list('label','guide')
)
){
stopifnot(is.character(x))
as_yamlet(x, default_keys = default_keys, ...)
}
#' Write Yamlet
#'
#' Writes yamlet to file. Similar to \code{\link{io_yamlet.yamlet}}
#' but returns invisible storage format instead of invisible storage location.
#'
#' @param x something that can be coerced to class 'yamlet', like a yamlet object or a decorated data.frame
#' @param ... passed to \code{\link{as_yamlet}} and to \code{\link{as.character.yamlet}}
#' @param con passed to \code{\link{writeLines}}
#' @param eol end-of-line; passed to \code{\link{writeLines}} as \code{sep}
#' @param useBytes passed to \code{\link{writeLines}}
#' @param default_keys character: default keys for the first n anonymous members of each element
#' @param fileEncoding if \code{con} is character, passed to \code{\link{file}} as \code{encoding}
#' @param block whether to write block scalars
#' @export
#' @family interface
#' @seealso \code{\link{decorate.list}}
#' @return invisible character representation of yamlet (storage syntax)
#' @examples
#' library(csv)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' meta <- system.file(package = 'yamlet', 'extdata','quinidine.yaml')
#' x <- as.csv(file)
#' y <- read_yamlet(meta)
#' x <- decorate(x, meta = y)
#' identical(x, decorate(file))
#' tmp <- tempfile()
#' write_yamlet(x, tmp)
#' stopifnot(identical(read_yamlet(meta), read_yamlet(tmp)))
write_yamlet <- function(
x,
con = stdout(),
eol = "\n",
useBytes = FALSE,
default_keys = getOption(
'yamlet_default_keys',
list('label','guide')
),
fileEncoding = getOption('encoding'),
block = FALSE,
...
){
x <- as_yamlet(x, default_keys = default_keys, ...)
y <- as.character(x, default_keys = default_keys, block = block, ...)
if(is.character(con)){
con <- file(con, 'w', encoding = fileEncoding)
on.exit(close(con))
}
writeLines(text = y, con = con, sep = eol, useBytes = useBytes)
invisible(y)
}
# -- end of file: R/yamlet.R --
#' Display Global Yamlet Options
#'
#' Displays global yamlet options: those options
#' whose names begin with 'yamlet_'.
#' * **yamlet_append_units_open**: see \code{\link{append_units.default}}.
#' Controls how labels are constructed for variables
#' with 'units' attributes. In brief, units are wrapped in parentheses,
#' and appended to the label.
#' * **yamlet_append_units_close**: see \code{\link{append_units.default}}.
#' Controls how labels are constructed for variables
#' with 'units' attributes. In brief, units are wrapped in parentheses,
#' and appended to the label.
#' * **yamlet_append_units_style**: see \code{\link{append_units.default}}.
#' Determines parsing as 'plotmath' or 'latex', or 'plain' for no parsing.
#' * **yamlet_append_units_target**: see \code{\link{append_units.default}}.
#' By default, append result is assigned to attribute 'label', but could be
#' something else like 'title'.
#' * **yamlet_default_keys**: see \code{\link{as_yamlet.character}}.
#' The first two yaml attributes without specified names
#' are assumed to be 'label' and 'guide'.
#' * **yamlet_persistence**: see \code{\link{decorate.list}} and
#' \code{\link{as.integer.classified}}. By default, persistence
#' of column attributes is implemented by creating 'dvec' objects
#' (decorated vectors) using \pkg{vctrs} methodology.
#' * **yamlet_cell_value**: see \code{\link{as.data.frame.yamlet}}.
#' Controls how cells are calculated when converting yamlet
#' (decorations) to a data.frame.
#' * **yamlet_import**: see \code{\link{decorate.character}}.
#' Controls how primary data is read from file (default: as.csv()).
#' * **yamlet_extension**: see \code{\link{decorate.character}}.
#' Controls what file extension is expected for yaml metadata
#' (default: '.yaml')
#' * **yamlet_overwrite**: see \code{\link{decorate.list}}.
#' Controls whether existing decorations are overwritten.
#' * **yamlet_exclude_attr**: see \code{\link{decorations.data.frame}}
#' Controls what attributes are excluded from display.
# * **yamlet_as.integer_exclude_attr**: see \code{\link{as.integer.classified}}
# Controls what attributes are excluded when preserving attributes.
#' * **yamlet_with_title**: see \code{\link{make_title.dvec}} and \code{\link{drop_title.dvec}}.
#' For objects with (implied) units attributes, titles are by default
#' automatically created on resolve() and destroyed on desolve().
#' Interacts with yamlet_append_units_*.
#' * **yamlet_infer_guide**: see \code{\link{explicit_guide.yamlet}}.
#' Identifies the function that will be used to reclassify 'guide' as something
#' more explicit.
#' * **yamlet_explicit_guide_overwrite**: see \code{\link{explicit_guide.data.frame}}
#' and \code{\link{explicit_guide.dvec}}. In the latter case, controls
#' whether existing attributes are overwritten.
#' * **yamlet_explicit_guide_simplify**: \code{\link{explicit_guide.data.frame}}
#' and \code{\link{explicit_guide.dvec}}. Ordinarily, the 'guide' attribute
#' is removed if something more useful can be inferred.
#' * **yamlet_decorated_ggplot_search**: see \code{\link{print.decorated_ggplot}}.
#' The print method for decorated_ggplot populates axis labels by searching
#' first for attributes named 'expression', 'title', and 'label'. Customizable.
#' * **yamlet_decorated_ggplot_discrete**: see \code{\link{print.decorated_ggplot}}.
#' Discrete aesthetics to map from data decorations where available.
#' * **yamlet_decorated_ggplot_drop**: see \code{\link{print.decorated_ggplot}}.
#' Should unused factor levels be omitted from data-driven discrete scales?
#' * **yamlet_ggready_parse**: see \code{\link{ggready.data.frame}},
#' \code{\link{ggready.decorated}}. Whether to parse axis labels.
#' TRUE by default, but may be problematic if unintended.
#' * **yamlet_modify_reserved**: see \code{\link{modify.default}}. A list of
#' reserved labels that warn on reassignment.
#' * **yamlet_promote_reserved**: see \code{\link{promote.list}}.
#' Attributes to leave untouched when promoting singularities.
#' * **yamlet_promote**: see \code{\link{filter.decorated}}.
#' Whether to promote when filtering 'decorated'.
#' * **yamlet_as_units_preserve**: \code{\link{as_units.dvec}}.
#' What attributes to preserve when converting dvec to units.
#' Just 'label' by default.
#' Assign \code{options(yamlet_as_units_preserve = character(0))}
#' to remove all.
#' * **yamlet_print_simplify**: \code{\link{print.yamlet}}.
#' Whether to collapse interactively-displayed decorations
#' into a single line for lists that have no (nested) names
#' and have the same length when unlisted. True by default.
#' Can be misleading for lists with fine detail, but in
#' most cases fine detail will likely have names.
#' * **yamlet_format**: \code{\link{scripted.default}}.
#' Choice of 'html' or 'latex', guessed if not supplied.
#'
#' @export
#' @md
#' @return list
#' @examples
#' yamlet_options()
yamlet_options <- function(){
opts <- options()
nms <- names(opts)
nms <- nms[grepl('^yamlet_',nms)]
opts <- opts[nms]
opts
}
# -- end of file: R/yamlet_options.R --
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(dpi = 600, out.width = '50%')
## ---- message = FALSE, warning = FALSE----------------------------------------
library(magrittr)
library(ggplot2)
library(tablet)
library(yamlet)
library(dplyr)
library(kableExtra)
## -----------------------------------------------------------------------------
x <- data.frame(
time = 1:10,
work = (1:10)^1.5,
group = 1:2,
set = c(rep('delta',5), rep('gamma', 5))
)
x %<>% decorate('
time: [ Time_cum.^alpha, h ]
work: [ Work_total_obs\\n, kg*m^2/s^2 ]
group: [ Group, [ Second\\nGroup^\\*: 2, First\\nGroup^#: 1 ]]
set: [ Set, [ gamma, delta ]]
')
x %>% decorations
## ---- fig.width = 4.43, fig.height = 2.77-------------------------------------
x %>%
resolve %>%
ggplot(aes(time, work, color = group, shape = set)) +
geom_point()
## ---- fig.width = 4.33, fig.height = 2.82-------------------------------------
x %>%
scripted %>%
ggplot(aes(time, work, color = group, shape = set)) +
geom_point()
## -----------------------------------------------------------------------------
x %>%
scripted %>%
group_by(group, set) %>%
tablet %>%
as_kable
# -- end of file: inst/doc/scripted-html.R --
---
title: "Scripted HTML"
author: "Tim Bergsma"
date: "`r Sys.Date()`"
output:
html_document:
keep_md: true
toc: FALSE
vignette: >
%\VignetteIndexEntry{Scripted HTML}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
The point of this exercise is to demonstrate flexible rendering of
subscripts and superscripts. We want to write expressions for
column labels and units that are fairly readable as they are, and
yet can be easily rendered with equivalent results in
plotmath, html, or pdf.
First we load some packages.
```{r include = FALSE}
knitr::opts_chunk$set(dpi = 600, out.width = '50%')
```
```{r, message = FALSE, warning = FALSE}
library(magrittr)
library(ggplot2)
library(tablet)
library(yamlet)
library(dplyr)
library(kableExtra)
```
We create some example data.
```{r}
x <- data.frame(
time = 1:10,
work = (1:10)^1.5,
group = 1:2,
set = c(rep('delta',5), rep('gamma', 5))
)
x %<>% decorate('
time: [ Time_cum.^alpha, h ]
work: [ Work_total_obs\\n, kg*m^2/s^2 ]
group: [ Group, [ Second\\nGroup^\\*: 2, First\\nGroup^#: 1 ]]
set: [ Set, [ gamma, delta ]]
')
x %>% decorations
```
The label for column ```work``` has nested subscripts suggesting
$\sf{Work_{total_{obs}}}$. The label for column ```time``` suggests
$\sf{Time_{cum}{}^{\alpha}}$. The dot closes the subscript to distinguish
this from $\sf{Time_{cum^{\alpha}}}$. Backslash-n requests a line break.
How does this look when we plot it?
```{r, fig.width = 4.43, fig.height = 2.77}
x %>%
resolve %>%
ggplot(aes(time, work, color = group, shape = set)) +
geom_point()
```
By default, we get verbatim labels and units as substitutes for column names.
Next, we use ```scripted()``` instead of ```resolve()``` to indicate
that the labels should be understood as
potentially having subscripts and superscripts.
For this to work well, units should be constructed
using *, /, and ^ (even though the "units"
package supports other encodings).
```{r, fig.width = 4.33, fig.height = 2.82}
x %>%
scripted %>%
ggplot(aes(time, work, color = group, shape = set)) +
geom_point()
```
In the background, ```scripted()``` writes __expression__ and __plotmath__ attributes
(consumed by ```ggplot()```) and __title__ attributes (consumed by ```tablet()```).
We illustrate the latter.
```{r}
x %>%
scripted %>%
group_by(group, set) %>%
tablet %>%
as_kable
```
In summary, we have decorated our data with labels and
units containing markup for subscripts and superscripts.
If everything goes well, these render similarly
in figures and tables. They also render similarly in
html and pdf. Please see the pdf version of this document.
<!-- end of file: inst/doc/scripted-html.Rmd -->
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(dpi = 600, out.width = '50%')
## ---- message = FALSE, warning = FALSE----------------------------------------
library(magrittr)
library(ggplot2)
library(tablet)
library(yamlet)
library(dplyr)
library(kableExtra)
## -----------------------------------------------------------------------------
x <- data.frame(
time = 1:10,
work = (1:10)^1.5,
group = 1:2,
set = c(rep('delta',5), rep('gamma', 5))
)
x %<>% decorate('
time: [ Time_cum.^alpha, h ]
work: [ Work_total_obs\\n, kg*m^2/s^2 ]
group: [ Group, [ Second\\nGroup^\\*: 2, First\\nGroup^#: 1 ]]
set: [ Set, [ gamma, delta ]]
')
x %>% decorations
## ---- fig.width = 4.43, fig.height = 2.77-------------------------------------
x %>%
resolve %>%
ggplot(aes(time, work, color = group, shape = set)) +
geom_point()
## ---- fig.width = 4.33, fig.height = 2.82-------------------------------------
x %>%
scripted %>%
ggplot(aes(time, work, color = group, shape = set)) +
geom_point()
## -----------------------------------------------------------------------------
x %>%
scripted %>%
group_by(group, set) %>%
tablet %>%
as_kable
# -- end of file: inst/doc/scripted-pdf.R --
---
title: "Scripted PDF"
author: "Tim Bergsma"
date: "`r Sys.Date()`"
output:
pdf_document:
keep_tex: true
toc: FALSE
extra_dependencies:
booktabs,
longtable,
array,
multirow,
wrapfig,
float,
colortbl,
pdflscape,
tabu,
threeparttable,
threeparttablex,
makecell,
xcolor,
upgreek
vignette: >
%\VignetteIndexEntry{Scripted PDF}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
The point of this exercise is to demonstrate flexible rendering of
subscripts and superscripts. We want to write expressions for
column labels and units that are fairly readable as they are, and
yet can be easily rendered with equivalent results in
plotmath, html, or pdf.
First we load some packages.
```{r include = FALSE}
knitr::opts_chunk$set(dpi = 600, out.width = '50%')
```
```{r, message = FALSE, warning = FALSE}
library(magrittr)
library(ggplot2)
library(tablet)
library(yamlet)
library(dplyr)
library(kableExtra)
```
We create some example data.
```{r}
x <- data.frame(
time = 1:10,
work = (1:10)^1.5,
group = 1:2,
set = c(rep('delta',5), rep('gamma', 5))
)
x %<>% decorate('
time: [ Time_cum.^alpha, h ]
work: [ Work_total_obs\\n, kg*m^2/s^2 ]
group: [ Group, [ Second\\nGroup^\\*: 2, First\\nGroup^#: 1 ]]
set: [ Set, [ gamma, delta ]]
')
x %>% decorations
```
The label for column ```work``` has nested subscripts suggesting
$\sf{Work_{total_{obs}}}$. The label for column ```time``` suggests
$\sf{Time_{cum}{}^{\alpha}}$. The dot closes the subscript to distinguish
this from $\sf{Time_{cum^{\alpha}}}$. Backslash-n requests a line break.
How does this look when we plot it?
```{r, fig.width = 4.43, fig.height = 2.77}
x %>%
resolve %>%
ggplot(aes(time, work, color = group, shape = set)) +
geom_point()
```
By default, we get verbatim labels and units as substitutes for column names.
Next, we use ```scripted()``` instead of ```resolve()``` to indicate
that the labels should be understood as
potentially having subscripts and superscripts.
For this to work well, units should be constructed
using *, /, and ^ (even though the "units"
package supports other encodings).
```{r, fig.width = 4.33, fig.height = 2.82}
x %>%
scripted %>%
ggplot(aes(time, work, color = group, shape = set)) +
geom_point()
```
In the background, ```scripted()``` writes __expression__ and __plotmath__ attributes
(consumed by ```ggplot()```) and __title__ attributes (consumed by ```tablet()```).
We illustrate the latter.
```{r}
x %>%
scripted %>%
group_by(group, set) %>%
tablet %>%
as_kable
```
In summary, we have decorated our data with labels and
units containing markup for subscripts and superscripts.
If everything goes well, these render similarly
in figures and tables. They also render similarly in
html and pdf. Please see the html version of this document.
<!-- end of file: inst/doc/scripted-pdf.Rmd -->
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
knitr::opts_chunk$set(package.startup.message = FALSE)
## ---- package.startup.message = FALSE-----------------------------------------
suppressMessages(library(dplyr))
library(magrittr)
library(yamlet)
x <- data.frame(
ID = 1,
CONC = 1,
RACE = 1
)
x$ID %<>% structure(label = 'subject identifier')
x$CONC %<>% structure(label = 'concentration', guide = 'ng/mL')
x$RACE %<>% structure(label = 'race', guide = list(white = 0, black = 1, asian = 2))
x %>% as_yamlet %>% as.character %>% writeLines
# or
x %>% as_yamlet %>% as.character %>% writeLines(file.path(tempdir(), 'drug.yaml'))
## -----------------------------------------------------------------------------
meta <- read_yamlet(file.path(tempdir(), 'drug.yaml'))
meta
## -----------------------------------------------------------------------------
x <- data.frame(ID = 1, CONC = 1, RACE = 1)
x <- decorate(x, meta = meta)
decorations(x)
## -----------------------------------------------------------------------------
x <- data.frame(ID = 1, CONC = 1, RACE = 1)
x <- decorate(x,'
ID: subject identifier
CONC: [ concentration, ng/mL ]
RACE: [ race, [white: 0, black: 1, asian: 2 ]]
')
decorations(x)
## -----------------------------------------------------------------------------
decorations(x)
## -----------------------------------------------------------------------------
file <- file.path(tempdir(), 'out.yaml')
write_yamlet(x, con = file )
file %>% readLines %>% writeLines
## -----------------------------------------------------------------------------
library(csv)
# see ?Quinidine in package nlme
file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
a <- decorate(file)
as_yamlet(a)[1:3]
## -----------------------------------------------------------------------------
options(csv_source = FALSE) # see ?as.csv
file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
x <- decorate(file)
out <- file.path(tempdir(), 'out.csv')
io_csv(x, out)
y <- io_csv(out)
identical(x, y) # lossless 'round-trip'
file.exists(out)
meta <- sub('csv','yaml', out)
file.exists(meta)
meta %>% readLines %>% head %>% writeLines
options(csv_source = TRUE) # restore
## ---- fig.width = 5.46, fig.height = 3.52, fig.cap = 'Automatic axis labels and legends using curated metadata as column attributes.'----
suppressWarnings(library(ggplot2))
library(dplyr)
library(magrittr)
file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
file %>%
decorate %>%
filter(!is.na(conc)) %>%
resolve %>%
ggplot(aes(x = time, y = conc, color = Heart)) +
geom_point()
## -----------------------------------------------------------------------------
suppressMessages(library(table1))
file %>%
decorate %>%
resolve %>%
group_by(Subject) %>%
slice(1) %>%
table1(~ Age + Weight + Race | Heart, .)
# -- end of file: inst/doc/yamlet-introduction.R --
---
title: "An Introduction to Yamlet"
author: "Tim Bergsma"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
vignette: >
%\VignetteIndexEntry{An Introduction to Yamlet}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
knitr::opts_chunk$set(package.startup.message = FALSE)
```
## Motivation
R datasets of modest size are routinely stored as
flat files and retrieved as data frames. Unfortunately,
the classic storage formats (comma delimited, tab delimited)
do not have obvious mechanisms for storing data *about*
the data: i.e., metadata such as column labels, units,
and meanings of categorical codes. In many cases
we hold such information in our heads and hard-code
it in our scripts as axis labels, figure legends, or
table enhancements. That's probably fine for simple
cases but does not scale well in production settings
where the same metadata is re-used extensively. Is
there a better way to store, retrieve, and bind
table metadata for consistent reuse?
## Writing Yamlet
**yamlet** is a storage format for table metadata,
implemented as an R package.
It was designed to be:
- easy to edit
- easy to import
- open-ended
Although intended mainly to document (or pre-specify!) data column
labels and units, there are few restrictions
on the types of metadata that can be stored.
In fact, the only real restriction is that
the stored form must be valid [yaml](https://yaml.org/spec/1.2/spec.html).
Below, we use **yamlet** to indicate the
paradigm or package, and `yamlet` to indicate
stored instances.
### Manual Method
Actually, `yamlet` (think: "just a little yaml")
is a special case of `yaml`
that stores column attributes in one record
per column. For instance, to store the fact
that data for an imaginary drug trial
has a column called 'ID',
pop open a text file and write
```
ID:
```
This in itself is valid `yaml`!
But if you know a label to go with ID, you can add it:
```
ID: subject identifier
```
If you have (or expect) a second column with units,
you can add it below.
```
ID: subject identifier
CONC: concentration, ng/mL
```
A couple of notes here.
- The first thing after a colon must be a space.
- Whatever follows the colon-space is only One Thing.
- That One Thing could be a *sequence* of Many Things.
To get a sequence, just add square brackets. For
instance, above we have said that 'CONC' has the label
'concentration, ng/mL' but what we really intend
is that it has label 'concentration' and
unit 'ng/mL' so we rewrite it as
```
ID: subject identifier
CONC: [ concentration, ng/mL ]
```
Now label and units are two different things.
Notice we have not explicitly named them.
Unless we say otherwise, the **yamlet** package
will treat the first two un-named items
as 'label' (a short description) and 'guide' (a hint
about how to interpret the values). 'guide'
might be units for continuous variables,
levels (and possibly labels) for categorical
values, format strings for dates and times,
or perhaps something else.
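For instance, a date column might carry a format string as its guide
(a hypothetical column; the format string follows the package's own
example data):
```
DOB: [ date of birth, "%Y-%m-%d" ]
```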
The **yamlet** package
gives you five ways of controlling how
data items are identified (see details for `?as_yamlet.character`).
The most direct way is to supply explicit `yaml` keys:
```
ID: [ label: subject identifier ]
CONC: [ label: concentration, guide: ng/mL ]
```
We see that rather complex data can be expressed
using only colons, commas, and square brackets.
`yaml` itself also uses curly braces to express
"maps", but for purposes here they are unnecessary.
Note above that we had to add square brackets
for 'ID' when introducing the second colon (can't
really have two colons at the same level, so to
speak). Note also that sequences can be nested
arbitrarily deep. We take advantage of this principle
to transform 'guide' into a set of categorical levels.
```
ID: [ label: subject identifier ]
CONC: [ label: concentration, guide: ng/mL ]
RACE: [ label: race, guide: [ 0, 1, 2 ]]
```
or more simply (taking advantage of default keys)
```
ID: subject identifier
CONC: [ concentration, ng/mL ]
RACE: [ race, [ 0, 1, 2 ]]
```
So now we have 'codes' (levels) for our dataset
that represent races. What do these
codes mean? We supply 'decodes' (labels) as keys.
```
ID: subject identifier
CONC: [ concentration, ng/mL ]
RACE: [ race, [ white: 0, black: 1, asian: 2 ]]
```
Elegantly, `yaml` (and therefore **yamlet**) gives
us a way to represent a code even if we don't
know the decode, *and* a way to represent a
decode even though we don't know the code.
Imagine a dataset is under collaborative
development, and we already know that there
are some 'RACE' values of 0 but we're not sure what
they mean. We also know that there will be
some 'asian' race values, but we haven't
assigned a code yet. We can write:
```
ID: subject identifier
CONC: [ concentration, ng/mL ]
RACE: [ race, [ 0, black: 1, ? asian ]]
```
### Automatic Method
The whole point of this exercise (and I'm getting
a little ahead of myself) is to have some
stored metadata that we can read into R
and apply to a data frame as column attributes.
If typing square brackets isn't your thing,
you can actually do this backwards by
supplying column attributes to a data frame
and writing them out!
```{r, package.startup.message = FALSE}
suppressMessages(library(dplyr))
library(magrittr)
library(yamlet)
x <- data.frame(
ID = 1,
CONC = 1,
RACE = 1
)
x$ID %<>% structure(label = 'subject identifier')
x$CONC %<>% structure(label = 'concentration', guide = 'ng/mL')
x$RACE %<>% structure(label = 'race', guide = list(white = 0, black = 1, asian = 2))
x %>% as_yamlet %>% as.character %>% writeLines
# or
x %>% as_yamlet %>% as.character %>% writeLines(file.path(tempdir(), 'drug.yaml'))
```
## Reading and Binding Yamlet in R
Let's take advantage of that last example to show how we
can read **yamlet** into R.
```{r}
meta <- read_yamlet(file.path(tempdir(), 'drug.yaml'))
meta
```
`meta` is just a named list of column attributes.
`decorate()` loads them onto columns of a data frame.
```{r}
x <- data.frame(ID = 1, CONC = 1, RACE = 1)
x <- decorate(x, meta = meta)
decorations(x)
```
If you like, you can skip the external file and
decorate directly with `yamlet` (instead of, say, `structure()` like
we did above).
```{r}
x <- data.frame(ID = 1, CONC = 1, RACE = 1)
x <- decorate(x,'
ID: subject identifier
CONC: [ concentration, ng/mL ]
RACE: [ race, [white: 0, black: 1, asian: 2 ]]
')
decorations(x)
```
## Extracting and Writing Yamlet to Storage
We saw earlier that `as_yamlet()` can
pull "decorations" off a data frame and
present them as **yamlet**.
This is the default behavior of `decorations()`.
```{r}
decorations(x)
```
`write_yamlet()` calls `as_yamlet()` on its
primary argument, and sends the result to a
connection of our choice.
```{r}
file <- file.path(tempdir(), 'out.yaml')
write_yamlet(x, con = file )
file %>% readLines %>% writeLines
```
## Coordinated Input and Output
A useful convention is to store metadata in a file
next to the file it describes, with the same name
but the 'yaml' extension. `decorate()` expects
this, and if given a file path to a CSV
file, it will look for a '*.yaml' file nearby.
To "decorate" a CSV path means to read it,
read its `yamlet` (if any) and apply the `yamlet`
as attributes on the resulting data frame.
```{r}
library(csv)
# see ?Quinidine in package nlme
file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
a <- decorate(file)
as_yamlet(a)[1:3]
```
Another way to achieve the same thing is with `io_csv()`.
It is a toggle function that returns a path if
given a file to store, and returns a decorated data frame
if given a path to read (same for `io_table()`, which
has all the formatting options of `read.table()` and `write.table()`).
The path is just the path
to the primary data, but the path to the metadata is implied
as well.
```{r}
options(csv_source = FALSE) # see ?as.csv
file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
x <- decorate(file)
out <- file.path(tempdir(), 'out.csv')
io_csv(x, out)
y <- io_csv(out)
identical(x, y) # lossless 'round-trip'
file.exists(out)
meta <- sub('csv','yaml', out)
file.exists(meta)
meta %>% readLines %>% head %>% writeLines
options(csv_source = TRUE) # restore
```
## Using Yamlet Metadata
Metadata can be used prospectively or retrospectively.
Early in the data life cycle, it can be used prospectively
to guide table development in a collaborative setting
(i.e. as a data specification). Later in the life cycle,
metadata can be used retrospectively to consistently
inform report elements such as figures and tables.
### Example Figure
For example, the **yamlet** package provides an experimental wrapper
for ggplot that uses column attributes to automatically
generate informative axis labels and legends.
```{r, fig.width = 5.46, fig.height = 3.52, fig.cap = 'Automatic axis labels and legends using curated metadata as column attributes.'}
suppressWarnings(library(ggplot2))
library(dplyr)
library(magrittr)
file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
file %>%
decorate %>%
filter(!is.na(conc)) %>%
resolve %>%
ggplot(aes(x = time, y = conc, color = Heart)) +
geom_point()
```
### Example Table
The **table1** package uses labels and units stored
as attributes to enrich table output. In the example
below, we use `resolve()` to re-implement guides
as units and factor levels, which is what `table1()` needs.
```{r}
suppressMessages(library(table1))
file %>%
decorate %>%
resolve %>%
group_by(Subject) %>%
slice(1) %>%
table1(~ Age + Weight + Race | Heart, .)
```
## Caveat
It is a well-known problem that many table
manipulations in R cause column attributes
to be dropped. Binding of metadata is best
done at a point in a workflow where few or
no such manipulations remain. Else, precautions
should be taken to preserve or restore
attributes as necessary.
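For instance, `redecorate()` can restore decorations from a decorated
original. A minimal sketch, using the quinidine data bound to `x` above
(`as.numeric()` stands in for any attribute-dropping manipulation):
```{r}
y <- x
y$conc <- as.numeric(y$conc) # coercion silently drops the attributes
y <- redecorate(y, x) # restore decorations from the original
decorations(y, conc)
```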
## Reminder
Remember to quote a literal
value of yes, no, y, n, true, false, on, off,
or any of these capitalized, or any of these
as all-caps. Otherwise they will be converted
to TRUE or FALSE per the usual rules for yaml.
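For instance, a hypothetical smoking flag with literal yes/no decodes
would be written with quotes:
```
SMOKER: [ smoking status, [ 'no': 0, 'yes': 1 ]]
```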
## Conclusion
The **yamlet** package implements a metadata
storage syntax that is easy to write, read,
and bind to data frame columns. Systematic
curation of metadata enriches and
simplifies efforts to create and describe
tables stored in flat files. Conforming
tools can take advantage of internal and
external **yamlet** representations to
enhance data development and reporting.
| /scratch/gouwar.j/cran-all/cranData/yamlet/inst/doc/yamlet-introduction.Rmd |
#' ---
#' title: Simulate PK data
#' output: html_document
#' theme: united
#' highlight: tango
#' ---
# dm.xpt, vs.xpt, ex.xpt downloaded from https://github.com/phuse-org/TestDataFactory/tree/main/Updated/TDF_SDTM on 2022-06-23.
library(haven)
library(magrittr)
library(dplyr)
library(tidyr)
library(wrangle)
library(ggplot2)
library(nlmixr)
library(yamlet)
library(datetime)
library(RxODE)
dm <- 'dm.xpt.gz' %>% gzfile %>% read_xpt
dm %>% data.frame %>% head(3)
dm %>% constant
dm %<>% select(-(names(constant(dm))))
dm %>% decorations
dm %<>% select(USUBJID, SUBJID, RFSTDTC, AGE, SEX, RACE, ACTARM, ACTARMCD)
dm %$% SUBJID %>% as.integer %>% is.na %>% any
dm %>% decorations(SUBJID)
dm %<>% mutate(SUBJID = SUBJID %>% as.integer) %>% decorate(dm)
dm %>% decorations(SUBJID)
dm %<>% mutate(SUBJID = SUBJID %>% mimic(USUBJID)) %>% select(-USUBJID)
dm %>% itemize(ACTARMCD, ACTARM)
dm %<>% mutate(
ACTARM = ACTARM %>%
classified(
levels = c(
'Screen Failure',
'Placebo',
'Xanomeline Low Dose',
'Xanomeline High Dose'
)
) %>%
as.integer(-2)
) %>% select(-ACTARMCD)
dm %>% head
dm %>% implicit_guide %>% resolve %>% head
dm %>% resolve %>% head
dm %>% modify(SUBJID, guide = NULL) %>% decorations
dm %>% enumerate(SEX)
dm %<>% mutate(SEX = SEX %>% classified(labels = c('Female','Male')) %>% as.integer(-1)) %>% decorate(dm)
dm %>% decorations(SEX)
dm %>% enumerate(RACE)
dm %<>% mutate(
RACE = RACE %>% classified(
levels = c(
'WHITE',
'BLACK OR AFRICAN AMERICAN',
'ASIAN',
'AMERICAN INDIAN OR ALASKA NATIVE'
)
) %>% as.integer(-1)
) %>% decorate(dm)
dm %>% select(-SUBJID) %>% decorations
dm %<>% decorate('
RFSTDTC: [ guide: "%Y-%m-%d" ]
AGE: [ guide: year ]
')
dm %>% head
vs <- 'vs.xpt.gz' %>% gzfile %>% read_xpt
vs %>% data.frame %>% head
vs %>% constant
vs %<>% select(-(names(constant(vs))))
vs %>% enumerate(VSTESTCD, VSTEST, VSSTRESU, VSSTAT)
vs %<>% filter(VSSTAT == '') %>% select(-VSSTAT)
vs %>% enumerate(VSTESTCD, VSTEST, VSSTRESU, VSPOS)
vs %<>% filter(VSTESTCD %in% c('HEIGHT', 'WEIGHT'))
vs %>% constant
vs %<>% select(-(names(constant(vs))))
length(unique(vs$USUBJID))
length(unique(vs$USUBJID[vs$VSBLFL == 'Y']))
vs %>% group_by(USUBJID, VSTESTCD, VISITNUM) %>% status
vs %>% filter(VSTESTCD == 'HEIGHT') %>% nrow
vs %>%
filter(VSTESTCD == 'WEIGHT') %>%
group_by(USUBJID) %>%
filter(!any(VSBLFL == 'Y')) %>% data.frame
vs$VSBLFL[with(vs, USUBJID == '01-702-1082' & VSTESTCD == 'WEIGHT' & VISITNUM == 1)] <- 'Y'
vs %<>% filter(VSTESTCD %in% c('HEIGHT', 'WEIGHT'))
vs %>% enumerate(VSTESTCD, VSBLFL)
vs %<>% filter(VSTESTCD == 'HEIGHT' | VSBLFL == 'Y')
vs %>% enumerate(VSTESTCD)
vs %>% data.frame %>% head
vs %<>% select(USUBJID, VSTESTCD, VSSTRESN)
vs %<>% pivot_wider(names_from=VSTESTCD, values_from = VSSTRESN)
vs %<>% redecorate('
HEIGHT: [ Baseline Height, cm ]
WEIGHT: [ Baseline Weight, kg ]
')
vs %>% decorations
dm %>% head
vs %>% head
vs %>% decorations
dm %<>% mutate(USUBJID = SUBJID %>% resolve %>% as.character)
dm %<>% safe_join(vs) %>% select(-USUBJID)
dm %>% decorations(-SUBJID)
dm %>% head
dm %>% decorations
dm %>% resolve %>% head
ex <- 'ex.xpt.gz' %>% gzfile %>% read_xpt
ex %>% head %>% data.frame
ex %>% constant
ex %<>% select(-(names(constant(ex))))
ex %>% enumerate(EXTRT)
ex %>% enumerate(USUBJID)
ex %>% enumerate(VISIT)
ex %>% enumerate(VISIT, EXDOSE)
ex %>% enumerate(VISITNUM, VISIT)
ex %<>% mutate(VISIT = VISITNUM %>% mimic(VISIT) %>% as_dvec)
ex %>% decorations(VISIT)
ex %<>% select(USUBJID, VISIT, EXDOSE, EXSTDTC)
ex %<>% safe_join(
dm %>%
mutate(USUBJID = SUBJID) %>%
resolve(USUBJID) %>%
mutate(USUBJID = as.character(USUBJID)) %>%
select(SUBJID, USUBJID, RFSTDTC)
)
ex %<>% mutate(
TIME =
(
as.date(as.character(EXSTDTC)) -
as.date(as.character(RFSTDTC))
) %>% as.second %>% as.hour %>% as.numeric
)
head(ex)
ex %<>% select(SUBJID, VISIT, TIME, AMT = EXDOSE)
# convert to micrograms
ex %<>% mutate(AMT = AMT * 1000)
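# NONMEM-style event columns for the dose records:
# EVID = 1 (dose), CMT = 1 (depot), MDV = 1 (no observation)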
ex %<>% mutate(
DV = as_dvec(0),
MDV = as_dvec(1),
EVID = as_dvec(1),
CMT = as_dvec(1)
)
ex %>% decorations(-SUBJID)
ex %<>% decorate('
TIME: [ Time, h]
AMT: [guide: µg]
DV: [ Xanomeline Plasma Concentration, ng/mL]
EVID: [Event Type, [Dose: 1]]
CMT: [ Compartment, [ Depot: 1, Plasma: 2]]
MDV: [ Missing Dependent Value, [ DV not missing: 0, DV missing: 1]]
')
pc <- ex %>%
full_join(
by = character(0),
data.frame(
TAD = as_dvec(
c(0, 1, 2, 4, 8, 12, 24, 48, 96),
label = 'Time After Dose',
guide = 'h'
)
)
)
pc %>% decorations(-SUBJID)
pc %<>% mutate(AMT = AMT * 0, MDV = MDV * 0, EVID = EVID * 0, CMT = CMT + 1)
pc %<>% mutate(TIME = TIME + TAD)
pc %<>% arrange(SUBJID, TIME)
pc %>% head
pc %<>% redecorate('EVID: [ Event Type, [Observation: 0]]')
pc %>% decorations(-SUBJID)
x <- bind_rows(pc, ex) %>% left_join(dm)
x %>% decorations(-SUBJID)
x %>% head %>% data.frame
x %<>% select(
SUBJID, ACTARM, VISIT, TIME, TAD,
EVID, CMT, AMT, DV, MDV,
AGE, SEX, RACE, HEIGHT, WEIGHT
)
x %>% decorations(VISIT)
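# One-compartment model with first-order absorption: allometric weight
# scaling on CL and V2 (70 kg reference), random effects (eta.*) on V2
# and KA, and proportional residual error (cp.err)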
mod <- RxODE({
CL = exp(TCl)*(WEIGHT/70)^0.75;
V2 = exp(TV2 + eta.V2) *(WEIGHT/70);
  KA = exp(TKA + eta.KA);
kel = CL/V2;
d/dt(depot) = -KA*depot;
d/dt(centr) = KA*depot - kel * centr;
cp = centr/V2 * (1 + cp.err);
})
# mod2 <- RxODE({
# CL = exp(TCl)*(WEIGHT/70)^0.75;
# V2 = exp(TV2) *(WEIGHT/70);
# KA = exp(TKA)
# kel = CL/V2;
# d/dt(depot) = -KA*depot;
# d/dt(centr) = KA*depot - kel * centr;
# cp = centr/V2 * (1 + cp.err);
# })
theta <- c(TKA=log(2.3), TCl=log(187), TV2=log(5380)) # central
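# between-subject variability as a lower-triangular spec: var(eta.V2),
# cov(eta.V2, eta.KA), var(eta.KA); sigma is the residual variance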
omega <- lotri(eta.V2 + eta.KA ~ c(0.0243, 0, 0.5750))
sigma <- lotri(cp.err ~ .01)
set.seed(0)
ipred <- rxSolve(mod, theta, x %>% rename(ID=SUBJID), omega=omega, sigma=sigma)
ipred %>% ggplot(aes(time, cp)) + geom_point()
# set.seed(0)
# pred <- rxSolve(mod2, theta, x%>% rename(ID=SUBJID), omega=omega, sigma=sigma)
# pred %>% ggplot(aes(time, cp)) + geom_point()
x %<>% safe_join(ipred %>% select(SUBJID = id, TIME = time, IPRED = cp))
# x %<>% safe_join(pred %>% select(SUBJID = id, TIME = time, PRED = cp))
x %>% head
x %>% decorations(-SUBJID)
# x %<>% decorate('
# IPRED: [Individual Prediction, ng/mL]
# PRED: [Population Prediction, ng/mL]
# ')
# x %>%
# ggready %>%
# ggplot(aes(PRED, IPRED, color = ACTARM)) +
# geom_point()
#
# x %>%
# filter(ACTARM > 0) %>%
# ggready %>%
# ggplot(aes(TAD, IPRED, color = SUBJID)) +
# geom_line() +
# theme(legend.position = 'none') +
# facet_wrap(~VISIT)
x %>% head
pc <- x %>%
filter(EVID == 0) %>%
select(SUBJID, VISIT, TIME, TAD, DV = IPRED)
pc$DV %<>% as_dvec(label = 'Xanomeline Plasma Concentration', guide = 'ng/mL')
pc$DV %<>% signif(4)
pc %>% io_csv('pc.csv')
| /scratch/gouwar.j/cran-all/cranData/yamlet/inst/extdata/pc.R |
library(nlme)
library(csv)
data(Phenobarb)
x <- Phenobarb
x$event <- with(x, ifelse(is.na(dose), 'conc', 'dose'))
x$value <- with(x, ifelse(is.na(dose), conc, dose))
x$dose <- NULL
x$conc <- NULL
as.csv(x, 'phenobarb.csv')
| /scratch/gouwar.j/cran-all/cranData/yamlet/inst/extdata/phenobarb.R |
library(nlme)
library(dplyr)    # group_by(), slice()
library(magrittr) # pipe operator
library(metaplot) # metaplot()
# Pinheiro and Bates, 2000
# Mixed-Effects Models in S and S-Plus
fm1Quin.nlme <-
nlme(
conc ~ quinModel(
Subject, time, conc, dose, interval,
lV, lKa, lCl
),
data = Quinidine,
fixed = lV + lKa + lCl ~ 1,
random = pdDiag(lV + lCl ~ 1),
groups = ~ Subject,
start = list(fixed = c(5, -0.3, 2)),
na.action = na.pass,
naPattern = ~ !is.na(conc)
)
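# extract subject-level random effects and merge them, with the residuals,
# back into the source data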
ranef <- ranef(fm1Quin.nlme)
ranef$Subject <- rownames(ranef)
dat <- Quinidine
dat$resid[!is.na(dat$conc)] <- resid(fm1Quin.nlme)
dat$Subject <- as.numeric(as.character(dat$Subject))
dat$Subject <- factor(dat$Subject, levels = unique(dat$Subject))
#dat %>% group_by(Subject, time) %>% status
ranef$Subject <- factor(ranef$Subject, levels = levels(dat$Subject))
dat <- merge(dat, ranef)
dat <- dat[order(dat$Subject, dat$time),]
dat <- dat[,c(1:14,16,17,15)]
dat %>% group_by(Subject) %>% slice(1) %>% metaplot(lCl, glyco, ysmooth = TRUE)
# https://stackoverflow.com/questions/53937854/trouble-with-convergence-in-non-linear-mixed-effects-model-from-pinheiro-and-bat
library(nlme)
fm1Quin.nlme <- nlme(conc ~ quinModel(Subject, time, conc, dose, interval, lV, lKa, lCl),
data = Quinidine,
fixed = lV + lKa + lCl ~ 1,
random = pdDiag(lV + lCl ~ 1),
groups = ~ Subject,
start = list(fixed = c(5, -0.3, 2)),
na.action = na.pass, # R does not have the function na.include
naPattern = ~ !is.na(conc))
fm1Quin.fix <- fixef(fm1Quin.nlme)
fm2Quin.nlme <- update(fm1Quin.nlme,
fixed = list(lCl ~ glyco, lKa + lV ~ 1),
start = c(fm1Quin.fix[3], 0, fm1Quin.fix[2:1]))
fm2Quin.fix <- fixef(fm2Quin.nlme)
fm3Quin.nlme <- update(fm2Quin.nlme,
fixed = list(lCl ~ glyco + Creatinine, lKa + lV ~ 1),
start = c(3.0291, -0.3631, 0.1503, -0.7458, 5.2893),
control = nlmeControl(pnlsTol = 0.0011))
data(Quinidine)
library(csv)
as.csv(Quinidine, 'quinidine.csv')
| /scratch/gouwar.j/cran-all/cranData/yamlet/inst/extdata/quinidine.R |
#' ---
#' title: Assemble Xanomeline Dataset
#' output: html_document
#' theme: united
#' highlight: tango
#' ---
# dm.xpt, vs.xpt, ex.xpt downloaded from https://github.com/phuse-org/TestDataFactory/tree/main/Updated/TDF_SDTM on 2022-06-23.
library(haven)
library(magrittr)
library(dplyr)
library(tidyr)
library(wrangle)
library(ggplot2)
library(nlmixr)
library(yamlet)
library(datetime)
dm <- 'dm.xpt.gz' %>% gzfile %>% read_xpt
dm %>% data.frame %>% head(3)
dm %>% constant
dm %<>% select(-(names(constant(dm))))
dm %>% decorations
dm %<>% select(USUBJID, SUBJID, RFSTDTC, AGE, SEX, RACE, ACTARM, ACTARMCD)
dm %$% SUBJID %>% as.integer %>% is.na %>% any
dm %>% decorations(SUBJID)
dm %<>% mutate(SUBJID = SUBJID %>% as.integer) %>% decorate(dm)
dm %>% decorations(SUBJID)
dm %<>% mutate(SUBJID = SUBJID %>% mimic(USUBJID)) %>% select(-USUBJID)
dm %>% itemize(ACTARMCD, ACTARM)
dm %<>% mutate(
ACTARM = ACTARM %>%
classified(
levels = c(
'Screen Failure',
'Placebo',
'Xanomeline Low Dose',
'Xanomeline High Dose'
)
) %>%
as.integer(-2)
) %>% select(-ACTARMCD)
dm %>% head
dm %>% implicit_guide %>% resolve %>% head
dm %>% resolve %>% head
dm %>% modify(SUBJID, guide = NULL) %>% decorations
dm %>% enumerate(SEX)
dm %<>% mutate(SEX = SEX %>% classified(labels = c('Female','Male')) %>% as.integer(-1)) %>% decorate(dm)
dm %>% decorations(SEX)
dm %>% enumerate(RACE)
dm %<>% mutate(
RACE = RACE %>% classified(
levels = c(
'WHITE',
'BLACK OR AFRICAN AMERICAN',
'ASIAN',
'AMERICAN INDIAN OR ALASKA NATIVE'
)
) %>% as.integer(-1)
) %>% decorate(dm)
dm %>% select(-SUBJID) %>% decorations
dm %<>% decorate('
RFSTDTC: [ guide: "%Y-%m-%d" ]
AGE: [ guide: year ]
')
dm %>% head
vs <- 'vs.xpt.gz' %>% gzfile %>% read_xpt
vs %>% data.frame %>% head
vs %>% constant
vs %<>% select(-(names(constant(vs))))
vs %>% enumerate(VSTESTCD, VSTEST, VSSTRESU, VSSTAT)
vs %<>% filter(VSSTAT == '') %>% select(-VSSTAT)
vs %>% enumerate(VSTESTCD, VSTEST, VSSTRESU, VSPOS)
vs %<>% filter(VSTESTCD %in% c('HEIGHT', 'WEIGHT'))
vs %>% constant
vs %<>% select(-(names(constant(vs))))
length(unique(vs$USUBJID))
length(unique(vs$USUBJID[vs$VSBLFL == 'Y']))
vs %>% group_by(USUBJID, VSTESTCD, VISITNUM) %>% status
vs %>% filter(VSTESTCD == 'HEIGHT') %>% nrow
vs %>%
filter(VSTESTCD == 'WEIGHT') %>%
group_by(USUBJID) %>%
filter(!any(VSBLFL == 'Y')) %>% data.frame
vs$VSBLFL[with(vs, USUBJID == '01-702-1082' & VSTESTCD == 'WEIGHT' & VISITNUM == 1)] <- 'Y'
vs %<>% filter(VSTESTCD %in% c('HEIGHT', 'WEIGHT'))
vs %>% enumerate(VSTESTCD, VSBLFL)
vs %<>% filter(VSTESTCD == 'HEIGHT' | VSBLFL == 'Y')
vs %>% enumerate(VSTESTCD)
vs %>% data.frame %>% head
vs %<>% select(USUBJID, VSTESTCD, VSSTRESN)
vs %<>% pivot_wider(names_from=VSTESTCD, values_from = VSSTRESN)
vs %<>% redecorate('
HEIGHT: [ Baseline Height, cm ]
WEIGHT: [ Baseline Weight, kg ]
')
vs %>% decorations
dm %>% head
vs %>% head
vs %>% decorations
dm %<>% mutate(USUBJID = SUBJID %>% resolve %>% as.character)
dm %<>% safe_join(vs) %>% select(-USUBJID)
dm %>% decorations(-SUBJID)
dm %>% head
dm %>% decorations
dm %>% resolve %>% head
ex <- 'ex.xpt.gz' %>% gzfile %>% read_xpt
ex %>% head %>% data.frame
ex %>% constant
ex %<>% select(-(names(constant(ex))))
ex %>% enumerate(EXTRT)
ex %>% enumerate(USUBJID)
ex %>% enumerate(VISIT)
ex %>% enumerate(VISIT, EXDOSE)
ex %>% enumerate(VISITNUM, VISIT)
ex %<>% mutate(VISIT = VISITNUM %>% mimic(VISIT) %>% as_dvec)
ex %>% decorations(VISIT)
ex %<>% select(USUBJID, VISIT, EXDOSE, EXSTDTC)
ex %<>% safe_join(
dm %>%
mutate(USUBJID = SUBJID) %>%
resolve(USUBJID) %>%
mutate(USUBJID = as.character(USUBJID)) %>%
select(SUBJID, USUBJID, RFSTDTC)
)
ex %<>% mutate(
TIME =
(
as.date(as.character(EXSTDTC)) -
as.date(as.character(RFSTDTC))
) %>% as.second %>% as.hour %>% as.numeric
)
head(ex)
ex %<>% select(SUBJID, VISIT, TIME, AMT = EXDOSE)
# convert to micrograms
ex %<>% mutate(AMT = AMT * 1000)
ex %<>% mutate(
DV = as_dvec(0),
MDV = as_dvec(1),
EVID = as_dvec(1),
CMT = as_dvec(1)
)
ex %>% decorations(-SUBJID)
ex %<>% decorate('
TIME: [ Time, h]
AMT: [guide: µg]
DV: [ Xanomeline Plasma Concentration, ng/mL]
EVID: [Event Type, [Dose: 1]]
CMT: [ Compartment, [ Depot: 1, Plasma: 2]]
MDV: [ Missing Dependent Value, [ DV not missing: 0, DV missing: 1]]
')
# pc simulated in pc.R
pc <- 'pc.csv' %>% io_csv
pc %>% decorations(-SUBJID)
pc %<>% mutate(AMT = 0, MDV = 0, EVID = 0, CMT = 2)
pc %<>% redecorate(ex)
pc %>% head
pc %<>% redecorate('EVID: [ Event Type, [Observation: 0]]')
is.integer(pc$VISIT)
is.double(ex$VISIT)
x <- bind_rows(
pc,
ex
) %>% left_join(dm)
is.double(x$VISIT)
x %>% decorations(-SUBJID)
x %>% head %>% data.frame
x %<>% select(
SUBJID, ACTARM, VISIT, TIME, TAD,
EVID, CMT, AMT, DV, MDV,
AGE, SEX, RACE, HEIGHT, WEIGHT
)
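# treat non-positive concentrations as missing observations (MDV = 1)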
x$MDV[x$DV <= 0] <- 1
x %<>% arrange(SUBJID, TIME, EVID)
x %>% group_by(SUBJID, TIME, EVID) %>% status
x %>% io_csv('xanomeline.csv', gz = TRUE)
x %>% head
x %>% decorations(-SUBJID)
sessionInfo()
| /scratch/gouwar.j/cran-all/cranData/yamlet/inst/extdata/xanomeline.R |
---
title: "Scripted HTML"
author: "Tim Bergsma"
date: "`r Sys.Date()`"
output:
html_document:
keep_md: true
toc: FALSE
vignette: >
%\VignetteIndexEntry{Scripted HTML}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
The point of this exercise is to demonstrate flexible rendering of
subscripts and superscripts. We want to write expressions for
column labels and units that are fairly readable as they are, and
yet can be easily rendered with equivalent results in
plotmath, html, or pdf.
First we load some packages.
```{r include = FALSE}
knitr::opts_chunk$set(dpi = 600, out.width = '50%')
```
```{r, message = FALSE, warning = FALSE}
library(magrittr)
library(ggplot2)
library(tablet)
library(yamlet)
library(dplyr)
library(kableExtra)
```
We create some example data.
```{r}
x <- data.frame(
time = 1:10,
work = (1:10)^1.5,
group = 1:2,
set = c(rep('delta',5), rep('gamma', 5))
)
x %<>% decorate('
time: [ Time_cum.^alpha, h ]
work: [ Work_total_obs\\n, kg*m^2/s^2 ]
group: [ Group, [ Second\\nGroup^\\*: 2, First\\nGroup^#: 1 ]]
set: [ Set, [ gamma, delta ]]
')
x %>% decorations
```
The label for column ```work``` has nested subscripts suggesting
$\sf{Work_{total_{obs}}}$. The label for column ```time``` suggests
$\sf{Time_{cum}{}^{\alpha}}$. The dot closes the subscript to distinguish
this from $\sf{Time_{cum^{\alpha}}}$. Backslash-n requests a line break.
How does this look when we plot it?
```{r, fig.width = 4.43, fig.height = 2.77}
x %>%
resolve %>%
ggplot(aes(time, work, color = group, shape = set)) +
geom_point()
```
By default, we get verbatim labels and units as substitutes for column names.
Next, we use ```scripted()``` instead of ```resolve()``` to indicate
that the labels should be understood as
potentially having subscripts and superscripts.
For this to work well, units should be constructed
using *, /, and ^ (even though the "units"
package supports other encodings).
```{r, fig.width = 4.33, fig.height = 2.82}
x %>%
scripted %>%
ggplot(aes(time, work, color = group, shape = set)) +
geom_point()
```
In the background, ```scripted()``` is writing __expression__ and __plotmath__ attributes
(consumed by ```ggplot()``` ) and __title__ attributes (consumed by ```tablet()``` ).
We illustrate the latter.
```{r}
x %>%
scripted %>%
group_by(group, set) %>%
tablet %>%
as_kable
```
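To see these attributes directly, one can inspect a column (a quick
check; the attribute names are as described above):
```{r}
names(attributes(scripted(x)$work))
```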
In summary, we have decorated our data with labels and
units containing markup for subscripts and superscripts.
If everything goes well, these render similarly
in figures and tables. They also render similarly in
html and pdf. Please see the pdf version of this document.
| /scratch/gouwar.j/cran-all/cranData/yamlet/vignettes/scripted-html.Rmd |
---
title: "Scripted PDF"
author: "Tim Bergsma"
date: "`r Sys.Date()`"
output:
pdf_document:
keep_tex: true
toc: FALSE
extra_dependencies:
booktabs,
longtable,
array,
multirow,
wrapfig,
float,
colortbl,
pdflscape,
tabu,
threeparttable,
threeparttablex,
makecell,
xcolor,
upgreek
vignette: >
%\VignetteIndexEntry{Scripted PDF}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
The point of this exercise is to demonstrate flexible rendering of
subscripts and superscripts. We want to write expressions for
column labels and units that are fairly readable as they are, and
yet can be easily rendered with equivalent results in
plotmath, html, or pdf.
First we load some packages.
```{r include = FALSE}
knitr::opts_chunk$set(dpi = 600, out.width = '50%')
```
```{r, message = FALSE, warning = FALSE}
library(magrittr)
library(ggplot2)
library(tablet)
library(yamlet)
library(dplyr)
library(kableExtra)
```
We create some example data.
```{r}
x <- data.frame(
time = 1:10,
work = (1:10)^1.5,
group = 1:2,
set = c(rep('delta',5), rep('gamma', 5))
)
x %<>% decorate('
time: [ Time_cum.^alpha, h ]
work: [ Work_total_obs\\n, kg*m^2/s^2 ]
group: [ Group, [ Second\\nGroup^\\*: 2, First\\nGroup^#: 1 ]]
set: [ Set, [ gamma, delta ]]
')
x %>% decorations
```
The label for column ```work``` has nested subscripts suggesting
$\sf{Work_{total_{obs}}}$. The label for column ```time``` suggests
$\sf{Time_{cum}{}^{\alpha}}$. The dot closes the subscript to distinguish
this from $\sf{Time_{cum^{\alpha}}}$. Backslash-n requests a line break.
How does this look when we plot it?
```{r, fig.width = 4.43, fig.height = 2.77}
x %>%
resolve %>%
ggplot(aes(time, work, color = group, shape = set)) +
geom_point()
```
By default, we get verbatim labels and units as substitutes for column names.
Next, we use ```scripted()``` instead of ```resolve()``` to indicate
that the labels should be understood as
potentially having subscripts and superscripts.
For this to work well, units should be constructed
using *, /, and ^ (even though the "units"
package supports other encodings).
```{r, fig.width = 4.33, fig.height = 2.82}
x %>%
scripted %>%
ggplot(aes(time, work, color = group, shape = set)) +
geom_point()
```
In the background, ```scripted()``` is writing __expression__ and __plotmath__ attributes
(consumed by ```ggplot()``` ) and __title__ attributes (consumed by ```tablet()``` ).
We illustrate the latter.
```{r}
x %>%
scripted %>%
group_by(group, set) %>%
tablet %>%
as_kable
```
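To see these attributes directly, one can inspect a column (a quick
check; the attribute names are as described above):
```{r}
names(attributes(scripted(x)$work))
```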
In summary, we have decorated our data with labels and
units containing markup for subscripts and superscripts.
If everything goes well, these render similarly
in figures and tables. They also render similarly in
html and pdf. Please see the html version of this document.
| /scratch/gouwar.j/cran-all/cranData/yamlet/vignettes/scripted-pdf.Rmd |
#' @name coerce-methods
#' @rdname coerce-methods
#'
#' @description
#' Coercion of lists into [rmd_doc-class] objects and vice versa.
#'
#' @param object Either a list or a [rmd_doc-class] object.
#'
#' @example examples/as.R
#'
#' @export
list2rmd_doc <- function(object) {
x <- list()
if (!all(names(object) == "body")) {
x$header <- object[names(object) != "body"]
}
if ("body" %in% names(object)) {
x$body <- object$body
}
class(x) <- c("rmd_doc", "list")
return(x)
}
#' @rdname coerce-methods
#' @export
rmd_doc2list <- function(object) {
  # Inverse of list2rmd_doc(): flatten the header fields and append the
  # body, returning a plain list.
  x <- list()
  if ("header" %in% names(object)) {
    x <- object$header
  }
  if ("body" %in% names(object)) {
    x$body <- object$body
  }
  class(x) <- "list"
  return(x)
}
#' @name coerce-methods
#' @title Coercing lists and rmd_doc objects
#'
#' @description
#' Objects of class [rmd_doc-class] can be created from lists or converted back
#' to lists. This is convenient for defining functions that manipulate the
#' content of such objects.
#'
#' @rdname coerce-methods
#' @aliases coerce,list,rmd_doc-method
setAs("list", "rmd_doc", function(from) list2rmd_doc(object = from))
#' @name coerce-methods
#' @rdname coerce-methods
#' @aliases coerce,list,rmd_doc-method
setAs("rmd_doc", "list", function(from) rmd_doc2list(object = from))
| /scratch/gouwar.j/cran-all/cranData/yamlme/R/as.R |
#' @name rmd_doc-class
#'
#' @title R-markdown document
#'
#' @description
#' An S3 class for rmarkdown documents inheriting properties from lists.
#' Header settings are a list at `object$header`, while content in markdown
#' is a character vector at `object$body`.
#'
#' @exportClass rmd_doc
setOldClass(c("rmd_doc", "list"))
| /scratch/gouwar.j/cran-all/cranData/yamlme/R/classes.R |
#' @importFrom methods setOldClass
#' @importFrom rmarkdown render
#' @importFrom tools file_path_sans_ext
#' @importFrom yaml as.yaml yaml.load
NULL
| /scratch/gouwar.j/cran-all/cranData/yamlme/R/imports.R |
#' @name print2text
#' @title Print content of rmd_doc into a text string
#'
#' @description
#' Content of [rmd_doc-class] objects will be converted into a string that will
#' be passed either to [print()] or to [write_rmd()].
#'
#' @param x Input [rmd_doc-class] object.
#'
#' @keywords internal
print2text <- function(x) {
if ("header" %in% names(x)) {
x$header <- paste0(c("---\n", as.yaml(x$header), "---\n\n"),
collapse = ""
)
} else {
x$header <- paste0(c(
"---\n", "# No header in this document!\n", "---\n\n"
),
collapse = ""
)
}
if ("body" %in% names(x)) {
x <- x[c("header", "body")]
x$body <- paste0(x$body, collapse = "")
}
return(do.call(paste0, list(x, collapse = "")))
}
#' @name print
#'
#' @title Print Method for rmd_doc
#'
#' @description
#' Quick display for `rmd_doc` objects. This method also defines the way how
#' objects are displayed in the console.
#'
#' @param x An object of class `rmd_doc`.
#' @param maxlines An integer value indicating the number of lines used for the
#' display. Longer documents will be truncated.
#' @param ... Further arguments passed among methods (not yet in use).
#'
#' @return
#' A display of the resulting R-Markdown document in the console.
#'
#' @example examples/print.R
#'
#' @method print rmd_doc
#' @aliases print,rmd_doc-method
#' @export
print.rmd_doc <- function(x, maxlines = 10, ...) {
if ("body" %in% names(x) & length(x$body) > maxlines) {
x$body <- c(x$body[1:maxlines], "\n[...truncated]\n")
}
x <- print2text(x)
cat(x)
}
| /scratch/gouwar.j/cran-all/cranData/yamlme/R/print.R |
#' @name read_rmd
#' @rdname read_rmd
#'
#' @title Read R-markdown Documents
#'
#' @description
#' Import Rmd files into objects of class [rmd_doc-class].
#'
#' The function `txt_body()` adds a line break at the end of each element of a
#' character vector considering them as single lines.
#'
#' Note that comments will be deleted in the input file.
#'
#' @param file Character value indicating the path and the name to the Rmd file.
#' @param ... Arguments passed by `read_rmd()` to [readLines()].
#' In `txt_body()` they are character values passed to `c()`.
#' @param skip_head Logical value indicating whether the yaml head should be
#' skip or not (this argument is not used at the moment).
#'
#' @return
#' The function `read_rmd()` returns a [rmd_doc-class] object.
#' The function `txt_body()`, a character vector suitable for the parameter
#' `body` in the function [write_rmd()].
#'
#' @examples
#' \dontrun{
#' ## Read pre-installed example
#' ex_document <- read_rmd(file.path(
#' path.package("yamlme"),
#' "taxlistjourney.Rmd"
#' ))
#' }
#' @export
read_rmd <- function(file, ..., skip_head = FALSE) {
file <- txt_body(readLines(file, ...))
if (substr(file[1], 1, 3) != "---") {
message("Rmd file seems to be headless")
yaml_head <- list(body = file)
} else {
idx <- cumsum(grepl("---", file, fixed = TRUE))
yaml_head <- list(header = yaml.load(paste0(file[idx == 1], collapse = "")))
yaml_head$body <- file[idx > 1][-1]
}
if (skip_head) {
yaml_head$header <- NULL
}
class(yaml_head) <- c("rmd_doc", "list")
return(yaml_head)
}
#' @aliases txt_body
#' @rdname read_rmd
#' @export txt_body
txt_body <- function(...) {
return(paste0(c(...), "\n"))
}
| /scratch/gouwar.j/cran-all/cranData/yamlme/R/read_rmd.R |
#' @name render_rmd
#'
#' @title Render documents from object
#'
#' @description
#' This function is a wrapper of [rmarkdown::render()] and will also work with
#' file names but also enables the possibility of rendering from objects created
#' by [write_rmd()].
#'
#' @param input Either a character value indicating the path and the name of the
#' r-markdown file, or an object of class `rmd_doc`, written by
#' [write_rmd()].
#' @param output_file A character value indicating the name of the output file.
#'     This argument is passed to [rmarkdown::render()]. In the `rmd_doc`
#'     method, the document is rendered in a temporary folder and the
#'     resulting files are copied to the folder implied by `output_file`.
#' @param delete_rmd A logical value indicating whether the temporary Rmd file
#' should be deleted or not. If not, the file gets the same name as the
#' rendered file.
#' @param ... Further parameters passed to [rmarkdown::render()].
#'
#' @examples
#' \dontrun{
#' ## copy example to your working directory
#' filename <- "taxlistjourney.Rmd"
#' file.copy(from = file.path(path.package("yamlme"), filename), to = filename)
#'
#' ## Render the file with rmarkdown::render()
#' render_rmd(filename, output_file = "example")
#' browseURL("example.html")
#'
#' ## Render the file with yamlme
#' text_document <- read_rmd(filename)
#'
#' text_document <- update(text_document,
#' title = "my title", author = "my name",
#' output = "html_document"
#' )
#'
#' render_rmd(text_document, output_file = "example2")
#' browseURL("example2.html")
#' }
#'
#' @rdname render_rmd
#'
#' @export
render_rmd <- function(input, ...) {
UseMethod("render_rmd", input)
}
#' @rdname render_rmd
#' @aliases render_rmd,character-method
#' @method render_rmd character
#' @export
render_rmd.character <- function(input, ...) {
render(input, ...)
}
#' @rdname render_rmd
#' @aliases render_rmd,rmd_doc-method
#' @method render_rmd rmd_doc
#' @export
render_rmd.rmd_doc <- function(input, output_file, delete_rmd = TRUE, ...) {
if (!"header" %in% names(input)) {
stop("Input 'rmd_doc' object without header are not allowed.")
}
if (missing(output_file)) {
output_file <- paste0(deparse(substitute(input)))
}
output_file_temp <- file.path(tempdir(), basename(output_file))
rmd_file <- file.path(
tempdir(),
paste0(file_path_sans_ext(basename(output_file)), ".Rmd")
)
write_rmd(object = input, filename = rmd_file)
render(rmd_file, output_file = output_file_temp, ...)
files_tmp <- list.files(tempdir())
files_tmp <- files_tmp[grepl(file_path_sans_ext(basename(output_file)),
files_tmp,
fixed = TRUE
)]
if (delete_rmd) {
files_tmp <- files_tmp[!grepl(".Rmd", files_tmp, fixed = TRUE)]
}
file.copy(from = file.path(tempdir(), files_tmp), to = dirname(output_file))
}
| /scratch/gouwar.j/cran-all/cranData/yamlme/R/render_rmd.R |
#' @name update
#'
#' @title Update an rmd_doc
#'
#' @description
#' An alternative way to modify settings and content in `rmd_doc` objects.
#' Note that to drop an element from the YAML header, you can pass NULL as
#' the value of the respective parameter.
#'
#' @param object An object of class `rmd_doc`.
#' @param ... Named arguments to be inserted in the YAML header (passed to
#' [write_rmd()]).
#'
#' @example examples/update.R
#'
#' @method update rmd_doc
#' @export
update.rmd_doc <- function(object, ...) {
new_values <- list(...)
for (i in names(new_values)[!names(new_values) %in% c("body")]) {
object$header[[i]] <- new_values[[i]]
}
if ("body" %in% names(new_values)) {
object$body <- new_values$body
}
return(object)
}
| /scratch/gouwar.j/cran-all/cranData/yamlme/R/update.R |
#' @name write_rmd
#' @rdname write_rmd
#'
#' @title Writing R-Markdown Documents
#'
#' @description
#' This function writes an [rmd_doc-class] object to an R-Markdown file,
#' rendering the header as YAML and the body as markdown text.
#'
#' @param object [rmd_doc-class] object used to write an Rmarkdown file.
#'     Documents without a header are written with a placeholder comment
#'     in the YAML block.
#' @param filename A character value with the name of the file to be written.
#' If not included, the extension *.Rmd will be appended to this name.
#'     This argument is required for the `rmd_doc` method.
#' @param ... Further arguments passed among methods (not yet used).
#'
#' @return
#' An Rmd file written at the path given by `filename`.
#'
#' @examples
#' \dontrun{
#' my_document <- list(
#' title = "Sample Document",
#' author = "Miguel Alavarez",
#' output = "html_document",
#' body = txt_body(
#' "# Intro",
#' "",
#' "This is just an example."
#' )
#' )
#' my_document <- as(my_document, "rmd_doc")
#' write_rmd(my_document, filename = file.path(tempdir(), "example"))
#' }
#'
#' @export
write_rmd <- function(object, ...) {
UseMethod("write_rmd", object)
}
#' @rdname write_rmd
#' @aliases write_rmd,rmd_doc-method
#' @method write_rmd rmd_doc
#' @export
write_rmd.rmd_doc <- function(object, filename, ...) {
filename <- paste0(file_path_sans_ext(filename), ".Rmd")
con <- file(filename, "wb")
writeBin(
charToRaw(print2text(object)),
con
)
close(con)
}
| /scratch/gouwar.j/cran-all/cranData/yamlme/R/write_rmd.R |
## ----eval = FALSE-------------------------------------------------------------
# library(devtools)
# install_github("kamapu/yamlme", build_vignettes = TRUE)
## -----------------------------------------------------------------------------
library(yamlme)
## -----------------------------------------------------------------------------
my_document <- list(title = "My first document")
as(my_document, "rmd_doc")
## -----------------------------------------------------------------------------
my_document <- list(description = paste0(c(
"This text starts with a vertical line",
"and will be thus used as a description",
"in the head."), collapse = "\n"))
as(my_document, "rmd_doc")
## -----------------------------------------------------------------------------
my_document <- list("header-includes" = c(
"\\usepackage{titling}",
"\\pretitle{\\begin{flushleft}\\LARGE\\textbf}",
"\\posttitle{\\end{flushleft}}",
"\\sffamily"))
as(my_document, "rmd_doc")
## -----------------------------------------------------------------------------
my_document <- list(output = list(pdf_document = "default"))
as(my_document, "rmd_doc")
## -----------------------------------------------------------------------------
my_document <- list(
author = list(
list(
name = "Miguel Alvarez",
url = "https://kamapu.github.io/"),
list(
name = "Bisrat H. Gebrekhidan")))
as(my_document, "rmd_doc")
## -----------------------------------------------------------------------------
my_document <- list(
title = "Mi First Document",
author = "My Name",
output = "html_document",
body = txt_body(
"# Starting a working day",
"",
"At the beginning of every day I will do:",
"",
"- Say everyone \"Good morning!\"",
"- Start the coffe mashine",
"- Start the computer",
"- Read mails"))
my_document <- as(my_document, "rmd_doc")
## ----eval = FALSE-------------------------------------------------------------
# render_rmd(input = my_document)
# browseURL("my_document.html")
## -----------------------------------------------------------------------------
my_template <- list(
title = "Example HTML document",
author = "My Self",
output = "html_document",
body = txt_body(
"# Introduction",
"",
"This is just an example."))
my_template <- as(my_template, "rmd_doc")
my_template
## -----------------------------------------------------------------------------
my_template <- update(my_template,
title = "Example PDF document",
output = "pdf_document")
my_template
| /scratch/gouwar.j/cran-all/cranData/yamlme/inst/doc/yamlme-intro.R |
---
title: "Setting elements in yaml headers for r-markdown documents"
author: Miguel Alvarez
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Setting elements in yaml headers for r-markdown documents}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
# Introduction
The package `yamlme` aims to produce R-markdown
documents from plain R code. The tasks of this package are the automatic
generation of reports from R sessions as well as the production of templates
that can be shared as functions or `rmd_doc` objects.
# Installing yamlme
To install this package from its **GitHub** repository, you can use
the package `devtools`.
```{r eval = FALSE}
library(devtools)
install_github("kamapu/yamlme", build_vignettes = TRUE)
```
Load the package after you start a new session.
```{r}
library(yamlme)
```
# Writing R-markdown documents
This package uses functions of
[`yaml`](http://biostat.app.vumc.org/wiki/Main/YamlR) for reading and writing
yaml-headers.
In `yamlme`, R-markdown documents can be created from lists, for instance:
```{r}
my_document <- list(title = "My first document")
as(my_document, "rmd_doc")
```
Some applications may also require a description (or abstract) as in the case of
documents rendered by `distill`.
To add a description you need to collapse lines into a single string
(character value) including line breaks.
The description will start with a vertical line in the yaml header.
```{r}
my_document <- list(description = paste0(c(
"This text starts with a vertical line",
"and will be thus used as a description",
"in the head."), collapse = "\n"))
as(my_document, "rmd_doc")
```
You can use character vectors to produce sequences in the yaml header, as
sometimes required for PDF documents.
```{r}
my_document <- list("header-includes" = c(
"\\usepackage{titling}",
"\\pretitle{\\begin{flushleft}\\LARGE\\textbf}",
"\\posttitle{\\end{flushleft}}",
"\\sffamily"))
as(my_document, "rmd_doc")
```
List embedded into lists can be conveniently used to produce more complex maps
for yaml headers in Rmarkdown documents.
```{r}
my_document <- list(output = list(pdf_document = "default"))
as(my_document, "rmd_doc")
```
The following is a more complex map using embedded lists.
```{r}
my_document <- list(
author = list(
list(
name = "Miguel Alvarez",
url = "https://kamapu.github.io/"),
list(
name = "Bisrat H. Gebrekhidan")))
as(my_document, "rmd_doc")
```
To see how a specific yaml map is represented in R-markdown documents, you
can read existing Rmd files with the function `read_rmd()`. Also consider a
visit to the R yaml homepage [here](https://biostat.app.vumc.org/wiki/Main/YamlR).
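For instance, you can import the example document installed with this
package (the path follows the package's own documentation):
```{r eval = FALSE}
ex_document <- read_rmd(file.path(path.package("yamlme"), "taxlistjourney.Rmd"))
ex_document
```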
# Case example
Here is an example of a full R-markdown document.
```{r}
my_document <- list(
title = "Mi First Document",
author = "My Name",
output = "html_document",
body = txt_body(
"# Starting a working day",
"",
"At the beginning of every day I will do:",
"",
"- Say everyone \"Good morning!\"",
"- Start the coffe mashine",
"- Start the computer",
"- Read mails"))
my_document <- as(my_document, "rmd_doc")
```
In this case we can render the document directly from the resulting object.
```{r eval = FALSE}
render_rmd(input = my_document)
browseURL("my_document.html")
```
# Using objects as template
The function `update()` can be used to modify settings and content in documents
written by `write_rmd()`.
```{r}
my_template <- list(
title = "Example HTML document",
author = "My Self",
output = "html_document",
body = txt_body(
"# Introduction",
"",
"This is just an example."))
my_template <- as(my_template, "rmd_doc")
my_template
```
We can also update the template to change the title and output format of the
document.
```{r}
my_template <- update(my_template,
title = "Example PDF document",
output = "pdf_document")
my_template
```
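The updated template can then be rendered directly, just as before (a
sketch following the earlier example):
```{r eval = FALSE}
render_rmd(my_template, output_file = "example")
browseURL("example.pdf")
```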
| /scratch/gouwar.j/cran-all/cranData/yamlme/inst/doc/yamlme-intro.Rmd |
---
title: "A journey in rOpenSci"
description: |
  A brief account of the experiences gained after a bit more than two years
  of review of the package taxlist for its submission to rOpenSci.
  This journey resulted in new tools learned and new friends.
The talk was presented at LatinR 2020 (virtual meeting).
author:
- name: Miguel Alvarez
url: https://kamapu.github.io/
date: 10-08-2020
output:
distill::distill_article:
self_contained: false
draft: false
categories:
- conference
- R
#preview: images/cover_video.png
---
After a long review process, where a lot of improvements have been implemented,
the package [`taxlist`](https://docs.ropensci.org/taxlist/) managed to be
accepted in [**rOpenSci**](https://ropensci.org/).
The package `taxllist` handles taxonomic lists as **S4** objects in
[**R**](https://www.r-project.org/).
Such objects can be used as modules within data sets of biodiversity records as
in the package [`vegtable`](https://github.com/kamapu/vegtable).
In a brief talk, a summary of the experiences during the submission process to
**rOpenSci** was presented in the context of the conference [**LatinR
2020**](https://latin-r.com/en).
Here you can access to the
[slides](https://kamapu.github.io/documents/Alvarez2020.pdf) and the
[video](https://www.youtube.com/watch?v=NDB4s8N2fTw) of the presentation.
[](http://www.youtube.com/watch?v=NDB4s8N2fTw "")
| /scratch/gouwar.j/cran-all/cranData/yamlme/inst/taxlistjourney.Rmd |
---
title: "Setting elements in yaml headers for r-markdown documents"
author: Miguel Alvarez
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Setting elements in yaml headers for r-markdown documents}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
# Introduction
The package `yamlme` aims to produce R-markdown
documents from plain R code. Its tasks are the automatic
generation of reports from R sessions as well as the production of templates
that can be shared as functions or `rmd_doc` objects.
# Installing yamlme
To install this package from its **GitHub** repository, you can use
the package `devtools`.
```{r eval = FALSE}
library(devtools)
install_github("kamapu/yamlme", build_vignettes = TRUE)
```
Load the package after you start a new session.
```{r}
library(yamlme)
```
# Writing R-markdown documents
This package uses functions from
[`yaml`](http://biostat.app.vumc.org/wiki/Main/YamlR) for reading and writing
yaml headers.
In `yamlme`, R-markdown documents can be created from lists, for instance:
```{r}
my_document <- list(title = "My first document")
as(my_document, "rmd_doc")
```
Some applications may also require a description (or abstract) as in the case of
documents rendered by `distill`.
To add a description you need to collapse lines into a single string
(character value) including line breaks.
The description will start with a vertical line in the yaml header.
```{r}
my_document <- list(description = paste0(c(
"This text starts with a vertical line",
"and will be thus used as a description",
"in the head."), collapse = "\n"))
as(my_document, "rmd_doc")
```
You can use character vectors to produce sequences in the yaml header, as
sometimes required for PDF documents.
```{r}
my_document <- list("header-includes" = c(
"\\usepackage{titling}",
"\\pretitle{\\begin{flushleft}\\LARGE\\textbf}",
"\\posttitle{\\end{flushleft}}",
"\\sffamily"))
as(my_document, "rmd_doc")
```
Lists embedded into lists can be conveniently used to produce more complex maps
for yaml headers in Rmarkdown documents.
```{r}
my_document <- list(output = list(pdf_document = "default"))
as(my_document, "rmd_doc")
```
The following is a more complex map using embedded lists.
```{r}
my_document <- list(
author = list(
list(
name = "Miguel Alvarez",
url = "https://kamapu.github.io/"),
list(
name = "Bisrat H. Gebrekhidan")))
as(my_document, "rmd_doc")
```
To see how a specific yaml map is represented in an Rmarkdown document, you
can read existing Rmd files using the function `read_rmd()`. Also consider
visiting the R yaml homepage [here](https://biostat.app.vumc.org/wiki/Main/YamlR).
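For instance, a minimal sketch (this assumes a file `my_file.Rmd` exists in the
working directory; the file name is made up for illustration):
```{r eval = FALSE}
my_doc <- read_rmd("my_file.Rmd")
my_doc
```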
# Case example
Here is an example of a full Rmarkdown document.
```{r}
my_document <- list(
title = "Mi First Document",
author = "My Name",
output = "html_document",
body = txt_body(
"# Starting a working day",
"",
"At the beginning of every day I will do:",
"",
"- Say everyone \"Good morning!\"",
"- Start the coffe mashine",
"- Start the computer",
"- Read mails"))
my_document <- as(my_document, "rmd_doc")
```
In this case we can render the document directly from the resulting object.
```{r eval = FALSE}
render_rmd(input = my_document)
browseURL("my_document.html")
```
# Using objects as template
The function `update()` can be used to modify settings and content in documents
written by `write_rmd()`.
```{r}
my_template <- list(
title = "Example HTML document",
author = "My Self",
output = "html_document",
body = txt_body(
"# Introduction",
"",
"This is just an example."))
my_template <- as(my_template, "rmd_doc")
my_template
```
We can also modify the template to adapt the output format or other settings of
the document.
```{r}
my_template <- update(my_template,
title = "Example PDF document",
output = "pdf_document")
my_template
```
| /scratch/gouwar.j/cran-all/cranData/yamlme/vignettes/yamlme-intro.Rmd |
#' General Function to Estimate Performance
#'
#' This function estimates one or more common performance estimates depending
#' on the class of `truth` (see **Value** below) and returns them in a three
#' column tibble. If you wish to modify the metrics used or how they are used
#' see [metric_set()].
#'
#' @inheritParams roc_auc
#'
#' @param data A `data.frame` containing the columns specified by `truth`,
#' `estimate`, and `...`.
#'
#' @param truth The column identifier for the true results (that
#' is `numeric` or `factor`). This should be an unquoted column name
#'   although this argument is passed by expression and supports
#' [quasiquotation][rlang::quasiquotation] (you can unquote column
#' names).
#'
#' @param estimate The column identifier for the predicted results
#' (that is also `numeric` or `factor`). As with `truth` this can be
#' specified different ways but the primary method is to use an
#' unquoted variable name.
#'
#' @return
#'
#' A three column tibble.
#'
#' * When `truth` is a factor, there are rows for [accuracy()] and the
#' Kappa statistic ([kap()]).
#'
#' * When `truth` has two levels and 1 column of class probabilities is
#' passed to `...`, there are rows for the two class versions of
#' [mn_log_loss()] and [roc_auc()].
#'
#' * When `truth` has more than two levels and a full set of class probabilities
#' are passed to `...`, there are rows for the multiclass version of
#' [mn_log_loss()] and the Hand Till generalization of [roc_auc()].
#'
#' * When `truth` is numeric, there are rows for [rmse()], [rsq()],
#' and [mae()].
#'
#' @seealso [metric_set()]
#'
#' @examples
#'
#' # Accuracy and kappa
#' metrics(two_class_example, truth, predicted)
#'
#' # Add on multinomial log loss and ROC AUC by specifying class prob columns
#' metrics(two_class_example, truth, predicted, Class1)
#'
#' # Regression metrics
#' metrics(solubility_test, truth = solubility, estimate = prediction)
#'
#' # Multiclass metrics work, but you cannot specify any averaging
#' # for roc_auc() besides the default, hand_till. Use the specific function
#' # if you need more customization
#' library(dplyr)
#'
#' hpc_cv %>%
#' group_by(Resample) %>%
#' metrics(obs, pred, VF:L) %>%
#' print(n = 40)
#'
#' @export metrics
metrics <- function(data, ...) {
UseMethod("metrics")
}
#' @export
#' @rdname metrics
metrics.data.frame <- function(data,
truth,
estimate,
...,
na_rm = TRUE,
options = list()) {
check_roc_options_deprecated("metrics", options)
names <- names(data)
truth <- tidyselect::vars_pull(names, {{ truth }})
estimate <- tidyselect::vars_pull(names, {{ estimate }})
probs <- names(tidyselect::eval_select(rlang::expr(c(...)), data))
is_class <- is.factor(data[[truth]]) || is_class_pred(data[[truth]])
if (is_class) {
metrics_class <- metric_set(accuracy, kap)
res <- metrics_class(data, !!truth, estimate = !!estimate, na_rm = na_rm)
if (length(probs) > 0L) {
res2 <- mn_log_loss(data, !!truth, !!probs, na_rm = na_rm)
res3 <- roc_auc(data, !!truth, !!probs, na_rm = na_rm)
res <- dplyr::bind_rows(res, res2, res3)
}
} else {
# Assume only regression for now
metrics_regression <- metric_set(rmse, rsq, mae)
res <- metrics_regression(
data = data,
truth = !!truth,
estimate = !!estimate,
na_rm = na_rm
)
}
res
}
# Metric set -------------------------------------------------------------------
#' Combine metric functions
#'
#' `metric_set()` allows you to combine multiple metric functions together
#' into a new function that calculates all of them at once.
#'
#' @param ... The bare names of the functions to be included in the metric set.
#'
#' @details
#' All functions must be either:
#' - Only numeric metrics
#' - A mix of class metrics or class prob metrics
#' - A mix of dynamic, integrated, and static survival metrics
#'
#' For instance, `rmse()` can be used with `mae()` because they
#' are numeric metrics, but not with `accuracy()` because it is a classification
#' metric. But `accuracy()` can be used with `roc_auc()`.
#'
#' The returned metric function will have a different argument list
#' depending on whether numeric metrics or a mix of class/prob metrics were
#' passed in.
#'
#' ```
#' # Numeric metric set signature:
#' fn(
#' data,
#' truth,
#' estimate,
#' na_rm = TRUE,
#' case_weights = NULL,
#' ...
#' )
#'
#' # Class / prob metric set signature:
#' fn(
#' data,
#' truth,
#' ...,
#' estimate,
#' estimator = NULL,
#' na_rm = TRUE,
#' event_level = yardstick_event_level(),
#' case_weights = NULL
#' )
#'
#' # Dynamic / integrated / static survival metric set signature:
#' fn(
#' data,
#' truth,
#' ...,
#' estimate,
#' na_rm = TRUE,
#' case_weights = NULL
#' )
#' ```
#'
#' When mixing class and class prob metrics, pass in the hard predictions
#' (the factor column) as the named argument `estimate`, and the soft
#' predictions (the class probability columns) as bare column names or
#' `tidyselect` selectors to `...`.
#'
#' When mixing dynamic, integrated, and static survival metrics, pass in the
#' time predictions as the named argument `estimate`, and the survival
#' predictions as bare column names or `tidyselect` selectors to `...`.
#'
#' If `metric_tweak()` has been used to "tweak" one of these arguments, like
#' `estimator` or `event_level`, then the tweaked version wins. This allows you
#' to set the estimator on a metric by metric basis and still use it in a
#' `metric_set()`.
#'
#' @examples
#' library(dplyr)
#'
#' # Multiple regression metrics
#' multi_metric <- metric_set(rmse, rsq, ccc)
#'
#' # The returned function has arguments:
#' # fn(data, truth, estimate, na_rm = TRUE, ...)
#' multi_metric(solubility_test, truth = solubility, estimate = prediction)
#'
#' # Groups are respected on the new metric function
#' class_metrics <- metric_set(accuracy, kap)
#'
#' hpc_cv %>%
#' group_by(Resample) %>%
#' class_metrics(obs, estimate = pred)
#'
#' # ---------------------------------------------------------------------------
#'
#' # If you need to set options for certain metrics,
#' # do so by wrapping the metric and setting the options inside the wrapper,
#' # passing along truth and estimate as quoted arguments.
#' # Then add on the function class of the underlying wrapped function,
#' # and the direction of optimization.
#' ccc_with_bias <- function(data, truth, estimate, na_rm = TRUE, ...) {
#' ccc(
#' data = data,
#' truth = !!rlang::enquo(truth),
#' estimate = !!rlang::enquo(estimate),
#' # set bias = TRUE
#' bias = TRUE,
#' na_rm = na_rm,
#' ...
#' )
#' }
#'
#' # Use `new_numeric_metric()` to formalize this new metric function
#' ccc_with_bias <- new_numeric_metric(ccc_with_bias, "maximize")
#'
#' multi_metric2 <- metric_set(rmse, rsq, ccc_with_bias)
#'
#' multi_metric2(solubility_test, truth = solubility, estimate = prediction)
#'
#' # ---------------------------------------------------------------------------
#' # A class probability example:
#'
#' # Note that, when given class or class prob functions,
#' # metric_set() returns a function with signature:
#' # fn(data, truth, ..., estimate)
#' # to be able to mix class and class prob metrics.
#'
#' # You must provide the `estimate` column by explicitly naming
#' # the argument
#'
#' class_and_probs_metrics <- metric_set(roc_auc, pr_auc, accuracy)
#'
#' hpc_cv %>%
#' group_by(Resample) %>%
#' class_and_probs_metrics(obs, VF:L, estimate = pred)
#'
#' @seealso [metrics()]
#'
#' @export
metric_set <- function(...) {
quo_fns <- enquos(...)
validate_not_empty(quo_fns)
# Get values and check that they are fns
fns <- lapply(quo_fns, eval_tidy)
validate_inputs_are_functions(fns)
# Add on names, and then check that
# all fns are of the same function class
names(fns) <- vapply(quo_fns, get_quo_label, character(1))
validate_function_class(fns)
fn_cls <- class1(fns[[1]])
# signature of the function is different depending on input functions
if (fn_cls == "numeric_metric") {
make_numeric_metric_function(fns)
} else if (fn_cls %in% c("prob_metric", "class_metric")) {
make_prob_class_metric_function(fns)
} else if (fn_cls %in% c(
"dynamic_survival_metric",
"static_survival_metric",
"integrated_survival_metric"
)) {
make_survival_metric_function(fns)
} else {
cli::cli_abort(
"{.fn validate_function_class} should have errored on unknown classes.",
.internal = TRUE
)
}
}
#' @export
print.metric_set <- function(x, ...) {
cat(format(x), sep = "\n")
invisible(x)
}
#' @export
format.metric_set <- function(x, ...) {
metrics <- attributes(x)$metrics
names <- names(metrics)
cli::cli_format_method({
cli::cli_text("A metric set, consisting of:")
metric_formats <- vapply(metrics, format, character(1))
metric_formats <- strsplit(metric_formats, " | ", fixed = TRUE)
metric_names <- names(metric_formats)
metric_types <- vapply(metric_formats, `[`, character(1), 1, USE.NAMES = FALSE)
metric_descs <- vapply(metric_formats, `[`, character(1), 2)
metric_nchars <- nchar(metric_names) + nchar(metric_types)
metric_desc_paddings <- max(metric_nchars) - metric_nchars
# see r-lib/cli#506
metric_desc_paddings <- lapply(metric_desc_paddings, rep, x = "\u00a0")
metric_desc_paddings <- vapply(metric_desc_paddings, paste, character(1), collapse = "")
for (i in seq_along(metrics)) {
cli::cli_text(
"- {.fun {metric_names[i]}}, \\
{tolower(metric_types[i])}{metric_desc_paddings[i]} | \\
{metric_descs[i]}"
)
}
})
}
#' @export
as_tibble.metric_set <- function(x, ...) {
metrics <- attributes(x)$metrics
names <- names(metrics)
metrics <- unname(metrics)
classes <- map_chr(metrics, class1)
directions <- map_chr(metrics, get_metric_fn_direction)
dplyr::tibble(
metric = names,
class = classes,
direction = directions
)
}
map_chr <- function(x, f, ...) {
vapply(x, f, character(1), ...)
}
class1 <- function(x) {
class(x)[[1]]
}
get_metric_fn_direction <- function(x) {
attr(x, "direction")
}
get_quo_label <- function(quo) {
out <- as_label(quo)
if (length(out) != 1L) {
cli::cli_abort(
"{.code as_label(quo)} resulted in a character vector of length >1.",
.internal = TRUE
)
}
is_namespaced <- grepl("::", out, fixed = TRUE)
if (is_namespaced) {
# Split by `::` and take the second half
split <- strsplit(out, "::", fixed = TRUE)[[1]]
out <- split[[2]]
}
out
}
make_prob_class_metric_function <- function(fns) {
metric_function <- function(data,
truth,
...,
estimate,
estimator = NULL,
na_rm = TRUE,
event_level = yardstick_event_level(),
case_weights = NULL) {
# Find class vs prob metrics
are_class_metrics <- vapply(
X = fns,
FUN = inherits,
FUN.VALUE = logical(1),
what = "class_metric"
)
class_fns <- fns[are_class_metrics]
prob_fns <- fns[!are_class_metrics]
metric_list <- list()
# Evaluate class metrics
if (!is_empty(class_fns)) {
class_args <- quos(
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
estimator = estimator,
na_rm = na_rm,
event_level = event_level,
case_weights = !!enquo(case_weights)
)
class_calls <- lapply(class_fns, call2, !!!class_args)
class_calls <- mapply(call_remove_static_arguments, class_calls, class_fns)
class_list <- mapply(
FUN = eval_safely,
class_calls, # .x
names(class_calls), # .y
SIMPLIFY = FALSE,
USE.NAMES = FALSE
)
metric_list <- c(metric_list, class_list)
}
# Evaluate prob metrics
if (!is_empty(prob_fns)) {
# TODO - If prob metrics can all do micro, we can remove this
if (!is.null(estimator) && estimator == "micro") {
prob_estimator <- NULL
} else {
prob_estimator <- estimator
}
prob_args <- quos(
data = data,
truth = !!enquo(truth),
... = ...,
estimator = prob_estimator,
na_rm = na_rm,
event_level = event_level,
case_weights = !!enquo(case_weights)
)
prob_calls <- lapply(prob_fns, call2, !!!prob_args)
prob_calls <- mapply(call_remove_static_arguments, prob_calls, prob_fns)
prob_list <- mapply(
FUN = eval_safely,
prob_calls, # .x
names(prob_calls), # .y
SIMPLIFY = FALSE,
USE.NAMES = FALSE
)
metric_list <- c(metric_list, prob_list)
}
dplyr::bind_rows(metric_list)
}
class(metric_function) <- c(
"class_prob_metric_set",
"metric_set",
class(metric_function)
)
attr(metric_function, "metrics") <- fns
metric_function
}
make_numeric_metric_function <- function(fns) {
metric_function <- function(data,
truth,
estimate,
na_rm = TRUE,
case_weights = NULL,
...) {
# Construct common argument set for each metric call
# Doing this dynamically inside the generated function means
# we capture the correct arguments
call_args <- quos(
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
na_rm = na_rm,
case_weights = !!enquo(case_weights),
... = ...
)
# Construct calls from the functions + arguments
calls <- lapply(fns, call2, !!!call_args)
calls <- mapply(call_remove_static_arguments, calls, fns)
# Evaluate
metric_list <- mapply(
FUN = eval_safely,
calls, # .x
names(calls), # .y
SIMPLIFY = FALSE,
USE.NAMES = FALSE
)
dplyr::bind_rows(metric_list)
}
class(metric_function) <- c(
"numeric_metric_set",
"metric_set",
class(metric_function)
)
attr(metric_function, "metrics") <- fns
metric_function
}
make_survival_metric_function <- function(fns) {
metric_function <- function(data,
truth,
...,
estimate,
pred_time,
na_rm = TRUE,
case_weights = NULL) {
# Construct common argument set for each metric call
# Doing this dynamically inside the generated function means
# we capture the correct arguments
dynamic_call_args <- quos(
data = data,
truth = !!enquo(truth),
... = ...,
na_rm = na_rm,
      case_weights = !!enquo(case_weights)
)
static_call_args <- quos(
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
na_rm = na_rm,
case_weights = !!enquo(case_weights),
... = ...
)
call_class_ind <- vapply(
fns, inherits, "static_survival_metric",
FUN.VALUE = logical(1)
)
# Construct calls from the functions + arguments
dynamic_calls <- lapply(fns[!call_class_ind], call2, !!!dynamic_call_args)
static_calls <- lapply(fns[call_class_ind], call2, !!!static_call_args)
calls <- c(dynamic_calls, static_calls)
calls <- mapply(call_remove_static_arguments, calls, fns)
# Evaluate
metric_list <- mapply(
FUN = eval_safely,
calls, # .x
names(calls), # .y
SIMPLIFY = FALSE,
USE.NAMES = FALSE
)
dplyr::bind_rows(metric_list)
}
class(metric_function) <- c(
"survival_metric_set",
"metric_set",
class(metric_function)
)
attr(metric_function, "metrics") <- fns
metric_function
}
validate_not_empty <- function(x, call = caller_env()) {
if (is_empty(x)) {
cli::cli_abort("At least 1 function supplied to `...`.", call = call)
}
}
validate_inputs_are_functions <- function(fns, call = caller_env()) {
# Check that the user supplied all functions
is_fun_vec <- vapply(fns, is_function, logical(1))
all_fns <- all(is_fun_vec)
if (!all_fns) {
not_fn <- which(!is_fun_vec)
cli::cli_abort(
"All inputs to {.fn metric_set} must be functions. \\
These inputs are not: {not_fn}.",
call = call
)
}
}
# Validate that all metric functions inherit from valid function classes or
# combinations of classes
validate_function_class <- function(fns) {
fn_cls <- vapply(fns, function(fn) class(fn)[1], character(1))
fn_cls_unique <- unique(fn_cls)
n_unique <- length(fn_cls_unique)
if (n_unique == 0L) {
return(invisible(fns))
}
valid_cls <- c(
"class_metric", "prob_metric", "numeric_metric",
"dynamic_survival_metric", "static_survival_metric",
"integrated_survival_metric"
)
if (n_unique == 1L) {
if (fn_cls_unique %in% valid_cls) {
return(invisible(fns))
}
}
# Special case of ONLY class and prob functions together
# These are allowed to be together
if (n_unique == 2) {
if (fn_cls_unique[1] %in% c("class_metric", "prob_metric") &&
fn_cls_unique[2] %in% c("class_metric", "prob_metric")) {
return(invisible(fns))
}
if (fn_cls_unique[1] %in% c(
"dynamic_survival_metric",
"static_survival_metric",
"integrated_survival_metric"
) &&
fn_cls_unique[2] %in% c(
"dynamic_survival_metric",
"static_survival_metric",
"integrated_survival_metric"
)) {
return(invisible(fns))
}
}
if (n_unique == 3) {
if (fn_cls_unique[1] %in% c(
"dynamic_survival_metric",
"static_survival_metric",
"integrated_survival_metric"
) &&
fn_cls_unique[2] %in% c(
"dynamic_survival_metric",
"static_survival_metric",
"integrated_survival_metric"
) &&
fn_cls_unique[3] %in% c(
"dynamic_survival_metric",
"static_survival_metric",
"integrated_survival_metric"
)) {
return(invisible(fns))
}
}
# Special case unevaluated groupwise metric factories
if ("metric_factory" %in% fn_cls) {
factories <- fn_cls[fn_cls == "metric_factory"]
cli::cli_abort(
c("{cli::qty(factories)}The input{?s} {.arg {names(factories)}} \\
{?is a/are} {.help [groupwise metric](yardstick::new_groupwise_metric)} \\
{?factory/factories} and must be passed a data-column before
addition to a metric set.",
"i" = "Did you mean to type e.g. `{names(factories)[1]}(col_name)`?"),
call = rlang::call2("metric_set")
)
}
# Each element of the list contains the names of the fns
# that inherit that specific class
fn_bad_names <- lapply(fn_cls_unique, function(x) {
names(fns)[fn_cls == x]
})
# clean up for nicer printing
fn_cls_unique <- gsub("_metric", "", fn_cls_unique)
fn_cls_unique <- gsub("function", "other", fn_cls_unique)
fn_cls_other <- fn_cls_unique == "other"
if (any(fn_cls_other)) {
fn_cls_other_loc <- which(fn_cls_other)
fn_other_names <- fn_bad_names[[fn_cls_other_loc]]
fns_other <- fns[fn_other_names]
env_names_other <- vapply(
fns_other,
function(fn) env_name(fn_env(fn)),
character(1)
)
fn_bad_names[[fn_cls_other_loc]] <- paste0(
fn_other_names, " ", "<", env_names_other, ">"
)
}
# Prints as:
# - fn_type1 (fn_name1, fn_name2)
# - fn_type2 (fn_name1)
fn_pastable <- mapply(
FUN = function(fn_type, fn_names) {
fn_names <- paste0(fn_names, collapse = ", ")
paste0("- ", fn_type, " (", fn_names, ")")
},
fn_type = fn_cls_unique,
fn_names = fn_bad_names,
USE.NAMES = FALSE
)
cli::cli_abort(c(
"x" = "The combination of metric functions must be:",
"*" = "only numeric metrics.",
"*" = "a mix of class metrics and class probability metrics.",
"*" = "a mix of dynamic and static survival metrics.",
"i" = "The following metric function types are being mixed:",
fn_pastable
))
}
# Safely evaluate metrics in such a way that we can capture the
# error and inform the user of the metric that failed
eval_safely <- function(expr, expr_nm, data = NULL, env = caller_env()) {
tryCatch(
expr = {
eval_tidy(expr, data = data, env = env)
},
error = function(cnd) {
cli::cli_abort(
"Failed to compute {.fn {expr_nm}}.",
parent = cnd,
call = call("metric_set")
)
}
)
}
call_remove_static_arguments <- function(call, fn) {
static <- get_static_arguments(fn)
if (length(static) == 0L) {
# No static arguments
return(call)
}
names <- rlang::call_args_names(call)
names <- intersect(names, static)
if (length(names) == 0L) {
# `static` arguments don't intersect with `call`
return(call)
}
zaps <- rlang::rep_named(names, list(rlang::zap()))
call <- call_modify(call, !!!zaps)
call
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/aaa-metrics.R |
# File is named with `aaa-` so that it is loaded before any other files. We need
# to call `new_*_metric()` internally from outside any function in the package,
# so this file has to be sourced first. It is a bit of a hack, but works.
# ------------------------------------------------------------------------------
#' Construct a new metric function
#'
#' @description
#' These functions provide convenient wrappers to create the three types of
#' metric functions in yardstick: numeric metrics, class metrics, and
#' class probability metrics. They add a metric-specific class to `fn` and
#' attach a `direction` attribute. These features are used by [metric_set()]
#' and by [tune](https://tune.tidymodels.org/) during model tuning.
#'
#' See [Custom performance
#' metrics](https://www.tidymodels.org/learn/develop/metrics/) for more
#' information about creating custom metrics.
#'
#' @param fn A function. The metric function to attach a metric-specific class
#' and `direction` attribute to.
#'
#' @param direction A string. One of:
#' - `"maximize"`
#' - `"minimize"`
#' - `"zero"`
#'
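#' @examples
#' # A minimal sketch (one of several possible approaches): `my_mae` is a
#' # hypothetical metric built from yardstick's `numeric_metric_summarizer()`
#' # and `mae_vec()`.
#' my_mae <- function(data, truth, estimate, na_rm = TRUE, ...) {
#'   numeric_metric_summarizer(
#'     name = "my_mae",
#'     fn = mae_vec,
#'     data = data,
#'     truth = !!rlang::enquo(truth),
#'     estimate = !!rlang::enquo(estimate),
#'     na_rm = na_rm
#'   )
#' }
#' my_mae <- new_numeric_metric(my_mae, direction = "minimize")
#' class(my_mae)
#' attr(my_mae, "direction")
#'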
#' @name new-metric
NULL
#' @rdname new-metric
#' @export
new_class_metric <- function(fn, direction) {
new_metric(fn, direction, class = "class_metric")
}
#' @rdname new-metric
#' @export
new_prob_metric <- function(fn, direction) {
new_metric(fn, direction, class = "prob_metric")
}
#' @rdname new-metric
#' @export
new_numeric_metric <- function(fn, direction) {
new_metric(fn, direction, class = "numeric_metric")
}
#' @rdname new-metric
#' @export
new_dynamic_survival_metric <- function(fn, direction) {
new_metric(fn, direction, class = "dynamic_survival_metric")
}
#' @rdname new-metric
#' @export
new_integrated_survival_metric <- function(fn, direction) {
new_metric(fn, direction, class = "integrated_survival_metric")
}
#' @rdname new-metric
#' @export
new_static_survival_metric <- function(fn, direction) {
new_metric(fn, direction, class = "static_survival_metric")
}
new_metric <- function(fn, direction, class = NULL) {
if (!is.function(fn)) {
cli::cli_abort("{.arg fn} must be a function.")
}
direction <- arg_match(
direction,
values = c("maximize", "minimize", "zero")
)
class <- c(class, "metric", "function")
structure(
fn,
direction = direction,
class = class
)
}
is_metric <- function(x) {
inherits(x, "metric")
}
metric_direction <- function(x) {
attr(x, "direction", exact = TRUE)
}
`metric_direction<-` <- function(x, value) {
attr(x, "direction") <- value
x
}
#' @noRd
#' @export
print.metric <- function(x, ...) {
cat(format(x), sep = "\n")
invisible(x)
}
#' @export
format.metric <- function(x, ...) {
first_class <- class(x)[[1]]
metric_type <-
switch(
first_class,
"prob_metric" = "probability metric",
"class_metric" = "class metric",
"numeric_metric" = "numeric metric",
"dynamic_survival_metric" = "dynamic survival metric",
"static_survival_metric" = "static survival metric",
"integrated_survival_metric" = "integrated survival metric",
"metric"
)
metric_desc <- "direction: {.field {attr(x, 'direction')}}"
by_attr <- attr(x, "by")
if (!is.null(by_attr)) {
metric_desc <-
c(
metric_desc,
", group-wise on: {.field {as.character(by_attr)}}"
)
}
cli::cli_format_method(
cli::cli_text(c("A {metric_type} | ", metric_desc))
)
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/aaa-new.R |
# nocov start
# Global vars ------------------------------------------------------------------
utils::globalVariables(
c(
# for class prob metrics
"estimate",
".estimator",
"threshold",
"specificity",
".level",
".",
# for survival metrics
".estimate",
".eval_time",
".pred_survival",
".weight_censored",
# for autoplot methods
".n_events",
".n",
"slope",
"perfect",
"sensitivity",
".percent_found",
".percent_tested",
"Prediction",
"Truth",
"Freq",
"xmin",
"xmax",
"ymin",
"ymax"
)
)
# Onload -----------------------------------------------------------------------
## Taken from https://github.com/tidyverse/dplyr/blob/d310ad1cef1c14d770c94e1a9a4c79c888f46af6/R/zzz.r#L2-L9
.onLoad <- function(libname, pkgname) {
# dynamically register autoplot methods
s3_register("ggplot2::autoplot", "gain_df")
s3_register("ggplot2::autoplot", "lift_df")
s3_register("ggplot2::autoplot", "roc_df")
s3_register("ggplot2::autoplot", "roc_survival_df")
s3_register("ggplot2::autoplot", "pr_df")
s3_register("ggplot2::autoplot", "conf_mat")
invisible()
}
# Dynamic reg helper -----------------------------------------------------------
# vctrs/register-s3.R
# https://github.com/r-lib/vctrs/blob/master/R/register-s3.R
s3_register <- function(generic, class, method = NULL) {
stopifnot(is.character(generic), length(generic) == 1)
stopifnot(is.character(class), length(class) == 1)
pieces <- strsplit(generic, "::")[[1]]
stopifnot(length(pieces) == 2)
package <- pieces[[1]]
generic <- pieces[[2]]
if (is.null(method)) {
method <- get(paste0(generic, ".", class), envir = parent.frame())
}
stopifnot(is.function(method))
if (package %in% loadedNamespaces()) {
registerS3method(generic, class, method, envir = asNamespace(package))
}
# Always register hook in case package is later unloaded & reloaded
setHook(
packageEvent(package, "onLoad"),
function(...) {
registerS3method(generic, class, method, envir = asNamespace(package))
}
)
}
# nocov end
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/aaa.R |
#' Developer function for checking inputs in new metrics
#'
#' `check_numeric_metric()`, `check_class_metric()`, and `check_prob_metric()`
#' are useful alongside [metric-summarizers] for implementing new custom
#' metrics. [metric-summarizers] call the metric function inside
#' `dplyr::summarise()`. These functions perform checks on the inputs in
#' accordance with the type of metric that is used.
#'
#' @inheritParams rlang::args_error_context
#'
#' @param truth The realized vector of `truth`.
#' - For `check_numeric_metric()`, a numeric vector.
#' - For `check_class_metric()`, a factor.
#' - For `check_prob_metric()`, a factor.
#' - For `check_dynamic_survival_metric()`, a Surv object.
#' - For `check_static_survival_metric()`, a Surv object.
#'
#' @param estimate The realized `estimate` result.
#' - For `check_numeric_metric()`, a numeric vector.
#' - For `check_class_metric()`, a factor.
#' - For `check_prob_metric()`, a numeric vector for binary `truth`,
#'     a numeric matrix for multi-class `truth`.
#' - For `check_dynamic_survival_metric()`, list-column of data.frames.
#' - For `check_static_survival_metric()`, a numeric vector.
#'
#' @param case_weights The realized case weights, as a numeric vector. This must
#' be the same length as `truth`.
#'
#' @param estimator This can either be `NULL` for the default auto-selection of
#' averaging (`"binary"` or `"macro"`), or a single character to pass along to
#' the metric implementation describing the kind of averaging to use.
#'
#' @seealso [metric-summarizers]
#'
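#' @examples
#' # A minimal sketch: the input checks a custom numeric metric could run.
#' # `truth` and `estimate` are made-up vectors; valid inputs pass silently.
#' truth <- c(1.5, 2.1, 3.4)
#' estimate <- c(1.4, 2.3, 3.0)
#' check_numeric_metric(truth, estimate, case_weights = rep(1, 3))
#'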
#' @name check_metric
NULL
#' @rdname check_metric
#' @export
check_numeric_metric <- function(truth,
estimate,
case_weights,
call = caller_env()) {
validate_case_weights(case_weights, size = length(truth), call = call)
validate_numeric_truth_numeric_estimate(truth, estimate, call = call)
}
#' @rdname check_metric
#' @export
check_class_metric <- function(truth,
estimate,
case_weights,
estimator,
call = caller_env()) {
validate_case_weights(case_weights, size = length(truth), call = call)
validate_factor_truth_factor_estimate(truth, estimate, call = call)
validate_binary_estimator(truth, estimator, call = call)
}
#' @rdname check_metric
#' @export
check_prob_metric <- function(truth,
estimate,
case_weights,
estimator,
call = caller_env()) {
validate_case_weights(case_weights, size = length(truth), call = call)
validate_factor_truth_matrix_estimate(truth, estimate, estimator, call = call)
validate_binary_estimator(truth, estimator, call = call)
}
#' @rdname check_metric
#' @export
check_dynamic_survival_metric <- function(truth,
estimate,
case_weights,
call = caller_env()) {
validate_surv_truth_list_estimate(truth, estimate, call = call)
validate_case_weights(case_weights, size = nrow(truth), call = call)
}
#' @rdname check_metric
#' @export
check_static_survival_metric <- function(truth,
estimate,
case_weights,
call = caller_env()) {
validate_case_weights(case_weights, size = nrow(truth), call = call)
validate_surv_truth_numeric_estimate(truth, estimate, call = call)
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/check-metric.R |
#' Accuracy
#'
#' Accuracy is the proportion of the data that are predicted correctly.
#'
#' @family class metrics
#' @templateVar fn accuracy
#' @template return
#'
#' @section Multiclass:
#'
#' Accuracy extends naturally to multiclass scenarios. Because
#' of this, macro and micro averaging are not implemented.
#'
#' @inheritParams sens
#'
#' @author Max Kuhn
#'
#' @export
#' @examples
#' library(dplyr)
#' data("two_class_example")
#' data("hpc_cv")
#'
#' # Two class
#' accuracy(two_class_example, truth, predicted)
#'
#' # Multiclass
#' # accuracy() has a natural multiclass extension
#' hpc_cv %>%
#' filter(Resample == "Fold01") %>%
#' accuracy(obs, pred)
#'
#' # Groups are respected
#' hpc_cv %>%
#' group_by(Resample) %>%
#' accuracy(obs, pred)
accuracy <- function(data, ...) {
UseMethod("accuracy")
}
accuracy <- new_class_metric(
accuracy,
direction = "maximize"
)
#' @export
#' @rdname accuracy
accuracy.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
case_weights = NULL,
...) {
class_metric_summarizer(
name = "accuracy",
fn = accuracy_vec,
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
na_rm = na_rm,
case_weights = !!enquo(case_weights)
)
}
#' @export
accuracy.table <- function(data, ...) {
check_table(data)
estimator <- finalize_estimator(data, metric_class = "accuracy")
metric_tibbler(
.metric = "accuracy",
.estimator = estimator,
.estimate = accuracy_table_impl(data)
)
}
#' @export
accuracy.matrix <- function(data, ...) {
data <- as.table(data)
accuracy.table(data)
}
#' @export
#' @rdname accuracy
accuracy_vec <- function(truth, estimate, na_rm = TRUE, case_weights = NULL, ...) {
abort_if_class_pred(truth)
estimate <- as_factor_from_class_pred(estimate)
estimator <- finalize_estimator(truth, metric_class = "accuracy")
check_class_metric(truth, estimate, case_weights, estimator)
if (na_rm) {
result <- yardstick_remove_missing(truth, estimate, case_weights)
truth <- result$truth
estimate <- result$estimate
case_weights <- result$case_weights
} else if (yardstick_any_missing(truth, estimate, case_weights)) {
return(NA_real_)
}
data <- yardstick_table(truth, estimate, case_weights = case_weights)
accuracy_table_impl(data)
}
accuracy_table_impl <- function(x) {
sum(diag(x)) / sum(x)
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/class-accuracy.R |
#' Balanced accuracy
#'
#' Balanced accuracy is computed here as the average of [sens()] and [spec()].
#'
#' @family class metrics
#' @templateVar fn bal_accuracy
#' @template event_first
#' @template multiclass
#' @template return
#'
#' @inheritParams sens
#'
#' @author Max Kuhn
#'
#' @template examples-class
#'
#' @export
bal_accuracy <- function(data, ...) {
UseMethod("bal_accuracy")
}
bal_accuracy <- new_class_metric(
bal_accuracy,
direction = "maximize"
)
#' @export
#' @rdname bal_accuracy
bal_accuracy.data.frame <- function(data,
truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
class_metric_summarizer(
name = "bal_accuracy",
fn = bal_accuracy_vec,
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
estimator = estimator,
na_rm = na_rm,
case_weights = !!enquo(case_weights),
event_level = event_level
)
}
#' @export
bal_accuracy.table <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
check_table(data)
estimator <- finalize_estimator(data, estimator)
metric_tibbler(
.metric = "bal_accuracy",
.estimator = estimator,
.estimate = bal_accuracy_table_impl(data, estimator, event_level)
)
}
#' @export
bal_accuracy.matrix <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
data <- as.table(data)
bal_accuracy.table(data, estimator, event_level)
}
#' @export
#' @rdname bal_accuracy
bal_accuracy_vec <- function(truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
abort_if_class_pred(truth)
estimate <- as_factor_from_class_pred(estimate)
estimator <- finalize_estimator(truth, estimator)
check_class_metric(truth, estimate, case_weights, estimator)
if (na_rm) {
result <- yardstick_remove_missing(truth, estimate, case_weights)
truth <- result$truth
estimate <- result$estimate
case_weights <- result$case_weights
} else if (yardstick_any_missing(truth, estimate, case_weights)) {
return(NA_real_)
}
data <- yardstick_table(truth, estimate, case_weights = case_weights)
bal_accuracy_table_impl(data, estimator, event_level)
}
bal_accuracy_table_impl <- function(data, estimator, event_level) {
if (is_binary(estimator)) {
bal_accuracy_binary(data, event_level)
} else {
w <- get_weights(data, estimator)
out_vec <- bal_accuracy_multiclass(data, estimator)
stats::weighted.mean(out_vec, w)
}
}
bal_accuracy_binary <- function(data, event_level) {
(sens_binary(data, event_level) + spec_binary(data, event_level)) / 2
}
# Urbanowicz, R. J., & Moore, J. H. (2015). "ExSTraCS 2.0: description and evaluation of a scalable learning classifier system."
bal_accuracy_multiclass <- function(data, estimator) {
(recall_multiclass(data, estimator) + spec_multiclass(data, estimator)) / 2
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/class-bal_accuracy.R |
#' Detection prevalence
#'
#' Detection prevalence is defined as the number of _predicted_ positive events (both
#' true positive and false positive) divided by the total number of predictions.
#'
#' @family class metrics
#' @templateVar fn detection_prevalence
#' @template event_first
#' @template multiclass
#' @template return
#'
#' @inheritParams sens
#'
#' @author Max Kuhn
#'
#' @template examples-class
#'
#' @export
detection_prevalence <- function(data, ...) {
UseMethod("detection_prevalence")
}
detection_prevalence <- new_class_metric(
detection_prevalence,
direction = "maximize"
)
#' @export
#' @rdname detection_prevalence
detection_prevalence.data.frame <- function(data,
truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
class_metric_summarizer(
name = "detection_prevalence",
fn = detection_prevalence_vec,
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
estimator = estimator,
event_level = event_level,
na_rm = na_rm,
case_weights = !!enquo(case_weights)
)
}
#' @export
detection_prevalence.table <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
check_table(data)
estimator <- finalize_estimator(data, estimator)
metric_tibbler(
.metric = "detection_prevalence",
.estimator = estimator,
.estimate = detection_prevalence_table_impl(data, estimator, event_level)
)
}
#' @export
detection_prevalence.matrix <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
data <- as.table(data)
detection_prevalence.table(data, estimator, event_level)
}
#' @export
#' @rdname detection_prevalence
detection_prevalence_vec <- function(truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
abort_if_class_pred(truth)
estimate <- as_factor_from_class_pred(estimate)
estimator <- finalize_estimator(truth, estimator)
check_class_metric(truth, estimate, case_weights, estimator)
if (na_rm) {
result <- yardstick_remove_missing(truth, estimate, case_weights)
truth <- result$truth
estimate <- result$estimate
case_weights <- result$case_weights
} else if (yardstick_any_missing(truth, estimate, case_weights)) {
return(NA_real_)
}
data <- yardstick_table(truth, estimate, case_weights = case_weights)
detection_prevalence_table_impl(data, estimator, event_level)
}
detection_prevalence_table_impl <- function(data, estimator, event_level) {
if (is_binary(estimator)) {
detection_prevalence_binary(data, event_level)
} else {
w <- get_weights(data, estimator)
out_vec <- detection_prevalence_multiclass(data, estimator)
stats::weighted.mean(out_vec, w)
}
}
detection_prevalence_binary <- function(data, event_level) {
pos_level <- pos_val(data, event_level)
sum(data[pos_level, ]) / sum(data)
}
detection_prevalence_multiclass <- function(data, estimator) {
numer <- rowSums(data)
denom <- rep(sum(data), times = nrow(data))
denom[denom <= 0] <- NA_real_
if (is_micro(estimator)) {
numer <- sum(numer)
denom <- sum(denom)
}
numer / denom
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/class-detection_prevalence.R |
#' F Measure
#'
#' These functions calculate the [f_meas()] of a measurement system for
#' finding relevant documents compared to reference results
#' (the truth regarding relevance). Highly related functions are [recall()]
#' and [precision()].
#'
#' The measure "F" is a combination of precision and recall (see below).
#'
#' @family class metrics
#' @family relevance metrics
#' @templateVar fn f_meas
#' @template event_first
#' @template multiclass
#' @template return
#' @template table-relevance
#'
#' @inheritParams sens
#'
#' @param beta A numeric value used to weight precision and
#' recall. A value of 1 is traditionally used and corresponds to
#'  the harmonic mean of the two values, while other values weight
#'  recall as `beta` times more important than precision.
#'
#'
#' @references
#'
#' Buckland, M., & Gey, F. (1994). The relationship
#' between Recall and Precision. *Journal of the American Society
#' for Information Science*, 45(1), 12-19.
#'
#' Powers, D. (2007). Evaluation: From Precision, Recall and F
#' Factor to ROC, Informedness, Markedness and Correlation.
#' Technical Report SIE-07-001, Flinders University
#'
#' @author Max Kuhn
#'
#' @template examples-class
#'
#' @export
f_meas <- function(data, ...) {
UseMethod("f_meas")
}
f_meas <- new_class_metric(
f_meas,
direction = "maximize"
)
#' @rdname f_meas
#' @export
f_meas.data.frame <- function(data,
truth,
estimate,
beta = 1,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
class_metric_summarizer(
name = "f_meas",
fn = f_meas_vec,
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
estimator = estimator,
na_rm = na_rm,
case_weights = !!enquo(case_weights),
event_level = event_level,
fn_options = list(beta = beta)
)
}
#' @export
f_meas.table <- function(data,
beta = 1,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
check_table(data)
estimator <- finalize_estimator(data, estimator)
metric_tibbler(
.metric = "f_meas",
.estimator = estimator,
.estimate = f_meas_table_impl(data, estimator, event_level, beta = beta)
)
}
#' @export
f_meas.matrix <- function(data,
beta = 1,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
data <- as.table(data)
f_meas.table(data, beta, estimator, event_level)
}
#' @export
#' @rdname f_meas
f_meas_vec <- function(truth,
estimate,
beta = 1,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
abort_if_class_pred(truth)
estimate <- as_factor_from_class_pred(estimate)
estimator <- finalize_estimator(truth, estimator)
check_class_metric(truth, estimate, case_weights, estimator)
if (na_rm) {
result <- yardstick_remove_missing(truth, estimate, case_weights)
truth <- result$truth
estimate <- result$estimate
case_weights <- result$case_weights
} else if (yardstick_any_missing(truth, estimate, case_weights)) {
return(NA_real_)
}
data <- yardstick_table(truth, estimate, case_weights = case_weights)
f_meas_table_impl(data, estimator, event_level, beta)
}
f_meas_table_impl <- function(data, estimator, event_level, beta) {
if (is_binary(estimator)) {
f_meas_binary(data, event_level, beta)
} else {
w <- get_weights(data, estimator)
out_vec <- f_meas_multiclass(data, estimator, beta)
stats::weighted.mean(out_vec, w, na.rm = TRUE)
}
}
f_meas_binary <- function(data, event_level, beta = 1) {
precision <- precision_binary(data, event_level)
rec <- recall_binary(data, event_level)
# if precision and recall are both 0, return 0 not NA
if (isTRUE(precision == 0 & rec == 0)) {
return(0)
}
(1 + beta^2) * precision * rec / ((beta^2 * precision) + rec)
}
f_meas_multiclass <- function(data, estimator, beta = 1) {
precision <- precision_multiclass(data, estimator)
rec <- recall_multiclass(data, estimator)
res <- (1 + beta^2) * precision * rec / ((beta^2 * precision) + rec)
# if precision and recall are both 0, define this as 0 not NA
# this is the case when tp == 0 and is well defined
# Matches sklearn behavior
# https://github.com/scikit-learn/scikit-learn/blob/bac89c253b35a8f1a3827389fbee0f5bebcbc985/sklearn/metrics/classification.py#L1150
where_zero <- which(precision == 0 & rec == 0)
res[where_zero] <- 0
res
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/class-f_meas.R |
#' J-index
#'
#' @description
#' Youden's J statistic is defined as:
#'
#' [sens()] + [spec()] - 1
#'
#' A related metric is Informedness, see the Details section for the relationship.
#'
#' @details
#'
#' The value of the J-index ranges from \[0, 1\] and is `1` when there are
#' no false positives and no false negatives.
#'
#' The binary version of J-index is equivalent to the binary concept of
#' Informedness. Macro-weighted J-index is equivalent to multiclass informedness
#' as defined in Powers, David M W (2011), equation (42).
#'
#' @family class metrics
#' @templateVar fn j_index
#' @template event_first
#' @template multiclass
#' @template return
#'
#' @inheritParams sens
#'
#' @author Max Kuhn
#'
#' @references
#'
#' Youden, W.J. (1950). "Index for rating diagnostic tests". Cancer. 3: 32-35.
#'
#' Powers, David M W (2011). "Evaluation: From Precision, Recall and F-Score to
#' ROC, Informedness, Markedness and Correlation". Journal of Machine Learning
#' Technologies. 2 (1): 37-63.
#'
#' @template examples-class
#'
#' @export
j_index <- function(data, ...) {
UseMethod("j_index")
}
j_index <- new_class_metric(
j_index,
direction = "maximize"
)
#' @rdname j_index
#' @export
j_index.data.frame <- function(data,
truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
class_metric_summarizer(
name = "j_index",
fn = j_index_vec,
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
estimator = estimator,
na_rm = na_rm,
case_weights = !!enquo(case_weights),
event_level = event_level
)
}
#' @export
j_index.table <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
check_table(data)
estimator <- finalize_estimator(data, estimator)
metric_tibbler(
.metric = "j_index",
.estimator = estimator,
.estimate = j_index_table_impl(data, estimator, event_level)
)
}
#' @export
j_index.matrix <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
data <- as.table(data)
j_index.table(data, estimator, event_level)
}
#' @rdname j_index
#' @export
j_index_vec <- function(truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
abort_if_class_pred(truth)
estimate <- as_factor_from_class_pred(estimate)
estimator <- finalize_estimator(truth, estimator)
check_class_metric(truth, estimate, case_weights, estimator)
if (na_rm) {
result <- yardstick_remove_missing(truth, estimate, case_weights)
truth <- result$truth
estimate <- result$estimate
case_weights <- result$case_weights
} else if (yardstick_any_missing(truth, estimate, case_weights)) {
return(NA_real_)
}
data <- yardstick_table(truth, estimate, case_weights = case_weights)
j_index_table_impl(data, estimator, event_level)
}
j_index_table_impl <- function(data, estimator, event_level) {
if (is_binary(estimator)) {
j_index_binary(data, event_level)
} else {
w <- get_weights(data, estimator)
out_vec <- j_index_multiclass(data, estimator)
# Set `na.rm = TRUE` to remove undefined values from weighted computation (#265)
stats::weighted.mean(out_vec, w, na.rm = TRUE)
}
}
j_index_binary <- function(data, event_level) {
sens_binary(data, event_level) + spec_binary(data, event_level) - 1
}
j_index_multiclass <- function(data, estimator) {
sens_multiclass(data, estimator) + spec_multiclass(data, estimator) - 1
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/class-j_index.R |
#' Kappa
#'
#' Kappa is a similar measure to [accuracy()], but is normalized by
#' the accuracy that would be expected by chance alone and is very useful
#' when one or more classes have large frequency distributions.
#'
#' @family class metrics
#' @templateVar fn kap
#' @template return
#'
#' @section Multiclass:
#'
#' Kappa extends naturally to multiclass scenarios. Because
#' of this, macro and micro averaging are not implemented.
#'
#' @inheritParams sens
#'
#' @param weighting A weighting to apply when computing the scores. One of:
#' `"none"`, `"linear"`, or `"quadratic"`. Linear and quadratic weighting
#' penalizes mis-predictions that are "far away" from the true value. Note
#' that distance is judged based on the ordering of the levels in `truth` and
#' `estimate`. It is recommended to provide ordered factors for `truth` and
#' `estimate` to explicitly code the ordering, but this is not required.
#'
#' In the binary case, all 3 weightings produce the same value, since it is
#' only ever possible to be 1 unit away from the true value.
#'
#' @author Max Kuhn
#' @author Jon Harmon
#'
#' @references
#' Cohen, J. (1960). "A coefficient of agreement for nominal
#' scales". _Educational and Psychological Measurement_. 20 (1): 37-46.
#'
#' Cohen, J. (1968). "Weighted kappa: Nominal scale agreement provision for
#' scaled disagreement or partial credit". _Psychological
#' Bulletin_. 70 (4): 213-220.
#'
#' @export
#' @examples
#' library(dplyr)
#' data("two_class_example")
#' data("hpc_cv")
#'
#' # Two class
#' kap(two_class_example, truth, predicted)
#'
#' # Multiclass
#' # kap() has a natural multiclass extension
#' hpc_cv %>%
#' filter(Resample == "Fold01") %>%
#' kap(obs, pred)
#'
#' # Groups are respected
#' hpc_cv %>%
#' group_by(Resample) %>%
#' kap(obs, pred)
kap <- function(data, ...) {
UseMethod("kap")
}
kap <- new_class_metric(
kap,
direction = "maximize"
)
#' @export
#' @rdname kap
kap.data.frame <- function(data,
truth,
estimate,
weighting = "none",
na_rm = TRUE,
case_weights = NULL,
...) {
class_metric_summarizer(
name = "kap",
fn = kap_vec,
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
na_rm = na_rm,
case_weights = !!enquo(case_weights),
fn_options = list(weighting = weighting)
)
}
#' @export
kap.table <- function(data,
weighting = "none",
...) {
check_table(data)
metric_tibbler(
.metric = "kap",
.estimator = finalize_estimator(data, metric_class = "kap"),
.estimate = kap_table_impl(data, weighting = weighting)
)
}
#' @export
kap.matrix <- function(data,
weighting = "none",
...) {
data <- as.table(data)
kap.table(data, weighting = weighting)
}
#' @export
#' @rdname kap
kap_vec <- function(truth,
estimate,
weighting = "none",
na_rm = TRUE,
case_weights = NULL,
...) {
abort_if_class_pred(truth)
estimate <- as_factor_from_class_pred(estimate)
estimator <- finalize_estimator(truth, metric_class = "kap")
check_class_metric(truth, estimate, case_weights, estimator)
if (na_rm) {
result <- yardstick_remove_missing(truth, estimate, case_weights)
truth <- result$truth
estimate <- result$estimate
case_weights <- result$case_weights
} else if (yardstick_any_missing(truth, estimate, case_weights)) {
return(NA_real_)
}
data <- yardstick_table(truth, estimate, case_weights = case_weights)
kap_table_impl(data, weighting = weighting)
}
kap_table_impl <- function(data, weighting) {
full_sum <- sum(data)
row_sum <- rowSums(data)
col_sum <- colSums(data)
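  # expected[i, j]: cell count expected by chance alone, from the marginal totals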
expected <- outer(row_sum, col_sum) / full_sum
n_levels <- nrow(data)
w <- make_weighting_matrix(weighting, n_levels)
n_disagree <- sum(w * data)
n_chance <- sum(w * expected)
1 - n_disagree / n_chance
}
make_weighting_matrix <- function(weighting, n_levels, call = caller_env()) {
validate_weighting(weighting, call = call)
if (is_no_weighting(weighting)) {
# [n_levels x n_levels], 0 on diagonal, 1 on off-diagonal
w <- matrix(1L, nrow = n_levels, ncol = n_levels)
diag(w) <- 0L
return(w)
}
if (is_linear_weighting(weighting)) {
power <- 1L
} else {
# quadratic
power <- 2L
}
# [n_levels x n_levels], 0 on diagonal, increasing weighting on off-diagonal
w <- seq2(0L, n_levels - 1L)
w <- matrix(w, nrow = n_levels, ncol = n_levels)
w <- abs(w - t(w))^power
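  # e.g. with n_levels = 3 and linear weighting:
  #   0 1 2
  #   1 0 1
  #   2 1 0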
w
}
# ------------------------------------------------------------------------------
validate_weighting <- function(x, call = caller_env()) {
if (!is_string(x)) {
cli::cli_abort("{.arg weighting} must be a string.", call = call)
}
ok <- is_no_weighting(x) ||
is_linear_weighting(x) ||
is_quadratic_weighting(x)
if (!ok) {
cli::cli_abort(
"{.arg weighting} must be {.val none}, {.val linear}, or \\
{.val quadratic}, not {.val {x}}.",
call = call
)
}
invisible(x)
}
is_no_weighting <- function(x) {
identical(x, "none")
}
is_linear_weighting <- function(x) {
identical(x, "linear")
}
is_quadratic_weighting <- function(x) {
identical(x, "quadratic")
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/class-kap.R |
#' Matthews correlation coefficient
#'
#' @family class metrics
#' @templateVar fn mcc
#' @template event_first
#' @template return
#'
#' @section Multiclass:
#'
#' `mcc()` has a known multiclass generalization and that is computed
#' automatically if a factor with more than 2 levels is provided. Because
#' of this, no averaging methods are provided.
#'
#' @inheritParams sens
#'
#' @author Max Kuhn
#'
#' @references Jurman, G., Riccadonna, S., and Furlanello, C. (2012). "A
#'  Comparison of MCC and CEN Error Measures in Multi-Class Prediction".
#'  _PLOS ONE_. Vol 7, Iss 8, e41882.
#'
#' @export
#' @examples
#' library(dplyr)
#' data("two_class_example")
#' data("hpc_cv")
#'
#' # Two class
#' mcc(two_class_example, truth, predicted)
#'
#' # Multiclass
#' # mcc() has a natural multiclass extension
#' hpc_cv %>%
#' filter(Resample == "Fold01") %>%
#' mcc(obs, pred)
#'
#' # Groups are respected
#' hpc_cv %>%
#' group_by(Resample) %>%
#' mcc(obs, pred)
mcc <- function(data, ...) {
UseMethod("mcc")
}
mcc <- new_class_metric(
mcc,
direction = "maximize"
)
#' @export
#' @rdname mcc
mcc.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
case_weights = NULL,
...) {
class_metric_summarizer(
name = "mcc",
fn = mcc_vec,
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
na_rm = na_rm,
case_weights = !!enquo(case_weights)
)
}
#' @export
mcc.table <- function(data, ...) {
check_table(data)
estimator <- finalize_estimator(data, metric_class = "mcc")
# Convert to a double matrix to avoid integer overflow in the binary case
# and to pass to the C code in the multiclass case.
# Using `storage.mode()<-` keeps dimensions (as opposed to as.double()).
data <- as.matrix(data)
if (!is.double(data)) {
storage.mode(data) <- "double"
}
metric_tibbler(
.metric = "mcc",
.estimator = estimator,
.estimate = mcc_table_impl(data, estimator)
)
}
#' @export
mcc.matrix <- function(data, ...) {
data <- as.table(data)
mcc.table(data)
}
#' @export
#' @rdname mcc
mcc_vec <- function(truth,
estimate,
na_rm = TRUE,
case_weights = NULL,
...) {
abort_if_class_pred(truth)
estimate <- as_factor_from_class_pred(estimate)
estimator <- finalize_estimator(truth, metric_class = "mcc")
check_class_metric(truth, estimate, case_weights, estimator)
if (na_rm) {
result <- yardstick_remove_missing(truth, estimate, case_weights)
truth <- result$truth
estimate <- result$estimate
case_weights <- result$case_weights
} else if (yardstick_any_missing(truth, estimate, case_weights)) {
return(NA_real_)
}
data <- yardstick_table(truth, estimate, case_weights = case_weights)
mcc_table_impl(data, estimator)
}
mcc_table_impl <- function(data, estimator) {
if (is_binary(estimator)) {
mcc_binary(data)
} else {
mcc_multiclass(data)
}
}
mcc_binary <- function(data) {
check_mcc_data(data)
# mcc() produces identical results regardless of which level is
# considered the "event", so hardcode to first here
positive <- pos_val(data, event_level = "first")
negative <- neg_val(data, event_level = "first")
tp <- data[positive, positive]
tn <- data[negative, negative]
fp <- data[positive, negative]
fn <- data[negative, positive]
d1 <- tp + fp
d2 <- tp + fn
d3 <- tn + fp
d4 <- tn + fn
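  # if any marginal total is zero, the denominator below is zero and the
  # statistic is undefined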
if (d1 == 0 || d2 == 0 || d3 == 0 || d4 == 0) {
return(NA)
}
((tp * tn) - (fp * fn)) / sqrt(prod(d1, d2, d3, d4))
}
mcc_multiclass <- function(data) {
check_mcc_data(data)
mcc_multiclass_impl(data)
}
mcc_multiclass_impl <- function(C) {
.Call(yardstick_mcc_multiclass_impl, C)
}
check_mcc_data <- function(data) {
if (!is.double(data) && !is.matrix(data)) {
cli::cli_abort(
"{.arg data} should be a double matrix at this point.",
.internal = TRUE
)
}
invisible()
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/class-mcc.R |
#' Negative predictive value
#'
#' These functions calculate the [npv()] (negative predictive value) of a
#' measurement system compared to a reference result (the "truth" or gold standard).
#' Highly related functions are [spec()], [sens()], and [ppv()].
#'
#' The positive predictive value ([ppv()]) is defined as the percent of
#' predicted positives that are actually positive while the
#' negative predictive value ([npv()]) is defined as the percent of predicted
#' negatives that are actually negative.
#'
#' @family class metrics
#' @family sensitivity metrics
#' @templateVar fn npv
#' @template event_first
#' @template multiclass
#' @template return
#' @template table-positive
#'
#' @inheritParams ppv
#'
#' @author Max Kuhn
#'
#' @references
#'
#' Altman, D.G., Bland, J.M. (1994) ``Diagnostic tests 2:
#' predictive values,'' *British Medical Journal*, vol 309,
#' 102.
#'
#' @template examples-class
#'
#' @export
npv <- function(data, ...) {
UseMethod("npv")
}
npv <- new_class_metric(
npv,
direction = "maximize"
)
#' @rdname npv
#' @export
npv.data.frame <- function(data,
truth,
estimate,
prevalence = NULL,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
class_metric_summarizer(
name = "npv",
fn = npv_vec,
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
estimator = estimator,
na_rm = na_rm,
case_weights = !!enquo(case_weights),
event_level = event_level,
fn_options = list(prevalence = prevalence)
)
}
#' @export
npv.table <- function(data,
prevalence = NULL,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
check_table(data)
estimator <- finalize_estimator(data, estimator)
metric_tibbler(
.metric = "npv",
.estimator = estimator,
.estimate = npv_table_impl(data, estimator, event_level, prevalence = prevalence)
)
}
#' @export
npv.matrix <- function(data,
prevalence = NULL,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
data <- as.table(data)
npv.table(data, prevalence, estimator, event_level)
}
#' @export
#' @rdname npv
npv_vec <- function(truth,
estimate,
prevalence = NULL,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
abort_if_class_pred(truth)
estimate <- as_factor_from_class_pred(estimate)
estimator <- finalize_estimator(truth, estimator)
check_class_metric(truth, estimate, case_weights, estimator)
if (na_rm) {
result <- yardstick_remove_missing(truth, estimate, case_weights)
truth <- result$truth
estimate <- result$estimate
case_weights <- result$case_weights
} else if (yardstick_any_missing(truth, estimate, case_weights)) {
return(NA_real_)
}
data <- yardstick_table(truth, estimate, case_weights = case_weights)
npv_table_impl(data, estimator, event_level, prevalence = prevalence)
}
npv_table_impl <- function(data,
estimator,
event_level,
prevalence = NULL) {
if (is_binary(estimator)) {
npv_binary(data, event_level, prevalence)
} else {
w <- get_weights(data, estimator)
out_vec <- npv_multiclass(data, estimator, prevalence)
stats::weighted.mean(out_vec, w)
}
}
npv_binary <- function(data, event_level, prevalence = NULL) {
positive <- pos_val(data, event_level)
if (is.null(prevalence)) {
prevalence <- sum(data[, positive]) / sum(data)
}
sens <- sens_binary(data, event_level)
spec <- spec_binary(data, event_level)
(spec * (1 - prevalence)) / (((1 - sens) * prevalence) + ((spec) * (1 - prevalence)))
}
npv_multiclass <- function(data, estimator, prevalence = NULL) {
if (is.null(prevalence)) {
tpfn <- colSums(data)
tptnfpfn <- rep(sum(data), times = nrow(data))
if (is_micro(estimator)) {
tpfn <- sum(tpfn)
tptnfpfn <- sum(tptnfpfn)
}
prevalence <- tpfn / tptnfpfn
}
.sens_vec <- recall_multiclass(data, estimator)
.spec_vec <- spec_multiclass(data, estimator)
numer <- .spec_vec * (1 - prevalence)
denom <- (1 - .sens_vec) * prevalence + .spec_vec * (1 - prevalence)
denom[denom <= 0] <- NA_real_
numer / denom
}
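# Usage sketch (comments only; assumes the packaged `two_class_example`
# data): `prevalence` overrides the event rate that `npv_binary()` would
# otherwise estimate from the table, and feeds straight into the
# Bayes-style formula above.
#
#   npv(two_class_example, truth, predicted)                     # data-derived prevalence
#   npv(two_class_example, truth, predicted, prevalence = 0.25)  # user-supplied prevalence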
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/class-npv.R |
#' Positive predictive value
#'
#' These functions calculate the [ppv()] (positive predictive value) of a
#' measurement system compared to a reference result (the "truth" or gold standard).
#' Highly related functions are [spec()], [sens()], and [npv()].
#'
#' The positive predictive value ([ppv()]) is defined as the percent of
#' predicted positives that are actually positive while the
#' negative predictive value ([npv()]) is defined as the percent of
#' predicted negatives that are actually negative.
#'
#' @family class metrics
#' @family sensitivity metrics
#' @templateVar fn ppv
#' @template event_first
#' @template multiclass
#' @template return
#' @template table-positive
#'
#' @inheritParams sens
#'
#' @param prevalence A numeric value for the rate of the
#' "positive" class of the data.
#'
#' @author Max Kuhn
#'
#' @references
#'
#' Altman, D.G., Bland, J.M. (1994) ``Diagnostic tests 2:
#' predictive values,'' *British Medical Journal*, vol 309,
#' 102.
#'
#' @template examples-class
#' @examples
#' # But what if we think that Class 1 only occurs 40% of the time?
#' ppv(two_class_example, truth, predicted, prevalence = 0.40)
#'
#' @export
ppv <- function(data, ...) {
UseMethod("ppv")
}
ppv <- new_class_metric(
ppv,
direction = "maximize"
)
#' @rdname ppv
#' @export
ppv.data.frame <- function(data,
truth,
estimate,
prevalence = NULL,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
class_metric_summarizer(
name = "ppv",
fn = ppv_vec,
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
estimator = estimator,
na_rm = na_rm,
case_weights = !!enquo(case_weights),
event_level = event_level,
fn_options = list(prevalence = prevalence)
)
}
#' @export
ppv.table <- function(data,
prevalence = NULL,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
check_table(data)
estimator <- finalize_estimator(data, estimator)
metric_tibbler(
.metric = "ppv",
.estimator = estimator,
.estimate = ppv_table_impl(
data,
estimator = estimator,
event_level = event_level,
prevalence = prevalence
)
)
}
#' @export
ppv.matrix <- function(data,
prevalence = NULL,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
data <- as.table(data)
ppv.table(
data,
prevalence = prevalence,
estimator = estimator,
event_level = event_level
)
}
#' @export
#' @rdname ppv
ppv_vec <- function(truth,
estimate,
prevalence = NULL,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
abort_if_class_pred(truth)
estimate <- as_factor_from_class_pred(estimate)
estimator <- finalize_estimator(truth, estimator)
check_class_metric(truth, estimate, case_weights, estimator)
if (na_rm) {
result <- yardstick_remove_missing(truth, estimate, case_weights)
truth <- result$truth
estimate <- result$estimate
case_weights <- result$case_weights
} else if (yardstick_any_missing(truth, estimate, case_weights)) {
return(NA_real_)
}
data <- yardstick_table(truth, estimate, case_weights = case_weights)
ppv_table_impl(data, estimator, event_level, prevalence = prevalence)
}
ppv_table_impl <- function(data,
estimator,
event_level,
prevalence = NULL) {
if (is_binary(estimator)) {
ppv_binary(data, event_level, prevalence)
} else {
w <- get_weights(data, estimator)
out_vec <- ppv_multiclass(data, estimator, prevalence)
stats::weighted.mean(out_vec, w)
}
}
ppv_binary <- function(data, event_level, prevalence = NULL) {
positive <- pos_val(data, event_level)
if (is.null(prevalence)) {
prevalence <- sum(data[, positive]) / sum(data)
}
sens <- sens_binary(data, event_level)
spec <- spec_binary(data, event_level)
(sens * prevalence) / ((sens * prevalence) + ((1 - spec) * (1 - prevalence)))
}
ppv_multiclass <- function(data, estimator, prevalence = NULL) {
# ppv should be equal to precision in all cases except when
  # prevalence is explicitly set. In that case, that value
  # is used, which alters the result.
if (is.null(prevalence)) {
tpfn <- colSums(data)
tptnfpfn <- rep(sum(data), times = nrow(data))
if (is_micro(estimator)) {
tpfn <- sum(tpfn)
tptnfpfn <- sum(tptnfpfn)
}
prevalence <- tpfn / tptnfpfn
}
.sens_vec <- recall_multiclass(data, estimator)
.spec_vec <- spec_multiclass(data, estimator)
numer <- .sens_vec * prevalence
denom <- .sens_vec * prevalence + (1 - .spec_vec) * (1 - prevalence)
denom[denom <= 0] <- NA_real_
numer / denom
}
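# Consistency sketch (comments only): as the comment in `ppv_multiclass()`
# notes, `ppv()` agrees with `precision()` unless `prevalence` is supplied.
#
#   ppv_vec(two_class_example$truth, two_class_example$predicted)
#   precision_vec(two_class_example$truth, two_class_example$predicted)
#   # identical estimates; an explicit prevalence changes only ppv:
#   ppv_vec(two_class_example$truth, two_class_example$predicted, prevalence = 0.40)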
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/class-ppv.R |
#' Precision
#'
#' These functions calculate the [precision()] of a measurement system for
#' finding relevant documents compared to reference results
#' (the truth regarding relevance). Highly related functions are [recall()]
#' and [f_meas()].
#'
#' The precision is the percentage of predicted truly relevant results
#' of the total number of predicted relevant results and
#' characterizes the "purity in retrieval performance" (Buckland
#' and Gey, 1994).
#'
#' When the denominator of the calculation is `0`, precision is undefined. This
#' happens when both `# true_positive = 0` and `# false_positive = 0` are true,
#' which means that there were no predicted events. When computing binary
#' precision, a `NA` value will be returned with a warning. When computing
#' multiclass precision, the individual `NA` values will be removed, and the
#' computation will proceed, with a warning.
#'
#' @family class metrics
#' @family relevance metrics
#' @templateVar fn precision
#' @template event_first
#' @template multiclass
#' @template return
#' @template table-relevance
#'
#' @inheritParams sens
#'
#' @references
#'
#' Buckland, M., & Gey, F. (1994). The relationship
#' between Recall and Precision. *Journal of the American Society
#' for Information Science*, 45(1), 12-19.
#'
#' Powers, D. (2007). Evaluation: From Precision, Recall and F
#' Factor to ROC, Informedness, Markedness and Correlation.
#' Technical Report SIE-07-001, Flinders University
#'
#' @author Max Kuhn
#'
#' @template examples-class
#'
#' @export
precision <- function(data, ...) {
UseMethod("precision")
}
precision <- new_class_metric(
precision,
direction = "maximize"
)
#' @rdname precision
#' @export
precision.data.frame <- function(data,
truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
class_metric_summarizer(
name = "precision",
fn = precision_vec,
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
estimator = estimator,
na_rm = na_rm,
case_weights = !!enquo(case_weights),
event_level = event_level
)
}
#' @export
precision.table <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
check_table(data)
estimator <- finalize_estimator(data, estimator)
metric_tibbler(
.metric = "precision",
.estimator = estimator,
.estimate = precision_table_impl(data, estimator, event_level)
)
}
#' @export
precision.matrix <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
data <- as.table(data)
precision.table(data, estimator, event_level)
}
#' @export
#' @rdname precision
precision_vec <- function(truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
abort_if_class_pred(truth)
estimate <- as_factor_from_class_pred(estimate)
estimator <- finalize_estimator(truth, estimator)
check_class_metric(truth, estimate, case_weights, estimator)
if (na_rm) {
result <- yardstick_remove_missing(truth, estimate, case_weights)
truth <- result$truth
estimate <- result$estimate
case_weights <- result$case_weights
} else if (yardstick_any_missing(truth, estimate, case_weights)) {
return(NA_real_)
}
data <- yardstick_table(truth, estimate, case_weights = case_weights)
precision_table_impl(data, estimator, event_level)
}
precision_table_impl <- function(data, estimator, event_level) {
if (is_binary(estimator)) {
precision_binary(data, event_level)
} else {
w <- get_weights(data, estimator)
out_vec <- precision_multiclass(data, estimator)
# set `na.rm = TRUE` to remove undefined values from weighted computation (#98)
stats::weighted.mean(out_vec, w, na.rm = TRUE)
}
}
precision_binary <- function(data, event_level) {
relevant <- pos_val(data, event_level)
numer <- data[relevant, relevant]
denom <- sum(data[relevant, ])
undefined <- denom <= 0
if (undefined) {
not_relevant <- setdiff(colnames(data), relevant)
count <- data[not_relevant, relevant]
warn_precision_undefined_binary(relevant, count)
return(NA_real_)
}
numer / denom
}
precision_multiclass <- function(data, estimator) {
numer <- diag(data)
denom <- rowSums(data)
undefined <- denom <= 0
if (any(undefined)) {
counts <- colSums(data) - numer
counts <- counts[undefined]
events <- colnames(data)[undefined]
warn_precision_undefined_multiclass(events, counts)
numer[undefined] <- NA_real_
denom[undefined] <- NA_real_
}
# set `na.rm = TRUE` to remove undefined values from weighted computation (#98)
if (is_micro(estimator)) {
numer <- sum(numer, na.rm = TRUE)
denom <- sum(denom, na.rm = TRUE)
}
numer / denom
}
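# Behavior sketch (comments only, toy vectors): when a level is never
# predicted, the denominator above is zero, `warn_precision_undefined_*()`
# signals a warning, and `NA` is returned (binary) or the level is dropped
# from the average (multiclass).
#
#   truth    <- factor(c("a", "a", "b"), levels = c("a", "b"))
#   estimate <- factor(c("b", "b", "b"), levels = c("a", "b"))
#   precision_vec(truth, estimate)  # warns; NA because "a" is never predicted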
warn_precision_undefined_binary <- function(event, count) {
message <- c(
"While computing binary {.fn precision}, no predicted events were \\
detected (i.e. `true_positive + false_positive = 0`).",
"Precision is undefined in this case, and `NA` will be returned."
)
message <- c(
message,
paste(
"Note that",
count,
"true event(s) actually occurred for the problematic event level,",
event
)
)
warn_precision_undefined(
message = message,
events = event,
counts = count,
class = "yardstick_warning_precision_undefined_binary"
)
}
warn_precision_undefined_multiclass <- function(events, counts) {
message <- c(
"While computing multiclass {.fn precision}, some levels had no predicted \\
events (i.e. `true_positive + false_positive = 0`).",
"Precision is undefined in this case, and those levels will be removed \\
from the averaged result.",
"Note that the following number of true events actually occurred for each \\
problematic event level:",
paste0("'", events, "': ", counts, collapse = ", ")
)
warn_precision_undefined(
message = message,
events = events,
counts = counts,
class = "yardstick_warning_precision_undefined_multiclass"
)
}
warn_precision_undefined <- function(message, events, counts, ..., class = character()) {
cli::cli_warn(
message = message,
class = c(class, "yardstick_warning_precision_undefined"),
events = events,
counts = counts,
...
)
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/class-precision.R |
#' Recall
#'
#' These functions calculate the [recall()] of a measurement system for
#' finding relevant documents compared to reference results
#' (the truth regarding relevance). Highly related functions are [precision()]
#' and [f_meas()].
#'
#' The recall (aka sensitivity) is defined as the proportion of
#' relevant results out of the number of samples which were
#' actually relevant. When there are no relevant results, recall is
#' not defined and a value of `NA` is returned.
#'
#' When the denominator of the calculation is `0`, recall is undefined. This
#' happens when both `# true_positive = 0` and `# false_negative = 0` are true,
#' which means that there were no true events. When computing binary
#' recall, a `NA` value will be returned with a warning. When computing
#' multiclass recall, the individual `NA` values will be removed, and the
#' computation will proceed, with a warning.
#'
#' @family class metrics
#' @family relevance metrics
#' @templateVar fn recall
#' @template event_first
#' @template multiclass
#' @template return
#' @template table-relevance
#'
#' @inheritParams sens
#'
#' @references
#'
#' Buckland, M., & Gey, F. (1994). The relationship
#' between Recall and Precision. *Journal of the American Society
#' for Information Science*, 45(1), 12-19.
#'
#' Powers, D. (2007). Evaluation: From Precision, Recall and F
#' Factor to ROC, Informedness, Markedness and Correlation.
#' Technical Report SIE-07-001, Flinders University
#'
#' @author Max Kuhn
#'
#' @template examples-class
#'
#' @export
recall <- function(data, ...) {
UseMethod("recall")
}
recall <- new_class_metric(
recall,
direction = "maximize"
)
#' @rdname recall
#' @export
recall.data.frame <- function(data,
truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
class_metric_summarizer(
name = "recall",
fn = recall_vec,
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
estimator = estimator,
na_rm = na_rm,
case_weights = !!enquo(case_weights),
event_level = event_level
)
}
#' @export
recall.table <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
check_table(data)
estimator <- finalize_estimator(data, estimator)
metric_tibbler(
.metric = "recall",
.estimator = estimator,
.estimate = recall_table_impl(data, estimator, event_level)
)
}
#' @export
recall.matrix <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
data <- as.table(data)
recall.table(data, estimator, event_level)
}
#' @export
#' @rdname recall
recall_vec <- function(truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
abort_if_class_pred(truth)
estimate <- as_factor_from_class_pred(estimate)
estimator <- finalize_estimator(truth, estimator)
check_class_metric(truth, estimate, case_weights, estimator)
if (na_rm) {
result <- yardstick_remove_missing(truth, estimate, case_weights)
truth <- result$truth
estimate <- result$estimate
case_weights <- result$case_weights
} else if (yardstick_any_missing(truth, estimate, case_weights)) {
return(NA_real_)
}
data <- yardstick_table(truth, estimate, case_weights = case_weights)
recall_table_impl(data, estimator, event_level)
}
recall_table_impl <- function(data, estimator, event_level) {
if (is_binary(estimator)) {
recall_binary(data, event_level)
} else {
w <- get_weights(data, estimator)
out_vec <- recall_multiclass(data, estimator)
# set `na.rm = TRUE` to remove undefined values from weighted computation (#98)
stats::weighted.mean(out_vec, w, na.rm = TRUE)
}
}
recall_binary <- function(data, event_level) {
relevant <- pos_val(data, event_level)
numer <- sum(data[relevant, relevant])
denom <- sum(data[, relevant])
undefined <- denom <= 0
if (undefined) {
not_relevant <- setdiff(colnames(data), relevant)
count <- data[relevant, not_relevant]
warn_recall_undefined_binary(relevant, count)
return(NA_real_)
}
numer / denom
}
recall_multiclass <- function(data, estimator) {
numer <- diag(data)
denom <- colSums(data)
undefined <- denom <= 0
if (any(undefined)) {
counts <- rowSums(data) - numer
counts <- counts[undefined]
events <- colnames(data)[undefined]
warn_recall_undefined_multiclass(events, counts)
numer[undefined] <- NA_real_
denom[undefined] <- NA_real_
}
# set `na.rm = TRUE` to remove undefined values from weighted computation (#98)
if (is_micro(estimator)) {
numer <- sum(numer, na.rm = TRUE)
denom <- sum(denom, na.rm = TRUE)
}
numer / denom
}
warn_recall_undefined_binary <- function(event, count) {
message <- c(
"While computing binary {.fn recall}, no true events were detected \\
(i.e. `true_positive + false_negative = 0`).",
"Recall is undefined in this case, and `NA` will be returned."
)
message <- c(
message,
paste(
"Note that",
count,
"predicted event(s) actually occurred for the problematic event level",
event
)
)
warn_recall_undefined(
message = message,
events = event,
counts = count,
class = "yardstick_warning_recall_undefined_binary"
)
}
warn_recall_undefined_multiclass <- function(events, counts) {
message <- c(
"While computing multiclass {.fn recall}, some levels had no true events \\
(i.e. `true_positive + false_negative = 0`).",
"Recall is undefined in this case, and those levels will be removed from \\
the averaged result.",
"Note that the following number of predicted events actually occurred for \\
each problematic event level:",
paste0("'", events, "': ", counts, collapse = ", ")
)
warn_recall_undefined(
message = message,
events = events,
counts = counts,
class = "yardstick_warning_recall_undefined_multiclass"
)
}
warn_recall_undefined <- function(message, events, counts, ..., class = character()) {
cli::cli_warn(
message = message,
class = c(class, "yardstick_warning_recall_undefined"),
events = events,
counts = counts,
...
)
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/class-recall.R |
#' Sensitivity
#'
#' These functions calculate the [sens()] (sensitivity) of a measurement system
#' compared to a reference result (the "truth" or gold standard).
#' Highly related functions are [spec()], [ppv()], and [npv()].
#'
#' The sensitivity (`sens()`) is defined as the proportion of positive
#' results out of the number of samples which were actually
#' positive.
#'
#' When the denominator of the calculation is `0`, sensitivity is undefined.
#' This happens when both `# true_positive = 0` and `# false_negative = 0`
#' are true, which means that there were no true events. When computing binary
#' sensitivity, a `NA` value will be returned with a warning. When computing
#' multiclass sensitivity, the individual `NA` values will be removed, and the
#' computation will proceed, with a warning.
#'
#' @family class metrics
#' @family sensitivity metrics
#' @templateVar fn sens
#' @template event_first
#' @template multiclass
#' @template return
#' @template table-positive
#'
#' @param data Either a `data.frame` containing the columns specified by the
#' `truth` and `estimate` arguments, or a `table`/`matrix` where the true
#' class results should be in the columns of the table.
#'
#' @param truth The column identifier for the true class results
#' (that is a `factor`). This should be an unquoted column name although
#' this argument is passed by expression and supports
#' [quasiquotation][rlang::quasiquotation] (you can unquote column
#' names). For `_vec()` functions, a `factor` vector.
#'
#' @param estimate The column identifier for the predicted class
#' results (that is also `factor`). As with `truth` this can be
#' specified different ways but the primary method is to use an
#' unquoted variable name. For `_vec()` functions, a `factor` vector.
#'
#' @param estimator One of: `"binary"`, `"macro"`, `"macro_weighted"`,
#' or `"micro"` to specify the type of averaging to be done. `"binary"` is
#' only relevant for the two class case. The other three are general methods
#' for calculating multiclass metrics. The default will automatically choose
#' `"binary"` or `"macro"` based on `estimate`.
#'
#' @param na_rm A `logical` value indicating whether `NA`
#' values should be stripped before the computation proceeds.
#'
#' @param case_weights The optional column identifier for case weights.
#' This should be an unquoted column name that evaluates to a numeric column
#' in `data`. For `_vec()` functions, a numeric vector,
#' [hardhat::importance_weights()], or [hardhat::frequency_weights()].
#'
#' @param event_level A single string. Either `"first"` or `"second"` to specify
#' which level of `truth` to consider as the "event". This argument is only
#' applicable when `estimator = "binary"`. The default uses an internal helper
#' that defaults to `"first"`.
#'
#' @param ... Not currently used.
#'
#'
#' @author Max Kuhn
#'
#' @references
#'
#' Altman, D.G., Bland, J.M. (1994) ``Diagnostic tests 1:
#' sensitivity and specificity,'' *British Medical Journal*,
#' vol 308, 1552.
#'
#' @template examples-class
#'
#' @export
sens <- function(data, ...) {
UseMethod("sens")
}
sens <- new_class_metric(
sens,
direction = "maximize"
)
#' @export
#' @rdname sens
sens.data.frame <- function(data,
truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
class_metric_summarizer(
name = "sens",
fn = sens_vec,
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
estimator = estimator,
na_rm = na_rm,
case_weights = !!enquo(case_weights),
event_level = event_level
)
}
#' @export
sens.table <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
check_table(data)
estimator <- finalize_estimator(data, estimator)
metric_tibbler(
.metric = "sens",
.estimator = estimator,
.estimate = sens_table_impl(data, estimator, event_level)
)
}
#' @export
sens.matrix <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
data <- as.table(data)
sens.table(data, estimator, event_level)
}
#' @export
#' @rdname sens
sens_vec <- function(truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
abort_if_class_pred(truth)
estimate <- as_factor_from_class_pred(estimate)
estimator <- finalize_estimator(truth, estimator)
check_class_metric(truth, estimate, case_weights, estimator)
if (na_rm) {
result <- yardstick_remove_missing(truth, estimate, case_weights)
truth <- result$truth
estimate <- result$estimate
case_weights <- result$case_weights
} else if (yardstick_any_missing(truth, estimate, case_weights)) {
return(NA_real_)
}
data <- yardstick_table(truth, estimate, case_weights = case_weights)
sens_table_impl(data, estimator, event_level)
}
# ------------------------------------------------------------------------------
#' @rdname sens
#' @export
sensitivity <- function(data, ...) {
UseMethod("sensitivity")
}
sensitivity <- new_class_metric(
sensitivity,
direction = "maximize"
)
#' @rdname sens
#' @export
sensitivity.data.frame <- function(data,
truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
class_metric_summarizer(
name = "sensitivity",
fn = sens_vec,
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
estimator = estimator,
na_rm = na_rm,
case_weights = !!enquo(case_weights),
event_level = event_level
)
}
#' @export
sensitivity.table <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
check_table(data)
estimator <- finalize_estimator(data, estimator)
metric_tibbler(
.metric = "sensitivity",
.estimator = estimator,
.estimate = sens_table_impl(data, estimator, event_level)
)
}
#' @export
sensitivity.matrix <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
data <- as.table(data)
sensitivity.table(data, estimator, event_level)
}
#' @rdname sens
#' @export
sensitivity_vec <- sens_vec
# ------------------------------------------------------------------------------
# sens() == recall(), so this is a copy paste from there, with altered warning
# classes
sens_table_impl <- function(data, estimator, event_level) {
if (is_binary(estimator)) {
sens_binary(data, event_level)
} else {
w <- get_weights(data, estimator)
out_vec <- sens_multiclass(data, estimator)
# set `na.rm = TRUE` to remove undefined values from weighted computation (#98)
stats::weighted.mean(out_vec, w, na.rm = TRUE)
}
}
sens_binary <- function(data, event_level) {
relevant <- pos_val(data, event_level)
numer <- sum(data[relevant, relevant])
denom <- sum(data[, relevant])
undefined <- denom <= 0
if (undefined) {
not_relevant <- setdiff(colnames(data), relevant)
count <- data[relevant, not_relevant]
warn_sens_undefined_binary(relevant, count)
return(NA_real_)
}
numer / denom
}
sens_multiclass <- function(data, estimator) {
numer <- diag(data)
denom <- colSums(data)
undefined <- denom <= 0
if (any(undefined)) {
counts <- rowSums(data) - numer
counts <- counts[undefined]
events <- colnames(data)[undefined]
warn_sens_undefined_multiclass(events, counts)
numer[undefined] <- NA_real_
denom[undefined] <- NA_real_
}
# set `na.rm = TRUE` to remove undefined values from weighted computation (#98)
if (is_micro(estimator)) {
numer <- sum(numer, na.rm = TRUE)
denom <- sum(denom, na.rm = TRUE)
}
numer / denom
}
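# Consistency sketch (comments only): per the note above, `sens()` computes
# the same quantity as `recall()`, so the vector versions agree on the same
# inputs; only the classes of the undefined-case warnings differ.
#
#   sens_vec(two_class_example$truth, two_class_example$predicted)
#   recall_vec(two_class_example$truth, two_class_example$predicted)
#   # both return the same estimate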
warn_sens_undefined_binary <- function(event, count) {
message <- c(
"While computing binary {.fn sens}, no true events were detected \\
(i.e. `true_positive + false_negative = 0`).",
"Sensitivity is undefined in this case, and `NA` will be returned."
)
message <- c(
message,
paste(
"Note that",
count,
"predicted event(s) actually occurred for the problematic event level,",
event
)
)
warn_sens_undefined(
message = message,
events = event,
counts = count,
class = "yardstick_warning_sens_undefined_binary"
)
}
warn_sens_undefined_multiclass <- function(events, counts) {
message <- c(
"While computing multiclass {.fn sens}, some levels had no true events \\
(i.e. `true_positive + false_negative = 0`).",
"Sensitivity is undefined in this case, and those levels will be removed \\
from the averaged result.",
"Note that the following number of predicted events actually occurred for \\
each problematic event level:",
paste0("'", events, "': ", counts, collapse = ", ")
)
warn_sens_undefined(
message = message,
events = events,
counts = counts,
class = "yardstick_warning_sens_undefined_multiclass"
)
}
warn_sens_undefined <- function(message, events, counts, ..., class = character()) {
cli::cli_warn(
message = message,
class = c(class, "yardstick_warning_sens_undefined"),
events = events,
counts = counts,
...
)
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/class-sens.R |
#' Specificity
#'
#' These functions calculate the [spec()] (specificity) of a measurement system
#' compared to a reference result (the "truth" or gold standard).
#' Highly related functions are [sens()], [ppv()], and [npv()].
#'
#' The specificity measures the proportion of negatives that are correctly
#' identified as negatives.
#'
#' When the denominator of the calculation is `0`, specificity is undefined.
#' This happens when both `# true_negative = 0` and `# false_positive = 0`
#' are true, which means that there were no true negatives. When computing binary
#' specificity, a `NA` value will be returned with a warning. When computing
#' multiclass specificity, the individual `NA` values will be removed, and the
#' computation will proceed, with a warning.
#'
#' @family class metrics
#' @family sensitivity metrics
#' @templateVar fn spec
#' @template event_first
#' @template multiclass
#' @template return
#' @template table-positive
#'
#' @inheritParams sens
#'
#'
#' @author Max Kuhn
#'
#' @references
#'
#' Altman, D.G., Bland, J.M. (1994) ``Diagnostic tests 1:
#' sensitivity and specificity,'' *British Medical Journal*,
#' vol 308, 1552.
#'
#' @template examples-class
#'
#' @export
spec <- function(data, ...) {
UseMethod("spec")
}
spec <- new_class_metric(
spec,
direction = "maximize"
)
#' @export
#' @rdname spec
spec.data.frame <- function(data,
truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
class_metric_summarizer(
name = "spec",
fn = spec_vec,
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
estimator = estimator,
na_rm = na_rm,
case_weights = !!enquo(case_weights),
event_level = event_level
)
}
#' @export
spec.table <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
check_table(data)
estimator <- finalize_estimator(data, estimator)
metric_tibbler(
.metric = "spec",
.estimator = estimator,
.estimate = spec_table_impl(data, estimator, event_level)
)
}
#' @export
spec.matrix <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
data <- as.table(data)
spec.table(data, estimator, event_level)
}
#' @export
#' @rdname spec
spec_vec <- function(truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
abort_if_class_pred(truth)
estimate <- as_factor_from_class_pred(estimate)
estimator <- finalize_estimator(truth, estimator)
check_class_metric(truth, estimate, case_weights, estimator)
if (na_rm) {
result <- yardstick_remove_missing(truth, estimate, case_weights)
truth <- result$truth
estimate <- result$estimate
case_weights <- result$case_weights
} else if (yardstick_any_missing(truth, estimate, case_weights)) {
return(NA_real_)
}
data <- yardstick_table(truth, estimate, case_weights = case_weights)
spec_table_impl(data, estimator, event_level)
}
# ------------------------------------------------------------------------------
#' @rdname spec
#' @export
specificity <- function(data, ...) {
UseMethod("specificity")
}
specificity <- new_class_metric(
specificity,
direction = "maximize"
)
#' @rdname spec
#' @export
specificity.data.frame <- function(data,
truth,
estimate,
estimator = NULL,
na_rm = TRUE,
case_weights = NULL,
event_level = yardstick_event_level(),
...) {
class_metric_summarizer(
name = "specificity",
fn = spec_vec,
data = data,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
estimator = estimator,
na_rm = na_rm,
case_weights = !!enquo(case_weights),
event_level = event_level
)
}
#' @export
specificity.table <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
check_table(data)
estimator <- finalize_estimator(data, estimator)
metric_tibbler(
.metric = "specificity",
.estimator = estimator,
.estimate = spec_table_impl(data, estimator, event_level)
)
}
#' @export
specificity.matrix <- function(data,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
data <- as.table(data)
specificity.table(data, estimator, event_level)
}
#' @rdname spec
#' @export
specificity_vec <- spec_vec
# ------------------------------------------------------------------------------
spec_table_impl <- function(data, estimator, event_level) {
if (is_binary(estimator)) {
spec_binary(data, event_level)
} else {
w <- get_weights(data, estimator)
out_vec <- spec_multiclass(data, estimator)
# set `na.rm = TRUE` to remove undefined values from weighted computation (#98)
stats::weighted.mean(out_vec, w, na.rm = TRUE)
}
}
spec_binary <- function(data, event_level) {
negative <- neg_val(data, event_level)
numer <- sum(data[negative, negative])
denom <- sum(data[, negative])
undefined <- denom <= 0
if (undefined) {
positive <- setdiff(colnames(data), negative)
count <- data[negative, positive]
warn_spec_undefined_binary(positive, count)
return(NA_real_)
}
numer / denom
}
spec_multiclass <- function(data, estimator) {
n <- sum(data)
tp <- diag(data)
tpfp <- rowSums(data)
tpfn <- colSums(data)
tn <- n - (tpfp + tpfn - tp)
fp <- tpfp - tp
numer <- tn
denom <- tn + fp
undefined <- denom <= 0
if (any(undefined)) {
counts <- tpfn - tp
counts <- counts[undefined]
events <- colnames(data)[undefined]
warn_spec_undefined_multiclass(events, counts)
numer[undefined] <- NA_real_
denom[undefined] <- NA_real_
}
# set `na.rm = TRUE` to remove undefined values from weighted computation (#98)
if (is_micro(estimator)) {
numer <- sum(numer, na.rm = TRUE)
denom <- sum(denom, na.rm = TRUE)
}
numer / denom
}
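# Derivation sketch (comments only, made-up counts): with predictions on
# the rows and truth in the columns, the margin algebra above gives, for a
# 2x2 table `data <- matrix(c(3, 1, 2, 4), 2, 2)` and its first class:
#   n = 10, tp = 3, tpfp = 5 (row sum), tpfn = 4 (column sum)
#   tn = n - (tpfp + tpfn - tp) = 10 - (5 + 4 - 3) = 4
#   fp = tpfp - tp = 2
# i.e. `tn` is everything outside the class's row and column.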
warn_spec_undefined_binary <- function(event, count) {
message <- c(
"While computing binary {.fn spec}, no true negatives were detected \\
(i.e. `true_negative + false_positive = 0`).",
"Specificity is undefined in this case, and `NA` will be returned."
)
message <- c(
message,
paste(
"Note that",
count,
"predicted negatives(s) actually occurred for the problematic event level,",
event
)
)
warn_spec_undefined(
message = message,
events = event,
counts = count,
class = "yardstick_warning_spec_undefined_binary"
)
}
warn_spec_undefined_multiclass <- function(events, counts) {
message <- c(
"While computing multiclass {.fn spec}, some levels had no true negatives \\
(i.e. `true_negative + false_positive = 0`).",
"Specificity is undefined in this case, and those levels will be removed \\
from the averaged result.",
"Note that the following number of predicted negatives actually occurred \\
for each problematic event level:",
paste0("'", events, "': ", counts, collapse = ", ")
)
warn_spec_undefined(
message = message,
events = events,
counts = counts,
class = "yardstick_warning_spec_undefined_multiclass"
)
}
warn_spec_undefined <- function(message, events, counts, ..., class = character()) {
cli::cli_warn(
message = message,
class = c(class, "yardstick_warning_spec_undefined"),
events = events,
counts = counts,
...
)
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/class-spec.R |
#' Confusion Matrix for Categorical Data
#'
#' Calculates a cross-tabulation of observed and predicted classes.
#'
#' For [conf_mat()] objects, a `broom` `tidy()` method has been created
#' that collapses the cell counts by cell into a data frame for
#' easy manipulation.
#'
#' There is also a `summary()` method that computes various classification
#' metrics at once. See [summary.conf_mat()]
#'
#' There is a [ggplot2::autoplot()]
#' method for quickly visualizing the matrix. Both heatmap and mosaic types
#' are implemented.
#'
#' The function requires that the factors have exactly the same levels.
#'
#' @aliases conf_mat.table conf_mat.default conf_mat
#'
#' @inheritParams sens
#'
#' @param data A data frame or a [base::table()].
#'
#' @param dnn A character vector of dimnames for the table.
#'
#' @param ... Not used.
#'
#' @return
#' `conf_mat()` produces an object with class `conf_mat`. This contains the
#' table and other objects. `tidy.conf_mat()` generates a tibble with columns
#' `name` (the cell identifier) and `value` (the cell count).
#'
#' When used on a grouped data frame, `conf_mat()` returns a tibble containing
#' columns for the groups along with `conf_mat`, a list-column
#' where each element is a `conf_mat` object.
#'
#' @seealso
#'
#' [summary.conf_mat()] for computing a large number of metrics from one
#' confusion matrix.
#'
#' @examples
#' library(dplyr)
#' data("hpc_cv")
#'
#' # The confusion matrix from a single assessment set (i.e. fold)
#' cm <- hpc_cv %>%
#' filter(Resample == "Fold01") %>%
#' conf_mat(obs, pred)
#' cm
#'
#' # Now compute the average confusion matrix across all folds in
#' # terms of the proportion of the data contained in each cell.
#' # First get the raw cell counts per fold using the `tidy` method
#' library(tidyr)
#'
#' cells_per_resample <- hpc_cv %>%
#' group_by(Resample) %>%
#' conf_mat(obs, pred) %>%
#' mutate(tidied = lapply(conf_mat, tidy)) %>%
#' unnest(tidied)
#'
#' # Get the totals per resample
#' counts_per_resample <- hpc_cv %>%
#' group_by(Resample) %>%
#' summarize(total = n()) %>%
#' left_join(cells_per_resample, by = "Resample") %>%
#' # Compute the proportions
#' mutate(prop = value / total) %>%
#' group_by(name) %>%
#' # Average
#' summarize(prop = mean(prop))
#'
#' counts_per_resample
#'
#' # Now reshape these into a matrix
#' mean_cmat <- matrix(counts_per_resample$prop, byrow = TRUE, ncol = 4)
#' rownames(mean_cmat) <- levels(hpc_cv$obs)
#' colnames(mean_cmat) <- levels(hpc_cv$obs)
#'
#' round(mean_cmat, 3)
#'
#' # The confusion matrix can quickly be visualized using autoplot()
#' library(ggplot2)
#'
#' autoplot(cm, type = "mosaic")
#' autoplot(cm, type = "heatmap")
#' @export
conf_mat <- function(data, ...) {
UseMethod("conf_mat")
}
#' @export
#' @rdname conf_mat
conf_mat.data.frame <- function(data,
truth,
estimate,
dnn = c("Prediction", "Truth"),
case_weights = NULL,
...) {
if (dots_n(...) != 0L) {
warn_conf_mat_dots_deprecated()
}
truth <- enquo(truth)
estimate <- enquo(estimate)
case_weights <- enquo(case_weights)
truth <- yardstick_eval_select(
expr = truth,
data = data,
arg = "truth"
)
truth <- data[[truth]]
estimate <- yardstick_eval_select(
expr = estimate,
data = data,
arg = "estimate"
)
estimate <- data[[estimate]]
if (quo_is_null(case_weights)) {
case_weights <- NULL
} else {
case_weights <- yardstick_eval_select(
expr = case_weights,
data = data,
arg = "case_weights"
)
case_weights <- data[[case_weights]]
}
table <- conf_mat_impl(
truth = truth,
estimate = estimate,
case_weights = case_weights
)
dimnames <- dimnames(table)
names(dimnames) <- dnn
dimnames(table) <- dimnames
conf_mat.matrix(table)
}
#' @export
conf_mat.grouped_df <- function(data,
truth,
estimate,
dnn = c("Prediction", "Truth"),
case_weights = NULL,
...) {
if (dots_n(...) != 0L) {
warn_conf_mat_dots_deprecated()
}
truth <- enquo(truth)
estimate <- enquo(estimate)
case_weights <- enquo(case_weights)
truth <- yardstick_eval_select(
expr = truth,
data = data,
arg = "truth"
)
estimate <- yardstick_eval_select(
expr = estimate,
data = data,
arg = "estimate"
)
if (quo_is_null(case_weights)) {
group_case_weights <- NULL
} else {
    case_weights <- yardstick_eval_select(
      expr = case_weights,
      data = data,
      arg = "case_weights"
    )
}
group_rows <- dplyr::group_rows(data)
group_keys <- dplyr::group_keys(data)
data <- dplyr::ungroup(data)
groups <- vec_chop(data, indices = group_rows)
out <- vector("list", length = length(groups))
for (i in seq_along(groups)) {
group <- groups[[i]]
group_truth <- group[[truth]]
group_estimate <- group[[estimate]]
if (is_string(case_weights)) {
group_case_weights <- group[[case_weights]]
}
table <- conf_mat_impl(
truth = group_truth,
estimate = group_estimate,
case_weights = group_case_weights
)
dimnames <- dimnames(table)
names(dimnames) <- dnn
dimnames(table) <- dimnames
out[[i]] <- conf_mat.matrix(table)
}
out <- vec_cbind(group_keys, conf_mat = out)
out
}
conf_mat_impl <- function(truth, estimate, case_weights, call = caller_env()) {
abort_if_class_pred(truth)
estimate <- as_factor_from_class_pred(estimate)
estimator <- "not binary"
check_class_metric(truth, estimate, case_weights, estimator, call = call)
if (length(levels(truth)) < 2) {
cli::cli_abort(
"{.arg truth} must have at least 2 factor levels.",
call = call
)
}
yardstick_table(
truth = truth,
estimate = estimate,
case_weights = case_weights
)
}
#' @export
conf_mat.table <- function(data, ...) {
check_table(data)
# To ensure that we always have a consistent output type, whether or not
# case weights were used when constructing the table
storage.mode(data) <- "double"
class_lev <- rownames(data)
num_lev <- length(class_lev)
if (num_lev < 2) {
cli::cli_abort(
"There must be at least 2 factors levels in the {.arg data}."
)
}
structure(
list(table = data),
class = "conf_mat"
)
}
#' @export
conf_mat.matrix <- function(data, ...) {
# We want the conversion from a yardstick_table() result (i.e. a double
# matrix) to a table to occur. tune relies on the `as.data.frame.table()`
# method to run, so it has to be a table.
data <- as.table(data)
conf_mat.table(data)
}
warn_conf_mat_dots_deprecated <- function() {
lifecycle::deprecate_warn(
when = "1.0.0",
what = I("The `...` argument of `conf_mat()`"),
details = "This argument no longer has any effect, and is being ignored."
)
}
#' @export
print.conf_mat <- function(x, ...) {
print(x$table)
}
#' @export
#' @rdname conf_mat
#' @param x A `conf_mat` object.
tidy.conf_mat <- function(x, ...) {
y <- flatten(x$table)
dplyr::tibble(
name = names(y),
value = unname(y)
)
}
flatten <- function(xtab, call = caller_env()) {
n_col <- ncol(xtab)
n_row <- nrow(xtab)
if (n_row != n_col) {
cli::cli_abort(
"{.arg x} must have equal dimensions. \\
{.arg x} has {n_col} columns and {n_row} rows.",
call = call
)
}
flat <- as.vector(xtab)
names(flat) <- paste(
"cell",
rep(seq_len(n_col), n_col),
rep(seq_len(n_col), each = n_col),
sep = "_"
)
flat
}
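# Naming sketch (comments only): `as.vector()` walks the table in
# column-major order, so `tidy()` labels cells as `cell_<row>_<column>`,
# with the row index varying fastest.
#
#   cm <- conf_mat(two_class_example, truth, predicted)
#   tidy(cm)$name  # "cell_1_1" "cell_2_1" "cell_1_2" "cell_2_2"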
#' Summary Statistics for Confusion Matrices
#'
#' Various statistical summaries of confusion matrices are
#' produced and returned in a tibble. These include those shown in the help
#' pages for [sens()], [recall()], and [accuracy()], among others.
#'
#' @template event_first
#'
#' @inheritParams sens
#'
#' @param object An object of class [conf_mat()].
#'
#' @param prevalence A number in `(0, 1)` for the prevalence (i.e.
#' prior) of the event. If left to the default, the data are used
#' to derive this value.
#'
#' @param beta A numeric value used to weight precision and
#' recall for [f_meas()].
#'
#' @param ... Not currently used.
#'
#' @return
#'
#' A tibble containing various classification metrics.
#'
#' @seealso
#'
#' [conf_mat()]
#'
#' @examples
#' data("two_class_example")
#'
#' cmat <- conf_mat(two_class_example, truth = "truth", estimate = "predicted")
#' summary(cmat)
#' summary(cmat, prevalence = 0.70)
#'
#' library(dplyr)
#' library(tidyr)
#' data("hpc_cv")
#'
#' # Compute statistics per resample then summarize
#' all_metrics <- hpc_cv %>%
#' group_by(Resample) %>%
#' conf_mat(obs, pred) %>%
#' mutate(summary_tbl = lapply(conf_mat, summary)) %>%
#' unnest(summary_tbl)
#'
#' all_metrics %>%
#' group_by(.metric) %>%
#' summarise(
#' mean = mean(.estimate, na.rm = TRUE),
#' sd = sd(.estimate, na.rm = TRUE)
#' )
#' @export
summary.conf_mat <- function(object,
prevalence = NULL,
beta = 1,
estimator = NULL,
event_level = yardstick_event_level(),
...) {
xtab <- object$table
stats <- dplyr::bind_rows(
# known multiclass extension
accuracy(xtab),
# known multiclass extension
kap(xtab),
sens(xtab, estimator = estimator, event_level = event_level),
spec(xtab, estimator = estimator, event_level = event_level),
ppv(xtab, prevalence = prevalence, estimator = estimator, event_level = event_level),
npv(xtab, prevalence = prevalence, estimator = estimator, event_level = event_level),
# known multiclass extension
mcc(xtab),
j_index(xtab, estimator = estimator, event_level = event_level),
bal_accuracy(xtab, estimator = estimator, event_level = event_level),
detection_prevalence(xtab, estimator = estimator, event_level = event_level),
precision(xtab, estimator = estimator, event_level = event_level),
recall(xtab, estimator = estimator, event_level = event_level),
f_meas(xtab, beta = beta, estimator = estimator, event_level = event_level)
)
stats
}
# Dynamically exported
autoplot.conf_mat <- function(object, type = "mosaic", ...) {
type <- arg_match(type, conf_mat_plot_types)
switch(type,
mosaic = cm_mosaic(object),
heatmap = cm_heat(object)
)
}
conf_mat_plot_types <- c("mosaic", "heatmap")
cm_heat <- function(x) {
`%+%` <- ggplot2::`%+%`
df <- as.data.frame.table(x$table)
# Force specific column names for referencing in ggplot2 code
names(df) <- c("Prediction", "Truth", "Freq")
# Have prediction levels going from high to low so they plot in an
# order that matches the LHS of the confusion matrix
lvls <- levels(df$Prediction)
df$Prediction <- factor(df$Prediction, levels = rev(lvls))
# For case weighted confusion matrices this looks a little better
df$Freq <- round(df$Freq, digits = 3)
axis_labels <- get_axis_labels(x)
df %>%
ggplot2::ggplot(
ggplot2::aes(
x = Truth,
y = Prediction,
fill = Freq
)
) %+%
ggplot2::geom_tile() %+%
ggplot2::scale_fill_gradient(
low = "grey90",
high = "grey40"
) %+%
ggplot2::theme(
panel.background = ggplot2::element_blank(),
legend.position = "none"
) %+%
ggplot2::geom_text(
mapping = ggplot2::aes(label = Freq)
) %+%
ggplot2::labs(
x = axis_labels$x,
y = axis_labels$y
)
}
space_fun <- function(x, adjustment, rescale = FALSE) {
if (rescale) {
x <- x / sum(x)
}
adjustment <- sum(x) / adjustment
xmax <- cumsum(x) + seq(0, length(x) - 1) * adjustment
xmin <- cumsum(x) - x + seq(0, length(x) - 1) * adjustment
dplyr::tibble(xmin = xmin, xmax = xmax)
}
space_y_fun <- function(data, id, x_data) {
out <- space_fun(data[, id], 100, rescale = TRUE) * -1
names(out) <- c("ymin", "ymax")
out$xmin <- x_data[[id, 1]]
out$xmax <- x_data[[id, 2]]
out
}
cm_mosaic <- function(x) {
`%+%` <- ggplot2::`%+%`
cm_zero <- (as.numeric(x$table == 0) / 2) + x$table
x_data <- space_fun(colSums(cm_zero), 200)
full_data_list <- lapply(
seq_len(ncol(cm_zero)),
FUN = function(.x) space_y_fun(cm_zero, .x, x_data)
)
full_data <- dplyr::bind_rows(full_data_list)
y1_data <- full_data_list[[1]]
tick_labels <- colnames(cm_zero)
axis_labels <- get_axis_labels(x)
ggplot2::ggplot(full_data) %+%
ggplot2::geom_rect(
ggplot2::aes(
xmin = xmin,
xmax = xmax,
ymin = ymin,
ymax = ymax
)
) %+%
ggplot2::scale_x_continuous(
breaks = (x_data$xmin + x_data$xmax) / 2,
labels = tick_labels
) %+%
ggplot2::scale_y_continuous(
breaks = (y1_data$ymin + y1_data$ymax) / 2,
labels = tick_labels
) %+%
ggplot2::labs(
y = axis_labels$y,
x = axis_labels$x
) %+%
ggplot2::theme(panel.background = ggplot2::element_blank())
}
# Note: Always assumes predictions are on the LHS of the table
get_axis_labels <- function(x) {
table <- x$table
labels <- names(dimnames(table))
if (is.null(labels)) {
labels <- c("Prediction", "Truth")
}
list(
y = labels[[1]],
x = labels[[2]]
)
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/conf_mat.R |
#' Liver Pathology Data
#'
#' @details These data have the results of an _x_-ray examination
#' to determine whether the liver is abnormal or not (in the `scan`
#' column) versus the more extensive pathology results that
#' approximate the truth (in `pathology`).
#'
#' @name pathology
#' @aliases pathology
#' @docType data
#' @return \item{pathology}{a data frame}
#'
#' @source Altman, D.G., Bland, J.M. (1994) ``Diagnostic tests 1:
#' sensitivity and specificity,'' *British Medical Journal*,
#' vol 308, 1552.
#'
#'
#' @keywords datasets
#' @examples
#' data(pathology)
#' str(pathology)
NULL
#' Solubility Predictions from MARS Model
#'
#' @details For the solubility data in Kuhn and Johnson (2013),
#' these data are the test set results for the MARS model. The
#' observed solubility (in column `solubility`) and the model
#' results (`prediction`) are contained in the data.
#'
#' @name solubility_test
#' @aliases solubility_test
#' @docType data
#' @return \item{solubility_test}{a data frame}
#'
#' @source Kuhn, M., Johnson, K. (2013) *Applied Predictive
#' Modeling*, Springer
#'
#' @keywords datasets
#' @examples
#' data(solubility_test)
#' str(solubility_test)
NULL
#' Multiclass Probability Predictions
#'
#' @details This data frame contains the predicted classes and
#' class probabilities for a linear discriminant analysis model fit
#' to the HPC data set from Kuhn and Johnson (2013). These data are
#' the assessment sets from a 10-fold cross-validation scheme. The
#' data contains columns for the true class (`obs`), the class
#' prediction (`pred`), and columns for each class probability
#' (columns `VF`, `F`, `M`, and `L`). Additionally, a column for
#' the resample indicator is included.
#'
#' @name hpc_cv
#' @aliases hpc_cv
#' @docType data
#' @return \item{hpc_cv}{a data frame}
#'
#' @source Kuhn, M., Johnson, K. (2013) *Applied Predictive
#' Modeling*, Springer
#'
#' @keywords datasets
#' @examples
#' data(hpc_cv)
#' str(hpc_cv)
#'
#' # `obs` is a 4 level factor. The first level is `"VF"`, which is the
#' # "event of interest" by default in yardstick. See the Relevant Level
#' # section in any classification function (such as `?pr_auc`) to see how
#' # to change this.
#' levels(hpc_cv$obs)
NULL
#' Two Class Predictions
#'
#' @details These data are a test set from a model built for two
#' classes ("Class1" and "Class2"). There are columns for the true
#' and predicted classes and columns for the probabilities for each
#' class.
#'
#' @name two_class_example
#' @aliases two_class_example
#' @docType data
#' @return \item{two_class_example}{a data frame}
#'
#' @keywords datasets
#' @examples
#' data(two_class_example)
#' str(two_class_example)
#'
#' # `truth` is a 2 level factor. The first level is `"Class1"`, which is the
#' # "event of interest" by default in yardstick. See the Relevant Level
#' # section in any classification function (such as `?pr_auc`) to see how
#' # to change this.
#' levels(two_class_example$truth)
NULL
#' Survival Analysis Results
#'
#' @details These data contain plausible results from applying predictive
#' survival models to the [lung] data set using the censored package.
#'
#' @name lung_surv
#' @aliases lung_surv
#' @docType data
#' @return \item{lung_surv}{a data frame}
#'
#' @keywords datasets
#' @examples
#' data(lung_surv)
#' str(lung_surv)
#'
#' # `surv_obj` is a `Surv()` object
NULL
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/data.R |
# `...` -> estimate matrix / vector helper -------------------------------------
#' Developer helpers
#'
#' Helpers to be used alongside [check_metric], [yardstick_remove_missing] and
#' [metric summarizers][class_metric_summarizer()] when creating new metrics.
#' See [Custom performance
#' metrics](https://www.tidymodels.org/learn/develop/metrics/) for more
#' information.
#'
#' @section Dots -> Estimate:
#' `r lifecycle::badge("deprecated")`
#'
#' `dots_to_estimate()` is useful with class probability metrics that take
#' `...` rather than `estimate` as an argument. It constructs either a single
#' name if 1 input is provided to `...` or it constructs a quosure where the
#' expression constructs a matrix of as many columns as are provided to `...`.
#' These are eventually evaluated in the `summarise()` call in
#' [metric-summarizers] and evaluate to either a vector or a matrix for
#' further use in the underlying vector functions.
#'
#'
#' @name developer-helpers
#'
#' @aliases dots_to_estimate
#'
#' @export
#'
#' @inheritParams roc_auc
dots_to_estimate <- function(data, ...) {
lifecycle::deprecate_soft(
when = "1.2.0",
what = "dots_to_estimate()",
details = I(
paste(
"No longer needed with",
"`prob_metric_summarizer()`, or `curve_metric_summarizer()`."
)
)
)
# Capture dots
dot_vars <- with_handlers(
tidyselect::vars_select(names(data), !!!enquos(...)),
tidyselect_empty_dots = function(cnd) {
abort("No valid variables provided to `...`.")
}
)
# estimate is a matrix of the selected columns if >1 selected
dot_nms <- lapply(dot_vars, as.name)
if (length(dot_nms) > 1) {
estimate <- quo(
matrix(
data = c(!!!dot_nms),
ncol = !!length(dot_nms),
dimnames = list(NULL, !!dot_vars)
)
)
} else {
estimate <- dot_nms[[1]]
}
estimate
}
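# Legacy usage sketch (comments only; calling this emits the deprecation
# warning above by design): with multiple columns selected, the returned
# quosure builds a probability matrix when evaluated against the data.
#
#   est <- dots_to_estimate(hpc_cv, VF:L)
#   rlang::eval_tidy(est, data = hpc_cv)  # numeric matrix, columns VF, F, M, L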
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/deprecated-prob_helpers.R |
#' Developer function for summarizing new metrics
#'
#' @description
#' `r lifecycle::badge("deprecated")`
#'
#' `metric_summarizer()` has been soft-deprecated as of yardstick 1.2.0. Please
#' switch to use [class_metric_summarizer()], [numeric_metric_summarizer()],
#' [prob_metric_summarizer()], or [curve_metric_summarizer()].
#'
#' @param metric_nm A single character representing the name of the metric to
#' use in the `tibble` output. This will be modified to include the type
#' of averaging if appropriate.
#'
#' @param metric_fn The vector version of your custom metric function. It
#' generally takes `truth`, `estimate`, `na_rm`, and any other extra arguments
#' needed to calculate the metric.
#'
#' @param data The data frame with `truth` and `estimate` columns passed
#' in from the data frame version of your metric function that called
#' `metric_summarizer()`.
#'
#' @param truth The unquoted column name corresponding to the `truth` column.
#'
#' @param estimate Generally, the unquoted column name corresponding to
#' the `estimate` column. For metrics that take multiple columns through `...`
#' like class probability metrics, this is a result of [dots_to_estimate()].
#'
#' @param estimator For numeric metrics, this is left as `NULL` so averaging
#' is not passed on to the metric function implementation. For classification
#' metrics, this can either be `NULL` for the default auto-selection of
#' averaging (`"binary"` or `"macro"`), or a single character to pass along
#' to the metric implementation describing the kind of averaging to use.
#'
#' @param na_rm A `logical` value indicating whether `NA` values should be
#' stripped before the computation proceeds. The removal is executed in
#' `metric_vec_template()`.
#'
#' @param event_level For numeric metrics, this is left as `NULL` to prevent
#' it from being passed on to the metric function implementation. For
#' classification metrics, this can either be `NULL` to use the default
#' `event_level` value of the `metric_fn` or a single string of either
#' `"first"` or `"second"` to pass along describing which level should be
#' considered the "event".
#'
#' @param case_weights For metrics supporting case weights, an unquoted
#' column name corresponding to case weights can be passed here. If not `NULL`,
#' the case weights will be passed on to `metric_fn` as the named argument
#' `case_weights`.
#'
#' @param ... Currently not used. Metric specific options are passed in
#' through `metric_fn_options`.
#'
#' @param metric_fn_options A named list of metric specific options. These
#' are spliced into the metric function call using `!!!` from `rlang`. The
#' default results in nothing being spliced into the call.
#'
#' @keywords internal
#' @export
metric_summarizer <- function(metric_nm,
metric_fn,
data,
truth,
estimate,
estimator = NULL,
na_rm = TRUE,
event_level = NULL,
case_weights = NULL,
...,
metric_fn_options = list()) {
lifecycle::deprecate_soft(
when = "1.2.0",
what = "metric_summarizer()",
with = I(
paste(
"`numeric_metric_summarizer()`, `class_metric_summarizer()`,",
"`prob_metric_summarizer()`, or `curve_metric_summarizer()`"
)
)
)
truth <- enquo(truth)
estimate <- enquo(estimate)
case_weights <- enquo(case_weights)
validate_not_missing(truth, "truth")
validate_not_missing(estimate, "estimate")
# Explicit handling of length 1 character vectors as column names
nms <- colnames(data)
truth <- handle_chr_names(truth, nms)
estimate <- handle_chr_names(estimate, nms)
finalize_estimator_expr <- expr(
finalize_estimator(!!truth, estimator, metric_nm)
)
metric_tbl <- dplyr::summarise(
data,
.metric = metric_nm,
.estimator = eval_tidy(finalize_estimator_expr),
.estimate = metric_fn(
truth = !!truth,
estimate = !!estimate,
!!!spliceable_argument(estimator, "estimator"),
na_rm = na_rm,
!!!spliceable_argument(event_level, "event_level"),
!!!spliceable_case_weights(case_weights),
!!!metric_fn_options
)
)
dplyr::as_tibble(metric_tbl)
}
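# Migration sketch (comments only; `my_mae` / `my_mae_vec` are hypothetical
# names): the replacement summarizers follow the same shape as the calls
# used throughout this package, e.g. for a custom numeric metric:
#
#   my_mae <- function(data, truth, estimate, na_rm = TRUE,
#                      case_weights = NULL, ...) {
#     numeric_metric_summarizer(
#       name = "my_mae",
#       fn = my_mae_vec,
#       data = data,
#       truth = !!enquo(truth),
#       estimate = !!enquo(estimate),
#       na_rm = na_rm,
#       case_weights = !!enquo(case_weights)
#     )
#   }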
# ------------------------------------------------------------------------------
# Utilities
validate_not_missing <- function(x, nm) {
if (quo_is_missing(x)) {
abort(paste0(
"`", nm, "` ",
"is missing and must be supplied."
))
}
}
handle_chr_names <- function(x, nms) {
x_expr <- get_expr(x)
# Replace character with bare name
if (is.character(x_expr) && length(x_expr) == 1) {
# Only replace if it is actually a column name in `data`
if (x_expr %in% nms) {
# Replace the quosure with just the name
# Don't replace the quosure expression, this
# breaks with dplyr 0.8.0.1 and R <= 3.4.4
x <- as.name(x_expr)
}
}
x
}
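# e.g. (sketch): with `nms = c("obs", "pred")`, a quosure wrapping the string
# "obs" becomes the bare name `obs`, while a string that is not a column name
# passes through unchanged.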
spliceable_case_weights <- function(case_weights) {
if (quo_is_null(case_weights)) {
return(list())
}
list(case_weights = case_weights)
}
#' Developer function for calling new metrics
#'
#' @description
#' `r lifecycle::badge("deprecated")`
#'
#' `metric_vec_template()` has been soft-deprecated as of yardstick 1.2.0.
#' Please switch to use [check_metric] and [yardstick_remove_missing] functions.
#'
#' @param metric_impl The core implementation function of your custom metric.
#' This core implementation function is generally defined inside the vector
#' method of your metric function.
#'
#' @param truth The realized vector of `truth`. This is either a factor
#' or a numeric.
#'
#' @param estimate The realized `estimate` result. This is either a numeric
#' vector, a factor vector, or a numeric matrix (in the case of multiple
#' class probability columns) depending on your metric function.
#'
#' @param na_rm A `logical` value indicating whether `NA` values should be
#' stripped before the computation proceeds. `NA` values are removed
#' before getting to your core implementation function so you do not have to
#' worry about handling them yourself. If `na_rm = FALSE` and any `NA` values
#' exist, then `NA` is automatically returned.
#'
#' @param cls A character vector of length 1 or 2 corresponding to the
#' class that `truth` and `estimate` should be, respectively. If `truth` and
#' `estimate` are of the same class, just supply a vector of length 1. If
#' they are different, supply a vector of length 2. For matrices, it is best
#' to supply `"numeric"` as the class to check here.
#'
#' @param estimator The type of averaging to use. By this point, the averaging
#' type should be finalized, so this should be a character vector of length 1\.
#' By default, this character value is required to be one of: `"binary"`,
#' `"macro"`, `"micro"`, or `"macro_weighted"`. If your metric allows more
#' or less averaging methods, override this with `averaging_override`.
#'
#' @param case_weights Optionally, the realized case weights, as a numeric
#' vector. This must be the same length as `truth`, and will be considered in
#' the `na_rm` checks. If supplied, this will be passed on to `metric_impl` as
#' the named argument `case_weights`.
#'
#' @param ... Extra arguments to your core metric function, `metric_impl`, can
#' technically be passed here, but generally the extra args are added through
#' R's scoping rules because the core metric function is created on the fly
#' when the vector method is called.
#'
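#' @details
#' As a sketch, a vector metric historically wrapped `metric_vec_template()`
#' along these lines (`mse_vec()` is an illustrative name, not an export of
#' yardstick):
#'
#' ```
#' mse_vec <- function(truth, estimate, na_rm = TRUE, ...) {
#'   mse_impl <- function(truth, estimate) {
#'     mean((truth - estimate)^2)
#'   }
#'
#'   metric_vec_template(
#'     metric_impl = mse_impl,
#'     truth = truth,
#'     estimate = estimate,
#'     na_rm = na_rm,
#'     cls = "numeric"
#'   )
#' }
#' ```
#'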
#' @keywords internal
#' @export
metric_vec_template <- function(metric_impl,
truth,
estimate,
na_rm = TRUE,
cls = "numeric",
estimator = NULL,
case_weights = NULL,
...) {
lifecycle::deprecate_soft(
when = "1.2.0",
what = "metric_vec_template()",
with = I(
paste(
"`check_numeric_metric()`, `check_class_metric()`,",
"`check_class_metric()`, `yardstick_remove_missing()`, and `yardstick_any_missing()`"
)
)
)
abort_if_class_pred(truth)
estimate <- as_factor_from_class_pred(estimate)
validate_truth_estimate_checks(truth, estimate, cls, estimator)
validate_case_weights(case_weights, size = length(truth))
has_case_weights <- !is.null(case_weights)
if (na_rm) {
complete_cases <- stats::complete.cases(truth, estimate, case_weights)
truth <- truth[complete_cases]
if (is.matrix(estimate)) {
estimate <- estimate[complete_cases, , drop = FALSE]
} else {
estimate <- estimate[complete_cases]
}
if (has_case_weights) {
case_weights <- case_weights[complete_cases]
}
} else {
any_na <-
anyNA(truth) ||
anyNA(estimate) ||
(has_case_weights && anyNA(case_weights))
# return NA if any NA
if (any_na) {
return(NA_real_)
}
}
if (has_case_weights) {
metric_impl(truth = truth, estimate = estimate, case_weights = case_weights, ...)
} else {
# Assume signature doesn't have `case_weights =`
metric_impl(truth = truth, estimate = estimate, ...)
}
}
validate_truth_estimate_types <- function(truth, estimate, estimator) {
UseMethod("validate_truth_estimate_types")
}
validate_truth_estimate_types.default <- function(truth, estimate, estimator) {
cls <- class(truth)[[1]]
abort(paste0(
"`truth` class `", cls, "` is unknown. ",
"`truth` must be a numeric or a factor."
))
}
# factor / ?
validate_truth_estimate_types.factor <- function(truth, estimate, estimator) {
switch(estimator,
"binary" = binary_checks(truth, estimate),
# otherwise multiclass checks
multiclass_checks(truth, estimate)
)
}
# numeric / numeric
validate_truth_estimate_types.numeric <- function(truth, estimate, estimator) {
if (!is.numeric(estimate)) {
cls <- class(estimate)[[1]]
abort(paste0(
"`estimate` should be a numeric, not a `", cls, "`."
))
}
if (is.matrix(estimate)) {
abort(paste0(
"`estimate` should be a numeric vector, not a numeric matrix."
))
}
if (is.matrix(truth)) {
abort(paste0(
"`truth` should be a numeric vector, not a numeric matrix."
))
}
}
# double dispatch
# truth = factor
# estimate = ?
binary_checks <- function(truth, estimate) {
UseMethod("binary_checks", estimate)
}
# factor / unknown
binary_checks.default <- function(truth, estimate) {
cls <- class(estimate)[[1]]
abort(paste0(
"A binary metric was chosen but",
"`estimate` class `", cls, "` is unknown."
))
}
# factor / factor
binary_checks.factor <- function(truth, estimate) {
lvls_t <- levels(truth)
lvls_e <- levels(estimate)
if (!identical(lvls_t, lvls_e)) {
lvls_t <- paste0(lvls_t, collapse = ", ")
lvls_e <- paste0(lvls_e, collapse = ", ")
abort(
paste0(
"`truth` and `estimate` levels must be equivalent.\n",
"`truth`: ", lvls_t, "\n",
"`estimate`: ", lvls_e, "\n"
)
)
}
lvls <- levels(truth)
if (length(lvls) != 2) {
abort(paste0(
"`estimator` is binary, only two class `truth` factors are allowed. ",
"A factor with ", length(lvls), " levels was provided."
))
}
}
# factor / numeric
binary_checks.numeric <- function(truth, estimate) {
# nothing to check here, all good
}
# factor / matrix
binary_checks.matrix <- function(truth, estimate) {
abort(paste0(
"You are using a `binary` metric but have passed multiple columns to `...`"
))
}
# truth = factor
# estimate = ?
multiclass_checks <- function(truth, estimate) {
UseMethod("multiclass_checks", estimate)
}
# factor / unknown
multiclass_checks.default <- function(truth, estimate) {
cls <- class(estimate)[[1]]
abort(paste0("`estimate` class `", cls, "` is unknown."))
}
# factor / factor, >2 classes each
multiclass_checks.factor <- function(truth, estimate) {
lvls_t <- levels(truth)
lvls_e <- levels(estimate)
if (!identical(lvls_t, lvls_e)) {
lvls_t <- paste0(lvls_t, collapse = ", ")
lvls_e <- paste0(lvls_e, collapse = ", ")
abort(
paste0(
"`truth` and `estimate` levels must be equivalent.\n",
"`truth`: ", lvls_t, "\n",
"`estimate`: ", lvls_e, "\n"
)
)
}
}
# factor / numeric, but should be matrix
# (any probs function, if user went from binary->macro, they need to supply
# all cols)
multiclass_checks.numeric <- function(truth, estimate) {
# this is bad, but we want to be consistent in error messages
# with the factor / matrix check below
multiclass_checks.matrix(truth, as.matrix(estimate))
}
# factor / matrix (any probs functions)
multiclass_checks.matrix <- function(truth, estimate) {
n_lvls <- length(levels(truth))
n_cols <- ncol(estimate)
if (n_lvls != n_cols) {
abort(paste0(
"The number of levels in `truth` (", n_lvls, ") ",
"must match the number of columns supplied in `...` (", n_cols, ")."
))
}
}
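# Sketch of the dispatch above: with a 3-level `truth` factor and a numeric
# matrix of class probabilities, `multiclass_checks()` verifies that
# `ncol(estimate) == nlevels(truth)`; a bare numeric vector is first coerced
# to a one-column matrix, so it fails that same check with the same message.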
validate_truth_estimate_lengths <- function(truth, estimate) {
n_truth <- length(truth)
if (is.matrix(estimate)) {
n_estimate <- nrow(estimate)
} else {
n_estimate <- length(estimate)
}
if (n_truth != n_estimate) {
abort(paste0(
"Length of `truth` (", n_truth, ") ",
"and `estimate` (", n_estimate, ") must match."
))
}
}
validate_class <- function(x, nm, cls) {
# cls is always known to have an `is.cls()` function
is_cls <- get(paste0("is.", cls))
if (!is_cls(x)) {
cls_real <- class(x)[[1]]
abort(paste0(
"`", nm, "` ",
"should be a ", cls, " ",
"but a ", cls_real, " was supplied."
))
}
}
validate_truth_estimate_checks <- function(truth, estimate,
cls = "numeric",
estimator) {
if (length(cls) == 1) {
cls <- c(cls, cls)
}
validate_class(truth, "truth", cls[1])
validate_class(estimate, "estimate", cls[2])
validate_truth_estimate_types(truth, estimate, estimator)
validate_truth_estimate_lengths(truth, estimate)
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/deprecated-template.R |
#' @section Weight Calculation:
#' `get_weights()` accepts a confusion matrix and an `estimator` of type
#' `"macro"`, `"micro"`, or `"macro_weighted"` and returns the correct weights.
#' It is useful when creating multiclass metrics.
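#'
#' For example, a sketch with a hypothetical 3-class confusion matrix `tab`
#' whose truth columns contain one "a" observation and two "b" observations:
#'
#' ```
#' get_weights(tab, "macro")          # rep(1/3, 3)
#' get_weights(tab, "micro")          # 1
#' get_weights(tab, "macro_weighted") # colSums(tab) / sum(tab) = c(1/3, 2/3, 0)
#' ```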
#'
#' @export
#' @rdname developer-helpers
#' @param data A table with truth values as columns and predicted values
#' as rows.
get_weights <- function(data, estimator) {
if (estimator == "macro") {
n <- ncol(data)
rep(1 / n, times = n)
} else if (estimator == "micro") {
1
} else if (estimator == "macro_weighted") {
.col_sums <- colSums(data)
.col_sums / sum(.col_sums)
} else {
cli::cli_abort(
"{.arg estimator} type {.val {estimator}} is unknown."
)
}
}
# ------------------------------------------------------------------------------
#' @section Estimator Selection:
#'
#' `finalize_estimator()` is the engine for auto-selection of `estimator` based
#' on the type of `x`. Generally `x` is the `truth` column. This function
#' is called from the vector method of your metric.
#'
#' `finalize_estimator_internal()` is an S3 generic that you should extend for
#' your metric if it does not implement _only_ the following estimator types:
#' `"binary"`, `"macro"`, `"micro"`, and `"macro_weighted"`.
#' If your metric does support all of these, the default version of
#' `finalize_estimator_internal()` will autoselect `estimator` appropriately.
#' If you need to create a method, it should take the form:
#' `finalize_estimator_internal.metric_name`. Your method for
#' `finalize_estimator_internal()` should do two things:
#'
#' 1) If `estimator` is `NULL`, autoselect the `estimator` based on the
#' type of `x` and return a single character for the `estimator`.
#'
#' 2) If `estimator` is not `NULL`, validate that it is an allowed `estimator`
#' for your metric and return it.
#'
#' If you are using the default for `finalize_estimator_internal()`, the
#' `estimator` is selected using the following heuristics:
#'
#' 1) If `estimator` is not `NULL`, it is validated and returned immediately
#' as no auto-selection is needed.
#'
#' 2) If `x` is a:
#'
#' * `factor` - Then `"binary"` is returned if it has 2 levels, otherwise
#' `"macro"` is returned.
#'
#' * `numeric` - Then `"standard"` is returned.
#'
#' * `table` - Then `"binary"` is returned if it has 2 columns, otherwise
#' `"macro"` is returned. This is useful if you have `table` methods.
#'
#' * `matrix` - Then `"macro"` is returned.
#'
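#' As a minimal sketch of the default auto-selection:
#'
#' ```
#' finalize_estimator(factor(c("yes", "no")))   # "binary"
#' finalize_estimator(factor(c("a", "b", "c"))) # "macro"
#' finalize_estimator(c(1.5, 2.0, 2.5))         # "standard"
#' ```
#'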
#' @rdname developer-helpers
#'
#' @inheritParams rlang::args_error_context
#'
#' @param metric_class A single character of the name of the metric to autoselect
#' the estimator for. This should match the method name created for
#' `finalize_estimator_internal()`.
#'
#' @param x The column used to autoselect the estimator. This is generally
#' the `truth` column, but can also be a table if your metric has table methods.
#'
#' @param estimator Either `NULL` for auto-selection, or a single character
#' for the type of estimator to use.
#'
#' @seealso [metric-summarizers] [check_metric] [yardstick_remove_missing]
#'
#' @export
finalize_estimator <- function(x,
estimator = NULL,
metric_class = "default",
call = caller_env()) {
metric_dispatcher <- make_dummy(metric_class)
finalize_estimator_internal(metric_dispatcher, x, estimator, call = call)
}
#' @rdname developer-helpers
#' @param metric_dispatcher A simple dummy object with the class provided to
#' `metric_class`. This is created and passed along for you.
#' @export
finalize_estimator_internal <- function(metric_dispatcher,
x,
estimator,
call = caller_env()) {
UseMethod("finalize_estimator_internal")
}
#' @export
finalize_estimator_internal.default <- function(metric_dispatcher,
x,
estimator,
call = caller_env()) {
finalize_estimator_default(x, estimator, call = call)
}
# Accuracy, Kappa, Mean Log Loss, and MCC have natural multiclass extensions.
# Additionally, they all produce the same results regardless of which level
# is considered the "event". Because of this, the user cannot set the estimator,
# and it should only be "binary" or "multiclass"
#' @export
finalize_estimator_internal.accuracy <- function(metric_dispatcher,
x,
estimator,
call = caller_env()) {
if (is_multiclass(x)) {
"multiclass"
} else {
"binary"
}
}
#' @export
finalize_estimator_internal.kap <- finalize_estimator_internal.accuracy
#' @export
finalize_estimator_internal.mcc <- finalize_estimator_internal.accuracy
#' @export
finalize_estimator_internal.mn_log_loss <- finalize_estimator_internal.accuracy
#' @export
finalize_estimator_internal.brier_class <- finalize_estimator_internal.accuracy
# Classification cost extends naturally to multiclass and produces the same
# result regardless of the "event" level.
#' @export
finalize_estimator_internal.classification_cost <- finalize_estimator_internal.accuracy
# Curve methods don't use the estimator when printing, but do dispatch
# off it to determine whether to do one-vs-all or not
#' @export
finalize_estimator_internal.gain_curve <- finalize_estimator_internal.accuracy
#' @export
finalize_estimator_internal.lift_curve <- finalize_estimator_internal.accuracy
#' @export
finalize_estimator_internal.roc_curve <- finalize_estimator_internal.accuracy
#' @export
finalize_estimator_internal.pr_curve <- finalize_estimator_internal.accuracy
# The Hand-Till method is arguably the "best" multiclass extension
# because, like binary roc_auc, it is immune to class imbalance
#' @export
finalize_estimator_internal.roc_auc <- function(metric_dispatcher,
x,
estimator,
call = caller_env()) {
validate_estimator(
estimator = estimator,
estimator_override = c("binary", "macro", "macro_weighted", "hand_till")
)
if (!is.null(estimator)) {
return(estimator)
}
if (is_multiclass(x)) {
"hand_till"
} else {
"binary"
}
}
# PR AUC and Gain Capture don't have micro methods currently
#' @export
finalize_estimator_internal.pr_auc <- function(metric_dispatcher,
x,
estimator,
call = caller_env()) {
validate_estimator(
estimator = estimator,
estimator_override = c("binary", "macro", "macro_weighted")
)
if (!is.null(estimator)) {
return(estimator)
}
if (is_multiclass(x)) {
"macro"
} else {
"binary"
}
}
#' @export
finalize_estimator_internal.gain_capture <- finalize_estimator_internal.pr_auc
# Default ----------------------------------------------------------------------
finalize_estimator_default <- function(x, estimator, call = caller_env()) {
if (!is.null(estimator)) {
validate_estimator(estimator, call = call)
return(estimator)
}
UseMethod("finalize_estimator_default")
}
finalize_estimator_default.default <- function(x,
estimator,
call = caller_env()) {
"binary"
}
finalize_estimator_default.matrix <- function(x,
estimator,
call = caller_env()) {
"macro"
}
# Covers all numeric metric functions
finalize_estimator_default.numeric <- function(x,
estimator,
call = caller_env()) {
"standard"
}
# Covers all dynamic survival functions
finalize_estimator_default.Surv <- function(x,
estimator,
call = caller_env()) {
"standard"
}
finalize_estimator_default.table <- function(x,
estimator,
call = caller_env()) {
if (is_multiclass(x)) {
"macro"
} else {
"binary"
}
}
finalize_estimator_default.factor <- function(x,
estimator,
call = caller_env()) {
if (is_multiclass(x)) {
"macro"
} else {
"binary"
}
}
# Util -------------------------------------------------------------------------
make_dummy <- function(metric_class) {
structure(list(), class = metric_class)
}
is_multiclass <- function(x) {
UseMethod("is_multiclass")
}
is_multiclass.default <- function(x) {
# don't throw an error here;
# this case should only happen if x is an
# unknown type, and better error catching
# is done later to return a good error message
FALSE
}
is_multiclass.table <- function(x) {
n_col <- ncol(x)
# binary
if (n_col <= 2) {
return(FALSE)
}
# multiclass
if (n_col > 2) {
return(TRUE)
}
}
is_multiclass.factor <- function(x) {
lvls <- levels(x)
n_lvls <- length(lvls)
if (n_lvls <= 2) {
return(FALSE)
}
if (n_lvls > 2) {
return(TRUE)
}
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/estimator-helpers.R |
# Internal helper to query a default `event_level`
#
# 1) Warn about deprecation if `yardstick.event_first` is set; the option
#    itself is ignored entirely
# 2) Return `"first"` as the default event level
#
# Metric functions that use this helper can completely ignore the global option
# by setting the `event_level` argument to `"first"` or `"second"` directly.
yardstick_event_level <- function() {
opt <- getOption("yardstick.event_first")
if (!is.null(opt)) {
lifecycle::deprecate_warn(
when = "0.0.7",
what = I("The global option `yardstick.event_first`"),
with = I("the metric function argument `event_level`"),
details = "The global option is being ignored entirely."
)
}
"first"
}
is_event_first <- function(event_level) {
validate_event_level(event_level)
identical(event_level, "first")
}
validate_event_level <- function(event_level) {
if (identical(event_level, "first")) {
return(invisible())
}
if (identical(event_level, "second")) {
return(invisible())
}
cli::cli_abort("{.arg event_level} must be {.val first} or {.val second}.")
}
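# As a sketch:
#   is_event_first("first")      # TRUE
#   is_event_first("second")     # FALSE
#   validate_event_level("both") # errors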
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/event-level.R |
#' Create groupwise metrics
#'
#' Groupwise metrics quantify the disparity in value of a metric across a
#' number of groups. Groupwise metrics with a value of zero indicate that the
#' underlying metric is equal across groups. yardstick defines
#' several common fairness metrics using this function, such as
#' [demographic_parity()], [equal_opportunity()], and [equalized_odds()].
#'
#' Note that _all_ yardstick metrics are group-aware in that, when passed
#' grouped data, they will return metric values calculated for each group.
#' When passed grouped data, groupwise metrics also return metric values
#' for each group, but those metric values are calculated by first additionally
#' grouping by the variable passed to `by` and then summarizing the per-group
#' metric estimates across groups using the function passed as the
#' `aggregate` argument. Learn more about grouping behavior in yardstick using
#' `vignette("grouping", "yardstick")`.
#'
#' @param fn A yardstick metric function or metric set.
#' @param name The name of the metric to place in the `.metric` column
#' of the output.
#' @param aggregate A function to summarize the generated metric set results.
#' The function takes metric set results as the first argument and returns
#' a single numeric giving the `.estimate` value as output. See the Value and
#' Examples sections for example uses.
#' @inheritParams new_class_metric
#'
#' @section Relevant Group Level:
#' Additional arguments can be passed to the metric function that this
#' function factory ultimately produces (i.e., the output of its output).
#' That is:
#'
#' ```
#' res_fairness <- new_groupwise_metric(...)
#' res_by <- res_fairness(by)
#' res_by(..., additional_arguments_to_aggregate = TRUE)
#' ```
#'
#' For finer control of how groups in `by` are treated, use the
#' `aggregate` argument.
#'
#' @return
#' This function is a
#' [function factory](https://adv-r.hadley.nz/function-factories.html); its
#' output is itself a function. Further, the functions that this function
#' outputs are also function factories. More explicitly, this looks like:
#'
#' ```
#' # a function with similar implementation to `demographic_parity()`:
#' diff_range <- function(x) {diff(range(x$.estimate))}
#'
#' dem_parity <-
#' new_groupwise_metric(
#' fn = detection_prevalence,
#' name = "dem_parity",
#' aggregate = diff_range
#' )
#' ```
#'
#' The outputted `dem_parity` is a function that takes one argument, `by`,
#' indicating the data-masked variable giving the sensitive feature.
#'
#' When called with a `by` argument, `dem_parity` will return a yardstick
#' metric function like any other:
#'
#' ```
#' dem_parity_by_gender <- dem_parity(gender)
#' ```
#'
#' Note that `dem_parity` doesn't take any arguments other than `by`, and thus
#' knows nothing about the data it will be applied to other than that it ought
#' to have a column with name `"gender"` in it.
#'
#' The output `dem_parity_by_gender` is a metric function that takes the
#' same arguments as the function supplied as `fn`, in this case
#' `detection_prevalence`. It will thus interface like any other yardstick
#' function except that it will look for a `"gender"` column in
#' the data it's supplied.
#'
#' In addition to the examples below, see the documentation on the
#' return value of fairness metrics like [demographic_parity()],
#' [equal_opportunity()], or [equalized_odds()] to learn more about how the
#' output of this function can be used.
#'
#' @examples
#' data(hpc_cv)
#'
#' # `demographic_parity`, among other fairness metrics,
#' # is generated with `new_groupwise_metric()`:
#' diff_range <- function(x) {diff(range(x$.estimate))}
#' demographic_parity_ <-
#' new_groupwise_metric(
#' fn = detection_prevalence,
#' name = "demographic_parity",
#' aggregate = diff_range
#' )
#'
#' m_set <- metric_set(demographic_parity_(Resample))
#'
#' m_set(hpc_cv, truth = obs, estimate = pred)
#'
#' # the `post` argument can be used to accommodate a wide
#' # variety of parameterizations. to encode demographic
#' # parity as a ratio inside of a difference, for example:
#' ratio_range <- function(x, ...) {
#' range <- range(x$.estimate)
#' range[1] / range[2]
#' }
#'
#' demographic_parity_ratio <-
#' new_groupwise_metric(
#' fn = detection_prevalence,
#' name = "demographic_parity_ratio",
#' aggregate = ratio_range
#' )
#'
#' @export
new_groupwise_metric <- function(fn, name, aggregate, direction = "minimize") {
if (is_missing(fn) || !inherits_any(fn, c("metric", "metric_set"))) {
cli::cli_abort(
"{.arg fn} must be a metric function or metric set."
)
}
if (is_missing(name) || !is_string(name)) {
cli::cli_abort(
"{.arg name} must be a string."
)
}
if (is_missing(aggregate) || !is_function(aggregate)) {
cli::cli_abort(
"{.arg aggregate} must be a function."
)
}
arg_match(
direction,
values = c("maximize", "minimize", "zero")
)
metric_factory <-
function(by) {
by_str <- as_string(enexpr(by))
res <-
function(data, ...) {
gp_vars <- dplyr::group_vars(data)
if (by_str %in% gp_vars) {
cli::cli_abort(
"Metric is internally grouped by {.field {by_str}}; grouping \\
{.arg data} by {.field {by_str}} is not well-defined."
)
}
# error informatively when `fn` is a metric set; see `eval_safely()`
data_grouped <- dplyr::group_by(data, {{by}}, .add = TRUE)
res <-
tryCatch(
fn(data_grouped, ...),
error = function(cnd) {
if (!is.null(cnd$parent)) {
cnd <- cnd$parent
}
cli::cli_abort(conditionMessage(cnd), call = call(name))
}
)
# restore the grouping structure of the supplied data
if (length(gp_vars) > 0) {
res <- dplyr::group_by(res, !!!dplyr::groups(data), .add = FALSE)
}
group_rows <- dplyr::group_rows(res)
group_keys <- dplyr::group_keys(res)
res <- dplyr::ungroup(res)
groups <- vec_chop(res, indices = group_rows)
out <- vector("list", length = length(groups))
for (i in seq_along(groups)) {
group <- groups[[i]]
.estimate <- aggregate(group)
if (!is_bare_numeric(.estimate)) {
cli::cli_abort(
"{.arg aggregate} must return a single numeric value.",
call = call2("new_groupwise_metric")
)
}
elt_out <- list(
.metric = name,
.by = by_str,
.estimator = group$.estimator[1],
.estimate = .estimate
)
out[[i]] <- tibble::new_tibble(elt_out)
}
group_keys <- vctrs::vec_rep_each(group_keys, times = list_sizes(out))
out <- vec_rbind(!!!out)
out <- vec_cbind(group_keys, out)
out
}
res <- new_class_metric(res, direction = "minimize")
structure(
res,
direction = direction,
by = by_str,
class = groupwise_metric_class(fn)
)
}
structure(metric_factory, class = c("metric_factory", "function"))
}
groupwise_metric_class <- function(fn) {
if (inherits(fn, "metric")) {
return(class(fn))
}
class(attr(fn, "metrics")[[1]])
}
#' @noRd
#' @export
print.metric_factory <- function(x, ...) {
cat(format(x), sep = "\n")
invisible(x)
}
#' @export
format.metric_factory <- function(x, ...) {
cli::cli_format_method(
cli::cli_text("A {.help [metric factory](yardstick::new_groupwise_metric)}")
)
}
diff_range <- function(x) {
estimates <- x$.estimate
max(estimates) - min(estimates)
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/fair-aaa.R |
#' Demographic parity
#'
#' @description
#' Demographic parity is satisfied when a model's predictions have the
#' same predicted positive rate across groups. A value of 0 indicates parity
#' across groups. Note that this definition does not depend on the true
#' outcome; the `truth` argument is included in outputted metrics
#' for consistency.
#'
#' `demographic_parity()` is calculated as the difference between the largest
#' and smallest value of [detection_prevalence()] across groups.
#'
#' Demographic parity is sometimes referred to as group fairness,
#' disparate impact, or statistical parity.
#'
#' See the "Measuring Disparity" section for details on implementation.
#'
#' @param by The column identifier for the sensitive feature. This should be an
#' unquoted column name referring to a column in the un-preprocessed data.
#'
#' @templateVar fn demographic_parity
#' @templateVar internal_fn detection_prevalence
#' @template return-fair
#' @template event-fair
#' @template examples-fair
#'
#' @family fairness metrics
#'
#' @references
#'
#' Agarwal, A., Beygelzimer, A., Dudik, M., Langford, J., & Wallach, H. (2018).
#' "A Reductions Approach to Fair Classification." Proceedings of the 35th
#' International Conference on Machine Learning, in Proceedings of Machine
#' Learning Research. 80:60-69.
#'
#' Verma, S., & Rubin, J. (2018). "Fairness definitions explained". In
#' Proceedings of the international workshop on software fairness (pp. 1-7).
#'
#' Bird, S., Dudík, M., Edgar, R., Horn, B., Lutz, R., Milan, V., ... & Walker,
#' K. (2020). "Fairlearn: A toolkit for assessing and improving fairness in AI".
#' Microsoft, Tech. Rep. MSR-TR-2020-32.
#'
#' @export
demographic_parity <-
new_groupwise_metric(
fn = detection_prevalence,
name = "demographic_parity",
aggregate = diff_range
)
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/fair-demographic_parity.R |
#' Equal opportunity
#'
#' @description
#'
#' Equal opportunity is satisfied when a model's predictions have the same
#' true positive and false negative rates across protected groups. A value of
#' 0 indicates parity across groups.
#'
#' `equal_opportunity()` is calculated as the difference between the largest
#' and smallest value of [sens()] across groups.
#'
#' Equal opportunity is sometimes referred to as equality of opportunity.
#'
#' See the "Measuring Disparity" section for details on implementation.
#'
#' @inheritParams demographic_parity
#'
#' @templateVar fn equal_opportunity
#' @templateVar internal_fn sens
#' @template return-fair
#' @template event-fair
#' @template examples-fair
#'
#' @family fairness metrics
#'
#' @references
#'
#' Hardt, M., Price, E., & Srebro, N. (2016). "Equality of opportunity in
#' supervised learning". Advances in neural information processing systems, 29.
#'
#' Verma, S., & Rubin, J. (2018). "Fairness definitions explained". In
#' Proceedings of the international workshop on software fairness (pp. 1-7).
#'
#' Bird, S., Dudík, M., Edgar, R., Horn, B., Lutz, R., Milan, V., ... & Walker,
#' K. (2020). "Fairlearn: A toolkit for assessing and improving fairness in AI".
#' Microsoft, Tech. Rep. MSR-TR-2020-32.
#'
#' @export
equal_opportunity <-
new_groupwise_metric(
fn = sens,
name = "equal_opportunity",
aggregate = diff_range
)
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/fair-equal_opportunity.R |
max_positive_rate_diff <- function(x) {
metric_values <- vec_split(x, x$.metric)
positive_rate_diff <- vapply(metric_values$val, diff_range, numeric(1))
max(positive_rate_diff)
}
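# Sketch: `x` holds metric set results with sens and spec rows per group;
# within each metric, take diff(range(.estimate)) across groups, then return
# the larger of the two disparities.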
#' Equalized odds
#'
#' @description
#'
#' Equalized odds is satisfied when a model's predictions have the same false
#' positive, true positive, false negative, and true negative rates across
#' protected groups. A value of 0 indicates parity across groups.
#'
#' By default, this function takes the maximum difference in range of [sens()]
#' and [spec()] `.estimate`s across groups. That is, the maximum pair-wise
#' disparity in [sens()] or [spec()] between groups is the return value of
#' `equalized_odds()`'s `.estimate`.
#'
#' Equalized odds is sometimes referred to as conditional procedure accuracy
#' equality or disparate mistreatment.
#'
#' See the "Measuring disparity" section for details on implementation.
#'
#' @inheritParams demographic_parity
#'
#' @templateVar fn equalized_odds
#' @templateVar internal_fn [sens()] and [spec()]
#' @template return-fair
#' @template examples-fair
#'
#' @section Measuring Disparity:
#' For finer control of group treatment, construct a context-aware fairness
#' metric with the [new_groupwise_metric()] function by passing a custom `aggregate`
#' function:
#'
#' ```
#' # see yardstick:::max_positive_rate_diff for the actual `aggregate()`
#' diff_range <- function(x, ...) {diff(range(x$.estimate))}
#'
#' equalized_odds_2 <-
#' new_groupwise_metric(
#' fn = metric_set(sens, spec),
#' name = "equalized_odds_2",
#' aggregate = diff_range
#' )
#' ```
#'
#' In `aggregate()`, `x` is the [metric_set()] output with [sens()] and [spec()]
#' values for each group, and `...` gives additional arguments (such as a grouping
#' level to refer to as the "baseline") to pass to the function outputted
#' by `equalized_odds_2()` for context.
#'
#' @family fairness metrics
#'
#' @references
#'
#' Agarwal, A., Beygelzimer, A., Dudik, M., Langford, J., & Wallach, H. (2018).
#' "A Reductions Approach to Fair Classification." Proceedings of the 35th
#' International Conference on Machine Learning, in Proceedings of Machine
#' Learning Research. 80:60-69.
#'
#' Verma, S., & Rubin, J. (2018). "Fairness definitions explained". In
#' Proceedings of the international workshop on software fairness (pp. 1-7).
#'
#' Bird, S., Dudík, M., Edgar, R., Horn, B., Lutz, R., Milan, V., ... & Walker,
#' K. (2020). "Fairlearn: A toolkit for assessing and improving fairness in AI".
#' Microsoft, Tech. Rep. MSR-TR-2020-32.
#'
#' @export
equalized_odds <-
new_groupwise_metric(
fn = metric_set(sens, spec),
name = "equalized_odds",
aggregate = max_positive_rate_diff
)
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/fair-equalized_odds.R |
# Standalone file: do not edit by hand
# Source: <https://github.com/r-lib/rlang/blob/main/R/standalone-obj-type.R>
# ----------------------------------------------------------------------
#
# ---
# repo: r-lib/rlang
# file: standalone-obj-type.R
# last-updated: 2023-05-01
# license: https://unlicense.org
# imports: rlang (>= 1.1.0)
# ---
#
# ## Changelog
#
# 2023-05-01:
# - `obj_type_friendly()` now only displays the first class of S3 objects.
#
# 2023-03-30:
# - `stop_input_type()` now handles `I()` input literally in `arg`.
#
# 2022-10-04:
# - `obj_type_friendly(value = TRUE)` now shows numeric scalars
# literally.
# - `stop_friendly_type()` now takes `show_value`, passed to
# `obj_type_friendly()` as the `value` argument.
#
# 2022-10-03:
# - Added `allow_na` and `allow_null` arguments.
# - `NULL` is now backticked.
# - Better friendly type for infinities and `NaN`.
#
# 2022-09-16:
# - Unprefixed usage of rlang functions with `rlang::` to
# avoid onLoad issues when called from rlang (#1482).
#
# 2022-08-11:
# - Prefixed usage of rlang functions with `rlang::`.
#
# 2022-06-22:
# - `friendly_type_of()` is now `obj_type_friendly()`.
# - Added `obj_type_oo()`.
#
# 2021-12-20:
# - Added support for scalar values and empty vectors.
# - Added `stop_input_type()`
#
# 2021-06-30:
# - Added support for missing arguments.
#
# 2021-04-19:
# - Added support for matrices and arrays (#141).
# - Added documentation.
# - Added changelog.
#
# nocov start
#' Return English-friendly type
#' @param x Any R object.
#' @param value Whether to describe the value of `x`. Special values
#' like `NA` or `""` are always described.
#' @param length Whether to mention the length of vectors and lists.
#' @return A string describing the type. Starts with an indefinite
#' article, e.g. "an integer vector".
#' @noRd
obj_type_friendly <- function(x, value = TRUE) {
if (is_missing(x)) {
return("absent")
}
if (is.object(x)) {
if (inherits(x, "quosure")) {
type <- "quosure"
} else {
type <- class(x)[[1L]]
}
return(sprintf("a <%s> object", type))
}
if (!is_vector(x)) {
return(.rlang_as_friendly_type(typeof(x)))
}
n_dim <- length(dim(x))
if (!n_dim) {
if (!is_list(x) && length(x) == 1) {
if (is_na(x)) {
return(switch(
typeof(x),
logical = "`NA`",
integer = "an integer `NA`",
double =
if (is.nan(x)) {
"`NaN`"
} else {
"a numeric `NA`"
},
complex = "a complex `NA`",
character = "a character `NA`",
.rlang_stop_unexpected_typeof(x)
))
}
show_infinites <- function(x) {
if (x > 0) {
"`Inf`"
} else {
"`-Inf`"
}
}
str_encode <- function(x, width = 30, ...) {
if (nchar(x) > width) {
x <- substr(x, 1, width - 3)
x <- paste0(x, "...")
}
encodeString(x, ...)
}
if (value) {
if (is.numeric(x) && is.infinite(x)) {
return(show_infinites(x))
}
if (is.numeric(x) || is.complex(x)) {
number <- as.character(round(x, 2))
what <- if (is.complex(x)) "the complex number" else "the number"
return(paste(what, number))
}
return(switch(
typeof(x),
logical = if (x) "`TRUE`" else "`FALSE`",
character = {
what <- if (nzchar(x)) "the string" else "the empty string"
paste(what, str_encode(x, quote = "\""))
},
raw = paste("the raw value", as.character(x)),
.rlang_stop_unexpected_typeof(x)
))
}
return(switch(
typeof(x),
logical = "a logical value",
integer = "an integer",
double = if (is.infinite(x)) show_infinites(x) else "a number",
complex = "a complex number",
character = if (nzchar(x)) "a string" else "\"\"",
raw = "a raw value",
.rlang_stop_unexpected_typeof(x)
))
}
if (length(x) == 0) {
return(switch(
typeof(x),
logical = "an empty logical vector",
integer = "an empty integer vector",
double = "an empty numeric vector",
complex = "an empty complex vector",
character = "an empty character vector",
raw = "an empty raw vector",
list = "an empty list",
.rlang_stop_unexpected_typeof(x)
))
}
}
vec_type_friendly(x)
}
vec_type_friendly <- function(x, length = FALSE) {
if (!is_vector(x)) {
abort("`x` must be a vector.")
}
type <- typeof(x)
n_dim <- length(dim(x))
add_length <- function(type) {
if (length && !n_dim) {
paste0(type, sprintf(" of length %s", length(x)))
} else {
type
}
}
if (type == "list") {
if (n_dim < 2) {
return(add_length("a list"))
} else if (is.data.frame(x)) {
return("a data frame")
} else if (n_dim == 2) {
return("a list matrix")
} else {
return("a list array")
}
}
type <- switch(
type,
logical = "a logical %s",
integer = "an integer %s",
numeric = ,
double = "a double %s",
complex = "a complex %s",
character = "a character %s",
raw = "a raw %s",
type = paste0("a ", type, " %s")
)
if (n_dim < 2) {
kind <- "vector"
} else if (n_dim == 2) {
kind <- "matrix"
} else {
kind <- "array"
}
out <- sprintf(type, kind)
if (n_dim >= 2) {
out
} else {
add_length(out)
}
}
.rlang_as_friendly_type <- function(type) {
switch(
type,
list = "a list",
NULL = "`NULL`",
environment = "an environment",
externalptr = "a pointer",
weakref = "a weak reference",
S4 = "an S4 object",
name = ,
symbol = "a symbol",
language = "a call",
pairlist = "a pairlist node",
expression = "an expression vector",
char = "an internal string",
promise = "an internal promise",
... = "an internal dots object",
any = "an internal `any` object",
bytecode = "an internal bytecode object",
primitive = ,
builtin = ,
special = "a primitive function",
closure = "a function",
type
)
}
.rlang_stop_unexpected_typeof <- function(x, call = caller_env()) {
abort(
sprintf("Unexpected type <%s>.", typeof(x)),
call = call
)
}
#' Return OO type
#' @param x Any R object.
#' @return One of `"bare"` (for non-OO objects), `"S3"`, `"S4"`,
#' `"R6"`, or `"R7"`.
#' @noRd
obj_type_oo <- function(x) {
if (!is.object(x)) {
return("bare")
}
class <- inherits(x, c("R6", "R7_object"), which = TRUE)
if (class[[1]]) {
"R6"
} else if (class[[2]]) {
"R7"
} else if (isS4(x)) {
"S4"
} else {
"S3"
}
}
#' @param x The object type which does not conform to `what`. Its
#' `obj_type_friendly()` is taken and mentioned in the error message.
#' @param what The friendly expected type as a string. Can be a
#' character vector of expected types, in which case the error
#' message mentions all of them in an "or" enumeration.
#' @param show_value Passed to `value` argument of `obj_type_friendly()`.
#' @param ... Arguments passed to [abort()].
#' @inheritParams args_error_context
#' @noRd
stop_input_type <- function(x,
what,
...,
allow_na = FALSE,
allow_null = FALSE,
show_value = TRUE,
arg = caller_arg(x),
call = caller_env()) {
# From standalone-cli.R
cli <- env_get_list(
nms = c("format_arg", "format_code"),
last = topenv(),
default = function(x) sprintf("`%s`", x),
inherit = TRUE
)
if (allow_na) {
what <- c(what, cli$format_code("NA"))
}
if (allow_null) {
what <- c(what, cli$format_code("NULL"))
}
if (length(what)) {
what <- oxford_comma(what)
}
if (inherits(arg, "AsIs")) {
format_arg <- identity
} else {
format_arg <- cli$format_arg
}
message <- sprintf(
"%s must be %s, not %s.",
format_arg(arg),
what,
obj_type_friendly(x, value = show_value)
)
abort(message, ..., call = call, arg = arg)
}
oxford_comma <- function(chr, sep = ", ", final = "or") {
n <- length(chr)
if (n < 2) {
return(chr)
}
head <- chr[seq_len(n - 1)]
last <- chr[n]
head <- paste(head, collapse = sep)
# Write a or b. But a, b, or c.
if (n > 2) {
paste0(head, sep, final, " ", last)
} else {
paste0(head, " ", final, " ", last)
}
}
# nocov end
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/import-standalone-obj-type.R |
# Standalone file: do not edit by hand
# Source: <https://github.com/tidymodels/parsnip/blob/main/R/standalone-survival.R>
# ----------------------------------------------------------------------
#
# ---
# repo: tidymodels/parsnip
# file: standalone-survival.R
# last-updated: 2023-02-28
# license: https://unlicense.org
# ---
# This file provides a portable set of helper functions for Surv objects
# ## Changelog
# 2023-02-28:
# * Initial version
# @param surv A [survival::Surv()] object
# @details
# `.is_censored_right()` always returns a logical while
# `.check_censored_right()` will fail if `FALSE`.
#
# `.extract_status()` will return the data as 0/1 even if the original object
# used the legacy encoding of 1/2. See [survival::Surv()].
# @return
# - `.extract_surv_status()` returns a vector.
# - `.extract_surv_time()` returns a vector when the type is `"right"` or `"left"`
# and a tibble otherwise.
# - Functions starting with `.is_` or `.check_` return logicals although the
# latter will fail when `FALSE`.
# nocov start
# These are tested in the extratests repo since it would require a dependency
# on the survival package. https://github.com/tidymodels/extratests/pull/78
.is_censored_right <- function(surv) {
.check_cens_type(surv, fail = FALSE)
}
.check_censored_right <- function(surv) {
.check_cens_type(surv, fail = TRUE)
} # will add more as we need them
.extract_surv_time <- function(surv) {
.is_surv(surv)
keepers <- c("time", "start", "stop", "time1", "time2")
res <- surv[, colnames(surv) %in% keepers]
if (NCOL(res) > 1) {
res <- dplyr::tibble(as.data.frame(res))
}
res
}
.extract_surv_status <- function(surv) {
.is_surv(surv)
res <- surv[, "status"]
un_vals <- sort(unique(res))
event_type_to_01 <- !(.extract_surv_type(surv) %in% c("interval", "interval2", "mstate"))
if (
event_type_to_01 &&
(identical(un_vals, 1:2) || identical(un_vals, c(1.0, 2.0)))) {
res <- res - 1
}
res
}
.is_surv <- function(surv, fail = TRUE) {
is_surv <- inherits(surv, "Surv")
if (!is_surv && fail) {
abort("The object does not have class `Surv`.", call = NULL)
}
is_surv
}
.extract_surv_type <- function(surv) {
attr(surv, "type")
}
.check_cens_type <- function(surv, type = "right", fail = TRUE) {
.is_surv(surv)
obj_type <- .extract_surv_type(surv)
good_type <- all(obj_type %in% type)
if (!good_type && fail) {
c_list <- paste0("'", type, "'")
msg <- cli::format_inline("For this usage, the allowed censoring type{?s} {?is/are}: {c_list}")
abort(msg, call = NULL)
}
good_type
}
# nocov end
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/import-standalone-survival.R |
# Standalone file: do not edit by hand
# Source: <https://github.com/r-lib/rlang/blob/main/R/standalone-types-check.R>
# ----------------------------------------------------------------------
#
# ---
# repo: r-lib/rlang
# file: standalone-types-check.R
# last-updated: 2023-03-13
# license: https://unlicense.org
# dependencies: standalone-obj-type.R
# imports: rlang (>= 1.1.0)
# ---
#
# ## Changelog
#
# 2023-03-13:
# - Improved error messages of number checkers (@teunbrand)
# - Added `allow_infinite` argument to `check_number_whole()` (@mgirlich).
# - Added `check_data_frame()` (@mgirlich).
#
# 2023-03-07:
# - Added dependency on rlang (>= 1.1.0).
#
# 2023-02-15:
# - Added `check_logical()`.
#
# - `check_bool()`, `check_number_whole()`, and
# `check_number_decimal()` are now implemented in C.
#
# - For efficiency, `check_number_whole()` and
# `check_number_decimal()` now take a `NULL` default for `min` and
# `max`. This makes it possible to bypass unnecessary type-checking
# and comparisons in the default case of no bounds checks.
#
# 2022-10-07:
# - `check_number_whole()` and `_decimal()` no longer treat
# non-numeric types such as factors or dates as numbers. Numeric
# types are detected with `is.numeric()`.
#
# 2022-10-04:
# - Added `check_name()` that forbids the empty string.
# `check_string()` allows the empty string by default.
#
# 2022-09-28:
# - Removed `what` arguments.
# - Added `allow_na` and `allow_null` arguments.
# - Added `allow_decimal` and `allow_infinite` arguments.
# - Improved errors with absent arguments.
#
#
# 2022-09-16:
# - Unprefixed usage of rlang functions with `rlang::` to
# avoid onLoad issues when called from rlang (#1482).
#
# 2022-08-11:
# - Added changelog.
#
# nocov start
# Scalars -----------------------------------------------------------------
.standalone_types_check_dot_call <- .Call
check_bool <- function(x,
...,
allow_na = FALSE,
allow_null = FALSE,
arg = caller_arg(x),
call = caller_env()) {
if (!missing(x) && .standalone_types_check_dot_call(ffi_standalone_is_bool_1.0.7, x, allow_na, allow_null)) {
return(invisible(NULL))
}
stop_input_type(
x,
c("`TRUE`", "`FALSE`"),
...,
allow_na = allow_na,
allow_null = allow_null,
arg = arg,
call = call
)
}
check_string <- function(x,
...,
allow_empty = TRUE,
allow_na = FALSE,
allow_null = FALSE,
arg = caller_arg(x),
call = caller_env()) {
if (!missing(x)) {
is_string <- .rlang_check_is_string(
x,
allow_empty = allow_empty,
allow_na = allow_na,
allow_null = allow_null
)
if (is_string) {
return(invisible(NULL))
}
}
stop_input_type(
x,
"a single string",
...,
allow_na = allow_na,
allow_null = allow_null,
arg = arg,
call = call
)
}
.rlang_check_is_string <- function(x,
allow_empty,
allow_na,
allow_null) {
if (is_string(x)) {
if (allow_empty || !is_string(x, "")) {
return(TRUE)
}
}
if (allow_null && is_null(x)) {
return(TRUE)
}
if (allow_na && (identical(x, NA) || identical(x, na_chr))) {
return(TRUE)
}
FALSE
}
check_name <- function(x,
...,
allow_null = FALSE,
arg = caller_arg(x),
call = caller_env()) {
if (!missing(x)) {
is_string <- .rlang_check_is_string(
x,
allow_empty = FALSE,
allow_na = FALSE,
allow_null = allow_null
)
if (is_string) {
return(invisible(NULL))
}
}
stop_input_type(
x,
"a valid name",
...,
allow_na = FALSE,
allow_null = allow_null,
arg = arg,
call = call
)
}
IS_NUMBER_true <- 0
IS_NUMBER_false <- 1
IS_NUMBER_oob <- 2
check_number_decimal <- function(x,
...,
min = NULL,
max = NULL,
allow_infinite = TRUE,
allow_na = FALSE,
allow_null = FALSE,
arg = caller_arg(x),
call = caller_env()) {
if (missing(x)) {
exit_code <- IS_NUMBER_false
} else if (0 == (exit_code <- .standalone_types_check_dot_call(
ffi_standalone_check_number_1.0.7,
x,
allow_decimal = TRUE,
min,
max,
allow_infinite,
allow_na,
allow_null
))) {
return(invisible(NULL))
}
.stop_not_number(
x,
...,
exit_code = exit_code,
allow_decimal = TRUE,
min = min,
max = max,
allow_na = allow_na,
allow_null = allow_null,
arg = arg,
call = call
)
}
check_number_whole <- function(x,
...,
min = NULL,
max = NULL,
allow_infinite = FALSE,
allow_na = FALSE,
allow_null = FALSE,
arg = caller_arg(x),
call = caller_env()) {
if (missing(x)) {
exit_code <- IS_NUMBER_false
} else if (0 == (exit_code <- .standalone_types_check_dot_call(
ffi_standalone_check_number_1.0.7,
x,
allow_decimal = FALSE,
min,
max,
allow_infinite,
allow_na,
allow_null
))) {
return(invisible(NULL))
}
.stop_not_number(
x,
...,
exit_code = exit_code,
allow_decimal = FALSE,
min = min,
max = max,
allow_na = allow_na,
allow_null = allow_null,
arg = arg,
call = call
)
}
.stop_not_number <- function(x,
...,
exit_code,
allow_decimal,
min,
max,
allow_na,
allow_null,
arg,
call) {
if (allow_decimal) {
what <- "a number"
} else {
what <- "a whole number"
}
if (exit_code == IS_NUMBER_oob) {
min <- min %||% -Inf
max <- max %||% Inf
if (min > -Inf && max < Inf) {
what <- sprintf("%s between %s and %s", what, min, max)
} else if (x < min) {
what <- sprintf("%s larger than or equal to %s", what, min)
} else if (x > max) {
what <- sprintf("%s smaller than or equal to %s", what, max)
} else {
abort("Unexpected state in OOB check", .internal = TRUE)
}
}
stop_input_type(
x,
what,
...,
allow_na = allow_na,
allow_null = allow_null,
arg = arg,
call = call
)
}
check_symbol <- function(x,
...,
allow_null = FALSE,
arg = caller_arg(x),
call = caller_env()) {
if (!missing(x)) {
if (is_symbol(x)) {
return(invisible(NULL))
}
if (allow_null && is_null(x)) {
return(invisible(NULL))
}
}
stop_input_type(
x,
"a symbol",
...,
allow_na = FALSE,
allow_null = allow_null,
arg = arg,
call = call
)
}
check_arg <- function(x,
...,
allow_null = FALSE,
arg = caller_arg(x),
call = caller_env()) {
if (!missing(x)) {
if (is_symbol(x)) {
return(invisible(NULL))
}
if (allow_null && is_null(x)) {
return(invisible(NULL))
}
}
stop_input_type(
x,
"an argument name",
...,
allow_na = FALSE,
allow_null = allow_null,
arg = arg,
call = call
)
}
check_call <- function(x,
...,
allow_null = FALSE,
arg = caller_arg(x),
call = caller_env()) {
if (!missing(x)) {
if (is_call(x)) {
return(invisible(NULL))
}
if (allow_null && is_null(x)) {
return(invisible(NULL))
}
}
stop_input_type(
x,
"a defused call",
...,
allow_na = FALSE,
allow_null = allow_null,
arg = arg,
call = call
)
}
check_environment <- function(x,
...,
allow_null = FALSE,
arg = caller_arg(x),
call = caller_env()) {
if (!missing(x)) {
if (is_environment(x)) {
return(invisible(NULL))
}
if (allow_null && is_null(x)) {
return(invisible(NULL))
}
}
stop_input_type(
x,
"an environment",
...,
allow_na = FALSE,
allow_null = allow_null,
arg = arg,
call = call
)
}
check_function <- function(x,
...,
allow_null = FALSE,
arg = caller_arg(x),
call = caller_env()) {
if (!missing(x)) {
if (is_function(x)) {
return(invisible(NULL))
}
if (allow_null && is_null(x)) {
return(invisible(NULL))
}
}
stop_input_type(
x,
"a function",
...,
allow_na = FALSE,
allow_null = allow_null,
arg = arg,
call = call
)
}
check_closure <- function(x,
...,
allow_null = FALSE,
arg = caller_arg(x),
call = caller_env()) {
if (!missing(x)) {
if (is_closure(x)) {
return(invisible(NULL))
}
if (allow_null && is_null(x)) {
return(invisible(NULL))
}
}
stop_input_type(
x,
"an R function",
...,
allow_na = FALSE,
allow_null = allow_null,
arg = arg,
call = call
)
}
check_formula <- function(x,
...,
allow_null = FALSE,
arg = caller_arg(x),
call = caller_env()) {
if (!missing(x)) {
if (is_formula(x)) {
return(invisible(NULL))
}
if (allow_null && is_null(x)) {
return(invisible(NULL))
}
}
stop_input_type(
x,
"a formula",
...,
allow_na = FALSE,
allow_null = allow_null,
arg = arg,
call = call
)
}
# Vectors -----------------------------------------------------------------
check_character <- function(x,
...,
allow_null = FALSE,
arg = caller_arg(x),
call = caller_env()) {
if (!missing(x)) {
if (is_character(x)) {
return(invisible(NULL))
}
if (allow_null && is_null(x)) {
return(invisible(NULL))
}
}
stop_input_type(
x,
"a character vector",
...,
allow_na = FALSE,
allow_null = allow_null,
arg = arg,
call = call
)
}
check_logical <- function(x,
...,
allow_null = FALSE,
arg = caller_arg(x),
call = caller_env()) {
if (!missing(x)) {
if (is_logical(x)) {
return(invisible(NULL))
}
if (allow_null && is_null(x)) {
return(invisible(NULL))
}
}
stop_input_type(
x,
"a logical vector",
...,
allow_na = FALSE,
allow_null = allow_null,
arg = arg,
call = call
)
}
check_data_frame <- function(x,
...,
allow_null = FALSE,
arg = caller_arg(x),
call = caller_env()) {
if (!missing(x)) {
if (is.data.frame(x)) {
return(invisible(NULL))
}
if (allow_null && is_null(x)) {
return(invisible(NULL))
}
}
stop_input_type(
x,
"a data frame",
...,
allow_null = allow_null,
arg = arg,
call = call
)
}
# nocov end
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/import-standalone-types-check.R |
#' Tweak a metric function
#'
#' @description
#' `metric_tweak()` allows you to tweak an existing metric `.fn`, giving it a
#' new `.name` and setting new optional argument defaults through `...`. It
#' is similar to `purrr::partial()`, but is designed specifically for yardstick
#' metrics.
#'
#' `metric_tweak()` is especially useful when constructing a [metric_set()] for
#' tuning with the tune package. After the metric set has been constructed,
#' there is no way to adjust the value of any optional arguments (such as
#' `beta` in [f_meas()]). Using `metric_tweak()`, you can set optional arguments
#' to custom values ahead of time, before they go into the metric set.
#'
#' @details
#' The function returned from `metric_tweak()` only takes `...` as arguments,
#' which are passed through to the original `.fn`. Passing `data`, `truth`,
#' and `estimate` through by position should generally be safe, but it is
#' recommended to pass any other optional arguments through by name to ensure
#' that they are evaluated correctly.
#'
#' @param .name A single string giving the name of the new metric. This will be
#' used in the `".metric"` column of the output.
#'
#' @param .fn An existing yardstick metric function to tweak.
#'
#' @param ... Name-value pairs specifying which optional arguments to override
#' and the values to replace them with.
#'
#' Arguments `data`, `truth`, and `estimate` are considered _protected_,
#' and cannot be overridden, but all other optional arguments can be
#' altered.
#'
#' @return
#' A tweaked version of `.fn`, updated to use new defaults supplied in `...`.
#'
#' @export
#' @examples
#' mase12 <- metric_tweak("mase12", mase, m = 12)
#'
#' # Defaults to `m = 1`
#' mase(solubility_test, solubility, prediction)
#'
#' # Updated to use `m = 12`. `mase12()` has this set already.
#' mase(solubility_test, solubility, prediction, m = 12)
#' mase12(solubility_test, solubility, prediction)
#'
#' # This is most useful to set optional argument values ahead of time when
#' # using a metric set
#' mase10 <- metric_tweak("mase10", mase, m = 10)
#' metrics <- metric_set(mase, mase10, mase12)
#' metrics(solubility_test, solubility, prediction)
metric_tweak <- function(.name, .fn, ...) {
check_string(.name)
if (!is_metric(.fn)) {
cli::cli_abort(
"{.arg .fn} must be a metric function, not {.obj_type_friendly {(.fn)}}."
)
}
fixed <- enquos(...)
if (length(fixed) > 0 && !is_named(fixed)) {
cli::cli_abort("All arguments passed through {.arg ...} must be named.")
}
check_protected_names(fixed)
out <- function(...) {
args <- enquos(...)
call <- call2(.fn, !!!args, !!!fixed)
out <- eval_tidy(call)
out[[".metric"]] <- .name
out
}
out <- set_static_arguments(out, names(fixed))
class(out) <- class(.fn)
metric_direction(out) <- metric_direction(.fn)
out
}
# ------------------------------------------------------------------------------
check_protected_names <- function(fixed) {
protected <- protected_names()
has_protected_name <- any(names(fixed) %in% protected)
if (!has_protected_name) {
return(invisible(fixed))
}
cli::cli_abort(
"Arguments passed through {.arg ...} cannot be named any of: {protected}."
)
}
protected_names <- function() {
c("data", "truth", "estimate")
}
get_static_arguments <- function(fn) {
attr(fn, "static", exact = TRUE)
}
set_static_arguments <- function(fn, static) {
attr(fn, "static") <- static
fn
}
| /scratch/gouwar.j/cran-all/cranData/yardstick/R/metric-tweak.R |
# ------------------------------------------------------------------------------
# Column name extractors
pos_val <- function(xtab, event_level) {
if (!all(dim(xtab) == 2)) {
cli::cli_abort("Only relevant for 2x2 tables.")
}
if (is_event_first(event_level)) {
colnames(xtab)[[1]]
} else {
colnames(xtab)[[2]]
}
}
neg_val <- function(xtab, event_level) {
if (!all(dim(xtab) == 2)) {
cli::cli_abort("Only relevant for 2x2 tables.")
}
if (is_event_first(event_level)) {
colnames(xtab)[[2]]
} else {
colnames(xtab)[[1]]
}
}
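# Sketch: for a 2x2 table with columns c("yes", "no") and
# `event_level = "first"`, `pos_val()` returns "yes" and `neg_val()` returns
# "no"; with `event_level = "second"` the roles flip.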
# ------------------------------------------------------------------------------
check_table <- function(x, call = caller_env()) {
n_col <- ncol(x)
n_row <- nrow(x)
if (n_row != n_col) {
cli::cli_abort(
"{.arg x} must have equal dimensions. \\
{.arg x} has {n_col} columns and {n_row} rows.",
call = call
)
}
if (!isTRUE(all.equal(rownames(x), colnames(x)))) {
cli::cli_abort(
"The table must the same groups in the same order.",
call = call
)
}
invisible(NULL)
}
# ------------------------------------------------------------------------------
is_binary <- function(x) {
identical(x, "binary")
}
is_micro <- function(x) {
identical(x, "micro")
}
# ------------------------------------------------------------------------------
quote_and_collapse <- function(x) {
x <- encodeString(x, quote = "'", na.encode = FALSE)
paste0(x, collapse = ", ")
}
# ------------------------------------------------------------------------------
is_class_pred <- function(x) {
inherits(x, "class_pred")
}
as_factor_from_class_pred <- function(x) {
if (!is_class_pred(x)) {
return(x)
}
if (!is_installed("probably")) {
cli::cli_abort(
"A {.cls class_pred} input was detected, but the {.pkg probably} \\
package isn't installed. Install {.pkg probably} to be able to convert \\
{.cls class_pred} to {.cls factor}."
)
}
probably::as.factor(x)
}
abort_if_class_pred <- function(x, call = caller_env()) {
if (is_class_pred(x)) {
cli::cli_abort(
"{.arg truth} should not a {.cls class_pred} object.",
call = call
)
}
return(invisible(x))
}
# ------------------------------------------------------------------------------
curve_finalize <- function(result, data, class, grouped_class) {
# Packed `.estimate` curve data frame
out <- dplyr::pull(result, ".estimate")
if (!dplyr::is_grouped_df(data)) {
class(out) <- c(class, class(out))
return(out)
}
group_syms <- dplyr::groups(data)
# Poor-man's `tidyr::unpack()`
groups <- dplyr::select(result, !!!group_syms)
out <- dplyr::bind_cols(groups, out)
# Curve functions always return a result grouped by original groups
out <- dplyr::group_by(out, !!!group_syms)
class(out) <- c(grouped_class, class, class(out))
out
}
# ------------------------------------------------------------------------------
yardstick_mean <- function(x, ..., case_weights = NULL, na_remove = FALSE) {
check_dots_empty()
if (is.null(case_weights)) {
mean(x, na.rm = na_remove)
} else {
case_weights <- vec_cast(case_weights, to = double())
stats::weighted.mean(x, w = case_weights, na.rm = na_remove)
}
}
yardstick_sum <- function(x, ..., case_weights = NULL, na_remove = FALSE) {
check_dots_empty()
if (is.null(case_weights)) {
sum(x, na.rm = na_remove)
} else {
case_weights <- vec_cast(case_weights, to = double())
if (na_remove) {
# Only remove `NA`s found in `x`, copies `stats::weighted.mean()`
keep <- !is.na(x)
x <- x[keep]
case_weights <- case_weights[keep]
}
sum(x * case_weights)
}
}
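# As a sketch, with case weights these reduce to the weighted forms:
#   yardstick_mean(c(1, 2, 3), case_weights = c(1, 1, 2)) # (1 + 2 + 6) / 4 = 2.25
#   yardstick_sum(c(1, 2, 3), case_weights = c(1, 1, 2))  # 1 + 2 + 6 = 9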
# ------------------------------------------------------------------------------
yardstick_sd <- function(x,
...,
case_weights = NULL) {
check_dots_empty()
variance <- yardstick_var(
x = x,
case_weights = case_weights
)
sqrt(variance)
}
yardstick_var <- function(x,
...,
case_weights = NULL) {
check_dots_empty()
yardstick_cov(
truth = x,
estimate = x,
case_weights = case_weights
)
}
yardstick_cov <- function(truth,
estimate,
...,
case_weights = NULL) {
check_dots_empty()
if (is.null(case_weights)) {
# To always go through `stats::cov.wt()` for consistency
case_weights <- rep(1, times = length(truth))
}
truth <- vec_cast(truth, to = double())
estimate <- vec_cast(estimate, to = double())
case_weights <- vec_cast(case_weights, to = double())
size <- vec_size(truth)
if (size != vec_size(estimate)) {
cli::cli_abort(
"{.arg truth} ({vec_size(truth)}) and \\
{.arg estimate} ({vec_size(estimate)}) must be the same size.",
.internal = TRUE
)
}
if (size != vec_size(case_weights)) {
cli::cli_abort(
"{.arg truth} ({vec_size(truth)}) and \\
{.arg case_weights} ({vec_size(case_weights)}) must be the same size.",
.internal = TRUE
)
}
if (size == 0L || size == 1L) {
# Like `cov(double(), double())` and `cov(0, 0)`.
# Otherwise `cov.wt()` returns `NaN` or an error.
return(NA_real_)
}
input <- cbind(truth = truth, estimate = estimate)
cov <- stats::cov.wt(
x = input,
wt = case_weights,
cor = FALSE,
center = TRUE,
method = "unbiased"
)
cov <- cov$cov
# The 2-column input generates a 2x2 covariance matrix.
# Diagonals are variances; the off-diagonal is the covariance between
# `truth` and `estimate` (which equals the variance when they are
# identical, as in `yardstick_var()`).
cov[[1, 2]]
}
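# Illustrative check: with unit weights and `method = "unbiased"`,
# `yardstick_cov()` agrees with the unweighted `stats::cov()`, e.g.
#   x <- c(1, 2, 3, 5)
#   y <- c(2, 4, 5, 9)
#   yardstick_cov(x, y)  # identical to stats::cov(x, y)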
yardstick_cor <- function(truth,
estimate,
...,
case_weights = NULL) {
check_dots_empty()
if (is.null(case_weights)) {
# To always go through `stats::cov.wt()` for consistency
case_weights <- rep(1, times = length(truth))
}
truth <- vec_cast(truth, to = double())
estimate <- vec_cast(estimate, to = double())
case_weights <- vec_cast(case_weights, to = double())
size <- vec_size(truth)
if (size != vec_size(estimate)) {
cli::cli_abort(
"{.arg truth} ({vec_size(truth)}) and \\
{.arg estimate} ({vec_size(estimate)}) must be the same size.",
.internal = TRUE
)
}
if (size != vec_size(case_weights)) {
cli::cli_abort(
"{.arg truth} ({vec_size(truth)}) and \\
{.arg case_weights} ({vec_size(case_weights)}) must be the same size.",
.internal = TRUE
)
}
if (size == 0L || size == 1L) {
warn_correlation_undefined_size_zero_or_one()
return(NA_real_)
}
if (vec_unique_count(truth) == 1L) {
warn_correlation_undefined_constant_truth(truth)
return(NA_real_)
}
if (vec_unique_count(estimate) == 1L) {
warn_correlation_undefined_constant_estimate(estimate)
return(NA_real_)
}
input <- cbind(truth = truth, estimate = estimate)
cov <- stats::cov.wt(
x = input,
wt = case_weights,
cor = TRUE,
center = TRUE,
method = "unbiased"
)
cor <- cov$cor
# 2-column matrix generates 2x2 correlation matrix.
# Diagonals are 1s. Off-diagonals are correlations.
cor[[1, 2]]
}
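# Illustrative check: with unit weights and non-constant inputs,
# `yardstick_cor()` agrees with the unweighted `stats::cor()`, e.g.
#   yardstick_cor(c(1, 2, 3), c(2, 4, 7))  # same as stats::cor(...)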
warn_correlation_undefined_size_zero_or_one <- function() {
message <- paste0(
"A correlation computation is required, but the inputs are size zero or ",
"one and the standard deviation cannot be computed. ",
"`NA` will be returned."
)
warn_correlation_undefined(
message = message,
class = "yardstick_warning_correlation_undefined_size_zero_or_one"
)
}
warn_correlation_undefined_constant_truth <- function(truth) {
message <- make_correlation_undefined_constant_message(what = "truth")
warn_correlation_undefined(
message = message,
truth = truth,
class = "yardstick_warning_correlation_undefined_constant_truth"
)
}
warn_correlation_undefined_constant_estimate <- function(estimate) {
message <- make_correlation_undefined_constant_message(what = "estimate")
warn_correlation_undefined(
message = message,
estimate = estimate,
class = "yardstick_warning_correlation_undefined_constant_estimate"
)
}
make_correlation_undefined_constant_message <- function(what) {
paste0(
"A correlation computation is required, but `", what, "` is constant ",
"and has 0 standard deviation, resulting in a divide by 0 error. ",
"`NA` will be returned."
)
}
warn_correlation_undefined <- function(message, ..., class = character()) {
cli::cli_warn(
message = message,
class = c(class, "yardstick_warning_correlation_undefined"),
...
)
}
# ------------------------------------------------------------------------------
yardstick_quantile <- function(x, probabilities, ..., case_weights = NULL) {
# When this goes through `quantile()`, that uses `type = 7` by default,
# which does linear interpolation of modes. `weighted_quantile()` uses a
# weighted version of what `type = 4` does, which is a linear interpolation
# of the empirical CDF, so even if you supply `case_weights = 1`, the values
# will likely differ.
check_dots_empty()
if (is.null(case_weights)) {
stats::quantile(x, probs = probabilities, names = FALSE)
} else {
weighted_quantile(x, weights = case_weights, probabilities = probabilities)
}
}
weighted_quantile <- function(x, weights, probabilities) {
# For possible use in hardhat. A weighted variant of `quantile(type = 4)`,
# which does linear interpolation of the empirical CDF.
x <- vec_cast(x, to = double())
weights <- vec_cast(weights, to = double())
probabilities <- vec_cast(probabilities, to = double())
size <- vec_size(x)
if (size != vec_size(weights)) {
cli::cli_abort(
"{.arg x} ({vec_size(x)}) and {.arg weights} ({vec_size(weights)}) \\
must have the same size."
)
}
if (any(is.na(probabilities))) {
cli::cli_abort("{.arg probabilities} can't have missing values.")
}
if (any(probabilities > 1 | probabilities < 0)) {
cli::cli_abort("{.arg probabilities} must be within `[0, 1]`.")
}
if (size == 0L) {
# For compatibility with `quantile()`, since `approx()` requires >=2 points
out <- rep(NA_real_, times = length(probabilities))
return(out)
}
if (size == 1L) {
# For compatibility with `quantile()`, since `approx()` requires >=2 points
out <- rep(x, times = length(probabilities))
return(out)
}
o <- vec_order(x)
x <- vec_slice(x, o)
weights <- vec_slice(weights, o)
weighted_quantiles <- cumsum(weights) / sum(weights)
interpolation <- stats::approx(
x = weighted_quantiles,
y = x,
xout = probabilities,
method = "linear",
rule = 2L
)
out <- interpolation$y
out
}
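# Illustrative check of the `type = 4` vs `type = 7` note above:
#   x <- c(1, 2, 3, 4)
#   stats::quantile(x, 0.5, names = FALSE)             # 2.5 (type 7 default)
#   weighted_quantile(x, rep(1, 4), 0.5)               # 2.0
#   stats::quantile(x, 0.5, type = 4, names = FALSE)   # also 2.0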
# ------------------------------------------------------------------------------
yardstick_table <- function(truth, estimate, ..., case_weights = NULL) {
check_dots_empty()
abort_if_class_pred(truth)
if (is_class_pred(estimate)) {
estimate <- as_factor_from_class_pred(estimate)
}
if (!is.factor(truth)) {
cli::cli_abort(
"{.arg truth} must be a factor, not {.obj_type_friendly {truth}}.",
.internal = TRUE
)
}
if (!is.factor(estimate)) {
cli::cli_abort(
"{.arg estimate} must be a factor, not {.obj_type_friendly {estimate}}.",
.internal = TRUE
)
}
levels <- levels(truth)
n_levels <- length(levels)
if (!identical(levels, levels(estimate))) {
cli::cli_abort(
"{.arg truth} and {.arg estimate} must have the same levels in the same \\
order.",
.internal = TRUE
)
}
if (n_levels < 2) {
cli::cli_abort(
"{.arg truth} must have at least 2 factor levels.",
.internal = TRUE
)
}
# Supply `estimate` first to get it to correspond to the row names.
# Always return a double matrix for type stability (in particular, we know
# `mcc()` relies on this for overflow and C code purposes).
if (is.null(case_weights)) {
out <- table(Prediction = estimate, Truth = truth)
out <- unclass(out)
storage.mode(out) <- "double"
} else {
out <- hardhat::weighted_table(
Prediction = estimate,
Truth = truth,
weights = case_weights
)
}
out
}
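# Illustrative sketch: predictions are rows, truth is columns, and the result
# is always a double matrix, e.g.
#   truth    <- factor(c("a", "b", "a"), levels = c("a", "b"))
#   estimate <- factor(c("a", "a", "b"), levels = c("a", "b"))
#   yardstick_table(truth, estimate)
#   #           Truth
#   # Prediction a b
#   #          a 1 1
#   #          b 1 0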
yardstick_truth_table <- function(truth, ..., case_weights = NULL) {
# For usage in many of the prob-metric functions.
# A `truth` table is required for `"macro_weighted"` estimators.
# Case weights must be passed through to generate correct `"macro_weighted"`
# results. `"macro"` and `"micro"` don't require case weights for this
# particular part of the calculation.
# Modeled after the treatment of `average = "weighted"` in sklearn, which
# works the same as `"macro_weighted"` here.
# https://github.com/scikit-learn/scikit-learn/blob/baf828ca126bcb2c0ad813226963621cafe38adb/sklearn/metrics/_base.py#L23
check_dots_empty()
abort_if_class_pred(truth)
if (!is.factor(truth)) {
cli::cli_abort("{.arg truth} must be a factor.", .internal = TRUE)
}
levels <- levels(truth)
n_levels <- length(levels)
if (n_levels < 2) {
cli::cli_abort(
"{.arg truth} must have at least 2 factor levels.",
.internal = TRUE
)
}
# Always return a double matrix for type stability
if (is.null(case_weights)) {
out <- table(truth, dnn = NULL)
out <- unclass(out)
storage.mode(out) <- "double"
} else {
out <- hardhat::weighted_table(
truth,
weights = case_weights
)
}
# Required to be a 1 row matrix for `get_weights()`
out <- matrix(out, nrow = 1L)
out
}
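# Illustrative sketch: the result is a 1-row double matrix of (possibly
# weighted) class counts, e.g.
#   truth <- factor(c("a", "a", "b"), levels = c("a", "b"))
#   yardstick_truth_table(truth)  # matrix(c(2, 1), nrow = 1)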