#' @title eventlog_from_xes
#' @description Extracts an event log from an XES file.
#' @param xesfile Reference to a .xes file, conforming to the xes-standard.
#' @seealso \url{http://www.xes-standard.org/}
#' @export eventlog_from_xes
eventlog_from_xes <- function(xesfile = file.choose()){
warning("Function deprecated. Please use read_xes")
return(read_xes(xesfile))
}
| /scratch/gouwar.j/cran-all/cranData/xesreadR/R/eventlog_from_xes.R |
#' @title read_xes
#' @description Extracts an event log from an XES file.
#' @param xesfile Reference to a .xes file, conforming to the xes-standard.
#' @param validate When 'TRUE', some basic checks are run on the contents of the event log, such as verifying that activity instances are not connected to more than one case or activity.
#' Using 'FALSE' improves performance by skipping those checks and allows importing XES files that do not conform to bupaR requirements.
#'
#' @seealso \url{http://www.xes-standard.org/}
#' @export read_xes
read_xes <- function(xesfile = file.choose(), validate = TRUE){
EVENT_ID <- NULL
n_attributes <- NULL
attr_id <- NULL
type <- NULL
value <- NULL
key <- NULL
case_id <- NULL
n_attr <- NULL
CASE_ID <- NULL
`concept:name` <- NULL
CASE_CASE_ID <- NULL
org_resource <- NULL
time_timestamp <- NULL
lifecycle_transition <- NULL
concept_instance <- NULL
xml2::read_xml(xesfile) %>%
xml_children() %>%
.[xml_name(.) == "trace"] %>%
map(xml_children) -> t
t %>%
map(~.x[xml_name(.x) == "event"]) %>%
map(map, xml_children) -> all_events
t %>%
map(~.x[xml_name(.x) != "event"]) -> all_cases
all_events %>%
map(map, xml_attrs) -> all_attrs
n_attributes_per_event = map(all_attrs, lengths)
data_frame(CASE_ID = 1:length(all_events)) %>%
mutate(EVENT_ID = map2(lengths(all_events), n_attributes_per_event, ~data.frame(EVENT_ID = 1:.x,
n_attributes = .y))) %>%
unnest(EVENT_ID) %>%
mutate(attr_id = map(n_attributes, ~data.frame(attr_id = 1:.x))) %>%
unnest(attr_id) -> eventlog
all_attrs %>%
unlist() %>%
as_data_frame() %>%
mutate(type = rep(c("key","value"), length = nrow(.)),
attr_id = rep(1:(nrow(.)/2),each = 2)) %>%
spread(type, value) %>%
select(-attr_id) %>%
bind_cols(eventlog) %>%
select(-n_attributes, -attr_id) %>%
spread(key, value) -> eventlog
n_attributes_per_case = lengths(all_cases)
data_frame(CASE_ID = 1:length(all_cases),
n_attr = n_attributes_per_case) %>%
mutate(attr_id = map(n_attr, ~data.frame(attr_id = seq_len(.x)))) %>%
unnest(attr_id) -> cases
if(nrow(cases) > 0) {
all_cases %>%
map(xml_attrs) %>%
unlist() %>%
as_data_frame() %>%
mutate(type = rep(c("key","value"), length = nrow(.)),
attr_id = rep(1:(nrow(.)/2),each = 2)) %>%
spread(type, value) %>%
select(-attr_id) %>%
bind_cols(cases) %>%
select(-n_attr, -attr_id) %>%
spread(key, value) -> cases
cases <- cases %>% select(CASE_ID, concept_name = `concept:name`, everything()) %>%
set_names(paste0("CASE_",names(.))) %>%
rename(CASE_ID = CASE_CASE_ID)
case_classifier <- "CASE_concept_name"
eventlog %>%
inner_join(cases, ., by = "CASE_ID") -> eventlog
} else {
case_classifier <- "CASE_ID"
}
if(!("concept:name" %in% names(eventlog))) {
stop("XES-file does not contain event")
}
eventlog %>%
rename(activity_id = `concept:name`) %>%
set_names(str_replace_all(names(.),":", "_")) %>%
select(-EVENT_ID) -> eventlog
if(nrow(cases) > 0) {
eventlog %>%
select(-CASE_ID) -> eventlog
}
if("org_resource" %in% names(eventlog)) {
eventlog %>%
rename(resource_id = org_resource) -> eventlog
} else {
warning("No resource_id specified in xes-file")
eventlog %>%
mutate(resource_id = NA) -> eventlog
}
if("time_timestamp" %in% names(eventlog)) {
eventlog %>%
rename(timestamp = time_timestamp) %>%
mutate(timestamp = ymd_hms(timestamp)) -> eventlog
} else {
stop("XES-file does not contain timestamp")
}
if("lifecycle_transition" %in% names(eventlog)) {
eventlog %>%
rename(lifecycle_id = lifecycle_transition) -> eventlog
} else {
warning("No lifecycle transition id specified in xes-file")
eventlog %>%
mutate(lifecycle_id = NA) -> eventlog
}
if("concept_instance" %in% names(eventlog)) {
eventlog %>%
rename(activity_instance_id = concept_instance) -> eventlog
} else {
warning("No activity instance identifier specified in xes-file. By default considered each event as a different activity instance. Please check!")
eventlog %>%
mutate(activity_instance_id = 1:nrow(.)) -> eventlog
}
eventlog %>%
eventlog(case_id = case_classifier,
activity_id = "activity_id",
activity_instance_id = "activity_instance_id",
timestamp = "timestamp",
lifecycle_id = "lifecycle_id",
resource_id = "resource_id",
validate = validate) -> eventlog
return(eventlog)
}
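# A hedged usage sketch (the XES file path below is hypothetical, and the code
# is therefore commented out):
# log <- read_xes("hospital_log.xes")
# summary(log)  # inspect the imported bupaR event log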
| /scratch/gouwar.j/cran-all/cranData/xesreadR/R/read_xes.R |
#' @title Case Attributes from Xes-file
#' @description Extracts case attributes from an XES file.
#' @param xesfile Reference to a .xes file, conforming to the xes-standard.
#' @seealso \url{http://www.xes-standard.org/}
#' @export read_xes_cases
read_xes_cases <- function(xesfile = file.choose()) {
EVENT_ID <- NULL
n_attributes <- NULL
attr_id <- NULL
type <- NULL
value <- NULL
key <- NULL
case_id <- NULL
n_attr <- NULL
CASE_ID <- NULL
`concept:name` <- NULL
CASE_CASE_ID <- NULL
org_resource <- NULL
time_timestamp <- NULL
lifecycle_transition <- NULL
concept_instance <- NULL
xml2::read_xml(xesfile) %>%
xml_children() %>%
.[xml_name(.) == "trace"] %>%
map(xml_children) -> t
t %>%
map(~.x[xml_name(.x) != "event"]) -> all_cases
n_attributes_per_case = lengths(all_cases)
data_frame(CASE_ID = 1:length(all_cases),
n_attr = n_attributes_per_case) %>%
mutate(attr_id = map(n_attr, ~data.frame(attr_id = seq_len(.x)))) %>%
unnest(attr_id) -> cases
if(nrow(cases) > 0) {
all_cases %>%
map(xml_attrs) %>%
unlist() %>%
as_data_frame() %>%
mutate(type = rep(c("key","value"), length = nrow(.)),
attr_id = rep(1:(nrow(.)/2),each = 2)) %>%
spread(type, value) %>%
select(-attr_id) %>%
bind_cols(cases) %>%
select(-n_attr, -attr_id) %>%
spread(key, value) -> cases
cases <- cases %>% select(CASE_ID, concept_name = `concept:name`, everything()) %>%
set_names(paste0("CASE_",names(.))) %>%
rename(CASE_ID = CASE_CASE_ID)
} else {
stop("xes-file does not contain case attributes")
}
cases %>%
select(-CASE_ID) -> cases
return(cases)
}
| /scratch/gouwar.j/cran-all/cranData/xesreadR/R/read_xes_cases.R |
stop_eventlog <- function(eventlog)
if(!("eventlog" %in% class(eventlog)))
stop("Function only applicable for class eventlog")
| /scratch/gouwar.j/cran-all/cranData/xesreadR/R/utils.R |
#' @title Write XES file
#' @description Function for writing an event log to an XES file.
#' @param eventlog An event log object
#' @param case_attributes List of columns containing case attributes
#' @param xesfile Destination file
#'
#' @export write_xes
write_xes <- function(eventlog,
xesfile = file.choose(),
case_attributes = NULL) {
case_classifier <- NULL
stop_eventlog(eventlog)
e <- eventlog
eventlog <- eventlog %>% arrange(!!as.symbol(timestamp(eventlog)))
if(is.null(case_attributes)){
if(any(str_detect(colnames(eventlog), "CASE"))) {
case_attributes <- eventlog %>%
select(case_id(e), starts_with("CASE"), force_df = T) %>%
unique
sel <- setdiff(colnames(eventlog), colnames(case_attributes))
eventlog %>% select(one_of(c(case_id(e), sel))) -> eventlog
} else {
colnames(eventlog)[colnames(eventlog) == case_id(e)] <- "case_classifier"
case_attributes <- data.frame(as.character(unique(eventlog$case_classifier)))
colnames(case_attributes)[1] <- case_id(e)
}
}
colnames(eventlog)[colnames(eventlog) == case_id(e)] <- "case_classifier"
eventlog %>%
as.data.frame() %>%
mutate_if(is.numeric, as.character) %>%
rename_("lifecycle:transition" = lifecycle_id(e),
"org:resource" = resource_id(e),
"concept:name" = activity_id(e),
"time:timestamp" = timestamp(e),
"concept:instance" = activity_instance_id(e)) %>%
select(case_classifier, everything()) -> eventlog
createXES(xesfile, traces = case_attributes , events = as.data.frame(eventlog), case_classifier = case_id(e))
}
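# A hedged usage sketch (assumes `log` is an existing bupaR eventlog object and
# the output path is hypothetical; commented out for that reason):
# write_xes(log, xesfile = "log.xes")
# Case attributes (columns prefixed with "CASE_") are exported automatically
# unless a list of columns is supplied via `case_attributes`.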
| /scratch/gouwar.j/cran-all/cranData/xesreadR/R/write_xes.R |
csv_from_xes <- function(xesfile) {
parsed_xes <- parseXES(xesfile)
n_case_att <- length(parsed_xes$trace.att)
n_event_att <- length(parsed_xes$event.att)
n_cases <- length(parsed_xes$traces$'concept:name')
caseids <- parsed_xes$traces$'concept:name'
result <- data.frame(stringsAsFactors = FALSE)
n<-1
for(i in 1:n_cases){
data <- add_NAS_to_event_attributes_per_case(parsed_xes$events[[i]])
n_events <- length(data[[1]])
case <- as.data.frame(data, stringsAsFactors = F)
case <- bind_cols(data.frame(rep(caseids[i], n_events), stringsAsFactors = F), case)
result <- bind_rows(result, case)
}
colnames(result)[1]<-"case_concept.name"
for(i in 1:n_event_att)
colnames(result)[i+1] <- paste("event",colnames(result)[i+1], sep = "_")
return(result)
}
add_NAS_to_event_attributes_per_case <- function(x) {
# number of attribute values recorded per event
v <- as.vector(sapply(x, length))
if(length(unique(v)) > 1){
# more than one value
m <- max(v) # maximum number of attributes
for(i in 1:length(x)){
if(length(x[[i]]) < m){
x[[i]] <- c(x[[i]], rep(NA, times = (m - length(x[[i]]))))
}
}
}
return(x)
}
## Handler function for XES parsing. Used by parseXES
handler <- function(){
#states: log, trace, event
state <- "log"
trace.data <- list()
event.data <- list()
trace.counter <- 0
event.counter <- 0
trace <- function(x,atts){
state <<- "trace"
trace.counter <<- trace.counter + 1
event.data[[trace.counter]] <<- list()
}
event <- function(x,atts){
state <<- "event"
event.counter <<- event.counter + 1
}
endElement <- function(x,...){
if(x =="trace"){
state <<- "log"
event.counter <<- 0
}
else if(x == "event"){
state <<- "log"
}
}
attributes <- function(x,atts){
if(state =="trace"){
trace.data[[atts[["key"]]]][trace.counter] <<- atts[["value"]]
}
else if (state == "event"){
event.data[[trace.counter]][[atts[["key"]]]][event.counter] <<- atts[["value"]]
}
}
return(list(
trace = trace,
event = event,
endElement = endElement,
date = attributes,
string = attributes,
int = attributes,
float = attributes,
boolean = attributes,
trace.data = function(){trace.data},
event.data = function(){event.data}
))
}
parseXES <- function(logfile){
temp <- XML::xmlEventParse(logfile,handler())
tracedata <- temp$trace.data()
trace.att <- names(tracedata)
eventdata <- temp$event.data()
event.att <- unique(unlist(lapply(eventdata,function(x){names(x)})))
return(list(
traces = tracedata,
events = eventdata,
trace.att = trace.att,
event.att = event.att
))
}
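# A minimal usage sketch of the internal parser (the file path is hypothetical,
# so the calls are commented out):
# parsed <- parseXES("log.xes")
# parsed$trace.att  # names of case-level attributes
# parsed$event.att  # names of event-level attributes
# csv <- csv_from_xes("log.xes")  # flatten events into a data frame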
| /scratch/gouwar.j/cran-all/cranData/xesreadR/R/xes_parser.r |
#' @title xesreadR - Read and write XES files
#'
#' @description Functions for reading and writing XES-files. XES (eXtensible Event Stream) is the IEEE standard for storing and sharing event data (see <http://standards.ieee.org/findstds/standard/1849-2016.html> for more info).
#' @docType package
#' @name xesreadR
#'
#' @import XML
#' @import xml2
#' @importFrom data.table data.table
#' @importFrom data.table :=
#' @import bupaR
#' @import dplyr
#' @import tidyr
#' @importFrom purrr map
#' @importFrom purrr map2
#' @importFrom purrr set_names
#' @importFrom lubridate ymd_hms
#' @import stringr
#' @importFrom stats median
#' @importFrom stats na.omit
#' @importFrom stats quantile
#' @importFrom stats sd
#' @importFrom utils head
#' @importFrom utils setTxtProgressBar
#' @importFrom utils txtProgressBar
#' @importFrom utils data
globalVariables(".")
NULL
| /scratch/gouwar.j/cran-all/cranData/xesreadR/R/xesreadR.R |
---
title: "xesreadR"
author: "Gert Janssenswillen"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{xesreadR}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
This package provides functions for reading and writing XES files. Please note that these functions are still experimental. For more information, please check <www.bupar.net>
| /scratch/gouwar.j/cran-all/cranData/xesreadR/inst/doc/xesreadr.rmd |
---
title: "xesreadR"
author: "Gert Janssenswillen"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{xesreadR}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
This package provides functions for reading and writing XES files. Please note that these functions are still experimental. For more information, please check <www.bupar.net>
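
A minimal, hedged example (the file paths below are hypothetical and the code is not evaluated in this vignette):

```r
library(xesreadR)
log <- read_xes("example_log.xes")   # read an event log from an XES file
write_xes(log, "example_copy.xes")   # write the event log back to a new XES file
```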
| /scratch/gouwar.j/cran-all/cranData/xesreadR/vignettes/xesreadr.rmd |
#' Get data from a REST API
#'
#' Read data from a REST API, optionally with an authorization token in the
#' request header. The function `rest_api_raw()` returns the raw text of
#' the response, and `rest_api()` will parse the response with
#' `jsonlite::fromJSON()` (assuming that the response is in the JSON
#' format).
#'
#' These functions are simple wrappers based on [url()] and
#' [read_utf8()]. Specifically, the `headers` argument is
#' passed to `url()`, and `read_utf8()` will send a \samp{GET} request
#' to the API server. This means these functions only support the \samp{GET}
#' method. If you need to use other HTTP methods (such as \samp{POST}), you have
#' to use other packages such as \pkg{curl} and \pkg{httr}.
#' @param ... Arguments to be passed to `rest_api_raw()`.
#' @return A character vector (the raw JSON response) or an R object parsed from
#' the JSON text.
#' @export
#' @examplesIf interactive()
#' # a normal GET request
#' xfun::rest_api('https://httpbin.org', '/get')
#' xfun::rest_api_raw('https://httpbin.org', '/get')
#'
#' # send the request with an auth header
#' xfun::rest_api('https://httpbin.org', '/headers', 'OPEN SESAME!')
#'
#' # with query parameters
#' xfun::rest_api('https://httpbin.org', '/response-headers', params = list(foo = 'bar'))
#'
#' # get the rate limit info from Github
#' xfun::github_api('/rate_limit')
rest_api = function(...) {
res = rest_api_raw(...)
jsonlite::fromJSON(res, simplifyVector = FALSE)
}
#' @param root The API root URL.
#' @param endpoint The API endpoint.
#' @param token A named character string (e.g., `c(token = "xxxx")`), which
#' will be used to create an authorization header of the form
#' \samp{Authorization: NAME TOKEN} for the API call, where \samp{NAME} is the
#' name of the string and \samp{TOKEN} is the string. If the string does not
#' have a name, \samp{Basic} will be used as the default name.
#' @param params A list of query parameters to be sent with the API call.
#' @param headers A named character vector of HTTP headers, e.g., `c(Accept
#' = "application/vnd.github.v3+json")`.
#' @rdname rest_api
#' @export
rest_api_raw = function(root, endpoint, token = '', params = list(), headers = NULL) {
if (is.null(names(token))) names(token) = 'Basic'
endpoint = sub('^/?', '/', endpoint) # make sure it has a leading /
url2 = if ('headers' %in% names(formals(url))) url else stop2(
"The url() function does not support the 'headers' argument. Please upgrade R (>= 3.6.0)."
)
con = url2(
paste0(root, endpoint, query_params(.list = params)), encoding = 'UTF-8',
headers = c(
headers, if (token != '') c(Authorization = sprintf('%s %s', names(token), token))
)
)
on.exit(close(con), add = TRUE)
raw_string(suppressWarnings(read_utf8(con)))
}
| /scratch/gouwar.j/cran-all/cranData/xfun/R/api.R |
#' Encode/decode data into/from base64 encoding.
#'
#' The function `base64_encode()` encodes a file or a raw vector into the
#' base64 encoding. The function `base64_decode()` decodes data from the
#' base64 encoding.
#' @param x For `base64_encode()`, a raw vector. If not raw, it is assumed
#' to be a file or a connection to be read via `readBin()`. For
#' `base64_decode()`, a string.
#' @param from If provided (and `x` is not provided), a connection or file
#' to be read via `readChar()`, and the result will be passed to the
#' argument `x`.
#' @return `base64_encode()` returns a character string.
#' `base64_decode()` returns a raw vector.
#' @useDynLib xfun, .registration = TRUE
#' @export
#' @examples xfun::base64_encode(as.raw(1:10))
#' logo = xfun:::R_logo()
#' xfun::base64_encode(logo)
base64_encode = function(x) {
if (!is.raw(x)) x = read_bin(x)
.Call('base64_enc', x, PACKAGE = 'xfun')
}
#' @export
#' @rdname base64_encode
#' @examples xfun::base64_decode("AQIDBAUGBwgJCg==")
base64_decode = function(x, from = NA) {
if (!is.na(from)) {
if (!missing(x)) stop("Please provide either 'x' or 'from', but not both.")
x = readChar(from, file.size(from), TRUE)
}
if (!is.character(x) || length(x) != 1) stop("'x' must be a single character string.")
.Call('base64_dec', x, PACKAGE = 'xfun')
}
# an R implementation of base64 encoding by Wush Wu moved from knitr (of
# historic interest only): https://github.com/yihui/knitr/pull/324
base64_encode_r = function(x) {
if (!is.raw(x)) x = read_bin(x)
chars = c(LETTERS, letters, 0:9, '+', '/')
n = length(s <- as.integer(x))
res = rep(NA, (n + 2) / 3 * 4)
i = 0L # index of res vector
j = 1L # index of base64_table
while (n > 2L) {
res[i <- i + 1L] = chars[s[j] %/% 4L + 1L]
res[i <- i + 1L] = chars[16 * (s[j] %% 4L) + s[j + 1L] %/% 16 + 1L]
res[i <- i + 1L] = chars[4L * (s[j + 1L] %% 16) + s[j + 2L] %/% 64L + 1L]
res[i <- i + 1L] = chars[s[j + 2L] %% 64L + 1L]
j = j + 3L
n = n - 3L
}
if (n) {
res[i <- i + 1L] = chars[s[j] %/% 4L + 1L]
if (n > 1L) {
res[i <- i + 1L] = chars[16 * (s[j] %% 4L) + s[j + 1L] %/% 16 + 1L]
res[i <- i + 1L] = chars[4L * (s[j + 1L] %% 16) + 1L]
res[i <- i + 1L] = '='
} else {
res[i <- i + 1L] = chars[16 * (s[j] %% 4L) + 1L]
res[i <- i + 1L] = '='
res[i <- i + 1L] = '='
}
}
paste(res[!is.na(res)], collapse = '')
}
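# A quick sanity check (commented out): the R implementation should agree with
# the C implementation exposed as base64_encode().
# x = as.raw(1:10)
# identical(base64_encode(x), base64_encode_r(x))  # expected TRUE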
#' Generate the Data URI for a file
#'
#' Encode the file in the base64 encoding, and add the media type. The data URI
#' can be used to embed data in HTML documents, e.g., in the `src` attribute of
#' the `<img />` tag.
#' @param x A file path.
#' @param type The MIME type of the file, e.g., `"image/png"` for a PNG image
#' file.
#' @return A string of the form `data:<media type>;base64,<data>`.
#' @note This function requires the \pkg{mime} package to determine the MIME
#' type of the file except for a few common MIME types.
#' @export
#' @examples
#' logo = xfun:::R_logo()
#' img = htmltools::img(src = xfun::base64_uri(logo), alt = 'R logo')
#' if (interactive()) htmltools::browsable(img)
base64_uri = function(x, type = mime::guess_type(x)) {
if (missing(type)) type = guess_type(x)
paste0("data:", type, ";base64,", base64_encode(x))
}
# a limited version of mime::guess_type()
guess_type = function(x, use_mime = loadable('mime')) {
if (use_mime) return(mime::guess_type(x))
res = mimemap[tolower(file_ext(x))]
if (any(i <- is.na(res))) {
warning(
'Cannot determine the MIME type(s) of ', paste(x[i], collapse = ', '),
'. You may try to install the "mime" package or report an issue to ',
packageDescription('xfun')$BugReports, '.'
)
res[i] = 'application/octet-stream'
}
unname(res)
}
# a comprehensive version is mime::mimemap (can extend it upon user request)
mimemap = c(
css = 'text/css', csv = 'text/csv', gif = 'image/gif', jpeg = 'image/jpeg',
jpg = 'image/jpeg', js = 'application/javascript', png = 'image/png',
svg = 'image/svg+xml', ttf = 'application/font-sfnt',
woff = 'application/font-woff', woff2 = 'application/octet-stream'
)
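# A small illustration (commented out): force the fallback table instead of the
# mime package to look up a few common extensions.
# guess_type('figure.png', use_mime = FALSE)  # "image/png"
# guess_type('style.css', use_mime = FALSE)   # "text/css"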
| /scratch/gouwar.j/cran-all/cranData/xfun/R/base64.R |
#' Cache the value of an R expression to an RDS file
#'
#' Save the value of an expression to a cache file (of the RDS format). Next
#' time the value is loaded from the file if it exists.
#'
#' Note that the `file` argument does not provide the full cache filename. The
#' actual name of the cache file is of the form \file{BASENAME_HASH.rds}, where
#' \file{BASENAME} is the base name provided via the \file{file} argument (e.g.,
#' if `file = 'foo.rds'`, `BASENAME` would be \file{foo}), and \file{HASH} is
#' the MD5 hash (also called the \sQuote{checksum}) calculated from the R code
#' provided to the `expr` argument and the value of the `hash` argument, which
#' means when the code or the `hash` argument changes, the \file{HASH} string
#' may also change, and the old cache will be invalidated (if it exists). If you
#' want to find the cache file, look for \file{.rds} files that contain 32
#' hexadecimal digits (consisting of 0-9 and a-f) at the end of the filename.
#'
#' The possible ways to invalidate the cache are: 1) change the code in `expr`
#' argument; 2) delete the cache file manually or automatically through the
#' argument `rerun = TRUE`; and 3) change the value of the `hash` argument. The
#' first two ways should be obvious. For the third way, it makes it possible to
#' automatically invalidate the cache based on changes in certain R objects. For
#' example, when you run `cache_rds({ x + y })`, you may want to invalidate the
#' cache to rerun `{ x + y }` when the value of `x` or `y` has been changed, and
#' you can tell `cache_rds()` to do so by `cache_rds({ x + y }, hash = list(x,
#' y))`. The value of the argument `hash` is expected to be a list, but it can
#' also take a special value, `"auto"`, which means `cache_rds(expr)` will try
#' to automatically figure out the global variables in `expr`, return a list of
#' their values, and use this list as the actual value of `hash`. This behavior
#' is most likely to be what you really want: if the code in `expr` uses an
#' external global variable, you may want to invalidate the cache if the value
#' of the global variable has changed. Here a \dQuote{global variable} means a
#' variable not created locally in `expr`, e.g., for `cache_rds({ x <- 1; x + y
#' })`, `x` is a local variable, and `y` is (most likely to be) a global
#' variable, so changes in `y` should invalidate the cache. However, you know
#' your own code the best. If you want to be completely sure when to invalidate
#' the cache, you can always provide a list of objects explicitly rather than
#' relying on `hash = "auto"`.
#'
#' By default (the argument `clean = TRUE`), old cache files will be
#' automatically cleaned up. Sometimes you may want to use `clean = FALSE` (set
#' the R global option `options(xfun.cache_rds.clean = FALSE)` if you want
#' `FALSE` to be the default). For example, you may not have decided which
#' version of code to use, and you can keep the cache of both versions with
#' `clean = FALSE`, so when you switch between the two versions of code, it will
#' still be fast to run the code.
#' @param expr An R expression.
#' @param rerun Whether to delete the RDS file, rerun the expression, and save
#' the result again (i.e., invalidate the cache if it exists).
#' @param file The *base* (see Details) cache filename under the directory
#' specified by the `dir` argument. If not specified and this function is
#' called inside a code chunk of a \pkg{knitr} document (e.g., an R Markdown
#' document), the default is the current chunk label plus the extension
#' \file{.rds}.
#' @param dir The path of the RDS file is partially determined by `paste0(dir,
#' file)`. If not specified and the \pkg{knitr} package is available, the
#' default value of `dir` is the \pkg{knitr} chunk option `cache.path` (so if
#' you are compiling a \pkg{knitr} document, you do not need to provide this
#' `dir` argument explicitly), otherwise the default is \file{cache/}. If you
#' do not want to provide a `dir` but simply a valid path to the `file`
#' argument, you may use `dir = ""`.
#' @param hash A `list` object that contributes to the MD5 hash of the cache
#' filename (see Details). It can also take a special character value
#' `"auto"`. Other types of objects are ignored.
#' @param clean Whether to clean up the old cache files automatically when
#' `expr` has changed.
#' @param ... Other arguments to be passed to [saveRDS()].
#' @note Changes in the code in the `expr` argument do not necessarily always
#' invalidate the cache, if the changed code is [`parse`]`d` to the same
#' expression as the previous version of the code. For example, if you have
#' run `cache_rds({Sys.sleep(5);1+1})` before, running `cache_rds({ Sys.sleep(
#' 5 ) ; 1 + 1 })` will use the cache, because the two expressions are
#' essentially the same (they only differ in white spaces). Usually you can
#' add/delete white spaces or comments to your code in `expr` without
#' invalidating the cache. See the package vignette `vignette('xfun', package
#' = 'xfun')` for more examples.
#'
#' When this function is called in a code chunk of a \pkg{knitr} document, you
#' may not want to provide the filename or directory of the cache file,
#' because they have reasonable defaults.
#'
#' Side-effects (such as plots or printed output) will not be cached. The
#' cache only stores the last value of the expression in `expr`.
#' @return If the cache file does not exist, run the expression and save the
#' result to the file, otherwise read the cache file and return the value.
#' @export
#' @examples
#' f = tempfile() # the cache file
#' compute = function(...) {
#' res = xfun::cache_rds({
#' Sys.sleep(1)
#' 1:10
#' }, file = f, dir = '', ...)
#' res
#' }
#' compute() # takes one second
#' compute() # returns 1:10 immediately
#' compute() # fast again
#' compute(rerun = TRUE) # one second to rerun
#' compute()
#' file.remove(f)
cache_rds = function(
expr = {}, rerun = FALSE, file = 'cache.rds', dir = 'cache/',
hash = NULL, clean = getOption('xfun.cache_rds.clean', TRUE), ...
) {
if (loadable('knitr')) {
if (missing(file) && !is.null(lab <- knitr::opts_current$get('label')))
file = paste0(lab, '.rds')
if (missing(dir) && !is.null(d <- knitr::opts_current$get('cache.path')))
dir = d
}
path = paste0(dir, file)
if (!grepl(r <- '([.]rds)$', path)) path = paste0(path, '.rds')
code = deparse(substitute(expr))
md5 = md5_obj(code)
if (identical(hash, 'auto')) hash = global_vars(code, parent.frame(2))
if (is.list(hash)) md5 = md5_obj(c(md5, md5_obj(hash)))
path = sub(r, paste0('_', md5, '\\1'), path)
if (rerun) unlink(path)
if (clean) clean_cache(path)
if (file_exists(path)) readRDS(path) else {
obj = expr # lazy evaluation
dir.create(dirname(path), recursive = TRUE, showWarnings = FALSE)
saveRDS(obj, path, ...)
obj
}
}
# write an object to a file and return the md5 sum
md5_obj = function(x) {
f = tempfile(); on.exit(unlink(f), add = TRUE)
if (is.character(x)) writeLines(x, f) else saveRDS(x, f)
unname(tools::md5sum(f))
}
# clean up old cache files (those with the same base names as the new cache
# file, e.g., if the new file is FOO_0123abc...z.rds, then FOO_9876def...x.rds
# should be deleted)
clean_cache = function(path) {
olds = list.files(dirname(path), '_[0-9a-f]{32}[.]rds$', full.names = TRUE)
olds = c(olds, path) # `path` may not exist; make sure it is in target paths
base = basename(olds)
keep = basename(path) == base # keep this file (will cache to this file)
base = substr(base, 1, nchar(base) - 37) # 37 = 1 (_) + 32 (md5 sum) + 4 (.rds)
unlink(olds[(base == base[keep][1]) & !keep])
}
# analyze code and find out global variables
find_globals = function(code) {
fun = eval(parse_only(c('function(){', code, '}')))
setdiff(codetools::findGlobals(fun), known_globals)
}
known_globals = c(
'{', '[', '(', ':', '<-', '=', '+', '-', '*', '/', '%%', '%/%', '%*%', '%o%', '%in%'
)
# return a list of values of global variables in code
global_vars = function(code, env) {
if (length(vars <- find_globals(code)) > 0) mget(vars, env)
}
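# An illustration of the global-variable detection used by `hash = "auto"`
# (commented out; results assume x and y are not defined inside the code):
# find_globals('x + y')          # c("x", "y"): both are global in the wrapped function
# find_globals('x <- 1; x + y')  # "y": x is assigned locally, so only y is global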
#' Download a file from a URL and cache it on disk
#'
#' This object provides methods to download files and cache them on disk.
#' @format A list of methods:
#'
#' - `$get(url, type, handler)` downloads a URL, caches it, and returns the file
#' content according to the value of `type` (possible values: `"text"` means
#' the text content; `"base64"` means the base64 encoded data; `"raw"` means
#' the raw binary content; `"auto"` is the default and means the type is
#' determined by the content type in the URL headers). Optionally a `handler`
#' function can be applied to the content.
#' - `$list()` gives the list of cache files.
#' - `$summary()` gives a summary of existing cache files.
#' - `$remove(url, type)` removes a single cache file.
#' - `$purge()` deletes all cache files.
#' @export
#' @examplesIf interactive()
#' # the first time it may take a few seconds
#' x1 = xfun::download_cache$get('https://www.r-project.org/')
#' head(x1)
#'
#' # now you can get the cached content
#' x2 = xfun::download_cache$get('https://www.r-project.org/')
#' identical(x1, x2) # TRUE
#'
#' # a binary file
#' x3 = xfun::download_cache$get('https://yihui.org/images/logo.png', 'raw')
#' length(x3)
#'
#' # show a summary
#' xfun::download_cache$summary()
#' # remove a specific cache file
#' xfun::download_cache$remove('https://yihui.org/images/logo.png', 'raw')
#' # remove all cache files
#' xfun::download_cache$purge()
download_cache = local({
pre = 'url' # file prefix
c_dir = function() {
getOption('xfun.cache.dir', tools::R_user_dir('xfun', 'cache'))
}
c_file = function(url, type) {
file.path(c_dir(), sprintf('%s_%s_%s.rds', pre, type, md5_obj(url)))
}
read = function(url, type) {
if (length(f <- c_file(url, type)) && file.exists(f)) readRDS(f)
}
write = function(url, type, data) {
if (length(f <- c_file(url, type))) {
dir_create(dirname(f))
saveRDS(data, f)
}
}
list_cache = function() {
d = c_dir()
list.files(d, sprintf('^%s_.+[.]rds$', pre), full.names = TRUE)
}
list(
get = function(url, type = c('auto', 'text', 'base64', 'raw'), handler = NULL) {
type = type[1]
if (!is.null(x <- read(url, type))) return(x[[url]])
if ((auto <- type == 'auto')) type = if (length(grep(
'^content-type:\\s+(text/.+|[^;]+;\\s+charset=utf-8)\\s*$',
curlGetHeaders(url), ignore.case = TRUE
))) 'text' else 'raw'
dir_create(d <- tempfile())
on.exit(unlink(d, recursive = TRUE), add = TRUE)
x = in_dir(d, {
o = url_filename(url)
download_file(url, o)
switch(
type, text = read_utf8(o), base64 = base64_uri(o), raw = read_bin(o)
)
})
if (is.function(handler)) x = handler(x)
write(url, if (auto) 'auto' else type, setNames(list(x), url))
x
},
summary = function() {
f = list_cache()
if (length(f) == 0) return(invisible())
t = gsub('^url_([^_]+)_.+$', '\\1', basename(f))
u = vapply(f, function(x) names(readRDS(x)), character(1))
s = file.size(f)
if (length(f) > 1) message('Total size: ', format_bytes(sum(s)))
d = data.frame(url = u, type = t, size = s, size_h = format_bytes(s))
rownames(d) = NULL
unname(split(d, seq_len(nrow(d))))
},
list = list_cache,
remove = function(url, type = 'auto') file.remove(c_file(url, type)),
purge = function() {
f = list_cache()
s = file.size(f)
i = file.remove(f)
message(sprintf(
"Purged %d cache file(s) from '%s' (%s)",
sum(i), c_dir(), format_bytes(sum(s[i]))
))
}
)
})
| /scratch/gouwar.j/cran-all/cranData/xfun/R/cache.R |
#' Run `system2()` and mark its character output as UTF-8 if appropriate
#'
#' This is a wrapper function based on `system2()`. If `system2()`
#' returns character output (e.g., with the argument `stdout = TRUE`),
#' check if the output is encoded in UTF-8. If it is, mark it with UTF-8
#' explicitly.
#' @param ... Passed to [system2()].
#' @return The value returned by `system2()`.
#' @export
#' @examplesIf interactive()
#' a = shQuote(c('-e', 'print(intToUtf8(c(20320, 22909)))'))
#' x2 = system2('Rscript', a, stdout = TRUE)
#' Encoding(x2) # unknown
#'
#' x3 = xfun::system3('Rscript', a, stdout = TRUE)
#' # encoding of x3 should be UTF-8 if the current locale is UTF-8
#' !l10n_info()[['UTF-8']] || Encoding(x3) == 'UTF-8' # should be TRUE
system3 = function(...) {
res = system2(...)
if (is.character(res)) {
if (all(is_utf8(res))) Encoding(res) = 'UTF-8'
}
if (is.integer(res) && res == 0) invisible(res) else res
}
#' Run OptiPNG on all PNG files under a directory
#'
#' Call the command \command{optipng} via `system2()` to optimize all PNG
#' files under a directory.
#' @param dir Path to a directory.
#' @param files Alternatively, you can choose the specific files to optimize.
#' @param ... Arguments to be passed to `system2()`.
#' @references OptiPNG: <https://optipng.sourceforge.net>.
#' @export
optipng = function(dir = '.', files = all_files('[.]png$', dir), ...) {
if (Sys.which('optipng') != '') for (f in files) system2('optipng', shQuote(f), ...)
}
#' Run the commands \command{Rscript} and \command{R CMD}
#'
#' Wrapper functions to run the commands \command{Rscript} and \command{R CMD}.
#' @param args A character vector of command-line arguments.
#' @param ... Other arguments to be passed to [system2()].
#' @export
#' @return A value returned by `system2()`.
#' @examples library(xfun)
#' Rscript(c('-e', '1+1'))
#' Rcmd(c('build', '--help'))
Rscript = function(args, ...) {
# unset R_TESTS for the new R session: https://stackoverflow.com/a/27994299
if (is_R_CMD_check()) {
v = set_envvar(c(R_TESTS = NA)); on.exit(set_envvar(v), add = TRUE)
}
system2(file.path(R.home('bin'), 'Rscript'), args, ...)
}
#' @rdname Rscript
#' @export
Rcmd = function(args, ...) {
system2(file.path(R.home('bin'), 'R'), c('CMD', args), ...)
}
#' Call a function in a new R session via `Rscript()`
#'
#' Save the argument values of a function in a temporary RDS file, open a new R
#' session via [Rscript()], read the argument values, call the
#' function, and read the returned value back to the current R session.
#' @param fun A function, or a character string that can be parsed and evaluated
#' to a function.
#' @param args A list of argument values.
#' @param options A character vector of options to passed to
#' [Rscript()], e.g., `"--vanilla"`.
#' @param ...,wait Arguments to be passed to [system2()].
#' @param fail The desired error message when an error occurred in calling the
#' function.
#' @export
#' @return The returned value of the function in the new R session.
#' @examples factorial(10)
#' # should return the same value
#' xfun::Rscript_call('factorial', list(10))
#'
#' # the first argument can be either a character string or a function
#' xfun::Rscript_call(factorial, list(10))
#'
#' # Run Rscript starting a vanilla R session
#' xfun::Rscript_call(factorial, list(10), options = c("--vanilla"))
Rscript_call = function(
fun, args = list(), options = NULL, ..., wait = TRUE,
fail = sprintf("Failed to run '%s' in a new R session.", deparse(substitute(fun))[1])
) {
f = replicate(2, tempfile(fileext = '.rds'))
on.exit(unlink(if (wait) f else f[2]), add = TRUE)
saveRDS(list(fun, args), f[1])
Rscript(
c(options, shQuote(c(pkg_file('scripts', 'call-fun.R'), f)))
,..., wait = wait
)
if (wait) if (file_exists(f[2])) readRDS(f[2]) else stop(fail, call. = FALSE)
}
# call a function in a background process
Rscript_bg = function(fun, args = list(), timeout = 10) {
pid = tempfile() # to store the process ID of the new R session
saveRDS(NULL, pid)
Rscript_call(function() {
saveRDS(Sys.getpid(), pid)
# remove this pid file when the function finishes
on.exit(unlink(pid), add = TRUE)
do.call(fun, args)
}, wait = FALSE)
id = NULL # read the above process ID into this R session
res = list(pid = id, is_alive = function() FALSE)
# check if the pid file still exists; if not, the process has ended
if (!file_exists(pid)) return(res)
t0 = Sys.time()
while (difftime(Sys.time(), t0, units = 'secs') < timeout) {
Sys.sleep(.1)
if (!file_exists(pid)) return(res)
if (length(id <- readRDS(pid)) == 1) break
}
if (length(id) == 0) stop(
'Failed to launch the background process in ', timeout, ' seconds (timeout).'
)
list(pid = id, is_alive = function() file_exists(pid))
}
#' Kill a process and (optionally) all its child processes
#'
#' Run the command \command{taskkill /f /pid} on Windows and \command{kill} on
#' Unix, respectively, to kill a process.
#' @param pid The process ID.
#' @param recursive Whether to kill the child processes of the process.
#' @param ... Arguments to be passed to [system2()] to run the
#' command to kill the process.
#' @return The status code returned from `system2()`.
#' @export
proc_kill = function(pid, recursive = TRUE, ...) {
if (is_windows()) {
system2('taskkill', c(if (recursive) '/t', '/f', '/pid', pid), ...)
} else {
system2('kill', c(pid, if (recursive) child_pids(pid)), ...)
}
}
# obtain pids of all child processes (recursively)
child_pids = function(id) {
x = system2('sh', shQuote(c(pkg_file('scripts', 'child-pids.sh'), id)), stdout = TRUE)
grep('^[0-9]+$', x, value = TRUE)
}
powershell = function(command) {
if (Sys.which('powershell') == '') return()
command = paste(command, collapse = ' ')
system2('powershell', c('-Command', shQuote(command)), stdout = TRUE)
}
# start a background process via the PowerShell cmdlet and return its pid
ps_process = function(command, args = character(), verbose = FALSE) {
powershell(c(
'echo (Start-Process', '-FilePath', shQuote(command), '-ArgumentList',
ps_quote(args), '-PassThru', '-WindowStyle',
sprintf('%s).ID', if (verbose) 'Normal' else 'Hidden')
))
}
# quote PowerShell arguments properly
ps_quote = function(x) {
x = gsub('"', '""', x) # '""' mean a literal '"'
# if an argument contains a space, surround it with escaped double quotes `"`"
i = grep('\\s', x)
x[i] = sprintf('`"%s`"', x[i])
sprintf('"%s"', paste(x, collapse = ' '))
}
#' Start a background process
#'
#' Start a background process using the PowerShell cmdlet
#' \command{Start-Process -PassThru} on Windows or the ampersand \command{&} on
#' Unix, and return the process ID.
#' @param command,args The system command and its arguments. They do not need to
#' be quoted, since they will be quoted via [shQuote()] internally.
#' @param verbose If `FALSE`, suppress the output from `stdout` (and also
#' `stderr` on Windows). The default value of this argument can be set via a
#' global option, e.g., `options(xfun.bg_process.verbose = TRUE)`.
#' @return The process ID as a character string.
#' @note On Windows, if PowerShell is not available, try to use
#' [`system2`]`(wait = FALSE)` to start the background process instead. The
#' process ID will be identified from the output of the command
#' \command{tasklist}. This method of looking for the process ID may not be
#' reliable. If the search is not successful in 30 seconds, it will throw an
#' error (timeout). If a longer time is needed, you may set
#' `options(xfun.bg_process.timeout)` to a larger value, but it should be very
#' rare that a process cannot be started in 30 seconds. When you reach the
#' timeout, it is more likely that the command actually failed.
#' @export
#' @seealso [proc_kill()] to kill a process.
bg_process = function(
command, args = character(), verbose = getOption('xfun.bg_process.verbose', FALSE)
) {
throw_error = function(...) stop(
'Failed to run the command', ..., ' in the background: ',
paste(shQuote(c(command, args)), collapse = ' '), call. = FALSE
)
# check the possible pid returned from system2()
check_pid = function(res) {
if (is.null(res)) return(res)
if (!is.null(attr(res, 'status'))) throw_error()
if (length(res) == 1 && grepl('^[0-9]+$', res)) return(res)
throw_error()
}
if (is_windows()) {
# first try 'Start-Process -PassThru' to start a background process; if
# PowerShell is unavailable, fall back to system2(wait = FALSE), and the
# method to find out the pid is not 100% reliable
if (length(pid <- check_pid(ps_process(command, args, verbose))) == 1) return(pid)
message(
'It seems you do not have PowerShell installed. The process ID may be inaccurate.'
)
# format of task list: hugo.exe 4592 Console 1 35,188 K
tasklist = function() system2('tasklist', stdout = TRUE)
pid1 = tasklist()
system2(command, shQuote(args), wait = FALSE)
get_pid = function() {
# make sure the command points to an actual executable (e.g., resolve 'R'
# to 'R.exe')
if (!file_exists(command)) {
if (Sys.which(command) != '') command = Sys.which(command)
}
cmd = basename(command)
pid2 = setdiff(tasklist(), pid1)
# the process's info should start with the command name
pid2 = pid2[substr(pid2, 1, nchar(cmd)) == cmd]
if (length(pid2) == 0) return()
m = regexec('\\s+([0-9]+)\\s+', pid2)
for (v in regmatches(pid2, m)) if (length(v) >= 2) return(v[2])
}
t0 = Sys.time(); id = NULL; timeout = getOption('xfun.bg_process.timeout', 30)
while (difftime(Sys.time(), t0, units = 'secs') < timeout) {
if (length(id <- get_pid()) > 0) break
}
if (length(id) > 0) return(id)
system2(command, args, timeout = timeout) # see what the error is
throw_error(' in ', timeout, ' second(s)')
} else {
pid = tempfile(); on.exit(unlink(pid), add = TRUE)
code = paste(c(
shQuote(c(command, args)), if (!verbose) '> /dev/null', '& echo $! >', shQuote(pid)
), collapse = ' ')
system2('sh', c('-c', shQuote(code)))
return(check_pid(readLines(pid)))
}
}
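# A hedged usage sketch (commented out; assumes a 'sleep' executable is
# available on the PATH, e.g., on Unix):
# pid = bg_process('sleep', '10')  # start a background process and get its PID
# proc_kill(pid)                   # kill it (and its child processes)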
#' Upload to an FTP server via \command{curl}
#'
#' The function `upload_ftp()` runs the command \command{curl -T file
#' server} to upload a file to an FTP server if the system command
#' \command{curl} is available, otherwise it uses the R package \pkg{curl}. The
#' function `upload_win_builder()` uses `upload_ftp()` to upload
#' packages to the win-builder server.
#'
#' These functions were written mainly to save package developers the trouble of
#' going to the win-builder web page and uploading packages there manually.
#' @param file Path to a local file.
#' @param server The address of the FTP server. For `upload_win_builder()`,
#' `server = 'https'` means uploading to
#' `'https://win-builder.r-project.org/upload.aspx'`.
#' @param dir The remote directory to which the file should be uploaded.
#' @param version The R version(s) on win-builder.
#' @return Status code returned from [system2()] or
#' `curl::curl_fetch_memory()`.
#' @export
upload_ftp = function(file, server, dir = '') {
if (dir != '') dir = gsub('/*$', '/', dir)
server = paste0(server, dir)
if (Sys.which('curl') == '') {
curl::curl_upload(file, server)$status_code
} else {
system2('curl', shQuote(c('-T', file, server)))
}
}
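# A hedged usage sketch (commented out; the server address and file are
# hypothetical):
# upload_ftp('pkg_1.0.tar.gz', 'ftp://example.org/', dir = 'incoming/')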
#' @param solaris Whether to also upload the package to the Rhub server to check
#' it on Solaris.
#' @rdname upload_ftp
#' @export
upload_win_builder = function(
file = pkg_build(), version = c("R-devel", "R-release", "R-oldrelease"),
server = c('ftp', 'https'), solaris = pkg_available('rhub')
) {
if (missing(file)) on.exit(file.remove(file), add = TRUE)
if (system2('git', 'status', stderr = FALSE) == 0) system2('git', 'pull')
server = server[1]
server = switch(
server,
'ftp' = paste0(server, '://win-builder.r-project.org/'),
'https' = paste0(server, '://win-builder.r-project.org/upload.aspx'),
server
)
res = if (grepl('^ftp://', server)) {
lapply(version, upload_ftp, file = file, server = server)
} else {
vers = c('R-devel' = 2, 'R-release' = 1, 'R-oldrelease' = 3)
params = list(
FileUpload = file,
Button = 'Upload File',
# perhaps we should read these tokens dynamically from
# https://win-builder.r-project.org/upload.aspx
`__VIEWSTATE` = '/wEPDwULLTE0OTY5NTg0MTUPZBYCAgIPFgIeB2VuY3R5cGUFE211bHRpcGFydC9mb3JtLWRhdGFkZFHMrNH6JjHTyJ00T0dAADGf4oa0',
`__VIEWSTATEGENERATOR` = '69164837',
`__EVENTVALIDATION` = '/wEWBQKksYbrBgKM54rGBgK7q7GGCAKF2fXbAwLWlM+bAqR2dARbCNfKVu0vDawqWYgB5kKI'
)
lapply(version, function(i) {
names(params)[1:2] = paste0(names(params)[1:2], vers[i])
if (Sys.which('curl') == '') {
h = curl::new_handle()
params[[1]] = curl::form_file(params[[1]])
curl::handle_setform(h, .list = params)
curl::curl_fetch_memory(server, h)$status_code
} else {
params[1] = paste0('@', params[1])
system2('curl', shQuote(c(
rbind('-F', paste(names(params), params, sep = '=')),
server
)), stdout = FALSE)
}
})
}
if (solaris) rhub::check_on_solaris(
file, check_args = '--no-manual', show_status = FALSE,
env_vars = c(`_R_CHECK_FORCE_SUGGESTS_` = 'false')
)
setNames(unlist(res), version)
}
| /scratch/gouwar.j/cran-all/cranData/xfun/R/command.R |
# retrieve the release dates of packages
cran_pkg_dates = function(full = FALSE, maintainer = 'Yihui Xie') {
info = tools::CRAN_package_db()
pkgs = info[grep(maintainer, info$Maintainer), 'Package']
info = setNames(vector('list', length(pkgs)), pkgs)
for (p in pkgs) {
message('Processing ', p)
x = readLines(u <- sprintf('https://cran.rstudio.com/web/packages/%s/', p))
i = which(x == '<td>Published:</td>')
if (length(i) == 0) stop('Cannot find the publishing date from ', u)
d = as.Date(gsub('</?td>', '', x[i[1] + 1]))
x = try_silent(suppressWarnings(readLines(
u <- sprintf('https://cran.r-project.org/src/contrib/Archive/%s/', p)
)))
if (inherits(x, 'try-error')) {
info[[p]] = d; next
}
r = '.+</td><td align="right">(\\d{4,}-\\d{2}-\\d{2}) .+'
d = c(d, as.Date(gsub(r, '\\1', grep(r, x, value = TRUE))))
info[[p]] = sort(d, decreasing = TRUE)
}
if (full) info else sort(do.call(c, lapply(info, `[`, 1)), decreasing = TRUE)
}
# return packages that haven't been updated for X days, and can be updated on CRAN
cran_updatable = function(days = 90, maintainer = 'Yihui Xie') {
info = cran_pkg_dates(TRUE, maintainer)
flag = unlist(lapply(info, function(d) {
sum(d > Sys.Date() - 180) < 6 && d[1] < Sys.Date() - days
}))
if (length(pkgs <- names(which(flag))) == 0) return(pkgs)
# look into DESCRIPTION in Github repos and see if new version has been pushed
info = tools::CRAN_package_db()
info = info[info$Package %in% pkgs, , drop = FALSE]
pkgs = info$Package
for (i in seq_len(nrow(info))) {
b = grep_sub('^(https://github.com/[^/]+/[^/]+)/issues$', '\\1', info$BugReports[i])
if (length(b) != 1) next
f = tempfile()
u = paste0(b, '/raw/HEAD/DESCRIPTION')
if (is.null(tryCatch(download.file(u, f, quiet = TRUE), error = function(e) NULL))) next
d = read.dcf(f)
file.remove(f)
if (!'Version' %in% colnames(d)) next
if (as.numeric_version(d[, 'Version']) <= paste0(info$Version[i], '.1')) {
pkgs = setdiff(pkgs, info$Package[i])
message('Skipped package ', info$Package[i], ' ', d[, 'Version'], ' (no new version).')
} else {
message('Package can be updated: ', b)
}
}
pkgs
}
#' Some utility functions for checking packages
#'
#' Miscellaneous utility functions to obtain information about the package
#' checking environment.
#' @export
#' @keywords internal
is_R_CMD_check = function() {
!is.na(check_package_name()) || tolower(Sys.getenv('_R_CHECK_LICENSE_')) == 'true'
}
#' @rdname is_R_CMD_check
#' @export
is_CRAN_incoming = function() {
isTRUE(as.logical(Sys.getenv('_R_CHECK_CRAN_INCOMING_REMOTE_')))
}
#' @rdname is_R_CMD_check
#' @export
check_package_name = function() {
Sys.getenv('_R_CHECK_PACKAGE_NAME_', NA)
}
# is R CMD check running on a package whose version is lower than or equal to `version`?
#' @rdname is_R_CMD_check
#' @export
check_old_package = function(name, version) {
if (is.na(pkg <- check_package_name()) || !(pkg %in% name)) return(FALSE)
tryCatch(packageVersion(pkg) <= version[pkg == name], error = function(e) FALSE)
}
# return package maintainers (with email addresses)
pkg_maintainers = function(pkgs) {
info = tools::CRAN_package_db()
info = info[match(pkgs, info$Package), c('Package', 'Maintainer')]
setNames(info$Maintainer, info$Package)
}
#' Submit a source package to CRAN
#'
#' Build a source package and submit it to CRAN with the \pkg{curl} package.
#' @param file The path to the source package tarball. By default, the current
#' working directory is treated as the package root directory, and
#' automatically built into a tarball, which is deleted after submission. This
#' means you should run `xfun::submit_cran()` in the root directory of a
#' package project, unless you want to pass a path explicitly to the
#' `file` argument.
#' @param comment Submission comments for CRAN. By default, if a file
#' \file{cran-comments.md} exists, its content will be read and used as the
#' comment.
#' @seealso `devtools::submit_cran()` does the same job, with a few more
#' dependencies in addition to \pkg{curl} (such as \pkg{cli});
#' `xfun::submit_cran()` only depends on \pkg{curl}.
#' @export
submit_cran = function(file = pkg_build(), comment = '') {
# if the tarball is automatically created, delete it after submission
if (missing(file)) on.exit(file.remove(file), add = TRUE)
# read the maintainer's name/email
dir_create(d <- tempfile())
on.exit(unlink(d, recursive = TRUE), add = TRUE)
desc = file.path(gsub('_.*', '', basename(file)), 'DESCRIPTION')
untar(file, desc, exdir = d)
info = read.dcf(file.path(d, desc), fields = 'Maintainer')[1, 1]
info = unlist(strsplit(info, '( <|>)'))
# read submission comments from cran-comments.md if exists
if (missing(comment) && file_exists(f <- 'cran-comments.md')) {
comment = file_string(f)
}
params = list(
uploaded_file = curl::form_file(file), name = info[1], email = info[2],
upload = 'Upload package'
)
params$comment = if (length(comment)) comment
server = 'https://xmpalantir.wu.ac.at/cransubmit/index2.php'
# submit the form
h = curl::new_handle()
curl::handle_setform(h, .list = params)
res = curl::curl_fetch_memory(server, h)
# find the pkg_id from the response page
id = grep_sub(
'(.*<input name="pkg_id" type="hidden" value=")([^"]+)(".*)', '\\2',
rawToChar(res$content)
)
if (length(id) != 1) stop('Failed to submit ', file, ' to CRAN')
# skip the review and submit directly
h = curl::new_handle()
curl::handle_setform(h, .list = list(pkg_id = id, submit = 'Submit package'))
res = curl::curl_fetch_memory(server, h)
if (grepl('>Step 3<', rawToChar(res$content))) message(
'The package has been submitted. Please confirm the submission in email: ',
params$email
) else message('The submission may be unsuccessful.')
}
| /scratch/gouwar.j/cran-all/cranData/xfun/R/cran.R |
#' Strict lists
#'
#' A strict list is essentially a normal [list()] but it does not
#' allow partial matching with `$`.
#'
#' To me, partial matching is often more annoying and surprising than
#' convenient. It can lead to bugs that are very hard to discover, and I have
#' been bitten by it many times. When I write `x$name`, I always mean
#' precisely `name`. You should use a modern code editor to autocomplete
#' the `name` if it is too long to type, instead of using partial names.
#' @param ... Objects (list elements), possibly named. Ignored in the
#' `print()` method.
#' @export
#' @return Both `strict_list()` and `as_strict_list()` return a list
#' with the class `xfun_strict_list`. Whereas `as_strict_list()`
#' attempts to coerce its argument `x` to a list if necessary,
#' `strict_list()` just wraps its argument `...` in a list, i.e., it
#' will add another list level regardless of whether `...` is already of type
#' list.
#' @examples library(xfun)
#' (z = strict_list(aaa = 'I am aaa', b = 1:5))
#' z$a # NULL!
#' z$aaa # I am aaa
#' z$b
#' z$c = 'create a new element'
#'
#' z2 = unclass(z) # a normal list
#' z2$a # partial matching
#'
#' z3 = as_strict_list(z2) # a strict list again
#' z3$a # NULL again!
strict_list = function(...) {
as_strict_list(list(...))
}
# https://twitter.com/xieyihui/status/782462926862954496
#' @param x For `as_strict_list()`, the object to be coerced to a strict
#' list.
#'
#' For `print()`, a strict list.
#' @rdname strict_list
#' @export
as_strict_list = function(x) {
structure(as.list(x), class = 'xfun_strict_list')
}
#' @param name The name (a character string) of the list element.
#' @rdname strict_list
#' @export
`$.xfun_strict_list` = function(x, name) x[[name]]
#' @rdname strict_list
#' @export
print.xfun_strict_list = function(x, ...) {
print(unclass(x))
}
#' Print a character vector in its raw form
#'
#' The function `raw_string()` assigns the class `xfun_raw_string` to
#' the character vector, and the corresponding printing function
#' `print.xfun_raw_string()` uses `cat(x, sep = '\n')` to write the
#' character vector to the console, which will suppress the leading indices
#' (such as `[1]`) and double quotes, and it may be easier to read the
#' characters in the raw form (especially when there are escape sequences).
#' @param x For `raw_string()`, a character vector. For the print method,
#' the `raw_string()` object.
#' @export
#' @examples library(xfun)
#' raw_string(head(LETTERS))
#' raw_string(c('a "b"', 'hello\tworld!'))
raw_string = function(x) {
if (is.null(x)) x = as.character(x)
class(x) = c('xfun_raw_string', class(x))
x
}
#' @param ... Other arguments (currently ignored).
#' @rdname raw_string
#' @export
print.xfun_raw_string = function(x, ...) {
if (length(x)) cat(x, sep = '\n')
invisible(x)
}
| /scratch/gouwar.j/cran-all/cranData/xfun/R/data-structure.R |
#' Try to use the system native encoding to represent a character vector
#'
#' Apply `enc2native()` to the character vector, and check if `enc2utf8()` can
#' convert it back without a loss. If it does, return `enc2native(x)`, otherwise
#' return the original vector with a warning.
#' @param x A character vector.
#' @note On platforms that supports UTF-8 as the native encoding
#' ([l10n_info()]`[['UTF-8']]` returns `TRUE`), the conversion will be
#' skipped.
#' @export
#' @examples
#' library(xfun)
#' s = intToUtf8(c(20320, 22909))
#' Encoding(s)
#'
#' s2 = native_encode(s)
#' Encoding(s2)
native_encode = function(x) {
if (isTRUE(l10n_info()[['UTF-8']])) return(x)
if (identical(enc2utf8(x2 <- enc2native(x)), x)) return(x2)
warning('The character vector cannot be represented in the native encoding')
x
}
#' Check if a character vector consists of entirely ASCII characters
#'
#' Converts the encoding of a character vector to `'ascii'`, and checks if
#' the result is `NA`.
#' @param x A character vector.
#' @return A logical vector indicating whether each element of the character
#' vector is ASCII.
#' @export
#' @examples library(xfun)
#' is_ascii(letters) # yes
#' is_ascii(intToUtf8(8212)) # no
is_ascii = function(x) {
out = iconv(x, to = 'ascii') == x
out[is.na(out)] = FALSE
out[is.na(x)] = NA
out
}
| /scratch/gouwar.j/cran-all/cranData/xfun/R/encoding.R |
#' Get the tags of Github releases of a repository
#'
#' Use the Github API ([github_api()]) to obtain the tags of the
#' releases.
#' @param repo The repository name of the form `user/repo`, e.g.,
#' `"yihui/xfun"`.
#' @param tag A tag as a character string. If provided, it will be returned if
#' the tag exists. If `tag = "latest"`, the tag of the latest release is
#' returned.
#' @param pattern A regular expression to match the tags.
#' @param use_jsonlite Whether to use \pkg{jsonlite} to parse the releases info.
#' @export
#' @return A character vector of (GIT) tags.
#' @examplesIf interactive()
#' xfun::github_releases('yihui/xfun')
#' xfun::github_releases('gohugoio/hugo')
github_releases = function(
repo, tag = '', pattern = 'v[0-9.]+', use_jsonlite = loadable('jsonlite')
) {
if (tag != '') return(github_releases2(repo, tag, pattern))
i = 1; v = character()
repeat {
res = github_api(
sprintf('/repos/%s/tags', repo), NULL, list(per_page = 100, page = i),
raw = !use_jsonlite
)
v2 = unlist(if (use_jsonlite) {
lapply(res, `[[`, 'name')
} else {
reg_match('\\{"name":"([^"]+)",', res)
})
if (length(v2) == 0) break
v = c(v, v2)
if (length(v2) < 100) break # not enough items for the next page
i = i + 1
}
grep(sprintf('^%s$', pattern), unique(v), value = TRUE)
}
# extract the matched elements in the n-th pair of () in the regex
reg_match = function(p, x, n = 1, ...) {
# TODO: gregexec was added in R 4.1.0; remove this workaround when we don't
# need to support R < 4.1.0
v = 'gregexec' %in% ls(baseenv())
m = (if (v) base::gregexec else base::gregexpr)(p, x, ...)
lapply(regmatches(x, m), function(x) {
if (v) x[n + 1, ] else gsub(p, paste0('\\', n), x)
})
}
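# An illustration (commented out): extract the first capture group from each match.
# reg_match('v([0-9.]+)', 'Releases: v1.2.3 and v2.0.0')
# # expected result: list(c("1.2.3", "2.0.0"))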
# the fallback method to retrieve release tags (read HTML source)
github_releases2 = function(repo, tag = '', pattern = '[^"&]+') {
read = function() suppressWarnings(
read_utf8(sprintf('https://github.com/%s/releases/%s', repo, tag))
)
h = if (tag == '') read() else tryCatch(read(), error = function(e) '')
r = sprintf('^.*?%s/releases/tag/(%s)".*', repo, pattern)
unique(grep_sub(r, '\\1', h))
}
#' @details `github_api()` is a wrapper function based on
#' `rest_api_raw()` to obtain data from the Github API:
#' <https://docs.github.com/en/rest>. You can provide a personal access
#' token (PAT) via the `token` argument, or via one of the environment
#' variables \var{GITHUB_PAT}, \var{GITHUB_TOKEN}, \var{GH_TOKEN}. A PAT
#' allows for a much higher rate limit in API calls. Without a token, you can
#' only make 60 calls in an hour.
#' @param raw Whether to return the raw response or parse the response with
#' \pkg{jsonlite}.
#' @rdname rest_api
#' @export
github_api = function(
endpoint, token = '', params = list(), headers = NULL, raw = !loadable('jsonlite')
) {
token = c(token, unname(Sys.getenv(envs <- c('GITHUB_PAT', 'GITHUB_TOKEN', 'GH_TOKEN'))))
token = if (length(token <- token[token != ''])) token[1] else ''
names(token) = 'token'
error = TRUE
on.exit(if (error && token == '') message(
'You may need to save a Github personal access token in one of the ',
'environment variables: ', paste(envs, collapse = ', ')
))
res = rest_api_raw('https://api.github.com', endpoint, token, params, headers)
error = FALSE
if (raw) res else jsonlite::fromJSON(res, FALSE)
}
git = function(...) {
if (Sys.which('git') == '') stop('git is not available')
# R's HOME var is different from the system's HOME on Windows:
# https://github.com/yihui/crandalf/issues/24
if (is_windows()) {
env = set_envvar(c(HOME = Sys.getenv('USERPROFILE')))
on.exit(set_envvar(env), add = TRUE)
}
system2('git', ...)
}
git_co = function(args = NULL, ...) {
git(c('checkout', args), ...)
}
git_test_branch = function() {
if (length(d <- git(c('diff', '--name-only'), stdout = TRUE))) stop(
    'The current branch has changes not staged for commit:\n',
paste(d, collapse = '\n')
)
}
gh = function(...) {
if (Sys.which('gh') == '') stop('Github CLI not found: https://cli.github.com')
system2('gh', ...)
}
gh_run = function(..., repo = NA) {
gh(c(if (!is.na(repo)) c('-R', repo), 'run', ...), stdout = TRUE)
}
# ---- end of R/github.R ----
# add a border to an image via ImageMagick
add_border = function(input, pixels = 1, color = 'black', output) {
input = normalizePath(input)
if (missing(output))
output = paste0(sans_ext(input), '-output.', file_ext(input))
system2('convert', shQuote(c(
input, '-shave', paste(pixels, pixels, sep = 'x'), '-bordercolor', color,
'-border', pixels, output)
))
optipng(dirname(output))
}
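# A sketch of a possible call (assumes ImageMagick's `convert` is on PATH and
# relies on the optipng() helper defined elsewhere in this package); kept
# inside `if (FALSE)` so it is never run here.
if (FALSE) {
  add_border('plot.png', pixels = 2, color = 'red')  # writes plot-output.png
}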
#' Use the Tinify API to compress PNG and JPEG images
#'
#' Compress PNG/JPEG images with \samp{api.tinify.com}, and download the
#' compressed images. These functions require R packages \pkg{curl} and
#' \pkg{jsonlite}. `tinify_dir()` is a wrapper function of `tinify()` to
#' compress images under a directory.
#'
#' You are recommended to set the API key in \file{.Rprofile} or
#' \file{.Renviron}. After that, the only required argument of this function is
#' `input`. If the original images can be overwritten by the compressed images,
#' you may either use `output = identity`, or set the value of the `history`
#' argument in \file{.Rprofile} or \file{.Renviron}.
#' @param input A vector of input paths of images.
#' @param output A vector of output paths or a function that takes `input` and
#' returns a vector of output paths (e.g., `output = `[`identity`] means
#'   `output = input`). By default, if the `history` argument is not provided,
#' `output` is `input` with a suffix `-min` (e.g., when `input = 'foo.png'`,
#' `output = 'foo-min.png'`), otherwise `output` is the same as `input`, which
#' means the original image files will be overwritten.
#' @param quiet Whether to suppress detailed information about the compression,
#' which is of the form \samp{input.png (10 Kb) ==> output.png (5 Kb, 50\%);
#' compression count: 42}. The percentage after `output.png` stands for
#' the compression ratio, and the compression count shows the number of
#' compressions used for the current month.
#' @param force Whether to compress an image again when it appears to have been
#' compressed before. This argument only makes sense when the `history`
#' argument is provided.
#' @param key The Tinify API key. It can be set via either the global option
#' `xfun.tinify.key` or the environment variable `R_XFUN_TINIFY_KEY` (see
#' [env_option()]).
#' @param history Path to a history file to record the MD5 checksum of
#' compressed images. If the checksum of an expected output image exists in
#' this file and `force = FALSE`, the compression will be skipped. This can
#' help you avoid unnecessary API calls.
#' @return The output file paths.
#' @references Tinify API: <https://tinypng.com/developers>.
#' @seealso The \pkg{tinieR} package (<https://github.com/jmablog/tinieR/>) is a
#' more comprehensive implementation of the Tinify API, whereas
#' `xfun::tinify()` has only implemented the feature of shrinking images.
#' @export
#' @examplesIf interactive()
#' f = xfun:::R_logo('jpg$')
#' xfun::tinify(f) # remember to set the API key before trying this
tinify = function(
input, output, quiet = FALSE, force = FALSE,
key = env_option('xfun.tinify.key'),
history = env_option('xfun.tinify.history')
) {
if (!(is.character(key) && length(key) == 1 && key != '')) stop(
"The value of the 'key' argument must be a single non-empty character string."
)
if (length(input) == 0) return(invisible(input))
if (any(i <- !file_exists(input))) stop(
'Input file(s) not found: ', paste(input[i], collapse = ', ')
)
if (missing(output)) {
output = if (is.character(history)) input else {
paste0(sans_ext(input), '-min.', file_ext(input))
}
} else if (is.function(output)) output = output(input)
# avoid optimizing the input image if its md5 checksum exists in history
save_history = function(file) {
if (!is.character(history) || history == '') return()
dir_create(dirname(history))
cat(paste0(tools::md5sum(file), '\n'), file = history, append = TRUE)
}
test_history = function(file) {
is.character(history) && all(file_exists(c(history, file))) &&
(tools::md5sum(file) %in% readLines(history))
}
auth = paste('Authorization: Basic', base64_encode(charToRaw(paste0('api:', key))))
mapply(input, output, FUN = function(i, o) {
if (!force && test_history(o)) {
if (!quiet) message(
'The image ', o, ' has been compressed before. ',
'To compress it again, call tinify() with force = TRUE.'
)
return()
}
if (grepl('[.]png$', i, ignore.case = TRUE))
optipng(files = i, stdout = if (quiet) FALSE else '')
res = curl::curl_upload(i, 'https://api.tinify.com/shrink', httpheader = auth, verbose = FALSE)
cnt = curl::parse_headers_list(res$headers)[['compression-count']]
res = jsonlite::fromJSON(rawToChar(res$content))
if (!is.character(u <- res$output$url)) stop2(
"Failed to shrink '", i, "'", sprintf(': %s (%s)', res$error, res$message)
)
if (!quiet) message(sprintf(
'%s (%s) ==> %s (%s, %.01f%%); compression count: %s',
i, format_bytes(res$input$size), o, format_bytes(res$output$size),
res$output$ratio * 100, if (length(cnt)) cnt else NA
))
# back up the original image and restore it if download failed
if (i == o) {
b = paste0(i, '~')
file.rename(i, b)
on.exit(if (file_exists(o)) file.remove(b) else file.rename(b, i), add = TRUE)
}
curl::curl_download(u, o)
save_history(o)
})
invisible(output)
}
#' @param dir A directory under which all \file{.png}, \file{.jpeg}, and
#' \file{.webp} files are to be compressed.
#' @param ... Arguments passed to [tinify()].
#' @rdname tinify
#' @export
tinify_dir = function(dir = '.', ...) {
tinify(all_files('[.](png|jpe?g|webp)$', dir), ...)
}
#' Shrink images to a maximum width
#'
#' Use [magick::image_resize()] to shrink an
#' image if its width is larger than the value specified by the argument
#' `width`, and optionally call [tinify()] to compress it.
#' @param width The desired maximum width of images.
#' @param dir The directory of images.
#' @param files A vector of image file paths. By default, this is all
#' \file{.png}, \file{.jpeg}, and \file{.webp} images under `dir`.
#' @param tinify Whether to compress images using [tinify()].
#' @export
#' @examples
#' f = xfun:::all_files('[.](png|jpe?g)$', R.home('doc'))
#' file.copy(f, tempdir())
#' f = file.path(tempdir(), basename(f))
#' magick::image_info(magick::image_read(f)) # some widths are larger than 300
#' xfun::shrink_images(300, files = f)
#' magick::image_info(magick::image_read(f)) # all widths <= 300 now
#' file.remove(f)
shrink_images = function(
width = 800, dir = '.', files = all_files('[.](png|jpe?g|webp)$', dir),
tinify = FALSE
) {
for (f in files) {
x = magick::image_read(f)
if (magick::image_info(x)$width <= width) next
x = magick::image_resize(x, sprintf('%dx', width))
magick::image_write(x, f)
}
if (tinify) tinify(files, identity)
}
#' Upload an image to imgur.com
#'
#' This function uses the \pkg{curl} package or the system command `curl`
#' (whichever is available) to upload an image to <https://imgur.com>.
#'
#' One application is to upload local image files to Imgur when knitting a
#' document with \pkg{knitr}: you can set `knitr::opts_knit$set(upload.fun =
#' xfun::upload_imgur)`, so the output document does not need local image files
#' any more, and it is ready to be published online.
#' @param file Path to the image file to be uploaded.
#' @param key Client ID for Imgur. It can be set via either the global option
#' `xfun.upload_imgur.key` or the environment variable
#' `R_XFUN_UPLOAD_IMGUR_KEY` (see [xfun::env_option()]). If neither is set,
#' this uses a client ID registered by Yihui Xie.
#' @param use_curl Whether to use the R package \pkg{curl} to upload the image.
#' If `FALSE`, the system command `curl` will be used.
#' @param include_xml Whether to include the XML response in the returned value.
#' @return A character string of the link to the image. If `include_xml = TRUE`,
#' this string carries an attribute named `XML`, which is the XML response
#' from Imgur (it will be parsed by \pkg{xml2} if available). See Imgur API in
#' the references.
#' @author Yihui Xie, adapted from the \pkg{imguR} package by Aaron Statham
#' @note Please register your own Imgur application to get your client ID; you
#' can certainly use mine, but this ID is in the public domain so everyone has
#'   access to all images associated with it.
#' @references A demo: <https://yihui.org/knitr/demo/upload/>
#' @export
#' @examples \dontrun{
#' f = tempfile(fileext = '.png')
#' png(f); plot(rnorm(100), main = R.version.string); dev.off()
#'
#' res = imgur_upload(f, include_xml = TRUE)
#' res # link to original URL of the image
#' attr(res, 'XML') # all information
#' if (interactive()) browseURL(res)
#'
#' # to use your own key
#' options(xfun.upload_imgur.key = 'your imgur key')
#' }
upload_imgur = function(
file, key = env_option('xfun.upload_imgur.key', '9f3460e67f308f6'),
use_curl = loadable('curl'), include_xml = FALSE
) {
if (!is.character(key)) stop('The Imgur API Key must be a character string!')
api = 'https://api.imgur.com/3/image.xml'
hdr = paste('Authorization: Client-ID', key)
if (use_curl) {
h = curl::new_handle(httpheader = hdr)
curl::handle_setform(h, image = curl::form_file(file))
res = curl::curl_fetch_memory(api, h)$content
} else {
file = path.expand(file)
res = system2(
'curl', shQuote(c('-H', hdr, '-F', paste0('image=@', file), '-s', api)),
stdout = TRUE
)
res = one_string(res)
if (res == '') stop('Failed to upload ', file)
}
if (loadable('xml2')) {
res = xml2::as_list(xml2::read_xml(res))
link = res[[1]]$link[[1]]
} else {
if (is.raw(res)) res = rawToChar(res)
link = grep_sub('.*<link>([^<]+)</link>.*', '\\1', res)
}
if (length(link) != 1) stop(
'Failed to upload ', file, sprintf(' (reason: %s)', if (is.character(res)) {
grep_sub('.*<error>([^<]+)</error>.*', '\\1', res)
} else res[[1]]$error[[1]])
)
if (include_xml) structure(link, XML = res) else link
}
# ---- end of R/image.R ----
#' Read / write files encoded in UTF-8
#'
#' Read or write files, assuming they are encoded in UTF-8. `read_utf8()`
#' is roughly `readLines(encoding = 'UTF-8')` (a warning will be issued if
#' non-UTF8 lines are found), and `write_utf8()` calls
#' `writeLines(enc2utf8(text), useBytes = TRUE)`.
#'
#' The function `append_utf8()` appends UTF-8 content to a file or
#' connection based on `read_utf8()` and `write_utf8()`, and
#' optionally sorts the content. The function `append_unique()` appends
#' unique lines to a file or connection.
#' @param con A connection or a file path.
#' @param error Whether to signal an error when non-UTF8 characters are detected
#' (if `FALSE`, only a warning message is issued).
#' @param text A character vector (will be converted to UTF-8 via
#' [enc2utf8()]).
#' @param ... Other arguments passed to [writeLines()] (except
#' `useBytes`, which is `TRUE` in `write_utf8()`).
#' @export
read_utf8 = function(con, error = FALSE) {
# users may have set options(encoding = 'UTF-8'), which usually won't help but
# will bring more trouble than good, so we reset this option temporarily
opts = options(encoding = 'native.enc'); on.exit(options(opts), add = TRUE)
x = readLines(con, encoding = 'UTF-8', warn = FALSE)
i = invalid_utf8(x)
n = length(i)
if (n > 0) (if (error) stop else warning)(
if (is.character(con)) c('The file ', con, ' is not encoded in UTF-8. '),
'These lines contain invalid UTF-8 characters: ',
paste(c(head(i), if (n > 6) '...'), collapse = ', ')
)
x
}
#' @rdname read_utf8
#' @export
write_utf8 = function(text, con, ...) {
if (is.null(text)) text = character(0)
if (identical(con, '')) {
cat(text, sep = '\n', file = con)
} else {
# prevent re-encoding the text in the file() connection in writeLines()
# https://kevinushey.github.io/blog/2018/02/21/string-encoding-and-r/
opts = options(encoding = 'native.enc'); on.exit(options(opts), add = TRUE)
writeLines(enc2utf8(text), con, ..., useBytes = TRUE)
}
}
#' @param sort Logical (`FALSE` means not to sort the content) or a
#' function to sort the content; `TRUE` is equivalent to
#' `base::sort`.
#' @rdname read_utf8
#' @export
append_utf8 = function(text, con, sort = TRUE) {
x = read_utf8(con, error = TRUE)
x = c(x, text)
if (is.logical(sort)) sort = if (sort) base::sort else identity
if (is.function(sort)) x = sort(x)
write_utf8(x, con)
}
#' @rdname read_utf8
#' @export
append_unique = function(text, con, sort = function(x) base::sort(unique(x))) {
append_utf8(text, con, sort)
}
# which lines are invalid UTF-8
invalid_utf8 = function(x) {
which(!is_utf8(x))
}
test_utf8 = function(x) {
is.na(x) | !is.na(iconv(x, 'UTF-8', 'UTF-8'))
}
# validUTF8() was added to base R 3.3.0
is_utf8 = function(x) {
if ('validUTF8' %in% ls(baseenv())) validUTF8(x) else test_utf8(x)
}
#' Read a text file and concatenate the lines by `'\n'`
#'
#' The source code of this function should be self-explanatory.
#' @param file Path to a text file (should be encoded in UTF-8).
#' @return A character string of text lines concatenated by `'\n'`.
#' @export
#' @examples
#' xfun::file_string(system.file('DESCRIPTION', package = 'xfun'))
file_string = function(file) {
x = read_utf8(file)
# paste converts 0-length character() into 1-length ""
if (length(x)) x = paste(x, collapse = '\n')
raw_string(x)
}
#' Read all records of a binary file as a raw vector by default
#'
#' This is a wrapper function of [readBin()] with default arguments
#' `what = "raw"` and `n = `[`file.size`]`(file)`, which means it
#' will read the full content of a binary file as a raw vector by default.
#' @param file,what,n,... Arguments to be passed to `readBin()`.
#' @return A vector returned from `readBin()`.
#' @export
#' @examples
#' f = tempfile()
#' cat('abc', file = f)
#' xfun::read_bin(f)
#' unlink(f)
read_bin = function(file, what = 'raw', n = file.size(file), ...) {
readBin(file, what, n, ...)
}
#' Read all text files and concatenate their content
#'
#' Read files one by one, and optionally add text before/after the content. Then
#' combine all content into one character vector.
#' @param files A vector of file paths.
#' @param before,after A function that takes one file path as the input and
#' returns values to be added before or after the content of the file.
#' Alternatively, they can be constant values to be added.
#' @return A character vector.
#' @export
#' @examples
#' # two files in this package
#' fs = system.file('scripts', c('call-fun.R', 'child-pids.sh'), package = 'xfun')
#' xfun::read_all(fs)
#'
#' # add file paths before file content and an empty line after content
#' xfun::read_all(fs, before = function(f) paste('#-----', f, '-----'), after = '')
#'
#' # add constants
#' xfun::read_all(fs, before = '/*', after = c('*/', ''))
read_all = function(files, before = function(f) NULL, after = function(f) NULL) {
b = before; a = after
x = unlist(lapply(files, function(f) {
c(if (is.function(b)) b(f) else b, read_utf8(f), if (is.function(a)) a(f) else a)
}))
raw_string(x)
}
#' Read a text file, process the text with a function, and write the text back
#'
#' Read a text file with the UTF-8 encoding, apply a function to the text, and
#' write back to the original file if the processed text is different from the
#' original input.
#'
#' `sort_file()` is an application of `process_file()`, with the processing
#' function being [sort()], i.e., it sorts the text lines in a file and write
#' back the sorted text.
#' @param file Path to a text file.
#' @param fun A function to process the text.
#' @param x The content of the file.
#' @param ... Arguments to be passed to `process_file()`.
#' @return If `file` is provided, invisible `NULL` (the file is updated as a
#' side effect), otherwise the processed content (as a character vector).
#' @export
#' @examples f = tempfile()
#' xfun::write_utf8('Hello World', f)
#' xfun::process_file(f, function(x) gsub('World', 'woRld', x))
#' xfun::read_utf8(f) # see if it has been updated
#' file.remove(f)
process_file = function(file, fun = identity, x = read_utf8(file)) {
x2 = fun(x)
if (missing(file)) x2 else {
if ((length(x2) != length(x)) || !all(x2 == x)) write_utf8(x2, file)
}
}
#' @rdname process_file
#' @export
sort_file = function(..., fun = sort) {
process_file(fun = fun, ...)
}
#' Search and replace strings in files
#'
#' These functions provide the "file" version of [gsub()], i.e.,
#' they perform searching and replacement in files via `gsub()`.
#' @param file Path of a single file.
#' @param ... For `gsub_file()`, arguments passed to `gsub()`. For
#' other functions, arguments passed to `gsub_file()`. Note that the
#' argument `x` of `gsub()` is the content of the file.
#' @param rw_error Whether to signal an error if the file cannot be read or
#' written. If `FALSE`, the file will be ignored (with a warning).
#' @param files A vector of file paths.
#' @param dir Path to a directory (all files under this directory will be
#' replaced).
#' @param recursive Whether to find files recursively under a directory.
#' @param ext A vector of filename extensions (without the leading periods).
#' @param mimetype A regular expression to filter files based on their MIME
#' types, e.g., `'^text/'` for plain text files. This requires the
#' \pkg{mime} package.
#' @note These functions perform in-place replacement, i.e., the files will be
#' overwritten. Make sure you backup your files in advance, or use version
#' control!
#' @export
#' @examples library(xfun)
#' f = tempfile()
#' writeLines(c('hello', 'world'), f)
#' gsub_file(f, 'world', 'woRld', fixed = TRUE)
#' readLines(f)
gsub_file = function(file, ..., rw_error = TRUE) {
if (!(file.access(file, 2) == 0 && file.access(file, 4) == 0)) {
(if (rw_error) stop else warning)('Unable to read or write to ', file)
if (!rw_error) return(invisible())
}
x1 = tryCatch(read_utf8(file, error = TRUE), error = function(e) if (rw_error) stop(e))
if (is.null(x1)) return(invisible())
x2 = gsub(x = x1, ...)
if (!identical(x1, x2)) write_utf8(x2, file)
}
#' @rdname gsub_file
#' @export
gsub_files = function(files, ...) {
for (f in files) gsub_file(f, ...)
}
#' @rdname gsub_file
#' @export
gsub_dir = function(..., dir = '.', recursive = TRUE, ext = NULL, mimetype = '.*') {
files = list.files(dir, full.names = TRUE, recursive = recursive)
if (length(ext)) files = files[file_ext(files) %in% ext]
if (mimetype != '.*') files = files[grep(mimetype, mime::guess_type(files))]
gsub_files(files, ...)
}
#' @rdname gsub_file
#' @export
gsub_ext = function(ext, ..., dir = '.', recursive = TRUE) {
gsub_dir(..., dir = dir, recursive = recursive, ext = ext)
}
#' Perform replacement with `gsub()` on elements matched from `grep()`
#'
#' This function is a shorthand of `gsub(pattern, replacement,
#' grep(pattern, x, value = TRUE))`.
#' @param pattern,replacement,x,... Passed to [grep()] and
#' `gsub()`.
#' @return A character vector.
#' @export
#' @examples # find elements that matches 'a[b]+c' and capitalize 'b' with perl regex
#' xfun::grep_sub('a([b]+)c', 'a\\U\\1c', c('abc', 'abbbc', 'addc', '123'), perl = TRUE)
grep_sub = function(pattern, replacement, x, ...) {
x = grep(pattern, x, value = TRUE, ...)
gsub(pattern, replacement, x, ...)
}
#' Try various methods to download a file
#'
#' Try all possible methods in [download.file()] (e.g.,
#' `libcurl`, `curl`, `wget`, and `wininet`) and see if any
#' method can succeed. The reason to enumerate all methods is that sometimes the
#' default method does not work, e.g.,
#' <https://stat.ethz.ch/pipermail/r-devel/2016-June/072852.html>.
#' @param url The URL of the file.
#' @param output Path to the output file. By default, it is determined by
#' [url_filename()].
#' @param ... Other arguments to be passed to [download.file()]
#' (except `method`).
#' @param .error An error message to signal when the download fails.
#' @note To allow downloading large files, the `timeout` option in
#' [options()] will be temporarily set to one hour (3600 seconds)
#' inside this function when this option has the default value of 60 seconds.
#' If you want a different `timeout` value, you may set it via
#' `options(timeout = N)`, where `N` is the number of seconds (not
#' 60).
#' @return The integer code `0` for success, or an error if none of the
#' methods work.
#' @export
download_file = function(
url, output = url_filename(url), ...,
.error = 'No download method works (auto/wininet/wget/curl/lynx)'
) {
if (getOption('timeout') == 60L) {
opts = options(timeout = 3600) # one hour
on.exit(options(opts), add = TRUE)
}
download = function(method = 'auto') suppressWarnings({
tryCatch(download.file(url, output, ..., method = method), error = function(e) 1L)
})
for (method in c(if (is_windows()) 'wininet', 'libcurl', 'auto')) {
if (download(method = method) == 0) return(0L)
}
# check for libcurl/curl/wget/lynx, call download.file with appropriate method
if (Sys.which('curl') != '') {
# curl needs to add a -L option to follow redirects
opts2 = if (is.null(getOption('download.file.extra')))
options(download.file.extra = c('-L', '--fail'))
res = download(method = 'curl')
options(opts2)
if (res == 0) return(res)
}
if (Sys.which('wget') != '') {
if ((res <- download(method = 'wget')) == 0) return(res)
}
if (Sys.which('lynx') != '') {
if ((res <- download(method = 'lynx')) == 0) return(res)
}
stop(.error)
}
#' Test if a URL is accessible
#'
#' Try to send a `HEAD` request to a URL using
#' [curlGetHeaders()] or the \pkg{curl} package, and see if it
#' returns a successful status code.
#' @param x A URL as a character string.
#' @param use_curl Whether to use the \pkg{curl} package or the
#' `curlGetHeaders()` function in base R to send the request to the URL.
#' By default, \pkg{curl} will be used when base R does not have the
#' \command{libcurl} capability (which should be rare).
#' @param ... Arguments to be passed to `curlGetHeaders()`.
#' @return `TRUE` or `FALSE`.
#' @export
#' @examples xfun::url_accessible('https://yihui.org')
url_accessible = function(x, use_curl = !capabilities('libcurl'), ...) {
try_status = function(code) tryCatch(code < 400, error = function(e) FALSE)
if (use_curl) {
h = curl::new_handle()
curl::handle_setopt(h, customrequest = 'HEAD', nobody = TRUE)
try_status(curl::curl_fetch_memory(x, h)$status_code)
} else {
# use curlGetHeaders() instead
try_status(attr(curlGetHeaders(x, ...), 'status'))
}
}
#' Generate a message with `cat()`
#'
#' This function is similar to [message()], and the difference is
#' that `msg_cat()` uses [cat()] to write out the message,
#' which is sent to [stdout()] instead of [stderr()]. The
#' message can be suppressed by [suppressMessages()].
#' @param ... Character strings of messages, which will be concatenated into one
#' string via `paste(c(...), collapse = '')`.
#' @note By default, a newline will not be appended to the message. If you need
#' a newline, you have to explicitly add it to the message (see
#' \sQuote{Examples}).
#' @return Invisible `NULL`, with the side-effect of printing the message.
#' @seealso This function was inspired by `rlang::inform()`.
#' @export
#' @examples
#' {
#' # a message without a newline at the end
#' xfun::msg_cat('Hello world!')
#' # add a newline at the end
#' xfun::msg_cat(' This message appears right after the previous one.\n')
#' }
#' suppressMessages(xfun::msg_cat('Hello world!'))
msg_cat = function(...) {
x = paste(c(...), collapse = '')
withRestarts({
signalCondition(simpleMessage(x))
cat(x)
}, muffleMessage = function() invisible(NULL))
}
# ---- end of R/io.R ----
#' A simple JSON serializer
#'
#' A JSON serializer that only works on a limited set of R data types (`NULL`,
#' lists, logical scalars, character/numeric vectors). A character string of the
#' class `JS_EVAL` is treated as raw JavaScript, so will not be quoted. The
#' function `json_vector()` converts an atomic R vector to JSON.
#' @param x An R object.
#' @export
#' @return A character string.
#' @seealso The \pkg{jsonlite} package provides a full JSON serializer.
#' @examples library(xfun)
#' tojson(NULL); tojson(1:10); tojson(TRUE); tojson(FALSE)
#' cat(tojson(list(a = 1, b = list(c = 1:3, d = 'abc'))))
#' cat(tojson(list(c('a', 'b'), 1:5, TRUE)))
#'
#' # the class JS_EVAL is originally from htmlwidgets::JS()
#' JS = function(x) structure(x, class = 'JS_EVAL')
#' cat(tojson(list(a = 1:5, b = JS('function() {return true;}'))))
tojson = function(x) {
if (is.null(x)) return('null')
if (is.logical(x)) {
if (length(x) != 1 || any(is.na(x)))
stop('Logical values of length > 1 and NA are not supported')
return(tolower(as.character(x)))
}
if (is.character(x) && inherits(x, 'JS_EVAL')) return(paste(x, collapse = '\n'))
if (is.character(x) || is.numeric(x)) {
return(json_vector(x, length(x) != 1 || inherits(x, 'AsIs'), is.character(x)))
}
if (is.list(x)) {
if (length(x) == 0) return('{}')
return(if (is.null(names(x))) {
json_vector(unlist(lapply(x, tojson)), TRUE, quote = FALSE)
} else {
nms = quote_string(names(x))
paste0('{\n', paste(nms, unlist(lapply(x, tojson)), sep = ': ', collapse = ',\n'), '\n}')
})
}
stop('The class of x is not supported: ', paste(class(x), collapse = ', '))
}
#' @param to_array Whether to convert a vector to a JSON array (use `[]`).
#' @param quote Whether to double quote the elements.
#' @rdname tojson
#' @export
json_vector = function(x, to_array = FALSE, quote = TRUE) {
if (quote) {
x = quote_string(x)
x = gsub('\n', '\\\\n', x)
x = gsub('\b', '\\\\b', x)
x = gsub('\f', '\\\\f', x)
x = gsub('\r', '\\\\r', x)
x = gsub('\t', '\\\\t', x)
}
if (to_array) paste0('[', paste(x, collapse = ', '), ']') else x
}
# escape \ and " in strings, and quote them
quote_string = function(x) {
x = gsub('(["\\])', "\\\\\\1", x)
if (length(x)) x = paste0('"', x, '"')
x
}
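# A small illustration of quote_string() (expected output shown in comments;
# wrapped in `if (FALSE)` so it is not executed at build time).
if (FALSE) {
  cat(quote_string(c('a"b', 'back\\slash')), sep = '\n')
  # "a\"b"
  # "back\\slash"
}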
# ---- end of R/json.R ----
# functions extracted from knitr and to be reused in other packages like litedown
#' Parse comma-separated chunk options
#'
#' For \pkg{knitr} and R Markdown documents, code chunk options can be written
#' using the comma-separated syntax (e.g., `opt1=value1, opt2=value2`). This
#' function parses these options and returns a list. If an option is not named,
#' it will be treated as the chunk label.
#' @param x The chunk options as a string.
#' @return A list of chunk options.
#' @export
#' @examples
#' xfun::csv_options('foo, eval=TRUE, fig.width=5, echo=if (TRUE) FALSE')
csv_options = function(x) {
x = one_string(x)
res = handle_error(
eval(parse_only(paste('alist(', quote_label(x), ')'))),
function(e, loc) c(
sprintf('Invalid syntax for chunk options%s:\n', loc), x,
'\nPlease see documentation at https://yihui.org/knitr/options/.\n'
)
)
idx = which(names(res) == '') # which option is not named?
# remove empty options
j = NULL
for (i in idx) if (identical(res[[i]], alist(,)[[1]])) j = c(j, i)
if (length(j)) res[j] = NULL
idx = if (is.null(names(res)) && length(res) == 1L) 1L else which(names(res) == '')
if ((n <- length(idx)) > 1L || (length(res) > 1L && is.null(names(res)))) stop(
'Invalid chunk options: ', x,
"\n\nAll options must be of the form 'tag=value' except for the chunk label."
)
if (is.null(res$label)) {
if (n == 0L) res$label = '' else names(res)[idx] = 'label'
}
if (!is.character(res$label))
res$label = gsub(' ', '', as.character(as.expression(res$label)))
if (res$label == '') res$label = NULL
res
}
# quote the chunk label if necessary
quote_label = function(x) {
x = gsub('^\\s*,?', '', x)
if (grepl('^\\s*[^\'"](,|\\s*$)', x)) {
# <<a,b=1>>= ---> <<'a',b=1>>=
x = gsub('^\\s*([^\'"])(,|\\s*$)', "'\\1'\\2", x)
} else if (grepl('^\\s*[^\'"](,|[^=]*(,|\\s*$))', x)) {
# <<abc,b=1>>= ---> <<'abc',b=1>>=
x = gsub('^\\s*([^\'"][^=]*)(,|\\s*$)', "'\\1'\\2", x)
}
x
}
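# A sketch of how quote_label() quotes unquoted chunk labels (expected results
# in comments; not executed).
if (FALSE) {
  quote_label('fig-a, fig.width=5')    # "'fig-a', fig.width=5"
  quote_label("'fig-a', fig.width=5")  # unchanged (label already quoted)
  quote_label('a, b=1')                # "'a', b=1"
}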
# comment characters for various languages
comment_chars = list(
`#` = c('awk', 'bash', 'coffee', 'gawk', 'julia', 'octave', 'perl', 'powershell', 'python', 'r', 'ruby', 'sed', 'stan'),
'//' = c('asy', 'cc', 'csharp', 'd3', 'dot', 'fsharp', 'go', 'groovy', 'java', 'js', 'node', 'ojs', 'Rcpp', 'sass', 'scss', 'scala'),
`%%` = c('mermaid'),
`%` = c('matlab', 'tikz'),
`/* */` = c('c', 'css'),
`* ;` = c('sas'),
`--` = c('haskell', 'lua', 'mysql', 'psql', 'sql'),
`!` = c('fortran', 'fortran95'),
`*` = c('stata')
)
# reshape it using the language name as the index, i.e., from list(char = lang)
# to list(lang = char)
comment_chars = local({
res = list(apl = '\u235D')
for (i in names(comment_chars)) {
chars = comment_chars[[i]]
res = c(res, setNames(rep(list(strsplit(i, ' ')[[1]]), length(chars)), chars))
}
res[order(names(res))]
})
get_option_comment = function(engine) {
char = comment_chars[[engine]] %||% '#'
s1 = paste0(char[[1]], '| ')
s2 = ifelse(length(char) > 1, char[[2]], '')
list(start = s1, end = s2)
}
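# Expected behavior of get_option_comment(), shown as a sketch (not executed):
# the start/end strings delimit in-body chunk options for a given engine.
if (FALSE) {
  get_option_comment('r')    # list(start = "#| ", end = "")
  get_option_comment('css')  # list(start = "/*| ", end = "*/")
}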
#' Divide chunk options from the code chunk body
#'
#' Chunk options can be written in special comments (e.g., after `#|` for R code
#' chunks) inside a code chunk. This function partitions these options from the
#' chunk body.
#' @param engine The name of the language engine (to determine the appropriate
#' comment character).
#' @param code A character vector (lines of code).
#' @return A list with the following items:
#'
#' - `options`: The parsed options (if there are any) as a list.
#' - `src`: The part of the input that contains the options.
#' - `code`: The part of the input that contains the code.
#'
#' @note Chunk options must be written on _consecutive_ lines (i.e., all lines
#' must start with the special comment prefix such as `#|`) at the beginning
#' of the chunk body.
#' @export
#' @examples
#' # parse yaml-like items
#' yaml_like = c("#| label: mine", "#| echo: true", "#| fig.width: 8", "#| foo: bar", "1 + 1")
#' writeLines(yaml_like)
#' xfun::divide_chunk("r", yaml_like)
#'
#' # parse CSV syntax
#' csv_like = c("#| mine, echo = TRUE, fig.width = 8, foo = 'bar'", "1 + 1")
#' writeLines(csv_like)
#' xfun::divide_chunk("r", csv_like)
divide_chunk = function(engine, code) {
res = list(options = NULL, src = NULL, code = code)
# mask out empty blocks
if (length(code) == 0) return(res)
opt_comment = get_option_comment(engine)
s1 = opt_comment$start
s2 = opt_comment$end
# check for option comments
i1 = startsWith(code, s1)
i2 = endsWith(trimws(code, 'right'), s2)
# if "commentChar| " is not found, try "#| " instead
  if (!i1[1] && !identical(s1, '#| ')) {
s1 = '#| '; s2 = ''
i1 = startsWith(code, s1); i2 = TRUE
}
m = i1 & i2
# has to have at least one matched line at the beginning
if (!m[[1]]) return(res)
# divide into yaml and code
if (all(m)) {
src = code
code = NULL
} else {
src = head(code, which.min(m) - 1)
code = tail(code, -length(src))
}
# trim right
if (any(i2)) src = trimws(src, 'right')
# extract meta from comments, then parse it
meta = substr(src, nchar(s1) + 1, nchar(src) - nchar(s2))
# see if the metadata looks like YAML or CSV
if (grepl('^[^ :]+:($|\\s)', meta[1])) {
meta = yaml_load(meta, envir = FALSE)
if (!is.list(meta) || length(names(meta)) == 0) {
warning('Invalid YAML option format in chunk: \n', one_string(meta), '\n')
meta = list()
}
} else {
meta = csv_options(meta)
}
# normalize field name 'id' to 'label' if provided
  if (length(lab <- unlist(meta[c('label', 'id')]))) meta$label = unname(lab[1])
meta$id = NULL
# extract code
if (length(code) > 0 && is_blank(code[[1]])) {
code = code[-1]
src = c(src, '')
}
list(options = meta, src = src, code = code)
}
# ---- end of R/knitr.R ----
#' Find the indices of lines in Markdown that are prose (not code blocks)
#'
#' Filter out the indices of lines between code block fences such as \verb{```}
#' (could be three or four or more backticks).
#' @param x A character vector of text in Markdown.
#' @param warn Whether to emit a warning when code fences are not balanced.
#' @note If the code fences are not balanced (e.g., a starting fence without an
#' ending fence), this function will treat all lines as prose.
#' @return An integer vector of indices of lines that are prose in Markdown.
#' @export
#' @examples library(xfun)
#' prose_index(c('a', '```', 'b', '```', 'c'))
#' prose_index(c('a', '````', '```r', '1+1', '```', '````', 'c'))
prose_index = function(x, warn = TRUE) {
idx = NULL; r = '^(\\s*```+).*'; s = ''
for (i in setdiff(grep(r, x), grep('-->\\s*$', x))) {
if (s == '') {
s = gsub(r, '\\1', x[i]); idx = c(idx, i); next
}
# look for the next line with the same amount of backticks (end of block)
if (grepl(paste0('^', s), x[i])) {
idx = c(idx, i); s = ''
}
}
xi = seq_along(x); n = length(idx)
if (n == 0) return(xi)
if (n %% 2 != 0) {
if (warn) warning('Code fences are not balanced')
# treat all lines as prose
return(xi)
}
idx2 = matrix(idx, nrow = 2)
idx2 = unlist(mapply(seq, idx2[1, ], idx2[2, ], SIMPLIFY = FALSE))
xi[-idx2]
}
#' Protect math expressions in pairs of backticks in Markdown
#'
#' For Markdown renderers that do not support LaTeX math, we need to protect
#' math expressions as verbatim code (in a pair of backticks), because some
#' characters in the math expressions may be interpreted as Markdown syntax
#' (e.g., a pair of underscores may make text italic). This function detects
#' math expressions in Markdown (by heuristics), and wrap them in backticks.
#'
#' Expressions in pairs of dollar signs or double dollar signs are treated as
#' math, if there are no spaces after the starting dollar sign, or before the
#' ending dollar sign. There should be spaces before the starting dollar sign,
#' unless the math expression starts from the very beginning of a line. For a
#' pair of single dollar signs, the ending dollar sign should not be followed by
#' a number. With these assumptions, there should not be too many false
#' positives when detecing math expressions.
#'
#' Besides, LaTeX environments (\verb{\begin{*}} and \verb{\end{*}}) are also
#' protected in backticks.
#' @param x A character vector of text in Markdown.
#' @param token A character string to wrap math expressions at both ends. This
#' can be a unique token so that math expressions can be reliably identified
#' and restored after the Markdown text is converted.
#' @return A character vector with math expressions in backticks.
#' @note If you are using Pandoc or the \pkg{rmarkdown} package, there is no
#' need to use this function, because Pandoc's Markdown can recognize math
#' expressions.
#' @export
#' @examples library(xfun)
#' protect_math(c('hi $a+b$', 'hello $$\\alpha$$', 'no math here: $x is $10 dollars'))
#' protect_math(c('hi $$', '\\begin{equation}', 'x + y = z', '\\end{equation}'))
#' protect_math('$a+b$', '===')
protect_math = function(x, token = '') {
i = prose_index(x)
if (length(i)) x[i] = escape_math(x[i], token)
x
}
escape_math = function(x, token = '') {
# replace $x$ with `\(x\)` (protect inline math in <code></code>)
m = gregexpr('(?<=^|[\\s])[$](?! )[^$]+?(?<! )[$](?![$0123456789])', x, perl = TRUE)
regmatches(x, m) = lapply(regmatches(x, m), function(z) {
if (length(z) == 0) return(z)
z = sub('^[$]', paste0('`', token, '\\\\('), z)
z = sub('[$]$', paste0('\\\\)', token, '`'), z)
z
})
# replace $$x$$ with `$$x$$` (protect display math)
m = gregexpr('(?<=^|[\\s])[$][$](?! )[^$]+?(?<! )[$][$]', x, perl = TRUE)
regmatches(x, m) = lapply(regmatches(x, m), function(z) {
if (length(z) == 0) return(z)
paste0('`', token, z, token, '`')
})
# now, if there are still lines starting and ending with $$, they might be
# math expressions of display style spanning multiple lines, e.g.,
# $$\alpha +
# \beta$$
# we assume that $$ can only appear once on one line
i = vapply(gregexpr('[$]', x), length, integer(1)) == 2
if (any(i)) {
x[i] = gsub('^(\\s*)([$][$][^ ]+)', paste0('\\1`', token, '\\2'), x[i], perl = TRUE)
x[i] = gsub('([^ ][$][$])$', paste0('\\1', token, '`'), x[i], perl = TRUE)
}
# equation environments (\begin and \end must match)
i1 = grep('^\\\\begin\\{[^}]+\\}$', x)
i2 = grep('^\\\\end\\{[^}]+\\}$', x)
if (length(i1) == length(i2)) {
# TODO: do not protect inner environments in case of nested environments (#57)
x[i1] = paste0('`', token, x[i1])
x[i2] = paste0(x[i2], token, '`')
}
x
}
#' Create a fenced block in Markdown
#'
#' Wrap content with fence delimiters such as backticks (code blocks) or colons
#' (fenced Div). Optionally the fenced block can have attributes.
#' @param x A character vector of the block content.
#' @param attrs A vector of block attributes.
#' @param fence The fence string, e.g., `:::` or ```` ``` ````. This will be
#' generated from the `char` argument by default.
#' @param char The fence character to be used to generate the fence string by
#' default.
#' @return `fenced_block()` returns a character vector that contains both the
#' fences and content.
#' @export
#' @examples
#' # code block with class 'r' and ID 'foo'
#' xfun::fenced_block('1+1', c('.r', '#foo'))
#' # fenced Div
#' xfun::fenced_block('This is a **Div**.', char = ':')
fenced_block = function(x, attrs = NULL, fence = make_fence(x, char), char = '`') {
c('', paste0(fence, block_attr(attrs)), x, fence)
}
#' @return `make_fence()` returns a character string. If the block content
#' contains `N` fence characters (e.g., backticks), use `N + 1` characters as
#' the fence.
#' @rdname fenced_block
#' @export
#' @examples
#' # three backticks by default
#' xfun::make_fence('1+1')
#' # needs five backticks for the fences because content has four
#' xfun::make_fence(c('````r', '1+1', '````'))
make_fence = function(x, char = '`') {
f = strrep(char, 3)
while (any(grepl(f, x, fixed = TRUE))) f = paste0(f, char)
f
}
# concatenate block attributes for fenced blocks
block_attr = function(attrs) {
a = paste(attrs, collapse = ' ')
if (grepl('[ .=]', a)) a = paste0(' {', a, '}')
a
}
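# A sketch of block_attr() results (comments show the expected return values;
# not executed): bare attributes are kept as is, and anything containing
# spaces, dots, or equal signs is wrapped in braces.
if (FALSE) {
  block_attr('r')              # "r"
  block_attr(c('.r', '#foo'))  # " {.r #foo}"
  block_attr(NULL)             # ""
}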
#' Embed a file, multiple files, or directory on an HTML page
#'
#' For a file, first encode it into base64 data (a character string). Then
#' generate a hyperlink of the form \samp{<a href="base64 data"
#' download="filename">Download filename</a>}. The file can be downloaded when
#' the link is clicked in modern web browsers. For a directory, it will be
#' compressed as a zip archive first, and the zip file is passed to
#' `embed_file()`. For multiple files, they are also compressed to a zip
#' file first.
#'
#' These functions can be called in R code chunks in R Markdown documents with
#' HTML output formats. You may embed an arbitrary file or directory in the HTML
#' output file, so that readers of the HTML page can download it from the
#' browser. A common use case is to embed data files for readers to download.
#' @param path Path to the file(s) or directory.
#' @param name The default filename to use when downloading the file. Note that
#' for `embed_dir()`, only the base name (of the zip filename) will be
#' used.
#' @param text The text for the hyperlink.
#' @param ... For `embed_file()`, additional arguments to be passed to
#' `htmltools::a()` (e.g., `class = 'foo'`). For `embed_dir()`
#' and `embed_files()`, arguments passed to `embed_file()`.
#' @note Windows users may need to install Rtools to obtain the \command{zip}
#' command to use `embed_dir()` and `embed_files()`.
#'
#' These functions require R packages \pkg{mime} and \pkg{htmltools}. If you
#' have installed the \pkg{rmarkdown} package, these packages should be
#' available, otherwise you need to install them separately.
#'
#' Currently Internet Explorer does not support downloading embedded files
#' (<https://caniuse.com/#feat=download>). Chrome has a 2MB limit on the
#' file size.
#' @return An HTML tag \samp{<a>} with the appropriate attributes.
#' @export
#' @examples
#' logo = xfun:::R_logo()
#' link = xfun::embed_file(logo, text = 'Download R logo')
#' link
#' if (interactive()) htmltools::browsable(link)
embed_file = function(path, name = basename(path), text = paste('Download', name), ...) {
pkg_require(c('mime', 'htmltools'))
h = base64_uri(path)
htmltools::a(text, href = h, download = name, ...)
}
#' @rdname embed_file
#' @export
embed_dir = function(path, name = paste0(normalize_path(path), '.zip'), ...) {
name = gsub('/', '', basename(name))
in_dir(path, {
name = file.path(tempdir(), name); on.exit(file.remove(name), add = TRUE)
zip(name, '.'); embed_file(name, ...)
})
}
#' @rdname embed_file
#' @export
embed_files = function(path, name = with_ext(basename(path[1]), '.zip'), ...) {
name = file.path(tempdir(), basename(name))
on.exit(file.remove(name), add = TRUE)
zip(name, path)
embed_file(name, ...)
}
zip = function(name, ...) {
if (utils::zip(name, ...) != 0) stop('Failed to create the zip archive ', name)
invisible(0)
}
# ---- end of R/markdown.R ----
#' Test for types of operating systems
#'
#' Functions based on `.Platform$OS.type` and `Sys.info()` to test if
#' the current operating system is Windows, macOS, Unix, or Linux.
#' @rdname os
#' @export
#' @examples
#' library(xfun)
#' # only one of the following statements should be true
#' is_windows()
#' is_unix() && is_macos()
#' is_linux()
#' # In newer Macs, the CPU can be either Intel or Apple silicon
#' is_arm64() # TRUE on Apple silicon machines
is_windows = function() .Platform$OS.type == 'windows'
#' @rdname os
#' @export
is_unix = function() .Platform$OS.type == 'unix'
#' @rdname os
#' @export
is_macos = function() unname(Sys.info()['sysname'] == 'Darwin')
#' @rdname os
#' @export
is_linux = function() unname(Sys.info()['sysname'] == 'Linux')
#' @rdname os
#' @export
is_arm64 = function() Sys.info()[['machine']] %in% c('arm64', 'aarch64')
# ---- end of R/os.R ----
#' Attach or load packages, and automatically install missing packages if
#' requested
#'
#' `pkg_attach()` is a vectorized version of [library()] over
#' the `package` argument to attach multiple packages in a single function
#' call. `pkg_load()` is a vectorized version of
#' [requireNamespace()] to load packages (without attaching them).
#' The functions `pkg_attach2()` and `pkg_load2()` are wrappers of
#' `pkg_attach(install = TRUE)` and `pkg_load(install = TRUE)`,
#' respectively. `loadable()` is an abbreviation of
#' `requireNamespace(quietly = TRUE)`. `pkg_available()` tests if a
#' package with a minimal version is available.
#'
#' These are convenience functions that aim to solve these common problems: (1)
#' We often need to attach or load multiple packages, and it is tedious to type
#' several `library()` calls; (2) We are likely to want to install the
#' packages when attaching/loading them but they have not been installed.
#' @param ... Package names (character vectors, and must always be quoted).
#' @param install Whether to automatically install packages that are not
#' available using [install.packages()]. Besides `TRUE` and
#' `FALSE`, the value of this argument can also be a function to install
#' packages (`install = TRUE` is equivalent to `install =
#' install.packages`), or a character string `"pak"` (equivalent to
#' `install = pak::pkg_install`, which requires the \pkg{pak} package).
#' You are recommended to set a CRAN mirror in the global option `repos`
#' via [options()] if you want to automatically install packages.
#' @param message Whether to show the package startup messages (if any startup
#' messages are provided in a package).
#' @return `pkg_attach()` returns `NULL` invisibly. `pkg_load()`
#' returns a logical vector, indicating whether the packages can be loaded.
#' @seealso `pkg_attach2()` is similar to `pacman::p_load()`, but does
#' not allow non-standard evaluation (NSE) of the `...` argument, i.e.,
#' you must pass a real character vector of package names to it, and all names
#' must be quoted. Allowing NSE adds too much complexity with too little gain
#' (the only gain is that it saves your effort in typing two quotes).
#' @import utils
#' @export
#' @examples library(xfun)
#' pkg_attach('stats', 'graphics')
#' # pkg_attach2('servr') # automatically install servr if it is not installed
#'
#' (pkg_load('stats', 'graphics'))
pkg_attach = function(
..., install = FALSE, message = getOption('xfun.pkg_attach.message', TRUE)
) {
if (!message) library = function(...) {
suppressPackageStartupMessages(base::library(...))
}
for (i in c(...)) {
if (!identical(install, FALSE) && !loadable(i)) pkg_install(i, install)
library(i, character.only = TRUE)
}
}
#' @param error Whether to signal an error when certain packages cannot be loaded.
#' @rdname pkg_attach
#' @export
pkg_load = function(..., error = TRUE, install = FALSE) {
n = length(pkg <- c(...)); res = logical(n)
if (n == 0) return(invisible(res))
for (i in seq_len(n)) {
res[i] = loadable(p <- pkg[i])
if (!identical(install, FALSE) && !res[i]) {
pkg_install(p, install); res[i] = loadable(p)
}
}
if (error && any(!res)) stop('Package(s) not loadable: ', paste(pkg[!res], collapse = ' '))
invisible(res)
}
#' @param pkg A single package name.
#' @param strict If `TRUE`, use [requireNamespace()] to test if
#' a package is loadable; otherwise only check if the package is in
#' [`.packages`]`(TRUE)` (this does not really load the package, so it
#' is less rigorous but on the other hand, it can keep the current R session
#' clean).
#' @param new_session Whether to test if a package is loadable in a new R
#' session. Note that `new_session = TRUE` implies `strict = TRUE`.
#' @rdname pkg_attach
#' @export
loadable = function(pkg, strict = TRUE, new_session = FALSE) {
if (length(pkg) != 1L) stop("'pkg' must be a character vector of length one")
if (new_session) {
Rscript(c('-e', shQuote(sprintf('library("%s")', pkg))), stdout = FALSE, stderr = FALSE) == 0
} else {
if (strict) {
suppressPackageStartupMessages(requireNamespace(pkg, quietly = TRUE))
} else pkg %in% .packages(TRUE)
}
}
#' @param version A minimal version number. If `NULL`, only test if a
#' package is available and do not check its version.
#' @rdname pkg_attach
#' @export
pkg_available = function(pkg, version = NULL) {
loadable(pkg) && (is.null(version) || packageVersion(pkg) >= version)
}
#' @rdname pkg_attach
#' @export
pkg_attach2 = function(...) pkg_attach(..., install = TRUE)
#' @rdname pkg_attach
#' @export
pkg_load2 = function(...) pkg_load(..., install = TRUE)
pkg_require = function(pkgs, which = length(sys.calls()) - 1) {
f = func_name(which)
for (p in pkgs) if (!loadable(p)) stop2(
"The '", p, "' package is required by the function '", f, "' but not available.",
if (is_R_CMD_check()) c(
" If you are developing an R package, you need to declare the dependency on '",
p, "' in the DESCRIPTION file (e.g., in 'Imports')."
)
)
}
# update all packages in libraries that are writable by the current user
pkg_update = function(...) {
libs = .libPaths()
libs = libs[file.access(libs, 2) >= 0]
for (l in libs) update.packages(l, ask = FALSE, checkBuilt = TRUE, ...)
}
# allow users to specify a custom install.packages() function via the global
# option xfun.install.packages
pkg_install = function(pkgs, install = TRUE, ...) {
if (length(pkgs) == 0) return()
# in case the CRAN repo is not set up
repos = getOption('repos')
if (length(repos) == 0 || identical(repos, c(CRAN = '@CRAN@'))) {
opts = options(repos = c(CRAN = 'https://cran.rstudio.com'))
on.exit(options(opts), add = TRUE)
}
if (length(pkgs) > 1)
message('Installing ', length(pkgs), ' packages: ', paste(pkgs, collapse = ' '))
if (isTRUE(install)) install = getOption(
'xfun.install.packages',
if (is.na(Sys.getenv('RENV_PROJECT', NA)) || !loadable('renv')) install.packages else {
function(pkgs, lib = NULL, ...) renv::install(pkgs, library = lib, ...)
}
)
if (identical(install, 'pak')) install = pak::pkg_install
retry(install, pkgs, ..., .pause = 0)
}
#' Find out broken packages and reinstall them
#'
#' If a package is broken (i.e., not [loadable()]), reinstall it.
#'
#' Installed R packages could be broken for several reasons. One common reason
#' is that you have upgraded R to a newer `x.y` version, e.g., from `4.0.5` to
#' `4.1.0`, in which case you need to reinstall previously installed packages.
#' @param reinstall Whether to reinstall the broken packages, or only list their
#' names.
#' @return A character vector of names of broken packages.
#' @export
broken_packages = function(reinstall = TRUE) {
libs = .libPaths()
pkgs = unlist(lapply(libs, function(lib) {
p = unlist(lapply(.packages(TRUE, lib), function(p) {
if (!loadable(p, new_session = TRUE)) p
}))
if (length(p) && reinstall) {
remove.packages(p, lib); pkg_install(p, lib = lib)
}
p
}))
  if (reinstall) invisible(pkgs) else pkgs
}
# remove (binary) packages that were built with a previous major-minor version of R
check_built = function(dir = '.', dry_run = TRUE) {
ext = if (is_macos()) 'tgz' else if (is_windows()) 'zip' else 'tar.gz'
r = paste0('_[-.0-9]+[.]', ext, '$')
pkgs = list.files(dir, r, full.names = TRUE)
meta = file.path(dir, 'PACKAGES')
info = if (file_exists(meta)) read.dcf(meta)
extract = if (grepl('gz$', ext)) untar else unzip
for (f in pkgs) {
d = file.path(gsub(r, '', basename(f)), 'DESCRIPTION')
extract(f, d)
if (is.na(b <- read.dcf(d, 'Built')[1, 1])) next
unlink(dirname(d), recursive = TRUE)
v = as.numeric_version(gsub('^\\s*R ([^;]+);.*', '\\1', b))
if (major_minor_smaller(v, getRversion())) {
message('The package ', f, ' was built with R ', v)
if (!dry_run) file.remove(f)
}
}
if (!is.null(info) && !dry_run) tools::write_PACKAGES(dir)
}
# is one version smaller than the other in major.minor? e.g., 4.1.0 is smaller
# than 4.2.0, but not smaller than 4.1.1
major_minor_smaller = function(v1, v2) {
v1 = unclass(v1)[[1]]
v2 = unclass(v2)[[1]]
if (length(v1) < 3 || length(v2) < 3) return(TRUE) # should return NA
v1[1] < v2[1] || v1[2] < v2[2]
}
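# A quick illustration (expected values in comments; not executed): only the
# major.minor parts are compared, so a difference in the patch version alone
# does not count as smaller.
if (FALSE) {
  major_minor_smaller(as.numeric_version('4.1.2'), as.numeric_version('4.2.0'))  # TRUE
  major_minor_smaller(as.numeric_version('4.1.0'), as.numeric_version('4.1.2'))  # FALSE
}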
#' Install a source package from a directory
#'
#' Run \command{R CMD build} to build a tarball from a source directory, and run
#' \command{R CMD INSTALL} to install it.
#' @param pkg The package source directory.
#' @param build Whether to build a tarball from the source directory. If
#' `FALSE`, run \command{R CMD INSTALL} on the directory directly (note
#' that vignettes will not be automatically built).
#' @param build_opts The options for \command{R CMD build}.
#' @param install_opts The options for \command{R CMD INSTALL}.
#' @export
#' @return Invisible status from \command{R CMD INSTALL}.
install_dir = function(pkg, build = TRUE, build_opts = NULL, install_opts = NULL) {
if (build) {
pkg = pkg_build(pkg, build_opts)
on.exit(unlink(pkg), add = TRUE)
}
res = Rcmd(c('INSTALL', install_opts, pkg))
if (res != 0) stop('Failed to install the package ', pkg)
invisible(res)
}
pkg_build = function(dir = '.', opts = NULL) {
desc = file.path(dir, 'DESCRIPTION')
pv = read.dcf(desc, fields = c('Package', 'Version'))
# delete existing tarballs
unlink(sprintf('%s_*.tar.gz', pv[1, 1]))
Rcmd(c('build', opts, shQuote(dir)))
pkg = sprintf('%s_%s.tar.gz', pv[1, 1], pv[1, 2])
if (!file_exists(pkg)) stop('Failed to build the package ', pkg)
pkg
}
# query the Homebrew dependencies of an R package
brew_dep = function(pkg) {
u = sprintf('https://sysreqs.r-hub.io/pkg/%s/osx-x86_64-clang', pkg)
x = retry(readLines, u, warn = FALSE)
x = gsub('^\\s*\\[|\\]\\s*$', '', x)
x = unlist(strsplit(gsub('"', '', x), '[, ]+'))
x = setdiff(x, 'null')
if (length(x))
message('Package ', pkg, ' requires Homebrew packages: ', paste(x, collapse = ' '))
x
}
brew_deps = function(pkgs) {
if (length(pkgs) == 0) return()
deps = pkg_brew_deps()
unlist(lapply(pkgs, function(p) {
if (is.null(deps[[p]])) brew_dep(p) else deps[[p]]
}))
}
pkg_brew_deps = function() {
con = url('https://macos.rbind.io/bin/macosx/sysreqsdb.rds')
on.exit(close(con), add = TRUE)
readRDS(con)
}
install_brew_deps = function(pkg = .packages(TRUE)) {
inst = installed.packages()
pkg = intersect(pkg, pkg_needs_compilation(inst))
deps = pkg_brew_deps()
deps = deps[c(pkg, pkg_dep(pkg, inst, recursive = TRUE))]
deps = paste(na.omit(unique(unlist(deps))), collapse = ' ')
if (deps != '') system(paste('brew install', deps))
}
pkg_needs_compilation = function(db = installed.packages()) {
pkgs = unname(db[tolower(db[, 'NeedsCompilation']) == 'yes', 'Package'])
pkgs[!is.na(pkgs)]
}
#' An alias of `remotes::install_github()`
#'
#' This alias is to make autocomplete faster via `xfun::install_github`, because
#' most `remotes::install_*` functions are never what I want. I only use
#' `install_github` and it is inconvenient to autocomplete it, e.g.
#' `install_git` always comes before `install_github`, but I never use it. In
#' RStudio, I only need to type `xfun::ig` to get `xfun::install_github`.
#' @param ... Arguments to be passed to [remotes::install_github()].
#' @export
install_github = function(...) remotes::install_github(...)
# Reinstall packages that were not installed from CRAN
reinstall_from_cran = function(dry_run = TRUE, skip_github = TRUE) {
r = paste(c('Repository', if (skip_github) 'GithubRepo'), collapse = '|')
r = paste0('^(', r, '): ')
for (lib in .libPaths()) {
pkgs = .packages(TRUE, lib)
pkgs = setdiff(pkgs, c('xfun', 'rstudio', base_pkgs()))
for (p in pkgs) {
desc = read_utf8(system.file('DESCRIPTION', package = p, lib.loc = lib))
if (!any(grepl(r, desc))) {
if (dry_run) message(p, ': ', lib) else install.packages(p, lib = lib)
}
}
}
}
#' Convert package news to the Markdown format
#'
#' Read the package news with [news()], convert the result to
#' Markdown, and write to an output file (e.g., \file{NEWS.md}). Each package
#' version appears in a first-level header, each category (e.g., \samp{NEW
#' FEATURES} or \samp{BUG FIXES}) is in a second-level header, and the news
#' items are written into bullet lists.
#' @param package,... Arguments to be passed to [news()].
#' @param output The output file path.
#' @param category Whether to keep the category names.
#' @return If `output = NA`, returns the Markdown content as a character
#' vector, otherwise the content is written to the output file.
#' @export
#' @examplesIf interactive()
#' # news for the current version of R
#' xfun::news2md('R', Version == getRversion(), output = NA)
news2md = function(package, ..., output = 'NEWS.md', category = TRUE) {
db = news(package = package, ...)
k = db[, 'Category']
db[is.na(k), 'Category'] = '' # replace NA category with ''
res = unlist(lapply(unique(db[, 'Version']), function(v) {
d1 = db[db[, 'Version'] == v, ]
res = unlist(lapply(unique(d1[, 'Category']), function(k) {
txt = d1[d1[, 'Category'] == k, 'Text']
txt = txt[txt != '']
if (k == '' && length(txt) == 0) return()
txt = gsub('\n *', ' ', txt)
c(if (category && k != '') paste('##', k), if (length(txt)) paste('-', txt))
}))
if (is.na(dt <- d1[1, 'Date'])) dt = '' else dt = paste0(' (', dt, ')')
c(sprintf('# CHANGES IN %s VERSION %s%s', package, v, dt), res)
}))
res = c(rbind(res, '')) # add a blank line after each line
if (is.na(output)) raw_string(res) else write_utf8(res, output)
}
#' Get base R package names
#'
#' Return names of packages from [installed.packages()] whose priority is
#' `"base"`.
#' @return A character vector of base R package names.
#' @export
#' @examplesIf interactive()
#' xfun::base_pkgs()
base_pkgs = function() rownames(installed.packages(.Library, priority = 'base'))
# update one package (from source by default)
pkg_update_one = function(pkg, type = 'source') {
opts = options(repos = c(CRAN = 'https://cran.r-project.org'))
on.exit(options(opts), add = TRUE)
if (is.null(pkgs <- old.packages(type = type)) || !pkg %in% rownames(pkgs)) return()
install.packages(pkg, pkgs[pkg, 'LibPath'], type = type, INSTALL_opts = '--no-staged-install')
NULL
}
# ---- end of R/packages.R ----
#' Manipulate filename extensions
#'
#' Functions to obtain (`file_ext()`), remove (`sans_ext()`), and
#' change (`with_ext()`) extensions in filenames.
#'
#' `file_ext()` is similar to [tools::file_ext()], and
#' `sans_ext()` is similar to [tools::file_path_sans_ext()].
#' The main differences are that they treat `tar.(gz|bz2|xz)` and
#' `nb.html` as extensions (but functions in the \pkg{tools} package
#' doesn't allow double extensions by default), and allow characters `~`
#' and `#` to be present at the end of a filename.
#' @param x A character of file paths.
#' @param extra Extra characters to be allowed in the extensions. By default,
#' only alphanumeric characters are allowed (and also some special cases in
#' \sQuote{Details}). If other characters should be allowed, they can be
#' specified in a character string, e.g., `"-+!_#"`.
#' @export
#' @return A character vector of the same length as `x`.
#' @examples library(xfun)
#' p = c('abc.doc', 'def123.tex', 'path/to/foo.Rmd', 'backup.ppt~', 'pkg.tar.xz')
#' file_ext(p); sans_ext(p); with_ext(p, '.txt')
#' with_ext(p, c('.ppt', '.sty', '.Rnw', 'doc', 'zip')); with_ext(p, 'html')
#'
#' # allow for more characters in extensions
#' p = c('a.c++', 'b.c--', 'c.e##')
#' file_ext(p) # -/+/# not recognized by default
#' file_ext(p, extra = '-+#')
file_ext = function(x, extra = '') {
ext = character(length(x))
i = grep(r <- reg_path(extra), x)
ext[i] = sub(r, '\\3', x[i])
ext
}
#' @rdname file_ext
#' @export
sans_ext = function(x, extra = '') {
sub(reg_path(extra), '\\1', x)
}
#' @param ext A vector of new extensions. It must be either of length 1, or the
#' same length as `x`.
#' @rdname file_ext
#' @export
with_ext = function(x, ext, extra = '') {
if (anyNA(ext)) stop("NA is not allowed in 'ext'")
n1 = length(x); n2 = length(ext)
if (n1 * n2 == 0) return(x)
i = !grepl('^[.]', ext) & ext != ''
ext[i] = paste0('.', ext[i])
if (all(ext == '')) ext = ''
r = sub('[$]$', '?$', reg_ext(extra)) # make extensions in 'x' optional
if (length(ext) == 1) return(sub(r, ext, x))
if (n1 > 1 && n1 != n2) stop("'ext' must be of the same length as 'x'")
mapply(sub, r, ext, x, USE.NAMES = FALSE)
}
# regex to extract base path and extension from a file path
reg_ext = function(extra = '') {
sprintf('([.](([%s[:alnum:]]+|tar[.](gz|bz2|xz)|nb[.]html)[~#]?))$', extra)
}
reg_path = function(...) paste0('^(.*?)', reg_ext(...))
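# Illustration (a sketch, not run): with the default extra = '', group 1 of
# reg_path() captures the base path and group 3 the extension without the dot:
#   sub(reg_path(), '\\1', 'pkg.tar.gz')  # "pkg"
#   sub(reg_path(), '\\3', 'pkg.tar.gz')  # "tar.gz"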
#' Normalize paths
#'
#' A wrapper function of `normalizePath()` with different defaults.
#' @param x,winslash,must_work Arguments passed to
#' [normalizePath()].
#' @param resolve_symlink Whether to resolve symbolic links.
#' @export
#' @examples library(xfun)
#' normalize_path('~')
normalize_path = function(x, winslash = '/', must_work = FALSE, resolve_symlink = TRUE) {
if (!resolve_symlink) {
# apply the trick on all files on Windows since Sys.readlink() doesn't work
# and we can't know which files are symlinks
i = if (is_windows()) file_test('-f', x) else is_symlink(x)
b = basename(x[i])
x[i] = dirname(x[i]) # normalize the dirs of symlinks instead
}
res = normalizePath(x, winslash = winslash, mustWork = must_work)
if (is_windows()) res[is.na(x)] = NA
if (!resolve_symlink) {
res[i] = file.path(res[i], b, fsep = winslash)
}
res
}
is_symlink = function(x) {
!is.na(y <- Sys.readlink(x)) & (y != '')
}
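# Illustration (a sketch, not run): suppose 'link.txt' is a symlink pointing
# elsewhere; normalize_path('link.txt') resolves it to the target path, whereas
# normalize_path('link.txt', resolve_symlink = FALSE) only normalizes the
# parent directory and keeps the basename 'link.txt'.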
#' Test if two paths are the same after they are normalized
#'
#' Compare two paths after normalizing them with the same separator (`/`).
#' @param p1,p2 Two vectors of paths.
#' @param ... Arguments to be passed to [normalize_path()].
#' @export
#' @examples library(xfun)
#' same_path('~/foo', file.path(Sys.getenv('HOME'), 'foo'))
same_path = function(p1, p2, ...) {
normalize_path(p1, ...) == normalize_path(p2, ...)
}
#' Find file paths that exist
#'
#' This is a shorthand of `x[file.exists(x)]`, and optionally returns the
#' first existing file path.
#' @param x A vector of file paths.
#' @param first Whether to return the first existing path. If `TRUE` and no
#' specified files exist, it will signal an error unless the argument
#' `error = FALSE`.
#' @param error Whether to throw an error when `first = TRUE` but no files
#' exist. It can also take a character value, which will be used as the error
#' message.
#' @return A vector of existing file paths.
#' @export
#' @examples
#' xfun::existing_files(c('foo.txt', system.file('DESCRIPTION', package='xfun')))
existing_files = function(x, first = FALSE, error = TRUE) {
x = x[file_exists(x)]
if (!first) return(x)
x = head(x, 1)
if (length(x) != 1 && !identical(error, FALSE)) {
if (isTRUE(error)) error = 'None of the specified files exist.'
stop(error, call. = FALSE)
}
x
}
#' Return the (possible) root directory of a project
#'
#' Given a path of a file (or dir) in a potential project (e.g., an R package or
#' an RStudio project), return the path to the project root directory.
#'
#' The search for the root directory is performed by a series of tests,
#' currently including looking for a \file{DESCRIPTION} file that contains
#' `Package: *` (which usually indicates an R package), and a
#' \file{*.Rproj} file that contains `Version: *` (which usually indicates
#' an RStudio project). If files with the expected patterns are not found in the
#' initial directory, the search will be performed recursively in upper-level
#' directories.
#' @param path The initial path to start the search. If it is a file path, its
#' parent directory will be used.
#' @param rules A matrix of character strings of two columns: the first column
#' contains regular expressions to look for filenames that match the patterns,
#' and the second column contains regular expressions to match the content of
#' the matched files. The regular expression can be an empty string, meaning
#' that it will match anything.
#' @return Path to the root directory if found, otherwise `NULL`.
#' @export
#' @note This function was inspired by the \pkg{rprojroot} package, but is much
#' less sophisticated. It is a rather simple function designed to be used in
#' some of the packages that I maintain, and may not meet the needs of general
#' users until this note is removed in the future (which should be unlikely).
#' If you are sure that you are working on the types of projects mentioned in
#' the \sQuote{Details} section, this function may be helpful to you,
#' otherwise please consider using \pkg{rprojroot} instead.
proj_root = function(path = './', rules = root_rules) {
path = normalize_path(path)
dir = if (dir_exists(path)) path else dirname(path)
if (same_path(dir, file.path(dir, '..'))) return()
if (is.null(dim(rules))) dim(rules) = c(1, length(rules))
for (i in seq_len(nrow(rules))) {
file = rules[i, 1]; pattern = rules[i, 2]
for (f in list.files(dir, file, full.names = TRUE)) {
if (pattern == '' || length(grep(pattern, read_utf8(f)))) return(dir)
}
}
proj_root(dirname(dir), rules)
}
#' @rdname proj_root
#' @export
root_rules = matrix(c(
'^DESCRIPTION$', '^Package: ',
'.+[.]Rproj$', '^Version: '
), ncol = 2, byrow = TRUE, dimnames = list(NULL, c('file', 'pattern')))
#' Get the relative path of a path relative to a directory
#'
#' Given a directory, return the relative path that is relative to this
#' directory. For example, the path \file{foo/bar.txt} relative to the directory
#' \file{foo/} is \file{bar.txt}, and the path \file{/a/b/c.txt} relative to
#' \file{/d/e/} is \file{../../a/b/c.txt}.
#' @param dir Path to a directory.
#' @param x A vector of paths to be converted to relative paths.
#' @param use.. Whether to use double-dots (\file{..}) in the relative path. A
#' double-dot indicates the parent directory (starting from the directory
#' provided by the `dir` argument).
#' @param error Whether to signal an error if a path cannot be converted to a
#' relative path.
#' @return A vector of relative paths if the conversion succeeded; otherwise the
#' original paths when `error = FALSE`, and an error when `error =
#' TRUE`.
#' @export
#' @examples
#' xfun::relative_path('foo/bar.txt', 'foo/')
#' xfun::relative_path('foo/bar/a.txt', 'foo/haha/')
#' xfun::relative_path(getwd())
relative_path = function(x, dir = '.', use.. = TRUE, error = TRUE) {
res = x
for (i in seq_along(x)) res[i] = relative_path_one(x[i], dir, use.., error)
res
}
relative_path_one = function(x, dir, use.., error) {
# on Windows, if a relative path doesn't exist, normalizePath() will use
# getwd() as its parent dir; however, normalizePath() just returns the
# relative path on *nix, and we have to assume it's relative to getwd()
abs_path = function(p) {
if (!file.exists(p) && is_unix() && is_rel_path(p)) p = file.path(getwd(), p)
normalize_path(p)
}
  p = abs_path(x)
  if ((n1 <- nchar(p)) == 0) return(x) # not sure what you mean
d = abs_path(dir); n2 = nchar(d)
if (is_sub_path(p, d, n2)) {
p2 = get_subpath(p, n1, n2)
if (p2 == '') p2 = '.' # if the subpath is empty, it means the current dir
return(p2)
}
if (!use..) {
if (error) stop("When use.. = FALSE, the path 'x' must be under the 'dir'")
return(x)
}
s = '../'; d1 = d
while (!is_sub_path(p, d2 <- dirname(d1))) {
if (same_path(d1, d2)) {
if (error) stop(
"The path 'x' cannot be converted to a relative path to 'dir'. ",
"Perhaps they are on different volumes of the disk."
)
return(x)
}
s = paste0('../', s)
d1 = d2 # go to one level up
}
paste0(s, get_subpath(p, n1, nchar(d2)))
}
#' Test if a path is a subpath of a dir
#'
#' Check if the path starts with the dir path.
#' @inheritParams is_abs_path
#' @param dir A vector of directory paths.
#' @param n The length of `dir` paths.
#' @return A logical vector.
#' @note You may want to normalize the values of the `x` and `dir` arguments
#' first (with [xfun::normalize_path()]), to make sure the path separators
#' are consistent.
#' @export
#' @examples
#' xfun::is_sub_path('a/b/c.txt', 'a/b') # TRUE
#' xfun::is_sub_path('a/b/c.txt', 'd/b') # FALSE
#' xfun::is_sub_path('a/b/c.txt', 'a\\b') # FALSE (even on Windows)
is_sub_path = function(x, dir, n = nchar(dir)) substr(x, 1, n) == dir
# remove the first n2 characters and the possible / from the path
get_subpath = function(p, n1, n2) {
p = substr(p, n2 + 1, n1)
sub('^/', '', p)
}
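# Illustration (a sketch, not run): for p = '/a/b/c.txt' and dir = '/d/e',
# relative_path_one() prefixes '../' while walking up from '/d/e' until it
# reaches '/', which contains p; then get_subpath('/a/b/c.txt', 10, 1) gives
# 'a/b/c.txt', so the result is '../../a/b/c.txt'.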
#' Test if paths are relative or absolute
#'
#' On Unix, check if the paths start with \file{/} or \file{~} (if they do, they
#' are absolute paths). On Windows, check if a path remains the same (via
#' [xfun::same_path()]) if it is prepended with \file{./} (if it does, it is a
#' relative path).
#' @param x A vector of paths.
#' @return A logical vector.
#' @export
#' @examples
#' xfun::is_abs_path(c('C:/foo', 'foo.txt', '/Users/john/', tempdir()))
#' xfun::is_rel_path(c('C:/foo', 'foo.txt', '/Users/john/', tempdir()))
is_abs_path = function(x) {
if (is_unix()) grepl('^[/~]', x) else !same_path(x, file.path('.', x))
}
#' @rdname is_abs_path
#' @export
is_rel_path = function(x) !is_abs_path(x)
#' Test if a path is a web path
#'
#' Check if a path starts with \file{http://} or \file{https://} or
#' \file{ftp://} or \file{ftps://}.
#' @inheritParams is_abs_path
#' @return A logical vector.
#' @export
#' @examples
#' xfun::is_web_path('https://www.r-project.org') # TRUE
#' xfun::is_web_path('www.r-project.org') # FALSE
is_web_path = function(x) {
grepl('^(f|ht)tps?://', x)
}
#' Get the relative path of a path in a project relative to the current working
#' directory
#'
#' First compose an absolute path using the project root directory and the
#' relative path components, i.e., [`file.path`]`(root, ...)`. Then
#' convert it to a relative path with [relative_path()], which is
#' relative to the current working directory.
#'
#' This function was inspired by `here::here()`, and the major difference
#' is that it returns a relative path by default, which is more portable.
#' @param ... A character vector of path components *relative to the root
#' directory of the project*.
#' @param root The root directory of the project.
#' @param error Whether to signal an error if the path cannot be converted to a
#' relative path.
#' @return A relative path, or an error when the project root directory cannot
#' be determined or the conversion failed and `error = TRUE`.
#' @export
#' @examples
#' \dontrun{
#' xfun::from_root('data', 'mtcars.csv')
#' }
from_root = function(..., root = proj_root(), error = TRUE) {
  if (is.null(root)) stop('Cannot determine the root directory of the current project.')
p = file.path(root, ..., fsep = '/')
relative_path(p, error = error)
}
#' Find a file or directory under a root directory
#'
#' Given a path, try to find it recursively under a root directory. The input
#' path can be an incomplete path, e.g., it can be a base filename, and
#' `magic_path()` will try to find this file under subdirectories.
#' @param ... A character vector of path components.
#' @param root The root directory under which to search for the path. If
#' `NULL`, the current working directory is used.
#' @param relative Whether to return a relative path.
#' @param error Whether to signal an error if the path is not found, or multiple
#' paths are found.
#' @param message Whether to emit a message when multiple paths are found and
#' `error = FALSE`.
#' @param n_dirs The number of subdirectories to recursively search. The
#' recursive search may be time-consuming when there are a large number of
#' subdirectories under the root directory. If you really want to search for
#' all subdirectories, you may try `n_dirs = Inf`.
#' @return The path found under the root directory, or an error when `error
#' = TRUE` and the path is not found (or multiple paths are found).
#' @export
#' @examples
#' \dontrun{
#' xfun::magic_path('mtcars.csv') # find any file that has the base name mtcars.csv
#' }
magic_path = function(
..., root = proj_root(), relative = TRUE, error = TRUE,
message = getOption('xfun.magic_path.message', TRUE),
n_dirs = getOption('xfun.magic_path.n_dirs', 10000)
) {
if (file.exists(p <- file.path(...))) return(p)
if (is.null(root)) root = getwd()
nd = 0
# find a path 'f' recursively under a directory 'd'
find_it = function(f, d) {
if (nd > n_dirs) {
if (error) stop(
'Failed to find the path under ', n_dirs, ' subdirectories. If you want ',
'to search for the path in more subdirectories, increase the value of ',
"the 'n_dirs' argument of magic_path()."
)
return(p)
}
ds = list.files(d, full.names = TRUE)
ds = ds[dir_exists(ds)]
if ((n1 <- length(ds)) == 0) return()
nd <<- nd + n1
fs = file.path(ds, f)
fs = fs[file.exists(fs)]
if ((n2 <- length(fs)) == 1) return(fs)
if (n2 > 1) {
msg = c(
        'Found more than one path containing the input path "', f, '":\n\n',
paste('*', fs, collapse = '\n')
)
if (error) stop(msg)
if (message) base::message(msg, '\n\nReturned the first one.')
return(fs[1])
}
# look into subdirectories one by one
for (i in seq_len(n1)) {
fs = find_it(f, file.path(ds[i]))
if (length(fs)) return(fs)
}
}
f = find_it(p, root)
if (is.null(f)) {
if (error) stop('Could not find the path "', p, '" in any subdirectories.')
p
} else {
if (relative) relative_path(f, error = error) else f
}
}
#' Test the existence of files and directories
#'
#' These are wrapper functions of [utils::file_test()] to test the
#' existence of directories and files. Note that `file_exists()` only tests
#' files but not directories, which is the main difference from
#' [file.exists()] in base R. If you are using R version
#' 3.2.0 or above, `dir_exists()` is the same as [dir.exists()]
#' in base R.
#' @param x A vector of paths.
#' @export
#' @return A logical vector.
dir_exists = function(x) file_test('-d', x)
#' @rdname dir_exists
#' @export
file_exists = function(x) file_test('-f', x)
#' Create a directory recursively by default
#'
#' First check if a directory exists. If it does, return `TRUE`, otherwise
#' create it with [`dir.create`]`(recursive = TRUE)` by default.
#' @param x A path name.
#' @param recursive Whether to create all directory components in the path.
#' @param ... Other arguments to be passed to [dir.create()].
#' @return A logical value indicating if the directory either exists or is
#' successfully created.
#' @export
dir_create = function(x, recursive = TRUE, ...) {
dir_exists(x) || dir.create(x, recursive = recursive, ...)
}
#' Rename files with a sequential numeric prefix
#'
#' Rename a series of files and add an incremental numeric prefix to the
#' filenames. For example, files \file{a.txt}, \file{b.txt}, and \file{c.txt}
#' can be renamed to \file{1-a.txt}, \file{2-b.txt}, and \file{3-c.txt}.
#' @param pattern A regular expression for [list.files()] to obtain
#' the files to be renamed. For example, to rename `.jpeg` files, use
#' `pattern = "[.]jpeg$"`.
#' @param format The format for the numeric prefix. This is passed to
#' [sprintf()]. The default format is `"\%0Nd"` where `N
#' = floor(log10(n)) + 1` and `n` is the number of files, which means the
#' prefix may be padded with zeros. For example, if there are 150 files to be
#' renamed, the format will be `"\%03d"` and the prefixes will be
#' `001`, `002`, ..., `150`.
#' @param replace Whether to remove existing numeric prefixes in filenames.
#' @param start The starting number for the prefix (it can start from 0).
#' @param dry_run Whether to not really rename files. To be safe, the default is
#' `TRUE`. If you have looked at the new filenames and are sure the new
#' names are what you want, you may rerun `rename_seq()` with
#' `dry_run = FALSE` to actually rename files.
#' @return A named character vector. The names are original filenames, and the
#' vector itself is the new filenames.
#' @export
#' @examples xfun::rename_seq()
#' xfun::rename_seq('[.](jpeg|png)$', format = '%04d')
rename_seq = function(
pattern = '^[0-9]+-.+[.]Rmd$', format = 'auto', replace = TRUE, start = 1,
dry_run = TRUE
) {
n = length(files <- list.files('.', pattern))
if (n == 0) return(files)
files2 = if (replace) sub('^[0-9]+-*', '', files) else files
if (format == 'auto') format = paste0('%0', floor(log10(n)) + 1, 'd')
files2 = paste(sprintf(format, seq_len(n) + start - 1), files2, sep = '-')
if (!dry_run) file.rename(files, files2)
structure(setNames(files2, files), class = 'xfun_rename_seq')
}
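# Illustration (a sketch, not run): with 12 matching files and format = 'auto',
# the format becomes '%02d' (since floor(log10(12)) + 1 == 2), so the new
# filenames are prefixed with '01-', '02-', ..., '12-'.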
#' @export
print.xfun_rename_seq = function(x, ...) {
x = unclass(x)
tab = data.frame(original = names(x), ' ' = '->', new = unname(x), check.names = FALSE)
if (loadable('knitr')) tab = knitr::kable(tab, 'simple')
print(tab)
}
# return path to R's svg logo if it exists, otherwise return the jpg logo; or
# specify a regex to match the logo path, e.g., ext = 'jpg$'
R_logo = function(ext = NULL, all = FALSE) {
x = file.path(R.home('doc'), 'html', c('Rlogo.svg', 'logo.jpg'))
if (!is.null(ext)) x = grep(ext, x, value = TRUE)
existing_files(x, first = !all)
}
#' Extract filenames from URLs
#'
#' Get the base names of URLs via [basename()], and remove the
#' possible query parameters or hash from the names.
#' @param x A character vector of URLs.
#' @param default The default filename when it cannot be determined from the
#' URL, e.g., when the URL ends with a slash.
#' @return A character vector of filenames at the end of URLs.
#' @export
#' @examples
#' xfun::url_filename('https://yihui.org/images/logo.png')
#' xfun::url_filename('https://yihui.org/index.html')
#' xfun::url_filename('https://yihui.org/index.html?foo=bar')
#' xfun::url_filename('https://yihui.org/index.html#about')
#' xfun::url_filename('https://yihui.org')
#' xfun::url_filename('https://yihui.org/')
url_filename = function(x, default = 'index.html') {
# protocol shouldn't be treated as dir name, and query/hash should be removed
x = gsub('^https?://|[?#].*$', '', x)
f = basename(x)
ifelse(grepl('/$', x) | x == f, default, f)
}
#' Delete an empty directory
#'
#' Use `list.files()` to check if there are any files or subdirectories
#' under a directory. If not, delete this empty directory.
#' @param dir Path to a directory. If `NULL` or the directory does not
#' exist, no action will be performed.
#' @export
del_empty_dir = function(dir) {
if (is.null(dir) || !dir_exists(dir)) return()
files = list.files(dir, all.files = TRUE, no.. = TRUE)
if (length(files) == 0) unlink(dir, recursive = TRUE)
}
#' Mark some paths as directories
#'
#' Add a trailing slash to a file path if it is a directory. This is useful
#' in messages to the console, for example, to quickly distinguish directories
#' from files.
#'
#' If `x` is a vector of relative paths, the directory test is performed on
#' paths relative to the current working directory. Use [xfun::in_dir()] or
#' absolute paths if needed.
#'
#' @param x Character vector of paths to files and directories.
#' @examples
#' mark_dirs(list.files(find.package("xfun"), full.names = TRUE))
#' @export
mark_dirs = function(x) {
i = dir_exists(x) & !grepl("/$", x)
x[i] = paste0(x[i], "/")
x
}
# change list.files()'s default argument values
all_files = function(
pattern = NULL, dir = '.', ignore.case = TRUE, full.names = TRUE,
recursive = TRUE, ...
) {
list.files(
dir, pattern, ignore.case = ignore.case, full.names = full.names,
recursive = recursive, no.. = TRUE, ...
)
}
| /scratch/gouwar.j/cran-all/cranData/xfun/R/paths.R |
#' Run R code and record the results
#'
#' Run R code and capture various types of output, including text output, plots,
#' messages, warnings, and errors.
#' @param code A character vector of R source code.
#' @param dev A graphics device. It can be a function name, a function, or a
#' character string that can be evaluated to a function to open a graphics
#' device.
#' @param dev.path A base file path for plots (by default, a temporary path
#' under the current working directory). Actual plot filenames will be this
#' base path plus incremental suffixes. For example, if `dev.path = "foo"`,
#' the plot files will be `foo-1.png`, `foo-2.png`, and so on. If `dev.path`
#' is not character (e.g., `FALSE`), plots will not be recorded.
#' @param dev.ext The file extension for plot files. By default, it will be
#' inferred from the first argument of the device function if possible.
#' @param dev.args Extra arguments to be passed to the device. The default
#' arguments are `list(units = 'in', onefile = FALSE, width = 7, height = 7,
#' res = 96)`. If any of these arguments is not present in the device
#' function, it will be dropped.
#' @param error Whether to record errors. If `TRUE`, errors will not stop the
#' execution and error messages will be recorded. If `FALSE`, errors will be
#' thrown normally.
#' @param verbose `2` means to always print the value of each expression in the
#' code, no matter if the value is [invisible()] or not; `1` means to always
#' print the value of the last expression; `0` means no special handling
#' (i.e., print only when the value is visible).
#' @param envir An environment in which the code is evaluated.
#' @return `record()` returns a list of the class `xfun_record_results` that
#' contains elements with these possible classes: `record_source` (source
#' code), `record_output` (text output), `record_plot` (plot file paths),
#' `record_message` (messages), `record_warning` (warnings), and
#' `record_error` (errors, only when the argument `error = TRUE`).
#' @import grDevices
#' @export
#' @examples
#' code = c(
#'   '# a message test', '1:2 + 1:3', 'par(mar = c(4, 4, 1, .2))',
#'   'barplot(5:1, col = 2:6, horiz = TRUE)', 'head(iris)',
#'   "sunflowerplot(iris[, 3:4], seg.col = 'purple')",
#'   "if (TRUE) {\n message('Hello, xfun::record()!')\n}",
#'   '# throw an error', "1 + 'a'"
#' )
#' res = xfun::record(code, dev.args = list(width = 9, height = 6.75), error = TRUE)
#' xfun::tree(res)
#' format(res)
#' # find and clean up plot files
#' plots = Filter(function(x) inherits(x, 'record_plot'), res)
#' file.remove(unlist(plots))
record = function(
code = NULL, dev = 'png', dev.path = tempfile('record-', '.'),
dev.ext = dev_ext(dev), dev.args = list(), error = FALSE,
verbose = getOption('xfun.record.verbose', 0), envir = parent.frame()
) {
new_record = function(x = list()) structure(x, class = 'xfun_record_results')
res = new_record()
if (length(code) == 0) return(res)
code = split_lines(code)
add_result = function(x, type, pos = length(res), insert = TRUE) {
# insert a whole element or append to an existing element in res
if (!insert) x = c(res[[pos]], x)
el = structure(x, class = paste0('record_', type))
N = length(res)
if (insert) {
if (N == pos) res[[N + 1]] <<- el else res <<- append(res, el, pos)
} else {
res[[pos]] <<- el
}
}
# dev.off() will set the current device to next instead of previous device,
# but we often want to restore to the previous instead
dev_cur = dev.cur()
dev_reset = function() if (dev_cur != 1 && dev_cur %in% dev.list()) dev.set(dev_cur)
# look for all possible ${dev.path}-%d.${dev.ext} files created by the device
get_plots = function() {
files = sprintf(dev.path, seq_along(list.files(dirname(dev.path))))
files[file_exists(files)]
}
# open a graphics device
dev_num = if (is.character(dev.path)) {
if (!is.function(dev)) dev = tryCatch(match.fun(dev), error = function(e) {
eval(parse(text = dev), envir = envir)
})
# normalize \ to / and remove the leading ./
if (is_windows()) dev.path = gsubf('\\', '/', dev.path)
dev.path = sub('^[.]/+', '', dev.path)
# add extension
dev.path = with_ext(paste0(dev.path, if (!grepl('/$', dev.path)) '-', '%d'), dev.ext)
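    # e.g. (illustrative), dev.path = 'record-abc' with a png device becomes
    # the template 'record-abc-%d.png', so plots are written to
    # 'record-abc-1.png', 'record-abc-2.png', and so on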
dir_create(dirname(dev.path))
# clean up existing plots before opening the device
if (any(i <- !file.remove(old_plots <- get_plots()))) stop(
'Failed to delete existing plot file(s): ',
paste("'", old_plots[i], "'", collapse = ', ')
)
dev_open(dev, dev.path, dev.args)
}
dev_old = dev.list() # keep track of current devices
dev_off = function() {
if (length(dev_num) && dev_num %in% dev.list()) dev.off(dev_num)
dev_reset()
dev_num <<- NULL # prevent dev.off() again by accident
}
on.exit(dev_off(), add = TRUE)
# check if new plots are generated
handle_plot = local({
old_files = NULL # previously existing plots
old_plot = recordPlot()
function(last = FALSE) {
# if dev.list() has changed, no longer record graphics, except for the last plot
if (!last) {
if (!(length(dev_num) && identical(dev.list(), dev_old))) return()
dev.set(dev_num)
}
files = get_plots()
# on Windows, an empty plot file is created upon opening a device
files = files[file.size(files) > 0]
if (!last) {
new_plot = recordPlot()
if (!identical(old_plot, new_plot)) {
# add a placeholder for new plots, which may not have been created by
# the device until the next new plot is drawn
add_result(character(), 'plot')
old_plot <<- new_plot
}
}
if (length(files) == 0) return()
plots = setdiff(files, old_files)
old_files <<- files
if ((n <- length(plots)) == 0) return()
# indices of plots in results
i = which(vapply(res, inherits, logical(1), 'record_plot'))
N = length(i)
# the last plot should always be appended the last plot block
if (last) {
add_result(plots, 'plot', i[N], FALSE)
return()
}
# for the newly generated plots, append the first one (which should be the
# last plot of a previous code chunk, since it will be created only when
# the next code chunk produces a new plot) to the previous plot block; the
# rest are for the current block
if (N > 1) {
add_result(plots[1], 'plot', i[N - 1], FALSE)
if (n > 1) add_result(plots[2:n], 'plot', i[N], FALSE)
} else {
# if there exists only one plot block, add plots to that block
add_result(plots, 'plot', i[N], FALSE)
}
}
})
handle = if (error) try_silent else identity
# split code into individual expressions
codes = handle(split_source(code, merge_comments = TRUE, line_number = TRUE))
# code may contain syntax errors
if (is_error(codes)) {
add_result(code, 'source'); add_result(attr(codes, 'condition')$message, 'error')
return(new_record(res))
}
handle_message = function(type) {
mf = sub('^(.)', 'muffle\\U\\1', type, perl = TRUE)
function(e) {
add_result(e$message, type)
if (type %in% c('message', 'warning')) invokeRestart(mf)
}
}
handle_m = handle_message('message')
handle_w = handle_message('warning')
handle_e = handle_message('error')
n = length(codes)
for (i in seq_len(n)) {
add_result(code <- codes[[i]], 'source')
expr = parse_only(code)
if (length(expr) == 0) next
# verbose = 1: always print the last value; verbose = 2: print all values
if (verbose == 2 || (verbose == 1 && i == n)) {
expr = parse_only(c('(', code, ')'))
}
# TODO: replace capture.output() with a custom version of sink() +
# withVisible() so we can support a custom printing function like knit_print()
out = handle(withCallingHandlers(
capture.output(eval(expr, envir)),
message = handle_m, warning = handle_w, error = handle_e
))
if (length(out) && !is_error(out)) add_result(out, 'output')
handle_plot()
}
# shut off the device to write out the last plot if there exists one
dev_off()
handle_plot(TRUE)
# remove empty blocks
res = Filter(length, res)
# merge neighbor elements of the same class
if (length(res) > 1) {
k = NULL
for (i in seq_along(res)) {
if (i == 1) next
r1 = res[[i - 1]]; c1 = class(r1); r2 = res[[i]]; c2 = class(r2)
if (!identical(c1, c2)) next
res[[i]] = c(r1, r2)
attributes(res[[i]]) = attributes(r1)
k = c(k, i - 1)
}
if (length(k)) res = res[-k]
}
new_record(res)
}
dev_open = function(dev, file, args) {
m = names(formals(dev))
  a = list(units = 'in', onefile = FALSE, width = 7, height = 7, res = 96)
for (i in names(a)) {
if (i %in% m && is.null(args[[i]])) args[[i]] = a[[i]]
}
do.call(dev, c(list(file), args))
dev.control('enable')
dev.cur()
}
# infer the filename extension from a device's first argument
dev_ext = function(dev) {
# the first argument could be a string or an expression
if (!is.character(name <- formals(dev)[[1]])) name = gsub('"', '', deparse(name))
file_ext(name)
}
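# For example (an assumption about the default graphics device): the first
# formal of grDevices::png() is filename = 'Rplot%03d.png', so dev_ext(png)
# should return 'png'.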
is_error = function(x) inherits(x, 'try-error')
#' @param to The output format (text or html).
#' @param encode For HTML output, whether to base64 encode plots.
#' @param template For HTML output, whether to embed the formatted results in an
#' HTML template. Alternatively, this argument can take a file path, i.e.,
#' path to an HTML template that contains the variable `$body$`. If `TRUE`,
#' the default template in this package will be used
#' (`xfun:::pkg_file('resources', 'record.html')`).
#' @rdname record
#' @exportS3Method
#' @return The `format()` method returns a character vector of plain-text output
#' or HTML code for displaying the results.
format.xfun_record_results = function(
x, to = c('text', 'html'), encode = FALSE, template = FALSE, ...
) {
if (to[1] == 'text') {
res = unlist(lapply(x, function(z) {
if (!inherits(z, 'record_source')) z = paste('#>', z)
gsub('\n*$', '\n', one_string(z))
}))
return(raw_string(res))
}
res = unlist(lapply(x, function(z) {
cls = sub('^record_', '', class(z))
if (cls == 'plot') {
sprintf(
'<p class="%s"><img src="%s" alt="A plot recorded by xfun::record()" /></p>',
cls, if (encode) vapply(z, base64_uri, '') else URLencode(z)
)
} else {
paste0(
sprintf(
'<pre class="%s"><code>',
if (cls == 'source') paste0('language-r" data-start="', attr(z, 'line_start')) else cls
),
escape_html(one_string(z)), '</code></pre>'
)
}
}))
if (isTRUE(template)) template = pkg_file('resources', 'record.html')
if (is.character(template)) {
res = sub('$body$', one_string(res), read_utf8(template), fixed = TRUE)
if (length(x) > 0) res = sub('$title$', make_title(x[[1]]), res, fixed = TRUE)
}
raw_string(res)
}
# generate a title from the first line of a character vector
make_title = function(x) {
if (length(x) == 0) return('')
x = gsub('^#+\\s*', '', x[1]) # remove possible comment chars
  x = gsub('\\s*[.]*$', '... | ', x) # replace trailing spaces/dots with '... | '
escape_html(x)
}
#' @param x An object returned by `record()`.
#' @param browse Whether to browse the results on an HTML page.
#' @param ... Currently ignored.
#' @rdname record
#' @exportS3Method
#' @return The `print()` method prints the results as plain text or HTML to the
#' console or displays the HTML page.
print.xfun_record_results = function(
x, browse = interactive(), to = if (browse) 'html' else 'text', template = TRUE, ...
) {
res = format(x, to, encode = browse, template = template, ...)
if (browse && to == 'html') {
viewer = getOption('viewer', utils::browseURL)
f = tempfile('record-', fileext = '.html')
write_utf8(res, f)
viewer(f)
} else {
cat(res, sep = '\n')
}
invisible(x)
}
| /scratch/gouwar.j/cran-all/cranData/xfun/R/record.R |
#' Run \command{R CMD check} on the reverse dependencies of a package
#'
#' Install the source package, figure out the reverse dependencies on CRAN,
#' download all of their source packages, and run \command{R CMD check} on them
#' in parallel.
#'
#' Everything occurs under the current working directory, and you are
#' recommended to call this function under a designated directory, especially
#' when the number of reverse dependencies is large, because all source packages
#' will be downloaded to this directory, and all \file{*.Rcheck} directories
#' will be generated under this directory, too.
#'
#' If a source tarball of the expected version has been downloaded before (under
#' the \file{tarball} directory), it will not be downloaded again (to save time
#' and bandwidth).
#'
#' After a package has been checked, the associated \file{*.Rcheck} directory
#' will be deleted if the check was successful (no warnings or errors or notes),
#' so if you see a \file{*.Rcheck} directory, it means the check failed, and
#' you need to take a look at the log files under that directory.
#'
#' The time to finish the check is recorded for each package. As the check goes
#' on, the total remaining time will be roughly estimated via `n *
#' mean(times)`, where `n` is the number of packages remaining to be
#' checked, and `times` is a vector of elapsed time of packages that have
#' been checked.
#'
#' If a check on a reverse dependency failed, its \file{*.Rcheck} directory will
#' be renamed to \file{*.Rcheck2}, and another check will be run against the
#' CRAN version of the package unless `options(xfun.rev_check.compare =
#' FALSE)` is set. If the logs of the two checks are the same, it means no new
#' problems were introduced in the package, and you can probably ignore this
#' particular reverse dependency. The function `compare_Rcheck()` can be
#' used to create a summary of all the differences in the check logs under
#' \file{*.Rcheck} and \file{*.Rcheck2}. This will be done automatically if
#' `options(xfun.rev_check.summary = TRUE)` has been set.
#'
#' A recommended workflow is to use a special directory to run
#' `rev_check()`, set the global [options()]
#' `xfun.rev_check.src_dir` and `repos` in the R startup (see
#' `?`[`Startup`]) profile file `.Rprofile` under this directory,
#' and (optionally) set `R_LIBS_USER` in \file{.Renviron} to use a special
#' library path (so that your usual library will not be cluttered). Then run
#' `xfun::rev_check(pkg)` once, investigate and fix the problems or (if you
#' believe it was not your fault) ignore broken packages in the file
#' \file{00ignore}, and run `xfun::rev_check(pkg)` again to recheck the
#' failed packages. Repeat this process until all \file{*.Rcheck} directories
#' are gone.
#'
#' As an example, I set `options(repos = c(CRAN =
#' 'https://cran.rstudio.com'), xfun.rev_check.src_dir = '~/Dropbox/repo')` in
#' \file{.Rprofile}, and `R_LIBS_USER=~/R-tmp` in \file{.Renviron}. Then I
#' can run, for example, `xfun::rev_check('knitr')` repeatedly under a
#' special directory \file{~/Downloads/revcheck}. Reverse dependencies and their
#' dependencies will be installed to \file{~/R-tmp}, and \pkg{knitr} will be
#' installed from \file{~/Dropbox/repo/knitr}.
#' @param pkg The package name.
#' @param which Which types of reverse dependencies to check. See
#' [tools::package_dependencies()] for possible values. The
#' special value `'hard'` means the hard dependencies, i.e.,
#' `c('Depends', 'Imports', 'LinkingTo')`.
#' @param recheck A vector of package names to be (re)checked. If not provided
#' and there are any \file{*.Rcheck} directories left by certain packages
#' (this often means these packages failed the last time), `recheck` will
#' be these packages; if there are no \file{*.Rcheck} directories but a text
#' file \file{recheck} exists, `recheck` will be the character vector
#' read from this file. This provides a way for you to manually specify the
#' packages to be checked. If there are no packages to be rechecked, all
#' reverse dependencies will be checked.
#' @param ignore A vector of package names to be ignored in \command{R CMD
#' check}. If this argument is missing and a file \file{00ignore} exists, the
#' file will be read as a character vector and passed to this argument.
#' @param update Whether to update all packages before the check.
#' @param src The path of the source package directory.
#' @param src_dir The parent directory of the source package directory. This can
#' be set in a global option if all your source packages are under a common
#' parent directory.
#' @param timeout Timeout in seconds for \command{R CMD check} to check each
#' package. The (approximate) total time can be limited by the global option
#' `xfun.rev_check.timeout_total`.
#' @return A named numeric vector with the names being package names of reverse
#' dependencies; `0` indicates check success, `1` indicates failure,
#' and `2` indicates that a package was not checked due to global
#' timeout.
#' @seealso `devtools::revdep_check()` is more sophisticated, but currently
#' has a few major issues that affect me: (1) It always deletes the
#' \file{*.Rcheck} directories
#' (<https://github.com/r-lib/devtools/issues/1395>), which makes it
#' difficult to know more information about the failures; (2) It does not
#' fully install the source package before checking its reverse dependencies
#' (<https://github.com/r-lib/devtools/pull/1397>); (3) I feel it is
#' fairly difficult to iterate the check (ignore the successful packages and
#' only check the failed packages); by comparison, `xfun::rev_check()`
#' only requires you to run a short command repeatedly (failed packages are
#' indicated by the existing \file{*.Rcheck} directories, and automatically
#' checked again the next time).
#'
#' `xfun::rev_check()` borrowed a very nice feature from
#' `devtools::revdep_check()`: estimating and displaying the remaining
#' time. This is particularly useful for packages with huge numbers of reverse
#' dependencies.
#' @export
rev_check = function(
pkg, which = 'all', recheck = NULL, ignore = NULL, update = TRUE,
timeout = getOption('xfun.rev_check.timeout', 15 * 60),
src = file.path(src_dir, pkg), src_dir = getOption('xfun.rev_check.src_dir')
) {
if (length(src) != 1 || !dir_exists(src)) stop(
'The package source dir (the "src" argument) must be an existing directory'
)
message('Installing the source package ', src)
install_dir(path.expand(src))
db = available.packages(type = 'source')
# install packages that are not loadable (testing in parallel)
p_install = function(pkgs) {
pkgs_up = NULL
if (update) {
message('Updating all R packages...')
pkgs_up = intersect(old.packages(checkBuilt = TRUE)[, 'Package'], pkgs)
pkg_install(pkgs_up)
}
pkgs = setdiff(pkgs, pkgs_up) # don't install pkgs that were just updated
print(system.time(
pkg_install(unlist(plapply(pkgs, function(p) if (!loadable(p, new_session = TRUE)) p)))
))
}
unlink('*.Rcheck2', recursive = TRUE)
if (missing(recheck)) {
dirs = list.files('.', '.+[.]Rcheck$')
pkgs = gsub('.Rcheck$', '', dirs)
recheck = if (length(pkgs) == 0 && file_exists('recheck')) {
scan('recheck', 'character')
} else pkgs
}
pkgs = if (length(recheck)) {
p_install(pkg_dep(recheck, db, which = 'all'))
recheck
} else {
res = check_deps(pkg, db, which)
message('Installing dependencies of reverse dependencies')
res$install = setdiff(res$install, ignore_deps())
print(system.time(p_install(res$install)))
res$check
}
pkgs = intersect(pkgs, rownames(db)) # make sure the pkgs are on CRAN
lib_cran = './library-cran'
on.exit(unlink(lib_cran, recursive = TRUE), add = TRUE)
dir.create(lib_cran, showWarnings = FALSE)
pkg_install(pkg, lib = lib_cran) # the CRAN version of the package
f = tempfile('check-done', fileext = '.rds')
l = tempfile('check-lock'); on.exit(unlink(c(f, l)), add = TRUE)
n = length(pkgs)
if (n == 0) {
message('No reverse dependencies to be checked for the package ', pkg); return()
}
if (missing(ignore) && file_exists('00ignore')) ignore = scan('00ignore', 'character')
if (length(ignore)) {
message('Ignoring packages: ', paste(ignore, collapse = ' '))
unlink(sprintf('%s.Rcheck', ignore), recursive = TRUE)
pkgs = setdiff(pkgs, ignore)
if ((n <- length(pkgs)) == 0) {
message('No packages left to be checked'); return()
}
}
message('Downloading tarballs')
tars = download_tarball(pkgs, db, dir = 'tarball')
tars = setNames(tars, pkgs)
t0 = Sys.time()
tt = getOption('xfun.rev_check.timeout_total', Inf)
message('Checking ', n, ' packages: ', paste(pkgs, collapse = ' '))
res = plapply(pkgs, function(p) {
d = sprintf('%s.Rcheck', p)
if (!p %in% rownames(db)) {
      message('Checking ', p, ' (aborted since it is no longer on CRAN)')
unlink(d, recursive = TRUE)
return()
}
timing = function() {
# in case two packages finish at exactly the same time
while (file_exists(l)) Sys.sleep(.1)
file.create(l); on.exit(unlink(l), add = TRUE)
done = c(if (file_exists(f)) readRDS(f), p)
saveRDS(done, f)
n2 = length(setdiff(pkgs, done)) # remaining packages
t1 = Sys.time(); t2 = Sys.time() + n2 * (t1 - t0) / (n - n2)
message(
'Packages remaining: ', n2, '/', n, '; Expect to finish at ', t2,
' (', format(round(difftime(t2, t1))), ')'
)
# 0 (FALSE): success; 1: failure
setNames(as.integer(dir_exists(d)), p)
}
if (!file_exists(z <- tars[p])) {
dir.create(d, showWarnings = FALSE)
return(timing())
}
# timeout; package not checked
if (difftime(Sys.time(), t0, units = 'secs') > tt) {
return(setNames(2L, p))
}
check_it = function(args = NULL, ...) {
system2(
file.path(R.home('bin'), 'R'),
c(args, 'CMD', 'check', '--no-manual', shQuote(z)),
stdout = FALSE, stderr = FALSE, timeout = timeout, ...
)
}
check_it()
if (!clean_Rcheck(d)) {
if (!dir_exists(d)) {dir.create(d); return(timing())}
# try to install missing LaTeX packages for vignettes if possible, then recheck
vigs = list.files(
file.path(d, 'vign_test', p, 'vignettes'), '[.](Rnw|Rmd)$',
ignore.case = TRUE, full.names = TRUE
)
pkg_load2('tinytex')
if (length(vigs) && any(file_exists(with_ext(vigs, 'log')))) {
if (tinytex::is_tinytex()) for (vig in vigs) in_dir(dirname(vig), {
Rscript(shQuote(c('-e', 'if (grepl("[.]Rnw$", f <- commandArgs(T), ignore.case = T)) knitr::knit2pdf(f) else rmarkdown::render(f)', basename(vig))))
})
check_it()
if (clean_Rcheck(d)) return(timing())
}
# if there are still missing LaTeX packages, install them and recheck
l0 = tinytex::tl_pkgs()
lapply(
list.files(d, '[.]log$', full.names = TRUE, recursive = TRUE),
tinytex::parse_install, quiet = TRUE
)
if (!identical(l0, tinytex::tl_pkgs())) {
check_it()
if (clean_Rcheck(d)) return(timing())
}
# clean up the check log, and recheck with the current CRAN version of pkg
cleanup = function() in_dir(d, {
clean_log()
# so that I can easily preview it in the Finder on macOS
file_exists('00install.out') && file.rename('00install.out', '00install.log')
})
# ignore vignettes that failed to build for unknown reasons
cleanup()
if (clean_Rcheck(d)) return(timing())
# whether to check the package against the CRAN version?
if (!getOption('xfun.rev_check.compare', TRUE)) return(timing())
file.rename(d, d2 <- paste0(d, '2'))
check_it('--no-environ', env = tweak_r_libs(lib_cran))
if (!dir_exists(d)) file.rename(d2, d) else {
cleanup()
if (identical_logs(c(d, d2))) unlink(c(d, d2), recursive = TRUE)
}
}
timing()
})
if (getOption('xfun.rev_check.summary', FALSE)) {
html = compare_Rcheck(); if (isTRUE(grepl('[.]html$', html))) browseURL(html)
}
unlist(res)
}
# remove the OK lines in the check log
clean_log = function() {
if (!file_exists(l <- '00check.log')) return()
x = grep('^[*].+OK$', read_utf8(l), invert = TRUE, value = TRUE)
# don't want diffs in random tempdir/tempfile paths when comparing check logs
x[grep(dirname(tempdir()), x, fixed = TRUE)] = 'RANDOM TEMPDIR/TEMPFILE PATH DELETED'
# delete the download progress
x = grep('^\\s*\\[\\d+%] Downloaded \\d+ bytes...\\s*$', x, invert = TRUE, value = TRUE)
# delete lines of the form "address 0x1067143eb, cause 'illegal opcode'"
x = grep("address 0x[[:xdigit:]]+, cause '[^']+'", x, invert = TRUE, value = TRUE)
x = recheck_vig(x)
  x = tail(x, -2) # remove the first 2 lines (log dir name and R version)
  writeLines(x, l)
x
}
# sometimes R CMD check fails to build vignettes for unknown reasons; try to
# recheck the package in this case
recheck_vig = function(x) {
if (!any(i1 <- (x == '* checking re-building of vignette outputs ... WARNING')))
return(x)
i1 = which(i1)[1]
i2 = which(x == 'Execution halted')
i2 = i2[i2 > i1]
if (length(i2) == 0) return(x)
i3 = grep('^[*] checking ', x) # next checking item
i3 = i3[i3 > i1]
if (length(i3)) {
i2 = i2[i2 < i3[1]] # 'Execution halted' needs to appear before next '* checking'
if (length(i2) == 0) return(x)
}
# if no explicit errors were found in processing vignettes (except pandoc
# error), remove the relevant log
i2 = tail(i2, 1)
if (length(grep('pandoc document conversion failed with error', x[i1:i2])) > 0 ||
length(grep('Error: processing vignette .+ failed with diagnostics:', x[i1:i2])) == 0)
x = x[-(i1:i2)]
x
}
# are the check logs identical under a series of *.Rcheck directories?
identical_logs = function(dirs) {
if (length(dirs) < 2) return(FALSE)
if (!all(file_exists(logs <- file.path(dirs, '00check.log')))) return(FALSE)
x = read_utf8(logs[1])
for (i in 2:length(dirs)) if (!identical(x, read_utf8(logs[i]))) return(FALSE)
TRUE
}
# delete files/dirs that are usually not helpful
clean_Rcheck2 = function(dir = '.') {
owd = setwd(dir); on.exit(setwd(owd), add = TRUE)
ds = list.files('.', '.+[.]Rcheck$')
for (d in c(ds, paste0(ds, '2'))) {
f1 = list.files(d, full.names = TRUE)
f2 = file.path(d, c('00_pkg_src', '00check.log', '00install.log'), fsep = '/')
unlink(setdiff(f1, f2), recursive = TRUE)
}
}
# add a new library path to R_LIBS_USER
tweak_r_libs = function(new) {
x = read_all(existing_files(c('~/.Renviron', '.Renviron')))
x = grep('^\\s*#', x, invert = TRUE, value = TRUE)
x = gsub('^\\s+|\\s+$', '', x)
x = x[x != '']
i = grep('^R_LIBS_USER=.+', x)
if (length(i)) {
    x[i[1]] = sub('(="?)', paste0('\\1', new, .Platform$path.sep), x[i[1]])
x
} else {
v = Sys.getenv('R_LIBS_USER')
v = if (v == '') new else path_sep(new, v)
c(paste0('R_LIBS_USER=', v), x)
}
}
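# A sketch of the expected effect (not run): if .Renviron contains the line
#   R_LIBS_USER="~/R/library"
# then tweak_r_libs('./library-cran') rewrites it as
#   R_LIBS_USER="./library-cran:~/R/library"
# (with ';' instead of ':' on Windows)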
# separate paths by the path separator on a specific platform
path_sep = function(...) paste(..., sep = .Platform$path.sep)
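# e.g., path_sep('~/R-tmp', '~/R/library') gives '~/R-tmp:~/R/library' on
# Unix-alikes and '~/R-tmp;~/R/library' on Windows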
# a shorthand of tools::package_dependencies()
pkg_dep = function(x, ...) {
if (length(x)) unique(unlist(tools::package_dependencies(x, ...)))
}
# calculate the packages required to check a package
check_deps = function(x, db = available.packages(), which = 'all') {
if (identical(which, 'hard')) which = c('Depends', 'Imports', 'LinkingTo')
x0 = db[, 'Package'] # all available packages
# packages that reverse depend on me
x1 = pkg_dep(x, db, which, reverse = TRUE)
x1 = intersect(x1, x0)
# only check a sample of soft reverse dependencies (useful if there are too many)
if (identical(which, 'all') && (n <- getOption('xfun.rev_check.sample', 100)) >= 0) {
x2 = pkg_dep(x, db, c('Suggests', 'Enhances'), reverse = TRUE)
x2 = intersect(x2, x0)
if (n < length(x2)) x1 = c(setdiff(x1, x2), sample(x2, n))
}
# to R CMD check x1, I have to install all their dependencies
x2 = pkg_dep(x1, db, 'all')
# and for those dependencies, I have to install the default dependencies
x3 = pkg_dep(x2, db, recursive = TRUE)
list(check = x1, install = intersect(c(x1, x2, x3), x0))
}
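# Illustration (a sketch): check_deps('knitr') returns list(check = ..., install
# = ...), where $check holds the reverse dependencies to run R CMD check on,
# and $install additionally holds the packages that must be installed so those
# checks can run (their dependencies, plus the recursive default dependencies
# of those).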
#' Submit check jobs to crandalf
#'
#' Check the reverse dependencies of a package using the crandalf service:
#' <https://github.com/yihui/crandalf>. If the number of reverse
#' dependencies is large, they will be split into batches and pushed to crandalf
#' one by one.
#'
#' Due to the time limit of a single job on Github Actions (6 hours), you will
#' have to split the large number of reverse dependencies into batches and check
#' them sequentially on Github (at most 5 jobs in parallel). The function
#' `crandalf_check()` does this automatically when necessary. It requires
#' the \command{git} command to be available.
#'
#' The function `crandalf_results()` fetches check results from Github
#' after all checks are completed, merges the results, and shows a full summary of
#' check results. It requires `gh` (Github CLI:
#' <https://cli.github.com/manual/>) to be installed and you also need to
#' authenticate with your Github account beforehand.
#' @param pkg The package name of which the reverse dependencies are to be
#' checked.
#' @param size The number of reverse dependencies to be checked in each job.
#' @param jobs The number of jobs to run in Github Actions (by default, all jobs
#' are submitted, but you can choose to submit the first few jobs).
#' @param which The type of dependencies (see [rev_check()]).
#' @export
crandalf_check = function(pkg, size = 400, jobs = Inf, which = 'all') {
git_test_branch()
git_co('main')
on.exit(git_co('main'), add = TRUE)
git_test_branch()
# do everything inside the check-pkg branch
b = paste0('check-', pkg)
if (git_co(b, stderr = FALSE) != 0) {
git_co(c('-b', b))
file.create('recheck')
git(c('add', 'recheck'))
git(c('commit', '-m', shQuote(paste('Revcheck', pkg))))
git('push')
message(
'Please create a pull request from the branch ', b,
' on Github and re-run xfun::crandalf_check("', pkg, '").'
)
return(invisible())
}
git(c('merge', 'main'))
x = check_deps(pkg, which = which)$check
n = length(x)
if (n <= size) {
message('No need to split ', n, ' reverse dependencies into batches of size ', size, '.')
if (any(grepl('Your branch is ahead of ', git('status', stdout = TRUE)))) {
git('push')
} else if (Sys.which('gh') != '') {
gh(c('workflow', 'run', 'rev-check.yaml', '--ref', b))
message('Triggering rev-check.yaml job against ', b, ' branch in crandalf repo on Github.')
} else {
message('Remember to re-run the last job for the package ', pkg, ' on Github.')
}
return(invisible())
}
b = ceiling(n/size)
i = rep(seq_len(b), each = size)[seq_len(n)]
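  # e.g. (illustrative), n = 900 and size = 400 give b = 3 batches with
  # 400, 400, and 100 packages, respectively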
k = 1
# use an id in the commit so that I know which jobs are for the same pkg
id = format(Sys.time(), '%Y%m%d%H%M')
for (p in head(split(x, i), jobs)) {
message('Batch ', k)
writeLines(p, 'recheck')
git(c('add', 'recheck'))
git(c('commit', '-m', shQuote(paste(
c(id, 'checking:', head(p, 3), '...'), collapse = ' '
))))
git('push')
Sys.sleep(10)
k = k + 1
}
}
#' @param repo The crandalf repo on Github (of the form `user/repo` such as
#' `"yihui/crandalf"`). Usually you do not need to specify it, unless you
#' are not calling this function inside the crandalf project, because
#' \command{gh} should be able to figure out the repo automatically.
#' @param limit The maximum number of records for \command{gh run list} to retrieve.
#' You only need a larger number if the check results are very early in the
#' Github Action history.
#' @param wait Number of seconds to wait if not all jobs have been completed on
#' Github. By default, this function checks the status every 5 minutes until
#' all jobs are completed. Set `wait` to 0 to disable waiting (and throw
#' an error immediately when any jobs are not completed).
#' @rdname crandalf_check
#' @export
crandalf_results = function(pkg, repo = NA, limit = 200, wait = 5 * 60) {
res = crandalf_jobs(pkg, repo, limit)
if (NROW(res) == 0) {
stop('Did not find check results for ', pkg, ' from Github Actions.')
}
if (any(res[, 1] != 'completed')) {
if (wait <= 0) stop('Please wait till all jobs have been completed on Github Actions.')
status = NULL
repeat {
res = crandalf_jobs(pkg, repo, limit)
if (all(res[, 1] == 'completed')) break
if (is.null(status) || !identical(status, table(res[, 1]))) {
status = table(res[, 1])
timestamp()
print(status)
}
Sys.sleep(wait)
}
}
ids = grep_sub('^(\\d+) checking: .+', '\\1', res[, 3])
i = if (length(ids) > 0) grep(sprintf('^%s checking: ', ids[1]), res[, 3]) else {
head(which(res[, 2] == 'failure'), 1)
}
res = res[i, , drop = FALSE]
res = res[res[, 2] == 'failure', , drop = FALSE]
if (NROW(res) == 0) {
stop('Did not find any failed results on Github Actions.')
}
for (i in seq_len(nrow(res))) {
message('Downloading check results (', i, '/', nrow(res), ')')
gh_run('download', res[i, 7], '-D', tempfile('crandalf-', '.'), repo = repo)
}
if (interactive()) browseURL(crandalf_merge(pkg))
}
# retrieve the first N jobs info
crandalf_jobs = function(pkg, repo = NA, limit = 200) {
res = gh_run('list', '-L', limit, '-w', 'rev-check', repo = repo)
res = res[grep(paste0('rev-check\tcheck-', pkg), res)]
do.call(rbind, strsplit(res, '\t'))
}
crandalf_merge = function(pkg) {
unlink(list.files('.', '[.]Rcheck2?$'), recursive = TRUE)
x1 = x2 = x3 = NULL
f1 = '00check_diffs.html'; f3 = 'latex.txt'
for (d in list.files('.', '^crandalf-.+')) {
if (!dir_exists(d)) next
p = file.path(d, 'macOS-rev-check-results')
if (file_exists(f <- file.path(p, f1))) {
x = read_utf8(f)
x1 = if (length(x1) == 0) x else {
i1 = grep('<body>', x)[1]
i2 = tail(grep('</body>', x), 1)
i3 = tail(grep('</body>', x1), 1)
append(x1, x[(i1 + 1):(i2 - 1)], i3 - 1)
}
file.remove(f)
}
if (file_exists(f <- file.path(p, 'recheck2'))) {
x2 = c(x2, read_utf8(f))
file.remove(f)
}
cs = list.files(p, '[.]Rcheck[2]?$', full.names = TRUE)
file.rename(cs, basename(cs))
if (file_exists(f <- file.path(p, f3))) {
x3 = c(x3, read_utf8(f))
file.remove(f)
}
unlink(d, recursive = TRUE)
}
write_utf8(x1, f1) # the full summary
# store newly detected missing latex packages in latex.txt and commit/push
git_co('main')
append_unique(x3, f3)
find_missing_latex()
git(c('commit', '-m', shQuote('add more latex packages'), f3))
git('push')
git_co(paste0('check-', pkg))
r = '[.]Rcheck2$'
write_utf8(sort(unique(c(x2, gsub(r, '', list.files('.', r))))), 'recheck')
f1
}
# mclapply() with a different default for mc.cores and disable prescheduling
plapply = function(X, FUN, ...) {
parallel::mclapply(
X, FUN, ..., mc.cores = getOption('mc.cores', parallel::detectCores()),
mc.preschedule = FALSE
)
}
# download the source package from CRAN
download_tarball = function(p, db = available.packages(type = 'source'), dir = '.', retry = 3) {
if (!dir_exists(dir)) dir.create(dir, recursive = TRUE)
z = file.path(dir, sprintf('%s_%s.tar.gz', p, db[p, 'Version']))
mapply(function(p, z) {
# remove other versions of the package tarball
unlink(setdiff(list.files(dir, sprintf('^%s_.+.tar.gz', p), full.names = TRUE), z))
for (i in seq_len(retry)) {
if (file_exists(z)) break
try(download.file(paste(db[p, 'Repository'], basename(z), sep = '/'), z, mode = 'wb'))
}
}, p, z, SIMPLIFY = FALSE)
z
}
# clean up *.Rcheck if there are no warnings, errors, or notes in the log
clean_Rcheck = function(dir, log = read_utf8(file.path(dir, '00check.log'))) {
# do not check the status line
if (length(grep('^Status: ', tail(log, 1)))) log = head(log, -1)
if (length(grep('(WARNING|ERROR|NOTE)$', log)) == 0 ||
length(grep('[*] checking whether package .+ can be installed ... ERROR', log)))
unlink(dir, recursive = TRUE)
!dir_exists(dir)
}
#' @rdname rev_check
#' @param status_only If `TRUE`, only compare the final statuses of the
#' checks (the last line of \file{00check.log}), and delete \file{*.Rcheck}
#' and \file{*.Rcheck2} if the statuses are identical, otherwise write out the
#' full diffs of the logs. If `FALSE`, compare the full logs under
#' \file{*.Rcheck} and \file{*.Rcheck2}.
#' @param output The output Markdown file to which the diffs in check logs will
#' be written. If the \pkg{markdown} package is available, the Markdown file
#' will be converted to HTML, so you can see the diffs more clearly.
#' @export
compare_Rcheck = function(status_only = TRUE, output = '00check_diffs.md') {
if (length(dirs <- list.files('.', '.+[.]Rcheck$')) == 0) {
# clean up the `recheck` file
if (file_exists('recheck')) writeLines(character(), 'recheck')
return()
}
d2 = function(d) c(d, paste0(d, '2'))
logs = function(d) file.path(d2(d), '00check.log')
res = NULL
if (!status_only && Sys.which('diff') == '')
warning("The command 'diff' is not available; will not calculate exact diffs in logs.")
for (d in dirs) {
f = existing_files(logs(d))
if (status_only && length(f) == 2) {
status_line = function(file) {
x = tail(read_utf8(file), 1)
if (grepl('^Status: ', x)) x else {
warning('The last line of ', file, ' is not the status.')
NULL
}
}
# if the check with current CRAN version of package also failed, or the
        # two statuses are the same, chances are we are good to go
s1 = status_line(f[1])
if (length(grep('Status: .*\\d+ ERROR', s1)) || identical(s1, status_line(f[2]))) {
unlink(d2(d), recursive = TRUE); next
}
}
res = c(
res, paste('##', p <- sans_ext(d)), '',
sprintf('[CRAN version](https://cran.rstudio.com/package=%s) (-) vs current version (+):\n', p),
'```diff', file_diff(f), '```', ''
)
if (length(res2 <- cran_check_page(p, NULL))) res = c(
res, 'CRAN check logs:\n\n```', head_tail(unique(unlist(strsplit(res2, '\n')))), '```\n'
)
}
if (length(res) == 0) return()
xfun::write_utf8(res, output)
if (!loadable('markdown')) return(output)
markdown::mark_html(
text = res,
output = html_file <- with_ext(output, 'html')
)
if (!getOption('xfun.rev_check.keep_md', FALSE)) unlink(output)
html_file
}
# keep the first and last n elements in x, and omit the middle
head_tail = function(x, n = 10) {
if (length(x) <= 2 * n) return(x)
c(head(x, n), '....', tail(x, n))
}
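# e.g., head_tail(1:25) keeps 1:10 and 16:25, with '....' in between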
# compute the diffs of two files; if diffs too large, dedup them
file_diff = function(files, len = 200, use_diff = Sys.which('diff') != '') {
n = length(files)
if (n == 0) return()
if (n == 1) {
f = tempfile(); on.exit(unlink(f), add = TRUE); file.create(f)
files = c(f, files)
}
d = if (use_diff) {
suppressWarnings(system2('diff', shQuote(files), stdout = TRUE))
} else {
c(paste('<', read_utf8(files[1])), '---', paste('>', read_utf8(files[2])))
}
if (length(d) >= len) d = unique(d)
gsub('^>', '+', gsub('^<', '-', d))
}
# specify a list of package names to be ignored when installing all dependencies
ignore_deps = function() {
if (file_exists('00ignore_deps')) scan('00ignore_deps', 'character')
}
# download a check summary of a package from CRAN
cran_check_page = function(pkg, con = '00check-cran.log') {
u = sprintf('https://cran.rstudio.com/web/checks/check_results_%s.html', pkg)
x = read_utf8(u)
if (length(i <- grep('Check Details', x, ignore.case = TRUE)) == 0) return()
x = x[i[1]:length(x)]
x = gsub('<[^>]+>', '', x)
x = gsub(' ', ' ', x)
x = gsub('>', '>', x)
x = gsub('<', '<', x)
x = gsub('\\s+', ' ', x)
x = paste(trimws(x), collapse = '\n')
x = gsub('\n\n+', '\n\n', x)
if (length(con) == 1) writeLines(x, con) else x
}
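# e.g., cran_check_page('knitr') fetches the page (internet access required) and
# writes the summary to '00check-cran.log'; cran_check_page('knitr', NULL)
# returns the summary as a character string instead of writing a file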
# download CRAN check summaries of all failed packages
cran_check_pages = function() {
dirs = list.files('.', '[.]Rcheck$')
for (d in dirs) {
if (dir_exists(d)) in_dir(d, cran_check_page(gsub('[.]Rcheck$', '', d)))
}
}
# parse the check logs for missing LaTeX packages, so they can be installed later
find_missing_latex = function() {
dirs = list.files('.', '[.]Rcheck2?$')
pkgs = NULL
for (d in dirs) {
if (dir_exists(d)) pkgs = c(pkgs, in_dir(
d, tinytex::parse_packages('00check.log', quiet = c(TRUE, FALSE, FALSE))
))
}
pkgs = unique(pkgs)
if (file_exists(f <- 'latex.txt')) append_unique(pkgs, f)
pkgs
}
# run revdepcheck::cloud_check()
cloud_check = function(pkgs = NULL, ...) {
get_fun = function(name) getFromNamespace(name, 'revdepcheck')
tgz = pkg_build() # tarball
pkg = gsub('_.*$', '', tgz)
if (length(pkgs) == 0) pkgs = setdiff(get_fun('cran_revdeps')(pkg, bioc = TRUE), pkg)
N = 9000 # max is 10000 packages per batch job
jobs = broken = NULL
rver = format(getRversion())
check = function(...) {
# make sure to check at least 2 packages
if (length(pkgs) == 1) pkgs = c(pkgs, if (length(broken)) broken[1] else pkgs)
try_check = function(...) {
get_fun('cloud_check')(tarball = tgz, r_version = rver, revdep_packages = head(pkgs, N), ...)
}
jobs <<- c(jobs, tryCatch(
try_check(...),
error = function(e) {
if (getRversion() != rver) stop(e) # already tried a different version
# if the current R version doesn't work, use the highest supported version
r = ".*?\\[(('([0-9.]+)'(,\\s+)?)+)].*"
x = grep(r, e$message, value = TRUE)
x = gsub(r, '\\1', x)
v = unlist(strsplit(x, "('|,\\s+)"))
v = v[v != ''][1]
if (length(v) != 1 || is.na(v)) stop(e)
rver <<- v
try_check(...)
}
))
pkgs <<- tail(pkgs, -N)
}
  # if there are more than N revdeps, check them in batches of N at a time
while (length(pkgs) > 0) check(...)
for (job in jobs) {
assign('job_name', job, envir = get_fun('cloud_data'))
get_fun('cloud_status')(update_interval = 60)
if (length(res <- get_fun('cloud_broken')())) {
get_fun('cloud_report')()
for (p in res) print(get_fun('cloud_details')(revdep = p))
fs = list.files(file.path('revdep/cloud.noindex', job), full.names = TRUE)
# only keep results from broken packages
unlink(fs[!basename(fs) %in% c(res, paste0(res, '.tar.gz'))], recursive = TRUE)
broken = unique(c(res, broken))
}
}
if (length(broken)) {
stop('Package(s) broken: ', paste(broken, collapse = ' '))
} else {
message('All reverse dependencies are good!')
}
}
| /scratch/gouwar.j/cran-all/cranData/xfun/R/revcheck.R |
#' Type a character vector into the RStudio source editor
#'
#' Use the \pkg{rstudioapi} package to insert characters one by one into the
#' RStudio source editor, as if they were typed by a human.
#' @param x A character vector.
#' @param pause A function to return a number in seconds to pause after typing
#' each character.
#' @param mistake The probability of making random mistakes when typing the next
#' character. A random mistake is a random string typed into the editor and
#' deleted immediately.
#' @param save The probability of saving the document after typing each
#'   character. Note that if a document is not opened from a file, it will never
#' be saved.
#' @export
#' @import stats
#' @examples library(xfun)
#' if (loadable('rstudioapi') && rstudioapi::isAvailable()) {
#' rstudio_type('Hello, RStudio! xfun::rstudio_type() looks pretty cool!',
#' pause = function() runif(1, 0, .5), mistake = .1)
#' }
rstudio_type = function(x, pause = function() .1, mistake = 0, save = 0) {
get_ctx = function() rstudioapi::getSourceEditorContext()
ctx = get_ctx()
if (is.null(id <- ctx$id)) {
message('Please make sure an RStudio editor tab is open')
return()
}
save_it = function(prob = 1) {
if (ctx$path == '' || (rbinom(1, 1, prob) == 0)) return()
ctx = get_ctx()
# in case a new line is automatically added at the end when saving the doc
on.exit(rstudioapi::setSelectionRanges(ctx$selection[[1]]$range, id), add = TRUE)
rstudioapi::documentSave(id)
}
type_one = function(x) {
rstudioapi::insertText(text = x, id = id)
Sys.sleep(pause())
}
type_mistake = function() {
n = sample(1:10, 1)
x = sample(ascii_chars, n, replace = TRUE)
for (i in x) type_one(i)
Sys.sleep(.5)
ctx = rstudioapi::getSourceEditorContext()
r = ctx$selection[[1]]$range
r$start[2] = r$start[2] - n
rstudioapi::modifyRange(r, '', id)
Sys.sleep(.5)
}
x = paste(x, collapse = '\n')
for (i in unlist(strsplit(x, ''))) {
type_one(i); save_it(save)
if (runif(1) < mistake) type_mistake()
}
save_it(as.integer(save > 0)) # if prob is non-zero, save it finally
invisible()
}
ascii_chars = intToUtf8(32:126, TRUE)
| /scratch/gouwar.j/cran-all/cranData/xfun/R/rstudio.R |
#' An alternative to sessionInfo() to print session information
#'
#' This function tweaks the output of [sessionInfo()]: (1) It adds
#' the RStudio version information if running in the RStudio IDE; (2) It removes
#' the information about matrix products, BLAS, and LAPACK; (3) It removes the
#' names of base R packages; (4) It prints out package versions in a single
#' group, and does not differentiate between loaded and attached packages.
#'
#' It also allows you to only print out the versions of specified packages (via
#' the `packages` argument) and optionally their recursive dependencies.
#' For these specified packages (if provided), if a function
#' `xfun_session_info()` exists in a package, it will be called and
#' expected to return a character vector to be appended to the output of
#' `session_info()`. This provides a mechanism for other packages to inject
#' more information into the `session_info` output. For example,
#' \pkg{rmarkdown} (>= 1.20.2) has a function `xfun_session_info()` that
#' returns the version of Pandoc, which can be very useful information for
#' diagnostics.
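#'
#' As a hypothetical sketch (only the function name is fixed; the body and the
#' version string below are made up), a package could define:
#'
#' ```r
#' xfun_session_info = function() {
#'   paste('Foo version:', '1.2.3')
#' }
#' ```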
#' @param packages A character vector of package names, of which the versions
#' will be printed. If not specified, it means all loaded and attached
#' packages in the current R session.
#' @param dependencies Whether to print out the versions of the recursive
#' dependencies of packages.
#' @return A character vector of the session information marked as
#' [raw_string()].
#' @export
#' @examplesIf interactive()
#' xfun::session_info()
#' if (xfun::loadable('MASS')) xfun::session_info('MASS')
session_info = function(packages = NULL, dependencies = TRUE) {
res = sessionInfo()
res$matprod = res$BLAS = res$LAPACK = NULL
if (loadable('rstudioapi') && rstudioapi::isAvailable()) {
res$running = paste0(res$running, ', RStudio ', rstudioapi::getVersion())
}
tweak_info = function(obj, extra = NULL) {
res = capture.output(print(obj, tzone = FALSE))
i = grep('^(attached base packages|Matrix products):\\s*$', res, ignore.case = TRUE)
if (length(i)) res = res[-c(i, i + 1)]
res = gsubi('^\\s*locale:\\s*$', 'Locale:', res)
res = gsub('^\\s*\\[[0-9]+]\\s*', ' ', res) # remove vector indices like [1]
res = gsubi('^\\s*other attached packages:\\s*$', 'Package version:', res)
# print the locale info on a single line if possible
if (length(i <- which(res == 'Locale:')) == 1 && res[i + 2] == '') {
res[i] = paste(res[i], gsub('\\s*/\\s*', ' / ', gsub('^\\s+', '', res[i + 1])))
res = res[-(i + 1)]
}
# remove extra blank lines
if ((n <- length(res)) > 1) {
i = is_blank(res)
res = res[!c(FALSE, i[1:(n-1)] & i[2:n])]
}
raw_string(c(res, extra))
}
version_info = function(pkgs) {
res = lapply(pkgs, function(p) {
list(Version = as.character(packageVersion(p)), Package = p)
})
as.list(setNames(res, pkgs))
}
res$basePkgs = raw_string(list())
info = c(res$otherPkgs, res$loadedOnly)
packages = setdiff(packages, '') # remove empty strings (#65)
if (length(packages) > 0) {
info = info[intersect(names(info), packages)]
info = c(info, version_info(setdiff(packages, names(info))))
}
res$loadedOnly = NULL
if (dependencies) {
deps = pkg_dep(names(info), installed.packages(), recursive = TRUE)
deps = sort(setdiff(deps, names(info)))
info = c(info, version_info(deps))
}
if (length(packages) > 0 || dependencies) info = info[sort(names(info))]
res$otherPkgs = info
extra = unlist(lapply(packages, function(p) tryCatch(
c('', getFromNamespace('xfun_session_info', p)()), error = function(e) NULL)
))
tweak_info(res, extra)
}
#' Perform a task once in an R session
#'
#' Perform a task once in an R session, e.g., emit a message or warning. Then
#' give users an optional hint on how not to perform this task at all.
#' @param task Any R code expression to be evaluated once to perform a task,
#' e.g., `warning('Danger!')` or `message('Today is ', Sys.Date())`.
#' @param option An R option name. This name should be as unique as possible in
#' [options()]. After the task has been successfully performed,
#' this option will be set to `FALSE` in the current R session, to
#' prevent the task from being performed again the next time when
#' `do_once()` is called.
#' @param hint A character vector to provide a hint to users on how not to
#' perform the task or see the message again in the current R session. Set
#' `hint = ""` if you do not want to provide the hint.
#' @return The value returned by the `task`, invisibly.
#' @export
#' @examples
#' do_once(message("Today's date is ", Sys.Date()), "xfun.date.reminder")
#' # if you run it again, it will not emit the message again
#' do_once(message("Today's date is ", Sys.Date()), "xfun.date.reminder")
#'
#' do_once({Sys.sleep(2); 1 + 1}, "xfun.task.1plus1")
#' do_once({Sys.sleep(2); 1 + 1}, "xfun.task.1plus1")
do_once = function(task, option, hint = c(
'You will not see this message again in this R session.',
'If you never want to see this message,',
sprintf('you may set options(%s = FALSE) in your .Rprofile.', option)
)) {
if (identical(getOption(option), FALSE)) return(invisible())
task
hint = paste(hint, collapse = ' ')
if (hint != '') message(hint)
options(setNames(list(FALSE), option))
invisible(task)
}
| /scratch/gouwar.j/cran-all/cranData/xfun/R/session.R |
#' Test if a character vector consists of blank strings
#'
#' Return a logical vector indicating if elements of a character vector are
#' blank (white spaces or empty strings).
#' @param x A character vector.
#' @return `TRUE` for blank elements, or `FALSE` otherwise.
#' @export
#' @examples
#' xfun::is_blank('')
#' xfun::is_blank('abc')
#' xfun::is_blank(c('', ' ', '\n\t'))
#' xfun::is_blank(c('', ' ', 'abc'))
is_blank = function(x) grepl('^\\s*$', x)
#' Convert numbers to English words
#'
#' This can be helpful when writing reports with \pkg{knitr}/\pkg{rmarkdown} if
#' we want to print numbers as English words in the output. The function `n2w()`
#' is an alias of `numbers_to_words()`.
#' @param x A numeric vector. The absolute values should be less than `1e15`.
#' @param cap Whether to capitalize the first letter of the word. This can be
#' useful when the word is at the beginning of a sentence. Default is `FALSE`.
#' @param hyphen Whether to insert hyphen (-) when the number is between 21 and
#' 99 (except 30, 40, etc.).
#' @param and Whether to insert `and` between hundreds and tens, e.g., write 110
#' as \dQuote{one hundred and ten} if `TRUE` instead of \dQuote{one hundred
#' ten}.
#' @return A character vector.
#' @author Daijiang Li
#' @export
#' @examples library(xfun)
#' n2w(0, cap = TRUE)
#' n2w(0:121, and = TRUE)
#' n2w(1e6)
#' n2w(1e11+12345678)
#' n2w(-987654321)
#' n2w(1e15-1)
#' n2w(123.456)
#' n2w(123.45678901)
#' n2w(123.456789098765)
numbers_to_words = function(x, cap = FALSE, hyphen = TRUE, and = FALSE) {
if (!is.numeric(x)) stop('The input is not numeric.')
if (any(abs(x) >= 1e15)) stop('The absolute value must be less than 1e15.')
opts = options(scipen = 15, OutDec = '.') # avoid scientific notation
on.exit(options(opts), add = TRUE)
zero_to_19 = c(
'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten',
'eleven', 'twelve', paste0(c('thir', 'four', 'fif', 'six', 'seven', 'eigh', 'nine'), 'teen')
)
names(zero_to_19) = as.character(0:19)
tens = c('twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety')
names(tens) = as.character(seq(20, 90, 10))
marks = c('', 'thousand,', 'million,', 'billion,', 'trillion,')
convert_1 = function(x_c) zero_to_19[x_c] # 0 - 9
# 10 - 99
convert_2 = function(x_c) {
x_cs = strsplit(x_c, split = '')[[1]]
if (x_cs[1] == 1) return(zero_to_19[x_c]) # 10 - 19
if (x_cs[2] == 0) return(tens[x_c]) # 20, 30, 40, ...
# 21, 22, etc.
paste(tens[as.integer(x_cs[1]) - 1], convert_1(x_cs[2]), sep = if (hyphen) '-' else ' ')
}
# 100 - 999
convert_3 = function(x_c) {
x_cs = strsplit(x_c, split = '')[[1]]
n_hundreds = paste(convert_1(x_cs[1]), 'hundred', sep = ' ')
out = if (x_cs[2] == '0') {
if (x_cs[3] == '0') return(n_hundreds) # x00
convert_1(x_cs[3]) # x0x
} else {
convert_2(paste(x_cs[2:3], collapse = '')) # xxx
}
paste(n_hundreds, out, sep = if (and) ' and ' else ' ')
}
convert_le3 = function(x_c) {
x_c = gsub('^0+', '', x_c) # avoid something like 000, 001, 010; but also remove 0
n = nchar(x_c)
if (n == 0) return('')
if (n == 1) return(convert_1(x_c))
if (n == 2) return(convert_2(x_c))
if (n == 3) return(convert_3(x_c))
}
convert_one = function(x) {
minus = if (x >= 0) '' else {
x = abs(x); 'minus '
}
if (x == 0) {
out = 'zero' # because convert_le3 removed all 0s
} else {
x_marks = strsplit(format(floor(x), big.mark = ','), split = ',')[[1]] # e.g. 123,456,789
out = vapply(x_marks, convert_le3, character(1)) # group by 3 digits
      x_marks2 = marks[length(x_marks):1] # the unit for each 3-digit group (e.g., million, thousand)
x_marks2[which(out == '')] = '' # e.g. 4,000,123, 000, remove millions
out = paste(out, x_marks2, sep = ' ', collapse = ' ') # zip together
}
out = paste0(minus, out)
out = gsub('^ *|,? *$', '', out) # trim heading/trailing space
out = gsub(' {2,}', ' ', out) # remove multiple spaces
if (cap) out = sub('^([a-z])', '\\U\\1', out, perl = TRUE)
if (x - floor(x) > 0) {
frac = sub('^[0-9]+[.]', '', as.character(x))
frac = convert_1(strsplit(frac, '')[[1]])
out = paste(c(out, 'point', frac), collapse = ' ')
}
out
}
if (length(x) > 1) vapply(x, convert_one, character(1)) else convert_one(x)
}
#' @export
#' @rdname numbers_to_words
n2w = numbers_to_words
#' Evaluate an expression after forcing the decimal point to be a dot
#'
#' Sometimes it is necessary to use the dot character as the decimal separator.
#' In R, this could be affected by two settings: the global option
#' `options(OutDec)` and the `LC_NUMERIC` locale. This function sets the former
#' to `.` and the latter to `C` before evaluating an expression, such as
#' coercing a number to character.
#' @param x An expression.
#' @export
#' @return The value of `x`.
#' @examples
#' opts = options(OutDec = ',')
#' as.character(1.234) # using ',' as the decimal separator
#' print(1.234) # same
#' xfun::decimal_dot(as.character(1.234)) # using dot
#' xfun::decimal_dot(print(1.234)) # using dot
#' options(opts)
decimal_dot = function(x) {
opts = options(OutDec = '.'); on.exit(options(opts), add = TRUE)
lcn = Sys.getlocale('LC_NUMERIC')
if (lcn != 'C') {
Sys.setlocale('LC_NUMERIC', 'C')
on.exit(suppressWarnings(Sys.setlocale('LC_NUMERIC', lcn)), add = TRUE)
}
x
}
# create a URL query string from named parameters
query_params = function(..., .list = list()) {
x = if (missing(.list)) list(...) else .list
x = paste(names(x), x, sep = '=', collapse = '&')
if (x != '') paste0('?', x) else x
}
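# e.g., query_params(a = 1, b = 'x') returns "?a=1&b=x", and query_params() returns ""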
#' Split a character vector by line breaks
#'
#' Call `unlist(strsplit(x, '\n'))` on the character vector `x` and
#' make sure it works in a few edge cases: `split_lines('')` returns
#' `''` instead of `character(0)` (which is the returned value of
#' `strsplit('', '\n')`); `split_lines('a\n')` returns `c('a',
#' '')` instead of `c('a')` (which is the returned value of
#' `strsplit('a\n', '\n')`).
#' @param x A character vector.
#' @return All elements of the character vector are split by `'\n'` into
#' lines.
#' @export
#' @examples xfun::split_lines(c('a', 'b\nc'))
split_lines = function(x) {
if (length(grep('\n', x)) == 0L) return(x)
x = gsub('\n$', '\n\n', x)
x[x == ''] = '\n'
unlist(strsplit(x, '\r?\n'))
}
#' Split source lines into complete expressions
#'
#' Parse the lines of code one by one to find complete expressions in the code,
#' and put them in a list.
#' @param x A character vector of R source code.
#' @param merge_comments Whether to merge consecutive lines of comments as a
#' single expression to be combined with the next non-comment expression (if
#' any).
#' @param line_number Whether to store the starting line number of each
#' expression in the returned value.
#' @param skip A token to skip the rest of the code. When provided as a character
#'   string, the split will stop at this token.
#' @return A list of character vectors, and each vector contains a complete R
#' expression, with an attribute `line_start` indicating the starting line
#' number of the expression if the argument `line_number = TRUE`.
#' @export
#' @examples
#' xfun::split_source(c('if (TRUE) {', '1 + 1', '}', 'print(1:5)'))
#' xfun::split_source(c('print(1:5)', '#--#', 'if (TRUE) {', '1 + 1', '}'), skip = '#--#')
split_source = function(
x, merge_comments = FALSE, line_number = FALSE, skip = getOption('xfun.split_source.skip')
) {
if ((n <- length(x)) < 1) return(list(x))
if (!is.character(skip) || length(skip) != 1) skip = NULL
i1 = i2 = 1
res = list()
add_source = function(x) {
res[[length(res) + 1]] <<- if (line_number) structure(x, line_start = i1) else x
}
while (i2 <= n) {
piece = x[i1:i2]
if ((!merge_comments || (!all(grepl('^#', piece)) || i2 == n)) && valid_syntax(piece)) {
# check if the skip token is found
if (!is.null(skip) && !is.na(i3 <- match(skip, piece))) {
if (i3 > 1) add_source(x[i1 + 1:i3 - 1])
return(res)
}
add_source(piece)
i1 = i2 + 1 # start from the next line
}
i2 = i2 + 1
}
  if (i1 <= n) parse(text = piece) # the remaining code must contain a syntax error; let parse() throw it
res
}
#' Check if the syntax of the code is valid
#'
#' Try to [parse()] the code and see if an error occurs.
#' @param code A character vector of R source code.
#' @param silent Whether to suppress the error message when the code is not
#' valid.
#' @return `TRUE` if the code could be parsed, otherwise `FALSE`.
#' @export
#' @examples xfun::valid_syntax('1+1')
#' xfun::valid_syntax('1+')
#' xfun::valid_syntax(c('if(T){1+1}', 'else {2+2}'), silent = FALSE)
valid_syntax = function(code, silent = TRUE) {
!inherits(try(parse_only(code), silent = silent), 'try-error')
}
#' Bump version numbers
#'
#' Increase the last digit of version numbers, e.g., from `0.1` to
#' `0.2`, or `7.23.9` to `7.23.10`.
#' @param x A vector of version numbers (of the class `"numeric_version"`),
#' or values that can be coerced to version numbers via
#' `as.numeric_version()`.
#' @return A vector of new version numbers.
#' @export
#' @examples xfun::bump_version(c('0.1', '91.2.14'))
bump_version = function(x) {
x = as.numeric_version(x)
for (i in seq_along(x)) {
v = x[i]
n = length(unclass(v)[[1]])
v[[1, n]] = v[[1, n]] + 1 # bump the last digit
x[i] = v
}
x
}
#' Fix pairs of characters in a file
#'
#' For example, the curly quotes may be wrong (the opening and closing quotes
#' are swapped for some reason).
#' @param x A character vector (by default, read from `file`).
#' @param file Path to a text file.
#' @param chars A vector of characters of length 2. By default, it is a pair of
#' curly double quotes.
#' @references <https://d.cosx.org/d/420794/5>
#' @noRd
#' @examples
#' files = list.files('.', '[.]R?md$', recursive = TRUE, full.names = TRUE)
#' for (f in files) {
#' pair_chars(file = f)
#' # curly single quotes
#' pair_chars(file = f, chars = c('\U2018', '\U2019'))
#' }
pair_chars = function(x = read_utf8(file), file, chars = c('\U201c', '\U201d')) {
if (length(chars) != 2) stop("'chars' must be of length 2 (i.e., a pair of characters)")
is_file = !missing(file)
r = paste(c('[', chars, ']'), collapse = '')
k = gregexpr(r, x)
m = regmatches(x, k)
for (i in seq_along(m)) {
n = length(m[[i]])
if (n %% 2 != 0) {
warning(
'The characters do not appear in pairs in the text (',
'line: ', i, if (is_file) c('; file: ', file), '):\n', x[i], '\n'
)
next
}
m[[i]] = rep(chars, length.out = n)
}
x2 = x
regmatches(x, k) = m
if (is_file) {
if (!identical(x, x2)) write_utf8(x, file)
invisible(x)
} else x
}
#' Generate ID strings
#'
#' Substitute certain (by default, non-alphanumeric) characters with dashes and
#' remove extra dashes at both ends to generate ID strings. This function is
#' intended for generating IDs for HTML elements, so HTML tags in the input text
#' will be removed first.
#' @param x A character vector.
#' @param exclude A (Perl) regular expression to detect characters to be
#' replaced by dashes. By default, non-alphanumeric characters are replaced.
#' @return A character vector of IDs.
#' @export
#' @examples
#' x = c('Hello world 123!', 'a &b*^##c 456')
#' xfun::alnum_id(x)
#' xfun::alnum_id(x, '[^[:alpha:]]+') # only keep alphabetical chars
#' # when text contains HTML tags
#' xfun::alnum_id('<h1>Hello <strong>world</strong>!')
alnum_id = function(x, exclude = '[^[:alnum:]]+') {
x = strip_html(x)
tolower(gsub('^-+|-+$', '', gsub(exclude, '-', x, perl = TRUE)))
}
#' Strip HTML tags
#'
#' Remove HTML tags and comments from text.
#' @param x A character vector.
#' @return A character vector with HTML tags and comments stripped off.
#' @export
#' @examples
#' xfun::strip_html('<a href="#">Hello <!-- comment -->world!</a>')
strip_html = function (x) {
x = gsub('<!--.*?-->', '', x)
x = gsub('<[^>]+>', '', x)
x
}
# escape special HTML characters
escape_html = function (x) {
x = gsubf('&', '&', x)
x = gsubf('<', '<', x)
x = gsubf('>', '>', x)
x = gsubf('"', '"', x)
x
}
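# e.g., escape_html('a < b & "c"') returns 'a &lt; b &amp; &quot;c&quot;'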
one_string = function(x, ...) paste(x, ..., collapse = '\n')
| /scratch/gouwar.j/cran-all/cranData/xfun/R/string.R |
`%|%` = function(x, y) if (length(x)) x else y
`%||%` = function(x, y) if (is.null(x)) y else x
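# e.g., character(0) %|% 'fallback' returns 'fallback', and NULL %||% 1 returns 1;
# %|% falls back on any zero-length value, whereas %||% only falls back on NULL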
stop2 = function(...) stop(..., call. = FALSE)
warning2 = function(...) warning(..., call. = FALSE)
#' Obtain an attribute of an object without partial matching
#'
#' An abbreviation of [`base::attr`]`(exact = TRUE)`.
#' @param ... Passed to [base::attr()] (without the `exact` argument).
#' @export
#' @examples
#' z = structure(list(a = 1), foo = 2)
#' base::attr(z, 'f') # 2
#' xfun::attr(z, 'f') # NULL
#' xfun::attr(z, 'foo') # 2
attr = function(...) base::attr(..., exact = TRUE)
#' Set environment variables
#'
#' Set environment variables from a named character vector, and return the old
#' values of the variables, so they could be restored later.
#'
#' The motivation of this function is that [Sys.setenv()] does not
#' return the old values of the environment variables, so it is not
#' straightforward to restore the variables later.
#' @param vars A named character vector of the form `c(VARIABLE = VALUE)`.
#' If any value is `NA`, this function will try to unset the variable.
#' @return Old values of the variables (if not set, `NA`).
#' @export
#' @examples
#' vars = xfun::set_envvar(c(FOO = '1234'))
#' Sys.getenv('FOO')
#' xfun::set_envvar(vars)
#' Sys.getenv('FOO')
set_envvar = function(vars) {
if (is.null(nms <- names(vars)) || any(nms == '')) stop(
"The 'vars' argument must take a named character vector."
)
vals = Sys.getenv(nms, NA, names = TRUE)
i = is.na(vars)
suppressWarnings(Sys.unsetenv(nms[i]))
if (length(vars <- vars[!i])) do.call(Sys.setenv, as.list(vars))
invisible(vals)
}
#' Retrieve a global option from both `options()` and environment variables
#'
#' If the option exists in [options()], use its value. If not, query the
#' environment variable with the name `R_NAME` where `NAME` is the capitalized
#' option name with dots substituted by underscores. For example, for an option
#' `xfun.foo`, first we try `getOption('xfun.foo')`; if it does not exist, we
#' check the environment variable `R_XFUN_FOO`.
#'
#' This provides two possible ways, whichever is more convenient, for users to
#' set an option. For example, global options can be set in the [.Rprofile]
#' file, and environment variables can be set in the [.Renviron] file.
#' @param name The option name.
#' @param default The default value if the option is not found in [options()] or
#' environment variables.
#' @return The option value.
#' @export
#' @examples
#' xfun::env_option('xfun.test.option') # NULL
#'
#' Sys.setenv(R_XFUN_TEST_OPTION = '1234')
#' xfun::env_option('xfun.test.option') # 1234
#'
#' options(xfun.test.option = TRUE)
#' xfun::env_option('xfun.test.option') # TRUE (from options())
#' options(xfun.test.option = NULL) # reset the option
#' xfun::env_option('xfun.test.option') # 1234 (from env var)
#'
#' Sys.unsetenv('R_XFUN_TEST_OPTION')
#' xfun::env_option('xfun.test.option') # NULL again
#'
#' xfun::env_option('xfun.test.option', FALSE) # use default
env_option = function(name, default = NULL) {
if (name %in% names(.Options)) return(.Options[[name]])
name = toupper(paste0('R_', gsub('[.]', '_', name)))
envs = Sys.getenv()
if (name %in% names(envs)) envs[[name]] else default
}
#' Call `on.exit()` in a parent function
#'
#' The function [on.exit()] is often used to perform tasks when the
#' current function exits. This `exit_call()` function allows calling a
#' function when a parent function exits (think of it as inserting an
#' `on.exit()` call into the parent function).
#' @param fun A function to be called when the parent function exits.
#' @param n The parent frame number. For `n = 1`, `exit_call(fun)` is
#' the same as `on.exit(fun())`; `n = 2` means adding
#' `on.exit(fun())` in the parent function; `n = 3` means the
#' grandparent, etc.
#' @param ... Other arguments to be passed to `on.exit()`.
#' @references This function was inspired by Kevin Ushey:
#' <https://yihui.org/en/2017/12/on-exit-parent/>
#' @export
#' @examples
#' f = function(x) {
#' print(x)
#' xfun::exit_call(function() print('The parent function is exiting!'))
#' }
#' g = function(y) {
#' f(y)
#' print('f() has been called!')
#' }
#' g('An argument of g()!')
exit_call = function(fun, n = 2, ...) {
do.call(
on.exit, list(substitute(fun(), list(fun = fun)), add = TRUE, ...),
envir = parent.frame(n)
)
}
#' Evaluate an expression under a specified working directory
#'
#' Change the working directory, evaluate the expression, and restore the
#' working directory.
#' @param dir Path to a directory.
#' @param expr An R expression.
#' @export
#' @examples
#' library(xfun)
#' in_dir(tempdir(), {print(getwd()); list.files()})
in_dir = function(dir, expr) {
owd = setwd(dir); on.exit(setwd(owd))
expr
}
#' Test if an object is `FALSE`
#'
#' For R versions lower than 3.5.0, this function is a simple abbreviation of
#' `identical(x, FALSE)`. For higher R versions, this function calls
#' `base::isFALSE()`.
#' @param x An R object.
#' @note This function will be deprecated in the future. We recommend that you
#' use [base::isFALSE()] instead. If you have to support R versions lower
#' than 3.5.0, you may use `identical(x, FALSE)`, but please note that it is
#' not equivalent to `base::isFALSE()`.
#' @export
#' @keywords internal
isFALSE = function(x) {
pkgs = tools::dependsOnPkgs('xfun', dependencies = 'all', recursive = FALSE)
pkgs = intersect(pkgs, sys.packages())
vers = sapply(pkgs, function(p) as.character(packageVersion(p)))
if ('isFALSE' %in% ls(baseenv())) stop(
'The function xfun::isFALSE() has been deprecated. Please ',
if (length(vers)) {
c('update the possibly outdated package(s): ', paste(pkgs, vers, sep = ' ', collapse = ', '), '. ')
} else {
'consider using base::isFALSE(x) or identical(x, FALSE) instead. '
},
'You may see https://yihui.org/en/2023/02/xfun-isfalse/ for more info.'
)
identical(x, FALSE)
}
# try to get the names of packages for all functions on the call stack
sys.packages = function() {
unique(unlist(lapply(seq_along(sys.calls()), function(i) {
environment(sys.function(i))$.packageName
})))
}
#' Parse R code and do not keep the source
#'
#' An abbreviation of `parse(keep.source = FALSE)`.
#' @param code A character vector of the R source code.
#' @export
#' @return R [expression()]s.
#' @examples library(xfun)
#' parse_only('1+1'); parse_only(c('y~x', '1:5 # a comment'))
#' parse_only(character(0))
parse_only = function(code) {
if (length(code) == 0) return(expression())
parse(text = code, keep.source = FALSE)
}
#' Try to evaluate an expression silently
#'
#' An abbreviation of `try(silent = TRUE)`.
#' @param expr An R expression.
#' @export
#' @examples library(xfun)
#' z = try_silent(stop('Wrong!'))
#' inherits(z, 'try-error')
try_silent = function(expr) try(expr, silent = TRUE)
#' Try an expression and see if it throws an error
#'
#' Use [tryCatch()] to check if an expression throws an error.
#' @inheritParams try_silent
#' @return `TRUE` (error) or `FALSE` (success).
#' @export
#' @examples
#' xfun::try_error(stop('foo')) # TRUE
#' xfun::try_error(1:10) # FALSE
try_error = function(expr) {
err = FALSE
tryCatch(expr, error = function(e) err <<- TRUE)
err
}
#' Retry calling a function for a number of times
#'
#' If the function returns an error, retry it for the specified number of
#' times, with a pause between attempts.
#'
#' One application of this function is to download a web resource. Since the
#' download might fail sometimes, you may want to retry it a few more times.
#' @param fun A function.
#' @param ... Arguments to be passed to the function.
#' @param .times The number of times.
#' @param .pause The number of seconds to wait before the next attempt.
#' @export
#' @examplesIf interactive()
#' # read the Github releases info of the repo yihui/xfun
#' xfun::retry(xfun::github_releases, 'yihui/xfun')
retry = function(fun, ..., .times = 3, .pause = 5) {
for (i in seq_len(.times)) {
if (!inherits(res <- tryCatch(fun(...), error = identity), 'error'))
return(res)
Sys.sleep(.pause)
}
stop(res$message, call. = FALSE)
}
gsubi = function(...) gsub(..., ignore.case = TRUE)
gsubf = function(...) gsub(..., fixed = TRUE)
#' Turn the output of [str()] into a tree diagram
#'
#' The super useful function `str()` uses \verb{..} to indicate the level
#' of sub-elements of an object, which may be difficult to read. This function
#' uses vertical pipes to connect all sub-elements on the same level, so it is
#' clearer which elements belong to the same parent element in an object with a
#' nested structure (such as a nested list).
#' @param ... Arguments to be passed to [str()] (note that the
#' `comp.str` is hardcoded inside this function, and it is the only
#' argument that you cannot customize).
#' @return A character string as a [raw_string()].
#' @export
#' @examples fit = lsfit(1:9, 1:9)
#' str(fit)
#' xfun::tree(fit)
#'
#' fit = lm(dist ~ speed, data = cars)
#' str(fit)
#' xfun::tree(fit)
#'
#' # some trivial examples
#' xfun::tree(1:10)
#' xfun::tree(iris)
tree = function(...) {
x = capture.output(str(..., comp.str = '$ '))
r = '^([^$-]+[$-] )(.*)$'
x1 = gsub(r, '\\1', x)
x2 = gsub(r, '\\2', x)
x1 = gsub('[.][.]', ' ', x1)
x1 = gsub('[$] $', '|-', x1)
x1 = connect_pipes(x1)
x3 = paste(x1, x2, sep = '')
i = !grepl(r, x)
x3[i] = x[i]
raw_string(x3)
}
# for a tree diagram, connect the pipes on the same level, e.g., change
# |- ..
# |- ..
#
# |- ..
# to
# |- ..
# |- ..
# |
# |- ..
# this task is not complicated, but just boring nested for-loops
connect_pipes = function(x) {
ns = nchar(x); n = max(ns); m = length(x)
if (n < 2 || m < 3) return(x)
A = matrix('', nrow = m, ncol = n)
x = strsplit(x, '')
for (i in seq_len(m)) {
A[i, seq_len(ns[i])] = x[[i]]
}
k = NULL
for (j in seq_len(n - 1)) {
for (i in seq_len(m - 2)) {
if (!all(A[i, j + 0:1] == c('|', '-'))) next
for (l in (i + 1):m) {
cells = A[l, j + 0:1]
if (all(cells == ' ')) {
if (l == m) {
k = NULL; break
} else k = c(k, l)
} else if (all(cells == c('|', '-'))) {
break
} else {
k = NULL; break
}
}
if (length(k) > 0) A[k, j] = '|'
k = NULL
}
}
apply(A, 1, paste, collapse = '')
}
pkg_file = function(...) system.file(..., package = 'xfun', mustWork = TRUE)
#' Format numbers of bytes using a specified unit
#'
#' Call the S3 method `format.object_size()` to format numbers of bytes.
#' @param x A numeric vector (each element represents a number of bytes).
#' @param units,... Passed to [`format()`][format.object_size].
#' @return A character vector.
#' @export
#' @examples
#' xfun::format_bytes(c(1, 1024, 2000, 1e6, 2e8))
#' xfun::format_bytes(c(1, 1024, 2000, 1e6, 2e8), units = 'KB')
format_bytes = function(x, units = 'auto', ...) {
vapply(x, function(b) {
format(structure(b, class = 'object_size'), units = units, ...)
}, character(1))
}
# get the function name of the parent call
func_name = function(which = 1) {
x = sys.call(which)[[1]]
deparse(x)[1]
}
# evaluate an expression with an error handler; originally this was for knitr to
# output error location but can also be useful for other applications
handle_error = function(
expr, handler, label = '', fun = getOption('xfun.handle_error.loc_fun')
) {
withCallingHandlers(expr, error = function(e) {
loc = if (is.function(fun)) trimws(fun(label)) else ''
if (loc != '') loc = sprintf(' at lines %s', loc)
message(one_string(handler(e, loc)))
})
}
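# a minimal usage sketch: the handler composes the message, and the error still
# propagates afterwards because withCallingHandlers() does not catch it, e.g.,
# handle_error(stop('boom'), function(e, loc) paste0('Error', loc, ': ', conditionMessage(e)), 'foo')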
| /scratch/gouwar.j/cran-all/cranData/xfun/R/utils.R |
#' Read YAML data
#'
#' If the \pkg{yaml} package is installed, use [yaml::yaml.load()] to read the
#' data. If not, use a simple parser instead, which only supports a limited
#' number of data types (see \dQuote{Examples}). In particular, it does not
#' support values that span across multiple lines (such as multi-line text).
#' @param x A character vector of YAML data.
#' @param ...,handlers Arguments to be passed to [yaml::yaml.load()].
#' @param envir The environment in which R expressions in YAML are evaluated. To
#' disable the evaluation, use `envir = FALSE`.
#' @param use_yaml Whether to use the \pkg{yaml} package.
#' @return An R object (typically a list).
#' @note R expressions in YAML will be returned as [expression]s when they are
#'   not evaluated. This is different from [yaml::yaml.load()], which returns
#' character strings for expressions.
#' @export
#' @examples
#' # test the simple parser without using the yaml package
#' read_yaml = function(...) xfun::yaml_load(..., use_yaml = FALSE)
#' read_yaml('a: 1')
#' read_yaml('a: 1\nb: "foo"\nc: null')
#' read_yaml('a:\n b: false\n c: true\n d: 1.234\ne: bar')
#' read_yaml('a: !expr paste(1:10, collapse = ", ")')
#' read_yaml('a: [1, 3, 4, 2]')
#' read_yaml('a: [1, "abc", 4, 2]')
#' read_yaml('a: ["foo", "bar"]')
#' read_yaml('a: [true, false, true]')
#' # the other form of array is not supported
#' read_yaml('a:\n - b\n - c')
#' # and you must use the yaml package
#' if (loadable('yaml')) yaml_load('a:\n - b\n - c')
yaml_load = function(
x, ..., handlers = NULL, envir = parent.frame(), use_yaml = loadable('yaml')
) {
if (use_yaml) return(handle_error(
yaml::yaml.load(x, eval.expr = FALSE, handlers = yaml_handlers(handlers, envir), ...),
function(e, loc) {
s = e$message
r = 'line (\\d+), column (\\d+)'
m = regmatches(s, regexec(r, s, perl = TRUE))[[1]]
if (length(m) < 3) return()
m = as.integer(m[-1]) # c(row, col)
c(
sprintf('Failed to parse YAML%s:', loc), '',
append(x, paste0(strrep(' ', m[2]), '^~~~~~'), m[1]), ''
)
}
))
# the below simple parser is quite limited
res = list()
r = '^( *)([^ ]+?):($|\\s+.*)'
x = split_lines(x)
x = x[grep(r, x)]
x = x[grep('^\\s*#', x, invert = TRUE)] # comments
if (length(x) == 0) return(res)
lvl = gsub(r, '\\1', x) # indentation level
key = gsub(r, '\\2', x)
val = gsub('^\\s*|\\s*$', '', gsub(r, '\\3', x))
keys = NULL
for (i in seq_along(x)) {
keys = c(head(keys, nchar(lvl[i])/2), key[i])
v = if (is_blank(val[i])) list() else yaml_value(val[i], envir)
# special treatment of NULL (to preserve a key with a null value)
if (is.null(v)) {
if (length(keys) <= 1) res[keys] = list(v) else {
res[[head(keys, -1)]][tail(keys, 1)] = list(v)
}
} else res[[keys]] = v
}
res
}
# only support logical, numeric, character values (both scalar and [] arrays),
# and R expressions starting with !r/!expr
yaml_value = function(x, envir = parent.frame()) {
v = tolower(x)
if (v == 'null') return()
if (grepl('^true|false$', v)) return(as.logical(x))
if (grepl(r <- '^\\s*\\[(.*)\\]\\s*$', v)) {
v = gsub(r, '\\1', v)
if (is_blank(v)) return(list())
v = unname(unlist(read.csv(text = v, header = FALSE)))
if (is.numeric(v)) return(v)
v = gsub('^ ', '', v) # [a, b] -> ["a", " b"] -> ["a", "b"]
return(if (all(grepl('^true|false$', v))) as.logical(v) else v)
}
if (grepl('^[0-9.e+-]', v)) {
v = suppressWarnings(as.numeric(v))
if (!is.na(v)) return(if ((v2 <- as.integer(v)) == v) v2 else v)
}
x = gsub('^["\']|["\']$', '', x) # remove optional quotes for strings
if (grepl(r <- '^!(r|expr) (.+)$', x)) {
x = yaml_expr(gsub(r, '\\2', x), envir)
}
x
}
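# e.g., yaml_value('true') returns TRUE, yaml_value('[1, 2, 3]') returns c(1, 2, 3),
# and yaml_value('!expr 1 + 1') parses and evaluates the expression (2) in `envir`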
yaml_expr = function(x, envir) {
x = parse_only(x)
if (is.environment(envir)) x = eval(x, envir)
x
}
# add !r and !expr handlers to support a custom eval environment, which
# yaml::yaml.load() doesn't support by default (vubiostat/r-yaml#54)
yaml_handlers = function(h, envir) {
h = as.list(h)
f = function(x) yaml_expr(x, envir)
for (i in c('r', 'expr')) if (is.null(h[[i]])) h[[i]] = f
h
}
#' Partition the YAML metadata and the body in a document
#'
#' Split a document into the YAML metadata (which starts with `---` at the
#' beginning of the document) and the body. The YAML metadata will be parsed.
#' @param x A character vector of the document content.
#' @param ... Arguments to be passed to `yaml_load()`.
#' @export
#' @return A list of components `yaml` and `body`.
#' @examples
#' xfun::yaml_body(c('---', 'title: Hello', 'output: markdown::html_document', '---', '', 'Content.'))
yaml_body = function(x, ...) {
i = grep('^---\\s*$', x)
n = length(x)
res = if (n < 2 || length(i) < 2 || (i[1] > 1 && !all(is_blank(x[seq(i[1] - 1)])))) {
list(yaml = list(), body = x)
} else list(
yaml = x[i[1]:i[2]], body = c(rep('', i[2]), tail(x, n - i[2]))
)
if ((n <- length(res$yaml)) >= 3) {
res$yaml = yaml_load(res$yaml[-c(1, n)], ...)
}
res
}
| /scratch/gouwar.j/cran-all/cranData/xfun/R/yaml.R |
## ----setup, include=FALSE-----------------------------------------------------
library(xfun)
## -----------------------------------------------------------------------------
library(xfun)
(z = strict_list(aaa = "I am aaa", b = 1:5))
z$a # NULL (strict matching)
z$aaa # I am aaa
z$b
z$c = "you can create a new element"
z2 = unclass(z) # a normal list
z2$a # partial matching
z3 = as_strict_list(z2) # a strict list again
z3$a # NULL (strict matching) again!
## ----comment=''---------------------------------------------------------------
library(xfun)
raw_string(head(LETTERS))
(x = c("a \"b\"", "hello\tworld!"))
raw_string(x) # this is more likely to be what you want to see
## ----comment=''---------------------------------------------------------------
f = system.file("LICENSE", package = "xfun")
xfun::file_string(f)
as.character(xfun::file_string(f)) # essentially a character string
## -----------------------------------------------------------------------------
f = system.file("LICENSE", package = "xfun")
xfun::base64_uri(f)
## -----------------------------------------------------------------------------
xfun::grep_sub('a([b]+)c', 'a\\U\\1c', c('abc', 'abbbc', 'addc', '123'), perl = TRUE)
## ----comment=''---------------------------------------------------------------
library(xfun)
f = tempfile()
writeLines(c("hello", "world"), f)
gsub_file(f, "world", "woRld", fixed = TRUE)
file_string(f)
## ----comment=''---------------------------------------------------------------
process_file(f, function(x) {
rep(x, 3) # repeat the content 3 times
})
file_string(f)
## -----------------------------------------------------------------------------
library(xfun)
p = c("abc.doc", "def123.tex", "path/to/foo.Rmd")
file_ext(p)
sans_ext(p)
with_ext(p, ".txt")
with_ext(p, c(".ppt", ".sty", ".Rnw"))
with_ext(p, "html")
## -----------------------------------------------------------------------------
xfun::is_macos()
xfun::is_unix()
xfun::is_linux()
xfun::is_windows()
## ----eval=FALSE---------------------------------------------------------------
# library(testit)
# library(parallel)
# library(tinytex)
# library(mime)
## ----eval=FALSE---------------------------------------------------------------
# xfun::pkg_attach(c('testit', 'parallel', 'tinytex', 'mime'))
## ----eval=FALSE---------------------------------------------------------------
# if (!requireNamespace('tinytex')) install.packages('tinytex')
# library(tinytex)
## ----eval=FALSE---------------------------------------------------------------
# xfun::pkg_attach2('tinytex')
## -----------------------------------------------------------------------------
n2w(0, cap = TRUE)
n2w(seq(0, 121, 11), and = TRUE)
n2w(1e+06)
n2w(1e+11 + 12345678)
n2w(-987654321)
n2w(1e+15 - 1)
## ----eval=FALSE---------------------------------------------------------------
# res = xfun::cache_rds({
#   # pretend the computing here is time-consuming
# Sys.sleep(2)
# 1:10
# })
## -----------------------------------------------------------------------------
xfun::session_info(c('xfun', 'rmarkdown', 'knitr', 'tinytex'), dependencies = FALSE)
| /scratch/gouwar.j/cran-all/cranData/xfun/inst/doc/xfun.R |
---
title: An Introduction to xfun
subtitle: A Collection of Miscellaneous Functions
author: "Yihui Xie"
date: "`r Sys.Date()`"
slug: xfun
githubEditURL: https://github.com/yihui/xfun/edit/main/vignettes/xfun.Rmd
output:
markdown::html_format:
meta:
css: ["@default", "@prism-xcode"]
options:
toc: true
---
<!--
%\VignetteIndexEntry{An Introduction to xfun}
%\VignetteEngine{knitr::knitr}
-->
```{r setup, include=FALSE}
library(xfun)
```
After writing about 20 R packages, I found I had accumulated several utility functions that I used across different packages, so I decided to extract them into a separate package. Previously I had been using the evil triple-colon `:::` to access these internal utility functions. Now with **xfun**, these functions have been exported, and more importantly, documented. It should be better to use them under the sun instead of in the dark.
This page shows examples of a subset of functions in this package. For a full list of functions, see the help page `help(package = 'xfun')`. The source package is available on Github: https://github.com/yihui/xfun.
## No more partial matching for lists!
I have been bitten many times by partial matching in lists, e.g., when I want `x$a` but the element `a` does not exist in the list `x`, it returns the value `x$abc` if `abc` exists in `x`. A strict list is a list for which the partial matching of the `$` operator is disabled. The functions `xfun::strict_list()` and `xfun::as_strict_list()` are the equivalents to `base::list()` and `base::as.list()` respectively, which always return a strict list, e.g.,
```{r}
library(xfun)
(z = strict_list(aaa = "I am aaa", b = 1:5))
z$a # NULL (strict matching)
z$aaa # I am aaa
z$b
z$c = "you can create a new element"
z2 = unclass(z) # a normal list
z2$a # partial matching
z3 = as_strict_list(z2) # a strict list again
z3$a # NULL (strict matching) again!
```
Similarly, the default partial matching in `attr()` can be annoying, too. The function `xfun::attr()` is simply a shorthand of `attr(..., exact = TRUE)`.
I want it, or I do not want it. There is no "I probably want".
## Output character vectors for human eyes
When R prints a character vector, your eyes may be distracted by the indices like `[1]`, double quotes, and escape sequences. To see a character vector in its "raw" form, you can use `cat(..., sep = '\n')`. The function `raw_string()` marks a character vector as "raw", and the corresponding printing function will call `cat(sep = '\n')` to print the character vector to the console.
```{r comment=''}
library(xfun)
raw_string(head(LETTERS))
(x = c("a \"b\"", "hello\tworld!"))
raw_string(x) # this is more likely to be what you want to see
```
## Print the content of a text file
I have used `paste(readLines('foo'), collapse = '\n')` many times before I decided to write a simple wrapper function `xfun::file_string()`. This function also makes use of `raw_string()`, so you can see the content of a file in the console as a side-effect, e.g.,
```{r comment=''}
f = system.file("LICENSE", package = "xfun")
xfun::file_string(f)
as.character(xfun::file_string(f)) # essentially a character string
```
## Get the data URI of a file
Files can be encoded into base64 strings via `base64_uri()`. This is a common technique to embed arbitrary files in HTML documents (which is [what `xfun::embed_file()` does](https://bookdown.org/yihui/rmarkdown-cookbook/embed-file.html) and it is based on `base64_uri()`).
```{r}
f = system.file("LICENSE", package = "xfun")
xfun::base64_uri(f)
```
## Match strings and do substitutions
After typing the code `x = grep(pattern, x, value = TRUE); gsub(pattern, '\\1', x)` many times, I combined them into a single function `xfun::grep_sub()`.
```{r}
xfun::grep_sub('a([b]+)c', 'a\\U\\1c', c('abc', 'abbbc', 'addc', '123'), perl = TRUE)
```
## Search and replace strings in files
I can never remember how to properly use `grep` or `sed` to search and replace strings in multiple files. My favorite IDE, RStudio, has not provided this feature yet (you can only search and replace in the currently opened file). Therefore I did a quick and dirty implementation in R, including functions `gsub_files()`, `gsub_dir()`, and `gsub_ext()`, to search and replace strings in multiple files under a directory. Note that the files are assumed to be encoded in UTF-8. If you do not use UTF-8, we cannot be friends. Seriously.
All functions are based on `gsub_file()`, which performs searching and replacing in a single file, e.g.,
```{r comment=''}
library(xfun)
f = tempfile()
writeLines(c("hello", "world"), f)
gsub_file(f, "world", "woRld", fixed = TRUE)
file_string(f)
```
The function `gsub_dir()` is very flexible: you can limit the list of files by MIME types, or extensions. For example, if you want to do substitution in text files, you may use `gsub_dir(..., mimetype = '^text/')`.
The function `process_file()` is a more general way to process files. Basically it reads a file, processes the content with a function that you pass to it, and writes the text back, e.g.,
```{r, comment=''}
process_file(f, function(x) {
rep(x, 3) # repeat the content 3 times
})
file_string(f)
```
**WARNING**: Before using these functions, make sure that you have backed up your files, or your files are under version control. The files will be modified in place. If you do not back up or use version control, there will be no way to undo the changes.
## Manipulate filename extensions
Functions `file_ext()` and `sans_ext()` are based on functions in **tools**. The function `with_ext()` adds or replaces extensions of filenames, and it is vectorized.
```{r}
library(xfun)
p = c("abc.doc", "def123.tex", "path/to/foo.Rmd")
file_ext(p)
sans_ext(p)
with_ext(p, ".txt")
with_ext(p, c(".ppt", ".sty", ".Rnw"))
with_ext(p, "html")
```
## Find files (in a project) without the pain of thinking about absolute/relative paths
The function `proj_root()` was inspired by the **rprojroot** package, and tries to find the root directory of a project. Currently it only supports R package projects and RStudio projects by default. It is much less sophisticated than **rprojroot**.
The function `from_root()` was inspired by `here::here()`, but returns a relative path (relative to the project's root directory found by `proj_root()`) instead of an absolute path. For example, `xfun::from_root('data', 'cars.csv')` in a code chunk of `docs/foo.Rmd` will return `../data/cars.csv` when `docs/` and `data/` directories are under the root directory of a project.
```
root/
|-- data/
| |-- cars.csv
|
|-- docs/
|-- foo.Rmd
```
If file paths are too much pain for you to think about, you can just pass an incomplete path to the function `magic_path()`, and it will try to find the actual path recursively under subdirectories of a root directory. For example, you may only provide a base filename, and `magic_path()` will look for this file under subdirectories and return the actual path if it is found. By default, it returns a relative path, which is relative to the current working directory. With the above example, `xfun::magic_path('cars.csv')` in a code chunk of `docs/foo.Rmd` will return `../data/cars.csv`, if `cars.csv` is a unique filename in the project. You can freely move it to any folders of this project, and `magic_path()` will still find it. If you are not using a project to manage files, `magic_path()` will look for the file under subdirectories of the current working directory.
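For example, with the layout above, both calls below would point to the same file (not evaluated here, since they depend on the actual project structure):
```{r eval=FALSE}
# assumed to be run from a document under docs/, e.g., docs/foo.Rmd
xfun::from_root("data", "cars.csv")  # returns "../data/cars.csv"
xfun::magic_path("cars.csv")         # also finds ../data/cars.csv if the filename is unique
```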
## Types of operating systems
The series of functions `is_linux()`, `is_macos()`, `is_unix()`, and `is_windows()` test the types of the OS, using the information from `.Platform` and `Sys.info()`, e.g.,
```{r}
xfun::is_macos()
xfun::is_unix()
xfun::is_linux()
xfun::is_windows()
```
## Loading and attaching packages
Oftentimes I see users attach a series of packages in the beginning of their scripts by repeating `library()` multiple times. This could be easily vectorized, and the function `xfun::pkg_attach()` does this job. For example,
```{r eval=FALSE}
library(testit)
library(parallel)
library(tinytex)
library(mime)
```
is equivalent to
```{r eval=FALSE}
xfun::pkg_attach(c('testit', 'parallel', 'tinytex', 'mime'))
```
I also see scripts that contain code to install a package if it is not available, e.g.,
```{r eval=FALSE}
if (!requireNamespace('tinytex')) install.packages('tinytex')
library(tinytex)
```
This could be done via
```{r eval=FALSE}
xfun::pkg_attach2('tinytex')
```
The function `pkg_attach2()` is a shorthand of `pkg_attach(..., install = TRUE)`, which means if a package is not available, install it. This function can also deal with multiple packages.
The function `loadable()` tests if a package is loadable.
## Read/write files in UTF-8
Functions `read_utf8()` and `write_utf8()` can be used to read/write files in UTF-8. They are simple wrappers of `readLines()` and `writeLines()`.
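For example, a quick sketch using a temporary file (not evaluated here):
```{r eval=FALSE}
f = tempfile()
xfun::write_utf8(c("first line", "第二行"), f)
xfun::read_utf8(f)
```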
## Convert numbers to English words
The function `numbers_to_words()` (or `n2w()` for short) converts numbers to English words.
```{r}
n2w(0, cap = TRUE)
n2w(seq(0, 121, 11), and = TRUE)
n2w(1e+06)
n2w(1e+11 + 12345678)
n2w(-987654321)
n2w(1e+15 - 1)
```
## Cache an R expression to an RDS file
The function `cache_rds()` provides a simple caching mechanism: the first time an expression is passed to it, it saves the result to an RDS file; the next time it will read the RDS file and return the value instead of evaluating the expression again. If you want to invalidate the cache, you can use the argument `rerun = TRUE`.
```{r, eval=FALSE}
res = xfun::cache_rds({
  # pretend the computing here is time-consuming
Sys.sleep(2)
1:10
})
```
When the function is used in a code chunk in a **knitr** document, the RDS cache file is saved to a path determined by the chunk label (the base filename) and the chunk option `cache.path` (the cache directory), so you do not have to provide the `file` and `dir` arguments of `cache_rds()`.
This caching mechanism is much simpler than **knitr**'s caching. Cache invalidation is often tricky (see [this post](https://yihui.org/en/2018/06/cache-invalidation/)), so this function may be helpful if you want more transparency and control over when to invalidate the cache (for `cache_rds()`, the cache is invalidated when the cache file is deleted, which can be achieved via the argument `rerun = TRUE`).
As documented on the help page of `cache_rds()`, there are two common cases in which you may want to invalidate the cache:
1. The code in the expression has changed, e.g., if you changed the code from `cache_rds({x + 1})` to `cache_rds({x + 2})`, the cache will be automatically invalidated and the expression will be re-evaluated. However, please note that changes in white spaces or comments do not matter. Or generally speaking, as long as the change does not affect the parsed expression, the cache will not be invalidated, e.g., the two expressions below are essentially identical (hence if you have executed `cache_rds()` on the first expression, the second expression will be able to take advantage of the cache):
```r
res = xfun::cache_rds({
Sys.sleep(3 );
x=1:10; # semi-colons won't matter
x+1;
})
res = xfun::cache_rds({
Sys.sleep(3)
x = 1:10 # a comment
x +
1 # feel free to make any changes in white spaces
})
```
1. The value of a global variable in the expression has changed, e.g., if `y` has changed, you are most likely to want to invalidate the cache and rerun the expression below:
```r
res = xfun::cache_rds({
x = 1:10
x + y
})
```
This is because `x` is a local variable in the expression, and `y` is an external global variable (not created locally like `x`). To invalidate the cache when `y` has changed, you may let `cache_rds()` know through the `hash` argument that `y` needs to be considered when deciding if the cache should be invalidated:
```r
res = xfun::cache_rds({
x = 1:10
x + y
}, hash = list(y))
```
If you do not want to provide this list of value(s) to the `hash` argument, you may try `hash = "auto"` instead, which asks `cache_rds()` to try to figure out all global variables automatically and use a list of their values as the value for the `hash` argument.
```r
res = xfun::cache_rds({
x = 1:10
x + y
}, hash = "auto")
```
## Check reverse dependencies of a package
Running `R CMD check` on the reverse dependencies of **knitr** and **rmarkdown** is my least favorite thing in developing R packages, because the numbers of their reverse dependencies are huge. The function `rev_check()` reflects some of my past experience in this process. I think I have automated it as much as possible, and made it as easy as possible to discover possible new problems introduced by the current version of the package (compared to the CRAN version). Finally I can just sit back and let it run.
## Input a character vector into the RStudio source editor
The function `rstudio_type()` inputs characters in the RStudio source editor as if they were typed by a human. I came up with the idea when preparing my talk for rstudio::conf 2018 ([see this post](https://yihui.org/en/2018/03/blogdown-video-rstudio-conf/) for more details).
## Print session information
Since I have never been fully satisfied with the output of `sessionInfo()`, I tweaked it to make it more useful in my use cases. For example, it is rarely useful to print out the names of base R packages, or information about the matrix products / BLAS / LAPACK. Oftentimes I want additional information in the session information, such as the Pandoc version when **rmarkdown** is used. The function `session_info()` tweaks the output of `sessionInfo()`, and makes it possible for other packages to append information to the output of `session_info()`.
You can choose to print out the versions of only the packages you specify, e.g.,
```{r}
xfun::session_info(c('xfun', 'rmarkdown', 'knitr', 'tinytex'), dependencies = FALSE)
```
| /scratch/gouwar.j/cran-all/cranData/xfun/inst/doc/xfun.Rmd |
# This script is executed via the command line `Rscript call-fun.R arg1 arg2`,
# where arg1 is a path to an .rds file, which contains the function and its
# arguments saved as a list, and arg2 is a path to an .rds file to which the
# returned value of the function call is saved.
local({
if (length(a <- commandArgs(TRUE)) != 2)
stop('The number of arguments passed to Rscript should be 2.')
x = readRDS(a[1]) # list(fun, args)
f = x[[1]]
if (is.character(f)) f = eval(parse(text = f), envir = globalenv())
r = do.call(f, x[[2]], envir = globalenv())
saveRDS(r, a[2])
})
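# A hypothetical caller sketch (file names made up): save the function and its
# arguments to an .rds file, run this script with Rscript, then read the result:
#   saveRDS(list('Sys.getenv', list('R_HOME')), 'in.rds')
#   system2('Rscript', c('call-fun.R', 'in.rds', 'out.rds'))
#   readRDS('out.rds')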
| /scratch/gouwar.j/cran-all/cranData/xfun/inst/scripts/call-fun.R |
---
title: An Introduction to xfun
subtitle: A Collection of Miscellaneous Functions
author: "Yihui Xie"
date: "`r Sys.Date()`"
slug: xfun
githubEditURL: https://github.com/yihui/xfun/edit/main/vignettes/xfun.Rmd
output:
markdown::html_format:
meta:
css: ["@default", "@prism-xcode"]
options:
toc: true
---
<!--
%\VignetteIndexEntry{An Introduction to xfun}
%\VignetteEngine{knitr::knitr}
-->
```{r setup, include=FALSE}
library(xfun)
```
After writing about 20 R packages, I found I had accumulated several utility functions that I used across different packages, so I decided to extract them into a separate package. Previously I had been using the evil triple-colon `:::` to access these internal utility functions. Now with **xfun**, these functions have been exported, and more importantly, documented. It should be better to use them under the sun instead of in the dark.
This page shows examples of a subset of functions in this package. For a full list of functions, see the help page `help(package = 'xfun')`. The source package is available on Github: https://github.com/yihui/xfun.
## No more partial matching for lists!
I have been bitten many times by partial matching in lists, e.g., when I want `x$a` but the element `a` does not exist in the list `x`, it returns the value `x$abc` if `abc` exists in `x`. A strict list is a list for which the partial matching of the `$` operator is disabled. The functions `xfun::strict_list()` and `xfun::as_strict_list()` are the equivalents to `base::list()` and `base::as.list()` respectively, which always return a strict list, e.g.,
```{r}
library(xfun)
(z = strict_list(aaa = "I am aaa", b = 1:5))
z$a # NULL (strict matching)
z$aaa # I am aaa
z$b
z$c = "you can create a new element"
z2 = unclass(z) # a normal list
z2$a # partial matching
z3 = as_strict_list(z2) # a strict list again
z3$a # NULL (strict matching) again!
```
Similarly, the default partial matching in `attr()` can be annoying, too. The function `xfun::attr()` is simply a shorthand of `attr(..., exact = TRUE)`.
I want it, or I do not want it. There is no "I probably want".
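Here is a minimal sketch (not part of the original examples) of how the two differ when an attribute name is only a partial match:

```r
x = structure(list(), my_attr = "full value")
attr(x, "my")        # base R partially matches "my_attr" and returns "full value"
xfun::attr(x, "my")  # NULL, because exact = TRUE disables partial matching
```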
## Output character vectors for human eyes
When R prints a character vector, your eyes may be distracted by the indices like `[1]`, double quotes, and escape sequences. To see a character vector in its "raw" form, you can use `cat(..., sep = '\n')`. The function `raw_string()` marks a character vector as "raw", and the corresponding printing function will call `cat(sep = '\n')` to print the character vector to the console.
```{r comment=''}
library(xfun)
raw_string(head(LETTERS))
(x = c("a \"b\"", "hello\tworld!"))
raw_string(x) # this is more likely to be what you want to see
```
## Print the content of a text file
I have used `paste(readLines('foo'), collapse = '\n')` many times before I decided to write a simple wrapper function `xfun::file_string()`. This function also makes use of `raw_string()`, so you can see the content of a file in the console as a side-effect, e.g.,
```{r comment=''}
f = system.file("LICENSE", package = "xfun")
xfun::file_string(f)
as.character(xfun::file_string(f)) # essentially a character string
```
## Get the data URI of a file
Files can be encoded into base64 strings via `base64_uri()`. This is a common technique to embed arbitrary files in HTML documents (which is [what `xfun::embed_file()` does](https://bookdown.org/yihui/rmarkdown-cookbook/embed-file.html) and it is based on `base64_uri()`).
```{r}
f = system.file("LICENSE", package = "xfun")
xfun::base64_uri(f)
```
## Match strings and do substitutions
After typing the code `x = grep(pattern, x, value = TRUE); gsub(pattern, '\\1', x)` many times, I combined them into a single function `xfun::grep_sub()`.
```{r}
xfun::grep_sub('a([b]+)c', 'a\\U\\1c', c('abc', 'abbbc', 'addc', '123'), perl = TRUE)
```
## Search and replace strings in files
I can never remember how to properly use `grep` or `sed` to search and replace strings in multiple files. My favorite IDE, RStudio, has not provided this feature yet (you can only search and replace in the currently opened file). Therefore I did a quick and dirty implementation in R, including functions `gsub_files()`, `gsub_dir()`, and `gsub_ext()`, to search and replace strings in multiple files under a directory. Note that the files are assumed to be encoded in UTF-8. If you do not use UTF-8, we cannot be friends. Seriously.
All functions are based on `gsub_file()`, which performs searching and replacing in a single file, e.g.,
```{r comment=''}
library(xfun)
f = tempfile()
writeLines(c("hello", "world"), f)
gsub_file(f, "world", "woRld", fixed = TRUE)
file_string(f)
```
The function `gsub_dir()` is very flexible: you can limit the list of files by MIME types, or extensions. For example, if you want to do substitution in text files, you may use `gsub_dir(..., mimetype = '^text/')`.
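For instance, a sketch of such a call might look like this (the pattern and replacement are passed through to `gsub()`, as in `gsub_file()`; the call works on the current working directory by default):

```r
# replace "foo" with "bar" in all text files under the current directory
xfun::gsub_dir("foo", "bar", mimetype = "^text/")
```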
The function `process_file()` is a more general way to process files. Basically it reads a file, processes the content with a function that you pass to it, and writes back the text, e.g.,
```{r, comment=''}
process_file(f, function(x) {
rep(x, 3) # repeat the content 3 times
})
file_string(f)
```
**WARNING**: Before using these functions, make sure that you have backed up your files, or put them under version control. The files will be modified in place. If you do not back up or use version control, there will be no way to recover the original content.
## Manipulate filename extensions
Functions `file_ext()` and `sans_ext()` are based on functions in **tools**. The function `with_ext()` adds or replaces extensions of filenames, and it is vectorized.
```{r}
library(xfun)
p = c("abc.doc", "def123.tex", "path/to/foo.Rmd")
file_ext(p)
sans_ext(p)
with_ext(p, ".txt")
with_ext(p, c(".ppt", ".sty", ".Rnw"))
with_ext(p, "html")
```
## Find files (in a project) without the pain of thinking about absolute/relative paths
The function `proj_root()` was inspired by the **rprojroot** package, and tries to find the root directory of a project. Currently it only supports R package projects and RStudio projects by default. It is much less sophisticated than **rprojroot**.
The function `from_root()` was inspired by `here::here()`, but returns a relative path (relative to the project's root directory found by `proj_root()`) instead of an absolute path. For example, `xfun::from_root('data', 'cars.csv')` in a code chunk of `docs/foo.Rmd` will return `../data/cars.csv` when `docs/` and `data/` directories are under the root directory of a project.
```
root/
|-- data/
| |-- cars.csv
|
|-- docs/
|-- foo.Rmd
```
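With this layout, and assuming the code runs from a file under `docs/`, the calls would look roughly like this (an illustrative sketch):

```r
xfun::proj_root()                    # the path of root/
xfun::from_root("data", "cars.csv")  # "../data/cars.csv"
```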
If file paths are too much pain for you to think about, you can just pass an incomplete path to the function `magic_path()`, and it will try to find the actual path recursively under subdirectories of a root directory. For example, you may only provide a base filename, and `magic_path()` will look for this file under subdirectories and return the actual path if it is found. By default, it returns a relative path, which is relative to the current working directory. With the above example, `xfun::magic_path('cars.csv')` in a code chunk of `docs/foo.Rmd` will return `../data/cars.csv`, if `cars.csv` is a unique filename in the project. You can freely move it to any folders of this project, and `magic_path()` will still find it. If you are not using a project to manage files, `magic_path()` will look for the file under subdirectories of the current working directory.
## Types of operating systems
The series of functions `is_linux()`, `is_macos()`, `is_unix()`, and `is_windows()` test the types of the OS, using the information from `.Platform` and `Sys.info()`, e.g.,
```{r}
xfun::is_macos()
xfun::is_unix()
xfun::is_linux()
xfun::is_windows()
```
## Loading and attaching packages
Oftentimes I see users attach a series of packages in the beginning of their scripts by repeating `library()` multiple times. This could be easily vectorized, and the function `xfun::pkg_attach()` does this job. For example,
```{r eval=FALSE}
library(testit)
library(parallel)
library(tinytex)
library(mime)
```
is equivalent to
```{r eval=FALSE}
xfun::pkg_attach(c('testit', 'parallel', 'tinytex', 'mime'))
```
I also see scripts that contain code to install a package if it is not available, e.g.,
```{r eval=FALSE}
if (!requireNamespace('tinytex')) install.packages('tinytex')
library(tinytex)
```
This could be done via
```{r eval=FALSE}
xfun::pkg_attach2('tinytex')
```
The function `pkg_attach2()` is a shorthand of `pkg_attach(..., install = TRUE)`, which means if a package is not available, install it. This function can also deal with multiple packages.
The function `loadable()` tests if a package is loadable.
## Read/write files in UTF-8
Functions `read_utf8()` and `write_utf8()` can be used to read/write files in UTF-8. They are simple wrappers of `readLines()` and `writeLines()`.
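A minimal example (not from the original documentation):

```r
f = tempfile()
xfun::write_utf8(c("你好", "world"), f)
xfun::read_utf8(f)
```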
## Convert numbers to English words
The function `numbers_to_words()` (or `n2w()` for short) converts numbers to English words.
```{r}
n2w(0, cap = TRUE)
n2w(seq(0, 121, 11), and = TRUE)
n2w(1e+06)
n2w(1e+11 + 12345678)
n2w(-987654321)
n2w(1e+15 - 1)
```
## Cache an R expression to an RDS file
The function `cache_rds()` provides a simple caching mechanism: the first time an expression is passed to it, it saves the result to an RDS file; the next time it will read the RDS file and return the value instead of evaluating the expression again. If you want to invalidate the cache, you can use the argument `rerun = TRUE`.
```{r, eval=FALSE}
res = xfun::cache_rds({
  # pretend the computing here is time-consuming
Sys.sleep(2)
1:10
})
```
When the function is used in a code chunk in a **knitr** document, the RDS cache file is saved to a path determined by the chunk label (the base filename) and the chunk option `cache.path` (the cache directory), so you do not have to provide the `file` and `dir` arguments of `cache_rds()`.
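Outside of **knitr**, you can set the cache location yourself through the `file` and `dir` arguments (the paths below are made up for illustration):

```r
res = xfun::cache_rds({
  Sys.sleep(2)
  1:10
}, file = "sleep-result.rds", dir = "cache/")
```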
This caching mechanism is much simpler than **knitr**'s caching. Cache invalidation is often tricky (see [this post](https://yihui.org/en/2018/06/cache-invalidation/)), so this function may be helpful if you want more transparency and control over when to invalidate the cache (for `cache_rds()`, the cache is invalidated when the cache file is deleted, which can be achieved via the argument `rerun = TRUE`).
As documented on the help page of `cache_rds()`, there are two common cases in which you may want to invalidate the cache:
1. The code in the expression has changed, e.g., if you changed the code from `cache_rds({x + 1})` to `cache_rds({x + 2})`, the cache will be automatically invalidated and the expression will be re-evaluated. However, please note that changes in white spaces or comments do not matter. Or generally speaking, as long as the change does not affect the parsed expression, the cache will not be invalidated, e.g., the two expressions below are essentially identical (hence if you have executed `cache_rds()` on the first expression, the second expression will be able to take advantage of the cache):
```r
res = xfun::cache_rds({
Sys.sleep(3 );
x=1:10; # semi-colons won't matter
x+1;
})
res = xfun::cache_rds({
Sys.sleep(3)
x = 1:10 # a comment
x +
1 # feel free to make any changes in white spaces
})
```
1. The value of a global variable in the expression has changed, e.g., if `y` has changed, you are most likely to want to invalidate the cache and rerun the expression below:
```r
res = xfun::cache_rds({
x = 1:10
x + y
})
```
This is because `x` is a local variable in the expression, and `y` is an external global variable (not created locally like `x`). To invalidate the cache when `y` has changed, you may let `cache_rds()` know through the `hash` argument that `y` needs to be considered when deciding if the cache should be invalidated:
```r
res = xfun::cache_rds({
x = 1:10
x + y
}, hash = list(y))
```
If you do not want to provide this list of value(s) to the `hash` argument, you may try `hash = "auto"` instead, which asks `cache_rds()` to try to figure out all global variables automatically and use a list of their values as the value for the `hash` argument.
```r
res = xfun::cache_rds({
x = 1:10
x + y
}, hash = "auto")
```
## Check reverse dependencies of a package
Running `R CMD check` on the reverse dependencies of **knitr** and **rmarkdown** is my least favorite thing in developing R packages, because the numbers of their reverse dependencies are huge. The function `rev_check()` reflects some of my past experience in this process. I think I have automated it as much as possible, and made it as easy as possible to discover possible new problems introduced by the current version of the package (compared to the CRAN version). Finally I can just sit back and let it run.
## Input a character vector into the RStudio source editor
The function `rstudio_type()` inputs characters in the RStudio source editor as if they were typed by a human. I came up with the idea when preparing my talk for rstudio::conf 2018 ([see this post](https://yihui.org/en/2018/03/blogdown-video-rstudio-conf/) for more details).
## Print session information
Since I have never been fully satisfied by the output of `sessionInfo()`, I tweaked it to make it more useful in my use cases. For example, it is rarely useful to print out the names of base R packages, or information about the matrix products / BLAS / LAPACK. Oftentimes I want additional information in the session information, such as the Pandoc version when **rmarkdown** is used. The function `session_info()` tweaks the output of `sessionInfo()`, and makes it possible for other packages to append information in the output of `session_info()`.
You can choose to print out the versions of only the packages you specify, e.g.,
```{r}
xfun::session_info(c('xfun', 'rmarkdown', 'knitr', 'tinytex'), dependencies = FALSE)
```
| /scratch/gouwar.j/cran-all/cranData/xfun/vignettes/xfun.Rmd |
#' Transform XGBoost model object to SQL query.
#'
#' This function generates SQL query for in-database scoring of XGBoost models,
#' providing a robust and efficient way of model deployment. It takes in the trained XGBoost model \code{xgbModel},
#' name of the input database table \code{input_table_name},
#' and name of a unique identifier within that table \code{unique_id} as input,
#' writes the SQL query to a file specified by \code{output_file_name}.
#' Note that the input database table should be generated from the raw table using the one-hot encoding query output by \code{onehot2sql()},
#' or the one-hot encoding query should be provided as the input \code{input_onehot_query} to this function, working as a sub-query inside the final model scoring query.
#'
#' @param xgbModel The trained model object of class \code{xgb.Booster}.
#' The currently supported booster is \code{booster="gbtree"}; the supported \code{objective} options are:
#' \itemize{
#' \item - \code{reg:linear}: linear regression.
#' \item - \code{reg:logistic}: logistic regression.
#' \item - \code{binary:logistic}: logistic regression for binary classification, output probability.
#' \item - \code{binary:logitraw}: logistic regression for binary classification, output score before logistic transformation.
#' \item - \code{binary:hinge}: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
#' \item - \code{count:poisson}: poisson regression for count data, output mean of poisson distribution.
#' \item - \code{reg:gamma}: gamma regression with log-link, output mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be gamma-distributed.
#' \item - \code{reg:tweedie}: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be Tweedie-distributed.
#' }
#' @param print_progress Boolean indicator controls whether the SQL generating progress should be printed to console.
#' @param unique_id A unique row identifier, which is crucial for in-database scoring of the XGBoost model. If not given, the SQL query will be generated with the id name "ROW_KEY".
#' @param output_file_name File name that the SQL query will be written to. It must not be empty in order for this function to run.
#' @param input_table_name Name of the raw data table in the database that the SQL query will select from. If not given, the SQL query will be generated with the table name "MODREADY_TABLE".
#' @param input_onehot_query SQL query of the one-hot encoding generated by \code{onehot2sql}. When \code{input_table_name} is empty while \code{input_onehot_query} is not, the final output query will include \code{input_onehot_query} as a sub-query.
#' @return The SQL query is written to the file specified by \code{output_file_name}.
#'
#' @export
#'
#' @examples
#' library(xgboost)
#' # load data
#' df = data.frame(ggplot2::diamonds)
#' head(df)
#'
#' # data processing
#' out <- onehot2sql(df)
#' x <- out$model.matrix[,colnames(out$model.matrix)!='price']
#' y <- out$model.matrix[,colnames(out$model.matrix)=='price']
#'
#' # model training
#' bst <- xgboost(data = x,
#' label = y,
#' max.depth = 3,
#' eta = .3,
#' nround = 5,
#' nthread = 1,
#' objective = 'reg:linear')
#'
#' # generate model scoring SQL script with ROW_KEY and MODREADY_TABLE
#' booster2sql(bst, output_file_name='xgb.txt')
booster2sql <- function(xgbModel, print_progress=FALSE, unique_id=NULL,
output_file_name=NULL, input_table_name=NULL, input_onehot_query=NULL) {
###### initial setup ######
xgb_dump <- xgboost::xgb.dump(xgbModel)
first_letter <- substring(xgb_dump,1,1)
all_tree_index <- which(first_letter=="b")
if (is.null(unique_id)) {
unique_id <- "ROW_KEY"
message("query is written to file with row unique id named as ROW_KEY")
}
if (is.null(output_file_name)) {
stop("output file not specified")
}
if (is.null(input_table_name) & is.null(input_onehot_query)) {
input_table_name <- "MODREADY_TABLE"
message("query is written to file with input table named as MODREADY_TABLE")
} else if (is.null(input_table_name) & !is.null(input_onehot_query)) {
input_table_name <- paste0("( \n",input_onehot_query," \n) AS MODREADY_TABLE ")
}
###### recurse fun ######
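  # Recursively walk one tree of the dump: a leaf line emits its score, while an
  # internal node emits a nested CASE WHEN on its split variable covering the
  # < (yes), >= (no), and IS NULL (missing) branches.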
fun_recurse_tree <- function(g,local_dump,dump_index,branch_index){
if (grepl("leaf",local_dump[dump_index])==TRUE) {
cat(sub(".*leaf= *(.*?)", "\\1", local_dump[dump_index]))
} else {
cur_var_name <- g$feature_names[as.numeric(regmatches(local_dump[dump_index],regexec("f(.*?)[<]",local_dump[dump_index]))[[1]][2])+1]
cur_var_val <- as.numeric(regmatches(local_dump[dump_index],regexec("[<](.*?)[]]",local_dump[dump_index]))[[1]][2])
# if YES
left_dump_index <- which(branch_index==
as.numeric(regmatches(local_dump[dump_index],regexec("yes=(.*?)[,]",local_dump[dump_index]))[[1]][2]))
# if NO
right_dump_index <- which(branch_index==
as.numeric(regmatches(local_dump[dump_index],regexec("no=(.*?)[,]",local_dump[dump_index]))[[1]][2]))
# if missing
missing_dump_index <- which(branch_index==
as.numeric(regmatches(local_dump[dump_index],regexec("missing=(.*?)$",local_dump[dump_index]))[[1]][2]))
cat("\n (CASE WHEN", paste0("[",cur_var_name,"] < ",cur_var_val), "THEN ")
cat(fun_recurse_tree(g,local_dump,left_dump_index,branch_index))
cat("\n WHEN ",paste0("[",cur_var_name,"] >= ",cur_var_val), "THEN ")
cat(fun_recurse_tree(g,local_dump,right_dump_index,branch_index))
cat("\n WHEN ",paste0("[",cur_var_name,"] IS NULL"), "THEN ")
cat(fun_recurse_tree(g,local_dump,missing_dump_index,branch_index))
cat(" END)")
}
}
###### generate tree ######
sink(output_file_name, type ="output")
cat("SELECT ", unique_id, ", ")
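  # convert base_score to the margin (intercept) scale implied by the objective's link:
  # logit for logistic objectives, log for poisson/gamma/tweedie, identity otherwise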
if(xgbModel$params$objective == "binary:logistic" | xgbModel$params$objective == "reg:logistic" | xgbModel$params$objective == "binary:logitraw"){
p0 <- ifelse(is.null(xgbModel$params$base_score),0.5,xgbModel$params$base_score)
b0 <- log(p0/(1-p0))
if (xgbModel$params$objective == "binary:logitraw") {
cat(b0,"+ SUM(ONETREE) AS XGB_PRED")
} else {
cat("1/(1+exp(-(",b0,"+ SUM(ONETREE)))) AS XGB_PRED")
}
} else if (xgbModel$params$objective == "binary:hinge") {
b0 <- ifelse(is.null(xgbModel$params$base_score),0.5,xgbModel$params$base_score)
cat("IF((",b0,"+ SUM(ONETREE) )>0,1,0) AS XGB_PRED")
} else if(xgbModel$params$objective == "reg:linear"){
b0 <- ifelse(is.null(xgbModel$params$base_score),0.5,xgbModel$params$base_score)
cat(b0,"+ SUM(ONETREE) AS XGB_PRED")
} else if(xgbModel$params$objective == "reg:gamma" | xgbModel$params$objective == "count:poisson" | xgbModel$params$objective == "reg:tweedie"){
mu0 <- ifelse(is.null(xgbModel$params$base_score),0.5,xgbModel$params$base_score)
b0 <- log(mu0)
cat("exp(",b0,"+ SUM(ONETREE)) AS XGB_PRED")
} else {
warning("query is generated with unsupported objective")
}
cat("\nFROM ( ")
for (tree_num in 1:length(all_tree_index)) {
cat(" \n SELECT", unique_id, ",")
tree_begin <- all_tree_index[tree_num]+1
if(is.na(all_tree_index[tree_num+1])){
tree_end <- length(xgb_dump)
} else {
tree_end <- all_tree_index[tree_num+1] - 1
}
all_branch_index <- as.numeric(sub("\\D*(\\d+).*", "\\1", xgb_dump))
branch_index <- all_branch_index[tree_begin:tree_end]
local_dump <- xgb_dump[tree_begin:tree_end]
fun_recurse_tree(xgbModel,local_dump,1,branch_index)
cat(" AS ONETREE")
if(tree_num != length(all_tree_index)){
cat(" FROM ", input_table_name, "\n UNION ALL \n")
}
if (print_progress==TRUE) {
sink()
cat("====== Processing", tree_num, "/", length(all_tree_index), "Tree ======\n")
sink(output_file_name, type ="output", append=TRUE)
}
}
cat(" FROM ",input_table_name," \n) AS TREES_TABLE GROUP BY ",unique_id)
sink()
}
| /scratch/gouwar.j/cran-all/cranData/xgb2sql/R/booster2sql.R |
#' Prepare training data in R so that it is ready for XGBoost model fitting.
#' Meta information is stored so the exact transformation can be applied to any new data.
#' It also outputs the SQL query performing the exact one-hot encoding for in-database data preparation.
#'
#' This function performs full one-hot encoding for all the categorical features inside the training data,
#' with all NAs inside both categorical and numeric features preserved.
#' Other than outputting a matrix \code{model.matrix} which is the data after processing,
#' it also outputs \code{meta} information keeping track of all the transformations the function performs,
#' while the SQL query for the transformation is kept in the output \code{sql} and written to the file specified by \code{output_file_name}.
#' If \code{meta} is specified as input to the function, the transformation and the corresponding SQL query will
#' follow what is kept in \code{meta} exactly.
#'
#' @param data Data object of class \code{data.frame} or \code{data.table}.
#' @param meta Optional, a list that keeps track of all the transformations that have been applied to the categorical features.
#' @param sep Separation symbol between the categorical features and their levels, which will be the column names inside the output \code{model.matrix}, default to "_".
#' @param ws_replace Boolean indicator controls whether white-space and punctuation inside categorical feature levels should be replaced, default to TRUE.
#' @param ws_replace_with Replacing symbol, default to '' which means all white-space and punctuation should be removed.
#' @param unique_id A unique row identifier, which is crucial for in-database scoring of the XGBoost model. If not given, the SQL query will be generated with the id name "ROW_KEY".
#' @param output_file_name Optional, a file name that the SQL query will write to.
#' @param input_table_name Name of raw data table in the database, that the SQL query will select from. If not given, SQL query will be generated with table name "INPUT_TABLE".
#' @return A list of 1). \code{meta} data tracking the transformation;
#' 2). matrix \code{model.matrix}, the data after processing, which is ready for XGBoost fitting;
#' 3). SQL query \code{sql} performing the exact one-hot encoding in the database.
#'
#' @import data.table
#' @importFrom stats contrasts
#' @importFrom stats model.frame
#' @importFrom stats model.matrix
#' @importFrom stats na.pass
#' @export
#'
#' @examples
#' library(data.table)
#' ### load test data
#' df = data.frame(ggplot2::diamonds)
#' head(df)
#'
#' d1 = data.frame(ggplot2::diamonds)
#' d1[1,2] = NA # NA on 1st row cut
#' d1[2,5] = NA # NA on 2nd row depth
#' head(d1)
#'
#' d2 = data.table(ggplot2::diamonds)
#' d2[, cut:=factor(cut, ordered=FALSE)]
#' d2[, clarity:=as.character(clarity)]
#' d2[, tsdt:=as.IDate('2017-01-05')]
#' d2[1:3, tsdt:=tsdt-1]
#' head(d2)
#'
#' ### out is obtained for training data
#' out <- onehot2sql(df)
#' out1 <- onehot2sql(d1) # NA is kept in the output
#' out2 <- onehot2sql(d2) # all non-numeric features will be treated as categorical
#'
#' ### perform same transformation for new data when meta is given
#' # test-1: new data has column class change
#' newdata = df[1:5,]
#' newdata$cut = as.character(newdata$cut)
#' onehot2sql(newdata, meta=out$meta)$model.matrix
#'
#' # test-2: new data has NA
#' newdata = df[1:5,]
#' newdata[1,1]=NA; newdata[2,1]=NA; newdata[3,2]=NA; newdata[3,3]=NA; newdata[5,4]=NA
#' onehot2sql(newdata, meta=out$meta)$model.matrix
#'
#' # test-3: newdata has column with new elements
#' newdata = d2[1:5,]
#' newdata[5,clarity:='NEW']; newdata[1,tsdt:=as.IDate('2017-05-01')]
#' onehot2sql(newdata, meta=out2$meta)$model.matrix
#'
#' # test-4: newdata has new columns
#' newdata = d2[1:5,]
#' newdata[,new_col:=1]
#' onehot2sql(newdata, meta=out2$meta)$model.matrix
#'
#' # test-5: newdata is lacking some columns
#' newdata = d2[1:5,]
#' newdata[,cut:=NULL]
#' onehot2sql(newdata, meta=out2$meta)$model.matrix
onehot2sql <- function(data, meta=NULL, sep="_", ws_replace=TRUE, ws_replace_with="",
unique_id=NULL, output_file_name=NULL, input_table_name=NULL) {
### initial setup ###
if (is.null(unique_id)) {
unique_id <- "ROW_KEY"
if (!is.null(output_file_name)) {
message("query is written to file with row unique id named as ROW_KEY")
}
}
if (is.null(input_table_name)) {
input_table_name <- "INPUT_TABLE"
if (!is.null(output_file_name)) {
message("query is written to file with input table named as INPUT_TABLE")
}
}
### compare with input meta if given ###
if (!is.null(meta[['num.vec']]) | !is.null(meta[['catg.vec']])) {
varnow.vec <- names(data)
varinp.vec <- c(meta[['num.vec']],meta[['catg.vec']])
var1.vec <- varnow.vec[!varnow.vec%in%varinp.vec]
var2.vec <- varinp.vec[!varinp.vec%in%varnow.vec]
    # new column in current data
if (length(var1.vec)>0) {
if (class(data)[1]=='data.table') {
data[, (var1.vec):=NULL]
} else {
data[,var1.vec] <- NULL
}
}
    # current data is lacking a column
if (length(var2.vec)>0) {
if (class(data)[1]=='data.table') {
data[, (var2.vec):=NA]
} else {
data[,var2.vec] <- NA
}
warning(paste('Following columns are populated with NAs: ',
paste(var2.vec,collapse=', '), sep='\n'))
}
}
### prepare meta info ###
class.lst <- lapply(data, class)
#class.vec <- sapply(class.lst, function(x) paste(x,collapse=' '))
num.vec <- names(class.lst)[class.lst%in%c('numeric','integer')]
catg.vec <- names(class.lst)[!class.lst%in%c('numeric','integer')]
catg.index <- which(names(data)%in%catg.vec)
factor.index <- which(unname(sapply(class.lst, function(x) 'factor'%in%x)))
### add sep for catg var ###
if (!is.null(sep)) {
names(data)[names(data)%in%catg.vec] <- paste0(names(data)[names(data)%in%catg.vec], sep)
}
### if contrasts not given: change to factor & generate contrasts ###
if (is.null(meta[['contrasts']])) {
# col index to be turned into factor
changeclass.index <- catg.index[!catg.index%in%factor.index]
if (class(data)[1]=='data.table') {
if (length(changeclass.index)>0) {
data[, (changeclass.index):=lapply(.SD,as.factor), .SDcols=changeclass.index]
}
contra.lst <- lapply(data[,catg.index,with=FALSE], contrasts, contrasts=FALSE)
} else {
if (length(changeclass.index)>0) {
data[,changeclass.index] <- lapply(data[,changeclass.index], as.factor)
}
contra.lst <- lapply(data[,catg.index], contrasts, contrasts=FALSE)
}
### if contrasts given: change to factor with forced levels ###
} else {
contra.lst <- meta[['contrasts']]
if (class(data)[1]=='data.table') {
x <- data[, catg.index, with=FALSE]
data[, (catg.index):=lapply(seq_along(.SD),function(i)
factor(.SD[[i]],levels=rownames(contra.lst[[names(.SD)[[i]]]]))), .SDcols=catg.index]
} else {
x <- data[, catg.index]
data[,catg.index] <- lapply(seq_along(x), function(i)
factor(x[[i]],levels=rownames(contra.lst[[names(x)[[i]]]])))
}
# catg feature with new level
notin.list <- lapply(
seq_along(x), function(i)
as.character(unique(x[[i]]))[!as.character(unique(x[[i]]))%in%rownames(contra.lst[[names(x)[i]]])])
notin.list <- lapply(notin.list, function(x) x[!is.na(x)])
names(notin.list) <- paste0(catg.vec, sep)
notin.vec <- sapply(notin.list, length)
notin.vec <- notin.vec[notin.vec>0]
}
### generate one hot sql ###
# catg.lvec: nlevel for each catg col
catg.lvec <- sapply(contra.lst, nrow)
names(catg.lvec) <- substr(names(catg.lvec),1,nchar(names(catg.lvec))-nchar(sep))
# wsmove.lst: list of var-lvl combination pre-pos ws process
wsmove.lst <- list(prelvl=NULL, poslvl=NULL)
# sql.df: generate one hot sql script
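  # each row of sql.df holds the fragments of one one-hot clause of the form
  # (case when [var] IS NULL then NULL when [var] = 'level' then 1 else 0 end) AS [var_level]
  # columns X1..X9 are the literal pieces; X10 (built below) is their concatenation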
sql.df <- data.frame(matrix(1, ncol=10, nrow=sum(catg.lvec)))
sql.df[['X1']] <- "(case when ["
sql.df[['X3']] <- "] IS NULL then NULL when ["
sql.df[['X5']] <- "] = '"
sql.df[['X7']] <- "' then 1 else 0 end) AS ["
sql.df[['X9']] <- "], \n"
index <- 0
for (i in 1:length(catg.lvec)) {
itemp <- names(catg.lvec)[i]
sql.df[['X2']][(index+1):(index+catg.lvec[i])] <- itemp
sql.df[['X4']][(index+1):(index+catg.lvec[i])] <- itemp
for (j in 1:catg.lvec[i]) {
jtemp <- rownames(contra.lst[[i]])[j]
sql.df[['X6']][index+1] <- jtemp
if (ws_replace & grepl('[[:punct:] ]+',jtemp)) {
jtempws <- gsub('[[:punct:] ]+',ws_replace_with,jtemp)
wsmove.lst$prelvl <- c(wsmove.lst$prelvl, paste0(itemp,sep,jtemp))
sql.df[['X8']][index+1] <- paste0(itemp,sep,jtempws)
wsmove.lst$poslvl <- c(wsmove.lst$poslvl, paste0(itemp,sep,jtempws))
} else {
sql.df[['X8']][index+1] <- paste0(itemp,sep,jtemp)
}
index = index + 1
}
}
sql.df[['X9']][index] <- "] \n"
sql.df[['X10']] <- paste0(sql.df[['X1']],sql.df[['X2']],sql.df[['X3']],sql.df[['X4']],
sql.df[['X5']],sql.df[['X6']],sql.df[['X7']],sql.df[['X8']],
sql.df[['X9']])
onehot_sql <- paste0("SELECT ", unique_id, ", ", "[",
paste(num.vec,collapse='], ['), "], \n",
paste(sql.df$X10,collapse=''),
"FROM ", input_table_name)
if (!is.null(output_file_name)) {
sink(output_file_name,type = "output")
cat(onehot_sql)
sink()
}
### model matrix ###
data.mat <- model.matrix(~., model.frame(~., data, na.action=na.pass),
contrasts.arg=contra.lst)
attr(data.mat,'assign') <- NULL
attr(data.mat,'contrasts') <- NULL
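  # zero out the one-hot columns for rows whose categorical level was not seen in training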
if (exists("notin.vec")) {
if (length(notin.vec)>0) {
for (i in 1:length(notin.vec)) {
data.mat[as.character(x[[names(notin.vec)[i]]])%in%notin.list[[names(notin.vec)[i]]],
grep(names(notin.vec)[i],colnames(data.mat))] <- 0
}
}
}
# replace white-space within colnames
if (ws_replace & length(wsmove.lst$prelvl)>0) {
keepname.vec <- colnames(data.mat)[!colnames(data.mat)%in%wsmove.lst$prelvl]
wsmove.lst$prelvl <- c(wsmove.lst$prelvl, keepname.vec)
wsmove.lst$poslvl <- c(wsmove.lst$poslvl, keepname.vec)
colnames(data.mat) <- wsmove.lst$poslvl[match(colnames(data.mat),wsmove.lst$prelvl)]
}
# reorder cols
data.mat <- data.mat[,order(colnames(data.mat))]
### output ###
out.lst <- list()
out.lst[['meta']] <- list('num.vec'=num.vec, 'catg.vec'=catg.vec,
'contrasts'=contra.lst)
out.lst[['model.matrix']] <- data.mat
out.lst[['sql']] <- onehot_sql
return(out.lst)
}
| /scratch/gouwar.j/cran-all/cranData/xgb2sql/R/onehot2sql.R |
## ----setup, include = FALSE----------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- message=FALSE, warning=FALSE---------------------------------------
library(data.table)
library(xgboost)
library(xgb2sql)
df <- data.frame(ggplot2::diamonds)
head(df)
## ------------------------------------------------------------------------
out <- onehot2sql(df)
print(out$meta)
head(out$model.matrix)
## ------------------------------------------------------------------------
cat(out$sql)
## ------------------------------------------------------------------------
d2 <- data.table(ggplot2::diamonds)
# change column class
d2[, cut:=factor(cut, ordered=FALSE)]
d2[, clarity:=as.character(clarity)]
# create IDate column
d2[, tsdt:=as.IDate('2017-01-05')]
d2[1:3, tsdt:=tsdt-1]
# add NAs
d2[1, clarity:=NA]
d2[2, depth:=NA]
head(d2)
out2 <- onehot2sql(d2)
head(out2$model.matrix)
## ------------------------------------------------------------------------
newdata <- d2[1:5,]
# newdata has columns with new elements
newdata[5, clarity:='NEW']; newdata[1,tsdt:=as.IDate('2018-05-01')]
# newdata has a new column
newdata[, new_col:=1]
# newdata is lacking a column
newdata[, cut:=NULL]
head(newdata)
onehot2sql(newdata, meta=out2$meta)$model.matrix
## ------------------------------------------------------------------------
x <- out$model.matrix[,colnames(out$model.matrix)!='price']
y <- out$model.matrix[,colnames(out$model.matrix)=='price']
bst <- xgboost(data = x,
label = y,
max.depth = 2,
eta = .3,
nround = 2,
objective = 'reg:linear')
booster2sql(bst, output_file_name='xgb.txt')
## ---- warning=FALSE, message=FALSE---------------------------------------
cat(readChar('xgb.txt', file.info('xgb.txt')$size))
## ------------------------------------------------------------------------
xgb.dump(bst)
## ------------------------------------------------------------------------
booster2sql(bst, output_file_name='onehot-xgb.txt', input_onehot_query=out$sql)
## ---- warning=FALSE, message=FALSE---------------------------------------
cat(readChar('onehot-xgb.txt', file.info('onehot-xgb.txt')$size))
| /scratch/gouwar.j/cran-all/cranData/xgb2sql/inst/doc/xgb2sql.R |
---
title: "Deploy XGBoost Model as SQL Query"
author: "Chengjun Hou, Abhishek Bishoyi"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Deploy XGBoost Model as SQL Query}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
You fit a boosting tree model in R with your favourite package `xgboost`, and the validation results look great,
so the next question is how to deploy this model into production so that others can use it to help with the business.
Incorporating the model into a shiny app would certainly be a good idea,
but sometimes the model needs to be integrated into other systems that the company relies on heavily.
Plus, moving large amounts of data between the database and R can be time- and memory-consuming.
So we propose the R package `xgb2sql`, enabling in-database scoring of XGBoost models built in R by translating trained model objects into SQL queries.
[CRAN Task View: Model Deployment with R](https://CRAN.R-project.org/view=ModelDeployment)
groups the process of deploying models to various environments for scoring or inferencing on new data into two categories.
The first category is **Deployment through Different Types of Artifacts**, which basically means exporting the model as an object,
then using supported software/platform to consume this object scoring out the model predictions.
The other category is **Deployment through Cloud/Server**, which includes
a). providing an R interface to third-party managed services such as [Google Cloud Machine Learning Engine](https://cloud.google.com/ml-engine/);
b). turning R code into a web API and serving it from the server.
Our approach provides a SQL query producing model predictions, which can be taken as a combination of the model itself plus the scoring process.
The output SQL query can be treated as an artifact, but we can easily set up a service for it on the database server.
The SQL query generated by this tool is basic enough to be compatible with all SQL-based database products and services.
Other than this tool, there are two R packages providing modeling and predicting capability inside database:
- Package `ibmdbR` offers modeling and predicting capability for naive-Bayes, linear regression, decision tree, association rules, and K-means clustering.
But the only supported database product and service is IBM DB2.
- Package `tidypredict` leverages `dplyr` and `dbplyr` for SQL translation, supporting linear regression, generalized linear model, and random forest.
Here is the outline for the rest of this vignette:
- [Prepare Data in Both R and Database](#data)
- [Transform XGBoost Model into SQL Query](#xgb)
## Prepare Data in Both R and Database {#data}
As we know, `xgboost` only consumes numeric input for its model fitting function [^1].
So after transferring the raw table in the database to R as a data.frame/data.table,
the same one-hot encoding needs to be performed on both the table and the data.frame/data.table.
Here we have the function `onehot2sql()` to perform one-hot encoding on the training data in R,
producing at the same time the SQL query that performs the exact transformation for the raw table in the database.
Let's start with loading the sample dataset from `ggplot2`:
```{r, message=FALSE, warning=FALSE}
library(data.table)
library(xgboost)
library(xgb2sql)
df <- data.frame(ggplot2::diamonds)
head(df)
```
Function `onehot2sql()` is built upon the base R functions `model.frame()` and `model.matrix()`.
Other than consuming a data.frame as input, this function has been optimized to work with a data.table with greater efficiency.
It outputs a matrix ready for model fitting, following the rules listed below:
1. The function treats any non-numeric columns, i.e., columns with class not being `numeric` and `integer`, as categorical and performs one-hot encoding for them.
1. The one-hot encoding doesn't remove one feature-level combination for each categorical feature,
as `model.matrix` does in order to avoid issues caused by multicollinearity.
Although the output matrix conveys the same amount of information with and without one feature-level combination removed,
thus producing similar model performance, the knowledge about the system gained from this modeling exercise is very different.
If, say, the "cut" of diamonds being "Ideal" has a huge impact on its price as the target/response,
removing the binary column "cut.Ideal" from the output matrix would result in
the predictive power of "cut.Ideal" being scattered among the other "cut" columns [^2].
The model performance would be comparable, but when studying the variable importance we would miss the information that "cut" being "Ideal" is the dominant factor of price.
So as multicollinearity wouldn't be a problem for tree-based models [^3],
we believe performing full one-hot encoding is more appropriate for XGBoost modeling.
1. The function keeps NAs inside both categorical and numeric features preserved. As pointed out by the author of `xgboost`,
the algorithm will automatically learn what is the best direction to go when a value is missing,
which can be viewed as automatically "learn" what is the best imputation value for missing values based on reduction on training loss [^4].
This is one of the reasons of XGBoost being so powerful, so we are keeping all NAs in the output matrix.
1. The function outputs `meta` information tracking all the levels for each categorical feature.
If it is given to the function as an input, the exact feature-level combinations will be populated,
even if the new data is missing one level for a particular categorical feature, or has a new level never seen before.
The available arguments of this function, which will be explained with examples, are:
```
onehot2sql(data, meta=NULL, sep="_", ws_replace=TRUE, ws_replace_with="",
unique_id=NULL, output_file_name=NULL, input_table_name=NULL)
```
Output of this function is a list containing:
1. `meta` data tracking the transformation.
1. matrix `model.matrix` being the data after processing which is ready for XGBoost fitting.
1. SQL query `sql` performing the exact one-hot encoding in the database.
So let's take a look at its basic usage:
```{r}
out <- onehot2sql(df)
print(out$meta)
head(out$model.matrix)
```
It should be noted that level "Very Good" for feature "cut" has been replaced with "VeryGood", with the white-space removed.
This behaviour is controlled by function arguments `ws_replace=TRUE` and `ws_replace_with=""`,
where another symbol can be specified to replace the white-space inside levels of categorical features.
Such processing is necessary as SQL databases usually don't allow white-space inside table column names.
And the symbol separating the feature and its levels is controlled by `sep="_"`.
The output model.matrix would have all its columns reordered alphabetically.
The SQL query performing one-hot encoding for the raw table is:
```{r}
cat(out$sql)
```
We want to emphasise here that **a unique row identifier** inside the raw table is crucial for in-database scoring of the XGBoost model.
Column name of the identifier can be specified by the function argument `unique_id`, which will be passed along to the table after one-hot encoding.
If it is not given, SQL query will be populated with column name "ROW_KEY" for the identifier.
Similarly, "INPUT_TABLE" is used in the query if the name of the raw table `input_table_name` is `NULL`.
Given a valid value, the SQL query will be written to the file specified by `output_file_name`.
Let's have another example with NAs and a date column:
```{r}
d2 <- data.table(ggplot2::diamonds)
# change column class
d2[, cut:=factor(cut, ordered=FALSE)]
d2[, clarity:=as.character(clarity)]
# create IDate column
d2[, tsdt:=as.IDate('2017-01-05')]
d2[1:3, tsdt:=tsdt-1]
# add NAs
d2[1, clarity:=NA]
d2[2, depth:=NA]
head(d2)
out2 <- onehot2sql(d2)
head(out2$model.matrix)
```
Then let's look at whether, when `meta` is given to data with new elements,
`onehot2sql()` will output a model.matrix with columns identical to the training data,
so that `predict()` can be applied to the trained model on the new data:
```{r}
newdata <- d2[1:5,]
# newdata has columns with new elements
newdata[5, clarity:='NEW']; newdata[1,tsdt:=as.IDate('2018-05-01')]
# newdata has a new column
newdata[, new_col:=1]
# newdata is lacking a column
newdata[, cut:=NULL]
head(newdata)
onehot2sql(newdata, meta=out2$meta)$model.matrix
```
We can see from this example that
1. any new levels will have a value of 0 on all the columns related to that feature.
1. any new features will not be in the output model.matrix.
1. the entire feature will be imputed with NAs if it is missing in the new data, and warnings will be given.
We recommend any feature engineering and/or missing imputation work to be done before applying function `onehot2sql()` to the training data in R.
It should be the last step before kicking off the model fitting.
And the SQL query for feature engineering and/or missing imputation can be placed as a sub-query inside the one-hot query.
For example, replacing "INPUT_TABLE" inside `out$sql` with the following sub-query will do one-hot encoding together with missing imputation for the feature "clarity":
```
(SELECT ROW_KEY, [cut], [color],
(case when [clarity] IS NULL then 'MISS' else [clarity] end) as [clarity],
[carat], [depth], [table], [price], [x], [y], [z]
FROM INPUT_TABLE) AS IMPUTED_TABLE
```
## Transform XGBoost Model into SQL Query {#xgb}
Before taking a close look at function `booster2sql()` translating XGBoost model into SQL query,
we want to illustrate the suggested work-flow for the whole process of model fitting and scoring with package `xgb2sql`:
1. We start with transferring the raw table from the database to R as a data.frame/data.table.
There are many packages supporting database connections; we recommend `dplyr` and `DBI` here (a sketch of this step is given after this list).
1. After all feature engineering and missing imputation is done, apply function `onehot2sql()` to the data.frame/data.table,
obtaining the model.matrix and storing the one-hot query.
1. Conduct all modeling practices until reaching a final model, then apply function `booster2sql()` to the final model,
producing the XGBoost query for its in-database scoring.
1. Modeling in R is done, let's move to in-database scoring:
+ Execute the one-hot query on the raw table, creating the model-ready table.
+ Execute the XGBoost query on the model-ready table, obtaining the model predictions.
    + Comparing the model predictions in R with the values given by the XGBoost query is always recommended.
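For step 1, a sketch of pulling the raw table into R with `DBI` might look like the following (the driver, DSN, and table name are hypothetical):

```r
library(DBI)
con <- dbConnect(odbc::odbc(), dsn = "my_dsn")          # hypothetical connection
raw_df <- dbGetQuery(con, "SELECT * FROM INPUT_TABLE")  # hypothetical table name
out <- onehot2sql(raw_df, unique_id = "ROW_KEY", input_table_name = "INPUT_TABLE")
dbDisconnect(con)
```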
Now let's move back to function `booster2sql()`. Available arguments are:
```
booster2sql(xgbModel, print_progress=FALSE, unique_id=NULL,
output_file_name=NULL, input_table_name=NULL, input_onehot_query=NULL)
```
The model input `xgbModel` to this function should have a class of `xgb.Booster`.
And `print_progress=FALSE` controls whether the translating progress should be printed to console.
Similarly, `unique_id` and `input_table_name` should be given to generate the SQL query.
It should be noted that there must be a valid file path for `output_file_name` to write the query, otherwise the function will not run.
Let's try to predict the "price" of diamonds using the other features. In order to demonstrate the generated XGBoost query,
we will train the model with `max.depth=2` and `nround=2`.
```{r}
x <- out$model.matrix[,colnames(out$model.matrix)!='price']
y <- out$model.matrix[,colnames(out$model.matrix)=='price']
bst <- xgboost(data = x,
label = y,
max.depth = 2,
eta = .3,
nround = 2,
objective = 'reg:linear')
booster2sql(bst, output_file_name='xgb.txt')
```
```{r, warning=FALSE, message=FALSE}
cat(readChar('xgb.txt', file.info('xgb.txt')$size))
```
We can see that each `SELECT ... AS ONETREE` section inside the XGBoost query is composed of nested CASE WHEN statements,
providing scores along a tree structure.
And each of these sections represents one round/iteration of the XGBoost model.
Values for the splits and scores within the query are from the `xgb.dump()` of the model without any rounding:
```{r}
xgb.dump(bst)
```
It should be noted that the model prediction calculated by adding up the scores provided by `xgb.dump()`
differs slightly from that obtained by applying `predict()` to the model directly.
It is a rounding difference and thus extremely insignificant.
But since the XGBoost query is generated with scores from `xgb.dump`,
this difference will still be there between the in-database scoring and R's `predict()` of the model.
There is one last argument of `booster2sql()` we haven't talked about, i.e., `input_onehot_query`.
Here we can input the one-hot query generated by `onehot2sql()`, which will be used as a sub-query replacing "MODREADY_TABLE" within the XGBoost query.
In this way, the XGBoost query can be executed on the raw table, producing the model predictions directly.
```{r}
booster2sql(bst, output_file_name='onehot-xgb.txt', input_onehot_query=out$sql)
```
```{r, warning=FALSE, message=FALSE}
cat(readChar('onehot-xgb.txt', file.info('onehot-xgb.txt')$size))
```
As processing time and query size grow exponentially with `max.depth` and linearly with `nround`,
this approach of combining the one-hot query and the XGBoost query should be used only for simple models.
[^1]: [Understand your dataset with XGBoost.\ ](https://xgboost.readthedocs.io/en/latest/R-package/discoverYourData.html)
[^2]: [Ensembles of tree-based models: why correlated features do not trip them and why NA matters.\ ](https://medium.com/data-design/ensembles-of-tree-based-models-why-correlated-features-do-not-trip-them-and-why-na-matters-7658f4752e1b)
[^3]: [StackExchange: Does XGBoost handle multicollinearity by itself?\ ](https://datascience.stackexchange.com/questions/12554/does-xgboost-handle-multicollinearity-by-itself)
[^4]: [GitHub Issue: What are the ways of treatng missing values in XGboost?\ ](https://github.com/dmlc/xgboost/issues/21)
| /scratch/gouwar.j/cran-all/cranData/xgb2sql/inst/doc/xgb2sql.Rmd |
---
title: "Deploy XGBoost Model as SQL Query"
author: "Chengjun Hou, Abhishek Bishoyi"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Deploy XGBoost Model as SQL Query}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
You fit a boosting tree model in R with your favourite package `xgboost`, the validation results looks great,
so next question comes up as how to deploy this model into production so that others could utilize it to help with the business.
Incorporating the model into a shiny app would certainly be a good idea,
but sometimes the model needs to be integrated into some other systems that the company is heavily relied on.
Plus moving large amount of data between database and R could be time and memory consuming.
So we propose R package `xgb2sql` enabling in-database scoring of XGBoost models built in R by translating trained model objects into SQL query.
[CRAN Task View: Model Deployment with R](https://CRAN.R-project.org/view=ModelDeployment)
categorizes the process of deploying models to various environments for scoring or inferencing on new data into two categories.
The first category is **Deployment through Different Types of Artifacts**, which basically means exporting the model as an object,
then using supported software/platform to consume this object scoring out the model predictions.
The other category is **Deployment through Cloud/Server**, which includes
a). providing an R interface to third-party managed services such as [Google Cloud Machine Learning Engine](https://cloud.google.com/ml-engine/);
b). turning R code into web API and opening service on the server.
Our approach provides SQL query producing model predictions, which can be taken as a combination of the model itself plus the scoring process.
The output SQL query can be treated as an artifact, but we can easily set up service for it on the database server.
The SQL query generated by this tool is basic enough to be compatible with all SQL-based database products and services.
Other than this tool, there are two R packages providing modeling and predicting capability inside database:
- Package `ibmdbR` offers modeling and predicting capability for naive-Bayes, linear regression, decision tree, association rules, and K-means clustering.
But the only supported database product and service is IBM DB2.
- Package `tidypredict` leverages `dplyr` and `dbplyr` for SQL translation, supporting linear regression, generalized linear model, and random forest.
Here is the outline for the rest of this vignette:
- [Prepare Data in Both R and Database](#data)
- [Transform XGBoost Model into SQL Query](#xgb)
## Prepare Data in Both R and Database {#data}
As we know, `xgboost` only consumes numeric input for its model fitting function [^1].
So after transferring raw table in database to R as a data.frame/data.table,
same one-hot encoding needs to be performed on both the table and the data.frame/data.table.
Here we have function `onehot2sql()` to perform one-hot encoding on the training data in R,
producing at the same the SQL query performing the exact transformation for the raw table in database.
Let's start with loading the sample dataset from `ggplot2`:
```{r, message=FALSE, warning=FALSE}
library(data.table)
library(xgboost)
library(xgb2sql)
df <- data.frame(ggplot2::diamonds)
head(df)
```
Funtion `onehot2sql()` is built upon base R functions `model.frame()` and `model.matrix()`.
Other than consuming a data.frame as input, this function has been optimized to work with a data.table with greater efficiency.
It outputs a matrix ready for model fitting following rules listed below:
1. The function treats any non-numeric columns, i.e., columns with class not being `numeric` and `integer`, as categorical and performs one-hot encoding for them.
1. The one-hot encoding doesn't remove one feature-level combination for each categorical feature,
as `model.matrix` does in order to avoid issues caused by multicollinearity.
Although the output matrix conveys same amount of information with and without one feature-level combination removed,
thus producing similar model performance, system knowledge gained along this modeling practice is very different.
If let's say the "cut" of diamonds being "Ideal" has a huge impact on its price as the target/response,
removing binary column "cut.Ideal" in the output matrix would result in
the predictive power of "cut.Ideal" being scattered among other "cut" columns [^2].
The model performance would be comparative, but we would miss the information that "cut" being "Ideal" is the dominate factor of price,
by studying the variable importance.
So as multicollinearity wouldn't be a problem for tree-based model [^3],
we believe performing full one-hot encoding is more appropriate for XGBoost modeling.
1. The function keeps NAs inside both categorical and numeric features preserved. As pointed by the author of `xgboost`,
the algorithm will automatically learn what is the best direction to go when a value is missing,
which can be viewed as automatically "learn" what is the best imputation value for missing values based on reduction on training loss [^4].
This is one of the reasons of XGBoost being so powerful, so we are keeping all NAs in the output matrix.
1. The function outputs `meta` information tracking all the levels for each categorical feature.
If it is given to the function as an input, the exact feature-level combinations will be populated,
even if the new data is missing one level for a particular categorical feature, or having a new level never seen before.
Available arguments of this function are, which will be explained with examples:
```
onehot2sql(data, meta=NULL, sep="_", ws_replace=TRUE, ws_replace_with="",
unique_id=NULL, output_file_name=NULL, input_table_name=NULL)
```
Output of this function is a list containing:
1. `meta` data tracking the transformation.
1. matrix `model.matrix` being the data after processing which is ready for XGBoost fitting.
1. SQL query `sql` performing the exact one-hot encoding in the database.
So let' take a look of its basic usage:
```{r}
out <- onehot2sql(df)
print(out$meta)
head(out$model.matrix)
```
It should be noted that level "Very Good" for feature "cut" has been replaced with "VeryGood", with the white-space removed.
This behaviour is controlled by function arguments `ws_replace=TRUE` and `ws_replace_with=""`,
where other symbol can be specified to replace the white-space inside levels of categorical features.
Such processing is very necessary as SQL database usually doesn't allow white-space inside its table column names.
And symbol separating the feature and its levels is controlled by `sep="_"`.
The output model.matrix would have all its columns reordered alphabetically.
The SQL query performing one-hot encoding for the raw table is:
```{r}
cat(out$sql)
```
We want to emphasise here that **an unique row identifier** inside the raw table is crucial for in-database scoring of XGBoost model.
Column name of the identifier can be specified by the function argument `unique_id`, which will be passed along to the table after one-hot encoding.
If it is not given, SQL query will be populated with column name "ROW_KEY" for the identifier.
Similarly, "INPUT_TABLE" is used in the query if name of the raw table `input_table_name` is `NULL`.
Given a valid value, the SQL query will be written to the file specified by `output_file_name`.
Let's have another example with NAs and a date column:
```{r}
d2 <- data.table(ggplot2::diamonds)
# change column class
d2[, cut:=factor(cut, ordered=FALSE)]
d2[, clarity:=as.character(clarity)]
# create IDate column
d2[, tsdt:=as.IDate('2017-01-05')]
d2[1:3, tsdt:=tsdt-1]
# add NAs
d2[1, clarity:=NA]
d2[2, depth:=NA]
head(d2)
out2 <- onehot2sql(d2)
head(out2$model.matrix)
```
Then let's look at when `meta` is given to data with new elements,
whether `onehot2sql()` will output model.matrix with identical columns as the training data,
in order to apply `predict()` to the trained model on the new data:
```{r}
newdata <- d2[1:5,]
# newdata has columns with new elements
newdata[5, clarity:='NEW']; newdata[1,tsdt:=as.IDate('2018-05-01')]
# newdata has a new column
newdata[, new_col:=1]
# newdata is lacking a column
newdata[, cut:=NULL]
head(newdata)
onehot2sql(newdata, meta=out2$meta)$model.matrix
```
We can see from this example that
1. any new levels will have value of 0s on all the columns related to that feature.
1. any new features will not be in the output model.matrix.
1. the entire feature will be imputed with NAs if it is missing in the new data, and warnings will be given.
We recommend any feature engineering and/or missing imputation work to be done before applying function `onehot2sql()` to the training data in R.
It should be the last step before kicking off the model fitting.
And SQL query for feature engineering and/or missing imputation can be placed as a sub-query inside the one-hot query.
For example, replacing "INPUT_TABLE" inside `out$sql` with following sub-query will do one-hot encoding together with missing imputation for feature "clarity":
```
(SELECT ROW_KEY, [cut], [color],
(case when [clarity] IS NULL then 'MISS' else [clarity] end) as [clarity],
[carat], [depth], [table], [price], [x], [y], [z]
FROM INPUT_TABLE) AS IMPUTED_TABLE
```
## Transform XGBoost Model into SQL Query {#xgb}
Before taking a close look at the function `booster2sql()`, which translates an XGBoost model into a SQL query,
we want to illustrate the suggested work-flow for the whole process of model fitting and scoring with the package `xgb2sql`:
1. We start by transferring the raw table from the database to R as a data.frame/data.table.
Many packages support database connections; we recommend `dplyr` and `DBI` here.
1. After all feature engineering and missing imputation is done, apply the function `onehot2sql()` to the data.frame/data.table,
obtaining the model.matrix and storing the one-hot query.
1. Conduct all modeling work until reaching a final model, then apply the function `booster2sql()` to the final model,
producing the XGBoost query for its in-database scoring.
1. Once modeling in R is done, move to in-database scoring (a sketch follows this list):
    + Execute the one-hot query on the raw table, creating the model-ready table.
    + Execute the XGBoost query on the model-ready table, obtaining the model predictions.
    + Comparing the model predictions in R with the values given by the XGBoost query is always recommended.
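Below is a rough sketch of the in-database part of this work-flow using `DBI` (not run). The connection setup, SQL dialect and table names are illustrative assumptions that will differ across databases; the sketch assumes the raw table referenced inside `out$sql` exists in the database, and that the XGBoost query produced by `booster2sql()` (described below) has been written to 'xgb.txt'.
```
# illustrative only -- not run
library(DBI)
con <- dbConnect(odbc::odbc(), dsn = "my_dsn") # hypothetical connection
# 1. create the model-ready table from the raw table using the one-hot query
dbExecute(con, paste("CREATE TABLE MODREADY_TABLE AS", out$sql))
# 2. score the model-ready table with the XGBoost query
xgb_query <- readChar("xgb.txt", file.info("xgb.txt")$size)
pred_db <- dbGetQuery(con, xgb_query)
# 3. compare these predictions with predict() applied to the model in R
dbDisconnect(con)
```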
Now let's move back to the function `booster2sql()`. Its available arguments are:
```
booster2sql(xgbModel, print_progress=FALSE, unique_id=NULL,
output_file_name=NULL, input_table_name=NULL, input_onehot_query=NULL)
```
The model input `xgbModel` to this function should be of class `xgb.Booster`.
The argument `print_progress=FALSE` controls whether the translation progress should be printed to the console.
Similarly, `unique_id` and `input_table_name` should be given to generate the SQL query.
It should be noted that there must be a valid file path for `output_file_name` to write the query, otherwise the function will not run.
Let's try to predict the "price" of diamonds using the other features. In order to demonstrate the generated XGBoost query,
we will train the model with `max.depth=2` and `nround=2`.
```{r}
x <- out$model.matrix[,colnames(out$model.matrix)!='price']
y <- out$model.matrix[,colnames(out$model.matrix)=='price']
bst <- xgboost(data = x,
label = y,
max.depth = 2,
eta = .3,
nround = 2,
objective = 'reg:linear')
booster2sql(bst, output_file_name='xgb.txt')
```
```{r, warning=FALSE, message=FALSE}
cat(readChar('xgb.txt', file.info('xgb.txt')$size))
```
We can see that each `SELECT ... AS ONETREE` section inside the XGBoost query is composed of nested CASE WHEN statements,
providing scores along a tree structure.
Each of these sections represents one round/iteration of the XGBoost model.
Values for the splits and scores within the query are from the `xgb.dump()` of the model without any rounding:
```{r}
xgb.dump(bst)
```
It should be noted that the model prediction calculated by adding up the scores provided by `xgb.dump()`
differs from the one obtained by applying `predict()` to the model directly.
This is merely a rounding difference and is therefore negligible.
But since the XGBoost query is generated with scores from `xgb.dump()`,
this small difference will persist between the in-database scoring and R's `predict()` of the model.
There is one last argument of `booster2sql()` we haven't talked about, i.e., `input_onehot_query`.
Here we can supply the one-hot query generated by `onehot2sql()`, which will be used as a sub-query replacing "MODREADY_TABLE" within the XGBoost query.
In this way, the XGBoost query can be executed on the raw table, producing the model predictions directly.
```{r}
booster2sql(bst, output_file_name='onehot-xgb.txt', input_onehot_query=out$sql)
```
```{r, warning=FALSE, message=FALSE}
cat(readChar('onehot-xgb.txt', file.info('onehot-xgb.txt')$size))
```
Since processing time and query size grow exponentially with `max.depth` and linearly with `nround`,
this approach of combining the one-hot query and the XGBoost query should be used only for simple models.
[^1]: [Understand your dataset with XGBoost.\ ](https://xgboost.readthedocs.io/en/latest/R-package/discoverYourData.html)
[^2]: [Ensembles of tree-based models: why correlated features do not trip them and why NA matters.\ ](https://medium.com/data-design/ensembles-of-tree-based-models-why-correlated-features-do-not-trip-them-and-why-na-matters-7658f4752e1b)
[^3]: [StackExchange: Does XGBoost handle multicollinearity by itself?\ ](https://datascience.stackexchange.com/questions/12554/does-xgboost-handle-multicollinearity-by-itself)
[^4]: [GitHub Issue: What are the ways of treatng missing values in XGboost?\ ](https://github.com/dmlc/xgboost/issues/21)
| /scratch/gouwar.j/cran-all/cranData/xgb2sql/vignettes/xgb2sql.Rmd |
#' Callback closures for booster training.
#'
#' These are used to perform various service tasks either during boosting iterations or at the end.
#' This approach helps to modularize many of such tasks without bloating the main training methods,
#' and it offers a way to extend the training process with custom functionality.
#'
#' @details
#' By default, a callback function is run after each boosting iteration.
#' An R-attribute \code{is_pre_iteration} could be set for a callback to define a pre-iteration function.
#'
#' When a callback function has \code{finalize} parameter, its finalizer part will also be run after
#' the boosting is completed.
#'
#' WARNING: side-effects!!! Be aware that these callback functions access and modify things in
#' the environment from which they are called, which is a fairly uncommon thing to do in R.
#'
#' To write a custom callback closure, make sure you first understand the main concepts about R environments.
#' Check either R documentation on \code{\link[base]{environment}} or the
#' \href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R"
#' book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks -
#' choose ones that do something similar to what you want to achieve. Also, you would need to get familiar
#' with the objects available inside of the \code{xgb.train} and \code{xgb.cv} internal environments.
#'
#' @seealso
#' \code{\link{cb.print.evaluation}},
#' \code{\link{cb.evaluation.log}},
#' \code{\link{cb.reset.parameters}},
#' \code{\link{cb.early.stop}},
#' \code{\link{cb.save.model}},
#' \code{\link{cb.cv.predict}},
#' \code{\link{xgb.train}},
#' \code{\link{xgb.cv}}
#'
#' @name callbacks
NULL
#
# Callbacks -------------------------------------------------------------------
#
#' Callback closure for printing the result of evaluation
#'
#' @param period results would be printed every \code{period} iterations
#' @param showsd whether standard deviations should be printed (when available)
#'
#' @details
#' The callback function prints the result of evaluation at every \code{period} iterations.
#' The initial and the last iteration's evaluations are always printed.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{bst_evaluation} (also \code{bst_evaluation_err} when available),
#' \code{iteration},
#' \code{begin_iteration},
#' \code{end_iteration}.
#'
#' @seealso
#' \code{\link{callbacks}}
#'
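#' @examples
#' # A minimal sketch (not run): print the evaluation result every 10 iterations.
#' # 'params' and 'dtrain' are assumed to be a previously prepared parameter list
#' # and xgb.DMatrix.
#' \dontrun{
#' bst <- xgb.train(params, dtrain, nrounds = 100, watchlist = list(train = dtrain),
#'                  callbacks = list(cb.print.evaluation(period = 10)))
#' }
#'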
#' @export
cb.print.evaluation <- function(period = 1, showsd = TRUE) {
callback <- function(env = parent.frame()) {
if (length(env$bst_evaluation) == 0 ||
period == 0 ||
NVL(env$rank, 0) != 0)
return()
i <- env$iteration
if ((i - 1) %% period == 0 ||
i == env$begin_iteration ||
i == env$end_iteration) {
stdev <- if (showsd) env$bst_evaluation_err else NULL
msg <- .format_eval_string(i, env$bst_evaluation, stdev)
cat(msg, '\n')
}
}
attr(callback, 'call') <- match.call()
attr(callback, 'name') <- 'cb.print.evaluation'
callback
}
#' Callback closure for logging the evaluation history
#'
#' @details
#' This callback function appends the current iteration evaluation results \code{bst_evaluation}
#' available in the calling parent frame to the \code{evaluation_log} list in a calling frame.
#'
#' The finalizer callback (called with \code{finalize = TRUE} in the end) converts
#' the \code{evaluation_log} list into a final data.table.
#'
#' The iteration evaluation result \code{bst_evaluation} must be a named numeric vector.
#'
#' Note: in the column names of the final data.table, the dash '-' character is replaced with
#' the underscore '_' in order to make the column names more like regular R identifiers.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{evaluation_log},
#' \code{bst_evaluation},
#' \code{iteration}.
#'
#' @seealso
#' \code{\link{callbacks}}
#'
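#' @examples
#' # A minimal sketch (not run): this callback is normally added automatically by
#' # \code{xgb.train} when a non-empty 'watchlist' is supplied; 'params' and 'dtrain'
#' # are assumed to be a previously prepared parameter list and xgb.DMatrix.
#' \dontrun{
#' bst <- xgb.train(params, dtrain, nrounds = 10, watchlist = list(train = dtrain))
#' bst$evaluation_log
#' }
#'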
#' @export
cb.evaluation.log <- function() {
mnames <- NULL
init <- function(env) {
if (!is.list(env$evaluation_log))
stop("'evaluation_log' has to be a list")
mnames <<- names(env$bst_evaluation)
if (is.null(mnames) || any(mnames == ""))
stop("bst_evaluation must have non-empty names")
mnames <<- gsub('-', '_', names(env$bst_evaluation))
if (!is.null(env$bst_evaluation_err))
mnames <<- c(paste0(mnames, '_mean'), paste0(mnames, '_std'))
}
finalizer <- function(env) {
env$evaluation_log <- as.data.table(t(simplify2array(env$evaluation_log)))
setnames(env$evaluation_log, c('iter', mnames))
if (!is.null(env$bst_evaluation_err)) {
# rearrange col order from _mean,_mean,...,_std,_std,...
# to be _mean,_std,_mean,_std,...
len <- length(mnames)
means <- mnames[seq_len(len / 2)]
stds <- mnames[(len / 2 + 1):len]
cnames <- numeric(len)
cnames[c(TRUE, FALSE)] <- means
cnames[c(FALSE, TRUE)] <- stds
env$evaluation_log <- env$evaluation_log[, c('iter', cnames), with = FALSE]
}
}
callback <- function(env = parent.frame(), finalize = FALSE) {
if (is.null(mnames))
init(env)
if (finalize)
return(finalizer(env))
ev <- env$bst_evaluation
if (!is.null(env$bst_evaluation_err))
ev <- c(ev, env$bst_evaluation_err)
env$evaluation_log <- c(env$evaluation_log,
list(c(iter = env$iteration, ev)))
}
attr(callback, 'call') <- match.call()
attr(callback, 'name') <- 'cb.evaluation.log'
callback
}
#' Callback closure for resetting the booster's parameters at each iteration.
#'
#' @param new_params a list where each element corresponds to a parameter that needs to be reset.
#' Each element's value must be either a vector of values of length \code{nrounds}
#' to be set at each iteration,
#' or a function of two parameters \code{learning_rates(iteration, nrounds)}
#' which returns a new parameter value by using the current iteration number
#' and the total number of boosting rounds.
#'
#' @details
#' This is a "pre-iteration" callback function used to reset booster's parameters
#' at the beginning of each iteration.
#'
#' Note that when training is resumed from some previous model, and a function is used to
#' reset a parameter value, the \code{nrounds} argument in this function would be
#' the number of boosting rounds in the current training.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{bst} or \code{bst_folds},
#' \code{iteration},
#' \code{begin_iteration},
#' \code{end_iteration}.
#'
#' @seealso
#' \code{\link{callbacks}}
#'
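#' @examples
#' # A minimal sketch (not run): decay the learning rate over the boosting rounds.
#' # 'params' and 'dtrain' are assumed to be a previously prepared parameter list
#' # and xgb.DMatrix.
#' \dontrun{
#' my_etas <- list(eta = c(0.5, 0.1)) # one value per round, here nrounds = 2
#' bst <- xgb.train(params, dtrain, nrounds = 2, watchlist = list(train = dtrain),
#'                  callbacks = list(cb.reset.parameters(my_etas)))
#' }
#'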
#' @export
cb.reset.parameters <- function(new_params) {
if (typeof(new_params) != "list")
stop("'new_params' must be a list")
pnames <- gsub("\\.", "_", names(new_params))
nrounds <- NULL
# run some checks in the beginning
init <- function(env) {
nrounds <<- env$end_iteration - env$begin_iteration + 1
if (is.null(env$bst) && is.null(env$bst_folds))
stop("Parent frame has neither 'bst' nor 'bst_folds'")
# Some parameters are not allowed to be changed,
    # since changing them would simply wreak havoc
not_allowed <- pnames %in%
c('num_class', 'num_output_group', 'size_leaf_vector', 'updater_seq')
if (any(not_allowed))
stop('Parameters ', paste(pnames[not_allowed]), " cannot be changed during boosting.")
for (n in pnames) {
p <- new_params[[n]]
if (is.function(p)) {
if (length(formals(p)) != 2)
stop("Parameter '", n, "' is a function but not of two arguments")
} else if (is.numeric(p) || is.character(p)) {
if (length(p) != nrounds)
stop("Length of '", n, "' has to be equal to 'nrounds'")
} else {
stop("Parameter '", n, "' is not a function or a vector")
}
}
}
callback <- function(env = parent.frame()) {
if (is.null(nrounds))
init(env)
i <- env$iteration
pars <- lapply(new_params, function(p) {
if (is.function(p))
return(p(i, nrounds))
p[i]
})
if (!is.null(env$bst)) {
xgb.parameters(env$bst$handle) <- pars
} else {
for (fd in env$bst_folds)
xgb.parameters(fd$bst) <- pars
}
}
attr(callback, 'is_pre_iteration') <- TRUE
attr(callback, 'call') <- match.call()
attr(callback, 'name') <- 'cb.reset.parameters'
callback
}
#' Callback closure to activate the early stopping.
#'
#' @param stopping_rounds The number of rounds with no improvement in
#' the evaluation metric in order to stop the training.
#' @param maximize whether to maximize the evaluation metric
#' @param metric_name the name of an evaluation column to use as a criteria for early
#' stopping. If not set, the last column would be used.
#' Let's say the test data in \code{watchlist} was labelled as \code{dtest},
#' and one wants to use the AUC in test data for early stopping regardless of where
#' it is in the \code{watchlist}, then one of the following would need to be set:
#' \code{metric_name='dtest-auc'} or \code{metric_name='dtest_auc'}.
#' All dash '-' characters in metric names are considered equivalent to '_'.
#' @param verbose whether to print the early stopping information.
#'
#' @details
#' This callback function determines the condition for early stopping
#' by setting the \code{stop_condition = TRUE} flag in its calling frame.
#'
#' The following additional fields are assigned to the model's R object:
#' \itemize{
#' \item \code{best_score} the evaluation score at the best iteration
#' \item \code{best_iteration} at which boosting iteration the best score has occurred (1-based index)
#' }
#' The same values are also stored as xgb-attributes:
#' \itemize{
#' \item \code{best_iteration} is stored as a 0-based iteration index (for interoperability of binary models)
#' \item \code{best_msg} message string is also stored.
#' }
#'
#' At least one data element is required in the evaluation watchlist for early stopping to work.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{stop_condition},
#' \code{bst_evaluation},
#' \code{rank},
#' \code{bst} (or \code{bst_folds} and \code{basket}),
#' \code{iteration},
#' \code{begin_iteration},
#' \code{end_iteration},
#' \code{num_parallel_tree}.
#'
#' @seealso
#' \code{\link{callbacks}},
#' \code{\link{xgb.attr}}
#'
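#' @examples
#' # A minimal sketch (not run): stop when 'test-auc' has not improved for 10 rounds.
#' # 'params' (with eval_metric = "auc"), 'dtrain' and 'dtest' are assumed to be
#' # previously prepared objects.
#' \dontrun{
#' bst <- xgb.train(params, dtrain, nrounds = 500,
#'                  watchlist = list(train = dtrain, test = dtest),
#'                  callbacks = list(cb.early.stop(stopping_rounds = 10,
#'                                                 maximize = TRUE,
#'                                                 metric_name = "test-auc")))
#' }
#'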
#' @export
cb.early.stop <- function(stopping_rounds, maximize = FALSE,
metric_name = NULL, verbose = TRUE) {
# state variables
best_iteration <- -1
best_ntreelimit <- -1
best_score <- Inf
best_msg <- NULL
metric_idx <- 1
init <- function(env) {
if (length(env$bst_evaluation) == 0)
stop("For early stopping, watchlist must have at least one element")
eval_names <- gsub('-', '_', names(env$bst_evaluation))
if (!is.null(metric_name)) {
metric_idx <<- which(gsub('-', '_', metric_name) == eval_names)
if (length(metric_idx) == 0)
stop("'metric_name' for early stopping is not one of the following:\n",
paste(eval_names, collapse = ' '), '\n')
}
if (is.null(metric_name) &&
length(env$bst_evaluation) > 1) {
metric_idx <<- length(eval_names)
if (verbose)
cat('Multiple eval metrics are present. Will use ',
eval_names[metric_idx], ' for early stopping.\n', sep = '')
}
metric_name <<- eval_names[metric_idx]
# maximize is usually NULL when not set in xgb.train and built-in metrics
if (is.null(maximize))
maximize <<- grepl('(_auc|_map|_ndcg)', metric_name)
if (verbose && NVL(env$rank, 0) == 0)
cat("Will train until ", metric_name, " hasn't improved in ",
stopping_rounds, " rounds.\n\n", sep = '')
best_iteration <<- 1
if (maximize) best_score <<- -Inf
env$stop_condition <- FALSE
if (!is.null(env$bst)) {
if (!inherits(env$bst, 'xgb.Booster'))
stop("'bst' in the parent frame must be an 'xgb.Booster'")
if (!is.null(best_score <- xgb.attr(env$bst$handle, 'best_score'))) {
best_score <<- as.numeric(best_score)
best_iteration <<- as.numeric(xgb.attr(env$bst$handle, 'best_iteration')) + 1
        best_msg <<- xgb.attr(env$bst$handle, 'best_msg')  # best_msg is a character string
} else {
xgb.attributes(env$bst$handle) <- list(best_iteration = best_iteration - 1,
best_score = best_score)
}
} else if (is.null(env$bst_folds) || is.null(env$basket)) {
stop("Parent frame has neither 'bst' nor ('bst_folds' and 'basket')")
}
}
finalizer <- function(env) {
if (!is.null(env$bst)) {
attr_best_score <- as.numeric(xgb.attr(env$bst$handle, 'best_score'))
if (best_score != attr_best_score) {
# If the difference is too big, throw an error
if (abs(best_score - attr_best_score) >= 1e-14) {
stop("Inconsistent 'best_score' values between the closure state: ", best_score,
" and the xgb.attr: ", attr_best_score)
}
# If the difference is due to floating-point truncation, update best_score
best_score <- attr_best_score
}
env$bst$best_iteration <- best_iteration
env$bst$best_ntreelimit <- best_ntreelimit
env$bst$best_score <- best_score
} else {
env$basket$best_iteration <- best_iteration
env$basket$best_ntreelimit <- best_ntreelimit
}
}
callback <- function(env = parent.frame(), finalize = FALSE) {
if (best_iteration < 0)
init(env)
if (finalize)
return(finalizer(env))
i <- env$iteration
score <- env$bst_evaluation[metric_idx]
if ((maximize && score > best_score) ||
(!maximize && score < best_score)) {
best_msg <<- .format_eval_string(
i, env$bst_evaluation, env$bst_evaluation_err
)
best_score <<- score
best_iteration <<- i
best_ntreelimit <<- best_iteration * env$num_parallel_tree
# save the property to attributes, so they will occur in checkpoint
if (!is.null(env$bst)) {
xgb.attributes(env$bst) <- list(
best_iteration = best_iteration - 1, # convert to 0-based index
best_score = best_score,
best_msg = best_msg,
best_ntreelimit = best_ntreelimit)
}
} else if (i - best_iteration >= stopping_rounds) {
env$stop_condition <- TRUE
env$end_iteration <- i
if (verbose && NVL(env$rank, 0) == 0)
cat("Stopping. Best iteration:\n", best_msg, "\n\n", sep = '')
}
}
attr(callback, 'call') <- match.call()
attr(callback, 'name') <- 'cb.early.stop'
callback
}
#' Callback closure for saving a model file.
#'
#' @param save_period save the model to disk after every
#' \code{save_period} iterations; 0 means save the model at the end.
#' @param save_name the name or path for the saved model file.
#' It can contain a \code{\link[base]{sprintf}} formatting specifier
#' to include the integer iteration number in the file name.
#' E.g., with \code{save_name} = 'xgboost_%04d.model',
#' the file saved at iteration 50 would be named "xgboost_0050.model".
#'
#' @details
#' This callback function allows saving an xgb-model file, either periodically after every \code{save_period} iterations or at the end.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{bst},
#' \code{iteration},
#' \code{begin_iteration},
#' \code{end_iteration}.
#'
#' @seealso
#' \code{\link{callbacks}}
#'
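#' @examples
#' # A minimal sketch (not run): save a model checkpoint every 10 iterations.
#' # 'params' and 'dtrain' are assumed to be a previously prepared parameter list
#' # and xgb.DMatrix.
#' \dontrun{
#' bst <- xgb.train(params, dtrain, nrounds = 100, watchlist = list(train = dtrain),
#'                  callbacks = list(cb.save.model(save_period = 10,
#'                                                 save_name = "xgboost_%04d.model")))
#' }
#'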
#' @export
cb.save.model <- function(save_period = 0, save_name = "xgboost.model") {
if (save_period < 0)
stop("'save_period' cannot be negative")
callback <- function(env = parent.frame()) {
if (is.null(env$bst))
stop("'save_model' callback requires the 'bst' booster object in its calling frame")
if ((save_period > 0 && (env$iteration - env$begin_iteration) %% save_period == 0) ||
(save_period == 0 && env$iteration == env$end_iteration))
xgb.save(env$bst, sprintf(save_name, env$iteration))
}
attr(callback, 'call') <- match.call()
attr(callback, 'name') <- 'cb.save.model'
callback
}
#' Callback closure for returning cross-validation based predictions.
#'
#' @param save_models a flag for whether to save the folds' models.
#'
#' @details
#' This callback function saves predictions for all of the test folds,
#' and also allows to save the folds' models.
#'
#' It is a "finalizer" callback and it uses early stopping information whenever it is available,
#' thus it must be run after the early stopping callback if the early stopping is used.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{bst_folds},
#' \code{basket},
#' \code{data},
#' \code{end_iteration},
#' \code{params},
#' \code{num_parallel_tree},
#' \code{num_class}.
#'
#' @return
#' Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix,
#' depending on the number of prediction outputs per data row. The order of predictions corresponds
#' to the order of rows in the original dataset. Note that when a custom \code{folds} list is
#' provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a
#' non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be
#' meaningful when user-provided folds have overlapping indices as in, e.g., random sampling splits.
#' When some of the indices in the training dataset are not included into user-provided \code{folds},
#' their prediction value would be \code{NA}.
#'
#' @seealso
#' \code{\link{callbacks}}
#'
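#' @examples
#' # A minimal sketch (not run): collect out-of-fold predictions from a 5-fold CV.
#' # 'params' and 'dtrain' are assumed to be a previously prepared parameter list
#' # and xgb.DMatrix.
#' \dontrun{
#' cv <- xgb.cv(params, dtrain, nrounds = 20, nfold = 5,
#'              callbacks = list(cb.cv.predict(save_models = TRUE)))
#' str(cv$pred)
#' }
#'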
#' @export
cb.cv.predict <- function(save_models = FALSE) {
finalizer <- function(env) {
if (is.null(env$basket) || is.null(env$bst_folds))
stop("'cb.cv.predict' callback requires 'basket' and 'bst_folds' lists in its calling frame")
N <- nrow(env$data)
pred <-
if (env$num_class > 1) {
matrix(NA_real_, N, env$num_class)
} else {
rep(NA_real_, N)
}
iterationrange <- c(1, NVL(env$basket$best_iteration, env$end_iteration) + 1)
if (NVL(env$params[['booster']], '') == 'gblinear') {
iterationrange <- c(1, 1) # must be 0 for gblinear
}
for (fd in env$bst_folds) {
pr <- predict(fd$bst, fd$watchlist[[2]], iterationrange = iterationrange, reshape = TRUE)
if (is.matrix(pred)) {
pred[fd$index, ] <- pr
} else {
pred[fd$index] <- pr
}
}
env$basket$pred <- pred
if (save_models) {
env$basket$models <- lapply(env$bst_folds, function(fd) {
xgb.attr(fd$bst, 'niter') <- env$end_iteration - 1
xgb.Booster.complete(xgb.handleToBooster(fd$bst), saveraw = TRUE)
})
}
}
callback <- function(env = parent.frame(), finalize = FALSE) {
if (finalize)
return(finalizer(env))
}
attr(callback, 'call') <- match.call()
attr(callback, 'name') <- 'cb.cv.predict'
callback
}
#' Callback closure for collecting the model coefficients history of a gblinear booster
#' during its training.
#'
#' @param sparse when set to FALSE/TRUE, a dense/sparse matrix is used to store the result.
#' Sparse format is useful when one expects only a subset of coefficients to be non-zero,
#' when using the "thrifty" feature selector with a fairly small number of top features
#' selected per iteration.
#'
#' @details
#' To keep things fast and simple, gblinear booster does not internally store the history of linear
#' model coefficients at each boosting iteration. This callback provides a workaround for storing
#' the coefficients' path, by extracting them after each training iteration.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{bst} (or \code{bst_folds}).
#'
#' @return
#' Results are stored in the \code{coefs} element of the closure.
#' The \code{\link{xgb.gblinear.history}} convenience function provides an easy
#' way to access it.
#' With \code{xgb.train}, it is either a dense or a sparse matrix.
#' While with \code{xgb.cv}, it is a list (an element per each fold) of such
#' matrices.
#'
#' @seealso
#' \code{\link{callbacks}}, \code{\link{xgb.gblinear.history}}.
#'
#' @examples
#' #### Binary classification:
#'
#' ## Keep the number of threads to 1 for examples
#' nthread <- 1
#' data.table::setDTthreads(nthread)
#'
#' # In the iris dataset, it is hard to linearly separate Versicolor class from the rest
#' # without considering the 2nd order interactions:
#' x <- model.matrix(Species ~ .^2, iris)[,-1]
#' colnames(x)
#' dtrain <- xgb.DMatrix(scale(x), label = 1*(iris$Species == "versicolor"), nthread = nthread)
#' param <- list(booster = "gblinear", objective = "reg:logistic", eval_metric = "auc",
#' lambda = 0.0003, alpha = 0.0003, nthread = nthread)
#' # For 'shotgun', which is a default linear updater, using high eta values may result in
#' # unstable behaviour in some datasets. With this simple dataset, however, the high learning
#' # rate does not break the convergence, but allows us to illustrate the typical pattern of
#' # "stochastic explosion" behaviour of this lock-free algorithm at early boosting iterations.
#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 200, eta = 1.,
#' callbacks = list(cb.gblinear.history()))
#' # Extract the coefficients' path and plot them vs boosting iteration number:
#' coef_path <- xgb.gblinear.history(bst)
#' matplot(coef_path, type = 'l')
#'
#' # With the deterministic coordinate descent updater, it is safer to use higher learning rates.
#' # Will try the classical componentwise boosting which selects a single best feature per round:
#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 200, eta = 0.8,
#' updater = 'coord_descent', feature_selector = 'thrifty', top_k = 1,
#' callbacks = list(cb.gblinear.history()))
#' matplot(xgb.gblinear.history(bst), type = 'l')
#' # Componentwise boosting is known to have similar effect to Lasso regularization.
#' # Try experimenting with various values of top_k, eta, nrounds,
#' # as well as different feature_selectors.
#'
#' # For xgb.cv:
#' bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 100, eta = 0.8,
#' callbacks = list(cb.gblinear.history()))
#' # coefficients in the CV fold #3
#' matplot(xgb.gblinear.history(bst)[[3]], type = 'l')
#'
#'
#' #### Multiclass classification:
#' #
#' dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1, nthread = nthread)
#' param <- list(booster = "gblinear", objective = "multi:softprob", num_class = 3,
#' lambda = 0.0003, alpha = 0.0003, nthread = nthread)
#' # For the default linear updater 'shotgun' it sometimes is helpful
#' # to use smaller eta to reduce instability
#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 70, eta = 0.5,
#' callbacks = list(cb.gblinear.history()))
#' # Will plot the coefficient paths separately for each class:
#' matplot(xgb.gblinear.history(bst, class_index = 0), type = 'l')
#' matplot(xgb.gblinear.history(bst, class_index = 1), type = 'l')
#' matplot(xgb.gblinear.history(bst, class_index = 2), type = 'l')
#'
#' # CV:
#' bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 70, eta = 0.5,
#' callbacks = list(cb.gblinear.history(FALSE)))
#' # 1st fold of 1st class
#' matplot(xgb.gblinear.history(bst, class_index = 0)[[1]], type = 'l')
#'
#' @export
cb.gblinear.history <- function(sparse=FALSE) {
coefs <- NULL
init <- function(env) {
if (!is.null(env$bst)) { # xgb.train:
} else if (!is.null(env$bst_folds)) { # xgb.cv:
} else stop("Parent frame has neither 'bst' nor 'bst_folds'")
}
# convert from list to (sparse) matrix
list2mat <- function(coef_list) {
if (sparse) {
coef_mat <- sparseMatrix(x = unlist(lapply(coef_list, slot, "x")),
i = unlist(lapply(coef_list, slot, "i")),
p = c(0, cumsum(sapply(coef_list, function(x) length(x@x)))),
dims = c(length(coef_list[[1]]), length(coef_list)))
return(t(coef_mat))
} else {
return(do.call(rbind, coef_list))
}
}
finalizer <- function(env) {
if (length(coefs) == 0)
return()
if (!is.null(env$bst)) { # # xgb.train:
coefs <<- list2mat(coefs)
} else { # xgb.cv:
# second lapply transposes the list
coefs <<- lapply(
X = lapply(
X = seq_along(coefs[[1]]),
FUN = function(i) lapply(coefs, "[[", i)
),
FUN = list2mat
)
}
}
extract.coef <- function(env) {
if (!is.null(env$bst)) { # # xgb.train:
cf <- as.numeric(grep('(booster|bias|weigh)', xgb.dump(env$bst), invert = TRUE, value = TRUE))
if (sparse) cf <- as(cf, "sparseVector")
} else { # xgb.cv:
cf <- vector("list", length(env$bst_folds))
for (i in seq_along(env$bst_folds)) {
dmp <- xgb.dump(xgb.handleToBooster(env$bst_folds[[i]]$bst))
cf[[i]] <- as.numeric(grep('(booster|bias|weigh)', dmp, invert = TRUE, value = TRUE))
if (sparse) cf[[i]] <- as(cf[[i]], "sparseVector")
}
}
cf
}
callback <- function(env = parent.frame(), finalize = FALSE) {
if (is.null(coefs)) init(env)
if (finalize) return(finalizer(env))
cf <- extract.coef(env)
coefs <<- c(coefs, list(cf))
}
attr(callback, 'call') <- match.call()
attr(callback, 'name') <- 'cb.gblinear.history'
callback
}
#' Extract gblinear coefficients history.
#'
#' A helper function to extract the matrix of linear coefficients' history
#' from a gblinear model created while using the \code{cb.gblinear.history()}
#' callback.
#'
#' @param model either an \code{xgb.Booster} or a result of \code{xgb.cv()}, trained
#' using the \code{cb.gblinear.history()} callback.
#' @param class_index zero-based class index to extract the coefficients for only that
#' specific class in a multinomial multiclass model. When it is NULL, all the
#' coefficients are returned. Has no effect in non-multiclass models.
#'
#' @return
#' For an \code{xgb.train} result, a matrix (either dense or sparse) with the columns
#' corresponding to iteration's coefficients (in the order as \code{xgb.dump()} would
#' return) and the rows corresponding to boosting iterations.
#'
#' For an \code{xgb.cv} result, a list of such matrices is returned with the elements
#' corresponding to CV folds.
#'
#' @export
xgb.gblinear.history <- function(model, class_index = NULL) {
if (!(inherits(model, "xgb.Booster") ||
inherits(model, "xgb.cv.synchronous")))
stop("model must be an object of either xgb.Booster or xgb.cv.synchronous class")
is_cv <- inherits(model, "xgb.cv.synchronous")
if (is.null(model[["callbacks"]]) || is.null(model$callbacks[["cb.gblinear.history"]]))
stop("model must be trained while using the cb.gblinear.history() callback")
if (!is_cv) {
# extract num_class & num_feat from the internal model
dmp <- xgb.dump(model)
if (length(dmp) < 2 || dmp[2] != "bias:")
stop("It does not appear to be a gblinear model")
dmp <- dmp[-c(1, 2)]
n <- which(dmp == 'weight:')
if (length(n) != 1)
stop("It does not appear to be a gblinear model")
num_class <- n - 1
num_feat <- (length(dmp) - 4) / num_class
} else {
# in case of CV, the object is expected to have this info
if (model$params$booster != "gblinear")
stop("It does not appear to be a gblinear model")
num_class <- NVL(model$params$num_class, 1)
num_feat <- model$nfeatures
if (is.null(num_feat))
stop("This xgb.cv result does not have nfeatures info")
}
if (!is.null(class_index) &&
num_class > 1 &&
(class_index[1] < 0 || class_index[1] >= num_class))
stop("class_index has to be within [0,", num_class - 1, "]")
coef_path <- environment(model$callbacks$cb.gblinear.history)[["coefs"]]
if (!is.null(class_index) && num_class > 1) {
coef_path <- if (is.list(coef_path)) {
lapply(coef_path,
function(x) x[, seq(1 + class_index, by = num_class, length.out = num_feat)])
} else {
coef_path <- coef_path[, seq(1 + class_index, by = num_class, length.out = num_feat)]
}
}
coef_path
}
#
# Internal utility functions for callbacks ------------------------------------
#
# Format the evaluation metric string
.format_eval_string <- function(iter, eval_res, eval_err = NULL) {
if (length(eval_res) == 0)
stop('no evaluation results')
enames <- names(eval_res)
if (is.null(enames))
stop('evaluation results must have names')
iter <- sprintf('[%d]\t', iter)
if (!is.null(eval_err)) {
if (length(eval_res) != length(eval_err))
stop('eval_res & eval_err lengths mismatch')
res <- paste0(sprintf("%s:%f+%f", enames, eval_res, eval_err), collapse = '\t')
} else {
res <- paste0(sprintf("%s:%f", enames, eval_res), collapse = '\t')
}
return(paste0(iter, res))
}
# Extract callback names from the list of callbacks
callback.names <- function(cb_list) {
unlist(lapply(cb_list, function(x) attr(x, 'name')))
}
# Extract callback calls from the list of callbacks
callback.calls <- function(cb_list) {
unlist(lapply(cb_list, function(x) attr(x, 'call')))
}
# Add a callback cb to the list and make sure that
# cb.early.stop and cb.cv.predict are at the end of the list
# with cb.cv.predict being the last (when present)
add.cb <- function(cb_list, cb) {
cb_list <- c(cb_list, cb)
names(cb_list) <- callback.names(cb_list)
if ('cb.early.stop' %in% names(cb_list)) {
cb_list <- c(cb_list, cb_list['cb.early.stop'])
# this removes only the first one
cb_list['cb.early.stop'] <- NULL
}
if ('cb.cv.predict' %in% names(cb_list)) {
cb_list <- c(cb_list, cb_list['cb.cv.predict'])
cb_list['cb.cv.predict'] <- NULL
}
cb_list
}
# Sort callbacks list into categories
categorize.callbacks <- function(cb_list) {
list(
pre_iter = Filter(function(x) {
pre <- attr(x, 'is_pre_iteration')
!is.null(pre) && pre
}, cb_list),
post_iter = Filter(function(x) {
pre <- attr(x, 'is_pre_iteration')
is.null(pre) || !pre
}, cb_list),
finalize = Filter(function(x) {
'finalize' %in% names(formals(x))
}, cb_list)
)
}
# Check whether all callback functions with names given by 'query_names' are present in the 'cb_list'.
has.callbacks <- function(cb_list, query_names) {
if (length(cb_list) < length(query_names))
return(FALSE)
if (!is.list(cb_list) ||
any(sapply(cb_list, class) != 'function')) {
stop('`cb_list` must be a list of callback functions')
}
cb_names <- callback.names(cb_list)
if (!is.character(cb_names) ||
length(cb_names) != length(cb_list) ||
any(cb_names == "")) {
stop('All callbacks in the `cb_list` must have a non-empty `name` attribute')
}
if (!is.character(query_names) ||
length(query_names) == 0 ||
any(query_names == "")) {
stop('query_names must be a non-empty vector of non-empty character names')
}
return(all(query_names %in% cb_names))
}
| /scratch/gouwar.j/cran-all/cranData/xgboost/R/callbacks.R |
#
# This file is for the low level reusable utility functions
# that are not supposed to be visible to a user.
#
#
# General helper utilities ----------------------------------------------------
#
# SQL-style NVL shortcut.
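# For example (illustrative): NVL(NULL, 0) returns 0, and NVL(c(1, NA, 3), 0) returns c(1, 0, 3).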
NVL <- function(x, val) {
if (is.null(x))
return(val)
if (is.vector(x)) {
x[is.na(x)] <- val
return(x)
}
if (typeof(x) == 'closure')
return(x)
stop("typeof(x) == ", typeof(x), " is not supported by NVL")
}
# List of classification and ranking objectives
.CLASSIFICATION_OBJECTIVES <- function() {
return(c('binary:logistic', 'binary:logitraw', 'binary:hinge', 'multi:softmax',
'multi:softprob', 'rank:pairwise', 'rank:ndcg', 'rank:map'))
}
#
# Low-level functions for boosting --------------------------------------------
#
# Merges booster params with whatever is provided in ...
# plus runs some checks
check.booster.params <- function(params, ...) {
if (!identical(class(params), "list"))
stop("params must be a list")
# in R interface, allow for '.' instead of '_' in parameter names
names(params) <- gsub("\\.", "_", names(params))
# merge parameters from the params and the dots-expansion
dot_params <- list(...)
names(dot_params) <- gsub("\\.", "_", names(dot_params))
if (length(intersect(names(params),
names(dot_params))) > 0)
stop("Same parameters in 'params' and in the call are not allowed. Please check your 'params' list.")
params <- c(params, dot_params)
# providing a parameter multiple times makes sense only for 'eval_metric'
name_freqs <- table(names(params))
multi_names <- setdiff(names(name_freqs[name_freqs > 1]), 'eval_metric')
if (length(multi_names) > 0) {
warning("The following parameters were provided multiple times:\n\t",
paste(multi_names, collapse = ', '), "\n Only the last value for each of them will be used.\n")
# While xgboost internals would choose the last value for a multiple-times parameter,
# enforce it here in R as well (b/c multi-parameters might be used further in R code,
# and R takes the 1st value when multiple elements with the same name are present in a list).
for (n in multi_names) {
del_idx <- which(n == names(params))
del_idx <- del_idx[-length(del_idx)]
      params[del_idx] <- NULL  # single-bracket assignment removes all duplicated entries at once
}
}
# for multiclass, expect num_class to be set
if (typeof(params[['objective']]) == "character" &&
substr(NVL(params[['objective']], 'x'), 1, 6) == 'multi:' &&
as.numeric(NVL(params[['num_class']], 0)) < 2) {
stop("'num_class' > 1 parameter must be set for multiclass classification")
}
# monotone_constraints parser
if (!is.null(params[['monotone_constraints']]) &&
typeof(params[['monotone_constraints']]) != "character") {
vec2str <- paste(params[['monotone_constraints']], collapse = ',')
vec2str <- paste0('(', vec2str, ')')
params[['monotone_constraints']] <- vec2str
}
# interaction constraints parser (convert from list of column indices to string)
if (!is.null(params[['interaction_constraints']]) &&
typeof(params[['interaction_constraints']]) != "character"){
# check input class
if (!identical(class(params[['interaction_constraints']]), 'list')) stop('interaction_constraints should be class list')
if (!all(unique(sapply(params[['interaction_constraints']], class)) %in% c('numeric', 'integer'))) {
stop('interaction_constraints should be a list of numeric/integer vectors')
}
# recast parameter as string
interaction_constraints <- sapply(params[['interaction_constraints']], function(x) paste0('[', paste(x, collapse = ','), ']'))
params[['interaction_constraints']] <- paste0('[', paste(interaction_constraints, collapse = ','), ']')
}
return(params)
}
# Performs some checks related to custom objective function.
# WARNING: has side-effects and can modify 'params' and 'obj' in its calling frame
check.custom.obj <- function(env = parent.frame()) {
if (!is.null(env$params[['objective']]) && !is.null(env$obj))
stop("Setting objectives in 'params' and 'obj' at the same time is not allowed")
if (!is.null(env$obj) && typeof(env$obj) != 'closure')
stop("'obj' must be a function")
# handle the case when custom objective function was provided through params
if (!is.null(env$params[['objective']]) &&
typeof(env$params$objective) == 'closure') {
env$obj <- env$params$objective
env$params$objective <- NULL
}
}
# Performs some checks related to custom evaluation function.
# WARNING: has side-effects and can modify 'params' and 'feval' in its calling frame
check.custom.eval <- function(env = parent.frame()) {
if (!is.null(env$params[['eval_metric']]) && !is.null(env$feval))
stop("Setting evaluation metrics in 'params' and 'feval' at the same time is not allowed")
if (!is.null(env$feval) && typeof(env$feval) != 'closure')
stop("'feval' must be a function")
# handle a situation when custom eval function was provided through params
if (!is.null(env$params[['eval_metric']]) &&
typeof(env$params$eval_metric) == 'closure') {
env$feval <- env$params$eval_metric
env$params$eval_metric <- NULL
}
# require maximize to be set when custom feval and early stopping are used together
if (!is.null(env$feval) &&
is.null(env$maximize) && (
!is.null(env$early_stopping_rounds) ||
has.callbacks(env$callbacks, 'cb.early.stop')))
stop("Please set 'maximize' to indicate whether the evaluation metric needs to be maximized or not")
}
# Update a booster handle for an iteration with dtrain data
xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
if (!identical(class(booster_handle), "xgb.Booster.handle")) {
stop("booster_handle must be of xgb.Booster.handle class")
}
if (!inherits(dtrain, "xgb.DMatrix")) {
stop("dtrain must be of xgb.DMatrix class")
}
if (is.null(obj)) {
.Call(XGBoosterUpdateOneIter_R, booster_handle, as.integer(iter), dtrain)
} else {
pred <- predict(booster_handle, dtrain, outputmargin = TRUE, training = TRUE,
ntreelimit = 0)
gpair <- obj(pred, dtrain)
.Call(XGBoosterBoostOneIter_R, booster_handle, dtrain, gpair$grad, gpair$hess)
}
return(TRUE)
}
# Evaluate one iteration.
# Returns a named vector of evaluation metrics
# with the names in a 'datasetname-metricname' format.
xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {
if (!identical(class(booster_handle), "xgb.Booster.handle"))
stop("class of booster_handle must be xgb.Booster.handle")
if (length(watchlist) == 0)
return(NULL)
evnames <- names(watchlist)
if (is.null(feval)) {
msg <- .Call(XGBoosterEvalOneIter_R, booster_handle, as.integer(iter), watchlist, as.list(evnames))
mat <- matrix(strsplit(msg, '\\s+|:')[[1]][-1], nrow = 2)
res <- structure(as.numeric(mat[2, ]), names = mat[1, ])
} else {
res <- sapply(seq_along(watchlist), function(j) {
w <- watchlist[[j]]
## predict using all trees
preds <- predict(booster_handle, w, outputmargin = TRUE, iterationrange = c(1, 1))
eval_res <- feval(preds, w)
out <- eval_res$value
names(out) <- paste0(evnames[j], "-", eval_res$metric)
out
})
}
return(res)
}
#
# Helper functions for cross validation ---------------------------------------
#
# Possibly convert the labels into factors, depending on the objective.
# The labels are converted into factors only when the given objective refers to the classification
# or ranking tasks.
convert.labels <- function(labels, objective_name) {
if (objective_name %in% .CLASSIFICATION_OBJECTIVES()) {
return(as.factor(labels))
} else {
return(labels)
}
}
# Generates random (stratified if needed) CV folds
generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
# cannot do it for rank
objective <- params$objective
if (is.character(objective) && strtrim(objective, 5) == 'rank:') {
stop("\n\tAutomatic generation of CV-folds is not implemented for ranking!\n",
"\tConsider providing pre-computed CV-folds through the 'folds=' parameter.\n")
}
# shuffle
rnd_idx <- sample.int(nrows)
if (stratified &&
length(label) == length(rnd_idx)) {
y <- label[rnd_idx]
# WARNING: some heuristic logic is employed to identify classification setting!
# - For classification, need to convert y labels to factor before making the folds,
# and then do stratification by factor levels.
# - For regression, leave y numeric and do stratification by quantiles.
if (is.character(objective)) {
y <- convert.labels(y, params$objective)
} else {
# If no 'objective' given in params, it means that user either wants to
# use the default 'reg:squarederror' objective or has provided a custom
# obj function. Here, assume classification setting when y has 5 or less
# unique values:
if (length(unique(y)) <= 5) {
y <- factor(y)
}
}
folds <- xgb.createFolds(y, nfold)
} else {
# make simple non-stratified folds
kstep <- length(rnd_idx) %/% nfold
folds <- list()
for (i in seq_len(nfold - 1)) {
folds[[i]] <- rnd_idx[seq_len(kstep)]
rnd_idx <- rnd_idx[-seq_len(kstep)]
}
folds[[nfold]] <- rnd_idx
}
return(folds)
}
# Creates CV folds stratified by the values of y.
# It was borrowed from caret::createFolds and simplified
# by always returning an unnamed list of fold indices.
xgb.createFolds <- function(y, k = 10)
{
if (is.numeric(y)) {
## Group the numeric data based on their magnitudes
## and sample within those groups.
## When the number of samples is low, we may have
## issues further slicing the numeric data into
## groups. The number of groups will depend on the
## ratio of the number of folds to the sample size.
## At most, we will use quantiles. If the sample
## is too small, we just do regular unstratified
## CV
cuts <- floor(length(y) / k)
if (cuts < 2) cuts <- 2
if (cuts > 5) cuts <- 5
y <- cut(y,
unique(stats::quantile(y, probs = seq(0, 1, length = cuts))),
include.lowest = TRUE)
}
if (k < length(y)) {
## reset levels so that the possible levels and
## the levels in the vector are the same
y <- factor(as.character(y))
numInClass <- table(y)
foldVector <- vector(mode = "integer", length(y))
## For each class, balance the fold allocation as far
## as possible, then resample the remainder.
## The final assignment of folds is also randomized.
for (i in seq_along(numInClass)) {
## create a vector of integers from 1:k as many times as possible without
## going over the number of samples in the class. Note that if the number
## of samples in a class is less than k, nothing is produced here.
seqVector <- rep(seq_len(k), numInClass[i] %/% k)
## add enough random integers to get length(seqVector) == numInClass[i]
if (numInClass[i] %% k > 0) seqVector <- c(seqVector, sample.int(k, numInClass[i] %% k))
## shuffle the integers for fold assignment and assign to this classes's data
## seqVector[sample.int(length(seqVector))] is used to handle length(seqVector) == 1
foldVector[y == dimnames(numInClass)$y[i]] <- seqVector[sample.int(length(seqVector))]
}
} else {
foldVector <- seq(along = y)
}
out <- split(seq(along = y), foldVector)
names(out) <- NULL
out
}
#
# Deprecation notice utilities ------------------------------------------------
#
#' Deprecation notices.
#'
#' At this time, some of the parameter names were changed in order to make the code style more uniform.
#' The deprecated parameters would be removed in the next release.
#'
#' To see all the current deprecated and new parameters, check the \code{xgboost:::depr_par_lut} table.
#'
#' A deprecation warning is shown when any of the deprecated parameters is used in a call.
#' An additional warning is shown when there was a partial match to a deprecated parameter
#' (as R is able to partially match parameter names).
#'
#' @name xgboost-deprecated
NULL
#' Do not use \code{\link[base]{saveRDS}} or \code{\link[base]{save}} for long-term archival of
#' models. Instead, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}}.
#'
#' It is a common practice to use the built-in \code{\link[base]{saveRDS}} function (or
#' \code{\link[base]{save}}) to persist R objects to the disk. While it is possible to persist
#' \code{xgb.Booster} objects using \code{\link[base]{saveRDS}}, it is not advisable to do so if
#' the model is to be accessed in the future. If you train a model with the current version of
#' XGBoost and persist it with \code{\link[base]{saveRDS}}, the model is not guaranteed to be
#' accessible in later releases of XGBoost. To ensure that your model can be accessed in future
#' releases of XGBoost, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}} instead.
#'
#' @details
#' Use \code{\link{xgb.save}} to save the XGBoost model as a stand-alone file. You may opt into
#' the JSON format by specifying the JSON extension. To read the model back, use
#' \code{\link{xgb.load}}.
#'
#' Use \code{\link{xgb.save.raw}} to save the XGBoost model as a sequence (vector) of raw bytes
#' in a future-proof manner. Future releases of XGBoost will be able to read the raw bytes and
#' re-construct the corresponding model. To read the model back, use \code{\link{xgb.load.raw}}.
#' The \code{\link{xgb.save.raw}} function is useful if you'd like to persist the XGBoost model
#' as part of another R object.
#'
#' Note: Do not use \code{\link{xgb.serialize}} to store models long-term. It persists not only the
#' model but also internal configurations and parameters, and its format is not stable across
#' multiple XGBoost versions. Use \code{\link{xgb.serialize}} only for checkpointing.
#'
#' For more details and explanation about model persistence and archival, consult the page
#' \url{https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html}.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#'
#' # Save as a stand-alone file; load it with xgb.load()
#' xgb.save(bst, 'xgb.model')
#' bst2 <- xgb.load('xgb.model')
#'
#' # Save as a stand-alone file (JSON); load it with xgb.load()
#' xgb.save(bst, 'xgb.model.json')
#' bst2 <- xgb.load('xgb.model.json')
#' if (file.exists('xgb.model.json')) file.remove('xgb.model.json')
#'
#' # Save as a raw byte vector; load it with xgb.load.raw()
#' xgb_bytes <- xgb.save.raw(bst)
#' bst2 <- xgb.load.raw(xgb_bytes)
#'
#' # Persist XGBoost model as part of another R object
#' obj <- list(xgb_model_bytes = xgb.save.raw(bst), description = "My first XGBoost model")
#' # Persist the R object. Here, saveRDS() is okay, since it doesn't persist
#' # xgb.Booster directly. What's being persisted is the future-proof byte representation
#' # as given by xgb.save.raw().
#' saveRDS(obj, 'my_object.rds')
#' # Read back the R object
#' obj2 <- readRDS('my_object.rds')
#' # Re-construct xgb.Booster object from the bytes
#' bst2 <- xgb.load.raw(obj2$xgb_model_bytes)
#' if (file.exists('my_object.rds')) file.remove('my_object.rds')
#'
#' @name a-compatibility-note-for-saveRDS-save
NULL
# Lookup table for the deprecated parameters bookkeeping
depr_par_lut <- matrix(c(
'print.every.n', 'print_every_n',
'early.stop.round', 'early_stopping_rounds',
'training.data', 'data',
'with.stats', 'with_stats',
'numberOfClusters', 'n_clusters',
'features.keep', 'features_keep',
'plot.height', 'plot_height',
'plot.width', 'plot_width',
'n_first_tree', 'trees',
'dummy', 'DUMMY'
), ncol = 2, byrow = TRUE)
colnames(depr_par_lut) <- c('old', 'new')
# Checks the dot-parameters for deprecated names
# (including partial matching), gives a deprecation warning,
# and sets new parameters to the old parameters' values within its parent frame.
# WARNING: has side-effects
check.deprecation <- function(..., env = parent.frame()) {
pars <- list(...)
# exact and partial matches
all_match <- pmatch(names(pars), depr_par_lut[, 1])
# indices of matched pars' names
idx_pars <- which(!is.na(all_match))
if (length(idx_pars) == 0) return()
# indices of matched LUT rows
idx_lut <- all_match[idx_pars]
# which of idx_lut were the exact matches?
ex_match <- depr_par_lut[idx_lut, 1] %in% names(pars)
for (i in seq_along(idx_pars)) {
pars_par <- names(pars)[idx_pars[i]]
old_par <- depr_par_lut[idx_lut[i], 1]
new_par <- depr_par_lut[idx_lut[i], 2]
if (!ex_match[i]) {
warning("'", pars_par, "' was partially matched to '", old_par, "'")
}
.Deprecated(new_par, old = old_par, package = 'xgboost')
if (new_par != 'NULL') {
eval(parse(text = paste(new_par, '<-', pars[[pars_par]])), envir = env)
}
}
}
| /scratch/gouwar.j/cran-all/cranData/xgboost/R/utils.R |
# Construct an internal xgboost Booster and return a handle to it.
# internal utility function
xgb.Booster.handle <- function(params = list(), cachelist = list(),
modelfile = NULL, handle = NULL) {
if (typeof(cachelist) != "list" ||
!all(vapply(cachelist, inherits, logical(1), what = 'xgb.DMatrix'))) {
stop("cachelist must be a list of xgb.DMatrix objects")
}
## Load existing model, dispatch for on disk model file and in memory buffer
if (!is.null(modelfile)) {
if (typeof(modelfile) == "character") {
## A filename
handle <- .Call(XGBoosterCreate_R, cachelist)
modelfile <- path.expand(modelfile)
.Call(XGBoosterLoadModel_R, handle, modelfile[1])
class(handle) <- "xgb.Booster.handle"
if (length(params) > 0) {
xgb.parameters(handle) <- params
}
return(handle)
} else if (typeof(modelfile) == "raw") {
## A memory buffer
bst <- xgb.unserialize(modelfile, handle)
xgb.parameters(bst) <- params
return (bst)
} else if (inherits(modelfile, "xgb.Booster")) {
## A booster object
bst <- xgb.Booster.complete(modelfile, saveraw = TRUE)
bst <- xgb.unserialize(bst$raw)
xgb.parameters(bst) <- params
return (bst)
} else {
stop("modelfile must be either character filename, or raw booster dump, or xgb.Booster object")
}
}
## Create new model
handle <- .Call(XGBoosterCreate_R, cachelist)
class(handle) <- "xgb.Booster.handle"
if (length(params) > 0) {
xgb.parameters(handle) <- params
}
return(handle)
}
# Convert xgb.Booster.handle to xgb.Booster
# internal utility function
xgb.handleToBooster <- function(handle, raw = NULL) {
bst <- list(handle = handle, raw = raw)
class(bst) <- "xgb.Booster"
return(bst)
}
# Check whether xgb.Booster.handle is null
# internal utility function
is.null.handle <- function(handle) {
if (is.null(handle)) return(TRUE)
if (!identical(class(handle), "xgb.Booster.handle"))
stop("argument type must be xgb.Booster.handle")
if (.Call(XGCheckNullPtr_R, handle))
return(TRUE)
return(FALSE)
}
# Return a verified to be valid handle out of either xgb.Booster.handle or
# xgb.Booster internal utility function
xgb.get.handle <- function(object) {
if (inherits(object, "xgb.Booster")) {
handle <- object$handle
} else if (inherits(object, "xgb.Booster.handle")) {
handle <- object
} else {
stop("argument must be of either xgb.Booster or xgb.Booster.handle class")
}
if (is.null.handle(handle)) {
stop("invalid xgb.Booster.handle")
}
handle
}
#' Restore missing parts of an incomplete xgb.Booster object.
#'
#' It attempts to complete an \code{xgb.Booster} object by restoring either its missing
#' raw model memory dump (when it has no \code{raw} data but its \code{xgb.Booster.handle} is valid)
#' or its missing internal handle (when its \code{xgb.Booster.handle} is not valid
#' but it has a raw Booster memory dump).
#'
#' @param object object of class \code{xgb.Booster}
#' @param saveraw a flag indicating whether to append \code{raw} Booster memory dump data
#' when it doesn't already exist.
#'
#' @details
#'
#' While this method is primarily for internal use, it might be useful in some practical situations.
#'
#' E.g., when an \code{xgb.Booster} model is saved as an R object and then is loaded as an R object,
#' its handle (pointer) to an internal xgboost model would be invalid. The majority of xgboost methods
#' should still work for such a model object since those methods would be using
#' \code{xgb.Booster.complete} internally. However, one might find it to be more efficient to call the
#' \code{xgb.Booster.complete} function explicitly once after loading a model as an R-object.
#' That would prevent further repeated implicit reconstruction of an internal booster model.
#'
#' @return
#' An object of \code{xgb.Booster} class.
#'
#' @examples
#'
#' data(agaricus.train, package='xgboost')
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' saveRDS(bst, "xgb.model.rds")
#'
#' # Warning: The resulting RDS file is only compatible with the current XGBoost version.
#' # Refer to the section titled "a-compatibility-note-for-saveRDS-save".
#' bst1 <- readRDS("xgb.model.rds")
#' if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
#' # the handle is invalid:
#' print(bst1$handle)
#'
#' bst1 <- xgb.Booster.complete(bst1)
#' # now the handle points to a valid internal booster model:
#' print(bst1$handle)
#'
#' @export
xgb.Booster.complete <- function(object, saveraw = TRUE) {
if (!inherits(object, "xgb.Booster"))
stop("argument type must be xgb.Booster")
if (is.null.handle(object$handle)) {
object$handle <- xgb.Booster.handle(modelfile = object$raw, handle = object$handle)
} else {
if (is.null(object$raw) && saveraw) {
object$raw <- xgb.serialize(object$handle)
}
}
attrs <- xgb.attributes(object)
if (!is.null(attrs$best_ntreelimit)) {
object$best_ntreelimit <- as.integer(attrs$best_ntreelimit)
}
if (!is.null(attrs$best_iteration)) {
## Convert from 0 based back to 1 based.
object$best_iteration <- as.integer(attrs$best_iteration) + 1
}
if (!is.null(attrs$best_score)) {
object$best_score <- as.numeric(attrs$best_score)
}
if (!is.null(attrs$best_msg)) {
object$best_msg <- attrs$best_msg
}
if (!is.null(attrs$niter)) {
object$niter <- as.integer(attrs$niter)
}
return(object)
}
#' Predict method for eXtreme Gradient Boosting model
#'
#' Predicted values based on either xgboost model or model handle object.
#'
#' @param object Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}
#' @param newdata takes \code{matrix}, \code{dgCMatrix}, \code{dgRMatrix}, \code{dsparseVector},
#' local data file or \code{xgb.DMatrix}.
#'
#' For single-row predictions on sparse data, it's recommended to use CSR format. If passing
#' a sparse vector, it will take it as a row vector.
#' @param missing Missing is only used when input is dense matrix. Pick a float value that represents
#' missing values in data (e.g., sometimes 0 or some other extreme value is used).
#' @param outputmargin whether the prediction should be returned in the form of the original untransformed
#' sum of predictions from boosting iterations' results. E.g., setting \code{outputmargin=TRUE} for
#' logistic regression would result in predictions for log-odds instead of probabilities.
#' @param ntreelimit Deprecated, use \code{iterationrange} instead.
#' @param predleaf whether predict leaf index.
#' @param predcontrib whether to return feature contributions to individual predictions (see Details).
#' @param approxcontrib whether to use a fast approximation for feature contributions (see Details).
#' @param predinteraction whether to return contributions of feature interactions to individual predictions (see Details).
#' @param reshape whether to reshape the vector of predictions to a matrix form when there are several
#' prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
#' or predinteraction flags is TRUE.
#' @param training whether is the prediction result used for training. For dart booster,
#' training predicting will perform dropout.
#' @param iterationrange Specifies which layer of trees are used in prediction. For
#'        example, if a random forest is trained with 100 rounds and `iterationrange=c(1, 21)`
#'        is specified, then only the forests built during [1, 21) (half open set)
#'        rounds are used in this prediction. It's a 1-based index just like an R vector. When set
#'        to \code{c(1, 1)} XGBoost will use all trees.
#' @param strict_shape Default is \code{FALSE}. When it's set to \code{TRUE}, output
#' type and shape of prediction are invariant to model type.
#'
#' @param ... Parameters passed to \code{predict.xgb.Booster}
#'
#' @details
#'
#' Note that \code{iterationrange} would currently do nothing for predictions from gblinear,
#' since gblinear doesn't keep its boosting history.
#'
#' One possible practical application of the \code{predleaf} option is to use the model
#' as a generator of new features which capture non-linearity and interactions,
#' e.g., as implemented in \code{\link{xgb.create.features}}.
#'
#' Setting \code{predcontrib = TRUE} makes it possible to calculate contributions of each feature to
#' individual predictions. For "gblinear" booster, feature contributions are simply linear terms
#' (feature_beta * feature_value). For "gbtree" booster, feature contributions are SHAP
#' values (Lundberg 2017) that sum to the difference between the expected output
#' of the model and the current prediction (where the hessian weights are used to compute the expectations).
#' Setting \code{approxcontrib = TRUE} approximates these values following the idea explained
#' in \url{http://blog.datadive.net/interpreting-random-forests/}.
#'
#' With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
#' are computed. Note that this operation might be rather expensive in terms of compute and memory.
#' Since it quadratically depends on the number of features, it is recommended to perform selection
#' of the most important features first. See below about the format of the returned results.
#'
#' @return
#' The return type differs depending on whether \code{strict_shape} is set to \code{TRUE}. By default,
#' for regression or binary classification, it returns a vector of length \code{nrow(newdata)}.
#' For multiclass classification, either a \code{num_class * nrow(newdata)} vector or
#' a \code{(nrow(newdata), num_class)} dimension matrix is returned, depending on
#' the \code{reshape} value.
#'
#' When \code{predleaf = TRUE}, the output is a matrix object with the
#' number of columns corresponding to the number of trees.
#'
#' When \code{predcontrib = TRUE} and it is not a multiclass setting, the output is a matrix object with
#' \code{num_features + 1} columns. The last "+ 1" column in a matrix corresponds to bias.
#' For a multiclass case, a list of \code{num_class} elements is returned, where each element is
#' such a matrix. The contribution values are on the scale of untransformed margin
#' (e.g., for binary classification would mean that the contributions are log-odds deviations from bias).
#'
#' When \code{predinteraction = TRUE} and it is not a multiclass setting, the output is a 3d array with
#' dimensions \code{c(nrow, num_features + 1, num_features + 1)}. The off-diagonal (in the last two dimensions)
#' elements represent contributions of different feature interactions. The array is symmetric with respect to the last
#' two dimensions. The "+ 1" column corresponds to the bias. Summing this array along the last dimension should
#' produce practically the same result as predict with \code{predcontrib = TRUE}.
#' For a multiclass case, a list of \code{num_class} elements is returned, where each element is
#' such an array.
#'
#' When \code{strict_shape} is set to \code{TRUE}, the output is always an array. For
#' normal prediction, the output is a two-dimensional array \code{(num_class, nrow(newdata))}.
#'
#' For \code{predcontrib = TRUE}, the output is \code{(ncol(newdata) + 1, num_class, nrow(newdata))}.
#' For \code{predinteraction = TRUE}, the output is \code{(ncol(newdata) + 1, ncol(newdata) + 1, num_class, nrow(newdata))}.
#' For \code{predleaf = TRUE}, the output is \code{(n_trees_in_forest, num_class, n_iterations, nrow(newdata))}.
#'
#' @seealso
#' \code{\link{xgb.train}}.
#'
#' @references
#'
#' Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions", NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874}
#'
#' Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles", \url{https://arxiv.org/abs/1706.06060}
#'
#' @examples
#' ## binary classification:
#'
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#'
#' ## Keep the number of threads to 2 for examples
#' nthread <- 2
#' data.table::setDTthreads(nthread)
#'
#' train <- agaricus.train
#' test <- agaricus.test
#'
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 0.5, nthread = nthread, nrounds = 5, objective = "binary:logistic")
#' # use all trees by default
#' pred <- predict(bst, test$data)
#' # use only the 1st tree
#' pred1 <- predict(bst, test$data, iterationrange = c(1, 2))
#'
#' # Predicting tree leafs:
#' # the result is an nsamples X ntrees matrix
#' pred_leaf <- predict(bst, test$data, predleaf = TRUE)
#' str(pred_leaf)
#'
#' # Predicting feature contributions to predictions:
#' # the result is an nsamples X (nfeatures + 1) matrix
#' pred_contr <- predict(bst, test$data, predcontrib = TRUE)
#' str(pred_contr)
#' # verify that contributions' sums are equal to log-odds of predictions (up to float precision):
#' summary(rowSums(pred_contr) - qlogis(pred))
#' # for the 1st record, let's inspect its features that had non-zero contribution to prediction:
#' contr1 <- pred_contr[1,]
#' contr1 <- contr1[-length(contr1)] # drop BIAS
#' contr1 <- contr1[contr1 != 0] # drop non-contributing features
#' contr1 <- contr1[order(abs(contr1))] # order by contribution magnitude
#' old_mar <- par("mar")
#' par(mar = old_mar + c(0,7,0,0))
#' barplot(contr1, horiz = TRUE, las = 2, xlab = "contribution to prediction in log-odds")
#' par(mar = old_mar)
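#'
#' # Predicting SHAP interaction contributions for a few observations; this is a minimal
#' # illustrative sketch: the result is an nsamples X (nfeatures + 1) X (nfeatures + 1) array.
#' pred_int <- predict(bst, test$data[1:10, ], predinteraction = TRUE)
#' # summing over the last dimension approximately recovers the per-feature contributions:
#' summary(as.vector(apply(pred_int, c(1, 2), sum) - pred_contr[1:10, ]))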
#'
#'
#' ## multiclass classification in iris dataset:
#'
#' lb <- as.numeric(iris$Species) - 1
#' num_class <- 3
#' set.seed(11)
#' bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
#' max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5,
#' objective = "multi:softprob", num_class = num_class)
#' # predict for softmax returns num_class probability numbers per case:
#' pred <- predict(bst, as.matrix(iris[, -5]))
#' str(pred)
#' # reshape it to a num_class-columns matrix
#' pred <- matrix(pred, ncol=num_class, byrow=TRUE)
#' # convert the probabilities to softmax labels
#' pred_labels <- max.col(pred) - 1
#' # the following should result in the same error as seen in the last iteration
#' sum(pred_labels != lb)/length(lb)
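#'
#' # a minimal sketch of strict_shape: predictions are returned as a
#' # (num_class x nrow(newdata)) array rather than a flat vector:
#' pred_arr <- predict(bst, as.matrix(iris[, -5]), strict_shape = TRUE)
#' str(pred_arr)
#' all.equal(t(pred_arr), pred, check.attributes = FALSE)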
#'
#' # compare that to the predictions from softmax:
#' set.seed(11)
#' bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
#' max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5,
#' objective = "multi:softmax", num_class = num_class)
#' pred <- predict(bst, as.matrix(iris[, -5]))
#' str(pred)
#' all.equal(pred, pred_labels)
#' # prediction from using only 5 iterations should result
#' # in the same error as seen in iteration 5:
#' pred5 <- predict(bst, as.matrix(iris[, -5]), iterationrange=c(1, 6))
#' sum(pred5 != lb)/length(lb)
#'
#' @rdname predict.xgb.Booster
#' @export
predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FALSE, ntreelimit = NULL,
predleaf = FALSE, predcontrib = FALSE, approxcontrib = FALSE, predinteraction = FALSE,
reshape = FALSE, training = FALSE, iterationrange = NULL, strict_shape = FALSE, ...) {
object <- xgb.Booster.complete(object, saveraw = FALSE)
if (!inherits(newdata, "xgb.DMatrix")) {
config <- jsonlite::fromJSON(xgb.config(object))
nthread <- strtoi(config$learner$generic_param$nthread)
newdata <- xgb.DMatrix(
newdata,
missing = missing, nthread = NVL(nthread, -1)
)
}
if (!is.null(object[["feature_names"]]) &&
!is.null(colnames(newdata)) &&
!identical(object[["feature_names"]], colnames(newdata)))
stop("Feature names stored in `object` and `newdata` are different!")
if (NVL(object$params[['booster']], '') == 'gblinear' || is.null(ntreelimit))
ntreelimit <- 0
if (ntreelimit != 0 && is.null(iterationrange)) {
## only ntreelimit, initialize iteration range
iterationrange <- c(0, 0)
} else if (ntreelimit == 0 && !is.null(iterationrange)) {
## only iteration range, handle 1-based indexing
iterationrange <- c(iterationrange[1] - 1, iterationrange[2] - 1)
} else if (ntreelimit != 0 && !is.null(iterationrange)) {
    ## both are specified, let libxgboost throw an error
} else {
## no limit is supplied, use best
if (is.null(object$best_iteration)) {
iterationrange <- c(0, 0)
} else {
## We don't need to + 1 as R is 1-based index.
iterationrange <- c(0, as.integer(object$best_iteration))
}
}
## Handle the 0 length values.
box <- function(val) {
if (length(val) == 0) {
cval <- vector(, 1)
cval[0] <- val
return(cval)
}
return (val)
}
## We set strict_shape to TRUE then drop the dimensions conditionally
args <- list(
training = box(training),
strict_shape = box(TRUE),
iteration_begin = box(as.integer(iterationrange[1])),
iteration_end = box(as.integer(iterationrange[2])),
ntree_limit = box(as.integer(ntreelimit)),
type = box(as.integer(0))
)
set_type <- function(type) {
if (args$type != 0) {
stop("One type of prediction at a time.")
}
return(box(as.integer(type)))
}
if (outputmargin) {
args$type <- set_type(1)
}
if (predcontrib) {
args$type <- set_type(if (approxcontrib) 3 else 2)
}
if (predinteraction) {
args$type <- set_type(if (approxcontrib) 5 else 4)
}
if (predleaf) {
args$type <- set_type(6)
}
predts <- .Call(
XGBoosterPredictFromDMatrix_R, object$handle, newdata, jsonlite::toJSON(args, auto_unbox = TRUE)
)
names(predts) <- c("shape", "results")
shape <- predts$shape
ret <- predts$results
n_ret <- length(ret)
n_row <- nrow(newdata)
if (n_row != shape[1]) {
stop("Incorrect predict shape.")
}
arr <- array(data = ret, dim = rev(shape))
cnames <- if (!is.null(colnames(newdata))) c(colnames(newdata), "BIAS") else NULL
n_groups <- shape[2]
## Needed regardless of whether strict shape is being used.
if (predcontrib) {
dimnames(arr) <- list(cnames, NULL, NULL)
} else if (predinteraction) {
dimnames(arr) <- list(cnames, cnames, NULL, NULL)
}
if (strict_shape) {
return(arr) # strict shape is calculated by libxgboost uniformly.
}
if (predleaf) {
## Predict leaf
arr <- if (n_ret == n_row) {
matrix(arr, ncol = 1)
} else {
matrix(arr, nrow = n_row, byrow = TRUE)
}
} else if (predcontrib) {
## Predict contribution
arr <- aperm(a = arr, perm = c(2, 3, 1)) # [group, row, col]
arr <- if (n_ret == n_row) {
matrix(arr, ncol = 1, dimnames = list(NULL, cnames))
} else if (n_groups != 1) {
## turns array into list of matrices
lapply(seq_len(n_groups), function(g) arr[g, , ])
} else {
## remove the first axis (group)
dn <- dimnames(arr)
matrix(arr[1, , ], nrow = dim(arr)[2], ncol = dim(arr)[3], dimnames = c(dn[2], dn[3]))
}
} else if (predinteraction) {
## Predict interaction
arr <- aperm(a = arr, perm = c(3, 4, 1, 2)) # [group, row, col, col]
arr <- if (n_ret == n_row) {
matrix(arr, ncol = 1, dimnames = list(NULL, cnames))
} else if (n_groups != 1) {
## turns array into list of matrices
lapply(seq_len(n_groups), function(g) arr[g, , , ])
} else {
## remove the first axis (group)
arr <- arr[1, , , , drop = FALSE]
array(arr, dim = dim(arr)[2:4], dimnames(arr)[2:4])
}
} else {
## Normal prediction
arr <- if (reshape && n_groups != 1) {
matrix(arr, ncol = n_groups, byrow = TRUE)
} else {
as.vector(ret)
}
}
return(arr)
}
#' @rdname predict.xgb.Booster
#' @export
predict.xgb.Booster.handle <- function(object, ...) {
bst <- xgb.handleToBooster(object)
ret <- predict(bst, ...)
return(ret)
}
#' Accessors for serializable attributes of a model.
#'
#' These methods allow to manipulate the key-value attribute strings of an xgboost model.
#'
#' @param object Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}.
#' @param name a non-empty character string specifying which attribute is to be accessed.
#' @param value a value of an attribute for \code{xgb.attr<-}; for \code{xgb.attributes<-}
#' it's a list (or an object coercible to a list) with the names of attributes to set
#' and the elements corresponding to attribute values.
#' Non-character values are converted to character.
#'        When an attribute value is not a scalar, only its first element is used.
#' Use \code{NULL} to remove an attribute.
#'
#' @details
#' The primary purpose of xgboost model attributes is to store some meta-data about the model.
#' Note that they are a separate concept from the object attributes in R.
#' Specifically, they refer to key-value strings that can be attached to an xgboost model,
#' stored together with the model's binary representation, and accessed later
#' (from R or any other interface).
#' In contrast, any R-attribute assigned to an R-object of \code{xgb.Booster} class
#' would not be saved by \code{xgb.save} because an xgboost model is an external memory object
#' and its serialization is handled externally.
#' Also, setting an attribute that has the same name as one of xgboost's parameters wouldn't
#' change the value of that parameter for a model.
#' Use \code{\link{xgb.parameters<-}} to set or change model parameters.
#'
#' The attribute setters would usually work more efficiently for \code{xgb.Booster.handle}
#' than for \code{xgb.Booster}, since only a handle (pointer) would need to be copied.
#' That would only matter if attributes need to be set many times.
#' Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters,
#' the raw model cache of an \code{xgb.Booster} object would not be automatically updated,
#' and it would be user's responsibility to call \code{xgb.serialize} to update it.
#'
#' The \code{xgb.attributes<-} setter either updates the existing or adds one or several attributes,
#' but it doesn't delete the other existing attributes.
#'
#' @return
#' \code{xgb.attr} returns either a string value of an attribute
#' or \code{NULL} if an attribute wasn't stored in a model.
#'
#' \code{xgb.attributes} returns a list of all attributes stored in a model
#' or \code{NULL} if a model has no stored attributes.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#'
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#'
#' xgb.attr(bst, "my_attribute") <- "my attribute value"
#' print(xgb.attr(bst, "my_attribute"))
#' xgb.attributes(bst) <- list(a = 123, b = "abc")
#'
#' xgb.save(bst, 'xgb.model')
#' bst1 <- xgb.load('xgb.model')
#' if (file.exists('xgb.model')) file.remove('xgb.model')
#' print(xgb.attr(bst1, "my_attribute"))
#' print(xgb.attributes(bst1))
#'
#' # deletion:
#' xgb.attr(bst1, "my_attribute") <- NULL
#' print(xgb.attributes(bst1))
#' xgb.attributes(bst1) <- list(a = NULL, b = NULL)
#' print(xgb.attributes(bst1))
#'
#' @rdname xgb.attr
#' @export
xgb.attr <- function(object, name) {
if (is.null(name) || nchar(as.character(name[1])) == 0) stop("invalid attribute name")
handle <- xgb.get.handle(object)
.Call(XGBoosterGetAttr_R, handle, as.character(name[1]))
}
#' @rdname xgb.attr
#' @export
`xgb.attr<-` <- function(object, name, value) {
if (is.null(name) || nchar(as.character(name[1])) == 0) stop("invalid attribute name")
handle <- xgb.get.handle(object)
if (!is.null(value)) {
# Coerce the elements to be scalar strings.
# Q: should we warn user about non-scalar elements?
if (is.numeric(value[1])) {
value <- format(value[1], digits = 17)
} else {
value <- as.character(value[1])
}
}
.Call(XGBoosterSetAttr_R, handle, as.character(name[1]), value)
if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
object$raw <- xgb.serialize(object$handle)
}
object
}
#' @rdname xgb.attr
#' @export
xgb.attributes <- function(object) {
handle <- xgb.get.handle(object)
attr_names <- .Call(XGBoosterGetAttrNames_R, handle)
if (is.null(attr_names)) return(NULL)
res <- lapply(attr_names, function(x) {
.Call(XGBoosterGetAttr_R, handle, x)
})
names(res) <- attr_names
res
}
#' @rdname xgb.attr
#' @export
`xgb.attributes<-` <- function(object, value) {
a <- as.list(value)
if (is.null(names(a)) || any(nchar(names(a)) == 0)) {
stop("attribute names cannot be empty strings")
}
# Coerce the elements to be scalar strings.
# Q: should we warn a user about non-scalar elements?
a <- lapply(a, function(x) {
if (is.null(x)) return(NULL)
if (is.numeric(x[1])) {
format(x[1], digits = 17)
} else {
as.character(x[1])
}
})
handle <- xgb.get.handle(object)
for (i in seq_along(a)) {
.Call(XGBoosterSetAttr_R, handle, names(a[i]), a[[i]])
}
if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
object$raw <- xgb.serialize(object$handle)
}
object
}
#' Accessors for model parameters as JSON string.
#'
#' @param object Object of class \code{xgb.Booster}
#' @param value A JSON string.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' ## Keep the number of threads to 1 for examples
#' nthread <- 1
#' data.table::setDTthreads(nthread)
#' train <- agaricus.train
#'
#' bst <- xgboost(
#' data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = nthread, nrounds = 2, objective = "binary:logistic"
#' )
#' config <- xgb.config(bst)
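#' # a minimal sketch of restoring the parameters from a previously saved JSON configuration:
#' xgb.config(bst) <- config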
#'
#' @rdname xgb.config
#' @export
xgb.config <- function(object) {
handle <- xgb.get.handle(object)
  .Call(XGBoosterSaveJsonConfig_R, handle)
}
#' @rdname xgb.config
#' @export
`xgb.config<-` <- function(object, value) {
handle <- xgb.get.handle(object)
.Call(XGBoosterLoadJsonConfig_R, handle, value)
object$raw <- NULL # force renew the raw buffer
object <- xgb.Booster.complete(object)
object
}
#' Accessors for model parameters.
#'
#' Only the setter for xgboost parameters is currently implemented.
#'
#' @param object Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}.
#' @param value a list (or an object coercible to a list) with the names of parameters to set
#' and the elements corresponding to parameter values.
#'
#' @details
#' Note that the setter would usually work more efficiently for \code{xgb.Booster.handle}
#' than for \code{xgb.Booster}, since only a handle would need to be copied.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#'
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#'
#' xgb.parameters(bst) <- list(eta = 0.1)
#'
#' @rdname xgb.parameters
#' @export
`xgb.parameters<-` <- function(object, value) {
if (length(value) == 0) return(object)
p <- as.list(value)
if (is.null(names(p)) || any(nchar(names(p)) == 0)) {
stop("parameter names cannot be empty strings")
}
names(p) <- gsub("\\.", "_", names(p))
p <- lapply(p, function(x) as.character(x)[1])
handle <- xgb.get.handle(object)
for (i in seq_along(p)) {
.Call(XGBoosterSetParam_R, handle, names(p[i]), p[[i]])
}
if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
object$raw <- xgb.serialize(object$handle)
}
object
}
# Extract the number of trees in a model.
# TODO: either add a getter to C-interface, or simply set an 'ntree' attribute after each iteration.
# internal utility function
xgb.ntree <- function(bst) {
length(grep('^booster', xgb.dump(bst)))
}
#' Print xgb.Booster
#'
#' Print information about xgb.Booster.
#'
#' @param x an xgb.Booster object
#' @param verbose whether to print detailed data (e.g., attribute values)
#' @param ... not currently used
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' attr(bst, 'myattr') <- 'memo'
#'
#' print(bst)
#' print(bst, verbose=TRUE)
#'
#' @method print xgb.Booster
#' @export
print.xgb.Booster <- function(x, verbose = FALSE, ...) {
cat('##### xgb.Booster\n')
valid_handle <- !is.null.handle(x$handle)
if (!valid_handle)
cat("Handle is invalid! Suggest using xgb.Booster.complete\n")
cat('raw: ')
if (!is.null(x$raw)) {
cat(format(object.size(x$raw), units = "auto"), '\n')
} else {
cat('NULL\n')
}
if (!is.null(x$call)) {
cat('call:\n ')
print(x$call)
}
if (!is.null(x$params)) {
cat('params (as set within xgb.train):\n')
cat(' ',
paste(names(x$params),
paste0('"', unlist(x$params), '"'),
sep = ' = ', collapse = ', '), '\n', sep = '')
}
# TODO: need an interface to access all the xgboosts parameters
attrs <- character(0)
if (valid_handle)
attrs <- xgb.attributes(x)
if (length(attrs) > 0) {
cat('xgb.attributes:\n')
if (verbose) {
cat(paste(paste0(' ', names(attrs)),
paste0('"', unlist(attrs), '"'),
sep = ' = ', collapse = '\n'), '\n', sep = '')
} else {
cat(' ', paste(names(attrs), collapse = ', '), '\n', sep = '')
}
}
if (!is.null(x$callbacks) && length(x$callbacks) > 0) {
cat('callbacks:\n')
lapply(callback.calls(x$callbacks), function(x) {
cat(' ')
print(x)
})
}
if (!is.null(x$feature_names))
cat('# of features:', length(x$feature_names), '\n')
cat('niter: ', x$niter, '\n', sep = '')
# TODO: uncomment when faster xgb.ntree is implemented
#cat('ntree: ', xgb.ntree(x), '\n', sep='')
for (n in setdiff(names(x), c('handle', 'raw', 'call', 'params', 'callbacks',
'evaluation_log', 'niter', 'feature_names'))) {
if (is.atomic(x[[n]])) {
cat(n, ':', x[[n]], '\n', sep = ' ')
} else {
cat(n, ':\n\t', sep = ' ')
print(x[[n]])
}
}
if (!is.null(x$evaluation_log)) {
cat('evaluation_log:\n')
print(x$evaluation_log, row.names = FALSE, topn = 2)
}
invisible(x)
}
| /scratch/gouwar.j/cran-all/cranData/xgboost/R/xgb.Booster.R |
#' Construct xgb.DMatrix object
#'
#' Construct xgb.DMatrix object from either a dense matrix, a sparse matrix, or a local file.
#' Supported input file formats are either a LIBSVM text file or a binary file that was created previously by
#' \code{\link{xgb.DMatrix.save}}.
#'
#' @param data a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object,
#' a \code{dgRMatrix} object (only when making predictions from a fitted model),
#' a \code{dsparseVector} object (only when making predictions from a fitted model, will be
#' interpreted as a row vector), or a character string representing a filename.
#' @param info a named list of additional information to store in the \code{xgb.DMatrix} object.
#'        See \code{\link{setinfo}} for the specific allowed kinds of information.
#' @param missing a float value to represent missing values in data (used only when input is a dense matrix).
#' It is useful when a 0 or some other extreme value represents missing values in data.
#' @param silent whether to suppress printing an informational message after loading from a file.
#' @param nthread Number of threads used for creating DMatrix.
#' @param ... the \code{info} data could be passed directly as parameters, without creating an \code{info} list.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' ## Keep the number of threads to 1 for examples
#' nthread <- 1
#' data.table::setDTthreads(nthread)
#' dtrain <- with(
#' agaricus.train, xgb.DMatrix(data, label = label, nthread = nthread)
#' )
#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
#' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
#' if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
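#'
#' # a minimal sketch of passing additional info fields directly at construction time
#' # (here: per-observation weights, all equal to 1 purely for illustration):
#' dtrain <- with(
#'   agaricus.train,
#'   xgb.DMatrix(data, label = label, weight = rep(1, length(label)), nthread = nthread)
#' )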
#' @export
xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, nthread = NULL, ...) {
cnames <- NULL
if (typeof(data) == "character") {
if (length(data) > 1)
stop("'data' has class 'character' and length ", length(data),
".\n 'data' accepts either a numeric matrix or a single filename.")
data <- path.expand(data)
handle <- .Call(XGDMatrixCreateFromFile_R, data, as.integer(silent))
} else if (is.matrix(data)) {
handle <- .Call(XGDMatrixCreateFromMat_R, data, missing, as.integer(NVL(nthread, -1)))
cnames <- colnames(data)
} else if (inherits(data, "dgCMatrix")) {
handle <- .Call(
XGDMatrixCreateFromCSC_R, data@p, data@i, data@x, nrow(data), as.integer(NVL(nthread, -1))
)
cnames <- colnames(data)
} else if (inherits(data, "dgRMatrix")) {
handle <- .Call(
XGDMatrixCreateFromCSR_R, data@p, data@j, data@x, ncol(data), as.integer(NVL(nthread, -1))
)
cnames <- colnames(data)
} else if (inherits(data, "dsparseVector")) {
indptr <- c(0L, as.integer(length(data@i)))
ind <- as.integer(data@i) - 1L
handle <- .Call(
XGDMatrixCreateFromCSR_R, indptr, ind, data@x, length(data), as.integer(NVL(nthread, -1))
)
} else {
stop("xgb.DMatrix does not support construction from ", typeof(data))
}
dmat <- handle
attributes(dmat) <- list(class = "xgb.DMatrix")
if (!is.null(cnames)) {
setinfo(dmat, "feature_name", cnames)
}
info <- append(info, list(...))
for (i in seq_along(info)) {
p <- info[i]
setinfo(dmat, names(p), p[[1]])
}
return(dmat)
}
# get dmatrix from data, label
# internal helper method
xgb.get.DMatrix <- function(data, label = NULL, missing = NA, weight = NULL, nthread = NULL) {
if (inherits(data, "dgCMatrix") || is.matrix(data)) {
if (is.null(label)) {
stop("label must be provided when data is a matrix")
}
dtrain <- xgb.DMatrix(data, label = label, missing = missing, nthread = nthread)
if (!is.null(weight)){
setinfo(dtrain, "weight", weight)
}
} else {
if (!is.null(label)) {
warning("xgboost: label will be ignored.")
}
if (is.character(data)) {
data <- path.expand(data)
dtrain <- xgb.DMatrix(data[1])
} else if (inherits(data, "xgb.DMatrix")) {
dtrain <- data
} else if (inherits(data, "data.frame")) {
stop("xgboost doesn't support data.frame as input. Convert it to matrix first.")
} else {
stop("xgboost: invalid input data")
}
}
return (dtrain)
}
#' Dimensions of xgb.DMatrix
#'
#' Returns a vector of numbers of rows and of columns in an \code{xgb.DMatrix}.
#' @param x Object of class \code{xgb.DMatrix}
#'
#' @details
#' Note: since \code{nrow} and \code{ncol} internally use \code{dim}, they can also
#' be directly used with an \code{xgb.DMatrix} object.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' dtrain <- xgb.DMatrix(train$data, label=train$label, nthread = 2)
#'
#' stopifnot(nrow(dtrain) == nrow(train$data))
#' stopifnot(ncol(dtrain) == ncol(train$data))
#' stopifnot(all(dim(dtrain) == dim(train$data)))
#'
#' @export
dim.xgb.DMatrix <- function(x) {
c(.Call(XGDMatrixNumRow_R, x), .Call(XGDMatrixNumCol_R, x))
}
#' Handling of column names of \code{xgb.DMatrix}
#'
#' Only column names are supported for \code{xgb.DMatrix}, thus setting row names has no effect and
#' the returned row names are \code{NULL}.
#'
#' @param x object of class \code{xgb.DMatrix}
#' @param value a list of two elements: the first one is ignored
#' and the second one is column names
#'
#' @details
#' Generic \code{dimnames} methods are used by \code{colnames}.
#' Since row names are irrelevant, it is recommended to use \code{colnames} directly.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' dtrain <- xgb.DMatrix(train$data, label=train$label, nthread = 2)
#' dimnames(dtrain)
#' colnames(dtrain)
#' colnames(dtrain) <- make.names(1:ncol(train$data))
#' print(dtrain, verbose=TRUE)
#'
#' @rdname dimnames.xgb.DMatrix
#' @export
dimnames.xgb.DMatrix <- function(x) {
fn <- getinfo(x, "feature_name")
## row names is null.
list(NULL, fn)
}
#' @rdname dimnames.xgb.DMatrix
#' @export
`dimnames<-.xgb.DMatrix` <- function(x, value) {
if (!is.list(value) || length(value) != 2L)
stop("invalid 'dimnames' given: must be a list of two elements")
if (!is.null(value[[1L]]))
stop("xgb.DMatrix does not have rownames")
if (is.null(value[[2]])) {
setinfo(x, "feature_name", NULL)
return(x)
}
if (ncol(x) != length(value[[2]])) {
stop("can't assign ", length(value[[2]]), " colnames to a ", ncol(x), " column xgb.DMatrix")
}
setinfo(x, "feature_name", value[[2]])
x
}
#' Get information of an xgb.DMatrix object
#'
#' Get information of an xgb.DMatrix object
#' @param object Object of class \code{xgb.DMatrix}
#' @param name the name of the information field to get (see details)
#' @param ... other parameters
#'
#' @details
#' The \code{name} field can be one of the following:
#'
#' \itemize{
#'     \item \code{label}: the label that XGBoost learns from;
#'     \item \code{weight}: per-observation weights used to rescale each instance's contribution;
#'     \item \code{base_margin}: the base prediction that XGBoost will boost from;
#'     \item \code{nrow}: number of rows of the \code{xgb.DMatrix}.
#'
#' }
#'
#' \code{group} can be setup by \code{setinfo} but can't be retrieved by \code{getinfo}.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
#'
#' labels <- getinfo(dtrain, 'label')
#' setinfo(dtrain, 'label', 1-labels)
#'
#' labels2 <- getinfo(dtrain, 'label')
#' stopifnot(all(labels2 == 1-labels))
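#'
#' # a minimal sketch of the special 'nrow' field:
#' stopifnot(getinfo(dtrain, 'nrow') == nrow(dtrain))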
#' @rdname getinfo
#' @export
getinfo <- function(object, ...) UseMethod("getinfo")
#' @rdname getinfo
#' @export
getinfo.xgb.DMatrix <- function(object, name, ...) {
if (typeof(name) != "character" ||
length(name) != 1 ||
!name %in% c('label', 'weight', 'base_margin', 'nrow',
'label_lower_bound', 'label_upper_bound', "feature_type", "feature_name")) {
stop(
"getinfo: name must be one of the following\n",
" 'label', 'weight', 'base_margin', 'nrow', 'label_lower_bound', 'label_upper_bound', 'feature_type', 'feature_name'"
)
}
if (name == "feature_name" || name == "feature_type") {
ret <- .Call(XGDMatrixGetStrFeatureInfo_R, object, name)
} else if (name != "nrow"){
ret <- .Call(XGDMatrixGetInfo_R, object, name)
} else {
ret <- nrow(object)
}
if (length(ret) == 0) return(NULL)
return(ret)
}
#' Set information of an xgb.DMatrix object
#'
#' Set information of an xgb.DMatrix object
#'
#' @param object Object of class "xgb.DMatrix"
#' @param name the name of the field to get
#' @param info the specific field of information to set
#' @param ... other parameters
#'
#' @details
#' The \code{name} field can be one of the following:
#'
#' \itemize{
#'     \item \code{label}: the label that XGBoost learns from;
#'     \item \code{weight}: per-observation weights used to rescale each instance's contribution;
#'     \item \code{base_margin}: the base prediction that XGBoost will boost from;
#'     \item \code{group}: number of rows in each group (to use with the \code{rank:pairwise} objective).
#' }
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
#'
#' labels <- getinfo(dtrain, 'label')
#' setinfo(dtrain, 'label', 1-labels)
#' labels2 <- getinfo(dtrain, 'label')
#' stopifnot(all.equal(labels2, 1-labels))
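#'
#' # a minimal sketch of assigning feature names (one name per column of the input data):
#' setinfo(dtrain, 'feature_name', colnames(agaricus.train$data))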
#' @rdname setinfo
#' @export
setinfo <- function(object, ...) UseMethod("setinfo")
#' @rdname setinfo
#' @export
setinfo.xgb.DMatrix <- function(object, name, info, ...) {
if (name == "label") {
if (length(info) != nrow(object))
stop("The length of labels must equal to the number of rows in the input data")
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
return(TRUE)
}
if (name == "label_lower_bound") {
if (length(info) != nrow(object))
stop("The length of lower-bound labels must equal to the number of rows in the input data")
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
return(TRUE)
}
if (name == "label_upper_bound") {
if (length(info) != nrow(object))
stop("The length of upper-bound labels must equal to the number of rows in the input data")
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
return(TRUE)
}
if (name == "weight") {
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
return(TRUE)
}
if (name == "base_margin") {
# if (length(info)!=nrow(object))
# stop("The length of base margin must equal to the number of rows in the input data")
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
return(TRUE)
}
if (name == "group") {
if (sum(info) != nrow(object))
stop("The sum of groups must equal to the number of rows in the input data")
.Call(XGDMatrixSetInfo_R, object, name, as.integer(info))
return(TRUE)
}
if (name == "feature_weights") {
if (length(info) != ncol(object)) {
stop("The number of feature weights must equal to the number of columns in the input data")
}
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
return(TRUE)
}
set_feat_info <- function(name) {
msg <- sprintf(
"The number of %s must equal to the number of columns in the input data. %s vs. %s",
name,
length(info),
ncol(object)
)
if (!is.null(info)) {
info <- as.list(info)
if (length(info) != ncol(object)) {
stop(msg)
}
}
.Call(XGDMatrixSetStrFeatureInfo_R, object, name, info)
}
if (name == "feature_name") {
set_feat_info("feature_name")
return(TRUE)
}
if (name == "feature_type") {
set_feat_info("feature_type")
return(TRUE)
}
stop("setinfo: unknown info name ", name)
return(FALSE)
}
#' Get a new DMatrix containing the specified rows of
#' original xgb.DMatrix object
#'
#' Get a new DMatrix containing the specified rows of
#' original xgb.DMatrix object
#'
#' @param object Object of class "xgb.DMatrix"
#' @param idxset an integer vector of indices of rows needed
#' @param colset currently not used (columns subsetting is not available)
#' @param ... other parameters (currently not used)
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
#'
#' dsub <- slice(dtrain, 1:42)
#' labels1 <- getinfo(dsub, 'label')
#' dsub <- dtrain[1:42, ]
#' labels2 <- getinfo(dsub, 'label')
#' all.equal(labels1, labels2)
#'
#' @rdname slice.xgb.DMatrix
#' @export
slice <- function(object, ...) UseMethod("slice")
#' @rdname slice.xgb.DMatrix
#' @export
slice.xgb.DMatrix <- function(object, idxset, ...) {
if (!inherits(object, "xgb.DMatrix")) {
stop("object must be xgb.DMatrix")
}
ret <- .Call(XGDMatrixSliceDMatrix_R, object, idxset)
attr_list <- attributes(object)
nr <- nrow(object)
len <- sapply(attr_list, NROW)
ind <- which(len == nr)
if (length(ind) > 0) {
nms <- names(attr_list)[ind]
for (i in seq_along(ind)) {
obj_attr <- attr(object, nms[i])
if (NCOL(obj_attr) > 1) {
attr(ret, nms[i]) <- obj_attr[idxset, ]
} else {
attr(ret, nms[i]) <- obj_attr[idxset]
}
}
}
return(structure(ret, class = "xgb.DMatrix"))
}
#' @rdname slice.xgb.DMatrix
#' @export
`[.xgb.DMatrix` <- function(object, idxset, colset = NULL) {
slice(object, idxset)
}
#' Print xgb.DMatrix
#'
#' Print information about xgb.DMatrix.
#' Currently it displays dimensions and presence of info-fields and colnames.
#'
#' @param x an xgb.DMatrix object
#' @param verbose whether to print colnames (when present)
#' @param ... not currently used
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
#'
#' dtrain
#' print(dtrain, verbose=TRUE)
#'
#' @method print xgb.DMatrix
#' @export
print.xgb.DMatrix <- function(x, verbose = FALSE, ...) {
cat('xgb.DMatrix dim:', nrow(x), 'x', ncol(x), ' info: ')
infos <- character(0)
if (length(getinfo(x, 'label')) > 0) infos <- 'label'
if (length(getinfo(x, 'weight')) > 0) infos <- c(infos, 'weight')
if (length(getinfo(x, 'base_margin')) > 0) infos <- c(infos, 'base_margin')
if (length(infos) == 0) infos <- 'NA'
cat(infos)
cnames <- colnames(x)
cat(' colnames:')
  if (verbose && !is.null(cnames)) {
cat("\n'")
cat(cnames, sep = "','")
cat("'")
} else {
if (is.null(cnames)) cat(' no')
else cat(' yes')
}
cat("\n")
invisible(x)
}
| /scratch/gouwar.j/cran-all/cranData/xgboost/R/xgb.DMatrix.R |
#' Save xgb.DMatrix object to binary file
#'
#' Save xgb.DMatrix object to binary file
#'
#' @param dmatrix the \code{xgb.DMatrix} object
#' @param fname the name of the file to write.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
#' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
#' if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
#' @export
xgb.DMatrix.save <- function(dmatrix, fname) {
if (typeof(fname) != "character")
stop("fname must be character")
if (!inherits(dmatrix, "xgb.DMatrix"))
stop("dmatrix must be xgb.DMatrix")
fname <- path.expand(fname)
.Call(XGDMatrixSaveBinary_R, dmatrix, fname[1], 0L)
return(TRUE)
}
| /scratch/gouwar.j/cran-all/cranData/xgboost/R/xgb.DMatrix.save.R |
#' Global configuration consists of a collection of parameters that can be applied in the global
#' scope. See \url{https://xgboost.readthedocs.io/en/stable/parameter.html} for the full list of
#' parameters supported in the global configuration. Use \code{xgb.set.config} to update the
#' values of one or more global-scope parameters. Use \code{xgb.get.config} to fetch the current
#' values of all global-scope parameters (listed in
#' \url{https://xgboost.readthedocs.io/en/stable/parameter.html}).
#'
#' @rdname xgbConfig
#' @title Set and get global configuration
#' @name xgb.set.config, xgb.get.config
#' @export xgb.set.config xgb.get.config
#' @param ... List of parameters to be set, as keyword arguments
#' @return
#' \code{xgb.set.config} returns \code{TRUE} to signal success. \code{xgb.get.config} returns
#' a list containing all global-scope parameters and their values.
#'
#' @examples
#' # Set verbosity level to silent (0)
#' xgb.set.config(verbosity = 0)
#' # Now global verbosity level is 0
#' config <- xgb.get.config()
#' print(config$verbosity)
#' # Set verbosity level to warning (1)
#' xgb.set.config(verbosity = 1)
#' # Now global verbosity level is 1
#' config <- xgb.get.config()
#' print(config$verbosity)
xgb.set.config <- function(...) {
new_config <- list(...)
.Call(XGBSetGlobalConfig_R, jsonlite::toJSON(new_config, auto_unbox = TRUE))
return(TRUE)
}
#' @rdname xgbConfig
xgb.get.config <- function() {
config <- .Call(XGBGetGlobalConfig_R)
return(jsonlite::fromJSON(config))
}
| /scratch/gouwar.j/cran-all/cranData/xgboost/R/xgb.config.R |
#' Create new features from a previously learned model
#'
#' May improve the learning by adding new features to the training data based on the decision trees from a previously learned model.
#'
#' @param model decision tree boosting model learned on the original data
#' @param data original data (usually provided as a \code{dgCMatrix} matrix)
#' @param ... currently not used
#'
#' @return \code{dgCMatrix} matrix including both the original data and the new features.
#'
#' @details
#' This function is inspired by paragraph 3.1 of the paper:
#'
#' \strong{Practical Lessons from Predicting Clicks on Ads at Facebook}
#'
#' \emph{(Xinran He, Junfeng Pan, Ou Jin, Tianbing Xu, Bo Liu, Tao Xu, Yan, xin Shi, Antoine Atallah, Ralf Herbrich, Stuart Bowers,
#' Joaquin Quinonero Candela)}
#'
#' International Workshop on Data Mining for Online Advertising (ADKDD) - August 24, 2014
#'
#' \url{https://research.facebook.com/publications/practical-lessons-from-predicting-clicks-on-ads-at-facebook/}.
#'
#' Extract explaining the method:
#'
#' "We found that boosted decision trees are a powerful and very
#' convenient way to implement non-linear and tuple transformations
#' of the kind we just described. We treat each individual
#' tree as a categorical feature that takes as value the
#' index of the leaf an instance ends up falling in. We use
#' 1-of-K coding of this type of features.
#'
#' For example, consider the boosted tree model in Figure 1 with 2 subtrees,
#' where the first subtree has 3 leafs and the second 2 leafs. If an
#' instance ends up in leaf 2 in the first subtree and leaf 1 in
#' second subtree, the overall input to the linear classifier will
#' be the binary vector \code{[0, 1, 0, 1, 0]}, where the first 3 entries
#' correspond to the leaves of the first subtree and last 2 to
#' those of the second subtree.
#'
#' [...]
#'
#' We can understand boosted decision tree
#' based transformation as a supervised feature encoding that
#' converts a real-valued vector into a compact binary-valued
#' vector. A traversal from root node to a leaf node represents
#' a rule on certain features."
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
#' dtest <- with(agaricus.test, xgb.DMatrix(data, label = label, nthread = 2))
#'
#' param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
#' nrounds = 4
#'
#' bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)
#'
#' # Model accuracy without new features
#' accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) /
#' length(agaricus.test$label)
#'
#' # Convert previous features to one hot encoding
#' new.features.train <- xgb.create.features(model = bst, agaricus.train$data)
#' new.features.test <- xgb.create.features(model = bst, agaricus.test$data)
#'
#' # learning with new features
#' new.dtrain <- xgb.DMatrix(
#' data = new.features.train, label = agaricus.train$label, nthread = 2
#' )
#' new.dtest <- xgb.DMatrix(
#' data = new.features.test, label = agaricus.test$label, nthread = 2
#' )
#' watchlist <- list(train = new.dtrain)
#' bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2,
#'                  watchlist = watchlist)
#'
#' # Model accuracy with new features
#' accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) /
#' length(agaricus.test$label)
#'
#' # Here the accuracy was already good and is now perfect.
#' cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now",
#' accuracy.after, "!\n"))
#'
#' @export
xgb.create.features <- function(model, data, ...) {
check.deprecation(...)
pred_with_leaf <- predict(model, data, predleaf = TRUE)
cols <- lapply(as.data.frame(pred_with_leaf), factor)
cbind(data, sparse.model.matrix(~ . -1, cols)) # nolint
}
| /scratch/gouwar.j/cran-all/cranData/xgboost/R/xgb.create.features.R |
#' Cross Validation
#'
#' The cross validation function of xgboost
#'
#' @param params the list of parameters. The complete list of parameters is
#' available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
#' is a shorter summary:
#' \itemize{
#' \item \code{objective} objective function, common ones are
#' \itemize{
#' \item \code{reg:squarederror} Regression with squared loss.
#' \item \code{binary:logistic} logistic regression for classification.
#' \item See \code{\link[=xgb.train]{xgb.train}()} for complete list of objectives.
#' }
#' \item \code{eta} step size of each boosting step
#' \item \code{max_depth} maximum depth of the tree
#' \item \code{nthread} number of thread used in training, if not set, all threads are used
#' }
#'
#' See \code{\link{xgb.train}} for further details.
#' See also demo/ for walkthrough example in R.
#' @param data takes an \code{xgb.DMatrix}, \code{matrix}, or \code{dgCMatrix} as the input.
#' @param nrounds the max number of iterations
#' @param nfold the original dataset is randomly partitioned into \code{nfold} equal size subsamples.
#' @param label vector of response values. Should be provided only when data is an R-matrix.
#' @param missing is only used when input is a dense matrix. By default is set to NA, which means
#' that NA values should be considered as 'missing' by the algorithm.
#' Sometimes, 0 or other extreme value might be used to represent missing values.
#' @param prediction A logical value indicating whether to return the test fold predictions
#' from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callback.
#' @param showsd \code{boolean}, whether to show standard deviation of cross validation
#' @param metrics list of evaluation metrics to be used in cross validation,
#' when it is not specified, the evaluation metric is chosen according to objective function.
#' Possible options are:
#' \itemize{
#' \item \code{error} binary classification error rate
#'   \item \code{rmse} Root mean square error
#' \item \code{logloss} negative log-likelihood function
#' \item \code{mae} Mean absolute error
#' \item \code{mape} Mean absolute percentage error
#' \item \code{auc} Area under curve
#' \item \code{aucpr} Area under PR curve
#' \item \code{merror} Exact matching error, used to evaluate multi-class classification
#' }
#' @param obj customized objective function. Returns gradient and second order
#' gradient with given prediction and dtrain.
#' @param feval customized evaluation function. Returns
#' \code{list(metric='metric-name', value='metric-value')} with given
#' prediction and dtrain.
#' @param stratified a \code{boolean} indicating whether sampling of folds should be stratified
#' by the values of outcome labels.
#' @param folds \code{list} provides a possibility to use a list of pre-defined CV folds
#' (each element must be a vector of test fold's indices). When folds are supplied,
#' the \code{nfold} and \code{stratified} parameters are ignored.
#' @param train_folds \code{list} list specifying which indices to use for training. If \code{NULL}
#' (the default) all indices not specified in \code{folds} will be used for training.
#' @param verbose \code{boolean}, print the statistics during the process
#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}.
#' Default is 1 which means all messages are printed. This parameter is passed to the
#' \code{\link{cb.print.evaluation}} callback.
#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered.
#' If set to an integer \code{k}, training with a validation set will stop if the performance
#' doesn't improve for \code{k} rounds.
#' Setting this parameter engages the \code{\link{cb.early.stop}} callback.
#' @param maximize If \code{feval} and \code{early_stopping_rounds} are set,
#' then this parameter must be set as well.
#' When it is \code{TRUE}, it means the larger the evaluation score the better.
#' This parameter is passed to the \code{\link{cb.early.stop}} callback.
#' @param callbacks a list of callback functions to perform various task during boosting.
#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
#' parameters' values. User can provide either existing or their own callback methods in order
#' to customize the training process.
#' @param ... other parameters to pass to \code{params}.
#'
#' @details
#' The original sample is randomly partitioned into \code{nfold} equal size subsamples.
#'
#' Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.
#'
#' The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data.
#'
#' All observations are used for both training and validation.
#'
#' Adapted from \url{https://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29}
#'
#' @return
#' An object of class \code{xgb.cv.synchronous} with the following elements:
#' \itemize{
#' \item \code{call} a function call.
#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not
#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
#' \item \code{callbacks} callback functions that were either automatically assigned or
#' explicitly passed.
#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
#' first column corresponding to iteration number and the rest corresponding to the
#' CV-based evaluation means and standard deviations for the training and test CV-sets.
#' It is created by the \code{\link{cb.evaluation.log}} callback.
#' \item \code{niter} number of boosting iterations.
#' \item \code{nfeatures} number of features in training data.
#' \item \code{folds} the list of CV folds' indices - either those passed through the \code{folds}
#' parameter or randomly generated.
#' \item \code{best_iteration} iteration number with the best evaluation metric value
#' (only available with early stopping).
#'   \item \code{best_ntreelimit} and \code{ntreelimit}: deprecated attributes, use \code{best_iteration} instead.
#' \item \code{pred} CV prediction values available when \code{prediction} is set.
#' It is either vector or matrix (see \code{\link{cb.cv.predict}}).
#' \item \code{models} a list of the CV folds' models. It is only available with the explicit
#' setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
#' }
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
#' cv <- xgb.cv(data = dtrain, nrounds = 3, nthread = 2, nfold = 5, metrics = list("rmse","auc"),
#' max_depth = 3, eta = 1, objective = "binary:logistic")
#' print(cv)
#' print(cv, verbose=TRUE)
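#'
#' # a minimal sketch of collecting out-of-fold predictions (engages the cb.cv.predict callback):
#' cv2 <- xgb.cv(data = dtrain, nrounds = 3, nthread = 2, nfold = 5, prediction = TRUE,
#'               max_depth = 3, eta = 1, objective = "binary:logistic")
#' str(cv2$pred)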
#'
#' @export
xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing = NA,
prediction = FALSE, showsd = TRUE, metrics=list(),
obj = NULL, feval = NULL, stratified = TRUE, folds = NULL, train_folds = NULL,
verbose = TRUE, print_every_n=1L,
early_stopping_rounds = NULL, maximize = NULL, callbacks = list(), ...) {
check.deprecation(...)
params <- check.booster.params(params, ...)
# TODO: should we deprecate the redundant 'metrics' parameter?
for (m in metrics)
params <- c(params, list("eval_metric" = m))
check.custom.obj()
check.custom.eval()
#if (is.null(params[['eval_metric']]) && is.null(feval))
# stop("Either 'eval_metric' or 'feval' must be provided for CV")
# Check the labels
if ((inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) ||
(!inherits(data, 'xgb.DMatrix') && is.null(label))) {
stop("Labels must be provided for CV either through xgb.DMatrix, or through 'label=' when 'data' is matrix")
} else if (inherits(data, 'xgb.DMatrix')) {
if (!is.null(label))
warning("xgb.cv: label will be ignored, since data is of type xgb.DMatrix")
cv_label <- getinfo(data, 'label')
} else {
cv_label <- label
}
# CV folds
if (!is.null(folds)) {
if (!is.list(folds) || length(folds) < 2)
stop("'folds' must be a list with 2 or more elements that are vectors of indices for each CV-fold")
nfold <- length(folds)
} else {
if (nfold <= 1)
stop("'nfold' must be > 1")
folds <- generate.cv.folds(nfold, nrow(data), stratified, cv_label, params)
}
# Potential TODO: sequential CV
#if (strategy == 'sequential')
# stop('Sequential CV strategy is not yet implemented')
# verbosity & evaluation printing callback:
params <- c(params, list(silent = 1))
print_every_n <- max(as.integer(print_every_n), 1L)
if (!has.callbacks(callbacks, 'cb.print.evaluation') && verbose) {
callbacks <- add.cb(callbacks, cb.print.evaluation(print_every_n, showsd = showsd))
}
# evaluation log callback: always is on in CV
evaluation_log <- list()
if (!has.callbacks(callbacks, 'cb.evaluation.log')) {
callbacks <- add.cb(callbacks, cb.evaluation.log())
}
# Early stopping callback
stop_condition <- FALSE
if (!is.null(early_stopping_rounds) &&
!has.callbacks(callbacks, 'cb.early.stop')) {
callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
maximize = maximize, verbose = verbose))
}
# CV-predictions callback
if (prediction &&
!has.callbacks(callbacks, 'cb.cv.predict')) {
callbacks <- add.cb(callbacks, cb.cv.predict(save_models = FALSE))
}
# Sort the callbacks into categories
cb <- categorize.callbacks(callbacks)
# create the booster-folds
# train_folds
dall <- xgb.get.DMatrix(data, label, missing, nthread = params$nthread)
bst_folds <- lapply(seq_along(folds), function(k) {
dtest <- slice(dall, folds[[k]])
# code originally contributed by @RolandASc on stackoverflow
if (is.null(train_folds))
dtrain <- slice(dall, unlist(folds[-k]))
else
dtrain <- slice(dall, train_folds[[k]])
handle <- xgb.Booster.handle(params, list(dtrain, dtest))
list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test = dtest), index = folds[[k]])
})
rm(dall)
# a "basket" to collect some results from callbacks
basket <- list()
# extract parameters that can affect the relationship b/w #trees and #iterations
num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1) # nolint
num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1) # nolint
# those are fixed for CV (no training continuation)
begin_iteration <- 1
end_iteration <- nrounds
# synchronous CV boosting: run CV folds' models within each iteration
for (iteration in begin_iteration:end_iteration) {
for (f in cb$pre_iter) f()
msg <- lapply(bst_folds, function(fd) {
xgb.iter.update(fd$bst, fd$dtrain, iteration - 1, obj)
xgb.iter.eval(fd$bst, fd$watchlist, iteration - 1, feval)
})
msg <- simplify2array(msg)
bst_evaluation <- rowMeans(msg)
bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2) # nolint
for (f in cb$post_iter) f()
if (stop_condition) break
}
for (f in cb$finalize) f(finalize = TRUE)
# the CV result
ret <- list(
call = match.call(),
params = params,
callbacks = callbacks,
evaluation_log = evaluation_log,
niter = end_iteration,
nfeatures = ncol(data),
folds = folds
)
ret <- c(ret, basket)
class(ret) <- 'xgb.cv.synchronous'
invisible(ret)
}
#' Print xgb.cv result
#'
#' Prints formatted results of \code{xgb.cv}.
#'
#' @param x an \code{xgb.cv.synchronous} object
#' @param verbose whether to print detailed data
#' @param ... passed to \code{data.table.print}
#'
#' @details
#' When not verbose, it would only print the evaluation results,
#' including the best iteration (when available).
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' cv <- xgb.cv(data = train$data, label = train$label, nfold = 5, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' print(cv)
#' print(cv, verbose=TRUE)
#'
#' @rdname print.xgb.cv
#' @method print xgb.cv.synchronous
#' @export
print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) {
cat('##### xgb.cv ', length(x$folds), '-folds\n', sep = '')
if (verbose) {
if (!is.null(x$call)) {
cat('call:\n ')
print(x$call)
}
if (!is.null(x$params)) {
cat('params (as set within xgb.cv):\n')
cat(' ',
paste(names(x$params),
paste0('"', unlist(x$params), '"'),
sep = ' = ', collapse = ', '), '\n', sep = '')
}
if (!is.null(x$callbacks) && length(x$callbacks) > 0) {
cat('callbacks:\n')
lapply(callback.calls(x$callbacks), function(x) {
cat(' ')
print(x)
})
}
for (n in c('niter', 'best_iteration', 'best_ntreelimit')) {
if (is.null(x[[n]]))
next
cat(n, ': ', x[[n]], '\n', sep = '')
}
if (!is.null(x$pred)) {
cat('pred:\n')
str(x$pred)
}
}
if (verbose)
cat('evaluation_log:\n')
print(x$evaluation_log, row.names = FALSE, ...)
if (!is.null(x$best_iteration)) {
cat('Best iteration:\n')
print(x$evaluation_log[x$best_iteration], row.names = FALSE, ...)
}
invisible(x)
}
| /scratch/gouwar.j/cran-all/cranData/xgboost/R/xgb.cv.R |
#' Dump an xgboost model in text format.
#'
#' Dump an xgboost model in text format.
#'
#' @param model the model object.
#' @param fname the name of the text file where to save the model text dump.
#' If not provided or set to \code{NULL}, the model is returned as a \code{character} vector.
#' @param fmap feature map file representing feature types.
#' See demo/ for walkthrough example in R, and
#' \url{https://github.com/dmlc/xgboost/blob/master/demo/data/featmap.txt}
#' for example Format.
#' @param with_stats whether to dump some additional statistics about the splits.
#' When this option is on, the model dump contains two additional values:
#' gain is the approximate loss function gain we get in each split;
#' cover is the sum of second order gradient in each node.
#' @param dump_format either 'text' or 'json' format could be specified.
#' @param ... currently not used
#'
#' @return
#' If fname is not provided or set to \code{NULL} the function will return the model
#' as a \code{character} vector. Otherwise it will return \code{TRUE}.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' # save the model in file 'xgb.model.dump'
#' dump_path = file.path(tempdir(), 'model.dump')
#' xgb.dump(bst, dump_path, with_stats = TRUE)
#'
#' # print the model without saving it to a file
#' print(xgb.dump(bst, with_stats = TRUE))
#'
#' # print in JSON format:
#' cat(xgb.dump(bst, with_stats = TRUE, dump_format='json'))
#'
#' @export
xgb.dump <- function(model, fname = NULL, fmap = "", with_stats=FALSE,
dump_format = c("text", "json"), ...) {
check.deprecation(...)
dump_format <- match.arg(dump_format)
if (!inherits(model, "xgb.Booster"))
stop("model: argument must be of type xgb.Booster")
if (!(is.null(fname) || is.character(fname)))
stop("fname: argument must be a character string (when provided)")
if (!(is.null(fmap) || is.character(fmap)))
stop("fmap: argument must be a character string (when provided)")
model <- xgb.Booster.complete(model)
model_dump <- .Call(XGBoosterDumpModel_R, model$handle, NVL(fmap, "")[1], as.integer(with_stats),
as.character(dump_format))
if (is.null(fname))
model_dump <- gsub('\t', '', model_dump, fixed = TRUE)
if (dump_format == "text")
model_dump <- unlist(strsplit(model_dump, '\n', fixed = TRUE))
model_dump <- grep('^\\s*$', model_dump, invert = TRUE, value = TRUE)
if (is.null(fname)) {
return(model_dump)
} else {
fname <- path.expand(fname)
writeLines(model_dump, fname[1])
return(TRUE)
}
}
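# Illustrative sketch (added for exposition, not part of the original source):
# inspecting a text dump in memory instead of writing it to a file. The agaricus
# demo data and the object names below are assumptions.
data(agaricus.train, package = 'xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
               objective = "binary:logistic")
dump_lines <- xgb.dump(bst, with_stats = TRUE)
sum(grepl("^booster", dump_lines))              # one "booster[i]" header per tree
grep("leaf=", dump_lines, value = TRUE)[1:3]    # a few leaf lines with their cover stats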
# ---- end of file: R/xgb.dump.R ----
# ggplot backend for the xgboost plotting facilities
#' @rdname xgb.plot.importance
#' @export
xgb.ggplot.importance <- function(importance_matrix = NULL, top_n = NULL, measure = NULL,
rel_to_first = FALSE, n_clusters = c(1:10), ...) {
importance_matrix <- xgb.plot.importance(importance_matrix, top_n = top_n, measure = measure,
rel_to_first = rel_to_first, plot = FALSE, ...)
if (!requireNamespace("ggplot2", quietly = TRUE)) {
stop("ggplot2 package is required", call. = FALSE)
}
if (!requireNamespace("Ckmeans.1d.dp", quietly = TRUE)) {
stop("Ckmeans.1d.dp package is required", call. = FALSE)
}
clusters <- suppressWarnings(
Ckmeans.1d.dp::Ckmeans.1d.dp(importance_matrix$Importance, n_clusters)
)
importance_matrix[, Cluster := as.character(clusters$cluster)]
plot <-
ggplot2::ggplot(importance_matrix,
ggplot2::aes(x = factor(Feature, levels = rev(Feature)), y = Importance, width = 0.5),
environment = environment()) +
ggplot2::geom_bar(ggplot2::aes(fill = Cluster), stat = "identity", position = "identity") +
ggplot2::coord_flip() +
ggplot2::xlab("Features") +
ggplot2::ggtitle("Feature importance") +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = .9, face = "bold"),
panel.grid.major.y = ggplot2::element_blank())
return(plot)
}
#' @rdname xgb.plot.deepness
#' @export
xgb.ggplot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med.depth", "med.weight")) {
if (!requireNamespace("ggplot2", quietly = TRUE))
stop("ggplot2 package is required for plotting the graph deepness.", call. = FALSE)
which <- match.arg(which)
dt_depths <- xgb.plot.deepness(model = model, plot = FALSE)
dt_summaries <- dt_depths[, .(.N, Cover = mean(Cover)), Depth]
setkey(dt_summaries, 'Depth')
if (which == "2x1") {
p1 <-
ggplot2::ggplot(dt_summaries) +
ggplot2::geom_bar(ggplot2::aes(x = Depth, y = N), stat = "Identity") +
ggplot2::xlab("") +
ggplot2::ylab("Number of leafs") +
ggplot2::ggtitle("Model complexity") +
ggplot2::theme(
plot.title = ggplot2::element_text(lineheight = 0.9, face = "bold"),
panel.grid.major.y = ggplot2::element_blank(),
axis.ticks = ggplot2::element_blank(),
axis.text.x = ggplot2::element_blank()
)
p2 <-
ggplot2::ggplot(dt_summaries) +
ggplot2::geom_bar(ggplot2::aes(x = Depth, y = Cover), stat = "Identity") +
ggplot2::xlab("Leaf depth") +
ggplot2::ylab("Weighted cover")
multiplot(p1, p2, cols = 1)
return(invisible(list(p1, p2)))
} else if (which == "max.depth") {
p <-
ggplot2::ggplot(dt_depths[, max(Depth), Tree]) +
ggplot2::geom_jitter(ggplot2::aes(x = Tree, y = V1),
height = 0.15, alpha = 0.4, size = 3, stroke = 0) +
ggplot2::xlab("tree #") +
ggplot2::ylab("Max tree leaf depth")
return(p)
} else if (which == "med.depth") {
p <-
ggplot2::ggplot(dt_depths[, median(as.numeric(Depth)), Tree]) +
ggplot2::geom_jitter(ggplot2::aes(x = Tree, y = V1),
height = 0.15, alpha = 0.4, size = 3, stroke = 0) +
ggplot2::xlab("tree #") +
ggplot2::ylab("Median tree leaf depth")
return(p)
} else if (which == "med.weight") {
p <-
ggplot2::ggplot(dt_depths[, median(abs(Weight)), Tree]) +
ggplot2::geom_point(ggplot2::aes(x = Tree, y = V1),
alpha = 0.4, size = 3, stroke = 0) +
ggplot2::xlab("tree #") +
ggplot2::ylab("Median absolute leaf weight")
return(p)
}
}
#' @rdname xgb.plot.shap.summary
#' @export
xgb.ggplot.shap.summary <- function(data, shap_contrib = NULL, features = NULL, top_n = 10, model = NULL,
trees = NULL, target_class = NULL, approxcontrib = FALSE, subsample = NULL) {
data_list <- xgb.shap.data(
data = data,
shap_contrib = shap_contrib,
features = features,
top_n = top_n,
model = model,
trees = trees,
target_class = target_class,
approxcontrib = approxcontrib,
subsample = subsample,
max_observations = 10000 # 10,000 samples per feature.
)
p_data <- prepare.ggplot.shap.data(data_list, normalize = TRUE)
# Reverse factor levels so that the first level is at the top of the plot
p_data[, "feature" := factor(feature, rev(levels(feature)))]
  p <- ggplot2::ggplot(p_data, ggplot2::aes(x = feature, y = shap_value, colour = feature_value)) +
ggplot2::geom_jitter(alpha = 0.5, width = 0.1) +
ggplot2::scale_colour_viridis_c(limits = c(-3, 3), option = "plasma", direction = -1) +
ggplot2::geom_abline(slope = 0, intercept = 0, colour = "darkgrey") +
ggplot2::coord_flip()
p
}
#' Combine and melt feature values and SHAP contributions for sample
#' observations.
#'
#' Conforms to data format required for ggplot functions.
#'
#' Internal utility function.
#'
#' @param data_list List containing 'data' and 'shap_contrib' returned by
#' \code{xgb.shap.data()}.
#' @param normalize Whether to standardize feature values to have mean 0 and
#' standard deviation 1 (useful for comparing multiple features on the same
#' plot). Default \code{FALSE}.
#'
#' @return A data.table containing the observation ID, the feature name, the
#' feature value (normalized if specified), and the SHAP contribution value.
prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) {
data <- data_list[["data"]]
shap_contrib <- data_list[["shap_contrib"]]
data <- data.table::as.data.table(as.matrix(data))
if (normalize) {
data[, (names(data)) := lapply(.SD, normalize)]
}
data[, "id" := seq_len(nrow(data))]
data_m <- data.table::melt.data.table(data, id.vars = "id", variable.name = "feature", value.name = "feature_value")
shap_contrib <- data.table::as.data.table(as.matrix(shap_contrib))
shap_contrib[, "id" := seq_len(nrow(shap_contrib))]
shap_contrib_m <- data.table::melt.data.table(shap_contrib, id.vars = "id", variable.name = "feature", value.name = "shap_value")
p_data <- data.table::merge.data.table(data_m, shap_contrib_m, by = c("id", "feature"))
p_data
}
#' Scale feature value to have mean 0, standard deviation 1
#'
#' This is used to compare multiple features on the same plot.
#' Internal utility function
#'
#' @param x Numeric vector
#'
#' @return Numeric vector with mean 0 and sd 1.
normalize <- function(x) {
loc <- mean(x, na.rm = TRUE)
scale <- stats::sd(x, na.rm = TRUE)
(x - loc) / scale
}
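# Illustrative sketch (added for exposition, not part of the original source):
# the long format produced by prepare.ggplot.shap.data() (defined above), using the
# normalize() helper. The toy matrices below are assumptions, not real SHAP output.
toy <- list(
  data = matrix(c(1, 2, 3, 4), nrow = 2, dimnames = list(NULL, c("x1", "x2"))),
  shap_contrib = matrix(c(0.1, -0.2, 0.3, 0), nrow = 2,
                        dimnames = list(NULL, c("x1", "x2")))
)
prepare.ggplot.shap.data(toy, normalize = TRUE)
# -> one row per (id, feature) pair with columns: id, feature, feature_value, shap_value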
# Plot multiple ggplot graphs aligned by rows and columns.
# ... the plots
# cols number of columns
# internal utility function
multiplot <- function(..., cols = 1) {
plots <- list(...)
num_plots <- length(plots)
layout <- matrix(seq(1, cols * ceiling(num_plots / cols)),
ncol = cols, nrow = ceiling(num_plots / cols))
if (num_plots == 1) {
print(plots[[1]])
} else {
grid::grid.newpage()
grid::pushViewport(grid::viewport(layout = grid::grid.layout(nrow(layout), ncol(layout))))
for (i in 1:num_plots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.table(which(layout == i, arr.ind = TRUE))
print(
plots[[i]], vp = grid::viewport(
layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col
)
)
}
}
}
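# Illustrative sketch (added for exposition, not part of the original source):
# stacking two ggplot objects with the internal multiplot() helper, as done for
# the "2x1" deepness plot above. The toy data frame is an assumption.
if (requireNamespace("ggplot2", quietly = TRUE)) {
  d <- data.frame(x = 1:10, y = (1:10)^2)
  p1 <- ggplot2::ggplot(d, ggplot2::aes(x, y)) + ggplot2::geom_line()
  p2 <- ggplot2::ggplot(d, ggplot2::aes(x, sqrt(y))) + ggplot2::geom_point()
  multiplot(p1, p2, cols = 1)
}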
globalVariables(c(
"Cluster", "ggplot", "aes", "geom_bar", "coord_flip", "xlab", "ylab", "ggtitle", "theme",
"element_blank", "element_text", "V1", "Weight", "feature"
))
# ---- end of file: R/xgb.ggplot.R ----
#' Importance of features in a model.
#'
#' Creates a \code{data.table} of feature importances in a model.
#'
#' @param feature_names character vector of feature names. If the model already
#' contains feature names, those would be used when \code{feature_names=NULL} (default value).
#' Non-null \code{feature_names} could be provided to override those in the model.
#' @param model object of class \code{xgb.Booster}.
#' @param trees (only for the gbtree booster) an integer vector of tree indices that should be included
#' into the importance calculation. If set to \code{NULL}, all trees of the model are parsed.
#' It could be useful, e.g., in multiclass classification to get feature importances
#' for each class separately. IMPORTANT: the tree index in xgboost models
#' is zero-based (e.g., use \code{trees = 0:4} for first 5 trees).
#' @param data deprecated.
#' @param label deprecated.
#' @param target deprecated.
#'
#' @details
#'
#' This function works for both linear and tree models.
#'
#' For linear models, the importance is the absolute magnitude of linear coefficients.
#' For that reason, in order to obtain a meaningful ranking by importance for a linear model,
#' the features need to be on the same scale (which you also would want to do when using either
#' L1 or L2 regularization).
#'
#' @return
#'
#' For a tree model, a \code{data.table} with the following columns:
#' \itemize{
#'   \item \code{Feature} names of the features used in the model;
#'   \item \code{Gain} represents fractional contribution of each feature to the model based on
#'        the total gain of this feature's splits. Higher percentage means a more important
#'        predictive feature.
#'   \item \code{Cover} metric of the number of observations related to this feature;
#'   \item \code{Frequency} percentage representing the relative number of times
#'        a feature has been used in trees.
#' }
#'
#' A linear model's importance \code{data.table} has the following columns:
#' \itemize{
#'   \item \code{Feature} names of the features used in the model;
#' \item \code{Weight} the linear coefficient of this feature;
#' \item \code{Class} (only for multiclass models) class label.
#' }
#'
#' If \code{feature_names} is not provided and \code{model} doesn't have \code{feature_names},
#' index of the features will be used instead. Because the index is extracted from the model dump
#' (based on C++ code), it starts at 0 (as in C/C++ or Python) instead of 1 (usual in R).
#'
#' @examples
#'
#' # binomial classification using gbtree:
#' data(agaricus.train, package='xgboost')
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' xgb.importance(model = bst)
#'
#' # binomial classification using gblinear:
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, booster = "gblinear",
#' eta = 0.3, nthread = 1, nrounds = 20, objective = "binary:logistic")
#' xgb.importance(model = bst)
#'
#' # multiclass classification using gbtree:
#' nclass <- 3
#' nrounds <- 10
#' mbst <- xgboost(data = as.matrix(iris[, -5]), label = as.numeric(iris$Species) - 1,
#' max_depth = 3, eta = 0.2, nthread = 2, nrounds = nrounds,
#' objective = "multi:softprob", num_class = nclass)
#' # all classes clumped together:
#' xgb.importance(model = mbst)
#' # inspect importances separately for each class:
#' xgb.importance(model = mbst, trees = seq(from=0, by=nclass, length.out=nrounds))
#' xgb.importance(model = mbst, trees = seq(from=1, by=nclass, length.out=nrounds))
#' xgb.importance(model = mbst, trees = seq(from=2, by=nclass, length.out=nrounds))
#'
#' # multiclass classification using gblinear:
#' mbst <- xgboost(data = scale(as.matrix(iris[, -5])), label = as.numeric(iris$Species) - 1,
#' booster = "gblinear", eta = 0.2, nthread = 1, nrounds = 15,
#' objective = "multi:softprob", num_class = nclass)
#' xgb.importance(model = mbst)
#'
#' @export
xgb.importance <- function(feature_names = NULL, model = NULL, trees = NULL,
data = NULL, label = NULL, target = NULL){
if (!(is.null(data) && is.null(label) && is.null(target)))
warning("xgb.importance: parameters 'data', 'label' and 'target' are deprecated")
if (!inherits(model, "xgb.Booster"))
stop("model: must be an object of class xgb.Booster")
if (is.null(feature_names) && !is.null(model$feature_names))
feature_names <- model$feature_names
if (!(is.null(feature_names) || is.character(feature_names)))
stop("feature_names: Has to be a character vector")
model <- xgb.Booster.complete(model)
config <- jsonlite::fromJSON(xgb.config(model))
if (config$learner$gradient_booster$name == "gblinear") {
args <- list(importance_type = "weight", feature_names = feature_names)
results <- .Call(
XGBoosterFeatureScore_R, model$handle, jsonlite::toJSON(args, auto_unbox = TRUE, null = "null")
)
names(results) <- c("features", "shape", "weight")
n_classes <- if (length(results$shape) == 2) { results$shape[2] } else { 0 }
importance <- if (n_classes == 0) {
data.table(Feature = results$features, Weight = results$weight)[order(-abs(Weight))]
} else {
data.table(
Feature = rep(results$features, each = n_classes), Weight = results$weight, Class = seq_len(n_classes) - 1
)[order(Class, -abs(Weight))]
}
} else {
concatenated <- list()
output_names <- vector()
for (importance_type in c("weight", "total_gain", "total_cover")) {
args <- list(importance_type = importance_type, feature_names = feature_names, tree_idx = trees)
results <- .Call(
XGBoosterFeatureScore_R, model$handle, jsonlite::toJSON(args, auto_unbox = TRUE, null = "null")
)
names(results) <- c("features", "shape", importance_type)
concatenated[
switch(importance_type, "weight" = "Frequency", "total_gain" = "Gain", "total_cover" = "Cover")
] <- results[importance_type]
output_names <- results$features
}
importance <- data.table(
Feature = output_names,
Gain = concatenated$Gain / sum(concatenated$Gain),
Cover = concatenated$Cover / sum(concatenated$Cover),
Frequency = concatenated$Frequency / sum(concatenated$Frequency)
)[order(Gain, decreasing = TRUE)]
}
importance
}
# Avoid error messages during CRAN check.
# The reason is that these variables are never declared
# They are mainly column names inferred by Data.table...
globalVariables(c(".", ".N", "Gain", "Cover", "Frequency", "Feature", "Class"))
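# Illustrative sketch (added for exposition, not part of the original source):
# for tree boosters the Gain/Cover/Frequency columns are normalized above, so each
# sums to 1 across features. The toy model below is an assumption.
data(agaricus.train, package = 'xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               max_depth = 2, eta = 1, nthread = 2, nrounds = 5,
               objective = "binary:logistic")
imp <- xgb.importance(model = bst)
sum(imp$Gain)    # ~1 by construction (see the normalization in the code above)
head(imp, 3)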
# ---- end of file: R/xgb.importance.R ----
#' Load xgboost model from binary file
#'
#' Load xgboost model from the binary model file.
#'
#' @param modelfile the name of the binary input file.
#'
#' @details
#' The input file is expected to contain a model saved in an xgboost model format
#' using either \code{\link{xgb.save}} or \code{\link{cb.save.model}} in R, or using some
#' appropriate methods from other xgboost interfaces. E.g., a model trained in Python and
#' saved from there in xgboost format, could be loaded from R.
#'
#' Note: a model saved as an R-object has to be loaded using the corresponding R-methods,
#' not \code{xgb.load}.
#'
#' @return
#' An object of \code{xgb.Booster} class.
#'
#' @seealso
#' \code{\link{xgb.save}}, \code{\link{xgb.Booster.complete}}.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#'
#' ## Keep the number of threads to 1 for examples
#' nthread <- 1
#' data.table::setDTthreads(nthread)
#'
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(
#' data = train$data, label = train$label, max_depth = 2, eta = 1,
#' nthread = nthread,
#' nrounds = 2,
#' objective = "binary:logistic"
#' )
#'
#' xgb.save(bst, 'xgb.model')
#' bst <- xgb.load('xgb.model')
#' if (file.exists('xgb.model')) file.remove('xgb.model')
#' @export
xgb.load <- function(modelfile) {
if (is.null(modelfile))
stop("xgb.load: modelfile cannot be NULL")
handle <- xgb.Booster.handle(modelfile = modelfile)
# re-use modelfile if it is raw so we do not need to serialize
if (typeof(modelfile) == "raw") {
warning(
paste(
"The support for loading raw booster with `xgb.load` will be ",
"discontinued in upcoming release. Use `xgb.load.raw` or",
" `xgb.unserialize` instead. "
)
)
bst <- xgb.handleToBooster(handle, modelfile)
} else {
bst <- xgb.handleToBooster(handle, NULL)
}
bst <- xgb.Booster.complete(bst, saveraw = TRUE)
return(bst)
}
# ---- end of file: R/xgb.load.R ----
#' Load serialised xgboost model from R's raw vector
#'
#' A user can generate the raw memory buffer by calling \code{xgb.save.raw}.
#'
#' @param buffer the buffer returned by xgb.save.raw
#' @param as_booster Return the loaded model as xgb.Booster instead of xgb.Booster.handle.
#'
#' @export
xgb.load.raw <- function(buffer, as_booster = FALSE) {
cachelist <- list()
handle <- .Call(XGBoosterCreate_R, cachelist)
.Call(XGBoosterLoadModelFromRaw_R, handle, buffer)
class(handle) <- "xgb.Booster.handle"
if (as_booster) {
booster <- list(handle = handle, raw = NULL)
class(booster) <- "xgb.Booster"
booster <- xgb.Booster.complete(booster, saveraw = TRUE)
return(booster)
} else {
    return(handle)
}
}
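# Illustrative sketch (added for exposition, not part of the original source):
# round-tripping a booster through a raw vector, e.g. to keep it in memory or in a
# database field instead of a file. The toy model and object names are assumptions.
data(agaricus.train, package = 'xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
               objective = "binary:logistic")
raw_bytes <- xgb.save.raw(bst)                     # raw vector representation
bst2 <- xgb.load.raw(raw_bytes, as_booster = TRUE)
all.equal(predict(bst, agaricus.train$data),
          predict(bst2, agaricus.train$data))      # predictions should be identical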
# ---- end of file: R/xgb.load.raw.R ----
#' Parse a boosted tree model text dump
#'
#' Parse a boosted tree model text dump into a \code{data.table} structure.
#'
#' @param feature_names character vector of feature names. If the model already
#' contains feature names, those would be used when \code{feature_names=NULL} (default value).
#' Non-null \code{feature_names} could be provided to override those in the model.
#' @param model object of class \code{xgb.Booster}
#' @param text \code{character} vector previously generated by the \code{xgb.dump}
#' function (where parameter \code{with_stats = TRUE} should have been set).
#' \code{text} takes precedence over \code{model}.
#' @param trees an integer vector of tree indices that should be parsed.
#' If set to \code{NULL}, all trees of the model are parsed.
#' It could be useful, e.g., in multiclass classification to get only
#' the trees of one certain class. IMPORTANT: the tree index in xgboost models
#' is zero-based (e.g., use \code{trees = 0:4} for first 5 trees).
#' @param use_int_id a logical flag indicating whether nodes in columns "Yes", "No", "Missing" should be
#'                   represented as integers (when TRUE) or as "Tree-Node" character strings (when FALSE).
#' @param ... currently not used.
#'
#' @return
#' A \code{data.table} with detailed information about model trees' nodes.
#'
#' The columns of the \code{data.table} are:
#'
#' \itemize{
#' \item \code{Tree}: integer ID of a tree in a model (zero-based index)
#' \item \code{Node}: integer ID of a node in a tree (zero-based index)
#' \item \code{ID}: character identifier of a node in a model (only when \code{use_int_id=FALSE})
#' \item \code{Feature}: for a branch node, it's a feature id or name (when available);
#'     for a leaf node, it simply labels it as \code{'Leaf'}
#' \item \code{Split}: location of the split for a branch node (split condition is always "less than")
#' \item \code{Yes}: ID of the next node when the split condition is met
#' \item \code{No}: ID of the next node when the split condition is not met
#' \item \code{Missing}: ID of the next node when branch value is missing
#' \item \code{Quality}: either the split gain (change in loss) or the leaf value
#' \item \code{Cover}: metric related to the number of observation either seen by a split
#' or collected by a leaf during training.
#' }
#'
#' When \code{use_int_id=FALSE}, columns "Yes", "No", and "Missing" point to model-wide node identifiers
#' in the "ID" column. When \code{use_int_id=TRUE}, those columns point to node identifiers from
#' the corresponding trees in the "Node" column.
#'
#' @examples
#' # Basic use:
#'
#' data(agaricus.train, package='xgboost')
#' ## Keep the number of threads to 1 for examples
#' nthread <- 1
#' data.table::setDTthreads(nthread)
#'
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
#' eta = 1, nthread = nthread, nrounds = 2,objective = "binary:logistic")
#'
#' (dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst))
#'
#' # This bst model already has feature_names stored with it, so those would be used when
#' # feature_names is not set:
#' (dt <- xgb.model.dt.tree(model = bst))
#'
#' # How to match feature names of splits that are following a current 'Yes' branch:
#'
#' merge(dt, dt[, .(ID, Y.Feature=Feature)], by.x='Yes', by.y='ID', all.x=TRUE)[order(Tree,Node)]
#'
#' @export
xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
trees = NULL, use_int_id = FALSE, ...){
check.deprecation(...)
if (!inherits(model, "xgb.Booster") && !is.character(text)) {
stop("Either 'model' must be an object of class xgb.Booster\n",
" or 'text' must be a character vector with the result of xgb.dump\n",
" (or NULL if 'model' was provided).")
}
if (is.null(feature_names) && !is.null(model) && !is.null(model$feature_names))
feature_names <- model$feature_names
if (!(is.null(feature_names) || is.character(feature_names))) {
stop("feature_names: must be a character vector")
}
if (!(is.null(trees) || is.numeric(trees))) {
stop("trees: must be a vector of integers.")
}
if (is.null(text)){
text <- xgb.dump(model = model, with_stats = TRUE)
}
if (length(text) < 2 ||
      sum(grepl("leaf=", text, fixed = TRUE)) < 1) {
stop("Non-tree model detected! This function can only be used with tree models.")
}
position <- which(grepl("booster", text, fixed = TRUE))
add.tree.id <- function(node, tree) if (use_int_id) node else paste(tree, node, sep = "-")
anynumber_regex <- "[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?"
td <- data.table(t = text)
td[position, Tree := 1L]
td[, Tree := cumsum(ifelse(is.na(Tree), 0L, Tree)) - 1L]
if (is.null(trees)) {
trees <- 0:max(td$Tree)
} else {
trees <- trees[trees >= 0 & trees <= max(td$Tree)]
}
td <- td[Tree %in% trees & !grepl('^booster', t)]
td[, Node := as.integer(sub("^([0-9]+):.*", "\\1", t))]
if (!use_int_id) td[, ID := add.tree.id(Node, Tree)]
td[, isLeaf := grepl("leaf", t, fixed = TRUE)]
# parse branch lines
branch_rx <- paste0("f(\\d+)<(", anynumber_regex, ")\\] yes=(\\d+),no=(\\d+),missing=(\\d+),",
"gain=(", anynumber_regex, "),cover=(", anynumber_regex, ")")
branch_cols <- c("Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover")
td[
isLeaf == FALSE,
(branch_cols) := {
matches <- regmatches(t, regexec(branch_rx, t))
# skip some indices with spurious capture groups from anynumber_regex
xtr <- do.call(rbind, matches)[, c(2, 3, 5, 6, 7, 8, 10), drop = FALSE]
xtr[, 3:5] <- add.tree.id(xtr[, 3:5], Tree)
if (length(xtr) == 0) {
as.data.table(
list(Feature = "NA", Split = "NA", Yes = "NA", No = "NA", Missing = "NA", Quality = "NA", Cover = "NA")
)
} else {
as.data.table(xtr)
}
}
]
# assign feature_names when available
is_stump <- function() {
return(length(td$Feature) == 1 && is.na(td$Feature))
}
if (!is.null(feature_names) && !is_stump()) {
if (length(feature_names) <= max(as.numeric(td$Feature), na.rm = TRUE))
stop("feature_names has less elements than there are features used in the model")
td[isLeaf == FALSE, Feature := feature_names[as.numeric(Feature) + 1]]
}
# parse leaf lines
leaf_rx <- paste0("leaf=(", anynumber_regex, "),cover=(", anynumber_regex, ")")
leaf_cols <- c("Feature", "Quality", "Cover")
td[
isLeaf == TRUE,
(leaf_cols) := {
matches <- regmatches(t, regexec(leaf_rx, t))
xtr <- do.call(rbind, matches)[, c(2, 4)]
if (length(xtr) == 2) {
c("Leaf", as.data.table(xtr[1]), as.data.table(xtr[2]))
} else {
c("Leaf", as.data.table(xtr))
}
}
]
# convert some columns to numeric
numeric_cols <- c("Split", "Quality", "Cover")
td[, (numeric_cols) := lapply(.SD, as.numeric), .SDcols = numeric_cols]
if (use_int_id) {
int_cols <- c("Yes", "No", "Missing")
td[, (int_cols) := lapply(.SD, as.integer), .SDcols = int_cols]
}
td[, t := NULL]
td[, isLeaf := NULL]
td[order(Tree, Node)]
}
# Avoid error messages during CRAN check.
# The reason is that these variables are never declared
# They are mainly column names inferred by Data.table...
globalVariables(c("Tree", "Node", "ID", "Feature", "t", "isLeaf", ".SD", ".SDcols"))
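# Illustrative sketch (added for exposition, not part of the original source):
# summarizing the parsed tree table, e.g. counting leaves and split occurrences.
# The toy model is an assumption.
data(agaricus.train, package = 'xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               max_depth = 3, eta = 1, nthread = 2, nrounds = 3,
               objective = "binary:logistic")
dt <- xgb.model.dt.tree(model = bst)
dt[Feature == "Leaf", .N, by = Tree]                                # leaves per tree
dt[Feature != "Leaf", .N, by = .(Tree, Feature)][order(Tree, -N)]   # split counts per feature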
# ---- end of file: R/xgb.model.dt.tree.R ----
#' Plot model trees deepness
#'
#' Visualizes distributions related to depth of tree leafs.
#' \code{xgb.plot.deepness} uses base R graphics, while \code{xgb.ggplot.deepness} uses the ggplot backend.
#'
#' @param model either an \code{xgb.Booster} model generated by the \code{xgb.train} function
#' or a data.table result of the \code{xgb.model.dt.tree} function.
#' @param plot (base R barplot) whether a barplot should be produced.
#' If FALSE, only a data.table is returned.
#' @param which which distribution to plot (see details).
#' @param ... other parameters passed to \code{barplot} or \code{plot}.
#'
#' @details
#'
#' When \code{which="2x1"}, two distributions with respect to the leaf depth
#' are plotted on top of each other:
#' \itemize{
#'  \item the distribution of the number of leaves in a tree model at a certain depth;
#'  \item the distribution of average weighted number of observations ("cover")
#'        ending up in leaves at a certain depth.
#' }
#' Those could be helpful in determining sensible ranges of the \code{max_depth}
#' and \code{min_child_weight} parameters.
#'
#' When \code{which="max.depth"} or \code{which="med.depth"}, plots of either maximum or median depth
#' per tree with respect to tree number are created. And \code{which="med.weight"} allows to see how
#' a tree's median absolute leaf weight changes through the iterations.
#'
#' This function was inspired by the blog post
#' \url{https://github.com/aysent/random-forest-leaf-visualization}.
#'
#' @return
#'
#' Other than producing plots (when \code{plot=TRUE}), the \code{xgb.plot.deepness} function
#' silently returns a processed data.table where each row corresponds to a terminal leaf in a tree model,
#' and contains information about leaf's depth, cover, and weight (which is used in calculating predictions).
#'
#' The \code{xgb.ggplot.deepness} silently returns either a list of two ggplot graphs when \code{which="2x1"}
#' or a single ggplot graph for the other \code{which} options.
#'
#' @seealso
#'
#' \code{\link{xgb.train}}, \code{\link{xgb.model.dt.tree}}.
#'
#' @examples
#'
#' data(agaricus.train, package='xgboost')
#' ## Keep the number of threads to 2 for examples
#' nthread <- 2
#' data.table::setDTthreads(nthread)
#'
#' ## Change max_depth to a higher number to get a more significant result
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 6,
#' eta = 0.1, nthread = nthread, nrounds = 50, objective = "binary:logistic",
#' subsample = 0.5, min_child_weight = 2)
#'
#' xgb.plot.deepness(bst)
#' xgb.ggplot.deepness(bst)
#'
#' xgb.plot.deepness(bst, which='max.depth', pch=16, col=rgb(0,0,1,0.3), cex=2)
#'
#' xgb.plot.deepness(bst, which='med.weight', pch=16, col=rgb(0,0,1,0.3), cex=2)
#'
#' @rdname xgb.plot.deepness
#' @export
xgb.plot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med.depth", "med.weight"),
plot = TRUE, ...) {
if (!(inherits(model, "xgb.Booster") || is.data.table(model)))
stop("model: Has to be either an xgb.Booster model generaged by the xgb.train function\n",
"or a data.table result of the xgb.importance function")
if (!requireNamespace("igraph", quietly = TRUE))
stop("igraph package is required for plotting the graph deepness.", call. = FALSE)
which <- match.arg(which)
dt_tree <- model
if (inherits(model, "xgb.Booster"))
dt_tree <- xgb.model.dt.tree(model = model)
if (!all(c("Feature", "Tree", "ID", "Yes", "No", "Cover") %in% colnames(dt_tree)))
stop("Model tree columns are not as expected!\n",
" Note that this function works only for tree models.")
dt_depths <- merge(get.leaf.depth(dt_tree), dt_tree[, .(ID, Cover, Weight = Quality)], by = "ID")
setkeyv(dt_depths, c("Tree", "ID"))
# count by depth levels, and also calculate average cover at a depth
dt_summaries <- dt_depths[, .(.N, Cover = mean(Cover)), Depth]
setkey(dt_summaries, "Depth")
if (plot) {
if (which == "2x1") {
op <- par(no.readonly = TRUE)
par(mfrow = c(2, 1),
oma = c(3, 1, 3, 1) + 0.1,
mar = c(1, 4, 1, 0) + 0.1)
dt_summaries[, barplot(N, border = NA, ylab = 'Number of leafs', ...)]
dt_summaries[, barplot(Cover, border = NA, ylab = "Weighted cover", names.arg = Depth, ...)]
title("Model complexity", xlab = "Leaf depth", outer = TRUE, line = 1)
par(op)
} else if (which == "max.depth") {
dt_depths[, max(Depth), Tree][
, plot(jitter(V1, amount = 0.1) ~ Tree, ylab = 'Max tree leaf depth', xlab = "tree #", ...)]
} else if (which == "med.depth") {
dt_depths[, median(as.numeric(Depth)), Tree][
, plot(jitter(V1, amount = 0.1) ~ Tree, ylab = 'Median tree leaf depth', xlab = "tree #", ...)]
} else if (which == "med.weight") {
dt_depths[, median(abs(Weight)), Tree][
, plot(V1 ~ Tree, ylab = 'Median absolute leaf weight', xlab = "tree #", ...)]
}
}
invisible(dt_depths)
}
# Extract path depths from root to leaf
# from data.table containing the nodes and edges of the trees.
# internal utility function
get.leaf.depth <- function(dt_tree) {
# extract tree graph's edges
dt_edges <- rbindlist(list(
dt_tree[Feature != "Leaf", .(ID, To = Yes, Tree)],
dt_tree[Feature != "Leaf", .(ID, To = No, Tree)]
))
# whether "To" is a leaf:
dt_edges <-
merge(dt_edges,
dt_tree[Feature == "Leaf", .(ID, Leaf = TRUE)],
all.x = TRUE, by.x = "To", by.y = "ID")
dt_edges[is.na(Leaf), Leaf := FALSE]
dt_edges[, {
graph <- igraph::graph_from_data_frame(.SD[, .(ID, To)])
# min(ID) in a tree is a root node
paths_tmp <- igraph::shortest_paths(graph, from = min(ID), to = To[Leaf == TRUE])
# list of paths to each leaf in a tree
paths <- lapply(paths_tmp$vpath, names)
# combine into a resulting path lengths table for a tree
data.table(Depth = sapply(paths, length), ID = To[Leaf == TRUE])
}, by = Tree]
}
# Avoid error messages during CRAN check.
# The reason is that these variables are never declared
# They are mainly column names inferred by Data.table...
globalVariables(
c(
".N", "N", "Depth", "Quality", "Cover", "Tree", "ID", "Yes", "No", "Feature", "Leaf", "Weight"
)
)
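# Illustrative sketch (added for exposition, not part of the original source):
# using plot = FALSE to obtain the per-leaf depth/cover/weight table and summarize
# it without drawing. The toy model is an assumption; igraph is still required.
if (requireNamespace("igraph", quietly = TRUE)) {
  data(agaricus.train, package = 'xgboost')
  bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
                 max_depth = 6, eta = 0.1, nthread = 2, nrounds = 10,
                 objective = "binary:logistic")
  dd <- xgb.plot.deepness(bst, plot = FALSE)
  dd[, .(leaves = .N, mean_depth = mean(Depth), mean_cover = mean(Cover)), by = Tree]
}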
# ---- end of file: R/xgb.plot.deepness.R ----
#' Plot feature importance as a bar graph
#'
#' Represents previously calculated feature importance as a bar graph.
#' \code{xgb.plot.importance} uses base R graphics, while \code{xgb.ggplot.importance} uses the ggplot backend.
#'
#' @param importance_matrix a \code{data.table} returned by \code{\link{xgb.importance}}.
#' @param top_n maximal number of top features to include into the plot.
#' @param measure the name of importance measure to plot.
#' When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear.
#' @param rel_to_first whether importance values should be represented as relative to the highest ranked feature.
#' See Details.
#' @param left_margin (base R barplot) allows to adjust the left margin size to fit feature names.
#' When it is NULL, the existing \code{par('mar')} is used.
#' @param cex (base R barplot) passed as \code{cex.names} parameter to \code{barplot}.
#' @param plot (base R barplot) whether a barplot should be produced.
#' If FALSE, only a data.table is returned.
#' @param n_clusters (ggplot only) a \code{numeric} vector containing the min and the max range
#' of the possible number of clusters of bars.
#' @param ... other parameters passed to \code{barplot} (except horiz, border, cex.names, names.arg, and las).
#'
#' @details
#' The graph represents each feature as a horizontal bar of length proportional to the importance of a feature.
#' Features are shown ranked in a decreasing importance order.
#' It works for importances from both \code{gblinear} and \code{gbtree} models.
#'
#' When \code{rel_to_first = FALSE}, the values would be plotted as they were in \code{importance_matrix}.
#' For gbtree model, that would mean being normalized to the total of 1
#' ("what is feature's importance contribution relative to the whole model?").
#' For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients.
#' Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of
#' "what is feature's importance contribution relative to the most important feature?"
#'
#' The ggplot-backend method also performs 1-D clustering of the importance values,
#' with bar colors corresponding to different clusters that have somewhat similar importance values.
#'
#' @return
#' The \code{xgb.plot.importance} function creates a \code{barplot} (when \code{plot=TRUE})
#' and silently returns a processed data.table with \code{top_n} features sorted by importance.
#'
#' The \code{xgb.ggplot.importance} function returns a ggplot graph which could be customized afterwards.
#' E.g., to change the title of the graph, add \code{+ ggtitle("A GRAPH NAME")} to the result.
#'
#' @seealso
#' \code{\link[graphics]{barplot}}.
#'
#' @examples
#' data(agaricus.train)
#' ## Keep the number of threads to 2 for examples
#' nthread <- 2
#' data.table::setDTthreads(nthread)
#'
#' bst <- xgboost(
#' data = agaricus.train$data, label = agaricus.train$label, max_depth = 3,
#' eta = 1, nthread = nthread, nrounds = 2, objective = "binary:logistic"
#' )
#'
#' importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst)
#'
#' xgb.plot.importance(importance_matrix, rel_to_first = TRUE, xlab = "Relative importance")
#'
#' (gg <- xgb.ggplot.importance(importance_matrix, measure = "Frequency", rel_to_first = TRUE))
#' gg + ggplot2::ylab("Frequency")
#'
#' @rdname xgb.plot.importance
#' @export
xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure = NULL,
rel_to_first = FALSE, left_margin = 10, cex = NULL, plot = TRUE, ...) {
check.deprecation(...)
if (!is.data.table(importance_matrix)) {
stop("importance_matrix: must be a data.table")
}
imp_names <- colnames(importance_matrix)
if (is.null(measure)) {
if (all(c("Feature", "Gain") %in% imp_names)) {
measure <- "Gain"
} else if (all(c("Feature", "Weight") %in% imp_names)) {
measure <- "Weight"
} else {
stop("Importance matrix column names are not as expected!")
}
} else {
if (!measure %in% imp_names)
stop("Invalid `measure`")
if (!"Feature" %in% imp_names)
stop("Importance matrix column names are not as expected!")
}
  # also aggregate, just in case the values were not yet summed up by feature
importance_matrix <- importance_matrix[, Importance := sum(get(measure)), by = Feature]
# make sure it's ordered
importance_matrix <- importance_matrix[order(-abs(Importance))]
if (!is.null(top_n)) {
top_n <- min(top_n, nrow(importance_matrix))
importance_matrix <- head(importance_matrix, top_n)
}
if (rel_to_first) {
importance_matrix[, Importance := Importance / max(abs(Importance))]
}
if (is.null(cex)) {
cex <- 2.5 / log2(1 + nrow(importance_matrix))
}
if (plot) {
original_mar <- par()$mar
# reset margins so this function doesn't have side effects
on.exit({par(mar = original_mar)})
mar <- original_mar
if (!is.null(left_margin))
mar[2] <- left_margin
par(mar = mar)
# reverse the order of rows to have the highest ranked at the top
importance_matrix[rev(seq_len(nrow(importance_matrix))),
barplot(Importance, horiz = TRUE, border = NA, cex.names = cex,
names.arg = Feature, las = 1, ...)]
}
invisible(importance_matrix)
}
# Avoid error messages during CRAN check.
# The reason is that these variables are never declared
# They are mainly column names inferred by Data.table...
globalVariables(c("Feature", "Importance"))
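# Illustrative sketch (added for exposition, not part of the original source):
# the processed importance table is also returned invisibly, which is handy when
# only the top features are needed. The toy model is an assumption.
data(agaricus.train, package = 'xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               max_depth = 3, eta = 1, nthread = 2, nrounds = 2,
               objective = "binary:logistic")
imp <- xgb.importance(model = bst)
top5 <- xgb.plot.importance(imp, top_n = 5, measure = "Gain", plot = FALSE)
top5[, .(Feature, Importance)]   # features ordered by aggregated importance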
# ---- end of file: R/xgb.plot.importance.R ----
#' Project all trees on one tree and plot it
#'
#' Visualization of the ensemble of trees as a single collective unit.
#'
#' @param model produced by the \code{xgb.train} function.
#' @param feature_names names of each feature as a \code{character} vector.
#' @param features_keep number of features to keep in each position of the multi trees.
#' @param plot_width width in pixels of the graph to produce
#' @param plot_height height in pixels of the graph to produce
#' @param render a logical flag for whether the graph should be rendered (see Value).
#' @param ... currently not used
#'
#' @details
#'
#' This function tries to capture the complexity of a gradient boosted tree model
#' in a cohesive way by compressing an ensemble of trees into a single tree-graph representation.
#' The goal is to improve the interpretability of a model generally seen as a black box.
#'
#' Note: this function is applicable to tree booster-based models only.
#'
#' It takes advantage of the fact that the shape of a binary tree is only defined by
#' its depth (therefore, in a boosting model, all trees have similar shape).
#'
#' Moreover, the trees tend to reuse the same features.
#'
#' The function projects each tree onto one, and for each position keeps the first
#' \code{features_keep} features (based on the Gain per feature measure).
#'
#' This function is inspired by this blog post:
#' \url{https://wellecks.wordpress.com/2015/02/21/peering-into-the-black-box-visualizing-lambdamart/}
#'
#' @return
#'
#' When \code{render = TRUE}:
#' returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}.
#' Similar to ggplot objects, it needs to be printed to see it when not running from command line.
#'
#' When \code{render = FALSE}:
#' silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}.
#' This could be useful if one wants to modify some of the graph attributes
#' before rendering the graph with \code{\link[DiagrammeR]{render_graph}}.
#'
#' @examples
#'
#' data(agaricus.train, package='xgboost')
#' ## Keep the number of threads to 2 for examples
#' nthread <- 2
#' data.table::setDTthreads(nthread)
#'
#' bst <- xgboost(
#' data = agaricus.train$data, label = agaricus.train$label, max_depth = 15,
#' eta = 1, nthread = nthread, nrounds = 30, objective = "binary:logistic",
#' min_child_weight = 50, verbose = 0
#' )
#'
#' p <- xgb.plot.multi.trees(model = bst, features_keep = 3)
#' print(p)
#'
#' \dontrun{
#' # Below is an example of how to save this plot to a file.
#' # Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed.
#' library(DiagrammeR)
#' gr <- xgb.plot.multi.trees(model=bst, features_keep = 3, render=FALSE)
#' export_graph(gr, 'tree.pdf', width=1500, height=600)
#' }
#'
#' @export
xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5, plot_width = NULL, plot_height = NULL,
render = TRUE, ...){
if (!requireNamespace("DiagrammeR", quietly = TRUE)) {
stop("DiagrammeR is required for xgb.plot.multi.trees")
}
check.deprecation(...)
tree.matrix <- xgb.model.dt.tree(feature_names = feature_names, model = model)
# first number of the path represents the tree, then the following numbers are related to the path to follow
# root init
root.nodes <- tree.matrix[Node == 0, ID]
tree.matrix[ID %in% root.nodes, abs.node.position := root.nodes]
precedent.nodes <- root.nodes
while (tree.matrix[, sum(is.na(abs.node.position))] > 0) {
yes.row.nodes <- tree.matrix[abs.node.position %in% precedent.nodes & !is.na(Yes)]
no.row.nodes <- tree.matrix[abs.node.position %in% precedent.nodes & !is.na(No)]
yes.nodes.abs.pos <- paste0(yes.row.nodes[, abs.node.position], "_0")
no.nodes.abs.pos <- paste0(no.row.nodes[, abs.node.position], "_1")
tree.matrix[ID %in% yes.row.nodes[, Yes], abs.node.position := yes.nodes.abs.pos]
tree.matrix[ID %in% no.row.nodes[, No], abs.node.position := no.nodes.abs.pos]
precedent.nodes <- c(yes.nodes.abs.pos, no.nodes.abs.pos)
}
tree.matrix[!is.na(Yes), Yes := paste0(abs.node.position, "_0")]
tree.matrix[!is.na(No), No := paste0(abs.node.position, "_1")]
for (nm in c("abs.node.position", "Yes", "No"))
data.table::set(tree.matrix, j = nm, value = sub("^\\d+-", "", tree.matrix[[nm]]))
nodes.dt <- tree.matrix[
, .(Quality = sum(Quality))
, by = .(abs.node.position, Feature)
][, .(Text = paste0(
paste0(
Feature[1:min(length(Feature), features_keep)],
" (",
format(Quality[1:min(length(Quality), features_keep)], digits = 5),
")"
),
collapse = "\n"
)
)
, by = abs.node.position
]
edges.dt <- data.table::rbindlist(
l = list(
tree.matrix[Feature != "Leaf", .(abs.node.position, Yes)],
tree.matrix[Feature != "Leaf", .(abs.node.position, No)]
)
)
data.table::setnames(edges.dt, c("From", "To"))
edges.dt <- edges.dt[, .N, .(From, To)]
edges.dt[, N := NULL]
nodes <- DiagrammeR::create_node_df(
n = nrow(nodes.dt),
label = nodes.dt[, Text]
)
edges <- DiagrammeR::create_edge_df(
from = match(edges.dt[, From], nodes.dt[, abs.node.position]),
to = match(edges.dt[, To], nodes.dt[, abs.node.position]),
rel = "leading_to")
graph <- DiagrammeR::create_graph(
nodes_df = nodes,
edges_df = edges,
attr_theme = NULL
)
graph <- DiagrammeR::add_global_graph_attrs(
graph = graph,
attr_type = "graph",
attr = c("layout", "rankdir"),
value = c("dot", "LR")
)
graph <- DiagrammeR::add_global_graph_attrs(
graph = graph,
attr_type = "node",
attr = c("color", "fillcolor", "style", "shape", "fontname"),
value = c("DimGray", "beige", "filled", "rectangle", "Helvetica")
)
graph <- DiagrammeR::add_global_graph_attrs(
graph = graph,
attr_type = "edge",
attr = c("color", "arrowsize", "arrowhead", "fontname"),
value = c("DimGray", "1.5", "vee", "Helvetica")
)
if (!render) return(invisible(graph))
DiagrammeR::render_graph(graph, width = plot_width, height = plot_height)
}
globalVariables(c(".N", "N", "From", "To", "Text", "Feature", "no.nodes.abs.pos",
"ID", "Yes", "No", "Tree", "yes.nodes.abs.pos", "abs.node.position"))
# ---- end of file: R/xgb.plot.multi.trees.R ----
#' SHAP contribution dependency plots
#'
#' Visualizing the SHAP feature contribution to prediction dependencies on feature value.
#'
#' @param data data as a \code{matrix} or \code{dgCMatrix}.
#' @param shap_contrib a matrix of SHAP contributions that was computed earlier for the above
#' \code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.
#' @param features a vector of either column indices or of feature names to plot. When it is NULL,
#' feature importance is calculated, and \code{top_n} high ranked features are taken.
#' @param top_n when \code{features} is NULL, top_n [1, 100] most important features in a model are taken.
#' @param model an \code{xgb.Booster} model. It has to be provided when either \code{shap_contrib}
#' or \code{features} is missing.
#' @param trees passed to \code{\link{xgb.importance}} when \code{features = NULL}.
#' @param target_class is only relevant for multiclass models. When it is set to a 0-based class index,
#' only SHAP contributions for that specific class are used.
#' If it is not set, SHAP importances are averaged over all classes.
#' @param approxcontrib passed to \code{\link{predict.xgb.Booster}} when \code{shap_contrib = NULL}.
#' @param subsample a random fraction of data points to use for plotting. When it is NULL,
#' it is set so that up to 100K data points are used.
#' @param n_col a number of columns in a grid of plots.
#' @param col color of the scatterplot markers.
#' @param pch scatterplot marker.
#' @param discrete_n_uniq a maximal number of unique values in a feature to consider it as discrete.
#' @param discrete_jitter an \code{amount} parameter of jitter added to discrete features' positions.
#' @param ylab a y-axis label in 1D plots.
#' @param plot_NA whether the contributions of cases with missing values should also be plotted.
#' @param col_NA a color of marker for missing value contributions.
#' @param pch_NA a marker type for NA values.
#' @param pos_NA a relative position of the x-location where NA values are shown:
#' \code{min(x) + (max(x) - min(x)) * pos_NA}.
#' @param plot_loess whether to plot loess-smoothed curves. The smoothing is only done for features with
#' more than 5 distinct values.
#' @param col_loess a color to use for the loess curves.
#' @param span_loess the \code{span} parameter in \code{\link[stats]{loess}}'s call.
#' @param which whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.
#' @param plot whether a plot should be drawn. If FALSE, only a list of matrices is returned.
#' @param ... other parameters passed to \code{plot}.
#'
#' @details
#'
#' These scatterplots represent how SHAP feature contributions depend on feature values.
#' The similarity to partial dependency plots is that they also give an idea of how feature values
#' affect predictions. However, in partial dependency plots, we usually see marginal dependencies
#' of model prediction on feature value, while SHAP contribution dependency plots display the estimated
#' contributions of a feature to model prediction for each individual case.
#'
#' When \code{plot_loess = TRUE} is set, feature values are rounded to 3 significant digits and
#' weighted LOESS is computed and plotted, where weights are the numbers of data points
#' at each rounded value.
#'
#' Note: SHAP contributions are shown on the scale of model margin. E.g., for a logistic binomial objective,
#' the margin is the prediction before a sigmoidal transform into probability-like values.
#' Also, since SHAP stands for "SHapley Additive exPlanation" (model prediction = sum of SHAP
#' contributions for all features + bias), depending on the objective used, transforming SHAP
#' contributions for a feature from the marginal to the prediction space is not necessarily
#' a meaningful thing to do.
#'
#' @return
#'
#' In addition to producing plots (when \code{plot=TRUE}), it silently returns a list of two matrices:
#' \itemize{
#' \item \code{data} the values of selected features;
#' \item \code{shap_contrib} the contributions of selected features.
#' }
#'
#' @references
#'
#' Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions", NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874}
#'
#' Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles", \url{https://arxiv.org/abs/1706.06060}
#'
#' @examples
#'
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#'
#' ## Keep the number of threads to 1 for examples
#' nthread <- 1
#' data.table::setDTthreads(nthread)
#' nrounds <- 20
#'
#' bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = nrounds,
#' eta = 0.1, max_depth = 3, subsample = .5,
#' method = "hist", objective = "binary:logistic", nthread = nthread, verbose = 0)
#'
#' xgb.plot.shap(agaricus.test$data, model = bst, features = "odor=none")
#' contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)
#' xgb.plot.shap(agaricus.test$data, contr, model = bst, top_n = 12, n_col = 3)
#' xgb.ggplot.shap.summary(agaricus.test$data, contr, model = bst, top_n = 12) # Summary plot
#'
#' # multiclass example - plots for each class separately:
#' nclass <- 3
#' x <- as.matrix(iris[, -5])
#' set.seed(123)
#' is.na(x[sample(nrow(x) * 4, 30)]) <- TRUE # introduce some missing values
#' mbst <- xgboost(data = x, label = as.numeric(iris$Species) - 1, nrounds = nrounds,
#' max_depth = 2, eta = 0.3, subsample = .5, nthread = nthread,
#' objective = "multi:softprob", num_class = nclass, verbose = 0)
#' trees0 <- seq(from=0, by=nclass, length.out=nrounds)
#' col <- rgb(0, 0, 1, 0.5)
#' xgb.plot.shap(x, model = mbst, trees = trees0, target_class = 0, top_n = 4,
#' n_col = 2, col = col, pch = 16, pch_NA = 17)
#' xgb.plot.shap(x, model = mbst, trees = trees0 + 1, target_class = 1, top_n = 4,
#' n_col = 2, col = col, pch = 16, pch_NA = 17)
#' xgb.plot.shap(x, model = mbst, trees = trees0 + 2, target_class = 2, top_n = 4,
#' n_col = 2, col = col, pch = 16, pch_NA = 17)
#' xgb.ggplot.shap.summary(x, model = mbst, target_class = 0, top_n = 4) # Summary plot
#'
#' @rdname xgb.plot.shap
#' @export
xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1, model = NULL,
trees = NULL, target_class = NULL, approxcontrib = FALSE,
subsample = NULL, n_col = 1, col = rgb(0, 0, 1, 0.2), pch = '.',
discrete_n_uniq = 5, discrete_jitter = 0.01, ylab = "SHAP",
plot_NA = TRUE, col_NA = rgb(0.7, 0, 1, 0.6), pch_NA = '.', pos_NA = 1.07,
plot_loess = TRUE, col_loess = 2, span_loess = 0.5,
which = c("1d", "2d"), plot = TRUE, ...) {
data_list <- xgb.shap.data(
data = data,
shap_contrib = shap_contrib,
features = features,
top_n = top_n,
model = model,
trees = trees,
target_class = target_class,
approxcontrib = approxcontrib,
subsample = subsample,
max_observations = 100000
)
data <- data_list[["data"]]
shap_contrib <- data_list[["shap_contrib"]]
features <- colnames(data)
which <- match.arg(which)
if (which == "2d")
stop("2D plots are not implemented yet")
if (n_col > length(features)) n_col <- length(features)
if (plot && which == "1d") {
op <- par(mfrow = c(ceiling(length(features) / n_col), n_col),
oma = c(0, 0, 0, 0) + 0.2,
mar = c(3.5, 3.5, 0, 0) + 0.1,
mgp = c(1.7, 0.6, 0))
for (f in features) {
ord <- order(data[, f])
x <- data[, f][ord]
y <- shap_contrib[, f][ord]
x_lim <- range(x, na.rm = TRUE)
y_lim <- range(y, na.rm = TRUE)
do_na <- plot_NA && any(is.na(x))
if (do_na) {
x_range <- diff(x_lim)
loc_na <- min(x, na.rm = TRUE) + x_range * pos_NA
x_lim <- range(c(x_lim, loc_na))
}
x_uniq <- unique(x)
x2plot <- x
# add small jitter for discrete features with <= 5 distinct values
if (length(x_uniq) <= discrete_n_uniq)
x2plot <- jitter(x, amount = discrete_jitter * min(diff(x_uniq), na.rm = TRUE))
plot(x2plot, y, pch = pch, xlab = f, col = col, xlim = x_lim, ylim = y_lim, ylab = ylab, ...)
grid()
if (plot_loess) {
# compress x to 3 digits, and mean-aggregate y
zz <- data.table(x = signif(x, 3), y)[, .(.N, y = mean(y)), x]
if (nrow(zz) <= 5) {
lines(zz$x, zz$y, col = col_loess)
} else {
lo <- stats::loess(y ~ x, data = zz, weights = zz$N, span = span_loess)
zz$y_lo <- predict(lo, zz, type = "link")
lines(zz$x, zz$y_lo, col = col_loess)
}
}
if (do_na) {
i_na <- which(is.na(x))
x_na <- rep(loc_na, length(i_na))
x_na <- jitter(x_na, amount = x_range * 0.01)
points(x_na, y[i_na], pch = pch_NA, col = col_NA)
}
}
par(op)
}
if (plot && which == "2d") {
# TODO
warning("Bivariate plotting is currently not available.")
}
invisible(list(data = data, shap_contrib = shap_contrib))
}
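# Illustrative sketch (added for exposition, not part of the original source):
# xgb.plot.shap() invisibly returns the selected feature values and their SHAP
# contributions, so the matrices can be reused without re-plotting. The toy model
# is an assumption.
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               max_depth = 3, eta = 0.1, nthread = 2, nrounds = 10,
               objective = "binary:logistic", verbose = 0)
res <- xgb.plot.shap(agaricus.test$data, model = bst, top_n = 2, plot = FALSE)
str(res$shap_contrib)   # matrix with one column per selected feature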
#' SHAP contribution dependency summary plot
#'
#' Compare SHAP contributions of different features.
#'
#' A point plot (each point representing one sample from \code{data}) is
#' produced for each feature, with the points plotted on the SHAP value axis.
#' Each point (observation) is coloured based on its feature value. The plot
#' hence allows us to see which features have a negative / positive contribution
#' on the model prediction, and whether the contribution is different for larger
#' or smaller values of the feature. We effectively try to replicate the
#' \code{summary_plot} function from \url{https://github.com/shap/shap}
#'
#' @inheritParams xgb.plot.shap
#'
#' @return A \code{ggplot2} object.
#' @export
#'
#' @examples # See \code{\link{xgb.plot.shap}}.
#' @seealso \code{\link{xgb.plot.shap}}, \code{\link{xgb.ggplot.shap.summary}},
#' \url{https://github.com/shap/shap}
xgb.plot.shap.summary <- function(data, shap_contrib = NULL, features = NULL, top_n = 10, model = NULL,
trees = NULL, target_class = NULL, approxcontrib = FALSE, subsample = NULL) {
# Only ggplot implementation is available.
xgb.ggplot.shap.summary(data, shap_contrib, features, top_n, model, trees, target_class, approxcontrib, subsample)
}
#' Prepare data for SHAP plots. To be used in xgb.plot.shap, xgb.plot.shap.summary, etc.
#' Internal utility function.
#'
#' @inheritParams xgb.plot.shap
#' @keywords internal
#'
#' @return A list containing: 'data', a matrix containing sample observations
#' and their feature values; 'shap_contrib', a matrix containing the SHAP contribution
#' values for these observations.
xgb.shap.data <- function(data, shap_contrib = NULL, features = NULL, top_n = 1, model = NULL,
trees = NULL, target_class = NULL, approxcontrib = FALSE,
subsample = NULL, max_observations = 100000) {
if (!is.matrix(data) && !inherits(data, "dgCMatrix"))
stop("data: must be either matrix or dgCMatrix")
if (is.null(shap_contrib) && (is.null(model) || !inherits(model, "xgb.Booster")))
stop("when shap_contrib is not provided, one must provide an xgb.Booster model")
if (is.null(features) && (is.null(model) || !inherits(model, "xgb.Booster")))
stop("when features are not provided, one must provide an xgb.Booster model to rank the features")
if (!is.null(shap_contrib) &&
(!is.matrix(shap_contrib) || nrow(shap_contrib) != nrow(data) || ncol(shap_contrib) != ncol(data) + 1))
stop("shap_contrib is not compatible with the provided data")
if (is.character(features) && is.null(colnames(data)))
stop("either provide `data` with column names or provide `features` as column indices")
  if (!is.null(model) && is.null(model$feature_names) && model$nfeatures != ncol(data))
stop("if model has no feature_names, columns in `data` must match features in model")
if (!is.null(subsample)) {
idx <- sample(x = seq_len(nrow(data)), size = as.integer(subsample * nrow(data)), replace = FALSE)
} else {
idx <- seq_len(min(nrow(data), max_observations))
}
data <- data[idx, ]
if (is.null(colnames(data))) {
colnames(data) <- paste0("X", seq_len(ncol(data)))
}
if (!is.null(shap_contrib)) {
if (is.list(shap_contrib)) { # multiclass: either choose a class or merge
shap_contrib <- if (!is.null(target_class)) shap_contrib[[target_class + 1]] else Reduce("+", lapply(shap_contrib, abs))
}
shap_contrib <- shap_contrib[idx, ]
if (is.null(colnames(shap_contrib))) {
colnames(shap_contrib) <- paste0("X", seq_len(ncol(data)))
}
} else {
shap_contrib <- predict(model, newdata = data, predcontrib = TRUE, approxcontrib = approxcontrib)
if (is.list(shap_contrib)) { # multiclass: either choose a class or merge
shap_contrib <- if (!is.null(target_class)) shap_contrib[[target_class + 1]] else Reduce("+", lapply(shap_contrib, abs))
}
}
if (is.null(features)) {
if (!is.null(model$feature_names)) {
imp <- xgb.importance(model = model, trees = trees)
} else {
imp <- xgb.importance(model = model, trees = trees, feature_names = colnames(data))
}
top_n <- top_n[1]
    if (top_n < 1 || top_n > 100) stop("top_n: must be an integer within [1, 100]")
features <- imp$Feature[1:min(top_n, NROW(imp))]
}
if (is.character(features)) {
features <- match(features, colnames(data))
}
shap_contrib <- shap_contrib[, features, drop = FALSE]
data <- data[, features, drop = FALSE]
list(
data = data,
shap_contrib = shap_contrib
)
}
# ---- end of file: R/xgb.plot.shap.R ----
#' Plot a boosted tree model
#'
#' Read a tree model text dump and plot the model.
#'
#' @param feature_names names of each feature as a \code{character} vector.
#' @param model produced by the \code{xgb.train} function.
#' @param trees an integer vector of tree indices that should be visualized.
#' If set to \code{NULL}, all trees of the model are included.
#' IMPORTANT: the tree index in xgboost model is zero-based
#' (e.g., use \code{trees = 0:2} for the first 3 trees in a model).
#' @param plot_width the width of the diagram in pixels.
#' @param plot_height the height of the diagram in pixels.
#' @param render a logical flag for whether the graph should be rendered (see Value).
#' @param show_node_id a logical flag for whether to show node id's in the graph.
#' @param ... currently not used.
#'
#' @details
#'
#' The content of each node is organised that way:
#'
#' \itemize{
#' \item Feature name.
#'  \item \code{Cover}: The sum of second order gradients of the training data classified to this node.
#' If it is square loss, this simply corresponds to the number of instances seen by a split
#' or collected by a leaf during training.
#' The deeper in the tree a node is, the lower this metric will be.
#' \item \code{Gain} (for split nodes): the information gain metric of a split
#' (corresponds to the importance of the node in the model).
#' \item \code{Value} (for leafs): the margin value that the leaf may contribute to prediction.
#' }
#' The tree root nodes also indicate the Tree index (0-based).
#'
#' The "Yes" branches are marked by the "< split_value" label.
#' The branches that are also used for missing values are marked as bold
#' (as in "carrying extra capacity").
#'
#' This function uses \href{https://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR.
#'
#' @return
#'
#' When \code{render = TRUE}:
#' returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}.
#' Similar to ggplot objects, it needs to be printed to see it when not running from command line.
#'
#' When \code{render = FALSE}:
#' silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}.
#' This could be useful if one wants to modify some of the graph attributes
#' before rendering the graph with \code{\link[DiagrammeR]{render_graph}}.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#'
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 3,
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#' # plot all the trees
#' xgb.plot.tree(model = bst)
#' # plot only the first tree and display the node ID:
#' xgb.plot.tree(model = bst, trees = 0, show_node_id = TRUE)
#'
#' \dontrun{
#' # Below is an example of how to save this plot to a file.
#' # Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed.
#' library(DiagrammeR)
#' gr <- xgb.plot.tree(model=bst, trees=0:1, render=FALSE)
#' export_graph(gr, 'tree.pdf', width=1500, height=1900)
#' export_graph(gr, 'tree.png', width=1500, height=1900)
#' }
#'
#' @export
xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot_width = NULL, plot_height = NULL,
render = TRUE, show_node_id = FALSE, ...){
check.deprecation(...)
if (!inherits(model, "xgb.Booster")) {
stop("model: Has to be an object of class xgb.Booster")
}
if (!requireNamespace("DiagrammeR", quietly = TRUE)) {
stop("DiagrammeR package is required for xgb.plot.tree", call. = FALSE)
}
dt <- xgb.model.dt.tree(feature_names = feature_names, model = model, trees = trees)
dt[, label := paste0(Feature, "\nCover: ", Cover, ifelse(Feature == "Leaf", "\nValue: ", "\nGain: "), Quality)]
if (show_node_id)
dt[, label := paste0(ID, ": ", label)]
dt[Node == 0, label := paste0("Tree ", Tree, "\n", label)]
dt[, shape := "rectangle"][Feature == "Leaf", shape := "oval"]
dt[, filledcolor := "Beige"][Feature == "Leaf", filledcolor := "Khaki"]
# in order to draw the first tree on top:
dt <- dt[order(-Tree)]
nodes <- DiagrammeR::create_node_df(
n = nrow(dt),
ID = dt$ID,
label = dt$label,
fillcolor = dt$filledcolor,
shape = dt$shape,
data = dt$Feature,
fontcolor = "black")
if (nrow(dt[Feature != "Leaf"]) != 0) {
edges <- DiagrammeR::create_edge_df(
from = match(rep(dt[Feature != "Leaf", c(ID)], 2), dt$ID),
to = match(dt[Feature != "Leaf", c(Yes, No)], dt$ID),
label = c(
dt[Feature != "Leaf", paste("<", Split)],
rep("", nrow(dt[Feature != "Leaf"]))
),
style = c(
dt[Feature != "Leaf", ifelse(Missing == Yes, "bold", "solid")],
dt[Feature != "Leaf", ifelse(Missing == No, "bold", "solid")]
),
rel = "leading_to")
} else {
edges <- NULL
}
graph <- DiagrammeR::create_graph(
nodes_df = nodes,
edges_df = edges,
attr_theme = NULL
)
graph <- DiagrammeR::add_global_graph_attrs(
graph = graph,
attr_type = "graph",
attr = c("layout", "rankdir"),
value = c("dot", "LR")
)
graph <- DiagrammeR::add_global_graph_attrs(
graph = graph,
attr_type = "node",
attr = c("color", "style", "fontname"),
value = c("DimGray", "filled", "Helvetica")
)
graph <- DiagrammeR::add_global_graph_attrs(
graph = graph,
attr_type = "edge",
attr = c("color", "arrowsize", "arrowhead", "fontname"),
value = c("DimGray", "1.5", "vee", "Helvetica")
)
if (!render) return(invisible(graph))
DiagrammeR::render_graph(graph, width = plot_width, height = plot_height)
}
# Avoid error messages during CRAN check.
# The reason is that these variables are never declared
# They are mainly column names inferred by Data.table...
globalVariables(c("Feature", "ID", "Cover", "Quality", "Split", "Yes", "No", "Missing", ".", "shape", "filledcolor", "label"))
# --- end of R/xgb.plot.tree.R ---
#' Save xgboost model to binary file
#'
#' Save xgboost model to a file in binary format.
#'
#' @param model model object of \code{xgb.Booster} class.
#' @param fname name of the file to write.
#'
#' @details
#' This method allows saving a model in an xgboost-internal binary format which is universal
#' among the various xgboost interfaces. In R, the saved model file can be read in later
#' using either the \code{\link{xgb.load}} function or the \code{xgb_model} parameter
#' of \code{\link{xgb.train}}.
#'
#' Note: a model can also be saved as an R-object (e.g., by using \code{\link[base]{saveRDS}}
#' or \code{\link[base]{save}}). However, it would then only be compatible with R, and
#' corresponding R-methods would need to be used to load it. Moreover, persisting the model with
#' \code{\link[base]{saveRDS}} or \code{\link[base]{save}} will cause compatibility problems in
#' future versions of XGBoost. Consult \code{\link{a-compatibility-note-for-saveRDS-save}} to learn
#' how to persist models in a future-proof way, i.e. to make the model accessible in future
#' releases of XGBoost.
#'
#' @seealso
#' \code{\link{xgb.load}}, \code{\link{xgb.Booster.complete}}.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#'
#' ## Keep the number of threads to 1 for examples
#' nthread <- 1
#' data.table::setDTthreads(nthread)
#'
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(
#' data = train$data, label = train$label, max_depth = 2, eta = 1,
#' nthread = nthread,
#' nrounds = 2,
#' objective = "binary:logistic"
#' )
#' xgb.save(bst, 'xgb.model')
#' bst <- xgb.load('xgb.model')
#' if (file.exists('xgb.model')) file.remove('xgb.model')
#' @export
xgb.save <- function(model, fname) {
if (typeof(fname) != "character")
stop("fname must be character")
if (!inherits(model, "xgb.Booster")) {
stop("model must be xgb.Booster.",
if (inherits(model, "xgb.DMatrix")) " Use xgb.DMatrix.save to save an xgb.DMatrix object." else "")
}
model <- xgb.Booster.complete(model, saveraw = FALSE)
fname <- path.expand(fname)
.Call(XGBoosterSaveModel_R, model$handle, fname[1])
return(TRUE)
}
# --- end of R/xgb.save.R ---
#' Save xgboost model to R's raw vector;
#' the model can be loaded back from the raw vector with \code{xgb.load.raw}
#'
#' Save xgboost model from xgboost or xgb.train
#'
#' @param model the model object.
#' @param raw_format The format for encoding the booster. Available options are
#' \itemize{
#' \item \code{json}: Encode the booster into JSON text document.
#' \item \code{ubj}: Encode the booster into Universal Binary JSON.
#' \item \code{deprecated}: Encode the booster into old customized binary format.
#' }
#'
#' Right now the default is \code{deprecated} but it will be changed to \code{ubj} in an upcoming release.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#'
#' ## Keep the number of threads to 2 for examples
#' nthread <- 2
#' data.table::setDTthreads(nthread)
#'
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = nthread, nrounds = 2,objective = "binary:logistic")
#'
#' raw <- xgb.save.raw(bst)
#' bst <- xgb.load.raw(raw)
#'
#' @export
xgb.save.raw <- function(model, raw_format = "deprecated") {
handle <- xgb.get.handle(model)
args <- list(format = raw_format)
.Call(XGBoosterSaveModelToRaw_R, handle, jsonlite::toJSON(args, auto_unbox = TRUE))
}
# --- end of R/xgb.save.raw.R ---
#' Serialize the booster instance into R's raw vector. The serialization method differs
#' from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
#' parameters. This serialization format is not stable across different xgboost versions.
#'
#' @param booster the booster instance
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#' raw <- xgb.serialize(bst)
#' bst <- xgb.unserialize(raw)
#'
#' @export
xgb.serialize <- function(booster) {
handle <- xgb.get.handle(booster)
.Call(XGBoosterSerializeToBuffer_R, handle)
}
# --- end of R/xgb.serialize.R ---
#' eXtreme Gradient Boosting Training
#'
#' \code{xgb.train} is an advanced interface for training an xgboost model.
#' The \code{xgboost} function is a simpler wrapper for \code{xgb.train}.
#'
#' @param params the list of parameters. The complete list of parameters is
#' available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
#' is a shorter summary:
#'
#' 1. General Parameters
#'
#' \itemize{
#' \item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}.
#' }
#'
#' 2. Booster Parameters
#'
#' 2.1. Parameters for Tree Booster
#'
#' \itemize{
#' \item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3
#' \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be.
#' \item \code{max_depth} maximum depth of a tree. Default: 6
#' \item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
#'   \item \code{subsample} subsample ratio of the training instances. Setting it to 0.5 means that xgboost randomly collects half of the data instances to grow trees, and this will prevent overfitting. It makes computation shorter (because there is less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
#' \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
#' \item \code{lambda} L2 regularization term on weights. Default: 1
#' \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
#'   \item \code{num_parallel_tree} Experimental parameter. Number of trees to grow per round. Useful to test Random Forest through XGBoost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1} accordingly). Default: 1
#'   \item \code{monotone_constraints} A numerical vector consisting of \code{1}, \code{0} and \code{-1}, with its length equal to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
#' \item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
#' }
#'
#' 2.2. Parameters for Linear Booster
#'
#' \itemize{
#' \item \code{lambda} L2 regularization term on weights. Default: 0
#' \item \code{lambda_bias} L2 regularization term on bias. Default: 0
#' \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
#' }
#'
#' 3. Task Parameters
#'
#' \itemize{
#' \item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
#' \itemize{
#' \item \code{reg:squarederror} Regression with squared loss (Default).
#' \item \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}. All inputs are required to be greater than -1. Also, see metric rmsle for possible issue with this objective.
#' \item \code{reg:logistic} logistic regression.
#' \item \code{reg:pseudohubererror}: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
#' \item \code{binary:logistic} logistic regression for binary classification. Output probability.
#' \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
#' \item \code{binary:hinge}: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
#' \item \code{count:poisson}: Poisson regression for count data, output mean of Poisson distribution. \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).
#'       \item \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored). Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional hazard function \code{h(t) = h0(t) * HR}).
#' \item \code{survival:aft}: Accelerated failure time model for censored survival time data. See \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time} for details.
#' \item \code{aft_loss_distribution}: Probability Density Function used by \code{survival:aft} and \code{aft-nloglik} metric.
#' \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
#' \item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
#' \item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
#' \item \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.
#' \item \code{rank:map}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)} is maximized.
#' \item \code{reg:gamma}: gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.
#' \item \code{reg:tweedie}: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.
#' }
#' \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
#' \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective(rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.
#' }
#'
#' @param data training dataset. \code{xgb.train} accepts only an \code{xgb.DMatrix} as the input.
#' \code{xgboost}, in addition, also accepts \code{matrix}, \code{dgCMatrix}, or name of a local data file.
#' @param nrounds max number of boosting iterations.
#' @param watchlist named list of xgb.DMatrix datasets to use for evaluating model performance.
#' Metrics specified in either \code{eval_metric} or \code{feval} will be computed for each
#' of these datasets during each boosting iteration, and stored in the end as a field named
#' \code{evaluation_log} in the resulting object. When either \code{verbose>=1} or
#' \code{\link{cb.print.evaluation}} callback is engaged, the performance results are continuously
#' printed out during the training.
#' E.g., specifying \code{watchlist=list(validation1=mat1, validation2=mat2)} allows to track
#' the performance of each round's model on mat1 and mat2.
#' @param obj customized objective function. Returns gradient and second order
#' gradient with given prediction and dtrain.
#' @param feval customized evaluation function. Returns
#' \code{list(metric='metric-name', value='metric-value')} with given
#' prediction and dtrain.
#' @param verbose If 0, xgboost will stay silent. If 1, it will print information about performance.
#' If 2, some additional information will be printed out.
#' Note that setting \code{verbose > 0} automatically engages the
#' \code{cb.print.evaluation(period=1)} callback function.
#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}.
#' Default is 1 which means all messages are printed. This parameter is passed to the
#' \code{\link{cb.print.evaluation}} callback.
#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered.
#' If set to an integer \code{k}, training with a validation set will stop if the performance
#' doesn't improve for \code{k} rounds.
#' Setting this parameter engages the \code{\link{cb.early.stop}} callback.
#' @param maximize If \code{feval} and \code{early_stopping_rounds} are set,
#' then this parameter must be set as well.
#' When it is \code{TRUE}, it means the larger the evaluation score the better.
#' This parameter is passed to the \code{\link{cb.early.stop}} callback.
#' @param save_period when it is non-NULL, model is saved to disk after every \code{save_period} rounds,
#' 0 means save at the end. The saving is handled by the \code{\link{cb.save.model}} callback.
#' @param save_name the name or path for periodically saved model file.
#' @param xgb_model a previously built model to continue the training from.
#' Could be either an object of class \code{xgb.Booster}, or its raw data, or the name of a
#' file with a previously saved model.
#' @param callbacks a list of callback functions to perform various task during boosting.
#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
#' parameters' values. User can provide either existing or their own callback methods in order
#' to customize the training process.
#' @param ... other parameters to pass to \code{params}.
#' @param label vector of response values. Should not be provided when data is
#' a local data file name or an \code{xgb.DMatrix}.
#' @param missing by default is set to NA, which means that NA values should be considered as 'missing'
#' by the algorithm. Sometimes, 0 or other extreme value might be used to represent missing values.
#' This parameter is only used when input is a dense matrix.
#' @param weight a vector indicating the weight for each row of the input.
#'
#' @details
#' These are the training functions for \code{xgboost}.
#'
#' The \code{xgb.train} interface supports advanced features such as \code{watchlist},
#' customized objective and evaluation metric functions, therefore it is more flexible
#' than the \code{xgboost} interface.
#'
#' Parallelization is automatically enabled if \code{OpenMP} is present.
#' Number of threads can also be manually specified via the \code{nthread}
#' parameter.
#'
#' The evaluation metric is chosen automatically by XGBoost (according to the objective)
#' when the \code{eval_metric} parameter is not provided.
#' User may set one or several \code{eval_metric} parameters.
#' Note that when using a customized metric, only this single metric can be used.
#' The following is the list of built-in metrics for which XGBoost provides optimized implementation:
#' \itemize{
#' \item \code{rmse} root mean square error. \url{https://en.wikipedia.org/wiki/Root_mean_square_error}
#' \item \code{logloss} negative log-likelihood. \url{https://en.wikipedia.org/wiki/Log-likelihood}
#' \item \code{mlogloss} multiclass logloss. \url{https://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html}
#' \item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
#' By default, it uses the 0.5 threshold for predicted values to define negative and positive instances.
#' Different threshold (e.g., 0.) could be specified as "error@0."
#' \item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
#' \item \code{mae} Mean absolute error
#' \item \code{mape} Mean absolute percentage error
#'   \item \code{auc} Area under the curve. \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve} for ranking evaluation.
#' \item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
#' \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{https://en.wikipedia.org/wiki/NDCG}
#' }
#'
#' The following callbacks are automatically created when certain parameters are set:
#' \itemize{
#' \item \code{cb.print.evaluation} is turned on when \code{verbose > 0};
#' and the \code{print_every_n} parameter is passed to it.
#' \item \code{cb.evaluation.log} is on when \code{watchlist} is present.
#' \item \code{cb.early.stop}: when \code{early_stopping_rounds} is set.
#' \item \code{cb.save.model}: when \code{save_period > 0} is set.
#' }
#'
#' @return
#' An object of class \code{xgb.Booster} with the following elements:
#' \itemize{
#' \item \code{handle} a handle (pointer) to the xgboost model in memory.
#' \item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type.
#' \item \code{niter} number of boosting iterations.
#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
#' first column corresponding to iteration number and the rest corresponding to evaluation
#' metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback.
#' \item \code{call} a function call.
#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not
#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
#' \item \code{callbacks} callback functions that were either automatically assigned or
#' explicitly passed.
#' \item \code{best_iteration} iteration number with the best evaluation metric value
#' (only available with early stopping).
#' \item \code{best_score} the best evaluation metric value during early stopping.
#' (only available with early stopping).
#' \item \code{feature_names} names of the training dataset features
#' (only when column names were defined in training data).
#' \item \code{nfeatures} number of features in training data.
#' }
#'
#' @seealso
#' \code{\link{callbacks}},
#' \code{\link{predict.xgb.Booster}},
#' \code{\link{xgb.cv}}
#'
#' @references
#'
#' Tianqi Chen and Carlos Guestrin, "XGBoost: A Scalable Tree Boosting System",
#' 22nd SIGKDD Conference on Knowledge Discovery and Data Mining, 2016, \url{https://arxiv.org/abs/1603.02754}
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#'
#' ## Keep the number of threads to 1 for examples
#' nthread <- 1
#' data.table::setDTthreads(nthread)
#'
#' dtrain <- with(
#' agaricus.train, xgb.DMatrix(data, label = label, nthread = nthread)
#' )
#' dtest <- with(
#' agaricus.test, xgb.DMatrix(data, label = label, nthread = nthread)
#' )
#' watchlist <- list(train = dtrain, eval = dtest)
#'
#' ## A simple xgb.train example:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = nthread,
#' objective = "binary:logistic", eval_metric = "auc")
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
#'
#' ## An xgb.train example where custom objective and evaluation metric are
#' ## used:
#' logregobj <- function(preds, dtrain) {
#' labels <- getinfo(dtrain, "label")
#' preds <- 1/(1 + exp(-preds))
#' grad <- preds - labels
#' hess <- preds * (1 - preds)
#' return(list(grad = grad, hess = hess))
#' }
#' evalerror <- function(preds, dtrain) {
#' labels <- getinfo(dtrain, "label")
#' err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
#' return(list(metric = "error", value = err))
#' }
#'
#' # These functions could be used by passing them either:
#' # as 'objective' and 'eval_metric' parameters in the params list:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = nthread,
#' objective = logregobj, eval_metric = evalerror)
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
#'
#' # or through the ... arguments:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = nthread)
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
#' objective = logregobj, eval_metric = evalerror)
#'
#' # or as dedicated 'obj' and 'feval' parameters of xgb.train:
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
#' obj = logregobj, feval = evalerror)
#'
#'
#' ## An xgb.train example of using variable learning rates at each iteration:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = nthread,
#' objective = "binary:logistic", eval_metric = "auc")
#' my_etas <- list(eta = c(0.5, 0.1))
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
#' callbacks = list(cb.reset.parameters(my_etas)))
#'
#' ## Early stopping:
#' bst <- xgb.train(param, dtrain, nrounds = 25, watchlist,
#' early_stopping_rounds = 3)
#'
#' ## An 'xgboost' interface example:
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
#' max_depth = 2, eta = 1, nthread = nthread, nrounds = 2,
#' objective = "binary:logistic")
#' pred <- predict(bst, agaricus.test$data)
#'
#' @rdname xgb.train
#' @export
xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
obj = NULL, feval = NULL, verbose = 1, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL,
save_period = NULL, save_name = "xgboost.model",
xgb_model = NULL, callbacks = list(), ...) {
check.deprecation(...)
params <- check.booster.params(params, ...)
check.custom.obj()
check.custom.eval()
# data & watchlist checks
dtrain <- data
if (!inherits(dtrain, "xgb.DMatrix"))
stop("second argument dtrain must be xgb.DMatrix")
if (length(watchlist) > 0) {
if (typeof(watchlist) != "list" ||
!all(vapply(watchlist, inherits, logical(1), what = 'xgb.DMatrix')))
stop("watchlist must be a list of xgb.DMatrix elements")
evnames <- names(watchlist)
if (is.null(evnames) || any(evnames == ""))
stop("each element of the watchlist must have a name tag")
}
# evaluation printing callback
params <- c(params)
print_every_n <- max(as.integer(print_every_n), 1L)
if (!has.callbacks(callbacks, 'cb.print.evaluation') &&
verbose) {
callbacks <- add.cb(callbacks, cb.print.evaluation(print_every_n))
}
# evaluation log callback: it is automatically enabled when watchlist is provided
evaluation_log <- list()
if (!has.callbacks(callbacks, 'cb.evaluation.log') &&
length(watchlist) > 0) {
callbacks <- add.cb(callbacks, cb.evaluation.log())
}
# Model saving callback
if (!is.null(save_period) &&
!has.callbacks(callbacks, 'cb.save.model')) {
callbacks <- add.cb(callbacks, cb.save.model(save_period, save_name))
}
# Early stopping callback
stop_condition <- FALSE
if (!is.null(early_stopping_rounds) &&
!has.callbacks(callbacks, 'cb.early.stop')) {
callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
maximize = maximize, verbose = verbose))
}
# Sort the callbacks into categories
cb <- categorize.callbacks(callbacks)
params['validate_parameters'] <- TRUE
if (!is.null(params[['seed']])) {
warning("xgb.train: `seed` is ignored in R package. Use `set.seed()` instead.")
}
# The tree updating process would need slightly different handling
is_update <- NVL(params[['process_type']], '.') == 'update'
# Construct a booster (either a new one or load from xgb_model)
handle <- xgb.Booster.handle(params, append(watchlist, dtrain), xgb_model)
bst <- xgb.handleToBooster(handle)
# extract parameters that can affect the relationship b/w #trees and #iterations
num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1)
num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1)
# When the 'xgb_model' was set, find out how many boosting iterations it has
niter_init <- 0
if (!is.null(xgb_model)) {
niter_init <- as.numeric(xgb.attr(bst, 'niter')) + 1
if (length(niter_init) == 0) {
niter_init <- xgb.ntree(bst) %/% (num_parallel_tree * num_class)
}
}
if (is_update && nrounds > niter_init)
stop("nrounds cannot be larger than ", niter_init, " (nrounds of xgb_model)")
niter_skip <- ifelse(is_update, 0, niter_init)
begin_iteration <- niter_skip + 1
end_iteration <- niter_skip + nrounds
# the main loop for boosting iterations
for (iteration in begin_iteration:end_iteration) {
for (f in cb$pre_iter) f()
xgb.iter.update(bst$handle, dtrain, iteration - 1, obj)
if (length(watchlist) > 0)
bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval)
xgb.attr(bst$handle, 'niter') <- iteration - 1
for (f in cb$post_iter) f()
if (stop_condition) break
}
for (f in cb$finalize) f(finalize = TRUE)
bst <- xgb.Booster.complete(bst, saveraw = TRUE)
# store the total number of boosting iterations
bst$niter <- end_iteration
# store the evaluation results
if (length(evaluation_log) > 0 &&
nrow(evaluation_log) > 0) {
# include the previous compatible history when available
if (inherits(xgb_model, 'xgb.Booster') &&
!is_update &&
!is.null(xgb_model$evaluation_log) &&
isTRUE(all.equal(colnames(evaluation_log),
colnames(xgb_model$evaluation_log)))) {
evaluation_log <- rbindlist(list(xgb_model$evaluation_log, evaluation_log))
}
bst$evaluation_log <- evaluation_log
}
bst$call <- match.call()
bst$params <- params
bst$callbacks <- callbacks
if (!is.null(colnames(dtrain)))
bst$feature_names <- colnames(dtrain)
bst$nfeatures <- ncol(dtrain)
return(bst)
}
# --- end of R/xgb.train.R ---
#' Load the instance back from \code{\link{xgb.serialize}}
#'
#' @param buffer the buffer containing booster instance saved by \code{\link{xgb.serialize}}
#' @param handle An \code{xgb.Booster.handle} object which will be overwritten with
#' the new deserialized object. Must be a null handle (e.g. when loading the model through
#' `readRDS`). If not provided, a new handle will be created.
#' @return An \code{xgb.Booster.handle} object.
#'
#' @export
xgb.unserialize <- function(buffer, handle = NULL) {
cachelist <- list()
if (is.null(handle)) {
handle <- .Call(XGBoosterCreate_R, cachelist)
} else {
if (!is.null.handle(handle))
stop("'handle' is not null/empty. Cannot overwrite existing handle.")
.Call(XGBoosterCreateInEmptyObj_R, cachelist, handle)
}
tryCatch(
.Call(XGBoosterUnserializeFromBuffer_R, handle, buffer),
error = function(e) {
error_msg <- conditionMessage(e)
m <- regexec("(src[\\\\/]learner.cc:[0-9]+): Check failed: (header == serialisation_header_)",
error_msg, perl = TRUE)
groups <- regmatches(error_msg, m)[[1]]
if (length(groups) == 3) {
warning(paste("The model had been generated by XGBoost version 1.0.0 or earlier and was ",
"loaded from a RDS file. We strongly ADVISE AGAINST using saveRDS() ",
"function, to ensure that your model can be read in current and upcoming ",
"XGBoost releases. Please use xgb.save() instead to preserve models for the ",
"long term. For more details and explanation, see ",
"https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html",
sep = ""))
.Call(XGBoosterLoadModelFromRaw_R, handle, buffer)
} else {
stop(e)
}
})
class(handle) <- "xgb.Booster.handle"
return (handle)
}
# --- end of R/xgb.unserialize.R ---
# Simple interface for training an xgboost model that wraps \code{xgb.train}.
# Its documentation is combined with xgb.train.
#
#' @rdname xgb.train
#' @export
xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
params = list(), nrounds,
verbose = 1, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL,
save_period = NULL, save_name = "xgboost.model",
xgb_model = NULL, callbacks = list(), ...) {
merged <- check.booster.params(params, ...)
dtrain <- xgb.get.DMatrix(data, label, missing, weight, nthread = merged$nthread)
watchlist <- list(train = dtrain)
bst <- xgb.train(params, dtrain, nrounds, watchlist, verbose = verbose, print_every_n = print_every_n,
early_stopping_rounds = early_stopping_rounds, maximize = maximize,
save_period = save_period, save_name = save_name,
xgb_model = xgb_model, callbacks = callbacks, ...)
return (bst)
}
#' Training part from Mushroom Data Set
#'
#' This data set is originally from the Mushroom data set,
#' UCI Machine Learning Repository.
#'
#' This data set includes the following fields:
#'
#' \itemize{
#' \item \code{label} the label for each record
#' \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
#' }
#'
#' @references
#' https://archive.ics.uci.edu/ml/datasets/Mushroom
#'
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
#' School of Information and Computer Science.
#'
#' @docType data
#' @keywords datasets
#' @name agaricus.train
#' @usage data(agaricus.train)
#' @format A list containing a label vector, and a dgCMatrix object with 6513
#' rows and 127 variables
NULL
#' Test part from Mushroom Data Set
#'
#' This data set is originally from the Mushroom data set,
#' UCI Machine Learning Repository.
#'
#' This data set includes the following fields:
#'
#' \itemize{
#' \item \code{label} the label for each record
#' \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
#' }
#'
#' @references
#' https://archive.ics.uci.edu/ml/datasets/Mushroom
#'
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
#' School of Information and Computer Science.
#'
#' @docType data
#' @keywords datasets
#' @name agaricus.test
#' @usage data(agaricus.test)
#' @format A list containing a label vector, and a dgCMatrix object with 1611
#' rows and 126 variables
NULL
# Various imports
#' @importClassesFrom Matrix dgCMatrix dgeMatrix
#' @importFrom Matrix colSums
#' @importFrom Matrix sparse.model.matrix
#' @importFrom Matrix sparseVector
#' @importFrom Matrix sparseMatrix
#' @importFrom Matrix t
#' @importFrom data.table data.table
#' @importFrom data.table is.data.table
#' @importFrom data.table as.data.table
#' @importFrom data.table :=
#' @importFrom data.table rbindlist
#' @importFrom data.table setkey
#' @importFrom data.table setkeyv
#' @importFrom data.table setnames
#' @importFrom jsonlite fromJSON
#' @importFrom jsonlite toJSON
#' @importFrom utils object.size str tail
#' @importFrom stats predict
#' @importFrom stats median
#' @importFrom utils head
#' @importFrom graphics barplot
#' @importFrom graphics lines
#' @importFrom graphics points
#' @importFrom graphics grid
#' @importFrom graphics par
#' @importFrom graphics title
#' @importFrom grDevices rgb
#'
#' @import methods
#' @useDynLib xgboost, .registration = TRUE
NULL
# --- end of R/xgboost.R ---
require(xgboost)
require(methods)
# we load in the agaricus dataset
# In this example, we are aiming to predict whether a mushroom is edible
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
train <- agaricus.train
test <- agaricus.test
# the loaded data is stored in sparseMatrix, and label is a numeric vector in {0,1}
class(train$label)
class(train$data)
#-------------Basic Training using XGBoost-----------------
# this is the basic usage of xgboost you can put matrix in data field
# note: we are putting in sparse matrix here, xgboost naturally handles sparse input
# use a sparse matrix when your features are sparse (e.g. when you are using one-hot encoded vectors)
print("Training xgboost with sparseMatrix")
bst <- xgboost(data = train$data, label = train$label, max_depth = 2, eta = 1, nrounds = 2,
nthread = 2, objective = "binary:logistic")
# alternatively, you can put in dense matrix, i.e. basic R-matrix
print("Training xgboost with Matrix")
bst <- xgboost(data = as.matrix(train$data), label = train$label, max_depth = 2, eta = 1, nrounds = 2,
nthread = 2, objective = "binary:logistic")
# you can also put in an xgb.DMatrix object, which stores label, data and other metadata needed for advanced features
print("Training xgboost with xgb.DMatrix")
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, nthread = 2,
objective = "binary:logistic")
# Verbose = 0,1,2
print("Train xgboost with verbose 0, no message")
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2,
nthread = 2, objective = "binary:logistic", verbose = 0)
print("Train xgboost with verbose 1, print evaluation metric")
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2,
nthread = 2, objective = "binary:logistic", verbose = 1)
print("Train xgboost with verbose 2, also print information about tree")
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2,
nthread = 2, objective = "binary:logistic", verbose = 2)
# you can also specify data as file path to a LIBSVM format input
# since we do not have this file with us, the following line is just for illustration
# bst <- xgboost(data = 'agaricus.train.svm', max_depth = 2, eta = 1, nrounds = 2,objective = "binary:logistic")
#--------------------basic prediction using xgboost--------------
# you can do prediction using the following line
# you can put in Matrix, sparseMatrix, or xgb.DMatrix
pred <- predict(bst, test$data)
err <- mean(as.numeric(pred > 0.5) != test$label)
print(paste("test-error=", err))
#-------------------save and load models-------------------------
# save model to binary local file
xgb.save(bst, "xgboost.model")
# load binary model to R
bst2 <- xgb.load("xgboost.model")
pred2 <- predict(bst2, test$data)
# pred2 should be identical to pred
print(paste("sum(abs(pred2-pred))=", sum(abs(pred2 - pred))))
# save model to R's raw vector
raw <- xgb.save.raw(bst)
# load binary model to R
bst3 <- xgb.load.raw(raw)
pred3 <- predict(bst3, test$data)
# pred3 should be identical to pred
print(paste("sum(abs(pred3-pred))=", sum(abs(pred3 - pred))))
#----------------Advanced features --------------
# to use advanced features, we need to put data in xgb.DMatrix
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
dtest <- xgb.DMatrix(data = test$data, label = test$label)
#---------------Using watchlist----------------
# watchlist is a list of xgb.DMatrix objects, each of them tagged with a name
watchlist <- list(train = dtrain, test = dtest)
# to train with watchlist, use xgb.train, which contains more advanced features
# watchlist allows us to monitor the evaluation result on all data in the list
print("Train xgboost using xgb.train with watchlist")
bst <- xgb.train(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
nthread = 2, objective = "binary:logistic")
# we can change evaluation metrics, or use multiple evaluation metrics
print("train xgboost using xgb.train with watchlist, watch logloss and error")
bst <- xgb.train(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
eval_metric = "error", eval_metric = "logloss",
nthread = 2, objective = "binary:logistic")
# xgb.DMatrix can also be saved using xgb.DMatrix.save
xgb.DMatrix.save(dtrain, "dtrain.buffer")
# to load it in, simply call xgb.DMatrix
dtrain2 <- xgb.DMatrix("dtrain.buffer")
bst <- xgb.train(data = dtrain2, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
nthread = 2, objective = "binary:logistic")
# information can be extracted from xgb.DMatrix using getinfo
label <- getinfo(dtest, "label")
pred <- predict(bst, dtest)
err <- as.numeric(sum(as.integer(pred > 0.5) != label)) / length(label)
print(paste("test-error=", err))
# You can dump the tree you learned using xgb.dump into a text file
dump_path <- file.path(tempdir(), 'dump.raw.txt')
xgb.dump(bst, dump_path, with_stats = TRUE)
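# A small sketch: peek at the first lines of the dump we just wrote, to see the tree structure as plain text
cat(head(readLines(dump_path)), sep = "\n")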
# Finally, you can check which features are the most important.
print("Most important features (look at column Gain):")
imp_matrix <- xgb.importance(feature_names = colnames(train$data), model = bst)
print(imp_matrix)
# Feature importance bar plot by gain
print("Feature importance Plot : ")
print(xgb.plot.importance(importance_matrix = imp_matrix))
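# Optionally (a sketch; tree plotting needs the DiagrammeR package, which may not be installed),
# the individual trees of the model can be visualised as well:
if (requireNamespace("DiagrammeR", quietly = TRUE)) {
  print(xgb.plot.tree(model = bst, trees = 0))
}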
# --- end of demo/basic_walkthrough.R ---
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
watchlist <- list(eval = dtest, train = dtrain)
###
# advanced: start from an initial base prediction
#
print('start running example to start from an initial prediction')
# train xgboost for 1 round
param <- list(max_depth = 2, eta = 1, nthread = 2, objective = 'binary:logistic')
bst <- xgb.train(param, dtrain, 1, watchlist)
# Note: we need the margin value instead of the transformed prediction in set_base_margin
# doing predict with outputmargin = TRUE will always give you margin values before the logistic transformation
ptrain <- predict(bst, dtrain, outputmargin = TRUE)
ptest <- predict(bst, dtest, outputmargin = TRUE)
# set the base_margin property of dtrain and dtest
# base margin is the base prediction we will boost from
setinfo(dtrain, "base_margin", ptrain)
setinfo(dtest, "base_margin", ptest)
print('this is the result of boosting from the initial prediction')
bst <- xgb.train(params = param, data = dtrain, nrounds = 1, watchlist = watchlist)
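# As a quick check (just a sketch), predictions of the continued model can be compared against
# the test labels; the base margin stored in dtest is taken into account by predict()
pred_continued <- predict(bst, dtest)
err_continued <- mean(as.numeric(pred_continued > 0.5) != getinfo(dtest, "label"))
print(paste("test-error after boosting from the initial prediction =", err_continued))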
# --- end of demo/boost_from_prediction.R ---
# install development version of caret library that contains xgboost models
devtools::install_github("topepo/caret/pkg/caret")
require(caret)
require(xgboost)
require(data.table)
require(vcd)
require(e1071)
# Load Arthritis dataset in memory.
data(Arthritis)
# Create a copy of the dataset with the data.table package (data.table is 100% compliant with R data.frame but its syntax is a lot more consistent and its performance is really good).
df <- data.table(Arthritis, keep.rownames = FALSE)
# Let's add some new categorical features to see if it helps. Of course these features are highly correlated to the Age feature. Usually it's not a good thing in ML, but Tree algorithms (including boosted trees) are able to select the best features, even in case of highly correlated features.
# For the first feature we create groups of age by rounding the real age. Note that we transform it to factor (categorical data) so the algorithm treats them as independent values.
df[, AgeDiscret := as.factor(round(Age / 10, 0))]
# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old. I choose this value based on nothing. We will see later if simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
# We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
df[, ID := NULL]
#-------------Basic Training using XGBoost in caret Library-----------------
# Set up control parameters for caret::train
# Here we use 10-fold cross-validation, repeating twice, and using random search for tuning hyper-parameters.
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 2, search = "random")
# train a xgbTree model using caret::train
model <- train(factor(Improved)~., data = df, method = "xgbTree", trControl = fitControl)
# Instead of trees for our boosters, you can also fit a linear or logistic regression model using xgbLinear
# model <- train(factor(Improved)~., data = df, method = "xgbLinear", trControl = fitControl)
# See model results
print(model)
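# Sketch: the fitted caret model can be used for prediction like any other caret model
# (here we predict on the training data purely for illustration; use a held-out set in practice)
preds <- predict(model, newdata = df)
print(head(preds))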
# --- end of demo/caret_wrapper.R ---
require(xgboost)
require(Matrix)
require(data.table)
if (!require(vcd)) {
install.packages('vcd') #Available in CRAN. Used for its dataset with categorical values.
require(vcd)
}
# According to its documentation, XGBoost works only on numbers.
# Sometimes the dataset we have to work on has categorical data.
# A categorical variable is one which has a fixed number of values. For example, if for each observation a variable called "Colour" can have only "red", "blue" or "green" as value, it is a categorical variable.
#
# In R, categorical variable is called Factor.
# Type ?factor in console for more information.
#
# In this demo we will see how to transform a dense dataframe with categorical variables to a sparse matrix before analyzing it in XGBoost.
# The method we are going to see is usually called "one hot encoding".
#load Arthritis dataset in memory.
data(Arthritis)
# create a copy of the dataset with the data.table package (data.table is 100% compliant with R data.frame but its syntax is a lot more consistent and its performance is really good).
df <- data.table(Arthritis, keep.rownames = FALSE)
# Let's have a look to the data.table
cat("Print the dataset\n")
print(df)
# 2 columns have factor type, one has ordinal type (ordinal variable is a categorical variable with values which can be ordered, here: None > Some > Marked).
cat("Structure of the dataset\n")
str(df)
# Let's add some new categorical features to see if it helps. Of course these features are highly correlated to the Age feature. Usually it's not a good thing in ML, but Tree algorithms (including boosted trees) are able to select the best features, even in case of highly correlated features.
# For the first feature we create groups of age by rounding the real age. Note that we transform it to factor (categorical data) so the algorithm treats them as independent values.
df[, AgeDiscret := as.factor(round(Age / 10, 0))]
# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old. I choose this value based on nothing. We will see later if simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
# We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
df[, ID := NULL]
# List the different values for the column Treatment: Placebo, Treated.
cat("Values of the categorical feature Treatment\n")
print(levels(df[, Treatment]))
# Next step, we will transform the categorical data to dummy variables.
# This method is also called one hot encoding.
# The purpose is to transform each value of each categorical feature in one binary feature.
#
# Take the column Treatment: it will be replaced by two columns, Placebo and Treated. Each of them will be binary. For example an observation which had the value Placebo in column Treatment before the transformation will have, after the transformation, the value 1 in the new column Placebo and the value 0 in the new column Treated.
#
# The formula Improved ~ . - 1 used below means: transform all categorical features but column Improved to binary values.
# Column Improved is excluded because it will be our output column, the one we want to predict.
sparse_matrix <- sparse.model.matrix(Improved ~ . - 1, data = df)
cat("Encoding of the sparse Matrix\n")
print(sparse_matrix)
# Create the output vector (not sparse)
# 1. Set, for all rows, field in Y column to 0;
# 2. set Y to 1 when Improved == Marked;
# 3. Return Y column
output_vector <- df[, Y := 0][Improved == "Marked", Y := 1][, Y]
# Following is the same process as other demo
cat("Learning...\n")
bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 9,
eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic")
importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst)
print(importance)
# According to the importance matrix printed above, the most important feature in this dataset to predict if the treatment will work is Age. The second most important feature is having received a placebo or not. The sex is third. Then we see our generated features (AgeDiscret). We can see that their contribution is very low (Gain column).
# Do these results make sense?
# Let's check some chi-squared tests between each of these features and the outcome.
print(chisq.test(df$Age, df$Y))
# The chi-squared statistic between Age and the illness disappearing is about 35.
print(chisq.test(df$AgeDiscret, df$Y))
# Our first simplification of Age gives a chi-squared statistic of about 8.
print(chisq.test(df$AgeCat, df$Y))
# The perfectly arbitrary split I did between young and old at 30 years old has a low statistic of about 2. It's a result we may expect, as maybe in my mind being over 30 years old means being old (I am 32 and starting to feel old, which may explain that), but for the illness we are studying, the vulnerable age is not the same. Don't let your "gut" lower the quality of your model. In "data science", there is science :-)
# As you can see, in general destroying information by simplifying it won't improve your model. The chi-squared tests just demonstrate that. But in more complex cases, creating a new feature based on an existing one which makes the link with the outcome more obvious may help the algorithm and improve the model. The case studied here is not complex enough to show that. Check the Kaggle forums for some challenging datasets.
# However it's almost always worse when you add some arbitrary rules.
# Moreover, you can notice that even though we have added some useless new features highly correlated with other features, the boosted tree algorithm has been able to choose the best one, which in this case is Age. A linear model may not be that strong in such a scenario.
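# Sketch: the importance table computed above can also be visualised
# (assuming a graphics device is available)
xgb.plot.importance(importance_matrix = importance)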
# --- end of demo/create_sparse_matrix.R ---
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
nrounds <- 2
param <- list(max_depth = 2, eta = 1, nthread = 2, objective = 'binary:logistic')
cat('running cross validation\n')
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value+std_value
# std_value is standard deviation of the metric
xgb.cv(param, dtrain, nrounds, nfold = 5, metrics = {'error'})
cat('running cross validation, disable standard deviation display\n')
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value
# (the standard deviation display is disabled by showsd = FALSE)
xgb.cv(param, dtrain, nrounds, nfold = 5,
metrics = 'error', showsd = FALSE)
###
# you can also do cross validation with customized loss function
# See custom_objective.R
##
print ('running cross validation, with customized loss function')
logregobj <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
preds <- 1 / (1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
return(list(metric = "error", value = err))
}
param <- list(max_depth = 2, eta = 1,
objective = logregobj, eval_metric = evalerror)
# train with customized objective
xgb.cv(params = param, data = dtrain, nrounds = nrounds, nfold = 5)
# do cross validation with prediction values for each fold
res <- xgb.cv(params = param, data = dtrain, nrounds = nrounds, nfold = 5, prediction = TRUE)
res$evaluation_log
length(res$pred)
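# Sketch: with prediction = TRUE, res$pred holds the out-of-fold predictions; since a custom
# objective was used, these are raw margins, so the decision threshold is 0
oof_err <- mean(as.numeric(res$pred > 0) != getinfo(dtrain, "label"))
print(paste("out-of-fold error =", oof_err))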
# --- end of demo/cross_validation.R ---
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
# note: for customized objective function, we leave objective as default
# note: what we are getting is margin value in prediction
# you must know what you are doing
watchlist <- list(eval = dtest, train = dtrain)
num_round <- 2
# user-defined objective function: given prediction and dtrain, return gradient and second order gradient
# this is log likelihood loss
logregobj <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
preds <- 1 / (1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
# user defined evaluation function, return a pair metric_name, result
# NOTE: when you do customized loss function, the default prediction value is margin
# this may make builtin evaluation metric not function properly
# for example, we are doing logistic loss, the prediction is score before logistic transformation
# the builtin evaluation error assumes input is after logistic transformation
# Keep this in mind when you use the customization; you may need to write a customized evaluation function
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
return(list(metric = "error", value = err))
}
param <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0,
objective = logregobj, eval_metric = evalerror)
print ('start training with user customized objective')
# training with customized objective, we can also do step by step training
# simply look at xgboost.py's implementation of train
bst <- xgb.train(param, dtrain, num_round, watchlist)
#
# there can be cases where you want additional information
# to be considered besides the properties of the DMatrix you can get by getinfo
# you can set additional information as attributes of the DMatrix
# set label attribute of dtrain to be label, we use label as an example, it can be anything
attr(dtrain, 'label') <- getinfo(dtrain, 'label')
# this is new customized objective, where you can access things you set
# same thing applies to customized evaluation function
logregobjattr <- function(preds, dtrain) {
# now you can access the attribute in customized function
labels <- attr(dtrain, 'label')
preds <- 1 / (1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
param <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0,
objective = logregobjattr, eval_metric = evalerror)
print ('start training with user customized objective, with additional attributes in DMatrix')
# training with customized objective, we can also do step by step training
# simply look at xgboost.py's implementation of train
bst <- xgb.train(param, dtrain, num_round, watchlist)
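# Sketch: as a sanity check, the customised log-likelihood objective should behave very much
# like the builtin "binary:logistic" objective on the same data
param_builtin <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0,
                      objective = "binary:logistic")
bst_builtin <- xgb.train(param_builtin, dtrain, num_round, watchlist)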
# --- end of demo/custom_objective.R ---
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
# note: for customized objective function, we leave objective as default
# note: what we are getting is margin value in prediction
# you must know what you are doing
param <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0)
watchlist <- list(eval = dtest)
num_round <- 20
# user-defined objective function: given prediction and dtrain, return gradient and second order gradient
# this is log likelihood loss
logregobj <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
preds <- 1 / (1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
# user defined evaluation function, return a pair metric_name, result
# NOTE: when you do customized loss function, the default prediction value is margin
# this may make builtin evaluation metric not function properly
# for example, we are doing logistic loss, the prediction is score before logistic transformation
# the builtin evaluation error assumes input is after logistic transformation
# Keep this in mind when you use the customization; you may need to write a customized evaluation function
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
return(list(metric = "error", value = err))
}
print('start training with early stopping setting')
bst <- xgb.train(param, dtrain, num_round, watchlist,
objective = logregobj, eval_metric = evalerror, maximize = FALSE,
                 early_stopping_rounds = 3)
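# Sketch: when early stopping is enabled, the best iteration and its evaluation score
# are stored on the returned booster object
print(bst$best_iteration)
print(bst$best_score)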
bst <- xgb.cv(param, dtrain, num_round, nfold = 5,
objective = logregobj, eval_metric = evalerror,
maximize = FALSE, early_stopping_rounds = 3)
# --- end of demo/early_stopping.R ---
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
##
# this script demonstrates how to fit a generalized linear model in xgboost
# basically, we are using a linear model instead of trees for our boosters
# you can fit a linear regression or a logistic regression model
##
# change booster to gblinear, so that we are fitting a linear model
# alpha is the L1 regularizer
# lambda is the L2 regularizer
# you can also set lambda_bias which is L2 regularizer on the bias term
param <- list(objective = "binary:logistic", booster = "gblinear",
nthread = 2, alpha = 0.0001, lambda = 1)
# normally, you do not need to set eta (step_size)
# XGBoost uses a parallel coordinate descent algorithm (shotgun),
# parallelization can have an effect on convergence in certain cases
# setting eta to a smaller value, e.g. 0.5, can make the optimization more stable
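# For example (illustrative, not part of the original demo), you could add:
#   param$eta <- 0.5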
##
# the rest of settings are the same
##
watchlist <- list(eval = dtest, train = dtrain)
num_round <- 2
bst <- xgb.train(param, dtrain, num_round, watchlist)
ypred <- predict(bst, dtest)
labels <- getinfo(dtest, 'label')
cat('error of preds=', mean(as.numeric(ypred > 0.5) != labels), '\n')
| /scratch/gouwar.j/cran-all/cranData/xgboost/demo/generalized_linear_model.R |
# An example of using GPU-accelerated tree building algorithms
#
# NOTE: it can only run if you have a CUDA-enable GPU and the package was
# specially compiled with GPU support.
#
# For the current functionality, see
# https://xgboost.readthedocs.io/en/latest/gpu/index.html
#
library('xgboost')
# Simulate N x p random matrix with some binomial response dependent on pp columns
set.seed(111)
N <- 1000000
p <- 50
pp <- 25
X <- matrix(runif(N * p), ncol = p)
betas <- 2 * runif(pp) - 1
sel <- sort(sample(p, pp))
m <- X[, sel] %*% betas - 1 + rnorm(N)
y <- rbinom(N, 1, plogis(m))
tr <- sample.int(N, N * 0.75)
dtrain <- xgb.DMatrix(X[tr, ], label = y[tr])
dtest <- xgb.DMatrix(X[-tr, ], label = y[-tr])
wl <- list(train = dtrain, test = dtest)
# An example of running 'gpu_hist' algorithm
# which is
# - similar to the 'hist'
# - the fastest option for moderately large datasets
# - current limitations: max_depth < 16, does not implement guided loss
# (Earlier releases also offered tree_method = 'gpu_exact', a slower and more
# memory-hungry GPU algorithm that did not use binning; it has since been removed.)
param <- list(objective = 'reg:logistic', eval_metric = 'auc', subsample = 0.5, nthread = 4,
max_bin = 64, tree_method = 'gpu_hist')
pt <- proc.time()
bst_gpu <- xgb.train(param, dtrain, watchlist = wl, nrounds = 50)
proc.time() - pt
# Compare to the 'hist' algorithm:
param$tree_method <- 'hist'
pt <- proc.time()
bst_hist <- xgb.train(param, dtrain, watchlist = wl, nrounds = 50)
proc.time() - pt
| /scratch/gouwar.j/cran-all/cranData/xgboost/demo/gpu_accelerated.R |
library(xgboost)
library(data.table)
set.seed(1024)
# Function to obtain a list of interactions fitted in trees, requires input of maximum depth
treeInteractions <- function(input_tree, input_max_depth) {
ID_merge <- i.id <- i.feature <- NULL # Suppress warning "no visible binding for global variable"
trees <- data.table::copy(input_tree) # copy tree input to prevent overwriting
if (input_max_depth < 2) return(list()) # no interactions if max depth < 2
if (nrow(input_tree) == 1) return(list())
# Attach parent nodes
for (i in 2:input_max_depth) {
if (i == 2) trees[, ID_merge := ID] else trees[, ID_merge := get(paste0('parent_', i - 2))]
parents_left <- trees[!is.na(Split), list(i.id = ID, i.feature = Feature, ID_merge = Yes)]
parents_right <- trees[!is.na(Split), list(i.id = ID, i.feature = Feature, ID_merge = No)]
data.table::setorderv(trees, 'ID_merge')
data.table::setorderv(parents_left, 'ID_merge')
data.table::setorderv(parents_right, 'ID_merge')
trees <- merge(trees, parents_left, by = 'ID_merge', all.x = TRUE)
trees[!is.na(i.id), c(paste0('parent_', i - 1), paste0('parent_feat_', i - 1))
:= list(i.id, i.feature)]
trees[, c('i.id', 'i.feature') := NULL]
trees <- merge(trees, parents_right, by = 'ID_merge', all.x = TRUE)
trees[!is.na(i.id), c(paste0('parent_', i - 1), paste0('parent_feat_', i - 1))
:= list(i.id, i.feature)]
trees[, c('i.id', 'i.feature') := NULL]
}
# Extract nodes with interactions
interaction_trees <- trees[!is.na(Split) & !is.na(parent_1),
c('Feature', paste0('parent_feat_', 1:(input_max_depth - 1))),
with = FALSE]
interaction_trees_split <- split(interaction_trees, seq_len(nrow(interaction_trees)))
interaction_list <- lapply(interaction_trees_split, as.character)
# Remove NAs (no parent interaction)
interaction_list <- lapply(interaction_list, function(x) x[!is.na(x)])
# Remove non-interactions (same variable)
interaction_list <- lapply(interaction_list, unique) # remove same variables
interaction_length <- sapply(interaction_list, length)
interaction_list <- interaction_list[interaction_length > 1]
interaction_list <- unique(lapply(interaction_list, sort))
return(interaction_list)
}
# Generate sample data
x <- list()
for (i in 1:10) {
x[[i]] <- i * rnorm(1000, 10)
}
x <- as.data.table(x)
y <- -1 * x[, rowSums(.SD)] + x[['V1']] * x[['V2']] + x[['V3']] * x[['V4']] * x[['V5']] +
  rnorm(1000, 0.001) + 3 * sin(x[['V7']])
train <- as.matrix(x)
# Interaction constraint list (column names form)
interaction_list <- list(c('V1', 'V2'), c('V3', 'V4', 'V5'))
# Convert interaction constraint list into feature index form
cols2ids <- function(object, col_names) {
LUT <- seq_along(col_names) - 1
names(LUT) <- col_names
rapply(object, function(x) LUT[x], classes = "character", how = "replace")
}
interaction_list_fid <- cols2ids(interaction_list, colnames(train))
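# With columns V1..V10 this yields 0-based feature indices: list(c(0, 1), c(2, 3, 4))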
# Fit model with interaction constraints
bst <- xgboost(data = train, label = y, max_depth = 4,
eta = 0.1, nthread = 2, nrounds = 1000,
interaction_constraints = interaction_list_fid)
bst_tree <- xgb.model.dt.tree(colnames(train), bst)
bst_interactions <- treeInteractions(bst_tree, 4)
# interactions constrained to combinations of V1*V2 and V3*V4*V5
# Fit model without interaction constraints
bst2 <- xgboost(data = train, label = y, max_depth = 4,
eta = 0.1, nthread = 2, nrounds = 1000)
bst2_tree <- xgb.model.dt.tree(colnames(train), bst2)
bst2_interactions <- treeInteractions(bst2_tree, 4) # much more interactions
# Fit model with both interaction and monotonicity constraints
bst3 <- xgboost(data = train, label = y, max_depth = 4,
eta = 0.1, nthread = 2, nrounds = 1000,
interaction_constraints = interaction_list_fid,
monotone_constraints = c(-1, 0, 0, 0, 0, 0, 0, 0, 0, 0))
bst3_tree <- xgb.model.dt.tree(colnames(train), bst3)
bst3_interactions <- treeInteractions(bst3_tree, 4)
# interactions still constrained to combinations of V1*V2 and V3*V4*V5
# Show monotonic constraints still apply by checking scores after incrementing V1
x1 <- sort(unique(x[['V1']]))
for (i in seq_along(x1)){
testdata <- copy(x[, - ('V1')])
testdata[['V1']] <- x1[i]
testdata <- testdata[, paste0('V', 1:10), with = FALSE]
pred <- predict(bst3, as.matrix(testdata))
# Should not print out anything due to monotonic constraints
if (i > 1) if (any(pred > prev_pred)) print(i)
prev_pred <- pred
}
| /scratch/gouwar.j/cran-all/cranData/xgboost/demo/interaction_constraints.R |
library(xgboost)
data(mtcars)
head(mtcars)
bst <- xgboost(data = as.matrix(mtcars[, -11]), label = mtcars[, 11],
objective = 'count:poisson', nrounds = 5)
pred <- predict(bst, as.matrix(mtcars[, -11]))
sqrt(mean((pred - mtcars[, 11]) ^ 2))
| /scratch/gouwar.j/cran-all/cranData/xgboost/demo/poisson_regression.R |
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
param <- list(max_depth = 2, eta = 1, objective = 'binary:logistic')
watchlist <- list(eval = dtest, train = dtrain)
nrounds <- 2
# training the model for two rounds
bst <- xgb.train(param, dtrain, nrounds, nthread = 2, watchlist)
cat('start testing prediction from first n trees\n')
labels <- getinfo(dtest, 'label')
### predict using only the first tree
ypred1 <- predict(bst, dtest, ntreelimit = 1)
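# (Illustrative assumption, not part of the original demo) newer XGBoost versions also expose
# an 'iterationrange' argument in predict(); check ?predict.xgb.Booster for the exact indexing
# convention in your version before relying on it, e.g.:
#   ypred1 <- predict(bst, dtest, iterationrange = c(1, 1))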
# by default, we predict using all the trees
ypred2 <- predict(bst, dtest)
cat('error of ypred1=', mean(as.numeric(ypred1 > 0.5) != labels), '\n')
cat('error of ypred2=', mean(as.numeric(ypred2 > 0.5) != labels), '\n')
| /scratch/gouwar.j/cran-all/cranData/xgboost/demo/predict_first_ntree.R |
require(xgboost)
require(data.table)
require(Matrix)
set.seed(1982)
# load in the agaricus dataset
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)
param <- list(max_depth = 2, eta = 1, objective = 'binary:logistic')
nrounds <- 4
# training the model for four rounds
bst <- xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)
# Model accuracy without new features
accuracy.before <- (sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label)
/ length(agaricus.test$label))
# by default, we predict using all the trees
pred_with_leaf <- predict(bst, dtest, predleaf = TRUE)
head(pred_with_leaf)
create.new.tree.features <- function(model, original.features){
pred_with_leaf <- predict(model, original.features, predleaf = TRUE)
cols <- list()
for (i in 1:model$niter) {
    # max is not the real max, but it's not important for the purpose of adding features
    leaf.id <- sort(unique(pred_with_leaf[, i]))
    cols[[i]] <- factor(x = pred_with_leaf[, i], levels = leaf.id)
}
cbind(original.features, sparse.model.matrix(~ . - 1, as.data.frame(cols)))
}
# Convert previous features to one hot encoding
new.features.train <- create.new.tree.features(bst, agaricus.train$data)
new.features.test <- create.new.tree.features(bst, agaricus.test$data)
colnames(new.features.test) <- colnames(new.features.train)
# learning with new features
new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label)
new.dtest <- xgb.DMatrix(data = new.features.test, label = agaricus.test$label)
watchlist <- list(train = new.dtrain)
bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2)
# Model accuracy with new features
accuracy.after <- (sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label)
/ length(agaricus.test$label))
# Here the accuracy was already good and is now perfect.
cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now",
accuracy.after, "!\n"))
| /scratch/gouwar.j/cran-all/cranData/xgboost/demo/predict_leaf_indices.R |
library(xgboost)
library(data.table)
library(Matrix) # for sparse.model.matrix()
library(cplm)
data(AutoClaim)
# auto insurance dataset analyzed by Yip and Yau (2005)
dt <- data.table(AutoClaim)
# exclude these columns from the model matrix
exclude <- c('POLICYNO', 'PLCYDATE', 'CLM_FREQ5', 'CLM_AMT5', 'CLM_FLAG', 'IN_YY')
# retains the missing values
# NOTE: this dataset comes ready out of the box
options(na.action = 'na.pass')
x <- sparse.model.matrix(~ . - 1, data = dt[, -exclude, with = FALSE])
options(na.action = 'na.omit')
# response
y <- dt[, CLM_AMT5]
d_train <- xgb.DMatrix(data = x, label = y, missing = NA)
# the tweedie_variance_power parameter determines the shape of
# distribution
# - closer to 1 is more poisson like and the mass
# is more concentrated near zero
# - closer to 2 is more gamma like and the mass spreads to
#   the right with less concentration near zero
params <- list(
objective = 'reg:tweedie',
eval_metric = 'rmse',
tweedie_variance_power = 1.4,
max_depth = 6,
eta = 1)
bst <- xgb.train(
data = d_train,
params = params,
maximize = FALSE,
watchlist = list(train = d_train),
nrounds = 20)
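# (Illustrative, not part of the original demo) to compare distribution shapes you could
# refit with a different power, e.g.:
#   params$tweedie_variance_power <- 1.1  # closer to Poisson
#   params$tweedie_variance_power <- 1.9  # closer to gamma
#   bst_alt <- xgb.train(data = d_train, params = params, nrounds = 20)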
var_imp <- xgb.importance(attr(x, 'Dimnames')[[2]], model = bst)
preds <- predict(bst, d_train)
rmse <- sqrt(mean((y - preds) ^ 2))
| /scratch/gouwar.j/cran-all/cranData/xgboost/demo/tweedie_regression.R |
## ----libLoading, results='hold', message=F, warning=F-------------------------
require(xgboost)
require(Matrix)
require(data.table)
if (!require('vcd')) {
install.packages('vcd')
}
data.table::setDTthreads(2)
## ----results='hide'-----------------------------------------------------------
data(Arthritis)
df <- data.table(Arthritis, keep.rownames = FALSE)
## -----------------------------------------------------------------------------
head(df)
## -----------------------------------------------------------------------------
str(df)
## -----------------------------------------------------------------------------
head(df[,AgeDiscret := as.factor(round(Age/10,0))])
## -----------------------------------------------------------------------------
head(df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))])
## ----results='hide'-----------------------------------------------------------
df[,ID:=NULL]
## -----------------------------------------------------------------------------
levels(df[,Treatment])
## ----warning=FALSE,message=FALSE----------------------------------------------
sparse_matrix <- sparse.model.matrix(Improved ~ ., data = df)[,-1]
head(sparse_matrix)
## -----------------------------------------------------------------------------
output_vector = df[,Improved] == "Marked"
## -----------------------------------------------------------------------------
bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 4,
               eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic")
## -----------------------------------------------------------------------------
importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst)
head(importance)
## -----------------------------------------------------------------------------
importanceRaw <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst, data = sparse_matrix, label = output_vector)
# Cleaning for better display
importanceClean <- importanceRaw[,`:=`(Cover=NULL, Frequency=NULL)]
head(importanceClean)
## ----fig.width=8, fig.height=5, fig.align='center'----------------------------
xgb.plot.importance(importance_matrix = importance)
## ----warning=FALSE, message=FALSE---------------------------------------------
c2 <- chisq.test(df$Age, output_vector)
print(c2)
## ----warning=FALSE, message=FALSE---------------------------------------------
c2 <- chisq.test(df$AgeDiscret, output_vector)
print(c2)
## ----warning=FALSE, message=FALSE---------------------------------------------
c2 <- chisq.test(df$AgeCat, output_vector)
print(c2)
## ----warning=FALSE, message=FALSE---------------------------------------------
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
#Random Forest - 1000 trees
bst <- xgboost(
data = train$data,
label = train$label,
max_depth = 4,
num_parallel_tree = 1000,
subsample = 0.5,
colsample_bytree = 0.5,
nrounds = 1,
objective = "binary:logistic",
nthread = 2
)
#Boosting - 3 rounds
bst <- xgboost(
data = train$data,
label = train$label,
max_depth = 4,
nrounds = 3,
objective = "binary:logistic",
nthread = 2
)
| /scratch/gouwar.j/cran-all/cranData/xgboost/inst/doc/discoverYourData.R |
---
title: "Understand your dataset with XGBoost"
output:
rmarkdown::html_vignette:
css: vignette.css
number_sections: yes
toc: yes
author: Tianqi Chen, Tong He, Michaël Benesty, Yuan Tang
vignette: >
%\VignetteIndexEntry{Discover your data}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
Understand your dataset with XGBoost
====================================
Introduction
------------
The purpose of this vignette is to show you how to use **XGBoost** to discover and understand your own dataset better.
This vignette is not about predicting anything (see [XGBoost presentation](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/xgboostPresentation.Rmd)). We will explain how to use **XGBoost** to highlight the *link* between the *features* of your data and the *outcome*.
Package loading:
```{r libLoading, results='hold', message=F, warning=F}
require(xgboost)
require(Matrix)
require(data.table)
if (!require('vcd')) {
install.packages('vcd')
}
data.table::setDTthreads(2)
```
> The **vcd** package is used only for one of its embedded datasets.
Preparation of the dataset
--------------------------
### Numeric vs. categorical variables
**XGBoost** manages only `numeric` vectors.
What to do when you have *categorical* data?
A *categorical* variable has a fixed number of different values. For instance, if a variable called *Colour* can have only one of these three values, *red*, *blue* or *green*, then *Colour* is a *categorical* variable.
> In **R**, a *categorical* variable is called `factor`.
>
> Type `?factor` in the console for more information.
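As a quick illustration (using the hypothetical *Colour* variable above, not the Arthritis data), such a variable would look like this as an R `factor`:
```
colour <- factor(c("red", "blue", "green", "red"))
levels(colour)
## [1] "blue"  "green" "red"
```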
To answer the question above, we will convert *categorical* variables to `numeric` ones.
### Conversion from categorical to numeric variables
#### Looking at the raw data
In this vignette we will see how to transform a *dense* `data.frame` (*dense* = few zeroes in the matrix) with *categorical* variables into a very *sparse* matrix (*sparse* = lots of zeroes in the matrix) of `numeric` features.
The method we are going to see is usually called [one-hot encoding](https://en.wikipedia.org/wiki/One-hot).
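For intuition only (again with the hypothetical *Colour* variable, not the Arthritis data), one-hot encoding turns each level into its own binary column, which `model.matrix` can produce:
```
colour <- factor(c("red", "blue", "green", "red"))
model.matrix(~ colour - 1)
##   colourblue colourgreen colourred
## 1          0           0         1
## 2          1           0         0
## 3          0           1         0
## 4          0           0         1
```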
The first step is to load `Arthritis` dataset in memory and wrap it with `data.table` package.
```{r, results='hide'}
data(Arthritis)
df <- data.table(Arthritis, keep.rownames = FALSE)
```
> `data.table` is 100% compliant with **R** `data.frame` but its syntax is more consistent and its performance for large dataset is [best in class](https://stackoverflow.com/questions/21435339/data-table-vs-dplyr-can-one-do-something-well-the-other-cant-or-does-poorly) (`dplyr` from **R** and `Pandas` from **Python** [included](https://github.com/Rdatatable/data.table/wiki/Benchmarks-%3A-Grouping)). Some parts of **XGBoost** **R** package use `data.table`.
The first thing we want to do is to have a look at the first few lines of the `data.table`:
```{r}
head(df)
```
Now we will check the format of each column.
```{r}
str(df)
```
2 columns have `factor` type, one has `ordinal` type.
> `ordinal` variable :
>
> * can take a limited number of values (like `factor`) ;
> * these values are ordered (unlike `factor`). Here these ordered values are: `Marked > Some > None`
#### Creation of new features based on old ones
We will add some new *categorical* features to see if it helps.
##### Grouping per 10 years
For the first feature we create groups of age by rounding the real age.
Note that we transform it to `factor` so the algorithm treats these age groups as independent values.
Therefore, 20 is not closer to 30 than it is to 60. In short, the distance between ages is lost in this transformation.
```{r}
head(df[,AgeDiscret := as.factor(round(Age/10,0))])
```
##### Random split into two groups
Following is an even stronger simplification of the real age with an arbitrary split at 30 years old. We choose this value **based on nothing**. We will see later if simplifying the information based on arbitrary values is a good strategy (you may already have an idea of how well it will work...).
```{r}
head(df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))])
```
##### Risks in adding correlated features
These new features are highly correlated with the `Age` feature because they are simple transformations of it.
For many machine learning algorithms, using correlated features is not a good idea. It may sometimes make prediction less accurate, and most of the time make interpretation of the model almost impossible. GLM, for instance, assumes that the features are uncorrelated.
Fortunately, decision tree algorithms (including boosted trees) are very robust to these features. Therefore, we don't need to do anything to manage this situation.
##### Cleaning data
We remove ID as there is nothing to learn from this feature (it would just add some noise).
```{r, results='hide'}
df[,ID:=NULL]
```
We will list the different values for the column `Treatment`:
```{r}
levels(df[,Treatment])
```
#### Encoding categorical features
Next step, we will transform the categorical data to dummy variables.
Several encoding methods exist, e.g., [one-hot encoding](https://en.wikipedia.org/wiki/One-hot) is a common approach.
We will use the [dummy contrast coding](https://stats.oarc.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/) which is popular because it produces "full rank" encoding (also see [this blog post by Max Kuhn](http://appliedpredictivemodeling.com/blog/2013/10/23/the-basics-of-encoding-categorical-data-for-predictive-models)).
The purpose is to transform each value of each *categorical* feature into a *binary* feature `{0, 1}`.
For example, the column `Treatment` will be replaced by two columns, `TreatmentPlacebo`, and `TreatmentTreated`. Each of them will be *binary*. Therefore, an observation which has the value `Placebo` in column `Treatment` before the transformation will have after the transformation the value `1` in the new column `TreatmentPlacebo` and the value `0` in the new column `TreatmentTreated`. The column `TreatmentPlacebo` will disappear during the contrast encoding, as it would be absorbed into a common constant intercept column.
Column `Improved` is excluded because it will be our `label` column, the one we want to predict.
```{r, warning=FALSE,message=FALSE}
sparse_matrix <- sparse.model.matrix(Improved ~ ., data = df)[,-1]
head(sparse_matrix)
```
> Formula `Improved ~ .` used above means transform all *categorical* features but column `Improved` to binary values. The `-1` column selection removes the intercept column which is full of `1` (this column is generated by the conversion). For more information, you can type `?sparse.model.matrix` in the console.
Create the output `numeric` vector (not as a sparse `Matrix`):
```{r}
output_vector = df[,Improved] == "Marked"
```
The line above:
1. sets the `Y` vector to `0`;
2. sets `Y` to `1` for rows where `Improved == Marked` is `TRUE`;
3. returns the `Y` vector.
Build the model
---------------
The code below is very usual. For more information, you can look at the documentation of `xgboost` function (or at the vignette [XGBoost presentation](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/xgboostPresentation.Rmd)).
```{r}
bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 4,
               eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic")
```
You can see `train-error: 0.XXXXX` lines in the output. The number decreases. Each line shows how well the model explains your data. Lower is better.
A small value for training error may be a symptom of [overfitting](https://en.wikipedia.org/wiki/Overfitting), meaning the model will not accurately predict the future values.
> Here you can see the numbers decrease until line 7 and then increase.
>
> It probably means we are overfitting. To fix that I should reduce the number of rounds to `nrounds = 4`. I will let things like that because I don't really care for the purpose of this example :-)
Feature importance
------------------
## Measure feature importance
### Build the feature importance data.table
Remember, each binary column corresponds to a single value of one of *categorical* features.
```{r}
importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst)
head(importance)
```
> The column `Gain` provides the information we are looking for.
>
> As you can see, features are classified by `Gain`.
`Gain` is the improvement in accuracy brought by a feature to the branches it is on. The idea is that before adding a new split on a feature X to the branch, there were some wrongly classified elements; after adding the split on this feature, there are two new branches, and each of these branches is more accurate (one branch saying that if your observation is on this branch then it should be classified as `1`, and the other branch saying the exact opposite).
`Cover` measures the relative quantity of observations concerned by a feature.
`Frequency` is a simpler way to measure the `Gain`. It just counts the number of times a feature is used in all generated trees. You should not use it (unless you know why you want to use it).
#### Improvement in the interpretability of feature importance data.table
We can go deeper in the analysis of the model. In the `data.table` above, we have discovered which features count to predict if the illness will go or not. But we don't yet know the role of these features. For instance, one of the questions we may want to answer would be: does receiving a placebo treatment help to recover from the illness?
One simple solution is to count the co-occurrences of a feature and a class of the classification.
For that purpose we will execute the same function as above but using two more parameters, `data` and `label`.
```{r}
importanceRaw <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst, data = sparse_matrix, label = output_vector)
# Cleaning for better display
importanceClean <- importanceRaw[,`:=`(Cover=NULL, Frequency=NULL)]
head(importanceClean)
```
> In the table above we have removed two unneeded columns and selected only the first lines.
The first thing you notice is the new column `Split`. It is the split applied to the feature on a branch of one of the trees. Each split is present, therefore a feature can appear several times in this table. Here we can see the feature `Age` is used several times with different splits.
How is the split applied to count the co-occurrences? It is always `<`. For instance, in the second line, we measure the number of persons under 61.5 years old with the illness gone after the treatment.
The two other new columns are `RealCover` and `RealCover %`. The first column measures the number of observations in the dataset where the split is respected and the label is marked as `1`. The second column is the percentage of the whole population that `RealCover` represents.
Therefore, according to our findings, getting a placebo doesn't seem to help, but being younger than 61 years may help (which seems logical).
> You may wonder how to interpret the `< 1.00001` on the first line. Basically, in a sparse `Matrix`, there are no `0`s stored; therefore, looking for one-hot encoded categorical observations validating the rule `< 1.00001` amounts to just looking for `1` for this feature.
### Plotting the feature importance
All these things are nice, but it would be even better to plot the results.
```{r, fig.width=8, fig.height=5, fig.align='center'}
xgb.plot.importance(importance_matrix = importance)
```
Features have automatically been divided into 2 clusters: the interesting features... and the others.
> Depending on the dataset and the learning parameters you may have more than two clusters. The default is to limit them to `10`, but you can increase this limit. Look at the function documentation for more information.
According to the plot above, the most important features in this dataset for predicting whether the treatment will work are:
* the age;
* having received a placebo or not;
* the sex, which comes third but is already in the not interesting features group;
* then our generated features (AgeDiscret); we can see that their contribution is very low.
### Do these results make sense?
Let's check some **Chi2** between each of these features and the label.
Higher **Chi2** means better correlation.
```{r, warning=FALSE, message=FALSE}
c2 <- chisq.test(df$Age, output_vector)
print(c2)
```
The Pearson chi-squared statistic between Age and illness disappearing is **`r round(c2$statistic, 2)`**.
```{r, warning=FALSE, message=FALSE}
c2 <- chisq.test(df$AgeDiscret, output_vector)
print(c2)
```
Our first simplification of Age gives a chi-squared statistic of **`r round(c2$statistic, 2)`**.
```{r, warning=FALSE, message=FALSE}
c2 <- chisq.test(df$AgeCat, output_vector)
print(c2)
```
The perfectly arbitrary split I did between young and old at 30 years old has a low chi-squared statistic of **`r round(c2$statistic, 2)`**. It's a result we could expect: maybe in my mind being over 30 means being old (I am 32 and starting to feel old, which may explain it), but for the illness we are studying, the vulnerable age is not the same.
Moral of the story: don't let your *gut* lower the quality of your model.
In the expression *data science*, there is the word *science* :-)
Conclusion
----------
As you can see, in general *destroying information by simplifying it won't improve your model*. **Chi2** just demonstrates that.
But in more complex cases, creating a new feature based on existing one which makes link with the outcome more obvious may help the algorithm and improve the model.
The case studied here is not complex enough to show that. Check the [Kaggle website](http://www.kaggle.com/) for some challenging datasets. However, it's almost always worse when you add some arbitrary rules.
Moreover, you can notice that even though we have added some useless new features highly correlated with other features, the boosted tree algorithm has been able to choose the best one, which in this case is the Age.
A linear model may not be that smart in this scenario.
Special Note: What about Random Forests™?
-----------------------------------------
As you may know, the [Random Forests](https://en.wikipedia.org/wiki/Random_forest) algorithm is a cousin of boosting, and both are part of the [ensemble learning](https://en.wikipedia.org/wiki/Ensemble_learning) family.
Both train several decision trees for one dataset. The *main* difference is that in Random Forests, trees are independent, while in boosting, tree `N+1` focuses its learning on the loss (<=> what has not been well modeled by tree `N`).
This difference has an impact on a corner case in feature importance analysis: the *correlated features*.
Imagine two features perfectly correlated, feature `A` and feature `B`. For one specific tree, if the algorithm needs one of them, it will choose randomly (true in both boosting and Random Forests).
However, in Random Forests this random choice will be done for each tree, because each tree is independent of the others. Therefore, approximately, depending on your parameters, 50% of the trees will choose feature `A` and the other 50% will choose feature `B`. So the *importance* of the information contained in `A` and `B` (which is the same, because they are perfectly correlated) is diluted in `A` and `B`. So you won't easily know this information is important to predict what you want to predict! It is even worse when you have 10 correlated features...
In boosting, when a specific link between feature and outcome has been learned by the algorithm, it will try not to refocus on it (in theory that is what happens; reality is not always that simple). Therefore, all the importance will be on feature `A` or on feature `B` (but not both). You will know that one feature has an important role in the link between the observations and the label. It is still up to you to search for the features correlated with the one detected as important if you need to know all of them.
If you want to try Random Forests algorithm, you can tweak XGBoost parameters!
For instance, to compute a model with 1000 trees, with a 0.5 factor on sampling rows and columns:
```{r, warning=FALSE, message=FALSE}
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
#Random Forest - 1000 trees
bst <- xgboost(
data = train$data,
label = train$label,
max_depth = 4,
num_parallel_tree = 1000,
subsample = 0.5,
colsample_bytree = 0.5,
nrounds = 1,
objective = "binary:logistic",
nthread = 2
)
#Boosting - 3 rounds
bst <- xgboost(
data = train$data,
label = train$label,
max_depth = 4,
nrounds = 3,
objective = "binary:logistic",
nthread = 2
)
```
> Note that the parameter `nrounds` is set to `1`.
> [**Random Forests**](https://www.stat.berkeley.edu/~breiman/RandomForests/cc_papers.htm) is a trademark of Leo Breiman and Adele Cutler and is licensed exclusively to Salford Systems for the commercial release of the software.
| /scratch/gouwar.j/cran-all/cranData/xgboost/inst/doc/discoverYourData.Rmd |
## ----knitropts,echo=FALSE,message=FALSE---------------------------------------
if (require('knitr')) opts_chunk$set(fig.width = 5, fig.height = 5, fig.align = 'center', tidy = FALSE, warning = FALSE, cache = TRUE)
## ----prelim,echo=FALSE--------------------------------------------------------
xgboost.version <- packageDescription("xgboost")$Version
## ----Training and prediction with iris----------------------------------------
library(xgboost)
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max_depth = 2, eta = 1,
nrounds = 2, objective = "binary:logistic", nthread = 2)
xgb.save(bst, 'model.save')
bst = xgb.load('model.save')
xgb.parameters(bst) <- list(nthread = 2)
pred <- predict(bst, test$data)
## ----Dump Model---------------------------------------------------------------
xgb.dump(bst, 'model.dump')
## ----xgb.DMatrix--------------------------------------------------------------
dtrain <- xgb.DMatrix(train$data, label = train$label, nthread = 2)
class(dtrain)
head(getinfo(dtrain,'label'))
## ----save model---------------------------------------------------------------
xgb.DMatrix.save(dtrain, 'xgb.DMatrix')
dtrain = xgb.DMatrix('xgb.DMatrix')
## ----Customized loss function-------------------------------------------------
logregobj <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
preds <- 1/(1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
err <- sqrt(mean((preds-labels)^2))
return(list(metric = "MSE", value = err))
}
dtest <- xgb.DMatrix(test$data, label = test$label, nthread = 2)
watchlist <- list(eval = dtest, train = dtrain)
param <- list(max_depth = 2, eta = 1, nthread = 2)
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, logregobj, evalerror, maximize = FALSE)
## ----Temp file cleaning, include=FALSE----------------------------------------
file.remove("xgb.DMatrix")
file.remove("model.dump")
file.remove("model.save")
| /scratch/gouwar.j/cran-all/cranData/xgboost/inst/doc/xgboost.R |
## ----installGithub, eval=FALSE------------------------------------------------
# install.packages("drat", repos="https://cran.rstudio.com")
# drat:::addRepo("dmlc")
# install.packages("xgboost", repos="http://dmlc.ml/drat/", type = "source")
## ----eval=FALSE---------------------------------------------------------------
# install.packages("xgboost")
## ----libLoading, results='hold', message=F, warning=F-------------------------
require(xgboost)
## ----datasetLoading, results='hold', message=F, warning=F---------------------
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
## ----dataList, message=F, warning=F-------------------------------------------
str(train)
## ----dataSize, message=F, warning=F-------------------------------------------
dim(train$data)
dim(test$data)
## ----dataClass, message=F, warning=F------------------------------------------
class(train$data)[1]
class(train$label)
## ----trainingSparse, message=F, warning=F-------------------------------------
bstSparse <- xgboost(data = train$data, label = train$label, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
## ----trainingDense, message=F, warning=F--------------------------------------
bstDense <- xgboost(
data = as.matrix(train$data),
label = train$label,
max_depth = 2,
eta = 1,
nthread = 2,
nrounds = 2,
objective = "binary:logistic"
)
## ----trainingDmatrix, message=F, warning=F------------------------------------
dtrain <- xgb.DMatrix(data = train$data, label = train$label, nthread = 2)
bstDMatrix <- xgboost(
data = dtrain,
max_depth = 2,
eta = 1,
nthread = 2,
nrounds = 2,
objective = "binary:logistic"
)
## ----trainingVerbose0, message=T, warning=F-----------------------------------
# verbose = 0, no message
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 0)
## ----trainingVerbose1, message=T, warning=F-----------------------------------
# verbose = 1, print evaluation metric
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 1)
## ----trainingVerbose2, message=T, warning=F-----------------------------------
# verbose = 2, also print information about tree
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 2)
## ----predicting, message=F, warning=F-----------------------------------------
pred <- predict(bst, test$data)
# size of the prediction vector
print(length(pred))
# limit display of predictions to the first 10
print(head(pred))
## ----predictingTest, message=F, warning=F-------------------------------------
prediction <- as.numeric(pred > 0.5)
print(head(prediction))
## ----predictingAverageError, message=F, warning=F-----------------------------
err <- mean(as.numeric(pred > 0.5) != test$label)
print(paste("test-error=", err))
## ----DMatrix, message=F, warning=F--------------------------------------------
dtrain <- xgb.DMatrix(data = train$data, label = train$label, nthread = 2)
dtest <- xgb.DMatrix(data = test$data, label = test$label, nthread = 2)
## ----watchlist, message=F, warning=F------------------------------------------
watchlist <- list(train=dtrain, test=dtest)
bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, objective = "binary:logistic")
## ----watchlist2, message=F, warning=F-----------------------------------------
bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic")
## ----linearBoosting, message=F, warning=F-------------------------------------
bst <- xgb.train(data=dtrain, booster = "gblinear", max_depth=2, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic")
## ----DMatrixSave, message=F, warning=F----------------------------------------
xgb.DMatrix.save(dtrain, "dtrain.buffer")
# to load it in, simply call xgb.DMatrix
dtrain2 <- xgb.DMatrix("dtrain.buffer")
bst <- xgb.train(data=dtrain2, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, objective = "binary:logistic")
## ----DMatrixDel, include=FALSE------------------------------------------------
file.remove("dtrain.buffer")
## ----getinfo, message=F, warning=F--------------------------------------------
label = getinfo(dtest, "label")
pred <- predict(bst, dtest)
err <- as.numeric(sum(as.integer(pred > 0.5) != label))/length(label)
print(paste("test-error=", err))
## ----dump, message=T, warning=F-----------------------------------------------
xgb.dump(bst, with_stats = TRUE)
## ----saveModel, message=F, warning=F------------------------------------------
# save model to binary local file
xgb.save(bst, "xgboost.model")
## ----loadModel, message=F, warning=F------------------------------------------
# load binary model to R
bst2 <- xgb.load("xgboost.model")
xgb.parameters(bst2) <- list(nthread = 2)
pred2 <- predict(bst2, test$data)
# And now the test
print(paste("sum(abs(pred2-pred))=", sum(abs(pred2-pred))))
## ----clean, include=FALSE-----------------------------------------------------
# delete the created model
file.remove("./xgboost.model")
## ----saveLoadRBinVectorModel, message=F, warning=F----------------------------
# save model to R's raw vector
rawVec <- xgb.serialize(bst)
# print class
print(class(rawVec))
# load binary model to R
bst3 <- xgb.load(rawVec)
xgb.parameters(bst3) <- list(nthread = 2)
pred3 <- predict(bst3, test$data)
# pred3 should be identical to pred
print(paste("sum(abs(pred3-pred))=", sum(abs(pred3-pred))))
| /scratch/gouwar.j/cran-all/cranData/xgboost/inst/doc/xgboostPresentation.R |
---
title: "XGBoost presentation"
output:
rmarkdown::html_vignette:
css: vignette.css
number_sections: yes
toc: yes
bibliography: xgboost.bib
author: Tianqi Chen, Tong He, Michaël Benesty
vignette: >
%\VignetteIndexEntry{XGBoost presentation}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
XGBoost R Tutorial
==================
## Introduction
**XGBoost** is short for e**X**treme **G**radient **Boost**ing package.
The purpose of this Vignette is to show you how to use **XGBoost** to build a model and make predictions.
It is an efficient and scalable implementation of gradient boosting framework by @friedman2000additive and @friedman2001greedy. Two solvers are included:
- *linear* model ;
- *tree learning* algorithm.
It supports various objective functions, including *regression*, *classification* and *ranking*. The package is made to be extendible, so that users are also allowed to define their own objective functions easily.
It has been [used](https://github.com/dmlc/xgboost) to win several [Kaggle](http://www.kaggle.com) competitions.
It has several features:
* Speed: it can automatically do parallel computation on *Windows* and *Linux*, with *OpenMP*. It is generally over 10 times faster than the classical `gbm`.
* Input Type: it takes several types of input data:
* *Dense* Matrix: *R*'s *dense* matrix, i.e. `matrix` ;
* *Sparse* Matrix: *R*'s *sparse* matrix, i.e. `Matrix::dgCMatrix` ;
* Data File: local data files ;
* `xgb.DMatrix`: its own class (recommended).
* Sparsity: it accepts *sparse* input for both *tree booster* and *linear booster*, and is optimized for *sparse* input ;
* Customization: it supports customized objective functions and evaluation functions.
## Installation
### GitHub version
For weekly updated version (highly recommended), install from *GitHub*:
```{r installGithub, eval=FALSE}
install.packages("drat", repos="https://cran.rstudio.com")
drat:::addRepo("dmlc")
install.packages("xgboost", repos="http://dmlc.ml/drat/", type = "source")
```
> *Windows* user will need to install [Rtools](https://cran.r-project.org/bin/windows/Rtools/) first.
### CRAN version
The version 0.4-2 is on CRAN, and you can install it by:
```{r, eval=FALSE}
install.packages("xgboost")
```
Formerly available versions can be obtained from the CRAN [archive](https://cran.r-project.org/src/contrib/Archive/xgboost/)
## Learning
For the purpose of this tutorial we will load **XGBoost** package.
```{r libLoading, results='hold', message=F, warning=F}
require(xgboost)
```
### Dataset presentation
In this example, we are aiming to predict whether a mushroom can be eaten or not (like in many tutorials, the example data are the same as those you will use in your everyday life :-).
Mushroom data is cited from UCI Machine Learning Repository. @Bache+Lichman:2013.
### Dataset loading
We will load the `agaricus` datasets embedded with the package and will link them to variables.
The datasets are already split in:
* `train`: will be used to build the model ;
* `test`: will be used to assess the quality of our model.
Why *split* the dataset in two parts?
In the first part we will build our model. In the second part we will want to test it and assess its quality. Without dividing the dataset, we would test the model on data which the algorithm has already seen.
```{r datasetLoading, results='hold', message=F, warning=F}
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
```
> In the real world, it would be up to you to make this division between `train` and `test` data. The way to do it is beyond the purpose of this article; however, the `caret` package may [help](http://topepo.github.io/caret/data-splitting.html).
Each variable is a `list` containing two things, `label` and `data`:
```{r dataList, message=F, warning=F}
str(train)
```
`label` is the outcome of our dataset meaning it is the binary *classification* we will try to predict.
Let's discover the dimensionality of our datasets.
```{r dataSize, message=F, warning=F}
dim(train$data)
dim(test$data)
```
This dataset is kept very small so as not to make the **R** package too heavy; however, **XGBoost** is built to manage huge datasets very efficiently.
As seen below, the `data` are stored in a `dgCMatrix` which is a *sparse* matrix and `label` vector is a `numeric` vector (`{0,1}`):
```{r dataClass, message=F, warning=F}
class(train$data)[1]
class(train$label)
```
### Basic Training using XGBoost
This step is the most critical part of the process for the quality of our model.
#### Basic training
We are using the `train` data. As explained above, both `data` and `label` are stored in a `list`.
In a *sparse* matrix, cells containing `0` are not stored in memory. Therefore, in a dataset mainly made of `0`, memory size is reduced. It is very common to have such a dataset.
We will train decision tree model using the following parameters:
* `objective = "binary:logistic"`: we will train a binary classification model ;
* `max_depth = 2`: the trees won't be deep, because our case is very simple ;
* `nthread = 2`: the number of CPU threads we are going to use;
* `nrounds = 2`: there will be two passes on the data, the second one will enhance the model by further reducing the difference between ground truth and prediction.
```{r trainingSparse, message=F, warning=F}
bstSparse <- xgboost(data = train$data, label = train$label, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
```
> The more complex the relationship between your features and your `label` is, the more passes you need.
#### Parameter variations
##### Dense matrix
Alternatively, you can put your dataset in a *dense* matrix, i.e. a basic **R** matrix.
```{r trainingDense, message=F, warning=F}
bstDense <- xgboost(
data = as.matrix(train$data),
label = train$label,
max_depth = 2,
eta = 1,
nthread = 2,
nrounds = 2,
objective = "binary:logistic"
)
```
##### xgb.DMatrix
**XGBoost** offers a way to group them in a `xgb.DMatrix`. You can even add other meta data in it. It will be useful for the most advanced features we will discover later.
```{r trainingDmatrix, message=F, warning=F}
dtrain <- xgb.DMatrix(data = train$data, label = train$label, nthread = 2)
bstDMatrix <- xgboost(
data = dtrain,
max_depth = 2,
eta = 1,
nthread = 2,
nrounds = 2,
objective = "binary:logistic"
)
```
##### Verbose option
**XGBoost** has several features to help you view how the learning progresses internally. The purpose is to help you set the best parameters, which is the key to your model's quality.
One of the simplest ways to see the training progress is to set the `verbose` option (see below for more advanced techniques).
```{r trainingVerbose0, message=T, warning=F}
# verbose = 0, no message
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 0)
```
```{r trainingVerbose1, message=T, warning=F}
# verbose = 1, print evaluation metric
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 1)
```
```{r trainingVerbose2, message=T, warning=F}
# verbose = 2, also print information about tree
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 2)
```
## Basic prediction using XGBoost
## Perform the prediction
The purpose of the model we have built is to classify new data. As explained before, we will use the `test` dataset for this step.
```{r predicting, message=F, warning=F}
pred <- predict(bst, test$data)
# size of the prediction vector
print(length(pred))
# limit display of predictions to the first 10
print(head(pred))
```
These numbers don't look like *binary classification* `{0,1}`. We need to perform a simple transformation before being able to use these results.
## Transform the regression in a binary classification
The only thing that **XGBoost** does is a *regression*. **XGBoost** uses the `label` vector to build its *regression* model.
How can we use a *regression* model to perform a binary classification?
If we think about the meaning of a regression applied to our data, the numbers we get are probabilities that a datum will be classified as `1`. Therefore, we will set the rule that if this probability for a specific datum is `> 0.5` then the observation is classified as `1` (or `0` otherwise).
```{r predictingTest, message=F, warning=F}
prediction <- as.numeric(pred > 0.5)
print(head(prediction))
```
## Measuring model performance
To measure the model performance, we will compute a simple metric, the *average error*.
```{r predictingAverageError, message=F, warning=F}
err <- mean(as.numeric(pred > 0.5) != test$label)
print(paste("test-error=", err))
```
> Note that the algorithm has not seen the `test` data during the model construction.
Steps explanation:
1. `as.numeric(pred > 0.5)` applies our rule that when the probability (<=> regression <=> prediction) is `> 0.5` the observation is classified as `1` and `0` otherwise ;
2. `probabilityVectorPreviouslyComputed != test$label` computes the vector of error between true data and computed probabilities ;
3. `mean(vectorOfErrors)` computes the *average error* itself.
The most important thing to remember is that **to do a classification, you just do a regression to the** `label` **and then apply a threshold**.
*Multiclass* classification works in a similar way.
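As a sketch only (run on simulated data, not part of this mushroom example), a multiclass model could use the `multi:softprob` objective, with the predicted class taken as the column of highest probability:
```
# illustrative sketch: a 3-class toy problem on simulated data
set.seed(1)
K <- 3
x <- matrix(rnorm(300 * 4), ncol = 4)
y <- sample(0:(K - 1), 300, replace = TRUE)
bst_mc <- xgboost(data = x, label = y, num_class = K, max_depth = 2,
                  nrounds = 10, nthread = 2, objective = "multi:softprob", verbose = 0)
prob <- matrix(predict(bst_mc, x), ncol = K, byrow = TRUE)  # one row of K probabilities per observation
pred_class <- max.col(prob) - 1  # back to 0-based class ids
```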
This metric is **`r round(err, 2)`** and is pretty low: our yummy mushroom model works well!
## Advanced features
Most of the features below have been implemented to help you to improve your model by offering a better understanding of its content.
### Dataset preparation
For the following advanced features, we need to put data in `xgb.DMatrix` as explained above.
```{r DMatrix, message=F, warning=F}
dtrain <- xgb.DMatrix(data = train$data, label = train$label, nthread = 2)
dtest <- xgb.DMatrix(data = test$data, label = test$label, nthread = 2)
```
### Measure learning progress with xgb.train
Both `xgboost` (simple) and `xgb.train` (advanced) functions train models.
One of the special features of `xgb.train` is the capacity to follow the progress of the learning after each round. Because of the way boosting works, there is a point where having too many rounds leads to overfitting. You can see this feature as a cousin of the cross-validation method. The following techniques will help you to avoid overfitting and to optimize the learning time by stopping it as soon as possible.
One way to measure progress in learning of a model is to provide to **XGBoost** a second dataset already classified. Therefore it can learn on the first dataset and test its model on the second one. Some metrics are measured after each round during the learning.
> In some way it is similar to what we have done above with the average error. The main difference is that above it was after building the model, and now it is during the construction that we measure errors.
For the purpose of this example, we use `watchlist` parameter. It is a list of `xgb.DMatrix`, each of them tagged with a name.
```{r watchlist, message=F, warning=F}
watchlist <- list(train=dtrain, test=dtest)
bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, objective = "binary:logistic")
```
**XGBoost** has computed at each round the same average error metric as seen above (we set `nrounds` to 2, that is why we have two lines). Obviously, the `train-error` number is related to the training dataset (the one the algorithm learns from) and the `test-error` number to the test dataset.
Both training and test error related metrics are very similar, and in some way, it makes sense: what we have learned from the training dataset matches the observations from the test dataset.
If with your own dataset you do not have such results, you should think about how you divided your dataset into training and test sets. Maybe there is something to fix. Again, the `caret` package may [help](http://topepo.github.io/caret/data-splitting.html).
For a better understanding of the learning progression, you may want to have some specific metric or even use multiple evaluation metrics.
```{r watchlist2, message=F, warning=F}
bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic")
```
> `eval_metric` allows us to monitor two new metrics for each round, `logloss` and `error`.
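Building on this, if you want training to stop automatically once the monitored metric stops improving, a sketch (not evaluated here) using the `early_stopping_rounds` argument could look like:
```
bst <- xgb.train(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 100,
                 watchlist = watchlist, eval_metric = "error",
                 objective = "binary:logistic", early_stopping_rounds = 10)
```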
### Linear boosting
Until now, all the learning we have performed was based on boosted trees. **XGBoost** implements a second algorithm, based on linear boosting. The only difference from the previous command is the `booster = "gblinear"` parameter (and the removal of the `eta` parameter).
```{r linearBoosting, message=F, warning=F}
bst <- xgb.train(data=dtrain, booster = "gblinear", max_depth=2, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic")
```
In this specific case, *linear boosting* gets slightly better performance metrics than the decision-tree-based algorithm.
In simple cases, this will happen because there is nothing better than a linear algorithm to catch a linear link. However, decision trees are much better at catching a non-linear link between predictors and outcome. Because there is no silver bullet, we advise you to check both algorithms with your own datasets to get an idea of what to use.
### Manipulating xgb.DMatrix
#### Save / Load
Like saving models, `xgb.DMatrix` object (which groups both dataset and outcome) can also be saved using `xgb.DMatrix.save` function.
```{r DMatrixSave, message=F, warning=F}
xgb.DMatrix.save(dtrain, "dtrain.buffer")
# to load it in, simply call xgb.DMatrix
dtrain2 <- xgb.DMatrix("dtrain.buffer")
bst <- xgb.train(data=dtrain2, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, objective = "binary:logistic")
```
```{r DMatrixDel, include=FALSE}
file.remove("dtrain.buffer")
```
#### Information extraction
Information can be extracted from `xgb.DMatrix` using `getinfo` function. Hereafter we will extract `label` data.
```{r getinfo, message=F, warning=F}
label = getinfo(dtest, "label")
pred <- predict(bst, dtest)
err <- as.numeric(sum(as.integer(pred > 0.5) != label))/length(label)
print(paste("test-error=", err))
```
### View feature importance/influence from the learnt model
Feature importance is similar to R gbm package's relative influence (rel.inf).
```
importance_matrix <- xgb.importance(model = bst)
print(importance_matrix)
xgb.plot.importance(importance_matrix = importance_matrix)
```
#### View the trees from a model
You can dump the tree you learned using `xgb.dump` into a text file.
```{r dump, message=T, warning=F}
xgb.dump(bst, with_stats = TRUE)
```
You can plot the trees from your model using `xgb.plot.tree`:
```
xgb.plot.tree(model = bst)
```
> if you provide a path to `fname` parameter you can save the trees to your hard drive.
#### Save and load models
Maybe your dataset is big, and it takes time to train a model on it? Maybe you are not a big fan of losing time redoing the same task again and again? In such cases, you will want to save your model and load it when required.
Fortunately for you, **XGBoost** implements such functions.
```{r saveModel, message=F, warning=F}
# save model to binary local file
xgb.save(bst, "xgboost.model")
```
> The `xgb.save` function should return `r TRUE` if everything goes well, and crashes otherwise.
An interesting test to see how identical our saved model is to the original one would be to compare the two predictions.
```{r loadModel, message=F, warning=F}
# load binary model to R
bst2 <- xgb.load("xgboost.model")
xgb.parameters(bst2) <- list(nthread = 2)
pred2 <- predict(bst2, test$data)
# And now the test
print(paste("sum(abs(pred2-pred))=", sum(abs(pred2-pred))))
```
```{r clean, include=FALSE}
# delete the created model
file.remove("./xgboost.model")
```
> result is `0`? We are good!
In some very specific cases, like when you want to pilot **XGBoost** from the `caret` package, you will want to save the model as an *R* binary vector. See below how to do it.
```{r saveLoadRBinVectorModel, message=F, warning=F}
# save model to R's raw vector
rawVec <- xgb.serialize(bst)
# print class
print(class(rawVec))
# load binary model to R
bst3 <- xgb.load(rawVec)
xgb.parameters(bst3) <- list(nthread = 2)
pred3 <- predict(bst3, test$data)
# pred3 should be identical to pred
print(paste("sum(abs(pred3-pred))=", sum(abs(pred3-pred))))
```
> Again `0`? It seems that `XGBoost` works pretty well!
## References
| /scratch/gouwar.j/cran-all/cranData/xgboost/inst/doc/xgboostPresentation.Rmd |
## -----------------------------------------------------------------------------
require(xgboost)
require(jsonlite)
require(float)
options(digits=22)
## -----------------------------------------------------------------------------
dates <- c(20180130, 20180130, 20180130,
20180130, 20180130, 20180130,
20180131, 20180131, 20180131,
20180131, 20180131, 20180131,
20180131, 20180131, 20180131,
20180134, 20180134, 20180134)
labels <- c(1, 1, 1,
1, 1, 1,
0, 0, 0,
0, 0, 0,
0, 0, 0,
0, 0, 0)
data <- data.frame(dates = dates, labels=labels)
bst <- xgboost(
data = as.matrix(data$dates),
label = labels,
nthread = 2,
nrounds = 1,
objective = "binary:logistic",
missing = NA,
max_depth = 1
)
## -----------------------------------------------------------------------------
bst_json <- xgb.dump(bst, with_stats = FALSE, dump_format='json')
bst_from_json <- fromJSON(bst_json, simplifyDataFrame = FALSE)
node <- bst_from_json[[1]]
cat(bst_json)
## -----------------------------------------------------------------------------
bst_preds_logodds <- predict(bst,as.matrix(data$dates), outputmargin = TRUE)
# calculate the logodds values using the JSON representation
bst_from_json_logodds <- ifelse(data$dates<node$split_condition,
node$children[[1]]$leaf,
node$children[[2]]$leaf)
bst_preds_logodds
bst_from_json_logodds
# test that values are equal
bst_preds_logodds == bst_from_json_logodds
## -----------------------------------------------------------------------------
round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
## -----------------------------------------------------------------------------
# now convert the dates to floats first
bst_from_json_logodds <- ifelse(fl(data$dates)<node$split_condition,
node$children[[1]]$leaf,
node$children[[2]]$leaf)
# test that values are equal
round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
## -----------------------------------------------------------------------------
fl(20180131)
## -----------------------------------------------------------------------------
# test that values are equal
bst_preds_logodds == bst_from_json_logodds
## -----------------------------------------------------------------------------
# now convert the dates to floats first
bst_from_json_logodds <- ifelse(fl(data$dates)<fl(node$split_condition),
as.numeric(fl(node$children[[1]]$leaf)),
as.numeric(fl(node$children[[2]]$leaf)))
# test that values are equal
bst_preds_logodds == bst_from_json_logodds
## -----------------------------------------------------------------------------
bst_preds <- predict(bst,as.matrix(data$dates))
# calculate the predictions casting doubles to floats
bst_from_json_preds <- ifelse(fl(data$dates)<fl(node$split_condition),
as.numeric(1/(1+exp(-1*fl(node$children[[1]]$leaf)))),
as.numeric(1/(1+exp(-1*fl(node$children[[2]]$leaf))))
)
# test that values are equal
bst_preds == bst_from_json_preds
## -----------------------------------------------------------------------------
# calculate the predictions casting doubles to floats
bst_from_json_preds <- ifelse(fl(data$dates)<fl(node$split_condition),
as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[1]]$leaf)))),
as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[2]]$leaf))))
)
# test that values are equal
bst_preds == bst_from_json_preds
| /scratch/gouwar.j/cran-all/cranData/xgboost/inst/doc/xgboostfromJSON.R |
---
title: "XGBoost from JSON"
output:
rmarkdown::html_vignette:
number_sections: yes
toc: yes
author: Roland Stevenson
vignette: >
%\VignetteIndexEntry{XGBoost from JSON}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
XGBoost from JSON
=================
## Introduction
The purpose of this Vignette is to show you how to correctly load and work with an **XGBoost** model that has been dumped to JSON. **XGBoost** internally converts all data to [32-bit floats](https://en.wikipedia.org/wiki/Single-precision_floating-point_format), and the values dumped to JSON are decimal representations of these values. When working with a model that has been parsed from a JSON file, care must be taken to correctly treat:
- the input data, which should be converted to 32-bit floats
- any 32-bit floats that were stored in JSON as decimal representations
- any calculations, which must be done with 32-bit mathematical operators
## Setup
For the purpose of this tutorial we will load the xgboost, jsonlite, and float packages. We'll also set `digits=22` in our options in case we want to inspect many digits of our results.
```{r}
require(xgboost)
require(jsonlite)
require(float)
options(digits=22)
```
We will create a toy binary logistic model based on the example first provided [here](https://github.com/dmlc/xgboost/issues/3960), so that we can easily understand the structure of the dumped JSON model object. This will allow us to understand where discrepancies can occur and how they should be handled.
```{r}
dates <- c(20180130, 20180130, 20180130,
20180130, 20180130, 20180130,
20180131, 20180131, 20180131,
20180131, 20180131, 20180131,
20180131, 20180131, 20180131,
20180134, 20180134, 20180134)
labels <- c(1, 1, 1,
1, 1, 1,
0, 0, 0,
0, 0, 0,
0, 0, 0,
0, 0, 0)
data <- data.frame(dates = dates, labels=labels)
bst <- xgboost(
data = as.matrix(data$dates),
label = labels,
nthread = 2,
nrounds = 1,
objective = "binary:logistic",
missing = NA,
max_depth = 1
)
```
## Comparing results
We will now dump the model to JSON and attempt to illustrate a variety of issues that can arise, and how to properly deal with them.
First let's dump the model to JSON:
```{r}
bst_json <- xgb.dump(bst, with_stats = FALSE, dump_format='json')
bst_from_json <- fromJSON(bst_json, simplifyDataFrame = FALSE)
node <- bst_from_json[[1]]
cat(bst_json)
```
The tree JSON shown by the above code-chunk tells us that if the data is less than 20180132, the tree will output the value in the first leaf. Otherwise it will output the value in the second leaf. Let's try to reproduce this manually with the data we have and confirm that it matches the model predictions we've already calculated.
```{r}
bst_preds_logodds <- predict(bst,as.matrix(data$dates), outputmargin = TRUE)
# calculate the logodds values using the JSON representation
bst_from_json_logodds <- ifelse(data$dates<node$split_condition,
node$children[[1]]$leaf,
node$children[[2]]$leaf)
bst_preds_logodds
bst_from_json_logodds
# test that values are equal
bst_preds_logodds == bst_from_json_logodds
```
None are equal. What happened?
At this stage two things happened:
- input data was not converted to 32-bit floats
- the JSON variables were not converted to 32-bit floats
### Lesson 1: All data is 32-bit floats
> When working with imported JSON, all data must be converted to 32-bit floats
To explain this, let's repeat the comparison and round to two decimals:
```{r}
round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
```
If we round to two decimals, we see that only the elements related to data values of `20180131` don't agree. If we convert the data to floats, they agree:
```{r}
# now convert the dates to floats first
bst_from_json_logodds <- ifelse(fl(data$dates)<node$split_condition,
node$children[[1]]$leaf,
node$children[[2]]$leaf)
# test that values are equal
round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
```
What's the lesson? If we are going to work with an imported JSON model, any data must be converted to floats first. In this case, since '20180131' cannot be represented as a 32-bit float, it is rounded up to 20180132, as shown here:
```{r}
fl(20180131)
```
### Lesson 2: JSON parameters are 32-bit floats
> All JSON parameters stored as floats must be converted to floats.
Let's now say we do care about numbers past the first two decimals.
```{r}
# test that values are equal
bst_preds_logodds == bst_from_json_logodds
```
None are exactly equal. What happened? Although we've converted the data to 32-bit floats, we also need to convert the JSON parameters to 32-bit floats. Let's do this:
```{r}
# convert both the dates and the JSON parameters to floats
bst_from_json_logodds <- ifelse(fl(data$dates)<fl(node$split_condition),
as.numeric(fl(node$children[[1]]$leaf)),
as.numeric(fl(node$children[[2]]$leaf)))
# test that values are equal
bst_preds_logodds == bst_from_json_logodds
```
All equal. What's the lesson? If we are going to work with an imported JSON model, any JSON parameters that were stored as floats must also be converted to floats first.
### Lesson 3: Use 32-bit math
> Always use 32-bit numbers and operators
We were able to get the log-odds to agree, so now let's manually calculate the sigmoid of the log-odds. This should agree with the xgboost predictions.
```{r}
bst_preds <- predict(bst,as.matrix(data$dates))
# calculate the predictions casting doubles to floats
bst_from_json_preds <- ifelse(fl(data$dates)<fl(node$split_condition),
as.numeric(1/(1+exp(-1*fl(node$children[[1]]$leaf)))),
as.numeric(1/(1+exp(-1*fl(node$children[[2]]$leaf))))
)
# test that values are equal
bst_preds == bst_from_json_preds
```
None are exactly equal again. What is going on here? Well, since we are using the value `1` in the calculations, we have introduced a double into the calculation. Because of this, all float values are promoted to 64-bit doubles and the 64-bit version of the exponentiation operator `exp` is also used. On the other hand, xgboost uses the 32-bit version of the exponentiation operator in its [sigmoid function](https://github.com/dmlc/xgboost/blob/54980b8959680a0da06a3fc0ec776e47c8cbb0a1/src/common/math.h#L25-L27).
How do we fix this? We have to ensure we use the correct data types everywhere and the correct operators. If we use only floats, the float library that we have loaded will ensure the 32-bit float exponentiation operator is applied.
```{r}
# calculate the predictions using only 32-bit floats and operators
bst_from_json_preds <- ifelse(fl(data$dates)<fl(node$split_condition),
as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[1]]$leaf)))),
as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[2]]$leaf))))
)
# test that values are equal
bst_preds == bst_from_json_preds
```
All equal. What's the lesson? We have to ensure that all calculations are done with 32-bit floating point operators if we want to reproduce the results that we see with xgboost.
| /scratch/gouwar.j/cran-all/cranData/xgboost/inst/doc/xgboostfromJSON.Rmd |
# [description]
# Create a definition file (.def) from a .dll file, using objdump. This
# is used by FindLibR.cmake when building the R package with MSVC.
#
# [usage]
#
# Rscript make-r-def.R something.dll something.def
#
# [references]
# * https://www.cs.colorado.edu/~main/cs1300/doc/mingwfaq.html
args <- commandArgs(trailingOnly = TRUE)
IN_DLL_FILE <- args[[1L]]
OUT_DEF_FILE <- args[[2L]]
DLL_BASE_NAME <- basename(IN_DLL_FILE)
message(sprintf("Creating '%s' from '%s'", OUT_DEF_FILE, IN_DLL_FILE))
# system() will not raise an R exception if the process called
# fails. Wrapping it here to get that behavior.
#
# system() introduces a lot of overhead, at least on Windows,
# so trying processx if it is available
.pipe_shell_command_to_stdout <- function(command, args, out_file) {
has_processx <- suppressMessages({
suppressWarnings({
require("processx") # nolint
})
})
if (has_processx) {
p <- processx::process$new(
command = command
, args = args
, stdout = out_file
, windows_verbatim_args = FALSE
)
invisible(p$wait())
} else {
message(paste0(
"Using system2() to run shell commands. Installing "
, "'processx' with install.packages('processx') might "
, "make this faster."
))
exit_code <- system2(
command = command
, args = shQuote(args)
, stdout = out_file
)
if (exit_code != 0L) {
stop(paste0("Command failed with exit code: ", exit_code))
}
}
return(invisible(NULL))
}
# use objdump to dump all the symbols
OBJDUMP_FILE <- "objdump-out.txt"
.pipe_shell_command_to_stdout(
command = "objdump"
, args = c("-p", IN_DLL_FILE)
, out_file = OBJDUMP_FILE
)
objdump_results <- readLines(OBJDUMP_FILE)
result <- file.remove(OBJDUMP_FILE)
# Only one table in the objdump results matters for our purposes,
# see https://www.cs.colorado.edu/~main/cs1300/doc/mingwfaq.html
start_index <- which(
grepl(
pattern = "[Ordinal/Name Pointer] Table"
, x = objdump_results
, fixed = TRUE
)
)
empty_lines <- which(objdump_results == "")
end_of_table <- empty_lines[empty_lines > start_index][1L]
# Read the contents of the table
exported_symbols <- objdump_results[(start_index + 1L):end_of_table]
exported_symbols <- gsub("\t", "", exported_symbols)
exported_symbols <- gsub(".*\\] ", "", exported_symbols)
exported_symbols <- gsub(" ", "", exported_symbols)
# Write R.def file
writeLines(
text = c(
paste0("LIBRARY \"", DLL_BASE_NAME, "\"")
, "EXPORTS"
, exported_symbols
)
, con = OUT_DEF_FILE
, sep = "\n"
)
message(sprintf("Successfully created '%s'", OUT_DEF_FILE))
| /scratch/gouwar.j/cran-all/cranData/xgboost/inst/make-r-def.R |
---
title: "Understand your dataset with XGBoost"
output:
rmarkdown::html_vignette:
css: vignette.css
number_sections: yes
toc: yes
author: Tianqi Chen, Tong He, Michaël Benesty, Yuan Tang
vignette: >
%\VignetteIndexEntry{Discover your data}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
Understand your dataset with XGBoost
====================================
Introduction
------------
The purpose of this vignette is to show you how to use **XGBoost** to discover and understand your own dataset better.
This vignette is not about predicting anything (see [XGBoost presentation](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/xgboostPresentation.Rmd)). We will explain how to use **XGBoost** to highlight the *link* between the *features* of your data and the *outcome*.
Package loading:
```{r libLoading, results='hold', message=F, warning=F}
require(xgboost)
require(Matrix)
require(data.table)
if (!require('vcd')) {
install.packages('vcd')
}
data.table::setDTthreads(2)
```
> **VCD** package is used for one of its embedded dataset only.
Preparation of the dataset
--------------------------
### Numeric vs. categorical variables
**XGBoost** manages only `numeric` vectors.
What to do when you have *categorical* data?
A *categorical* variable has a fixed number of different values. For instance, if a variable called *Colour* can have only one of these three values, *red*, *blue* or *green*, then *Colour* is a *categorical* variable.
> In **R**, a *categorical* variable is called `factor`.
>
> Type `?factor` in the console for more information.
To answer the question above we will convert *categorical* variables to `numeric` ones.
### Conversion from categorical to numeric variables
#### Looking at the raw data
In this Vignette we will see how to transform a *dense* `data.frame` (*dense* = few zeroes in the matrix) with *categorical* variables to a very *sparse* matrix (*sparse* = lots of zero in the matrix) of `numeric` features.
The method we are going to see is usually called [one-hot encoding](https://en.wikipedia.org/wiki/One-hot).
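As a tiny, self-contained illustration of the idea (using the hypothetical *Colour* variable mentioned earlier):
```{r}
# one-hot encoding of a small categorical vector (toy example)
colours <- factor(c("red", "blue", "green", "blue"))
model.matrix(~ colours - 1)
```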
The first step is to load `Arthritis` dataset in memory and wrap it with `data.table` package.
```{r, results='hide'}
data(Arthritis)
df <- data.table(Arthritis, keep.rownames = FALSE)
```
> `data.table` is 100% compliant with **R** `data.frame` but its syntax is more consistent and its performance for large dataset is [best in class](https://stackoverflow.com/questions/21435339/data-table-vs-dplyr-can-one-do-something-well-the-other-cant-or-does-poorly) (`dplyr` from **R** and `Pandas` from **Python** [included](https://github.com/Rdatatable/data.table/wiki/Benchmarks-%3A-Grouping)). Some parts of **XGBoost** **R** package use `data.table`.
The first thing we want to do is to have a look at the first few lines of the `data.table`:
```{r}
head(df)
```
Now we will check the format of each column.
```{r}
str(df)
```
Two columns have `factor` type, one has `ordinal` type.
> `ordinal` variable :
>
> * can take a limited number of values (like `factor`) ;
> * these values are ordered (unlike `factor`). Here these ordered values are: `Marked > Some > None`
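For instance, such an ordered variable can be represented in **R** as an ordered factor (a small illustration, not used further in this vignette):
```{r}
# an ordered factor with the same ordering as the Improved column
f <- factor(c("None", "Some", "Marked"),
            levels = c("None", "Some", "Marked"), ordered = TRUE)
f
```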
#### Creation of new features based on old ones
We will add some new *categorical* features to see if it helps.
##### Grouping per 10 years
For the first feature we create groups of age by rounding the real age.
Note that we transform it to `factor` so the algorithm treats these age groups as independent values.
Therefore, 20 is not closer to 30 than to 60. In short, the distance between ages is lost in this transformation.
```{r}
head(df[,AgeDiscret := as.factor(round(Age/10,0))])
```
##### Random split into two groups
Following is an even stronger simplification of the real age with an arbitrary split at 30 years old. We choose this value **based on nothing**. We will see later if simplifying the information based on arbitrary values is a good strategy (you may already have an idea of how well it will work...).
```{r}
head(df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))])
```
##### Risks in adding correlated features
These new features are highly correlated with the `Age` feature because they are simple transformations of this feature.
For many machine learning algorithms, using correlated features is not a good idea. It may sometimes make prediction less accurate, and most of the time make interpretation of the model almost impossible. GLM, for instance, assumes that the features are uncorrelated.
Fortunately, decision tree algorithms (including boosted trees) are very robust to these features. Therefore we don't need to do anything to manage this situation.
##### Cleaning data
We remove ID as there is nothing to learn from this feature (it would just add some noise).
```{r, results='hide'}
df[,ID:=NULL]
```
We will list the different values for the column `Treatment`:
```{r}
levels(df[,Treatment])
```
#### Encoding categorical features
Next step, we will transform the categorical data to dummy variables.
Several encoding methods exist, e.g., [one-hot encoding](https://en.wikipedia.org/wiki/One-hot) is a common approach.
We will use the [dummy contrast coding](https://stats.oarc.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/) which is popular because it produces "full rank" encoding (also see [this blog post by Max Kuhn](http://appliedpredictivemodeling.com/blog/2013/10/23/the-basics-of-encoding-categorical-data-for-predictive-models)).
The purpose is to transform each value of each *categorical* feature into a *binary* feature `{0, 1}`.
For example, the column `Treatment` will be replaced by two columns, `TreatmentPlacebo`, and `TreatmentTreated`. Each of them will be *binary*. Therefore, an observation which has the value `Placebo` in column `Treatment` before the transformation will have after the transformation the value `1` in the new column `TreatmentPlacebo` and the value `0` in the new column `TreatmentTreated`. The column `TreatmentPlacebo` will disappear during the contrast encoding, as it would be absorbed into a common constant intercept column.
Column `Improved` is excluded because it will be our `label` column, the one we want to predict.
```{r, warning=FALSE,message=FALSE}
sparse_matrix <- sparse.model.matrix(Improved ~ ., data = df)[,-1]
head(sparse_matrix)
```
> Formula `Improved ~ .` used above means transform all *categorical* features but column `Improved` to binary values. The `-1` column selection removes the intercept column which is full of `1` (this column is generated by the conversion). For more information, you can type `?sparse.model.matrix` in the console.
Create the output `numeric` vector (not as a sparse `Matrix`):
```{r}
output_vector = df[,Improved] == "Marked"
```
This line:

1. sets the `Y` vector to `0`;
2. sets `Y` to `1` for rows where `Improved == Marked` is `TRUE`;
3. returns the `Y` vector.
Build the model
---------------
The code below is very usual. For more information, you can look at the documentation of `xgboost` function (or at the vignette [XGBoost presentation](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/xgboostPresentation.Rmd)).
```{r}
bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 4,
eta = 1, nthread = 2, nrounds = 10,objective = "binary:logistic")
```
You can see a `train-error: 0.XXXXX` value printed at each round. It decreases. Each line shows how well the model explains your data. Lower is better.
A small value for training error may be a symptom of [overfitting](https://en.wikipedia.org/wiki/Overfitting), meaning the model will not accurately predict the future values.
> Here you can see the numbers decrease until line 7 and then increase.
>
> It probably means we are overfitting. To fix that I should reduce the number of rounds to `nrounds = 4`. I will leave it as it is, because I don't really care for the purpose of this example :-)
Feature importance
------------------
## Measure feature importance
### Build the feature importance data.table
Remember, each binary column corresponds to a single value of one of *categorical* features.
```{r}
importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst)
head(importance)
```
> The column `Gain` provides the information we are looking for.
>
> As you can see, features are ranked by `Gain`.
`Gain` is the improvement in accuracy brought by a feature to the branches it is on. The idea is that before adding a new split on a feature X to the branch, there were some wrongly classified elements; after adding the split on this feature, there are two new branches, and each of these branches is more accurate (one branch saying that if your observation is on this branch then it should be classified as `1`, and the other branch saying the exact opposite).
`Cover` measures the relative quantity of observations concerned by a feature.
`Frequency` is a simpler way to measure the `Gain`. It just counts the number of times a feature is used in all generated trees. You should not use it (unless you know why you want to use it).
#### Improvement in the interpretability of feature importance data.table
We can go deeper in the analysis of the model. In the `data.table` above, we have discovered which features count in predicting whether the illness will go away or not. But we don't yet know the role of these features. For instance, one of the questions we may want to answer would be: does receiving a placebo treatment help to recover from the illness?
One simple solution is to count the co-occurrences of a feature and a class of the classification.
For that purpose we will execute the same function as above but using two more parameters, `data` and `label`.
```{r}
importanceRaw <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst, data = sparse_matrix, label = output_vector)
# Cleaning for better display
importanceClean <- importanceRaw[,`:=`(Cover=NULL, Frequency=NULL)]
head(importanceClean)
```
> In the table above we have removed two unneeded columns and selected only the first lines.
First thing you notice is the new column `Split`. It is the split applied to the feature on a branch of one of the trees. Each split is present, therefore a feature can appear several times in this table. Here we can see the feature `Age` is used several times with different splits.
How is the split applied to count the co-occurrences? It is always `<`. For instance, in the second line, we measure the number of persons under 61.5 years with the illness gone after the treatment.
The two other new columns are `RealCover` and `RealCover %`. The first column measures the number of observations in the dataset where the split is respected and the label is marked as `1`. The second column is the percentage of the whole population that `RealCover` represents.
Therefore, according to our findings, getting a placebo doesn't seem to help but being younger than 61 years may help (which seems logical).
> You may wonder how to interpret the `< 1.00001` on the first line. Basically, in a sparse `Matrix` there are no `0`s stored, therefore, looking for one-hot-encoded categorical observations validating the rule `< 1.00001` is just like looking for `1` for this feature.
### Plotting the feature importance
All these things are nice, but it would be even better to plot the results.
```{r, fig.width=8, fig.height=5, fig.align='center'}
xgb.plot.importance(importance_matrix = importance)
```
Features have automatically been divided into 2 clusters: the interesting features... and the others.
> Depending on the dataset and the learning parameters you may have more than two clusters. The default is to limit them to `10`, but you can increase this limit. Look at the function documentation for more information.
According to the plot above, the most important features in this dataset to predict if the treatment will work are:
* the Age;
* having received a placebo or not;
* the sex comes third, but it is already included in the not-so-interesting features group;
* then we see our generated features (AgeDiscret). We can see that their contribution is very low.
### Do these results make sense?
Let's check some **Chi2** between each of these features and the label.
A higher **Chi2** means a stronger relationship with the label.
```{r, warning=FALSE, message=FALSE}
c2 <- chisq.test(df$Age, output_vector)
print(c2)
```
The Pearson chi-squared statistic between Age and illness disappearing is **`r round(c2$statistic, 2)`**.
```{r, warning=FALSE, message=FALSE}
c2 <- chisq.test(df$AgeDiscret, output_vector)
print(c2)
```
Our first simplification of Age gives a chi-squared statistic of **`r round(c2$statistic, 2)`**.
```{r, warning=FALSE, message=FALSE}
c2 <- chisq.test(df$AgeCat, output_vector)
print(c2)
```
The perfectly random split I did between young and old at 30 years old has a low chi-squared statistic of **`r round(c2$statistic, 2)`**. It's a result we may expect, as maybe in my mind being over 30 means being old (I am 32 and starting to feel old, which may explain it), but for the illness we are studying, the vulnerable age is not the same.
Moral of the story: don't let your *gut feeling* lower the quality of your model.
The expression *data science* contains the word *science* for a reason :-)
Conclusion
----------
As you can see, in general *destroying information by simplifying it won't improve your model*. **Chi2** just demonstrates that.
But in more complex cases, creating a new feature from an existing one that makes the link with the outcome more obvious may help the algorithm and improve the model.
The case studied here is not complex enough to show that. Check the [Kaggle website](http://www.kaggle.com/) for some challenging datasets. However, it's almost always worse when you add arbitrary rules.
Moreover, you can notice that even though we have added some useless new features highly correlated with other features, the boosting tree algorithm has been able to choose the best one, which in this case is the Age.
A linear model may not be that smart in this scenario.
Special Note: What about Random Forests™?
-----------------------------------------
As you may know, [Random Forests](https://en.wikipedia.org/wiki/Random_forest) algorithm is cousin with boosting and both are part of the [ensemble learning](https://en.wikipedia.org/wiki/Ensemble_learning) family.
Both train several decision trees on one dataset. The *main* difference is that in Random Forests, trees are independent, while in boosting, tree `N+1` focuses its learning on the loss (<=> what has not been well modeled by tree `N`).
This difference has an impact on a corner case in feature importance analysis: the *correlated features*.
Imagine two features perfectly correlated, feature `A` and feature `B`. For one specific tree, if the algorithm needs one of them, it will choose randomly (true in both boosting and Random Forests).
However, in Random Forests this random choice will be done for each tree, because each tree is independent of the others. Therefore, approximately, depending on your parameters, 50% of the trees will choose feature `A` and the other 50% will choose feature `B`. So the *importance* of the information contained in `A` and `B` (which is the same, because they are perfectly correlated) is diluted in `A` and `B`. So you won't easily know this information is important to predict what you want to predict! It is even worse when you have 10 correlated features...
In boosting, when a specific link between a feature and the outcome has been learned by the algorithm, it will try not to refocus on it (in theory that is what happens; reality is not always that simple). Therefore, all the importance will be on feature `A` or on feature `B` (but not both). You will know that one feature has an important role in the link between the observations and the label. It is still up to you to search for the features correlated with the one detected as important if you need to know all of them.
If you want to try Random Forests algorithm, you can tweak XGBoost parameters!
For instance, to compute a model with 1000 trees, with a 0.5 factor on sampling rows and columns:
```{r, warning=FALSE, message=FALSE}
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
#Random Forest - 1000 trees
bst <- xgboost(
data = train$data,
label = train$label,
max_depth = 4,
num_parallel_tree = 1000,
subsample = 0.5,
colsample_bytree = 0.5,
nrounds = 1,
objective = "binary:logistic",
nthread = 2
)
#Boosting - 3 rounds
bst <- xgboost(
data = train$data,
label = train$label,
max_depth = 4,
nrounds = 3,
objective = "binary:logistic",
nthread = 2
)
```
> Note that the parameter `nrounds` is set to `1`.
> [**Random Forests**](https://www.stat.berkeley.edu/~breiman/RandomForests/cc_papers.htm) is a trademark of Leo Breiman and Adele Cutler and is licensed exclusively to Salford Systems for the commercial release of the software.
| /scratch/gouwar.j/cran-all/cranData/xgboost/vignettes/discoverYourData.Rmd |
---
title: "XGBoost presentation"
output:
rmarkdown::html_vignette:
css: vignette.css
number_sections: yes
toc: yes
bibliography: xgboost.bib
author: Tianqi Chen, Tong He, Michaël Benesty
vignette: >
%\VignetteIndexEntry{XGBoost presentation}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
XGBoost R Tutorial
==================
## Introduction
**XGBoost** is short for e**X**treme **G**radient **Boost**ing package.
The purpose of this Vignette is to show you how to use **XGBoost** to build a model and make predictions.
It is an efficient and scalable implementation of the gradient boosting framework by @friedman2000additive and @friedman2001greedy. Two solvers are included:
- *linear* model ;
- *tree learning* algorithm.
It supports various objective functions, including *regression*, *classification* and *ranking*. The package is made to be extendible, so that users are also allowed to define their own objective functions easily.
It has been [used](https://github.com/dmlc/xgboost) to win several [Kaggle](http://www.kaggle.com) competitions.
It has several features:
* Speed: it can automatically do parallel computation on *Windows* and *Linux*, with *OpenMP*. It is generally over 10 times faster than the classical `gbm`.
* Input Type: it takes several types of input data:
* *Dense* Matrix: *R*'s *dense* matrix, i.e. `matrix` ;
* *Sparse* Matrix: *R*'s *sparse* matrix, i.e. `Matrix::dgCMatrix` ;
* Data File: local data files ;
* `xgb.DMatrix`: its own class (recommended).
* Sparsity: it accepts *sparse* input for both *tree booster* and *linear booster*, and is optimized for *sparse* input ;
* Customization: it supports customized objective functions and evaluation functions.
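To give a rough idea of the customization point above, a user-defined objective is simply an **R** function returning the gradient and hessian of the loss. The sketch below is illustrative only (it assumes an `xgb.DMatrix` called `dtrain`, as built later in this tutorial) and is not evaluated here:
```
# sketch of a custom logistic objective passed to xgb.train (illustrative, not run)
logregobj <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  preds <- 1 / (1 + exp(-preds))  # transform the margin into a probability
  grad <- preds - labels          # first-order gradient
  hess <- preds * (1 - preds)     # second-order gradient
  list(grad = grad, hess = hess)
}
bst <- xgb.train(params = list(max_depth = 2, eta = 1, nthread = 2),
                 data = dtrain, nrounds = 2, obj = logregobj)
```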
## Installation
### GitHub version
For the weekly updated version (highly recommended), install from *GitHub*:
```{r installGithub, eval=FALSE}
install.packages("drat", repos="https://cran.rstudio.com")
drat:::addRepo("dmlc")
install.packages("xgboost", repos="http://dmlc.ml/drat/", type = "source")
```
> *Windows* users will need to install [Rtools](https://cran.r-project.org/bin/windows/Rtools/) first.
### CRAN version
The version 0.4-2 is on CRAN, and you can install it by:
```{r, eval=FALSE}
install.packages("xgboost")
```
Formerly available versions can be obtained from the CRAN [archive](https://cran.r-project.org/src/contrib/Archive/xgboost/)
## Learning
For the purpose of this tutorial we will load **XGBoost** package.
```{r libLoading, results='hold', message=F, warning=F}
require(xgboost)
```
### Dataset presentation
In this example, we are aiming to predict whether a mushroom can be eaten or not (like in many tutorials, example data are the same as you will use in your everyday life :-).
Mushroom data is cited from UCI Machine Learning Repository. @Bache+Lichman:2013.
### Dataset loading
We will load the `agaricus` datasets embedded with the package and will link them to variables.
The datasets are already split in:
* `train`: will be used to build the model ;
* `test`: will be used to assess the quality of our model.
Why *split* the dataset into two parts?
In the first part we will build our model. In the second part we will want to test it and assess its quality. Without dividing the dataset we would test the model on the data which the algorithm has already seen.
```{r datasetLoading, results='hold', message=F, warning=F}
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
```
> In the real world, it would be up to you to make this division between `train` and `test` data. The way to do it is out of the purpose of this article, however `caret` package may [help](http://topepo.github.io/caret/data-splitting.html).
Each variable is a `list` containing two things, `label` and `data`:
```{r dataList, message=F, warning=F}
str(train)
```
`label` is the outcome of our dataset meaning it is the binary *classification* we will try to predict.
Let's discover the dimensionality of our datasets.
```{r dataSize, message=F, warning=F}
dim(train$data)
dim(test$data)
```
This dataset is kept very small so as not to make the **R** package too heavy; however, **XGBoost** is built to manage huge datasets very efficiently.
As seen below, the `data` are stored in a `dgCMatrix` which is a *sparse* matrix and `label` vector is a `numeric` vector (`{0,1}`):
```{r dataClass, message=F, warning=F}
class(train$data)[1]
class(train$label)
```
### Basic Training using XGBoost
This step is the most critical part of the process for the quality of our model.
#### Basic training
We are using the `train` data. As explained above, both `data` and `label` are stored in a `list`.
In a *sparse* matrix, cells containing `0` are not stored in memory. Therefore, in a dataset mainly made of `0`, memory size is reduced. It is very common to have such a dataset.
We will train a decision tree model using the following parameters:
* `objective = "binary:logistic"`: we will train a binary classification model ;
* `max_depth = 2`: the trees won't be deep, because our case is very simple ;
* `nthread = 2`: the number of CPU threads we are going to use;
* `nrounds = 2`: there will be two passes on the data, the second one will enhance the model by further reducing the difference between ground truth and prediction.
```{r trainingSparse, message=F, warning=F}
bstSparse <- xgboost(data = train$data, label = train$label, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
```
> The more complex the relationship between your features and your `label` is, the more passes you need.
#### Parameter variations
##### Dense matrix
Alternatively, you can put your dataset in a *dense* matrix, i.e. a basic **R** matrix.
```{r trainingDense, message=F, warning=F}
bstDense <- xgboost(
data = as.matrix(train$data),
label = train$label,
max_depth = 2,
eta = 1,
nthread = 2,
nrounds = 2,
objective = "binary:logistic"
)
```
##### xgb.DMatrix
**XGBoost** offers a way to group them in an `xgb.DMatrix`. You can even add other metadata in it. It will be useful for the most advanced features we will discover later.
```{r trainingDmatrix, message=F, warning=F}
dtrain <- xgb.DMatrix(data = train$data, label = train$label, nthread = 2)
bstDMatrix <- xgboost(
data = dtrain,
max_depth = 2,
eta = 1,
nthread = 2,
nrounds = 2,
objective = "binary:logistic"
)
```
##### Verbose option
**XGBoost** has several features to help you view how the learning progresses internally. The purpose is to help you set the best parameters, which is the key to your model's quality.
One of the simplest ways to see the training progress is to set the `verbose` option (see below for more advanced techniques).
```{r trainingVerbose0, message=T, warning=F}
# verbose = 0, no message
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 0)
```
```{r trainingVerbose1, message=T, warning=F}
# verbose = 1, print evaluation metric
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 1)
```
```{r trainingVerbose2, message=T, warning=F}
# verbose = 2, also print information about tree
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 2)
```
## Basic prediction using XGBoost
## Perform the prediction
The purpose of the model we have built is to classify new data. As explained before, we will use the `test` dataset for this step.
```{r predicting, message=F, warning=F}
pred <- predict(bst, test$data)
# size of the prediction vector
print(length(pred))
# limit display of predictions to the first 10
print(head(pred))
```
These numbers don't look like *binary classification* `{0,1}`. We need to perform a simple transformation before being able to use these results.
## Transform the regression into a binary classification
The only thing that **XGBoost** does is a *regression*. **XGBoost** is using the `label` vector to build its *regression* model.
How can we use a *regression* model to perform a binary classification?
If we think about the meaning of a regression applied to our data, the numbers we get are probabilities that a datum will be classified as `1`. Therefore, we will set the rule that if this probability for a specific datum is `> 0.5` then the observation is classified as `1` (or `0` otherwise).
```{r predictingTest, message=F, warning=F}
prediction <- as.numeric(pred > 0.5)
print(head(prediction))
```
## Measuring model performance
To measure the model performance, we will compute a simple metric, the *average error*.
```{r predictingAverageError, message=F, warning=F}
err <- mean(as.numeric(pred > 0.5) != test$label)
print(paste("test-error=", err))
```
> Note that the algorithm has not seen the `test` data during the model construction.
Steps explanation:
1. `as.numeric(pred > 0.5)` applies our rule that when the probability (<=> regression <=> prediction) is `> 0.5` the observation is classified as `1` and `0` otherwise ;
2. `probabilityVectorPreviouslyComputed != test$label` computes the vector of error between true data and computed probabilities ;
3. `mean(vectorOfErrors)` computes the *average error* itself.
The most important thing to remember is that **to do a classification, you just do a regression to the** `label` **and then apply a threshold**.
*Multiclass* classification works in a similar way.
This metric is **`r round(err, 2)`** and is pretty low: our yummy mushroom model works well!
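Regarding the *multiclass* remark above, here is a minimal, non-evaluated sketch using the built-in `iris` data (it is not part of this tutorial's mushroom dataset and only illustrates the idea):
```
# illustrative multiclass sketch (not part of the mushroom example)
data(iris)
X <- as.matrix(iris[, 1:4])
y <- as.integer(iris$Species) - 1   # classes must be 0-based integers
bst_mc <- xgboost(data = X, label = y, num_class = 3, max_depth = 2,
                  eta = 1, nthread = 2, nrounds = 5, objective = "multi:softprob")
probs <- matrix(predict(bst_mc, X), ncol = 3, byrow = TRUE)
pred_class <- max.col(probs) - 1    # predicted class per observation (0-based)
mean(pred_class != y)               # training error
```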
## Advanced features
Most of the features below have been implemented to help you to improve your model by offering a better understanding of its content.
### Dataset preparation
For the following advanced features, we need to put data in `xgb.DMatrix` as explained above.
```{r DMatrix, message=F, warning=F}
dtrain <- xgb.DMatrix(data = train$data, label = train$label, nthread = 2)
dtest <- xgb.DMatrix(data = test$data, label = test$label, nthread = 2)
```
### Measure learning progress with xgb.train
Both `xgboost` (simple) and `xgb.train` (advanced) functions train models.
One of the special features of `xgb.train` is the capacity to follow the progress of the learning after each round. Because of the way boosting works, there is a point where having too many rounds leads to overfitting. You can see this feature as a cousin of the cross-validation method. The following techniques will help you to avoid overfitting and to optimize the learning time by stopping it as soon as possible.
One way to measure progress in the learning of a model is to provide **XGBoost** with a second dataset that is already classified. Therefore it can learn on the first dataset and test its model on the second one. Some metrics are measured after each round during the learning.
> in some way it is similar to what we have done above with the average error. The main difference is that above it was done after building the model, whereas now the errors are measured during the construction.
For the purpose of this example, we use the `watchlist` parameter. It is a list of `xgb.DMatrix`, each of them tagged with a name.
```{r watchlist, message=F, warning=F}
watchlist <- list(train=dtrain, test=dtest)
bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, objective = "binary:logistic")
```
**XGBoost** has computed at each round the same average error metric as seen above (we set `nrounds` to 2, that is why we have two lines). Obviously, the `train-error` number is related to the training dataset (the one the algorithm learns from) and the `test-error` number to the test dataset.
Both training and test error related metrics are very similar, and in some way, it makes sense: what we have learned from the training dataset matches the observations from the test dataset.
If you do not get such results with your own dataset, you should think about how you divided your dataset into training and test. Maybe there is something to fix. Again, the `caret` package may [help](http://topepo.github.io/caret/data-splitting.html).
For a better understanding of the learning progression, you may want to have some specific metric or even use multiple evaluation metrics.
```{r watchlist2, message=F, warning=F}
bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic")
```
> `eval_metric` allows us to monitor two new metrics for each round, `logloss` and `error`.
### Linear boosting
Until now, all the learning we have performed was based on boosting trees. **XGBoost** implements a second algorithm, based on linear boosting. The only difference with the previous command is the `booster = "gblinear"` parameter (and removing the `eta` parameter).
```{r linearBoosting, message=F, warning=F}
bst <- xgb.train(data=dtrain, booster = "gblinear", max_depth=2, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic")
```
In this specific case, *linear boosting* gets slightly better performance metrics than the decision tree based algorithm.
In simple cases, this will happen because there is nothing better than a linear algorithm to catch a linear link. However, decision trees are much better at catching a non-linear link between predictors and outcome. Because there is no silver bullet, we advise you to check both algorithms with your own datasets to have an idea of what to use.
### Manipulating xgb.DMatrix
#### Save / Load
Like saving models, an `xgb.DMatrix` object (which groups both dataset and outcome) can also be saved using the `xgb.DMatrix.save` function.
```{r DMatrixSave, message=F, warning=F}
xgb.DMatrix.save(dtrain, "dtrain.buffer")
# to load it in, simply call xgb.DMatrix
dtrain2 <- xgb.DMatrix("dtrain.buffer")
bst <- xgb.train(data=dtrain2, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, objective = "binary:logistic")
```
```{r DMatrixDel, include=FALSE}
file.remove("dtrain.buffer")
```
#### Information extraction
Information can be extracted from `xgb.DMatrix` using `getinfo` function. Hereafter we will extract `label` data.
```{r getinfo, message=F, warning=F}
label = getinfo(dtest, "label")
pred <- predict(bst, dtest)
err <- as.numeric(sum(as.integer(pred > 0.5) != label))/length(label)
print(paste("test-error=", err))
```
### View feature importance/influence from the learnt model
Feature importance is similar to R gbm package's relative influence (rel.inf).
```
importance_matrix <- xgb.importance(model = bst)
print(importance_matrix)
xgb.plot.importance(importance_matrix = importance_matrix)
```
#### View the trees from a model
You can dump the tree you learned using `xgb.dump` into a text file.
```{r dump, message=T, warning=F}
xgb.dump(bst, with_stats = TRUE)
```
You can plot the trees from your model using `xgb.plot.tree`:
```
xgb.plot.tree(model = bst)
```
> if you provide a path to `fname` parameter you can save the trees to your hard drive.
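A minimal sketch (the file name here is just an example):
```
# write the tree dump to a local text file (example file name)
xgb.dump(bst, fname = "xgb_model_dump.txt", with_stats = TRUE)
```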
#### Save and load models
Maybe your dataset is big, and it takes time to train a model on it? Maybe you are not a big fan of losing time in redoing the same task again and again? In these rare cases, you will want to save your model and load it when required.
Fortunately for you, **XGBoost** implements such functions.
```{r saveModel, message=F, warning=F}
# save model to binary local file
xgb.save(bst, "xgboost.model")
```
> The `xgb.save` function should return `r TRUE` if everything goes well, and it crashes otherwise.
An interesting test to see how identical our saved model is to the original one would be to compare the two predictions.
```{r loadModel, message=F, warning=F}
# load binary model to R
bst2 <- xgb.load("xgboost.model")
xgb.parameters(bst2) <- list(nthread = 2)
pred2 <- predict(bst2, test$data)
# And now the test
print(paste("sum(abs(pred2-pred))=", sum(abs(pred2-pred))))
```
```{r clean, include=FALSE}
# delete the created model
file.remove("./xgboost.model")
```
> Is the result `0`? We are good!
In some very specific cases, like when you want to control **XGBoost** from the `caret` package, you will want to save the model as an *R* binary vector. See below how to do it.
```{r saveLoadRBinVectorModel, message=F, warning=F}
# save model to R's raw vector
rawVec <- xgb.serialize(bst)
# print class
print(class(rawVec))
# load binary model to R
bst3 <- xgb.load(rawVec)
xgb.parameters(bst3) <- list(nthread = 2)
pred3 <- predict(bst3, test$data)
# pred3 should be identical to pred
print(paste("sum(abs(pred3-pred))=", sum(abs(pred3-pred))))
```
> Again `0`? It seems that `XGBoost` works pretty well!
## References
| /scratch/gouwar.j/cran-all/cranData/xgboost/vignettes/xgboostPresentation.Rmd |
---
title: "XGBoost from JSON"
output:
rmarkdown::html_vignette:
number_sections: yes
toc: yes
author: Roland Stevenson
vignette: >
%\VignetteIndexEntry{XGBoost from JSON}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
XGBoost from JSON
=================
## Introduction
The purpose of this Vignette is to show you how to correctly load and work with an **XGBoost** model that has been dumped to JSON. **XGBoost** internally converts all data to [32-bit floats](https://en.wikipedia.org/wiki/Single-precision_floating-point_format), and the values dumped to JSON are decimal representations of these values. When working with a model that has been parsed from a JSON file, care must be taken to correctly treat:
- the input data, which should be converted to 32-bit floats
- any 32-bit floats that were stored in JSON as decimal representations
- any calculations, which must be done with 32-bit mathematical operators
## Setup
For the purpose of this tutorial we will load the xgboost, jsonlite, and float packages. We'll also set `digits=22` in our options in case we want to inspect many digits of our results.
```{r}
require(xgboost)
require(jsonlite)
require(float)
options(digits=22)
```
We will create a toy binary logistic model based on the example first provided [here](https://github.com/dmlc/xgboost/issues/3960), so that we can easily understand the structure of the dumped JSON model object. This will allow us to understand where discrepancies can occur and how they should be handled.
```{r}
dates <- c(20180130, 20180130, 20180130,
20180130, 20180130, 20180130,
20180131, 20180131, 20180131,
20180131, 20180131, 20180131,
20180131, 20180131, 20180131,
20180134, 20180134, 20180134)
labels <- c(1, 1, 1,
1, 1, 1,
0, 0, 0,
0, 0, 0,
0, 0, 0,
0, 0, 0)
data <- data.frame(dates = dates, labels=labels)
bst <- xgboost(
data = as.matrix(data$dates),
label = labels,
nthread = 2,
nrounds = 1,
objective = "binary:logistic",
missing = NA,
max_depth = 1
)
```
## Comparing results
We will now dump the model to JSON and attempt to illustrate a variety of issues that can arise, and how to properly deal with them.
First let's dump the model to JSON:
```{r}
bst_json <- xgb.dump(bst, with_stats = FALSE, dump_format='json')
bst_from_json <- fromJSON(bst_json, simplifyDataFrame = FALSE)
node <- bst_from_json[[1]]
cat(bst_json)
```
The tree JSON shown by the above code-chunk tells us that if the data is less than 20180132, the tree will output the value in the first leaf. Otherwise it will output the value in the second leaf. Let's try to reproduce this manually with the data we have and confirm that it matches the model predictions we've already calculated.
```{r}
bst_preds_logodds <- predict(bst,as.matrix(data$dates), outputmargin = TRUE)
# calculate the logodds values using the JSON representation
bst_from_json_logodds <- ifelse(data$dates<node$split_condition,
node$children[[1]]$leaf,
node$children[[2]]$leaf)
bst_preds_logodds
bst_from_json_logodds
# test that values are equal
bst_preds_logodds == bst_from_json_logodds
```
None are equal. What happened?
At this stage two things happened:
- input data was not converted to 32-bit floats
- the JSON variables were not converted to 32-bit floats
### Lesson 1: All data is 32-bit floats
> When working with imported JSON, all data must be converted to 32-bit floats
To explain this, let's repeat the comparison and round to two decimals:
```{r}
round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
```
If we round to two decimals, we see that only the elements related to data values of `20180131` don't agree. If we convert the data to floats, they agree:
```{r}
# now convert the dates to floats first
bst_from_json_logodds <- ifelse(fl(data$dates)<node$split_condition,
node$children[[1]]$leaf,
node$children[[2]]$leaf)
# test that values are equal
round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
```
What's the lesson? If we are going to work with an imported JSON model, any data must be converted to floats first. In this case, since '20180131' cannot be represented as a 32-bit float, it is rounded up to 20180132, as shown here:
```{r}
fl(20180131)
```
### Lesson 2: JSON parameters are 32-bit floats
> All JSON parameters stored as floats must be converted to floats.
Let's now say we do care about numbers past the first two decimals.
```{r}
# test that values are equal
bst_preds_logodds == bst_from_json_logodds
```
None are exactly equal. What happened? Although we've converted the data to 32-bit floats, we also need to convert the JSON parameters to 32-bit floats. Let's do this:
```{r}
# convert both the dates and the JSON parameters to floats
bst_from_json_logodds <- ifelse(fl(data$dates)<fl(node$split_condition),
as.numeric(fl(node$children[[1]]$leaf)),
as.numeric(fl(node$children[[2]]$leaf)))
# test that values are equal
bst_preds_logodds == bst_from_json_logodds
```
All equal. What's the lesson? If we are going to work with an imported JSON model, any JSON parameters that were stored as floats must also be converted to floats first.
### Lesson 3: Use 32-bit math
> Always use 32-bit numbers and operators
We were able to get the log-odds to agree, so now let's manually calculate the sigmoid of the log-odds. This should agree with the xgboost predictions.
```{r}
bst_preds <- predict(bst,as.matrix(data$dates))
# calculate the predictions casting doubles to floats
bst_from_json_preds <- ifelse(fl(data$dates)<fl(node$split_condition),
as.numeric(1/(1+exp(-1*fl(node$children[[1]]$leaf)))),
as.numeric(1/(1+exp(-1*fl(node$children[[2]]$leaf))))
)
# test that values are equal
bst_preds == bst_from_json_preds
```
None are exactly equal again. What is going on here? Well, since we are using the value `1` in the calculations, we have introduced a double into the calculation. Because of this, all float values are promoted to 64-bit doubles and the 64-bit version of the exponentiation operator `exp` is also used. On the other hand, xgboost uses the 32-bit version of the exponentiation operator in its [sigmoid function](https://github.com/dmlc/xgboost/blob/54980b8959680a0da06a3fc0ec776e47c8cbb0a1/src/common/math.h#L25-L27).
How do we fix this? We have to ensure we use the correct data types everywhere and the correct operators. If we use only floats, the float library that we have loaded will ensure the 32-bit float exponentiation operator is applied.
```{r}
# calculate the predictions using only 32-bit floats and operators
bst_from_json_preds <- ifelse(fl(data$dates)<fl(node$split_condition),
as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[1]]$leaf)))),
as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[2]]$leaf))))
)
# test that values are equal
bst_preds == bst_from_json_preds
```
All equal. What's the lesson? We have to ensure that all calculations are done with 32-bit floating point operators if we want to reproduce the results that we see with xgboost.
| /scratch/gouwar.j/cran-all/cranData/xgboost/vignettes/xgboostfromJSON.Rmd |
utils::globalVariables(c("left")) # resolves note on 'no visible binding for global variable 'left'' in group_by() in ln.123.
#' @importFrom gbm gbm
#' @importFrom gbm pretty.gbm.tree
#' @importFrom dplyr group_by
#' @importFrom dplyr summarise
#' @importFrom stats cor
#' @importFrom stats predict
#' @importFrom rpart rpart
#' @importFrom rpart rpart.control
#' @importFrom rpart.plot rpart.plot
#'
#' @title Explanation groves
#'
#' @description Compute surrogate groves to explain a predictive machine learning model and analyze complexity vs. explanatory power.
#'
#' @details A surrogate grove is trained via gradient boosting using \code{\link[gbm]{gbm}} on \code{data} with the predictions of the \code{model} as target variable.
#' Note that \code{data} must not contain the original target variable! The boosting model is trained using stumps of depth 1.
#' The resulting interpretation is extracted from \code{\link[gbm]{pretty.gbm.tree}}.
#'
#' @param model A model with corresponding predict function that returns numeric values.
#' @param data Data that must not (!) contain the target variable.
#' @param ntrees Sequence of integers: number of boosting trees for rule extraction.
#' @param pfun Optional predict function \code{function(model, data)} returning a real number. Default is the \code{predict()} method of the \code{model}.
#' @param shrink Sets the \code{shrinkage} argument for the internal call of \code{\link[gbm]{gbm}}. As the \code{model} usually has a deterministic response
#' the default is set to 1, which differs from the default of \code{\link[gbm]{gbm}} used to train a model directly on the data.
#' @param b.frac Sets the \code{bag.fraction} argument for the internal call of \code{\link[gbm]{gbm}}. As the \code{model} usually has a deterministic response
#' the default is set to 1, which differs from the default of \code{\link[gbm]{gbm}} used to train a model directly on the data.
#' @param seed Seed for the random number generator to ensure reproducible results (e.g. for the default \code{bag.fraction} < 1 in boosting).
#' @param ... Further arguments to be passed to \code{gbm} or the \code{predict()} method of the \code{model}.
#'
#' @return List of the results:
#' @return \item{explanation}{Matrix containing tree sizes, rules, explainability \eqn{{\Upsilon}} and the correlation between the predictions of the explanation and the true model.}
#' @return \item{rules}{Summary of the explanation grove: Rules with identical splits are aggregated. For numeric variables, splits are merged if they lead to identical partitions of the training data.}
#' @return \item{groves}{Rules of the explanation grove.}
#' @return \item{model}{\code{gbm} model.}
#'
#' @export
#'
#' @examples
#' library(randomForest)
#' library(pdp)
#' data(boston)
#' set.seed(42)
#' rf <- randomForest(cmedv ~ ., data = boston)
#' data <- boston[,-3] # remove target variable
#' ntrees <- c(4,8,16,32,64,128)
#' xg <- xgrove(rf, data, ntrees)
#' xg
#' plot(xg)
#'
#' @author \email{gero.szepannek@@web.de}
#'
#' @references \itemize{
#' \item {Szepannek, G. and von Holt, B.H. (2023): Can’t see the forest for the trees -- analyzing groves to explain random forests,
#' Behaviormetrika, DOI: 10.1007/s41237-023-00205-2}.
#' \item {Szepannek, G. and Luebke, K.(2023): How much do we see? On the explainability of partial dependence plots for credit risk scoring,
#' Argumenta Oeconomica 50, DOI: 10.15611/aoe.2023.1.07}.
#' }
#'
#' @rdname xgrove
xgrove <- function(model, data, ntrees = c(4,8,16,32,64,128), pfun = NULL, shrink = 1, b.frac = 1, seed = 42, ...){
set.seed(seed)
if(is.null(pfun)) {
surrogatetarget <- predict(model, data)
if(!is.numeric(surrogatetarget) | !is.vector(surrogatetarget)) stop("Default predict method does not return a numeric vector. Please specify pfun argument!")
}
if(!is.null(pfun)){
surrogatetarget <- pfun(model = model, data= data)
if(!is.numeric(surrogatetarget) | !is.vector(surrogatetarget)) stop("pfun does not return a numeric vector!")
}
# compute surrogate grove for specified maximal number of trees
data$surrogatetarget <- surrogatetarget
surrogate_grove <- gbm::gbm(surrogatetarget ~., data = data, n.trees = max(ntrees), shrinkage = shrink, bag.fraction = b.frac, ...)
if(surrogate_grove$interaction.depth > 1) stop("gbm interaction.depth is supposed to be 1. Please do not specify it differently within the ... argument.")
# extract groves of different size and compute performance
explanation <- NULL
groves <- list()
interpretation <- list()
for(nt in ntrees){
predictions <- predict(surrogate_grove, data, n.trees = nt, ...)
rules <- NULL
for(tid in 1:nt){
tinf <- gbm::pretty.gbm.tree(surrogate_grove, i.tree = tid)
newrule <- tinf[tinf$SplitVar != -1,]
newrule <- data.frame(newrule, pleft = tinf$Prediction[rownames(tinf) == newrule$LeftNode], pright = tinf$Prediction[rownames(tinf) == newrule$RightNode])
rules <- rbind(rules, newrule)
}
vars <- NULL
splits <- NULL
csplits_left <- NULL
pleft <- NULL
pright <- NULL
for(i in 1:nrow(rules)){
vars <- c(vars, names(data)[rules$SplitVar[i]+1])
if(is.numeric(data[,rules$SplitVar[i]+1])){
splits <- c(splits, rules$SplitCodePred[i])
csplits_left <- c(csplits_left, NA)
}
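      # categorical split: SplitCodePred holds a zero-based index into gbm's c.splits; levels coded -1 go to the left node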
if(is.factor(data[,rules$SplitVar[i]+1])){
levs <- levels(data[,(rules$SplitVar[i]+1)])
lids <- surrogate_grove$c.splits[[(rules$SplitCodePred[i] +1)]] == -1
if(sum(lids) == 1) levs <- levs[lids]
if(sum(lids) > 1) levs <- paste(levs[lids], sep = "|")
csl <- levs[1]
if(length(levs) > 1){for(j in 2:length(levs)) csl <- paste(csl, levs[j], sep = " | ")}
splits <- c(splits, "")
csplits_left <- c(csplits_left, csl)
}
pleft <- c(pleft, rules$pleft[i])
pright <- c(pright, rules$pright[i])
}
basepred <- surrogate_grove$initF
df <- data.frame(vars, splits, left = csplits_left, pleft = round(pleft, 4), pright = round(pright,4))
df <- dplyr::group_by(df, vars, splits, left)
df_small <- as.data.frame(dplyr::summarise(df, pleft = sum(pleft), pright = sum(pright)))
df <- as.data.frame(df)
# merge rules for numeric variables
if(nrow(df_small) > 1){
i <- 2
while (i != 0){
drop.rule <- FALSE
if(is.numeric(data[,df_small$vars[i]])){
for(j in 1:(i-1)){
if(df_small$vars[i] == df_small$vars[j]) {
v1 <- data[,df_small$vars[i]] <= df_small$splits[i]
v2 <- data[,df_small$vars[j]] <= df_small$splits[j]
tab <- table(v1, v2)
if(sum(diag(tab)) == sum(tab)) {
df_small$pleft[j] <- df_small$pleft[i] + df_small$pleft[j]
df_small$pright[j] <- df_small$pright[i] + df_small$pright[j]
drop.rule <- TRUE
}
}
}
}
if(drop.rule) {df_small <- df_small[-i,]}
if(!drop.rule) {i <- i+1}
if(i > nrow(df_small)) {i <- 0}
}
}
# compute complexity and explainability statistics
trees <- nt
rules <- nrow(df_small) #
ASE <- mean((data$surrogatetarget - predictions)^2)
ASE0 <- mean((data$surrogatetarget - mean(data$surrogatetarget))^2)
upsilon <- 1 - ASE / ASE0
rho <- cor(data$surrogatetarget, predictions)
df0 <- data.frame(vars = "Intercept", splits = NA, left = NA, pleft = basepred, pright = basepred)
df <- rbind(df0, df)
df_small <- rbind(df0, df_small)
groves[[length(groves)+1]] <- df
interpretation[[length(interpretation)+1]] <- df_small
explanation <- rbind(explanation, c(trees, rules, upsilon, rho))
}
names(groves) <- names(interpretation) <- ntrees
colnames(explanation) <- c("trees","rules","upsilon","cor")
res <- list(explanation = explanation, rules = interpretation, groves = groves, model = surrogate_grove)
class(res) <- "xgrove"
return(res)
}
#' @title Plot surrogate grove statistics
#'
#' @description Plot statistics of surrogate groves to analyze complexity vs. explanatory power.
#'
#' @param x An object of class \code{xgrove}.
#' @param abs Name of the measure to be plotted on the x-axis, either \code{"trees"}, \code{"rules"}, \code{"upsilon"} or \code{"cor"}.
#' @param ord Name of the measure to be plotted on the y-axis, either \code{"trees"}, \code{"rules"}, \code{"upsilon"} or \code{"cor"}.
#' @param ... Further arguments passed to \code{plot}.
#'
#' @return No return value.
#'
#' @examples
#' library(randomForest)
#' library(pdp)
#' data(boston)
#' set.seed(42)
#' rf <- randomForest(cmedv ~ ., data = boston)
#' data <- boston[,-3] # remove target variable
#' ntrees <- c(4,8,16,32,64,128)
#' xg <- xgrove(rf, data, ntrees)
#' xg
#' plot(xg)
#'
#' @author \email{gero.szepannek@@web.de}
#'
#' @rdname plot.xgrove
#' @export
plot.xgrove <- function(x, abs = "rules", ord = "upsilon", ...){
i <- which(colnames(x$explanation) == abs)
j <- which(colnames(x$explanation) == ord)
plot(x$explanation[,i], x$explanation[,j], xlab = abs, ylab = ord, type = "b", ...)
}
#' @export
print.xgrove <- function(x, ...) print(x$explanation)
#' @title Explainability
#'
#' @description Compute explainability given predicted data of the model and an explainer.
#'
#' @param porig Numeric vector of predictions of the original model to be explained.
#' @param pexp Numeric vector of predictions of the explainer (e.g. a surrogate grove or tree).
#'
#' @return Numeric explainability upsilon.
#'
#' @references \itemize{
#' \item {Szepannek, G. and Luebke, K.(2023): How much do we see? On the explainability of partial dependence plots for credit risk scoring,
#' Argumenta Oeconomica 50, DOI: 10.15611/aoe.2023.1.07}.
#' }
#'
#' @examples
#' library(randomForest)
#' library(pdp)
#' data(boston)
#' set.seed(42)
#' # Compute original model
#' rf <- randomForest(cmedv ~ ., data = boston)
#' data <- boston[,-3] # remove target variable
#' # Compute predictions
#' porig <- predict(rf, data)
#'
#' # Compute surrogate grove
#' xg <- xgrove(rf, data)
#' pexp <- predict(xg$model, data, n.trees = 16)
#' upsilon(porig, pexp)
#'
#' @author \email{gero.szepannek@@web.de}
#'
#' @rdname upsilon
#' @export
upsilon <- function(porig, pexp){
#porig <- predict(model, data)
#pexp <- predict(explanation, data)
ASE <- mean((porig - pexp)^2)
ASE0 <- mean((porig - mean(porig))^2)
ups <- 1 - ASE / ASE0
return(ups)
}
#' @title Surrogate trees
#'
#' @description Compute surrogate trees of different depth to explain a predictive machine learning model and analyze complexity vs. explanatory power.
#'
#' @details A surrogate tree is trained using \code{\link[rpart]{rpart}} on \code{data} with the predictions of the \code{model} as target variable.
#' Note that \code{data} must not contain the original target variable!
#'
#' @param model A model with corresponding predict function that returns numeric values.
#' @param data Data that must not (!) contain the target variable.
#' @param maxdeps Sequence of integers: Maximum depth of the trees.
#' @param cparam Complexity parameter for growing the trees.
#' @param pfun Optional predict function \code{function(model, data)} returning a real number. Default is the \code{predict()} method of the \code{model}.
#' @param ... Further arguments to be passed to \code{\link[rpart]{rpart.control}} or the \code{predict()} method of the \code{model}.
#'
#' @return List of the results:
#' @return \item{explanation}{Matrix containing tree sizes, rules, explainability \eqn{{\Upsilon}} and the correlation between the predictions of the explanation and the true model.}
#' @return \item{rules}{List of rules for each tree.}
#' @return \item{model}{List of the \code{rpart} models.}
#'
#' @export
#'
#' @examples
#' library(randomForest)
#' library(pdp)
#' data(boston)
#' set.seed(42)
#' rf <- randomForest(cmedv ~ ., data = boston)
#' data <- boston[,-3] # remove target variable
#' maxds <- 1:7
#' st <- sgtree(rf, data, maxds)
#' st
#' # rules for tree of depth 3
#' st$rules[["3"]]
#' # plot tree of depth 3
#' rpart.plot::rpart.plot(st$model[["3"]])
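#' # complexity vs. explanatory power across the fitted tree depths
#' plot(st)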
#'
#' @author \email{gero.szepannek@@web.de}
#'
#' @references \itemize{
#' \item {Szepannek, G. and von Holt, B.H. (2023): Can’t see the forest for the trees -- analyzing groves to explain random forests,
#' Behaviormetrika, DOI: 10.1007/s41237-023-00205-2}.
#' \item {Szepannek, G. and Luebke, K.(2023): How much do we see? On the explainability of partial dependence plots for credit risk scoring,
#' Argumenta Oeconomica 50, DOI: 10.15611/aoe.2023.1.07}.
#' }
#'
#' @rdname sgtree
sgtree <- function(model, data, maxdeps = 1:8, cparam = 0, pfun = NULL, ...){ #seed = 42
#browser()
#set.seed(seed)
if(is.null(pfun)) {
surrogatetarget <- predict(model, data)
if(!is.numeric(surrogatetarget) | !is.vector(surrogatetarget)) stop("Default predict method does not return a numeric vector. Please specify pfun argument!")
}
if(!is.null(pfun)){
surrogatetarget <- pfun(model = model, data= data)
if(!is.numeric(surrogatetarget) | !is.vector(surrogatetarget)) stop("pfun does not return a numeric vector!")
}
# compute surrogate grove for specified maximal number of trees
data$surrogatetarget <- surrogatetarget
surrogate_trees <- list()
rules <- list()
explanation <- NULL # #rules etc...
for(md in maxdeps){
surrogate_trees[[as.character(md)]] <- rpart::rpart(surrogatetarget ~ ., data = data,
control = rpart::rpart.control(cp = cparam, minsplit = 1, minbucket = 1, maxcompete = 0, maxsurrogate = 0, maxdepth = md)) # ...
if(is.null(surrogate_trees[[as.character(md)]]$splits)){
rules[[as.character(md)]] <- NULL
explanation <- rbind(explanation, c(1, 0, 0, 0))
}
if(!is.null(surrogate_trees[[as.character(md)]]$splits)){
frame <- surrogate_trees[[as.character(md)]]$frame
splits <- surrogate_trees[[as.character(md)]]$splits
csplit <- surrogate_trees[[as.character(md)]]$csplit
frame <- frame[,c(1:3)] #[,c(1:3,5)]
splits <- splits[,c(1,2,4)]
newrules <- frame[frame$var != "<leaf>",]
if(!is.matrix(splits)) {splits <- as.data.frame(t(splits))} # if only one split
newrules <- cbind(newrules, splits)
newrules$left <- ifelse(newrules$ncat < 0, "<" , ">")
newrules <- newrules[,c(1,7,6,5,2,3)]
# For a factor, the index column contains the row number of the csplit matrix
for(j in 1:nrow(newrules)){
vn <- newrules$var[j]
#if(is.factor(data[[vn]])){
if(newrules$ncat[j] > 1){
ind <- newrules$index[j]
sel <- which(csplit[ind,] == 1)
newrules$index[j] <- paste(levels(data[[vn]])[sel], collapse = " + ")
newrules$left[j] <- "among"
# The columns record 1 if that level of the factor goes to the left, 3 if it goes to the right,
# and 2 if that level is not present at this node of the tree (or not defined for the factor).
}
}
rules[[as.character(md)]] <- newrules
# compute complexity and explainability statistics
trees <- 1
nrules <- nrow(newrules)
predictions <- predict(surrogate_trees[[as.character(md)]], data)
ASE <- mean((data$surrogatetarget - predictions)^2)
ASE0 <- mean((data$surrogatetarget - mean(data$surrogatetarget))^2)
upsilon <- 1 - ASE / ASE0
rho <- cor(data$surrogatetarget, predictions)
explanation <- rbind(explanation, c(trees, nrules, upsilon, rho))
}
}
colnames(explanation) <- c("trees","rules","upsilon","cor")
res <- list(explanation = explanation, rules = rules, model = surrogate_trees)
class(res) <- "sgtree"
return(res)
}
#' @title Plot surrogate tree statistics
#'
#' @description Plot statistics of surrogate trees to analyze complexity vs. explanatory power.
#'
#' @param x An object of class \code{sgtree}.
#' @param abs Name of the measure to be plotted on the x-axis, either \code{"trees"}, \code{"rules"}, \code{"upsilon"} or \code{"cor"}.
#' @param ord Name of the measure to be plotted on the y-axis, either \code{"trees"}, \code{"rules"}, \code{"upsilon"} or \code{"cor"}.
#' @param ... Further arguments passed to \code{plot}.
#'
#' @return No return value.
#'
#' @examples
#' library(randomForest)
#' library(pdp)
#' data(boston)
#' set.seed(42)
#' rf <- randomForest(cmedv ~ ., data = boston)
#' data <- boston[,-3] # remove target variable
#' maxds <- 1:7
#' st <- sgtree(rf, data, maxds)
#' st
#' plot(st)
#'
#' @author \email{gero.szepannek@@web.de}
#'
#' @rdname plot.sgtree
#' @export
plot.sgtree <- function(x, abs = "rules", ord = "upsilon", ...){
i <- which(colnames(x$explanation) == abs)
j <- which(colnames(x$explanation) == ord)
plot(x$explanation[,i], x$explanation[,j], xlab = abs, ylab = ord, type = "b", ...)
}
#' @export
print.sgtree <- function(x, ...) print(x$explanation)
| /scratch/gouwar.j/cran-all/cranData/xgrove/R/xgrove.R |
#' Multiple Ascending Dose Data Set
#'
#' Model generated PK and PD data to mimic an orally administered small
#' molecule with various endpoints from continuous to ordinal response and
#' count data. Simulated multiple dose administration ranging from 100 mg
#' to 1600 mg, once per day.
#'
#' @format A data frame with the following 19 columns:
#' \tabular{rll}{
#' column 1: \tab \code{ID} \tab numeric; unique subject ID\cr
#' column 2: \tab \code{TIME} \tab numeric; time relative to first drug
#' administration\cr
#' column 3: \tab \code{NOMTIME} \tab numeric; nominal time\cr
#' column 4: \tab \code{TIMEUNIT} \tab character; unit of TIME\cr
#' column 5: \tab \code{AMT} \tab numeric; dosing amount (for dosing events)
#' in mg\cr
#' column 6: \tab \code{LIDV} \tab numeric; observation on a linear scale
#' (observation type determined by CMT), units determined by EVENTU column\cr
#' column 7: \tab \code{MDV} \tab numeric; missing dependent variable\cr
#' column 8: \tab \code{CMT} \tab integer; compartment number
#' (determines observation type):\cr
#' \tab \tab CMT 1 = Dosing event\cr
#' \tab \tab CMT 2 = PK concentration\cr
#' \tab \tab CMT 3 = Continuous response data\cr
#' \tab \tab CMT 4 = Count response data\cr
#' \tab \tab CMT 5 = Ordinal response data\cr
#' \tab \tab CMT 6 = Binary response data\cr
#' column 9: \tab \code{NAME} \tab character; description of event\cr
#' column 10: \tab \code{EVENTU} \tab character; unit for observation\cr
#' column 11: \tab \code{CENS} \tab integer; censored values
#' (0 = not censored, 1 = censored)\cr
#' column 12: \tab \code{EVID} \tab integer; event ID (0 = observation,
#' 1 = dosing event)\cr
#' column 13: \tab \code{WEIGHTB} \tab numeric; baseline body weight (kg)\cr
#' column 14: \tab \code{SEX} \tab character; sex\cr
#' column 15: \tab \code{TRTACT} \tab factor; treatment group label\cr
#' column 16: \tab \code{DOSE} \tab numeric; randomized dose in mg\cr
#' column 17: \tab \code{PROFDAY} \tab numeric; day of profile\cr
#' column 18: \tab \code{PROFTIME} \tab numeric; time within PROFDAY\cr
#' column 19: \tab \code{CYCLE} \tab numeric; count of drug administrations
#' received
#' }
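#'
#' @examples
#' # The CMT column separates dosing records (CMT 1) from the different
#' # observation types, e.g. PK concentrations (CMT 2)
#' data("mad", package = "xgxr")
#' head(subset(mad, CMT == 2))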
"mad"
#' Multiple Ascending Dose Data Set (Duplicates Removed)
#'
#' Model generated PK and PD data to mimic an orally administered small
#' molecule with various endpoints from continuous to ordinal response and
#' count data. Simulated multiple dose administration ranging from 100 mg
#' to 1600 mg, once per day.
#'
#' @format A data frame with the following 19 columns:
#' \tabular{rll}{
#' column 1: \tab \code{ID} \tab numeric; unique subject ID\cr
#' column 2: \tab \code{TIME} \tab numeric; time relative to first drug
#' administration\cr
#' column 3: \tab \code{NOMTIME} \tab numeric; nominal time\cr
#' column 4: \tab \code{TIMEUNIT} \tab character; unit of TIME\cr
#' column 5: \tab \code{AMT} \tab numeric; dosing amount (for dosing events)
#' in mg\cr
#' column 6: \tab \code{LIDV} \tab numeric; observation on a linear scale
#' (observation type determined by CMT), units determined by EVENTU column\cr
#' column 7: \tab \code{MDV} \tab numeric; missing dependent variable\cr
#' column 8: \tab \code{CMT} \tab integer; compartment number
#' (determines observation type):\cr
#' \tab \tab CMT 1 = Dosing event\cr
#' \tab \tab CMT 2 = PK concentration\cr
#' \tab \tab CMT 3 = Continuous response data\cr
#' \tab \tab CMT 4 = Count response data\cr
#' \tab \tab CMT 5 = Ordinal response data\cr
#' \tab \tab CMT 6 = Binary response data\cr
#' column 9: \tab \code{NAME} \tab character; description of event\cr
#' column 10: \tab \code{EVENTU} \tab character; unit for observation\cr
#' column 11: \tab \code{CENS} \tab integer; censored values
#' (0 = not censored, 1 = censored)\cr
#' column 12: \tab \code{EVID} \tab integer; event ID (0 = observation,
#' 1 = dosing event)\cr
#' column 13: \tab \code{WEIGHTB} \tab numeric; baseline body weight (kg)\cr
#' column 14: \tab \code{SEX} \tab character; sex\cr
#' column 15: \tab \code{TRTACT} \tab factor; treatment group label\cr
#' column 16: \tab \code{DOSE} \tab numeric; randomized dose in mg\cr
#' column 17: \tab \code{PROFDAY} \tab numeric; day of profile\cr
#' column 18: \tab \code{PROFTIME} \tab numeric; time within PROFDAY\cr
#' column 19: \tab \code{CYCLE} \tab numeric; count of drug administrations
#' received
#' }
"mad_missing_duplicates"
#' Multiple Ascending Dose Noncompartmental Analysis (NCA) dataset
#' @format A data frame with the following 7 columns:
#' \tabular{rll}{
#' column 1: \tab \code{ID} \tab numeric; unique subject ID\cr
#' column 2: \tab \code{PARAM} \tab character; NCA parameter\cr
#' column 3: \tab \code{VALUE} \tab numeric; Value of the NCA parameter\cr
#' column 4: \tab \code{DOSE} \tab numeric; randomized dose in mg\cr
#' column 5: \tab \code{TRTACT} \tab factor; treatment group label\cr
#' column 6: \tab \code{SEX} \tab character; sex\cr
#' column 7: \tab \code{WEIGHTB} \tab numeric; baseline body weight (kg)
#' }
"mad_nca"
#' Case 1 PKPD Data Set
#' @format A data frame with the following 21 columns:
#' \tabular{rll}{
#' column 1: \tab \code{ID} \tab integer; unique subject ID\cr
#' column 2: \tab \code{TIME} \tab numeric; time relative to first drug
#' administration\cr
#' column 3: \tab \code{NOMTIME} \tab numeric; nominal time\cr
#' column 4: \tab \code{TIMEUNIT} \tab factor; unit of TIME\cr
#' column 5: \tab \code{AMT} \tab integer; dosing amount (for dosing events)
#' in mg\cr
#' column 6: \tab \code{LIDV} \tab numeric; observation on a linear scale
#' (observation type determined by CMT), units determined by EVENTU column\cr
#' column 7: \tab \code{CMT} \tab integer; compartment number
#' (determines observation type):\cr
#' \tab \tab CMT 1 = Dosing event\cr
#' \tab \tab CMT 2 = PK concentration\cr
#' \tab \tab CMT 3 = Continuous response data\cr
#' \tab \tab CMT 4 = Count response data\cr
#' \tab \tab CMT 5 = Ordinal response data\cr
#' \tab \tab CMT 6 = Binary response data\cr
#' column 8: \tab \code{NAME} \tab factor; description of event\cr
#' column 9: \tab \code{EVENTU} \tab factor; unit for observation\cr
#' column 10: \tab \code{CENS} \tab integer; censored values
#' (0 = not censored, 1 = censored)\cr
#' column 11: \tab \code{EVID} \tab integer; event ID (0 = observation,
#' 1 = dosing event)\cr
#' column 12: \tab \code{WEIGHTB} \tab numeric; baseline body weight (kg)\cr
#' column 13: \tab \code{eff0} \tab numeric; efficacy\cr
#' column 14: \tab \code{TRTACT} \tab factor; treatment group label\cr
#' column 15: \tab \code{DOSE} \tab integer; Dose in mg\cr
#' column 16: \tab \code{PROFDAY} \tab integer; day of profile\cr
#' column 17: \tab \code{PROFTIME} \tab numeric; time within PROFDAY\cr
#' column 18: \tab \code{CYCLE} \tab integer; count of drug administrations
#' received\cr
#' column 19: \tab \code{PART} \tab integer; part of study\cr
#' column 20: \tab \code{STUDY} \tab integer; study\cr
#' column 21: \tab \code{IPRED} \tab numeric; individual prediction\cr
#' }
"case1_pkpd"
#' nlmixr Theophylline SD Data Set
#'
#' Theophylline dataset, from the nlmixr R package
#'
#' @format A data frame with the following 7 columns:
#' \tabular{rll}{
#' column 1: \tab \code{ID} \tab integer; unique patient identifier\cr
#' column 2: \tab \code{TIME} \tab numeric; time relative to first drug
#' administration\cr
#' column 3: \tab \code{DV} \tab numeric; dependent variable (drug concentration) \cr
#' column 4: \tab \code{AMT} \tab numeric; dose of drug \cr
#' column 5: \tab \code{EVID} \tab integer; event ID, 1 if dose, 0 otherwise\cr
#' column 6: \tab \code{CMT} \tab integer; compartment number\cr
#' column 7: \tab \code{WT} \tab numeric; weight
#' }
"nlmixr_theo_sd"
#' Single Ascending Dose Data Set
#'
#' Model generated PK data to mimic an orally administered small molecule.
#' Simulated single dose administration ranging from 100 mg to 1600 mg.
#'
#' @format A data frame with the following 16 columns:
#' \tabular{rll}{
#' column 1: \tab \code{ID} \tab numeric; unique subject ID\cr
#' column 2: \tab \code{TIME} \tab numeric; time relative to first drug
#' administration\cr
#' column 3: \tab \code{NOMTIME} \tab numeric; nominal time\cr
#' column 4: \tab \code{TIMEUNIT} \tab character; unit of TIME\cr
#' column 5: \tab \code{AMT} \tab numeric; dosing amount (for dosing events)
#' in mg\cr
#' column 6: \tab \code{LIDV} \tab numeric; observation on a linear scale
#' (observation type determined by CMT), units determined by EVENTU column\cr
#' column 7: \tab \code{MDV} \tab numeric; missing dependent variable (1 if missing, 0 otherwise)\cr
#' column 8: \tab \code{CMT} \tab integer; compartment number
#' (determines observation type):\cr
#' \tab \tab CMT 1 = Dosing event\cr
#' \tab \tab CMT 2 = PK concentration\cr
#' column 9: \tab \code{NAME} \tab character; description of event\cr
#' column 10: \tab \code{EVENTU} \tab character; unit for observation\cr
#' column 11: \tab \code{CENS} \tab integer; censored values
#' (0 = not censored, 1 = censored)\cr
#' column 12: \tab \code{EVID} \tab integer; event ID (0 = observation,
#' 1 = dosing event)\cr
#' column 13: \tab \code{WEIGHTB} \tab numeric; baseline body weight (kg)\cr
#' column 14: \tab \code{SEX} \tab character; sex\cr
#' column 15: \tab \code{TRTACT} \tab factor; treatment group label\cr
#' column 16: \tab \code{DOSE} \tab numeric; randomized dose in mg
#' received
#' }
"sad"
| /scratch/gouwar.j/cran-all/cranData/xgxr/R/data.R |
#' predict.nls
#'
#' @param object Object of class inheriting from "nls"
#' @param newdata An optional data frame in which to look for variables with which to predict.
#' If omitted, the fitted values are used.
#' @param se.fit A switch indicating if standard errors are required.
#' @param interval Type of interval calculation, "none" or "confidence"
#' @param level Level of confidence interval to use
#' @param ... additional arguments affecting the predictions produced.
#'
#' @return \code{predict.nls} produces a vector of predictions or a matrix of predictions and
#' bounds with column names \code{fit}, \code{lwr}, and \code{upr} if interval is set.
#'
#' If \code{se.fit} is \code{TRUE}, a list with the following components is returned:
#'
#' \item{fit}{vector or matrix as above}
#'
#' \item{se.fit}{standard error of predicted means}
#'
#' \item{residual.scale}{residual standard deviations}
#'
#' \item{df}{degrees of freedom for residual}
#'
#' @examples
#'
#' set.seed(12345)
#' data_to_plot <- data.frame(x1 = rep(c(0, 25, 50, 100, 200, 400, 600), 10)) %>%
#' dplyr::mutate(AUC = x1*rlnorm(length(x1), 0, 0.3),
#' x2 = x1*stats::rlnorm(length(x1), 0, 0.3),
#' Response = (15 + 50*x2/(20+x2))*stats::rlnorm(length(x2), 0, 0.3))
#'
#'
#' gg <- ggplot2::ggplot(data = data_to_plot, ggplot2::aes(x = AUC, y = Response)) +
#' ggplot2::geom_point() +
#' xgx_geom_smooth(method = "nls",
#' method.args = list(formula = y ~ E0 + Emax* x / (EC50 + x),
#' start = list(E0 = 15, Emax = 50, EC50 = 20) ),
#' color = "black", size = 0.5, alpha = 0.25)
#' gg
#'
#' mod <- stats::nls(formula = Response ~ E0 + Emax * AUC / (EC50 + AUC),
#' data = data_to_plot,
#' start = list(E0 = 15, Emax = 50, EC50 = 20))
#'
#' predict.nls(mod)
#'
#' predict.nls(mod, se.fit = TRUE)
#'
#' predict.nls(mod,
#' newdata = data.frame(AUC = c(0, 25, 50, 100, 200, 400, 600)),
#' se.fit = TRUE)
#'
#' predict.nls(mod,
#' newdata = data.frame(AUC = c(0, 25, 50, 100, 200, 400, 600)),
#' se.fit = TRUE, interval = "confidence", level = 0.95)
#'
#' predict(mod,
#' newdata = data.frame(AUC = c(0, 25, 50, 100, 200, 400, 600)),
#' se.fit = TRUE, interval = "confidence", level = 0.95)
#'
#' @importFrom Deriv Deriv
#' @importFrom stats nls qt vcov
#' @exportS3Method stats::predict
#' @export predict.nls
predict.nls <- function(object, newdata = NULL, se.fit = FALSE, interval = "none", level = 0.95, ...){
pred <- list()
# function to calculate gradient wrt model parameters
# value is the function value
# grad is the gradient
fun_grad <- function(form, newdata, pars, se.fit){
# extract the model parameters to the local environment
list2env(pars %>% as.list(), envir = environment())
ret <- list()
if(se.fit){
ret$grad <- list()
}
for(i in 1:length(newdata[,1])){
if(length(newdata[1,]) > 1){
for(j in names(newdata[i,])){
assign(j, newdata[i,j])
}
}else{
j = names(newdata[1])
assign(j, newdata[i,j])
}
ret$value[i] <- eval(form[[3L]]) # this is the value of the formula
if(se.fit){
ret$grad[[i]] <- eval(Deriv::Deriv(form, names(pars), cache.exp = FALSE)) %>% as.list()
if(is.null(names(ret$grad[[i]]))){
names(ret$grad[[i]]) <- names(pars)
}
}
}
if(se.fit){
ret$grad <- dplyr::bind_rows(ret$grad) %>% as.matrix
}
return(ret)
}
if(is.null(newdata)){
fg <- list()
fg$value <- as.numeric(object$m$fitted())
fg$grad <- object$m$gradient()
}else{
fg <- fun_grad(form = object$m$formula(), newdata, pars = object$m$getPars(), se.fit)
}
f.new <- fg$value # value of function
if(se.fit){
pred$fit <- f.new
grad.new <- fg$grad # value of gradient
vcov <- vcov(object)
GS = rowSums((grad.new%*%vcov)*grad.new)
if(interval == "confidence"){
alpha = 1 - level
deltaf <- sqrt(GS)*qt(1 - alpha/2, df = summary(object)$df[2])
pred$fit <- data.frame(fit = pred$fit)
pred$fit$lwr <- f.new - deltaf
pred$fit$upr <- f.new + deltaf
}
pred$se.fit <- sqrt(GS)
pred$df <- summary(object)$df[2]
}else{
pred <- f.new
}
return(pred)
}
| /scratch/gouwar.j/cran-all/cranData/xgxr/R/predict.nls.R |
#' Calls the standard theme for xGx graphics
#'
#' @return xgx ggplot2 compatible theme
#'
#' @examples
#' conc <- 10^(seq(-3, 3, by = 0.1))
#' ec50 <- 1
#' data <- data.frame(concentration = conc,
#' bound_receptor = 1 * conc / (conc + ec50))
#' ggplot2::ggplot(data, ggplot2::aes(y = concentration, x = bound_receptor)) +
#' ggplot2::geom_point() +
#' ggplot2::geom_line() +
#' xgx_scale_y_log10() +
#' xgx_scale_x_reverselog10() +
#' theme_xgx()
#'
#' @export
theme_xgx <- function() {
xgx_theme()
}
| /scratch/gouwar.j/cran-all/cranData/xgxr/R/theme_xgx.R |
#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
#'
#' @return result of piped operation
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
NULL
| /scratch/gouwar.j/cran-all/cranData/xgxr/R/utils-pipe.R |
#' Append filenames to bottom of the plot
#'
#' \code{xgx_annotate_filenames} appends file details to the bottom of a plot
#' using the plot caption option.
#' File details to append include the parent directory, the path of the R
#' script which generated the plot,
#' and the path of the plot.
#'
#' @param dirs list containing directories and filenames. It must contain
#' five fields
#' \enumerate{
#' \item parent_dir = Parent directory containing the Rscript and the Results
#' folder
#' \item rscript_dir = Subdirectory ofparent_dir that contains the Rscript
#' used to generate the figure
#' \item rscript_name= Name of the Rscript used to generate the figure
#' \item results_dir = Subdirectory ofparent_dir where the figure is stored
#' \item filename = Filename
#' }
#' @param hjust horizontal justification of the caption
#' @param color font color for caption, default black
#' @param size font size for caption, default 11
#'
#' @return None
#'
#' @examples
#' dirs <- list(parent_dir = "/your/parent/path/",
#' rscript_dir = "./Rscripts/",
#' rscript_name = "Example.R",
#' results_dir = "./Results/",
#' filename = "your_file_name.png")
#' data <- data.frame(x = 1:1000, y = rnorm(1000))
#' ggplot2::ggplot(data = data, ggplot2::aes(x = x, y = y)) +
#' ggplot2::geom_point() +
#' xgx_annotate_filenames(dirs)
#'
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 geom_point
#' @importFrom ggplot2 labs
#' @importFrom ggplot2 theme
#' @importFrom ggplot2 element_text
#' @export
xgx_annotate_filenames <- function(dirs, hjust = 0.5, color = "black", size = 11) {
caption <- xgx_dirs2char(dirs)
return(list(
ggplot2::labs(caption = caption),
ggplot2::theme(plot.caption = ggplot2::element_text(hjust = hjust, color = color, size = size))
))
}
| /scratch/gouwar.j/cran-all/cranData/xgxr/R/xgx_annotate_filenames.R |
#' Create a status (e.g. DRAFT) annotation layer
#'
#' \code{xgx_annotate_status} adds a status (e.g. DRAFT) annotation layer
#' to a plot.
#' The text of the annotation can be customized, the default is "DRAFT".
#' The color, location, size, fontface, transparency of the annotation can
#' also be customized.
#'
#' @param status the annotation text to display on the plot, default is "DRAFT"
#' @param x x location, default Inf (right most point)
#' @param y y location, default Inf (up most point)
#' @param color font color, default "grey"
#' @param hjust horizontal justification, default 1.2
#' @param vjust vertical justification, default 1.2
#' @param fontsize font size to use, default 7
#' @param fontface font style to use, default "bold"
#' @param alpha transparency, default is 0.5
#' @param ... other arguments passed on to \code{\link[ggplot2]{layer}}
#'
#' @return ggplot layer
#'
#' @examples
#' data <- data.frame(x = 1:1000, y = rnorm(1000))
#' ggplot2::ggplot(data = data, ggplot2::aes(x = x, y = y)) +
#' ggplot2::geom_point() +
#' xgx_annotate_status("DRAFT")
#'
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 geom_point
#' @importFrom ggplot2 annotate
#' @export
xgx_annotate_status <- function(status = "DRAFT",
x = Inf, y = Inf, color = "grey",
hjust = 1.2, vjust = 1.2,
fontsize = 7, fontface = "bold",
alpha = 0.5, ...) {
ggplot2::annotate("text", x = x, y = y,
label = status, color = color,
hjust = hjust, vjust = vjust,
cex = fontsize, fontface = fontface,
alpha = alpha, ...)
}
| /scratch/gouwar.j/cran-all/cranData/xgxr/R/xgx_annotate_status.R |
#' Annotate a png file or directory of png files
#'
#' These function annotates a single png file or all files within a
#' directory.
#'
#' If a png file has been annotated once, this function will not
#' annotate it again. Therefore, you can run this function on
#' directories with different input script names and it will label
#' each file based on when each file was run.
#'
#' Based on code from MrFlick on
#' \href{https://stackoverflow.com/a/23816416}{Stack Overflow}.
#'
#' @param file_or_dir The png file to annotate or directory location for
#' annotating png files. Note this will annotate just once, so if
#' you generate multiple png files and then annotate at the end of
#' your script it will have the correct script name on it. Then if
#' you create new images in a different script in the same directory
#' and then annotate with the script name the second script, the PNG
#' files will show the correct script location for each file.
#' @param script Script name to add as a footnote; By default this is
#' empty, though it could name the script that
#' @param status Draft or other status; If \code{status="Final"} or
#' \code{status=""} the status overlay will be removed. By default
#' the status is DRAFT.
#' @param date_format Date format for adding the time the png was
#' annotated.
#' @param col Color for annotating the draft status
#' @param font Font to use for the annotation function
#' @param cex_status_mult Multiplication factor for the status
#' annotation. By default 7
#' @param cex_footnote_mult Multiplication factor for the footnote
#' annotation. By default 0.8
#' @param status_angle Angle to rotate status
#' @param x11 Display on the X11/Windows device
#'
#' @return nothing
#'
#' @examples
#' # using the examples from plot()
#' file.name <- tempfile(fileext = ".png")
#' grDevices::png(file.name)
#' graphics::plot(cars)
#' graphics::lines(stats::lowess(cars))
#' grDevices::dev.off()
#' # annotate one file
#' xgx_annotate_status_png(file.name, "/tmp/script1.R")
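#' # annotate all png files in a directory; files that were already
#' # annotated are skipped
#' xgx_annotate_status_png(dirname(file.name), "/tmp/script1.R")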
#'
#' @author Matthew Fidler, Alison M, ....
#' @importFrom grDevices grey
#' @importFrom assertthat has_extension
#' @importFrom png readPNG
#' @importFrom png writePNG
#' @importFrom grDevices png
#' @importFrom graphics lines
#' @importFrom graphics par
#' @importFrom graphics plot
#' @importFrom graphics plot.new
#' @importFrom graphics plot.window
#' @importFrom graphics rasterImage
#' @importFrom graphics text
#' @importFrom grDevices dev.off
#' @importFrom magrittr "%>%"
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 geom_point
#' @importFrom stats lowess
#' @importFrom stats rnorm
#' @importFrom stats sd
#' @importFrom dplyr group_by
#' @importFrom dplyr summarise
#' @export
xgx_annotate_status_png <- function(file_or_dir, script = "", status = "DRAFT",
date_format = "%a %b %d %X %Y",
col = grDevices::grey(0.8, alpha = 0.7),
font = 2,
cex_status_mult = 7,
cex_footnote_mult = 0.8,
status_angle = 45,
x11 = FALSE) {
# read file
if (assertthat::has_extension(file_or_dir, "png")) {
files <- c(file_or_dir)
} else {
files <- list.files(file_or_dir, pattern = ".png$", full.names = TRUE)
}
for (file in files) {
img <- png::readPNG(file, info = TRUE)
info <- attr(img, "info")
dpi <- round(mean(c(0, info$dpi), na.rm = TRUE), 0)
if (dpi < 10) {
dpi <- 75
}
metadata <- attr(img, "metadata")
if (!identical(metadata, "annotated by xGx")) {
message(sprintf("Add footnote to %s\n", file))
# get size
h <- dim(img)[1]
w <- dim(img)[2]
# open file for output
# make it slightly taller to add the text at the bottom
grDevices::png(file, width = w, height = h * 1.05)
# par is for setting graphical parameters
# here, you're initializing a "state machine" setting all the
# graphical parameters
# from the state machine. you can just set a few parameters differently.
# it is extremely fast and takes no memory.
#
# grid, lattice, ggplot are friendlier to use, but they are much slower
# and they require more memory
#
# mar <- c(0, 0, 0, 0): sets margins to zero
# xpd <- NA: all plotting is clipped to the device region
# mgp <- c(0, 0, 0): margin line (in mex units) for the axis title.
# oma <- c(0, 0, 0, 0): more margins # ann = FALSE: do not add extra
# annotation to the plot
old_par <- graphics::par(no.readonly =TRUE)
on.exit(graphics::par(old_par))
graphics::par(mar = c(0, 0, 0, 0),
xpd = NA,
mgp = c(0, 0, 0),
oma = c(0, 0, 0, 0), ann = FALSE)
# creates new plot - uses what was set with par()?
graphics::plot.new()
graphics::plot.window(0:1, 0:1)
# fill plot with image
# gives the extremes of the user coordinates
usr <- graphics::par("usr")
# shifted up by 0.1 to make space for text
graphics::rasterImage(img, usr[1], usr[3] + 0.1, usr[2], usr[4])
# add draft status to text if status isn't "Final"
# could be boolean, too.
cx <- dpi / 75
if (!any(status == c("Final", ""))) {
graphics::text(0.5, 0.5, status, cex = cex_status_mult * cx,
col = col, font = font, srt = status_angle)
}
# add path to the bottom of the graphs
bottom_txt <- paste0(script, ifelse(script == "", "", "\n"),
"PNG: ", file,
ifelse(date_format == "", "",
paste0("\n", "Date: ",
format(Sys.time(), date_format))))
graphics::text(0.5, 0.025, bottom_txt, cex = cx * cex_footnote_mult)
# close image
invisible(grDevices::dev.off())
img <- png::readPNG(file)
png::writePNG(img, file, metadata = "annotated by xGx")
if (x11) {
graphics::par(mar = c(0, 0, 0, 0), xpd = NA, mgp = c(0, 0, 0),
oma = c(0, 0, 0, 0), ann = FALSE)
lim <- graphics::par()
graphics::plot.new()
graphics::plot.window(0:1, 0:1)
graphics::rasterImage(img, lim$usr[1], lim$usr[3],
lim$usr[2], lim$usr[4])
}
} else {
message(sprintf("Already annotated %s; Need to regenerate figure to annotate again\n", file))
}
}
return(invisible())
}
| /scratch/gouwar.j/cran-all/cranData/xgxr/R/xgx_annotate_status_png.R |
#' Produce an xgx-styled report the given dataset using xgx R markdown templates,
#' or a user-provided R markdown template. (Note: The R markdown template provided must
#' be formatted in a similar manner to that of the xgx R markdown templates to work.)
#' The working directory will contain a new directory (`xgx_autoexplore_output`) after running this function,
#' which will contain a directory for the dataset, and futher a directory for the type of analysis / R markdown template.
#'
#' \code{xgx_auto_explore} returns an HTML and PDF document with plots
#' describing the provided dataset
#'
#' This function can be used quickly to explore your data by generating
#' overview plots before constructing non-linear mixed effects models.
#'
#' @param data_path Path (as a string) to the dataset that is to be analyzed
#' @param mapping A list of column name mappings from the
#' original (template) dataset column names
#' to the corresponding columns in the new dataset.
#' @param author_name The name of the author to be displayed on the template
#' @param multiple_dosing Whether or not to use a "Multiple" or "Single" Ascending dose template
#' @param dose_cmt Integer denoting the compartment for dosing records
#' @param pk_cmt An integer denoting the "compartment" containing the PK data. The "CMT" column will typically
#' have these integers, where each row may contain PK, PD, dosing or other events/observations data
#' @param pd_cmt An integer denoting the "compartment" containing the PD data,
#' of the desired type (continuous, ordinal, etc.). The "CMT" column will typically
#' have these integers, where each row may contain PK, PD, dosing or other events/observations data
#' @param pd_data_type The type of PD data - acceptable values exist in the following list:
#' ["binary","continuous","count","ordinal","real_example","receptor_occupancy","time_to_event"]
#' @param steady_state_day used to denote the day of rich sampling of PK at steady state
#' @param time_between_doses dosing interval, has units to match the time variable of the dataset
#' @param rmd_template_name A custom output name for the generated Rmd file
#' @param rmd_template_path A user provided custom template (as a string)
#' @param rmd_output_path A custom output path for the generated Rmd file
#' (This is typically left as `NULL` in order to maintain the hierarchical directory structure of `xgx_autoexplore_output`))
#' @param pdf_output_path A custom output path for the generated PDF file
#' (This is typically left as `NULL` in order to maintain the hierarchical directory structure of `xgx_autoexplore_output`))
#' @param html_output_path A custom output path for the generated HTML file
#' (This is typically left as `NULL` in order to maintain the hierarchical directory structure of `xgx_autoexplore_output`))
#' @param add_datetime Boolean indicating additon of a date stamp to the beginnning of the Rmd file
#' @param show_explanation Boolean indicating if the additional explanations (text in between figures) are needed for the user.
#'
#' @return NULL
#'
#' @examples
#'
#' author_name = "Your Name Here"
#' show_explanation = FALSE
#'
#' \dontrun{
#' # Try out the nonlinear_pkpd dataset with the
#' # Multiple Ascending Dose PK Rmd template
#' data_path <- "~/nonlinear_pkpd.csv"
#'
#' # Specify the mapping of column names
#' mapping <- list(
#' "TIME" = "TIM2",
#' "NOMTIME" = "NT",
#' "EVID" = 0,
#' "CENS" = 0,
#' "DOSE" = "MGKG",
#' "TRTACT" = "TRT",
#' "LIDV_NORM" = "LIDV/MGKG",
#' "LIDV_UNIT" = "UNIT",
#' "PROFDAY" = 1,
#' "SEX" = 0,
#' "WEIGHTB" = 0)
#'
#'
#' # 5 contains the PK Concentration in this dataset
#' pk_cmt = 5
#' # We don't need PD right now
#' pd_cmt = NULL
#' pd_data_type = NULL
#'
#'
#' dose_cmt = 1
#' steady_state_day = c(0, 6)
#' time_between_doses = 24
#' multiple_dosing = TRUE
#'
#' output_directory = tempdir()
#'
#' xgx_auto_explore(data_path = data_path,
#' mapping = mapping,
#' author_name = author_name,
#' pk_cmt = pk_cmt,
#' pd_cmt = pd_cmt,
#' dose_cmt = dose_cmt,
#' steady_state_day = steady_state_day,
#' time_between_doses = time_between_doses,
#' multiple_dosing = multiple_dosing,
#' pd_data_type = pd_data_type,
#' rmd_output_path = output_directory,
#' show_explanation = show_explanation)
#' }
#'
#' @importFrom stringr str_replace
#' @importFrom readr read_file
#' @export
xgx_auto_explore <- function(data_path = NULL,
mapping = list(),
author_name = NULL,
multiple_dosing = FALSE,
pk_cmt = NULL,
pd_cmt = NULL,
pd_data_type = NULL,
dose_cmt = NULL,
steady_state_day = NULL,
time_between_doses = NULL,
rmd_template_name = NULL,
rmd_template_path = NULL,
rmd_output_path = NULL,
pdf_output_path = NULL,
html_output_path = NULL,
add_datetime = TRUE,
show_explanation = TRUE) {
working_dir <- getwd()
# A specific file path to an R markdown file can be given; however,
# if the template path is not provided, the R markdown template
# from the xgx github will be downloaded.
if (is.null(rmd_template_path)){
rmd_str <- get_rmd_str(rmd_template_name = rmd_template_name,
multiple_dosing = multiple_dosing,
pk_cmt = pk_cmt,
pd_cmt = pd_cmt,
pd_data_type = pd_data_type)
rmd_template_name <- tools::file_path_sans_ext(
get_rmd_name(rmd_template_name = rmd_template_name,
multiple_dosing = multiple_dosing,
pk_cmt = pk_cmt,
pd_cmt = pd_cmt,
pd_data_type = pd_data_type))
} else{
rmd_str <- readr::read_file(rmd_template_path)
if (is.null(rmd_template_name)) {
rmd_template_name <- tools::file_path_sans_ext(basename(rmd_template_path))
}
}
# Setup default output paths
dataset_name = tools::file_path_sans_ext(basename(data_path))
  autoexplore_out_dir = file.path(working_dir,
                                  "xgx_autoexplore_output",
                                  dataset_name,
                                  rmd_template_name)
  # Ensure that the directory exists by creating it - must be done iteratively for hierarchical directories
  dir.create(working_dir, showWarnings = FALSE)
  dir.create(file.path(working_dir, "xgx_autoexplore_output"), showWarnings = FALSE)
  dir.create(file.path(working_dir, "xgx_autoexplore_output", dataset_name), showWarnings = FALSE)
  dir.create(file.path(working_dir, "xgx_autoexplore_output", dataset_name, rmd_template_name), showWarnings = FALSE)
if (is.null(rmd_output_path)) {
rmd_output_path <- file.path(autoexplore_out_dir, paste0(rmd_template_name, ".Rmd"))
}
if (is.null(pdf_output_path)) {
pdf_output_path <- file.path(autoexplore_out_dir, paste0(rmd_template_name, ".pdf"))
}
if (is.null(html_output_path)) {
html_output_path <- file.path(autoexplore_out_dir, paste0(rmd_template_name, ".html"))
}
# Edit the Rmd template and your data to fit the standard dataset type
rmd_ouput_path <- edit_rmd_template_str(rmd_str = rmd_str,
mapping = mapping,
rmd_output_path = rmd_output_path,
data_path = data_path,
pk_cmt = pk_cmt,
pd_cmt = pd_cmt,
dose_cmt = dose_cmt,
steady_state_day = steady_state_day,
time_between_doses = time_between_doses,
author_name = author_name,
add_datetime = add_datetime,
show_explanation = show_explanation)
# Render and save the HTML document
rmarkdown::render(input = rmd_output_path,
output_file = html_output_path,
output_dir = autoexplore_out_dir,
output_format = "html_document",
quiet = TRUE)
# Render and save the PDF
rmarkdown::render(input = rmd_output_path,
output_file = pdf_output_path,
output_dir = autoexplore_out_dir,
output_format = "pdf_document",
quiet = TRUE)
}
#' Edit a Rmd Template from xgx
#'
#' \code{edit_rmd_template_str} returns a path to the altered Rmd template
#'
#' @param rmd_str A character string containing the Rmd template raw characters
#' @param mapping A list of column name mappings from the
#' original (template) dataset column names
#' to the corresponding columns in the new dataset
#' @param rmd_output_path A custom output path for the generated Rmd file
#' (This is typically left as `NULL` in order to maintain the hierarchical directory structure of `xgx_autoexplore_output`))
#' @param data_path Path (as a string) to the dataset that is to be analyzed
#' @param multiple_dosing if FALSE use single ascending dose template, if TRUE use multiple
#' @param pk_cmt An integer denoting the "compartment" containing the PK data. The "CMT" column will typically
#' have these integers, where each row may contain either PK or PD data, potentially of different types (continuous, ordinal, etc.)
#' @param pd_cmt An integer denoting the "compartment" containing the PD data, of the desired type (continuous, ordinal, etc.). The "CMT" column will typically
#' have these integers, where each row may contain either PK or PD data
#' @param dose_cmt CMT associated with dosing event
#' @param steady_state_day For multiple ascending dose, what day is steady state rich profile?
#' @param time_between_doses time interval between doses
#' @param author_name The name of the author to be displayed on the template
#' @param add_datetime Boolean indicating additon of a date stamp to the beginnning of the Rmd file
#' @param show_explanation Boolean indicating if the additional explanations (text in between figures) are needed for the user.
#'
#' @return A string of the new R markdown template
#'
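#' @examples
#' \dontrun{
#' # Sketch with hypothetical paths: download a template, point it at your own
#' # dataset and rename a couple of columns; "~/your_dataset.csv" is a placeholder
#' rmd_str <- get_rmd_str(multiple_dosing = FALSE, pk_cmt = 2)
#' edit_rmd_template_str(rmd_str = rmd_str,
#'                       mapping = list("TIME" = "TIM2", "DOSE" = "AMT"),
#'                       rmd_output_path = file.path(tempdir(), "template.Rmd"),
#'                       data_path = "~/your_dataset.csv",
#'                       pk_cmt = 2,
#'                       author_name = "Your Name Here")
#' }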
#' @importFrom glue glue
#' @importFrom Hmisc escapeRegex
#' @importFrom readr read_file
#' @importFrom stringr str_replace
#' @importFrom utils capture.output
#' @importFrom utils getParseData
#' @importFrom utils read.csv
#' @importFrom utils tail
#'
#' @export
edit_rmd_template_str <- function(rmd_str = NULL,
mapping = NULL,
rmd_output_path = NULL,
data_path = NULL,
multiple_dosing = FALSE,
pk_cmt = NULL,
pd_cmt = NULL,
dose_cmt = NULL,
steady_state_day = NULL,
time_between_doses = NULL,
author_name = NULL,
add_datetime = TRUE,
show_explanation = TRUE) {
token <- parent <- NULL
author_name_re <- 'author: \\"(.*)\\"'
user_data <- utils::read.csv(data_path)
# Alter the path to the data, to match the user given path
if (!is.null(data_path)) {
# Edit the Rmd string to contain the csv desired filepath
user_data_path_replacement_re = paste0("\\1", '\\"',
Hmisc::escapeRegex(data_path),
'\\"\\)')
rmd_str <- stringr::str_replace(string = rmd_str,
pattern = "(read.csv\\()(.*)",
replacement = user_data_path_replacement_re)
}
# Change the column name mapping
if (!is.null(mapping)) {
# Alter dataset in Rmd
# - Note: `mutate` is assumed to be used at the top of an Rmd template
# similar to all xgx Rmd templates
# Find first location of `mutate` function
mutate_loc <- stringr::str_locate(rmd_str, "mutate\\(")[1, "start"]
# Get the string starting at this location
mutate_start_rmd_str <- substr(rmd_str,
start = mutate_loc,
stop = nchar(rmd_str))
    # Regular Expressions do not work here, because comments can contain parentheses
# So we will get the `mutate` full expression by parsing
mutate_expr <- parse(text = mutate_start_rmd_str, n = 1)
mutate_expr_parse_data <- utils::getParseData(mutate_expr, includeText = TRUE)
mutate_expr_str <- mutate_expr_parse_data[1, "text"]
# Split the main `mutate` function parameters into
# 'right' and 'left' hand side expressions
main_parent <- mutate_expr_parse_data[1,"id"]
orig_mapping_right <- mutate_expr_parse_data %>%
subset(token == "expr") %>%
subset(parent == main_parent) %>%
select(text)
orig_mapping_right <- utils::tail(orig_mapping_right[[1]], -1)
orig_mapping_left <- mutate_expr_parse_data %>%
subset(token == "SYMBOL_SUB") %>%
subset(parent == main_parent) %>%
select(text)
# Original mapping is stored as a list with the
# names as the 'right hand' expressions, and values as 'left hand' exprs
orig_mapping <- orig_mapping_right
names(orig_mapping) <- orig_mapping_left[[1]]
# Now we add the original map to the new map
# if the new mapping is missing anything in the original map
for (old_col in names(orig_mapping)) {
# Value for key value pair in orig_mapping
old_col_value <- orig_mapping[[old_col]]
# Add columns from old mapping that are not yet present in mapping
if (!(old_col %in% names(mapping))) {
mapping[old_col] <- old_col_value
}
}
# The new mapping must be stored as a string in order to insert
new_mutate_str <- paste(utils::capture.output(dput(mapply(as.name, mapping))),
sep='\n',
collapse = "")
# Change 'list' to 'mutate' in mapping string representation
new_mutate_str <- stringr::str_replace(string = new_mutate_str,
pattern = "list",
replacement = "mutate")
# Remove newlines, to add them in in a more consistent manner later
new_mutate_str <- stringr::str_replace_all(string = new_mutate_str,
pattern = "\\\\n",
replacement = "")
# Remove string punctuation such that expressions are evaluated correctly
new_mutate_str <- stringr::str_replace_all(string = new_mutate_str,
pattern = "`",
replacement = "")
# New Rmd
rmd_str <- stringr::str_replace(string = rmd_str,
pattern = Hmisc::escapeRegex(mutate_expr_str),
replacement = new_mutate_str)
}
# Change the PK compartment
if (!is.null(pk_cmt)) {
pattern <- "PK_CMT\\s*=\\s*(\\d)"
replace_str <- glue::glue("PK_CMT = {pk_cmt}")
rmd_str <- stringr::str_replace(string = rmd_str,
pattern = pattern,
replacement = replace_str)
}
# Change the PD compartment
if (!is.null(pd_cmt)) {
pattern <- "PD_CMT\\s*=\\s*(\\d)"
replace_str <- glue::glue("PD_CMT = {pd_cmt}")
rmd_str <- stringr::str_replace(string = rmd_str,
pattern = pattern,
replacement = replace_str)
}
# Change the dose compartment
if (!is.null(dose_cmt)) {
pattern <- "DOSE_CMT\\s*=\\s*(\\d)"
replace_str <- glue::glue("DOSE_CMT = {dose_cmt}")
rmd_str <- stringr::str_replace(string = rmd_str,
pattern = pattern,
replacement = replace_str)
}
# Change the steady state day
if (!is.null(steady_state_day)) {
pattern <- "SS_PROFDAY\\s*=\\s*(\\d)"
replace_str <- glue::glue("SS_PROFDAY = {steady_state_day}")
rmd_str <- stringr::str_replace(string = rmd_str,
pattern = pattern,
replacement = replace_str)
}
# Change tau
if (!is.null(time_between_doses)) {
pattern <- "TAU\\s*=\\s*(\\d)"
replace_str <- glue::glue("TAU = {time_between_doses}")
rmd_str <- stringr::str_replace(string = rmd_str,
pattern = pattern,
replacement = replace_str)
}
# Add author name
if (!is.null(author_name)) {
replacement_str <- glue::glue('author: \"{author_name}\"')
rmd_str <- stringr::str_replace(string = rmd_str,
pattern = author_name_re,
replacement = replacement_str)
}
# Add datetime
if (add_datetime) {
date_re <- 'date: \\"(.*)\\"'
rmd_date_str <- stringr::str_match(string = rmd_str,
pattern = date_re)
datetime_str <- "date: \"`r format(Sys.time(), '%d %B, %Y')`\""
# if date already present, just replace it
if (is.null(rmd_date_str[1])) {
search_date_re <- date_re
replacement_str <- datetime_str
}
# Otherwise, add it after the author name
else {
search_date_re <- author_name_re
author_str <- stringr::str_match(string = rmd_str,
pattern = author_name_re)[1]
replacement_str <- paste0(author_str, '\n', datetime_str)
}
rmd_str <- stringr::str_replace(string = rmd_str,
pattern = search_date_re,
replacement = Hmisc::escapeRegex(replacement_str))
}
if(!(show_explanation)) {
start_comment <- "<!--START_EXPLANATION-->"
end_comment <- "<!--END_EXPLANATION-->"
pattern <- paste0(start_comment, "(\\s\\S)*", end_comment)
replace <- "" # paste0("<!--", "\\1", "-->")
rmd_str <- stringr::str_replace_all(rmd_str,
pattern = "(START_EXPLANATION-->|<!--END_EXPLANATION)",
replacement = replace)
}
# Add source files
# rmd_str <- stringr::str_replace_all(rmd_str,
# pattern = "library\\(xgxr\\)",
# replacement = Hmisc::escapeRegex("library(xgxr)
# # For testing:
# source('~/xgxr/R/xgx_conf_int.R', echo=FALSE)
# source('~/xgxr/R/xgx_stat_ci.R', echo=FALSE)
# source('~/xgxr/Rdev/xgx_ordinal_regression_plot.R', echo=FALSE)
# source('~/xgxr/Rdev/xgx_stat_smooth.R', echo=FALSE)"))
# Save the R markdown document
dir.create(dirname(rmd_output_path), showWarnings = FALSE)
fileConn <- file(rmd_output_path, 'w')
writeChar(rmd_str, fileConn)
close(fileConn)
return(rmd_output_path)
}
#' Determine the name of a Rmd template
#'
#' \code{get_rmd_name} returns a name for an Rmd template, based on the desired PKPD parameters
#'
#'
#' @param rmd_template_name A custom output name for the generated Rmd file
#' @param multiple_dosing if FALSE use single ascending dose template, if TRUE use multiple
#' @param pk_cmt An integer denoting the "compartment" containing the PK data. The "CMT" column will typically
#' have these integers, where each row may contain either PK or PD data, potentially of different types (continuous, ordinal, etc.)
#' @param pd_cmt An integer denoting the "compartment" containing the PD data, of the desired type (continuous, ordinal, etc.). The "CMT" column will typically
#' have these integers, where each row may contain either PK or PD data
#' @param pd_data_type The type of PD data - acceptable values exist in the following list: ["binary","continuous","count","ordinal","real_example","receptor_occupancy","time_to_event"]
#'
#' @return a string for the Rmd template name
#'
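#' @examples
#' # Illustrative calls; only the template file name is constructed, no file is read
#' get_rmd_name(multiple_dosing = TRUE, pk_cmt = 2)
#' get_rmd_name(multiple_dosing = FALSE, pk_cmt = 2, pd_cmt = 3,
#'              pd_data_type = "continuous")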
#'
#' @importFrom glue glue
#' @importFrom stringr str_replace
#' @importFrom readr read_file
#' @export
get_rmd_name <- function(rmd_template_name = NULL,
multiple_dosing = FALSE,
pk_cmt = NULL,
pd_cmt = NULL,
pd_data_type = NULL) {
if (!is.null(rmd_template_name)) {
# For Adverse_Events, Oncology_Efficacy_Plots
# Perhaps Multiple_Ascending_Dose_PK_KeyPlots ?
return(paste0(rmd_template_name, ".Rmd"))
}
allowable_pd_data_types <- c("binary",
"continuous",
"count",
"ordinal",
"real_example",
"receptor_occupancy",
"time_to_event")
if (!(is.null(pd_data_type))) {
if (!(pd_data_type %in% allowable_pd_data_types)) {
warning(glue::glue("The provided pd_data_type `{pd_data_type}` is not allowable.
Please choose a value from the list {allowable_pd_data_types}"))
}
}
# Construct the filename via the standard xgx rmd template filename format
pk_str <- if (!is.null(pk_cmt)) "PK" else ""
pd_str <- if (!is.null(pd_cmt)) "PD" else ""
if (multiple_dosing) {
multiple_dosing_str <- "Multiple_Ascending"
}
else{
multiple_dosing_str <- "Single_Ascending"
}
if (!is.null(pd_data_type) & !is.null(pd_cmt)){
rmd_fname <- glue::glue("{multiple_dosing_str}_Dose_{pk_str}{pd_str}_{pd_data_type}.Rmd")
}
else {
rmd_fname <- glue::glue("{multiple_dosing_str}_Dose_PK.Rmd")
}
return(rmd_fname)
}
#' Get a Rmd template as a character string
#'
#' Extracts the Rmd document from the xgx GitHub repository; if that does not
#' work, the file is pulled from the cached xgxr backup directory instead.
#'
#' \code{get_rmd_str} returns a Rmd template string, based on the desired PKPD parameters
#'
#'
#' @param rmd_template_name A custom output name for the generated Rmd file
#' @param multiple_dosing if FALSE use single ascending dose template, if TRUE use multiple
#' @param pk_cmt An integer denoting the "compartment" containing the PK data. The "CMT" column will typically
#' have these integers, where each row may contain either PK or PD data, potentially of different types (continuous, ordinal, etc.)
#' @param pd_cmt An integer denoting the "compartment" containing the PD data, of the desired type (continuous, ordinal, etc.). The "CMT" column will typically
#' have these integers, where each row may contain either PK or PD data
#' @param pd_data_type The type of PD data - acceptable values exist in the following list: ["binary","continuous","count","ordinal","real_example","receptor_occupancy","time_to_event"]
#'
#' @return a string for the Rmd template name
#'
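#' @examples
#' \dontrun{
#' # Requires an internet connection to reach the xgx GitHub repository
#' rmd_str <- get_rmd_str(multiple_dosing = FALSE, pk_cmt = 2)
#' }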
#'
#' @importFrom RCurl getURL
#' @importFrom readr read_file
#' @importFrom stringr str_replace
#'
#' @export
get_rmd_str <- function(rmd_template_name = NULL,
multiple_dosing = FALSE,
pk_cmt = NULL,
pd_cmt = NULL,
pd_data_type = NULL){
rmd_fname <- get_rmd_name(rmd_template_name = rmd_template_name,
multiple_dosing = multiple_dosing,
pk_cmt = pk_cmt,
pd_cmt = pd_cmt,
pd_data_type = pd_data_type)
# Try github first
git_url <- "https://raw.githubusercontent.com/Novartis/xgx/master/Rmarkdown/"
full_url <- paste0(git_url, rmd_fname)
# Read the Rmd from github into a string
rmd_str <- RCurl::getURL(full_url, ssl.verifypeer = FALSE)
# If no internet connection / unsuccessful, try the local files in xgxr
  if (is.null(rmd_str)) {
    rmd_str <- readr::read_file(paste0("../data/xgx_Rmd/", rmd_fname))
  }
  else if (rmd_str == "404: Not Found") {
    rmd_str <- readr::read_file(paste0("../data/xgx_Rmd/", rmd_fname))
  }
return(rmd_str)
}
| /scratch/gouwar.j/cran-all/cranData/xgxr/R/xgx_auto_explore.R |