#'
#'@title toUTC
#'
#'@description Returns UTC time from local time
#'
#'@param localtime POSIXct local time
#'
#'@param timeformat string; format of the local time. Defaults to "\%Y-\%m-\%d \%H:\%M:\%S"
#'
#'@return utctime POSIXct UTC time
#'
#'@examples toUTC(Sys.time())
#'
toUTC <- function(localtime, timeformat = "%Y-%m-%d %H:%M:%S"){
  utctime <- as.POSIXct(localtime, format = timeformat)
  attr(utctime, "tzone") <- "UTC"
  utctime
}
#'
#'@title fromUTC
#'
#'@description Returns local time from UTC time
#'
#'@param utctime POSIXct UTC time
#'
#'@param timeformat string; format of the UTC time. Defaults to "\%Y-\%m-\%d \%H:\%M:\%S"
#'
#'@return localtime POSIXct local time
#'
#'@examples fromUTC( as.POSIXct("1991-06-29 05:27:25", tz = "UTC") )
#'
fromUTC <- function(utctime, timeformat = "%Y-%m-%d %H:%M:%S"){
  localtime <- as.POSIXct(utctime, format = timeformat)
  attr(localtime, "tzone") <- Sys.timezone()
  localtime
}
#'
#'@title hoursUTC
#'
#'@description Returns the time difference, in hours, between local time and UTC
#'
#'@param to_test_time POSIXct time to evaluate; defaults to Sys.time(). Exposed mainly for testing
#'
#'@param timeformat string; format used when comparing the local and UTC times. Defaults to "\%Y-\%m-\%d \%H:\%M:\%S"
#'
#'@return number of hours
#'
#'@examples hoursUTC()
#'
hoursUTC <- function(to_test_time = Sys.time(), timeformat = "%Y-%m-%d %H:%M:%S"){
  localtime <- to_test_time
  # difference between the local clock time and the UTC clock time, in hours
  difftime(
    format(localtime, timeformat),
    format(toUTC(localtime), timeformat),
    units = "hours"
  )
}
#'
#'@title add hours
#'
#'@description This function adds h hours to a POSIXct
#'
#'@param h number of hours
#'
#'@param datetime POSIXct
#'
#'@return POSIXct with h hours added
#'
#'@examples 2 %h+% Sys.time()
#'
#'
'%h+%' <- function(h, datetime){
  h * 60 * 60 + datetime
}
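# A minimal usage sketch (added here for illustration, not part of the package
# source), assuming the functions above are loaded:
#   now <- Sys.time()
#   toUTC(now)            # the same instant, displayed in UTC
#   fromUTC(toUTC(now))   # back in the local time zone
#   hoursUTC()            # offset of the local time zone from UTC, in hours
#   2 %h+% now            # two hours after `now`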
|
/scratch/gouwar.j/cran-all/cranData/utc/R/utc.R
|
# Copyright 2017 Patrick O. Perry.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
as_character_scalar <- function(name, value, utf8 = TRUE) {
if (is.null(value)) {
return(NULL)
}
value <- as_character_vector(name, value, utf8)
if (length(value) != 1) {
stop(sprintf("'%s' must be a scalar character string", name))
}
value
}
as_character_vector <- function(name, value, utf8 = TRUE) {
if (!(is.null(value) || is.character(value) || is.factor(value) ||
all(is.na(value)))) {
stop(sprintf("'%s' must be text, a character vector, or NULL", name))
}
if (is.null(value)) {
return(NULL)
}
value <- as.character(value)
if (utf8) {
value <- as_utf8(value)
}
value
}
as_enum <- function(name, value, choices) {
if (!(is.character(value) && length(value) == 1 && !is.na(value))) {
stop(sprintf("'%s' must be a character string", name))
}
i <- pmatch(value, choices, nomatch = 0)
if (all(i == 0)) {
stop(
sprintf("'%s' must be one of the following: ", name),
paste(dQuote(choices), collapse = ", ")
)
}
i <- i[i > 0]
choices[[i]]
}
as_integer_scalar <- function(name, value, nonnegative = FALSE) {
if (is.null(value)) {
return(NULL)
}
value <- as_integer_vector(name, value, nonnegative)
if (length(value) != 1) {
stop(sprintf("'%s' must have length 1", name))
}
value
}
as_integer_vector <- function(name, value, nonnegative = FALSE) {
if (is.null(value)) {
return(NULL)
}
if (!(is.numeric(value) || all(is.na(value)))) {
stop(sprintf("'%s' must be integer-valued", name))
}
value <- as.integer(value)
if (nonnegative && any(!is.na(value) & value < 0)) {
stop(sprintf("'%s' must be non-negative", name))
}
value
}
as_na_print <- function(name, value) {
if (is.null(value)) {
return(NULL)
}
value <- as_character_scalar(name, value)
if (is.na(value)) {
stop(sprintf("'%s' cannot be NA", name))
}
value
}
as_nonnegative <- function(name, value) {
if (is.null(value)) {
return(NULL)
}
value <- as_integer_scalar(name, value, nonnegative = TRUE)
if (is.na(value)) {
stop(sprintf("'%s' cannot be NA", name))
}
value
}
as_option <- function(name, value) {
if (is.null(value)) {
return(FALSE)
}
if (!(length(value) == 1 && is.logical(value) && !is.na(value))) {
stop(sprintf("'%s' must be TRUE or FALSE", name))
}
as.logical(value)
}
as_chars <- as_nonnegative
as_justify <- function(name, value) {
as_enum(name, value, c("left", "right", "centre", "none"))
}
as_max_print <- as_nonnegative
as_print_gap <- function(name, value) {
value <- as_nonnegative(name, value)
if (!is.null(value) && value > 1024) {
stop(sprintf("'%s' must be less than or equal to 1024", name))
}
value
}
as_style <- function(name, value) {
value <- as_character_scalar(name, value)
if (is.null(value) || is.na(value)) {
return(NULL)
}
if (!grepl("^[0-9;]*$", value)) {
stop(sprintf("'%s' must be a valid ANSI SGR parameter string", name))
}
if (nchar(value) >= 128) {
stop(sprintf("'%s' must have length below 128 characters", name))
}
value
}
as_output_utf8 <- function(name, value) {
if (is.null(value)) {
return(output_utf8())
}
as_option(name, value)
}
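# Illustrative behaviour of the coercion helpers above (a sketch added for
# clarity, not part of the package source):
#   as_enum("justify", "cen", c("left", "right", "centre", "none"))  # "centre" (partial match)
#   as_nonnegative("chars", 10)                                      # 10L
#   as_nonnegative("chars", -1)                                      # error: must be non-negative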
|
/scratch/gouwar.j/cran-all/cranData/utf8/R/coerce.R
|
# Copyright 2017 Patrick O. Perry.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
output_ansi <- function() {
# tty?
if (isatty(stdout())) {
# and not Windows GUI?
return(.Platform$GUI != "Rgui")
}
# RStudio 1.1 or later with color enabled?
if (!is.na(as.numeric(Sys.getenv("RSTUDIO_CONSOLE_COLOR")))) {
# and output is stdout?
return(stdout() == 1)
}
FALSE
}
output_utf8 <- function() {
# ASCII-only character locale?
if (Sys.getlocale("LC_CTYPE") == "C") {
return(FALSE)
}
# UTF-8 locale?
if (l10n_info()$`UTF-8`) {
return(TRUE)
}
# Windows?
if (.Platform$OS.type == "windows") {
# This isn't really the case, but there's no way to set the
# locale to UTF-8 on Windows. In RGui and RStudio, UTF-8 is
# always supported on stdout(); output through connections
# gets translated through the native locale.
return(TRUE)
}
FALSE
}
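# Illustrative behaviour (a sketch, not part of the package source): in an
# interactive session running in a UTF-8 locale,
#   output_utf8()   # TRUE  (FALSE in the "C" locale)
#   output_ansi()   # TRUE when stdout() is a terminal (and not the Windows GUI)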
|
/scratch/gouwar.j/cran-all/cranData/utf8/R/output.R
|
# Copyright 2017 Patrick O. Perry.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# converts a character vector from its declared encoding to UTF-8
as_utf8 <- function(x, normalize = FALSE) {
ans <- .Call(rutf8_as_utf8, x)
if (normalize) {
ans <- utf8_normalize(ans)
}
ans
}
# test whether the elements can be converted to valid UTF-8
utf8_valid <- function(x) {
.Call(rutf8_utf8_valid, x)
}
# gets the width; NA for invalid or nonprintable sequences
utf8_width <- function(x, encode = TRUE, quote = FALSE, utf8 = NULL) {
with_rethrow({
encode <- as_option("encode", encode)
quote <- as_option("quote", quote)
utf8 <- as_output_utf8("utf8", utf8)
})
.Call(rutf8_utf8_width, x, encode, quote, utf8)
}
utf8_normalize <- function(x, map_case = FALSE, map_compat = FALSE,
map_quote = FALSE, remove_ignorable = FALSE) {
with_rethrow({
x <- as_utf8(x, normalize = FALSE)
map_case <- as_option("map_case", map_case)
map_compat <- as_option("map_compat", map_compat)
map_quote <- as_option("map_quote", map_quote)
remove_ignorable <- as_option("remove_ignorable", remove_ignorable)
})
.Call(
rutf8_utf8_normalize, x, map_case, map_compat, map_quote,
remove_ignorable
)
}
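# Illustrative behaviour (a sketch, not part of the package source), assuming a
# UTF-8 locale:
#   utf8_valid(c("caf\u00e9", "fa\xE7ile"))   # TRUE FALSE: the second is not valid UTF-8
#   utf8_normalize("cafe\u0301")              # "café", combining accent composed to NFC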
|
/scratch/gouwar.j/cran-all/cranData/utf8/R/utf8.R
|
# Copyright 2017 Patrick O. Perry.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
utf8_encode <- function(x, width = 0L, quote = FALSE, justify = "left",
escapes = NULL, display = FALSE, utf8 = NULL) {
if (is.null(x)) {
return(NULL)
}
if (!is.character(x)) {
stop("argument is not a character object")
}
with_rethrow({
width <- as_integer_scalar("width", width)
quote <- as_option("quote", quote)
justify <- as_justify("justify", justify)
escapes <- as_style("escapes", escapes)
display <- as_option("display", display)
utf8 <- as_output_utf8("utf8", utf8)
})
.Call(rutf8_utf8_encode, x, width, quote, justify, escapes, display, utf8)
}
|
/scratch/gouwar.j/cran-all/cranData/utf8/R/utf8_encode.R
|
# Copyright 2017 Patrick O. Perry.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
utf8_format <- function(x, trim = FALSE, chars = NULL, justify = "left",
width = NULL, na.encode = TRUE, quote = FALSE,
na.print = NULL, print.gap = NULL, utf8 = NULL, ...) {
if (is.null(x)) {
return(NULL)
}
if (!is.character(x)) {
stop("argument is not a character object")
}
with_rethrow({
trim <- as_option("trim", trim)
chars <- as_chars("chars", chars)
justify <- as_justify("justify", justify)
width <- as_integer_scalar("width", width)
na.encode <- as_option("na.encode", na.encode)
quote <- as_option("quote", quote)
na.print <- as_na_print("na.print", na.print)
print.gap <- as_print_gap("print_gap", print.gap)
utf8 <- as_output_utf8("utf8", utf8)
})
ellipsis <- "\u2026"
iellipsis <- iconv(ellipsis, "UTF-8", "")
if (is.na(iellipsis) || identical(iellipsis, "...")) {
ellipsis <- "..."
wellipsis <- 3L
} else {
wellipsis <- 1L
}
if (is.null(chars) && length(x) > 0) {
linewidth <- getOption("width")
quotes <- if (quote) 2 else 0
gap <- if (is.null(print.gap)) 1 else NULL
dim <- dim(x)
dimnames <- dimnames(x)
names <- if (is.null(dimnames)) names(x) else dimnames[[1]]
if (is.null(names)) {
comma <- length(dim) > 1
namewidth <- floor(log10(length(x)) + 1) + 2 + comma
} else if (length(dim) > 1) {
namewidth <- max(0, utf8_width(names, utf8 = utf8))
} else {
namewidth <- 0
}
chars <- (linewidth - wellipsis - quotes - gap - namewidth)
chars <- max(chars, 12)
}
.Call(
rutf8_utf8_format, x, trim, chars, justify, width, na.encode,
quote, na.print, ellipsis, wellipsis, utf8
)
}
|
/scratch/gouwar.j/cran-all/cranData/utf8/R/utf8_format.R
|
# Copyright 2017 Patrick O. Perry.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
utf8_print <- function(x, chars = NULL, quote = TRUE, na.print = NULL,
print.gap = NULL, right = FALSE, max = NULL,
names = NULL, rownames = NULL, escapes = NULL,
display = TRUE, style = TRUE, utf8 = NULL, ...) {
if (is.null(x)) {
return(invisible(NULL))
}
if (!is.character(x)) {
stop("argument is not a character object")
}
with_rethrow({
chars <- as_chars("chars", chars)
quote <- as_option("quote", quote)
na.print <- as_na_print("na.print", na.print)
print.gap <- as_print_gap("print_gap", print.gap)
right <- as_option("right", right)
max <- as_max_print("max", max)
names <- as_style("names", names)
rownames <- as_style("rownames", rownames)
escapes <- as_style("escapes", escapes)
display <- as_option("display", display)
style <- as_option("style", style)
utf8 <- as_output_utf8("utf8", utf8)
})
if (is.null(print.gap)) {
print.gap <- 1L
}
if (is.null(max)) {
max <- getOption("max.print")
}
if (!output_ansi()) {
style <- FALSE
}
if (!output_utf8()) {
utf8 <- FALSE
}
# truncate character objects
justify <- if (right) "right" else "left"
fmt <- utf8_format(x,
trim = TRUE, chars = chars,
justify = justify, na.encode = FALSE,
quote = quote, utf8 = utf8
)
dim <- dim(x)
if (is.null(dim) || length(dim) == 1) {
nprint <- print_vector(fmt,
quote = quote, na.print = na.print,
print.gap = print.gap, right = right,
max = max, names = names, rownames = rownames,
escapes = escapes, display = display,
style = style, utf8 = utf8
)
} else if (length(dim) == 2) {
nprint <- print_matrix(fmt,
quote = quote, na.print = na.print,
print.gap = print.gap, right = right,
max = max, names = names, rownames = rownames,
escapes = escapes, display = display,
style = style, utf8 = utf8
)
} else {
nprint <- print_array(fmt,
quote = quote, na.print = na.print,
print.gap = print.gap, right = right,
max = max, names = names, rownames = rownames,
escapes = escapes, display = display,
style = style, utf8 = utf8
)
}
n <- length(x)
if (nprint < n) {
cat(sprintf(
" [ reached getOption(\"max.print\") -- omitted %d entries ]\n",
n - nprint
))
}
invisible(x)
}
print_vector <- function(x, quote, na.print, print.gap, right, max,
names, rownames, escapes, display, style, utf8) {
if (length(x) == 0) {
cat("character(0)\n")
return(0L)
}
# drop dim, convert dimnames to names
if (!is.null(dim(x))) {
x <- c(x)
}
if (!is.null(names(x))) {
nprint <- print_vector_named(x,
quote = quote, na.print = na.print,
print.gap = print.gap, right = right,
max = max, names = names,
rownames = rownames, escapes = escapes,
display = display, style = style,
utf8 = utf8
)
} else {
nprint <- print_vector_unnamed(x,
quote = quote, na.print = na.print,
print.gap = print.gap, right = right,
max = max, names = names,
rownames = rownames, escapes = escapes,
display = display, style = style,
utf8 = utf8
)
}
nprint
}
print_vector_named <- function(x, quote, na.print, print.gap, right, max,
names, rownames, escapes, display, style, utf8) {
n <- length(x)
nm <- names(x)
namewidth <- max(0L, utf8_width(nm, utf8 = utf8))
eltwidth <- element_width(x,
quote = quote, na.print = na.print,
utf8 = utf8
)
width <- max(eltwidth, namewidth)
linewidth <- getOption("width")
ncol <- max(1L, linewidth %/% (width + print.gap))
extra <- n %% ncol
nprint <- 0L
off <- 0L
while (off + ncol <= n && nprint < max) {
ix <- (off + 1L):(off + ncol)
mat <- matrix(x[ix],
ncol = ncol, byrow = TRUE,
dimnames = list(NULL, nm[ix])
)
np <- print_table(mat,
width = width, quote = quote,
na.print = na.print, print.gap = print.gap,
right = right, max = max - nprint,
names = names, rownames = rownames,
escapes = escapes, display = display,
style = style, utf8 = utf8
)
nprint <- nprint + np
off <- off + ncol
}
if (extra > 0L && nprint < max) {
ix <- n - extra + seq_len(extra)
last <- rbind(as.vector(x[ix]))
rownames(last) <- NULL
colnames(last) <- nm[ix]
np <- print_table(last,
width = width, quote = quote,
na.print = na.print, print.gap = print.gap,
right = right, max = max - nprint,
names = names, rownames = rownames,
escapes = escapes, display = display,
style = style, utf8 = utf8
)
nprint <- nprint + np
}
nprint
}
print_vector_unnamed <- function(x, quote, na.print, print.gap, right,
max, names, rownames, escapes = escapes,
display, style, utf8) {
n <- length(x)
nm <- utf8_format(paste0("[", seq_len(n), "]"),
justify = "right",
utf8 = utf8
)
namewidth <- max(0L, utf8_width(nm, utf8 = utf8))
width <- element_width(x, quote = quote, na.print = na.print, utf8 = utf8)
linewidth <- getOption("width")
ncol <- max(1L, (linewidth - namewidth) %/% (width + print.gap))
extra <- n %% ncol
mat <- matrix(x[seq_len(n - extra)], ncol = ncol, byrow = TRUE)
rownames(mat) <- nm[seq(from = 1L, by = ncol, length.out = nrow(mat))]
nprint <- print_table(mat,
width = width, quote = quote,
na.print = na.print, print.gap = print.gap,
right = right, max = max, names = names,
rownames = rownames, escapes = escapes,
display = display, style = style, utf8 = utf8
)
if (extra > 0L && nprint < max) {
last <- rbind(as.vector(x[n - extra + seq_len(extra)]))
rownames(last) <- nm[n - extra + 1]
np <- print_table(last,
width = width, quote = quote,
na.print = na.print, print.gap = print.gap,
right = right, max = max - nprint, names = names,
rownames = rownames, escapes = escapes,
display = display, style = style, utf8 = utf8
)
nprint <- nprint + np
}
nprint
}
element_width <- function(x, quote, na.print, utf8) {
width <- max(0L, utf8_width(x, encode = TRUE, quote = quote, utf8 = utf8),
na.rm = TRUE
)
if (anyNA(x)) {
if (is.null(na.print)) {
na.print <- if (quote) "NA" else "<NA>"
}
width <- max(width, utf8_width(na.print, utf8 = utf8))
}
width
}
print_matrix <- function(x, quote, na.print, print.gap, right, max,
names, rownames, escapes, display, style, utf8) {
if (all(dim(x) == 0)) {
cat("<0 x 0 matrix>\n")
return(0L)
}
x <- set_dimnames(x)
print_table(x,
width = 0L, quote = quote, na.print = na.print,
print.gap = print.gap, right = right, max = max,
names = names, rownames = rownames, escapes = escapes,
display = display, style = style, utf8 = utf8
)
}
print_array <- function(x, quote, na.print, print.gap, right, max,
names, rownames, escapes, display, style, utf8) {
n <- length(x)
dim <- dim(x)
if (any(dim == 0)) {
cat(sprintf("<%s array>\n", paste(dim, collapse = " x ")))
return(0L)
}
x <- set_dimnames(x)
dimnames <- dimnames(x)
nrow <- dim[1]
ncol <- dim[2]
base <- c(NA, NA, rep(1, length(dim) - 2))
label <- vector("character", length(dim))
for (r in 3:length(dim)) {
label[[r]] <- dimnames[[r]][[1]]
}
nprint <- 0L
off <- 0L
while (off + nrow * ncol <= n && nprint < max) {
cat(paste(label, collapse = ", "), "\n\n", sep = "")
ix <- off + seq_len(nrow * ncol)
mat <- matrix(x[ix], nrow, ncol, dimnames = dimnames[1:2])
np <- print_table(mat,
width = 0L, quote = quote, na.print = na.print,
print.gap = print.gap, right = right,
max = max - nprint, names = names,
rownames = rownames, escapes = escapes,
display = display, style = style, utf8 = utf8
)
nprint <- nprint + np
off <- off + (nrow * ncol)
r <- 3L
while (r < length(dim) && base[r] == dim[r]) {
base[r] <- 1L
label[r] <- dimnames[[r]][[1L]]
r <- r + 1L
}
if (base[r] < dim[r]) {
base[r] <- base[r] + 1L
label[r] <- dimnames[[r]][[base[r]]]
}
cat("\n")
}
nprint
}
print_table <- function(x, width, quote, na.print, print.gap, right, max,
names, rownames, escapes, display, style, utf8) {
width <- as.integer(width)
if (is.null(na.print)) {
na.print <- if (quote) "NA" else "<NA>"
na.name.print <- "<NA>"
} else {
na.name.print <- na.print
}
print.gap <- as.integer(print.gap)
max <- as.integer(max)
if (!is.null(rownames(x))) {
rownames(x)[is.na(rownames(x))] <- na.name.print
}
if (!is.null(colnames(x))) {
colnames(x)[is.na(colnames(x))] <- na.name.print
}
linewidth <- getOption("width")
str <- .Call(
rutf8_render_table, x, width, quote, na.print, print.gap,
right, max, names, rownames, escapes, display, style,
utf8, linewidth
)
cat(str)
nprint <- min(max, length(x))
nprint
}
set_dimnames <- function(x) {
dim <- dim(x)
dimnames <- dimnames(x)
if (is.null(dimnames)) {
dimnames <- vector("list", length(dim))
}
for (i in seq_along(dim)) {
d <- dim[[i]]
ix <- seq_len(d)
if (is.null(dimnames[[i]]) && d > 0) {
if (i == 1) {
dimnames[[i]] <- format(paste0("[", ix, ",]"),
justify = "right"
)
} else if (i == 2) {
dimnames[[i]] <- paste0("[,", ix, "]")
} else {
dimnames[[i]] <- as.character(ix)
}
}
}
dimnames(x) <- dimnames
x
}
|
/scratch/gouwar.j/cran-all/cranData/utf8/R/utf8_print.R
|
# Copyright 2017 Patrick O. Perry.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
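# with_rethrow evaluates `expr` and re-attributes any error, warning, or
# message to the caller's call, so that conditions raised by argument checks
# point at the user-facing function rather than an internal helper.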
with_rethrow <- function(expr) {
parentcall <- sys.call(-1)
eval(
envir = parent.frame(),
withCallingHandlers(expr,
error = function(e, call = parentcall) {
e$call <- call
stop(e)
},
warning = function(w, call = parentcall) {
w$call <- call
warning(w)
invokeRestart("muffleWarning")
},
message = function(m, call = parentcall) {
m$call <- call
}
)
)
}
|
/scratch/gouwar.j/cran-all/cranData/utf8/R/util.R
|
---
title: "Unicode: Emoji, accents, and international text"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Unicode: Emoji, accents, and international text}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
Character encoding
------------------
Before we can analyze a text in R, we first need to get its digital
representation, a sequence of ones and zeros. In practice this works by first
choosing an *encoding* for the text that assigns each character a numerical
value, and then translating the sequence of characters in the text to the
corresponding sequence of numbers specified by the encoding. Today, most new
text is encoded according to the [Unicode standard][unicode], specifically the
8-bit Unicode Transformation Format, [UTF-8][utf8]. Joel Spolsky gives a
good overview of the situation in an [essay from 2003][spolsky2003].
The software community has mostly moved to UTF-8 as a standard for text
storage and interchange, but there is still a large volume of text in other
encodings. Whenever you read a text file into R, you need to specify the
encoding. If you don't, R will try to guess the encoding, and if it guesses
incorrectly, it will wrongly interpret the sequence of ones and zeros.
We will demonstrate the difficulties of encodings with the text of
Jane Austen's novel, _Mansfield Park_ provided by
[Project Gutenberg][gutenberg]. We will download the text, then
read in the lines of the novel.
```r
# download the zipped text from a Project Gutenberg mirror
url <- "http://mirror.csclub.uwaterloo.ca/gutenberg/1/4/141/141.zip"
tmp <- tempfile()
download.file(url, tmp)
# read the text from the zip file
con <- unz(tmp, "141.txt", encoding = "UTF-8")
lines <- readLines(con)
close(con)
```
The `unz` function and other similar file connection functions have `encoding`
arguments which, if left unspecified, default to assuming that the text is encoded
in your operating system's native encoding. To ensure consistent behavior
across all platforms (Mac, Windows, and Linux), you should set this option
explicitly. Here, we set `encoding = "UTF-8"`. This is a reasonable default,
but it is not always appropriate. In general, you should determine the
appropriate `encoding` value by looking at the file. Unfortunately, the file
extension `".txt"` is not informative, and could correspond to any encoding.
However, if we read the first few lines of the file, we see the following:
```r
lines[11:20]
```
```
[1] "Author: Jane Austen"
[2] ""
[3] "Release Date: June, 1994 [Etext #141]"
[4] "Posting Date: February 11, 2015"
[5] ""
[6] "Language: English"
[7] ""
[8] "Character set encoding: ASCII"
[9] ""
[10] "*** START OF THIS PROJECT GUTENBERG EBOOK MANSFIELD PARK ***"
```
The character set encoding is reported as ASCII, which is a subset of UTF-8.
So, we should be in good shape.
Unfortunately, we run into trouble as soon as we try to process the text:
```r
corpus::term_stats(lines) # produces an error
```
```
Error in corpus::term_stats(lines): argument entry 15252 is incorrectly marked as "UTF-8": invalid leading byte (0xA3) at position 36
```
The error message tells us that line 15252 contains an invalid byte.
```r
lines[15252]
```
```
[1] "the command of her beauty, and her \xa320,000, any one who could satisfy the"
```
We might wonder if there are other lines with invalid data. We can find
all such lines using the `utf8_valid` function:
```r
lines[!utf8_valid(lines)]
```
```
[1] "the command of her beauty, and her \xa320,000, any one who could satisfy the"
```
So, there are no other invalid lines.
The offending byte in line 15252 is displayed as `\xa3`, an escape code
for hexadecimal value 0xa3, decimal value 163. To understand why this
is invalid, we need to learn more about UTF-8 encoding.
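As a quick check (an illustrative aside, not part of the original vignette), R
can confirm the hexadecimal-to-decimal conversion and show the raw byte directly:
```r
strtoi("a3", base = 16L)       # 163
charToRaw(lines[15252])[36]    # a3, the invalid leading byte at position 36
```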
UTF-8
-----
### ASCII
The smallest unit of data transfer on modern computers is the byte, a sequence
of eight ones and zeros that can encode a number between 0 and 255
(hexadecimal 0x00 to 0xff). In the earliest character encodings, the numbers
from 0 to 127 (hexadecimal 0x00 to 0x7f) were standardized in an encoding
known as ASCII, the American Standard Code for Information Interchange.
Here are the characters corresponding to these codes:
```r
codes <- matrix(0:127, 8, 16, byrow = TRUE,
dimnames = list(0:7, c(0:9, letters[1:6])))
ascii <- apply(codes, c(1, 2), intToUtf8)
# replace control codes with ""
ascii["0", c(0:6, "e", "f")] <- ""
ascii["1",] <- ""
ascii["7", "f"] <- ""
utf8_print(ascii, quote = FALSE)
```
```
0 1 2 3 4 5 6 7 8 9 a b c d e f
0 \a \b \t \n \v \f \r
1
2 ! " # $ % & ' ( ) * + , - . /
3 0 1 2 3 4 5 6 7 8 9 : ; < = > ?
4 @ A B C D E F G H I J K L M N O
5 P Q R S T U V W X Y Z [ \\ ] ^ _
6 ` a b c d e f g h i j k l m n o
7 p q r s t u v w x y z { | } ~
```
The first 32 codes (the first two rows of the table) are special control
codes, the most common of which, `0x0a`, denotes a new line (`\n`). The special
code `0x00` often denotes the end of the input, and R does not allow this
value in character strings. Code `0x7f` corresponds to a "delete" control.
When you call `utf8_print`, it uses the low-level `utf8_encode` subroutine to
format control codes; they are rendered as `\uXXXX` for four hexadecimal digits
`XXXX` or as `\UXXXXYYYY` for eight hexadecimal digits `XXXXYYYY`:
```r
utf8_print(intToUtf8(1:0x0f), quote = FALSE)
```
```
[1] \u0001\u0002\u0003\u0004\u0005\u0006\a\b\t\n\v\f\r\u000e\u000f
```
Compare the `utf8_print` output with the output of the base R `print` function:
```r
print(intToUtf8(1:0x0f), quote = FALSE)
```
```
[1] \001\002\003\004\005\006\a\b\t\n\v\f\r\016\017
```
Base R formats control codes below 128 using octal escapes. There are some
other differences between the functions, which we will highlight below.
### Latin-1
ASCII works fine for most text in English, but not for other languages. The
Latin-1 encoding extends ASCII to Latin languages by assigning the numbers
128 to 255 (hexadecimal 0x80 to 0xff) to other common characters in Latin
languages. We can see these characters below.
```r
codes <- matrix(128:255, 8, 16, byrow = TRUE,
dimnames = list(c(8:9, letters[1:6]), c(0:9, letters[1:6])))
latin1 <- apply(codes, c(1, 2), intToUtf8)
# replace control codes with ""
latin1[c("8", "9"),] <- ""
utf8_print(latin1, quote = FALSE)
```
```
0 1 2 3 4 5 6 7 8 9 a b c d e f
8
9
a   ¡ ¢ £ ¤ ¥ ¦ § ¨ © ª « ¬   ® ¯
b ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿
c À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï
d Ð Ñ Ò Ó Ô Õ Ö × Ø Ù Ú Û Ü Ý Þ ß
e à á â ã ä å æ ç è é ê ë ì í î ï
f ð ñ ò ó ô õ ö ÷ ø ù ú û ü ý þ ÿ
```
As with ASCII, the first 32 numbers are control codes. The others are
characters common in Latin languages. Note that `0xa3`, the invalid byte
from _Mansfield Park_, corresponds to a pound sign in the Latin-1 encoding.
Given the context of the byte:
```r
lines[15252]
```
```
[1] "the command of her beauty, and her \xa320,000, any one who could satisfy the"
```
this is probably the right symbol. The text is probably encoded in Latin-1,
not UTF-8 or ASCII as claimed in the file.
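As a quick check (an illustrative aside, not part of the original vignette), we
can ask R which character sits at that code point:
```r
intToUtf8(0xa3)   # "£", the pound sign used in the novel
```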
If you run into an error while reading text that claims to be ASCII, it
is probably encoded as Latin-1. Note, however, that this is not the only
possibility, and there are many other encodings. The `iconvlist` function
will list the ones that R knows how to process:
```r
head(iconvlist(), n = 20)
```
```
[1] "437" "850" "852" "855"
[5] "857" "860" "861" "862"
[9] "863" "865" "866" "869"
[13] "ANSI_X3.4-1968" "ANSI_X3.4-1986" "ARABIC" "ARMSCII-8"
[17] "ASCII" "ASMO-708" "ATARI" "ATARIST"
```
### UTF-8
With only 256 unique values, a single byte is not enough to encode every
character. Multi-byte encodings allow for encoding more. UTF-8 encodes
characters using between 1 and 4 bytes each and allows for up to 1,112,064
character codes. Most of these codes are currently unassigned, but every year
the Unicode consortium meets and adds new characters. You can find a list of
all of the characters in the [Unicode Character Database][unicode-data]. A
listing of the Emoji characters is [available separately][emoji-data].
Say you want to input the Unicode character with hexadecimal code 0x2603.
You can do so in one of three ways:
```r
"\u2603" # with \u + 4 hex digits
```
```
[1] "โ"
```
```r
"\U00002603" # with \U + 8 hex digits
```
```
[1] "โ"
```
```r
intToUtf8(0x2603) # from an integer
```
```
[1] "โ"
```
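To see the multi-byte encoding at work (an illustrative aside, not part of the
original vignette), inspect the raw UTF-8 bytes of the character:
```r
charToRaw("\u2603")   # e2 98 83: one character, three bytes
```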
For characters above `0xffff`, the first method won't work. On Windows,
a bug in the current version of R (fixed in R-devel) prevents using the
second method.
When you try to print Unicode in R, the system will first try to determine
whether the code is printable or not. Non-printable codes include control
codes and unassigned codes. On Mac OS, R uses an outdated function to make
this determination, so it is unable to print most emoji. The `utf8_print`
function uses the most recent version (10.0.0) of the Unicode standard,
and will print all Unicode characters supported by your system:
```r
print(intToUtf8(0x1f600 + 0:79)) # base R
```
```
[1] "\U0001f600\U0001f601\U0001f602\U0001f603\U0001f604\U0001f605\U0001f606\U0001f607\U0001f608\U0001f609\U0001f60a\U0001f60b\U0001f60c\U0001f60d\U0001f60e\U0001f60f\U0001f610\U0001f611\U0001f612\U0001f613\U0001f614\U0001f615\U0001f616\U0001f617\U0001f618\U0001f619\U0001f61a\U0001f61b\U0001f61c\U0001f61d\U0001f61e\U0001f61f\U0001f620\U0001f621\U0001f622\U0001f623\U0001f624\U0001f625\U0001f626\U0001f627\U0001f628\U0001f629\U0001f62a\U0001f62b\U0001f62c\U0001f62d\U0001f62e\U0001f62f\U0001f630\U0001f631\U0001f632\U0001f633\U0001f634\U0001f635\U0001f636\U0001f637\U0001f638\U0001f639\U0001f63a\U0001f63b\U0001f63c\U0001f63d\U0001f63e\U0001f63f\U0001f640\U0001f641\U0001f642\U0001f643\U0001f644\U0001f645\U0001f646\U0001f647\U0001f648\U0001f649\U0001f64a\U0001f64b\U0001f64c\U0001f64d\U0001f64e\U0001f64f"
```
```r
utf8_print(intToUtf8(0x1f600 + 0:79)) # truncates to line width
```
```
[1] "๐โ๐โ๐โ๐โ๐โ๐
โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐ โ๐กโ๐ขโ๐ฃโโฆ"
```
```r
utf8_print(intToUtf8(0x1f600 + 0:79), chars = 500) # increase character limit
```
```
[1] "๐โ๐โ๐โ๐โ๐โ๐
โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐ โ๐กโ๐ขโ๐ฃโ๐คโ๐ฅโ๐ฆโ๐งโ๐จโ๐ฉโ๐ชโ๐ซโ๐ฌโ๐ญโ๐ฎโ๐ฏโ๐ฐโ๐ฑโ๐ฒโ๐ณโ๐ดโ๐ตโ๐ถโ๐ทโ๐ธโ๐นโ๐บโ๐ปโ๐ผโ๐ฝโ๐พโ๐ฟโ๐โ๐โ๐โ๐โ๐โ๐
โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ๐โ"
```
(Characters with codes above 0xffff, including most emoji, are not
supported on Windows.)
The *utf8* package provides the following utilities for validating, formatting,
and printing UTF-8 characters:
+ `as_utf8()` attempts to convert character data to UTF-8, throwing an
error if the data is invalid;
+ `utf8_valid()` tests whether character data is valid according to its
declared encoding;
+ `utf8_normalize()` converts text to Unicode composed normal form (NFC),
optionally applying case-folding and compatibility maps;
+ `utf8_encode()` encodes a character string, escaping all control
characters, so that it can be safely printed to the screen;
+ `utf8_format()` formats a character vector by truncating to a specified
character width limit or by left, right, or center justifying;
+ `utf8_print()` prints UTF-8 character data to the screen;
+ `utf8_width()` measures the display width of UTF-8 character strings
(many emoji and East Asian characters are twice as wide as other
characters); see the short check below.
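For instance (a small check added here for illustration, not from the original
vignette):
```r
utf8_width(c("a", "\u6771"))   # 1 2: the East Asian character occupies two columns
```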
The package does not provide a method to translate from another encoding to
UTF-8 as the `iconv()` function from base R already serves this purpose.
Translating to UTF-8
--------------------
Back to our original problem: getting the text of _Mansfield Park_ into R.
Our first attempt failed:
```r
corpus::term_stats(lines)
```
```
Error in corpus::term_stats(lines): argument entry 15252 is incorrectly marked as "UTF-8": invalid leading byte (0xA3) at position 36
```
We discovered a problem on line 15252:
```r
lines[15252]
```
```
[1] "the command of her beauty, and her \xa320,000, any one who could satisfy the"
```
The text is likely encoded in Latin-1, not UTF-8 (or ASCII) as we had
originally thought. We can test this by attempting to convert from
Latin-1 to UTF-8 with the `iconv()` function and inspecting the output:
```r
lines2 <- iconv(lines, "latin1", "UTF-8")
lines2[15252]
```
```
[1] "the command of her beauty, and her ยฃ20,000, any one who could satisfy the"
```
It worked! Now we can analyze our text.
```r
f <- corpus::text_filter(drop_punct = TRUE, drop = corpus::stopwords_en)
corpus::term_stats(lines2, f)
```
```
term count support
1 fanny 816 806
2 must 508 492
3 crawford 493 488
4 mr 482 466
5 much 459 450
6 miss 432 419
7 said 406 400
8 mrs 408 399
9 sir 372 366
10 edmund 364 364
11 one 370 358
12 think 349 346
13 now 333 331
14 might 324 320
15 time 310 307
16 little 309 300
17 nothing 301 291
18 well 299 286
19 thomas 288 285
20 good 280 275
⋮ (8450 rows total)
```
The *readtext* package
----------------------
If you need more than reading in a single text file, the [readtext][readtext]
package supports reading in text in a variety of file formats and encodings.
Beyond just plain text, that package can read in PDFs, Word documents, RTF,
and many other formats. (Unfortunately, that package currently fails when
trying to read in _Mansfield Park_; the authors are aware of the issue and are
working on a fix.)
Summary
-------
Text comes in a variety of encodings, and you cannot analyze a text without
first knowing its encoding. Many functions for reading in text assume that it
is encoded in UTF-8, but this assumption sometimes fails to hold. If you get
an error message reporting that your UTF-8 text is invalid, use `utf8_valid`
to find the offending texts. Try printing the data to the console before and
after using `iconv` to convert between character encodings. You can use
`utf8_print` to print UTF-8 characters that R refuses to display, including
emoji characters. For reading in exotic file formats like PDF or Word, try
the [readtext][readtext] package.
[emoji-data]: http://www.unicode.org/Public/emoji/5.0/emoji-data.txt
[gutenberg]: http://www.gutenberg.org
[readr]: https://github.com/tidyverse/readr#readme
[readtext]: https://github.com/quanteda/readtext
[spolsky2003]: https://www.joelonsoftware.com/2003/10/08/the-absolute-minimum-every-software-developer-absolutely-positively-must-know-about-unicode-and-character-sets-no-excuses/
[unicode]: http://unicode.org/charts/
[unicode-data]: http://www.unicode.org/Public/10.0.0/ucd/UnicodeData.txt
[utf8]: https://en.wikipedia.org/wiki/UTF-8
[windows1252]: https://en.wikipedia.org/wiki/Windows-1252
|
/scratch/gouwar.j/cran-all/cranData/utf8/inst/doc/utf8.Rmd
|
#' @title Build models
#' @description Models specified terms in model data against an existing model
#' and returns a clean, human-readable table summarizing the effects and
#' statistics for the newly generated model. This function is meant to simplify
#' fitting a large number of variables against a set of time-to-event data.
#' @param .object An object of a supported class. See S3 methods below.
#' @param ... Arguments passed to the appropriate S3 method.
#' @return An object of class \code{tbl_df} (tibble) summarizing the provided
#' object.
#' @seealso \code{\link{build_model.coxph}}
#' @export
build_model <- function (.object, ...) { UseMethod('build_model') }
#' @export
build_model.default <- function (.object, ...) {
stop('Object of class \'', class(.object), '\' not supported.')
}
#' @rdname build_model.coxph
#' @title Build Cox PH models
#' @description Models specified terms in model data against an existing model
#' and returns a clean, human-readable table summarizing the effects and
#' statistics for the newly generated model. This function greatly simplifies
#' fitting a large number of variables against a set of time-to-event data.
#' @param .object An object of class \code{\link[survival]{coxph}}.
#' @param ... One or more unquoted expressions separated by commas representing
#' columns in the model data.frame. May be specified using
#' \code{\link[tidyselect:select_helpers]{tidyselect helpers}}.
#' @param .mv A logical. Fit all terms into a single multivariable model. If left
#' FALSE, all terms are fit in their own univariate models.
#' @param .test A character. The name of a \code{\link[stats:add1]{stats::drop1}}
#' test to use with the model.
#' @param .col.test A logical. Append columns for the test and accompanying
#' statistic used to derive the p-value.
#' @param .level A double. The confidence level required.
#' @param .stat.pct.sign A logical. Paste a percent symbol after all reported
#' frequencies.
#' @param .digits An integer. The number of digits to round numbers to.
#' @param .p.digits An integer. The number of p-value digits to report. Note
#' that the p-value is still rounded to the number of digits specified in
#' \code{.digits}.
#' @return An object of class data.frame summarizing the provided object. If the
#' \code{tibble} package has been installed, a tibble will be returned.
#' @seealso \code{\link{build_model}}
#' @examples
#' library(survival)
#' library(dplyr)
#'
#' data_lung <- lung |>
#' mutate_at(vars(inst, status, sex), as.factor) |>
#' mutate(status = case_when(status == 1 ~ 0, status == 2 ~ 1))
#'
#' fit <- coxph(Surv(time, status) ~ 1, data = data_lung)
#'
#' # Create a univariate model for each variable
#' fit |> build_model(sex, age)
#' @export
build_model.coxph <- function (
.object,
...,
.mv = FALSE,
.test = c('LRT', 'Wald'),
.col.test = FALSE,
.level = 0.95,
.stat.pct.sign = TRUE,
.digits = 1,
.p.digits = 4
) {
# Reconcile .test argument
.test <- match.arg(.test)
# Retrieve data.frame from call
data <- eval(.object$call$data)
base_formula <- stats::formula(.object)
# Column selection
terms <- if (rlang::dots_n(...) > 0) {
tidyselect::eval_select(expr = rlang::expr(c(...)), data = data)
} else {
rlang::set_names(x = 1:length(names(data)), nm = names(data))
}
# Ignore unusable terms
terms <- terms[
!(names(terms) %in% all.vars(base_formula)) &
purrr::imap_lgl(
terms,
~ {
!((!is.numeric(data[[.x]]) & !is.factor(data[[.x]]) & !is.logical(data[[.x]])) |
all(is.na(data[[.x]])) |
(is.factor(data[[.x]]) & length(levels(data[[.x]])) < 2))
}
)
]
# Convert formula to character
base_formula <- deparse(base_formula)
# build_table factory with pre-specified defaults
build_table_ <- function (...) {
build_table(
...,
.test = .test,
.col.test = .col.test,
.level = .level,
.stat.pct.sign = .stat.pct.sign,
.digits = .digits,
.p.digits = .p.digits
)
}
# Refit model and build summary table
if (!.mv) {
# Univariable modelling
purrr::list_rbind(
purrr::imap(
terms,
~ {
build_table_(
.object = .refit_model(
x = .object,
formula = paste(base_formula, .y, sep = ' + ')
),
!! .y
)
}
)
)
} else {
# Multivariable modelling
build_table_(
.object = .refit_model(
x = .object,
formula = paste(
base_formula,
paste(names(terms), collapse = ' + '),
sep = ' + '
),
na.rm = TRUE
),
!!! names(terms)
)
}
}
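# For reference (an illustrative note, not part of the package source): with
# `.mv = TRUE` the selected terms would instead be fit together in a single
# multivariable model rather than separate univariate fits, e.g.
#   fit |> build_model(sex, age, .mv = TRUE)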
|
/scratch/gouwar.j/cran-all/cranData/utile.tables/R/build_model.R
|
#' @name build_row
#' @title Build summary rows
#' @description Summarize data into a data.frame with row(s). Includes optional
#' stratification and null hypothesis testing using a factor or logical
#' variable.
#' @param x A data.frame, numeric, factor, or logical. Data to summarize.
#' @param y A factor or logical. Data to optionally stratify \code{x} by.
#' @param label A character. A label for the summarized data.
#' @param label.stat A logical. Append the summary statistic used to the label.
#' @param inverse A logical. For logical data, report frequencies of the
#' \code{FALSE} values instead.
#' @param stat A character. Name of the summary statistic to use. Supported options
#' include the mean (\code{'mean'}) and median (\code{'median'}) for continuous data.
#' @param stat.pct.sign A logical. Paste a percentage symbol with each reported frequency.
#' @param col.overall A logical. Append a column with the statistic for all data.
#' If \code{y} is not specified, this parameter is ignored.
#' @param col.missing A logical. Append a column with counts of missing data.
#' @param test A character. Name of statistical test to compare groups.
#' Supported options: [continuous data] ANOVA linear model (\code{'anova'}),
#' Kruskal-Wallis (\code{'kruskal'}), and Wilcoxon rank sum (\code{'wilcoxon'}) tests;
#' [nominal data] Pearson's Chi-squared Test (\code{'chisq'}) and Fisher's Exact Test
#' (\code{'fisher'}).
#' @param test.simulate.p A logical. Whether to use Monte Carlo simulation of
#' the p-value when testing nominal data.
#' @param col.test A logical. Append a column with the name of the statistical
#' test used.
#' @param digits An integer. Number of digits to round to.
#' @param p.digits An integer. Number of p-value digits to report.
#' @param ... Arguments passed to the appropriate S3 method.
#' @return An object of class \code{tbl_df} (tibble) summarizing the provided
#' data.
#' @examples
#' strata <- as.factor(datasets::mtcars$cyl)
#'
#' # Create a "count" row from a data.frame for a factor
#' build_row(x = datasets::mtcars, y = strata)
#'
#' # Create a row summarizing a numeric by a factor
#' build_row(label = 'MPG', x = as.numeric(datasets::mtcars$mpg), y = strata)
#'
#' # Create a row summarizing a logical by a factor
#' build_row(label = 'VS', x = as.logical(datasets::mtcars$vs), y = strata)
#'
#' # Create a row summarizing a factor by a factor
#' build_row(label = 'Carb', x = as.factor(datasets::mtcars$carb), y = strata)
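#'
#' # A sketch of the documented `inverse` and `col.missing` arguments: report
#' # FALSE frequencies for a logical and append a missing-data column
#' build_row(
#'   label = 'VS', x = as.logical(datasets::mtcars$vs), y = strata,
#'   inverse = TRUE, col.missing = TRUE
#' )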
#' @export
build_row <- function (
x,
...
) UseMethod('build_row')
#' @export
build_row.default <- function (x, label = NULL, ...) {
  warning('\'', label, '\' <', class(x), '> not supported for summary.')
NULL
}
#' @rdname build_row
#' @export
build_row.data.frame <- function (
x,
y = NA_real_,
label = NULL,
label.stat = TRUE,
stat.pct.sign = FALSE,
col.overall = TRUE,
col.missing = FALSE,
col.test = FALSE,
digits = 1,
...
) {
# Statistic Function
paste_stat_ <- function (...) {
utile.tools::paste_freq(
...,
na.rm = FALSE,
percent.sign = stat.pct.sign,
digits = digits
)
}
# Retrieve by variable levels
y_levels <- .get_levels(y)
# Count rows
x_cnt <- nrow(x)
# Build row
cols <- list()
# Variable label
cols$Variable <- paste(c(label, if (label.stat) 'n(%)'), collapse = ', ')
# Overall count
if (col.overall || length(y_levels) == 0) {
cols$Overall <- as.character(x_cnt)
}
# Frequencies by level
if (length(y_levels) > 0) {
cols <- c(
cols,
purrr::map(
y_levels,
function (.y) {
paste_stat_(
x = nrow(x[y == .y & !is.na(y),]),
y = x_cnt
)
}
)
)
}
# Missing
if (col.missing) cols$Missing <- paste_stat_(x = y[is.na(y)], y = y)
# Hypothesis testing columns
if (length(y_levels) > 1) cols[c('p', if (col.test) 'Test')] <- ''
# Return converted tibble
dplyr::as_tibble(cols)
}
#' @rdname build_row
#' @export
build_row.numeric <- function (
x,
y = NA_real_,
label = NULL,
label.stat = TRUE,
stat = c('mean', 'median'),
stat.pct.sign = FALSE,
col.overall = TRUE,
col.missing = FALSE,
test = c('anova', 'kruskal', 'wilcoxon'),
col.test = FALSE,
digits = 1,
p.digits = 4,
...
) {
# Check arguments
stat <- match.arg(stat)
test <- match.arg(test)
# Statistic functions
paste_stat_ <- function (...) {
if (stat == 'median') utile.tools::paste_median(..., digits = digits)
else utile.tools::paste_mean(..., digits = digits)
}
paste_freq_ <- function (...) {
utile.tools::paste_freq(
...,
na.rm = FALSE,
percent.sign = stat.pct.sign,
digits = digits
)
}
# Retrieve by variable levels
y_levels <- .get_levels(y)
# Create column object
cols <- list()
# Variable label +/- statistic name
cols$Variable <- paste(
c(
label,
if (label.stat) {
if (stat == 'median') 'median[IQR]'
else 'mean\u00B1SD'
}
),
collapse = ', '
)
# Summary statistic: Overall
if (col.overall | length(y_levels) == 0) cols$Overall <- paste_stat_(x = x)
# Summary statistic: By strata
if (length(y_levels) > 0) {
cols <- c(
cols,
purrr::map_chr(
y_levels,
function (.y) paste_stat_(x = x[!is.na(x) & y %in% .y])
)
)
}
# Missing
if (col.missing) {
cols$Missing <- paste_freq_(x = sum(is.na(x)), y = length(x))
}
# Hypothesis testing
if (length(y_levels) > 1) {
# Statistical test
test <- utile.tools::test_hypothesis(
x = x,
y = y,
test = test,
digits = digits,
p.digits = p.digits
)
    # Append test data
cols$p <- test$p
if (col.test) cols$Test <- test$test
}
# Return converted tibble
dplyr::as_tibble(cols)
}
#' @rdname build_row
#' @export
build_row.logical <- function (
x,
y = NA_real_,
label = NULL,
label.stat = TRUE,
inverse = FALSE,
stat.pct.sign = FALSE,
col.overall = TRUE,
col.missing = FALSE,
test = c('chisq', 'fisher'),
test.simulate.p = FALSE,
col.test = FALSE,
digits = 1,
p.digits = 4,
...
) {
# Check arguments
test <- match.arg(test)
# Statistic factory
paste_stat_ <- function (...) {
utile.tools::paste_freq(
...,
na.rm = FALSE,
percent.sign = stat.pct.sign,
digits = digits
)
}
# Retrieve by variable levels
y_levels <- .get_levels(y)
# Set inverse, if applicable
if (inverse) x <- !x
# Create column object
cols <- list()
# Variable label +/- statistic name
cols$Variable <- paste(
c(
label,
if (inverse) 'no',
if (label.stat) 'n(%)'
),
collapse = ', '
)
# Overall statistic
if (col.overall || length(y_levels) == 0) {
cols$Overall <- paste_stat_(x = x[x & !is.na(x)], y = x)
}
# Strata statistics
if (length(y_levels) > 0) {
cols <- c(
cols,
purrr::map(
y_levels,
function (.y) {
paste_stat_(
x = x[(x & !is.na(x)) & (y %in% .y)],
y = x[y %in% .y]
)
}
)
)
}
# Missing
if (col.missing) cols$Missing <- paste_stat_(x = x[is.na(x)], y = x)
# Hypothesis testing
if (length(y_levels) > 1) {
# Statistical test
test <- utile.tools::test_hypothesis(
x = x,
y = y,
test = test,
simulate.p = test.simulate.p,
digits = digits,
p.digits = p.digits
)
    # Append test data
cols$p <- test$p
if (col.test) cols$Test <- test$test
}
# Return converted tibble
dplyr::as_tibble(cols)
}
#' @rdname build_row
#' @export
build_row.factor <- function (
x,
y = NA_real_,
label = NULL,
label.stat = TRUE,
stat.pct.sign = FALSE,
col.overall = TRUE,
col.missing = FALSE,
test = c('chisq', 'fisher'),
test.simulate.p = FALSE,
col.test = FALSE,
digits = 1,
p.digits = 4,
...
) {
# Check arguments
test <- match.arg(test)
# Statistic function
paste_stat_ <- function (...) {
utile.tools::paste_freq(
...,
na.rm = FALSE,
percent.sign = stat.pct.sign,
digits = digits
)
}
# Retrieve y variable levels
y_levels <- .get_levels(y)
# Identify x levels, make any NA explicit
x_levels <- .get_levels(x)
if (any(is.na(x))) x_levels <- c(x_levels, "Missing" = NA_character_)
level_fill <- rep('', length(x_levels))
# Create column object
cols <- list()
# Variable labels
cols$Variable <- c(
paste(c(label, if (label.stat) 'n(%)'), collapse = ', '),
paste0(' ', names(x_levels))
)
# Overall summary statistic
if (col.overall || length(y_levels) == 0) {
cols$Overall <- c(
'',
purrr::map_chr(x_levels, function (.x) {
paste_stat_(x = x[x %in% .x], y = x)
})
)
}
# Strata statistics
if (length(y_levels) > 0) {
cols <- c(
cols,
purrr::map(
y_levels,
function (.y) {
c(
'',
purrr::map_chr(
x_levels,
function (.x) {
paste_stat_(
x = x[x %in% .x & y %in% .y],
y = x[y %in% .y]
)
}
)
)
}
)
)
}
# Show missing count
if (col.missing) {
cols$Missing <- c(paste_stat_(x = x[is.na(x)], y = x), level_fill)
}
# Testing with by variable
if (length(y_levels) > 1) {
# Statistical test
test <- utile.tools::test_hypothesis(
x = x,
y = y,
test = test,
simulate.p = test.simulate.p,
digits = digits,
p.digits = p.digits
)
    # Append test data
cols$p <- c(test$p, level_fill)
if (col.test) cols$Test <- c(test$test, level_fill)
}
# Return converted tibble
dplyr::as_tibble(cols)
}
|
/scratch/gouwar.j/cran-all/cranData/utile.tables/R/build_row.R
|
#' @title Build summary tables
#' @description Takes a data or model object and summarizes it into a ready to
#' export, human-readable summary table.
#' @param .object An object of a supported class. See S3 methods below.
#' @param ... Arguments passed to the appropriate S3 method.
#' @return An object of class tbl_df (tibble) summarizing the provided object.
#' @seealso \code{\link{build_table.data.frame}},
#' \code{\link{build_table.coxph}},
#' \code{\link{build_table.lm}}
#' @export
build_table <- function (.object, ...) { UseMethod('build_table') }
#' @export
build_table.default <- function (.object, ...) {
  stop('Object of class \'', class(.object), '\' not supported.')
}
#' @title Build summary tables from data.frame objects
#' @description Takes a data.frame object and summarizes the columns into a
#' ready to export, human-readable summary table. Capable of stratifying data
#' and performing appropriate hypothesis testing.
#' @param .object A data.frame.
#' @param ... One or more unquoted expressions separated by commas representing
#' columns in the data.frame. May be specified using
#' \code{\link[tidyselect:select_helpers]{tidyselect helpers}}. If left empty,
#' all columns are summarized.
#' @param .by An unquoted expression. The data column to stratify the
#' summary by.
#' @param .inverse A logical. For logical data, report the frequency
#' of FALSE values instead of TRUE values.
#' @param .label.stat A logical. Append the type of summary statistic
#' to the column label.
#' @param .stat A character. Name of the summary statistic to use for numeric data.
#' Supported options include the mean ('mean') and median ('median').
#' @param .stat.pct.sign A logical. Paste a percent symbol after all
#' reported frequencies.
#' @param .col.overall A logical. Append a column with the statistic for all data.
#' If \code{.by} is not specified, this parameter is ignored.
#' @param .col.missing A logical. Append a column listing the
#' frequencies of missing data for each row.
#' @param .test.continuous A character. Name of statistical test to compare groups.
#' Supported options include ANOVA linear model ('anova'), Kruskal-Wallis ('kruskal'),
#' and Wilcoxon rank sum ('wilcoxon') tests.
#' @param .test.nominal A character. Name of statistical test to compare groups.
#' Supported options include Pearson's Chi-squared Test ('chisq') and Fisher's
#' Exact Test ('fisher').
#' @param .test.simulate.p A logical. Whether to use Monte Carlo simulation of
#' the p-value when testing nominal data.
#' @param .col.test A logical. Append a column containing the test
#' each p-value was derived from.
#' @param .digits An integer. The number of digits to round numbers to.
#' @param .p.digits An integer. The number of p-value digits to report.
#' @return An object of class \code{tbl_df} (tibble) summarizing the provided
#' object.
#' @seealso \code{\link{build_table}}
#' @examples
#' # Sample data
#' df <- data.frame(
#' strata = factor(sample(letters[2:3], 1000, replace = TRUE)),
#' numeric = sample(1:100, 1000, replace = TRUE),
#' numeric2 = sample(1:100, 1000, replace = TRUE),
#' factor = factor(sample(1:5, 1000, replace = TRUE)),
#' logical = sample(c(TRUE,FALSE), 1000, replace = TRUE)
#' )
#'
#' # Summarize all columns
#' build_table(df, .by = strata)
#'
#' # Summarize & rename selected columns
#' build_table(df, numeric2, factor, .by = strata)
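#'
#' # A sketch of optional arguments documented above: median summaries for
#' # numeric columns and a column naming the statistical test used
#' build_table(df, .by = strata, .stat = 'median', .col.test = TRUE)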
#' @export
build_table.data.frame <- function (
.object,
...,
.by,
.inverse = FALSE,
.label.stat = TRUE,
.stat = c('mean', 'median'),
.stat.pct.sign = FALSE,
.col.overall = TRUE,
.col.missing = FALSE,
.test.continuous = c('anova', 'kruskal', 'wilcoxon'),
.test.nominal = c('chisq', 'fisher'),
.test.simulate.p = FALSE,
.col.test = FALSE,
.digits = 1,
.p.digits = 4
) {
# Match arguments
.stat <- match.arg(.stat)
.test.continuous <- match.arg(.test.continuous)
.test.nominal <- match.arg(.test.nominal)
# Column selection
cols <- if (rlang::dots_n(...) > 0) {
tidyselect::eval_select(rlang::expr(c(...)), data = .object)
} else {
rlang::set_names(1:length(names(.object)), names(.object))
}
# By variable selection and validation
by <- if (!missing(.by) & rlang::dots_n(.by) == 1) {
tidyselect::eval_select(rlang::enexpr(.by), data = .object)
}
if (length(by) > 0) {
if (is.logical(.object[[by]]) | is.factor(.object[[by]])) {
# Cast logicals to factors
if (is.logical(.object[[by]])) .object[[by]] <- as.factor(.object[[by]])
# Explicit NA's
.object[[by]] <- .explicit_na(.object[[by]])
# Prevent summarization
cols <- cols[!(cols %in% by)]
} else by <- integer()
}
# Pre-specify row configuration
build_row_ <- function (x, ...) {
build_row(
x = x,
y = if (length(by) == 1) .object[[by]],
...,
label.stat = .label.stat,
inverse = .inverse,
stat = .stat,
stat.pct.sign = .stat.pct.sign,
col.overall = .col.overall,
col.missing = .col.missing,
test.simulate.p = .test.simulate.p,
col.test = .col.test,
digits = .digits,
p.digits = .p.digits
)
}
# Create table
table <- dplyr::bind_rows(
build_row_(x = .object),
purrr::list_rbind(
purrr::imap(
cols, ~ {
build_row_(
x = .object[[.x]],
label = .y,
test = if (inherits(.object[[.x]], c('factor', 'logical'))) .test.nominal
else .test.continuous
)
}
)
)
)
# Replace NA's & return table
.replace_na(table)
}
#' @title Build summary tables from coxph model objects
#' @description Takes a Cox PH model object and summarizes it into a ready to
#' export, human-readable summary table.
#' @param .object An object of class \code{\link[survival]{coxph}}.
#' @param ... One or more unquoted expressions separated by commas representing
#' terms in the model. May be specified using
#' \code{\link[tidyselect:select_helpers]{tidyselect helpers}}. If left empty,
#' all terms are summarized.
#' @param .test A character. The name of the
#' \code{\link[stats:add1]{stats::drop1}} test to use with the model. Supported
#' tests include Wald's Test ('Wald') and Likelihood Ratio Test ('LRT').
#' @param .col.test A logical. Append columns for the test and accompanying
#' statistic used to derive the p-value.
#' @param .level A double. The confidence level required.
#' @param .stat.pct.sign A logical. Paste a percent symbol after all reported
#' frequencies.
#' @param .digits An integer. The number of digits to round numbers to.
#' @param .p.digits An integer. The number of p-value digits to report. Note
#' that the p-value is still rounded to the number of digits specified in
#' \code{.digits}.
#' @return An object of class \code{tbl_df} (tibble) summarizing the provided
#' object.
#' @seealso \code{\link{build_table}}
#' @examples
#' library(survival)
#' library(dplyr)
#'
#' data_lung <- lung |>
#' mutate_at(vars(inst, status, sex), as.factor) |>
#' mutate(status = case_when(status == 1 ~ 0, status == 2 ~ 1))
#'
#' fit <- coxph(Surv(time, status) ~ sex + meal.cal, data = data_lung)
#'
#' fit |> build_table(Sex = sex, Calories = meal.cal, .test = 'LRT')
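#'
#' # A sketch appending the test and statistic columns (documented `.col.test`)
#' fit |> build_table(.col.test = TRUE)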
#' @export
build_table.coxph <- function (
.object,
...,
.test = c('LRT', 'Wald'),
.col.test = FALSE,
.level = 0.95,
.stat.pct.sign = TRUE,
.digits = 1,
.p.digits = 4
) {
# Column selection
terms <- if (rlang::dots_n(...) > 0) {
tidyselect::eval_select(expr = rlang::expr(c(...)), data = .object$assign)
} else {
rlang::set_names(
x = 1:length(names(.object$assign)),
nm = names(.object$assign)
)
}
# Filter assignments and map level names
assignments <- purrr::imap(
.object$assign[terms], ~ {
if (.y %in% names(.object$xlevels)) {
rlang::set_names(x = c(NA_real_, .x), nm = .object$xlevels[[.y]])
} else .x
}
)
names(assignments) <- names(terms)
# Check test argument
prefer.tests <- '.test' %in% names(match.call())
.test <- match.arg(.test)
# Tabulate & format estimates
estimates <- as.data.frame(
cbind(
summary(.object)$coefficients,
stats::confint(.object, level = .level)
)
)
estimates[,6:7] <- exp(estimates[,6:7])
estimates[,-5] <- round(estimates[,-5], digits = .digits)
estimates[,5] <- format.pval(
pv = estimates[,5],
digits = .digits,
eps = 0.0001,
nsmall = .p.digits,
    scientific = FALSE,
na.form = ''
)
estimates[] <- lapply(estimates, as.character)
# Tabulate & format special tests
tests <- as.data.frame(
stats::drop1(
if (any(is.na(eval(.object$call$data)[all.vars(stats::formula(.object))]))) {
.refit_model(x = .object, na.rm = TRUE)
} else .object,
test = 'Chisq'
)
)[terms + 1, 3:4]
tests[,-2] <- round(tests[,-2], digits = .digits)
tests[,2] <- format.pval(
pv = tests[,2],
digits = .digits,
eps = 0.0001,
nsmall = .p.digits,
    scientific = FALSE,
na.form = ''
)
tests[] <- lapply(tests, as.character)
# Generate table
table <- purrr::imap(
assignments,
function (w, x) {
single_level <- (has_levels <- !is.null(names(w))) & length(w) == 2
if (single_level) w <- w[-1] # Ignore reference level of a 2-level
# Create column object
cols <- list()
# Variable name
cols$Variable <- if (single_level) paste0(x, ', ', names(w)) else x
# Number of observations
cols$n <- as.character(.object$n)
# Number of events
cols$Event <- as.character(.object$nevent)
# Effect estimate & CI
      cols$`HR [CI]` <- if (!has_levels | single_level) {
paste(
estimates[w, 2],
if (all(!is.na(estimates[w, 6:7]))) {
paste0('[', estimates[w, 6], '-', estimates[w, 7], ']')
} else '[NA]'
)
} else ''
# p-value
cols$p <- if ((prefer.tests & .test == 'LRT') | (has_levels & !single_level)) {
tests[match(x, names(assignments)), 2]
} else estimates[w, 5]
# Report test
if (.col.test) {
cols$Test <-
if ((prefer.tests & .test == 'LRT') | (has_levels & !single_level)) {
'LRT'
} else 'Wald'
cols$Statistic <-
if ((prefer.tests & .test == 'LRT') | (has_levels & !single_level)) {
tests[match(x, names(assignments)), 1]
} else estimates[w, 4]
}
# Generate factor level rows
if (has_levels & !single_level) {
# Level names (incl. ref)
cols$Variable <- c(cols$Variable, paste(' ', names(w)))
# Number of observations
cols$n <- c(cols$n, rep('', length(w)))
# Number of events
cols$Event <- c(cols$Event, rep('', length(w)))
# Effect estimate and CI
cols$`HR [CI]` <- c(
cols$`HR [CI]`,
'Reference',
paste(
estimates[w[-1], 2],
ifelse(
!is.na(estimates[w[-1], 6]) & !is.na(estimates[w[-1], 7]),
paste0('[', estimates[w[-1], 6], '-', estimates[w[-1], 7], ']'),
'[NA]'
)
)
)
# Level p-value
cols$p <- c(cols$p, '', estimates[w[-1], 5])
# Report test
if (.col.test) {
cols$Test <- c(cols$Test, '', rep('Wald', length(w[-1])))
cols$Statistic <- c(cols$Statistic, '', estimates[w[-1], 4])
}
}
# Return as df (better performance)
dplyr::as_tibble(cols)
}
)
  # Concatenate rows
table <- purrr::list_rbind(table)
# Replace NA's
.replace_na(table)
}
#' @title Build summary tables from lm model objects
#' @description Takes a linear regression model object and summarizes it into a
#' ready to export, human-readable summary table.
#' @param .object An object of class \code{\link[stats]{lm}}.
#' @param ... One or more unquoted expressions separated by commas representing
#' terms in the model. May be specified using
#' \code{\link[tidyselect:select_helpers]{tidyselect helpers}}. If left empty,
#' all terms are summarized.
#' @param .test A character. The name of the
#' \code{\link[stats:add1]{stats::drop1}} test to use with the model. Supported
#' options include the F-Test ('F') and Chi-squared Test ('Chisq').
#' @param .col.test A logical. Append columns for the test and accompanying
#' statistic used to derive the p-value.
#' @param .level A double. The confidence level required.
#' @param .stat.pct.sign A logical. Paste a percent symbol after all reported
#' frequencies.
#' @param .digits An integer. The number of digits to round numbers to.
#' @param .p.digits An integer. The number of p-value digits to report. Note
#' that the p-value is still rounded to the number of digits specified in
#' \code{.digits}.
#' @return An object of class \code{tbl_df} (tibble) summarizing the provided
#' object.
#' @seealso \code{\link{build_table}}
#' @examples
#' library(dplyr)
#'
#' data_mtcars <- datasets::mtcars |>
#' mutate_at(vars('vs', 'am'), as.logical) |>
#' mutate_at(vars('gear', 'carb', 'cyl'), as.factor)
#'
#' fit <- lm(mpg ~ vs + drat + cyl, data = data_mtcars)
#'
#' fit |> build_table()
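#'
#' # A sketch with a 90% confidence level (documented `.level` argument)
#' fit |> build_table(.level = 0.90)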
#' @export
build_table.lm <- function (
.object,
...,
.test = c('F', 'Chisq'),
.col.test = FALSE,
.level = 0.95,
.stat.pct.sign = TRUE,
.digits = 1,
.p.digits = 4
) {
# Assignments
assignments <- .create_assigns(
x = c('(Intercept)', attr(terms(.object), 'term.labels')),
y = .object$assign
)
# Column selection
terms <- if (rlang::dots_n(...) > 0) {
tidyselect::eval_select(expr = rlang::expr(c(...)), data = assignments)
} else {
rlang::set_names(
x = 1:length(names(assignments)),
nm = names(assignments)
)
}
# Filter assignments and map level names
assignments <- purrr::imap(
assignments[terms], ~ {
if (.y %in% names(.object$xlevels)) {
rlang::set_names(x = c(NA_real_, .x), nm = .object$xlevels[[.y]])
} else .x
}
)
names(assignments) <- names(terms)
# Check test argument
prefer.tests <- '.test' %in% names(match.call())
.test <- match.arg(.test)
# Tabulate & format estimates
estimates <- as.data.frame(
cbind(
summary(.object)$coefficients,
stats::confint(.object, level = .level)
)
)
estimates[,-4] <- round(estimates[,-4], digits = .digits)
estimates[,4] <- format.pval(
pv = estimates[,4],
digits = .digits,
eps = 0.0001,
nsmall = .p.digits,
    scientific = FALSE,
na.form = ''
)
estimates[] <- lapply(estimates, as.character)
# Tabulate & format special tests
tests <- as.data.frame(
stats::drop1(
.object,
test = .test
)
  )[terms, if (.test == 'Chisq') 5 else 5:6]
if (.test == 'F') tests[,-2] <- round(tests[,-2], digits = .digits)
else tests <- cbind(stat = '', tests)
tests[,2] <- format.pval(
pv = tests[,2],
digits = .digits,
eps = 0.0001,
nsmall = .p.digits,
    scientific = FALSE,
na.form = ''
)
tests[] <- lapply(tests, as.character)
# Generate table
table <- purrr::imap(
assignments,
# Map assignments
function (w, x) {
single_level <- (has_levels <- !is.null(names(w))) & length(w) == 2
if (single_level) w <- w[-1] # Ignore reference level of a 2-level
cols <- list()
# Variable name
cols$Variable <- if (single_level) paste0(x, ', ', names(w)) else x
# Effect estimate & CI
      cols$`Estimate [CI]` <- if (!has_levels | single_level) {
paste(
estimates[w, 1],
if (all(!is.na(estimates[w, 5:6]))) {
paste0('[', estimates[w, 5], '-', estimates[w, 6], ']')
} else '[NA]'
)
} else ''
# p-value
cols$p <- if ((prefer.tests & x != '(Intercept)') | (has_levels & !single_level)) {
tests[match(x, names(assignments)), 2]
} else estimates[w, 4]
# Report test
if (.col.test) {
cols$Test <-
if ((prefer.tests & x != '(Intercept)') | (has_levels & !single_level)) {
if(.test == 'F') 'F-stat' else .test
} else 't-value'
cols$Statistic <-
if ((prefer.tests & x != '(Intercept)') | (has_levels & !single_level)) {
tests[match(x, names(assignments)), 1]
} else estimates[w, 3]
}
# Generate factor level rows
if (has_levels & !single_level) {
# Level names
cols$Variable <- c(
cols$Variable,
paste(' ', names(w))
)
# Effect estimate and CI
      cols$`Estimate [CI]` <- c(
        cols$`Estimate [CI]`,
'Reference',
paste(
estimates[w[-1], 1],
ifelse(
!is.na(estimates[w[-1], 5]) & !is.na(estimates[w[-1], 6]),
paste0('[', estimates[w[-1], 5], '-', estimates[w[-1], 6], ']'),
'[NA]'
)
)
)
# Level p-value
cols$p <- c(cols$p, '', estimates[w[-1], 4])
# Report test
if (.col.test) {
cols$Test <- c(cols$Test, '', rep('t-value', length(w[-1])))
cols$Statistic <- c(cols$Statistic, '', estimates[w[-1], 3])
}
}
# Return data
dplyr::as_tibble(cols)
}
)
# Concatenate rows
table <- purrr::list_rbind(table)
# Replace NA's & return
.replace_na(table)
}
|
/scratch/gouwar.j/cran-all/cranData/utile.tables/R/build_table.R
|
#' @aliases select_helpers
#' @importFrom tidyselect contains
#' @export
tidyselect::contains
#' @importFrom tidyselect ends_with
#' @export
tidyselect::ends_with
#' @importFrom tidyselect everything
#' @export
tidyselect::everything
#' @importFrom tidyselect matches
#' @export
tidyselect::matches
#' @importFrom tidyselect num_range
#' @export
tidyselect::num_range
#' @importFrom tidyselect one_of
#' @export
tidyselect::one_of
#' @importFrom tidyselect starts_with
#' @export
tidyselect::starts_with
#' @importFrom tidyselect last_col
#' @export
tidyselect::last_col
#' @importFrom tidyselect any_of
#' @export
tidyselect::any_of
#' @importFrom tidyselect all_of
#' @export
tidyselect::all_of
|
/scratch/gouwar.j/cran-all/cranData/utile.tables/R/reexports.R
|
# Make NA's human readable
.replace_na <- function (x) replace(x, is.na(x), '')
# Make factor NA's explicit
.explicit_na <- function (x, y = 'Missing') {
x_na <- is.na(x)
lvl_na <- is.na(levels(x))
if(any(lvl_na)) levels(x)[lvl_na] <- y
if (any(x_na)) {
levels(x) <- c(levels(x), y)
x[x_na] <- y
}
x
}
# Get levels of factor, ordinal, or logical
.get_levels <- function (x) {
lvls <-
if (inherits(x, c('factor', 'ordered'))) levels(x)
else if (inherits(x, 'logical')) {
c(TRUE, FALSE)[c(TRUE, FALSE) %in% unique(x)]
} else character()
rlang::set_names(lvls)
}
# Create assignments for a model
.create_assigns <- function (x, y) {
rlang::set_names(x = purrr::imap(x, ~ which(.y - 1 == y)), nm = x)
}
# Clean data and formula environment
.refit_model <- function (x, formula, na.rm = FALSE) {
# Get model call
call <- x$call
# Reset formula environment
call$formula <- stats::as.formula(
if (!missing(formula)) formula
else call$formula
)
# Remove NA's from data
if (na.rm) {
call$data <- stats::na.omit(eval(call$data)[all.vars(call$formula)])
}
# Refit and return
eval(call, parent.frame())
}
|
/scratch/gouwar.j/cran-all/cranData/utile.tables/R/utils.R
|
#' @title Calculate durations of time
#' @description
#' Calculates the duration of time between two provided date objects.
#' Supports vectorized data (i.e. \code{\link[dplyr:mutate]{dplyr::mutate()}}).
#' @param x A date or datetime. The start date(s)/timestamp(s).
#' @param y A date or datetime. The end date(s)/timestamp(s).
#' @param units A character. Units of the returned duration
#' (e.g. 'seconds', 'days', 'years').
#' @return If 'units' specified, returns numeric. If 'units' unspecified,
#' returns an object of class '\code{\link[lubridate:Duration-class]{Duration}}'.
#' @note Supports multiple calculations against a single time point (i.e.
#' multiple start dates with a single end date). Note that start and end
#' must otherwise be of the same length.
#'
#' When the start and end dates are of different types (i.e. x = date,
#' y = datetime), a lossy cast will be performed which strips the datetime data
#' of its time components. This avoids assuming that additional time has
#' passed, as would otherwise occur when casting the date data to datetime.
#' @examples
#' library(lubridate)
#' library(purrr)
#'
#' # Dates -> duration in years
#' calc_duration(
#' x = mdy(map_chr(sample(1:9, 5), ~ paste0('01/01/199', .x))),
#' y = mdy(map_chr(sample(1:9, 5), ~ paste0('01/01/200', .x))),
#' units = 'years'
#' )
#'
#' # datetimes -> durations
#' calc_duration(
#' x = mdy_hm(map_chr(sample(1:9, 5), ~ paste0('01/01/199', .x, ' 1', .x, ':00'))),
#' y = mdy_hm(map_chr(sample(1:9, 5), ~ paste0('01/01/200', .x, ' 0', .x, ':00')))
#' )
#'
#' # Mixed date classes -> durations
#' calc_duration(
#' x = mdy(map_chr(sample(1:9, 5), ~ paste0('01/01/199', .x))),
#' y = mdy_hm(map_chr(sample(1:9, 5), ~ paste0('01/01/200', .x, ' 0', .x, ':00')))
#' )
#' @export
calc_duration <- function (x, y, units = NULL) {
# Input type check
if (
!all(lubridate::is.timepoint(x), na.rm = TRUE) |
!all(lubridate::is.timepoint(y), na.rm = TRUE)
) {
stop('\'x\' and/or \'y\' not <date> or <datetime>.')
}
# Recycle single timepoint or throw error for mismatched sizes
common_dates <- vctrs::vec_recycle_common(x = x, y = y)
# Remove timestamp if one variable is a Date object
if (any(class(x) != class(y), na.rm = TRUE)) {
common_dates <- purrr::map(common_dates, as.Date)
}
  # Calculate duration (using the recycled/coerced inputs)
  duration <- lubridate::as.duration(
    lubridate::interval(common_dates$x, common_dates$y)
  )
# Return data as appropriate type
if (!is.null(units)) as.numeric(duration, units)
else duration
}
#' @title Calculate data chunk indices
#' @description
#' Calculates chunk indices of a data object
#' for a given chunk size (number of items per chunk).
#' @param x A data frame or vector.
#' @param size An integer. The number of items (e.g. rows in a tibble)
#' that make up a given chunk. Must be a positive integer. Caps out at data
#' maximum.
#' @param reverse A logical. Calculate chunks from back to front.
#' @return An iterable list of row indices for each chunk of data.
#' @examples
#' # Create chunk map for a data frame
#' chunks <- calc_chunks(mtcars, size = 6)
#'
#' # Iterate through chunks of data
#' for (chunk in chunks) print(paste0(rownames(mtcars[chunk,]), collapse = ', '))
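#'
#' # A sketch of back-to-front chunking with the documented `reverse` argument
#' calc_chunks(mtcars, size = 6, reverse = TRUE)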
#' @export
calc_chunks <- function (x, size = 10, reverse = FALSE) {
# Hard stops
if (!is.data.frame(x) & !is.vector(x))
stop('\'x\' not a <data.frame> or vector.')
if (!is.numeric(size) | size < 1)
stop('\'size\' not <numeric> or less than 1.')
# Variables
item_cnt <- vctrs::vec_size(x)
if (size > item_cnt) size <- item_cnt
# Calculate and return chunks
if (!reverse) purrr::map(1:ceiling(item_cnt / size), ~ ((.x-1)*size+1):min(item_cnt, (.x*size)))
else purrr::map(1:ceiling(item_cnt / size), ~ (item_cnt-(.x-1)*size):max(1, item_cnt-(.x*size)+1))
}
|
/scratch/gouwar.j/cran-all/cranData/utile.tools/R/calc.R
|
#' @title Break data into chunks
#' @description
#' Creates a factory function which returns a different chunk
#' of a given data object with each function call.
#' @param x A data frame or vector.
#' @param size An integer. The number of items (e.g. rows in a tibble)
#' that make up a given chunk. Must be a positive integer.
#' @param reverse A logical. Calculate chunks from back to front.
#' @return A factory function which returns a chunk of data from the provided
#' object with each call. Once all data has been returned, the function
#' returns NULL perpetually.
#' @examples
#' # Create chunk factory function
#' chunked_data <- chunk_data_(mtcars, size = 6)
#'
#' # Chunk #1 (rows 1-6)
#' paste0(rownames(chunked_data()), collapse = ', ')
#'
#' # Chunk #2 (rows 7-12)
#' paste0(rownames(chunked_data()), collapse = ', ')
#' @export
chunk_data_ <- function (x, size = 10, reverse = FALSE) {
# Calculate chunks & check hard stops
chunks <- calc_chunks(x = x, size = size, reverse = reverse)
# Return factory function
index <- 0
function () {
if ((index <<- index + 1) <= vctrs::vec_size(chunks)) {
vctrs::vec_slice(x, chunks[[index]])
} else NULL
}
}
|
/scratch/gouwar.j/cran-all/cranData/utile.tools/R/chunk.R
|
#' @title Cumulative Sum of Failures
#' @description Calculates the cumulative sum of failures for a series of
#' procedures which can be used to create CUSUM charts.
#' @param xi An integer. The dichotomous outcome variable (1 = Failure, 0 = Success)
#' for the i-th procedure.
#' @param p0 A double. The acceptable event rate.
#' @param p1 A double. The unacceptable event rate.
#' @param by A factor. Optional variable to stratify procedures by.
#' @param alpha A double. The Type I Error rate. Probability of rejecting the
#' null hypothesis when `p0` is the true rate.
#' @param beta A double. The Type II Error rate. Probability of failing to reject
#' null hypothesis when it is false.
#' @return An object of class \code{data.frame}.
#' @references
#' Rogers, C. A., Reeves, B. C., Caputo, M., Ganesh, J. S., Bonser, R. S., & Angelini, G. D. (2004). Control chart methods for monitoring cardiac surgical performance and their interpretation. The Journal of Thoracic and Cardiovascular Surgery, 128(6), 811-819.
#' @examples
#' library(purrr)
#' library(ggplot2)
#'
#' # Data
#' df <- data.frame(
#' xi = simplify(
#' map(
#' c(.1,.08,.05,.1,.13,.14,.14,.09,.25),
#' ~ rbinom(50,1,.x))),
#' p0 = simplify(
#' map(
#' c(.1,.1,.1,.1,.1,.1,.1,.15,.2),
#' ~ rnorm(50,.x,.03))),
#' by = rep(
#' factor(paste('Subject', c('A','B','C'))),
#' times = c(150,150,150))
#' )
#'
#' # Overall event rate
#' p0 <- sum(df$xi) / nrow(df)
#'
#' # Create CUSUM plot
#' cusum_failure(
#' xi = df$xi,
#' p0 = p0,
#' p1 = p0 * 1.5,
#' by = df$by
#' ) |>
#' ggplot(aes(y = cusum, x = i)) +
#' geom_step() +
#' geom_line(mapping = aes(y = l0), linetype = 2) +
#' geom_line(mapping = aes(y = l1), linetype = 2) +
#' ylab("Cumulative Failures") +
#' xlab("Case Number") +
#' facet_wrap(~ by) +
#' theme_bw()
#' @export
cusum_failure <- function (
xi,
p0,
p1,
by = NULL,
alpha = 0.05,
beta = 0.05
) {
# Variable calculations
OR <- (p1 * (1 - p0)) / (p0 * (1 - p1))
s <- log((1 - p0) / (1 - p1)) / log(OR)
h0 <- log((1 - alpha) / beta) / log(OR)
h1 <- log((1 - beta) / alpha) / log(OR)
# Function for column creation
append_cols <- function (x) {
x$i <- 1:nrow(x)
x$cusum <- cumsum(x$xi)
x$l0 <- (1:nrow(x) * s) - h0
x$l1 <- (1:nrow(x) * s) + h1
x
}
# Create data table
col_names <- c('i', 'xi', 'cusum', 'l0', 'l1')
cols <- vctrs::data_frame(xi = xi)
# Create columns
if (!is.null(by)) {
col_names <- c(col_names, 'by')
cols$by <- by
cols <- purrr::list_rbind(
purrr::map(
unique(by),
function (.x) append_cols(cols[cols$by %in% .x,])
)
)
} else cols <- append_cols(cols)
# Return arranged data
cols[,col_names]
}
#' @title Cumulative Sum of Log-Likelihood Ratio
#' @description Calculates the cumulative log likelihood ratio of failure for a
#' series of procedures which can be used to create CUSUM charts.
#' @param xi An integer. The dichotomous outcome variable (1 = Failure, 0 = Success)
#' for the i-th procedure.
#' @param p0 A double. The acceptable event rate.
#' @param p1 A double. The unacceptable event rate.
#' @param by A factor. Optional variable to stratify procedures by.
#' @param alpha A double. The Type I Error rate. Probability of rejecting the
#' null hypothesis when `p0` is the true rate.
#' @param beta A double. The Type II Error rate. Probability of failing to reject
#' null hypothesis when it is false.
#' @return An object of class \code{data.frame}.
#' @references
#' Rogers, C. A., Reeves, B. C., Caputo, M., Ganesh, J. S., Bonser, R. S., & Angelini, G. D. (2004). Control chart methods for monitoring cardiac surgical performance and their interpretation. The Journal of Thoracic and Cardiovascular Surgery, 128(6), 811-819.
#' @examples
#' library(purrr)
#' library(ggplot2)
#'
#' # Data
#' df <- data.frame(
#' xi = simplify(
#' map(
#' c(.1,.08,.05,.1,.13,.14,.14,.09,.25),
#' ~ rbinom(50,1,.x))),
#' p0 = simplify(
#' map(
#' c(.1,.1,.1,.1,.1,.1,.1,.15,.2),
#' ~ rnorm(50,.x,.03))),
#' by = rep(
#' factor(paste('Subject', c('A','B','C'))),
#' times = c(150,150,150))
#' )
#'
#' # Overall event rate
#' p0 <- sum(df$xi) / nrow(df)
#'
#' # Create CUSUM plot
#' cusum_loglike(
#' xi = df$xi,
#' p0 = p0,
#' p1 = p0 * 1.5,
#' by = df$by
#' ) |>
#' ggplot(aes(y = cusum, x = i)) +
#' geom_step() +
#' geom_hline(aes(yintercept = h0), linetype = 2) +
#' geom_hline(aes(yintercept = h1), linetype = 2) +
#' ylab("Cumulative Log-likelihood Ratio") +
#' xlab("Case Number") +
#' facet_wrap(~ by) +
#' theme_bw()
#' @export
cusum_loglike <- function (
xi,
p0,
p1,
by = NULL,
alpha = 0.05,
beta = 0.05
) {
# Variable calculations
OR <- (p1 * (1 - p0)) / (p0 * (1 - p1))
s <- log((1 - p0) / (1 - p1)) / log(OR)
h0 <- log((1 - alpha) / beta) / log(OR)
h1 <- log((1 - beta) / alpha) / log(OR)
# Function for column creation
append_cols <- function (x) {
x$i <- 1:nrow(x)
x$cusum <- cumsum(x$xi - s)
x
}
# Create data table
col_names <- c('i', 'xi', 'cusum', 'h0', 'h1')
cols <- vctrs::data_frame(
xi = xi,
h0 = -rep(h0, times = length(xi)),
h1 = rep(h1, times = length(xi))
)
# Create columns
if (!is.null(by)) {
col_names <- c(col_names, 'by')
cols$by <- by
cols <- purrr::list_rbind(
purrr::map(
unique(by),
function (.x) append_cols(cols[cols$by %in% .x,])
)
)
} else cols <- append_cols(cols)
# Return arranged data
cols[,col_names]
}
#' @title Cumulative Sum of Observed Minus Expected Outcome
#' @description Calculates the cumulative observed-minus-expected failure for a
#' series of procedures which can be used to create CUSUM charts.
#' @param xi An integer. The dichotomous outcome variable (1 = Failure, 0 = Success)
#' for the i-th procedure.
#' @param p0 A double. The acceptable event rate.
#' @param by A factor. Optional variable to stratify procedures by.
#' @return An object of class \code{data.frame}.
#' @references
#' Rogers, C. A., Reeves, B. C., Caputo, M., Ganesh, J. S., Bonser, R. S., & Angelini, G. D. (2004). Control chart methods for monitoring cardiac surgical performance and their interpretation. The Journal of Thoracic and Cardiovascular Surgery, 128(6), 811-819.
#' @examples
#' library(purrr)
#' library(ggplot2)
#'
#' # Data
#' df <- data.frame(
#' xi = simplify(
#' map(
#' c(.1,.08,.05,.1,.13,.14,.14,.09,.25),
#' ~ rbinom(50,1,.x))),
#' p0 = simplify(
#' map(
#' c(.1,.1,.1,.1,.1,.1,.1,.15,.2),
#' ~ rnorm(50,.x,.03))),
#' by = rep(
#' factor(paste('Subject', c('A','B','C'))),
#' times = c(150,150,150))
#' )
#'
#' # Create CUSUM plot
#' cusum_ome(
#' xi = df$xi,
#' p0 = df$p0,
#' by = df$by
#' ) |>
#' ggplot(aes(x = i, y = cusum)) +
#' geom_hline(yintercept = 0, linetype = 6, linewidth = 0.5) +
#' geom_step() +
#' ylab("Cumulative Observed Minus Expected Failures") +
#' xlab("Case Number") +
#' facet_wrap(~ by) +
#' theme_bw()
#' @export
cusum_ome <- function (
xi,
p0,
by = NULL
) {
# Recycle p0 if length 1
if (length(p0) == 1) p0 <- rep(p0, times = length(xi))
# Function for column creation
append_cols <- function (x) {
x$i <- 1:nrow(x)
x$cusum <- (function (y, z) {
res <- double()
for (i in 1:length(y)) {
ref <- if (i == 1) 0 else res[i - 1]
res[i] <- if (y[i] == 0) ref - z[i]
else ref + (1 - z[i])
}
res
})(x$xi, x$p0)
x
}
# Create data table
col_names <- c('i', 'xi', 'p0', 'cusum')
cols <- vctrs::data_frame(
xi = xi,
p0 = p0
)
# Create columns
if (!is.null(by)) {
col_names <- c(col_names, 'by')
cols$by <- by
cols <- purrr::list_rbind(
purrr::map(
unique(by),
function (.x) append_cols(cols[cols$by %in% .x,])
)
)
} else cols <- append_cols(cols)
# Return arranged data
cols[,col_names]
}
#' @title Risk-adjusted Sequential Probability Ratio Test (SPRT)
#' @description Calculates the risk-adjusted sequential probability ratio test
#' for a series of procedures which can be used to create CUSUM charts.
#' @param xi An integer. The dichotomous outcome variable (1 = Failure, 0 = Success)
#' for the i-th procedure.
#' @param p0 A double. The acceptable event rate for each individual
#' procedure (adjusted).
#' @param OR A double. An odds-ratio reflecting the increase in relative risk of
#' failure.
#' @param by A factor. Optional variable to stratify procedures by.
#' @param alpha A double. The Type I Error rate. Probability of rejecting the
#' null hypothesis when `p0` is true.
#' @param beta A double. The Type II Error rate. Probability of failing to reject
#' null hypothesis when it is false.
#' @return An object of class \code{data.frame}.
#' @references
#' Rogers, C. A., Reeves, B. C., Caputo, M., Ganesh, J. S., Bonser, R. S., & Angelini, G. D. (2004). Control chart methods for monitoring cardiac surgical performance and their interpretation. The Journal of Thoracic and Cardiovascular Surgery, 128(6), 811-819.
#' @examples
#' library(purrr)
#' library(ggplot2)
#'
#' # Data
#' df <- data.frame(
#' xi = simplify(
#' map(
#' c(.1,.08,.05,.1,.13,.14,.14,.09,.25),
#' ~ rbinom(50,1,.x))),
#' p0 = simplify(
#' map(
#' c(.1,.1,.1,.1,.1,.1,.1,.15,.2),
#' ~ rnorm(50,.x,.03))),
#' by = rep(
#' factor(paste('Subject', c('A','B','C'))),
#' times = c(150,150,150))
#' )
#'
#' # Create CUSUM plot
#' cusum_sprt(
#' xi = df$xi,
#' p0 = df$p0,
#' OR = 1.5,
#' by = df$by
#' ) |>
#' ggplot(aes(y = cusum, x = i)) +
#' geom_step() +
#' geom_hline(aes(yintercept = h0), linetype = 2) +
#' geom_hline(aes(yintercept = h1), linetype = 2) +
#' ylab("Cumulative Log-likelihood Ratio") +
#' xlab("Case Number") +
#' facet_wrap(~ by) +
#' theme_bw()
#' @export
cusum_sprt <- function (
xi,
p0,
OR,
by = NULL,
alpha = 0.05,
beta = 0.05
) {
# Variable calculations
h0 <- log((1 - alpha) / beta) / log(OR)
h1 <- log((1 - beta) / alpha) / log(OR)
# Function for column creation
append_cols <- function (x) {
x$i <- 1:nrow(x)
x$cusum <- cumsum(x$xi - (log((1 - x$p0) + (OR * x$p0)) / log(OR)))
x
}
# Create data table
col_names <- c('i', 'xi', 'p0', 'h0', 'h1', 'cusum')
cols <- vctrs::data_frame(
xi = xi,
p0 = p0,
h0 = -rep(h0, times = length(xi)),
h1 = rep(h1, times = length(xi))
)
# Create columns
if (!is.null(by)) {
col_names <- c(col_names, 'by')
cols$by <- by
cols <- purrr::list_rbind(
purrr::map(
unique(by),
function (.x) append_cols(cols[cols$by %in% .x,])
)
)
} else cols <- append_cols(cols)
# Return arranged data
cols[,col_names]
}
|
/scratch/gouwar.j/cran-all/cranData/utile.tools/R/cusum.R
|
#' @title Paste frequency
#' @description
#' Creates a formatted frequency from count(able) data. Automatically
#' tallies non-numeric data types (nrow or length) and supports vectorized data
#' methods.
#' @param x A data.frame, numeric, or non-numeric. The numerator.
#' @param y A data.frame, numeric, or non-numeric. The denominator. A single
#' denominator may be used for multiple numerators or one denominator for each
#' numerator.
#' @param na.rm A logical. Whether to ignore NA's when tallying non-numeric data.
#' @param percent.sign A logical. Indicates percent sign should be printed
#' with frequencies.
#' @param digits An integer. Number of digits to round to.
#' @return A character vector of count(s) with frequencies.
#' @examples
#' # Numeric
#' paste_freq(20, 100)
#'
#' # data.frame
#' df <- data.frame(x = c(1:100), y = TRUE)
#' paste_freq(df[1:20,], df)
#'
#' # Mixed data types
#' paste_freq(20, df)
#'
#' # Single denominator for multiple numerators
#' paste_freq(c(10,20,30), 100)
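#'
#' # A sketch omitting the percent sign (documented `percent.sign` argument)
#' paste_freq(20, 100, percent.sign = FALSE)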
#' @export
paste_freq <- function (x, y, na.rm = TRUE, percent.sign = TRUE, digits = 1) {
# Data aggregation
x <- .count_items(x, na.rm = na.rm)
y <- .count_items(y, na.rm = na.rm)
# Recycle single counts or throw error for mixed types
common_counts <- vctrs::vec_recycle_common(x = x, y = y)
# Create frequencies
purrr::pmap_chr(
common_counts,
~ {
if (any(is.na(c(.x,.y))) | !any(is.numeric(c(.x,.y)))) NA_character_
else {
paste0(
.x,
' (',
round((.x / .y) * 100, digits = digits),
if (percent.sign & !is.infinite(.x / .y)) '%',
')'
)
}
}
)
}
#' @title Paste median
#' @description
#' Creates a formatted median with interquartile range from numeric data.
#' @param x A numeric. Data to summarize.
#' @param less.than.one A logical. Indicates a median that rounds to 0 should
#' be printed as <1.
#' @param digits An integer. Number of digits to round to.
#' @return A character vector of the median(s) with interquartile range(s).
#' @examples
#' paste_median(mtcars$mpg)
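#'
#' # A sketch of the documented `less.than.one` argument (median rounds to 0)
#' paste_median(c(0.1, 0.2, 0.3), less.than.one = TRUE)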
#' @export
paste_median <- function (x, less.than.one = FALSE, digits = 1) {
if (all(is.na(x)) | !all(is.numeric(x))) NA_character_
else {
estimate <- round(stats::median(x, na.rm = TRUE), digits = digits)
precision <- round(
stats::quantile(x, probs = c(0.25, 0.75), na.rm = TRUE),
digits = digits)
if (round(estimate, digits = 0) == 0 & less.than.one) estimate <- '<1'
paste0(estimate, ' [', paste0(precision, collapse = '-'), ']')
}
}
#' @title Paste mean
#' @description
#' Creates a formatted mean with standard deviation from numeric data.
#' @param x A numeric. Data to summarize.
#' @param less.than.one A logical. Indicates a mean that rounds to 0 should
#' be printed as <1.
#' @param digits An integer. Number of digits to round to.
#' @return A character vector of the mean(s) with standard deviation(s).
#' @examples
#' paste_mean(mtcars$mpg)
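#'
#' # A sketch with two-digit rounding (documented `digits` argument)
#' paste_mean(mtcars$mpg, digits = 2)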
#' @export
paste_mean <- function (x, less.than.one = FALSE, digits = 1) {
if (all(is.na(x)) | !all(is.numeric(x))) NA_character_
else {
estimate <- round(mean(x, na.rm = TRUE), digits = digits)
precision <- round(stats::sd(x, na.rm = TRUE), digits = digits)
if (round(estimate, digits = 0) == 0 & less.than.one) estimate <- '<1'
paste0(estimate, ' \u00B1', precision)
}
}
#' @title Paste event-free survival
#' @description
#' Creates a formatted event-free-survival from a survfit object
#' and a specified time point.
#' @param x A \code{\link[survival]{survfit}} object. The survival model.
#' @param times A numeric. Indicates time-points of interest. Units are whatever
#' was used to create the survival fit.
#' @param percent.sign A logical. Indicates percent sign should be printed
#' for frequencies.
#' @param digits Integer. Number of digits to round to.
#' @return A named character vector of event-free survival(s).
#' @examples
#' library(survival)
#'
#' fit <- survfit(Surv(time, status) ~ 1, data = diabetic)
#' paste_efs(fit, c(1, 3, 5))
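#'
#' # A sketch omitting percent signs (documented `percent.sign` argument)
#' paste_efs(fit, c(1, 3, 5), percent.sign = FALSE)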
#' @export
paste_efs <- function (x, times, percent.sign = TRUE, digits = 1) {
if (!all(is.numeric(times)) | vctrs::vec_is_empty(times)) {
stop('\'times\' not <numeric> or is empty.')
} else if (!inherits(x, 'survfit') | !(x$type %in% c('right', 'left', 'interval'))) {
stop('\'x\' not <survfit> or fit does not estimate survival.')
} else {
res <- summary(x, times = times)[c('surv', 'lower', 'upper')]
res <- purrr::map(res, ~ round(.x * 100, digits = digits))
res <- purrr::pmap_chr(
res,
\(surv, lower, upper) {
paste0(surv, if (percent.sign) '%', ' [', lower, '-', upper, ']')
}
)
stats::setNames(res, times)
}
}
#' @title Paste p-value
#' @description
#' Creates a human-readable p.value using sensible defaults for `format.pval()`.
#' @param x A numeric. P-value to format.
#' @param digits A numeric. Number of significant digits to round to.
#' @param p.digits A numeric. Minimum number of digits to right of the decimal
#' point.
#' @examples
#' paste_pval(0.061126e-10)
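#'
#' # format.pval() is vectorized, so several p-values may be passed at once
#' paste_pval(c(0.061126e-10, 0.0435))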
#' @export
paste_pval <- function (x, digits = 1, p.digits = 4) {
format.pval(pv = x,
digits = digits,
eps = 1e-04,
nsmall = p.digits,
              scientific = FALSE)
}
#' @name paste
#' @title Concatenate strings
#' @description
#' An augmented version of \code{\link[base:paste]{base::paste()}} with options
#' to manage `NA` values.
#' @param ... R objects to be converted to character vectors.
#' @param sep A character. A string to separate the terms.
#' @param collapse A character. An string to separate the results.
#' @param na.rm A logical. Whether to remove NA values from the inputs.
#' @return Character vector of concatenated values.
#' @seealso \code{\link[base]{paste}}
#' @examples
#' # Base paste() NA handling behavior
#' paste(
#' 'The', c('red', NA_character_, 'orange'), 'fox jumped', NA_character_, 'over the fence.',
#' collapse = ' '
#' )
#'
#' # Removal of NA values
#' paste(
#' 'The', c('red', NA_character_, 'orange'), 'fox jumped', NA_character_, 'over the fence.',
#' collapse = ' ',
#' na.rm = TRUE
#' )
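#'
#' # paste0() accepts the same na.rm argument (a sketch)
#' paste0('The ', c('red', NA_character_, 'orange'), ' fox', na.rm = TRUE)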
#' @rdname paste
#' @export
paste <- function (..., sep = ' ', collapse = NULL, na.rm = FALSE) {
x <- list(..., sep = sep, collapse = collapse)
if (na.rm) x <- purrr::map(x[!is.na(x)], ~ .x[!is.na(.x)])
do.call(base::paste, x)
}
#' @rdname paste
#' @export
paste0 <- function (..., collapse = NULL, na.rm = FALSE) {
x <- list(..., collapse = collapse)
if (na.rm) x <- purrr::map(x[!is.na(x)], ~ .x[!is.na(.x)])
do.call(base::paste0, x)
}
|
/scratch/gouwar.j/cran-all/cranData/utile.tools/R/paste.R
|
#' @name test_hypothesis
#' @title Test the null hypothesis
#' @description Tests the null hypothesis that there is no difference between
#' grouped data.
#' @param x A numeric, factor, or logical. Observations.
#' @param y A factor or logical. Categorical "by" grouping variable.
#' @param test A character. Name of the statistical test to use. See note.
#' @param digits An integer. Number of digits to round to.
#' @param p.digits An integer. The number of p-value digits to the right of
#' the decimal point. Note that p-values are still rounded using 'digits'.
#' @param simulate.p.value A logical. Whether p-values in nominal variable testing
#' should be computed with Monte Carlo simulation.
#' @param B An integer. Number of replicates to use in Monte Carlo simulation for
#' nominal testing.
#' @param workspace An integer. Size of the workspace used for the Fisher's Exact
#' Test network algorithm.
#' @param ... Additional arguments passed to the appropriate S3 method.
#' @return A list containing the statistical test performed, test statistic,
#' and p-value.
#' @note Statistical testing used is dependent on type of 'x' data. Supported
#' testing for numeric data includes ANOVA ('anova'), Kruskal-Wallis ('kruskal'),
#' and Wilcoxon Rank Sum ('wilcoxon') tests. For categorical data, supported
#' testings includes Pearson's Chi-squared ('chisq') and Fisher's Exact Test
#' ('fisher').
#' @examples
#' strata <- as.factor(mtcars$cyl)
#'
#' # Numeric data
#' test_hypothesis(mtcars$mpg, strata)
#'
#' # Logical data
#' test_hypothesis(as.logical(mtcars$vs), strata)
#'
#' # Factor data
#' test_hypothesis(as.factor(mtcars$carb), strata)
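#'
#' # A sketch specifying Fisher's Exact Test and formatted p-values
#' # (documented `test` and `p.digits` arguments)
#' test_hypothesis(as.factor(mtcars$carb), strata, test = 'fisher', p.digits = 3)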
#' @export
test_hypothesis <- function (
x,
y,
test,
digits,
p.digits,
simulate.p.value,
B,
workspace,
...
) {
UseMethod('test_hypothesis')
}
# Default response
#' @export
test_hypothesis.default <- function (...) NA_character_
#' @rdname test_hypothesis
#' @export
test_hypothesis.numeric <-
function (
x,
y,
test = c('anova', 'kruskal', 'wilcoxon'),
digits = 1,
p.digits,
...
) {
# Check for valid test
test <- match.arg(test)
# Set reference variables
res <- list(test = 'None', statistic = NA_real_, p = NA_real_)
tab_na <- table(is.na(x), y)
# Return conditions
if (any(tab_na[1,] == 0) || any(colSums(tab_na) == 0)) {
      warning('One or more groups have no non-missing observations.')
return(res)
}
    if (test == 'wilcoxon' & ncol(tab_na) != 2) {
warning('Wilcoxon Rank Sum must compare 2 groups.')
return(res)
}
# Set test name
res$test <- switch(
test,
'anova' = 'ANOVA linear model',
'kruskal' = 'Kruskal-Wallis rank sum test',
'wilcoxon' = 'Wilcoxon rank sum test'
)
# Run statistical test
test_obj <- switch(
test,
'anova' = stats::anova(stats::lm(x ~ y)),
'kruskal' = stats::kruskal.test(x ~ as.factor(y)),
'wilcoxon' = suppressWarnings(stats::wilcox.test(x ~ as.factor(y)))
)
# Set test statistic
res$statistic <-
if (test == 'anova') test_obj[1, ncol(test_obj)-1]
else unname(test_obj$statistic)
res$statistic <- round(res$statistic, digits = digits)
# Set p-value
res$p <-
if (test == 'anova') test_obj[1, ncol(test_obj)]
else test_obj$p.value
if (!missing(p.digits)) res$p <- paste_pval(x = res$p,
digits = digits,
p.digits = p.digits)
# Return
res
}
#' @rdname test_hypothesis
#' @export
test_hypothesis.factor <-
function (
x,
y,
test = c('chisq', 'fisher'),
digits = 1,
p.digits,
simulate.p.value = FALSE,
B = 2000,
workspace = 2e7,
...
) {
# Check for valid test
test <- match.arg(test)
# Set reference variables
res <- list(test = 'None', statistic = NA_real_, p = NA_real_)
tab <- table(x, y, exclude = NA)
rs <- rowSums(tab)
cs <- colSums(tab)
# Chisq
if (test == 'chisq') {
# Early return condition
if ((any(rs == 0) || any(cs == 0)) && ncol(tab) > 1 && nrow(tab) > 1) {
      warning('Chi-squared test cannot be run with the group counts provided.')
return(res)
}
# Run statistical test
if (length(cs) > 1) tab <- tab[rs > 0, , drop = FALSE]
if (length(rs) > 1) tab <- tab[, cs > 0, drop = FALSE]
test_obj <- suppressWarnings(
stats::chisq.test(x = tab, simulate.p.value = simulate.p.value, B = B)
)
# Set test statistic
res$statistic <- round(unname(test_obj$statistic), digits = digits)
# Fisher
} else {
# Early return condition
if ((any(rs == 0) || any(cs == 0)) || ncol(tab) == 1 || nrow(tab) == 1) {
      warning('Fisher\'s Exact Test cannot be run with the group counts provided.')
return(res)
}
# Run statistical test
test_obj <- stats::fisher.test(x = tab,
simulate.p.value = simulate.p.value,
B = B,
workspace = workspace)
}
# Set test name
res$test <- switch(
test,
'chisq' = 'Pearson\'s Chi-squared Test',
'fisher' = 'Fisher\'s Exact Test'
)
# Set p-value
res$p <- test_obj$p.value
if (!missing(p.digits)) res$p <- paste_pval(x = res$p,
digits = digits,
p.digits = p.digits)
# Return
res
}
#' @rdname test_hypothesis
#' @export
test_hypothesis.logical <- test_hypothesis.factor
|
/scratch/gouwar.j/cran-all/cranData/utile.tools/R/test.R
|
# Aggregate certain object types into a count
.count_items <- function (x, na.rm = TRUE) {
if (is.data.frame(x)) vctrs::vec_size(x) # data frame
else if (!all(is.numeric(x))) {
vctrs::vec_size(if (na.rm) stats::na.omit(x) else x) # Non-numeric
} else if (vctrs::vec_is_empty(x)) 0 # empty numeric
else x # Usable numeric
}
|
/scratch/gouwar.j/cran-all/cranData/utile.tools/R/utils.R
|
#' @title Append a ggplot2 table to the bottom of a ggplot2 plot
#' @description Aligns axes and combines a ggplot2 plot and table into a single plot.
#' Can handle legends.
#' @param plot Required. ggplot2::ggplot() object. If a legend is present, it will be
#' extracted.
#' @param table Required. ggplot2::ggplot object. If a legend is present, it will
#' be removed and ignored.
#' @param plot.height Optional. Numeric. Height of plot relative to table. Defaults
#' to 1.
#' @param table.height Optional. Numeric. Height of table relative to plot. Defaults
#' to 0.1.
#' @param plot.width Optional. Numeric. Width of plot relative to legend. Ignored
#' if no legend present in plot. Defaults to 1.
#' @param extract.legend Optional. Logical. Indicates whether to extract the legend
#' from the plot and reinsert it adjacent to the final combined plot. May be undesired
#' if legend already embedded within the plot area. Defaults to TRUE.
#' @param legend.width Optional. Numeric. Width of legend relative to plot. Ignored
#' if no legend present in plot or 'extract.legend'=FALSE. Defaults 0.2.
#' @param legend.offset Optional. Numeric. Vertical offset of legend. Used to raise
#' or lower. Ignored if no legend present in plot or 'extract.legend'=FALSE. Defaults
#' to -15.
#' @return A ggplot2 tableGrob object. Use grid::grid.draw() to open in RStudio viewer.
#' Works with ggplot2::ggsave() out of the box.
#' @note To ensure proper alignment, double check that both plots use the same scale
#' and breaks!
#' @examples
#' library(survival)
#' library(ggplot2)
#' library(broom) # tidy() model data
#' library(grid) # grid.draw() finished plot
#'
#' # Data with group names specified
#' data_diabetic <- diabetic
#' data_diabetic$trt <- as.factor(data_diabetic$trt)
#' levels(data_diabetic$trt) <- c('None', 'Laser')
#'
#' # Survival Model
#' fit <- survfit(Surv(time, status) ~ trt, data = data_diabetic)
#'
#' # Kaplan Meier (KM) Plot
#' plot_km <- ggplot(
#' data = tidy(fit),
#' mapping = aes(x = time, y = estimate)
#' ) +
#' geom_step(aes(color = strata)) +
#' geom_stepconfint(aes(ymin = conf.low, ymax = conf.high, fill = strata), alpha = 0.3) +
#' coord_cartesian(c(0, 50)) + # Note scale set here!
#' scale_x_continuous(expand = c(0.02,0)) +
#' labs(x = 'Time', y = 'Freedom From Event') +
#' scale_color_manual(
#' values = c('#d83641', '#1A45A7'),
#' name = 'Treatment',
#' labels = c('Laser', 'None'),
#' aesthetics = c('colour', 'fill')) +
#' theme_basic()
#'
#' # Risk Table
#' tbl_risk <- ggrisktable(fit, c(0, 10, 20, 30, 40, 50)) +
#' coord_cartesian(c(0, 50)) +
#' scale_x_continuous(expand = c(0.02,0)) +
#' theme_risk()
#'
#' # Combine KM plot and risk table
#' plot_cmbd <- append_table(
#' plot = plot_km,
#' table = tbl_risk
#' )
#'
#' # Draw in RStudio viewer
#' grid.newpage()
#' grid.draw(plot_cmbd)
#' @export
append_table <- function(
plot = NULL, table = NULL, plot.height = 1,
table.height = 0.1, plot.width = 1, extract.legend = TRUE,
legend.width = 0.2, legend.offset = -15
) {
# Hard stops
if (!ggplot2::is.ggplot(plot) | !ggplot2::is.ggplot(table))
stop('Missing valid ggplot object. Check [\'plot\', \'table\'].')
if (!is.numeric(plot.height) | !is.numeric(table.height))
stop('Both plot and table heights must be specified. Check \'plot.height\', \'table.height\'.')
# Convert plots to grobs
if (extract.legend) plot <- plot +
ggplot2::theme(
legend.margin = ggplot2::margin(legend.offset, 0, 0, 0, unit = 'mm'),
legend.justification = NULL,
legend.position = NULL
)
grob_plot <- ggplot2::ggplotGrob(plot)
grob_tbl <- ggplot2::ggplotGrob(table + ggplot2::theme(legend.position = 'none')) # drop table legend
# Extract legend, if present
if (extract.legend) {
legend_row <- which(purrr::map_chr(grob_plot$grobs, ~ .x$name) == 'guide-box')
if (length(legend_row) > 0) {
# Hard stop
if (!is.numeric(plot.width) | !is.numeric(legend.width))
stop('Widths for both plot & legend required. Check [\'legend.width\', \'plot.width\'].')
grob_legend <- grob_plot$grobs[[legend_row]]
grob_plot <- ggplot2::ggplotGrob(plot + ggplot2::theme(legend.position = 'none'))
}
}
# Combine plot grobs
grob_combined <- rbind(
grob_plot,
grob_tbl,
size = 'first'
)
panels <- grob_combined$layout$t[grep("panel", grob_combined$layout$name)]
grob_combined$heights[panels[1]] <- ggplot2::unit(plot.height, "null") # Set plot height
grob_combined$heights[panels[2]] <- ggplot2::unit(table.height, "null") # Set table height
# Return new arrangement
if (extract.legend & exists('grob_legend'))
gridExtra::arrangeGrob(
grob_combined,
grob_legend,
ncol = 2,
widths = c(plot.width, legend.width),
clip = TRUE
)
else grob_combined
}
|
/scratch/gouwar.j/cran-all/cranData/utile.visuals/R/append.R
|
#' @title Step function confidence intervals for ggplot2
#' @description Produces a step function confidence interval for survival curves.
#' @param mapping Aesthetic mappings with aes() function. Like geom_ribbon(), you must provide
#' columns for x, ymin (lower limit), ymax (upper limit).
#' @param data The data to be displayed in this layer. Can inherit from ggplot parent.
#' @param stat The statistical transformation to use on the data for this layer, as a string.
#' Defaults to 'identity'.
#' @param position Position adjustment, either as a string, or the result of a call to a
#' position adjustment function.
#' @param na.rm If FALSE, the default, missing values are removed with a warning. If TRUE,
#' missing values are silently removed.
#' @param ... Optional. Any other ggplot geom_ribbon() arguments.
#' @note Adapted from the survminer package <https://github.com/kassambara/survminer>.
#' @examples
#' library(survival)
#' library(broom)
#' library(ggplot2)
#'
#' fit <- survfit(Surv(time, status) ~ trt, data = diabetic)
#' fit <- survfit0(fit) # connect origin
#'
#' ggplot(
#' data = tidy(fit),
#' mapping = aes(x = time, y = estimate)
#' ) +
#' geom_step(aes(color = strata)) +
#' geom_stepconfint(aes(ymin = conf.low, ymax = conf.high, fill = strata), alpha = 0.3) +
#' coord_cartesian(c(0, 50)) +
#' scale_x_continuous(expand = c(0.02,0)) +
#' labs(x = 'Time', y = 'Freedom From Event') +
#' scale_color_manual(
#' values = c('#d83641', '#1A45A7'),
#' name = 'Treatment',
#' labels = c('None', 'Laser'),
#' aesthetics = c('colour', 'fill')) +
#' theme_basic()
#' @export
geom_stepconfint <- function (
mapping = NULL, data = NULL, stat = "identity",
position = "identity", na.rm = FALSE, ...
) {
ggplot2::layer(
mapping = mapping,
data = data,
stat = stat,
geom = ggplot2::ggproto(
`_class` = 'GeomConfint',
`_inherit` = ggplot2::GeomRibbon,
required_aes = c("x", "ymin", "ymax"),
draw_group = function (self, data, panel_scales, coord, na.rm = FALSE) {
if (na.rm) data <- data[stats::complete.cases(self$required_aes), ]
data <- data[order(data$group, data$x), ]
data <- self$stairstep_confint(data)
ggplot2::GeomRibbon$draw_group(data, panel_scales, coord, na.rm = FALSE)
},
stairstep_confint = function (data) {
data <- as.data.frame(data)[order(data$x), ]
n <- nrow(data)
ys <- rep(1:n, each = 2)[-2 * n]
xs <- c(1, rep(2:n, each = 2))
data.frame(
x = data$x[xs],
ymin = data$ymin[ys],
ymax = data$ymax[ys],
data[xs, setdiff(names(data), c("x", "ymin", "ymax"))]
)
}
),
position = position,
params = list(na.rm = na.rm, ...)
)
}
|
/scratch/gouwar.j/cran-all/cranData/utile.visuals/R/geoms.R
|
utils::globalVariables(c('time', 'strata', 'n.risk'))
#' @title Create a ggplot2 table showing the number at risk
#' @description A simple wrapper function which calculates the
#' numbers at risk for a survival model and a given set of time points, then
#' creates a ggplot2 table with them.
#' @param fit Required. survival::survfit() object.
#' @param times Required. Numeric. One or more time points to calculate
#' the number at risk for.
#' @param text.color Optional. Character. Color of text within table. Defaults
#' to 'black'.
#' @param strata.order Optional. Character. Ordered names of strata factor
#' levels.
#' @return An unformatted ggplot2 table showing the number at risk.
#' @examples
#' library(survival)
#'
#' fit <- survfit(Surv(time, status) ~ trt, data = diabetic)
#'
#' ggrisktable(
#' fit = fit,
#' times = c(0, 10, 20, 30, 40, 50),
#' strata.order = c('0', '1')
#' ) + theme_risk()
#' @export
ggrisktable <- function (fit = NULL, times = NULL, text.color = 'black', strata.order = NULL) {
# Hard stops
if (is.null(fit) | !(inherits(fit, 'survfit'))) stop('No valid fit object provided. [Check: \'fit\']')
if (is.null(times) | !is.numeric(times)) stop('No valid time points provided. [Check: \'times\']')
if (!is.null(strata.order) & !is.character(strata.order)) stop('Invalid strata order data provided. [Check: \'strata.order\']')
# Generate risk table and order
risk_table <- .tabulate_at_risk(fit, times)
# Reorder strata
if (is.character(strata.order))
risk_table$strata <- factor(
risk_table$strata,
levels = unique(c(
rev(strata.order[strata.order %in% levels(risk_table$strata)]),
levels(risk_table$strata)
))
)
# Return plotted table
ggplot2::ggplot(
risk_table,
ggplot2::aes(x = time, y = strata, label = n.risk)
) + ggplot2::geom_text(color = text.color)
}
|
/scratch/gouwar.j/cran-all/cranData/utile.visuals/R/gg.R
|
#' @importFrom ggplot2 %+replace%
#' @title Minimalist theme for ggplot2
#' @description A minimalist \code{ggplot2} theme which removes most background elements and
#' lines.
#' @param base_size A numeric. Base font size.
#' @param base_family A numeric. Base font family.
#' @param base_color A character. Base color for lines and text.
#' @param base_line_size A numeric. Base line element size.
#' @param base_rect_size A numeric. Base rectangle element size.
#' @note Recommend exporting as PNG or TIFF to preserve
#' background transparency.
#' @examples
#' library(ggplot2)
#'
#' ggplot(datasets::mtcars, aes(x = wt, y = hp, color = as.factor(cyl))) +
#' geom_point() +
#' theme_basic()
#' @export
theme_basic <- function(
base_size = 12,
base_family = NULL,
base_color = "black",
base_line_size = base_size/12,
base_rect_size = base_size/12
) {
ggplot2::theme_bw(
base_size = base_size,
base_family = base_family,
base_line_size = base_line_size,
base_rect_size = base_rect_size
) %+replace%
ggplot2::theme(
# Defaults
text = ggplot2::element_text(
family = base_family,
color = base_color,
size = base_size,
lineheight = 0.9,
hjust = 0.5,
vjust = 0.5,
angle = 0,
margin = ggplot2::margin(),
debug = FALSE),
line = ggplot2::element_line(
color = base_color,
linewidth = base_line_size,
linetype = 1,
lineend = "square"),
rect = ggplot2::element_rect(
fill = "transparent",
colour = base_color,
linewidth = base_rect_size,
linetype = 1),
# Axis
axis.line = ggplot2::element_line(
colour = base_color, linewidth = base_line_size),
axis.line.x = NULL,
axis.line.y = NULL,
axis.ticks = ggplot2::element_line(color = base_color, linewidth = base_line_size),
axis.ticks.length = ggplot2::unit(base_size / 2.5, "pt"),
axis.ticks.length.x = NULL,
axis.ticks.length.x.top = NULL,
axis.ticks.length.x.bottom = NULL,
axis.ticks.length.y = NULL,
axis.ticks.length.y.left = NULL,
axis.ticks.length.y.right = NULL,
axis.title = ggplot2::element_text(color = base_color),
axis.title.x = ggplot2::element_text(
angle = 0, margin = ggplot2::margin(t = base_size * 0.6), vjust = 1),
axis.title.x.top = ggplot2::element_text(
margin = ggplot2::margin(b = base_size * 0.6), vjust = 0),
axis.title.y = ggplot2::element_text(
angle = 90, margin = ggplot2::margin(r = base_size * 0.6), vjust = 1),
axis.title.y.right = ggplot2::element_text(
angle = -90, margin = ggplot2::margin(l = base_size * 0.6), vjust = 0),
axis.text = ggplot2::element_text(
size = ggplot2::rel(0.95), color = base_color),
axis.text.x = ggplot2::element_text(
margin = ggplot2::margin(t = 0.8 * base_size / 4), vjust = 0.25),
axis.text.x.top = ggplot2::element_text(
margin = ggplot2::margin(b = 0.8 * base_size / 4),vjust = 0),
axis.text.y = ggplot2::element_text(
margin = ggplot2::margin(r = 0.8 * base_size / 4), hjust = 0.25),
axis.text.y.right = ggplot2::element_text(
margin = ggplot2::margin(l = 0.8 * base_size / 4), hjust = 0),
# Legend
legend.title = ggplot2::element_text(
color = base_color, margin = ggplot2::margin(b = base_size * 0.4)),
legend.title.align = 0,
legend.text = ggplot2::element_text(
color = base_color, size = ggplot2::rel(0.8)),
legend.background = ggplot2::element_blank(),
legend.key = ggplot2::element_blank(),
legend.key.size = ggplot2::unit(1.2, "lines"),
legend.key.width = ggplot2::unit(base_size * 1.8, "pt"),
legend.spacing = ggplot2::unit(base_size, "pt"),
legend.spacing.x = NULL,
legend.spacing.y = NULL,
legend.margin = ggplot2::margin(
base_size/2, base_size/2, base_size/2, base_size/2),
legend.position = "right",
legend.justification = "center",
legend.text.align = NULL,
legend.direction = NULL,
legend.box = NULL,
legend.box.margin = ggplot2::margin(0, 0, 0, 0, "cm"),
legend.box.background = ggplot2::element_blank(),
legend.box.spacing = ggplot2::unit(base_size, "pt"),
# Strip
strip.background = ggplot2::element_blank(),
strip.text = ggplot2::element_text(color = base_color),
strip.text.x = ggplot2::element_text(
margin = ggplot2::margin(b = base_size / 5, t = base_size / 5)),
strip.text.y = ggplot2::element_text(
angle = -90, margin = ggplot2::margin(l = base_size / 5, r = base_size / 5)),
strip.text.y.left = ggplot2::element_text(angle = 90),
strip.placement = "inside",
strip.placement.x = NULL,
strip.placement.y = NULL,
strip.switch.pad.grid = ggplot2::unit(base_size / 4, "pt"),
strip.switch.pad.wrap = ggplot2::unit(base_size / 4, "pt"),
# Panel
panel.border = ggplot2::element_blank(),
panel.grid = ggplot2::element_blank(),
panel.grid.minor = ggplot2::element_blank(),
panel.grid.major = ggplot2::element_blank(),
panel.background = ggplot2::element_blank(),
panel.spacing = ggplot2::unit(base_size / 2, "pt"),
panel.spacing.x = NULL,
panel.spacing.y = NULL,
panel.ontop = FALSE,
# Plot
plot.background = ggplot2::element_rect(fill = "transparent", colour = NA),
plot.title = ggplot2::element_text(
size = ggplot2::rel(1.2), hjust = 0.5, vjust = 1,
margin = ggplot2::margin(b = base_size)
),
plot.title.position = "panel",
plot.margin = ggplot2::margin(base_size/2, base_size/2, base_size/2, base_size/2),
complete = TRUE
)
}
#' @title Minimalist risk table theme for ggplot2
#' @description A minimalist \code{ggplot2} theme which removes most background elements and
#' lines.
#' @param base_size A numeric. Base font size.
#' @param base_family A numeric. Base font family.
#' @param base_color A character. Base color for lines and text.
#' @param base_line_size A numeric. Base line element size.
#' @param base_rect_size A numeric. Base rectangle element size.
#' @note Recommend exporting as PNG or TIFF to preserve
#' background transparency.
#' @seealso \code{\link{ggrisktable}}
#' @export
theme_risk <- function(
base_size = 12,
base_family = NULL,
base_color = "black",
base_line_size = base_size/12,
base_rect_size = base_size/12
) {
ggplot2::theme_bw(
base_size = base_size,
base_family = base_family,
base_line_size = base_line_size,
base_rect_size = base_rect_size
) %+replace%
ggplot2::theme(
# Defaults
text = ggplot2::element_text(
family = base_family,
color = base_color,
size = base_size,
lineheight = 0.9,
hjust = 0.5,
vjust = 0.5,
angle = 0,
margin = ggplot2::margin(),
debug = FALSE),
line = ggplot2::element_line(
color = base_color,
linewidth = base_line_size,
linetype = 1,
lineend = "square"),
rect = ggplot2::element_rect(
fill = "transparent",
colour = base_color,
linewidth = base_rect_size,
linetype = 1),
# Axis
axis.line = ggplot2::element_blank(),
axis.line.x = NULL,
axis.line.y = NULL,
axis.ticks = ggplot2::element_blank(),
axis.title = ggplot2::element_blank(),
axis.title.x = NULL,
axis.title.x.top = NULL,
axis.title.y = NULL,
axis.title.y.right = NULL,
axis.text = ggplot2::element_text(
size = ggplot2::rel(0.95), color = base_color),
axis.text.x = ggplot2::element_blank(),
axis.text.x.top = ggplot2::element_blank(),
axis.text.y = ggplot2::element_text(
margin = ggplot2::margin(r = 0.8 * base_size / 4), hjust = 0.25),
axis.text.y.right = ggplot2::element_text(
margin = ggplot2::margin(l = 0.8 * base_size / 4), hjust = 0),
# Legend
legend.title = ggplot2::element_blank(),
legend.title.align = 0,
legend.text = ggplot2::element_text(
color = base_color, size = ggplot2::rel(0.8)),
legend.background = ggplot2::element_blank(),
legend.key = ggplot2::element_blank(),
legend.key.size = ggplot2::unit(1.2, "lines"),
legend.key.width = ggplot2::unit(base_size * 1.8, "pt"),
legend.spacing = ggplot2::unit(base_size, "pt"),
legend.spacing.x = NULL,
legend.spacing.y = NULL,
legend.margin = ggplot2::margin(
base_size/2, base_size/2, base_size/2, base_size/2),
legend.position = "right",
legend.justification = "center",
legend.text.align = NULL,
legend.direction = NULL,
legend.box = NULL,
legend.box.margin = ggplot2::margin(0, 0, 0, 0, "cm"),
legend.box.background = ggplot2::element_blank(),
legend.box.spacing = ggplot2::unit(base_size, "pt"),
# Strip
strip.background = ggplot2::element_blank(),
strip.text = ggplot2::element_text(color = base_color),
strip.text.x = ggplot2::element_text(
margin = ggplot2::margin(b = base_size / 5, t = base_size / 5)),
strip.text.y = ggplot2::element_text(
angle = -90, margin = ggplot2::margin(l = base_size / 5, r = base_size / 5)),
strip.text.y.left = ggplot2::element_text(angle = 90),
strip.placement = "inside",
strip.placement.x = NULL,
strip.placement.y = NULL,
strip.switch.pad.grid = ggplot2::unit(base_size / 4, "pt"),
strip.switch.pad.wrap = ggplot2::unit(base_size / 4, "pt"),
# Panel
panel.border = ggplot2::element_blank(),
panel.grid = ggplot2::element_blank(),
panel.grid.minor = ggplot2::element_blank(),
panel.grid.major = ggplot2::element_blank(),
panel.background = ggplot2::element_blank(),
panel.spacing = ggplot2::unit(base_size / 2, "pt"),
panel.spacing.x = NULL,
panel.spacing.y = NULL,
panel.ontop = FALSE,
# Plot
plot.background = ggplot2::element_rect(fill = "transparent", colour = NA),
plot.title = ggplot2::element_text(
size = ggplot2::rel(1.2), hjust = 0.5, vjust = 1,
margin = ggplot2::margin(b = base_size)
),
plot.title.position = "panel",
plot.margin = ggplot2::margin(base_size/1.5, base_size/2, base_size/2, base_size/2),
complete = TRUE
)
}
#' @title Add a panel border to a ggplot2 plot
#' @description A simple \code{ggplot2} theme which replaces the axis lines with
#' a bordered panel.
#' @param base_size A numeric. Base size. Used to calculate line size and spacing.
#' @param base_color A character. Base color for lines.
#' @note This should be placed after the primary theme for the plot.
#' @examples
#' library(ggplot2)
#'
#' ggplot(datasets::mtcars, aes(x = wt, y = hp, color = as.factor(cyl))) +
#' geom_point() +
#' facet_wrap(~as.logical(am)) +
#' theme_basic() +
#' panel_border()
#' @export
panel_border <- function (base_size = 12, base_color = NULL) {
ggplot2::theme(
axis.line = ggplot2::element_blank(),
panel.border = ggplot2::element_rect(color = base_color, linewidth = base_size / 6),
panel.spacing = ggplot2::unit(base_size / 12, 'lines')
)
}
|
/scratch/gouwar.j/cran-all/cranData/utile.visuals/R/themes.R
|
.tabulate_at_risk <- function(fit = NULL, times = NULL) {
fit_summary <- summary(fit, times = times)
dplyr::bind_cols(
strata = as.factor(
if (is.null(fit$strata)) rep('All', length(times))
else
purrr::map_chr(
as.character(fit_summary$strata),
~ strsplit(.x, '=')[[1]][2]
)
),
time = fit_summary$time,
n.risk = fit_summary$n.risk
)
}
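# Illustrative sketch (hypothetical values; this helper is internal and not exported):
# for a survfit object with two strata and times c(0, 25, 50), the result is a
# tibble with one row per stratum/time combination and the columns
# strata (factor), time (numeric), and n.risk (numeric), e.g.
#
#   .tabulate_at_risk(fit, times = c(0, 25, 50))
#
# ggrisktable() maps these columns to the y, x, and label aesthetics, respectively.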
|
/scratch/gouwar.j/cran-all/cranData/utile.visuals/R/utils.R
|
################################################################################
# #
# utility and value function package #
# ================================== #
# #
# version 1.4 Peter Reichert 10.01.2017 #
# #
################################################################################
# ==============================================================================
# utility aggregation functions
# ==============================================================================
utility.aggregate.add <- function(u,par) # par[i]: weight of u[i]
{
# check input:
if ( length(u) != length(par) )
{
warning("Length of utilities/values and weights not equal: ",
length(u)," ",length(par))
return(NA)
}
ind <- which(!is.na(u))
if ( length(ind) == 0 ) return(NA)
if ( sum( par < 0 ) > 0 )
{
warning("Parameter of additive aggregation smaller than zero")
return(NA)
}
# calculate aggregated value
s <- sum(par[ind])
if ( s <= 0 ) return(NA)
u.agg <- sum(par[ind]*u[ind])/s
return(as.numeric(u.agg))
}
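# Illustration (hedged sketch; the numbers are invented for demonstration):
#
#   utility.aggregate.add(u = c(0.2, 0.8, NA), par = c(1, 1, 2))
#   # NA components and their weights are dropped and the remaining weights
#   # renormalized: (1*0.2 + 1*0.8)/(1 + 1) = 0.5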
utility.aggregate.min <- function(u,par=NA)
{
# check input:
ind <- which(!is.na(u))
if ( length(ind) == 0 ) return(NA)
# calculate aggregated value
u.agg <- min(u[ind])
return(as.numeric(u.agg))
}
utility.aggregate.max <- function(u,par=NA)
{
# check input:
ind <- which(!is.na(u))
if ( length(ind) == 0 ) return(NA)
# calculate aggregated value
u.agg <- max(u[ind])
return(as.numeric(u.agg))
}
utility.aggregate.mult <- function(u,par)
{
# check input:
if ( length(u) != length(par) )
{
warning("Length of utilities/values and weights not equal: ",
length(u)," ",length(par))
return(NA)
}
ind <- which(!is.na(u))
if ( length(ind) == 0 ) return(NA)
if ( length(ind) == 1 )
{
return(as.numeric(u[ind]))
}
if ( sum( par < 0 | par > 1 ) > 0 )
{
warning("Parameter of multiplicative aggregation",
"smaller than zero or larger than unity")
return(NA)
}
# function used in uniroot to determine the scaling constant k:
utility.aggregate.mult.root <- function(k,ki)
{
res <- 1
for ( i in 1:length(ki) )
{
res <- res * ( 1 + k * ki[i] )
}
res <- 1 + k - res
return(res)
}
# define numerical parameter:
eps <- 1e-3 # maximum deviation of sum(par) from unity to use additive fcn
# rescale weights:
s <- sum(par)
fact <- s/sum(par[ind])
ki <- fact*par[ind]
# calculate additive utility function if sum close to unity:
if ( s > 1-eps & s < 1+eps )
{
return(utility.aggregate.add(u,par))
}
# calculate multiplicative utility function if sum not close to unity:
# calculate k:
# (Keeney and Raiffa, Decisions with multiple objectives, 1976,
# pp. 307, 347-348)
if ( s < 1 )
{
lower <- 1
i <- 0
while ( utility.aggregate.mult.root(lower,ki) < 0 )
{
lower <- 0.1*lower
i <- i+1
if ( i > 20 )
{
warning("Problem solving equation for scaling constant")
return(NA)
}
}
upper <- 1
i <- 0
while ( utility.aggregate.mult.root(upper,ki) > 0 )
{
upper <- 10*upper
i <- i+1
if ( i > 20 )
{
warning("Problem solving equation for scaling constant")
return(NA)
}
}
k <- uniroot(utility.aggregate.mult.root,ki=ki,
lower=lower,upper=upper)$root
}
else # s > 1
{
upper <- -0.1
i <- 0
while ( utility.aggregate.mult.root(upper,ki) < 0 )
{
upper <- 0.1*upper
i <- i+1
if ( i > 20 )
{
warning("Problem solving equation for scaling constant")
return(NA)
}
}
k <- uniroot(utility.aggregate.mult.root,ki=ki,
lower=-1,upper=upper)$root
}
# evaluate multiplicative utility function:
u.agg <- 1
for ( i in 1:length(ki) )
{
if ( !is.na(u[ind][i]) ) u.agg <- u.agg * (k*ki[i]*u[ind][i]+1)
}
u.agg <- (u.agg - 1)/k
# eliminate values out of range due to numerical inaccuracies:
u.agg <- ifelse(u.agg < 0, 0, u.agg)
u.agg <- ifelse(u.agg > 1, 1, u.agg)
return(as.numeric(u.agg))
}
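# Illustration (hedged sketch; weights chosen arbitrarily):
#
#   utility.aggregate.mult(u = c(0.5, 0.5), par = c(0.4, 0.4))
#   # sum(par) = 0.8 is not close to unity, so the scaling constant k solves
#   # 1 + k = (1 + 0.4*k)^2, giving k = 1.25, and the aggregate is
#   # ((1 + 1.25*0.4*0.5)^2 - 1)/1.25 = 0.45, below the additive value of 0.5.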
utility.aggregate.geo <- function(u,par)
{
# check input:
if ( length(u) != length(par) )
{
warning("Length of utilities/values and weights not equal: ",
length(u)," ",length(par))
return(NA)
}
ind <- which(!is.na(u))
if ( length(ind) == 0 ) return(NA)
if ( sum( par < 0 ) > 0 )
{
warning("Parameter of geometric aggregation smaller than zero")
return(NA)
}
# calculate aggregated value
s <- sum(par[ind])
if ( s <= 0 ) return(NA)
u.agg <- 1
for ( i in 1:length(ind) )
{
if ( par[ind][i]>0 ) u.agg <- u.agg*u[ind][i]^(par[ind][i]/s)
}
return(as.numeric(u.agg))
}
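# Illustration (hedged sketch):
#
#   utility.aggregate.geo(u = c(0.25, 1), par = c(1, 1))
#   # weighted geometric mean with normalized weights 0.5/0.5:
#   # 0.25^0.5 * 1^0.5 = 0.5; a zero in any component with positive weight
#   # forces the aggregate to zero.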
utility.aggregate.revgeo <- function(u,par)
{
return(1-utility.aggregate.geo(1-u,par))
}
utility.aggregate.geooff <- function(u,par)
{
n <- length(u)
# check input:
if ( length(par) != n + 1)
{
warning("Length of parameter vector should be length of utilities/values (for weights) plus one (for offset): ",
length(par)," ",n)
return(NA)
}
u <- utility.aggregate.geo(u+par[n+1],par[1:n])-par[n+1]
# correct for numerical errors due to differences of "large" numbers
u <- ifelse(u>0,u,0)
u <- ifelse(u<1,u,1)
return(u)
}
utility.aggregate.revgeooff <- function(u,par)
{
return(1-utility.aggregate.geooff(1-u,par))
}
utility.aggregate.cobbdouglas <- function(u,par)
{
return(utility.aggregate.geo(u,par))
}
utility.aggregate.harmo <- function(u,par)
{
# check input:
if ( length(u) != length(par) )
{
warning("Length of utilities/values and weights not equal: ",
length(u)," ",length(par))
return(NA)
}
ind <- which(!is.na(u))
if ( length(ind) == 0 ) return(NA)
if ( sum( par < 0 ) > 0 )
{
warning("Parameter of harmonic aggregation smaller than zero")
return(NA)
}
# calculate aggregated value
s <- sum(par[ind])
if ( s <= 0 ) return(NA)
if ( sum(u==0) > 0 ) return(0)
u.agg <- s / sum(par[ind]/u[ind])
return(as.numeric(u.agg))
}
utility.aggregate.revharmo <- function(u,par)
{
return(1-utility.aggregate.harmo(1-u,par))
}
utility.aggregate.harmooff <- function(u,par)
{
n <- length(u)
# check input:
if ( length(par) != n + 1)
{
warning("Length of parameter vector should be length of utilities/values (for weights) plus one (for offset): ",
length(par)," ",n)
return(NA)
}
return(utility.aggregate.harmo(u+par[n+1],par[1:n])-par[n+1])
}
utility.aggregate.revharmooff <- function(u,par)
{
return(1-utility.aggregate.harmooff(1-u,par))
}
utility.aggregate.mix <- function(u,par) # par[i]: weight of u[i]
{ # par[n+j]: weight of technique j
# check input: # (j = add, min, geo)
n <- length(u)
if ( n+3 != length(par) )
{
warning("Length of parameter vector must be equal to",
"length of utilities/values plus three:",
length(par),length(u))
return(NA)
}
s <- sum(par[n+(1:3)])
if ( s <= 0 | sum(par[n+(1:3)]<0) > 0 )
{
warning("Weights of aggregation techniques to average",
"cannot be negative or not all of them equal to zero")
return(NA)
}
u.add <- 0; if ( par[n+1] != 0 ) u.add <- utility.aggregate.add(u,par[1:n])
u.min <- 0; if ( par[n+2] != 0 ) u.min <- utility.aggregate.min(u)
u.geo <- 0; if ( par[n+3] != 0 ) u.geo <- utility.aggregate.geo(u,par[1:n])
if ( is.na(u.add) | is.na(u.min) | is.na(u.geo) ) return(NA)
u.agg <- (par[n+1]*u.add + par[n+2]*u.min + par[n+3]*u.geo)/s
return(u.agg)
}
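# Illustration of the parameter layout (hedged sketch): for n = 3 sub-values,
# par has length n + 3; the first n entries are the sub-value weights shared by
# the additive and geometric parts, the last three weight the techniques
# (add, min, geo) themselves.
#
#   utility.aggregate.mix(u = c(0.2, 0.6, 1.0), par = c(1, 1, 1, 1, 1, 0))
#   # = (1*utility.aggregate.add(u,c(1,1,1)) + 1*min(u) + 0)/2 = (0.6 + 0.2)/2 = 0.4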
utility.aggregate.addmin <- function(u,par)
{
n <- length(u)
if ( length(par) != n+1 )
{
warning("Length of parameter vector should be length of utilities/values ",
"(for weights) plus one (for weight between methods): ",
length(par), " ", n)
return(NA)
}
return( par[n+1] * utility.aggregate.add(u,par[1:n]) +
(1-par[n+1]) * utility.aggregate.min(u,NA))
}
utility.aggregate.addpower <- function(u,par)
{
n <- length(u)
if ( length(par) != n+1 )
{
warning("Length of parameter vector not equal to number of utilities/values + 1:",
" par: ",length(par)," u: ", length(u))
return(NA)
}
u.loc <- u^par[n+1]
return(utility.aggregate.add(u.loc,par[1:n])^(1/par[n+1]))
}
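# Illustration (hedged sketch): the last parameter is the exponent applied to
# the sub-values before, and inverted after, the additive aggregation.
#
#   utility.aggregate.addpower(u = c(0.25, 1), par = c(1, 1, 2))
#   # = ((0.25^2 + 1^2)/2)^(1/2) = sqrt(0.53125), approximately 0.729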
utility.aggregate.revaddpower <- function(u,par)
{
n <- length(u)
if ( length(par) != n+1 )
{
warning("Length of parameter vector not equal to number of utilities/values + 1:",
" par: ",length(par)," u: ", length(u))
return(NA)
}
u.loc <- (1-u)^par[n+1]
return(1-utility.aggregate.add(u.loc,par[1:n])^(1/par[n+1]))
}
utility.aggregate.addsplitpower <- function(u,par)
{
g.trans <- function(v,alpha,split) { return(ifelse(v<=split, split*(v/split)^alpha, 1-(1-split)*((1-v)/(1-split))^alpha)) }
g.trans.inv <- function(v,alpha,split) { return(ifelse(v<=split, split*(v/split)^(1/alpha), 1-(1-split)*((1-v)/(1-split))^(1/alpha))) }
n <- length(u)
if ( length(par) != n+2 )
{
warning("Length of parameter vector not equal to number of utilities/values + 2:",
" par: ",length(par)," u: ", length(u))
return(NA)
}
u.loc <- g.trans(u,par[n+1],par[n+2])
return(g.trans.inv(utility.aggregate.add(u.loc,par[1:n]),par[n+1],par[n+2]))
}
utility.aggregate.revaddsplitpower <- function(u,par)
{
g.trans <- function(v,alpha,split) { return(ifelse(v<=split, split*(v/split)^alpha, 1-(1-split)*((1-v)/(1-split))^alpha)) }
g.trans.inv <- function(v,alpha,split) { return(ifelse(v<=split, split*(v/split)^(1/alpha), 1-(1-split)*((1-v)/(1-split))^(1/alpha))) }
n <- length(u)
if ( length(par) != n+2 )
{
warning("Length of parameter vector not equal to number of utilities/values + 2:",
" par: ",length(par)," u: ", length(u))
return(NA)
}
u.loc <- g.trans(1-u,par[n+1],par[n+2])
return(1-g.trans.inv(utility.aggregate.add(u.loc,par[1:n]),par[n+1],par[n+2]))
}
utility.aggregate.bonusmalus <- function(u,par,def.agg="utility.aggregate.add")
{
# assignments and checks:
n <- length(u)
par.bonusmalus <- par[length(par)-n+1:n]
ind.main <- which(is.na(par.bonusmalus)); if ( length(ind.main) == 0 ) return(NA)
ind.bonus <- which(par.bonusmalus > 0)
ind.malus <- which(par.bonusmalus < 0)
# evaluate main sub-objectives:
if ( length(ind.main) == 1 )
{
u.main <- u[ind.main]
}
else
{
u.main <- as.numeric(apply(as.matrix(u[ind.main],ncol=1),2,def.agg,par[1:(length(par)-n)]))
}
if ( is.na(u.main) ) return(NA)
# check for active bonus:
ind.bonus.active <- numeric(0)
if ( length(ind.bonus) > 0 )
{
ind.bonus.active <- ind.bonus[u[ind.bonus] > u.main]
ind.bonus.active <- as.numeric(ind.bonus.active[!is.na(ind.bonus.active)]) # empty vector was logical(0) rather than numeric(0)
}
# check for active malus:
ind.malus.active <- numeric(0)
if ( length(ind.malus) > 0 )
{
ind.malus.active <- ind.malus[u[ind.malus] < u.main]
ind.malus.active <- as.numeric(ind.malus.active[!is.na(ind.malus.active)]) # empty vector was logical(0) rather than numeric(0)
}
if ( length(ind.bonus.active) + length(ind.malus.active) == 0 ) return(u.main)
u.agg <- utility.aggregate.add(c(u.main,u[c(ind.bonus.active,ind.malus.active)]),
c(1,abs(par.bonusmalus)[c(ind.bonus.active,ind.malus.active)]))
return(u.agg)
}
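# Illustration of the parameter layout (hedged sketch): the last n entries of
# par flag each sub-value as a main objective (NA), a bonus (> 0) or a malus
# (< 0); the leading entries are passed to the default aggregation of the
# main objectives.
#
#   utility.aggregate.bonusmalus(u = c(0.4, 0.6, 0.9), par = c(1, 1, NA, NA, 0.5))
#   # main value = additive mean of 0.4 and 0.6 = 0.5; the bonus (0.9 > 0.5) is
#   # active, so the result is (1*0.5 + 0.5*0.9)/1.5, approximately 0.633.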
|
/scratch/gouwar.j/cran-all/cranData/utility/R/utility.aggregate.r
|
################################################################################
# #
# utility and value function package #
# ================================== #
# #
# version 1.4.3 Peter Reichert 20.09.2018 #
# #
################################################################################
# ==============================================================================
# utility node for (potentially) aggregating utility and/or end nodes:
# class "utility.aggregation"
# ==============================================================================
# constructor:
# ------------
utility.aggregation.create <-
function(name.node, # character(1)
nodes, # list of nodes
name.fun, # name of aggreg. fun f(u,par)
par, # numeric(n)
names.par = rep(NA,length(par)),
required = FALSE,
num.required = 1,
col = "black",
shift.levels = 0,
add.arg.fun = NULL)
{
# consistency checks:
check.ok <- T
if ( length(nodes) < 1 )
{
cat("*** Warning: No nodes provided","\n")
check.ok <- F
}
utility <- nodes[[1]]$utility
if ( length(nodes) > 1 )
{
for ( i in 2:length(nodes) )
{
if ( nodes[[i]]$utility != utility )
{
cat("*** Warning: Mixted value and utility nodes",
"cannot be aggregated","\n")
check.ok <- F
}
}
}
if ( ! utility.check.name(name.node,nodes) )
{
cat("*** Warning: Node with same name \"",name.node,"\" exists already ",
"as sub-node","\n")
check.ok <- F
}
if ( ! check.ok )
{
cat("*** Warning: Node \"",name.node,"\" could not be constructed","\n",
sep="")
return(NA)
}
# construct class:
node <- list()
node$name <- name.node
node$description <- "utility/value aggregation node"
node$type <- "aggregationnode"
node$nodes <- nodes
node$name.fun <- name.fun
node$par <- par
node$names.par <- names.par
node$required <- required
node$num.required <- num.required
node$utility <- utility
node$col <- col
node$shift.levels <- shift.levels
node$add.arg.fun <- add.arg.fun
class(node) <- "utility.aggregation"
# return class
#cat(node$description," \"",name.node,"\" constructed","\n",sep="")
return(node)
}
# update parameter values:
# ------------------------
updatepar.utility.aggregation <- function(x,par=NA,...)
{
node <- x
# check availabiliy of named parameter vector:
if ( length(names(par)) == 0 ) return(node)
# update conditional nodes:
n <- node
for ( i in 1:length(n$par) )
{
if ( ! is.na(n$names.par[i]) )
{
ind <- which(n$names.par[i] == names(par) )
if ( length(ind) > 1 )
{
warning("Node \"",node$name,"\": multiple occurrences of parameter",
names(par)[ind[1]],sep="")
ind <- ind[1]
}
if ( length(ind) == 1 )
{
n$par[i] <- par[ind]
}
}
}
for ( i in 1:length(n$nodes) )
{
n$nodes[[i]] <- updatepar(n$nodes[[i]],par)
}
# return updated node:
return(n)
}
# evaluate values or utilities:
# -----------------------------
evaluate.utility.aggregation <- function(x,
attrib, # data.frame
par=NA,
...)
{
node <- x
# check input:
if ( ! is.data.frame(attrib) )
{
warning("Node \"",node$name,"\": attrib must be a data frame",sep="")
return(NA)
}
# update parameters:
n <- updatepar(node,par)
# evaluate nodes:
u <- evaluate(n$nodes[[1]],attrib)
ind <- !is.na(u) & (u<0 | u>1)
if ( sum(ind) > 0 )
{
warning("Node \"",node$name,"\": node \"",n$nodes[[1]]$name,"\" produced values outside [0,1]: ",
paste(u[ind],collapse=","),sep="")
}
if ( ! is.data.frame(u) )
{
u <- as.data.frame(u)
names(u) <- n$nodes[[1]]$name
}
required <- n$nodes[[1]]$required
nodenames <- n$nodes[[1]]$name
if ( length(n$nodes) > 1 )
{
for ( i in 2:length(n$nodes) )
{
u.i <- evaluate(n$nodes[[i]],attrib)
      ind <- !is.na(u.i) & (u.i<0 | u.i>1)
if ( sum(ind) > 0 )
{
warning("Node \"",node$name,"\": node \"",n$nodes[[i]]$name,"\" produced values outside [0,1]: ",
paste(u.i[ind],collapse=","),sep="")
}
if ( ! is.data.frame(u.i) )
{
u.i <- as.data.frame(u.i)
names(u.i) <- n$nodes[[i]]$name
}
u <- cbind(u,u.i)
nodenames[i] <- n$nodes[[i]]$name
required[i] <- n$nodes[[i]]$required
}
}
if ( length(unique(nodenames)) != length(nodenames) )
{
warning("Node \"",node$name,"\": node names are not unique:",
paste(nodenames,collapse=","))
u.agg <- as.data.frame(rep(NA,nrow(attrib)))
names(u.agg) <- n$name
u <- cbind(u.agg,u)
rownames(u) <- rownames(attrib)
return(u)
}
# return results:
u.agg.input <- as.matrix(u[,nodenames])
if ( length(n$add.arg.fun) > 0 )
{
u.agg <- apply(u.agg.input,1,n$name.fun,n$par,n$add.arg.fun)
}
else
{
u.agg <- apply(u.agg.input,1,n$name.fun,n$par)
}
res.ok <- apply(u.agg.input,1,utility.check.required,
required,n$num.required)
u.agg <- ifelse(res.ok,u.agg,NA)
u.agg <- as.data.frame(u.agg)
names(u.agg) <- n$name
ind <- !is.na(u.agg) & (u.agg<0 | u.agg>1)
if ( sum(ind) > 0 )
{
warning("Node \"",node$name,"\": aggregation technique \"",n$name.fun,"\" produced values outside of [0,1]: ",
paste(u.agg[ind],collapse=","),sep="")
}
u <- cbind(u.agg,u)
rownames(u) <- rownames(attrib)
return(u)
}
# print:
# -----
print.utility.aggregation <- function(x,...)
{
cat(paste(rep("-",50),collapse=""),"\n")
summary(x,...)
cat(paste(rep("-",50),collapse=""),"\n")
}
# summary:
# --------
summary.utility.aggregation <- function(object,...)
{
node <- object
cat(node$name,"\n")
cat(paste(rep("-",nchar(node$name)),collapse=""),"\n")
cat(node$description,"\n")
for ( i in 1:length(node$nodes) )
{
string1 <- "nodes: "
if ( i > 1 ) string1 <- " "
string2 <- node$nodes[[i]]$name
if ( node$nodes[[i]]$type == "endnode" )
{
num.space <- max(1,15-nchar(node$nodes[[i]]$name))
string2 <- paste(string2,
paste(rep(" ",num.space),collapse=""),
"(end node)",sep="")
}
cat(string1,string2,"\n")
}
cat("function: ",node$name.fun,"\n")
names.par <- ifelse(is.na(node$names.par),"",node$names.par)
cat("parameters:","\n")
print(data.frame(names.par=names.par,par=node$par))
if ( length(node$add.arg.fun) > 0 ) print(node$add.arg.fun)
funtype <- "utility"; if ( !node$utility ) funtype <- "value"
cat("function type: ",funtype,"\n")
cat("required: ",node$required,"\n")
cat("required nodes: ",node$num.required,"\n")
for ( i in 1:length(node$nodes) )
{
cat("***","\n")
summary(node$nodes[[i]])
}
}
# plot:
# -----
plot.utility.aggregation <-
function(x,
u = NA,
uref = NA,
par = NA,
type = c("hierarchy","table","node","nodes"),
nodes = NA,
col = utility.calc.colors(),
gridlines = c(0.2,0.4,0.6,0.8),
main = "",
cex.main = 1,
cex.nodes = 1,
cex.attrib = 1,
f.reaches = 0.2,
f.nodes = 0.2,
with.attrib = TRUE,
levels = NA,
plot.val = TRUE,
col.val = "black",
lwd.val = 1,
print.val = TRUE,
two.lines = FALSE,
ticks = c(0,0.2,0.4,0.6,0.8,1),
...)
{
node <- x
n <- updatepar(node,par)
utility.plot(node = n,
u = u,
uref = uref,
type = type,
nodes = nodes,
col = col,
gridlines = gridlines,
main = main,
cex.main = cex.main,
cex.nodes = cex.nodes,
cex.attrib = cex.attrib,
f.reaches = f.reaches,
f.nodes = f.nodes,
with.attrib = with.attrib,
levels = levels,
plot.val = plot.val,
col.val = col.val,
lwd.val = lwd.val,
print.val = print.val,
two.lines = two.lines,
ticks = ticks,
...)
}
|
/scratch/gouwar.j/cran-all/cranData/utility/R/utility.aggregation.r
|
################################################################################
# #
# utility and value function package #
# ================================== #
# #
# version 1.4 Peter Reichert 28.01.2016 #
# #
################################################################################
# ==============================================================================
# registration of member functions
# ==============================================================================
updatepar <- function(x, ...) UseMethod("updatepar")
evaluate <- function(x, ...) UseMethod("evaluate")
# in addition, we support the functions plot, print and summary
# ==============================================================================
# auxiliary functions
# ==============================================================================
# colors
# ======
utility.calc.colors <- function(n=5)
{
if ( n < 2 ) return("black")
if ( n < 3 ) return(c("tomato","blue"))
if ( n < 4 ) return(c("tomato","yellow","blue"))
if ( n < 5 ) return(c("tomato","yellow","green","blue"))
if ( n < 6 ) return(c("tomato","orange","yellow","lightgreen","lightblue"))
red <- col2rgb("tomato")/255
orange <- col2rgb("orange")/255
yellow <- col2rgb("yellow")/255
green <- col2rgb("lightgreen")/255
blue <- col2rgb("lightblue")/255
red.orange <- (2*red+orange)/3
orange.red <- (red+2*orange)/3
orange.yellow <- (2*orange+yellow)/3
yellow.orange <- (orange+2*yellow)/3
yellow.green <- (2*yellow+green)/3
green.yellow <- (yellow+2*green)/3
green.blue <- (2*green+blue)/3
blue.green <- (1.5*green+blue)/2.5
u <- (1:n)/(n+1)
cols <- rep(NA,n)
for ( i in 1:length(u) )
{
if( u[i]<0.2 )
{
col <- (1-u[i]/0.2) * red+
u[i]/0.2 * red.orange
}
if( 0.2<=u[i] & u[i]<0.4 )
{
col <- (1-(u[i]-0.2)/0.2) * orange.red +
(u[i]-0.2)/0.2 * orange.yellow
}
if( 0.4<=u[i] & u[i]<0.6 )
{
col <- (1-(u[i]-0.4)/0.2) * yellow.orange +
(u[i]-0.4)/0.2 * yellow.green
}
if( 0.6<=u[i] & u[i]<0.8 )
{
col <- (1-(u[i]-0.6)/0.2) * green.yellow +
(u[i]-0.6)/0.2 * green.blue
}
if( 0.8<=u[i] )
{
col <- (1-(u[i]-0.8)/0.2) * blue.green +
(u[i]-0.8)/0.2 * blue
}
cols[i] <- rgb(col[1],col[2],col[3])
}
return(cols)
}
utility.get.colors <- function(u,col=utility.calc.colors())
{
col.ind <- 1 + floor(u * length(col) + 1e-15)
col.ind <- ifelse(col.ind>length(col),length(col),col.ind)
cols <- col[col.ind]
cols <- ifelse(is.na(col.ind),"white",cols)
return(cols)
}
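# Illustration (hedged sketch): map values in [0,1] onto the default
# five-color scale used by the plotting functions.
#
#   utility.get.colors(c(0.1, 0.5, 0.9, NA))
#   # -> "tomato", "yellow", "lightblue", "white" with the default
#   #    utility.calc.colors() palette (NA values are drawn white).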
# 2d interpolation
# ================
utility.get_y_belowandabove <- function(x,y,xout,yref)
{
y.res <- c(below=NA,above=NA)
if ( xout<min(x) | xout>max(x) ) return(y.res)
x.lower <- x[-length(x)]
x.upper <- x[-1]
ind <- which(ifelse( (xout>=x.lower & xout<=x.upper) |
(xout<=x.lower & xout>=x.upper) ,T,F ))
if ( length(ind) == 0 ) return(y.res)
y.vals <- rep(NA,length(ind))
for ( i in 1:length(ind) )
{
if ( x[ind[i]+1] == x[ind[i]] )
{
if ( (y[ind[i]]>yref) & (y[ind[i]+1]>yref) )
{
y.vals[i] <- min(y[ind[i]],y[ind[i]+1])
}
else
{
if ( (y[ind[i]]<yref) & (y[ind[i]+1]<yref) )
{
y.vals[i] <- max(y[ind[i]],y[ind[i]+1])
}
else
{
y.vals[i] <- yref
}
}
}
else
{
y.vals[i] <- y[ind[i]] + (xout-x[ind[i]])/(x[ind[i]+1]-x[ind[i]])*
(y[ind[i]+1]-y[ind[i]])
}
}
if ( sum(y.vals<=yref) > 0 ) y.res["below"] <- max(y.vals[y.vals<=yref])
if ( sum(y.vals>=yref) > 0 ) y.res["above"] <- min(y.vals[y.vals>=yref])
return(y.res)
}
utility.intpol.multiple <- function(x,xs,ys)
{
ind <- !is.na(xs) & !is.na(ys)
if ( sum(ind) < 2 ) return(NA)
xs.loc <- xs[ind]
ys.loc <- ys[ind]
ind.below <- which(xs.loc<=x)
if ( length(ind.below) == 0 ) return(NA)
ind.above <- which(xs.loc>=x)
if ( length(ind.above) == 0 ) return(NA)
xs.below <- xs.loc[ind.below]
ys.below <- ys.loc[ind.below]
xs.above <- xs.loc[ind.above]
ys.above <- ys.loc[ind.above]
ind.max.below <- which.max(xs.below)
x.below <- xs.below[ind.max.below]
y.below <- ys.below[ind.max.below]
ind.min.above <- which.min(xs.above)
x.above <- xs.above[ind.min.above]
y.above <- ys.above[ind.min.above]
if ( x.above == x.below )
{
    y <- mean(c(y.above,y.below))
}
else
{
y <- ( y.above*(x-x.below) + y.below*(x.above-x) ) / (x.above-x.below)
}
return(y)
}
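# Illustration (hedged sketch): linear interpolation between the closest
# tabulated points below and above the target x.
#
#   utility.intpol.multiple(x = 1.5, xs = c(1, 2), ys = c(0.2, 0.6))
#   # -> (0.6*0.5 + 0.2*0.5)/1 = 0.4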
utility.intpol2d <- function(xy,isolines,levels,lead=0)
{
ind <- order(levels)
z <- apply(xy,1,utility.intpol2d.pair,isolines[ind],levels[ind],lead)
return(z)
}
utility.intpol2d.pair <- function(xy,isolines,levels,lead=0)
{
# initialize u:
z <- rep(NA,2)
nam <- c("x","y")
xy <- as.numeric(xy)
if( is.na(xy[1]) | is.na(xy[2]) ) return(NA)
for ( lead.current in 1:2 )
{
ind.x <- lead.current
ind.y <- 3-ind.x
nam.x <- nam[ind.x]
nam.y <- nam[ind.y]
if ( lead == 0 | lead == ind.x )
{
for ( i in 2:length(isolines) )
{
n.1 <- length(isolines[[i-1]][[nam.x]])
n.2 <- length(isolines[[i]][[nam.x]])
if ( xy[ind.x] >= min(isolines[[i-1]][[nam.x]]) &
xy[ind.x] <= max(isolines[[i-1]][[nam.x]]) )
{
y.1 <- utility.get_y_belowandabove(x = isolines[[i-1]][[nam.x]],
y = isolines[[i-1]][[nam.y]],
xout = xy[ind.x],
yref = xy[ind.y])
if ( xy[ind.x] >= min(isolines[[i]][[nam.x]]) &
xy[ind.x] <= max(isolines[[i]][[nam.x]]) )
{
# x coordinate of xy intersects contour lines at
# levels i-1 and i
y.2 <- utility.get_y_belowandabove(x = isolines[[i]][[nam.x]],
y = isolines[[i]][[nam.y]],
xout = xy[ind.x],
yref = xy[ind.y])
val <- utility.intpol.multiple(x = xy[ind.y],
xs = c(y.1,y.2),
ys = c(rep(levels[i-1],2),
rep(levels[i],2)))
if ( ! is.na(val) )
{
z[lead.current] <- val
break
}
}
else # within range of line at level i-1,
# outside of range at level i
{
if ( xy[ind.x] > max(isolines[[i]][[nam.x]]) )
{
# x coordinate of xy intersects contour line at
# level i-1 but is larger than maximum x at level i
ratio.1 <- NA
y.2.1 <- NA
z.2.1 <- NA
if ( xy[ind.x] < isolines[[i-1]][[nam.x]][1] )
{
ratio.1 <- (xy[ind.x]-isolines[[i-1]][[nam.x]][1])/
(isolines[[i]][[nam.x]][1]-
isolines[[i-1]][[nam.x]][1])
y.2.1 <- isolines[[i-1]][[nam.y]][1] +
ratio.1*(isolines[[i]][[nam.y]][1]-
isolines[[i-1]][[nam.y]][1])
z.2.1 <- levels[[i-1]] +
ratio.1*(levels[[i]]-levels[[i-1]])
}
ratio.n <- NA
y.2.n <- NA
z.2.n <- NA
if ( xy[ind.x] < isolines[[i-1]][[nam.x]][n.1] )
{
ratio.n <- (isolines[[i-1]][[nam.x]][n.1]-xy[ind.x])/
(isolines[[i-1]][[nam.x]][n.1]-
isolines[[i]][[nam.x]][n.2])
y.2.n <- isolines[[i-1]][[nam.y]][n.1] +
ratio.n*(isolines[[i]][[nam.y]][n.2]-
isolines[[i-1]][[nam.y]][n.1])
z.2.n <- levels[[i-1]] +
ratio.n*(levels[[i]]-levels[[i-1]])
}
val <- utility.intpol.multiple(x = xy[ind.y],
xs = c(y.1,y.2.1,y.2.n),
ys = c(rep(levels[i-1],2),
z.2.1,z.2.n))
if ( ! is.na(val) )
{
z[lead.current] <- val
break
}
}
else # xy[ind.x] < min(isolines[[i]][[nam.x]])
{
# x coordinate of xy intersects contour line
# at level i-1 but is smaller than minimum x at level i
ratio.1 <- NA
y.2.1 <- NA
z.2.1 <- NA
if ( xy[ind.x] > isolines[[i-1]][[nam.x]][1] )
{
ratio.1 <- (xy[ind.x]-isolines[[i-1]][[nam.x]][1])/
(isolines[[i]][[nam.x]][1]-
isolines[[i-1]][[nam.x]][1])
y.2.1 <- isolines[[i-1]][[nam.y]][1] +
ratio.1*(isolines[[i]][[nam.y]][1]-
isolines[[i-1]][[nam.y]][1])
z.2.1 <- levels[[i-1]] +
ratio.1*(levels[[i]]-levels[[i-1]])
}
ratio.n <- NA
y.2.n <- NA
z.2.n <- NA
if ( xy[ind.x] > isolines[[i-1]][[nam.x]][n.1] )
{
ratio.n <- (isolines[[i-1]][[nam.x]][n.1]-xy[ind.x])/
(isolines[[i-1]][[nam.x]][n.1]-
isolines[[i]][[nam.x]][n.2])
y.2.n <- isolines[[i-1]][[nam.y]][n.1] +
ratio.n*(isolines[[i]][[nam.y]][n.2]-
isolines[[i-1]][[nam.y]][n.1])
z.2.n <- levels[[i-1]] +
ratio.n*(levels[[i]]-levels[[i-1]])
}
val <- utility.intpol.multiple(x = xy[ind.y],
xs = c(y.1,y.2.1,y.2.n),
ys = c(rep(levels[i-1],2),
z.2.1,z.2.n))
if ( ! is.na(val) )
{
z[lead.current] <- val
break
}
}
}
}
else # outside of range of line at level i-1
{
if ( xy[ind.x] >= min(isolines[[i]][[nam.x]]) &
xy[ind.x] <= max(isolines[[i]][[nam.x]]) )
{
y.2 <- utility.get_y_belowandabove(x = isolines[[i]][[nam.x]],
y = isolines[[i]][[nam.y]],
xout = xy[ind.x],
yref = xy[ind.y])
if ( xy[ind.x] > max(isolines[[i-1]][[nam.x]]) )
{
# x coordinate of xy intersects isoline
# at level i but is larger than maximum x at level i-1
ratio.1 <- NA
y.1.1 <- NA
z.1.1 <- NA
if ( xy[ind.x] < isolines[[i]][[nam.x]][1] )
{
ratio.1 <- (xy[ind.x]-isolines[[i-1]][[nam.x]][1])/
(isolines[[i]][[nam.x]][1]-
isolines[[i-1]][[nam.x]][1])
y.1.1 <- isolines[[i-1]][[nam.y]][1] +
ratio.1*(isolines[[i]][[nam.y]][1]-
isolines[[i-1]][[nam.y]][1])
z.1.1 <- levels[[i-1]] +
ratio.1*(levels[[i]]-levels[[i-1]])
}
ratio.n <- NA
y.1.n <- NA
z.1.n <- NA
if ( xy[ind.x] < isolines[[i]][[nam.x]][n.2] )
{
ratio.n <- (isolines[[i-1]][[nam.x]][n.1]-xy[ind.x])/
(isolines[[i-1]][[nam.x]][n.1]-
isolines[[i]][[nam.x]][n.2])
y.1.n <- isolines[[i-1]][[nam.y]][n.1] +
ratio.n*(isolines[[i]][[nam.y]][n.2]-
isolines[[i-1]][[nam.y]][n.1])
z.1.n <- levels[[i-1]] +
ratio.n*(levels[[i]]-levels[[i-1]])
}
val <- utility.intpol.multiple(x = xy[ind.y],
xs = c(y.1.1,y.1.n,y.2),
ys = c(z.1.1,z.1.n,
rep(levels[i],2)))
if ( ! is.na(val) )
{
z[lead.current] <- val
break
}
}
else # xy[ind.x] < min(isolines[[i-1]][[nam.x]])
{
# x coordinate of xy intersects level i but is smaller than
# minimum x at level i-1
ratio.1 <- NA
y.1.1 <- NA
z.1.1 <- NA
if ( xy[ind.x] > isolines[[i]][[nam.x]][1] )
{
ratio.1 <- (xy[ind.x]-isolines[[i-1]][[nam.x]][1])/
(isolines[[i]][[nam.x]][1]-
isolines[[i-1]][[nam.x]][1])
y.1.1 <- isolines[[i-1]][[nam.y]][1] +
ratio.1*(isolines[[i]][[nam.y]][1]-
isolines[[i-1]][[nam.y]][1])
z.1.1 <- levels[[i-1]] +
ratio.1*(levels[[i]]-levels[[i-1]])
}
ratio.n <- NA
y.1.n <- NA
z.1.n <- NA
if ( xy[ind.x] > isolines[[i]][[nam.x]][n.2] )
{
ratio.n <- (isolines[[i-1]][[nam.x]][n.1]-xy[ind.x])/
(isolines[[i-1]][[nam.x]][n.1]-
isolines[[i]][[nam.x]][n.2])
y.1.n <- isolines[[i-1]][[nam.y]][n.1] +
ratio.n*(isolines[[i]][[nam.y]][n.2]-
isolines[[i-1]][[nam.y]][n.1])
z.1.n <- levels[[i-1]] +
ratio.n*(levels[[i]]-levels[[i-1]])
}
val <- utility.intpol.multiple(x = xy[ind.y],
xs = c(y.1.1,y.1.n,y.2),
ys = c(z.1.1,z.1.n,
rep(levels[i],2)))
if ( ! is.na(val) )
{
z[lead.current] <- val
break
}
}
}
else # not within ranges of contour lines at level i-1 and i
{
x.1.1 <- isolines[[i-1]][[nam.x]][1]
x.2.1 <- isolines[[i]][[nam.x]][1]
x.1.n <- isolines[[i-1]][[nam.x]][n.1]
x.2.n <- isolines[[i]][[nam.x]][n.2]
if ( (xy[ind.x] >= x.1.1 & xy[ind.x] <= x.2.1) |
(xy[ind.x] >= x.2.1 & xy[ind.x] <= x.1.1) )
{
if ( (xy[ind.x] >= x.1.n & xy[ind.x] <= x.2.n) |
(xy[ind.x] >= x.2.n & xy[ind.x] <= x.1.n) )
{
                # x not within the ranges of isolines at levels i-1 and i;
# x within the range of the bounding lines between the
# ends of the isolines at levels i-1 and i
ratio.1 <- (xy[ind.x]-x.1.1)/(x.2.1-x.1.1)
y.1 <- isolines[[i-1]][[nam.y]][1] +
ratio.1*(isolines[[i]][[nam.y]][1]-
isolines[[i-1]][[nam.y]][1])
z.1 <- levels[i-1] + ratio.1*(levels[i]-levels[i-1])
ratio.n <- (xy[ind.x]-x.1.n)/(x.2.n-x.1.n)
y.n <- isolines[[i-1]][[nam.y]][n.1] +
ratio.n*(isolines[[i]][[nam.y]][n.2]-
isolines[[i-1]][[nam.y]][n.1])
z.n <- levels[i-1] + ratio.n*(levels[i]-levels[i-1])
if ( (xy[ind.y] >= y.1 & xy[ind.y] <= y.n) |
(xy[ind.y] <= y.1 & xy[ind.y] >= y.n) )
{
z[lead.current] <-
z.1 + (xy[ind.y]-y.1)/(y.n-y.1)*(z.n-z.1)
break
}
}
}
}
}
}
}
}
if ( is.na(z[1]) & is.na(z[2]) ) return(NA)
return(mean(z,na.rm=TRUE))
}
# structure
# =========
utility.check.required <- function(u,required,num.required)
{
res.ok <- sum(ifelse(is.na(u),0,1)) >= num.required &
sum(ifelse(is.na(u) & required,1,0)) == 0
return(res.ok)
}
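# Illustration (hedged sketch): check whether enough sub-values are available
# and no required one is missing.
#
#   utility.check.required(u = c(0.3, NA), required = c(FALSE, TRUE),  num.required = 1)  # FALSE
#   utility.check.required(u = c(0.3, NA), required = c(FALSE, FALSE), num.required = 1)  # TRUE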
utility.check.name <- function(name,nodes)
{
nodes.local <- nodes
if ( !is.list(nodes) ) nodes.local <- as.list(nodes)
for ( i in 1:length(nodes) )
{
if ( name == nodes[[i]]$name ) return(FALSE)
}
return(TRUE)
}
utility.structure <- function(node)
{
if ( substring(class(node),1,7) != "utility" )
{
warning("Node \"",node$name,"\": argument must be a subclass of utility")
return(NA)
}
str <- data.frame(upper = NA,
utility = node$utility,
required = node$required,
num.required = if ( length(node$num.required) > 0 ) node$num.required else NA,
color = node$col,
endnode = FALSE,
attributes = NA,
level = 1 + node$shift.levels,
endnodes = 0,
offset = 0)
rownames(str) <- node$name
if ( node$type == "endnode" )
{
str$endnode <- TRUE
str$attributes <- paste(node$attrib,collapse=";")
str$endnodes <- 1
}
else
{
offset <- 0
for ( i in 1:length(node$nodes) )
{
str.new <- utility.structure(node$nodes[[i]])
if ( ! is.data.frame(str.new) ) return(NA)
str.new[1,"upper"] <- node$name
str.new$level <- str.new$level + 1 + node$shift.levels
str.new$offset <- str.new$offset + offset
str[1,"endnodes"] <- str[1,"endnodes"] + str.new[1,"endnodes"]
offset <- offset + sum(ifelse(str.new$endnode,1,0))
ind1 <- match(rownames(str.new),rownames(str))
ind2 <- ind1[!is.na(ind1)]
if ( length(ind2) > 0 )
{
cat("*** Warning: node name(s) not unique:","\n",
paste(rownames(str)[ind2],"\n"))
return(NA)
}
str <- rbind(str,str.new)
}
}
return(str)
}
utility.prune <- function(str,level=NA)
{
if ( !is.data.frame(str) ) return(NA)
if ( is.na(level) ) level <- max(str$level)-1
while ( max(str$level) > max(1,level) )
{
lev <- max(str$level)
while ( !is.na(match(lev,str$level)) )
{
upper <- str$upper[match(lev,str$level)]
ind.upper <- match(upper,rownames(str))
str$num.required[ind.upper] <- NA
str$endnode[ind.upper] <- TRUE
ind.lower <- which(str$level==lev & str$upper==upper)
str$attributes[ind.upper] <- paste(unique(unlist(strsplit(str$attributes[ind.lower],split=";"))),collapse=";")
red <- length(ind.lower) - 1
if ( red > 0 )
{
str$offset <- ifelse(str$offset>str$offset[ind.upper],str$offset-red,str$offset)
while( !is.na(upper) )
{
str[upper,"endnodes"] <- str[upper,"endnodes"] - red
upper <- str[upper,"upper"]
}
}
str <- str[-ind.lower,]
}
}
return(str)
}
utility.get.attrib.names <- function(node)
{
if ( substring(class(node),1,7) != "utility" )
{
warning("Node \"",node$name,"\": argument must be a subclass of utility")
return(NA)
}
attrib <- character(0)
if ( node$type == "endnode" ) return(node$attrib)
if ( length(node$nodes) == 0 ) return(attrib)
for ( i in 1:length(node$nodes) )
{
attrib <- c(attrib,utility.get.attrib.names(node$nodes[[i]]))
}
return(unique(attrib))
}
|
/scratch/gouwar.j/cran-all/cranData/utility/R/utility.aux.r
|
################################################################################
# #
# utility and value function package #
# ================================== #
# #
# version 1.4 Peter Reichert 05.06.2016 #
# #
################################################################################
# ==============================================================================
# conversion node from values to utilities with interpolation:
# class "utility.conversion.intpol"
# ==============================================================================
# constructor:
# ------------
utility.conversion.intpol.create <- function(name.node, # character(1)
node, # character(1)
x, # numeric(n)
u, # numeric(n)
names.x = rep(NA,length(x)),
names.u = rep(NA,length(u)),
required = FALSE,
col = "black",
shift.levels = 0)
{
# consistency checks:
check.ok <- T
if ( length(x) != length(u) )
{
cat("*** Warning: x and u of different length:",
length(x),length(u),"\n")
check.ok <- F
}
if ( length(names.x) != length(names.u) )
{
cat("*** Warning: names.x and names.u of different length:",
length(names.x),length(names.u),"\n")
check.ok <- F
}
if ( length(x) != length(names.x) )
{
cat("*** Warning: x and names.x of different length:",
length(x),length(names.x),"\n")
check.ok <- F
}
  if ( ! utility.check.name(name.node,list(node)) )
{
cat("*** Warning: Node with same name \"",name.node,"\" exists already ",
"as sub-node","\n")
check.ok <- F
}
if ( ! check.ok )
{
cat("*** Warning: Node \"",name.node,"\" could not be constructed","\n",
sep="")
return(NA)
}
# construct class:
n <- list()
n$name <- name.node
n$description <- "utility/value interpolation conversion node"
n$type <- "conversionnode"
n$nodes <- list(node)
n$x <- x
n$u <- u
n$names.x <- names.x
n$names.u <- names.u
n$required <- required
n$num.required <- 1
n$utility <- TRUE
n$col <- col
n$shift.levels <- shift.levels
class(n) <- "utility.conversion.intpol"
# print and return class
#cat(n$description," \"",name.node,"\" constructed","\n",sep="")
return(n)
}
# update parameter values:
# ------------------------
updatepar.utility.conversion.intpol <- function(x,par=NA,...)
{
node <- x
# check availabiliy of named parameter vector:
if ( length(names(par)) == 0 ) return(node)
# update adequate values in interpolation list:
n <- node
for ( i in 1:length(n$x) )
{
if ( ! is.na(n$names.x[i]) )
{
ind <- which(n$names.x[i] == names(par) )
if ( length(ind) > 1 )
{
warning("Node \"",node$name,"\": multiple occurrences of parameter",
names(par)[ind[1]],sep="")
ind <- ind[1]
}
if ( length(ind) == 1 )
{
n$x[i] <- par[ind]
}
}
if ( ! is.na(n$names.u[i]) )
{
ind <- which(n$names.u[i] == names(par) )
if ( length(ind) > 1 )
{
warning("Node \"",node$name,"\": multiple occurrences of parameter",
names(par)[ind[1]],sep="")
ind <- ind[1]
}
if ( length(ind) == 1 )
{
n$u[i] <- par[ind]
}
}
}
n$nodes[[1]] <- updatepar(n$nodes[[1]],par)
# return updated node:
return(n)
}
# evaluate values or utilities:
# -----------------------------
evaluate_utility.conversion.intpol <- function(x,v)
{
node <- x
u <- approx(x=node$x,y=node$u,xout=v)$y
return(u)
}
evaluate.utility.conversion.intpol <- function(x,
attrib, # data.frame, numeric
par = NA,
...)
{
node <- x
# update parameters:
n <- updatepar(node,par)
# evaluate results:
v <- evaluate(n$nodes[[1]],attrib)
if ( ! is.data.frame(v) )
{
v <- as.data.frame(v)
}
u <- evaluate_utility.conversion.intpol(n,v[,1])
ind <- !is.na(u) & (u<0 | u>1)
if ( sum(ind) > 0 )
{
warning("Node \"",node$name,"\": node \"",n$name,"\" produced values outside of [0,1]: ",
paste(u[ind],collapse=","),sep="")
}
u <- as.data.frame(u)
names(u) <- node$name
# return results:
u <- cbind(u,v)
rownames(u) <- rownames(attrib)
return(u)
}
# print:
# -----
print.utility.conversion.intpol <- function(x,...)
{
cat(paste(rep("-",50),collapse=""),"\n")
summary(x,...)
cat(paste(rep("-",50),collapse=""),"\n")
}
# summary:
# --------
summary.utility.conversion.intpol <- function(object,...)
{
node <- object
cat(node$name,"\n")
cat(paste(rep("-",nchar(node$name)),collapse=""),"\n")
cat(node$description,"\n")
funtype <- "utility"; if ( !node$utility ) funtype <- "value"
cat("function type: ","utility","\n")
cat("required: ",node$required,"\n")
cat("data pairs:","\n")
names.x <- ifelse(is.na(node$names.x),"",node$names.x)
names.u <- ifelse(is.na(node$names.u),"",node$names.u)
print(data.frame(names.x=names.x,x=node$x,u=node$u,names.u=names.u))
for ( i in 1:length(node$nodes) )
{
cat("***","\n")
summary(node$nodes[[i]])
}
}
# plot:
# -----
plot.utility.conversion.intpol <-
function(x,
u = NA,
uref = NA,
par = NA,
type = c("hierarchy","table","node","nodes"),
nodes = NA,
col = utility.calc.colors(),
gridlines = c(0.2,0.4,0.6,0.8),
main = "",
cex.main = 1,
cex.nodes = 1,
cex.attrib = 1,
f.reaches = 0.2,
f.nodes = 0.2,
with.attrib = TRUE,
levels = NA,
plot.val = TRUE,
print.val = TRUE,
two.lines = FALSE,
...)
{
node <- x
n <- updatepar(node,par)
utility.plot(node = n,
u = u,
uref = uref,
type = type,
nodes = nodes,
col = col,
gridlines = gridlines,
main = main,
cex.main = cex.main,
cex.nodes = cex.nodes,
cex.attrib = cex.attrib,
f.reaches = f.reaches,
f.nodes = f.nodes,
with.attrib = with.attrib,
levels = levels,
plot.val = plot.val,
print.val = print.val,
two.lines = two.lines,
...)
}
|
/scratch/gouwar.j/cran-all/cranData/utility/R/utility.conversion.intpol.r
|
################################################################################
# #
# utility and value function package #
# ================================== #
# #
# version 1.4 Peter Reichert 05.06.2016 #
# #
################################################################################
# ==============================================================================
# conversion node from values to utilities with parametric function:
# class "utility.conversion.parfun"
# ==============================================================================
# constructor:
# ------------
utility.conversion.parfun.create <- function(name.node, # character(1)
node, # node
name.fun, # name of f(a,par)
par, # numeric(n)
names.par = rep(NA,length(par)),
required = FALSE,
col = "black",
shift.levels = 0)
{
# consistency checks:
check.ok <- T
if ( length(par) != length(names.par) )
{
cat("*** Warning: par and names.par of different length:",
length(par),length(names.par),"\n")
check.ok <- F
}
if ( ! utility.check.name(name.node,list(node)) )
{
cat("*** Warning: node with same name \"",name.node,"\" exists already ",
"as sub-node","\n")
check.ok <- F
}
if ( ! check.ok )
{
cat("*** Warning: node \"",name.node,"\" could not be constructed","\n",
sep="")
return(NA)
}
# construct class:
n <- list()
n$name <- name.node
n$description <- "utility/value parametric function conversion node"
n$type <- "utility.conversion.parfun"
n$nodes <- list(node)
n$name.fun <- name.fun
n$par <- par
n$names.par <- names.par
n$required <- required
n$num.required <- 1
n$utility <- TRUE
n$col <- col
n$shift.levels <- shift.levels
class(n) <- "utility.conversion.parfun"
# print and return class
#cat(n$description," \"",name.node,"\" constructed","\n",sep="")
return(n)
}
# update parameter values:
# ------------------------
updatepar.utility.conversion.parfun <- function(x,par=NA,...)
{
node <- x
# check availabiliy of named parameter vector:
if ( length(names(par)) == 0 ) return(node)
# update adequate values in interpolation list:
n <- node
for ( i in 1:length(n$par) )
{
if ( ! is.na(n$names.par[i]) )
{
ind <- which(n$names.par[i] == names(par) )
if ( length(ind) > 1 )
{
warning("Node \"",node$name,"\": multiple occurrences of parameter",
names(par)[ind[1]],sep="")
ind <- ind[1]
}
if ( length(ind) == 1 )
{
n$par[i] <- par[ind]
}
}
}
n$nodes[[1]] <- updatepar(n$nodes[[1]],par)
# return updated node:
return(n)
}
# evaluate values or utilities:
# -----------------------------
evaluate_utility.conversion.parfun <- function(x,v)
{
node <- x
u <- do.call(node$name.fun,list(v,node$par))
return(u)
}
evaluate.utility.conversion.parfun <- function(x,
attrib, # data.frame, numeric
par = NA,
...)
{
node <- x
# update parameters:
n <- updatepar(node,par)
# evaluate results:
v <- evaluate(n$nodes[[1]],attrib)
if ( ! is.data.frame(v) )
{
v <- as.data.frame(v)
}
u <- evaluate_utility.conversion.parfun(n,v[,1])
u <- as.data.frame(u)
names(u) <- n$name
ind <- !is.na(u) & (u<0 | u>1)
if ( sum(ind) > 0 )
{
warning("Node \"",node$name,"\": node \"",n$name,"\" produced values outside of [0,1]: ",
paste(u[ind],collapse=","),sep="")
}
# return results:
u <- cbind(u,v)
rownames(u) <- rownames(attrib)
# return results:
return(u)
}
# print:
# -----
print.utility.conversion.parfun <- function(x,...)
{
cat(paste(rep("-",50),collapse=""),"\n")
summary(x,...)
cat(paste(rep("-",50),collapse=""),"\n")
}
# summary:
# --------
summary.utility.conversion.parfun <- function(object,...)
{
node <- object
cat(node$name,"\n")
cat(paste(rep("-",nchar(node$name)),collapse=""),"\n")
cat(node$description,"\n")
cat("node : ",node$nodes[[1]]$name,"\n")
cat("function type: ","utility","\n")
cat("required: ",node$required,"\n")
cat("function: ",node$name.fun,"\n")
cat("parameters:","\n")
names.par <- ifelse(is.na(node$names.par),"",node$names.par)
print(data.frame(names.par=names.par,par=node$par))
for ( i in 1:length(node$nodes) )
{
cat("***","\n")
summary(node$nodes[[i]])
}
}
# plot:
# -----
plot.utility.conversion.parfun <-
function(x,
u = NA,
uref = NA,
par = NA,
type = c("hierarchy","table","node","nodes"),
nodes = NA,
col = utility.calc.colors(),
gridlines = c(0.2,0.4,0.6,0.8),
main = "",
cex.main = 1,
cex.nodes = 1,
cex.attrib = 1,
f.reaches = 0.2,
f.nodes = 0.2,
with.attrib = TRUE,
levels = NA,
plot.val = TRUE,
print.val = TRUE,
two.lines = FALSE,
...)
{
node <- x
n <- updatepar(node,par)
utility.plot(node = n,
u = u,
uref = uref,
type = type,
nodes = nodes,
col = col,
gridlines = gridlines,
cex.main = cex.main,
cex.nodes = cex.nodes,
cex.attrib = cex.attrib,
f.reaches = f.reaches,
f.nodes = f.nodes,
with.attrib = with.attrib,
levels = levels,
plot.val = plot.val,
print.val = print.val,
two.lines = two.lines,
...)
}
|
/scratch/gouwar.j/cran-all/cranData/utility/R/utility.conversion.parfun.r
|
################################################################################
# #
# utility and value function package #
# ================================== #
# #
# version 1.4 Peter Reichert 05.09.2016 #
# #
################################################################################
# ==============================================================================
# endnode for valuing counts in discrete classes:
# class "utility.endnode.classcounts"
# ==============================================================================
# constructor:
# ------------
utility.endnode.classcounts.create <- function(name.node, # character(1)
name.attrib, # character(n)
u.max.inc, # list (n) of vect (>=1)
names.u.max.inc = list(),
exceed.next = TRUE,
utility = TRUE,
required = FALSE,
col = "black",
shift.levels = 0)
{
# consistency checks:
check.ok <- T
n <- length(name.attrib)
if ( length(u.max.inc) != n )
{
cat("*** Warning: Number of elements of u.max.inc not equal to number of elements of name.attrib:",
length(u.max.inc),n,"\n")
check.ok <- F
}
for ( i in 1:n )
{
if ( !is.vector(u.max.inc[[i]]) )
{
cat("*** Warning: Eelements of u.max.inc must be vectors","\n")
check.ok <- F
}
}
if ( length(names.u.max.inc) != 0 & length(names.u.max.inc) != n )
{
cat("*** Warning: Number of elements of names.u.max.inc not equal to zero or to the number of elements of name.attrib:",
length(names.u.max.inc),n,"\n")
check.ok <- F
}
if ( ! check.ok )
{
cat("*** Warning: node \"",name.node,"\" could not be constructed","\n",
sep="")
return(NA)
}
# construct class:
node <- list()
node$name <- name.node
node$description <- "utility/value class counts end node"
node$type <- "endnode"
node$attrib <- name.attrib
node$u.max.inc <- u.max.inc
for ( i in 1:n )
{
l <- length(node$u.max.inc[[i]])
if ( l < n+2-i ) node$u.max.inc[[i]] <- c(node$u.max.inc[[i]],rep(0,n+2-i-l))
if ( l > n+2-i ) node$u.max.inc[[i]] <- node$u.max.inc[[i]][1:(n+2-i)]
}
  node$names.u.max.inc <- names.u.max.inc
  if ( length(node$names.u.max.inc) == n )
  {
    for ( i in 1:n )
    {
      l <- length(node$names.u.max.inc[[i]])
      if ( l < n+2-i ) node$names.u.max.inc[[i]] <- c(node$names.u.max.inc[[i]],rep(NA,n+2-i-l))
      if ( l > n+2-i ) node$names.u.max.inc[[i]] <- node$names.u.max.inc[[i]][1:(n+2-i)]
    }
  }
node$exceed.next <- exceed.next
node$required <- required
node$utility <- utility
node$col <- col
node$shift.levels <- shift.levels
class(node) <- "utility.endnode.classcounts"
# print and return class
#cat(node$description," \"",name.node,"\" constructed","\n",sep="")
return(node)
}
# update parameter values:
# ------------------------
updatepar.utility.endnode.classcounts <- function(x,par=NA,...)
{
node <- x
n <- length(node$attrib)
# check availability of named parameter vector:
if ( length(names(par)) == 0 ) return(node)
# check availability of parameter names
if ( length(node$names.u.max.inc) != n ) return(node)
# update adequate values:
for ( i in 1:length(node$attrib) )
{
for ( j in 1:(n+2-i) )
{
if ( ! is.na(node$names.u.max.inc[[i]][j]) )
{
ind <- which(node$names.u.max.inc[[i]][j] == names(par) )
if ( length(ind) > 1 )
{
warning("Node \"",node$name,"\": multiple occurrences of parameter",
names(par)[ind[1]])
ind <- ind[1]
}
if ( length(ind) == 1 )
{
node$u.max.inc[[i]][j] <- par[ind]
}
}
}
}
# return updated node:
  return(node)
}
# evaluate values or utilities:
# -----------------------------
evaluate.utility.endnode.classcounts <- function(x,
attrib, # data.frame
par = NA,
...)
{
node <- x
n <- length(node$attrib)
# update parameters:
node <- updatepar(node,par)
# extract attributes:
if ( is.data.frame(attrib) | is.matrix(attrib) )
{
ind <- match(node$attrib,colnames(attrib))
if ( sum(ifelse(is.na(ind),1,0)) > 0 )
{
warning("Node \"",node$name,"\": attribute(s) \"",
paste(node$attrib[is.na(ind)],collapse=","),"\" not found",sep="")
return(rep(NA,nrow(attrib)))
}
a <- attrib[,ind]
}
else
{
if ( ! is.vector(attrib) )
{
warning("Node \"",node$name,"\": unknown format of attribute(s) \"",node$attrib,"\"",sep="")
return(NA)
}
    if ( length(names(attrib)) == 0 )
    {
      if ( length(attrib) != n )
      {
        warning("Node \"",node$name,"\": unknown format of attribute(s) \"",node$attrib,"\"",sep="")
        return(NA)
      }
      a <- matrix(attrib,nrow=1)
    }
else
{
ind <- match(node$attrib,names(attrib))
if ( sum(ifelse(is.na(ind),1,0)) > 0 )
{
warning("Node \"",node$name,"\": attribute(s) \"",
paste(node$attrib[is.na(ind)],collapse=","),"\" not found",sep="")
return(rep(NA,nrow(attrib)))
}
      a <- matrix(attrib[ind],nrow=1)
}
}
# evaluate results:
u <- rep(NA,nrow(a))
for ( k in 1:nrow(a) )
{
att <- as.numeric(a[k,])
i <- match(TRUE,att>0)
if ( is.na(i) )
{
if ( sum(!is.na(att)) > 0 ) u[k] <- 0
}
else
{
# basic value:
u[k] <- node$u.max.inc[[i]][1]
# increment for multiplicities at the maximum level:
u[k] <- u[k] + (att[i]-1)*node$u.max.inc[[i]][2]
# increment for multiplicities at lower levels:
if ( i < n )
{
for ( j in 1:(n-i) ) u[k] <- u[k] + att[i+j]*node$u.max.inc[[i]][2+j]
}
# check maximum:
u[k] <- min(1,u[k])
if ( i > 1 & !node$exceed.next )
{
u[k] <- min(node$u.max.inc[[i-1]][1],u[k])
}
}
}
# return results:
return(u)
}
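# Worked example (sketch; the numbers just trace the algorithm above, they are
# not package output): with u.max.inc = list(c(0.8,0.05,0.01),c(0.5,0.05,0.01),
# c(0.0,0.01)) and counts a=0, b=2, c=3, the highest class with a positive count
# is the second one (i=2); the result is the basic level 0.5, plus (2-1)*0.05
# for the additional count at that level, plus 3*0.01 for the counts at the
# lower level, i.e. 0.58 (capped at 1 and, if exceed.next=FALSE, at the basic
# level 0.8 of the next higher class).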
# print:
# -----
print.utility.endnode.classcounts <- function(x,...)
{
cat(paste(rep("-",50),collapse=""),"\n")
summary(x,...)
cat(paste(rep("-",50),collapse=""),"\n")
}
# summary:
# --------
summary.utility.endnode.classcounts <- function(object,...)
{
node <- object
cat(node$name,"\n")
cat(paste(rep("-",nchar(node$name)),collapse=""),"\n")
cat(node$description,"\n")
cat("attribute(s): ",paste(node$attrib,collapse=","),"\n")
funtype <- "utility"; if ( !node$utility ) funtype <- "value"
cat("function type: ",funtype,"\n")
cat("required: ",node$required,"\n")
cat("basic level, multiplicity increments","\n")
for ( i in 1:length(node$attrib) )
{
cat(paste(node$u.max.inc[[i]],collapse=", "),"\n")
}
}
# plot:
# -----
plot.utility.endnode.classcounts <-
function(x,
par = NA,
col = utility.calc.colors(),
gridlines = c(0.2,0.4,0.6,0.8),
main = "",
cex.main = 1,
...)
{
# plot frame:
node <- x
space <- 0.2
n <- updatepar(node,par)
title <- main; if ( nchar(title) == 0 ) title <- n$name
funtype <- "utility"; if ( !n$utility ) funtype <- "value"
n.attrib <- length(n$attrib)
u.max.inc <- matrix(0,nrow=n.attrib+1,ncol=n.attrib)
colnames(u.max.inc) <- n$attrib
  for ( i in 1:n.attrib ) u.max.inc[1:(n.attrib+2-i),i] <- n$u.max.inc[[i]]
print(u.max.inc)
barplot(u.max.inc,main=title,ylab=paste(funtype,"(base + inc)"),
cex.main=cex.main,xlim=c(0,n.attrib*(1+space)),ylim=c(0,1),
space=space,beside=FALSE,xaxs="i",yaxs="i")
max.val <- 0.995*rep(1,n.attrib)
if ( !n$exceed.next ) max.val <- c(0.995,u.max.inc[1,1:(n.attrib-1)])
for ( i in 1:n.attrib )
{
lines(space+(i-1)*(1+space)+0.5+c(-0.5,0.5),max.val[i]*c(1,1),col="red",lwd=2)
}
}
# test code:
# library(utility)
#
# n <- utility.endnode.classcounts.create(
# name.node = "test",
# name.attrib = c("a","b","c"),
# u.max = list(c(0.8,0.05,0.01),
# c(0.5,0.05,0.01),
# c(0.0,0.01)),
# exceed.next = FALSE,
# utility = FALSE)
#
# attrib <- data.frame(a=c(0,0,0,1,2,3,0),
# b=c(1,2,9,2,2,2,0),
# c=c(5,9,2,8,7,8,0))
#
# values <- evaluate(n,attrib)
#
# print(n)
# plot(n)
# print(attrib)
# print(values)
################################################################################
# #
# utility and value function package #
# ================================== #
# #
# version 1.3 Peter Reichert 05.10.2014 #
# #
################################################################################
# ==============================================================================
# endnode for combining other endnodes conditional on factor attributes:
# class "utility.endnode.cond"
# ==============================================================================
# constructor:
# ------------
utility.endnode.cond.create <- function(name.node, # character(1)
attrib.levels, # data.frame
nodes, # list of nodes
utility = TRUE,
required = FALSE,
col = "black",
shift.levels = 0)
{
# consistency checks:
check.ok <- T
if ( !is.data.frame(attrib.levels) )
{
cat("*** Warning: attrib.levels must be a data frame","\n")
check.ok <- F
}
if ( length(names(attrib.levels)) != length(unique(names(attrib.levels))) )
{
cat("*** Warning: Column names of attrib.levels must be different","\n")
check.ok <- F
}
if ( nrow(attrib.levels) != length(nodes) )
{
cat("*** Warning: Number of rows of attrib.levels not equal to",
"number of nodes provided:",nrow(attrib.levels),length(nodes),"\n")
check.ok <- F
}
if ( length(nodes) < 1 )
{
cat("*** Warning: No nodes provided","\n")
check.ok <- F
}
for ( i in 1:length(nodes) )
{
if ( nodes[[i]]$utility != utility )
{
funtype <- "utility"; if ( !utility ) funtype <- "value"
funtype.i <- "utility"; if ( !nodes[[i]]$utility ) funtype.i <- "value"
cat("***Warning: incompatible function types: new node is of type",
funtype,"node",nodes[[i]]$name," is of type",funtype.i,"\n")
check.ok <- F
}
}
if ( ! check.ok )
{
cat("*** Warning: Node \"",name.node,"\" could not be constructed","\n",
sep="")
return(NA)
}
# construct class:
node <- list()
node$name <- name.node
node$description <- "utility/value conditional combination end node"
node$type <- "endnode"
node$attrib.levels <- attrib.levels
for ( i in 1:ncol(attrib.levels) )
{
node$attrib.levels[,i] <- as.character(node$attrib.levels[,i])
}
node$attrib <- names(attrib.levels)
for ( i in 1:length(nodes) )
{
node$attrib <- c(node$attrib,nodes[[i]]$attrib)
}
node$attrib <- unique(node$attrib)
node$nodes <- nodes
node$required <- required
node$utility <- utility
node$col <- col
node$shift.levels <- shift.levels
class(node) <- "utility.endnode.cond"
# print return class
#cat(node$description," \"",name.node,"\" constructed","\n",sep="")
return(node)
}
# update parameter values:
# ------------------------
updatepar.utility.endnode.cond <- function(x,par=NA,...)
{
node <- x
  # check availability of named parameter vector:
if ( length(names(par)) == 0 ) return(node)
# update conditional nodes:
n <- node
for ( i in 1:length(n$nodes) )
{
n$nodes[[i]] <- updatepar(n$nodes[[i]],par)
}
# return updated node:
return(n)
}
# evaluate values or utilities:
# -----------------------------
evaluate.utility.endnode.cond <- function(x,
attrib, # data.frame
par=NA,
...)
{
as.character.na <- function(x,...) { return(ifelse(is.na(x),"",as.character(x,...))) }
node <- x
# check availability of attributes:
if ( ! is.data.frame(attrib) )
{
warning("Node \"",node$name,"\": attrib must be a data frame",sep="")
return(NA)
}
ind <- match(node$attrib,names(attrib))
if(sum(is.na(ind))>0)
{
ind.na <- is.na(ind)
warning("Node \"",node$name,"\": attribute(s) \"",
paste(node$attrib[ind.na],collapse=","),"\" not found",sep="")
return(rep(NA,nrow(attrib)))
}
# update parameters:
n <- updatepar(node,par)
# select rows compatible with conditioning attributes:
u <- rep(NA,nrow(attrib))
calc <- rep(FALSE,nrow(attrib))
# for ( i in 1:ncol(n$attrib.levels) ) # evaluate NAs # comment to allow for NAs
# {
# calc <- calc | is.na(attrib[,names(n$attrib.levels)[i]])
# }
while( TRUE )
{
# identify first row that has not yet been evaluated:
startind <- match(FALSE,calc)
# break if all were evaluated:
if ( is.na(startind) ) break
# find rows with the same attribute combinations:
ind.attrib <- as.character.na(attrib[startind,names(n$attrib.levels)[1]]) ==
as.character.na(attrib[,names(n$attrib.levels)[1]])
if ( ncol(n$attrib.levels) > 1 )
{
for ( i in 2:ncol(n$attrib.levels) )
{
ind.attrib <-
ind.attrib &
( as.character.na(attrib[startind,names(n$attrib.levels)[i]]) ==
as.character.na(attrib[,names(n$attrib.levels)[i]]) )
}
}
ind.attrib <- which(ind.attrib)
# find corresponding node:
ind.node <- as.character.na(attrib[startind,names(n$attrib.levels)[1]]) ==
as.character.na(n$attrib.levels[,names(n$attrib.levels)[1]])
if ( ncol(n$attrib.levels) > 1 )
{
for ( i in 2:ncol(n$attrib.levels) )
{
ind.node <-
ind.node &
( as.character.na(n$attrib.levels[,names(n$attrib.levels)[i]]) ==
as.character.na(attrib[startind,names(n$attrib.levels)[i]]) )
}
}
ind.node <- which(ind.node)
# evaluate node for all attribute rows with same conditional values:
if ( length(ind.node) > 0 )
{
u[ind.attrib] <- evaluate(n$nodes[[ind.node[1]]],attrib[ind.attrib,])
if ( length(ind.node) > 1 )
{
cat("*** Warning: multiple combinations of the same",
"attribute levels in node",n$name,"\n")
}
}
calc[ind.attrib] <- T
}
# return results:
return(u)
}
# print:
# -----
print.utility.endnode.cond <- function(x,...)
{
cat(paste(rep("-",50),collapse=""),"\n")
summary(x,...)
cat(paste(rep("-",50),collapse=""),"\n")
}
# summary:
# --------
summary.utility.endnode.cond <- function(object,...)
{
node <- object
cat(node$name,"\n")
cat(paste(rep("-",nchar(node$name)),collapse=""),"\n")
cat(node$description,"\n")
funtype <- "utility"; if ( !node$utility ) funtype <- "value"
cat("function type: ",funtype,"\n")
cat("required: ",node$required,"\n")
cat("attribute/node combinations:","\n")
nodes.names <- character(0)
for ( i in 1:length(node$nodes) ) nodes.names[i] <- node$nodes[[i]]$name
print(cbind(node$attrib.levels,node=nodes.names))
for ( i in 1:length(node$nodes) )
{
cat("**","\n")
summary(node$nodes[[i]])
}
}
# plot:
# -----
plot.utility.endnode.cond <-
function(x,
par = NA,
col = utility.calc.colors(),
gridlines = c(0.2,0.4,0.6,0.8),
main = "",
cex.main = 1,
nodes = x$name,
...)
{
node <- x
if ( is.na(nodes[1]) | ! is.na(match(node$name,nodes)) )
{
nrow <- floor(sqrt(length(node$nodes)))
ncol <- floor(length(node$nodes)/nrow+0.999)
par.def <- par(no.readonly=T)
par(mfrow=c(nrow,ncol),mar=c(4.3,3.8,2.8,0.8),oma=c(0,0,2,0))
for ( i in 1:length(node$nodes) ) # c(bottom, left, top, right)
{
title <- main
for ( j in 1:ncol(node$attrib.levels) )
{
title <- paste(title," ",colnames(node$attrib.levels)[j],"=",
as.character(node$attrib.levels[i,j]),sep="")
}
plot(node$nodes[[i]],par=par,col=col,gridlines=gridlines,main=title,cex.main=cex.main,...)
}
mtext(node$name,outer=TRUE,cex=cex.main)
par(par.def)
}
if ( length(node$nodes) > 0 )
{
for ( i in 1:length(node$nodes) )
{
if ( is.na(nodes[1]) | !is.na(match(node$nodes[[i]]$name,nodes)) )
{
plot(node$nodes[[i]],
par=par,
col=col,
gridlines=gridlines,
cex.main=cex.main,
...)
}
}
}
}
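# test code (sketch; node and attribute names are made up for illustration):
#
# library(utility)
#
# n.small <- utility.endnode.intpol1d.create("width.small","width",c(0,10),
#                                            c(0,10),c(0,1),utility=FALSE)
# n.large <- utility.endnode.intpol1d.create("width.large","width",c(0,50),
#                                            c(0,50),c(0,1),utility=FALSE)
#
# n.cond <- utility.endnode.cond.create(
#             name.node     = "width.cond",
#             attrib.levels = data.frame(type=c("small","large")),
#             nodes         = list(n.small,n.large),
#             utility       = FALSE)
#
# attrib <- data.frame(type  = c("small","large"),
#                      width = c(5,25))
#
# evaluate(n.cond,attrib)   # approx. c(0.5,0.5)
#
# print(n.cond)
# plot(n.cond)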
################################################################################
# #
# utility and value function package #
# ================================== #
# #
# version 1.3 Peter Reichert 05.10.2014 #
# #
################################################################################
# ==============================================================================
# endnode for discrete factor attributes:
# class "utility.endnode.discrete"
# ==============================================================================
# constructor:
# ------------
utility.endnode.discrete.create <- function(name.node, # character(1)
attrib.levels, # data.frame
u, # numeric(n)
names.u = rep(NA,length(u)),
utility = TRUE,
required = FALSE,
col = "black",
shift.levels = 0)
{
# consistency checks:
check.ok <- T
if ( !is.data.frame(attrib.levels) )
{
cat("*** Warning: Attrib.levels must be a data frame","\n")
check.ok <- F
}
if ( length(names(attrib.levels)) != length(unique(names(attrib.levels))) )
{
cat("*** Warning: Column names of attrib.levels must be different","\n")
check.ok <- F
}
if ( nrow(attrib.levels) != length(u) )
{
cat("*** Warning: Number of rows of attrib.levels not equal to",
"number of elements of u:",nrow(attrib.levels),length(u),"\n")
check.ok <- F
}
if ( length(names.u) != length(u) )
{
cat("*** Warning: Number of elements of names.u not equal",
"number of elements of u:",length(names.u),length(u),"\n")
check.ok <- F
}
if ( ! check.ok )
{
cat("*** Warning: Node \"",name.node,"\" could not be constructed","\n",
sep="")
return(NA)
}
# construct class:
node <- list()
node$name <- name.node
node$description <- "utility/value discrete attribute end node"
node$type <- "endnode"
node$attrib.levels <- attrib.levels
for ( i in 1:ncol(attrib.levels) )
{
node$attrib.levels[,i] <- as.character(node$attrib.levels[,i])
}
node$attrib <- names(attrib.levels)
node$u <- u
node$names.u <- names.u
node$required <- required
node$utility <- utility
node$col <- col
node$shift.levels <- shift.levels
class(node) <- "utility.endnode.discrete"
# print and return class
#cat(node$description," \"",name.node,"\" constructed","\n",sep="")
return(node)
}
# update parameter values:
# ------------------------
updatepar.utility.endnode.discrete <- function(x,par=NA,...)
{
node <- x
  # check availability of named parameter vector:
if ( length(names(par)) == 0 ) return(node)
# update adequate values in interpolation list:
n <- node
for ( i in 1:length(n$u) )
{
if ( ! is.na(n$names.u[i]) )
{
ind <- which(n$names.u[i] == names(par) )
if ( length(ind) > 1 )
{
warning("Node \"",node$name,"\": multiple occurrences of parameter",
names(par)[ind[1]])
ind <- ind[1]
}
if ( length(ind) == 1 )
{
n$u[i] <- par[ind]
}
}
}
# return updated node:
return(n)
}
# evaluate values or utilities:
# -----------------------------
evaluate.utility.endnode.discrete <- function(x,
attrib, # data.frame
par = NA,
...)
{
node <- x
# check availability of attributes:
if ( ! is.data.frame(attrib) )
{
warning("Node \"",node$name,"\": attrib must be a data frame",sep="")
return(NA)
}
ind <- match(node$attrib,names(attrib))
if(sum(is.na(ind))>0)
{
ind.na <- is.na(ind)
warning("Node \"",node$name,"\": attribute(s) \"",
paste(node$attrib[ind.na],collapse=","),"\" not found",sep="")
return(rep(NA,nrow(attrib)))
}
# check levels of attributes:
for ( i in 1:ncol(node$attrib.levels) )
{
n <- names(node$attrib.levels)[i] # attribute name
l <- unique(node$attrib.levels[,i]); l <- l[!is.na(l)] # defined levels
a <- unique(attrib[,n]); a <- a[!is.na(a)] # requested levels
ind.na <- is.na(match(a,l))
if ( sum(ind.na) > 0 )
{
warning("Node \"",node$name,"\": unknown attribute level(s): \"",paste(a[ind.na],collapse=","),
"\" of attribute \"",n,"\"",sep="")
}
}
# update parameters:
n <- updatepar(node,par)
# select rows compatible with conditioning attributes:
u <- rep(NA,nrow(attrib))
calc <- rep(FALSE,nrow(attrib))
for ( i in 1:ncol(n$attrib.levels) ) # evaluate NAs
{
calc <- calc | is.na(attrib[,names(n$attrib.levels)[i]])
}
while( TRUE )
{
# identify first row that has not yet been evaluated:
startind <- match(FALSE,calc)
# break if all were evaluated:
if ( is.na(startind) ) break
# find rows with the same attribute combinations:
ind.attrib <- as.character(attrib[startind,names(n$attrib.levels)[1]]) ==
as.character(attrib[,names(n$attrib.levels)[1]])
if ( ncol(n$attrib.levels) > 1 )
{
for ( i in 2:ncol(n$attrib.levels) )
{
ind.attrib <-
ind.attrib &
( as.character(attrib[startind,names(n$attrib.levels)[i]]) ==
as.character(attrib[,names(n$attrib.levels)[i]]) )
}
}
ind.attrib <- which(ind.attrib)
# find corresponding value:
ind.u <- as.character(attrib[startind,names(n$attrib.levels)[1]]) ==
as.character(n$attrib.levels[,names(n$attrib.levels)[1]])
if ( ncol(n$attrib.levels) > 1 )
{
for ( i in 2:ncol(n$attrib.levels) )
{
ind.u <-
ind.u &
( as.character(n$attrib.levels[,names(n$attrib.levels)[i]]) ==
as.character(attrib[startind,names(n$attrib.levels)[i]]) )
}
}
ind.u <- which(ind.u)
# evaluate node for all attribute rows with same conditional values:
if ( length(ind.u) == 1 )
{
u[ind.attrib] <- n$u[ind.u]
}
else
{
if ( length(ind.u) > 1 )
{
warning("Node \"",node$name,"\": multiple combinations of the same",
"attribute levels in node \"",n$name,"\"",sep="")
}
}
calc[ind.attrib] <- T
}
# return results:
return(u)
}
# print:
# -----
print.utility.endnode.discrete <- function(x,...)
{
cat(paste(rep("-",50),collapse=""),"\n")
summary(x,...)
cat(paste(rep("-",50),collapse=""),"\n")
}
# summary:
# --------
summary.utility.endnode.discrete <- function(object,...)
{
node <- object
cat(node$name,"\n")
cat(paste(rep("-",nchar(node$name)),collapse=""),"\n")
cat(node$description,"\n")
cat("attribute(s): ",paste(node$attrib,collapse=","),"\n")
funtype <- "utility"; if ( !node$utility ) funtype <- "value"
cat("function type: ",funtype,"\n")
cat("required: ",node$required,"\n")
cat("attribute/value combinations:","\n")
names.u <- ifelse(is.na(node$names.u),"",node$names.u)
print(cbind(node$attrib.levels,u=node$u,names.u=names.u))
}
# plot:
# -----
plot.utility.endnode.discrete <-
function(x,
par = NA,
col = utility.calc.colors(),
gridlines = c(0.2,0.4,0.6,0.8),
main = "",
cex.main = 1,
...)
{
# plot frame:
node <- x
length = 101
n <- updatepar(node,par)
title <- main; if ( nchar(title) == 0 ) title <- n$name
funtype <- "utility"; if ( !n$utility ) funtype <- "value"
plot(numeric(0),numeric(0),type="l",
xlim=c(0,1),ylim=c(0,1),
xlab=paste(n$attrib,collapse=","),ylab=funtype,main=title,
xaxs="i",yaxs="i",yaxt="n",xaxt="n",cex.main=cex.main,...)
# colored bar along y axis:
if ( length(col)>1 & !node$utility )
{
num.grid = 100
endpoints <- seq(0,1,length.out=num.grid+1)+1/(2*num.grid)
midpoints <- 0.5*(endpoints[-1]+endpoints[-length(endpoints)])
cols <- utility.get.colors(midpoints,col)
for ( i in 1:(num.grid-1) )
{
lines(-0.01*c(1,1),endpoints[c(i,i+1)],col=cols[i],lwd=3,lend=2,xpd=TRUE)
}
}
  # axes (should overlay colored bar):
labels=character(length(n$u))
for ( i in 1:length(n$u) )
{
labels[i] <- paste(as.character(n$attrib.levels[i,]),collapse=",")
}
axis(side=1,at=((1:length(n$u))-0.5)/length(n$u),labels=labels)
axis(side=2,...)
# plot gridlines:
if ( !node$utility )
{
if ( !is.na(gridlines[1]) )
{
for ( level in gridlines ) abline(h=level,lty="dashed")
}
}
# plot points:
color <- "black"; if(length(col)==1) color <- col
points(((1:length(n$u))-0.5)/length(n$u),n$u,pch=19,col=color,xpd=TRUE)
}
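# test code (sketch; hypothetical attribute levels):
#
# library(utility)
#
# n <- utility.endnode.discrete.create(
#        name.node     = "clarity",
#        attrib.levels = data.frame(clarity=c("clear","medium","turbid")),
#        u             = c(1,0.5,0.1),
#        utility       = FALSE)
#
# evaluate(n,data.frame(clarity=c("turbid","clear",NA)))   # approx. c(0.1,1,NA)
#
# print(n)
# plot(n)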
################################################################################
# #
# utility and value function package #
# ================================== #
# #
# version 1.3 Peter Reichert 05.10.2014 #
# #
################################################################################
# ==============================================================================
# endnode for getting the results of the first node of a list that can
# successfully be evaluated
# class "utility.endnode.firstavail"
# ==============================================================================
# constructor:
# ------------
utility.endnode.firstavail.create <- function(name.node, # character(1)
nodes, # list of nodes
utility = TRUE,
required = FALSE,
col = "black",
shift.levels = 0)
{
# consistency checks:
check.ok <- T
if ( length(nodes) < 1 )
{
cat("*** Warning: No nodes provided","\n")
check.ok <- F
}
for ( i in 1:length(nodes) )
{
if ( nodes[[i]]$utility != utility )
{
funtype <- "utility"; if ( !utility ) funtype <- "value"
funtype.i <- "utility"; if ( !nodes[[i]]$utility ) funtype.i <- "value"
cat("***Warning: incompatible function types: new node is of type",
funtype,"node",nodes[[i]]$name," is of type",funtype.i,"\n")
check.ok <- F
}
}
if ( ! check.ok )
{
cat("*** Warning: Node \"",name.node,"\" could not be constructed","\n",
sep="")
return(NA)
}
# construct class:
node <- list()
node$name <- name.node
node$description <- "utility/value endnode to evaluate first available subnode"
node$type <- "endnode"
node$attrib <- character(0)
for ( i in 1:length(nodes) )
{
node$attrib <- c(node$attrib,nodes[[i]]$attrib)
}
node$attrib <- unique(node$attrib)
node$nodes <- nodes
node$required <- required
node$utility <- utility
node$col <- col
node$shift.levels <- shift.levels
class(node) <- "utility.endnode.firstavail"
# print return class
#cat(node$description," \"",name.node,"\" constructed","\n",sep="")
return(node)
}
# update parameter values:
# ------------------------
updatepar.utility.endnode.firstavail <- function(x,par=NA,...)
{
node <- x
  # check availability of named parameter vector:
if ( length(names(par)) == 0 ) return(node)
# update conditional nodes:
n <- node
for ( i in 1:length(n$nodes) )
{
n$nodes[[i]] <- updatepar(n$nodes[[i]],par)
}
# return updated node:
return(n)
}
# evaluate values or utilities:
# -----------------------------
evaluate.utility.endnode.firstavail <- function(x,
attrib, # data.frame
par=NA,
...)
{
node <- x
# check availability of attributes:
if ( ! is.data.frame(attrib) )
{
warning("Node \"",node$name,"\": attrib must be a data frame",sep="")
return(NA)
}
# update parameters:
n <- updatepar(node,par)
# evaluate nodes:
u <- rep(NA,nrow(attrib))
for ( i in 1:nrow(attrib) )
{
    for ( j in 1:length(n$nodes) )
    {
      u[i] <- evaluate(n$nodes[[j]],attrib[i,])
if ( !is.na(u[i]) ) break
}
}
# return results:
return(u)
}
# print:
# -----
print.utility.endnode.firstavail <- function(x,...)
{
cat(paste(rep("-",50),collapse=""),"\n")
summary(x,...)
cat(paste(rep("-",50),collapse=""),"\n")
}
# summary:
# --------
summary.utility.endnode.firstavail <- function(object,...)
{
node <- object
cat(node$name,"\n")
cat(paste(rep("-",nchar(node$name)),collapse=""),"\n")
cat(node$description,"\n")
funtype <- "utility"; if ( !node$utility ) funtype <- "value"
cat("function type: ",funtype,"\n")
cat("required: ",node$required,"\n")
cat("nodes:","\n")
for ( i in 1:length(node$nodes) ) cat(" ",node$nodes[[i]]$name,"\n")
for ( i in 1:length(node$nodes) )
{
cat("**","\n")
summary(node$nodes[[i]])
}
}
# plot:
# -----
plot.utility.endnode.firstavail <-
function(x,
par = NA,
col = utility.calc.colors(),
gridlines = c(0.2,0.4,0.6,0.8),
main = "",
cex.main = 1,
nodes = x$name,
...)
{
node <- x
if ( is.na(nodes[1]) | ! is.na(match(node$name,nodes)) )
{
nrow <- floor(sqrt(length(node$nodes)))
ncol <- floor(length(node$nodes)/nrow+0.999)
par.def <- par(no.readonly=T)
par(mfrow=c(nrow,ncol),mar=c(4.3,3.8,2.8,0.8),oma=c(0,0,2,0))
for ( i in 1:length(node$nodes) ) # c(bottom, left, top, right)
{
title <- paste(main,i,":",node$nodes[[i]]$name)
plot(node$nodes[[i]],par=par,col=col,gridlines=gridlines,main=title,cex.main=cex.main,...)
}
mtext(node$name,outer=TRUE,cex=cex.main)
par(par.def)
}
if ( length(node$nodes) > 0 )
{
for ( i in 1:length(node$nodes) )
{
if ( is.na(nodes[1]) | !is.na(match(node$nodes[[i]]$name,nodes)) )
{
plot(node$nodes[[i]],
par=par,
col=col,
gridlines=gridlines,
cex.main=cex.main,
...)
}
}
}
}
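# test code (sketch; hypothetical nodes and attributes):
#
# library(utility)
#
# n1 <- utility.endnode.intpol1d.create("primary","a",c(0,1),c(0,1),c(0,1),utility=FALSE)
# n2 <- utility.endnode.intpol1d.create("backup" ,"b",c(0,1),c(0,1),c(0,1),utility=FALSE)
# n  <- utility.endnode.firstavail.create("first.avail",list(n1,n2),utility=FALSE)
#
# attrib <- data.frame(a=c(0.2,NA),b=c(0.9,0.8))
#
# evaluate(n,attrib)   # approx. c(0.2,0.8); row 2 falls back to node "backup"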
################################################################################
# #
# utility and value function package #
# ================================== #
# #
# version 1.4.3 Peter Reichert 15.01.2018 #
# #
################################################################################
# ==============================================================================
# endnode for single-attribute interpolation:
# class "utility.endnode.intpol1d"
# ==============================================================================
# constructor:
# ------------
utility.endnode.intpol1d.create <- function(name.node, # character(1)
name.attrib, # character(1)
range, # numeric(2)
x, # numeric(n)
u, # numeric(n)
names.x = rep(NA,length(x)),
names.u = rep(NA,length(u)),
utility = TRUE,
required = FALSE,
col = "black",
shift.levels = 0)
{
# consistency checks:
check.ok <- T
if ( length(x) != length(u) )
{
cat("*** Warning: x and u of different length:",
length(x),length(u))
check.ok <- F
}
if ( length(names.x) != length(names.u) )
{
cat("*** Warning: names.x and names.u of different length:",
length(names.x),length(names.u),"\n")
check.ok <- F
}
if ( length(x) != length(names.x) )
{
cat("*** Warning: x and names.x of different length:",
length(x),length(names.x),"\n")
check.ok <- F
}
if ( range[1] >= range[2] )
{
cat("*** Warning: Minimum of range not smaller than maximum:",
range[1],range[2],"\n")
check.ok <- F
}
if ( sum(x[-1]-x[-length(x)] > 0) != length(x)-1 &
sum(x[-1]-x[-length(x)] < 0) != length(x)-1 )
{
cat("*** Warning: x values in interpolation node must either be","\n",
"strictly increasing or strictly decreasing","\n")
check.ok <- F
}
if ( ! check.ok )
{
cat("*** Warning: node \"",name.node,"\" could not be constructed","\n",
sep="")
return(NA)
}
# construct class:
node <- list()
node$name <- name.node
node$description <- "utility/value 1d interpolation end node"
node$type <- "endnode"
node$attrib <- name.attrib
node$range <- range
node$x <- x
node$u <- u
node$names.x <- names.x
node$names.u <- names.u
node$required <- required
node$utility <- utility
node$col <- col
node$shift.levels <- shift.levels
class(node) <- "utility.endnode.intpol1d"
# print and return class
#cat(node$description," \"",name.node,"\" constructed","\n",sep="")
return(node)
}
# update parameter values:
# ------------------------
updatepar.utility.endnode.intpol1d <- function(x,par=NA,...)
{
node <- x
  # check availability of named parameter vector:
if ( length(names(par)) == 0 ) return(node)
# update adequate values in interpolation list:
n <- node
for ( i in 1:length(n$x) )
{
if ( ! is.na(n$names.x[i]) )
{
ind <- which(n$names.x[i] == names(par) )
if ( length(ind) > 1 )
{
warning("Node \"",node$name,"\": multiple occurrences of parameter \"",
names(par)[ind[1]],"\"",sep="")
ind <- ind[1]
}
if ( length(ind) == 1 )
{
n$x[i] <- par[ind]
}
}
if ( ! is.na(n$names.u[i]) )
{
ind <- which(n$names.u[i] == names(par) )
if ( length(ind) > 1 )
{
warning("Node \"",node$name,"\": multiple occurrences of parameter",
names(par)[ind[1]])
ind <- ind[1]
}
if ( length(ind) == 1 )
{
n$u[i] <- par[ind]
}
}
}
# return updated node:
return(n)
}
# evaluate values or utilities:
# -----------------------------
evaluate.utility.endnode.intpol1d <- function(x,
attrib, # data.frame, numeric
par = NA,
...)
{
node <- x
# update parameters:
n <- updatepar(node,par)
# extract attributes:
if ( is.data.frame(attrib) | is.matrix(attrib) )
{
if ( length(which(colnames(attrib)==n$attrib)) != 1 )
{
warning("Node \"",node$name,"\": attribute \"",n$attrib,"\" not found",sep="")
return(rep(NA,nrow(attrib)))
}
a <- attrib[,n$attrib]
}
else
{
if ( ! is.vector(attrib) )
{
warning("Node \"",node$name,"\": unknown format of attribute \"",n$attrib,"\"",sep="")
return(NA)
}
if ( length(names(attrib)) == 0 )
{
a <- attrib
}
else
{
ind <- which(names(attrib)==n$attrib)
if ( length(ind) != 1 )
{
if ( length(ind) > 1)
{
warning("Node \"",node$name,"\": multiple occurrences of attribute \"",
n$attrib,"\"",sep="")
}
else
{
warning("Node \"",node$name,"\": attribute \"",n$attrib,"\" not found",sep="")
}
return(NA)
}
a <- attrib[ind]
}
}
# evaluate results:
if ( !is.numeric(a) )
{
if ( is.factor(a) ) a <- as.numeric(as.character(a))
else a <- as.numeric(a)
}
u <- approx(x=n$x,y=n$u,xout=a,rule=2)$y
ind.out.of.range <- (a < n$range[1]) | (a > n$range[2])
u <- ifelse(ind.out.of.range,NA,u)
if ( sum(ind.out.of.range,na.rm=T) > 0 )
{
ind.not.na <- ifelse(is.na(ind.out.of.range),F,ind.out.of.range)
warning("Node \"",node$name,"\": value(s) of attribute \"",n$attrib,"\" out of range: ",
paste(a[ind.not.na],collapse=","),sep="")
}
# return results:
return(u)
}
# print:
# ------
print.utility.endnode.intpol1d <- function(x,...)
{
cat(paste(rep("-",50),collapse=""),"\n")
summary(x,...)
cat(paste(rep("-",50),collapse=""),"\n")
}
# summary:
# --------
summary.utility.endnode.intpol1d <- function(object,...)
{
node <- object
cat(node$name,"\n")
cat(paste(rep("-",nchar(node$name)),collapse=""),"\n")
cat(node$description,"\n")
cat("attribute: ",node$attrib,"\n")
cat("attribute range:",node$range[1],"-",node$range[2],"\n")
funtype <- "utility"; if ( !node$utility ) funtype <- "value"
cat("function type: ",funtype,"\n")
cat("required: ",node$required,"\n")
cat("data pairs:","\n")
names.x <- ifelse(is.na(node$names.x),"",node$names.x)
names.u <- ifelse(is.na(node$names.u),"",node$names.u)
print(data.frame(names.x=names.x,x=node$x,u=node$u,names.u=names.u))
}
# plot:
# -----
plot.utility.endnode.intpol1d <-
function(x,
par = NA,
col = utility.calc.colors(),
gridlines = c(0.2,0.4,0.6,0.8),
main = "",
cex.main = 1,
xlim = numeric(0),
...)
{
node <- x
n <- updatepar(node,par)
if ( length(xlim) < 2 ) xlim <- c(min(n$x),max(n$x))
utility.endnode.plot1d(node = n,
col = col,
gridlines = gridlines,
main = main,
cex.main = cex.main,
xlim = xlim,
...)
points(n$x,n$u,cex=1.5,xpd=TRUE)
}
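# test code (sketch; hypothetical node and attribute names):
#
# library(utility)
#
# n <- utility.endnode.intpol1d.create(
#        name.node   = "oxygen",
#        name.attrib = "O2",
#        range       = c(0,12),
#        x           = c(0,4,8,12),
#        u           = c(0,0.4,0.9,1),
#        utility     = FALSE)
#
# evaluate(n,data.frame(O2=c(2,6,14)))   # approx. c(0.2,0.65,NA); 14 is out of range
#
# plot(n)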
################################################################################
# #
# utility and value function package #
# ================================== #
# #
# version 1.4.3 Peter Reichert 15.01.2018 #
# #
################################################################################
# ==============================================================================
# endnode for interpolation based on isolines of two attributes:
# class "utility.endnode.intpol2d"
# ==============================================================================
# constructor:
# ------------
utility.endnode.intpol2d.create <- function(name.node, # character(1)
name.attrib, # character(2)
ranges, # list(2) of numeric(2)
isolines, # list(n) of list of
# x, y, and, optionally
# names.x, names.y
u, # numeric(n)
names.u = rep(NA,length(u)),
lead = 0,
utility = TRUE,
required = FALSE,
col = "black",
shift.levels = 0)
{
# consistency checks:
check.ok <- T
if ( length(name.attrib) != 2 )
{
cat("*** Warning: name.attrib must be of length 2","\n")
check.ok <- F
}
if ( length(ranges) != 2 )
{
cat("*** Warning: ranges must be a list of two ranges","\n")
check.ok <- F
}
else
{
if ( length(ranges[[1]]) != 2 )
{
cat("*** Warning: ranges[[1]] must contain two elements","\n")
check.ok <- F
}
else
{
if ( ranges[[1]][1] >= ranges[[1]][2] )
{
cat("*** Warning: Minimum of range not smaller than maximum:",
ranges[[1]][1],ranges[[1]][2],"\n")
check.ok <- F
}
}
if ( length(ranges[[2]]) != 2 )
{
cat("*** Warning: ranges[[2]] must contain two elements","\n")
check.ok <- F
}
else
{
if ( ranges[[2]][1] >= ranges[[2]][2] )
{
cat("*** Warning: Minimum of range not smaller than maximum:",
ranges[[2]][1],ranges[[2]][2],"\n")
check.ok <- F
}
}
}
if ( length(isolines) < 2 )
{
cat("*** Warning: at least two isolines are required","\n")
check.ok <- F
}
if ( length(isolines) != length(u) )
{
cat("*** Warning: isolines and u are of different length:",
length(isolines),length(u),"\n")
check.ok <- F
}
for ( i in 1:length(isolines) )
{
len.x <- length(isolines[[i]]$x)
if ( len.x < 2 )
{
cat("*** Warning: element x of isoline[[",i,"]] ",
"must be of length > 1","\n",sep="")
check.ok <- F
}
if ( len.x != length(isolines[[i]]$y) )
{
cat("*** Warning: x and y in isoline[[",i,"]] ",
"have different lengths:",
len.x," ",length(isolines[[i]]$y),"\n",
sep="")
check.ok <- F
}
if ( length(isolines[[i]]$names.x) == 0 ) isolines[[i]]$names.x <- rep(NA,len.x)
if ( len.x != length(isolines[[i]]$names.x) )
{
cat("*** Warning: x and names.x in isoline[[",i,"]] ",
"have different lengths:",
len.x," ",length(isolines[[i]]$names.x),"\n",
sep="")
check.ok <- F
}
if ( length(isolines[[i]]$names.y) == 0 ) isolines[[i]]$names.y <- rep(NA,len.x)
if ( len.x != length(isolines[[i]]$names.y) )
{
cat("*** Warning: y and names.y in isoline[[",i,"]] ",
"have different lengths:",
len.x," ",length(isolines[[i]]$names.y),"\n",
sep="")
check.ok <- F
}
}
if ( ! check.ok )
{
cat("*** Warning: node \"",name.node,"\" could not be constructed","\n",
sep="")
return(NA)
}
# construct class:
node <- list()
node$name <- name.node
node$description <- "utility/value 2d interpolation end node"
node$type <- "endnode"
node$attrib <- name.attrib
node$ranges <- ranges
node$isolines <- isolines
node$u <- u
node$names.u <- names.u
node$lead <- lead
node$required <- required
node$utility <- utility
node$col <- col
node$shift.levels <- shift.levels
class(node) <- "utility.endnode.intpol2d"
# print and return class
#cat(node$description," \"",name.node,"\" constructed","\n",sep="")
return(node)
}
# update parameter values:
# ------------------------
updatepar.utility.endnode.intpol2d <- function(x,par=NA,...)
{
node <- x
  # check availability of named parameter vector:
if ( length(names(par)) == 0 ) return(node)
# update adequate values in interpolation list:
n <- node
for ( i in 1:length(n$u) )
{
if ( ! is.na(n$names.u[i]) )
{
ind <- which(n$names.u[i] == names(par) )
if ( length(ind) > 1 )
{
warning("Node \"",node$name,"\": multiple occurrences of parameter",
names(par)[ind[1]],sep="")
ind <- ind[1]
}
if ( length(ind) == 1 )
{
n$u[i] <- par[ind]
}
}
for ( j in 1:length(n$isolines[[i]]$x) )
{
if ( ! is.na(n$isolines[[i]]$names.x[j]) )
{
ind <- which(n$isolines[[i]]$names.x[j] == names(par) )
if ( length(ind) > 1 )
{
warning("Node \"",node$name,"\": multiple occurrences of parameter",
names(par)[ind[1]],sep="")
ind <- ind[1]
}
if ( length(ind) == 1 )
{
n$isolines[[i]]$x[j] <- par[ind]
}
}
if ( ! is.na(n$isolines[[i]]$names.y[j]) )
{
ind <- which(n$isolines[[i]]$names.y[j] == names(par) )
if ( length(ind) > 1 )
{
warning("Node \"",node$name,"\": multiple occurrences of parameter",
names(par)[ind[1]],sep="")
ind <- ind[1]
}
if ( length(ind) == 1 )
{
n$isolines[[i]]$y[j] <- par[ind]
}
}
}
}
# return updated node:
return(n)
}
# evaluate values or utilities:
# -----------------------------
evaluate.utility.endnode.intpol2d <- function(x,
attrib, # data.frame, numeric
par = NA,
...)
{
node <- x
# update parameters:
n <- updatepar(node,par)
# extract attributes:
if ( is.data.frame(attrib) | is.matrix(attrib) )
{
ind <- match(n$attrib,colnames(attrib))
if ( sum(ifelse(is.na(ind),1,0)) > 0 )
{
warning("Node \"",node$name,"\": attribute(s) \"",
paste(n$attrib[is.na(ind)],collapse=","),"\" not found",sep="")
return(rep(NA,nrow(attrib)))
}
a <- attrib[,ind]
}
else
{
if ( ! is.vector(attrib) )
{
warning("Node \"",node$name,"\": unknown format of attribute(s) \"",n$attrib,"\"",sep="")
return(NA)
}
    if ( length(names(attrib)) == 0 )
    {
      if ( length(attrib) != 2 )
      {
        warning("Node \"",node$name,"\": unknown format of attribute(s) \"",n$attrib,"\"",sep="")
        return(NA)
      }
      a <- matrix(attrib,nrow=1)
    }
else
{
ind <- match(n$attrib,names(attrib))
if ( sum(ifelse(is.na(ind),1,0)) > 0 )
{
warning("Node \"",node$name,"\": attribute(s) \"",
paste(n$attrib[is.na(ind)],collapse=","),"\" not found",sep="")
return(rep(NA,nrow(attrib)))
}
      a <- matrix(attrib[ind],nrow=1)
}
}
# evaluate results:
if ( is.data.frame(a) )
{
if ( !is.numeric(a[,1]) )
{
if ( is.factor(a[,1]) ) a[,1] <- as.numeric(as.character(a[,1]))
else a[,1] <- as.numeric(a[,1])
}
if ( !is.numeric(a[,2]) )
{
if ( is.factor(a[,2]) ) a[,2] <- as.numeric(as.character(a[,2]))
else a[,2] <- as.numeric(a[,2])
}
}
else
{
if ( !is.numeric(a) )
{
if ( is.factor(a) ) a <- as.numeric(as.character(a))
else a <- as.numeric(a)
}
}
ind <- order(n$u)
u <- utility.intpol2d(xy=a,isolines=n$isolines[ind],
levels=n$u[ind],lead=n$lead)
  ind.out.of.range <- (a[,1]<n$ranges[[1]][1])|(a[,1]>n$ranges[[1]][2])
u <- ifelse(ind.out.of.range,NA,u)
if ( sum(ind.out.of.range,na.rm=T) > 0 )
{
ind.not.na <- ifelse(is.na(ind.out.of.range),F,ind.out.of.range)
warning("Node \"",node$name,"\": value(s) of attribute \"",n$attrib[1],"\" out of range: ",
paste(a[ind.not.na,1],collapse=","),sep="")
}
  ind.out.of.range <- (a[,2]<n$ranges[[2]][1])|(a[,2]>n$ranges[[2]][2])
u <- ifelse(ind.out.of.range,NA,u)
if ( sum(ind.out.of.range,na.rm=T) > 0 )
{
ind.not.na <- ifelse(is.na(ind.out.of.range),F,ind.out.of.range)
warning("Node \"",node$name,"\": value(s) of attribute \"",n$attrib[2],"\" out of range: ",
paste(a[ind.not.na,2],collapse=","),sep="")
}
# return results:
return(u)
}
# print:
# -----
print.utility.endnode.intpol2d <- function(x,...)
{
cat(paste(rep("-",50),collapse=""),"\n")
summary(x,...)
cat(paste(rep("-",50),collapse=""),"\n")
}
# summary:
# --------
summary.utility.endnode.intpol2d <- function(object,...)
{
node <- object
cat(node$name,"\n")
cat(paste(rep("-",nchar(node$name)),collapse=""),"\n")
cat(node$description,"\n")
cat("attributes: ",paste(node$attrib,collapse=" , "),"\n")
cat("attribute ranges:",node$range[[1]][1],"-",node$range[[1]][2],
",",node$range[[2]][1],"-",node$range[[2]][2],"\n")
funtype <- "utility"; if ( !node$utility ) funtype <- "value"
cat("function type: ",funtype,"\n")
cat("required: ",node$required,"\n")
cat("isolines:","\n")
for ( i in 1:length(node$u) )
{
name.u <- ""
if ( !is.na(node$names.u[i]) )
{
name.u <- paste(":",node$names.u[i])
}
cat("u:",node$u[i]," ",name.u,"\n")
names.x <- rep("",length(node$isolines[[i]]$x))
if ( length(node$isolines[[i]]$names.x) > 0 )
{
names.x <- ifelse(is.na(node$isolines[[i]]$names.x),
"",node$isolines[[i]]$names.x)
}
names.y <- rep("",length(node$isolines[[i]]$y))
if ( length(node$isolines[[i]]$names.y) > 0 )
{
names.y <- ifelse(is.na(node$isolines[[i]]$names.y),
"",node$isolines[[i]]$names.y)
}
print(data.frame(names.x=names.x,
x=node$isolines[[i]]$x,
y=node$isolines[[i]]$y,
names.y=names.y))
}
}
# plot:
# -----
plot.utility.endnode.intpol2d <-
function(x,
par = NA,
col = utility.calc.colors(),
gridlines = c(0.2,0.4,0.6,0.8),
main = "",
cex.main = 1,
xlim = numeric(0),
ylim = numeric(0),
...)
{
node <- x
n <- updatepar(node,par)
if ( length(xlim) < 2 )
{
xlim <- c(min(n$isolines[[1]]$x),max(n$isolines[[1]]$x))
if ( length(n$isolines) > 1 )
{
for ( i in 2:length(n$isolines) )
{
xlim <- c(min(c(xlim[1],n$isolines[[i]]$x)),max(c(xlim[2],n$isolines[[i]]$x)))
}
}
}
if ( length(ylim) < 2 )
{
ylim <- c(min(n$isolines[[1]]$y),max(n$isolines[[1]]$y))
if ( length(n$isolines) > 1 )
{
for ( i in 2:length(n$isolines) )
{
ylim <- c(min(c(ylim[1],n$isolines[[i]]$y)),max(c(ylim[2],n$isolines[[i]]$y)))
}
}
}
utility.endnode.plot2d(node = n,
col = col,
gridlines = gridlines,
main = main,
cex.main = cex.main,
xlim = xlim,
ylim = ylim,
...)
ind <- order(n$u)
levels <- n$u[ind]
isolines <- n$isolines[ind]
for ( i in 1:length(levels) )
{
lines(isolines[[i]],...)
if ( i > 1 )
{
lines(c(isolines[[i-1]]$x[1],isolines[[i]]$x[1]),
c(isolines[[i-1]]$y[1],isolines[[i]]$y[1]),
...)
lines(c(isolines[[i-1]]$x[length(isolines[[i-1]]$x)],
isolines[[i]]$x[length(isolines[[i]]$x)]),
c(isolines[[i-1]]$y[length(isolines[[i-1]]$y)],
                  isolines[[i]]$y[length(isolines[[i]]$y)]),
...)
}
}
}
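# test code (sketch; a minimal two-isoline example with hypothetical attributes;
# the expected values assume interpolation between the isolines):
#
# library(utility)
#
# n <- utility.endnode.intpol2d.create(
#        name.node   = "example2d",
#        name.attrib = c("a","b"),
#        ranges      = list(c(0,1),c(0,1)),
#        isolines    = list(list(x=c(0,1),y=c(0,0)),   # isoline for u = 0
#                           list(x=c(0,1),y=c(1,1))),  # isoline for u = 1
#        u           = c(0,1),
#        utility     = FALSE)
#
# evaluate(n,data.frame(a=c(0.5,0.2),b=c(0.25,1)))   # roughly c(0.25,1)
#
# plot(n)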
################################################################################
# #
# utility and value function package #
# ================================== #
# #
# version 1.4.3 Peter Reichert 15.01.2018 #
# #
################################################################################
# ==============================================================================
# endnode for 1d (single attribute) parametric function:
# class "utility.endnode.parfun1d"
# ==============================================================================
# constructor:
# ------------
utility.endnode.parfun1d.create <- function(name.node, # character(1)
name.attrib, # character(1)
range, # numeric(2)
name.fun, # name of f(a,par)
par, # numeric(n)
names.par = rep(NA,length(par)),
utility = TRUE,
required = FALSE,
col = "black",
shift.levels = 0)
{
# consistency checks:
check.ok <- T
if ( length(par) != length(names.par) )
{
cat("*** Warning: par and names.par of different length:",
length(par),length(names.par),"\n")
check.ok <- F
}
if ( range[1] >= range[2] )
{
cat("*** Warning: Minimum of range not smaller than maximum:",
range[1],range[2],"\n")
check.ok <- F
}
if ( ! check.ok )
{
cat("*** Warning: Node \"",name.node,"\" could not be constructed","\n",
sep="")
return(NA)
}
# construct class:
node <- list()
node$name <- name.node
node$description <- "utility/value 1d parametric function end node"
node$type <- "endnode"
node$attrib <- name.attrib
node$range <- range
node$name.fun <- name.fun
node$par <- par
node$names.par <- names.par
node$required <- required
node$utility <- utility
node$col <- col
node$shift.levels <- shift.levels
class(node) <- "utility.endnode.parfun1d"
# print and return class
#cat(node$description," \"",name.node,"\" constructed","\n",sep="")
return(node)
}
# update parameter values:
# ------------------------
updatepar.utility.endnode.parfun1d <- function(x,par=NA,...)
{
node <- x
  # check availability of named parameter vector:
if ( length(names(par)) == 0 ) return(node)
# update adequate values in interpolation list:
n <- node
for ( i in 1:length(n$par) )
{
if ( ! is.na(n$names.par[i]) )
{
ind <- which(n$names.par[i] == names(par) )
if ( length(ind) > 1 )
{
warning("Node \"",node$name,"\": multiple occurrences of parameter \"",
names(par)[ind[1]],"\"",sep="")
ind <- ind[1]
}
if ( length(ind) == 1 )
{
n$par[i] <- par[ind]
}
}
}
# return updated node:
return(n)
}
# evaluate values or utilities:
# -----------------------------
evaluate.utility.endnode.parfun1d <- function(x,
attrib, # data.frame, numeric
par = NA,
...)
{
node <- x
# update parameters:
n <- updatepar(node,par)
# extract attributes:
if ( is.data.frame(attrib) )
{
if ( length(which(names(attrib)==n$attrib)) != 1 )
{
warning("Node \"",node$name,"\": attribute \"",n$attrib,"\" not found",sep="")
return(rep(NA,nrow(attrib)))
}
a <- attrib[,n$attrib]
}
else
{
if ( ! is.vector(attrib) )
{
warning("Node \"",node$name,"\": unknown format of attribute \"",n$attrib,"\"",sep="")
return(NA)
}
if ( length(names(attrib)) == 0 )
{
a <- attrib
}
else
{
ind <- which(names(attrib)==n$attrib)
if ( length(ind) != 1 )
{
if ( length(ind) > 1)
{
warning("Node \"",node$name,"\": multiple occurrences of attribute \"",
n$attrib,"\"",sep="")
}
else
{
warning("Node \"",node$name,"\": attribute \"",n$attrib,"\" not found",sep="")
}
return(NA)
}
a <- attrib[ind]
}
}
# evaluate results:
if ( !is.numeric(a) )
{
if ( is.factor(a) ) a <- as.numeric(as.character(a))
else a <- as.numeric(a)
}
u <- do.call(n$name.fun,list(a,n$par))
ind.out.of.range <- (a < n$range[1]) | (a > n$range[2])
u <- ifelse(ind.out.of.range,NA,u)
if ( sum(ind.out.of.range,na.rm=T) > 0 )
{
ind.not.na <- ifelse(is.na(ind.out.of.range),F,ind.out.of.range)
warning("Node \"",node$name,"\": value(s) of attribute \"",n$attrib,"\" out of range: ",
paste(a[ind.not.na],collapse=","),sep="")
}
# return results:
return(u)
}
# print:
# -----
print.utility.endnode.parfun1d <- function(x,...)
{
cat(paste(rep("-",50),collapse=""),"\n")
summary(x,...)
cat(paste(rep("-",50),collapse=""),"\n")
}
# summary:
# --------
summary.utility.endnode.parfun1d <- function(object,...)
{
node <- object
cat(node$name,"\n")
cat(paste(rep("-",nchar(node$name)),collapse=""),"\n")
cat(node$description,"\n")
cat("attribute: ",node$attrib,"\n")
cat("attribute range:",node$range[1],"-",node$range[2],"\n")
funtype <- "utility"; if ( !node$utility ) funtype <- "value"
cat("function type: ",funtype,"\n")
cat("required: ",node$required,"\n")
cat("function: ",node$name.fun,"\n")
cat("parameters:","\n")
names.par <- ifelse(is.na(node$names.par),"",node$names.par)
print(data.frame(names.par=names.par,par=node$par))
}
# plot:
# -----
plot.utility.endnode.parfun1d <-
function(x,
par = NA,
col = utility.calc.colors(),
gridlines = c(0.2,0.4,0.6,0.8),
main = "",
cex.main = 1,
xlim = numeric(0),
...)
{
node <- x
n <- updatepar(node,par)
utility.endnode.plot1d(node = n,
col = col,
gridlines = gridlines,
main = main,
cex.main = cex.main,
xlim = xlim,
...)
}
# ==============================================================================
# simple parametric utility functions
# ==============================================================================
utility.fun.exp <- function(attrib,par) # par[1]: absolute risk aversion
{ # par[2]: minimum of attribute range (default=0)
# par[3]: maximum of attribute range (default=1)
atrans <- attrib
if ( length(par) >= 3 ) atrans <- (attrib-par[2])/(par[3]-par[2])
if ( par[1] == 0 ) return(atrans)
return((1-exp(-atrans*par[1]))/(1-exp(-par[1])))
}
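# test code (sketch; hypothetical node, attribute and parameter names):
#
# library(utility)
#
# n <- utility.endnode.parfun1d.create(
#        name.node   = "temperature",
#        name.attrib = "temp",
#        range       = c(0,30),
#        name.fun    = "utility.fun.exp",
#        par         = c(2,0,30),            # risk aversion 2, attribute range 0-30
#        names.par   = c("ra","min","max"),
#        utility     = FALSE)
#
# evaluate(n,data.frame(temp=c(0,15,30)))            # approx. c(0,0.73,1)
# evaluate(n,data.frame(temp=15),par=c(ra=0.001))    # approx. 0.5 (nearly linear)
#
# plot(n)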
################################################################################
# #
# utility and value function package #
# ================================== #
# #
# version 1.4.5 Peter Reichert 08.03.2020 #
# #
################################################################################
# ==============================================================================
# plotting functions (see also the object-specific plotting functions)
# ==============================================================================
utility.endnode.plot1d <-
function(node,
col = utility.calc.colors(),
gridlines = c(0.2,0.4,0.6,0.8),
main = "",
cex.main = 1,
xlim = numeric(0),
...)
{
length <- 101
if ( length(xlim) < 2 ) xlim <- node$range
x <- seq(xlim[1],xlim[2],length=length)
u <- evaluate(node,attrib=x)
title <- main; if ( nchar(title) == 0 ) title <- node$name
funtype <- "utility"; if ( !node$utility ) funtype <- "value"
plot(numeric(0),numeric(0),type="l",
xlim=xlim,ylim=c(0,1),
xlab=node$attrib,ylab=funtype,main=title,
xaxs="i",yaxs="i",xaxt="n",yaxt="n",cex.main=cex.main,...)
# colored bar along y axis:
if ( length(col)>1 & !node$utility )
{
num.grid = 100
    # y-axis:
endpoints <- seq(0,1,length.out=num.grid+1)+1/(2*num.grid)
midpoints <- 0.5*(endpoints[-1]+endpoints[-length(endpoints)])
cols <- utility.get.colors(midpoints,col)
for ( i in 1:(num.grid-1) )
{
lines((xlim[1]-0.01*(xlim[2]-xlim[1]))*c(1,1),
endpoints[c(i,i+1)],
col=cols[i],lwd=3,lend=1,xpd=TRUE)
}
# x-axis:
midpoints <- 0.5*(u[-1]+u[-length(u)])
    cols <- utility.get.colors(midpoints,col)
for ( i in 1:length(midpoints) )
{
lines(c(x[i],x[i+1]),
-0.01*c(1,1),
col=cols[i],lwd=3,lend=1,xpd=TRUE)
}
}
  # axes (should overlay colored bar):
axis(side=1,...)
axis(side=2,...)
# plot gridlines:
if ( !node$utility )
{
if ( ! is.na(gridlines[1]) )
{
for ( level in gridlines )
{
abline(h=level,lty="dashed")
for ( i in 1:(length-1) )
{
if ( !is.na(u[i]) & !is.na(u[i+1]) )
{
if ( (u[i] <= level & u[i+1] > level) |
(u[i] > level & u[i+1] <= level) )
{
x.level <- x[i] + (level-u[i])/(u[i+1]-u[i])*(x[i+1]-x[i])
lines(c(x.level,x.level),c(0,level),lty="dashed")
}
}
}
}
}
}
# plot value/utility function:
color <- "black"
if ( length(col) == 1 ) color <- col
lines(x,u,lwd=2,col=color)
}
utility.endnode.plot2d <- function(node,
col = utility.calc.colors(),
gridlines = c(0.2,0.4,0.6,0.8),
main = "",
cex.main = 1,
xlim = numeric(0),
ylim = numeric(0),
...)
{
num.grid <- 100
if ( length(xlim) < 2 ) xlim <- node$ranges[[1]]
if ( length(ylim) < 2 ) ylim <- node$ranges[[2]]
x <- xlim[1] + ((1:num.grid)-0.5)/num.grid*(xlim[2]-xlim[1])
y <- ylim[1] + ((1:num.grid)-0.5)/num.grid*(ylim[2]-ylim[1])
array.x <- sort(rep(x,num.grid))
array.y <- rep(y,num.grid)
array.xy <- cbind(array.x,array.y)
colnames(array.xy) <- node$attrib
u <- evaluate(node,as.data.frame(array.xy))
u <- t(matrix(u,ncol=num.grid,byrow=FALSE))
title <- main; if ( nchar(title) == 0 ) title <- node$name
image(x=x,y=y,z=u,xlim=xlim,ylim=ylim,zlim=c(0,1),
col=col,xlab=node$attrib[1],ylab=node$attrib[2],main=title,
cex.main=cex.main)
}
utility.conversion.plot <- function(node,
col = "black",
gridlines = NA,
cex.main = 1,
...)
{
length <- 101
x <- ((1:length)-1)/(length-1)
if ( class(node)[1] == "utility.conversion.intpol" )
{
u <- evaluate_utility.conversion.intpol(node,x)
}
else
{
if ( class(node)[1] == "utility.conversion.parfun" )
{
u <- evaluate_utility.conversion.parfun(node,x)
}
else
{
u <- NA
}
}
plot(numeric(0),numeric(0),type="l",
xlim=c(0,1),ylim=c(0,1),
xlab=paste("value(",node$nodes[[1]]$name,")",sep=""),ylab="utility",
main=node$name,xaxs="i",yaxs="i",cex.main=cex.main)
color <- "black"; if ( length(col) == 1 ) color <- col
lines(x,u,lwd=2,col=color)
lines(c(0,1),c(0,1))
if ( length(node$x) > 0 & length(node$u) > 0 )
{
if ( length(node$x) == length(node$u) )
{
points(node$x,node$u,cex=1.5,xpd=TRUE)
}
}
}
utility.aggregation.plot <- function(node = node,
col = col,
gridlines = gridlines,
cex.main = 1,
cex.attrib = 1,
cex.nodes = 1,
...)
{
nodes.names <- rep(NA,length(node$nodes))
for ( i in 1:length(node$nodes) ) nodes.names[i] <- node$nodes[[i]]$name
if ( length(node$nodes) == 2 )
{
num.grid <- 100
x <- ((1:num.grid)-0.5)/num.grid
y <- ((1:num.grid)-0.5)/num.grid
array.x <- sort(rep(x,num.grid))
array.y <- rep(y,num.grid)
array.xy <- cbind(array.x,array.y)
if ( length(node$add.arg.fun) > 0 )
{
v <- apply(array.xy,1,node$name.fun,node$par,node$add.arg.fun)
}
else
{
v <- apply(array.xy,1,node$name.fun,node$par)
}
v <- t(matrix(v,ncol=num.grid,byrow=FALSE))
if ( node$utility )
{
contour(x=x,y=y,z=v,levels=gridlines,xlim=c(0,1),ylim=c(0,1),zlim=c(0,1),
axes=FALSE,add=FALSE,lty="solid",lwd=2,
xlab=node$nodes[[1]]$name,ylab=node$nodes[[2]]$name,
main=node$name,...)
}
else
{
# area coloring:
image(x=x,y=y,z=v,xlim=c(0,1),ylim=c(0,1),zlim=c(0,1),
col=col,xaxt="n",yaxt="n",
xlab=node$nodes[[1]]$name,ylab=node$nodes[[2]]$name,
main=node$name,...)
# colored bar along axes:
endpoints <- seq(0,1,length.out=10*num.grid+1)
midpoints <- 0.5*(endpoints[-1]+endpoints[-length(endpoints)])
cols <- utility.get.colors(midpoints,col)
for ( i in 1:(10*num.grid-1) )
{
lines(-0.015*c(1,1),endpoints[c(i,i+1)],col=cols[i],lwd=6,lend=1,xpd=TRUE)
lines(endpoints[c(i,i+1)],-0.015*c(1,1),col=cols[i],lwd=6,lend=1,xpd=TRUE)
}
      # axes (should overlay colored bar):
axis(1,...)
axis(2,...)
lines(c(1,1,0),c(0,1,1))
# contour lines:
contour(x=x,y=y,z=v,levels=gridlines,xlim=c(0,1),ylim=c(0,1),zlim=c(0,1),
axes=FALSE,add=TRUE,lty="solid",lwd=2,...)
}
}
else
{
if ( node$name.fun == "utility.aggregate.add" |
node$name.fun == "utility.aggregate.geo" |
node$name.fun == "utility.aggregate.cobbdouglas" |
node$name.fun == "utility.aggregate.harmo")
{
type <- "Additive"
if ( node$name.fun == "utility.aggregate.geo" |
node$name.fun == "utility.aggregate.cobbdouglas" ) type = "Geometric"
if ( node$name.fun == "utility.aggregate.harmo" ) type = "Harmonic"
w <- node$par/sum(node$par)
w.max <- max(w)
if ( length(w) != length(nodes.names) )
{
warning("Node \"",node$name,"\": ",
"length of sub-nodes and weights not equal: ",
length(nodes.names)," ",length(w),sep="")
}
else
{
barplot(w,names.arg=nodes.names,ylim=c(0,1.2*w.max),
ylab="weight",main=node$name,cex.main=cex.main,cex.names=cex.nodes)
text(0.5*1.3*length(w),1.1*w.max,paste(type,"aggregation with weights:"))
}
}
else
{
if ( node$name.fun == "utility.aggregate.mult" )
{
w <- node$par
w.max <- max(w)
if ( length(w) != length(nodes.names) )
{
warning("Node \"",node$name,"\": ",
"length of sub-nodes and weights not equal: ",
length(nodes.names)," ",length(w),sep="")
}
else
{
barplot(w,names.arg=nodes.names,ylim=c(0,1.2*w.max),
ylab="weight",main=node$name,cex.main=cex.main,cex.names=cex.nodes)
text(0.5*1.3*length(w),1.1*w.max,
"Multiplicative aggregation with weights:")
}
}
else
{
if ( node$name.fun == "utility.aggregate.min" |
node$name.fun == "utility.aggregate.max" )
{
type <- "Minimum (worst-case)"
if ( node$name.fun == "utility.aggregate.max" ) type <- "Maximum"
plot(numeric(0),numeric(0),xlim=c(0,1),ylim=c(0,1),
xaxt="n",yaxt="n",main=node$name,xlab="",ylab="",
cex.main=cex.main)
text(0.5,0.9,paste(type,"aggregation of nodes:"))
for ( i in 1:length(nodes.names) )
{
text(0.5,0.7*i/length(nodes.names),nodes.names[i])
}
}
else
{
plot(numeric(0),numeric(0),xlim=c(0,1),ylim=c(0,1),
xaxt="n",yaxt="n",main=node$name,xlab="",ylab="",
cex.main=cex.main)
text(0.5,0.9,paste("aggregation with function \"",
node$name.fun,"\" of nodes:",sep=""))
for ( i in 1:length(nodes.names) )
{
text(0.5,0.7*i/length(nodes.names),nodes.names[i])
}
}
}
}
}
}
utility.plotcolbox <- function(x,y,col,
val = NA,
plot.val = FALSE,
col.val = "black",
lwd.val = 1,
ticks = numeric(0),
tcl = 0.1)
{
# check for availability of data:
if ( length(val) == 0 ) return()
if ( is.na(val[1]) & length(col)>1 ) return()
# plot colored box (without border):
color <- col
if ( length(col) > 1 ) color <- utility.get.colors(val[1],col)
polygon(x = c(x[1],x[2],x[2],x[1],x[1]),
y = c(y[1],y[1],y[2],y[2],y[1]),
col = color,
border = NA)
# optionally plot value line:
if ( plot.val & !is.na(val[1]) )
{
lines((x[1]+val[1]*(x[2]-x[1]))*c(1,1),y,lwd=lwd.val,col=col.val,lend=1)
}
# optionally plot tick marks:
if( sum(!is.na(ticks)) > 0 )
{
for( i in 1:length(ticks) )
{
if ( !is.na(ticks[i]) )
{
if ( ticks[i]>=0 & ticks[i]<=1 ) lines((x[1]+ticks[i]*(x[2]-x[1]))*c(1,1),c(y[1],y[1]-tcl*(y[2]-y[1])),lend=1)
}
}
}
return()
}
utility.plotquantbox <- function(x,y,col,val,
num.stripes = 500,
plot.val = TRUE,
col.val = "black",
lwd.val = 1.5,
ticks = numeric(0),
tcl = 0.1)
{
min.halfwidth <- 0.02
# check for availability of data:
if ( length(val) == 0 ) return()
if ( sum(is.na(val)) == length(val) ) return()
# get quantiles:
quant <- quantile(val[!is.na(val)],probs=c(0.05,0.5,0.95))
if ( quant[3]-quant[1] < 2*min.halfwidth )
{
quant[1] <- max(0,quant[1]-min.halfwidth)
quant[3] <- min(1,quant[3]+min.halfwidth)
}
# plot colored quantile box:
for ( j in floor(num.stripes*quant[1]):ceiling(num.stripes*quant[3]) )
{
lines((x[1]+j/num.stripes*(x[2]-x[1]))*c(1,1),y,
col=utility.get.colors(j/num.stripes,col))
}
# plot median line:
if ( plot.val ) lines((x[1]+quant[2]*(x[2]-x[1]))*c(1,1),y,col=col.val,lwd=lwd.val,lend=1)
# optionally plot tick marks:
if( sum(!is.na(ticks)) > 0 )
{
for( i in 1:length(ticks) )
{
if ( !is.na(ticks[i]) )
{
if ( ticks[i]>=0 & ticks[i]<=1 ) lines((x[1]+ticks[i]*(x[2]-x[1]))*c(1,1),c(y[1],y[1]-tcl*(y[2]-y[1])),lend=1)
}
}
}
return()
}
utility.plothierarchy <-
function(node,
u = NA,
uref = NA,
col = utility.calc.colors(),
main = "",
cex.main = 1,
cex.nodes = 1,
cex.attrib = 1,
with.attrib = TRUE,
levels = NA,
plot.val = TRUE,
col.val = "black",
lwd.val = 1,
two.lines = FALSE,
ticks = numeric(0),
...)
{
# call multiple times if u and possibly uref are lists:
if ( is.list(u) & !is.data.frame(u) )
{
if ( is.list(uref) & !is.data.frame(uref) )
{
if ( length(u) == length(uref) )
{
for ( i in 1:length(u) )
{
utility.plothierarchy(node = node,
u = u[[i]],
uref = uref[[i]],
col = col,
main = main,
cex.main = cex.main,
cex.nodes = cex.nodes,
cex.attrib = cex.attrib,
with.attrib = with.attrib,
levels = levels,
plot.val = plot.val,
col.val = col.val,
lwd.val = lwd.val,
two.lines = two.lines,
ticks = ticks,
...)
}
}
else
{
warning("if u and uref are lists, their lengths must be equal")
}
}
    else
    {
      for ( i in 1:length(u) )
      {
        utility.plothierarchy(node        = node,
                              u           = u[[i]],
                              uref        = uref,
                              col         = col,
                              main        = main,
                              cex.main    = cex.main,
                              cex.nodes   = cex.nodes,
                              cex.attrib  = cex.attrib,
                              with.attrib = with.attrib,
                              levels      = levels,
                              plot.val    = plot.val,
                              col.val     = col.val,
                              lwd.val     = lwd.val,
                              two.lines   = two.lines,
                              ticks       = ticks,
                              ...)
      }
    }
return()
}
# global parameters:
delta.x <- 0.1
delta.y <- 0.1
dh.rel.utility <- 0.1
tcl <- 0.15
ticks.cond <- ticks; if ( !plot.val ) ticks.cond <- numeric(0)
# get hierarchy structure and define positions of boxes:
str <- utility.structure(node)
if ( ! is.data.frame(str) )
{
warning("unable to identify structure of objectives hierarchy")
return()
}
if ( !is.na(levels) ) str <- utility.prune(str,levels)
w <- 1/max(str$level)
if ( with.attrib ) w <- 1/(max(str$level)+1)
h <- 1/str$endnodes[1]
str$x <- (str$level-0.5)*w
str$y <- 1-(str$offset+0.5*str$endnodes)*h
x.attrib <- max(str$level)*w + delta.y*w
# convert u and uref to data frames:
u.local <- u
if ( is.vector(u.local) ) u.local <- t(u.local)
u.local <- as.data.frame(u.local)
uref.local <- uref
if ( is.vector(uref.local) ) uref.local <- t(uref.local)
uref.local <- as.data.frame(uref.local)
  # plot individual hierarchies per row of u if the number of titles matches
  # the number of rows; otherwise, if there is more than one row, plot a
  # quantile summary across the rows
quant.summary <- length(main) != nrow(u.local) & nrow(u.local) > 1
# find out if u and uref are available (otherwise plot required/not required shading)
u.available <- FALSE
if ( nrow(u.local)>1 | ncol(u.local)>1 | !is.na(u.local[1,1]) )
{
u.available <- TRUE
}
uref.available <- FALSE
ind.uref.local <- rep(1,nrow(u.local))
if ( nrow(uref.local)>1 | ncol(uref.local)>1 | !is.na(uref.local[1,1]) )
{
uref.available <- TRUE
if ( !quant.summary ) # number of rows must be unity or equal to nrow(u)
{
if ( nrow(uref.local) == nrow(u.local) )
{
ind.uref.local <- 1:nrow(u.local)
}
else
{
if ( nrow(uref.local) != 1 ) uref.available <- FALSE
}
}
}
# loop over rows of utilities/values:
num.plots <- nrow(u.local)
if ( !u.available | quant.summary ) num.plots <- 1
for ( k in 1:num.plots )
{
# set-up plot frame:
#par.def <- par(no.readonly=TRUE)
#par(mar=c(0,0,0,0))
plot(numeric(0),numeric(0),xlim=c(0,1),ylim=c(0,1),type="n",
axes=FALSE,xlab="",ylab="",cex.main=cex.main)
# write title
title <- main[1]
if ( length(main) == nrow(u.local) ) title <- main[k]
text(0,1-0.5*h,title,adj=c(0,0.5),cex=cex.main,...)
# draw color code legend:
if ( u.available )
{
x.l <- delta.x*w
x.r <- (1-delta.x)*w
y <- min(str$y)
num.col <- 100
v <- (1:num.col - 0.5)/num.col
colors <- utility.get.colors(v,col)
for ( i in 1:num.col )
{
lines(x.l+(x.r-x.l)/num.col*c(i-1,i),c(y,y),col=colors[i],lwd=3,lend=1)
}
text(x.l,y,"0",pos=1,cex=cex.nodes)
text(x.r,y,"1",pos=1,cex=cex.nodes)
if ( sum(!is.na(ticks.cond)) > 0 )
{
for( i in 1:length(ticks) )
{
if ( !is.na(ticks[i]) )
{
if ( ticks[i]>=0 & ticks[i]<=1 ) lines((x.l+ticks.cond[i]*(x.r-x.l))*c(1,1),y+0.5*tcl*h*c(-1,1),lend=1)
}
}
}
}
# loop over all boxes in the hierarchy:
for ( i in 1:nrow(str) )
{
# calculate box edge coordinates:
x <- str$x[i] + (0.5-delta.x)*w*c(-1,1)
y <- str$y[i] + (0.5-delta.y)*h*c(-1,1)
y1 <- c(0.5*(y[1]+y[2]),y[2]) # upper part, uref
y2 <- c(y[1],0.5*(y[1]+y[2])) # lower part, u
# plot background color or quantile boxes:
      if ( !u.available ) # plot required/not required nodes in different shades of grey
{
if ( str$required[i] ) color <- grey(0.7)
else color <- grey(0.9)
utility.plotcolbox(x,y,color)
}
else
{
if ( !quant.summary ) # plot hierarchy for each row of u
{
# plot background color and vertical line:
val <- u.local[k,rownames(str)[i]]
color <- col
if ( str$utility[i] ) color <- "white"
if ( !uref.available )
{
utility.plotcolbox(x,y,color,val,plot.val,col.val,lwd.val,ticks=ticks.cond,tcl=tcl)
}
else
{
valref <- uref.local[ind.uref.local[k],rownames(str)[i]]
utility.plotcolbox(x,y1,color,valref,plot.val,col.val,lwd.val)
utility.plotcolbox(x,y2,color,val,plot.val,col.val,lwd.val,ticks=ticks.cond,tcl=2*tcl)
}
}
else # plot quantile summary of v or expected u
{
if ( !str$utility[i] ) # plot quantile summary
{
val <- u.local[,rownames(str)[i]]
if ( !uref.available )
{
utility.plotquantbox(x,y,col,val,num.stripes=500,
plot.val=plot.val,col.val=col.val,lwd.val=lwd.val,ticks=ticks,tcl=tcl)
}
else
{
valref <- uref.local[,rownames(str)[i]]
utility.plotquantbox(x,y1,col,valref,num.stripes=500,
plot.val=plot.val,col.val=col.val,lwd.val=lwd.val)
utility.plotquantbox(x,y2,col,val,num.stripes=500,
plot.val=plot.val,col.val=col.val,lwd.val=lwd.val,ticks=ticks,tcl=2*tcl)
}
}
else # plot expected utility
{
u.exp <- NA
column <- match(rownames(str)[i],colnames(u.local))
if ( !is.na(column) )
{
u.exp <- mean(u.local[,column],na.rm=TRUE)
}
if ( !uref.available )
{
utility.plotcolbox(x,y,"white",u.exp)
}
else
{
uref.exp <- NA
column <- match(rownames(str)[i],colnames(uref.local))
if ( !is.na(column) )
{
uref.exp <- mean(uref.local[,column],na.rm=TRUE)
}
col1 <- "lightgreen"
col2 <- "tomato"
if ( u.exp > uref.exp )
{
col1 <- "tomato"
col2 <- "lightgreen"
}
utility.plotcolbox(x,y1,col1,uref.exp)
utility.plotcolbox(x,y2,col2,u.exp,ticks=ticks,tcl=2*tcl)
}
}
}
}
# plot bounding box:
lines(x = c(x[1],x[2],x[2],x[1],x[1]),
y = c(y[1],y[1],y[2],y[2],y[1]),
col = as.character(str$color[i]))
if ( str$utility[i] )
{
dh <- dh.rel.utility*(y[2]-y[1])
lines(x,(y[1]+dh)*c(1,1))
lines(x,(y[2]-dh)*c(1,1))
}
# write text into box:
label <- rownames(str)[i]
if ( two.lines == FALSE )
{
text(x=str$x[i],y=str$y[i],labels=label,cex=cex.nodes,...)
}
else
{
pos <- c(as.numeric(gregexpr(" ",label)[[1]]),as.numeric(gregexpr("-",label)[[1]]))
pos <- pos[pos>0]
if ( length(pos) == 0 )
{
text(x=str$x[i],y=str$y[i],labels=label,cex=cex.nodes,...)
} else {
nchar.split <- pos[which.min(abs(pos-0.5*nchar(label)))]
if ( nchar.split > 1 & nchar.split < nchar(label) )
{
label1 <- substr(label,1,nchar.split-1)
if ( substr(label,nchar.split,nchar.split) == "-" )
{
label1 <- substr(label,1,nchar.split)
}
label2 <- substr(label,nchar.split+1,nchar(label))
text(x=str$x[i],y=str$y[i]+(0.5-delta.y)*h/3,labels=label1,cex=cex.nodes,...)
text(x=str$x[i],y=str$y[i]-(0.5-delta.y)*h/3,labels=label2,cex=cex.nodes,...)
} else {
text(x=str$x[i],y=str$y[i],labels=label,cex=cex.nodes,...)
}
}
}
# plot connecting lines:
upper <- str$upper[i]
if ( ! is.na(upper) )
{
x.line.l <- str[upper,"x"] + (0.5-delta.x)*w
x.line.r <- str$x[i] - (0.5-delta.x)*w
x.line.v <- str[upper,"x"] + 0.5*w
y.line.l <- str[upper,"y"]
y.line.r <- str$y[i]
lines(x = c(x.line.l,x.line.v,x.line.v,x.line.r),
y = c(y.line.l,y.line.l,y.line.r,y.line.r))
}
# write attribute names:
if ( with.attrib )
{
if ( str$endnode[i] )
{
attributes <- strsplit(str$attributes[i],split=";")[[1]]
n <- length(attributes)
for ( j in 1:n )
{
y.attrib <- str$y[i] + (0.5 - (j-0.5)/n)*(1-delta.y)*h
text(x.attrib,y.attrib,attributes[j],pos=4,cex=cex.attrib,...)
lines(c(x[2],x.attrib),c(y.attrib,y.attrib),lty="dotted")
}
}
}
} # end for i
#par(par.def)
} # end for k
}
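# Usage sketch (editor's illustration, not part of the original source; assumes
# the utility package is loaded): build a minimal two-attribute value hierarchy
# and color its boxes with one row of node values; node names and values are
# arbitrary, and the columns of u must be named after the nodes of the hierarchy.
if ( FALSE )
{
  v1 <- utility.endnode.intpol1d.create(name.node="v1", name.attrib="a1",
                                        range=c(0,1), x=c(0,1), u=c(0,1),
                                        utility=FALSE)
  v2 <- utility.endnode.intpol1d.create(name.node="v2", name.attrib="a2",
                                        range=c(0,1), x=c(0,1), u=c(0,1),
                                        utility=FALSE)
  root <- utility.aggregation.create(name.node="root", nodes=list(v1,v2),
                                     name.fun="utility.aggregate.add",
                                     par=c(1,1))
  u <- data.frame(root=0.6, v1=0.4, v2=0.8)
  utility.plothierarchy(root, u=u, main="example")
}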
utility.plottable <-
function(node,
u,
uref = NA,
nodes = NA,
col = utility.calc.colors(),
main = "",
cex.main = 1,
cex.nodes = 1,
f.reaches = 0.2,
f.nodes = 0.2,
levels = NA,
plot.val = FALSE,
col.val = "black",
lwd.val = 1,
print.val = TRUE,
ticks = numeric(0),
...)
{
# global parameters:
delta.x <- 0.2
delta.y <- 0.2
delta.main <- 0.05
dh.rel.utility <- 0.1
tcl <- 0.1
ticks.cond <- ticks; if ( !plot.val ) ticks.cond <- numeric(0)
# initializations:
if ( !is.list(u) )
{
warning("unable to interpret u")
return()
}
if ( length(nodes)==1 & is.na(nodes[1]) ) nodes <- character(0)
str <- utility.structure(node)
if ( !is.na(levels) )
{
if ( is.data.frame(str) )
{
str1 <- utility.prune(str,levels)
ind <- order(str1$level)
nodes <- unique(c(nodes,rownames(str1)[ind][str1$level[ind]<=levels]))
}
}
uref.available <- FALSE
ind.uref <- NA
uref.local <- uref
if ( is.data.frame(u) | is.matrix(u) )
{
if ( length(nodes)==0 ) nodes <- colnames(u)
reaches <- rownames(u)
if ( is.data.frame(uref) | is.matrix(uref) )
{
if ( nrow(u) == nrow(uref) )
{
uref.available <- TRUE
ind.uref <- 1:nrow(uref)
}
else
{
if ( nrow(uref) == 1 )
{
uref.available <- TRUE
ind.uref <- rep(1,nrow(u))
}
}
}
}
else
{
if( length(nodes)==0 ) nodes <- colnames(u[[1]])
reaches <- names(u)
if ( is.list(uref) | is.matrix(uref) )
{
if ( !is.data.frame(uref) & !is.matrix(uref) )
{
if ( length(uref) == length(u) )
{
ind.uref <- 1:length(u)
uref.available <- TRUE
}
else
{
if ( length(uref) == 1 )
{
ind.uref <- rep(1,length(u))
uref.available <- TRUE
}
}
}
else
{
uref.local <- list()
uref.local[[1]] <- uref
ind.uref <- rep(1,length(u))
uref.available <- TRUE
}
}
}
# set-up plotting parameters and plot frame:
dx <- (1-f.reaches)/length(nodes)
dy <- (1-f.nodes)/length(reaches)
x <- f.reaches+(1:length(nodes)-0.5)*dx
y <- 1-f.nodes-(1:length(reaches)-0.5)*dy
if ( nchar(main[1]) > 0 )
{
y <- (1-delta.main)*y
dy <- (1-delta.main)*dy
}
#par.def <- par(no.readonly=TRUE)
#par(mar=c(0,0,0,0))
plot(numeric(0),numeric(0),xlim=c(0,1),ylim=c(0,1),type="n",
axes=FALSE,xlab="",ylab="")
# write and color values:
for ( i in 1:length(reaches) )
{
for ( j in 1:length(nodes) )
{
xbox <- x[j]+0.5*(1-delta.x)*dx*c(-1,1)
ybox <- y[i]+0.5*(1-delta.y)*dy*c(-1,1)
if ( is.data.frame(u) | is.matrix(u) )
{
if ( !is.na(match(reaches[i],rownames(u))) &
!is.na(match(nodes[j] ,colnames(u))) )
{
yb <- ybox; if ( uref.available ) yb[2] <- 0.5*(ybox[1]+ybox[2])
yt <- y[i]; if ( uref.available ) yt <- y[i] - 0.25*(ybox[2]-ybox[1])
val <- u[reaches[i],nodes[j]]
color <- col
if ( !is.na(match(nodes[j],rownames(str))) )
{
if ( str[nodes[j],"utility"] ) color <- "white"
}
tcl.loc <- tcl; if ( uref.available ) tcl.loc <- 2*tcl
utility.plotcolbox(xbox,yb,color,val=val,plot.val=plot.val,col.val=col.val,lwd.val=lwd.val,
ticks=ticks.cond,tcl=tcl.loc)
if ( !is.na(val) & print.val )
{
val.str <- paste(round(val,2))
if ( nchar(val.str) > 1 & substring(val.str,1,1) == "0" )
{
val.str <- substring(val.str,2)
if ( nchar(val.str) == 2 ) val.str <- paste(val.str,"0",sep="")
}
text(x=x[j],y=yt,val.str,cex=cex.nodes)
}
}
if ( uref.available )
{
if ( !is.na(match(nodes[j],colnames(uref))) )
{
yb <- ybox; if ( uref.available ) yb[1] <- 0.5*(ybox[1]+ybox[2])
yt <- y[i]; if ( uref.available ) yt <- y[i] + 0.25*(ybox[2]-ybox[1])
val <- uref[ind.uref[i],nodes[j]]
color <- col
if ( !is.na(match(nodes[j],rownames(str))) )
{
if ( str[nodes[j],"utility"] ) color <- "white"
}
utility.plotcolbox(xbox,yb,color,val=val,plot.val=plot.val,col.val=col.val,lwd.val=lwd.val)
if ( !is.na(val) & print.val )
{
val.str <- paste(round(val,2))
if ( nchar(val.str) > 1 & substring(val.str,1,1) == "0" )
{
val.str <- substring(val.str,2)
if ( nchar(val.str) == 2 ) val.str <- paste(val.str,"0",sep="")
}
text(x=x[j],y=yt,val.str,cex=cex.nodes)
}
}
}
}
else
{
yb <- ybox; if ( uref.available ) yb[2] <- 0.5*(ybox[1]+ybox[2])
if ( !is.na(match(reaches[i],names(u))) &
!is.na(match(nodes[j],colnames(u[[reaches[i]]]))) )
{
val <- u[[reaches[i]]][,nodes[j]]
tcl.loc <- tcl; if ( uref.available ) tcl.loc <- 2*tcl
utility.plotquantbox(xbox,yb,col,val,num.stripes=500,
plot.val=plot.val,col.val=col.val,lwd.val=lwd.val,
ticks=ticks,tcl=tcl.loc)
}
if ( uref.available )
{
yb <- ybox; yb[1] <- 0.5*(ybox[1]+ybox[2])
val <- uref.local[[ind.uref[i]]][,nodes[j]]
if ( length(val) > 1 )
{
utility.plotquantbox(xbox,yb,col,val,num.stripes=500,
plot.val=plot.val,col.val=col.val,lwd.val=lwd.val)
}
}
}
# plot bounding box:
lines(x = c(xbox[1],xbox[2],xbox[2],xbox[1],xbox[1]),
y = c(ybox[1],ybox[1],ybox[2],ybox[2],ybox[1]),
col = as.character(str$color[j]))
if ( !is.na(match(nodes[j],rownames(str))) )
{
if ( str[nodes[j],"utility"] )
{
dh <- dh.rel.utility*(ybox[2]-ybox[1])
lines(xbox,(ybox[1]+dh)*c(1,1))
lines(xbox,(ybox[2]-dh)*c(1,1))
}
}
}
}
# write title and names of nodes and reaches:
if ( nchar(main[1]) > 0 ) text(x=0.5,y=1-0.5*delta.main,label=main[1],cex=cex.main)
for ( i in 1:length(reaches) )
{
text(x=0,y=y[i],label=reaches[i],adj=c(0,0.5),cex=cex.nodes)
}
par(srt=90)
for ( j in 1:length(nodes) )
{
text(x=x[j],y=1-f.nodes,label=nodes[j],adj=c(0,0.5),cex=cex.nodes)
}
par(srt=0)
# reset plotting parameters:
#par(par.def)
}
utility.plot <- function(node,
u = NA,
uref = NA,
type = c("hierarchy","table","node","nodes"),
nodes = NA,
col = utility.calc.colors(),
gridlines = c(0.2,0.4,0.6,0.8),
main = "",
cex.main = 1,
cex.nodes = 1,
cex.attrib = 1,
f.reaches = 0.2,
f.nodes = 0.2,
with.attrib = TRUE,
levels = NA,
plot.val = TRUE,
col.val = "black",
lwd.val = 1,
print.val = TRUE,
two.lines = FALSE,
ticks = c(0,0.2,0.4,0.6,0.8,1),
...)
{
if ( type[1] == "nodes" | type[1] == "node" )
{
# plot current node:
if ( is.na(nodes[1]) | ! is.na(match(node$name,nodes)) )
{
if ( substring(class(node)[1],1,18) == "utility.conversion" )
{
utility.conversion.plot(node = node,
col = col,
gridlines = gridlines,
cex.main = cex.main,
cex.nodes = cex.nodes,
cex.attrib = cex.attrib,
...)
}
else
{
if ( substring(class(node)[1],1,19) == "utility.aggregation" )
{
utility.aggregation.plot(node = node,
col = col,
gridlines = gridlines,
cex.main = cex.main,
cex.nodes = cex.nodes,
cex.attrib = cex.attrib,
...)
}
else
{
if ( node$type == "endnode" )
{
if ( class(node)[1] == "utility.endnode.cond" )
{
            plot(node,
par = NA,
col = col,
gridlines = gridlines,
cex.main = cex.main,
nodes = nodes,
...)
}
else
{
            plot(node,
par = NA,
col = col,
gridlines = gridlines,
cex.main = cex.main,
...)
}
}
else
{
# unknown node type; not plotted
}
}
}
}
# plot other nodes:
if ( type == "nodes" )
{
if ( length(node$nodes) > 0 )
{
for ( i in 1:length(node$nodes) )
{
# initiate plot of subnodes:
if ( node$nodes[[i]]$type == "endnode" )
{
if ( class(node$nodes[[i]])[1] == "utility.endnode.cond" )
{
plot(node$nodes[[i]],
par = NA,
col = col,
gridlines = gridlines,
cex.main = cex.main,
nodes = nodes,
...)
}
else
{
if ( is.na(nodes[1]) | ! is.na(match(node$nodes[[i]]$name,nodes)) )
{
plot(node$nodes[[i]],
par = NA,
col = col,
gridlines = gridlines,
cex.main = cex.main,
...)
}
}
}
else
{
plot(node$nodes[[i]],
u = u,
par = NA,
type = type,
nodes = nodes,
col = col,
gridlines = gridlines,
cex.main = cex.main,
cex.nodes = cex.nodes,
cex.attrib = cex.attrib,
...)
}
}
}
}
}
else
{
if ( type[1] == "hierarchy" )
{
if ( is.na(nodes[1]) | ! is.na(match(node$name,nodes)) )
{
utility.plothierarchy(node = node,
u = u,
uref = uref,
col = col,
main = main,
cex.main = cex.main,
cex.nodes = cex.nodes,
cex.attrib = cex.attrib,
with.attrib = with.attrib,
levels = levels,
plot.val = plot.val,
col.val = col.val,
lwd.val = lwd.val,
two.lines = two.lines,
ticks = ticks,
...)
}
if ( ! is.na(nodes[1]) )
{
if ( node$type != "endnode" )
{
for ( i in 1:length(node$nodes) )
{
utility.plot(node$nodes[[i]],
u = u,
uref = uref,
type = type,
nodes = nodes,
col = col,
gridlines = gridlines,
main = main,
cex.main = cex.main,
cex.nodes = cex.nodes,
cex.attrib = cex.attrib,
with.attrib = with.attrib,
two.lines = two.lines,
ticks = ticks,
...)
}
}
}
}
else
{
if ( type[1] == "table" )
{
utility.plottable(node = node,
u = u,
uref = uref,
nodes = nodes,
col = col,
main = main,
cex.main = cex.main,
cex.nodes = cex.nodes,
f.reaches = f.reaches,
f.nodes = f.nodes,
levels = levels,
plot.val = plot.val,
col.val = col.val,
lwd.val = lwd.val,
print.val = print.val,
ticks = ticks,
...)
}
else
{
cat("unknown plot type:",type[1],"\n")
}
}
}
}
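# Usage sketch (editor's illustration, not part of the original source; assumes
# the utility package is loaded): utility.plot is the common plotting routine
# behind the plot calls used, e.g., in the aggregation-function script further
# below; the small hierarchy and the node values are arbitrary choices.
if ( FALSE )
{
  v1 <- utility.endnode.intpol1d.create(name.node="v1", name.attrib="a1",
                                        range=c(0,1), x=c(0,1), u=c(0,1),
                                        utility=FALSE)
  v2 <- utility.endnode.intpol1d.create(name.node="v2", name.attrib="a2",
                                        range=c(0,1), x=c(0,1), u=c(0,1),
                                        utility=FALSE)
  root <- utility.aggregation.create(name.node="root", nodes=list(v1,v2),
                                     name.fun="utility.aggregate.add",
                                     par=c(1,1))
  u <- data.frame(root=c(0.6,0.3), v1=c(0.4,0.2), v2=c(0.8,0.4),
                  row.names=c("alternative A","alternative B"))
  utility.plot(root, u=u, type="hierarchy", main=rownames(u)) # one hierarchy per row
  utility.plot(root, u=u, type="table")                       # rows x nodes table
  utility.plot(root, type="nodes")                            # value/aggregation functions
}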
# ==============================================================================
|
/scratch/gouwar.j/cran-all/cranData/utility/R/utility.plot.r
|
# Plots of aggregation functions
# ==============================
if ( !require(utility) ) { install.packages("utility"); library(utility) }
dir.fig <- "./utility/man/figures"
pdf.width1 <- 4.5
pdf.width2 <- 9
pdf.height2 <- 9
pdf.height1 <- 4.5
pdf.mar <- c(4.5,4,3.5,1) + 0.1 # c(bottom, left, top, right)
png.width1 <- 360
png.width2 <- 720
png.height2 <- 720
png.height1 <- 360
png.mar <- c(4.5,4,3.5,1) + 0.1 # c(bottom, left, top, right)
obj1 <- utility.endnode.intpol1d.create(name.node = "v1",
name.attrib = "v1",
range = c(0,1),
x = c(0,1),
u = c(0,1),
utility = FALSE)
obj2 <- utility.endnode.intpol1d.create(name.node = "v2",
name.attrib = "v2",
range = c(0,1),
x = c(0,1),
u = c(0,1),
utility = FALSE)
# add:
# ----
AddAggregation1 <- utility.aggregation.create(
name.node = "Additive Aggregation (w1=w2=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.add",
par = c(1,1))
AddAggregation2 <- utility.aggregation.create(
name.node = "Additive Aggregation (w1=0.25,w2=0.75)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.add",
par = c(1,3))
pdf(paste(dir.fig,"aggregationadd.pdf",sep="/"),width=pdf.width2,height=pdf.height1)
par(mfrow=c(1,2),mar=pdf.mar)
plot(AddAggregation1,type="nodes",node="Additive Aggregation (w1=w2=0.5)")
plot(AddAggregation2,type="nodes",node="Additive Aggregation (w1=0.25,w2=0.75)")
dev.off()
png(paste(dir.fig,"aggregationadd.png",sep="/"),width=png.width2,height=png.height1)
par(mfrow=c(1,2),mar=png.mar)
plot(AddAggregation1,type="nodes",node="Additive Aggregation (w1=w2=0.5)")
plot(AddAggregation2,type="nodes",node="Additive Aggregation (w1=0.25,w2=0.75)")
dev.off()
# min:
# ----
MinAggregation <- utility.aggregation.create(
name.node = "Minimum Aggregation",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.min",
par = numeric(0))
pdf(paste(dir.fig,"aggregationmin.pdf",sep="/"),width=pdf.width1,height=pdf.height1)
par(mfrow=c(1,1),mar=pdf.mar)
plot(MinAggregation,type="nodes",node="Minimum Aggregation")
dev.off()
png(paste(dir.fig,"aggregationmin.png",sep="/"),width=png.width1,height=png.height1)
par(mfrow=c(1,1),mar=png.mar)
plot(MinAggregation,type="nodes",node="Minimum Aggregation")
dev.off()
# max:
# ----
MaxAggregation <- utility.aggregation.create(
name.node = "Maximum Aggregation",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.max",
par = numeric(0))
pdf(paste(dir.fig,"aggregationmax.pdf",sep="/"),width=pdf.width1,height=pdf.height1)
par(mfrow=c(1,1),mar=pdf.mar)
plot(MaxAggregation,type="nodes",node="Maximum Aggregation")
dev.off()
png(paste(dir.fig,"aggregationmax.png",sep="/"),width=png.width1,height=png.height1)
par(mfrow=c(1,1),mar=png.mar)
plot(MaxAggregation,type="nodes",node="Maximum Aggregation")
dev.off()
# mult:
# -----
MultAggregation1 <- utility.aggregation.create(
name.node = "Multiplicative Aggregation (w1=w2=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.mult",
par = c(0.5,0.5))
MultAggregation2 <- utility.aggregation.create(
name.node = "Multiplicative Aggregation (w1=w2=1)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.mult",
par = c(1,1))
MultAggregation3 <- utility.aggregation.create(
name.node = "Multiplicative Aggregation (w1=w2=0.3)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.mult",
par = c(0.3,0.3))
MultAggregation4 <- utility.aggregation.create(
name.node = "Multiplicative Aggregation (w1=0.3,w2=1)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.mult",
par = c(0.3,1))
pdf(paste(dir.fig,"aggregationmult.pdf",sep="/"),width=pdf.width2,height=pdf.height2)
par(mfrow=c(2,2),mar=pdf.mar)
plot(MultAggregation1,type="nodes",node="Multiplicative Aggregation (w1=w2=0.5)")
plot(MultAggregation2,type="nodes",node="Multiplicative Aggregation (w1=w2=1)")
plot(MultAggregation3,type="nodes",node="Multiplicative Aggregation (w1=w2=0.3)")
plot(MultAggregation4,type="nodes",node="Multiplicative Aggregation (w1=0.3,w2=1)")
dev.off()
png(paste(dir.fig,"aggregationmult.png",sep="/"),width=png.width2,height=png.height2)
par(mfrow=c(2,2),mar=png.mar)
plot(MultAggregation1,type="nodes",node="Multiplicative Aggregation (w1=w2=0.5)")
plot(MultAggregation2,type="nodes",node="Multiplicative Aggregation (w1=w2=1)")
plot(MultAggregation3,type="nodes",node="Multiplicative Aggregation (w1=w2=0.3)")
plot(MultAggregation4,type="nodes",node="Multiplicative Aggregation (w1=0.3,w2=1)")
dev.off()
# geo/geooff:
# -----------
GeoAggregation1 <- utility.aggregation.create(
name.node = "Geometric Aggregation (w1=w2=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.geo",
par = c(1,1))
GeoAggregation2 <- utility.aggregation.create(
name.node = "Geometric Aggregation (w1=0.25,w2=0.75)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.geo",
par = c(1,3))
GeoAggregation3 <- utility.aggregation.create(
name.node = "Geometric Offset Agg. (w1=w2=0.5,d=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.geooff",
par = c(1,1,0.5))
GeoAggregation4 <- utility.aggregation.create(
name.node = "Geometric Offset Agg. (w1=0.25,w2=0.75,d=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.geooff",
par = c(1,3,0.5))
pdf(paste(dir.fig,"aggregationgeo.pdf",sep="/"),width=pdf.width2,height=pdf.height2)
par(mfrow=c(2,2),mar=pdf.mar)
plot(GeoAggregation1,type="nodes",node="Geometric Aggregation (w1=w2=0.5)")
plot(GeoAggregation2,type="nodes",node="Geometric Aggregation (w1=0.25,w2=0.75)")
plot(GeoAggregation3,type="nodes",node="Geometric Offset Agg. (w1=w2=0.5,d=0.5)")
plot(GeoAggregation4,type="nodes",node="Geometric Offset Agg. (w1=0.25,w2=0.75,d=0.5)")
dev.off()
png(paste(dir.fig,"aggregationgeo.png",sep="/"),width=png.width2,height=png.height2)
par(mfrow=c(2,2),mar=png.mar)
plot(GeoAggregation1,type="nodes",node="Geometric Aggregation (w1=w2=0.5)")
plot(GeoAggregation2,type="nodes",node="Geometric Aggregation (w1=0.25,w2=0.75)")
plot(GeoAggregation3,type="nodes",node="Geometric Offset Agg. (w1=w2=0.5,d=0.5)")
plot(GeoAggregation4,type="nodes",node="Geometric Offset Agg. (w1=0.25,w2=0.75,d=0.5)")
dev.off()
# revgeo/revgeooff:
# -----------------
RevGeoAggregation1 <- utility.aggregation.create(
name.node = "Rev. Geometric Aggregation (w1=w2=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revgeo",
par = c(1,1))
RevGeoAggregation2 <- utility.aggregation.create(
name.node = "Rev. Geometric Aggregation (w1=0.25,w2=0.75)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revgeo",
par = c(1,3))
RevGeoAggregation3 <- utility.aggregation.create(
name.node = "Rev. Geometric Offset Agg. (w1=w2=0.5,d=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revgeooff",
par = c(1,1,0.5))
RevGeoAggregation4 <- utility.aggregation.create(
name.node = "Rev. Geometric Offset Agg. (w1=0.25,w2=0.75,d=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revgeooff",
par = c(1,3,0.5))
pdf(paste(dir.fig,"aggregationrevgeo.pdf",sep="/"),width=pdf.width2,height=pdf.height2)
par(mfrow=c(2,2),mar=pdf.mar)
plot(RevGeoAggregation1,type="nodes",node="Rev. Geometric Aggregation (w1=w2=0.5)")
plot(RevGeoAggregation2,type="nodes",node="Rev. Geometric Aggregation (w1=0.25,w2=0.75)")
plot(RevGeoAggregation3,type="nodes",node="Rev. Geometric Offset Agg. (w1=w2=0.5,d=0.5)")
plot(RevGeoAggregation4,type="nodes",node="Rev. Geometric Offset Agg. (w1=0.25,w2=0.75,d=0.5)")
dev.off()
png(paste(dir.fig,"aggregationrevgeo.png",sep="/"),width=png.width2,height=png.height2)
par(mfrow=c(2,2),mar=png.mar)
plot(RevGeoAggregation1,type="nodes",node="Rev. Geometric Aggregation (w1=w2=0.5)")
plot(RevGeoAggregation2,type="nodes",node="Rev. Geometric Aggregation (w1=0.25,w2=0.75)")
plot(RevGeoAggregation3,type="nodes",node="Rev. Geometric Offset Agg. (w1=w2=0.5,d=0.5)")
plot(RevGeoAggregation4,type="nodes",node="Rev. Geometric Offset Agg. (w1=0.25,w2=0.75,d=0.5)")
dev.off()
# harmo/harmooff:
# ---------------
HarmoAggregation1 <- utility.aggregation.create(
name.node = "Harmonic Aggregation (w1=w2=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.harmo",
par = c(1,1))
HarmoAggregation2 <- utility.aggregation.create(
name.node = "Harmonic Aggregation (w1=0.25,w2=0.75)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.harmo",
par = c(1,3))
HarmoAggregation3 <- utility.aggregation.create(
name.node = "Harmonic Offset Agg. (w1=w2=0.5,d=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.harmooff",
par = c(1,1,0.5))
HarmoAggregation4 <- utility.aggregation.create(
name.node = "Harmonic Offset Agg. (w1=0.25,w2=0.75,d=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.harmooff",
par = c(1,3,0.5))
pdf(paste(dir.fig,"aggregationharmo.pdf",sep="/"),width=pdf.width2,height=pdf.height2)
par(mfrow=c(2,2),mar=pdf.mar)
plot(HarmoAggregation1,type="nodes",node="Harmonic Aggregation (w1=w2=0.5)")
plot(HarmoAggregation2,type="nodes",node="Harmonic Aggregation (w1=0.25,w2=0.75)")
plot(HarmoAggregation3,type="nodes",node="Harmonic Offset Agg. (w1=w2=0.5,d=0.5)")
plot(HarmoAggregation4,type="nodes",node="Harmonic Offset Agg. (w1=0.25,w2=0.75,d=0.5)")
dev.off()
png(paste(dir.fig,"aggregationharmo.png",sep="/"),width=png.width2,height=png.height2)
par(mfrow=c(2,2),mar=png.mar)
plot(HarmoAggregation1,type="nodes",node="Harmonic Aggregation (w1=w2=0.5)")
plot(HarmoAggregation2,type="nodes",node="Harmonic Aggregation (w1=0.25,w2=0.75)")
plot(HarmoAggregation3,type="nodes",node="Harmonic Offset Agg. (w1=w2=0.5,d=0.5)")
plot(HarmoAggregation4,type="nodes",node="Harmonic Offset Agg. (w1=0.25,w2=0.75,d=0.5)")
dev.off()
# revharmo/revharmooff:
# ---------------------
RevHarmoAggregation1 <- utility.aggregation.create(
name.node = "Rev. Harmonic Aggregation (w1=w2=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revharmo",
par = c(1,1))
RevHarmoAggregation2 <- utility.aggregation.create(
name.node = "Rev. Harmonic Aggregation (w1=0.25,w2=0.75)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revharmo",
par = c(1,3))
RevHarmoAggregation3 <- utility.aggregation.create(
name.node = "Rev. Harmonic Offset Agg. (w1=w2=0.5,d=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revharmooff",
par = c(1,1,0.5))
RevHarmoAggregation4 <- utility.aggregation.create(
name.node = "Rev. Harmonic Offset Agg. (w1=0.25,w2=0.75,d=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revharmooff",
par = c(1,3,0.5))
pdf(paste(dir.fig,"aggregationrevharmo.pdf",sep="/"),width=pdf.width2,height=pdf.height2)
par(mfrow=c(2,2),mar=pdf.mar)
plot(RevHarmoAggregation1,type="nodes",node="Rev. Harmonic Aggregation (w1=w2=0.5)")
plot(RevHarmoAggregation2,type="nodes",node="Rev. Harmonic Aggregation (w1=0.25,w2=0.75)")
plot(RevHarmoAggregation3,type="nodes",node="Rev. Harmonic Offset Agg. (w1=w2=0.5,d=0.5)")
plot(RevHarmoAggregation4,type="nodes",node="Rev. Harmonic Offset Agg. (w1=0.25,w2=0.75,d=0.5)")
dev.off()
png(paste(dir.fig,"aggregationrevharmo.png",sep="/"),width=png.width2,height=png.height2)
par(mfrow=c(2,2),mar=png.mar)
plot(RevHarmoAggregation1,type="nodes",node="Rev. Harmonic Aggregation (w1=w2=0.5)")
plot(RevHarmoAggregation2,type="nodes",node="Rev. Harmonic Aggregation (w1=0.25,w2=0.75)")
plot(RevHarmoAggregation3,type="nodes",node="Rev. Harmonic Offset Agg. (w1=w2=0.5,d=0.5)")
plot(RevHarmoAggregation4,type="nodes",node="Rev. Harmonic Offset Agg. (w1=0.25,w2=0.75,d=0.5)")
dev.off()
# mix:
# ----
MixAggregation1 <- utility.aggregation.create(
name.node = "Mixture Agg. (w1=w2=0.5,wa=1,wm=0,wg=0)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.mix",
par = c(1,1,1,0,0))
MixAggregation2 <- utility.aggregation.create(
name.node = "Mix. Agg. (w1=w2=0.5,wa=0.33,wm=0.33,wg=0.33)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.mix",
par = c(1,1,1,1,1))
MixAggregation3 <- utility.aggregation.create(
name.node = "Mixture Agg. (w1=w2=0.5,wa=0.5,wm=0,wg=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.mix",
par = c(1,1,1,0,1))
MixAggregation4 <- utility.aggregation.create(
name.node = "Mix. (w1=0.25,w2=0.75,wa=0.33,wm=0.33,wg=0.33)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.mix",
par = c(1,3,1,1,1))
pdf(paste(dir.fig,"aggregationmix.pdf",sep="/"),width=pdf.width2,height=pdf.height2)
par(mfrow=c(2,2),mar=pdf.mar)
plot(MixAggregation1,type="nodes",node="Mixture Agg. (w1=w2=0.5,wa=1,wm=0,wg=0)")
plot(MixAggregation2,type="nodes",node="Mix. Agg. (w1=w2=0.5,wa=0.33,wm=0.33,wg=0.33)")
plot(MixAggregation3,type="nodes",node="Mixture Agg. (w1=w2=0.5,wa=0.5,wm=0,wg=0.5)")
plot(MixAggregation4,type="nodes",node="Mix. (w1=0.25,w2=0.75,wa=0.33,wm=0.33,wg=0.33)")
dev.off()
png(paste(dir.fig,"aggregationmix.png",sep="/"),width=png.width2,height=png.height2)
par(mfrow=c(2,2),mar=png.mar)
plot(MixAggregation1,type="nodes",node="Mixture Agg. (w1=w2=0.5,wa=1,wm=0,wg=0)")
plot(MixAggregation2,type="nodes",node="Mix. Agg. (w1=w2=0.5,wa=0.33,wm=0.33,wg=0.33)")
plot(MixAggregation3,type="nodes",node="Mixture Agg. (w1=w2=0.5,wa=0.5,wm=0,wg=0.5)")
plot(MixAggregation4,type="nodes",node="Mix. (w1=0.25,w2=0.75,wa=0.33,wm=0.33,wg=0.33)")
dev.off()
# addmin:
# -------
AddMinAggregation1 <- utility.aggregation.create(
name.node = "Additive-Minimum Aggregation (w1=w2=0.5,a=0.9)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.addmin",
par = c(1,1,0.9))
AddMinAggregation2 <- utility.aggregation.create(
name.node = "Additive-Minimum Aggregation (w1=w2=0.5,a=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.addmin",
par = c(1,1,0.5))
AddMinAggregation3 <- utility.aggregation.create(
name.node = "Additive-Minimum Aggregation (w1=w2=0.5,a=0.1)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.addmin",
par = c(1,1,0.1))
AddMinAggregation4 <- utility.aggregation.create(
name.node = "Additive-Minimum Agg. (w1=0.25,w2=0.75,a=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.addmin",
par = c(1,3,0.5))
pdf(paste(dir.fig,"aggregationaddmin.pdf",sep="/"),width=pdf.width2,height=pdf.height2)
par(mfrow=c(2,2),mar=pdf.mar)
plot(AddMinAggregation1,type="nodes",node="Additive-Minimum Aggregation (w1=w2=0.5,a=0.9)")
plot(AddMinAggregation2,type="nodes",node="Additive-Minimum Aggregation (w1=w2=0.5,a=0.5)")
plot(AddMinAggregation3,type="nodes",node="Additive-Minimum Aggregation (w1=w2=0.5,a=0.1)")
plot(AddMinAggregation4,type="nodes",node="Additive-Minimum Agg. (w1=0.25,w2=0.75,a=0.5)")
dev.off()
png(paste(dir.fig,"aggregationaddmin.png",sep="/"),width=png.width2,height=png.height2)
par(mfrow=c(2,2),mar=png.mar)
plot(AddMinAggregation1,type="nodes",node="Additive-Minimum Aggregation (w1=w2=0.5,a=0.9)")
plot(AddMinAggregation2,type="nodes",node="Additive-Minimum Aggregation (w1=w2=0.5,a=0.5)")
plot(AddMinAggregation3,type="nodes",node="Additive-Minimum Aggregation (w1=w2=0.5,a=0.1)")
plot(AddMinAggregation4,type="nodes",node="Additive-Minimum Agg. (w1=0.25,w2=0.75,a=0.5)")
dev.off()
# addpower:
# ---------
AddPowerAggregation1 <- utility.aggregation.create(
name.node = "Additive Power Aggregation (w1=w2=0.5,a=2)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.addpower",
par = c(1,1,2))
AddPowerAggregation2 <- utility.aggregation.create(
name.node = "Additive Power Aggregation (w1=w2=0.5,a=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.addpower",
par = c(1,1,0.5))
AddPowerAggregation3 <- utility.aggregation.create(
name.node = "Add. Power Aggreg. (w1=0.25,w2=0.75,a=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.addpower",
  par = c(1,3,2))
AddPowerAggregation4 <- utility.aggregation.create(
name.node = "Add. Power Aggregation (w1=0.25,w2=0.75,a=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.addpower",
par = c(1,3,0.5))
pdf(paste(dir.fig,"aggregationaddpower.pdf",sep="/"),width=pdf.width2,height=pdf.height2)
par(mfrow=c(2,2),mar=pdf.mar)
plot(AddPowerAggregation1,type="nodes",node="Additive Power Aggregation (w1=w2=0.5,a=2)")
plot(AddPowerAggregation2,type="nodes",node="Additive Power Aggregation (w1=w2=0.5,a=0.5)")
plot(AddPowerAggregation3,type="nodes",node="Add. Power Aggreg. (w1=0.25,w2=0.75,a=2)")
plot(AddPowerAggregation4,type="nodes",node="Add. Power Aggregation (w1=0.25,w2=0.75,a=0.5)")
dev.off()
png(paste(dir.fig,"aggregationaddpower.png",sep="/"),width=png.width2,height=png.height2)
par(mfrow=c(2,2),mar=png.mar)
plot(AddPowerAggregation1,type="nodes",node="Additive Power Aggregation (w1=w2=0.5,a=2)")
plot(AddPowerAggregation2,type="nodes",node="Additive Power Aggregation (w1=w2=0.5,a=0.5)")
plot(AddPowerAggregation3,type="nodes",node="Add. Power Aggreg. (w1=0.25,w2=0.75,a=2)")
plot(AddPowerAggregation4,type="nodes",node="Add. Power Aggregation (w1=0.25,w2=0.75,a=0.5)")
dev.off()
# revaddpower:
# ------------
RevAddPowerAggregation1 <- utility.aggregation.create(
name.node = "Rev. Add. Power Aggregation (w1=w2=0.5,a=2)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revaddpower",
par = c(1,1,2))
RevAddPowerAggregation2 <- utility.aggregation.create(
name.node = "Rev. Add. Power Aggregation (w1=w2=0.5,a=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revaddpower",
par = c(1,1,0.5))
RevAddPowerAggregation3 <- utility.aggregation.create(
name.node = "Rev. Add. Power Agg. (w1=0.25,w2=0.75,a=2)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revaddpower",
  par = c(1,3,2))
RevAddPowerAggregation4 <- utility.aggregation.create(
name.node = "Rev. Add. Power Agg. (w1=0.25,w2=0.75,a=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revaddpower",
par = c(1,3,0.5))
pdf(paste(dir.fig,"aggregationrevaddpower.pdf",sep="/"),width=pdf.width2,height=pdf.height2)
par(mfrow=c(2,2),mar=pdf.mar)
plot(RevAddPowerAggregation1,type="nodes",node="Rev. Add. Power Aggregation (w1=w2=0.5,a=2)")
plot(RevAddPowerAggregation2,type="nodes",node="Rev. Add. Power Aggregation (w1=w2=0.5,a=0.5)")
plot(RevAddPowerAggregation3,type="nodes",node="Rev. Add. Power Agg. (w1=0.25,w2=0.75,a=2)")
plot(RevAddPowerAggregation4,type="nodes",node="Rev. Add. Power Agg. (w1=0.25,w2=0.75,a=0.5)")
dev.off()
png(paste(dir.fig,"aggregationrevaddpower.png",sep="/"),width=png.width2,height=png.height2)
par(mfrow=c(2,2),mar=png.mar)
plot(RevAddPowerAggregation1,type="nodes",node="Rev. Add. Power Aggregation (w1=w2=0.5,a=2)")
plot(RevAddPowerAggregation2,type="nodes",node="Rev. Add. Power Aggregation (w1=w2=0.5,a=0.5)")
plot(RevAddPowerAggregation3,type="nodes",node="Rev. Add. Power Agg. (w1=0.25,w2=0.75,a=2)")
plot(RevAddPowerAggregation4,type="nodes",node="Rev. Add. Power Agg. (w1=0.25,w2=0.75,a=0.5)")
dev.off()
# addsplitpower:
# --------------
AddSplitPowerAggregation1 <- utility.aggregation.create(
name.node = "Add. Split Power Aggregation (w1=w2=0.5,a=2)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.addsplitpower",
par = c(1,1,2,0.5))
AddSplitPowerAggregation2 <- utility.aggregation.create(
name.node = "Add. Split Power Aggregation (w1=w2=0.5,a=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.addsplitpower",
par = c(1,1,0.5,0.5))
AddSplitPowerAggregation3 <- utility.aggregation.create(
name.node = "Add. Split Power Agg. (w1=0.25,w2=0.75,a=2)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.addsplitpower",
par = c(1,3,2,0.5))
AddSplitPowerAggregation4 <- utility.aggregation.create(
name.node = "Add. Split Power Agg. (w1=0.25,w2=0.75,a=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.addsplitpower",
par = c(1,3,0.5,0.5))
pdf(paste(dir.fig,"aggregationaddsplitpower.pdf",sep="/"),width=pdf.width2,height=pdf.height2)
par(mfrow=c(2,2),mar=pdf.mar)
plot(AddSplitPowerAggregation1,type="nodes",node="Add. Split Power Aggregation (w1=w2=0.5,a=2)")
plot(AddSplitPowerAggregation2,type="nodes",node="Add. Split Power Aggregation (w1=w2=0.5,a=0.5)")
plot(AddSplitPowerAggregation3,type="nodes",node="Add. Split Power Agg. (w1=0.25,w2=0.75,a=2)")
plot(AddSplitPowerAggregation4,type="nodes",node="Add. Split Power Agg. (w1=0.25,w2=0.75,a=0.5)")
dev.off()
png(paste(dir.fig,"aggregationaddsplitpower.png",sep="/"),width=png.width2,height=png.height2)
par(mfrow=c(2,2),mar=png.mar)
plot(AddSplitPowerAggregation1,type="nodes",node="Add. Split Power Aggregation (w1=w2=0.5,a=2)")
plot(AddSplitPowerAggregation2,type="nodes",node="Add. Split Power Aggregation (w1=w2=0.5,a=0.5)")
plot(AddSplitPowerAggregation3,type="nodes",node="Add. Split Power Agg. (w1=0.25,w2=0.75,a=2)")
plot(AddSplitPowerAggregation4,type="nodes",node="Add. Split Power Agg. (w1=0.25,w2=0.75,a=0.5)")
dev.off()
# revaddsplitpower:
# -----------------
RevAddSplitPowerAggregation1 <- utility.aggregation.create(
name.node = "Rev. Add. Split Power Agg. (w1=w2=0.5,a=2)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revaddsplitpower",
par = c(1,1,2,0.5))
RevAddSplitPowerAggregation2 <- utility.aggregation.create(
name.node = "Rev. Add. Split Power Agg. (w1=w2=0.5,a=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revaddsplitpower",
par = c(1,1,0.5,0.5))
RevAddSplitPowerAggregation3 <- utility.aggregation.create(
name.node = "Rev. Add. Split Pow. Agg. (w1=0.25,w2=0.75,a=2)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revaddsplitpower",
par = c(1,3,2,0.5))
RevAddSplitPowerAggregation4 <- utility.aggregation.create(
name.node = "Rev. Add. Split Pow. Agg. (w1=0.25,w2=0.75,a=0.5)",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.revaddsplitpower",
par = c(1,3,0.5,0.5))
pdf(paste(dir.fig,"aggregationrevaddsplitpower.pdf",sep="/"),width=pdf.width2,height=pdf.height2)
par(mfrow=c(2,2),mar=pdf.mar)
plot(RevAddSplitPowerAggregation1,type="nodes",node="Rev. Add. Split Power Agg. (w1=w2=0.5,a=2)")
plot(RevAddSplitPowerAggregation2,type="nodes",node="Rev. Add. Split Power Agg. (w1=w2=0.5,a=0.5)")
plot(RevAddSplitPowerAggregation3,type="nodes",node="Rev. Add. Split Pow. Agg. (w1=0.25,w2=0.75,a=2)")
plot(RevAddSplitPowerAggregation4,type="nodes",node="Rev. Add. Split Pow. Agg. (w1=0.25,w2=0.75,a=0.5)")
dev.off()
png(paste(dir.fig,"aggregationrevaddsplitpower.png",sep="/"),width=png.width2,height=png.height2)
par(mfrow=c(2,2),mar=png.mar)
plot(RevAddSplitPowerAggregation1,type="nodes",node="Rev. Add. Split Power Agg. (w1=w2=0.5,a=2)")
plot(RevAddSplitPowerAggregation2,type="nodes",node="Rev. Add. Split Power Agg. (w1=w2=0.5,a=0.5)")
plot(RevAddSplitPowerAggregation3,type="nodes",node="Rev. Add. Split Pow. Agg. (w1=0.25,w2=0.75,a=2)")
plot(RevAddSplitPowerAggregation4,type="nodes",node="Rev. Add. Split Pow. Agg. (w1=0.25,w2=0.75,a=0.5)")
dev.off()
# bonusmalus:
# -----------
BonusMalusAggregation1 <- utility.aggregation.create(
name.node = "Bonus-Malus Aggregation (par=c(1,NA,1))",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.bonusmalus",
par = c(1,NA,1))
BonusMalusAggregation2 <- utility.aggregation.create(
name.node = "Bonus-Malus Aggregation (par=c(1,1,NA))",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.bonusmalus",
par = c(1,1,NA))
BonusMalusAggregation3 <- utility.aggregation.create(
name.node = "Bonus-Malus Aggregation (par=c(1,NA,-1))",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.bonusmalus",
par = c(1,NA,-1))
BonusMalusAggregation4 <- utility.aggregation.create(
name.node = "Bonus-Malus Aggregation (par=c(1,NA,2))",
nodes = list(obj1,obj2),
name.fun = "utility.aggregate.bonusmalus",
par = c(1,NA,2))
pdf(paste(dir.fig,"aggregationbonusmalus.pdf",sep="/"),width=pdf.width2,height=pdf.height2)
par(mfrow=c(2,2),mar=pdf.mar)
plot(BonusMalusAggregation1,type="nodes",node="Bonus-Malus Aggregation (par=c(1,NA,1))")
plot(BonusMalusAggregation2,type="nodes",node="Bonus-Malus Aggregation (par=c(1,1,NA))")
plot(BonusMalusAggregation3,type="nodes",node="Bonus-Malus Aggregation (par=c(1,NA,-1))")
plot(BonusMalusAggregation4,type="nodes",node="Bonus-Malus Aggregation (par=c(1,NA,2))")
dev.off()
png(paste(dir.fig,"aggregationbonusmalus.png",sep="/"),width=png.width2,height=png.height2)
par(mfrow=c(2,2),mar=png.mar)
plot(BonusMalusAggregation1,type="nodes",node="Bonus-Malus Aggregation (par=c(1,NA,1))")
plot(BonusMalusAggregation2,type="nodes",node="Bonus-Malus Aggregation (par=c(1,1,NA))")
plot(BonusMalusAggregation3,type="nodes",node="Bonus-Malus Aggregation (par=c(1,NA,-1))")
plot(BonusMalusAggregation4,type="nodes",node="Bonus-Malus Aggregation (par=c(1,NA,2))")
dev.off()
|
/scratch/gouwar.j/cran-all/cranData/utility/inst/utility_manual_plots.r
|
# Authors
# Sebastian Schneider, [email protected]; [email protected]
# Giulia Baldini, [email protected]
# Copyright (C) 2020 Sebastian O. Schneider & Giulia Baldini
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################
#### Utility Tools for Higher Order Risk Preferences #####
##########################################################
#' Truncated p-th power function. Helper function for creating the B-Spline basis (Code by Paul Eilers, Package JOPS, http://statweb.lsu.edu/faculty/marx/JOPS_0.1.0.tar.gz)
#' @param x Value (or vector of values) at which the truncated power function is evaluated.
#' @param t Point of truncation.
#' @param p degree of the truncated polynomial function.
#' @return Returns (x - t)^p for x > t and 0 otherwise, i.e., a truncated polynomial building block of the B-spline basis.
#' @examples
#' tpower(1, 2, 3)
#' @export
tpower <- function(x, t, p) {
return ((x - t) ^ p * (x > t))
}
#' Constructs a B-spline basis of degree 'deg' (Code by Paul Eilers, Package JOPS, http://statweb.lsu.edu/faculty/marx/JOPS_0.1.0.tar.gz).
#'
#' @param x values for the x axis.
#' @param xl minimum value, default is the minimum value of the x-values.
#' @param xr maximum value, default is maximum value of the x-values.
#' @param ndx number of intervals to partition the distance between xl and xr.
#' @param deg degree of the B-spline basis.
#' @return a B-spline basis of degree deg and ndx + 1 internal knots.
#' @examples
#' x_finegrid <- seq(0.001, 1.0, (1.0 - 0.001) / 1000)
#' bbase(x_finegrid)
#' @export
bbase <- function(x,
xl = min(x),
xr = max(x),
ndx = 20,
deg = 6) {
dx <- (xr - xl) / ndx
knots <- seq(xl - deg * dx, xr + deg * dx, by = dx)
P <- outer(x, knots, tpower, deg)
n <- dim(P)[2]
D <- diff(diag(n), diff = deg + 1) / (gamma(deg + 1) * dx ^ deg)
B <- (-1) ^ (deg + 1) * P %*% t(D)
return(B)
}
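# Quick check (editor's illustration, not part of the original source): the
# basis functions returned by bbase form a partition of unity on [xl, xr], so
# every row of the basis matrix sums to (numerically) one; the grid, ndx and
# deg below are arbitrary illustrative choices.
if (FALSE) {
  x_grid <- seq(0, 1, length.out = 11)
  B <- bbase(x_grid, ndx = 5, deg = 3)
  dim(B)            # 11 evaluation points, ndx + deg = 8 basis functions
  range(rowSums(B)) # both values are approximately 1
}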
#' Estimates the model
#'
#' @param xi a vector containing the certainty equivalents (x-values of utility points) for a given participant in each use case.
#' @param yi can be a vector or a matrix representing the corresponding utility values (y-values of utility points).
#' @param lambda lambda is the penalization weight used to compute the initial estimate. The default value is 1.
#' @param n_penalty_dimensions number of dimensions (i.e., derivatives) to penalize. Possible values are 1 or 2. The default value is 1.
#' @param penalty_order highest dimension (i.e., derivative) to penalize. Must be lower than deg.
#' @param ndx number of intervals to partition the distance between the lowest and highest x-values of the utility points.
#' @param deg degree of the B-spline basis. Determines the degree of the function to be estimated. If deg = 2, the estimated utility function will consist of quadratic functions.
#' @param cross_validation_mode determines which cross validation mode should be used. If 0, then the cross validation method is leave-one-third-out. If 1, then the cross validation method is a theoretical leave-one-out, i.e., based on a formula. The default value is 0.
#' @param return_estimate parameter that indicates whether or not to return the (initially) estimated coefficients. Default is false.
#' @param left_out_xi needed for cross validation: the x-values of the points that are left out for fitting the model, so that they can be predicted
#' @param left_out_yi needed for cross validation: the y-values of the points that are left out for fitting the model, so that they can be predicted
#' @return Returns the sum of residuals of the prediction of the left-out points using cross validation. If specified, additionally returns the estimated coefficients of the utility function (in the B-spline basis).
#' @examples
#' x <- c(0.0000000, 0.2819824, 0.3007812, 0.4375000, 0.5231934, 0.7784882, 0.8945312, 1.0000000)
#' y <- c(0.0000, 0.1250, 0.2500, 0.5000, 0.6250, 0.6875, 0.7500, 1.0000)
#' estimate_model(x, y, .5)
#' @importFrom stats lsfit
#' @export
estimate_model <- function(xi,
yi,
lambda = 1,
n_penalty_dimensions = 1,
penalty_order = 4,
ndx = 20,
deg = 6,
cross_validation_mode = 0,
return_estimate = 0,
left_out_xi = c(),
left_out_yi = c()) {
if (n_penalty_dimensions > penalty_order) {
n_penalty_dimensions = penalty_order
}
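  # expand lambda to one penalty weight per penalized derivative: each
  # additional (lower-order) penalty gets a weight 1000 times smaller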
lambda <- lambda * 1000 ^ -(seq(0, n_penalty_dimensions - 1))
# Enforce U(0) = 0 and U(1) = 1
w1 <- 0
w2 <- 0
repeat {
xi <- c(rep(0, w1) , xi, rep(1, w2))
yi <- c(rep(0, w1) , yi, rep(1, w2))
# Generate base and penalty
B = bbase(xi, ndx = ndx, deg = deg)
n = ncol(B)
# Fit the model - poor algorithm, using one penalty
P <- NULL
sumP <- matrix(rep(0, n ^ 2), nrow = n)
nix <- NULL
# Fit the model using lsfit
for (p in 1:n_penalty_dimensions) {
D = diff(diag(n), diff = penalty_order - (p - 1))
P = rbind(P, sqrt(lambda[p]) * D)
sumP = sumP + (lambda[p] * t(D) %*% D)
nix = c(nix, rep(0, n - (penalty_order - (p - 1))))
}
f = lsfit(rbind(B, P), c(yi, nix), intercept = F)
a <- f$coef
if (n_penalty_dimensions == 2) {
# Re-adjust lambda
lambda_ratio <-
sum(abs(diff(a, diff = penalty_order))) / sum(abs(diff(a, diff = penalty_order - 1)))
if (length(lambda) > 1) {
lambda <- lambda[1]
}
lambda <- c(lambda, lambda * lambda_ratio / 5)
P <- NULL
sumP <- matrix(rep(0, n ^ 2), nrow = n)
nix <- NULL
for (p in 1:n_penalty_dimensions) {
D = diff(diag(n), diff = penalty_order - (p - 1))
P = rbind(P, sqrt(lambda[p]) * D)
sumP = sumP + (lambda[p] * t(D) %*% D)
nix = c(nix, rep(0, n - (penalty_order - (p - 1))))
}
}
# Enforce monotonicity
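    # (asymmetric penalty: only negative first differences of the coefficient
    # vector, i.e. decreasing segments, are penalized with the large weight
    # kappa; the fit is repeated until the active set W no longer changes)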
kappa <- 100000000
for (i in 1:10) {
D1 <- diff(diag(n), diff = 1)
W <- as.vector(1 * (D1 %*% a < 0))
P2 <- sqrt(kappa) * diag(W) %*% D1
nix2 <- rep(0, n - 1)
f_new <- lsfit(rbind(B, P, P2), c(yi, nix, nix2), intercept = F)
a_new <- f_new$coef
if (identical(a, a_new)) {
break
}
a = a_new
}
# Predict at observed points
z = B %*% a
if (cross_validation_mode) {
# == 1, so leave one out
# Predict at missing points
x_finegrid <- seq(min(xi), max(xi), (max(xi) - min(xi)) / 1000)
y_hat <- bbase(x_finegrid, ndx = ndx, deg = deg) %*% a
}
    # Increase weight at (0,0) and (1,1) if these endpoints are not yet predicted with the desired precision
if ((round(z[1], 2) == 0) & round(z[length(z)], 2) == 1) {
break
} else {
if (round(z[1], 2) != 0) {
w1 <- w1 + 1
}
if (round(z[length(z)], 2) != 1) {
w2 <- w2 + 1
}
}
}
if (return_estimate) {
return (a)
}
# Model choice using Cross Validation
if (cross_validation_mode) {
# == 1, so leave one out
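    # lhs is the penalized normal-equations matrix; its inverse yields the hat
    # matrix B %*% solve(lhs) %*% t(B), and the leave-one-out residuals of the
    # interior points follow the standard formula r_i = (y_i - z_i) / (1 - h_ii)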
lhs <- t(B) %*% B + sumP + kappa * t(D1) %*% diag(W) %*% D1
H <- lsfit(lhs, diag(nrow = nrow(lhs)), intercept = F)
h_new <- diag(B %*% H$coeff %*% t(B))
    interior <- abs(xi) > 0 & abs(xi) < 1
    r <- (yi[interior] - z[interior]) / (1 - h_new[interior])
res <- sqrt(sum(r ^ 2))
} else {
# == 0, so 1/3
# Predict at left-out points
z <- bbase(c(left_out_xi, xi), ndx = ndx, deg = deg) %*% a
res <- sum((left_out_yi - z[1:length(left_out_yi)]) ^ 2)
}
return(res)
}
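# Usage sketch (editor's illustration, not part of the original source): with
# return_estimate = 1 the function returns the B-spline coefficients instead of
# the cross-validation residual, so the fitted utility function can be evaluated
# on a fine grid with the same basis settings (ndx, deg); data as in the
# examples above, lambda = 0.5 is an arbitrary choice.
if (FALSE) {
  x <- c(0.0000000, 0.2819824, 0.3007812, 0.4375000, 0.5231934, 0.7784882, 0.8945312, 1.0000000)
  y <- c(0.0000, 0.1250, 0.2500, 0.5000, 0.6250, 0.6875, 0.7500, 1.0000)
  a <- estimate_model(x, y, lambda = 0.5, return_estimate = 1)
  x_grid <- seq(min(x), max(x), length.out = 1001)
  u_hat <- bbase(x_grid, ndx = 20, deg = 6) %*% a
  plot(x_grid, u_hat, type = "l", xlab = "x", ylab = "estimated utility")
  points(x, y)
}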
#' Evaluates the cross validation function.
#'
#' @param xi a vector containing the certainty equivalents (x-values of utility points) for a given participant in each use case.
#' @param yi can be a vector or a matrix representing the corresponding utility values (y-values of utility points).
#' @param lambda lambda is the penalization weight used to compute the initial estimate. The default value is 1.
#' @param n_penalty_dimensions number of dimensions (i.e., derivatives) to penalize. Possible values are 1 or 2. The default value is 1.
#' @param penalty_order highest dimension (i.e., derivative) to penalize. Must be lower than deg.
#' @param ndx number of intervals to partition the distance between the lowest and highest x-values of the utility points.
#' @param deg degree of the B-spline basis. Determines the degree of the function to be estimated. If deg = 2, the estimated utility function will consist of quadratic functions.
#' @param cross_validation_mode determines which cross validation mode should be used. If 0, then the cross validation method is leave-one-third-out. If 1, then the cross validation method is a theoretical leave-one-out, i.e., based on a formula. The default value is 0.
#' @return Returns, for the given utility points and (possibly default) settings, the predictive quality of the estimated utility function according to cross validation as a function of a specified penalty weight lambda.
#' @examples
#' x <- c(0.0000000, 0.2819824, 0.3007812, 0.4375000, 0.5231934, 0.7784882, 0.8945312, 1.0000000)
#' y <- c(0.0000, 0.1250, 0.2500, 0.5000, 0.6250, 0.6875, 0.7500, 1.0000)
#' evaluate_cross_validation(x, y, .5)
#' @export
evaluate_cross_validation <- function(xi,
yi,
lambda = 1,
n_penalty_dimensions = 1,
penalty_order = 4,
ndx = 20,
deg = 6,
cross_validation_mode = 0) {
if (!cross_validation_mode) {
# == 0, so 1/3 out
var_xi <- xi[abs(xi) > 0 & abs(xi) < 1]
var_yi <- yi[abs(xi) > 0 & abs(xi) < 1]
number_xi <- length(var_xi)
# Leave 1/3 out CV
combinations <-
combn(1:number_xi, number_xi - ceiling(1 * number_xi / 3))
number_combn <- dim(combinations)[[2]]
sum_ssr <- 0
for (c in 1:number_combn) {
sum_ssr <- sum_ssr + estimate_model(
xi = c(xi[which(abs(xi) == 0 |
abs(xi) == 1)], var_xi[combinations[, c]]),
yi = c(yi[which(abs(xi) == 0 |
abs(xi) == 1)], var_yi[combinations[, c]]),
lambda = lambda,
n_penalty_dimensions = n_penalty_dimensions,
penalty_order = penalty_order,
ndx = ndx,
deg = deg,
cross_validation_mode = 0,
left_out_xi = var_xi[-combinations[, c]],
left_out_yi = var_yi[-combinations[, c]]
)
}
avg_ssr = sum_ssr / number_combn
return (avg_ssr)
} else {
return(
estimate_model(
xi,
yi,
lambda = lambda,
n_penalty_dimensions = n_penalty_dimensions,
penalty_order = penalty_order,
ndx = ndx,
deg = deg,
cross_validation_mode = 1
)
)
}
}
#' Finds an optimal penalty weight lambda given the parameters
#' @param xi a vector containing the certainty equivalents (x-values of utility points) for a given participant in each use case.
#' @param yi can be a vector or a matrix representing the corresponding utility values (y-values of utility points).
#' @param lambda_max maximum lambda used for computing the optimal lambda. The default value is 10000.
#' @param n_penalty_dimensions number of dimensions (i.e., derivatives) to penalize. Possible values are 1 or 2. The default value is 1.
#' @param penalty_order highest dimension (i.e., derivative) to penalize. Must be lower than deg.
#' @param ndx number of intervals to partition the distance between the lowest and highest x-values of the utility points.
#' @param deg degree of the B-spline basis. Determines the degree of the function to be estimated. If deg = 2, the estimated utility function will consist of quadratic functions.
#' @param cross_validation_mode determines which cross validation mode should be used. If 0, then the cross validation method is leave-one-third-out. If 1, then the cross validation method is a theoretical leave-one-out, i.e., based on a formula. The default value is 0.
#' @param grid_dim dimension of the search grid for the initial grid search before the actual optimization. Default value is 5.
#' @return the optimal lambda for the given set of utility points and (possibly default) settings according to the specified cross validation method.
#' @examples
#' x <- c(0.0000000, 0.2819824, 0.3007812, 0.4375000, 0.5231934, 0.7784882, 0.8945312, 1.0000000)
#' y <- c(0.0000, 0.1250, 0.2500, 0.5000, 0.6250, 0.6875, 0.7500, 1.0000)
#' find_optimal_lambda(x, y)
#' @importFrom stats optim
#' @importFrom utils combn
#' @importFrom spatstat.geom ppp pairdist
#' @export
find_optimal_lambda <- function(xi,
yi,
lambda_max = 10000,
n_penalty_dimensions = 1,
penalty_order = 4,
ndx = 20,
deg = 6,
cross_validation_mode = 0,
grid_dim = 5) {
# Take distance between points
dist <- (pairdist(ppp(xi, yi)) <= .1) * 1
colsums <- apply(dist, 1, sum)
skip <- 0
num_balls <- 0
for (p in 1:length(colsums)) {
if (skip == 0) {
skip <- colsums[p]
num_balls <- num_balls + 1
}
skip <- max(0, skip - 1)
}
lambda_min = max(0.01, (num_balls * (9 / length(xi)) - 1) ^ 2.5)
interval <- c(lambda_min, lambda_max + 1)
argmin <- 1
lower.lim <- lambda_min
upper.lim <- lambda_max
# Do a grid search first
for (i in 1:grid_dim) {
vals <-
exp(seq(log(interval[1]), log(interval[2]), length.out = grid_dim))
min_grid <- 2e+308
min_index <- 1
for (j in 1:length(vals)) {
est <- evaluate_cross_validation(
xi = xi,
yi = yi,
lambda = vals[j],
n_penalty_dimensions = n_penalty_dimensions,
penalty_order = penalty_order,
ndx = ndx,
deg = deg,
cross_validation_mode = cross_validation_mode
)
if (est < min_grid) {
min_grid <- est
min_index <- j
}
}
argmin <- vals[min_index]
lower.lim <- pmax(argmin - lambda_max / 10 ^ i, lambda_min)
upper.lim <- pmin(argmin + lambda_max / 10 ^ i, lambda_max)
interval <- c(lower.lim[1], upper.lim[1])
}
optim_argmin <-
optim(
argmin,
evaluate_cross_validation,
xi = xi,
yi = yi,
n_penalty_dimensions = n_penalty_dimensions,
penalty_order = penalty_order,
ndx = ndx,
deg = deg,
cross_validation_mode = cross_validation_mode,
method = "L-BFGS-B",
lower = lower.lim,
upper = upper.lim,
control = list(
fnscale = 1,
maxit = 30,
trace = T
)
)
return (optim_argmin$par)
}
#' Computes a continuous and smooth utility function from the given utility points
#'
#' @param x a matrix or dataframe containing the certainty equivalents (x-values of utility points) for a given participant in each use case.
#' @param y can be a vector or a matrix representing the corresponding utility values (y-values of utility points).
#' @param ids a list containing the IDs of the participants. If not given, a list with IDs from 1 to n_observations will be created.
#' @param mode an integer between 0, 1, 2 representing the three possible modes: multiple imputation, optimal classification or 'weak' classification. Default is optimal classification (1).
#' @param penalty_order highest dimension (i.e., derivative) to penalize. Must be lower than deg.
#' @param lambda_max maximum lambda used for computing the optimal lambda. It is used only in multiple imputation (mode = 0) and optimal (mode = 1). The default value is 10000.
#' @param current_lambda lambda considered in the current iteration. Only used in multiple imputation (mode = 0) to create the combinations and as actual lambda value in 'weak' classification mode (mode = 2). The default value is 1.
#' @param ndx number of intervals to partition the distance between the lowest and highest x-values of the utility points.
#' @param deg degree of the B-spline basis. Determines the degree of the function to be estimated. If deg = 2, the estimated utility function will consist of quadratic functions.
#' @param verbose shows some information while the program is running.
#' @return A smooth and continuous utility function.
#' @examples
#' \donttest{
#' x <- matrix(c(24.60938,34.76074,78.75,81.86035,128.5156,
#' 7.109375,80.4248,113.75,115.083,135.0781,
#' 3.828125,7.211914,8.75,124.1064,131.7969,
#' 1.640625,2.084961,8.75,36.94824,98.98438), nrow = 4, ncol = 5, byrow = TRUE)
#' y <- c(0.25, 0.375, 0.5, 0.625, 0.75)
#' compute_function(x, y, verbose = 1)
#' }
#' @export
compute_function <- function(x,
y,
ids = NULL,
mode = 1,
penalty_order = 4,
lambda_max = 10000,
current_lambda = 1,
ndx = 20,
deg = 6,
verbose = 0) {
if (is.data.frame(x)) {
# If the data is a dataframe
x <- as.matrix(sapply(x, as.numeric)) # convert to matrix
} else if (!is.matrix(x)) {
stop("Please convert x to a dataframe or to a matrix before calling this function.")
}
if (is.data.frame(y)) {
# If y is a dataframe
if (nrow(y) == 1) {
      y <- as.vector(sapply(y, as.numeric)) # convert to vector
} else {
      y <- as.matrix(sapply(y, as.numeric)) # convert to matrix
}
}
if (!is.matrix(y) & !is.vector(y)) {
stop("The accepted values for y are: matrix or vector.")
}
if (is.vector(y) && ncol(x) != length(y) || is.matrix(y) && ncol(x) != ncol(y)) {
stop("The y values do not have the same number of columns as the x values.")
}
if (length(penalty_order) > 1) {
stop("The order of penalty should be a single integer.")
}
if (!is.null(ids) & !is.vector(ids) & !is.list(ids)) {
stop("Please convert the ids field to a list or a vector.")
}
if (!verbose) {
if(.Platform$OS.type == "unix") {
sink(file = "/dev/null") # use /dev/null in UNIX
} else {
sink(file = "NUL") # use /dev/null in UNIX
}
}
if (is.null(ids)) {
ids = seq(from = 1,
to = nrow(x),
by = 1)
}
if (deg <= penalty_order) {
stop(
paste(
"A degree value of ",
deg,
" with penalty order",
penalty_order,
" is not valid. The degree must always be greater than the order of penalty.",
sep = ""
)
)
}
if (penalty_order > 4) {
message(
"This program has not been tested with order of penalties higher than 4 and it might produce wrong or unexpected results."
)
warning(
"This program has not been tested with order of penalties higher than 4 and it might produce wrong or unexpected results."
)
}
if (verbose){
if (mode == 0) {
message("Using multiple imputation mode.")
} else if (mode == 1) {
message("Using optimal classification mode.")
} else {
message("Using weak classification mode.")
}
}
n_penalty_dimensions = 2
if (mode == 1) {
if (penalty_order != 3 & penalty_order != 4) {
stop("The orders of penalty for optimal classification can either be 3 or 4.")
}
} else if (mode == 2) {
n_penalty_dimensions = 1
}
xi <- x[1, ]
xi <- xi[!is.na(xi)]
min_length_xi <- length(xi)
max_length_xi <- length(xi)
for (i in 2:nrow(x)) {
# Find the xi of minimum length
xi <- x[i, ]
xi <- xi[!is.na(xi)]
min_length_xi <- min(min_length_xi, length(xi))
max_length_xi <- max(max_length_xi, length(xi))
}
if (ndx < min_length_xi | ndx >= 4 * (max_length_xi+2)) {
message(
paste(
"The value of ndx should be larger than or equal to the minimum length of xi existing in the dataset",
" and not too large compared to the the maximum length of xi existing in the dataset (say, less than 4 times this number).",
"Other values have not been tested to yield reliable results.",
"Consider allowing a higher (lower) degree of flexibility and increase (decrease) the value of ndx."
)
)
warning(
paste(
"The value of ndx should be larger than or equal to the minimum length of xi existing in the dataset",
" and not too large compared to the the maximum length of xi existing in the dataset (say, less than 4 times this number).",
"Other values have not been tested to yield reliable results.",
"Consider allowing a higher (lower) degree of flexibility and increase (decrease) the value of ndx."
)
)
}
# deg should be larger than 1, but still lower than 1.2 * min(length(xi))
if (deg < 1 | deg > 1.2 * min_length_xi) {
message(
paste(
"The value of deg should be larger than or equal to 1 and not exceed 1.2 times the minimum length of xi existing in the dataset.",
"The amount of data is likely too small to fit such a flexible model, consider lowering deg."
)
)
warning(
paste(
"The value of deg should be larger than or equal to 1 and not exceed 1.2 times the minimum length of xi existing in the dataset.",
"The amount of data is likely too small to fit such a flexible model, consider lowering deg."
)
)
}
return (
compute_function_aux(
x,
y,
ids,
mode,
penalty_order,
lambda_max,
current_lambda,
n_penalty_dimensions,
ndx,
deg,
verbose
)
)
}
#' Computes a continuous and smooth function according to the given utility points
#' @keywords internal
#'
#' @param x a matrix or dataframe containing the certainty equivalents (x-values of utility points) for a given participant in each use case.
#' @param y can be a vector or a matrix representing the corresponding utility values (y-values of utility points).
#' @param ids a list containing the IDs of the participants. If not given, a list with IDs from 1 to n_observations will be created.
#' @param mode an integer (0, 1 or 2) selecting one of the three possible modes: multiple imputation (0), optimal classification (1) or 'weak' classification (2). Default is optimal classification (1).
#' @param penalty_order highest dimension (i.e., derivative) to penalize. Must be lower than deg.
#' @param lambda_max maximum lambda used for computing the optimal lambda. It is used only in multiple imputation (mode = 0) and optimal (mode = 1). The default value is 10000.
#' @param current_lambda lambda considered in the current iteration. Only used in multiple imputation (mode = 0) to create the combinations and as actual lambda value in 'weak' classification mode (mode = 2). The default value is 1.
#' @param n_penalty_dimensions number of dimensions to penalise. Possible values are 1 or 2. The default value is 1.
#' @param ndx number of intervals to partition the distance between the lowest and highest x-values of the utility points.
#' @param deg degree of the B-spline basis. Determines the degree of the function to be estimated. If deg = 2, the estimated utility function will consist of quadratic functions.
#' @param verbose shows some information while the program is running.
#' @return A smooth and continuous utility function.
compute_function_aux <- function(x,
y,
ids,
mode = 1,
penalty_order = 4,
lambda_max = 10000,
current_lambda = 1,
n_penalty_dimensions = 1,
ndx = 20,
deg = 6,
verbose = 0) {
nval_grid <- 1000
coeffs <- data.frame(row.names = ids, matrix(NA, nrow = nrow(x), ncol = (ndx + deg)))
x_finegrids <- data.frame(row.names = ids, matrix(0, nrow = nrow(x), ncol = (nval_grid + 1)))
for (i in 1:nrow(x)) {
cross_validation_mode <- 0 # means leave 1/3 out
# Collect the needed observation
xi <- x[i, ]
yi <- y
if (is.matrix(y)) {
yi <- y[i, ]
}
id <- ids[i]
# Consider only the columns which are not NA
yi <- yi[!is.na(xi)]
xi <- xi[!is.na(xi)]
if (verbose){
message(paste("Considering participant with id", id))
}
# Rescale
xi <- xi / max(abs(xi))
yi <- yi / max(abs(yi[length(yi)]), abs(yi[1]))
# Estimate utility curve if there is at least one point between (0,0) and (-1,-1) or (1,1), respectively
if (length(xi[abs(xi) > 0 &
abs(xi) < 1]) < 1 || sum(is.na(xi))) {
message(paste("The participant with id", id, "cannot be analysed."))
next
}
# If total sum of points to combine is less than or equal to penalty order,
# only leave-one-out CV possible according to formula
if (length(xi) <= penalty_order) {
if (mode == 0) {
message(
paste(
"The participant with id ",
id,
" cannot be analysed with multiple imputation mode, and penalty order ",
penalty_order,
".",
sep = ""
)
)
next
}
cross_validation_mode <- 1 # leave one out
}
if (penalty_order == 4 &
n_penalty_dimensions == 1 & length(xi) == 3) {
# In that case, cannot estimate the model. Solution: Duplicate the end-points
xi <- c(0, xi, 1)
yi <- c(0, yi, 1)
}
# If we are in optimal classification or in MI, we want to find the optimal lambda
if (mode == 0 || mode == 1) {
new_lambda = find_optimal_lambda(
xi,
yi,
lambda_max,
n_penalty_dimensions,
penalty_order,
ndx,
deg,
cross_validation_mode
)
} else {
new_lambda = current_lambda
}
if (mode == 0) {
# MI
# Leave 1/3 out CV
var_xi <- xi[abs(xi) > 0 & abs(xi) < 1]
var_yi <- yi[abs(xi) > 0 & abs(xi) < 1]
number_xi <- length(var_xi)
combinations <-
combn(1:number_xi, number_xi - ceiling(1 * number_xi / 3))
number_combn <- dim(combinations)[[2]]
if (number_combn < current_lambda) {
current_lambda <- current_lambda %% number_combn
current_lambda <- current_lambda + 1
}
coeff <-
estimate_model(
xi = c(xi[which(abs(xi) == 0 | abs(xi) == 1)], var_xi[combinations[, current_lambda]]),
yi = c(yi[which(abs(xi) == 0 | abs(xi) == 1)], var_yi[combinations[, current_lambda]]),
lambda = new_lambda,
n_penalty_dimensions = n_penalty_dimensions,
penalty_order = penalty_order,
ndx = ndx,
deg = deg,
        return_estimate = 1
)
} else {
coeff <-
estimate_model(
xi = xi,
yi = yi,
lambda = new_lambda,
n_penalty_dimensions = n_penalty_dimensions,
penalty_order = penalty_order,
ndx = ndx,
deg = deg,
          return_estimate = 1
)
}
x_finegrid <- seq(min(xi), max(xi), (max(xi) - min(xi)) / nval_grid)
coeffs[i, ] <- coeff
x_finegrids[i, ] <- x_finegrid
}
return(list(x_finegrids, coeffs))
}
#' Given a set of smooth and continuous functions, computes predefined and user-defined measures.
#' @keywords internal
#' @param x_grids a dataframe of vectors of x-values for a smooth and continuous function.
#' @param coeffs a dataframe of coefficients for a smooth and continuous function for each participant.
#' @param ids a list containing the IDs of the participants. If not given, a list with IDs from 1 to n_observations will be created.
#' @param ndx number of intervals to partition the distance between the lowest and highest x-values of the utility points.
#' @param deg degree of the B-spline basis. Determines the degree of the function to be estimated. If deg = 2, the estimated utility function will consist of quadratic functions.
#' @param measures a vector of measures to be computed.
#' @param ... additional parameters for user-defined measures.
#' @return A set of measurements.
compute_measures_aux <- function(x_grids,
coeffs,
ids,
ndx = 20,
deg = 6,
measures = c("risk-arrow-pratt", "crainich-eeckhoudt", "denuit-eeckhoudt"),
...) {
output_measures <- data.frame(row.names = ids, matrix(0, nrow = length(ids), ncol = length(measures)))
for (i in 1:nrow(coeffs)) {
coeff <- as.numeric(coeffs[i,])
x_finegrid <- as.numeric(x_grids[i,])
if (all(is.na(coeff))){
message(paste("The participant with id", ids[i], "cannot be analysed because all the coefficients are NAs."))
next
}
# Computation of first derivative
dy_rd <- derivative(x_finegrid, coeff, 1, ndx, deg)
for (j in 1:length(measures)) {
measure <- measures[[j]]
if (mode(measure) == "function") {
mes <- measure(x_finegrid, coeff, ndx, deg, ...)
colnames(output_measures)[j] <- paste("custom-", j, sep="")
} else {
colnames(output_measures)[j] <- measure
if (measure == "risk-arrow-pratt") {
# Computation of second derivative
ddy_rd <- derivative(x_finegrid, coeff, 2, ndx, deg)
# Compute Risk Aversion measures by Pratt / Arrow
mes <- -mean(ddy_rd, na.rm = T) / mean(dy_rd, na.rm = T)
} else if (measure == "crainich-eeckhoudt") {
# Computation of third derivative
dddy_rd <- derivative(x_finegrid, coeff, 3, ndx, deg)
# Compute Prudence intensity
mes <- mean(dddy_rd, na.rm = T) / mean(dy_rd, na.rm = T)
} else if (measure == "denuit-eeckhoudt") {
# Computation of fourth derivative
ddddy_rd <- derivative(x_finegrid, coeff, 4, ndx, deg)
# Compute Temperance intensity
mes <- -mean(ddddy_rd, na.rm = T) / mean(dy_rd, na.rm = T)
} else {
stop("The desired measure does not exist. Please use another measure.")
}
}
output_measures[i, j] <- mes
}
}
return (output_measures)
}
#' Given a set of smooth and continuous functions, computes predefined and user-defined measures.
#'
#' @param x_grids a dataframe of vectors of x values for a smooth and continuous function.
#' @param coeffs a dataframe of coefficients for a smooth and continous function for each participant.
#' @param ids a list containing the IDs of the participants. If not given, a list with IDs from 1 to n_observations will be created.
#' @param ndx number of intervals to partition the distance between the lowest and highest x-values of the utility points.
#' @param deg degree of the B-spline basis. Determines the degree of the function to be estimated. If deg = 2, the estimated utility function will consist of quadratic functions.
#' @param measures a vector of measures to be computed.
#' @param ... additional parameters for user-defined measures.
#' @return A set of measurements.
#' @examples
#' x <- rbind(seq(0.000002, 1.0, (1.0 - 0.000002) / 1000),
#' seq(0.001, 1.0, (1.0 - 0.001) / 1000),
#' seq(0.0004, 1.0, (1.0 - 0.0004) / 1000))
#' y <- rbind(seq(0.000002, 1.0, (1.0 - 0.000002) / 15),
#' seq(0.001, 1.0, (1.0 - 0.001) / 15),
#' seq(0.0004, 1.0, (1.0 - 0.0004) / 15))
#' compute_measures(x, y, ndx = 10, deg = 6)
#' # x_finegrid, coeff, ndx, deg are always there to be used
#' # The function should have additional unknown arguments (...) if the given parameters are not used
#' risk_arrow_pratt <- function(x_finegrid, coeff, ndx, deg){
#' dy_rd <- derivative(x_finegrid, coeff, 1, ndx, deg)
#' ddy_rd <- derivative(x_finegrid, coeff, 2, ndx, deg)
#' return (-mean(ddy_rd, na.rm = TRUE) / mean(dy_rd, na.rm = TRUE))
#' }
#' measures = c("crainich-eeckhoudt", "denuit-eeckhoudt", risk_arrow_pratt)
#' compute_measures(x, y, ndx = 10, deg = 6, measures=measures)
#' @export
compute_measures <- function(x_grids,
coeffs,
ids = NULL,
ndx = 20,
deg = 6,
measures = c("risk-arrow-pratt", "crainich-eeckhoudt", "denuit-eeckhoudt"),
...) {
if (!is.data.frame(x_grids) & !is.matrix(x_grids)) {
stop("Please convert x_grids to a dataframe or to a matrix before calling this function.")
}
if (!is.data.frame(coeffs) & !is.matrix(coeffs)) {
stop("Please convert coeffs to a dataframe or to a matrix before calling this function.")
}
if (nrow(x_grids) != nrow(coeffs)){
stop("The number of participants in the coefficients matrix and in the x matrix do not correspond.")
}
if (!is.null(ids) & !is.vector(ids) & !is.list(ids)) {
stop("Please convert the ids field to a list or a vector.")
}
  if (!is.null(ids) & length(ids) != nrow(x_grids)) {
stop("The number of participants in the ids vector and in the x matrix do not correspond.")
}
if (ncol(coeffs) != (ndx + deg)){
stop("The number of coefficients is not the same as ndx + deg.")
}
if (is.null(ids)) {
ids = seq(from = 1,
to = nrow(x_grids),
by = 1)
}
return(compute_measures_aux(x_grids, coeffs, ids, ndx, deg, measures, ...))
}
#' Computes the derivative of a function
#'
#' @param x the x values for which the derivative should be computed.
#' @param coeffs the coefficients of the fitted B-spline.
#' @param degree the degree of the derivative.
#' @param ndx number of intervals to partition the distance between the lowest and highest x-values of the utility points.
#' @param deg degree of the B-spline basis. Determines the degree of the function to be estimated. If deg = 2, the estimated utility function will consist of quadratic functions.
#' @return the derivative of the specified degree.
#' @examples
#' coeffs <- seq(0.000002, 1.0, (1.0 - 0.000002) / 25)
#' x <- seq(0.01, 1.0, (1.0 - 0.01) / 5)
#' derivative(x, coeffs)
#' @export
derivative <- function(x,
coeffs,
degree = 1,
ndx = 20,
deg = 6) {
dB = bbase(x = x, ndx = ndx, deg = deg - degree)
for (i in 1:degree) {
coeffs <- (diff(coeffs) / ndx)
}
dy = dB %*% coeffs
range = abs(min(dy) - max(dy))
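  # Round the derivative values to a grid of one thousandth of their range to
  # suppress small numerical fluctuations in the B-spline evaluation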
dy_rd <- round(dy / (range / 1000)) * (range / 1000)
return (dy_rd)
}
#' Computes a continuous and smooth function according to the given utility points
#'
#' @param x a matrix or dataframe containing the certainty equivalents (x-values of utility points) for a given participant in each use case.
#' @param y can be a vector or a matrix representing the corresponding utility values (y-values of utility points).
#' @param ids a list containing the IDs of the participants. If not given, a list with IDs from 1 to n_observations will be created.
#' @param mode an integer (0, 1 or 2) selecting one of the three possible modes: multiple imputation (0), optimal classification (1) or 'weak' classification (2). Default is optimal classification (1).
#' @param penalty_orders vector or constant containing the orders of the derivatives to be penalised. The values in this vector should not be larger than 4.
#' @param ndx number of intervals to partition the distance between the lowest and highest x-values of the utility points.
#' @param deg degree of the B-spline basis. Determines the degree of the function to be estimated. If deg = 2, the estimated utility function will consist of quadratic functions.
#' @param measures the utility based (intensity) measures to be computed.
#' @param ... additional parameters for user-defined measures.
#' @param root_filename path to the root folder where the output files will be saved.
#' @param verbose shows some information while the program is running.
#' @return A smooth and continuous function.
#' @examples
#' \donttest{
#' x <- matrix(c(24.60938,34.76074,78.75,81.86035,128.5156,
#' 7.109375,80.4248,113.75,115.083,135.0781,
#' 3.828125,7.211914,8.75,124.1064,131.7969,
#' 1.640625,2.084961,8.75,36.94824,98.98438), nrow = 4, ncol = 5, byrow = TRUE)
#' y <- c(0.25, 0.375, 0.5, 0.625, 0.75)
#' compute_higher_order_risk_preferences(x, y, mode = 1)
#'
#' # could be used with root_filename argument:
#' # Linux
#' # outfile <- paste(dirname(getwd()), "/out", sep="")
#' # Win
#' # outfile <- paste(dirname(getwd()), "\out", sep="")
#' compute_higher_order_risk_preferences(x, y, mode = 2, verbose = 1)
#' }
#' @importFrom utils write.csv
#' @export
compute_higher_order_risk_preferences <- function(x,
y,
ids = NULL,
mode = 1,
penalty_orders = c(4),
ndx = 20,
deg = 6,
measures = c("risk-arrow-pratt", "crainich-eeckhoudt", "denuit-eeckhoudt"),
...,
root_filename = NULL,
verbose = 0) {
if (is.data.frame(x)) {
# If the data is a dataframe
x <- as.matrix(sapply(x, as.numeric)) # convert to matrix
} else if (!is.matrix(x)) {
stop("Please convert x to a dataframe or to a matrix before calling this function.")
}
  if (is.data.frame(y)) {
    # If y is a dataframe
    if (nrow(y) == 1) {
      y <- as.vector(sapply(y, as.numeric)) # convert to vector
    } else {
      y <- as.matrix(sapply(y, as.numeric)) # convert to matrix
    }
  }
if (!is.matrix(y) & !is.vector(y)) {
stop("The accepted values for y are: matrix or vector.")
}
if (is.vector(y) && ncol(x) != length(y) || is.matrix(y) && ncol(x) != ncol(y)) {
stop("The y values do not have the same number of columns as the x values.")
}
if (!is.vector(penalty_orders) ||
!is.numeric(penalty_orders)) {
stop("The accepted values for the order of penalty are either a constant or a vector.")
}
if (!is.null(ids) & !is.vector(ids) & !is.list(ids)) {
stop("Please convert the ids field to a list or a vector.")
}
if (is.null(ids)) {
ids = seq(from = 1,
to = nrow(x),
by = 1)
}
for (order in penalty_orders) {
if (deg <= order) {
stop(
paste(
"A degree value of ",
deg,
" with penalty order",
order,
" is not valid. The degree must always be greater than the order of penalty.",
sep = ""
)
)
}
if (order > 4) {
message(
"This program has not been tested with order of penalties higher than 4 and it might produce wrong or unexpected results."
)
warning(
"This program has not been tested with order of penalties higher than 4 and it might produce wrong or unexpected results."
)
}
}
xi <- x[1, ]
xi <- xi[!is.na(xi)]
min_length_xi <- length(xi)
max_length_xi <- length(xi)
for (i in 2:nrow(x)) {
# Find the xi of minimum length
xi <- x[i, ]
xi <- xi[!is.na(xi)]
min_length_xi <- min(min_length_xi, length(xi))
max_length_xi <- max(max_length_xi, length(xi))
}
if (ndx < min_length_xi | ndx >= 4 * (max_length_xi+2)) {
message(
paste(
"The value of ndx should be larger than or equal to the minimum length of xi existing in the dataset",
" and not too large compared to the the maximum length of xi existing in the dataset (say, less than 4 times this number).",
"Other values have not been tested to yield reliable results.",
"Consider allowing a higher (lower) degree of flexibility and increase (decrease) the value of ndx."
)
)
warning(
paste(
"The value of ndx should be larger than or equal to the minimum length of xi existing in the dataset",
" and not too large compared to the the maximum length of xi existing in the dataset (say, less than 4 times this number).",
"Other values have not been tested to yield reliable results.",
"Consider allowing a higher (lower) degree of flexibility and increase (decrease) the value of ndx."
)
)
}
# deg should be larger than 1, but still lower than 1.2 * min(length(xi))
if (deg < 1 | deg > 1.2 * min_length_xi) {
message(
paste(
"The value of deg should be larger than or equal to 1 and not exceed 1.2 times the minimum length of xi existing in the dataset.",
"The amount of data is likely too small to fit such a flexible model, consider lowering deg."
)
)
warning(
paste(
"The value of deg should be larger than or equal to 1 and not exceed 1.2 times the minimum length of xi existing in the dataset.",
"The amount of data is likely too small to fit such a flexible model, consider lowering deg."
)
)
}
mode_txt <- "weak"
if (mode == 0 || mode == 1) {
# Set range for lambda (minimum is determined by the data)
lambda_max = 10000
if (mode == 0) {
mode_txt <- "MI"
lambda_fix_loop_lambdas <- 1:15
} else {
mode_txt <- "opt"
for (order in penalty_orders) {
if (order != 3 & order != 4) {
stop("The orders of penalty for optimal classification can either be 3 or 4.")
}
}
lambda_fix_loop_lambdas <- 1
}
n_penalty_dimensions = 2
} else {
lambda_fix_loop_lambdas <- c(.1, 1, 10, 20, 50, 100, 500, 750, 1000, 2000, 5000, 10000)
n_penalty_dimensions = 1
}
if (verbose){
if (mode == 0) {
message("Using multiple imputation mode.")
} else if (mode == 1) {
message("Using optimal classification mode.")
} else {
message("Using weak classification mode.")
}
}
time_begin <- format(Sys.time(), "%y.%m.%d_%H.%M.%OS")
return_val = NULL
# Loop over the penalization order
for (penalty_order in penalty_orders) {
# Start smoothing & classification
if (verbose){
message(paste("Smoothing over the", penalty_order, "order of penalty."))
}
# Iterate over the lambdas, only once for optimization
for (lambda_fix_loop in lambda_fix_loop_lambdas) {
# Start smoothing & classification
if (verbose){
message("Computing the function for all individuals.")
}
fct <-
compute_function_aux(
x,
y,
ids,
mode,
penalty_order,
lambda_max,
lambda_fix_loop,
n_penalty_dimensions,
ndx,
deg,
verbose
)
x_grids <- fct[[1]]
coeffs <- fct[[2]]
if (verbose){
message("Computing the measures for all individuals.")
}
out_measures <- compute_measures_aux(x_grids, coeffs, row.names(coeffs), ndx, deg, measures)
if (verbose) {
message("The measures have been computed.")
}
if (!is.null(root_filename)) {
if (verbose) {
message("Printing current run...")
}
specific_folder <-
paste(
mode_txt,
"_",
time_begin,
sep = ""
)
addition_folder <-
paste(
"pord",
penalty_order,
"_lambda",
lambda_fix_loop,
sep = ""
)
folder_save <- file.path(root_filename, specific_folder, addition_folder, "")
dir.create(folder_save, recursive = TRUE, showWarnings = FALSE)
write.csv(x_grids, file = paste(folder_save, "x_grids.csv", sep=""))
write.csv(coeffs, file = paste(folder_save, "coeffs.csv", sep=""))
write.csv(out_measures, file = paste(folder_save, "measures.csv", sep=""))
if (verbose) {
message(paste("Saved in", folder_save))
}
}
if (mode == 1){
return_val = fct
}
}
}
return(return_val)
}
|
/scratch/gouwar.j/cran-all/cranData/utilityFunctionTools/R/HigherOrderRiskPreferences.R
|
#' Count letters in names.
#'
#' \code{contar_letras} returns the number of occurrences of each letter of the alphabet in each column.
#'
#'
#' @param base A data table, data frame or character vector.
#' @param columns columns to which the function is applied.
#' @param suffix Suffix for the names of the new columns.
#'
#' @import data.table
#' @importFrom stringr str_count
#' @return a data.table with the number of occurrences of each letter of the alphabet for each column
#'
#' @examples
#' base <- data.frame(nome = c("Joรฃo das Neves", "Pedro dos Anjos", "Maria das Gracas"))
#' base <- contar_letras(base, "nome")
#' @export
#'
contar_letras <- function(base, columns, suffix = NULL){
stopifnot(is.character(columns))
ret <- lapply(columns, function(column){
re <- data.table(a=str_count(base[[column]], 'a|A|\u00E1|\u00C1|\u00E2|\u00C2|\u00E3|\u00C3|\u00E0|\u00C0'),
b=str_count(base[[column]], 'b|B'),
c=str_count(base[[column]], "c|C|\u00E7|\u00C7"),
d=str_count(base[[column]], 'd|D'),
e=str_count(base[[column]], "e|E|\u00E9|\u00C9|\u00EA|\u00CA|\u00E8|\u00C8"),
f=str_count(base[[column]], 'f|F'),
g=str_count(base[[column]], 'g|G'),
h=str_count(base[[column]], 'h|H'),
i=str_count(base[[column]], 'i|I|\u00ED|\u00CD|\u00EE|\u00CE|\u00EC|\u00CC'),
j=str_count(base[[column]], 'j|J'),
k=str_count(base[[column]], 'k|K'),
l=str_count(base[[column]], 'l|L'),
m=str_count(base[[column]], 'm|M'),
n=str_count(base[[column]], 'n|N'),
o=str_count(base[[column]], 'o|O|\u00F0|\u00F4|\u00D4|\u00F3|\u00D3|\u00F5|\u00D5|\u00F2|\u00D2'),
p=str_count(base[[column]], 'p|P'),
q=str_count(base[[column]], 'q|Q'),
r=str_count(base[[column]], 'r|R'),
s=str_count(base[[column]], 's|S'),
t=str_count(base[[column]], 't|T'),
u=str_count(base[[column]], 'u|U|\u00FA|\u00DA|\u00FB|\u00DB|\u00F9|\u00D9'),
v=str_count(base[[column]], 'v|V'),
w=str_count(base[[column]], 'w|W'),
x=str_count(base[[column]], 'x|X'),
y=str_count(base[[column]], 'y|Y'),
z=str_count(base[[column]], 'z|Z'))
if(is.null(suffix)){
setnames(re, names(re),paste(names(re), column, sep = "_") )
}else{
setnames(re, names(re),paste(names(re), suffix, sep = "_") )
}
})
return(do.call(cbind, lapply(ret, data.table, stringsAsFactors=FALSE)))
}
|
/scratch/gouwar.j/cran-all/cranData/utilsIPEA/R/contador_letras.R
|
#' Brazilian address
#'
#' Some addresses from Brazil
#'
#' @format A data frame with 5 rows and 12 variables:
#' \describe{
#' \item{uf}{state of Brazil}
#' \item{MatchedAddress}{Address returned from GALILEO}
#' \item{cep}{Zip code}
#' \item{enderecofinal}{Prompted Address}
#' \item{Nome_Municipio}{City names}
#' }
"geocod_base"
|
/scratch/gouwar.j/cran-all/cranData/utilsIPEA/R/data.R
|
#' Check Brazilian names
#'
#' \code{extrai_NomeProprio} parses Brazilian names and returns given names, surnames and gender.
#'
#' @param x List, character or factor with names to be parsed.
#' @param surname If TRUE, the list of surnames is returned.
#' @param gender If TRUE, the list of gender based on the names is returned.
#' @param stringdist if TRUE, makes a prediction based on the Jaro-Winkler string distance between the source data and the input.
#' @param spaces if TRUE, keeps the blank spaces in the returned names. If FALSE, all blank spaces are removed.
#'
#' @import data.table RCurl
#' @importFrom stringdist stringdist
#' @importFrom stringdist amatch
#' @importFrom stringr str_extract
#' @importFrom stringr str_trim
#' @importFrom stringr str_replace_all
#' @importFrom stringr str_replace
#' @importFrom utils data
#' @importFrom utils write.table
#' @return Returns a data.table
#'
#' @export
# extrai_NomeProprio <- function(x, surname = FALSE, gender = FALSE, stringdist = TRUE, spaces = TRUE){
# dir_base <- file.path(str_replace_all(find.package("utilsIPEA"),"\\\\","/"),"data")
# if(file.exists(file.path(dir_base,"name_gender.csv"))){
# return(extrai_NomeProprio_(x = x, surname = surname, gender = gender, stringdist = stringdist, spaces = spaces, dir_base = dir_base))
# } else {
# print("Downloading source data...")
# url_base <- getURL("https://raw.githubusercontent.com/ipea/utilsIPEA/base_nomes/nomes.csv")
# dir.create(dir_base)
# write.table(url_base,file.path(dir_base,"name_gender.csv"), sep = ",", quote = FALSE)
# return(extrai_NomeProprio_(x = x, surname = surname, gender = gender, stringdist = stringdist, spaces = spaces, dir_base = dir_base))
# }
# }
#
#
# find_strdist <- function(um_primeiro,dois_primeiros,tres_primeiros,base_nomes){
# pos <- amatch(c(um_primeiro,dois_primeiros,tres_primeiros),base_nomes$V1, method = "jw", maxDist = 0.7)
# pes1 <- stringdist(um_primeiro, base_nomes$V1[pos[1]], method = "jw")
# pes2 <- stringdist(dois_primeiros, base_nomes$V1[pos[2]], method = "jw")
# pes3 <- stringdist(tres_primeiros, base_nomes$V1[pos[3]], method = "jw")
# minimo <- suppressWarnings(min(pes1,pes2,pes3, na.rm = TRUE))
# retorno <- ifelse( minimo == pes1, base_nomes$V1[pos[1]],
# ifelse( minimo == pes2, base_nomes$V1[pos[2]],
# ifelse( minimo == pes3, base_nomes$V1[pos[3]],
# NA)))
# return(retorno)
# }
#
#
# extrai_NomeProprio_ <- function(x, surname, gender, stringdist, spaces, dir_base){
# NomeProprio <- NULL
# um_primeiro <- NULL
# dois_primeiros <- NULL
# tres_primeiros <- NULL
# nome <- NULL
# . <- NULL
# base_nomes <- NULL
# V2 <- NULL
# #Carrega bases necessรกrias e variรกveis--------------------------
# #base_nomes <- suppressMessages(fread("data/names_gender.csv"))
# base_nomes <- suppressMessages(fread(file.path(dir_base,"name_gender.csv")))
# patternOneName <- "^[a-zA-Z0-9_]+"
# patternTwoNames <- "^[a-zA-Z0-9_]+\\s[a-zA-Z0-9_]+"
# patternThreeNames <- "^[a-zA-Z0-9_]+\\s[a-zA-Z0-9_]+\\s[a-zA-Z0-9_]+"
#
# #Trata os dados de entrada --------------------------------------
# names <- setDT(data.frame(nome = toupper(x)))
#
# #Separa nomes---------------------------------------------------
# names[,um_primeiro := str_extract(nome,patternOneName)]
# names[,dois_primeiros := str_extract(nome,patternTwoNames)]
# names[,tres_primeiros := str_extract(nome,patternThreeNames)]
#
# if(stringdist == TRUE){
# names[, NomeProprio := ifelse(tres_primeiros %in% base_nomes$V1, tres_primeiros,
# ifelse(dois_primeiros %in% base_nomes$V1, dois_primeiros,
# ifelse(um_primeiro %in% base_nomes$V1, um_primeiro,
# ifelse(!is.na(find_strdist(um_primeiro, dois_primeiros, tres_primeiros,base_nomes)), find_strdist(um_primeiro, dois_primeiros, tres_primeiros,base_nomes),
# um_primeiro))))]
# }
#
# if(stringdist == FALSE){
# names[, NomeProprio := ifelse(tres_primeiros %in% base_nomes$V1, tres_primeiros,
# ifelse(dois_primeiros %in% base_nomes$V1, dois_primeiros,
# str_extract(names$nome,patternOneName)))]
# }
#
# if(surname == TRUE){
# names[,surname := ifelse(str_replace_all(NomeProprio," ","") == str_extract(nome,"^[A-Z]+"), str_replace(nome,"^[A-Z]+\\s",""),
# str_trim(str_replace(nome, NomeProprio, "")))]
# }
# if(gender == TRUE){
# names[,gender := base_nomes[NomeProprio,,on="V1"][,.(V2)]]
#
# }
#
# if(spaces == FALSE){
# names[,NomeProprio := str_replace_all(NomeProprio," ","")]
# if(surname == TRUE){
# names[,surname := str_replace_all(surname," ","")]
# }
# }
#
# names[,dois_primeiros := NULL]
# names[,tres_primeiros := NULL]
# names[,um_primeiro := NULL]
# names[,nome := NULL]
#
# if(surname == FALSE & gender == FALSE){
# return(names[,NomeProprio])
# }
#
# return(as.data.frame(names))
# }
|
/scratch/gouwar.j/cran-all/cranData/utilsIPEA/R/extrai_nome_proprio.R
|
#' Identify municipality mismatches in GALILEO results
#'
#' \code{ident_erros_munic_galileo} returns a new column called munmatch with TRUE or FALSE. This column identifies where GALILEO failed.
#'
#' @importFrom stringr str_replace_all str_detect str_count fixed
#' @importFrom dplyr %>%
#' @param base Data frame or data table with the results returned by GALILEO.
#' @param mun character vector with the municipality names.
#' @param match character vector with the MatchedAddress column returned by GALILEO.
#' @param uf character vector with the state names.
#'
#' @return Returns the input with a new column called munmatch with TRUE or FALSE.
#'
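#' @examples
#' # Illustrative sketch (assumed usage): the bundled geocod_base dataset is
#' # taken to provide the municipality, the GALILEO MatchedAddress and the
#' # state as character columns.
#' \dontrun{
#' res <- ident_erros_munic_galileo(geocod_base, geocod_base$Nome_Municipio,
#'                                  geocod_base$MatchedAddress, geocod_base$uf)
#' }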
#' @export
#'
ident_erros_munic_galileo <- function(base, mun, match, uf){
base <- as.data.table(base)
mun <- as.character(mun)
match <- as.character(match)
Encoding(mun) <- "latin1"
Encoding(match) <- "latin1"
ufnovo <- NULL
cepmatch <- NULL
munstr <- NULL
matchstr <- NULL
munmatch <- NULL
base[,ufnovo:=uf %>% iconv(to="ASCII//TRANSLIT", from = "latin1") %>% toupper() %>% str_replace_all(fixed("'"), "") %>% str_replace_all("D.(S||)\\b", "") %>%
str_replace_all("[IY]","I") %>% str_replace_all("[ZS]","S") %>% str_replace_all("[JG]","G")]
base[,cepmatch:=str_detect(match,",\\s[0-9]{5}")]
base[,munstr:= mun %>% iconv(to="ASCII//TRANSLIT", from = "latin1") %>% toupper() %>% str_replace_all(fixed("'"), "") %>% str_replace_all(" D.(S||)\\b", "") %>%
str_replace_all("[IY]","I") %>% str_replace_all("[ZS]","S")%>% str_replace_all("[JG]","G")]
base[,matchstr:= match %>% iconv(to="ASCII//TRANSLIT", from = "latin1") %>% toupper() %>% str_replace_all(fixed("'"), "") %>% str_replace_all(" D.(S||)\\b", "") %>%
str_replace_all("[IY]","I") %>% str_replace_all("[ZS]","S")%>% str_replace_all("[JG]","G")]
base[,munmatch:= mapply(
function(x,y,c,d){ifelse(str_count(y, ",")<=1,str_detect(y,paste(x,",",sep='')) & str_detect(y,toupper(d))
,ifelse(str_count(y, ",")==2 && c==TRUE,str_detect(y,paste(x,",",sep='')) & str_detect(y,toupper(d)),str_detect(y,paste(", ",x,",",sep='')) & str_detect(y,toupper(d))))}
,munstr,matchstr,cepmatch,ufnovo)]
base <- base[,!c("munstr", "matchstr", "ufnovo", "cepmatch"),with=F]
return(base)
}
|
/scratch/gouwar.j/cran-all/cranData/utilsIPEA/R/geocode.R
|
#' Return a woman's maiden name
#'
#' \code{nome_de_solteira} returns a woman's maiden name using the husband's last name.
#'
#' @param nome_casada Character, married woman's name
#' @param nome_conjuge character, husband's name.
#'
#' @return Returns a list of possible names
#'
#' @examples
#' nome_de_solteira(nome_casada = "Maria Conceicao da Costa", nome_conjuge = "Mario Silva da Costa")
#' @export
#'
nome_de_solteira <- function(nome_casada, nome_conjuge){
nome_casada <- remove_preposicao_nomes(nome_casada)
nome_conjuge <- remove_preposicao_nomes(nome_conjuge)
nome_separado_casada <- unlist(strsplit(nome_casada, "\\W+"))
nome_separado_conjuge <- unlist(strsplit(nome_conjuge, "\\W+"))
nomes_presentes <- sapply(nome_separado_conjuge, function(sobrenome) any(nome_separado_casada == sobrenome))
sobrenome <- nome_separado_conjuge[nomes_presentes]
sobrenome_split <- lapply(sobrenome, function(x) nome_separado_casada[!(nome_separado_casada %in% x)])
return(lapply(sobrenome_split, function(x) paste(x, collapse = " ")))
}
|
/scratch/gouwar.j/cran-all/cranData/utilsIPEA/R/nome_de_solteira.R
|
#' Remove prepositions (de, da, dos) from names.
#'
#' \code{remove_preposicao_nomes} returns names without de, da and dos.
#'
#'
#' @param base A data table, data frame or character vector.
#' @param ... columns to which the function is applied.
#' @param suffixo Suffix for the names of the new columns.
#'
#' @import data.table
#' @importFrom stringr str_replace_all
#' @return the base parameter with a new column.
#'
#' @examples
#' base <- data.frame(nome = c("Joรฃo das Neves", "Pedro dos Anjos", "Maria das Gracas"))
#' base <- remove_preposicao_nomes(base, "nome")
#' @export
#'
remove_preposicao_nomes <- function(base, ..., suffixo = "_semD"){
return(funcao_generica(base, ..., suffixo = suffixo, FUN = remove_preposicao_nomes_coluna))
}
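# Removes Portuguese prepositions and stray single letters (surrounded by
# spaces) from a character vector of names, e.g. "Joao das Neves" -> "Joao Neves".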
remove_preposicao_nomes_coluna <- function(nomes){
subs <- " DA | DE | DOS | D. | DAS | A | B | C | D | E | F | G | H | I | J | K | L | M | N | O | P | Q | R | S | T | U | V | X | Z"
subs <- paste(subs, tolower(subs), sep = "|")
if(is.character(nomes)){
return(str_replace_all(nomes, subs, " "))
}
}
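# Abbreviates every middle name to its initial, keeping the first and last
# names intact, e.g. "Carlos Pereira Neves" -> "Carlos P Neves".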
abrevia_nomes_meio_coluna<- function(nomes){
novos_nomes <- sapply(nomes, USE.NAMES = F, function(nome){
nomes_separados <- str_extract_all(nome, "(\\w+)", simplify = T)
if(length(nomes_separados) <= 2) return(nome)
n <- length(nomes_separados) - 1
for(i in 2:n){
nomes_separados[i] <- str_extract(nomes_separados[i], "^\\w{1}")
}
nome_corrigido<- paste(nomes_separados, collapse = " ")
return(nome_corrigido)
} )
return(novos_nomes)
}
funcao_generica <- function(base, ..., suffixo, FUN){
FUN <- match.fun(FUN)
if(is.character(base)){
return(FUN(base))
}
other_columns <- unlist(eval(substitute(alist(...))))
stopifnot(length(other_columns) > 0)
if(!is.data.table(base)){ setDT(base) }
new_columns <- sapply(other_columns, function(x) paste0(x, suffixo))
mapply( function(x, y){ set(base, j = x, value = FUN(base[[y]])) },
new_columns, other_columns)
return(base)
}
#' Abbreviate the middle names.
#'
#' \code{abrevia_nome_meio} returns names with the middle names abbreviated to their initials.
#'
#'
#' @param base A data table, data frame or character vector.
#' @param suffixo A character indicating the final part of the new columns' names
#' @param ... columns to which the function is applied.
#'
#' @import data.table
#' @importFrom stringr str_replace_all
#' @importFrom stringr str_extract_all
#' @importFrom stringr str_extract
#' @return the base parameter with a new column.
#'
#' @examples
#' base <- data.frame(nome = c("Carlos Pereira Neves", "Pedro Aparecido Anjos"))
#' base <- remove_preposicao_nomes(base, "nome")
#' @export
#'
abrevia_nome_meio <- function(base, ..., suffixo = "_abrev"){
return(funcao_generica(base, ..., suffixo = suffixo, FUN = abrevia_nomes_meio_coluna))
}
|
/scratch/gouwar.j/cran-all/cranData/utilsIPEA/R/text_functions.R
|
#' Build transformation models
#'
#' Base classifiers are used to build models to solve the the transformation
#' problems. To create a new base classifier, two steps are necessary:
#' \enumerate{
#' \item Create a train method
#' \item Create a prediction method
#' }
#' This section is about how to create the first step: a train method.
#' To create a new predict model see \code{\link{mlpredict}} documentation.
#'
#' @section How to create a new train base method:
#' First, it is necessary to define a name for your classifier, because this name
#' determines the method name. The base method name must start with
#' \code{mltrain.base} followed by the chosen name, e.g. a \code{'FOO'}
#' classifier must be defined as \code{mltrain.baseFOO} (we suggest always using
#' upper case names).
#'
#' Next, your method must receive at least two parameters (\code{object, ...}).
#' Use \code{object$data[, object$labelindex]} or
#' \code{object$data[, object$labelname]} to access the labels values and use
#' \code{object$data[, -object$labelindex]} to access the predictive attributes.
#' If you need to know which are the multi-label dataset and method, use
#' \code{object$mldataset} and \code{object$mlmethod}, respectively.
#'
#' Finally, your method should return a model that will be used by the mlpredict
#' method. Remember that your method may be used to build binary and
#' multi-class models.
#'
#' @param object A \code{mltransformation} object. This is used as a list and
#' contains at least five values:
#' \describe{
#' \item{object$data}{A data.frame with the train data, where the columns are
#' the attributes and the rows are the examples.}
#' \item{object$labelname}{The name of the class column.}
#' \item{object$labelindex}{The column index of the class.}
#' \item{object$mldataset}{The name of multi-label dataset.}
#' \item{object$mlmethod}{The name of the multi-label method.}
#' }
#' Others values may be specified by the multi-label method.
#' @param ... Others arguments passed to the base method.
#' @return A model object. The class of this model can be of any type, however,
#' this object will be passed to the respective mlpredict method.
#' @export
#'
#' @examples
#' # Create an empty model of type FOO
#' mltrain.baseFOO <- function (object, ...) {
#' mymodel <- list(
#' classes = as.character(unique(object$data[, object$labelindex]))
#' )
#' class(mymodel) <- 'fooModel'
#' mymodel
#' }
#'
#' # Using this base method with Binary Relevance
#' brmodel <- br(toyml, 'FOO')
#'
#' \donttest{
#'
#' # Create a SVM method using the e1071 package
#' library(e1071)
#' mltrain.baseSVM <- function (object, ...) {
#' traindata <- object$data[, -object$labelindex]
#' labeldata <- object$data[, object$labelindex]
#' model <- svm(traindata, labeldata, probability = TRUE, ...)
#' model
#' }
#' }
mltrain <- function(object, ...) {
UseMethod("mltrain")
}
#' Prediction transformation problems
#'
#' Base classifiers are used to build models to solve the transformation
#' problems. To create a new base classifier, two steps are necessary:
#' \enumerate{
#' \item Create a train method
#' \item Create a prediction method
#' }
#' This section is about how to create the second step: a prediction method.
#' To create a new train method see \code{\link{mltrain}} documentation.
#'
#' @section How to create a new prediction base method:
#' First, it is necessary to know the class of the model generated by the respective
#' train method, because this name determines the method name. It must start with
#' \code{'mlpredict.'}, followed by the model class name, e.g. a model with
#' class 'fooModel' must be called as \code{mlpredict.fooModel}.
#'
#' After defining the name, you need to implement your prediction base method.
#' The model built by mltrain is available in the \code{model} parameter and
#' \code{newdata} is the data to be predicted.
#'
#' The return of this method must be a data.frame with two columns called
#' \code{"prediction"} and \code{"probability"}. The first column contains the
#' predicted class and the second the probability/score/confidence of this
#' prediction. The rows represent the examples.
#'
#' @param model An object model returned by some mltrain method, its class
#'  determines the name of this method.
#' @param newdata A data.frame with the new data to be predicted.
#' @param ... Others arguments passed to the predict method.
#' @return A data.frame with two columns called \code{"prediction"} and
#'  \code{"probability"}, where the rows are the examples.
#' @export
#'
#' @examples
#'
#' # Create a method that always predicts the first class
#' # The model must be of the class 'fooModel'
#' mlpredict.fooModel <- function (model, newdata, ...) {
#' # Predict the first class with a random confidence
#' data.frame(
#' prediction = rep(model$classes[1], nrow(newdata)),
#' probability = sapply(runif(nrow(newdata)), function (score) {
#' max(score, 1 - score)
#' }),
#' row.names = rownames(newdata)
#' )
#' }
#'
#' \donttest{
#' # Create a SVM predict method using the e1071 package (the class of SVM model
#' # from e1071 package is 'svm')
#' library(e1071)
#' mlpredict.svm <- function (model, newdata, ...) {
#' result <- predict(model, newdata, probability = TRUE, ...)
#' attr(result, 'probabilities')
#' }
#' }
mlpredict <- function(model, newdata, ...) {
UseMethod("mlpredict")
}
# DEFAULT METHOD -------------------------------------------------------------
# @describeIn mltrain Default S3 method
mltrain.default <- function(object, ...) {
funcname <- paste("mltrain.base", object$base.method, sep = "")
stop(paste("The function '", funcname, "(object, ...)' is not implemented",
sep = ""))
}
# @describeIn mlpredict Default S3 method
mlpredict.default <- function(model, newdata, ...) {
funcname <- paste("mlpredict.", class(model), sep = "")
stop(paste("The function '", funcname,
"(dataset, newdata, ...)' is not implemented", sep = ""))
}
# SVM METHOD ------------------------------------------------------------------
# @describeIn mltrain SVM implementation (require \pkg{e1071} package to use)
mltrain.baseSVM <- function(object, ...) {
if (requireNamespace("e1071", quietly = TRUE)) {
formula <- stats::as.formula(paste("`", object$labelname, "` ~ .", sep=""))
model <- e1071::svm(formula, object$data, probability = TRUE, ...)
}
else {
stop(paste("There are no installed package 'e1071' to use SVM classifier",
"as base method"))
}
model
}
# @describeIn mlpredict SVM implementation (require \pkg{e1071} package to use)
mlpredict.svm <- function(model, newdata, ...) {
if (!requireNamespace("e1071", quietly = TRUE)) {
stop(paste("There are no installed package 'e1071' to use SVM classifier",
"as base method"))
}
result <- stats::predict(model, newdata, probability = TRUE, ...)
prediction <- as.character(result)
all.prob <- attr(result, "probabilities")
data.frame(
prediction = prediction,
probability = all.prob[cbind(rownames(newdata), prediction)],
row.names = rownames(newdata)
)
}
# C5.0 METHOD ------------------------------------------------------------------
# @describeIn mltrain C5.0 implementation (require \pkg{C50} package to use)
mltrain.baseC5.0 <- function(object, ...) {
if (requireNamespace("C50", quietly = TRUE)) {
formula <- stats::as.formula(paste("`", object$labelname, "` ~ .", sep=""))
model <- C50::C5.0(formula, object$data, ...)
}
else {
stop(paste("There are no installed package 'C50' to use C5.0 classifier",
"as base method"))
}
model
}
# @describeIn mlpredict C5.0 implementation (require \pkg{C50} package to use)
mlpredict.C5.0 <- function(model, newdata, ...) {
if (!requireNamespace("C50", quietly = TRUE)) {
stop(paste("There are no installed package 'C50' to use C5.0 classifier",
"as base method"))
}
result <- C50::predict.C5.0(model, newdata, type = "prob", ...)
prediction <- colnames(result)[apply(result, 1, which.max)]
data.frame(
prediction = prediction,
probability = result[cbind(rownames(newdata), prediction)],
row.names = rownames(newdata)
)
}
# CART METHOD -----------------------------------------------------------------
# @describeIn mltrain CART implementation (require \pkg{rpart} package to use)
mltrain.baseCART <- function(object, ...) {
if (requireNamespace("rpart", quietly = TRUE)) {
formula <- stats::as.formula(paste("`", object$labelname, "` ~ .", sep=""))
model <- rpart::rpart(formula, object$data, ...)
}
else {
stop(paste("There are no installed package 'rpart' to use Cart classifier",
"as base method"))
}
model
}
# @describeIn mlpredict CART implementation (require \pkg{rpart} package)
mlpredict.rpart <- function(model, newdata, ...) {
if (!requireNamespace("rpart", quietly = TRUE)) {
stop(paste("There are no installed package 'rpart' to use Cart classifier",
"as base method"))
}
result <- stats::predict(model, newdata, type = "prob", ...)
rownames(result) <- rownames(newdata)
prediction <- colnames(result)[apply(result, 1, which.max)]
data.frame(
prediction = prediction,
probability = result[cbind(rownames(newdata), prediction)],
row.names = rownames(newdata)
)
}
# RANDOM FOREST METHOD --------------------------------------------------------
# @describeIn mltrain Random Forest (RF) implementation (require
# \pkg{randomForest} package to use)
mltrain.baseRF <- function(object, ...) {
if (requireNamespace("randomForest", quietly = TRUE)) {
traindata <- object$data[, -object$labelindex]
labeldata <- object$data[, object$labelindex]
model <- randomForest::randomForest(traindata, labeldata, ...)
}
else {
stop(paste("There are no installed package 'randomForest' to use",
"randomForest classifier as base method"))
}
model
}
# @describeIn mlpredict Random Forest (RF) implementation (require
# \pkg{randomForest} package to use)
mlpredict.randomForest <- function(model, newdata, ...) {
if (!requireNamespace("randomForest", quietly = TRUE)) {
stop(paste("There are no installed package 'randomForest' to use",
"randomForest classifier as base method"))
}
result <- stats::predict(model, newdata, type = "prob", ...)
prediction <- colnames(result)[apply(result, 1, which.max)]
data.frame(
prediction = prediction,
probability = result[cbind(rownames(newdata), prediction)],
row.names = rownames(newdata)
)
}
# NAIVE BAYES METHOD ----------------------------------------------------------
# @describeIn mltrain Naive Bayes (NB) implementation (require
# \pkg{e1071} package to use)
mltrain.baseNB <- function(object, ...) {
if (requireNamespace("e1071", quietly = TRUE)) {
formula <- stats::as.formula(paste("`", object$labelname, "` ~ .", sep=""))
    #Avoid error because there is only one positive instance
duplicate <- any(table(object$data[,object$labelname]) == 1)
model <- e1071::naiveBayes(formula, rbind(object$data,
utiml_ifelse(duplicate,
object$data,
NULL)),
type = "raw", ...)
}
else {
stop(paste("There are no installed package 'e1071' to use naiveBayes",
"classifier as base method"))
}
model
}
# @describeIn mlpredict Naive Bayes (NB) implementation (require
# \pkg{e1071} package to use)
mlpredict.naiveBayes <- function(model, newdata, ...) {
if (!requireNamespace("e1071", quietly = TRUE)) {
stop(paste("There are no installed package 'e1071' to use naiveBayes",
"classifier as base method"))
}
result <- stats::predict(model, newdata, type = "raw", ...)
rownames(result) <- rownames(newdata)
classes <- colnames(result)[apply(result, 1, which.max)]
data.frame(
prediction = classes,
probability = result[cbind(rownames(newdata), classes)],
row.names = rownames(newdata)
)
}
# KNN METHOD ------------------------------------------------------------------
# @describeIn mltrain kNN implementation (require \pkg{kknn} package to use)
mltrain.baseKNN <- function(object, ...) {
if (!requireNamespace("kknn", quietly = TRUE)) {
stop(paste("There are no installed package 'kknn' to use kNN classifier as",
"base method"))
}
object$extrakNN <- list(...)
object
}
# @describeIn mlpredict kNN implementation (require \pkg{kknn} package to use)
mlpredict.baseKNN <- function(model, newdata, ...) {
if (!requireNamespace("kknn", quietly = TRUE)) {
stop(paste("There are no installed package 'kknn' to use kNN classifier as",
"base method"))
}
formula <- stats::as.formula(paste("`", model$labelname, "` ~ .", sep = ""))
args <- list(...)
if (is.null(model$extrakNN[["k"]]) || !is.null(args[["k"]])) {
result <- kknn::kknn(formula, rep_nom_attr(model$data, FALSE),
rep_nom_attr(newdata), ...)
}
else {
result <- kknn::kknn(formula, rep_nom_attr(model$data, FALSE),
rep_nom_attr(newdata), k = model$extrakNN[["k"]], ...)
}
prediction <- as.character(result$fitted.values)
all.prob <- as.matrix(result$prob)
rownames(all.prob) <- rownames(newdata)
data.frame(
prediction = prediction,
probability = all.prob[cbind(rownames(newdata), prediction)],
row.names = rownames(newdata)
)
}
# XGBoost METHOD ------------------------------------------------------------------
# @describeIn mltrain XGBoost implementation (require \pkg{xgboost} package)
mltrain.baseXGB <- function(object, ...) {
if (!requireNamespace("xgboost", quietly = TRUE)) {
stop(paste("There are no installed package 'xgboost' to use xgboost",
"classifier as base method"))
}
def.args <- list(
data = as.matrix(rep_nom_attr(object$data[, -object$labelindex])),
label = as.numeric(object$data[, object$labelindex]) - 1,
nthread = 1,
nrounds = 3,
verbose = FALSE,
objective = ifelse(nlevels(object$data[, object$labelindex]) == 2,
"binary:logistic", "multi:softprob")
)
if (nlevels(object$data[, object$labelindex]) > 2) {
def.args$num_class <- nlevels(object$data[, object$labelindex])
}
args <- list(...)
for (narg in names(args)) {
def.args[[narg]] <- args[[narg]]
}
model <- do.call(xgboost::xgboost, def.args)
attr(model, "classes") <- levels(object$data[, object$labelindex])
model
}
# @describeIn mlpredict XGBoost implementation (require \pkg{xgboost} package)
mlpredict.xgb.Booster <- function(model, newdata, ...) {
if (!requireNamespace("xgboost", quietly = TRUE)) {
stop(paste("There are no installed package 'xgboost' to use xgboost",
"classifier as base method"))
}
classes <- attr(model, "classes")
pred <- stats::predict(model, as.matrix(rep_nom_attr(newdata)), ...)
if (length(classes) == 2) {
bipartitions <- as.numeric(pred >= 0.5)
probabilities <- ifelse(bipartitions == 1, pred, 1 - pred)
} else {
pred <- matrix(pred, nrow=nrow(newdata), byrow = TRUE)
which.pred <- apply(pred, 1, which.max)
bipartitions <- classes[which.pred]
probabilities <- pred[cbind(seq(nrow(newdata)), which.pred)]
}
data.frame(
prediction = bipartitions,
probability = probabilities,
row.names = rownames(newdata)
)
}
# Majority METHOD ------------------------------------------------------------
# @describeIn mltrain Majority model
mltrain.baseMAJORITY <- function(object, ...) {
values <- table(object$data[, object$labelindex])
model <- list(
classes = names(values),
predict = names(which.max(values))
)
class(model) <- 'majorityModel'
model
}
# @describeIn mlpredict Majority prediction
mlpredict.majorityModel <- function(model, newdata, ...) {
data.frame(
prediction = rep(model$predict, nrow(newdata)),
probability = rep(1, nrow(newdata)),
row.names = rownames(newdata)
)
}
# Random METHOD ------------------------------------------------------------
# @describeIn mltrain Random model
mltrain.baseRANDOM <- function(object, ...) {
model <- list(
classes = as.character(unique(object$data[, object$labelindex]))
)
class(model) <- 'randomModel'
model
}
# @describeIn mlpredict Majority prediction
mlpredict.randomModel <- function(model, newdata, ...) {
data.frame(
prediction = sample(model$classes, nrow(newdata), replace = TRUE),
probability = sapply(stats::runif(nrow(newdata)), function (score) {
max(score, 1 - score)
}),
row.names = rownames(newdata)
)
}
# @describeIn mlpredict Empty model to fix the cases with few train examples
mlpredict.emptyModel <- function (model, newdata, ...) {
data.frame(
prediction = rep(0, nrow(newdata)),
probability = rep(1, nrow(newdata)),
row.names = rownames(newdata)
)
}
#' Print Majority model
#' @param x The base model
#' @param ... ignored
#'
#' @return No return value, called to print the model's details
#'
#' @export
print.majorityModel <- function (x, ...) {
cat("Majority Base Model\n\n")
cat("Label: ", attr(x, "label"), "\n")
cat("Class: ", paste(x$classes, collapse = ' | '))
cat("Predict: ", x$predict)
}
#' Print Random model
#' @param x The base model
#' @param ... ignored
#'
#' @return No return value, called to print the model's details
#'
#' @export
print.randomModel <- function (x, ...) {
cat("Random Base Model\n\n")
cat("Label: ", attr(x, "label"), "\n")
cat("Class: ", paste(x$classes, collapse = ' | '))
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/base_learner.R
|
#' Multi-label cross-validation
#'
#' Perform the cross validation procedure for multi-label learning.
#'
#' @family evaluation
#' @param mdata A mldr dataset.
#' @param method The multi-label classification method. It also accepts the name
#' of the method as a string.
#' @param ... Additional parameters required by the method.
#' @param cv.folds Number of folds. (Default: 10)
#' @param cv.sampling The method to split the data. The default methods are:
#' \describe{
#' \item{random}{Split randomly the folds.}
#'  \item{iterative}{Split the folds considering the labels proportions
#'                   individually. Some specific labels may not occur in all
#'                   folds.}
#' \item{stratified}{Split the folds considering the labelset proportions.}
#' }
#' (Default: "random")
#' @param cv.results Logical value indicating if the folds results should be
#' reported (Default: FALSE).
#' @param cv.predictions Logical value indicating if the predictions should be
#' reported (Default: FALSE).
#' @param cv.measures The measures names to be computed. Call
#' \code{multilabel_measures()} to see the expected measures. You can also
#' use \code{"bipartition"}, \code{"ranking"}, \code{"label-based"},
#' \code{"example-based"}, \code{"macro-based"}, \code{"micro-based"} and
#' \code{"label-problem"} to include a set of measures. (Default: "all").
#' @param cv.cores The number of cores to parallelize the cross validation
#' procedure. (Default: \code{options("utiml.cores", 1)})
#' @param cv.seed An optional integer used to set the seed. (Default:
#' \code{options("utiml.seed", NA)})
#'
#' @return If cv.results and cv.prediction are FALSE, the return is a vector
#' with the expected multi-label measures, otherwise, a list contained the
#' multi-label and the other expected results (the label measures and/or the
#' prediction object) for each fold.
#'
#' @export
#'
#' @examples
#' #Run 10 folds for BR method
#' res1 <- cv(toyml, br, base.algorithm="RANDOM", cv.folds=10)
#'
#' #Run 3 folds for RAkEL method and get the fold results and the prediction
#' res2 <- cv(mdata=toyml, method="rakel", base.algorithm="RANDOM", k=2, m=10,
#' cv.folds=3, cv.results=TRUE, cv.predictions=TRUE)
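#'
#' #A sketch (assumed usage): compute only the example-based measures
#' res3 <- cv(toyml, br, base.algorithm="RANDOM", cv.folds=3,
#'            cv.measures="example-based", cv.seed=1)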
cv <- function(mdata, method, ..., cv.folds=10,
cv.sampling=c("random", "iterative", "stratified"),
cv.results=FALSE, cv.predictions=FALSE, cv.measures="all",
cv.cores=getOption("utiml.cores", 1),
cv.seed=getOption("utiml.seed", NA)) {
if (!is.na(cv.seed)) {
set.seed(cv.seed)
}
cvdata <- create_kfold_partition(mdata, cv.folds, cv.sampling)
results <- parallel::mclapply(seq(cv.folds), function (k){
ds <- partition_fold(cvdata, k)
model <- do.call(method, c(list(mdata=ds$train), ...))
pred <- stats::predict(model, ds$test, ...)
c(
list(pred=pred),
multilabel_evaluate(ds$test, pred, cv.measures, labels=TRUE)
)
}, mc.cores = cv.cores)
obj = list(multilabel=do.call(rbind, lapply(results, "[[", "multilabel")))
if (cv.results) {
labels <- rownames(mdata$labels)
lfolds <- lapply(results, "[[", "labels")
obj$labels <- sapply(labels,
function(lbl) t(sapply(lfolds, function(x) x[lbl,])),
simplify = FALSE)
}
if (cv.predictions) {
obj$predictions <- lapply(results, "[[", "pred")
}
if (length(obj) == 1) {
return(colMeans(obj[[1]]))
} else {
obj
}
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/cross_validation.R
|
#' Toy multi-label dataset.
#'
#' A toy multi-label dataset is a synthetic dataset generated by the tool
#' \url{http://sites.labic.icmc.usp.br/mldatagen/} using the Hyperspheres
#' strategy. Its purpose is to be used for small tests and examples.
#'
#' @format A mldr object with 100 instances, 10 features and 5 labels:
#' \describe{
#' \item{att1}{Relevant numeric attribute between (-1 and 1)}
#' \item{att2}{Relevant numeric attribute between (-1 and 1)}
#' \item{att3}{Relevant numeric attribute between (-1 and 1)}
#' \item{att4}{Relevant numeric attribute between (-1 and 1)}
#' \item{att5}{Relevant numeric attribute between (-1 and 1)}
#' \item{att6}{Relevant numeric attribute between (-1 and 1)}
#' \item{att7}{Relevant numeric attribute between (-1 and 1)}
#' \item{iatt8}{Irrelevant numeric attribute between (-1 and 1)}
#' \item{iatt9}{Irrelevant numeric attribute between (-1 and 1)}
#' \item{ratt10}{Redundant numeric attribute between (-1 and 1)}
#' \item{y1}{Label 'y1' - Frequency: 0.17}
#' \item{y2}{Label 'y2' - Frequency: 0.78}
#' \item{y3}{Label 'y3' - Frequency: 0.19}
#' \item{y4}{Label 'y4' - Frequency: 0.69}
#' \item{y5}{Label 'y5' - Frequency: 0.17}
#' }
#'
#' @details General Information
#' \itemize{
#' \item Cardinality: 2
#' \item Density: 0.4
#' \item Distinct multi-labels: 18
#' \item Number of single labelsets: 5
#' \item Max frequency: 23
#' }
#'
#' @source Generated by \url{http://sites.labic.icmc.usp.br/mldatagen/}
#' Configuration:
#' \itemize{
#' \item Strategy: Hyperspheres
#' \item Relevant Features: 7
#' \item Irrelevant Features: 2
#' \item Redundant Features: 1
#' \item Number of Labels (q): 5
#' \item Number of Instances: 100
#' \item Noise (from 0 to 1): 0.05
#' \item Maximum Radius/Half-Edge of the Hyperspheres/Hypercubes: 0.8
#' \item Minimum Radius/Half-Edge of the Hyperspheres/Hypercubes: ((q/10)+1)/q
#' }
"toyml"
#' Foodtruck multi-label dataset.
#'
#' The foodtruck multi-label dataset is a real multi-label dataset, which uses
#' habits and personal information to predict food truck cuisines.
#'
#' @format A mldr object with 407 instances, 21 features and 12 labels:
#'
#' @details General Information
#' \itemize{
#' \item Cardinality: 2.28
#' \item Density: 0.19
#' \item Distinct multi-labels: 117
#' \item Number of single labelsets: 74
#' \item Max frequency: 114
#' }
#'
#' @source The dataset is described in:
#' Rivolli A., Parker L.C., de Carvalho A.C.P.L.F. (2017) Food Truck
#' Recommendation Using Multi-label Classification. In: Oliveira E., Gama J.,
#' Vale Z., Lopes Cardoso H. (eds) Progress in Artificial Intelligence. EPIA
#' 2017. Lecture Notes in Computer Science, vol 10423. Springer, Cham
"foodtruck"
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/data.R
|
#' Compute the multi-label ensemble predictions based on some vote schema
#'
#' @param predictions A list of multi-label predictions (mlresult).
#' @param vote.schema Defines how the ensemble must compute the predictions.
#'  The default valid options are:
#' \describe{
#'  \item{'avg'}{Compute the mean of the probabilities and the bipartitions}
#'  \item{'maj'}{Compute the majority of votes}
#'  \item{'max'}{Compute the highest probability for each instance/label}
#'  \item{'min'}{Compute the lowest probability for each instance/label}
#' }. (Default: 'maj')
#' @param probability A logical value. If \code{TRUE} the predicted values are
#' the score between 0 and 1, otherwise the values are bipartition 0 or 1.
#' @return A mlresult with computed predictions.
#' @note You can create your own vote schema: just create a function that
#'  receives two matrices (bipartitions and probabilities) and returns a list
#'  with the final bipartition and probability values.
#'
#'  Remember that the ensemble votes are computed for each label. Thus, the
#'  bipartition and probability matrices passed as arguments to this function
#'  contain the predictions for a single label.
#' @export
#'
#' @examples
#' \donttest{
#' model <- br(toyml, "KNN")
#' predictions <- list(
#' predict(model, toyml[1:10], k=1),
#' predict(model, toyml[1:10], k=3),
#' predict(model, toyml[1:10], k=5)
#' )
#'
#' result <- compute_multilabel_predictions(predictions, "maj")
#'
#' ## Random choice
#' random_choice <- function (bipartition, probability) {
#' cols <- sample(seq(ncol(bipartition)), nrow(bipartition), replace = TRUE)
#' list(
#' bipartition = bipartition[cbind(seq(nrow(bipartition)), cols)],
#' probability = probability[cbind(seq(nrow(probability)), cols)]
#' )
#' }
#' result <- compute_multilabel_predictions(predictions, "random_choice")
#' }
compute_multilabel_predictions <- function(predictions,
vote.schema = "maj",
probability = getOption("utiml.use.probs", TRUE)
) {
if (length(unique(lapply(predictions, dimnames))) > 1) {
stop("The predictions must be the same dimensions and names.")
}
utiml_ensemble_check_voteschema(vote.schema, FALSE)
vote.method <- utiml_ensemble_method(vote.schema)
probs <- lapply(predictions, as.probability)
preds <- lapply(predictions, as.bipartition)
examples <- rownames(probs[[1]])
labels <- utiml_rename(colnames(probs[[1]]))
final.prediction <- lapply(labels, function (label) {
lpreds <- do.call(cbind,
lapply(preds, function (prediction) prediction[, label]))
lprobs <- do.call(cbind,
lapply(probs, function (prediction) prediction[, label]))
utiml_compute_ensemble(lpreds, lprobs, vote.method, examples)
})
utiml_predict(final.prediction, probability)
}
# Internal methods -------------------------------------------------------------
# Average vote combination for a single-label prediction
#
# Compute the prediction for a single label using the average vote schema.
# The resulting probabilities are the averaged values.
#
# @param bipartition A matrix with all bipartition predictions for a single
#  label. The columns are the predictions and the rows are the examples.
# @param probability A matrix with all probability predictions for a single
#  label. The columns are the predictions and the rows are the examples.
# @return A list with two values "bipartition" and "probability".
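#
# @examples
# \dontrun{
# # A minimal sketch with hypothetical votes from three models for two
# # examples of a single label (values are illustrative only):
# bip  <- matrix(c(1, 0, 1, 0, 0, 1), nrow = 2)
# prob <- matrix(c(0.9, 0.4, 0.6, 0.3, 0.2, 0.7), nrow = 2)
# utiml_ensemble_average(bip, prob)
# ## $bipartition: 1 0   (row means 0.67 and 0.33, thresholded at 0.5)
# ## $probability: 0.57 0.47 (row means of the scores)
# }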
utiml_ensemble_average <- function (bipartition, probability) {
list(
bipartition = as.numeric(rowMeans(bipartition) >= 0.5),
probability = rowMeans(probability)
)
}
# Verify if a schema vote name is valid
#
# @param vote.schema The name of the vote schema
# @param accept.null Logical value determining whether vote.schema = NULL is
#  also valid. (Default: TRUE)
# @return TRUE or throw an error message otherwise
utiml_ensemble_check_voteschema <- function (vote.schema, accept.null = TRUE) {
if (is.null(vote.schema)) {
if (!accept.null) {
stop("The enseble vote schema can not be NULL")
}
}
else if (!vote.schema %in% c("avg", "maj", "max", "min")) {
if (!exists(vote.schema, mode = "function")) {
stop(paste("The compute ensemble method '", vote.schema,
"' is not a valid function", sep=''))
}
}
invisible(TRUE)
}
# Majority vote combination for single-label prediction
#
# Compute the single-label prediction using the majority vote schema.
# The resulting probability is the proportion of positive votes, which is
# also used to define the bipartition (threshold of 0.5).
#
# @param bipartition A matrix with all bipartition predictions for a single
#  label. The columns are the predictions and the rows are the examples.
# @param probability A matrix with all probability predictions for a single
#  label. The columns are the predictions and the rows are the examples.
# @return A list with two values "bipartition" and "probability".
utiml_ensemble_majority <- function (bipartition, probability) {
probs <- rowMeans(bipartition)
list(
bipartition = as.numeric(probs >= 0.5),
probability = probs
)
}
# Maximum vote combination for single-label prediction
#
# Compute the single-label prediction using the maximum vote schema. The
# resulting probabilities are the maximum values.
#
# @param bipartition A matrix with all bipartition predictions for a single
#  label. The columns are the predictions and the rows are the examples.
# @param probability A matrix with all probability predictions for a single
#  label. The columns are the predictions and the rows are the examples.
# @return A list with two values "bipartition" and "probability".
utiml_ensemble_maximum <- function (bipartition, probability) {
list(
bipartition = apply(bipartition, 1, max),
probability = apply(probability, 1, max)
)
}
# Define the method name related to the vote schema
#
# @param vote.schema Defines how the ensemble must compute the predictions.
# @return The method name that will compute the votes
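#
# @examples
# \dontrun{
# # Known schema names map to internal functions; any other name (e.g. a
# # hypothetical user-defined "my_schema") is passed through unchanged.
# utiml_ensemble_method("maj")       ## "utiml_ensemble_majority"
# utiml_ensemble_method("my_schema") ## "my_schema"
# }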
utiml_ensemble_method <- function(vote.schema) {
votes <- c(
avg = "utiml_ensemble_average",
maj = "utiml_ensemble_majority",
max = "utiml_ensemble_maximum",
min = "utiml_ensemble_minimum"
)
as.character(ifelse(is.na(votes[vote.schema]),
vote.schema, votes[vote.schema]))
}
# Minimum vote combination for single-label prediction
#
# Compute the single-label prediction using the minimum vote schema. The
# resulting probabilities are the minimum values.
#
# @param bipartition A matrix with all bipartition predictions for a single
#  label. The columns are the predictions and the rows are the examples.
# @param probability A matrix with all probability predictions for a single
#  label. The columns are the predictions and the rows are the examples.
# @return A list with two values "bipartition" and "probability".
utiml_ensemble_minimum <- function (bipartition, probability) {
list(
bipartition = apply(bipartition, 1, min),
probability = apply(probability, 1, min)
)
}
# @describeIn compute_multilabel_predictions Internal version
utiml_predict_ensemble <- function(predictions, vote.schema,
probability) {
if (is.null(vote.schema)) {
return(predictions)
} else {
compute_multilabel_predictions(predictions, vote.schema, probability)
}
}
# Compute binary predictions
#
# @param bipartitions A matrix with bipartitions values.
# @param probabilities A matrix with probabilities values.
# @param vote.methods The vote schema method.
# @param rnames The row names.
#
# @return A binary.prediction object
utiml_compute_ensemble <- function (bipartitions, probabilities,
vote.methods, rnames) {
result <- do.call(vote.methods, list(bipartitions, probabilities))
names(result$bipartition) <- names(result$probability) <- rnames
utiml_binary_prediction(result$bipartition, result$probability)
}
# Predict binary predictions
#
# It is very similar to utiml_compute_ensemble but differs in its arguments
#
# @param predictions A list of binary predictions.
# @param vote.schema The name of vote schema.
#
# @return A binary.prediction object
utiml_predict_binary_ensemble <- function(predictions, vote.schema) {
lpreds <- do.call(cbind,
lapply(predictions, function (pred) pred$bipartition))
lprobs <- do.call(cbind,
lapply(predictions, function (pred) pred$probability))
utiml_compute_ensemble(lpreds, lprobs,
utiml_ensemble_method(vote.schema),
rownames(lpreds))
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/ensemble.R
|
#' Compute the confusion matrix for a multi-label prediction
#'
#' The multi-label confusion matrix is an object that contains the prediction,
#' the expected values and also a lot of pre-processed information related to
#' these data.
#'
#' @family evaluation
#' @param mdata A mldr dataset
#' @param mlresult A mlresult prediction
#'
#' @return A mlconfmat object that contains:
#' \describe{
#' \item{Z}{The bipartition matrix prediction.}
#' \item{Fx}{The score/probability matrix prediction.}
#' \item{R}{The ranking matrix prediction.}
#' \item{Y}{The expected matrix bipartition.}
#' \item{TP}{The True Positive matrix values.}
#' \item{FP}{The False Positive matrix values.}
#' \item{TN}{The True Negative matrix values.}
#' \item{FN}{The False Negative matrix values.}
#' \item{Zi}{The total of positive predictions for each instance.}
#' \item{Yi}{The total of positive expected for each instance.}
#' \item{TPi}{The total of True Positive predictions for each instance.}
#' \item{FPi}{The total of False Positive predictions for each instance.}
#' \item{TNi}{The total of True Negative predictions for each instance.}
#' \item{FNi}{The total False Negative predictions for each instance.}
#' \item{Zl}{The total of positive predictions for each label.}
#' \item{Yl}{The total of positive expected for each label.}
#' \item{TPl}{The total of True Positive predictions for each label.}
#' \item{FPl}{The total of False Positive predictions for each label.}
#' \item{TNl}{The total of True Negative predictions for each label.}
#' \item{FNl}{The total False Negative predictions for each label.}
#' }
#' @export
#'
#' @examples
#' \donttest{
#' prediction <- predict(br(toyml), toyml)
#'
#' mlconfmat <- multilabel_confusion_matrix(toyml, prediction)
#'
#' # Label with the most number of True Positive values
#' which.max(mlconfmat$TPl)
#'
#' # Number of wrong predictions for each label
#' errors <- mlconfmat$FPl + mlconfmat$FNl
#'
#' # Examples predict with all labels
#' which(mlconfmat$Zi == toyml$measures$num.labels)
#'
#' # You can join one or more mlconfmat
#' part1 <- create_subset(toyml, 1:50)
#' part2 <- create_subset(toyml, 51:100)
#' confmatp1 <- multilabel_confusion_matrix(part1, prediction[1:50, ])
#' confmatp2 <- multilabel_confusion_matrix(part2, prediction[51:100, ])
#' mlconfmat <- confmatp1 + confmatp2
#' }
multilabel_confusion_matrix <- function (mdata, mlresult) {
mdim <- c(mdata$measures$num.instances, mdata$measures$num.labels)
if (any(mdim != dim(mlresult))) {
stop("Wrong dimension between the real and expected data")
}
if (!is(mlresult, "mlresult")) {
mlresult <- as.mlresult(mlresult)
}
expected <- mdata$dataset[, mdata$labels$index]
bipartition <- as.bipartition(mlresult)
scores <- as.probability(mlresult)
  #TODO check whether this apply is correct
#TODO review ties.method to use the default
ranking <- t(apply(1 - scores, 1, rank, ties.method = "first"))
predict_and_expected <- expected & bipartition
predict_and_nexpected <- !expected & bipartition
npredict_and_nexpected <- !expected & !bipartition
npredict_and_expected <- expected & !bipartition
cm <- list(
Z = bipartition,
Y = expected,
Fx = scores,
R = ranking,
TP = predict_and_expected,
FP = predict_and_nexpected,
TN = npredict_and_nexpected,
FN = npredict_and_expected,
Zi = rowSums(bipartition),
Yi = rowSums(expected),
Zl = colSums(bipartition),
Yl = colSums(expected),
TPi = rowSums(predict_and_expected),
FPi = rowSums(predict_and_nexpected),
TNi = rowSums(npredict_and_nexpected),
FNi = rowSums(npredict_and_expected),
TPl = colSums(predict_and_expected),
FPl = colSums(predict_and_nexpected),
TNl = colSums(npredict_and_nexpected),
FNl = colSums(npredict_and_expected)
)
class(cm) <- "mlconfmat"
cm
}
#' Join two multi-label confusion matrix
#'
#' @param mlcm1 A mlconfmat
#' @param mlcm2 Other mlconfmat
#'
#' @return mlconfmat
#' @export
`+.mlconfmat` <- function (mlcm1, mlcm2) {
  if (ncol(mlcm1$Z) != ncol(mlcm2$Z)) {
stop("Different number of labels for each confusion matrix")
}
mlcm1$Z <- rbind(mlcm1$Z, mlcm2$Z)
mlcm1$Y <- rbind(mlcm1$Y, mlcm2$Y)
mlcm1$Fx <- rbind(mlcm1$Fx, mlcm2$Fx)
mlcm1$R <- rbind(mlcm1$R, mlcm2$R)
mlcm1$TP <- rbind(mlcm1$TP, mlcm2$TP)
mlcm1$FP <- rbind(mlcm1$FP, mlcm2$FP)
mlcm1$TN <- rbind(mlcm1$TN, mlcm2$TN)
mlcm1$FN <- rbind(mlcm1$FN, mlcm2$FN)
mlcm1$Zi <- c(mlcm1$Zi, mlcm2$Zi)
mlcm1$Yi <- c(mlcm1$Yi, mlcm2$Yi)
mlcm1$Zl <- mlcm1$Zl + mlcm2$Zl
mlcm1$Yl <- mlcm1$Yl + mlcm2$Yl
mlcm1$TPi <- c(mlcm1$TPi, mlcm2$TPi)
mlcm1$FPi <- c(mlcm1$FPi, mlcm2$FPi)
mlcm1$TNi <- c(mlcm1$TNi, mlcm2$TNi)
mlcm1$FNi <- c(mlcm1$FNi, mlcm2$FNi)
mlcm1$TPl <- mlcm1$TPl + mlcm2$TPl
mlcm1$FPl <- mlcm1$FPl + mlcm2$FPl
mlcm1$TNl <- mlcm1$TNl + mlcm2$TNl
mlcm1$FNl <- mlcm1$FNl + mlcm2$FNl
mlcm1
}
#' Join a list of multi-label confusion matrix
#'
#' @param object A mlconfmat object or a list of mlconfmat objects
#' @param ... mlconfmat objects
#'
#' @return mlconfmat
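#'
#' @examples
#' \donttest{
#' # A minimal usage sketch, mirroring the multilabel_confusion_matrix example
#' prediction <- predict(br(toyml), toyml)
#' cm1 <- multilabel_confusion_matrix(create_subset(toyml, 1:50),
#'                                    prediction[1:50, ])
#' cm2 <- multilabel_confusion_matrix(create_subset(toyml, 51:100),
#'                                    prediction[51:100, ])
#' merge_mlconfmat(list(cm1, cm2))
#' }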
#' @export
merge_mlconfmat <- function (object, ...) {
  # Wrap a single mlconfmat in a list so that c() does not flatten its fields
  if (is(object, "mlconfmat")) {
    object <- list(object)
  }
  Reduce('+', c(object, list(...)))
}
#' Evaluate multi-label predictions
#'
#' This method is used to evaluate multi-label predictions. You can create a
#' confusion matrix object or use the test dataset and the predictions
#' directly. You can also specify which measures you want to use.
#'
#' @family evaluation
#' @param object A mldr dataset or a mlconfmat confusion matrix
#' @param mlresult The prediction result (Optional, required only when the
#' mldr is used).
#' @param measures The measures names to be computed. Call
#' \code{multilabel_measures()} to see the expected measures. You can also
#' use \code{"bipartition"}, \code{"ranking"}, \code{"label-based"},
#' \code{"example-based"}, \code{"macro-based"}, \code{"micro-based"} and
#' \code{"label-problem"} to include a set of measures. (Default: "all").
#' @param labels Logical value defining if the label results should be also
#' returned. (Default: \code{FALSE})
#' @param ... Extra parameters to specific measures.
#'
#' @return If labels is FALSE, a vector with the computed multi-label measures;
#'  otherwise, a list containing the multi-label and the per-label measures.
#' @references
#' Madjarov, G., Kocev, D., Gjorgjevikj, D., & Dzeroski, S. (2012). An
#' extensive experimental comparison of methods for multi-label learning.
#' Pattern Recognition, 45(9), 3084-3104.
#' Zhang, M.-L., & Zhou, Z.-H. (2014). A Review on Multi-Label Learning
#' Algorithms. IEEE Transactions on Knowledge and Data Engineering, 26(8),
#' 1819-1837.
#' Gibaja, E., & Ventura, S. (2015). A Tutorial on Multilabel Learning.
#'  ACM Comput. Surv., 47(3), 52:1-52:38.
#'
#' @export
#'
#' @examples
#' \donttest{
#' prediction <- predict(br(toyml), toyml)
#'
#' # Compute all measures
#' multilabel_evaluate(toyml, prediction)
#' multilabel_evaluate(toyml, prediction, labels=TRUE) # Return a list
#'
#' # Compute bipartition measures
#' multilabel_evaluate(toyml, prediction, "bipartition")
#'
#' # Compute multiple measures
#' multilabel_evaluate(toyml, prediction, c("accuracy", "F1", "macro-based"))
#'
#' # Compute the confusion matrix before the measures
#' cm <- multilabel_confusion_matrix(toyml, prediction)
#' multilabel_evaluate(cm)
#' multilabel_evaluate(cm, "example-based")
#' multilabel_evaluate(cm, c("hamming-loss", "subset-accuracy", "F1"))
#' }
multilabel_evaluate <- function(object, ...) {
UseMethod("multilabel_evaluate")
}
#' @describeIn multilabel_evaluate Evaluate predictions using a mldr dataset
#' @export
multilabel_evaluate.mldr <- function (object, mlresult, measures = c("all"),
labels=FALSE, ...) {
mdata <- object
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
mlconfmat <- multilabel_confusion_matrix(mdata, mlresult)
multilabel_evaluate.mlconfmat(mlconfmat, measures, labels, ...)
}
#' @describeIn multilabel_evaluate Evaluate predictions using a mlconfmat confusion matrix
#' @export
multilabel_evaluate.mlconfmat <- function (object, measures = c("all"),
labels=FALSE, ...) {
mlconfmat <- object
if (!is(mlconfmat, "mlconfmat")) {
stop("First argument must be an mlconfmat object")
}
default.methods <- list(
'accuracy' = "utiml_measure_accuracy",
'average-precision' = "utiml_measure_average_precision",
'coverage' = "utiml_measure_coverage",
'F1' = "utiml_measure_f1",
'hamming-loss' = "utiml_measure_hamming_loss",
'is-error' = "utiml_measure_is_error",
'macro-accuracy' = "utiml_measure_macro_accuracy",
'macro-AUC' = "utiml_measure_macro_AUC",
'macro-F1' = "utiml_measure_macro_f1",
'macro-precision' = "utiml_measure_macro_precision",
'macro-recall' = "utiml_measure_macro_recall",
'margin-loss' = "utiml_measure_margin_loss",
'micro-accuracy' = "utiml_measure_micro_accuracy",
'micro-AUC' = "utiml_measure_micro_AUC",
'micro-F1' = "utiml_measure_micro_f1",
'micro-precision' = "utiml_measure_micro_precision",
'micro-recall' = "utiml_measure_micro_recall",
'one-error' = "utiml_measure_one_error",
'precision' = "utiml_measure_precision",
'ranking-error' = "utiml_measure_ranking_error",
'ranking-loss' = "utiml_measure_ranking_loss",
'recall' = "utiml_measure_recall",
'subset-accuracy' = "utiml_measure_subset_accuracy",
"clp" = "utiml_measure_clp",
"mlp" = "utiml_measure_mlp",
"wlp" = "utiml_measure_wlp"
)
#Extra methods
measures <- utiml_measure_names(measures)
midx <- measures %in% names(default.methods)
extra.methods <- measures[!midx]
if (!all(sapply(extra.methods, exists, mode="function"))) {
stop(paste("Some methods are not found: ",
extra.methods[!sapply(extra.methods, exists, mode="function")]))
}
names(extra.methods) <- extra.methods
all.methods <- c(unlist(default.methods[measures[midx]]), extra.methods)
extra = list(...)
measures <- sapply(all.methods, function (mname) {
params <- c(list(mlconfmat = mlconfmat), extra)
do.call(mname, params)
}, simplify = FALSE)
mlvalues <- sapply(measures, mean)
if (labels) {
confmat <- do.call(cbind, mlconfmat[c("TPl","TNl","FPl","FNl")])
colnames(confmat) <- c("TP","TN","FP","FN")
if (!"macro-accuracy" %in% names(measures)) {
measures$`macro-accuracy` <- utiml_measure_macro_accuracy(mlconfmat)
}
measures$balacc <- utiml_measure_macro_balacc(mlconfmat)
labelbased <- do.call(cbind, measures[which(sapply(measures, length) > 1)])
colnames(labelbased) <- gsub("macro-", "", colnames(labelbased))
return(
list(
multilabel=mlvalues,
labels=cbind(labelbased[,sort(colnames(labelbased))], confmat)
)
)
} else {
return(mlvalues)
}
}
# MULTILABEL MEASURES -------------------------------------------------------
# Multi-label Accuracy Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Gibaja, E., & Ventura, S. (2015). A Tutorial on Multilabel
# Learning. ACM Comput. Surv., 47(3), 52:1-52:38.
utiml_measure_accuracy <- function (mlconfmat, ...) {
sum(mlconfmat$TPi / rowSums(mlconfmat$Z | mlconfmat$Y), na.rm = TRUE) /
nrow(mlconfmat$Y)
}
# Multi-label Average Precision Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Schapire, R. E., & Singer, Y. (2000). BoosTexter: A boosting-
# based system for text categorization. Machine Learning, 39(2), 135-168.
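#
# @examples
# \dontrun{
# # Hypothetical worked case: an instance whose two relevant labels are
# # ranked 1 and 3 contributes ((1/1) + (2/3)) / 2 = 0.83 to the mean, which
# # is taken over all instances with at least one relevant label.
# }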
utiml_measure_average_precision <- function (mlconfmat, ...) {
#Remove instance without labels
non.empty <- which(mlconfmat$Yi > 0)
Y <- mlconfmat$Y[non.empty, ]
Yi <- mlconfmat$Yi[non.empty]
Rank <- mlconfmat$R[non.empty, ]
mean(sapply(seq(nrow(Y)), function (i){
rks <- Rank[i, Y[i,] == 1]
sum(unlist(lapply(rks, function (r) sum(rks <= r) / r)))
}) / Yi)
}
# Multi-label Coverage Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Schapire, R. E., & Singer, Y. (2000). BoosTexter: A boosting-
# based system for text categorization. Machine Learning, 39(2), 135-168.
utiml_measure_coverage <- function (mlconfmat, ...) {
#Remove instance without labels
non.empty <- which(mlconfmat$Yi > 0)
Y <- mlconfmat$Y[non.empty, ]
Rank <- mlconfmat$R[non.empty, ]
mean(sapply(seq(nrow(Y)), function (i) {
max(Rank[i, Y[i,] == 1]) - 1
}))
}
# Multi-label F1 Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Gibaja, E., & Ventura, S. (2015). A Tutorial on Multilabel
# Learning. ACM Comput. Surv., 47(3), 52:1-52:38.
utiml_measure_f1 <- function (mlconfmat, ...) {
sum((2 * mlconfmat$TPi) / (mlconfmat$Zi + mlconfmat$Yi), na.rm = TRUE) /
nrow(mlconfmat$Y)
}
# Multi-label Hamming Loss Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Schapire, R. E., & Singer, Y. (1999). Improved boosting
# algorithm using confidence-rated predictions. Machine Learning, 297-336.
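#
# @examples
# \dontrun{
# # Hypothetical worked case with 2 examples and 3 labels: if
# # Z = rbind(c(1,0,1), c(0,1,0)) and Y = rbind(c(1,1,1), c(0,0,0)),
# # each row differs in one of the three positions, so the hamming loss is
# # mean(c(1/3, 1/3)) = 0.33
# }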
utiml_measure_hamming_loss <- function (mlconfmat, ...) {
mean(apply(xor(mlconfmat$Z, mlconfmat$Y), 1, sum) / ncol(mlconfmat$Y))
}
# Multi-label Is Error Measure
# @param mlconfmat Confusion matrix
# @param ranking The expected matrix ranking
# @param ... ignored
# @references Crammer, K., & Singer, Y. (2003). A Family of Additive Online
# Algorithms for Category Ranking. Journal of Machine Learning Research, 3(6),
# 1025-1058.
utiml_measure_is_error <- function (mlconfmat, ranking, ...) {
if (missing(ranking)) {
stop("Argument ranking not informed for measure 'is-error'")
}
mean(rowSums(mlconfmat$R != ranking) != 0)
}
# Multi-label Macro-Accuracy Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Gibaja, E., & Ventura, S. (2015). A Tutorial on Multilabel
# Learning. ACM Comput. Surv., 47(3), 52:1-52:38.
utiml_measure_macro_accuracy <- function (mlconfmat, ...) {
utiml_measure_binary_accuracy(mlconfmat$TPl, mlconfmat$FPl,
mlconfmat$TNl, mlconfmat$FNl)
}
# Multi-label Macro-AUC Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Zhang, M.-L., & Zhou, Z.-H. (2014). A Review on Multi-Label
# Learning Algorithms. IEEE Transactions on Knowledge and Data Engineering,
# 26(8), 1819-1837.
utiml_measure_macro_AUC <- function (mlconfmat, ...) {
sapply(seq(ncol(mlconfmat$Y)), function (col){
utiml_measure_binary_AUC(mlconfmat$Fx[, col], mlconfmat$Y[, col])
})
}
utiml_measure_macro_balacc <- function (mlconfmat, ...) {
utiml_measure_binary_balacc(mlconfmat$TPl, mlconfmat$FPl,
mlconfmat$TNl, mlconfmat$FNl)
}
# Multi-label Macro-F1 Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Gibaja, E., & Ventura, S. (2015). A Tutorial on Multilabel
# Learning. ACM Comput. Surv., 47(3), 52:1-52:38.
utiml_measure_macro_f1 <- function (mlconfmat, ...) {
utiml_measure_binary_f1(mlconfmat$TPl, mlconfmat$FPl,
mlconfmat$TNl, mlconfmat$FNl)
}
# Multi-label Macro-Precision Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Gibaja, E., & Ventura, S. (2015). A Tutorial on Multilabel
# Learning. ACM Comput. Surv., 47(3), 52:1-52:38.
utiml_measure_macro_precision <- function (mlconfmat, ...) {
utiml_measure_binary_precision(mlconfmat$TPl, mlconfmat$FPl,
mlconfmat$TNl, mlconfmat$FNl)
}
# Multi-label Macro-Recall Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Gibaja, E., & Ventura, S. (2015). A Tutorial on Multilabel
# Learning. ACM Comput. Surv., 47(3), 52:1-52:38.
utiml_measure_macro_recall <- function (mlconfmat, ...) {
utiml_measure_binary_recall(mlconfmat$TPl, mlconfmat$FPl,
mlconfmat$TNl, mlconfmat$FNl)
}
# Multi-label Margin Loss Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Loza Mencia, E., & Furnkranz, J. (2010). Efficient Multilabel
# Classification Algorithms for Large-Scale Problems in the Legal Domain.
# In Semantic Processing of Legal Texts (pp. 192-215).
utiml_measure_margin_loss <- function (mlconfmat, ...) {
mean(sapply(seq(nrow(mlconfmat$Y)), function (i){
idxY <- mlconfmat$Y[i,] == 1
max(0, max(mlconfmat$R[i, idxY], 0) -
min(mlconfmat$R[i, !idxY], length(idxY)))
}))
}
# Multi-label Micro-Accuracy Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Gibaja, E., & Ventura, S. (2015). A Tutorial on Multilabel
# Learning. ACM Comput. Surv., 47(3), 52:1-52:38.
utiml_measure_micro_accuracy <- function (mlconfmat, ...) {
utiml_measure_binary_accuracy(sum(mlconfmat$TPl), sum(mlconfmat$FPl),
sum(mlconfmat$TNl), sum(mlconfmat$FNl))
}
# Multi-label Micro-AUC Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Zhang, M.-L., & Zhou, Z.-H. (2014). A Review on Multi-Label
# Learning Algorithms. IEEE Transactions on Knowledge and Data Engineering,
# 26(8), 1819-1837.
utiml_measure_micro_AUC <- function (mlconfmat, ...) {
utiml_measure_binary_AUC(as.numeric(mlconfmat$Fx),
as.numeric(as.matrix(mlconfmat$Y)))
}
# Multi-label Micro-F1 Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Gibaja, E., & Ventura, S. (2015). A Tutorial on Multilabel
# Learning. ACM Comput. Surv., 47(3), 52:1-52:38.
utiml_measure_micro_f1 <- function (mlconfmat, ...) {
utiml_measure_binary_f1(sum(mlconfmat$TPl), sum(mlconfmat$FPl),
sum(mlconfmat$TNl), sum(mlconfmat$FNl))
}
# Multi-label Micro-Precision Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Gibaja, E., & Ventura, S. (2015). A Tutorial on Multilabel
# Learning. ACM Comput. Surv., 47(3), 52:1-52:38.
utiml_measure_micro_precision <- function (mlconfmat, ...) {
utiml_measure_binary_precision(sum(mlconfmat$TPl), sum(mlconfmat$FPl),
sum(mlconfmat$TNl), sum(mlconfmat$FNl))
}
# Multi-label Micro-Recall Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Gibaja, E., & Ventura, S. (2015). A Tutorial on Multilabel
# Learning. ACM Comput. Surv., 47(3), 52:1-52:38.
utiml_measure_micro_recall <- function (mlconfmat, ...) {
utiml_measure_binary_recall(sum(mlconfmat$TPl), sum(mlconfmat$FPl),
sum(mlconfmat$TNl), sum(mlconfmat$FNl))
}
# Multi-label One Error Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Schapire, R. E., & Singer, Y. (2000). BoosTexter: A boosting-
# based system for text categorization. Machine Learning, 39(2), 135-168.
utiml_measure_one_error <- function (mlconfmat, ...) {
rowcol <- cbind(seq(nrow(mlconfmat$Y)), apply(mlconfmat$R, 1, which.min))
mean(1 - mlconfmat$Y[rowcol])
}
# Multi-label Precision Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Godbole, S., & Sarawagi, S. (2004). Discriminative Methods for
# Multi-labeled Classification. In Proceedings of the 8th Pacific-Asia
# Conference on Knowledge Discovery and Data Mining (PAKDD 2004) (pp. 22-30).
utiml_measure_precision <- function (mlconfmat, ...) {
sum(mlconfmat$TPi / mlconfmat$Zi, na.rm = TRUE) / nrow(mlconfmat$Y)
}
# Multi-label Ranking Error Measure
# @param mlconfmat Confusion matrix
# @param ranking A matrix ranking
# @param ... ignored
# @references Park, S.-H., & Furnkranz, J. (2008). Multi-Label Classification
# with Label Constraints. Proceedings of the ECML PKDD 2008 Workshop on
# Preference Learning (PL-08, Antwerp, Belgium), 157-171.
utiml_measure_ranking_error <- function (mlconfmat, ranking, ...) {
if (missing(ranking)) {
stop("Argument ranking not informed for measure 'is-error'")
}
#TODO
}
# Multi-label Ranking Loss Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Schapire, R. E., & Singer, Y. (1999). Improved boosting
# algorithm using confidence-rated predictions. Machine Learning, 297-336.
utiml_measure_ranking_loss <- function (mlconfmat, ...) {
weight <- 1 / (mlconfmat$Yi * (length(mlconfmat$Yl) - mlconfmat$Yi))
weight <- ifelse(weight == Inf, 0, weight)
E <- sapply(seq(nrow(mlconfmat$Y)), function (i) {
idxY <- mlconfmat$Y[i,] == 1
rkNY <- mlconfmat$R[i, !idxY]
sum(unlist(lapply(mlconfmat$R[i, idxY], function (r) sum(r > rkNY))))
})
mean(weight * E)
}
# Multi-label Recall Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Godbole, S., & Sarawagi, S. (2004). Discriminative Methods for
# Multi-labeled Classification. In Proceedings of the 8th Pacific-Asia
# Conference on Knowledge Discovery and Data Mining (PAKDD 2004) (pp. 22-30).
utiml_measure_recall <- function (mlconfmat, ...) {
sum(mlconfmat$TPi / mlconfmat$Yi, na.rm = TRUE) / nrow(mlconfmat$Y)
}
# Multi-label Subset Accuracy Measure
# @param mlconfmat Confusion matrix
# @param ... ignored
# @references Zhu, S., Ji, X., Xu, W., & Gong, Y. (2005). Multilabelled
# Classification Using Maximum Entropy Method. In Proceedings of the 28th
# Annual International ACM SIGIR Conference on Research and Development in
# Information Retrieval (SIGIR'05) (pp. 274-281).
utiml_measure_subset_accuracy <- function (mlconfmat, ...) {
mean(apply(mlconfmat$Z == mlconfmat$Y, 1, all))
}
# BINARY MEASURES -----------------------------------------------------------
# Compute the binary accuracy
# @param TP The number of True Positive values
# @param FP The number of False Positive values
# @param TN The number of True Negative values
# @param FN The number of False Negative values
#
# @return Accuracy value between 0 and 1
utiml_measure_binary_accuracy <- function (TP, FP, TN, FN) {
(TP + TN) / (TP + FP + TN + FN)
}
# Compute the binary AUC
# @param scores The probability/score from a single label
# @param labels The expected label predictions
#
# @return AUC value between 0 and 1
utiml_measure_binary_AUC <- function (scores, labels) {
if (nlevels(as.factor(labels)) != 2) {
return(NA)
} else {
ROCR::performance(ROCR::prediction(scores, labels), "auc")@y.values[[1]]
}
}
# Compute the binary balanced accuracy
# @param TP The number of True Positive values
# @param FP The number of False Positive values
# @param TN The number of True Negative values
# @param FN The number of False Negative values
#
# @return Balanced accuracy value between 0 and 1
utiml_measure_binary_balacc <- function (TP, FP, TN, FN) {
(TP / (TP + FN) + TN / (TN + FP)) / 2
}
# Compute the binary precision
# @param TP The number of True Positive values
# @param FP The number of False Positive values
# @param TN The number of True Negative values
# @param FN The number of False Negative values
#
# @return Precision value between 0 and 1
utiml_measure_binary_precision <- function (TP, FP, TN, FN) {
ifelse(TP + FP == 0, 0, TP / (TP + FP))
}
# Compute the binary recall
# @param TP The number of True Positive values
# @param FP The number of False Positive values
# @param TN The number of True Negative values
# @param FN The number of False Negative values
#
# @return Recall value between 0 and 1
utiml_measure_binary_recall <- function (TP, FP, TN, FN) {
ifelse(TP + FN == 0, 0, TP / (TP + FN))
}
# Compute the binary F1 measure
# @param TP The number of True Positive values
# @param FP The number of False Positive values
# @param TN The number of True Negative values
# @param FN The number of False Negative values
#
# @return F1 measure value between 0 and 1
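#
# @examples
# \dontrun{
# # Hypothetical worked case: TP = 8, FP = 2, TN = 86, FN = 4 gives
# # precision = 0.8, recall = 0.67 and F1 = 2 * 0.8 * 0.67 / 1.47 = 0.73
# utiml_measure_binary_f1(8, 2, 86, 4)
# }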
utiml_measure_binary_f1 <- function (TP, FP, TN, FN) {
prec <- utiml_measure_binary_precision(TP, FP, TN, FN)
rec <- utiml_measure_binary_recall(TP, FP, TN, FN)
ifelse(prec + rec == 0, 0, 2 * prec * rec / (prec + rec))
}
# Constant label problem (clp): proportion of labels never predicted as negative
utiml_measure_clp <- function(mlconfmat, ...) {
  sum(mlconfmat$TNl + mlconfmat$FNl == 0) / ncol(mlconfmat$Y)
}
# Missing label prediction (mlp): proportion of labels never predicted as positive
utiml_measure_mlp <- function(mlconfmat, ...) {
  sum(mlconfmat$TPl + mlconfmat$FPl == 0) / ncol(mlconfmat$Y)
}
# Wrong label prediction (wlp): proportion of labels never correctly predicted
utiml_measure_wlp <- function(mlconfmat, ...) {
  sum(mlconfmat$TPl == 0) / ncol(mlconfmat$Y)
}
# MEASURES METHODS ----------------------------------------------------------
# Return the tree with the measure names
# @return list
utiml_all_measures_names <- function (){
list(
'all' = c(
"bipartition",
"ranking",
"label-problem"
),
'bipartition' = c(
"label-based",
"example-based"
),
'ranking' = c(
"one-error",
"coverage",
"ranking-loss",
"average-precision",
"margin-loss"
),
'label-based' = c(
"micro-based",
"macro-based"
),
'example-based' = c(
"subset-accuracy",
"hamming-loss",
"recall",
"precision",
"accuracy",
"F1"
),
'macro-based' = c(
"macro-AUC",
"macro-precision",
"macro-recall",
"macro-F1"
),
'micro-based' = c(
"micro-AUC",
"micro-precision",
"micro-recall",
"micro-F1"
),
"label-problem" = c(
"mlp",
"wlp",
"clp"
)
)
}
#' Return the names of the measures
#'
#' @param measures The group of measures (Default: "all").
#'
#' @return A character vector containing the measure names.
#'
#' @examples
#' utiml_measure_names()
#' utiml_measure_names("bipartition")
#' utiml_measure_names(c("micro-based", "macro-based"))
#'
#' @export
utiml_measure_names <- function (measures = c("all")) {
measures.names <- utiml_all_measures_names()
names <- unlist(lapply(measures, function (measure){
if (is.null(measures.names[[measure]])) {
measure
} else {
utiml_measure_names(measures.names[[measure]])
}
}))
unique(sort(names))
}
#' Return the names of all measures
#'
#' @family evaluation
#' @return A character vector containing the measure names.
#' @export
#'
#' @examples
#' multilabel_measures()
multilabel_measures <- function () {
sort(c(utiml_measure_names(), names(utiml_all_measures_names())))
}
#' Print a Multi-label Confusion Matrix
#' @param x The mlconfmat
#' @param ... ignored
#'
#' @return No return value, called to print the confusion matrix
#'
#' @export
print.mlconfmat <- function (x, ...) {
cat("Multi-label Confusion Matrix\n\n")
cat("Absolute Matrix:\n-------------------------------------\n")
TP <- sum(x$TPi)
FP <- sum(x$FPi)
FN <- sum(x$FNi)
TN <- sum(x$TNi)
cm <- matrix(c(TP, FN, TP + FN,
FP, TN, FP + TN,
TP + FP, FN + TN, TP + FP + FN + TN), ncol=3,
dimnames = list(c("Prediction_1", "Predicion_0", "TOTAL"),
c("Expected_1", "Expected_0", "TOTAL")))
print(cm)
cat("\nProportinal Matrix:\n-------------------------------------\n")
cm[1:2, 1:2] <- prop.table(cm[1:2, 1:2])
cm[1:2, 3] <- apply(cm[1:2, 1:2], 1, sum)
cm[3, ] <- apply(cm[1:2, ], 2, sum)
print(round(cm, 3))
cm <- cbind(x$TPl, x$FPl, x$FNl, x$TNl)
correct <- x$TPl + x$TNl
wrong <- x$FPl + x$FNl
cat("\nLabel Matrix\n-------------------------------------\n")
cm <- cbind(
cm, correct, wrong,
round(prop.table(cm, 1), 2),
round(prop.table(cbind(correct, wrong), 1), 2),
round(apply(x$R, 2, mean), 2),
round(apply(x$Fx, 2, mean), 2)
)
colnames(cm) <- c("TP", "FP", "FN", "TN", "Correct", "Wrong",
"%TP", "%FP", "%FN", "%TN", "%Correct", "%Wrong",
"MeanRanking", "MeanScore")
print(as.data.frame(cm))
}
#' Convert a multi-label Confusion Matrix to matrix
#' @param x The mlconfmat
#' @param ... passed to as.matrix
#'
#' @return A confusion matrix with TP, TN, FP and FN columns
#'
#' @export
as.matrix.mlconfmat <- function (x, ...) {
as.matrix(data.frame(TP=x$TPl, TN=x$TNl, FP=x$FPl, FN=x$FNl), ...)
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/evaluation.R
|
# Conditional value selection
#
# @param test an object which can be coerced to logical mode.
# @param yes object that will be returned when the test value is true.
# @param no object that will be returned when the test value is false
# @return The respective yes or no value, based on the test value. This is an
# alternative to if/else driven by a single logical value for choosing between
# lists, matrices and other composite objects.
#
# @examples
# \dontrun{
# utiml_ifelse(TRUE, dataframe1, dataframe2) ## dataframe1
# utiml_ifelse(length(my.list) > 10, my.list[1:10], my.list)
# }
utiml_ifelse <- function(test, yes, no) {
list(yes, no)[c(test, !test)][[1]]
}
# Select the suitable method: lapply or mclapply
#
# @param mylist a list to iterate.
# @param myfnc The function to be applied to each element of the mylist.
# @param utiml.cores The number of cores to use. If 1 use lapply otherwise use
# mclapply.
# @param utiml.seed A numeric value to set a seed to execute in parallel mode.
# @param ... Extra arguments to myfnc.
# @return A list with the results of the specified method.
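#
# @examples
# \dontrun{
# # A minimal sketch: both calls return list(a=1, b=4, c=9); the second one
# # runs in parallel when the parallel package is available.
# x <- c(a=1, b=2, c=3)
# utiml_lapply(x, function(v) v^2, utiml.cores = 1)
# utiml_lapply(x, function(v) v^2, utiml.cores = 2, utiml.seed = 1)
# }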
utiml_lapply <- function(mylist, myfnc, utiml.cores, utiml.seed = NA, ...) {
mylist <- as.list(mylist)
indexes <- seq_along(mylist)
names(indexes) <- names(mylist)
thefunc <- function (i, ...) {
myfnc(mylist[[i]], ...)
}
if (is.null(utiml.seed)) {
utiml.seed = NA
}
if (utiml.cores > 1 && requireNamespace("parallel", quietly = TRUE)) {
if (!is.na(utiml.seed)) {
RNGkind("L'Ecuyer-CMRG")
set.seed(utiml.seed)
}
result <- parallel::mclapply(indexes,
thefunc,
mc.cores = min(utiml.cores, length(mylist)),
mc.set.seed = TRUE,
...)
if (!is.na(utiml.seed)) {
RNGkind("default")
}
}
else {
if (!is.na(utiml.seed)) {
set.seed(utiml.seed)
}
result <- lapply(indexes, thefunc, ...)
}
result
}
# Internal normalize data function
#
# @param data a set of numbers.
# @param max.val The maximum value to normalize. If NULL use the max value
# present in the data. (default: \code{NULL})
# @param min.val The minimum value to normalize. If NULL use the min value
# present in the data (default: \code{NULL})
# @return The normalized data
#
# @examples
# \dontrun{
# utiml_normalize(c(1,2,3,4,5))
# #--> 0 0.25 0.5 0.75 1
#
# utiml_normalize(c(1,2,3,4,5), 10, 0)
# #--> 0.1 0.2 0.3 0.4 0.5
# }
utiml_normalize <- function(data, max.val = NULL, min.val = NULL) {
max.val <- ifelse(is.null(max.val), max(data, na.rm = TRUE), max.val)
min.val <- ifelse(is.null(min.val), min(data, na.rm = TRUE), min.val)
utiml_ifelse(max.val == min.val, data, (data - min.val)/(max.val - min.val))
}
# Convert the newdata to a data.frame or matrix
#
# @param newdata The data.frame or mldr data
# @return A data.frame or matrix containing only the predictive attributes
#
# @examples
# \dontrun{
# test <- emotions$dataset[,emotions$attributesIndexes]
# all(test == utiml_newdata(emotions)) # TRUE
# all(test == utiml_newdata(test)) # TRUE
# }
utiml_newdata <- function(newdata) {
UseMethod("utiml_newdata")
}
# @describeIn utiml_newdata Return the data in the original format
utiml_newdata.default <- function(newdata) {
newdata
}
# @describeIn utiml_newdata Return the dataset from the mldr dataset
utiml_newdata.mldr <- function(newdata) {
newdata$dataset[newdata$attributesIndexes]
}
# Rename the list using the names values or its own content
#
# @param X A list
# @param names The list names. If empty, the content of X is used
# @return A list with the new names
#
# @examples
# utiml_rename(c("a", "b", "c"))
# ## c(a="a", b="b", c="c")
#
# utiml_rename(c(1, 2, 3), c("a", "b", "c"))
# ## c(a=1, b=2, c=3)
utiml_rename <- function (X, names = NULL) {
names(X) <- utiml_ifelse(is.null(names), X, names)
X
}
# Define if two sets are equals independently of the order of the elements
#
# @param a A list
# @param b Other list
# @return Logical value where TRUE the sets are equals and FALSE otherwise.
# @examples
# \dontrun{
# utiml_is_equal_sets(c(1, 2, 3), c(3, 2, 1))
# ## TRUE
#
# utiml_is_equal_sets(c(1, 2, 3), c(1, 2, 3, 4))
# ## FALSE
# }
utiml_is_equal_sets <- function (a, b) {
length(setdiff(union(a, b), intersect(a, b))) == 0
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/internal.R
|
#' Baseline reference for multilabel classification
#'
#' Create a baseline model for multilabel classification.
#'
#' Baseline is a naive multi-label classifier that maximizes/minimizes a
#' specific measure without inducing a learning model. It uses the general
#' information about the labels in the training dataset to estimate the labels
#' in a test dataset.
#'
#' The following strategies are available:
#' \describe{
#'  \item{\code{general}}{Predict the k most frequent labels, where k is the
#'   integer closest to the label cardinality.}
#'  \item{\code{F1}}{Predict the most frequent labels that obtain the best F1
#'   measure on the training data. In the original paper, the authors use the
#'   least frequent labels.}
#' \item{\code{hamming-loss}}{Predict the labels that are associated with more
#' than 50\% of instances.}
#' \item{\code{subset-accuracy}}{Predict the most common labelset.}
#' \item{\code{ranking-loss}}{Predict a ranking based on the most frequent
#' labels.}
#' }
#'
#' @param mdata A mldr dataset used to train the binary models.
#' @param metric Defines the strategy used to predict the labels.
#'
#'  The possible values are: \code{'general'}, \code{'F1'},
#'  \code{'hamming-loss'}, \code{'subset-accuracy'} or \code{'ranking-loss'}.
#'  See the description for more details. (Default: \code{'general'}).
#' @param ... not used
#' @return An object of class \code{BASELINEmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{labels}{A vector with the label names.}
#' \item{predict}{A list with the labels that will be predicted.}
#' }
#' @references
#' Metz, J., Abreu, L. F. de, Cherman, E. A., & Monard, M. C. (2012). On the
#' Estimation of Predictive Evaluation Measure Baselines for Multi-label
#' Learning. In 13th Ibero-American Conference on AI (pp. 189-198).
#' Cartagena de Indias, Colombia.
#' @export
#'
#' @examples
#' model <- baseline(toyml)
#' pred <- predict(model, toyml)
#'
#' ## Change the metric
#' model <- baseline(toyml, "F1")
#' model <- baseline(toyml, "subset-accuracy")
baseline <- function (mdata, metric = c("general", "F1", "hamming-loss",
"subset-accuracy", "ranking-loss"), ...) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
metric <- match.arg(metric)
basefnc <- switch (metric,
"F1" = function (mdata){
labels <- order(mdata$labels$freq, decreasing = TRUE)
lbl <- which.max(lapply(seq(mdata$measures$num.labels), function (i, labels){
Zm <- matrix(0,
nrow=mdata$measures$num.instances,
ncol=mdata$measures$num.labels)
colnames(Zm) <- rownames(mdata$labels)
Zm[,labels[seq(i)]] <- 1
multilabel_evaluate(mdata, as.mlresult(Zm), "F1")
}, labels=labels))
rownames(mdata$labels)[labels[seq(lbl)]]
},
"general" = function (mdata){
freq <- order(mdata$labels$freq, decreasing=TRUE)
rownames(mdata$labels)[freq[seq(round(mdata$measures$cardinality,0))]]
},
"hamming-loss" = function (mdata){
if (any(mdata$labels$freq > 0.5)) {
rownames(mdata$labels)[mdata$labels$freq > 0.5]
} else {
#Avoid empty predictions, recommend only the most frequent label
rownames(mdata$labels[order(mdata$labels$freq, decreasing=TRUE),])[1]
}
},
"subset-accuracy" = function (mdata){
lbl <- as.numeric(unlist(strsplit(names(which.max(mdata$labelsets)), "")))
rownames(mdata$labels)[lbl == 1]
},
"ranking-loss" = function (mdata) {
rk <- order(mdata$labels$freq, decreasing=TRUE)
half <- mdata$labels$freq / 2
half + (0.49 - max(half[-rk[seq(round(mdata$measures$cardinality,0))]]))
}
)
blmodel <- list(
labels = rownames(mdata$labels),
metric = metric,
predict = basefnc(mdata),
call = match.call()
)
class(blmodel) <- "BASELINEmodel"
blmodel
}
#' Predict Method for BASELINE
#'
#' This function predicts values based upon a model trained by
#' \code{\link{baseline}}.
#'
#' @param object Object of class '\code{BASELINEmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... not used.
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=baseline]{Baseline}}
#' @export
#'
#' @examples
#' model <- baseline(toyml)
#' pred <- predict(model, toyml)
predict.BASELINEmodel <- function (object, newdata,
probability = getOption("utiml.use.probs", TRUE), ...){
# Validations
if (!is(object, "BASELINEmodel")) {
stop("First argument must be a BASELINEmodel object")
}
newdata <- utiml_newdata(newdata)
if (mode(object$predict) == "numeric") {
prediction <- matrix(rep(object$predict, nrow(newdata)), byrow = TRUE,
nrow=nrow(newdata), ncol=length(object$labels),
dimnames = list(rownames(newdata), object$labels))
} else {
prediction <- matrix(0, nrow=nrow(newdata), ncol=length(object$labels),
dimnames = list(rownames(newdata), object$labels))
prediction[, object$predict] <- 1
}
as.mlresult(prediction, probability = probability)
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_baseline.R
|
#' Binary Relevance for multi-label Classification
#'
#' Create a Binary Relevance model for multilabel classification.
#'
#' Binary Relevance is a simple and effective transformation method to predict
#' multi-label data. This is based on the one-versus-all approach to build a
#' specific model for each label.
#'
#' @family Transformation methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param ... Other arguments passed to the base algorithm for all subproblems
#' @param cores The number of cores to parallelize the training. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{BRmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{labels}{A vector with the label names.}
#' \item{models}{A list of the generated models, named by the label names.}
#' }
#' @references
#' Boutell, M. R., Luo, J., Shen, X., & Brown, C. M. (2004). Learning
#' multi-label scene classification. Pattern Recognition, 37(9), 1757-1771.
#' @export
#'
#' @examples
#' model <- br(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Use SVM as base algorithm
#' model <- br(toyml, "SVM")
#' pred <- predict(model, toyml)
#'
#' # Change the base algorithm and use 2 cores
#' model <- br(toyml[1:50], 'RF', cores = 2, seed = 123)
#'
#' # Set a parameter for all subproblems
#' model <- br(toyml, 'KNN', k=5)
#' }
br <- function(mdata, base.algorithm = getOption("utiml.base.algorithm", "SVM"),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
# BR Model class
brmodel <- list(labels = rownames(mdata$labels), call = match.call())
# Create models
labels <- utiml_rename(brmodel$labels)
brmodel$models <- utiml_lapply(labels, function (label) {
utiml_create_model(
utiml_prepare_data(
utiml_create_binary_data(mdata, label),
"mldBR", mdata$name, "br", base.algorithm
), ...
)
}, cores, seed)
class(brmodel) <- "BRmodel"
brmodel
}
#' Predict Method for Binary Relevance
#'
#' This function predicts values based upon a model trained by \code{\link{br}}.
#'
#' @param object Object of class '\code{BRmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the prediction. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=br]{Binary Relevance (BR)}}
#' @export
#'
#' @examples
#' model <- br(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Predict SVM scores
#' model <- br(toyml, "SVM")
#' pred <- predict(model, toyml)
#'
#' # Predict SVM bipartitions running in 2 cores
#' pred <- predict(model, toyml, probability = FALSE, cores = 2)
#'
#' # Passing a specific parameter for SVM predict algorithm
#' pred <- predict(model, toyml, na.action = na.fail)
#' }
predict.BRmodel <- function(object, newdata,
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "BRmodel")) {
stop("First argument must be an BRmodel object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
# Create models
newdata <- utiml_newdata(newdata)
labels <- utiml_rename(object$labels)
predictions <- utiml_lapply(labels, function (label) {
utiml_predict_binary_model(object$models[[label]], newdata, ...)
}, cores, seed)
utiml_predict(predictions, probability)
}
#' Print BR model
#' @param x The br model
#' @param ... ignored
#'
#' @return No return value, called to print the model's details
#'
#' @export
print.BRmodel <- function(x, ...) {
cat("Binary Relevance Model\n\nCall:\n")
print(x$call)
cat("\n", length(x$labels), "Models (labels):\n")
print(x$labels)
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_br.R
|
#' BR+ or BRplus for multi-label Classification
#'
#' Create a BR+ classifier to predict multi-label data. BR+ is a simple
#' approach whose main idea is to augment the feature space of each binary
#' classifier with the other labels, enabling the classifiers to discover
#' existing label dependencies by themselves.
#'
#' This implementation provides the different strategies proposed in the
#' original paper to predict the final set of labels for unlabeled examples.
#'
#' @family Transformation methods
#' @family Stacking methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param ... Other arguments passed to the base algorithm for all subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{BRPmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{freq}{The label frequencies to use with the 'Stat' strategy}
#'    \item{initial}{The BR model used to predict the initial values of the
#'      labels}
#' \item{models}{A list of final models named by the label names.}
#' }
#' @references
#' Cherman, E. A., Metz, J., & Monard, M. C. (2012). Incorporating label
#' dependency into the binary relevance framework for multi-label
#' classification. Expert Systems with Applications, 39(2), 1647-1655.
#' @export
#'
#' @examples
#' # Use SVM as base algorithm
#' model <- brplus(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Use Random Forest as base algorithm and 2 cores
#' model <- brplus(toyml, 'RF', cores = 2, seed = 123)
#' }
brplus <- function(mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
# BRplus Model class
brpmodel <- list(labels = rownames(mdata$labels), call = match.call())
freq <- mdata$labels$freq
names(freq) <- brpmodel$labels
brpmodel$freq <- sort(freq)
brpmodel$initial <- br(mdata, base.algorithm, ..., cores = cores, seed = seed)
labeldata <- as.data.frame(mdata$dataset[mdata$labels$index])
for (i in seq(ncol(labeldata))) {
labeldata[, i] <- factor(labeldata[, i], levels=c(0, 1))
}
labels <- utiml_rename(seq(mdata$measures$num.labels), brpmodel$labels)
brpmodel$models <- utiml_lapply(labels, function(li) {
basedata <- utiml_create_binary_data(mdata, brpmodel$labels[li],
labeldata[-li])
dataset <- utiml_prepare_data(basedata, "mldBRP", mdata$name, "brplus",
base.algorithm)
utiml_create_model(dataset, ...)
}, cores, seed)
class(brpmodel) <- "BRPmodel"
brpmodel
}
#' Predict Method for BR+ (brplus)
#'
#' This function predicts values based upon a model trained by \code{brplus}.
#'
#' The strategies to estimate the values of the new features are separated
#' into two groups:
#' \describe{
#'  \item{No Update (\code{NU})}{This uses the initial BR predictions for all
#'   labels. The name indicates that no modification is made to the initial
#'   estimates of the augmented features during the prediction phase}
#'  \item{With Update}{This strategy updates the initial predictions as the
#'   final predictions are computed. There are three ways to define the order
#'   of the label sequence:
#'   \describe{
#'    \item{Specific order (\code{Ord})}{The order is defined by the user and
#'     requires an extra argument called \code{order}.}
#'    \item{Static order (\code{Stat})}{Uses the frequency of single labels in
#'     the training set to define the sequence, where the least frequent
#'     labels are predicted first}
#'    \item{Dynamic order (\code{Dyn})}{Takes into account the confidence of
#'     the initial prediction for each label to define the sequence, where the
#'     labels predicted with less confidence are updated first.}
#' }
#' }
#' }
#'
#' @param object Object of class '\code{BRPmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param strategy The strategy prefix to determine how to estimate the values
#' of the augmented features of unlabeled examples.
#'
#' The possible values are: \code{'Dyn'}, \code{'Stat'}, \code{'Ord'} or
#' \code{'NU'}. See the description for more details. (Default: \code{'Dyn'}).
#' @param order The label sequence used to update the initial labels results
#' based on the final results. This argument is used only when the
#' \code{strategy = 'Ord'} (Default: \code{list()})
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#'   subproblems.
#' @param cores The number of cores to parallelize the prediction. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @references
#' Cherman, E. A., Metz, J., & Monard, M. C. (2012). Incorporating label
#' dependency into the binary relevance framework for multi-label
#' classification. Expert Systems with Applications, 39(2), 1647-1655.
#' @seealso \code{\link[=brplus]{BR+}}
#' @export
#'
#' @examples
#' # Predict SVM scores
#' model <- brplus(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Predict SVM bipartitions and change the method to use No Update strategy
#' pred <- predict(model, toyml, strategy = 'NU', probability = FALSE)
#'
#' # Predict using a random sequence to update the labels
#' labels <- sample(rownames(toyml$labels))
#' pred <- predict(model, toyml, strategy = 'Ord', order = labels)
#'
#' # Passing a specific parameter for SVM predict method
#' pred <- predict(model, toyml, na.action = na.fail)
#' }
predict.BRPmodel <- function(object, newdata,
strategy = c("Dyn", "Stat", "Ord", "NU"),
order = list(),
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "BRPmodel")) {
stop("First argument must be an BRPmodel object")
}
strategy <- match.arg(strategy)
labels <- object$labels
if (strategy == "Ord") {
if (!utiml_is_equal_sets(order, labels)) {
stop("Invalid order (all labels must be on the chain)")
}
}
if (cores < 1) {
stop("Cores must be a positive value")
}
if (!anyNA(seed)) {
set.seed(seed)
}
newdata <- utiml_newdata(newdata)
initial.preds <- predict.BRmodel(object$initial, newdata, probability=FALSE,
..., cores=cores, seed=seed)
labeldata <- as.data.frame(as.bipartition(initial.preds))
for (i in seq(ncol(labeldata))) {
labeldata[, i] <- factor(labeldata[, i], levels=c(0, 1))
}
if (strategy == "NU") {
indices <- utiml_rename(seq_along(labels), labels)
predictions <- utiml_lapply(indices, function(li) {
utiml_predict_binary_model(object$models[[li]],
cbind(newdata, labeldata[, -li]), ...)
}, cores, seed)
}
else {
order <- switch (strategy,
Dyn = names(sort(apply(as.probability(initial.preds), 2, mean))),
Stat = names(object$freq),
Ord = order
)
predictions <- list()
for (labelname in order) {
other.labels <- !labels %in% labelname
model <- object$models[[labelname]]
data <- cbind(newdata, labeldata[, other.labels, drop = FALSE])
predictions[[labelname]] <- utiml_predict_binary_model(model, data, ...)
labeldata[, labelname] <- factor(predictions[[labelname]]$bipartition,
levels=c(0, 1))
}
}
utiml_predict(predictions[labels], probability)
}
#' Print BRP model
#' @param x The brp model
#' @param ... ignored
#'
#' @return No return value, called for print model's detail
#'
#' @export
print.BRPmodel <- function(x, ...) {
cat("Classifier BRplus (also called BR+)\n\nCall:\n")
print(x$call)
cat("\n", length(x$models), "Models (labels):\n")
print(names(x$models))
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_brplus.R
|
#' Classifier Chains for multi-label Classification
#'
#' Create a Classifier Chains model for multilabel classification.
#'
#' Classifier Chains is a transformation method based on Binary Relevance to
#' predict multi-label data. It uses the one-versus-all approach to build a
#' specific model for each label. It differs from BR in that it extends the
#' attribute space with the 0/1 label relevances of all previous classifiers,
#' forming a classifier chain.
#'
#' @family Transformation methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param chain A vector with the label names to define the chain order. If
#' empty the chain is the default label sequence of the dataset. (Default:
#' \code{NA})
#' @param ... Other arguments passed to the base algorithm for all subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{CCmodel} containing the set of fitted
#' models, including: \describe{
#' \item{chain}{A vector with the chain order.}
#' \item{labels}{A vector with the label names in expected order.}
#' \item{models}{A list of models named by the label names.}
#' }
#' @references
#' Read, J., Pfahringer, B., Holmes, G., & Frank, E. (2011). Classifier chains
#' for multi-label classification. Machine Learning, 85(3), 333-359.
#'
#' Read, J., Pfahringer, B., Holmes, G., & Frank, E. (2009). Classifier Chains
#' for Multi-label Classification. Machine Learning and Knowledge Discovery
#' in Databases, Lecture Notes in Computer Science, 5782, 254-269.
#' @export
#'
#' @examples
#' model <- cc(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Use a specific chain with C5.0 classifier
#' mychain <- sample(rownames(toyml$labels))
#' model <- cc(toyml, 'C5.0', mychain)
#'
#' # Set a specific parameter
#' model <- cc(toyml, 'KNN', k=5)
#'
#' #Run with multiple-cores
#' model <- cc(toyml, 'RF', cores = 2, seed = 123)
#' }
cc <- function(mdata, base.algorithm = getOption("utiml.base.algorithm", "SVM"),
chain = NA, ..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
labels <- rownames(mdata$labels)
chain <- utiml_ifelse(anyNA(chain), labels, chain)
if (!utiml_is_equal_sets(chain, labels)) {
stop("Invalid chain (all labels must be on the chain)")
}
# CC Model class
ccmodel <- list(labels = labels, chain = chain, call = match.call())
# Create models
basedata <- mdata$dataset[mdata$attributesIndexes]
labeldata <- as.data.frame(mdata$dataset[mdata$labels$index][chain])
for (i in seq(ncol(labeldata))) {
labeldata[, i] <- factor(labeldata[, i], levels=c(0, 1))
}
chain.order <- utiml_rename(seq(mdata$measures$num.labels), chain)
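  # For the i-th label in the chain, the training set is the original input
  # attributes plus the 0/1 columns of the labels earlier in the chain; the
  # i-th label itself is the last column and is used as the target class.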
ccmodel$models <- utiml_lapply(chain.order, function(lidx) {
utiml_create_model(
utiml_prepare_data(
cbind(basedata, labeldata[seq(lidx)]),
"mldCC", mdata$name, "cc", base.algorithm, chain.order = lidx
), ...)
}, cores, seed)
class(ccmodel) <- "CCmodel"
ccmodel
}
#' Predict Method for Classifier Chains
#'
#' This function predicts values based upon a model trained by \code{cc}.
#'
#' @param object Object of class '\code{CCmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores Ignored because this method does not support multi-core.
#' @param seed An optional integer used to set the seed.
#' (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=cc]{Classifier Chains (CC)}}
#' @note The Classifier Chains prediction cannot be parallelized
#' @export
#'
#' @examples
#' model <- cc(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Predict SVM bipartitions
#' pred <- predict(model, toyml, prob = FALSE)
#'
#' # Passing a specific parameter to the SVM predict algorithm
#' pred <- predict(model, toyml, na.action = na.fail)
#' }
predict.CCmodel <- function(object, newdata,
probability = getOption("utiml.use.probs", TRUE),
..., cores = NULL,
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "CCmodel")) {
stop("First argument must be an CCmodel object")
}
if (!anyNA(seed)) {
set.seed(seed)
}
newdata <- list(utiml_newdata(newdata))
predictions <- list()
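  # Walk the chain sequentially: each model receives the original attributes
  # plus the bipartitions already predicted for the previous labels in the
  # chain, which is why this prediction step cannot be parallelized.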
for (label in object$chain) {
predictions[[label]] <- utiml_predict_binary_model(
object$models[[label]], do.call(cbind, newdata), ...)
newdata[[label]] <- factor(predictions[[label]]$bipartition, levels=c(0, 1))
}
utiml_predict(predictions[object$labels], probability)
}
#' Print CC model
#' @param x The cc model
#' @param ... ignored
#'
#' @return No return value, called for print model's detail
#'
#' @export
print.CCmodel <- function(x, ...) {
cat("Classifier Chains Model\n\nCall:\n")
print(x$call)
cat("\nChain: (", length(x$chain), "labels )\n")
print(paste(x$chain, collapse =' -> '))
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_cc.R
|
#' Calibrated Label Ranking (CLR) for multi-label Classification
#'
#' Create a CLR model for multilabel classification.
#'
#' CLR is an extension of label ranking that incorporates the calibrated
#' scenario. The introduction of an artificial calibration label
#' separates the relevant from the irrelevant labels.
#'
#' @family Transformation methods
#' @family Pairwise methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param ... Other arguments passed to the base algorithm for all subproblems
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{RPCmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{labels}{A vector with the label names.}
#' \item{rpcmodel}{A RPC model.}
#'  \item{brmodel}{A BR model used to calibrate the labels.}
#' }
#' @references
#' Brinker, K., Furnkranz, J., & Hullermeier, E. (2006). A unified model for
#'  multilabel classification and ranking. In Proceedings of the ECAI 2006:
#'  17th European Conference on Artificial Intelligence. p. 489-493.
#'
#'  Furnkranz, J., Hullermeier, E., Loza Mencia, E., & Brinker, K. (2008).
#' Multilabel classification via calibrated label ranking.
#' Machine Learning, 73(2), 133-153.
#' @export
#'
#' @examples
#' model <- clr(toyml, "RANDOM")
#' pred <- predict(model, toyml)
clr <- function(mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"), ...,
cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
# CLR Model class
clrmodel <- list(labels = rownames(mdata$labels), call = match.call())
# Create pairwise models
clrmodel$rpcmodel <- rpc(mdata, base.algorithm, ..., cores=cores, seed=seed)
# Create calibrated models
clrmodel$brmodel <- br(mdata, base.algorithm, ..., cores=cores, seed=seed)
class(clrmodel) <- "CLRmodel"
clrmodel
}
#' Predict Method for CLR
#'
#' This function predicts values based upon a model trained by
#' \code{\link{clr}}.
#'
#' @param object Object of class '\code{CLRmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=br]{Binary Relevance (BR)}}
#' @export
#'
#' @examples
#' model <- clr(toyml, "RANDOM")
#' pred <- predict(model, toyml)
predict.CLRmodel <- function(object, newdata,
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "CLRmodel")) {
stop("First argument must be an CLRmodel object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
# Predict RPC models
predictions <- as.matrix(predict.RPCmodel(object$rpcmodel, newdata, TRUE,
..., cores=cores, seed=seed))
previous.value <- getOption("utiml.empty.prediction")
options(utiml.empty.prediction = TRUE)
calibrated <- as.matrix(predict.BRmodel(object$brmodel, newdata, FALSE, ...,
cores=cores, seed=seed))
options(utiml.empty.prediction = previous.value)
# Compute votes
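  # l0 is, per instance, the fraction of labels that the calibrated BR models
  # predict as negative; a label is set as relevant when its RPC score (vote
  # share) reaches this calibrated cut-off.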
l0 <- (length(object$labels) - rowSums(calibrated)) / length(object$labels)
bipartitions <- apply(predictions >= l0, 2, as.numeric)
multilabel_prediction(bipartitions, predictions, probability)
}
#' Print CLR model
#' @param x The clr model
#' @param ... ignored
#'
#' @return No return value, called for print model's detail
#'
#' @export
print.CLRmodel <- function(x, ...) {
cat("CLR Model\n\nCall:\n")
print(x$call)
cat("\n", length(x$rpcmodel$models) + length(x$labels), " pairwise models\n", sep='')}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_clr.R
|
#' Dependent Binary Relevance (DBR) for multi-label Classification
#'
#' Create a DBR classifier to predict multi-label data. This is a simple
#' approach that enables the binary classifiers to discover existing label
#' dependencies by themselves. The idea of DBR is exactly the same as in BR+
#' (the training method is the same, except for the argument
#' \code{estimate.models}, which indicates whether the estimation models must
#' be created).
#'
#' @family Transformation methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param estimate.models Logical value indicating whether it is necessary to
#'  build a Binary Relevance classifier for the estimation process. The default
#'  implementation uses BR as the estimator; however, when another classifier
#'  is preferred, use the value \code{FALSE} to skip this step.
#'  (Default: \code{TRUE}).
#' @param ... Other arguments passed to the base algorithm for all subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{DBRmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{labels}{A vector with the label names.}
#' \item{estimation}{The BR model to estimate the values for the labels.
#' Only when the \code{estimate.models = TRUE}.}
#' \item{models}{A list of final models named by the label names.}
#' }
#' @references
#' Montanes, E., Senge, R., Barranquero, J., Ramon Quevedo, J., Jose Del Coz,
#' J., & Hullermeier, E. (2014). Dependent binary relevance models for
#' multi-label classification. Pattern Recognition, 47(3), 1494-1508.
#' @seealso \code{\link[=rdbr]{Recursive Dependent Binary Relevance}}
#' @export
#'
#' @examples
#' model <- dbr(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Use Random Forest as base algorithm and 2 cores
#' model <- dbr(toyml, 'RF', cores = 2)
#' }
dbr <- function(mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"),
estimate.models = TRUE, ...,
cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata,"mldr")) {
stop("First argument must be an mldr object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
# DBR Model class
dbrmodel <- list(labels = rownames(mdata$labels), call = match.call())
if (estimate.models) {
dbrmodel$estimation <- br(mdata, base.algorithm, ...,
cores=cores, seed=seed)
}
# Create models
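  # Each binary model is trained on the original attributes augmented with the
  # true 0/1 values of all the other labels; at prediction time these columns
  # are filled with the estimates produced by the BR model (or by a
  # user-supplied estimative matrix).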
labeldata <- as.data.frame(mdata$dataset[mdata$labels$index])
for (i in seq(ncol(labeldata))) {
labeldata[, i] <- factor(labeldata[, i], levels=c(0, 1))
}
labels <- utiml_rename(seq(dbrmodel$labels), dbrmodel$labels)
dbrmodel$models <- utiml_lapply(labels, function(li) {
utiml_create_model(
utiml_prepare_data(
utiml_create_binary_data(mdata, dbrmodel$labels[li], labeldata[-li]),
"mldDBR", mdata$name, "dbr", base.algorithm
), ...
)
}, cores, seed)
class(dbrmodel) <- "DBRmodel"
dbrmodel
}
#' Predict Method for DBR
#'
#' This function predicts values based upon a model trained by \code{dbr}.
#' In general this method is a restricted version of
#' \code{\link{predict.BRPmodel}} using the 'NU' strategy.
#'
#' As a new feature, it is possible to use another multi-label classifier to
#' predict the estimated values of each label. To do this, use the
#' \code{estimative} argument to inform the result of another multi-label
#' algorithm.
#'
#' @param object Object of class '\code{DBRmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param estimative A matrix containing the bipartition result of other
#' multi-label classification algorithm or an mlresult object with the
#' predictions.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @references
#' Montanes, E., Senge, R., Barranquero, J., Ramon Quevedo, J., Jose Del Coz,
#' J., & Hullermeier, E. (2014). Dependent binary relevance models for
#' multi-label classification. Pattern Recognition, 47(3), 1494-1508.
#' @seealso \code{\link[=dbr]{Dependent Binary Relevance (DBR)}}
#' @export
#'
#' @examples
#' \donttest{
#' # Predict SVM scores
#' model <- dbr(toyml)
#' pred <- predict(model, toyml)
#'
#' # Passing a specific parameter to the SVM predict algorithm
#' pred <- predict(model, toyml, na.action = na.fail)
#'
#' # Using another classifier (EBR) to make the label estimates
#' estimative <- predict(ebr(toyml), toyml)
#' model <- dbr(toyml, estimate.models = FALSE)
#' pred <- predict(model, toyml, estimative = estimative)
#' }
predict.DBRmodel <- function(object, newdata, estimative = NULL,
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object,"DBRmodel")) {
stop("First argument must be an DBRmodel object")
}
if (is.null(object$estimation) && is.null(estimative)) {
stop("The model requires an estimative matrix")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
newdata <- utiml_newdata(newdata)
if (is.null(estimative)) {
estimative <- predict.BRmodel(object$estimation, newdata,
probability = FALSE, ...,
cores = cores, seed = seed)
}
if (is(estimative, 'mlresult')) {
estimative <- as.bipartition(estimative)
}
estimative <- as.data.frame(estimative)
for (i in seq(ncol(estimative))) {
estimative[,i] <- factor(estimative[,i], levels=c(0, 1))
}
labels <- utiml_rename(seq(object$labels), object$labels)
predictions <- utiml_lapply(labels, function(li) {
utiml_predict_binary_model(object$models[[li]],
cbind(newdata, estimative[, -li]),
...)
}, cores, seed)
utiml_predict(predictions, probability)
}
#' Print DBR model
#' @param x The dbr model
#' @param ... ignored
#'
#' @return No return value, called for print model's detail
#'
#' @export
print.DBRmodel <- function(x, ...) {
cat("Classifier DBR\n\nCall:\n")
print(x$call)
cat("\n", length(x$models), "Models (labels):\n")
print(names(x$models))
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_dbr.R
|
#' Ensemble of Binary Relevance for multi-label Classification
#'
#' Create an Ensemble of Binary Relevance model for multilabel classification.
#'
#' This model is composed of a set of Binary Relevance models. Binary Relevance
#' is a simple and effective transformation method to predict multi-label data.
#'
#' @family Transformation methods
#' @family Ensemble methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param m The number of Binary Relevance models used in the ensemble.
#' (Default: 10)
#' @param subsample A value between 0.1 and 1 to determine the percentage of
#' training instances that must be used for each classifier. (Default: 0.75)
#' @param attr.space A value between 0.1 and 1 to determine the percentage of
#' attributes that must be used for each classifier. (Default: 0.50)
#' @param replacement Boolean value to define if use sampling with replacement
#' to create the data of the models of the ensemble. (Default: TRUE)
#' @param ... Other arguments passed to the base algorithm for all subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{EBRmodel} containing the set of fitted
#' BR models, including:
#' \describe{
#' \item{models}{A list of BR models.}
#' \item{nrow}{The number of instances used in each training dataset.}
#' \item{ncol}{The number of attributes used in each training dataset.}
#'   \item{rounds}{The number of iterations.}
#' }
#' @references
#' Read, J., Pfahringer, B., Holmes, G., & Frank, E. (2011). Classifier
#' chains for multi-label classification. Machine Learning, 85(3), 333-359.
#'
#' Read, J., Pfahringer, B., Holmes, G., & Frank, E. (2009).
#' Classifier Chains for Multi-label Classification. Machine Learning and
#' Knowledge Discovery in Databases, Lecture Notes in Computer Science,
#' 5782, 254-269.
#' @note If you want to reproduce the same classification and obtain the same
#'  result, it will be necessary to set the flag utiml.mc.set.seed to FALSE.
#' @export
#'
#' @examples
#' model <- ebr(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Use C5.0 with 90% of instances and only 5 rounds
#' model <- ebr(toyml, 'C5.0', m = 5, subsample = 0.9)
#'
#' # Use 75% of attributes
#' model <- ebr(toyml, attr.space = 0.75)
#'
#' # Run on 2 cores and define a specific seed
#' model1 <- ebr(toyml, cores=2, seed = 312)
#' }
ebr <- function(mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"),
m = 10, subsample = 0.75, attr.space = 0.5, replacement = TRUE,
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
if (m < 2) {
stop("The number of iterations (m) must be greater than 1")
}
if (subsample < 0.1 || subsample > 1) {
stop("The subset of training instances must be between 0.1 and 1 inclusive")
}
if (attr.space <= 0.1 || attr.space > 1) {
stop(paste("The attribbute space of training instances must be between ",
"0.1 and 1 inclusive"))
}
if (cores < 1) {
stop("Cores must be a positive value")
}
# EBR Model class
ebrmodel <- list(rounds = m, call = match.call())
ebrmodel$nrow <- ceiling(mdata$measures$num.instances * subsample)
ebrmodel$ncol <- ceiling(length(mdata$attributesIndexes) * attr.space)
ebrmodel$cardinality <- mdata$measures$cardinality
if (!anyNA(seed)) {
set.seed(seed)
}
idx <- lapply(seq(m), function(iteration) {
list(
rows = sample(mdata$measures$num.instances, ebrmodel$nrow, replacement),
cols = sample(mdata$attributesIndexes, ebrmodel$ncol)
)
})
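  # Bagging-style ensemble: each member is a full BR model trained on its own
  # random sample of instances (with or without replacement) and its own
  # random subset of the input attributes; the selected attribute names are
  # stored so that prediction can use the same subset.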
ebrmodel$models <- lapply(seq(m), function(iteration) {
ndata <- create_subset(mdata, idx[[iteration]]$rows, idx[[iteration]]$cols)
brmodel <- br(ndata, base.algorithm, ..., cores = cores, seed = seed)
brmodel$attrs <- colnames(ndata$dataset[, ndata$attributesIndexes])
rm(ndata)
brmodel
})
class(ebrmodel) <- "EBRmodel"
ebrmodel
}
#' Predict Method for Ensemble of Binary Relevance
#'
#' This method predicts values based upon a model trained by \code{\link{ebr}}.
#'
#' @param object Object of class '\code{EBRmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param vote.schema Define the way that ensemble must compute the predictions.
#' The default valid options are: c("avg", "maj", "max", "min"). If \code{NULL}
#' then all predictions are returned. (Default: \code{'maj'})
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=ebr]{Ensemble of Binary Relevance (EBR)}} \code{
#' \link[=compute_multilabel_predictions]{Compute Multi-label Predictions}}
#' @export
#'
#' @examples
#' \donttest{
#' # Predict SVM scores
#' model <- ebr(toyml)
#' pred <- predict(model, toyml)
#'
#' # Predict SVM bipartitions running in 2 cores
#' pred <- predict(model, toyml, prob = FALSE, cores = 2)
#'
#' # Return the classes with the highest score
#' pred <- predict(model, toyml, vote = 'max')
#' }
predict.EBRmodel <- function(object, newdata, vote.schema = "maj",
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "EBRmodel")) {
stop("First argument must be an EBRmodel object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
utiml_ensemble_check_voteschema(vote.schema)
newdata <- utiml_newdata(newdata)
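  # Each BR member predicts using only the attribute subset it was trained on;
  # the member predictions are then combined by the chosen vote schema and,
  # when a schema is given, thresholded by the label cardinality observed in
  # the training data.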
allpreds <- lapply(seq(object$models), function(imodel) {
brmodel <- object$models[[imodel]]
predict.BRmodel(brmodel, newdata[, brmodel$attrs], ...,
cores = cores, seed = seed)
})
prediction <- utiml_predict_ensemble(allpreds, vote.schema, probability)
if (!is.null(vote.schema)) {
prediction <- lcard_threshold(prediction, object$cardinality, probability)
}
prediction
}
#' Print EBR model
#' @param x The ebr model
#' @param ... ignored
#'
#' @return No return value, called for print model's detail
#'
#' @export
print.EBRmodel <- function(x, ...) {
cat("Ensemble of Binary Relevance Model\n\nCall:\n")
print(x$call)
cat("\nDetails:")
cat("\n ", x$rounds, "Iterations")
cat("\n ", x$nrow, "Instances")
cat("\n ", x$ncol, "Attributes\n")
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_ebr.R
|
#' Ensemble of Classifier Chains for multi-label Classification
#'
#' Create an Ensemble of Classifier Chains model for multilabel classification.
#'
#' This model is composed of a set of Classifier Chains models. Classifier
#' Chains is a transformation method based on Binary Relevance to predict
#' multi-label data. It differs from BR in that it extends the attribute space
#' with the 0/1 label relevances of all previous classifiers, forming a
#' classifier chain.
#'
#' @family Transformation methods
#' @family Ensemble methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param m The number of Classifier Chains models used in the ensemble.
#' (Default: 10)
#' @param subsample A value between 0.1 and 1 to determine the percentage of
#' training instances that must be used for each classifier. (Default: 0.75)
#' @param attr.space A value between 0.1 and 1 to determine the percentage of
#' attributes that must be used for each classifier. (Default: 0.50)
#' @param replacement Boolean value to define if use sampling with replacement
#' to create the data of the models of the ensemble. (Default: TRUE)
#' @param ... Other arguments passed to the base algorithm for all subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{ECCmodel} containing the set of fitted
#' CC models, including:
#' \describe{
#'   \item{rounds}{The number of iterations}
#' \item{models}{A list of BR models.}
#' \item{nrow}{The number of instances used in each training dataset}
#' \item{ncol}{The number of attributes used in each training dataset}
#' }
#' @references
#' Read, J., Pfahringer, B., Holmes, G., & Frank, E. (2011). Classifier
#' chains for multi-label classification. Machine Learning, 85(3), 333-359.
#'
#' Read, J., Pfahringer, B., Holmes, G., & Frank, E. (2009).
#' Classifier Chains for Multi-label Classification. Machine Learning and
#' Knowledge Discovery in Databases, Lecture Notes in Computer Science,
#' 5782, 254-269.
#' @note If you want to reproduce the same classification and obtain the same
#'  result, it will be necessary to set the flag utiml.mc.set.seed to FALSE.
#' @export
#'
#' @examples
#' # Use all default values
#' model <- ecc(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Use C5.0 with 100% of instances and only 5 rounds
#' model <- ecc(toyml, 'C5.0', m = 5, subsample = 1)
#'
#' # Use 75% of attributes
#' model <- ecc(toyml, attr.space = 0.75)
#'
#' # Run on 2 cores and define a specific seed
#' model1 <- ecc(toyml, cores=2, seed=123)
#' }
ecc <- function(mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"),
m = 10, subsample = 0.75, attr.space = 0.5, replacement = TRUE,
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
if (m <= 1) {
stop("The number of iterations (m) must be greater than 1")
}
if (subsample < 0.1 || subsample > 1) {
stop("The subset of training instances must be between 0.1 and 1 inclusive")
}
if (attr.space <= 0.1 || attr.space > 1) {
stop("The attribbute space of training instances must be between 0.1 and 1 inclusive")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
# ECC Model class
eccmodel <- list(rounds = m, call = match.call())
eccmodel$nrow <- ceiling(mdata$measures$num.instances * subsample)
eccmodel$ncol <- ceiling(length(mdata$attributesIndexes) * attr.space)
eccmodel$cardinality <- mdata$measures$cardinality
if (!anyNA(seed)) {
set.seed(seed)
}
idx <- lapply(seq(m), function(iteration) {
list(
rows = sample(mdata$measures$num.instances, eccmodel$nrow, replacement),
cols = sample(mdata$attributesIndexes, eccmodel$ncol),
chain = sample(rownames(mdata$labels))
)
})
eccmodel$models <- lapply(seq(m), function(iteration) {
ndata <- create_subset(mdata, idx[[iteration]]$rows, idx[[iteration]]$cols)
chain <- idx[[iteration]]$chain
ccmodel <- cc(ndata, base.algorithm, chain, ..., cores = cores, seed = seed)
ccmodel$attrs <- colnames(ndata$dataset[, ndata$attributesIndexes])
rm(ndata)
ccmodel
})
class(eccmodel) <- "ECCmodel"
eccmodel
}
#' Predict Method for Ensemble of Classifier Chains
#'
#' This method predicts values based upon a model trained by \code{\link{ecc}}.
#'
#' @param object Object of class '\code{ECCmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param vote.schema Define the way that ensemble must compute the predictions.
#' The default valid options are: c("avg", "maj", "max", "min"). If \code{NULL}
#' then all predictions are returned. (Default: \code{'maj'})
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=ecc]{Ensemble of Classifier Chains (ECC)}}
#' @export
#'
#' @examples
#' \donttest{
#' # Predict SVM scores
#' model <- ecc(toyml)
#' pred <- predict(model, toyml)
#'
#' # Predict SVM bipartitions running in 2 cores
#' pred <- predict(model, toyml, probability = FALSE, cores = 2)
#'
#' # Return the classes with the highest score
#' pred <- predict(model, toyml, vote.schema = 'max')
#' }
predict.ECCmodel <- function(object, newdata, vote.schema = "maj",
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "ECCmodel")) {
stop("First argument must be an ECCmodel object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
utiml_ensemble_check_voteschema(vote.schema)
newdata <- utiml_newdata(newdata)
allpreds <- utiml_lapply(object$models, function(ccmodel) {
predict.CCmodel(ccmodel, newdata[, ccmodel$attrs], ...)
}, cores, seed)
prediction <- utiml_predict_ensemble(allpreds, vote.schema, probability)
if (!is.null(vote.schema)) {
prediction <- lcard_threshold(prediction, object$cardinality, probability)
}
prediction
}
#' Print ECC model
#' @param x The ecc model
#' @param ... ignored
#'
#' @return No return value, called for print model's detail
#'
#' @export
print.ECCmodel <- function(x, ...) {
cat("Ensemble of Classifier Chains Model\n\nCall:\n")
print(x$call)
cat("\nDetails:")
cat("\n ", x$rounds, "Iterations")
cat("\n ", x$nrow, "Instances")
cat("\n ", x$ncol, "Attributes\n")
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_ecc.R
|
#' Ensemble of Pruned Set for multi-label Classification
#'
#' Create an Ensemble of Pruned Set model for multilabel classification.
#'
#' Pruned Set (PS) is a multi-class transformation that removes the less common
#' labelsets to predict multi-label data. The ensemble is created with different
#' subsets of the original multi-label data.
#'
#' @family Transformation methods
#' @family Powerset
#' @family Ensemble methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param m The number of Pruned Set models used in the ensemble.
#' @param subsample A value between 0.1 and 1 to determine the percentage of
#' training instances that must be used for each classifier. (Default: 0.63)
#' @param p Number of instances to prune. All labelsets that occur p times or
#'  less in the training data are removed. (Default: 3)
#' @param strategy The strategy (A or B) for processing infrequent labelsets.
#' (Default: A).
#' @param b The number used by the strategy for processing infrequent labelsets.
#' @param ... Other arguments passed to the base algorithm for all subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. (Default:
#' \code{options("utiml.seed", NA)})
#' @return An object of class \code{EPSmodel} containing the set of fitted
#' models, including:
#' \describe{
#'   \item{rounds}{The number of iterations}
#' \item{models}{A list of PS models.}
#' }
#' @references
#' Read, J. (2008). A pruned problem transformation method for multi-label
#' classification. In Proceedings of the New Zealand Computer Science Research
#' Student Conference (pp. 143-150).
#' @export
#'
#' @examples
#' model <- eps(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' ##Change default configurations
#' model <- eps(toyml, "RF", m=15, subsample=0.4, p=4, strategy="B", b=1)
#' }
eps <- function (mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"),
m = 10, subsample = 0.75, p = 3, strategy = c("A", "B"), b = 2,
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
if (m <= 1) {
stop("The number of iterations (m) must be greater than 1")
}
if (subsample < 0.1 || subsample > 1) {
stop("The subset of training instances must be between 0.1 and 1 inclusive")
}
if (p < 1) {
stop("The prunning value must be greater than 0")
}
strategy <- match.arg(strategy)
if (b < 0) {
stop("The parameter b must be greater or equal than 0")
}
# EPS Model class
epsmodel <- list(rounds = m, p = p, strategy = strategy, b = b,
nrow = ceiling(mdata$measures$num.instances * subsample),
call = match.call())
if (!anyNA(seed)) {
set.seed(seed)
}
idxs <- lapply(seq(m), function(iteration) {
sample(mdata$measures$num.instances, epsmodel$nrow)
})
epsmodel$models <- utiml_lapply(idxs, function(idx) {
ps(create_subset(mdata, idx), base.algorithm = base.algorithm, p = p,
strategy = strategy, b = b, ..., seed = seed)
}, cores, seed)
class(epsmodel) <- "EPSmodel"
epsmodel
}
#' Predict Method for Ensemble of Pruned Set Transformation
#'
#' This function predicts values based upon a model trained by
#' \code{\link{eps}}. Differently from the other methods, the probability value
#' is actually the sum of all probability predictions, as described in the
#' original paper.
#'
#' @param object Object of class '\code{EPSmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param threshold A threshold value for producing bipartitions. (Default: 0.5)
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the prediction. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. (Default:
#' \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=eps]{Ensemble of Pruned Set (EPS)}}
#' @export
#'
#' @examples
#' model <- eps(toyml, "RANDOM")
#' pred <- predict(model, toyml)
predict.EPSmodel <- function(object, newdata, threshold = 0.5,
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "EPSmodel")) {
stop("First argument must be a EPSmodel object")
}
previous.value <- getOption("utiml.empty.prediction")
options(utiml.empty.prediction = TRUE)
newdata <- utiml_newdata(newdata)
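  # Each PS member contributes its probability scores only for the labels it
  # predicts as positive (probability * bipartition); the member scores are
  # then summed over the ensemble and thresholded to build the final result.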
results <- utiml_lapply(object$models, function (psmodel){
res <- predict.PSmodel(psmodel, newdata)
as.probability(res) * as.bipartition(res)
}, cores, seed)
options(utiml.empty.prediction = previous.value)
as.mlresult(Reduce('+', results), probability = probability,
threshold = threshold)
}
#' Print EPS model
#' @param x The eps model
#' @param ... ignored
#'
#' @return No return value, called for print model's detail
#'
#' @export
print.EPSmodel <- function(x, ...) {
cat("Ensemble of Pruned Set Model\n\nCall:\n")
print(x$call)
cat("\nModels:", x$rounds, "\n")
cat("Instance by models: ", x$nrow, "\n")
cat("Prune:", x$p, "\n")
cat("Strategy:", x$strategy, "\n")
cat("B value:", x$b, "\n")
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_eps.R
|
#' Ensemble of Single Label
#'
#' Create an Ensemble of Single Label model for multilabel classification.
#'
#' ESL is an ensemble of multi-class models that prioritizes the less frequent
#' labels. It is based on the label-ignore approach, applied across the
#' different members of the ensemble.
#'
#' @family Transformation methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param m The number of members used in the ensemble. (Default: 10)
#' @param w The weight given to the choice of the less frequent labels. When it
#'  is 0, the labels are chosen at random; when it is 1, the complement of the
#'  label frequency is used as the probability of choosing each label. Values
#'  greater than 1 privilege the less frequent labels. (Default: 1)
#' @param ... Other arguments passed to the base algorithm for all subproblems
#' @param cores The number of cores to parallelize the training. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{ESLmodel} containing the set of fitted
#' models, including:
#' \describe{
#'   \item{labels}{A vector with the label names.}
#' \item{models}{A list of the multi-class models.}
#' }
#'
#' @export
#'
#' @examples
#' model <- esl(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Use SVM as base algorithm
#' model <- esl(toyml, "SVM")
#' pred <- predict(model, toyml)
#'
#' # Change the base algorithm and use 2 CORES
#' model <- esl(toyml[1:50], 'RF', cores = 2, seed = 123)
#'
#' # Set a parameter for all subproblems
#' model <- esl(toyml, 'KNN', k=5)
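#'
#' # Choose the single-label target at random in each member (w = 0)
#' model <- esl(toyml, "RANDOM", w = 0)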
#' }
esl <- function(mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"),
m=10, w=1, ..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
freqs <- mdata$labels$freq
labels <- rownames(mdata$labels)
names(freqs) <- labels
# ESL Model class
eslmodel <- list(labels = labels, call = match.call(), m=m, w=w)
# Create models
eslmodel$models <- utiml_lapply(seq(m), function (i){
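    # For every instance, choose a single target class among its relevant
    # labels: the score ((1 - frequency) * w) plus uniform noise favours the
    # less frequent labels when w > 0 and reduces to a purely random choice
    # when w = 0.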
Class.values <- apply(mdata$dataset[,labels], 1, function(row) {
bips <- which(row == 1)
names(which.max(((1 - freqs[bips])*w) + stats::runif(length(bips))))
})
train <- cbind(mdata$dataset[,mdata$attributesIndexes], Class=Class.values)
utiml_create_model(
utiml_prepare_data(train, "mldSL", mdata$name, "esl", base.algorithm),
...
)
}, cores, seed)
class(eslmodel) <- "ESLmodel"
eslmodel
}
#' Predict Method for Ensemble of Single Label
#'
#' This function predicts values based upon a model trained by
#' \code{\link{esl}}.
#'
#' @param object Object of class '\code{ESLmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=esl]{Ensemble of Single Label (ESL)}}
#' @export
#'
#' @examples
#' model <- esl(toyml, "RANDOM")
#' pred <- predict(model, toyml)
predict.ESLmodel <- function(object, newdata,
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "ESLmodel")) {
stop("First argument must be an ESLmodel object")
}
newdata <- utiml_newdata(newdata)
labels <- object$labels
votes <- do.call(cbind, utiml_lapply(object$models, function(model){
#TODO refactory it
#TODO use probability
pred <- do.call(mlpredict, c(list(model = model, newdata = newdata), ...))
#pred <- predict(model, newdata) #, type = "prob"
as.character(pred[,"prediction"])
}, cores, seed))
rownames(votes) <- rownames(newdata)
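  # Turn the m single-label votes into per-label scores: each label's score is
  # the fraction of ensemble members that predicted it, and any label with a
  # non-zero share is set positive by the small fixed threshold below.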
probs <- t(apply(votes, 1, function(x){
row <- rep(0, length(labels))
names(row) <- labels
vt <- table(x)/object$m
row[names(vt)] <- vt
row
}))
fixed_threshold(probs, 0.0001, probability)
}
#' Print ESL model
#' @param x The esl model
#' @param ... ignored
#'
#' @return No return value, called for print model's detail
#'
#' @export
print.ESLmodel <- function(x, ...) {
cat("Ensemble of Single Label Model\n\nCall:\n")
print(x$call)
cat("\n", x$m, "Models")
cat("\n", x$w, "is the weight for the less frequent labels\n")
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_esl.R
|
#' Hierarchy Of Multilabel classifiER (HOMER)
#'
#' Create a Hierarchy Of Multilabel classifiER (HOMER).
#'
#' HOMER is an algorithm for effective and computationally efficient multilabel
#' classification in domains with many labels. It constructs a hierarchy of
#' multilabel classifiers, each one dealing with a much smaller set of labels.
#'
#' @family Transformation methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param clusters Maximum number of nodes in each level. (Default: 3)
#' @param method The strategy used to organize the labels (create the
#' meta-labels). The options are: "balanced", "clustering" and "random".
#' (Default: "balanced").
#' @param iteration The maximum number of iterations, used by the balanced and
#'  clustering methods.
#' @param ... Other arguments passed to the base algorithm for all subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. (Default:
#' \code{options("utiml.seed", NA)})
#' @return An object of class \code{HOMERmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{labels}{A vector with the label names.}
#' \item{clusters}{The number of nodes in each level}
#' \item{models}{The Hierarchy of BR models.}
#' }
#' @references
#' Tsoumakas, G., Katakis, I., & Vlahavas, I. (2008). Effective and efficient
#' multilabel classification in domains with large number of labels. In Proc.
#' ECML/PKDD 2008 Workshop on Mining Multidimensional Data (MMD'08)
#' (pp. 30-44). Antwerp, Belgium.
#' @export
#'
#' @examples
#' model <- homer(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' ##Change default configurations
#' model <- homer(toyml, "RF", clusters=5, method="clustering", iteration=10)
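#'
#' ##Use a random partition of the labels at each level of the hierarchy
#' model <- homer(toyml, "RANDOM", method="random")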
#' }
homer <- function (mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"),
clusters = 3, method = c("balanced", "clustering", "random"),
iteration = 100, ..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
  if (clusters < 2) {
stop("The number of clusters must be greater than 1")
}
method <- switch (match.arg(method),
balanced = homer_balanced_kmeans,
clustering = homer_kmeans,
random = homer_random
)
# HOMER Model class
hmodel <- list(clusters = clusters, method = method, call = match.call())
hmodel$labels = rownames(mdata$labels)
if (!anyNA(seed)) {
set.seed(seed)
}
hmodel$models <- buildLabelHierarchy(mdata, base.algorithm, method, clusters,
iteration, ..., cores=cores, seed=seed)
class(hmodel) <- "HOMERmodel"
hmodel
}
#' Predict Method for HOMER
#'
#' This function predicts values based upon a model trained by
#' \code{\link{homer}}.
#'
#' @param object Object of class '\code{HOMERmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the prediction. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. (Default:
#' \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=homer]{Hierarchy Of Multilabel classifiER (HOMER)}}
#' @export
#'
#' @examples
#' model <- homer(toyml, "RANDOM")
#' pred <- predict(model, toyml)
predict.HOMERmodel <- function (object, newdata,
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "HOMERmodel")) {
stop("First argument must be a HOMERmodel object")
}
previous.value <- getOption("utiml.empty.prediction")
options(utiml.empty.prediction = FALSE)
newdata <- utiml_newdata(newdata)
if (!anyNA(seed)) {
set.seed(seed)
}
  prediction <- predictLabelHierarchy(object$models, newdata, ...,
cores=cores, seed=seed)
options(utiml.empty.prediction = previous.value)
as.mlresult(prediction, probability)
}
predictLabelHierarchy <- function(node, newdata, ..., cores, seed) {
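  # Recursively traverse the label hierarchy: the node's BR model predicts its
  # meta-labels; for each meta-label grouping several labels, the instances
  # predicted as positive are refined by the corresponding child node, while
  # the remaining instances inherit the meta-label score for all of its member
  # labels. Meta-labels holding a single label are simply renamed to that label.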
prediction <- predict.BRmodel(node$model, newdata[, node$attributes], ...)
bipartition <- as.bipartition(prediction)
probability <- as.probability(prediction)
metalabel <- paste(unlist(lapply(node$metalabels, paste, collapse="*")),
collapse="|")
for(i in seq(node$metalabels)) {
labels <- node$metalabels[[i]]
if (length(labels) > 1) {
child <- node$children[[i]]
indexes <- bipartition[, i, drop=FALSE] == 1
if (any(indexes)) {
prediction <- predictLabelHierarchy(child, newdata[indexes, ], ...,
cores=cores, seed=seed)
new.bip <- new.prob <- as.data.frame(
matrix(0, ncol = ncol(prediction), nrow = nrow(bipartition),
dimnames = list(rownames(bipartition), colnames(prediction)))
)
new.bip[indexes, colnames(prediction)] <- as.bipartition(prediction)
bipartition <- cbind(bipartition, new.bip)
values <- probability[!indexes, i]
if (length(values) > 0) {
#TODO change this if
new.prob[!indexes, ] <- do.call(
cbind,
lapply(seq(ncol(new.prob)), function (j) values)
)
}
new.prob[indexes, colnames(prediction)] <- as.probability(prediction)
probability <- cbind(probability, new.prob)
} else {
#Predict all instances of the meta-label as negative
aux <- do.call(cbind, lapply(labels, function(lbl)
bipartition[, i, drop=FALSE]))
colnames(aux) <- labels
bipartition <- cbind(bipartition, aux)
aux <- do.call(cbind, lapply(labels, function(lbl)
probability[, i, drop=FALSE]))
colnames(aux) <- labels
probability <- cbind(probability, aux)
}
} else {
#Rename the meta-label because it is the label
colnames(bipartition)[i] <- colnames(probability)[i] <- labels
}
}
#cat(metalabel, "\n")
multilabel_prediction(
bipartition[, node$labels, drop=F], probability[, node$labels, drop=F]
)
}
buildLabelHierarchy <- function (mdata, base.algorithm, method, k, it,
..., cores, seed) {
node <- list(labels = rownames(mdata$labels), metalabels = list())
node$metalabels <- method(mdata, k, it)
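  # Build the meta-label targets for this node: a meta-label is positive for an
  # instance when at least one of its member labels is positive; instances with
  # no positive meta-label are dropped from this node's training data.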
newls <- do.call(cbind, lapply(node$metalabels, function (u){
as.numeric(rowSums(mdata$dataset[, u, drop=FALSE]) > 0)
}))
colnames(newls) <- paste('meta-lbl-', seq(node$metalabels), sep='')
rows <- which(rowSums(newls) > 0)
#Fix meta-label without positive instances
if (any(colSums(newls) == 0)) {
empty.labels <- colSums(newls) == 0
node$metalabels <- c(node$metalabels[!empty.labels],
unlist(node$metalabels[empty.labels]))
newls <- do.call(cbind, lapply(node$metalabels, function (u){
as.numeric(rowSums(mdata$dataset[, u, drop=FALSE]) > 0)
}))
colnames(newls) <- paste('meta-lbl-', seq(node$metalabels), sep='')
rows <- which(rowSums(newls) > 0)
}
ndata <- remove_unique_attributes(mldr_from_dataframe(
cbind(mdata$dataset[rows, mdata$attributesIndexes], newls[rows,, drop=F]),
mdata$measures$num.inputs + seq(length(node$metalabels)),
name = mdata$name
))
mtlbl <- paste(sapply(node$metalabels, paste, collapse='*'), collapse="|")
node$attributes <- colnames(ndata$dataset[, ndata$attributesIndexes])
node$model <- br(ndata, base.algorithm, ..., cores=cores, seed=seed)
rm(ndata)
node$children <- lapply(node$metalabels, function (metalabels) {
if (length(metalabels) > 1) {
excluded.label <- node$labels[!node$labels %in% metalabels]
ndata <- remove_unlabeled_instances(remove_labels(mdata, excluded.label))
buildLabelHierarchy(ndata, base.algorithm, method, k, it, ...,
cores=cores, seed=seed)
} else {
NULL
}
})
node
}
homer_balanced_kmeans <- function (mdata, k, it, ...) {
if (k >= mdata$measures$num.labels) {
return(as.list(rownames(mdata$labels)))
}
dataset <- t(mdata$dataset[, mdata$labels$index])
labels <- rownames(dataset)
Ci <- list()
centers <- dataset[sample(labels, k), ]
rownames(centers) <- NULL
for (i in seq(it)) {
ldist <- apply(dataset, 1, function (r1) {
apply(centers, 1, function (r2) stats::dist(rbind(r1, r2)))
})
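    # Balanced assignment: labels are greedily attached to their closest
    # centroid, but whenever a cluster exceeds ceiling(num.labels / k) members
    # its farthest label gets an infinite distance to that centroid and is
    # reassigned on the next pass.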
has.extra <- TRUE
while(has.extra) {
j <- apply(ldist, 2, which.min)
Ci <- lapply(seq(k), function (i) sort(ldist[i, which(j == i)]))
extra <- which(unlist(lapply(Ci, length)) > ceiling(length(labels)/k))
for (i in extra) {
ldist[i, names(Ci[[i]])[length(Ci[[i]])]] <- Inf
}
has.extra <- length(extra) > 0
}
new.centers <- do.call(rbind, lapply(Ci, function (rows) {
colMeans(dataset[names(rows), , drop=FALSE])
}))
if (all(centers == new.centers)) {
break
}
centers <- new.centers
}
lapply(Ci, names)
}
homer_kmeans <- function (mdata, k, it, ...) {
if (k >= mdata$measures$num.labels) {
as.list(rownames(mdata$labels))
} else {
clusters <- stats::kmeans(t(mdata$dataset[, mdata$labels$index]),
k, iter.max = it)
split(rownames(mdata$labels), clusters$cluster)
}
}
homer_random <- function (mdata, k, ...) {
split(sample(rownames(mdata$labels)),
rep_len(seq(k), mdata$measures$num.labels))
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_homer.R
|
#' LIFT for multi-label Classification
#'
#' Create a multi-label learning with Label specIfic FeaTures (LIFT) model.
#'
#' LIFT firstly constructs features specific to each label by conducting
#' clustering analysis on its positive and negative instances, and then performs
#' training and testing by querying the clustering results.
#'
#' @family Transformation methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param ratio Controls the number of clusters being retained. Must be between
#' 0 and 1. (Default: \code{0.1})
#' @param ... Other arguments passed to the base algorithm for all subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{LIFTmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{labels}{A vector with the label names.}
#' \item{models}{A list of the generated models, named by the label names.}
#' }
#' @references
#' Zhang, M.-L., & Wu, L. (2015). Lift: Multi-Label Learning with
#' Label-Specific Features. IEEE Transactions on Pattern Analysis and Machine
#' Intelligence, 37(1), 107-120.
#' @export
#'
#' @examples
#' model <- lift(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Running lift with a specific ratio
#' model <- lift(toyml, "RF", 0.15)
#' }
lift <- function(mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"),
ratio = 0.1, ..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
if (ratio < 0 || ratio > 1) {
stop("The attribbute ratio must be between 0 and 1")
}
#TODO parametrize clustering and distance method
# LIFT Model class
liftmodel <- list(labels = rownames(mdata$labels),
ratio = ratio, call = match.call())
# Create models
mldataset <- rep_nom_attr(mdata$dataset[mdata$attributesIndexes], TRUE)
labels <- utiml_rename(liftmodel$labels)
liftdata <- utiml_lapply(labels, function (label) {
#Form Pk and Nk based on D according to Eq.(1)
Pk <- mdata$dataset[,label] == 1
Nk <- !Pk
#Perform k-means on Pk and Nk, each with mk clusters as defined in Eq.(2)
mk <- ceiling(ratio * min(sum(Pk), sum(Nk)))
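    # mk clusters are built on the positive instances and mk on the negative
    # ones, so each instance is mapped to a label-specific feature vector of
    # 2 * mk distances to the cluster centroids.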
gpk <- stats::kmeans(mldataset[Pk, ], mk)
gnk <- stats::kmeans(mldataset[Nk, ], mk)
centroids <- rbind(gpk$centers, gnk$centers)
rownames(centroids) <- c(paste("p", rownames(gpk$centers), sep=''),
paste("n", rownames(gnk$centers), sep=''))
#Create the mapping k for lk according to Eq.(3);
dataset <- cbind(utiml_euclidean_distance(mldataset, centroids),
mdata$dataset[label])
colnames(dataset) <- c(rownames(centroids), label)
#Induce the model using the base algorithm
model <- utiml_create_model(
utiml_prepare_data(dataset, "mldLIFT", mdata$name,
"lift", base.algorithm),
...
)
rm(dataset)
list(
centroids = centroids,
model = model
)
}, cores, seed)
liftmodel$centroids <- lapply(liftdata, function (x) x$centroids)
liftmodel$models <- lapply(liftdata, function (x) x$model)
class(liftmodel) <- "LIFTmodel"
liftmodel
}
#' Predict Method for LIFT
#'
#' This function predicts values based upon a model trained by
#' \code{\link{lift}}.
#'
#' @param object Object of class '\code{LIFTmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=lift]{LIFT}}
#' @export
#'
#' @examples
#' model <- lift(toyml, "RANDOM")
#' pred <- predict(model, toyml)
predict.LIFTmodel <- function(object, newdata,
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "LIFTmodel") && !is(object, "MLDFLmodel")) {
stop("First argument must be an LIFTmodel/MLDFLmodel object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
# Predict models
newdata <- rep_nom_attr(utiml_newdata(newdata), TRUE)
labels <- utiml_rename(object$labels)
predictions <- utiml_lapply(labels, function (label) {
centroids <- object$centroids[[label]]
dataset <- as.data.frame(utiml_euclidean_distance(newdata, centroids))
dimnames(dataset) <- list(rownames(newdata), rownames(centroids))
utiml_predict_binary_model(object$models[[label]], dataset, ...)
}, cores, seed)
utiml_predict(predictions, probability)
}
#' Print LIFT model
#' @param x The lift model
#' @param ... ignored
#'
#' @return No return value, called to print the model's details
#'
#' @export
print.LIFTmodel <- function(x, ...) {
cat("LIFT Model\n\nCall:\n")
print(x$call)
cat("\nRatio:", x$ratio, "\n")
cat("\n", length(x$labels), "Binary Models:\n")
overview <- as.data.frame(cbind(label=names(x$centroids),
attrs=unlist(lapply(x$centroids, nrow))))
rownames(overview) <- NULL
print(overview)
}
# Calculate the euclidean distance for two sets of data
utiml_euclidean_distance <- function(x, y) {
x <- t(x)
apply(y, 1, function (row) sqrt(colSums((x - row) ^ 2)))
}
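# Illustrative sketch (not part of the original utiml API; the function name is
# hypothetical): shows, on a tiny handmade matrix, how utiml_euclidean_distance
# builds the label-specific features used by LIFT, i.e. one column per cluster
# centroid holding the distance of each instance to that centroid.
utiml_sketch_lift_features <- function() {
  x <- matrix(c(0, 0, 1, 1, 3, 3), ncol = 2, byrow = TRUE,
              dimnames = list(c("i1", "i2", "i3"), c("f1", "f2")))
  centroids <- matrix(c(0, 0, 3, 3), ncol = 2, byrow = TRUE,
                      dimnames = list(c("p1", "n1"), c("f1", "f2")))
  # Rows are instances, columns are the centroids ("p1" and "n1")
  utiml_euclidean_distance(x, centroids)
}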
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_lift.R
|
#' Label Powerset for multi-label Classification
#'
#' Create a Label Powerset model for multilabel classification.
#'
#' Label Powerset is a simple transformation method to predict multi-label data.
#' It is based on the multi-class approach, building a single model in which
#' each class corresponds to a labelset.
#'
#' @family Transformation methods
#' @family Powerset
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param ... Other arguments passed to the base algorithm for all subproblems
#' @param cores Not used
#' @param seed An optional integer used to set the seed. (Default:
#' \code{options("utiml.seed", NA)})
#' @return An object of class \code{LPmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{labels}{A vector with the label names.}
#' \item{model}{A multi-class model.}
#' }
#' @references
#' Boutell, M. R., Luo, J., Shen, X., & Brown, C. M. (2004). Learning
#' multi-label scene classification. Pattern Recognition, 37(9), 1757-1771.
#' @export
#'
#' @examples
#' model <- lp(toyml, "RANDOM")
#' pred <- predict(model, toyml)
lp <- function (mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"), ...,
cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
# LP Model class
lpmodel <- list(labels = rownames(mdata$labels),
call = match.call(),
classes = mdata$labelsets)
lpmodel$model <- utiml_lapply(1, function (x){
    #Due to the seed
utiml_create_model(
utiml_prepare_data(
utiml_create_lp_data(mdata),
"mldLP", mdata$name, "lp", base.algorithm
), ...
)
}, 1, seed)[[1]]
class(lpmodel) <- "LPmodel"
lpmodel
}
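# Illustrative sketch (not part of the original utiml API; the function name is
# hypothetical): the Label Powerset idea collapses the binary label columns into
# a single multi-class target whose levels are the observed labelsets. The real
# transformation is performed by the internal utiml_create_lp_data helper; this
# standalone version only shows the concept on a plain data.frame of 0/1 columns.
utiml_sketch_lp_target <- function(label.columns) {
  factor(apply(label.columns, 1, paste, collapse = ""))
}
# e.g. utiml_sketch_lp_target(toyml$dataset[, toyml$labels$index])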
#' Predict Method for Label Powerset
#'
#' This function predicts values based upon a model trained by \code{\link{lp}}.
#'
#' @param object Object of class '\code{LPmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores Not used
#' @param seed An optional integer used to set the seed. (Default:
#' \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=lp]{Label Powerset (LP)}}
#' @export
#'
#' @examples
#' model <- lp(toyml, "RANDOM")
#' pred <- predict(model, toyml)
predict.LPmodel <- function(object, newdata,
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "LPmodel")) {
stop("First argument must be a LPmodel object")
}
newdata <- utiml_newdata(newdata)
result <- utiml_lapply(1, function (x){
    #Due to the seed
utiml_predict_multiclass_model(object$model, newdata, object$labels,
probability, ...)
}, 1, seed)[[1]]
result
}
#' Print LP model
#' @param x The lp model
#' @param ... ignored
#'
#' @return No return value, called to print the model's details
#'
#' @export
print.LPmodel <- function(x, ...) {
cat("Label Powerset Model\n\nCall:\n")
print(x$call)
cat("\n1 Model: ",length(x$classes),"classes\n")
  print(cbind.data.frame(class=names(x$classes), instances=as.numeric(x$classes)))
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_lp.R
|
#' Meta-BR or 2BR for multi-label Classification
#'
#' Create a Meta-BR (MBR) classifier to predict multi-label data. To do this, two
#' rounds of Binary Relevance are executed, such that the first step generates
#' new attributes to enrich the second prediction.
#'
#' This implementation uses the complete training set for both the training and
#' prediction steps of 2BR. However, the \code{phi} parameter may be used to
#' remove labels with low correlation in the second step.
#'
#' @family Transformation methods
#' @family Stacking methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param folds The number of folds used in the internal prediction. If this value
#' is 1, the whole dataset is used in the first prediction. (Default: 1)
#' @param phi A value between 0 and 1 used as the correlation coefficient
#' threshold. The value 0 includes all labels in the second phase and the value
#' 1 includes only the predicted label. (Default: 0)
#' @param ... Other arguments passed to the base algorithm for all subproblems.
#' @param predict.params A list of default arguments passed to the predictor
#' algorithm. (Default: \code{list()})
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{MBRmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{labels}{A vector with the label names.}
#' \item{phi}{The value of \code{phi} parameter.}
#' \item{correlation}{The matrix of label correlations used in combination
#' with \code{phi} parameter to define the labels used in the second
#' step. }
#' \item{basemodel}{The BRModel used in the first iteration.}
#' \item{models}{A list of models named by the label names used in the
#' second iteration. }
#' }
#' @references
#' Tsoumakas, G., Dimou, A., Spyromitros, E., Mezaris, V., Kompatsiaris, I., &
#' Vlahavas, I. (2009). Correlation-based pruning of stacked binary relevance
#' models for multi-label learning. In Proceedings of the Workshop on
#' Learning from Multi-Label Data (MLD'09) (pp. 22-30).
#' Godbole, S., & Sarawagi, S. (2004). Discriminative Methods for Multi-labeled
#' Classification. In Data Mining and Knowledge Discovery (pp. 1-26).
#' @export
#'
#' @examples
#' model <- mbr(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Use 10 folds and different phi correlation with C5.0 classifier
#' model <- mbr(toyml, 'C5.0', 10, 0.2)
#'
#' # Run with 2 cores
#' model <- mbr(toyml, "SVM", cores = 2, seed = 123)
#'
#' # Set a specific parameter
#' model <- mbr(toyml, 'KNN', k=5)
#' }
mbr <- function(mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"),
folds = 1, phi = 0, ..., predict.params = list(),
cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
if (folds < 1) {
stop("The number of folds must be positive")
}
if (phi < 0 || phi > 1) {
stop("The phi threshold must be between 0 and 1, inclusive")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
if (!anyNA(seed)) {
set.seed(seed)
}
# MBR Model class
mbrmodel <- list(labels = rownames(mdata$labels),
phi = phi,
call = match.call())
# 1 Iteration - Base Level -------------------------------------------------
mbrmodel$basemodel <- br(mdata, base.algorithm, ..., cores=cores, seed=seed)
if (folds == 1) {
params <- list(object = mbrmodel$basemodel,
newdata = mdata$dataset[mdata$attributesIndexes],
probability = FALSE, cores = cores, seed = seed)
base.preds <- as.bipartition(do.call(predict.BRmodel,
c(params, predict.params)))
}
else {
kf <- create_kfold_partition(mdata, folds, "iterative")
base.preds <- do.call(rbind, lapply(seq(folds), function(f) {
dataset <- partition_fold(kf, f)
classifier <- br(dataset$train, base.algorithm, ...,
cores=cores, seed=seed)
params <- list(object = classifier, newdata = dataset$test,
probability = FALSE, cores = cores, seed = seed)
as.bipartition(do.call(predict.BRmodel, c(params, predict.params)))
}))
base.preds <- base.preds[rownames(mdata$dataset), ]
}
base.preds <- as.data.frame(base.preds)
for (i in seq(ncol(base.preds))) {
base.preds[, i] <- factor(base.preds[, i], levels=c(0, 1))
}
# 2 Iteration - Meta level -------------------------------------------------
corr <- abs(stats::cor(mdata$dataset[mdata$labels$index]))
mbrmodel$correlation <- corr
labels <- utiml_rename(mbrmodel$labels)
mbrmodel$models <- utiml_lapply(labels, function (label) {
nmcol <- colnames(corr)[corr[label, ] >= phi]
new.data <- base.preds[, nmcol, drop = FALSE]
if (ncol(new.data) > 0) {
colnames(new.data) <- paste("extra", nmcol, sep = ".")
}
utiml_create_model(
utiml_prepare_data(
utiml_create_binary_data(mdata, label, new.data),
"mldMBR", mdata$name, "mbr", base.algorithm, new.features = nmcol
), ...
)
}, cores, seed)
class(mbrmodel) <- "MBRmodel"
mbrmodel
}
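# Illustrative sketch (not part of the original utiml API; the function name is
# hypothetical): shows how the phi threshold selects which first-level
# predictions are stacked as extra features for a given label in the meta level,
# given the matrix of absolute label correlations computed above.
utiml_sketch_mbr_selection <- function(correlation, label, phi) {
  colnames(correlation)[correlation[label, ] >= phi]
}
# e.g. with the toyml labels:
# corr <- abs(stats::cor(toyml$dataset[toyml$labels$index]))
# utiml_sketch_mbr_selection(corr, "y1", 0.2)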
#' Predict Method for Meta-BR/2BR
#'
#' This function predicts values based upon a model trained by \code{mbr}.
#'
#' @param object Object of class '\code{MBRmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=mbr]{Meta-BR (MBR or 2BR)}}
#' @export
#'
#' @examples
#' \donttest{
#' # Predict SVM scores
#' model <- mbr(toyml)
#' pred <- predict(model, toyml)
#'
#' # Predict SVM bipartitions
#' pred <- predict(model, toyml, probability = FALSE)
#'
#' # Passing a specific parameter for SVM predict algorithm
#' pred <- predict(model, toyml, na.action = na.fail)
#' }
predict.MBRmodel <- function(object, newdata,
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "MBRmodel")) {
stop("First argument must be an MBRmodel object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
newdata <- utiml_newdata(newdata)
# 1 Iteration - Base level -------------------------------------------------
base.preds <- as.bipartition(predict.BRmodel(object$basemodel, newdata,
probability=FALSE, ...,
cores=cores, seed=seed))
base.preds <- as.data.frame(base.preds)
for (i in seq(ncol(base.preds))) {
base.preds[,i] <- factor(base.preds[,i], levels=c(0, 1))
}
# 2 Iteration - Meta level -------------------------------------------------
corr <- object$correlation
labels <- utiml_rename(object$labels)
predictions <- utiml_lapply(labels, function(labelname) {
nmcol <- colnames(corr)[corr[labelname, ] >= object$phi]
extra.col <- base.preds[, nmcol, drop = FALSE]
if (ncol(extra.col) > 0) {
colnames(extra.col) <- paste("extra", nmcol, sep = ".")
}
utiml_predict_binary_model(object$models[[labelname]],
cbind(newdata, extra.col), ...)
}, cores, seed)
utiml_predict(predictions, probability)
}
# Phi Correlation Coefficient
#
# Calculate the phi correlation coefficient for all pairs of labels. This is a
# specialized version of the Pearson product-moment correlation coefficient
# (PCC) for categorical variables with two values, also called dichotomous
# variables.
#
# @param mdata A mldr multi-label dataset
# @return A matrix with the correlation coefficients of all label pairs. The
# rows and columns correspond to the labels and each value is the correlation
# between a pair of labels. The main diagonal holds the value 1, which
# represents the correlation of a label with itself.
# @references
# Tsoumakas, G., Dimou, A., Spyromitros, E., Mezaris, V., Kompatsiaris, I., &
# Vlahavas, I. (2009). Correlation-based pruning of stacked binary relevance
# models for multi-label learning. In Proceedings of the Workshop on Learning
# from Multi-Label Data (MLD'09) (pp. 22-30).
# @seealso \code{\link[=mbr]{Meta-BR (MBR or 2BR)}}
#
# @examples
# ## result <- utiml_labels_correlation(toyml)
#
# # Get the phi coefficient between the labels 'y1' and 'y2'
# ## result['y1', 'y2']
#
# # Get all coefficients of a specific label
# ## result[4, -4]
utiml_labels_correlation <- function(mdata) {
label.names <- rownames(mdata$labels)
classes <- lapply(mdata$labels$index, function (col) {
factor(mdata$dataset[, col], levels=c("0", "1"))
})
q <- length(label.names)
cor <- matrix(nrow = q, ncol = q, dimnames = list(label.names, label.names))
for (i in seq(1, q)) {
for (j in seq(i, q)) {
confmat <- table(classes[c(i, j)])
A <- as.numeric(confmat["1", "1"])
B <- as.numeric(confmat["1", "0"])
C <- as.numeric(confmat["0", "1"])
D <- as.numeric(confmat["0", "0"])
value1 <- A * D - B * C
value2 <- sqrt(as.numeric(A + B) * (C + D) * (A + C) * (B + D))
cor[i, j] <- ifelse(value1 == 0 & value2 == 0, -Inf, abs(value1 / value2))
cor[j, i] <- cor[i, j]
}
}
cor
}
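# Illustrative sketch (not part of the original utiml API; the function name is
# hypothetical): the phi coefficient for a single pair of binary label vectors,
# using the same contingency-table quantities (A, B, C, D) as
# utiml_labels_correlation above.
utiml_sketch_phi <- function(y1, y2) {
  tbl <- table(factor(y1, levels = c(0, 1)), factor(y2, levels = c(0, 1)))
  A <- tbl["1", "1"]; B <- tbl["1", "0"]
  C <- tbl["0", "1"]; D <- tbl["0", "0"]
  (A * D - B * C) / sqrt((A + B) * (C + D) * (A + C) * (B + D))
}
# e.g. utiml_sketch_phi(c(1, 1, 0, 0), c(1, 0, 1, 0)) is 0 (no association)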
#' Print MBR model
#' @param x The mbr model
#' @param ... ignored
#'
#' @return No return value, called to print the model's details
#'
#' @export
print.MBRmodel <- function(x, ...) {
cat("Classifier Meta-BR (also called 2BR)\n\nCall:\n")
print(x$call)
cat("\nPhi:", x$phi, "\n")
cat("\nCorrelation Table Overview:\n")
corr <- x$correlation
diag(corr) <- NA
tbl <- data.frame(
min = apply(corr, 1, min, na.rm = TRUE),
mean = apply(corr, 1, mean, na.rm = TRUE),
median = apply(corr, 1, stats::median, na.rm = TRUE),
max = apply(corr, 1, max, na.rm = TRUE),
extra = apply(x$correlation, 1, function(row) sum(row > x$phi))
)
print(tbl)
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_mbr.R
|
#' Multi-label KNN (ML-KNN) for multi-label Classification
#'
#' Create a ML-KNN classifier to predict multi-label data. It is a multi-label
#' lazy learning method derived from the traditional K-nearest neighbor (KNN)
#' algorithm. For each unseen instance, its K nearest neighbors in the training
#' set are identified and, based on statistical information gained from the label
#' sets of these neighboring instances, the maximum a posteriori (MAP) principle
#' is used to determine the label set of the unseen instance.
#'
#' @family Adaptation methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param k The number of neighbors. (Default: \code{10})
#' @param s Smoothing parameter controlling the strength of uniform prior. When
#' it is set to be 1, we have the Laplace smoothing. (Default: \code{1}).
#' @param distance The name of the method used to compute the distance. See
#'  \code{\link[stats]{dist}} for the list of options.
#'  (Default: \code{"euclidean"})
#' @param ... Not used.
#' @param cores Ignored because this method does not support multi-core.
#' @param seed Ignored because this method is deterministic.
#' @return An object of class \code{MLKNNmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{labels}{A vector with the label names.}
#' \item{prior}{The prior probability of each label to occur.}
#' \item{posterior}{The posterior probability of each label to occur given
#' that k neighbors have it.}
#' }
#' @references
#' Zhang, M. L., & Zhou, Z. H. (2007). ML-KNN: A lazy learning approach
#' to multi-label learning. Pattern Recognition, 40(7), 2038-2048.
#' @export
#'
#' @examples
#' model <- mlknn(toyml, k=3)
#' pred <- predict(model, toyml)
mlknn <- function(mdata, k=10, s=1, distance="euclidean", ...,
cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)){
# KNN Model class
knnmodel <- list(labels = rownames(mdata$labels), call = match.call(),
k=k, s=s, distance=distance)
Prior <- (s + mdata$labels$count) / (s * 2 + mdata$measures$num.instances)
names(Prior) <- knnmodel$labels
dm <- as.matrix(stats::dist(mdata$dataset[,mdata$attributesIndexes],
method=distance))
colnames(dm) <- rownames(dm) <- seq(mdata$measures$num.instances)
diag(dm) <- Inf
Cx <- t(apply(dm, 1, function(dx) {
Nx <- as.numeric(names(sort(dx)[seq(k)]))
colSums(mdata$dataset[Nx, mdata$labels$index])
}))
Ck <- sapply(knnmodel$labels, function(label){
klabel <- factor(Cx[,label], levels=seq(0, k))
has.label <- mdata$dataset[,label] == 1
rbind(c1=table(klabel[has.label]), c0=table(klabel[!has.label]))
}, simplify = FALSE)
Sc <- t(do.call(rbind, lapply(Ck, rowSums)))
Posterior <- lapply(seq(0, k), function(j){
aux <- t(do.call(rbind, lapply(Ck, function(x) x[,j+1])))
(s + aux) / (s * (k+1) + Sc)
})
knnmodel$mdata <- mdata
knnmodel$prior <- Prior
knnmodel$posterior <- Posterior
class(knnmodel) <- "MLKNNmodel"
knnmodel
}
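# Illustrative sketch (not part of the original utiml API; the function name is
# hypothetical): the smoothed prior used above, P(l = 1) = (s + count(l)) /
# (s * 2 + n). With s = 1 this is the Laplace smoothing mentioned in the docs.
utiml_sketch_mlknn_prior <- function(label.count, num.instances, s = 1) {
  (s + label.count) / (s * 2 + num.instances)
}
# e.g. 30 positive instances out of 100 with s = 1: (1 + 30) / (2 + 100) ~ 0.304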
#' Predict Method for ML-KNN
#'
#' This function predicts values based upon a model trained by \code{mlknn}.
#'
#' @param object Object of class '\code{MLKNNmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Not used.
#' @param cores Ignored because this method does not support multi-core.
#' @param seed Ignored because this method is deterministic.
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=mlknn]{ML-KNN}}
#' @export
#'
#' @examples
#' model <- mlknn(toyml)
#' pred <- predict(model, toyml)
predict.MLKNNmodel <- function(object, newdata,
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "MLKNNmodel")) {
stop("First argument must be an MLKNNmodel object")
}
newdata <- utiml_newdata(newdata)
train.data <- object$mdata$dataset[,object$mdata$attributesIndexes]
train.labels <- object$mdata$dataset[,object$mdata$labels$index]
Cx <- t(apply(newdata, 1, function(test.inst){
dx <- apply(train.data, 1, function(train.inst){
stats::dist(rbind(test.inst, train.inst), method=object$distance)
})
names(dx) <- seq(length(dx))
Nx <- as.numeric(names(sort(dx)[seq(object$k)]))
colSums(train.labels[Nx,])
}))
predictions <- sapply(object$labels, function(label){
prior <- c(object$prior[label], 1-object$prior[label])
names(prior) <- c("c1","c0")
probs <- sapply(object$posterior[Cx[,label] + 1], function(item){
item[,label]
}) * prior
bipartition <- abs(apply(probs , 2, which.max) - 2)
probability <- probs[1,] / colSums(probs)
names(bipartition) <- names(probability) <- rownames(newdata)
utiml_binary_prediction(bipartition, probability)
}, simplify = FALSE)
utiml_predict(predictions, probability)
}
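# Illustrative sketch (not part of the original utiml API; the function name is
# hypothetical): the MAP decision for a single label of a single instance, given
# the smoothed prior P(l = 1) and the posteriors of observing the counted number
# of positive neighbors under l = 1 and l = 0, mirroring the computation above.
utiml_sketch_mlknn_map <- function(prior1, posterior1, posterior0) {
  scores <- c(c1 = prior1 * posterior1, c0 = (1 - prior1) * posterior0)
  list(bipartition = as.numeric(scores["c1"] >= scores["c0"]),
       probability = as.numeric(scores["c1"] / sum(scores)))
}
# e.g. utiml_sketch_mlknn_map(0.3, 0.6, 0.2) predicts the label as positive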
#' Print MLKNN model
#' @param x The mlknn model
#' @param ... ignored
#'
#' @return No return value, called to print the model's details
#'
#' @export
print.MLKNNmodel <- function(x, ...) {
cat("Classifier MLKNN\n\nCall:\n")
print(x$call)
cat("\nk = ", x$k, "\nPrior positive probabilits:\n")
print(x$prior)
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_mlknn.R
|
#' Nested Stacking for multi-label Classification
#'
#' Create a Nested Stacking model for multilabel classification.
#'
#' Nested Stacking is based on the Classifier Chains transformation method to
#' predict multi-label data. It differs from CC in that it uses the predicted
#' label values (rather than the true values) in the training step and
#' regularizes the output based on the labelsets available in the training data.
#'
#' @family Transformation methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param chain A vector with the label names to define the chain order. If
#' empty the chain is the default label sequence of the dataset. (Default:
#' \code{NA})
#' @param ... Other arguments passed to the base algorithm for all subproblems.
#' @param predict.params A list of default arguments passed to the predict
#' algorithm. (default: \code{list()})
#' @param cores Ignored because this method does not support multi-core.
#' @param seed An optional integer used to set the seed.
#' (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{NSmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{chain}{A vector with the chain order}
#' \item{labels}{A vector with the label names in expected order}
#'   \item{labelsets}{The matrix containing only the label values}
#' \item{models}{A list of models named by the label names.}
#' }
#' @references
#' Senge, R., Coz, J. J. del, & Hullermeier, E. (2013). Rectifying classifier
#' chains for multi-label classification. In Workshop of Lernen, Wissen &
#' Adaptivitat (LWA 2013) (pp. 162-169). Bamberg, Germany.
#' @export
#'
#' @examples
#' model <- ns(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Use a specific chain with C5.0 classifier
#' mychain <- sample(rownames(toyml$labels))
#' model <- ns(toyml, 'C5.0', mychain)
#'
#' # Set a specific parameter
#' model <- ns(toyml, 'KNN', k=5)
#' }
ns <- function(mdata, base.algorithm = getOption("utiml.base.algorithm", "SVM"),
chain = NA, ..., predict.params = list(), cores = NULL,
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
labels <- rownames(mdata$labels)
chain <- utiml_ifelse(anyNA(chain), labels, chain)
if (!utiml_is_equal_sets(chain, labels)) {
stop("Invalid chain (all labels must be on the chain)")
}
if (!anyNA(seed)) {
set.seed(seed)
}
# NS Model class
nsmodel <- list(
labels = labels,
chain = chain,
call = match.call(),
models = list(),
labelsets = as.matrix(mdata$dataset[, mdata$labels$index])
)
basedata <- mdata$dataset[mdata$attributesIndexes]
newattrs <- matrix(nrow = mdata$measures$num.instances, ncol = 0)
for (labelIndex in seq(length(chain))) {
label <- chain[labelIndex]
# Create data
dataset <- cbind(basedata, mdata$dataset[label])
mldCC <- utiml_prepare_data(dataset, "mldCC", mdata$name, "ns",
base.algorithm, chain.order = labelIndex)
# Call dynamic multilabel model with merged parameters
model <- utiml_create_model(mldCC, ...)
result <- do.call(utiml_predict_binary_model,
c(list(model = model, newdata = basedata),
predict.params))
basedata <- cbind(basedata, factor(result$bipartition, levels=c(0, 1)))
names(basedata)[ncol(basedata)] <- label
nsmodel$models[[label]] <- model
}
class(nsmodel) <- "NSmodel"
nsmodel
}
#' Predict Method for Nested Stacking
#'
#' This function predicts values based upon a model trained by \code{ns}.
#' The prediction scores are adjusted because this method uses a labelset
#' correction to predict only labelsets present in the training data. For more
#' information about this implementation see \code{\link{subset_correction}}.
#'
#' @param object Object of class '\code{NSmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores Ignored because this method does not support multi-core.
#' @param seed An optional integer used to set the seed.
#' (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=ns]{Nested Stacking (NS)}}
#' @export
#'
#' @examples
#' model <- ns(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Predict SVM bipartitions
#' pred <- predict(model, toyml, probability = FALSE)
#'
#' # Passing a specific parameter for SVM predict algorithm
#' pred <- predict(model, toyml, na.action = na.fail)
#' }
predict.NSmodel <- function(object, newdata,
probability = getOption("utiml.use.probs", TRUE),
..., cores = NULL,
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "NSmodel")) {
stop("First argument must be an NSmodel object")
}
if (!anyNA(seed)) {
set.seed(seed)
}
newdata <- utiml_newdata(newdata)
predictions <- list()
for (label in object$chain) {
predictions[[label]] <- utiml_predict_binary_model(object$models[[label]],
newdata,
...)
newdata <- cbind(newdata, factor(predictions[[label]]$bipartition, levels=c(0, 1)))
names(newdata)[ncol(newdata)] <- label
}
subset_correction(utiml_predict(predictions[object$labels], probability),
object$labelsets, probability)
}
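# Illustrative sketch (not part of the original utiml API; the function name is
# hypothetical): one simple way to map a predicted bipartition onto the closest
# labelset observed in the training data (by Hamming distance). The correction
# actually applied above is implemented by subset_correction and may differ in
# its details.
utiml_sketch_closest_labelset <- function(bipartition, labelsets) {
  distances <- apply(labelsets, 1, function(ls) sum(ls != bipartition))
  labelsets[which.min(distances), ]
}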
#' Print NS model
#' @param x The ns model
#' @param ... ignored
#'
#' @return No return value, called to print the model's details
#'
#' @export
print.NSmodel <- function(x, ...) {
cat("Nested Stacking Model\n\nCall:\n")
print(x$call)
cat("\n Chain: (", length(x$chain), "labels )\n")
print(x$chain)
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_ns.R
|
#' Pruned Problem Transformation for multi-label Classification
#'
#' Create a Pruned Problem Transformation model for multilabel classification.
#'
#' Pruned Problem Transformation (PPT) is a multi-class transformation that
#' removes the less common classes to predict multi-label data.
#'
#' @family Transformation methods
#' @family Powerset
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param p The pruning threshold. All labelsets that occur p times or fewer in
#' the training data are removed. (Default: 3)
#' @param info.loss Logical value where \code{TRUE} means discard infrequent
#' labelsets and \code{FALSE} means reintroduce infrequent labelsets via
#' subsets. (Default: FALSE)
#' @param ... Other arguments passed to the base algorithm for all subproblems
#' @param cores Not used
#' @param seed An optional integer used to set the seed. (Default:
#' \code{options("utiml.seed", NA)})
#' @return An object of class \code{PPTmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{labels}{A vector with the label names.}
#' \item{model}{A LP model contained only the most common labelsets.}
#' }
#' @references
#' Read, J., Pfahringer, B., & Holmes, G. (2008). Multi-label classification
#' using ensembles of pruned sets. In Proceedings - IEEE International
#' Conference on Data Mining, ICDM (pp. 995-1000).
#' Read, J. (2008). A pruned problem transformation method for multi-label
#' classification. In Proceedings of the New Zealand Computer Science
#' Research Student Conference (pp. 143-150).
#' @export
#'
#' @examples
#' model <- ppt(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' ##Change default configurations
#' model <- ppt(toyml, "RF", p=4, info.loss=TRUE)
#' }
ppt <- function (mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"),
p = 3, info.loss = FALSE, ...,
cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
if (p < 1) {
stop("The prunning value must be greater than 0")
}
# PPT Model class
pptmodel <- list(labels = rownames(mdata$labels),
p = p,
info.loss = info.loss,
call = match.call())
common.labelsets <- names(which(mdata$labelsets > p))
if (length(common.labelsets) == 0) {
stop(paste("All labelsets appear less than", p,
"time(s) in the training data."))
}
instances <- apply(mdata$dataset[, mdata$labels$index], 1, paste, collapse='')
original.instances <- instances %in% common.labelsets
if (info.loss || all(original.instances)) {
    #Discard instances (information loss)
ndata <- create_subset(mdata, which(original.instances))
} else {
#No information loss
    #TODO refactor this, it is too ugly
labelsets <- lapply(common.labelsets, function (x) {
as.numeric(unlist(strsplit(x, '')))
})
#Sort by the number of labels and then for frequency
labelsets <- labelsets[rev(order(unlist(lapply(labelsets, sum))))]
rem.inst <- which(!original.instances)
Si <- mdata$dataset[rem.inst, mdata$labels$index]
has.match <- do.call(cbind, lapply(labelsets, function (ls) {
colSums(ls == 1 & ls == t(Si)) == sum(ls)
}))
rm(Si)
inst.lab <- lapply(lapply(split(has.match,seq(nrow(has.match))),which),
function (lbls){
selected <- c()
if (length(lbls) > 0) {
selected <- lbls[1]
value <- labelsets[[lbls[1]]]
for (x in lbls[-1]) {
the.new <- utiml_ifelse(any(value + labelsets[[x]] > 1), NULL, x)
value <- utiml_ifelse(is.null(the.new), value, value + labelsets[[x]])
selected <- c(selected, the.new)
}
}
selected
})
rm(has.match)
ndata <- merge_pruned_instances(mdata, rem.inst, inst.lab,
labelsets)
}
pptmodel$model <- lp(ndata, base.algorithm=base.algorithm, seed=seed)
class(pptmodel) <- "PPTmodel"
pptmodel
}
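# Illustrative sketch (not part of the original utiml API; the function name is
# hypothetical): the pruning step keeps only labelsets that occur more than p
# times, mirroring the use of mdata$labelsets above.
utiml_sketch_frequent_labelsets <- function(labelset.counts, p = 3) {
  names(which(labelset.counts > p))
}
# e.g. utiml_sketch_frequent_labelsets(toyml$labelsets, p = 3)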
#' Predict Method for Pruned Problem Transformation
#'
#' This function predicts values based upon a model trained by
#' \code{\link{ppt}}.
#'
#' @param object Object of class '\code{PPTmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores Not used
#' @param seed An optional integer used to set the seed. (Default:
#' \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=ppt]{Pruned Problem Transformation (PPT)}}
#' @export
#'
#' @examples
#' model <- ppt(toyml, "RANDOM")
#' pred <- predict(model, toyml)
predict.PPTmodel <- function(object, newdata,
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "PPTmodel")) {
stop("First argument must be a PPTmodel object")
}
predict.LPmodel(object$model, newdata, probability, ..., seed=seed)
}
#' Print PPT model
#' @param x The ppt model
#' @param ... ignored
#'
#' @return No return value, called to print the model's details
#'
#' @export
print.PPTmodel <- function(x, ...) {
cat("Pruned Problem Transformation Model\n\nCall:\n")
print(x$call)
cat("\nPrune:", x$p, "\n")
cat("Information loss:", ifelse(x$info.loss, "yes", "no"), "\n")
cat("\n1 LP Model:", length(x$model$classes), "classes\n")
  print(cbind.data.frame(class=names(x$model$classes),
instances=as.numeric(x$model$classes)))
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_ppt.R
|
#' PruDent classifier for multi-label Classification
#'
#' Create a PruDent classifier to predict multi-label data. To do this, two
#' rounds of Binary Relevance are executed, such that the first iteration
#' generates new attributes to enrich the second prediction.
#'
#' In the second phase, only labels whose information gain is greater than a
#' specific phi value are added.
#'
#' @family Transformation methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param phi A value between 0 and 1 used as the information gain threshold. The
#' value 0 includes all labels in the second phase and the value 1 includes none.
#' (Default: 0)
#' @param ... Other arguments passed to the base algorithm for all subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{PruDentmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{labels}{A vector with the label names.}
#' \item{phi}{The value of \code{phi} parameter.}
#' \item{IG}{The matrix of Information Gain used in combination
#' with \code{phi} parameter to define the labels used in the second step.
#' }
#' \item{basemodel}{The BRModel used in the first iteration.}
#' \item{metamodels}{A list of models named by the label names used in the
#' second iteration.
#' }
#' }
#' @references
#' Alali, A., & Kubat, M. (2015). PruDent: A Pruned and Confident Stacking
#' Approach for Multi-Label Classification. IEEE Transactions on Knowledge
#' and Data Engineering, 27(9), 2480-2493.
#' @export
#'
#' @examples
#' model <- prudent(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Use different phi correlation with C5.0 classifier
#' model <- prudent(toyml, 'C5.0', 0.3)
#'
#' # Set a specific parameter
#' model <- prudent(toyml, 'KNN', k=5)
#' }
prudent <- function(mdata, base.algorithm = getOption("utiml.base.algorithm", "SVM"),
phi = 0, ..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
if (phi < 0 || phi > 1) {
stop("The phi threshold must be between 0 and 1, inclusive")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
# PruDent Model class
pdmodel <- list(
labels = rownames(mdata$labels),
call = match.call(),
IG = utiml_labels_IG(mdata),
phi = phi,
# 1 Iteration - Base Level
basemodel = br(mdata, base.algorithm, ..., cores = cores, seed = seed)
)
labeldata <- as.data.frame(mdata$dataset[mdata$labels$index])
for (i in seq(ncol(labeldata))) {
labeldata[, i] <- factor(labeldata[, i], levels=c(0, 1))
}
#base.preds <- as.matrix(mdata$dataset[mdata$labels$index])
# 2 Iteration - Meta level
IG <- matrix(pdmodel$IG >= phi,
ncol = ncol(pdmodel$IG), dimnames = dimnames(pdmodel$IG))
labels <- utiml_rename(pdmodel$labels)
pdmodel$metamodels <- utiml_lapply(labels, function(label) {
mmodel <- NULL
extracols <- labeldata[, which(IG[label,]), drop = FALSE]
if (ncol(extracols) > 0) {
nmcol <- paste("extra", colnames(extracols), sep = ".")
colnames(extracols) <- nmcol
base <- utiml_create_binary_data(mdata, label, extracols)
dataset <- utiml_prepare_data(base, "mldPruDent", mdata$name, "prudent",
base.algorithm, new.features = nmcol)
mmodel <- utiml_create_model(dataset, ...)
}
mmodel
}, cores, seed)
class(pdmodel) <- "PruDentmodel"
pdmodel
}
#' Predict Method for PruDent
#'
#' This function predicts values based upon a model trained by \code{prudent}.
#'
#' @param object Object of class '\code{PruDentmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=prudent]{PruDent}}
#' @export
#'
#' @examples
#' \donttest{
#' # Predict SVM scores
#' model <- prudent(toyml)
#' pred <- predict(model, toyml)
#'
#' # Predict SVM bipartitions
#' pred <- predict(model, toyml, probability = FALSE)
#'
#' # Passing a specific parameter for SVM predict algorithm
#' pred <- predict(model, toyml, na.action = na.fail)
#' }
predict.PruDentmodel <- function(object, newdata,
probability = getOption("utiml.use.probs",
TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "PruDentmodel")) {
stop("First argument must be an PruDentmodel object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
newdata <- utiml_newdata(newdata)
# 1 Iteration - Base level
base.scores <- predict.BRmodel(object$basemodel, newdata, TRUE, ...,
cores=cores, seed=seed)
base.preds <- as.bipartition(base.scores)
labeldata <- as.data.frame(base.preds)
for (i in seq(ncol(labeldata))) {
labeldata[,i] <- factor(labeldata[,i], levels=c(0, 1))
}
# 2 Iteration - Meta level
IG <- matrix(object$IG >= object$phi,
ncol = ncol(object$IG), dimnames = dimnames(object$IG))
labels <- utiml_rename(object$labels)
predictions <- utiml_lapply(labels, function(labelname) {
extracols <- labeldata[, which(IG[labelname,]), drop = FALSE]
if (ncol(extracols) > 0) {
colnames(extracols) <- paste("extra", colnames(extracols), sep = ".")
utiml_predict_binary_model(object$metamodels[[labelname]],
cbind(newdata, extracols), ...)
}
else {
utiml_binary_prediction(base.preds[, labelname], base.scores[, labelname])
}
}, cores, seed)
# Choosing the Final Classification
for (i in seq(predictions)) {
scores <- cbind(base = base.scores[,i],
meta = predictions[[i]]$probability)
baseinst <- apply(abs(0.5 - scores), 1, which.max) == 1
predictions[[i]]$probability[baseinst] <- base.scores[baseinst, i]
predictions[[i]]$bipartition[baseinst] <- base.preds[baseinst, i]
}
utiml_predict(predictions, probability)
}
# Calculate the Information Gain for each pair of labels
#
# @param mdata A mldr dataset containing the label information.
# @return A matrix where the rows and columns represents the labels.
# @references
# Alali, A., & Kubat, M. (2015). PruDent: A Pruned and Confident Stacking
# Approach for Multi-Label Classification. IEEE Transactions on Knowledge
# and Data Engineering, 27(9), 2480-2493.
utiml_labels_IG <- function (mdata) {
entropy <- function (prob) {
prob0 <- 1 - prob
ifelse(prob == 0 || prob == 1,
0, -prob * log2(prob) - prob0 * log2(prob0))
}
labelnames <- rownames(mdata$labels)
classes <- mdata$dataset[,mdata$labels$index]
q <- length(labelnames)
ig <- matrix(nrow = q, ncol = q, dimnames = list(labelnames, labelnames))
for (i in seq(q)) {
for (j in seq(q)) {
Hya <- entropy(mdata$labels$freq[i])
hasJ <- classes[j] == 1
Hyab <- mdata$labels$freq[j] *
entropy(sum(classes[hasJ, i] == 1) / sum(hasJ)) +
(1 - mdata$labels$freq[j]) *
entropy(sum(classes[classes[j] == 0, i] == 1) / sum(!hasJ))
ig[i,j] <- Hya - Hyab
ig[j,i] <- ig[i,j]
}
ig[i,i] <- 0
}
ig
}
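# Illustrative sketch (not part of the original utiml API; the function name is
# hypothetical): the information gain IG(i, j) = H(Yi) - H(Yi | Yj) for two
# binary label vectors, following the same entropy definition used in
# utiml_labels_IG above.
utiml_sketch_label_ig <- function(yi, yj) {
  entropy <- function(p) {
    ifelse(p == 0 | p == 1, 0, -p * log2(p) - (1 - p) * log2(1 - p))
  }
  pj <- mean(yj == 1)
  h.yi <- entropy(mean(yi == 1))
  h.cond <- pj * entropy(mean(yi[yj == 1] == 1)) +
    (1 - pj) * entropy(mean(yi[yj == 0] == 1))
  h.yi - h.cond
}
# e.g. utiml_sketch_label_ig(c(1, 1, 0, 0), c(1, 1, 0, 0)) is 1 (yj determines yi)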
#' Print PruDent model
#' @param x The prudent model
#' @param ... ignored
#'
#' @return No return value, called to print the model's details
#'
#' @export
print.PruDentmodel <- function(x, ...) {
cat("Classifier PruDent\n\nCall:\n")
print(x$call)
cat("\nMeta models:", length(x$metamodels), "\n")
cat("\nPhi:", x$phi, "\n")
cat("\nInformation Gain Table Overview:\n")
corr <- x$IG
diag(corr) <- NA
tbl <- data.frame(
min = apply(corr, 1, min, na.rm = TRUE),
mean = apply(corr, 1, mean, na.rm = TRUE),
median = apply(corr, 1, stats::median, na.rm = TRUE),
max = apply(corr, 1, max, na.rm = TRUE),
extra = apply(x$IG, 1, function(row) sum(row > x$phi))
)
print(tbl)
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_prudent.R
|
#' Pruned Set for multi-label Classification
#'
#' Create a Pruned Set model for multilabel classification.
#'
#' Pruned Set (PS) is a multi-class transformation that removes the less common
#' classes to predict multi-label data.
#'
#' @family Transformation methods
#' @family Powerset
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param p The pruning threshold. All labelsets that occur p times or fewer in
#' the training data are removed. (Default: 3)
#' @param strategy The strategy (A or B) for processing infrequent labelsets.
#' (Default: A).
#' @param b The number used by the strategy for processing infrequent labelsets.
#' @param ... Other arguments passed to the base algorithm for all subproblems.
#' @param cores Not used
#' @param seed An optional integer used to set the seed. (Default:
#' \code{options("utiml.seed", NA)})
#' @return An object of class \code{PSmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{labels}{A vector with the label names.}
#' \item{model}{A LP model contained only the most common labelsets.}
#' }
#' @references
#' Read, J., Pfahringer, B., & Holmes, G. (2008). Multi-label classification
#' using ensembles of pruned sets. In Proceedings - IEEE International
#' Conference on Data Mining, ICDM (pp. 995-1000).
#' @export
#'
#' @examples
#' model <- ps(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' ##Change default configurations
#' model <- ps(toyml, "RF", p=4, strategy="B", b=1)
#' }
ps <- function (mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"),
p = 3, strategy = c("A", "B"), b = 2, ...,
cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
if (p < 1) {
stop("The prunning value must be greater than 0")
}
strategy <- match.arg(strategy)
if (b < 0) {
stop("The parameter b must be greater or equal than 0")
}
# PS Model class
psmodel <- list(labels = rownames(mdata$labels),
p = p,
strategy = strategy,
b = b,
call = match.call())
common.labelsets <- names(which(mdata$labelsets > p))
if (length(common.labelsets) == 0) {
stop(paste("All labelsets appear less than", p,
"time(s) in the training data."))
}
instances <- apply(mdata$dataset[, mdata$labels$index], 1, paste, collapse='')
original.instances <- instances %in% common.labelsets
rem.inst <- which(!original.instances)
labelsets <- lapply(common.labelsets, function (x) {
as.numeric(unlist(strsplit(x, '')))
})
#Sort by the number of labels and then for frequency
labelsets <- labelsets[rev(order(unlist(lapply(labelsets, sum))))]
if (strategy == "B") {
#Strategy B: use only subsets of size greater than b
labelsets <- labelsets[unlist(lapply(labelsets, sum)) > b]
b <- length(labelsets)
if (b == 0) {
stop("There is no labelsets greater than the b value")
}
}
if (length(rem.inst) == 0) {
ndata <- mdata
} else {
Si <- mdata$dataset[rem.inst, mdata$labels$index]
has.match <- do.call(cbind, lapply(labelsets, function (ls) {
colSums(ls == 1 & ls == t(Si)) == sum(ls)
}))
rm(Si)
inst.lab <- lapply(
lapply(split(has.match,seq(nrow(has.match))),which),
function (lbls){
utiml_ifelse(length(lbls) > 0, c(lbls[seq(min(length(lbls), b))]), c())
}
)
rm(has.match)
ndata <- merge_pruned_instances(mdata, rem.inst, inst.lab, labelsets)
}
psmodel$model <- lp(ndata, base.algorithm=base.algorithm, seed=seed)
class(psmodel) <- "PSmodel"
psmodel
}
merge_pruned_instances <- function (mdata, removed.instances,
inst.lab, labelsets) {
#Remove instances without labelsets
inst.idx <- which(unlist(lapply(inst.lab, length)) > 0)
#Create the new labelsets data
new.labelsets <- do.call(rbind, labelsets[unlist(inst.lab[inst.idx])])
colnames(new.labelsets) <- rownames(mdata$labels)
#Select the rows that will be modified
rows <- rep(removed.instances[inst.idx],
unlist(lapply(inst.lab[inst.idx], length)))
mldr::mldr_from_dataframe(
rbind(
#Original instances
mdata$dataset[-removed.instances,
c(mdata$attributesIndexes, mdata$labels$index)],
#Rejected instances
cbind.data.frame(
mdata$dataset[rows, mdata$attributesIndexes], new.labelsets
)
), seq(mdata$measures$num.inputs + 1, mdata$measures$num.attributes),
name = mdata$name
)
}
#' Predict Method for Pruned Set Transformation
#'
#' This function predicts values based upon a model trained by
#' \code{\link{ps}}.
#'
#' @param object Object of class '\code{PSmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores Not used
#' @param seed An optional integer used to set the seed. (Default:
#' \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=ps]{Pruned Set (PS)}}
#' @export
#'
#' @examples
#' model <- ps(toyml, "RANDOM")
#' pred <- predict(model, toyml)
predict.PSmodel <- function(object, newdata,
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "PSmodel")) {
stop("First argument must be a PSmodel object")
}
predict.LPmodel(object$model, newdata, probability, ..., seed=seed)
}
#' Print PS model
#' @param x The ps model
#' @param ... ignored
#'
#' @return No return value, called to print the model's details
#'
#' @export
print.PSmodel <- function(x, ...) {
cat("Pruned Set Model\n\nCall:\n")
print(x$call)
cat("\nPrune:", x$p, "\n")
cat("Strategy:", x$strategy, "\n")
cat("B value:", x$b, "\n")
cat("\n1 LP Model:", length(x$model$classes), "classes\n")
  print(cbind.data.frame(class=names(x$model$classes),
instances=as.numeric(x$model$classes)))
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_ps.R
|
#' Random k-labelsets for multilabel classification
#'
#' Create a RAkEL model for multilabel classification.
#'
#' RAndom k labELsets is an ensemble of LP models in which each classifier is
#' trained with a small set of labels, called a labelset. Two different
#' strategies for constructing the labelsets are disjoint and overlapping
#' labelsets.
#'
#' @family Transformation methods
#' @family Powerset
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param k The number of labels used in each labelset. (Default: \code{3})
#' @param m The number of LP models. Used when overlapping is TRUE, otherwise it
#' is ignored. (Default: \code{2 * length(labels)})
#' @param overlapping Logical value, that defines if the method must overlapping
#' the labelsets. If FALSE the method uses disjoint labelsets.
#' (Default: \code{TRUE})
#' @param ... Other arguments passed to the base algorithm for all subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is running in parallel. (Default:
#' \code{options("utiml.seed", NA)})
#' @return An object of class \code{RAkELmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{labels}{A vector with the label names.}
#' \item{labelsets}{A list with the labelsets used to build the LP models.}
#' \item{model}{A list of the generated models, named by the label names.}
#' }
#' @references
#' Tsoumakas, G., Katakis, I., & Vlahavas, I. (2011). Random k-labelsets for
#' multilabel classification. IEEE Transactions on Knowledge and Data
#' Engineering, 23(7), 1079-1089.
#' @export
#'
#' @examples
#' model <- rakel(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#' \donttest{
#' ## SVM using k = 4 and m = 100
#' model <- rakel(toyml, "SVM", k=4, m=100)
#'
#' ## Random Forest using disjoint labelsets
#' model <- rakel(toyml, "RF", overlapping=FALSE)
#' }
rakel <- function (mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"),
k = 3, m = 2 * mdata$measures$num.labels, overlapping = TRUE,
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
# RAkEL Model class
rkmodel <- list(
labels = rownames(mdata$labels),
overlapping = overlapping,
k = k,
m = ifelse(overlapping, m, ceiling(mdata$measures$num.labels / k)),
labelsets = list(),
call = match.call()
)
if (!anyNA(seed)) {
set.seed(seed)
}
if (overlapping) {
    #RAkEL overlapping
rkmodel$labelsets <- lapply(seq(rkmodel$m), function(i) {
sample(rkmodel$labels, k)
})
#TODO validate if all labels are used
} else {
#RAkEL disjoint
labels <- rkmodel$labels
for (i in seq(rkmodel$m)) {
labelset <- sample(labels, min(k, length(labels)))
rkmodel$labelsets[[length(rkmodel$labelsets) + 1]] <- labelset
labels <- setdiff(labels, labelset)
}
}
lbl.index <- mdata$measures$num.inputs
rkmodel$models <- utiml_lapply(rkmodel$labelsets, function (labels) {
data <- mldr::mldr_from_dataframe(
cbind(mdata$dataset[mdata$attributesIndexes], mdata$dataset[labels]),
seq(lbl.index + 1, lbl.index + length(labels)),
name = mdata$name
)
lp(data, base.algorithm = base.algorithm, ...)
}, cores, seed)
class(rkmodel) <- "RAkELmodel"
rkmodel
}
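# Illustrative sketch (not part of the original utiml API; the function name is
# hypothetical): the overlapping strategy simply draws m random k-labelsets from
# the label names, exactly as done in the loop above; labels may repeat across
# labelsets.
utiml_sketch_rakel_labelsets <- function(labels, k = 3, m = 2 * length(labels)) {
  lapply(seq(m), function(i) sample(labels, k))
}
# e.g. utiml_sketch_rakel_labelsets(rownames(toyml$labels), k = 3, m = 10)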
#' Predict Method for RAkEL
#'
#' This function predicts values based upon a model trained by
#' \code{\link{rakel}}.
#'
#' @param object Object of class '\code{RAkELmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the prediction. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=rakel]{Random k Labelsets (RAkEL)}}
#' @export
#'
#' @examples
#' model <- rakel(toyml, "RANDOM")
#' pred <- predict(model, toyml)
predict.RAkELmodel <- function(object, newdata,
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "RAkELmodel")) {
stop("First argument must be a RAkELmodel object")
}
previous.value <- getOption("utiml.empty.prediction")
options(utiml.empty.prediction = TRUE)
newdata <- utiml_newdata(newdata)
results <- utiml_lapply(object$models, function (lpmodel){
predict.LPmodel(lpmodel, newdata)
}, cores, seed)
if (object$overlapping) {
nvotes <- as.numeric(table(unlist(object$labelsets))[object$labels])
votes <- matrix(0, nrow=nrow(newdata), ncol=length(nvotes),
dimnames = list(rownames(newdata), object$labels))
for (result in results) {
votes[, colnames(result)] <- votes[, colnames(result)] +
as.bipartition(result)
}
prediction <- as.mlresult(t(t(votes) / nvotes), probability, threshold=0.5)
rm(votes, nvotes)
} else {
prediction <- multilabel_prediction(
do.call(cbind, lapply(results, as.bipartition))[,object$labels],
do.call(cbind, lapply(results, as.probability))[,object$labels],
probability
)
}
rm(results)
options(utiml.empty.prediction = previous.value)
prediction
}
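# Illustrative sketch (not part of the original utiml API; the function name is
# hypothetical): in the overlapping mode the score of each label is the fraction
# of its models that voted positive, i.e. the vote matrix divided column-wise by
# the number of models that include each label, as done above before thresholding
# at 0.5.
utiml_sketch_rakel_votes <- function(votes, models.per.label) {
  t(t(votes) / models.per.label)
}
# e.g. utiml_sketch_rakel_votes(matrix(c(2, 1, 0, 3), ncol = 2), c(3, 4))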
#' Print RAkEL model
#' @param x The rakel model
#' @param ... ignored
#'
#' @return No return value, called to print the model's details
#'
#' @export
print.RAkELmodel <- function(x, ...) {
cat("RAkEL",ifelse(x$overlapping, "Overlapping", "Disjoint"), "Model")
cat("\n\nCall:\n")
print(x$call)
cat("\nLabelsets size:",x$k,"\n")
cat(x$m, "LP Models. Labelsets:\n")
print(do.call(rbind, lapply(x$labelsets, function (v) {
length(v) <- x$k
v
})))
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_rakel.R
|
#' Recursive Dependent Binary Relevance (RDBR) for multi-label Classification
#'
#' Create a RDBR classifier to predict multi-label data. This is a recursive
#' approach that enables the binary classifiers to discover existing label
#' dependency by themselves. The idea of RDBR is to run DBR recursively until
#' the results stabilize.
#'
#' The training method is exactly the same as DBR; the recursion happens in the
#' predict method.
#'
#' @family Transformation methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param estimate.models Logical value indicating whether it is necessary to
#' build a Binary Relevance classifier for the estimation process. The default
#' implementation uses BR as the estimator; however, when another classifier is
#' preferred, use the value \code{FALSE} to skip this process. (Default: \code{TRUE}).
#' @param ... Other arguments passed to the base algorithm for all subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{RDBRmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{labels}{A vector with the label names.}
#' \item{estimation}{The BR model to estimate the values for the labels.
#' Only when the \code{estimate.models = TRUE}.}
#' \item{models}{A list of final models named by the label names.}
#' }
#' @references
#' Rauber, T. W., Mello, L. H., Rocha, V. F., Luchi, D., & Varejao, F. M.
#' (2014). Recursive Dependent Binary Relevance Model for Multi-label
#' Classification. In Advances in Artificial Intelligence - IBERAMIA, 206-217.
#' @seealso \code{\link[=dbr]{Dependent Binary Relevance (DBR)}}
#' @export
#'
#' @examples
#' model <- rdbr(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Use Random Forest as base algorithm and 2 cores
#' model <- rdbr(toyml, 'RF', cores = 2, seed = 123)
#' }
rdbr <- function(mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"),
estimate.models = TRUE, ...,
cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
rdbrmodel <- dbr(mdata, base.algorithm, estimate.models, ...,
cores=cores, seed=seed)
class(rdbrmodel) <- "RDBRmodel"
rdbrmodel
}
#' Predict Method for RDBR
#'
#' This function predicts values based upon a model trained by \code{rdbr}.
#' In general this method is a recursive version of
#' \code{\link{predict.DBRmodel}}.
#'
#' Two versions of the update strategy for the estimated labels are implemented.
#' The batch version re-estimates the labels only when a complete current label
#' vector is available. The stochastic version uses re-estimated labels as soon
#' as they become available. The second version does not support parallelizing
#' the prediction; however, it stabilizes earlier than the batch mode.
#' @param object Object of class '\code{RDBRmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param estimative A matrix containing the bipartition result of other
#' multi-label classification algorithm or an mlresult object with the
#' predictions.
#' @param max.iterations The maximum allowed iterations of the RDBR technique.
#' (Default: 5)
#' @param batch.mode Logical value determining whether to use the batch
#'  re-estimation. If \code{FALSE}, the stochastic re-estimation strategy is used.
#' (Default: \code{FALSE})
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Others arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @references
#' Rauber, T. W., Mello, L. H., Rocha, V. F., Luchi, D., & Varejao, F. M.
#' (2014). Recursive Dependent Binary Relevance Model for Multi-label
#' Classification. In Advances in Artificial Intelligence - IBERAMIA, 206-217.
#' @seealso \code{\link[=rdbr]{Recursive Dependent Binary Relevance (RDBR)}}
#' @export
#'
#' @examples
#' \donttest{
#' # Predict SVM scores
#' model <- rdbr(toyml)
#' pred <- predict(model, toyml)
#'
#' # Passing a specific parameter to the SVM predict algorithm
#' pred <- predict(model, toyml, na.action = na.fail)
#'
#' # Use the batch mode and increase the max number of iteration to 10
#' pred <- predict(model, toyml, max.iterations = 10, batch.mode = TRUE)
#'
#' # Using another classifier (EBR) to make the label estimates
#' estimative <- predict(ebr(toyml), toyml, probability = FALSE)
#' model <- rdbr(toyml, estimate.models = FALSE)
#' pred <- predict(model, toyml, estimative = estimative)
#' }
predict.RDBRmodel <- function(object, newdata, estimative = NULL,
max.iterations = 5, batch.mode = FALSE,
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "RDBRmodel")) {
stop("First argument must be an RDDBRmodel object")
}
if (is.null(object$estimation) && is.null(estimative)) {
stop("The model requires an estimative matrix")
}
if (max.iterations < 1) {
stop("The number of iteractions must be positive")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
if (!anyNA(seed)) {
set.seed(seed)
}
newdata <- utiml_newdata(newdata)
if (is.null(estimative)) {
estimative <- predict.BRmodel(object$estimation, newdata, FALSE, ...,
cores=cores, seed=seed)
}
if (is(estimative, 'mlresult')) {
estimative <- as.bipartition(estimative)
}
estimative <- as.data.frame(estimative)
for (i in seq(ncol(estimative))) {
estimative[,i] <- factor(estimative[,i], levels=c(0, 1))
}
labels <- names(object$models)
modelsindex <- utiml_rename(seq(labels), labels)
if (batch.mode) {
for (i in seq(max.iterations)) {
old.estimative <- estimative
predictions <- utiml_lapply(modelsindex, function(li) {
utiml_predict_binary_model(object$models[[li]],
cbind(newdata, estimative[, -li]), ...)
}, cores, seed)
for (j in seq(predictions)) {
classes <- predictions[[j]]$bipartition
estimative[, j] <- factor(classes, levels=c(0, 1))
}
if (all(old.estimative == estimative)) {
break
}
}
}
else {
for (i in seq(max.iterations)) {
old.estimative <- estimative
predictions <- list()
      # the labels need to be shuffled in each iteration
for (li in sample(modelsindex)) {
predictions[[li]] <- utiml_predict_binary_model(object$models[[li]],
cbind(newdata, estimative[, -li]),
...)
estimative[, li] <- factor(predictions[[li]]$bipartition, levels=c(0,1))
}
names(predictions) <- labels
if (all(old.estimative == estimative)) {
break
}
}
}
utiml_predict(predictions, probability)
}
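# Illustrative sketch (not run) contrasting the two update strategies described
# in the documentation above; it assumes the toyml dataset shipped with utiml:
#
#   model <- rdbr(toyml, "RANDOM")
#
#   # Batch mode: labels are re-estimated only after a complete iteration
#   pred.batch <- predict(model, toyml, batch.mode = TRUE, max.iterations = 10)
#
#   # Stochastic mode (default): each re-estimated label is used immediately,
#   # which usually stabilizes earlier but cannot be parallelized
#   pred.stoc <- predict(model, toyml, batch.mode = FALSE)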
#' Print RDBR model
#' @param x The rdbr model
#' @param ... ignored
#'
#' @return No return value, called to print the model's details
#'
#' @export
print.RDBRmodel <- function(x, ...) {
cat("Classifier RDBR\n\nCall:\n")
print(x$call)
cat("\n", length(x$models), "Models (labels):\n")
print(names(x$models))
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_rdbr.R
|
#' Ranking by Pairwise Comparison (RPC) for multi-label Classification
#'
#' Create a RPC model for multilabel classification.
#'
#' RPC is a simple transformation method that uses pairwise classification to
#' predict multi-label data. This is based on the one-versus-one approach,
#' building a specific model for each pair of labels.
#'
#' @family Transformation methods
#' @family Pairwise methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param ... Others arguments passed to the base algorithm for all subproblems
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{RPCmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{labels}{A vector with the label names.}
#' \item{models}{A list of the generated models, named by the label names.}
#' }
#' @references
#' Hullermeier, E., Furnkranz, J., Cheng, W., & Brinker, K. (2008).
#' Label ranking by learning pairwise preferences. Artificial Intelligence,
#' 172(16-17), 1897-1916.
#' @export
#'
#' @examples
#' model <- rpc(toyml, "RANDOM")
#' pred <- predict(model, toyml)
rpc <- function(mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"), ...,
cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
# RPC Model class
rpcmodel <- list(labels = rownames(mdata$labels), call = match.call())
# Create models
labels <- utils::combn(rpcmodel$labels, 2, simplify=FALSE)
names(labels) <- unlist(lapply(labels, paste, collapse=','))
rpcmodel$models <- utiml_lapply(labels, function (pairwise) {
utiml_create_model(
utiml_prepare_data(
utiml_create_pairwise_data(mdata, pairwise[1], pairwise[2]),
"mldRPC", mdata$name, "rpc", base.algorithm,
label1=pairwise[1], label2=pairwise[2]
), ...
)
}, cores, seed)
class(rpcmodel) <- "RPCmodel"
rpcmodel
}
#' Predict Method for RPC
#'
#' This function predicts values based upon a model trained by
#' \code{\link{rpc}}.
#'
#' @param object Object of class '\code{RPCmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Others arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @seealso \code{\link[=rpc]{Ranking by Pairwise Comparison (RPC)}}
#' @export
#'
#' @examples
#' model <- rpc(toyml, "RANDOM")
#' pred <- predict(model, toyml)
predict.RPCmodel <- function(object, newdata,
probability = getOption("utiml.use.probs", TRUE),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(object, "RPCmodel")) {
stop("First argument must be an RPCmodel object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
# Create models
newdata <- utiml_newdata(newdata)
labels <- utiml_rename(object$labels)
predictions <- utiml_lapply(object$models, utiml_predict_binary_model,
newdata = newdata, ..., cores, seed)
# Compute votes
labels <- utils::combn(object$labels, 2, simplify=FALSE)
votes <- matrix(0, ncol=length(object$labels), nrow=nrow(newdata),
dimnames = list(rownames(newdata), object$labels))
for (i in seq(labels)) {
votes[,labels[[i]]] <- votes[,labels[[i]]] +
cbind(predictions[[i]]$bipartition, 1 - predictions[[i]]$bipartition)
}
as.mlresult(votes / length(object$labels), probability)
}
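# Worked sketch of the vote accumulation above, assuming three labels (A, B, C)
# and a single instance. Each pairwise model outputs 1 when the first label of
# the pair wins and 0 otherwise:
#
#   pair (A,B) -> 1 : A += 1, B += 0
#   pair (A,C) -> 1 : A += 1, C += 0
#   pair (B,C) -> 0 : B += 0, C += 1
#
#   raw votes: A = 2, B = 0, C = 1
#   scores (votes / number of labels): A = 0.67, B = 0.00, C = 0.33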
#' Print RPC model
#' @param x The rpc model
#' @param ... ignored
#'
#' @return No return value, called to print the model's details
#'
#' @export
print.RPCmodel <- function(x, ...) {
cat("RPC Model\n\nCall:\n")
print(x$call)
cat("\n", length(x$models), " pairwise models\n", sep='')}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/method_rpc.R
|
#' Fix the mldr dataset to use factors
#'
#' @param mdata A mldr dataset.
#'
#' @return A mldr object
#' @export
#'
#' @examples
#' toyml <- mldata(toyml)
mldata <- function (mdata) {
# Change character attributes to factors
  attrs <- mdata$attributesIndexes[
    sapply(mdata$dataset[, mdata$attributesIndexes], class) == "character"
  ]
  if (length(attrs) > 0) {
    mdata$dataset[, attrs] <- as.data.frame(
      apply(mdata$dataset[, attrs, drop = FALSE], 2, as.factor)
    )
  }
mdata
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/mldr.R
|
#' Convert a mlresult to a bipartition matrix
#'
#' @param mlresult The mlresult object
#' @return matrix with bipartition values
#' @export
as.bipartition <- function(mlresult) {
utiml_ifelse(is.bipartition(mlresult),
as.matrix(mlresult),
attr(mlresult, "classes"))
}
#' Convert a mlresult to matrix
#'
#' @param x The mlresult object
#' @param ... ignored
#' @return matrix
#' @export
as.matrix.mlresult <- function(x, ...) {
attr.name <- ifelse(attr(x, "type") == "bipartition", "probs", "classes")
only.expected <- x
attr(only.expected, attr.name) <- NULL
attr(only.expected, "type") <- NULL
class(only.expected) <- "matrix"
only.expected
}
#' Convert a matrix prediction into a multi-label prediction
#' @param predictions A matrix or data.frame containing the scores/probabilities
#'  values. The columns are the labels and the rows are the examples.
#' @param probability A logical value. If \code{TRUE} the predicted values are
#' the score between 0 and 1, otherwise the values are bipartition 0 or 1.
#' (Default: \code{TRUE})
#' @param ... ignored
#' @return An object of type mlresult
#' @export
#'
#' @examples
#' predictions <- matrix(runif(100), ncol = 10)
#' colnames(predictions) <- paste('label', 1:10, sep='')
#'
#' # Create a mlresult from a matrix
#' mlresult <- as.mlresult(predictions)
#' mlresult <- as.mlresult(predictions, probability = FALSE)
#' mlresult <- as.mlresult(predictions, probability = FALSE, threshold = 0.6)
#'
#' # Change the current type of a mlresult
#' mlresult <- as.mlresult(mlresult, probability = TRUE)
as.mlresult <- function(predictions, probability = TRUE, ...) {
UseMethod("as.mlresult")
}
#' @describeIn as.mlresult Default mlresult transform method
#' @param threshold A single value between 0 and 1 or a list with threshold
#'  values containing one value per label (Default: 0.5). Only used when the
#' predictions are not a mlresult.
#' @export
as.mlresult.default <- function (predictions, probability = TRUE, ...,
threshold = 0.5) {
predictions <- as.matrix(predictions)
as.mlresult.mlresult(fixed_threshold(predictions, threshold), probability)
}
#' @describeIn as.mlresult change the mlresult type
#' @export
as.mlresult.mlresult <- function (predictions, probability = TRUE, ...) {
bipartition <- as.bipartition(predictions)
probabilities <- as.probability(predictions)
multilabel_prediction(bipartition, probabilities, probability)
}
#' Convert a mlresult to a probability matrix
#'
#' @param mlresult The mlresult object
#' @return matrix with probabilities values
#' @export
as.probability <- function(mlresult) {
utiml_ifelse(is.probability(mlresult),
as.matrix(mlresult),
attr(mlresult, "probs"))
}
#' Convert a mlresult to a ranking matrix
#'
#' @param mlresult The mlresult object
#' @param ties.method A character string specifying how ties are treated
#' (Default: "min"). see \code{\link{rank}} to more details.
#' @param ... Others parameters passed to the \code{\link{rank}} method.
#' @return matrix with ranking values
#' @export
as.ranking <- function (mlresult, ties.method = "min", ...) {
  #TODO check whether apply is used correctly here
t(apply(1 - as.probability(mlresult), 1, rank, ties = ties.method, ...))
}
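# Worked sketch: for an instance with probabilities y1 = 0.9, y2 = 0.2 and
# y3 = 0.6, the ranking is computed over 1 - prob (0.1, 0.8, 0.4), so the label
# with the highest score receives rank 1: y1 = 1, y2 = 3, y3 = 2.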
#' Test if a mlresult contains crisp values as default
#'
#' @param mlresult The mlresult object
#' @return logical value
#' @export
is.bipartition <- function(mlresult) {
attr(mlresult, "type") == "bipartition"
}
#' Test if a mlresult contains score values as default
#'
#' @param mlresult The mlresult object
#' @return logical value
#' @export
is.probability <- function(mlresult) {
attr(mlresult, "type") == "probability"
}
#' Create a mlresult object
#'
#' @param bipartitions The matrix of predictions (bipartition values),
#' only 0 and 1
#' @param probabilities The matrix of probability/confidence of a prediction,
#' between 0..1
#' @param probability A logical value. If \code{TRUE} the predicted values are
#' the score between 0 and 1, otherwise the values are bipartition 0 or 1.
#' (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param empty.prediction A logical value. If \code{TRUE} the predicted values
#' may contain instances without any positive label, otherwise at least one
#' label will be positive for each instance.
#' @return An object of type mlresult
#' @export
#' @examples
#' probs <- matrix(
#' runif(90), ncol=3, dimnames = list(1:30, c("y1", "y2", "y3"))
#' )
#' preds <- matrix(
#' as.numeric(probs > 0.5), ncol=3, dimnames = list(1:30, c("y1", "y2", "y3"))
#' )
#' multilabel_prediction(preds, probs)
multilabel_prediction <- function(bipartitions, probabilities,
probability = getOption("utiml.use.probs", TRUE),
empty.prediction =
getOption("utiml.empty.prediction", FALSE)) {
if (!empty.prediction) {
# At least one label is predict
poslab <- apply(probabilities, 1, which.max)
posinst <- rowSums(bipartitions) == 0
bipartitions[cbind(which(posinst), poslab[posinst])] <- 1
}
bipartitions <- as.matrix(bipartitions)
probabilities <- as.matrix(probabilities)
only.bipartitions <- bipartitions
only.probabilities <- probabilities
attr(probabilities, "classes") <- only.bipartitions
attr(probabilities, "type") <- "probability"
attr(bipartitions, "probs") <- only.probabilities
attr(bipartitions, "type") <- "bipartition"
class(probabilities) <- class(bipartitions) <- "mlresult"
utiml_ifelse(probability, probabilities, bipartitions)
}
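# Illustrative sketch (not run) of the empty-prediction correction above: an
# instance whose bipartition row is all zeros receives a positive value in the
# label with the highest probability.
#
#   probs <- matrix(c(0.3, 0.4, 0.2), nrow = 1,
#                   dimnames = list("i1", c("y1", "y2", "y3")))
#   bips  <- matrix(0, nrow = 1, ncol = 3,
#                   dimnames = list("i1", c("y1", "y2", "y3")))
#   res <- multilabel_prediction(bips, probs, probability = FALSE)
#   # as.bipartition(res)["i1", ] is c(y1 = 0, y2 = 1, y3 = 0)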
#' Print the mlresult
#' @param x The mlresult to print
#' @param ... Extra parameters for print method
#'
#' @return No return value, called to print a prediction result
#'
#' @export
print.mlresult <- function(x, ...) {
print(as.matrix(x), ...)
}
#' Filter a Multi-Label Result
#'
#' If column filter is performed, then the result will be a matrix. Otherwise,
#' the result will be a mlresult.
#'
#' @param mlresult A mlresult object
#' @param rowFilter A list of rows to filter
#' @param colFilter A list of columns to filter
#' @param ... Extra parameters to be used as the filter
#' @return mlresult or matrix. If column filter is performed, then the result
#' will be a matrix. Otherwise, the result will be a mlresult.
#' @export
`[.mlresult` <- function (mlresult, rowFilter = T, colFilter, ...) {
if (missing(colFilter)) {
bipartition <- as.bipartition(mlresult)
probability <- as.probability(mlresult)
multilabel_prediction(bipartition[rowFilter, , drop=FALSE],
probability[rowFilter, , drop=FALSE],
is.probability(mlresult))
} else {
as.matrix(mlresult)[rowFilter, colFilter, ...]
}
}
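# Illustrative sketch (not run): filtering only rows keeps the mlresult class,
# while selecting columns falls back to a plain matrix/vector.
#
#   pred <- predict(br(toyml, "RANDOM"), toyml)
#   pred[1:10, ]      # an mlresult with the first 10 instances
#   pred[1:10, "y1"]  # the plain values of label y1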
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/mlresult.R
|
#' Fill sparse dataset with 0 or '' values
#'
#' Transform a sparse dataset by filling NA values with 0 or '' based on the
#' column type. Text columns containing only numeric values are converted to
#' numeric.
#'
#' @family pre process
#' @param mdata The mldr dataset to be filled.
#' @return a new mldr object.
#' @export
#'
#' @examples
#' sparse.toy <- toyml
#' sparse.toy$dataset$ratt10[sample(100, 30)] <- NA
#' complete.toy <- fill_sparse_mldata(sparse.toy)
fill_sparse_mldata <- function(mdata) {
is.letter <- function(x) {
grepl("[[:alpha:]]", x)
}
attrs <- seq(mdata$measures$num.attributes)
new.cols <- lapply(mdata$dataset[, attrs], function(col) {
if (anyNA(col)) {
# Has NA value
if (is.numeric(col)) {
# Numeric value - fill with 0
col[is.na(col)] <- 0
}
else if (any(is.letter(col))) {
# Text value - fill with ''
col <- as.character(col)
col[is.na(col)] <- ""
}
else {
# Text but with numeric values - convert to numeric and fill with 0
col <- as.numeric(as.character(col))
col[is.na(col)] <- 0
}
}
col
})
dataset <- data.frame(row.names = rownames(mdata$dataset))
dataset <- cbind(dataset, new.cols)
mldr::mldr_from_dataframe(dataset, mdata$labels$index, name = mdata$name)
}
#' Normalize numerical attributes
#'
#' Normalize all numerical attributes to values between 0 and 1. The highest
#' value is changed to 1 and the lowest value to 0.
#'
#' @family pre process
#' @param mdata The mldr dataset to be normalized.
#' @return a new mldr object.
#' @export
#'
#' @examples
#' norm.toy <- normalize_mldata(toyml)
normalize_mldata <- function(mdata) {
data <- mdata$dataset[seq(mdata$measures$num.attributes)]
for (col in mdata$attributesIndexes) {
if (is.numeric(data[, col])) {
data[col] <- utiml_normalize(data[col])
}
}
mldr::mldr_from_dataframe(data, mdata$labels$index, name = mdata$name)
}
#' Remove attributes from the dataset
#'
#' Remove specified attributes generating a new multi-label dataset.
#'
#' @family pre process
#' @param mdata The mldr dataset to remove labels.
#' @param attributes Attributes indexes or attributes names to be removed.
#' @return a new mldr object.
#' @note If invalid attribute names or indexes are informed, they will be
#'  ignored.
#' @export
#'
#' @examples
#' toyml1 <- remove_attributes(toyml, c("iatt8","iatt9", "ratt10"))
#' toyml2 <- remove_attributes(toyml, 10)
remove_attributes <- function (mdata, attributes) {
if (mode(attributes) == "character") {
attributes <- which(colnames(mdata$dataset) %in% attributes)
}
use.attributes <- setdiff(seq(mdata$measures$num.attributes), attributes)
create_subset(mdata, seq(mdata$measures$num.instances), use.attributes)
}
#' Remove labels from the dataset
#'
#' Remove specified labels generating a new multi-label dataset.
#'
#' @family pre process
#' @param mdata The mldr dataset to remove labels.
#' @param labels Label indexes or label names to be removed.
#' @return a new mldr object.
#' @note If invalid label names or indexes are informed, they will be ignored.
#' @export
#'
#' @examples
#' toyml1 <- remove_labels(toyml, c("y1","y5"))
#' toyml2 <- remove_labels(toyml, c(11, 15))
remove_labels <- function (mdata, labels) {
if (mode(labels) == "character") {
labels <- mdata$labels[labels, "index"]
labels <- labels[!is.na(labels)]
}
else {
# Only labels index, not attributes index
labels <- mdata$labels$index[which(mdata$labels$index %in% labels)]
}
new.attrs <- setdiff(seq(mdata$measures$num.attributes), labels)
dataset <- mdata$dataset[new.attrs]
labels <- which(colnames(dataset) %in% rownames(mdata$labels))
if (length(labels) <= 1) {
stop("The pre process procedure result in a single label")
}
mldr::mldr_from_dataframe(dataset, labels, name = mdata$name)
}
#' Remove unique attributes
#'
#' Remove the attributes that have a single value for all instances. Empty and
#' NA values are considered different values.
#'
#' @family pre process
#' @param mdata The mldr dataset to remove.
#' @return a new mldr object.
#' @export
#'
#' @examples
#' alt.toy <- toyml
#' alt.toy$dataset$ratt10 <- mean(alt.toy$dataset$ratt10)
#' new.toy <- remove_unique_attributes(alt.toy)
remove_unique_attributes <- function(mdata) {
labelsIndexes <- c()
attributesIndexes <- c()
for (col in seq(mdata$measures$num.attributes)) {
if (col %in% mdata$labels$index) {
attributesIndexes <- c(attributesIndexes, col)
labelsIndexes <- c(labelsIndexes, length(attributesIndexes))
}
else {
if (length(unique(mdata$dataset[, col])) > 1) {
attributesIndexes <- c(attributesIndexes, col)
}
}
}
mldr::mldr_from_dataframe(mdata$dataset[attributesIndexes],
labelsIndexes,
name = mdata$name)
}
#' Remove examples without labels
#'
#' Remove the examples that do not have labels.
#'
#' @family pre process
#' @param mdata The mldr dataset to remove the instances.
#' @return a new mldr object.
#' @export
#'
#' @examples
#' new.toy <- remove_labels(toyml, c(12,14))
#' remove_unlabeled_instances(new.toy)
remove_unlabeled_instances <- function(mdata) {
labelset <- rep(0, mdata$measures$num.labels)
rows <- !apply(mdata$dataset[mdata$labels$index] == labelset, 1, all)
create_subset(mdata, rows)
}
#' Remove unusual or very common labels
#'
#' Remove the labels that have fewer positive or negative examples than a
#' specific threshold value.
#'
#' @family pre process
#' @param mdata The mldr dataset to remove the skewness labels.
#' @param t Threshold value. The minimum number of positive and negative
#'  examples per label.
#' @return a new mldr object.
#' @export
#'
#' @examples
#' remove_skewness_labels(toyml, 20)
remove_skewness_labels <- function(mdata, t = 1) {
labelsIndexes <- c()
for (col in mdata$labels$index) {
tbl <- table(mdata$dataset[col])
if (length(tbl) > 1 && all(tbl > t)) {
labelsIndexes <- c(labelsIndexes, col)
}
}
if (length(labelsIndexes) <= 1) {
stop("The pre process procedure result in a single label")
}
dataset <- mdata$dataset[sort(c(mdata$attributesIndexes, labelsIndexes))]
labels <- which(colnames(dataset) %in% rownames(mdata$labels))
mldr::mldr_from_dataframe(dataset, labels, name = mdata$name)
}
#' Replace nominal attributes
#'
#' Replace the nominal attributes by binary attributes.
#'
#' @family pre process
#' @param mdata The mldr dataset to remove.
#' @param ordinal.attributes Not used yet, but it will be used to specify which
#'  attributes need to be replaced.
#' @return a new mldr object.
#' @export
#'
#' @examples
#' new.toy <- toyml
#' new.column <- as.factor(sample(c("a","b","c"), 100, replace = TRUE))
#' new.toy$dataset$ratt10 <- new.column
#' head(replace_nominal_attributes(new.toy))
replace_nominal_attributes <- function(mdata, ordinal.attributes = list()) {
dataset <- data.frame(row.names = rownames(mdata$dataset))
labelIndexes <- c()
for (col in seq(mdata$measures$num.attributes)) {
if (is.numeric(mdata$dataset[, col])) {
dataset <- cbind(dataset, mdata$dataset[col])
if (col %in% mdata$labels$index) {
labelIndexes <- c(labelIndexes, ncol(dataset))
}
}
else {
column <- rep_nom_col(mdata$dataset[, col], colnames(mdata$dataset[col]))
dataset <- cbind(dataset, column)
}
}
mldr::mldr_from_dataframe(dataset, labelIndexes, name = mdata$name)
}
rep_nom_col <- function (column, column.name = "", type = 1) {
# TODO ordinal.attributes
column <- as.factor(column)
symbols <- levels(column)
result <- {}
for (i in seq(length(symbols) - type)) {
result <- cbind(result, as.double(column == symbols[i]))
}
names <- paste(column.name, symbols[seq(length(symbols) - type)], sep="_")
if (column.name != "") {
colnames(result) <- names
}
result
}
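# Worked sketch of the dummy coding above: a nominal column with levels
# c("a", "b", "c") and type = 1 produces two binary columns (the last level is
# dropped and represented implicitly):
#
#   rep_nom_col(factor(c("a", "c", "b")), "att")
#   #      att_a att_b
#   # [1,]     1     0
#   # [2,]     0     0
#   # [3,]     0     1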
rep_nom_attr <- function(sdata, include.last = TRUE) {
sdata <- as.data.frame(sdata)
dataset <- data.frame(row.names = rownames(sdata))
labelIndexes <- c()
cols <- seq(ifelse(include.last, ncol(sdata), ncol(sdata)-1))
for (col in cols) {
if (is.numeric(sdata[, col])) {
dataset <- cbind(dataset, sdata[col])
}
else {
column <- rep_nom_col(sdata[, col], colnames(sdata[col]))
dataset <- cbind(dataset, column)
}
}
if (!include.last) {
dataset <- cbind(dataset, sdata[ncol(sdata)])
}
dataset
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/pre_process.R
|
#' Create a holdout partition based on the specified algorithm
#'
#' This method creates multi-label datasets for train, test, validation or other
#' purposes, using the partition method defined in \code{method}. The number of
#' partitions is defined by the \code{partitions} parameter. Each instance is
#' used in exactly one partition.
#'
#' @family sampling
#' @param mdata A mldr dataset.
#' @param partitions A list of percentages or a single value. The sum of all
#'  values must not be greater than 1. If a single value is informed, its
#'  complement is used to generate the second partition. If two or
#'  more values are informed and their sum is lower than 1, the partitions
#' will be generated with the informed proportion. If partitions have names,
#' they are used to name the return. (Default: \code{c(train=0.7, test=0.3)}).
#' @param method The method to split the data. The default methods are:
#' \describe{
#' \item{random}{Split randomly the folds.}
#' \item{iterative}{Split the folds considering the labels proportions
#'    individually. A specific label may not occur in all
#'    folds.}
#' \item{stratified}{Split the folds considering the labelset proportions.}
#' }
#' You can also create your own partition method. See the note and example
#'  sections for more details. (Default: "random")
#' @return A list with at least two datasets sampled as specified in partitions
#' parameter.
#' @references Sechidis, K., Tsoumakas, G., & Vlahavas, I. (2011). On the
#' stratification of multi-label data. In Proceedings of the Machine
#' Learning and Knowledge Discovery in Databases - European Conference,
#' ECML PKDD (pp. 145-158).
#' @note To create your own split method, you need to build a function that
#'  receives a mldr object and a list with the proportions of examples in each
#'  fold and returns another list with the indexes of the elements for each fold.
#' @export
#'
#' @examples
#' dataset <- create_holdout_partition(toyml)
#' names(dataset)
#' ## [1] "train" "test"
#' #dataset$train
#' #dataset$test
#'
#' dataset <- create_holdout_partition(toyml, c(a=0.1, b=0.2, c=0.3, d=0.4))
#' names(dataset)
#' ## [1] "a" "b" "c" "d"
#'
#' sequencial_split <- function (mdata, r) {
#' S <- list()
#'
#' amount <- trunc(r * mdata$measures$num.instances)
#' indexes <- c(0, cumsum(amount))
#' indexes[length(r)+1] <- mdata$measures$num.instances
#'
#' S <- lapply(seq(length(r)), function (i) {
#' seq(indexes[i]+1, indexes[i+1])
#' })
#'
#' S
#' }
#' dataset <- create_holdout_partition(toyml, method="sequencial_split")
create_holdout_partition <- function (mdata,
partitions = c(train=0.7, test=0.3),
method = c("random", "iterative",
"stratified")) {
# Validations
if (!is(mdata,"mldr")) {
stop("First argument must be an mldr object")
}
if (sum(partitions) > 1) {
stop("The sum of partitions can not be greater than 1")
}
holdout.method <- utiml_validate_splitmethod(method[1])
partitions <- utiml_ifelse(length(partitions) == 1,
c(partitions, 1 - partitions),
partitions)
# Split data
folds <- do.call(holdout.method, list(mdata = mldata(mdata), r = partitions))
names(folds) <- names(partitions)
ldata <- lapply(folds, function (fold) {
create_subset(mdata, fold, mdata$attributesIndexes)
})
ldata
}
#' Create the k-folds partition based on the specified algorithm
#'
#' This method creates the kFoldPartition object, from which it is possible to
#' create the dataset partitions for train, test and, optionally, validation.
#'
#' @family sampling
#' @param mdata A mldr dataset.
#' @param k The number of desired folds. (Default: 10)
#' @param method The method to split the data. The default methods are:
#' \describe{
#' \item{random}{Split randomly the folds.}
#' \item{iterative}{Split the folds considering the labels proportions
#'   individually. A specific label may not occur in all
#'   folds.}
#' \item{stratified}{Split the folds considering the labelset
#' proportions.}
#' }
#' You can also create your own partition method. See the note and example
#'  sections for more details. (Default: "random")
#' @return An object of type kFoldPartition.
#' @references Sechidis, K., Tsoumakas, G., & Vlahavas, I. (2011). On the
#' stratification of multi-label data. In Proceedings of the Machine
#' Learning and Knowledge Discovery in Databases - European Conference,
#' ECML PKDD (pp. 145-158).
#' @note To create your own split method, you need to build a function that
#'  receives a mldr object and a list with the proportions of examples in each
#'  fold and returns another list with the indexes of the elements for each fold.
#' @seealso \link[=partition_fold]{How to create the datasets from folds}
#' @export
#'
#' @examples
#' k10 <- create_kfold_partition(toyml, 10)
#' k5 <- create_kfold_partition(toyml, 5, "stratified")
#'
#' sequencial_split <- function (mdata, r) {
#' S <- list()
#'
#' amount <- trunc(r * mdata$measures$num.instances)
#' indexes <- c(0, cumsum(amount))
#' indexes[length(r)+1] <- mdata$measures$num.instances
#'
#' S <- lapply(seq(length(r)), function (i) {
#' seq(indexes[i]+1, indexes[i+1])
#' })
#'
#' S
#' }
#' k3 <- create_kfold_partition(toyml, 3, "sequencial_split")
create_kfold_partition <- function (mdata,
k = 10,
method = c("random", "iterative",
"stratified")) {
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
if (k < 1 || k > mdata$measures$num.instances) {
stop("The k value is not valid")
}
kfold.method <- utiml_validate_splitmethod(method[1])
kf <- list(dataset = mdata, k = k)
kf$fold <- do.call(kfold.method, list(mdata = mldata(mdata), r = rep(1/k, k)))
class(kf) <- "kFoldPartition"
kf
}
#' Create a random subset of a dataset
#'
#' @family sampling
#' @param mdata A mldr dataset
#' @param instances The number of expected instances
#' @param attributes The number of expected attributes.
#' (Default: all attributes)
#' @param replacement A boolean value defining whether to sample with replacement.
#' (Default: FALSE)
#' @return A new mldr subset
#' @export
#'
#' @examples
#' small.toy <- create_random_subset(toyml, 10, 3)
#' medium.toy <- create_random_subset(toyml, 50, 5)
create_random_subset <- function(mdata, instances,
attributes = mdata$measures$num.inputs,
replacement = FALSE) {
if (instances > mdata$measures$num.instances) {
stop(paste("The expected number of instances is greater than ",
mdata$measures$num.instances))
}
if (attributes > mdata$measures$num.inputs) {
stop(paste("The expected number of attributes is greater than ",
mdata$measures$num.inputs))
}
rows <- sample(mdata$measures$num.instances, instances, replacement)
cols <- sample(mdata$attributesIndexes, attributes)
create_subset(mldata(mdata), rows, cols)
}
#' Create a subset of a dataset
#'
#' @family sampling
#' @param mdata A mldr dataset
#' @param rows A vector with the instances indexes (names or indexes).
#' @param cols A vector with the attributes indexes (names or indexes).
#' @return A new mldr subset
#' @note It is not necessary to specify the label attributes because they are
#'  included by default.
#' @export
#'
#' @examples
#' ## Create a dataset with the 20 first examples and the 7 first attributes
#' small.toy <- create_subset(toyml, seq(20), seq(7))
#'
#' ## Create a random dataset with 50 examples and 5 attributes
#' random.toy <- create_subset(toyml, sample(100, 50), sample(10, 5))
create_subset <- function(mdata, rows, cols = NULL) {
if (mode(cols) == "character") {
cols <- which(colnames(mdata$dataset[mdata$attributesIndexes]) %in% cols)
}
else if (is.null(cols)) {
cols <- mdata$attributesIndexes
}
else {
cols <- intersect(cols, seq(mdata$measures$num.attributes))
}
dataset <- mdata$dataset[rows, sort(unique(c(cols, mdata$labels$index)))]
labelIndexes <- which(colnames(dataset) %in% rownames(mdata$labels))
mldr::mldr_from_dataframe(dataset, labelIndices=labelIndexes, name=mdata$name)
}
#' Create the multi-label dataset from folds
#'
#' This is a simple way to use k-fold cross validation.
#'
#' @param kfold A \code{kFoldPartition} object obtained from use of the method
#' \link{create_kfold_partition}.
#' @param n The number of the fold to separate train and test subsets.
#' @param has.validation Logical value that indicates whether a validation
#'  dataset will be used. (Default: \code{FALSE})
#' @return A list containing the train and test mldr datasets:
#' \describe{
#'   \item{train}{The mldr dataset with train examples, which includes all
#'  examples except those in the test and validation samples}
#'   \item{test}{The mldr dataset with test examples, defined by the
#'  number of the fold}
#'   \item{validation}{Optional, only if \code{has.validation = TRUE}.
#'  The mldr dataset with validation examples}
#' }
#' @export
#'
#' @examples
#' folds <- create_kfold_partition(toyml, 10)
#'
#' # Using the first partition
#' dataset <- partition_fold(folds, 1)
#' names(dataset)
#' ## [1] "train" "test"
#'
#' # All iterations
#' for (i in 1:10) {
#' dataset <- partition_fold(folds, i)
#' #dataset$train
#' #dataset$test
#' }
#'
#' # Using 3 folds validation
#' dataset <- partition_fold(folds, 3, TRUE)
#' # dataset$train, dataset$test, #dataset$validation
partition_fold <- function(kfold, n, has.validation = FALSE) {
if (!is(kfold,"kFoldPartition")) {
stop("Second argument must be an 'kFoldPartition' object")
}
if (n < 1 || n > kfold$k) {
stop(cat("The 'n' value must be between 1 and", kfold$k))
}
folds <- kfold$fold[-n]
if (has.validation) {
    # Use the fold following n (wrapping around to the first) as the validation fold
v <- n %% kfold$k + 1
folds <- kfold$fold[-c(n, v)]
}
mdata <- kfold$dataset
ldata <- list(
train = create_subset(mdata, unlist(folds), mdata$attributesIndexes),
test = create_subset(mdata, kfold$fold[[n]], mdata$attributesIndexes)
)
if (has.validation) {
ldata$validation <- create_subset(mdata, kfold$fold[[v]],
mdata$attributesIndexes)
}
ldata
}
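# Illustrative sketch (not run) of a complete k-fold cross-validation loop built
# on create_kfold_partition() and partition_fold(); it assumes the toyml dataset
# and the br() and multilabel_evaluate() functions from this package:
#
#   kf <- create_kfold_partition(toyml, 5, "stratified")
#   results <- lapply(seq(5), function(i) {
#     ds <- partition_fold(kf, i)
#     model <- br(ds$train, "RANDOM")
#     multilabel_evaluate(ds$test, predict(model, ds$test), "F1")
#   })
#   # mean(unlist(results))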
# Internal methods -------------------------------------------------------------
# Return the name of split method and validate if it is valid
#
# @param method The method name
# @return The correct name of split method
utiml_validate_splitmethod <- function (method) {
DEFAULT.METHODS <- c("random", "iterative", "stratified")
method.name <- ifelse(method %in% DEFAULT.METHODS,
paste("utiml", method, "split", sep = "_"),
method)
if (!exists(method.name, mode = "function")) {
stop(paste("The partition method '", method.name,
"' is not a valid function", sep=''))
}
method.name
}
# Internal Iterative Stratification
#
# Create the indexes using the Iterative Stratification algorithm.
#
# @param mdata A mldr dataset.
# @param r Desired proportion of examples in each subset r1, . . . rk.
# @return A list with k disjoint indexes subsets S1, . . .Sk.
# @references Sechidis, K., Tsoumakas, G., & Vlahavas, I. (2011). On the
# stratification of multi-label data. In Proceedings of the Machine
# Learningand Knowledge Discovery in Databases - European Conference,
# ECML PKDD (pp. 145-158).
#
# @examples
# \donttest{
# # Create 3 partitions for train, validation and test
# indexes <- utiml_iterative_split(emotions, c(0.6,0.1,0.3))
#
# # Create a stratified 10-fold
# indexes <- utiml_iterative_split(emotions, rep(0.1,10))
# }
utiml_iterative_split <- function(mdata, r) {
D <- rownames(mdata$dataset)
S <- lapply(seq(length(r)), function(i) character())
# Calculate the desired number of examples at each subset
cj <- round(mdata$measures$num.instances * r)
dif <- mdata$measures$num.instances - sum(cj)
if (dif != 0) {
cj[seq(abs(dif))] <- cj[seq(abs(dif))] + utiml_ifelse(dif > 0, 1, -1)
}
# Calculate the desired number of examples of each label at each subset
cji <- trunc(sapply(mdata$labels$count, function(di) di * r))
colnames(cji) <- rownames(mdata$labels)
# Empty examples (without any labels)
empty.inst <- rowSums(mdata$dataset[, mdata$labels$index])
empty.inst <- as.character(which(empty.inst == 0))
if (length(empty.inst) > 0) {
D <- setdiff(D, empty.inst)
prop.empty <- ceiling(length(empty.inst) / length(r))
indexes <- rep(seq(length(r)), prop.empty)[seq(length(empty.inst))]
Dist <- split(empty.inst, indexes)
for (i in 1:length(Dist)) {
S[[i]] <- Dist[[i]]
cj[i] <- cj[i] - length(S[[i]])
}
}
  # Confirm that all instances have at least one label
D <- D[rowSums(mdata$dataset[D, mdata$labels$index]) > 0]
while (length(D) > 0) {
# Find the label with the fewest (but at least one) remaining examples,
    # Do not use apply because it sometimes returns a matrix
Dl <- lapply(mdata$labels$index, function(col) {
D[which(mdata$dataset[D, col] == 1)]
})
names(Dl) <- rownames(mdata$labels)
Di <- unlist(lapply(Dl, length))
l <- names(which.min(Di[Di > 0]))
for (ex in Dl[[l]]) {
      # Find the subset(s) with the largest number of desired examples for
      # this label, breaking ties by considering the largest total number of
      # desired examples
m <- which(cji[which.max(cji[, l]), l] == cji[, l])
if (length(m) > 1) {
m <- intersect(m, which(cj[m[which.max(cj[m])]] == cj))
if (length(m) > 1) m <- sample(m)[1]
}
S[[m]] <- c(S[[m]], ex)
D <- D[D != ex]
# Update desired number of examples
i <- which(mdata$dataset[ex, mdata$labels$index] == 1)
cji[m, i] <- cji[m, i] - 1
cj[m] <- cj[m] - 1
}
}
S <- lapply(S, function(fold) {
new.fold <- which(rownames(mdata$dataset) %in% fold)
names(new.fold) <- rownames(mdata$dataset[new.fold, ])
new.fold
})
S
}
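# Sketch of the bookkeeping above: for r = c(0.5, 0.5) and a label with 10
# positive examples, cji starts with 5 desired positives per fold; every time an
# example of that label is assigned to fold m, cji[m, label] and cj[m] are
# decremented, so the folds converge to the desired per-label proportions.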
# Random split of a dataset
#
# @param mdata A mldr dataset.
# @param r Desired proportion of examples in each subset r1, . . . rk.
# @return A list with k disjoint indexes subsets S1, . . .Sk.
#
# @examples
# \donttest{
# utiml_random_split(emotions, c(0.6, 0.2, 0.2))
# }
utiml_random_split <- function(mdata, r) {
index <- c()
amount <- round(mdata$measures$num.instances * r)
dif <- mdata$measures$num.instances - sum(amount)
for (i in seq(abs(dif))) {
amount[i] <- amount[i] + sign(dif)
}
for (i in seq(length(amount))) {
index <- c(index, rep(i, amount[i]))
}
S <- split(sample(seq(mdata$measures$num.instances)), index)
for (i in 1:length(S)) {
names(S[[i]]) <- rownames(mdata$dataset[S[[i]], ])
}
S
}
# Labelsets Stratification
# Create the indexes using the Labelsets Stratification approach.
#
# @param mdata A mldr dataset
# @param r Desired proportion of examples in each subset, r1, . . . rk
# @return A list with k disjoint indexes subsets S1, . . .Sk
# @references Sechidis, K., Tsoumakas, G., & Vlahavas, I. (2011). On the
# stratification of multi-label data. In Proceedings of the Machine
# Learningand Knowledge Discovery in Databases - European Conference,
# ECML PKDD (pp. 145-158).
#
# @examples
# \donttest{
# # Create 3 partitions for train, validation and test
# indexes <- utiml_stratified_split(emotions, c(0.6,0.1,0.3))
#
# # Create a stratified 10-fold
# indexes <- utiml_stratified_split(emotions, rep(0.1,10))
# }
utiml_stratified_split <- function(mdata, r) {
D <- sample(mdata$measures$num.instances)
S <- lapply(1:length(r), function(i) integer())
labelsets <- apply(mdata$dataset[, mdata$labels$index], 1, paste, collapse="")
# Calculate the desired number of examples of each labelset at each subset
cji.aux <- sapply(mdata$labelsets, function(di) di * r)
cji <- trunc(cji.aux)
dif <- cji.aux - cji
rest <- round(apply(dif, 1, sum))
for (ls in rev(names(mdata$labelsets))) {
s <- sum(dif[, ls])
if (s > 0) {
for (i in seq(s)) {
fold <- which.max(rest)
rest[fold] <- rest[fold] - 1
cji[fold, ls] <- cji[fold, ls] + 1
}
}
}
for (ex in D) {
ls <- labelsets[ex]
fold <- which.max(cji[, ls])
if (cji[fold, ls] > 0) {
S[[fold]] <- c(S[[fold]], ex)
cji[fold, ls] <- cji[fold, ls] - 1
}
}
for (i in seq(length(S))) {
names(S[[i]]) <- rownames(mdata$dataset[S[[i]], ])
}
S
}
#' Print a kFoldPartition object
#'
#' @param x The kFoldPartition object
#' @param ... ignored
#'
#' @return No return value, called to print the folds' details
#'
#' @export
print.kFoldPartition <- function (x, ...) {
cat("K Fold Partition", paste("(k = ",x$k,")", sep=''), "\n\n")
folds <- rbind(lapply(x$fold, length))
rownames(folds) <- c("Examples:")
colnames(folds) <- paste("Fold", seq(x$k), sep='_')
print(folds)
cat("\n")
for (i in seq(x$k)) {
cat(paste("Fold ", i, ":", sep=''), x$fold[[i]], "\n")
}
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/sampling.R
|
# FIXED ------------------------------------------------------------------------
#' Apply a fixed threshold in the results
#'
#' Transform a prediction matrix with scores/probabilities into a mlresult
#' applying a fixed threshold. A global fixed threshold can be used for all
#' labels or different fixed thresholds, one for each label.
#'
#' @family threshold
#' @param prediction A matrix with scores/probabilities where the columns
#' are the labels and the rows are the instances.
#' @param threshold A single value between 0 and 1 or a list with threshold
#'  values containing one value per label.
#' @param probability A logical value. If \code{TRUE} the predicted values are
#' the score between 0 and 1, otherwise the values are bipartition 0 or 1.
#' (Default: \code{FALSE})
#' @return A mlresult object.
#' @references
#' Al-Otaibi, R., Flach, P., & Kull, M. (2014). Multi-label Classification: A
#' Comparative Study on Threshold Selection Methods. In First International
#' Workshop on Learning over Multiple Contexts (LMCE) at ECML-PKDD 2014.
#' @export
#'
#' @examples
#' # Create a prediction matrix with scores
#' result <- matrix(
#' data = rnorm(9, 0.5, 0.2),
#' ncol = 3,
#' dimnames = list(NULL, c('lbl1', 'lb2', 'lb3'))
#' )
#'
#' # Use 0.5 as threshold
#' fixed_threshold(result)
#'
#' # Use a threshold for each label
#' fixed_threshold(result, c(0.4, 0.6, 0.7))
fixed_threshold <- function (prediction, threshold = 0.5, probability = FALSE) {
UseMethod("fixed_threshold")
}
#' @describeIn fixed_threshold Fixed Threshold for matrix or data.frame
#' @export
fixed_threshold.default <- function(prediction, threshold = 0.5,
probability = FALSE) {
if (length(threshold) == 1) {
threshold <- rep(threshold, ncol(prediction))
}
else if (length(threshold) != ncol(prediction)) {
stop(paste("The threshold values must be a single value or the same",
"number of labels"))
}
bipartition <- apply(t(prediction) >= threshold, 1, as.numeric)
dimnames(bipartition) <- dimnames(prediction)
multilabel_prediction(bipartition, prediction, probability)
}
#' @describeIn fixed_threshold Fixed Threshold for mlresult
#' @export
fixed_threshold.mlresult <- function (prediction, threshold = 0.5,
probability = FALSE) {
fixed_threshold.default(as.probability(prediction), threshold, probability)
}
# CARDINALITY -----------------------------------------------------------------
#' Threshold based on cardinality
#'
#' Find and apply the best threshold based on cardinality of training set.
#' The threshold is chosen based on how close the average predicted label
#' cardinality is to the average observed label cardinality.
#'
#' @family threshold
#' @param prediction A matrix or mlresult.
#' @param cardinality A real value of training dataset label cardinality, used
#' to define the threshold value.
#' @param probability A logical value. If \code{TRUE} the predicted values are
#' the score between 0 and 1, otherwise the values are bipartition 0 or 1.
#' (Default: \code{FALSE})
#' @return A mlresult object.
#' @references
#' Read, J., Pfahringer, B., Holmes, G., & Frank, E. (2011). Classifier chains
#' for multi-label classification. Machine Learning, 85(3), 333-359.
#' @export
#'
#' @examples
#' prediction <- matrix(runif(16), ncol = 4)
#' lcard_threshold(prediction, 2.1)
lcard_threshold <- function (prediction, cardinality, probability = FALSE) {
UseMethod("lcard_threshold")
}
#' @describeIn lcard_threshold Cardinality Threshold for matrix or data.frame
#' @export
lcard_threshold.default <- function (prediction, cardinality,
probability = FALSE) {
thresholds <- sort(unique(c(prediction)))
best <- which.min(abs(cardinality - sapply(thresholds, function (ts) {
mean(rowSums(prediction >= ts))
})))
fixed_threshold.default(prediction, thresholds[best], probability)
}
#' @describeIn lcard_threshold Cardinality Threshold for mlresult
#' @export
lcard_threshold.mlresult <- function (prediction, cardinality,
probability = FALSE) {
lcard_threshold.default(as.probability(prediction), cardinality, probability)
}
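# Worked sketch: given the prediction matrix
#   0.9 0.6
#   0.8 0.1
# and an observed cardinality of 1.5, the candidate thresholds are the unique
# scores (0.1, 0.6, 0.8, 0.9), whose predicted cardinalities are 2.0, 1.5, 1.0
# and 0.5; the threshold 0.6 is chosen because 1.5 is closest to the observed
# cardinality.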
# MCUT -------------------------------------------------------------------------
#' Maximum Cut Thresholding (MCut)
#'
#' The Maximum Cut (MCut) automatically determines a threshold for each instance
#' that selects a subset of labels with higher scores than the others. The
#' threshold is set to the middle of the largest interval between two
#' consecutive sorted scores.
#'
#' @family threshold
#' @param prediction A matrix or mlresult.
#' @param probability A logical value. If \code{TRUE} the predicted values are
#' the score between 0 and 1, otherwise the values are bipartition 0 or 1.
#' (Default: \code{FALSE})
#' @return A mlresult object.
#' @references
#' Largeron, C., Moulin, C., & Gery, M. (2012). MCut: A Thresholding Strategy
#' for Multi-label Classification. In 11th International Symposium, IDA 2012
#' (pp. 172-183).
#' @export
#'
#' @examples
#' prediction <- matrix(runif(16), ncol = 4)
#' mcut_threshold(prediction)
mcut_threshold <- function (prediction, probability = FALSE) {
UseMethod("mcut_threshold")
}
#' @describeIn mcut_threshold Maximum Cut Thresholding (MCut) method for matrix
#' @export
mcut_threshold.default <- function (prediction, probability = FALSE) {
result <- apply(prediction, 1, function (row) {
sorted.row <- sort(row, decreasing = TRUE)
difs <- unlist(lapply(seq(length(row)-1), function (i) {
sorted.row[i] - sorted.row[i+1]
}))
t <- which.max(difs)
mcut <- (sorted.row[t] + sorted.row[t+1]) / 2
row <- ifelse(row > mcut, 1, 0)
row
})
multilabel_prediction(t(result), prediction, probability)
}
#' @describeIn mcut_threshold Maximum Cut Thresholding (MCut) for mlresult
#' @export
mcut_threshold.mlresult <- function (prediction, probability = FALSE) {
mcut_threshold.default(as.probability(prediction), probability)
}
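# Worked sketch of the instance-wise cut above: for the scores
# c(0.9, 0.7, 0.3, 0.2) the largest gap between consecutive sorted scores is
# 0.7 - 0.3 = 0.4, so the threshold is (0.7 + 0.3) / 2 = 0.5 and only the two
# highest-scored labels are set to 1.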
# PCUT -------------------------------------------------------------------------
#' Proportional Thresholding (PCut)
#'
#' Define the proportion of examples that will be positive for each label.
#' The Proportion Cut (PCut) method can be a label-wise or global method that
#' calibrates the threshold(s) from the training data globally or per label.
#'
#' @family threshold
#' @param prediction A matrix or mlresult.
#' @param ratio A single value between 0 and 1 or a list with ratio values
#'  containing one value per label.
#' @param probability A logical value. If \code{TRUE} the predicted values are
#' the score between 0 and 1, otherwise the values are bipartition 0 or 1.
#' (Default: \code{FALSE})
#' @return A mlresult object.
#' @references
#' Al-Otaibi, R., Flach, P., & Kull, M. (2014). Multi-label Classification: A
#' Comparative Study on Threshold Selection Methods. In First International
#' Workshop on Learning over Multiple Contexts (LMCE) at ECML-PKDD 2014.
#'
#' Largeron, C., Moulin, C., & Gery, M. (2012). MCut: A Thresholding Strategy
#' for Multi-label Classification. In 11th International Symposium, IDA 2012
#' (pp. 172-183).
#' @export
#'
#' @examples
#' prediction <- matrix(runif(16), ncol = 4)
#' pcut_threshold(prediction, .45)
pcut_threshold <- function (prediction, ratio, probability = FALSE) {
UseMethod("pcut_threshold")
}
#' @describeIn pcut_threshold Proportional Thresholding (PCut) method for matrix
#' @export
pcut_threshold.default <- function (prediction, ratio, probability = FALSE) {
n <- nrow(prediction)
num.elem <- ceiling(ratio * n)
if (length(num.elem) == 1) {
num.elem <- rep(num.elem, ncol(prediction))
names(num.elem) <- colnames(prediction)
}
else if (length(num.elem) != ncol(prediction)) {
stop(paste("The number of elements values must be a single value or the",
"same number of labels"))
}
else if (is.null(names(num.elem))) {
names(num.elem) <- colnames(prediction)
}
indexes <- utiml_rename(seq(ncol(prediction)), colnames(prediction))
result <- do.call(cbind, lapply(indexes, function (ncol) {
values <- c(rep(1, num.elem[ncol]), rep(0, n - num.elem[ncol]))
prediction[order(prediction[, ncol], decreasing=TRUE), ncol] <- values
prediction[, ncol]
}))
multilabel_prediction(result, prediction, probability)
}
#' @describeIn pcut_threshold Proportional Thresholding (PCut) for mlresult
#' @export
pcut_threshold.mlresult <- function (prediction, ratio, probability = FALSE) {
pcut_threshold.default(as.probability(prediction), ratio, probability)
}
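# Worked sketch: with 10 instances and ratio = 0.3, the ceiling(0.3 * 10) = 3
# instances with the highest scores for each label are set to 1 and the
# remaining 7 to 0, regardless of the absolute score values.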
# RCUT -------------------------------------------------------------------------
#' Rank Cut (RCut) threshold method
#'
#' The Rank Cut (RCut) method is an instance-wise strategy, which outputs the k
#' labels with the highest scores for each instance at prediction time.
#'
#' @family threshold
#' @param prediction A matrix or mlresult.
#' @param k The number of elements that will be positive.
#' @param probability A logical value. If \code{TRUE} the predicted values are
#' the score between 0 and 1, otherwise the values are bipartition 0 or 1.
#' (Default: \code{FALSE})
#' @return A mlresult object.
#' @references
#' Al-Otaibi, R., Flach, P., & Kull, M. (2014). Multi-label Classification: A
#' Comparative Study on Threshold Selection Methods. In First International
#' Workshop on Learning over Multiple Contexts (LMCE) at ECML-PKDD 2014.
#' @export
#'
#' @examples
#' prediction <- matrix(runif(16), ncol = 4)
#' rcut_threshold(prediction, 2)
rcut_threshold <- function (prediction, k, probability = FALSE) {
UseMethod("rcut_threshold")
}
#' @describeIn rcut_threshold Rank Cut (RCut) threshold method for matrix
#' @export
rcut_threshold.default <- function (prediction, k, probability = FALSE) {
values <- c(rep(1, k), rep(0, ncol(prediction) - k))
result <- apply(prediction, 1, function (row) {
row[order(row, decreasing = TRUE)] <- values
row
})
multilabel_prediction(t(result), prediction, probability)
}
#' @describeIn rcut_threshold Rank Cut (RCut) threshold method for mlresult
#' @export
rcut_threshold.mlresult <- function (prediction, k, probability = FALSE) {
rcut_threshold.default(as.probability(prediction), k, probability)
}
# SCORE DRIVEN -----------------------------------------------------------------
score_driven_threshold <- function () {
#TODO
}
# #' Cost-based loss function for multi-label classification
# #'
# #' @param mdata A mldr dataset containing the test data.
# #' @param mlresult An object of mlresult that contain the scores and bipartition
# #' values.
# #' @param cost The cost of classification each positive label. If a single value
# #' is informed then the all labels have tha same cost.
# #' @references
# #' Al-Otaibi, R., Flach, P., & Kull, M. (2014). Multi-label Classification: A
# #' Comparative Study on Threshold Selection Methods. In First International
# #' Workshop on Learning over Multiple Contexts (LMCE) at ECML-PKDD 2014.
# multilabel_loss_function <- function (mdata, mlresult, cost = 0.5) {
# if (length(cost) == 1) {
# cost <- rep(cost, mdata$measures$num.labels)
# names(cost) <- rownames(mdata$labels)
# }
# else if (is.null(names(cost))) {
# names(cost) <- rownames(mdata$label)
# }
#
# prediction <- as.bipartition(mlresult)
# labels <- utiml_rename(rownames(mdata$labels))
# partial.results <- lapply(labels, function (lname) {
# FN <- sum(mdata$dataset[,lname] == 1 & prediction [,lname] == 0) /
# mdata$measures$num.instances
# FP <- sum(mdata$dataset[,lname] == 0 & prediction [,lname] == 1) /
# mdata$measures$num.instances
# freq <- mdata$labels[lname, "freq"]
# 2 * ((cost[lname] * freq * FN) + ((1 - cost[lname]) * (1 - freq) * FP))
# })
#
# mean(unlist(partial.results))
# }
# SCUT -------------------------------------------------------------------------
#' SCut Score-based method
#'
#' This is a label-wise method that adjusts the threshold for each label to
#' optimize a specific loss function using a validation set or cross validation.
#'
#' Unlike the other threshold methods, instead of returning the bipartition
#' results, it returns the threshold values for each label.
#'
#' @family threshold
#' @param prediction A matrix or mlresult.
#' @param expected The expected labels for the prediction. May be a matrix with
#' the label values or a mldr object.
#' @param loss.function A loss function to be optimized. If you want to use your
#' own error function see the notes and example. (Default: Mean Squared Error)
#' @param cores The number of cores to parallelize the computation. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @return A numeric vector with the threshold values for each label
#' @note The loss function is an R function that receives two vectors, the
#'  expected values of the label and the predicted values, respectively. Positive
#'  values are represented by 1 and negative values by 0.
#' @references
#' Fan, R.-E., & Lin, C.-J. (2007). A study on threshold selection for
#' multi-label classification. Department of Computer Science, National
#' Taiwan University.
#'
#' Al-Otaibi, R., Flach, P., & Kull, M. (2014). Multi-label Classification: A
#' Comparative Study on Threshold Selection Methods. In First International
#' Workshop on Learning over Multiple Contexts (LMCE) at ECML-PKDD 2014.
#' @export
#'
#' @examples
#' names <- list(1:10, c("a", "b", "c"))
#' prediction <- matrix(runif(30), ncol = 3, dimnames = names)
#' classes <- matrix(sample(0:1, 30, rep = TRUE), ncol = 3, dimnames = names)
#' thresholds <- scut_threshold(prediction, classes)
#' fixed_threshold(prediction, thresholds)
#'
#' \donttest{
#' # Penalizes only FP predictions
#' mylossfunc <- function (real, predicted) {
#' mean(predicted - real * predicted)
#' }
#' prediction <- predict(br(toyml, "RANDOM"), toyml)
#' scut_threshold(prediction, toyml, loss.function = mylossfunc, cores = 2)
#' }
scut_threshold <- function (prediction, expected, loss.function = NA,
cores = getOption("utiml.cores", 1)) {
UseMethod("scut_threshold")
}
#' @describeIn scut_threshold Default scut_threshold
#' @export
scut_threshold.default <- function (prediction, expected, loss.function = NA,
cores = getOption("utiml.cores", 1)) {
if (cores < 1) {
stop("Cores must be a positive value")
}
if (!is.function(loss.function)) {
# Mean Squared Error
loss.function <- function(real, predicted) {
mean((real - predicted) ^ 2)
}
}
if (is(expected, "mldr")) {
expected <- expected$dataset[expected$labels$index]
}
labels <- utiml_rename(colnames(prediction))
thresholds <- utiml_lapply(labels, function (col) {
scores <- prediction[, col]
index <- order(scores)
ones <- which(expected[index, col] == 1)
difs <- c(Inf)
for (i in seq(length(ones)-1)) {
difs <- c(difs, ones[i+1] - ones[i])
}
evaluated.thresholds <- c()
result <- c()
for (i in ones[which(difs > 1)]) {
thr <- scores[index[i]]
res <- loss.function(expected[, col], ifelse(scores < thr, 0, 1))
evaluated.thresholds <- c(evaluated.thresholds, thr)
result <- c(result, res)
}
ifelse(length(ones) > 0,
as.numeric(evaluated.thresholds[which.min(result)]),
max(scores) + 0.0001) # All expected values are in the negative class
}, cores)
unlist(thresholds)
}
#' @describeIn scut_threshold Mlresult scut_threshold
#' @export
scut_threshold.mlresult <- function (prediction, expected, loss.function = NA,
cores = getOption("utiml.cores", 1)) {
scut_threshold.default(as.probability(prediction), expected,
loss.function, cores)
}
# SUBSET CORRECTION ------------------------------------------------------------
#' Subset Correction of a predicted result
#'
#' This method restricts a multi-label learner to predict only label combinations
#' that are present in the (training) data. To do this, all labelsets
#' that are predicted but not found in the training data are replaced by the most
#' similar labelset.
#'
#' If the most similar is not unique, those label combinations with higher
#' frequency in the training data are preferred. The Hamming loss distance is
#' used to determine the difference between the labelsets.
#'
#' @family threshold
#' @param mlresult An object of mlresult that contain the scores and bipartition
#' values.
#' @param train_y A matrix/data.frame with all labels values of the training
#' dataset or a mldr train dataset.
#' @param probability A logical value. If \code{TRUE} the predicted values are
#' the score between 0 and 1, otherwise the values are bipartition 0 or 1.
#' (Default: \code{FALSE})
#' @return A new mlresult where all results are present in the training
#' labelsets.
#' @note The original paper describes a method to create only the bipartition
#' result, but we adapted the method to also change the scores. Based on the
#' base.threshold value, scores that are higher than the threshold but must
#' become negative are adjusted to respect this restriction. If \code{NULL},
#' this correction is ignored.
#' @references
#' Senge, R., Coz, J. J. del, & Hullermeier, E. (2013). Rectifying classifier
#' chains for multi-label classification. In Workshop of Lernen, Wissen &
#' Adaptivitat (LWA 2013) (pp. 162-169). Bamberg, Germany.
#' @export
#'
#' @examples
#' prediction <- predict(br(toyml, "RANDOM"), toyml)
#' subset_correction(prediction, toyml)
subset_correction <- function(mlresult, train_y, probability = FALSE) {
bip <- as.bipartition(mlresult)
prob <- as.probability(mlresult)
if (is(train_y, "mldr")) {
train_y <- train_y$dataset[train_y$labels$index]
}
if (ncol(mlresult) != ncol(train_y)) {
stop("The number of columns in the predicted result are different from the
training data")
}
# Bipartition correction
labelsets <- as.matrix(unique(train_y))
rownames(labelsets) <- apply(labelsets, 1, paste, collapse = "")
order <- names(sort(table(apply(train_y, 1, paste, collapse = "")),
decreasing = TRUE))
labelsets <- labelsets[order, ]
#TODO confirm the use of apply
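  # labelsets are ordered by decreasing frequency in the training data, so among
  # labelsets at the same (minimal) Hamming distance which.min keeps the most frequent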
new.pred <- t(apply(bip, 1, function(y) {
labelsets[names(which.min(apply(labelsets, 1, function(row) {
sum(row != y)
}))), ]
}))
multilabel_prediction(new.pred, prob, probability)
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/threshold.R
|
utiml_binary_prediction <- function(bipartition, probability) {
res <- list(bipartition = bipartition, probability = probability)
class(res) <- "binary.prediction"
res
}
utiml_create_binary_data <- function (mdata, label.name, extra.columns = NULL) {
if (is.null(extra.columns)) {
cbind(mdata$dataset[mdata$attributesIndexes], mdata$dataset[label.name])
}
else {
cbind(mdata$dataset[mdata$attributesIndexes],
extra.columns,
mdata$dataset[label.name])
}
}
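# Builds the one-versus-one (pairwise) training data: keeps only the instances
# where exactly one of the two labels is positive (xor) and uses the first
# label's column as the binary class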
utiml_create_pairwise_data <- function (mdata, label1, label2) {
mdata$dataset[xor(mdata$dataset[label1], mdata$dataset[label2]),
c(mdata$attributesIndexes,mdata$labels[label1, "index"])]
}
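# Builds the label powerset training data: the multi-class target is the
# labelset string obtained by concatenating the label columns of each instance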
utiml_create_lp_data <- function (mdata) {
cbind(mdata$dataset[mdata$attributesIndexes],
classlp=factor(apply(mdata$dataset[mdata$labels$index], 1, paste,
collapse="")))
}
utiml_create_model <- function(utiml.object, ...) {
labelinfo <- table(utiml.object$data[utiml.object$labelname])
#if ((any(labelinfo < 2) & length(labelinfo) == 2) | length(labelinfo) < 2) {
if (any(labelinfo < 1) | length(labelinfo) < 2) {
    # There are not enough examples to train (create an empty model)
model <- list()
class(model) <- "emptyModel"
} else {
# Call dynamic multilabel model with merged parameters
model <- do.call(mltrain, c(list(object = utiml.object), list(...)))
}
attr(model, "dataset") <- utiml.object$mldataset
attr(model, "label") <- utiml.object$labelname
model
}
utiml_predict <- function (predictions, probability) {
bipartitions <- do.call(cbind, lapply(predictions, function(lblres) {
lblres$bipartition
}))
probabilities <- do.call(cbind, lapply(predictions, function(lblres) {
lblres$probability
}))
multilabel_prediction(bipartitions, probabilities, probability)
}
utiml_predict_binary_model <- function(model, newdata, ...) {
result <- do.call(mlpredict, c(list(model = model, newdata = newdata),
list(...)))
if (any(rownames(result) != rownames(newdata))) {
where <- paste(attr(model, "dataset"), "/", attr(model, "label"))
    warning(paste("The order of the predicted instances from", where,
                  "is wrong!"))
}
  # Because the prediction is a factor, it must first be converted to character
bipartition <- as.numeric(as.character(result$prediction))
probability <- result$probability
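  # Keep the probability of the positive class: for instances predicted as 0 the
  # reported probability refers to the negative class, so take its complement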
zeros <- bipartition == 0
probability[zeros] <- 1 - probability[zeros]
names(bipartition) <- names(probability) <- rownames(result)
utiml_binary_prediction(bipartition, probability)
}
utiml_predict_multiclass_model <- function (model, newdata, labels, probability,
...) {
result <- do.call(mlpredict, c(list(model = model, newdata = newdata),
list(...)))
classes <- do.call(rbind, lapply(
strsplit(as.character(result$prediction),""), as.numeric)
)
dimnames(classes) <- list(rownames(newdata), labels)
probs <- apply(classes, 2, function (col) {
ifelse(col == 1, result$probability, 0)
})
multilabel_prediction(classes, probs, probability)
}
utiml_prepare_data <- function(dataset, classname, mldataset, mlmethod,
base.algorithm, ...) {
label <- colnames(dataset)[ncol(dataset)]
# Convert the class column as factor
dataset[, label] <- as.factor(dataset[, label])
# Create object
object <- list(
data = dataset,
labelname = label,
labelindex = ncol(dataset),
mldataset = mldataset,
mlmethod = mlmethod,
base.algorithm = base.algorithm
)
extra <- list(...)
for (nextra in names(extra)) {
object[[nextra]] <- extra[[nextra]]
}
basename <- paste("base", base.algorithm, sep = "")
class(object) <- c(classname, basename, "mltransformation")
object
}
#' Summary method for mltransformation
#' @param object A transformed dataset
#' @param ... additional arguments affecting the summary produced.
#'
#' @return No return value, called to print the model's details.
#'
#' @export
summary.mltransformation <- function(object, ...) {
summary(object$data, ...)
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/transformation.R
|
#' utiml: Utilities for Multi-Label Learning
#'
#' The utiml package is a framework for the application of classification
#' algorithms to multi-label data. Like the well known MULAN used with Weka, it
#' provides a set of multi-label procedures such as sampling methods,
#' transformation strategies, threshold functions, pre-processing techniques and
#' evaluation metrics. The package was designed to allow users to easily
#' perform complete multi-label classification experiments in the R environment.
#'
#' Currently, the main methods supported are:
#' \enumerate{
#' \item{
#' \strong{Classification methods}:
#' \code{\link[=baseline]{ML Baselines}},
#' \code{\link[=br]{Binary Relevance (BR)}},
#' \code{\link[=brplus]{BR+}},
#' \code{\link[=cc]{Classifier Chains}},
#' \code{\link[=clr]{Calibrated Label Ranking (CLR)}},
#' \code{\link[=dbr]{Dependent Binary Relevance (DBR)}},
#' \code{\link[=ebr]{Ensemble of Binary Relevance (EBR)}},
#' \code{\link[=ecc]{Ensemble of Classifier Chains (ECC)}},
#' \code{\link[=eps]{Ensemble of Pruned Set (EPS)}},
#' \code{\link[=homer]{Hierarchy Of Multilabel classifiER (HOMER)}},
#' \code{\link[=lift]{Label specIfic FeaTures (LIFT)}},
#' \code{\link[=lp]{Label Powerset (LP)}},
#' \code{\link[=mbr]{Meta-Binary Relevance (MBR or 2BR)}},
#' \code{\link[=mlknn]{Multi-label KNN (ML-KNN)}},
#' \code{\link[=ns]{Nested Stacking (NS)}},
#' \code{\link[=ppt]{Pruned Problem Transformation (PPT)}},
#' \code{\link[=prudent]{Pruned and Confident Stacking Approach (Prudent)}},
#' \code{\link[=ps]{Pruned Set (PS)}},
#' \code{\link[=rakel]{Random k-labelsets (RAkEL)}},
#' \code{\link[=rdbr]{Recursive Dependent Binary Relevance (RDBR)}},
#' \code{\link[=rpc]{Ranking by Pairwise Comparison (RPC)}}
#' }
#' \item{
#' \strong{Evaluation methods}:
#' \code{\link[=cv]{Performing a cross-validation procedure}},
#' \code{\link[=multilabel_confusion_matrix]{Confusion Matrix}},
#' \code{\link[=multilabel_evaluate]{Evaluate}},
#' \code{\link[=multilabel_measures]{Supported measures}}
#' }
#' \item{
#' \strong{Pre-process utilities}:
#' \code{\link[=fill_sparse_mldata]{Fill sparse data}},
#' \code{\link[=normalize_mldata]{Normalize data}},
#' \code{\link[=remove_attributes]{Remove attributes}},
#' \code{\link[=remove_labels]{Remove labels}},
#' \code{\link[=remove_skewness_labels]{Remove skewness labels}},
#' \code{\link[=remove_unique_attributes]{Remove unique attributes}},
#' \code{\link[=remove_unlabeled_instances]{Remove unlabeled instances}},
#' \code{\link[=replace_nominal_attributes]{Replace nominal attributes}}
#' }
#' \item{
#' \strong{Sampling methods}:
#' \code{\link[=create_holdout_partition]{Create holdout partitions}},
#' \code{\link[=create_kfold_partition]{Create k-fold partitions}},
#' \code{\link[=create_random_subset]{Create random subset}},
#' \code{\link[=create_subset]{Create subset}},
#' \code{\link[=partition_fold]{Partition fold}}
#' }
#' \item{
#' \strong{Threshold methods}:
#' \code{\link[=fixed_threshold]{Fixed threshold}},
#' \code{\link[=lcard_threshold]{Cardinality threshold}},
#' \code{\link[=mcut_threshold]{MCUT}},
#' \code{\link[=pcut_threshold]{PCUT}},
#' \code{\link[=rcut_threshold]{RCUT}},
#' \code{\link[=scut_threshold]{SCUT}},
#' \code{\link[=subset_correction]{Subset correction}}
#' }
#' }
#'
#' However, there are other utility methods not previously cited, such as
#' \code{\link{as.bipartition}}, \code{\link{as.mlresult}},
#' \code{\link{as.ranking}}, \code{\link{multilabel_prediction}}, etc. More
#' details and examples are available on
#' \href{https://github.com/rivolli/utiml}{utiml repository}.
#'
#' @section Notes:
#' We use the \code{\link{mldr}} package to manipulate multi-label data.
#' See its documentation for more information about handling multi-label datasets.
#'
#' @section Cite as:
#' \preformatted{
#' @article\{RJ-2018-041,
#' author = \{Adriano Rivolli and Andre C. P. L. F. de Carvalho\},
#' title = \{\{The utiml Package: Multi-label Classification in R\}\},
#' year = \{2018\},
#' journal = \{\{The R Journal\}\},
#' doi = \{10.32614/RJ-2018-041\},
#' url = \{https://doi.org/10.32614/RJ-2018-041\},
#' pages = \{24--37\},
#' volume = \{10\},
#' number = \{2\}
#' \}}
#'
#' @author
#' \itemize{
#' \item Adriano Rivolli <rivolli@@utfpr.edu.br>
#' }
#' This package is a result of my PhD at Institute of Mathematics and Computer
#' Sciences (ICMC) at the University of Sao Paulo, Brazil.
#'
#' PhD advisor: Andre C. P. L. F. de Carvalho
#'
#' @import mldr
#' @import parallel
#' @import ROCR
#' @importFrom methods is
#' @docType package
#' @name utiml
NULL
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/utiml.R
|
.onLoad <- function(libname, pkgname) {
op <- options()
op.utiml <- list(
utiml.base.algorithm = "SVM",
utiml.cores = 1,
utiml.seed = NA,
utiml.use.probs = TRUE,
utiml.empty.prediction = FALSE,
utiml.random = sample(1:10) #Random value
)
toset <- !(names(op.utiml) %in% names(op))
if (any(toset)) options(op.utiml[toset])
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/utiml/R/zzz.R
|
## -----------------------------------------------------------------------------
library("utiml")
## -----------------------------------------------------------------------------
head(toyml)
## -----------------------------------------------------------------------------
foodtruck$labels
## -----------------------------------------------------------------------------
mytoy <- normalize_mldata(toyml)
## -----------------------------------------------------------------------------
ds <- create_holdout_partition(mytoy, c(train=0.65, test=0.35), "iterative")
names(ds)
## -----------------------------------------------------------------------------
brmodel <- br(ds$train, "RF", seed=123)
prediction <- predict(brmodel, ds$test)
## -----------------------------------------------------------------------------
head(as.bipartition(prediction))
head(as.probability(prediction))
head(as.ranking(prediction))
## -----------------------------------------------------------------------------
newpred <- rcut_threshold(prediction, 2)
head(newpred)
## -----------------------------------------------------------------------------
result <- multilabel_evaluate(ds$test, prediction, "bipartition")
thresres <- multilabel_evaluate(ds$test, newpred, "bipartition")
round(cbind(Default=result, RCUT=thresres), 3)
## -----------------------------------------------------------------------------
result <- multilabel_evaluate(ds$test, prediction, "bipartition", labels=TRUE)
result$labels
## -----------------------------------------------------------------------------
results <- cv(foodtruck, br, base.algorithm="SVM", cv.folds=5,
cv.sampling="stratified", cv.measures="example-based",
cv.seed=123)
round(results, 4)
## -----------------------------------------------------------------------------
results <- cv(toyml, "rakel", base.algorith="RF", cv.folds=10, cv.results=TRUE,
cv.sampling="random", cv.measures="example-based")
#Multi-label results
round(results$multilabel, 4)
#Labels results
round(sapply(results$labels, colMeans), 4)
## ---- echo=FALSE, results='asis'----------------------------------------------
bl <- data.frame(
Use = c("CART", "C5.0", "KNN", "MAJORITY", "NB", "RANDOM", "RF", "SVM", "XGB"),
Name = c("Classification and regression trees", "C5.0 Decision Trees and Rule-Based Models", "K Nearest Neighbor", "Majority class prediction", "Naive Bayes", "Random prediction", "Random Forest", "Support Vector Machine", "eXtreme Gradient Boosting"),
Package = c("rpart", "C50", "kknn", "-", "e1071", "-", "randomForest", "e1071", "xgboost"),
Call = c("rpart::rpart(...)", "C50::C5.0(...)", "kknn::kknn(...)", "-", "e1071::naiveBayes(...)", "-", "randomForest::randomForest(...)", "e1071::svm(...)", "xgboost::xgboost(...)")
)
knitr::kable(bl)
## ---- echo=FALSE, results='asis'----------------------------------------------
approaches <- c(
"br"="one-against-all", "brplus"="one-against-all; stacking", "cc"="one-against-all; chaining", "clr"="one-versus-one", "dbr"="one-against-all; stacking", "ebr"="one-against-all; ensemble", "ecc"="one-against-all; ensemble", "eps"="powerset", "homer"="hierarchy", "lift"="one-against-all", "lp"="powerset", "mbr"="one-against-all; stacking", "ns"="one-against-all; chaining", "ppt"="powerset", "prudent"="one-against-all; stacking", "ps"="powerset", "rakel"="powerset", "rdbr"="one-against-all; stacking", "rpc"="one-versus-one"
)
mts <- data.frame(
Method = c("br", "brplus", "cc", "clr", "dbr", "ebr", "ecc", "eps", "homer", "lift", "lp", "mbr", "ns", "ppt", "prudent", "ps", "rakel", "rdbr", "rpc"),
Name = c("Binary Relevance (BR)", "BR+", "Classifier Chains", "Calibrated Label Ranking (CLR)", "Dependent Binary Relevance (DBR)", "Ensemble of Binary Relevance (EBR)", "Ensemble of Classifier Chains (ECC)", "Ensemble of Pruned Set (EPS)", "Hierarchy Of Multi-label classifiER (HOMER)", "Learning with Label specIfic FeaTures (LIFT)", "Label Powerset (LP)", "Meta-Binary Relevance (MBR or 2BR)", "Nested Stacking (NS)", "Pruned Problem Transformation (PPT)", "Pruned and Confident Stacking Approach (Prudent)", "Pruned Set (PS)", "Random k-labelsets (RAkEL)", "Recursive Dependent Binary Relevance (RDBR)", "Ranking by Pairwise Comparison (RPC)"),
Approach = as.character(approaches)
)
knitr::kable(mts)
## -----------------------------------------------------------------------------
toy <- create_holdout_partition(toyml)
brmodel <- br(toy$train, "SVM")
prediction <- predict(brmodel, toy$test)
# Using the test dataset and the prediction
result <- multilabel_evaluate(toy$test, prediction)
print(round(result, 3))
# Build a confusion matrix
confmat <- multilabel_confusion_matrix(toy$test, prediction)
result <- multilabel_evaluate(confmat)
print(confmat)
## -----------------------------------------------------------------------------
# Example-based measures
result <- multilabel_evaluate(confmat, "example-based")
print(names(result))
# Subset accuracy, F1 measure and hamming-loss
result <- multilabel_evaluate(confmat, c("subset-accuracy", "F1", "hamming-loss"))
print(names(result))
# Ranking and label-based measures
result <- multilabel_evaluate(confmat, c("label-based", "ranking"))
print(names(result))
# To see all the supported measures you can try
multilabel_measures()
## ---- echo=FALSE, results='asis'----------------------------------------------
## 8. How to extend utiml
### 8.1 Create a new Multi-label Method
### 8.2 Create a new base Learner
|
/scratch/gouwar.j/cran-all/cranData/utiml/inst/doc/utiml-overview.R
|
---
title: "utiml: Utilities for multi-label learning"
author: "Adriano Rivolli"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{utiml: Utilities for Multi-label Learning}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
**Version:** 0.1.5
The utiml package is a framework to support multi-label processing, like Mulan on Weka.
It is simple to use and extend. This tutorial explains the main topics related to the utiml package.
More details and examples are available in the [utiml repository](https://github.com/rivolli/utiml).
## 1. Introduction
The general purpose of **utiml** is to be an alternative for multi-label processing in R.
The main methods available in this package are organized into the following groups:
- Classification methods
- Evaluation methods
- Pre-process utilities
- Sampling methods
- Threshold methods
The **utiml** package needs the [mldr](https://CRAN.R-project.org/package=mldr)
package to handle multi-label datasets. It will be installed together with the
**utiml**^[You may also be interested in [mldr.datasets](https://CRAN.R-project.org/package=mldr.datasets)].
The installation process is similar to other packages available on CRAN:
```r
install.packages("utiml")
```
After installation, you can load the **utiml** package (the mldr package will also be loaded):
```{r}
library("utiml")
```
The **utiml** package brings two multi-label datasets: a synthetic toy dataset called `toyml` and a real-world dataset called `foodtruck`. To understand how to load your own dataset, we suggest reading the [mldr](https://CRAN.R-project.org/package=mldr) documentation. The `toyml` dataset contains 100 instances, 10 features and 5 labels; its purpose is to be used for small tests and examples.
```{r}
head(toyml)
```
The `foodtruck` dataset contains different types of cuisines to be predicted from user
preferences and habits. The dataset has 12 labels:
```{r}
foodtruck$labels
```
In the following section, an overview of how to conduct a multi-label experiment is presented. Next, we explore each group of methods and its particularities.
## 2. Overview
After loading the multi-label dataset, some data processing may be necessary. The pre-processing methods are utilities that manipulate `mldr` datasets.
Suppose that we want to normalize the attribute values (between 0 and 1);
we can do:
```{r}
mytoy <- normalize_mldata(toyml)
```
Next, we want to stratify the dataset into two partitions (train and test),
containing 65% and 35% of the instances respectively; then we can do:
```{r}
ds <- create_holdout_partition(mytoy, c(train=0.65, test=0.35), "iterative")
names(ds)
```
Now, the `ds` object has two elements `ds$train` and `ds$test`, where the first will be used to create a model and the second to test the model. For example, using the *Binary Relevance* multi-label method with the base algorithm *Random Forest*^[Requires the
[randomForest](https://CRAN.R-project.org/package=randomForest) package.], we can do:
```{r}
brmodel <- br(ds$train, "RF", seed=123)
prediction <- predict(brmodel, ds$test)
```
The `prediction` is an object of class `mlresult` that contains the probability (also called confidence or score) and the bipartition values:
```{r}
head(as.bipartition(prediction))
head(as.probability(prediction))
head(as.ranking(prediction))
```
A threshold strategy can be applied:
```{r}
newpred <- rcut_threshold(prediction, 2)
head(newpred)
```
Now we can evaluate the models and check whether the use of the RCUT threshold improved the results:
```{r}
result <- multilabel_evaluate(ds$test, prediction, "bipartition")
thresres <- multilabel_evaluate(ds$test, newpred, "bipartition")
round(cbind(Default=result, RCUT=thresres), 3)
```
Details of the labels evaluation can be obtained using:
```{r}
result <- multilabel_evaluate(ds$test, prediction, "bipartition", labels=TRUE)
result$labels
```
## 3. Pre-processing
The pre-processing methods were developed to facilitate some operations with the multi-label data. Each pre-processing method receives an mldr dataset and returns another mldr dataset. You can use them as needed.
Here, an overview of the pre-processing methods:
```r
# Fill sparse data
mdata <- fill_sparse_mldata(toyml)
# Remove unique attributes
mdata <- remove_unique_attributes(toyml)
# Remove the attributes "iatt8", "iatt9" and "ratt10"
mdata <- remove_attributes(toyml, c("iatt8", "iatt9", "ratt10"))
# Remove labels with less than 10 positive or negative examples
mdata <- remove_skewness_labels(toyml, 10)
# Remove the labels "y2" and "y3"
mdata <- remove_labels(toyml, c("y2", "y3"))
# Remove the examples without any labels
mdata <- remove_unlabeled_instances(toyml)
# Replace nominal attributes
mdata <- replace_nominal_attributes(toyml)
# Normalize the predictive attributes between 0 and 1
mdata <- normalize_mldata(mdata)
```
## 4. Sampling
### 4.1 Subsets
If you want to create a specific or a random subset of a dataset, you can use
the methods `create_subset` and `create_random_subset`, respectively. In the first case, you should specify which rows and, optionally, which attributes you want.
In the second case, you just define the number of instances and optionally the number of attributes.
```r
# Create a subset of toyml dataset with the even instances and the first five attributes
mdata <- create_subset(toyml, seq(1, 100, 2), 1:5)
# Create a subset of toyml dataset with the ten first instances and all attributes
mdata <- create_subset(toyml, 1:10)
# Create a random subset of toyml dataset with 30 instances and 6 attributes
mdata <- create_random_subset(toyml, 30, 6)
# Create a random subset of toyml dataset with 7 instances and all attributes
mdata <- create_random_subset(toyml, 7)
```
### 4.2 Holdout
To create two or more partitions of the dataset, we use the method `create_holdout_partition`. The first argument is a mldr dataset, the second is the size of partitions and the third is the partition method. The options are: `random`, `iterative` and `stratified`. The `iterative` is a stratification by label and the `stratified` is a stratification by labelset. The return of the
method is a list with the names defined by the second parameter.
See some examples:
```r
# Create two equal partitions using the 'iterative' method
toy <- create_holdout_partition(toyml, c(train=0.5, test=0.5), "iterative")
## toy$train and toy$test is a mldr object
# Create three partitions using the 'random' method
toy <- create_holdout_partition(toyml, c(a=0.4, b=0.3, c=0.3))
## Use toy$a, toy$b and toy$c
# Create two partitions using the 'stratified' method
toy <- create_holdout_partition(toyml, c(0.6, 0.4), "stratified")
## Use toy[[1]] and toy[[2]]
```
### 4.3 k-Folds
The simplest way to run a k-fold cross validation is by using the method `cv`:
```{r}
results <- cv(foodtruck, br, base.algorithm="SVM", cv.folds=5,
cv.sampling="stratified", cv.measures="example-based",
cv.seed=123)
round(results, 4)
```
To obtain detailed results of the folds, use the parameter `cv.results`, such that:
```{r}
results <- cv(toyml, "rakel", base.algorith="RF", cv.folds=10, cv.results=TRUE,
cv.sampling="random", cv.measures="example-based")
#Multi-label results
round(results$multilabel, 4)
#Labels results
round(sapply(results$labels, colMeans), 4)
```
Finally, to manually run a k-fold cross validation, you can use the `create_kfold_partition`. The return of this method is an object of type `kFoldPartition` that will be used with the method `partition_fold` to create
the datasets:
```r
# Create 3-fold object
kfcv <- create_kfold_partition(toyml, k=3, "iterative")
result <- lapply(1:3, function (k) {
toy <- partition_fold(kfcv, k)
model <- br(toy$train, "RF")
predict(model, toy$test)
})
# Create 5-fold object and use a validation set
kfcv <- create_kfold_partition(toyml, 5, "stratified")
result <- lapply(1:5, function (k) {
toy <- partition_fold(kfcv, k, has.validation=TRUE)
model <- br(toy$train, "RF")
list(
validation = predict(model, toy$validation),
test = predict(model, toy$test)
)
})
```
## 5. Classification Methods
Multi-label classification is a supervised learning task that seeks to learn and predict one or more labels together. The methods for this task can be grouped into: problem transformation and algorithm adaptation. Next, we provide more details about the methods and their specificities.
### 5.1 Transformation methods and Base Algorithms
The transformation methods require a base algorithm (binary or multi-class) and use its predictions to compose the multi-label result. The **utiml**
package accepts a set of default base algorithms.
Each base algorithm requires a specific package; you need to install it manually,
because they are not installed together with **utiml**. The following base learners are supported:
```{r, echo=FALSE, results='asis'}
bl <- data.frame(
Use = c("CART", "C5.0", "KNN", "MAJORITY", "NB", "RANDOM", "RF", "SVM", "XGB"),
Name = c("Classification and regression trees", "C5.0 Decision Trees and Rule-Based Models", "K Nearest Neighbor", "Majority class prediction", "Naive Bayes", "Random prediction", "Random Forest", "Support Vector Machine", "eXtreme Gradient Boosting"),
Package = c("rpart", "C50", "kknn", "-", "e1071", "-", "randomForest", "e1071", "xgboost"),
Call = c("rpart::rpart(...)", "C50::C5.0(...)", "kknn::kknn(...)", "-", "e1071::naiveBayes(...)", "-", "randomForest::randomForest(...)", "e1071::svm(...)", "xgboost::xgboost(...)")
)
knitr::kable(bl)
```
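For example, the packages for the `RF` and `XGB` base algorithms can be installed with:
```r
# Install the packages required by the RF and XGB base algorithms
install.packages(c("randomForest", "xgboost"))
```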
To perform a classification, it is first necessary to create a multi-label model; the available methods are:
```{r, echo=FALSE, results='asis'}
approaches <- c(
"br"="one-against-all", "brplus"="one-against-all; stacking", "cc"="one-against-all; chaining", "clr"="one-versus-one", "dbr"="one-against-all; stacking", "ebr"="one-against-all; ensemble", "ecc"="one-against-all; ensemble", "eps"="powerset", "homer"="hierarchy", "lift"="one-against-all", "lp"="powerset", "mbr"="one-against-all; stacking", "ns"="one-against-all; chaining", "ppt"="powerset", "prudent"="one-against-all; stacking", "ps"="powerset", "rakel"="powerset", "rdbr"="one-against-all; stacking", "rpc"="one-versus-one"
)
mts <- data.frame(
Method = c("br", "brplus", "cc", "clr", "dbr", "ebr", "ecc", "eps", "homer", "lift", "lp", "mbr", "ns", "ppt", "prudent", "ps", "rakel", "rdbr", "rpc"),
Name = c("Binary Relevance (BR)", "BR+", "Classifier Chains", "Calibrated Label Ranking (CLR)", "Dependent Binary Relevance (DBR)", "Ensemble of Binary Relevance (EBR)", "Ensemble of Classifier Chains (ECC)", "Ensemble of Pruned Set (EPS)", "Hierarchy Of Multi-label classifiER (HOMER)", "Learning with Label specIfic FeaTures (LIFT)", "Label Powerset (LP)", "Meta-Binary Relevance (MBR or 2BR)", "Nested Stacking (NS)", "Pruned Problem Transformation (PPT)", "Pruned and Confident Stacking Approach (Prudent)", "Pruned Set (PS)", "Random k-labelsets (RAkEL)", "Recursive Dependent Binary Relevance (RDBR)", "Ranking by Pairwise Comparison (RPC)"),
Approach = as.character(approaches)
)
knitr::kable(mts)
```
The first and second parameters of each multi-label method are always the same:
the multi-label dataset and the base algorithm, respectively. However, they may have specific
parameters, for example:
```r
#Classifier chain with a specific chain
ccmodel <- cc(toyml, "RF", chain = c("y5", "y4", "y3", "y2", "y1"))
# Ensemble with 5 models using 60% of sampling and 75% of attributes
ebrmodel <- ebr(toyml, "C5.0", m = 5, subsample=0.6, attr = 0.75)
```
Beyond the parameters of each multi-label method, you can define the parameters of the base algorithm, like this:
```r
# Specific parameters for SVM
brmodel <- br(toyml, "SVM", gamma = 0.1, scale=FALSE)
# Specific parameters for KNN
ccmodel <- cc(toyml, "KNN", c("y5", "y4", "y3", "y2", "y1"), k=5)
# Specific parameters for Random Forest
ebrmodel <- ebr(toyml, "RF", 5, 0.6, 0.75, proximity=TRUE, ntree=100)
```
After building the model, use the `predict` method to predict new data. Some predict methods require specific arguments, and you can also pass arguments to the base method. By default, all base learners predict the probability of the prediction, so do not use such parameters for the base method. Instead, use the `probability` parameter defined by the multi-label prediction method.
```r
# Predict the BR model
result <- predict(brmodel, toyml)
# Specific parameters for KNN
result <- predict(ccmodel, toyml, kernel="triangular", probability = FALSE)
```
An object of type `mlresult` is returned by the predict method. It always contains
the bipartitions and the probability values, so you can use `as.bipartition`,
`as.probability` and `as.ranking` to obtain each specific representation.
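For example, reusing the `result` object from the previous code block:
```r
# Obtain each representation from the mlresult returned by predict
bip <- as.bipartition(result)  # bipartition (0/1) matrix
prob <- as.probability(result) # scores between 0 and 1
rank <- as.ranking(result)     # label rankings per instance
```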
### 5.2 Algorithm adaptation
Until now, only a single algorithm adaptation method is available: `mlknn`.
```r
model <- mlknn(toyml, k=3)
pred <- predict(model, toyml)
```
### 5.3 Seed and Multicores
Almost all multi-label methods can run in parallel. The train and prediction methods receive a parameter called `cores` that specifies the number of cores used to run the method. Some multi-label methods cannot run on multiple cores, so read the documentation of each method for more details.
```r
# Running Binary Relevance method using 2 cores
brmodel <- br(toyml, "SVM", cores=2)
prediction <- predict(brmodel, toyml, cores=2)
```
If you need reproducibility, you can set a specific seed:
```r
# Running Binary Relevance method using 2 cores
brmodel <- br(toyml, "SVM", cores=2, seed=1984)
prediction <- predict(brmodel, toyml, seed=1984, cores=2)
```
The `cv` method also supports multicores:
```r
results <- cv(toyml, method="ecc", base.algorith="RF", subsample = 0.9, attr.space = 0.9, cv.folds=5, cv.cores=2)
```
## 6. Thresholds
The threshold methods receive an `mlresult` object and return a new `mlresult`, except for `scut`, which returns the threshold values. These methods mainly change the bipartition values using the probability values.
```r
# Use a fixed threshold for all labels
newpred <- fixed_threshold(prediction, 0.4)
# Use a specific threshold for each label
newpred <- fixed_threshold(prediction, c(0.4, 0.5, 0.6, 0.7, 0.8))
# Use the MCut approach to define the threshold
newpred <- mcut_threshold(prediction)
# Use the PCut threshold
newpred <- pcut_threshold(prediction, ratio=0.65)
# Use the RCut threshold
newpred <- rcut_threshold(prediction, k=3)
# Choose the best threshold values based on a Mean Squared Error
thresholds <- scut_threshold(prediction, toyml, cores = 2)
newpred <- fixed_threshold(prediction, thresholds)
#Predict only the labelsets present in the train data
newpred <- subset_correction(prediction, toyml)
```
## 7. Evaluation
To evaluate multi-label models you can use the method `multilabel_evaluate`. There are two ways to call this method:
```{r}
toy <- create_holdout_partition(toyml)
brmodel <- br(toy$train, "SVM")
prediction <- predict(brmodel, toy$test)
# Using the test dataset and the prediction
result <- multilabel_evaluate(toy$test, prediction)
print(round(result, 3))
# Build a confusion matrix
confmat <- multilabel_confusion_matrix(toy$test, prediction)
result <- multilabel_evaluate(confmat)
print(confmat)
```
The confusion matrix summarizes a lot of data, and can be merged. For example,
using a k-fold experiment:
```r
kfcv <- create_kfold_partition(toyml, k=3)
confmats <- lapply(1:3, function (k) {
toy <- partition_fold(kfcv, k)
model <- br(toy$train, "RF")
multilabel_confusion_matrix(toy$test, predict(model, toy$test))
})
result <- multilabel_evaluate(merge_mlconfmat(confmats))
```
It is possible to choose which measures will be computed:
```{r}
# Example-based measures
result <- multilabel_evaluate(confmat, "example-based")
print(names(result))
# Subset accuracy, F1 measure and hamming-loss
result <- multilabel_evaluate(confmat, c("subset-accuracy", "F1", "hamming-loss"))
print(names(result))
# Ranking and label-based measures
result <- multilabel_evaluate(confmat, c("label-based", "ranking"))
print(names(result))
# To see all the supported measures you can try
multilabel_measures()
```
```{r, echo=FALSE, results='asis'}
## 8. How to extend utiml
### 8.1 Create a new Multi-label Method
### 8.2 Create a new base Learner
```
## 8. How to Contribute
The **utiml** repository is available at https://github.com/rivolli/utiml.
If you want to contribute to the development of this package, contact us
and you will be very welcome.
Please report any bugs or suggestions by e-mail or on the GitHub page.
|
/scratch/gouwar.j/cran-all/cranData/utiml/inst/doc/utiml-overview.Rmd
|
---
title: "utiml: Utilities for multi-label learning"
author: "Adriano Rivolli"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{utiml: Utilities for Multi-label Learning}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
**Version:** 0.1.5
The utiml package is a framework to support multi-label processing, like Mulan on Weka.
It is simple to use and extend. This tutorial explains the main topics related to the utiml package.
More details and examples are available in the [utiml repository](https://github.com/rivolli/utiml).
## 1. Introduction
The general purpose of **utiml** is to be an alternative for multi-label processing in R.
The main methods available in this package are organized into the following groups:
- Classification methods
- Evaluation methods
- Pre-process utilities
- Sampling methods
- Threshold methods
The **utiml** package needs the [mldr](https://CRAN.R-project.org/package=mldr)
package to handle multi-label datasets. It will be installed together with the
**utiml**^[You may also be interested in [mldr.datasets](https://CRAN.R-project.org/package=mldr.datasets)].
The installation process is similar to other packages available on CRAN:
```r
install.packages("utiml")
```
After installation, you can load the **utiml** package (the mldr package will also be loaded):
```{r}
library("utiml")
```
The **utiml** package brings two multi-label datasets: a synthetic toy dataset called `toyml` and a real-world dataset called `foodtruck`. To understand how to load your own dataset, we suggest reading the [mldr](https://CRAN.R-project.org/package=mldr) documentation. The `toyml` dataset contains 100 instances, 10 features and 5 labels; its purpose is to be used for small tests and examples.
```{r}
head(toyml)
```
The `foodtruck` dataset contains different types of cuisines to be predicted from user
preferences and habits. The dataset has 12 labels:
```{r}
foodtruck$labels
```
In the following section, an overview of how to conduct a multi-label experiment is presented. Next, we explore each group of methods and its particularities.
## 2. Overview
After loading the multi-label dataset, some data processing may be necessary. The pre-processing methods are utilities that manipulate `mldr` datasets.
Suppose that we want to normalize the attribute values (between 0 and 1);
we can do:
```{r}
mytoy <- normalize_mldata(toyml)
```
Next, we want to stratify the dataset into two partitions (train and test),
containing 65% and 35% of the instances respectively; then we can do:
```{r}
ds <- create_holdout_partition(mytoy, c(train=0.65, test=0.35), "iterative")
names(ds)
```
Now, the `ds` object has two elements `ds$train` and `ds$test`, where the first will be used to create a model and the second to test the model. For example, using the *Binary Relevance* multi-label method with the base algorithm *Random Forest*^[Requires the
[randomForest](https://CRAN.R-project.org/package=randomForest) package.], we can do:
```{r}
brmodel <- br(ds$train, "RF", seed=123)
prediction <- predict(brmodel, ds$test)
```
The `prediction` is an object of class `mlresult` that contains the probability (also called confidence or score) and the bipartition values:
```{r}
head(as.bipartition(prediction))
head(as.probability(prediction))
head(as.ranking(prediction))
```
A threshold strategy can be applied:
```{r}
newpred <- rcut_threshold(prediction, 2)
head(newpred)
```
Now we can evaluate the models and check whether the use of the RCUT threshold improved the results:
```{r}
result <- multilabel_evaluate(ds$test, prediction, "bipartition")
thresres <- multilabel_evaluate(ds$test, newpred, "bipartition")
round(cbind(Default=result, RCUT=thresres), 3)
```
Details of the labels evaluation can be obtained using:
```{r}
result <- multilabel_evaluate(ds$test, prediction, "bipartition", labels=TRUE)
result$labels
```
## 3. Pre-processing
The pre-processing methods were developed to facilitate some operations with the multi-label data. Each pre-processing method receives an mldr dataset and returns another mldr dataset. You can use them as needed.
Here, an overview of the pre-processing methods:
```r
# Fill sparse data
mdata <- fill_sparse_mldata(toyml)
# Remove unique attributes
mdata <- remove_unique_attributes(toyml)
# Remove the attributes "iatt8", "iatt9" and "ratt10"
mdata <- remove_attributes(toyml, c("iatt8", "iatt9", "ratt10"))
# Remove labels with less than 10 positive or negative examples
mdata <- remove_skewness_labels(toyml, 10)
# Remove the labels "y2" and "y3"
mdata <- remove_labels(toyml, c("y2", "y3"))
# Remove the examples without any labels
mdata <- remove_unlabeled_instances(toyml)
# Replace nominal attributes
mdata <- replace_nominal_attributes(toyml)
# Normalize the predictive attributes between 0 and 1
mdata <- normalize_mldata(mdata)
```
## 4. Sampling
### 4.1 Subsets
If you want to create a specific or a random subset of a dataset, you can use
the methods `create_subset` and `create_random_subset`, respectively. In the first case, you should specify which rows and, optionally, which attributes you want.
In the second case, you just define the number of instances and optionally the number of attributes.
```r
# Create a subset of toyml dataset with the even instances and the first five attributes
mdata <- create_subset(toyml, seq(1, 100, 2), 1:5)
# Create a subset of toyml dataset with the ten first instances and all attributes
mdata <- create_subset(toyml, 1:10)
# Create a random subset of toyml dataset with 30 instances and 6 attributes
mdata <- create_random_subset(toyml, 30, 6)
# Create a random subset of toyml dataset with 7 instances and all attributes
mdata <- create_random_subset(toyml, 7)
```
### 4.2 Holdout
To create two or more partitions of the dataset, we use the method `create_holdout_partition`. The first argument is a mldr dataset, the second is the size of partitions and the third is the partition method. The options are: `random`, `iterative` and `stratified`. The `iterative` is a stratification by label and the `stratified` is a stratification by labelset. The return of the
method is a list with the names defined by the second parameter.
See some examples:
```r
# Create two equal partitions using the 'iterative' method
toy <- create_holdout_partition(toyml, c(train=0.5, test=0.5), "iterative")
## toy$train and toy$test is a mldr object
# Create three partitions using the 'random' method
toy <- create_holdout_partition(toyml, c(a=0.4, b=0.3, c=0.3))
## Use toy$a, toy$b and toy$c
# Create two partitions using the 'stratified' method
toy <- create_holdout_partition(toyml, c(0.6, 0.4), "stratified")
## Use toy[[1]] and toy[[2]]
```
### 4.3 k-Folds
The simplest way to run a k-fold cross validation is by using the method `cv`:
```{r}
results <- cv(foodtruck, br, base.algorithm="SVM", cv.folds=5,
cv.sampling="stratified", cv.measures="example-based",
cv.seed=123)
round(results, 4)
```
To obtain detailed results of the folds, use the parameter `cv.results`, such that:
```{r}
results <- cv(toyml, "rakel", base.algorith="RF", cv.folds=10, cv.results=TRUE,
cv.sampling="random", cv.measures="example-based")
#Multi-label results
round(results$multilabel, 4)
#Labels results
round(sapply(results$labels, colMeans), 4)
```
Finally, to manually run a k-fold cross validation, you can use the `create_kfold_partition`. The return of this method is an object of type `kFoldPartition` that will be used with the method `partition_fold` to create
the datasets:
```r
# Create 3-fold object
kfcv <- create_kfold_partition(toyml, k=3, "iterative")
result <- lapply(1:3, function (k) {
toy <- partition_fold(kfcv, k)
model <- br(toy$train, "RF")
predict(model, toy$test)
})
# Create 5-fold object and use a validation set
kfcv <- create_kfold_partition(toyml, 5, "stratified")
result <- lapply(1:5, function (k) {
toy <- partition_fold(kfcv, k, has.validation=TRUE)
model <- br(toy$train, "RF")
list(
validation = predict(model, toy$validation),
test = predict(model, toy$test)
)
})
```
## 5. Classification Methods
Multi-label classification is a supervised learning task that seeks to learn and predict one or more labels together. The methods for this task can be grouped into: problem transformation and algorithm adaptation. Next, we provide more details about the methods and their specificities.
### 5.1 Transformation methods and Base Algorithms
The transformation methods require a base algorithm (binary or multi-class) and use its predictions to compose the multi-label result. The **utiml**
package accepts a set of default base algorithms.
Each base algorithm requires a specific package; you need to install it manually,
because they are not installed together with **utiml**. The following base learners are supported:
```{r, echo=FALSE, results='asis'}
bl <- data.frame(
Use = c("CART", "C5.0", "KNN", "MAJORITY", "NB", "RANDOM", "RF", "SVM", "XGB"),
Name = c("Classification and regression trees", "C5.0 Decision Trees and Rule-Based Models", "K Nearest Neighbor", "Majority class prediction", "Naive Bayes", "Random prediction", "Random Forest", "Support Vector Machine", "eXtreme Gradient Boosting"),
Package = c("rpart", "C50", "kknn", "-", "e1071", "-", "randomForest", "e1071", "xgboost"),
Call = c("rpart::rpart(...)", "C50::C5.0(...)", "kknn::kknn(...)", "-", "e1071::naiveBayes(...)", "-", "randomForest::randomForest(...)", "e1071::svm(...)", "xgboost::xgboost(...)")
)
knitr::kable(bl)
```
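For example, the packages for the `RF` and `XGB` base algorithms can be installed with:
```r
# Install the packages required by the RF and XGB base algorithms
install.packages(c("randomForest", "xgboost"))
```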
To perform a classification, it is first necessary to create a multi-label model; the available methods are:
```{r, echo=FALSE, results='asis'}
approaches <- c(
"br"="one-against-all", "brplus"="one-against-all; stacking", "cc"="one-against-all; chaining", "clr"="one-versus-one", "dbr"="one-against-all; stacking", "ebr"="one-against-all; ensemble", "ecc"="one-against-all; ensemble", "eps"="powerset", "homer"="hierarchy", "lift"="one-against-all", "lp"="powerset", "mbr"="one-against-all; stacking", "ns"="one-against-all; chaining", "ppt"="powerset", "prudent"="one-against-all; stacking", "ps"="powerset", "rakel"="powerset", "rdbr"="one-against-all; stacking", "rpc"="one-versus-one"
)
mts <- data.frame(
Method = c("br", "brplus", "cc", "clr", "dbr", "ebr", "ecc", "eps", "homer", "lift", "lp", "mbr", "ns", "ppt", "prudent", "ps", "rakel", "rdbr", "rpc"),
Name = c("Binary Relevance (BR)", "BR+", "Classifier Chains", "Calibrated Label Ranking (CLR)", "Dependent Binary Relevance (DBR)", "Ensemble of Binary Relevance (EBR)", "Ensemble of Classifier Chains (ECC)", "Ensemble of Pruned Set (EPS)", "Hierarchy Of Multi-label classifiER (HOMER)", "Learning with Label specIfic FeaTures (LIFT)", "Label Powerset (LP)", "Meta-Binary Relevance (MBR or 2BR)", "Nested Stacking (NS)", "Pruned Problem Transformation (PPT)", "Pruned and Confident Stacking Approach (Prudent)", "Pruned Set (PS)", "Random k-labelsets (RAkEL)", "Recursive Dependent Binary Relevance (RDBR)", "Ranking by Pairwise Comparison (RPC)"),
Approach = as.character(approaches)
)
knitr::kable(mts)
```
The first and second parameters of each multi-label method are always the same:
the multi-label dataset and the base algorithm, respectively. However, they may have specific
parameters, for example:
```r
#Classifier chain with a specific chain
ccmodel <- cc(toyml, "RF", chain = c("y5", "y4", "y3", "y2", "y1"))
# Ensemble with 5 models using 60% of sampling and 75% of attributes
ebrmodel <- ebr(toyml, "C5.0", m = 5, subsample=0.6, attr = 0.75)
```
Beyond the parameters of each multi-label method, you can define the parameters of the base algorithm, like this:
```r
# Specific parameters for SVM
brmodel <- br(toyml, "SVM", gamma = 0.1, scale=FALSE)
# Specific parameters for KNN
ccmodel <- cc(toyml, "KNN", c("y5", "y4", "y3", "y2", "y1"), k=5)
# Specific parameters for Random Forest
ebrmodel <- ebr(toyml, "RF", 5, 0.6, 0.75, proximity=TRUE, ntree=100)
```
After building the model, use the `predict` method to predict new data. Some predict methods require specific arguments, and you can also pass arguments to the base method. By default, all base learners predict the probability of the prediction, so do not use such parameters for the base method. Instead, use the `probability` parameter defined by the multi-label prediction method.
```r
# Predict the BR model
result <- predict(brmodel, toyml)
# Specific parameters for KNN
result <- predict(ccmodel, toyml, kernel="triangular", probability = FALSE)
```
An object of type `mlresult` is returned by the predict method. It always contains
the bipartitions and the probability values, so you can use `as.bipartition`,
`as.probability` and `as.ranking` to obtain each specific representation.
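For example, reusing the `result` object from the previous code block:
```r
# Obtain each representation from the mlresult returned by predict
bip <- as.bipartition(result)  # bipartition (0/1) matrix
prob <- as.probability(result) # scores between 0 and 1
rank <- as.ranking(result)     # label rankings per instance
```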
### 5.2 Algorithm adaptation
Until now, only a single algorithm adaptation method is available: `mlknn`.
```r
model <- mlknn(toyml, k=3)
pred <- predict(model, toyml)
```
### 5.3 Seed and Multicores
Almost all multi-label methods can run in parallel. The train and prediction methods receive a parameter called `cores` that specifies the number of cores used to run the method. Some multi-label methods cannot run on multiple cores, so read the documentation of each method for more details.
```r
# Running Binary Relevance method using 2 cores
brmodel <- br(toyml, "SVM", cores=2)
prediction <- predict(brmodel, toyml, cores=2)
```
If you need reproducibility, you can set a specific seed:
```r
# Running Binary Relevance method using 2 cores
brmodel <- br(toyml, "SVM", cores=2, seed=1984)
prediction <- predict(brmodel, toyml, seed=1984, cores=2)
```
The `cv` method also supports multicores:
```r
results <- cv(toyml, method="ecc", base.algorith="RF", subsample = 0.9, attr.space = 0.9, cv.folds=5, cv.cores=2)
```
## 6. Thresholds
The threshold methods receive an `mlresult` object and return a new `mlresult`, except for `scut`, which returns the threshold values. These methods mainly change the bipartition values using the probability values.
```r
# Use a fixed threshold for all labels
newpred <- fixed_threshold(prediction, 0.4)
# Use a specific threshold for each label
newpred <- fixed_threshold(prediction, c(0.4, 0.5, 0.6, 0.7, 0.8))
# Use the MCut approach to define the threshold
newpred <- mcut_threshold(prediction)
# Use the PCut threshold
newpred <- pcut_threshold(prediction, ratio=0.65)
# Use the RCut threshold
newpred <- rcut_threshold(prediction, k=3)
# Choose the best threshold values based on a Mean Squared Error
thresholds <- scut_threshold(prediction, toyml, cores = 2)
newpred <- fixed_threshold(prediction, thresholds)
#Predict only the labelsets present in the train data
newpred <- subset_correction(prediction, toyml)
```
## 7. Evaluation
To evaluate multi-label models you can use the method `multilabel_evaluate`. There are two ways to call this method:
```{r}
toy <- create_holdout_partition(toyml)
brmodel <- br(toy$train, "SVM")
prediction <- predict(brmodel, toy$test)
# Using the test dataset and the prediction
result <- multilabel_evaluate(toy$test, prediction)
print(round(result, 3))
# Build a confusion matrix
confmat <- multilabel_confusion_matrix(toy$test, prediction)
result <- multilabel_evaluate(confmat)
print(confmat)
```
The confusion matrix summarizes a lot of data, and can be merged. For example,
using a k-fold experiment:
```r
kfcv <- create_kfold_partition(toyml, k=3)
confmats <- lapply(1:3, function (k) {
toy <- partition_fold(kfcv, k)
model <- br(toy$train, "RF")
multilabel_confusion_matrix(toy$test, predict(model, toy$test))
})
result <- multilabel_evaluate(merge_mlconfmat(confmats))
```
It is possible to choose which measures will be computed:
```{r}
# Example-based measures
result <- multilabel_evaluate(confmat, "example-based")
print(names(result))
# Subset accuracy, F1 measure and hamming-loss
result <- multilabel_evaluate(confmat, c("subset-accuracy", "F1", "hamming-loss"))
print(names(result))
# Ranking and label-based measures
result <- multilabel_evaluate(confmat, c("label-based", "ranking"))
print(names(result))
# To see all the supported measures you can try
multilabel_measures()
```
```{r, echo=FALSE, results='asis'}
## 8. How to extend utiml
### 8.1 Create a new Multi-label Method
### 8.2 Create a new base Learner
```
## 8. How to Contribute
The **utiml** repository is available at https://github.com/rivolli/utiml.
If you want to contribute to the development of this package, contact us
and you will be very welcome.
Please report any bugs or suggestions by e-mail or on the GitHub page.
|
/scratch/gouwar.j/cran-all/cranData/utiml/vignettes/utiml-overview.Rmd
|
UUIDgenerate <- function(use.time = NA, n = 1L, output = c("string", "raw", "uuid"))
.Call(UUID_gen, n, switch(match.arg(output), string = 0L, raw = 1L, uuid = 2L),
if (isTRUE(use.time)) 1L else if (isTRUE(!use.time)) 4L else NA_integer_, NULL)
UUIDfromName <- function(namespace, name, type = c("sha1", "md5"),
output = c("string", "raw", "uuid")) {
ns <- as.UUID(namespace)
if (length(ns) != 1 || any(is.na(ns)))
stop("namespace must be a single, valid UUID")
.Call(UUID_gen, name,
switch(match.arg(output),
string = 0L, raw = 1L, uuid = 2L),
switch(match.arg(type), sha1 = 5L, md5 = 3L), ns)
}
UUIDparse <- function(what, output = c("uuid", "string", "raw", "logical"))
.Call(UUID_parse, what,
switch(match.arg(output),
string = 0L, raw = 1L, uuid = 2L, logical = 3L))
UUIDvalidate <- function(what) UUIDparse(what, output="logical")
UUID2string <- function(what) .Call(UUID_unparse, what, 0L)
as.character.UUID <- function(x, ...) .Call(UUID_unparse, x, 0L)
as.UUID <- function(x, ...) UseMethod("as.UUID")
as.UUID.UUID <- function(x, ...) x
as.UUID.raw <- function(x, ...) .Call(UUID_unparse, x, 1L)
as.UUID.default <- function(x, ...) UUIDparse(as.character(x), output="uuid")
as.raw.UUID <- function(x) .Call(UUID_unparse, x, 2L)
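## The methods below treat UUID objects as classed complex vectors: they unclass,
## apply the corresponding base operation and then restore the "UUID" class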
rep.UUID <- function(x, ...) {
x <- rep(unclass(x), ...)
class(x) <- "UUID"
x
}
c.UUID <- function(...) {
l <- lapply(list(...), function(x) if (inherits(x, "UUID")) unclass(x) else x)
x <- do.call(base::c, l)
if (is.complex(x)) class(x) <- "UUID"
x
}
unique.UUID <- function (x, incomparables = FALSE, ...) {
x <- unique(unclass(x), incomparables=incomparables, ...)
class(x) <- "UUID"
x
}
print.UUID <- function(x, ...) {
if (length(x) == 1L)
cat("UUID: ", as.character.UUID(x), "\n", sep='')
else {
cat("UUID vector:\n")
print(as.character.UUID(x), ...)
}
invisible(x)
}
`[.UUID` <- function(x, i, ...) {
x <- unclass(x)[i, ...]
class(x) <- "UUID"
x
}
`[<-.UUID` <- function(x, i, ..., value) {
x <- unclass(x)
x[i, ...] <- as.UUID(value)
class(x) <- "UUID"
x
}
`[[.UUID` <- function(x, i, ...) {
x <- unclass(x)[[i, ...]]
class(x) <- "UUID"
x
}
`[[<-.UUID` <- function(x, i, ..., value) {
x <- unclass(x)
x[[i, ...]] <- as.UUID(value)
class(x) <- "UUID"
x
}
Ops.UUID <- function(e1, e2) stop(.Generic, " operator is not supported on UUIDs")
Math.UUID <- function(x, ...) stop(.Generic, " is not supported on UUIDs")
Complex.UUID <- function(z) stop(.Generic, " is not supported on UUIDs")
Summary.UUID <- function(..., na.rm = FALSE) stop(.Generic, " is not supported on UUIDs")
is.UUID <- function(x) inherits(x, "UUID")
`==.UUID` <- function(e1, e2) {
if (inherits(e1, "UUID") && inherits(e2, "UUID"))
.Call(UUID_cmp, e1, e2, 0L)
else
as.character(e1) == as.character(e2)
}
`!=.UUID` <- function(e1, e2) !`==.UUID`(e1, e2)
## We cannot use native is.na, because we only reserve (NA,NA), yet R will flag
## (x,NA) or (NA, x) in the complex case
is.na.UUID <- function(x) .Call(UUID_is_NA, x)
|
/scratch/gouwar.j/cran-all/cranData/uuid/R/uuid.R
|
#' Statistical Data Control. Data Research, Access, Governance Network.
#'
#' A tool for checking how much information is disclosed when
#' reporting summary statistics
#'
#'
#' @docType package
#'
#'
#'
#' @name SDCdragon
NULL
|
/scratch/gouwar.j/cran-all/cranData/uwedragon/R/SDC.R
|
#' Find individual sample values from the sample mean and standard deviation
#'
#' For integer based scales, finds possible solutions for each value within a sample.
#' This is revealed upon providing sample size, minimum possible value, maximum possible value,
#' mean, standard deviation (and optionally median).
#'
#'
#' @param n Sample size.
#'
#' @param min_poss Minimum possible value. If sample minimum is disclosed, this can be inserted here, otherwise use the theoretical minimum. If there is no theoretical minimum '-Inf' can be inserted.
#'
#' @param max_poss Maximum possible value. If sample maximum is disclosed, this can be inserted here, otherwise use the theoretical maximum. If there is no theoretical maximum 'Inf' can be inserted.
#'
#' @param usermean Sample mean.
#'
#' @param usersd Sample standard deviation, i.e. n-1 denominator.
#'
#' @param meandp (optional, default=NULL) Number of decimal places mean is reported to, only required if including trailing zeroes.
#'
#' @param sddp (optional, default=NULL) Number of decimal places standard deviation is reported to, only required if including trailing zeroes.
#'
#' @param usermed (optional, default=NULL) Sample median.
#'
#' @return Outputs possible combinations of original integer sample values.
#'
#' @export
#'
#'
#' @details
#'
#'
#' For use with data measured on a scale with 1 unit increments.
#' Samuelson's inequality [1] is used to further restrict the minimum and maximum.
#' All possible combinations within this inequality are calculated [2] for
#' factorial(n+k-1)/(factorial(k)*factorial(n-1))<65,000,000.
#'
#' There is no restriction on the number of decimal places input. Reporting fewer than
#' two decimal places will reduce the chances of a unique solution for all
#' sample values being uncovered [3].
#'
#' Additional options allow specifying the number of digits reported after the decimal place,
#' which is required for trailing zeroes.
#'
#' @examples
#'
#' # EXAMPLE 1
#' # Seven observations are taken from a five-point Likert scale (coded 1 to 5).
#' # The reported mean is 2.857 and the reported standard deviation is 1.574.
#'
#' solutions(7,1,5,2.857,1.574)
#'
#' # For this mean and standard deviation there are two possible distributions:
#' # 1 1 2 3 4 4 5
#' # 1 2 2 2 3 5 5
#'
#' # Optionally adding median value of 3.
#'
#' solutions(7,1,5,2.857,1.574, usermed=3)
#'
#' # uniquely reveals the raw sample values:
#' # 1 1 2 3 4 4 5
#'
#'
#' # EXAMPLE 2
#' # The mean is '4.00'.
#' # The standard deviation is '2.00'.
#' # Narrower set of solutions found specifying 2dp including trailing zeroes.
#'
#' solutions(3,-Inf,Inf,4.00,2.00,2,2)
#'
#' # uniquely reveals the raw sample values:
#' # 2 4 6
#'
#' @references
#'
#' [1] Samuelson, P.A, 1968, How deviant can you be? Journal of the American Statistical Association, Vol 63, 1522-1525.
#'
#' [2] Allenby, R.B. and Slomson, A., 2010. How to count: An introduction to combinatorics. Chapman and Hall/CRC.
#'
#' [3] Derrick, B., Green, L., Kember, K., Ritchie, F. & White P, 2022, Safety in numbers: Minimum thresholding, Maximum bounds, and Little White Lies.
#' Scottish Economic Society Annual Conference, University of Glasgow, 25th-27th April 2022
#'
solutions<- function(n, min_poss, max_poss, usermean, usersd, meandp=NULL, sddp=NULL, usermed=NULL) {
#ensure valid data entry
if (min_poss > max_poss)
stop("Check data input. Minimum cannot be greater than Maximum")
if ((is.null(n)) | (is.null(min_poss)) | (is.null(max_poss)) | (is.null(usermean)) | (is.null(usersd)))
stop("N, Minimum, Maximum, Mean and Standard Deviation all required")
#check Samuelson's inequality and adjust min / max if this reduces the range
min_poss_s<- floor(usermean-((sqrt(n-1))*usersd))
max_poss_s<- ceiling(((sqrt(n-1))*usersd) + usermean)
if(min_poss_s> min_poss){
min_poss_f<- min_poss_s
}
else{
min_poss_f<- min_poss
}
if(max_poss_s< max_poss){
max_poss_f<-max_poss_s
}
else{
max_poss_f<-max_poss
}
k<-length(min_poss_f:max_poss_f)
#stop user if combinations too large for R to store in memory
if (factorial(n+k-1) == Inf)
stop("Sample size or potential range of values too large to compute solutions")
if (factorial(n+k-1)/(factorial(k)*factorial(n-1))>65000000)
stop("Sample size or potential range of values too large to compute solutions")
#apply number of decimal places as determined by user input (where meandp and sddp not specified)
decimalplaces <- function(x) {
if (abs(x - round(x)) > .Machine$double.eps^0.5) {
nchar(strsplit(sub('0+$', '', as.character(x)), ".", fixed = TRUE)[[1]][[2]])
} else {
return(0)
}
}
#generate all combinations
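#i.e. every sample of size n drawn with repetition from the k admissible values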
samples<-gtools::combinations(k, n, min_poss_f:max_poss_f, repeats.allowed=TRUE)
samples<-data.frame(samples)
#calculate summary statistics for all combinations and compare to users stated summary statistics
if (is.null(usermed)){
all<-transform(samples, means=apply(samples, 1, mean))
all2<-transform(all, sd=apply(samples, 1, sd))
if (is.null(meandp) & is.null(sddp)) {
all2$combine<-paste0(round(all2$means,decimalplaces(usermean)),round(all2$sd,decimalplaces(usersd)))
user<-paste0(round(usermean,decimalplaces(usermean)),round(usersd,decimalplaces(usersd)))
}
else{
if (is.null(meandp) | is.null(sddp))
stop("If either specified, number of decimal places must be specified for both mean and standard deviation")
    if (meandp != round(meandp) || sddp != round(sddp))
stop("Data entry incorrect. Number of decimal places must be integer")
all2$combine<-paste0(round(all2$means,meandp),round(all2$sd,sddp))
user<-paste0(round(usermean,meandp),round(usersd,sddp))
}
if(nrow(all[which(all2$combine == user ),c(1:n)])==0){
warning("No solutions found: data input is incorrect or mean and standard deviation disguised")
}
return(all[which(all2$combine == user ),c(1:n)])
}
#if including median
else {
all<-transform(samples, means=apply(samples, 1, mean))
all2<-transform(all, sd=apply(samples, 1, sd))
all2<-transform(all2, med=apply(samples, 1, median))
if (is.null(meandp) & is.null(sddp)) {
all2$combine<-paste0(round(all2$means,decimalplaces(usermean)),round(all2$sd,decimalplaces(usersd)),round(all2$med,decimalplaces(usermed)))
user<-paste0(round(usermean,decimalplaces(usermean)),round(usersd,decimalplaces(usersd)),round(usermed,decimalplaces(usermed)))
}
else{
if (is.null(meandp) | is.null(sddp))
stop("If either specified, number of decimal places must be specified for both mean and standard deviation")
    if (meandp != round(meandp) || sddp != round(sddp))
stop("Data entry incorrect. Number of decimal places must be integer")
all2$combine<-paste0(round(all2$means,meandp),round(all2$sd,sddp),round(all2$med,decimalplaces(usermed)))
user<-paste0(round(usermean,meandp),round(usersd,sddp), round(usermed,decimalplaces(usermed)))
}
if(nrow(all[which(all2$combine == user ),c(1:n)])==0){
warning("No solutions found: data input is incorrect or mean and standard deviation disguised")
}
return(all[which(all2$combine == user ),c(1:n)])
}
}
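# Illustrative sketch (not part of the package sources): the number of
# candidate samples enumerated by gtools::combinations is the number of
# multisets of size n drawn from the k allowed values, choose(n + k - 1, n).
# For the Likert example in the documentation (n = 7, k = 5):
#
# n <- 7
# k <- 5
# choose(n + k - 1, n)                                          # 330
# nrow(gtools::combinations(k, n, 1:k, repeats.allowed = TRUE)) # 330
#
# The guard above uses the closely related binomial coefficient
# choose(n + k - 1, k) = factorial(n + k - 1) / (factorial(k) * factorial(n - 1))
# as its threshold, checked against the 65,000,000 ceiling before any
# enumeration is attempted.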
|
/scratch/gouwar.j/cran-all/cranData/uwedragon/R/meansd0.R
|
#' Disguise the sample mean and sample deviation
#'
#' Disguises the sample mean and standard deviation via a choice of methods.
#'
#'
#' @param usersample A vector of all individual sample values.
#'
#' @param method Approach for disguising mean and standard deviation. (default = 1)
#'
#' @return Outputs disguised mean and disguised standard deviation.
#'
#' @export
#'
#'
#' @details
#'
#' *Method 1*
#'
#' Randomly split the sample into two (approximately equal size) samples, A and B.
#' For sample A, calculate and report the mean. For sample B, calculate and
#' report the standard deviation.
#'
#'
#' *Method 2* (default)
#'
#' Take a sample of size N with replacement; calculate and report mean.
#' Repeat to calculate and report standard deviation.
#'
#'
#' *Method 3*
#'
#' Generate a random number (RN1) between N/2 and N. Sample with
#' replacement a sample size of RN1; calculate and report mean.
#' Generate a random number (RN2) between N/2 and N. Sample with
#' replacement a sample size of RN2; calculate and report standard deviation.
#'
#'
#' *Method 4*
#'
#' As Method 3, but sampling without replacement.
#'
#' @examples
#'
#' usersample<-c(1,1,2,3,4,4,5)
#'
#' disguise(usersample,method=1)
#' disguise(usersample,method=2)
#' disguise(usersample,method=3)
#' disguise(usersample,method=4)
#'
#'
#' @references
#' Derrick, B., Green, L., Kember, K., Ritchie, F. & White, P., 2022, Safety in numbers: Minimum thresholding, Maximum bounds, and Little White Lies.
#' Scottish Economic Society Annual Conference, University of Glasgow, 25th-27th April 2022
disguise<-function(usersample,method=2){
n<-length(usersample)
#### method 1 #####
if (method == 1){
ind<-sample(1:n,size=ceiling(n/2), replace = FALSE)
split<-sort(ind)
SampleA<-usersample[split]
SampleB<-usersample[-split]
SampleAmean<-mean(SampleA)
SampleBsd<-sd(SampleB)
}
if (method == 2){
#### method 2 #####
SampleA<-sample(usersample, size =n, replace = TRUE)
SampleAmean<-mean(SampleA)
SampleB<-sample(usersample, size =n, replace = TRUE)
SampleBsd<-sd(SampleB)
}
if (method == 3){
### method 3 ####
rnd1<-round(runif(1,min=n/2,max=n),0)
SampleA<-sample(usersample, size =rnd1, replace = TRUE)
SampleAmean<-mean(SampleA)
rnd2<-round(runif(1,min=n/2,max=n),0)
SampleB<-sample(usersample, size =rnd2, replace = TRUE)
SampleBsd<-sd(SampleB)
}
if (method == 4){
### method 4 ####
rnd1<-round(runif(1,min=n/2,max=n),0)
SampleA<-sample(usersample, size =rnd1, replace = FALSE)
SampleAmean<-mean(SampleA)
rnd2<-round(runif(1,min=n/2,max=n),0)
SampleB<-sample(usersample, size =rnd2, replace = FALSE)
SampleBsd<-sd(SampleB)
}
print(paste0("mean = ",round(SampleAmean,1)))
print(paste0("sd = ",round(SampleBsd,1)))
}
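# Illustrative sketch (not part of the package sources): comparing the
# disguised statistics with the true sample statistics for the documented
# example; setting a seed makes the resampling reproducible.
#
# usersample <- c(1, 1, 2, 3, 4, 4, 5)
# round(mean(usersample), 1)  # true mean, 2.9
# round(sd(usersample), 1)    # true sd, 1.6
# set.seed(1)
# disguise(usersample, method = 2)  # reports a resampled mean and sd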
|
/scratch/gouwar.j/cran-all/cranData/uwedragon/R/whitelies.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
connected_components_undirected <- function(N, indices1, indptr1, indices2, indptr2) {
.Call(`_uwot_connected_components_undirected`, N, indices1, indptr1, indices2, indptr2)
}
annoy_search_parallel_cpp <- function(index_name, mat, n_neighbors, search_k, metric, n_threads = 0L, grain_size = 1L) {
.Call(`_uwot_annoy_search_parallel_cpp`, index_name, mat, n_neighbors, search_k, metric, n_threads, grain_size)
}
calc_row_probabilities_parallel <- function(nn_dist, n_vertices, perplexity, n_iter = 200L, tol = 1e-5, ret_sigma = FALSE, n_threads = 0L, grain_size = 1L) {
.Call(`_uwot_calc_row_probabilities_parallel`, nn_dist, n_vertices, perplexity, n_iter, tol, ret_sigma, n_threads, grain_size)
}
optimize_layout_r <- function(head_embedding, tail_embedding, positive_head, positive_tail, positive_ptr, n_epochs, n_head_vertices, n_tail_vertices, epochs_per_sample, method, method_args, initial_alpha, opt_args, epoch_callback, negative_sample_rate, pcg_rand = TRUE, batch = FALSE, n_threads = 0L, grain_size = 1L, move_other = TRUE, verbose = FALSE) {
.Call(`_uwot_optimize_layout_r`, head_embedding, tail_embedding, positive_head, positive_tail, positive_ptr, n_epochs, n_head_vertices, n_tail_vertices, epochs_per_sample, method, method_args, initial_alpha, opt_args, epoch_callback, negative_sample_rate, pcg_rand, batch, n_threads, grain_size, move_other, verbose)
}
smooth_knn_distances_parallel <- function(nn_dist, nn_ptr, skip_first, target, n_iter = 64L, local_connectivity = 1.0, tol = 1e-5, min_k_dist_scale = 1e-3, ret_sigma = FALSE, n_threads = 0L, grain_size = 1L) {
.Call(`_uwot_smooth_knn_distances_parallel`, nn_dist, nn_ptr, skip_first, target, n_iter, local_connectivity, tol, min_k_dist_scale, ret_sigma, n_threads, grain_size)
}
reset_local_metrics_parallel <- function(indptr, probabilities, n_iter = 32L, tol = 1e-5, num_local_metric_neighbors = 15.0, n_threads = 0L) {
.Call(`_uwot_reset_local_metrics_parallel`, indptr, probabilities, n_iter, tol, num_local_metric_neighbors, n_threads)
}
fast_intersection_cpp <- function(rows, cols, values, target, unknown_dist = 1.0, far_dist = 5.0) {
.Call(`_uwot_fast_intersection_cpp`, rows, cols, values, target, unknown_dist, far_dist)
}
general_sset_intersection_cpp <- function(indptr1, indices1, data1, indptr2, indices2, data2, result_row, result_col, result_val, mix_weight = 0.5) {
.Call(`_uwot_general_sset_intersection_cpp`, indptr1, indices1, data1, indptr2, indices2, data2, result_row, result_col, result_val, mix_weight)
}
general_sset_union_cpp <- function(indptr1, indices1, data1, indptr2, indices2, data2, result_row, result_col, result_val) {
.Call(`_uwot_general_sset_union_cpp`, indptr1, indices1, data1, indptr2, indices2, data2, result_row, result_col, result_val)
}
hardware_concurrency <- function() {
.Call(`_uwot_hardware_concurrency`)
}
init_transform_parallel <- function(train_embedding, nn_index, n_test_vertices, nn_weights, n_threads = 0L, grain_size = 1L) {
.Call(`_uwot_init_transform_parallel`, train_embedding, nn_index, n_test_vertices, nn_weights, n_threads, grain_size)
}
|
/scratch/gouwar.j/cran-all/cranData/uwot/R/RcppExports.R
|
# set_op_mix_ratio = between 0 and 1 mixes in fuzzy set intersection
# set to 0 for intersection only
#' @import Matrix
fuzzy_set_union <- function(X, set_op_mix_ratio = 1) {
XX <- X * Matrix::t(X)
if (set_op_mix_ratio == 0) {
Matrix::drop0(XX)
}
else if (set_op_mix_ratio == 1) {
Matrix::drop0(X + Matrix::t(X) - XX)
}
else {
Matrix::drop0(
set_op_mix_ratio * (X + Matrix::t(X) - XX) + (1 - set_op_mix_ratio) * XX
)
}
}
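# Illustrative sketch (not part of the package sources): for a tiny asymmetric
# affinity matrix, set_op_mix_ratio = 1 applies the probabilistic t-conorm
# a + b - a * b to each pair of opposing edges, and set_op_mix_ratio = 0 keeps
# only the elementwise product; both results are symmetric by construction.
#
# A <- Matrix::sparseMatrix(i = c(1, 2), j = c(2, 1), x = c(0.8, 0.5),
#                           dims = c(2, 2))
# fuzzy_set_union(A)[1, 2]                        # 0.8 + 0.5 - 0.8 * 0.5 = 0.9
# fuzzy_set_union(A, set_op_mix_ratio = 0)[1, 2]  # 0.8 * 0.5 = 0.4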
# Calculate the (asymmetric) affinity matrix based on the nearest neighborhoods
# default target for calibration is the sum of affinities = log2(n_nbrs)
# nn distances should be stored column-wise
smooth_knn <- function(nn_dist,
nn_ptr = NULL,
skip_first = TRUE,
target = NULL,
local_connectivity = 1.0,
n_threads = NULL,
grain_size = 1,
ret_sigma = FALSE,
verbose = FALSE) {
if (is.null(n_threads)) {
n_threads <- default_num_threads()
}
tsmessage(
"Commencing smooth kNN distance calibration",
pluralize("thread", n_threads, " using"), appendLF = FALSE
)
if (length(target) == 1) {
tsmessage(" with target n_neighbors = ", formatC(2 ^ target), time_stamp = FALSE)
}
else {
tsmessage(time_stamp = FALSE)
}
affinity_matrix_res <- smooth_knn_distances_parallel(
nn_dist = nn_dist,
nn_ptr = nn_ptr,
skip_first = skip_first,
target = target,
n_iter = 64,
local_connectivity = local_connectivity,
tol = 1e-5,
min_k_dist_scale = 1e-3,
n_threads = n_threads,
grain_size = grain_size,
ret_sigma = ret_sigma
)
if (verbose && affinity_matrix_res$n_failures > 0) {
tsmessage(affinity_matrix_res$n_failures, " smooth knn distance failures")
}
affinity_matrix_res
}
smooth_knn_matrix <- function(nn,
target = NULL,
local_connectivity = 1.0,
bandwidth = 1.0,
ret_sigma = FALSE,
n_threads = NULL,
grain_size = 1,
verbose = FALSE) {
if (is.null(n_threads)) {
n_threads <- default_num_threads()
}
osparse <- NULL
if (is_sparse_matrix(nn)) {
nn <- Matrix::drop0(nn)
osparse <- order_sparse(nn)
nn_dist <- osparse$x
nn_ptr <- osparse$p
n_nbrs <- diff(nn_ptr)
if (any(n_nbrs < 1)) {
stop("All observations need at least one neighbor")
}
if (is.null(target)) {
# add 1 to n_nbrs to account for implicit self neighbor
target <- log2(n_nbrs + 1) * bandwidth
}
skip_first <- FALSE
}
else {
nnt <- nn_graph_t(nn)
n_nbrs <- nrow(nnt$dist)
if (is.null(target)) {
target <- log2(n_nbrs) * bandwidth
}
nn_ptr <- n_nbrs
nn_dist <- as.vector(nnt$dist)
skip_first <- TRUE
}
affinity_matrix_res <- smooth_knn(
nn_dist = nn_dist,
nn_ptr = nn_ptr,
skip_first = skip_first,
target = target,
local_connectivity = local_connectivity,
ret_sigma = ret_sigma,
n_threads = n_threads,
grain_size = grain_size,
verbose = verbose
)
v <- affinity_matrix_res$matrix
if (is_sparse_matrix(nn)) {
# use j instead of i to transpose it
v <- Matrix::sparseMatrix(j = osparse$i, p = osparse$p, x = v,
dims = osparse$dims, index1 = FALSE)
Matrix::diag(v) <- 0.0
v <- Matrix::drop0(v)
}
else {
v <- nng_to_sparse(nnt$idx, v, self_nbr = TRUE, by_row = FALSE)
}
affinity_matrix_res$matrix <- v
affinity_matrix_res
}
# Given nearest neighbor data and a measure of distance compute
# the fuzzy simplicial set (here represented as a fuzzy graph in the form of a
# sparse matrix) associated to the data. This is done by locally approximating
# geodesic distance at each point, creating a fuzzy simplicial set for each such
# point, and then combining all the local fuzzy simplicial sets into a global
# one via a fuzzy union
fuzzy_simplicial_set <- function(nn,
target = NULL,
set_op_mix_ratio = 1.0,
local_connectivity = 1.0, bandwidth = 1.0,
ret_sigma = FALSE,
n_threads = NULL,
grain_size = 1,
verbose = FALSE) {
affinity_matrix_res <- smooth_knn_matrix(nn = nn,
target = target,
local_connectivity = local_connectivity,
bandwidth = bandwidth,
ret_sigma = ret_sigma,
n_threads = n_threads,
grain_size = grain_size,
verbose = verbose)
res <- fuzzy_set_union(affinity_matrix_res$matrix, set_op_mix_ratio = set_op_mix_ratio)
if (ret_sigma) {
res <- list(matrix = res)
res$sigma <- affinity_matrix_res$sigma
res$rho <- affinity_matrix_res$rho
}
res
}
symmetrize <- function(P) {
0.5 * (P + Matrix::t(P))
}
perplexity_similarities <- function(nn, perplexity = NULL, ret_sigma = FALSE,
n_threads = NULL,
grain_size = 1,
kernel = "gauss",
verbose = FALSE) {
if (is.null(n_threads)) {
n_threads <- default_num_threads()
}
if (is.null(perplexity) && kernel != "knn") {
stop("Must provide perplexity")
}
sigma <- NULL
if (kernel == "gauss") {
tsmessage(
"Commencing calibration for perplexity = ", formatC(perplexity),
pluralize("thread", n_threads, " using")
)
nnt <- nn_graph_t(nn)
n_vertices <- ncol(nnt$dist)
affinity_matrix_res <- calc_row_probabilities_parallel(
nn_dist = as.vector(nnt$dist),
n_vertices = n_vertices,
perplexity = perplexity,
ret_sigma = ret_sigma,
n_threads = n_threads,
grain_size = grain_size
)
if (verbose && affinity_matrix_res$n_failures > 0) {
tsmessage(affinity_matrix_res$n_failures, " perplexity failures")
}
dint <- NULL
if (ret_sigma && !is.null(affinity_matrix_res$sigma)) {
# An analytical version of the "soft" correlation dimension estimate of
# intrinsic dimensionality from multi-scale SNE by Lee et al (2015).
# http://jlmelville.github.io/sneer/dimensionality.html
d <- nnt$dist
p <- affinity_matrix_res$matrix
logp <- log(p + .Machine$double.eps)
s <- affinity_matrix_res$sigma
h <- -colSums(p * logp)
lph <- sweep(logp, 2, h, `+`)
dhdb <- colSums(d * d * p * lph)
dint <- -2 * dhdb / (s * s)
}
affinity_matrix <- nng_to_sparse(nnt$idx, as.vector(affinity_matrix_res$matrix),
self_nbr = TRUE, by_row = FALSE
)
if (!is.null(affinity_matrix_res$sigma)) {
sigma <- affinity_matrix_res$sigma
}
}
else {
# knn kernel
tsmessage("Using knn graph for input weights with k = ", ncol(nn$idx))
# Make each row sum to 1, ignoring the self-index
# i.e. diagonal will be zero
affinity_matrix <- nng_to_sparse(nn$idx, val = 1 / (ncol(nn$idx) - 1))
Matrix::diag(affinity_matrix) <- 0
affinity_matrix <- Matrix::drop0(affinity_matrix)
}
res <- list(matrix = symmetrize(affinity_matrix))
if (ret_sigma && !is.null(sigma)) {
res$sigma <- sigma
if (!is.null(dint)) {
res$dint <- dint
}
}
res
}
# Convert the matrix of NN indices to a sparse asymmetric matrix where each
# edge has a weight of val (scalar or vector)
# return a sparse matrix with dimensions of nrow(nn_idx) x max_nbr_id
nn_to_sparse <- function(nn_idxv, n_obs, val = 1, self_nbr = FALSE,
max_nbr_id = NULL, by_row = TRUE) {
n_nbrs <- length(nn_idxv) / n_obs
if (is.null(max_nbr_id)) {
max_nbr_id <- ifelse(self_nbr, n_obs, max(nn_idxv))
}
if (length(val) == 1) {
xs <- rep(val, n_obs * n_nbrs)
}
else {
xs <- val
}
if (by_row) {
is <- rep(1:n_obs, times = n_nbrs)
}
else {
is <- rep(1:n_obs, each = n_nbrs)
}
dims <- c(n_obs, max_nbr_id)
res <- Matrix::sparseMatrix(i = is, j = nn_idxv, x = xs, dims = dims)
if (self_nbr) {
Matrix::diag(res) <- 0
res <- Matrix::drop0(res)
}
res
}
nng_to_sparse <- function(nn_idx, val = 1, self_nbr = FALSE,
max_nbr_id = NULL, by_row = TRUE) {
if (by_row) {
n_obs <- nrow(nn_idx)
}
else {
n_obs <- ncol(nn_idx)
}
nn_to_sparse(as.vector(nn_idx), n_obs, val = val, self_nbr = self_nbr,
max_nbr_id = max_nbr_id, by_row = by_row)
}
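# Illustrative sketch (not part of the package sources): a 3-observation kNN
# index matrix, one row of neighbor indices per observation, becomes a 3 x 3
# sparse matrix with one entry of weight val per directed edge.
#
# idx <- matrix(c(2, 3,
#                 1, 3,
#                 1, 2), nrow = 3, byrow = TRUE)
# nng_to_sparse(idx, val = 0.5)
# # 3 x 3 dgCMatrix with 0.5 in every off-diagonal position (each point has
# # the other two as neighbors), zero elsewhere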
# transpose the index and distance matrix
nn_graph_t <- function(nn_graph) {
list(idx = t(nn_graph$idx), dist = t(nn_graph$dist))
}
order_sparse <- function(spm) {
x <- spm@x
i <- spm@i
p <- spm@p
x_sort <- rep(0, length(x))
i_sort <- rep(0, length(i))
n_vertices <- length(p) - 1
for (v in 1:n_vertices) {
p_begin <- p[v]
p_end <- p[v + 1]
if (p_end - p_begin == 0) {
next
}
pb1 <- p_begin + 1
x_order <- order(x[pb1:p_end])
x_sort[pb1:p_end] <- x[x_order + p_begin]
i_sort[pb1:p_end] <- i[x_order + p_begin]
}
list(i = i_sort, p = p, x = x_sort, order = x_order, dims = spm@Dim)
}
|
/scratch/gouwar.j/cran-all/cranData/uwot/R/affinity.R
|
bigstatsr_is_installed <- function() {
is_installed("bigstatsr")
}
bigstatsr_scores <- function(X,
ncol,
center = TRUE,
ret_extra = FALSE,
ncores = 1,
verbose = FALSE) {
res <- bigstatsr::big_randomSVD(
X = bigstatsr::as_FBM(X),
fun.scaling = bigstatsr::big_scale(center = center, scale = FALSE),
k = ncol,
ncores = ncores
)
if (verbose) {
totalvar <- sum(apply(X, 2, stats::var))
lambda <- sum((res$d^2) / (nrow(X) - 1))
varex <- lambda / totalvar
tsmessage(
"PCA: ",
ncol,
" components explained ",
formatC(varex * 100),
"% variance"
)
}
scores <- stats::predict(res)
if (ret_extra) {
list(
scores = scores,
rotation = res$v,
center = res$center
)
} else {
scores
}
}
|
/scratch/gouwar.j/cran-all/cranData/uwot/R/bigstatsr_init.R
|
# Laplacian Eigenmap (Belkin & Niyogi, 2002)
# Original formulation solves the generalized eigenvalue problem of the
# unnormalized graph Laplacian: L v = lambda D v, where L = D - A
# and uses the bottom eigenvectors v that result
# (ignoring the constant eigenvector associated with the smallest eigenvalue).
#
# This is equivalent to using the top eigenvectors from the usual
# eigendecomposition of a row-normalized Laplacian P = D^-1 A: P v = lambda' v
# so we don't need to depend on an external package for generalized eigenvalues.
# Note that while the eigenvectors are the same, the eigenvalues are
# different: lambda' = 1 - lambda, but we don't use them with Laplacian
# Eigenmaps anyway.
#
# As we only need to calculate the top ndim + 1 eigenvectors (i.e. normally 3)
# it's incredibly wasteful to calculate all of them.
# A must be symmetric and positive semi definite, but not necessarily
# normalized in any specific way.
#' @import Matrix
laplacian_eigenmap <- function(A, ndim = 2, verbose = FALSE, force_irlba = FALSE) {
if (rspectra_is_installed() && !force_irlba) {
coords <- rspectra_laplacian_eigenmap(A, ndim, verbose = verbose)
}
else {
coords <- irlba_laplacian_eigenmap(A, ndim, verbose = verbose)
}
coords
}
rspectra_laplacian_eigenmap <- function(A, ndim = 2, verbose = FALSE) {
if (nrow(A) < 3) {
tsmessage("Graph too small, using random initialization instead")
return(rand_init(nrow(A), ndim))
}
tsmessage("Initializing from Laplacian Eigenmap (via RSpectra)")
# Equivalent to: D <- diag(colSums(A)); M <- solve(D) %*% A
# This effectively row-normalizes A: colSums is normally faster than rowSums
# and because A is symmetric, they're equivalent
M <- A / colSums(A)
res <- rspectra_eigs_asym(M, ndim)
if (is.null(res) || ncol(res$vectors) < ndim) {
message(
"Laplacian Eigenmap failed to converge, ",
"using random initialization instead"
)
n <- nrow(M)
return(rand_init(n, ndim))
}
  # return the eigenvectors associated with the smallest non-constant eigenvalues
as.matrix(Re(res$vectors[, 2:(ndim + 1)]))
}
irlba_laplacian_eigenmap <- function(A, ndim = 2, verbose = FALSE) {
if (nrow(A) < 3) {
tsmessage("Graph too small, using random initialization instead")
return(rand_init(nrow(A), ndim))
}
tsmessage("Initializing from Laplacian Eigenmap (via irlba)")
lapA <- form_modified_laplacian(A, ret_d = TRUE)
res <- irlba_spectral_tsvd(lapA$L, ndim + 1)
if (is.null(res) || ncol(res$vectors) < ndim || !res$converged) {
message(
"Laplacian Eigenmap failed to converge, ",
"using random initialization instead"
)
return(rand_init(nrow(A), ndim))
}
res <- lapA$Disqrt * res$vectors[, 2:(ndim + 1), drop = FALSE]
# re-scale the vectors to length 1
sweep(res, 2, sqrt(colSums(res * res)), `/`)
}
form_normalized_laplacian <- function(A) {
# Normalized Laplacian: clear and close to UMAP code, but very slow in R
# I <- diag(1, nrow = n, ncol = n)
# D <- diag(1 / sqrt(colSums(A)))
# L <- I - D %*% A %*% D
# A lot faster (order of magnitude when n = 1000)
Dsq <- sqrt(Matrix::colSums(A))
L <- -Matrix::t(A / Dsq) / Dsq
Matrix::diag(L) <- 1 + Matrix::diag(L)
L
}
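# Illustrative sketch (not part of the package sources): the sparse formulation
# above can be checked against the dense textbook definition
# L = I - D^-1/2 A D^-1/2 on a small symmetric affinity matrix.
#
# A <- Matrix::sparseMatrix(i = c(1, 1, 2, 2, 3, 3),
#                           j = c(2, 3, 1, 3, 1, 2),
#                           x = 1, dims = c(3, 3))
# D <- diag(1 / sqrt(Matrix::colSums(A)))
# L_dense <- diag(3) - D %*% as.matrix(A) %*% D
# max(abs(as.matrix(form_normalized_laplacian(A)) - L_dense))  # effectively zero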
# The symmetrized graph Laplacian (Lsym) but shifted so that:
# the bottom eigenvectors of Lsym correspond to the top singular vectors of
# this matrix (hence can be used with truncated SVD), and the eigenvalues
# are all positive, so we don't lose sign and hence correct eigenvector ordering
# when using the singular values (lambda = 2 - d)
form_modified_laplacian <- function(A, ret_d = FALSE) {
Dsq <- sqrt(Matrix::colSums(A))
L <- Matrix::t(A / Dsq) / Dsq
Matrix::diag(L) <- 1 + Matrix::diag(L)
if (ret_d) {
list(L = L, Disqrt = 1 / Dsq)
}
else {
L
}
}
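# Illustrative sketch (not part of the package sources): the modified Laplacian
# M = I + D^-1/2 A D^-1/2 has eigenvalues 2 - lambda, where lambda are the
# eigenvalues of the normalized Laplacian, and M is positive semi-definite, so
# its top singular vectors are the bottom eigenvectors of the Laplacian.
#
# A <- Matrix::sparseMatrix(i = c(1, 1, 2, 2, 3, 3),
#                           j = c(2, 3, 1, 3, 1, 2),
#                           x = 1, dims = c(3, 3))
# sort(eigen(as.matrix(form_normalized_laplacian(A)))$values)    # 0.0 1.5 1.5
# sort(2 - eigen(as.matrix(form_modified_laplacian(A)))$values)  # 0.0 1.5 1.5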
# Return the ndim eigenvectors associated with the ndim largest eigenvalues
sort_eigenvectors <- function(eig_res, ndim) {
vec_indices <- rev(order(eig_res$values, decreasing = TRUE)[1:ndim])
as.matrix(Re(eig_res$vectors[, vec_indices]))
}
normalized_laplacian_init <- function(A, ndim = 2, verbose = FALSE, force_irlba = FALSE) {
if (rspectra_is_installed() && !force_irlba) {
coords <- rspectra_normalized_laplacian_init(A, ndim, verbose = verbose)
}
else {
coords <- irlba_normalized_laplacian_init(A, ndim, verbose = verbose)
}
coords
}
rspectra_normalized_laplacian_init <- function(A, ndim = 2, verbose = FALSE) {
if (nrow(A) < 3) {
tsmessage("Graph too small, using random initialization instead")
return(rand_init(nrow(A), ndim))
}
tsmessage("Initializing from normalized Laplacian")
L <- form_normalized_laplacian(A)
res <- rspectra_eigs_sym(L, ndim, verbose = verbose)
if (is.null(res) || ncol(res$vectors) < ndim) {
message(
"Spectral initialization failed to converge, ",
"using random initialization instead"
)
n <- nrow(A)
return(rand_init(n, ndim))
}
sort_eigenvectors(res, ndim)
}
# Use a normalized Laplacian and use truncated SVD
irlba_tsvd_normalized_laplacian_init <- function(A, ndim = 2, verbose = FALSE) {
if (nrow(A) < 3) {
tsmessage("Graph too small, using random initialization instead")
return(rand_init(nrow(A), ndim))
}
tsmessage("Initializing from normalized Laplacian")
L <- form_modified_laplacian(A)
res <- irlba_spectral_tsvd(L, ndim + 1)
if (is.null(res) || ncol(res$vectors) < ndim || !res$converged) {
message(
"Spectral initialization failed to converge, ",
"using random initialization instead"
)
n <- nrow(A)
return(rand_init(n, ndim))
}
res$vectors[, 2:(ndim + 1), drop = FALSE]
}
irlba_spectral_tsvd <- function(L, n, iters = 1000) {
suppressWarnings(res <- irlba::irlba(L, nv = n, nu = 0, maxit = iters))
list(vectors = res$v, values = 2.0 - res$d, converged = res$iter != iters)
}
irlba_eigs_asym <- function(L, ndim) {
suppressWarnings(res <- tryCatch({
res <- irlba::partial_eigen(
L,
n = ndim + 1,
symmetric = FALSE,
smallest = TRUE,
tol = 1e-3,
maxit = 1000,
verbose = TRUE
)
res$values <- sqrt(res$values)
res
},
error = function(c) {
NULL
}))
res
}
irlba_eigs_sym <- function(L, ndim, smallest = TRUE) {
suppressWarnings(res <- tryCatch(
res <- irlba::partial_eigen(
L,
n = ndim + 1,
symmetric = TRUE,
smallest = smallest,
tol = 1e-3,
maxit = 1000,
verbose = FALSE
),
error = function(c) {
NULL
}
))
res
}
# Use irlba's partial_eigen instead of RSpectra
irlba_normalized_laplacian_init <- function(A, ndim = 2, verbose = FALSE) {
if (nrow(A) < 3) {
tsmessage("Graph too small, using random initialization instead")
return(rand_init(nrow(A), ndim))
}
tsmessage("Initializing from normalized Laplacian (using irlba)")
# Using the normalized Laplacian and looking for smallest eigenvalues does
# not work well with irlba's partial_eigen routine, so form the shifted
# Laplacian and look for largest eigenvalues
L <- form_modified_laplacian(A)
  res <- irlba_eigs_sym(L, ndim, smallest = FALSE)
  if (is.null(res) || is.null(res$vectors) || ncol(res$vectors) < ndim) {
    message(
      "Spectral initialization failed to converge, ",
      "using random initialization instead"
    )
    n <- nrow(A)
    return(rand_init(n, ndim))
  }
  # shift back the eigenvalues so ordering matches the normalized Laplacian
  res$values <- 2.0 - res$values
sort_eigenvectors(res, ndim)
}
# Default UMAP initialization
# spectral decomposition of the normalized Laplacian + some noise
spectral_init <- function(A, ndim = 2, verbose = FALSE, force_irlba = FALSE) {
if (nrow(A) < 3) {
tsmessage("Graph too small, using random initialization instead")
return(rand_init(nrow(A), ndim))
}
if (rspectra_is_installed() && !force_irlba) {
tsmessage("Initializing from normalized Laplacian + noise (using RSpectra)")
coords <- rspectra_normalized_laplacian_init(A, ndim, verbose = FALSE)
}
else {
tsmessage("Initializing from normalized Laplacian + noise (using irlba)")
coords <- irlba_tsvd_normalized_laplacian_init(A, ndim, verbose = FALSE)
}
scale_and_jitter(coords, max_coord = 10.0, sd = 0.0001)
}
irlba_spectral_init <- function(A, ndim = 2, verbose = FALSE) {
if (nrow(A) < 3) {
tsmessage("Graph too small, using random initialization instead")
return(rand_init(nrow(A), ndim))
}
tsmessage("Initializing from normalized Laplacian (using irlba) + noise")
coords <- irlba_normalized_laplacian_init(A, ndim, verbose = FALSE)
scale_and_jitter(coords, max_coord = 10.0, sd = 0.0001)
}
# Scales coords so that the largest absolute coordinate is 10.0 then jitters by
# adding gaussian noise with mean 0 and standard deviation sd
scale_and_jitter <- function(coords, max_coord = 10.0, sd = 0.0001) {
  expansion <- max_coord / max(abs(coords))
(coords * expansion) + matrix(stats::rnorm(n = prod(dim(coords)), sd = sd),
ncol = ncol(coords)
)
}
# Return the number of connected components in a graph (represented as a
# sparse matrix).
connected_components <- function(X) {
Xt <- Matrix::t(X)
connected_components_undirected(nrow(X), Xt@i, Xt@p, X@i, X@p)
}
# UMAP random initialization: uniform between +10 and -10 along each axis
rand_init <- function(n, ndim, verbose = FALSE) {
tsmessage("Initializing from uniform random")
matrix(stats::runif(n = n * ndim, min = -10, max = 10), ncol = ndim)
}
# LargeVis random initialization: Gaussian with sd 1e-4 (like t-SNE)
rand_init_lv <- function(n, ndim, verbose = FALSE) {
tsmessage("Initializing from random Gaussian with sd = 1e-4")
matrix(stats::rnorm(ndim * n, sd = 1e-4), n)
}
# Rescale embedding so that the standard deviation is the specified value.
# Default gives initialization like t-SNE, but not random. Large initial
# distances lead to small gradients, and hence small updates, so should be
# avoided.
scale_coords <- function(X, sdev = 1e-4, verbose = FALSE) {
if (is.null(sdev)) {
return(X)
}
tsmessage("Scaling init to sdev = ", sdev)
scale_factor <- apply(X, 2, stats::sd)
scale(X, scale = scale_factor / sdev)
}
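# Illustrative sketch (not part of the package sources): each column of the
# initialization ends up with the requested standard deviation; note that
# scale() also mean-centers each column by default.
#
# init <- matrix(stats::rnorm(200, sd = 5), ncol = 2)
# apply(scale_coords(init, sdev = 1e-4), 2, stats::sd)  # both columns ~1e-4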
# PCA
# Calculates a matrix containing the first ndim columns of the PCA scores.
# Returns the score matrix unless ret_extra is TRUE, in which case a list
# is returned also containing the eigenvalues
pca_init <- function(X, ndim = min(dim(X)), center = TRUE, ret_extra = FALSE,
pca_method = "auto", verbose = FALSE) {
if (methods::is(X, "dist")) {
res_mds <- stats::cmdscale(X, x.ret = TRUE, eig = TRUE, k = ndim)
if (ret_extra || verbose) {
lambda <- res_mds$eig
varex <- sum(lambda[1:ndim]) / sum(lambda)
tsmessage(
"PCA (using classical MDS): ", ndim, " components explained ",
formatC(varex * 100), "% variance"
)
}
scores <- res_mds$points
return(scores)
}
# irlba warns about using too large a percentage of total singular value
# so don't use if dataset is small compared to ndim
if (pca_method == "auto") {
if (ndim < 0.5 * min(dim(X))) {
pca_method <- "irlba"
} else {
pca_method <- "svd"
}
}
if (pca_method == "bigstatsr") {
if (!bigstatsr_is_installed()) {
warning(
"PCA via bigstatsr requires the 'bigstatsr' package. ",
"Please install it. Falling back to 'irlba'"
)
pca_method <- "irlba"
}
}
tsmessage("Using '", pca_method, "' for PCA")
pca_fun <- switch(pca_method,
irlba = irlba_scores,
svdr = irlba_svdr_scores,
svd = svd_scores,
bigstatsr = bigstatsr_scores,
stop("BUG: unknown svd method '", pca_method, "'")
)
do.call(pca_fun, list(
X = X,
ncol = ndim,
center = center,
ret_extra = ret_extra,
verbose = verbose
))
}
# Get scores by SVD
svd_scores <- function(X, ncol = min(dim(X)), center = TRUE, ret_extra = FALSE,
verbose = FALSE) {
# need extra data if we want to re-apply PCA to new points in umap_transform
rotation <- NULL
xcenter <- NULL
X <- scale(X, center = center, scale = FALSE)
# do SVD on X directly rather than forming covariance matrix
s <- svd(X, nu = ncol, nv = ifelse(ret_extra, ncol, 0))
D <- diag(c(s$d[1:ncol]), ncol, ncol)
if (verbose || ret_extra) {
# calculate eigenvalues of covariance matrix from singular values
lambda <- (s$d^2) / (nrow(X) - 1)
varex <- sum(lambda[1:ncol]) / sum(lambda)
tsmessage(
"PCA: ", ncol, " components explained ", formatC(varex * 100),
"% variance"
)
}
scores <- s$u %*% D
if (ret_extra) {
rotation <- s$v
xcenter <- attr(X, "scaled:center")
}
if (ret_extra) {
list(
scores = scores,
lambda = lambda[1:ncol],
rotation = rotation,
center = xcenter
)
} else {
scores
}
}
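# Illustrative sketch (not part of the package sources): the scores computed
# above agree with stats::prcomp up to a possible sign flip per component.
#
# set.seed(42)
# X <- matrix(rnorm(100 * 5), nrow = 100)
# s_svd <- svd_scores(X, ncol = 2)
# s_prcomp <- stats::prcomp(X, center = TRUE, scale. = FALSE)$x[, 1:2]
# max(abs(abs(s_svd) - abs(s_prcomp)))  # effectively zero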
# Get PCA scores via irlba
irlba_scores <- function(X, ncol, center = TRUE, ret_extra = FALSE, verbose = FALSE) {
res <- irlba::prcomp_irlba(X,
n = ncol, retx = TRUE, center = center,
scale = FALSE
)
report_varex(res, verbose)
if (ret_extra) {
list(scores = res$x, rotation = res$rotation, center = res$center)
} else {
res$x
}
}
report_varex <- function(res, verbose = FALSE) {
if (verbose) {
ncol <- ncol(res$rotation)
varex <- sum(res$sdev[1:ncol]^2) / res$totalvar
tsmessage(
"PCA: ",
ncol,
" components explained ",
formatC(varex * 100),
"% variance"
)
}
}
# This function taken from irlba and modified to use irlba::svdr rather
# than irlba::irlba
prcomp_rsvd <- function(x, n = 3, retx = TRUE, center = TRUE, scale. = FALSE,
...) {
a <- names(as.list(match.call()))
ans <- list(scale = scale.)
if ("tol" %in% a) {
warning("The `tol` truncation argument from `prcomp` is not supported by\n`prcomp_rsvd`. If specified, `tol` is passed to the `irlba` function to\ncontrol that algorithm's convergence tolerance. See `?prcomp_irlba` for help.")
}
if (is.data.frame(x)) {
x <- as.matrix(x)
}
args <- list(x = x, k = n)
if (is.logical(center)) {
if (center) {
args$center <- colMeans(x)
}
} else {
args$center <- center
}
if (is.logical(scale.)) {
if (is.numeric(args$center)) {
f <- function(i) {
sqrt(sum((x[, i] - args$center[i])^2) / (nrow(x) -
1L))
}
scale. <- vapply(seq(ncol(x)), f, pi, USE.NAMES = FALSE)
if (ans$scale) {
ans$totalvar <- ncol(x)
} else {
ans$totalvar <- sum(scale.^2)
}
} else {
if (ans$scale) {
scale. <- apply(x, 2L, function(v) {
sqrt(sum(v^2) / max(
1,
length(v) - 1L
))
})
f <- function(i) {
sqrt(sum((x[, i] / scale.[i])^2) / (nrow(x) -
1L))
}
ans$totalvar <- sum(vapply(seq(ncol(x)), f, pi,
USE.NAMES = FALSE
)^2)
} else {
f <- function(i) sum(x[, i]^2) / (nrow(x) - 1L)
ans$totalvar <- sum(vapply(seq(ncol(x)), f, pi,
USE.NAMES = FALSE
))
}
}
if (ans$scale) {
args$scale <- scale.
}
} else {
args$scale <- scale.
f <- function(i) {
sqrt(sum((x[, i] / scale.[i])^2) / (nrow(x) -
1L))
}
ans$totalvar <- sum(vapply(seq(ncol(x)), f, pi, USE.NAMES = FALSE))
}
if (!missing(...)) {
args <- c(args, list(...))
}
s <- do.call(irlba::svdr, args = args)
ans$sdev <- s$d / sqrt(max(1, nrow(x) - 1))
ans$rotation <- s$v
colnames(ans$rotation) <- paste("PC", seq(1, ncol(ans$rotation)),
sep = ""
)
ans$center <- args$center
if (retx) {
ans <- c(ans, list(x = sweep(s$u, 2, s$d, FUN = `*`)))
colnames(ans$x) <- paste("PC", seq(1, ncol(ans$rotation)),
sep = ""
)
}
class(ans) <- c("irlba_prcomp", "prcomp")
ans
}
irlba_svdr_scores <-
function(X,
ncol,
center = TRUE,
ret_extra = FALSE,
verbose = FALSE) {
# 5 iterations is the default for scikit-learn TruncatedSVD
res <- prcomp_rsvd(
X,
n = ncol,
retx = TRUE,
center = center,
scale. = FALSE,
it = 5
)
report_varex(res, verbose)
if (ret_extra) {
list(
scores = res$x,
rotation = res$rotation,
center = res$center
)
} else {
res$x
}
}
init_is_spectral <- function(init) {
res <- pmatch(tolower(init), c(
"normlaplacian", "spectral", "laplacian",
"inormlaplacian", "ispectral", "agspectral",
"irlba_spectral", "irlba_laplacian"
))
length(res) > 0 && !is.na(res)
}
rand_nbr_graph <- function(n_vertices, n_nbrs, val) {
nng_to_sparse(rand_nbr_idx(n_vertices, n_nbrs),
val = val,
max_nbr_id = n_vertices
)
}
rand_nbr_idx <- function(n_vertices, n_nbrs) {
idx <- matrix(nrow = n_vertices, ncol = n_nbrs)
nv1 <- n_vertices - 1
for (i in 1:n_vertices) {
ids <- sample.int(nv1, n_nbrs)
id_sel <- ids >= 1
ids[id_sel] <- ids[id_sel] + 1
idx[i, ] <- ids
}
idx
}
# V: the current affinity graph
# n_pos: number of neighbors to retain per item
# n_neg: number of "negative" (i.e. non-)neighbors per item
# pos_affinity: value for the positive affinity (associated with nbrs)
# neg_affinity: value for the negative affinity (associated with neg nbrs)
approx_affinity_graph <- function(V, n_neg,
pos_affinity = 1, neg_affinity = 0.1,
verbose = FALSE) {
pos_V <- V
pos_V@x <- rep(pos_affinity, length(pos_V@x))
pos_V <- 0.5 * (pos_V + Matrix::t(pos_V))
neg_V <- rand_nbr_graph(nrow(pos_V), n_nbrs = n_neg, val = neg_affinity)
neg_V <- 0.5 * (neg_V + Matrix::t(neg_V))
# the cleanup below will ensure that where the same value got a pos and neg
# affinity it will end up positive
graph <- pos_V + neg_V
# clamp small values to neg_affinity
graph@x[graph@x < pos_affinity] <- neg_affinity
# and large values to pos_affinity
graph@x <- pmin(graph@x, pos_affinity)
Matrix::drop0(graph)
}
# Initialize using a spectral decomposition of an "approximate global" graph
# Uses the same graph as standard UMAP, but with each entry set to 1. A measure
# of global structure is added by randomly setting some of the remaining zero
# to a smaller value (0.1 in this case).
# This routine is inspired by some ideas in
# 2-D Embedding of Large and High-dimensional Data with Minimal Memory and Computational Time Requirements
# Witold Dzwinel, Rafal Wcislo, Stan Matwin
# https://arxiv.org/abs/1902.01108
#
# Randomized Near Neighbor Graphs, Giant Components, and Applications in Data Science
# George C. Linderman, Gal Mishne, Yuval Kluger, Stefan Steinerberger
# https://arxiv.org/abs/1711.04712
agspectral_init <- function(V, n_neg_nbrs, pos_affinity = 1, neg_affinity = 0.1,
ndim = 2, verbose = FALSE) {
graph <- approx_affinity_graph(V, n_neg_nbrs,
pos_affinity = pos_affinity,
neg_affinity = neg_affinity,
verbose = verbose
)
spectral_init(graph, ndim = ndim, verbose = verbose)
}
|
/scratch/gouwar.j/cran-all/cranData/uwot/R/init.R
|
find_nn <- function(X, k, include_self = TRUE, method = "fnn",
metric = "euclidean",
n_trees = 50, search_k = 2 * k * n_trees,
tmpdir = tempdir(),
n_threads = NULL,
grain_size = 1,
ret_index = FALSE,
verbose = FALSE) {
if (is.null(n_threads)) {
n_threads <- default_num_threads()
}
if (methods::is(X, "dist")) {
res <- dist_nn(X, k, include_self = include_self)
}
else if (is_sparse_matrix(X)) {
# sparse distance matrix
if (Matrix::isTriangular(X)) {
res <- sparse_tri_nn(X, k, include_self = include_self)
}
else {
res <- sparse_nn(X, k, include_self = include_self)
}
}
else {
# normal matrix
if (method == "fnn") {
res <- FNN_nn(X, k = k, include_self = include_self)
}
else {
res <- annoy_nn(X,
k = k,
metric = metric,
n_trees = n_trees, search_k = search_k,
tmpdir = tmpdir,
n_threads = n_threads,
ret_index = ret_index,
verbose = verbose
)
}
}
res
}
# an nn graph not in a list
nn_is_single <- function(nn) {
(is.list(nn) && !is.null(nn$idx)) || is_sparse_matrix(nn)
}
# TRUE if nn is a sparse matrix or an untagged list. This covers passing in
# a single nn graph, sparse distance matrix or list thereof, but excludes a
# tagged annoy index or a string like "euclidean"
nn_is_precomputed <- function(nn) {
(is.list(nn) && is.null(nn$type)) || is_sparse_matrix(nn)
}
# TRUE if we are using an annoy index
nn_is_annoy <- function(ann) {
is.list(ann) && !is.null(ann$type) && startsWith(ann$type, "annoy")
}
# n_trees - number of trees to build when constructing the index. The more trees
# specified, the larger the index, but the better the results. largeVis uses 10
# trees for datasets with N = 10,000 observations, 20 trees for datasets up to N
# = 1,000,000, 50 trees for N up to 5,000,000 and 100 trees otherwise
# search_k - the number of nodes to search during the neighbor retrieval. The
# larger search_k, the more accurate the results, but the longer the search
# takes. Default is 2 * k * n_trees.
#' @importFrom methods new
annoy_nn <- function(X, k = 10,
metric = "euclidean",
n_trees = 50, search_k = 2 * k * n_trees,
tmpdir = tempdir(),
n_threads = NULL,
grain_size = 1,
ret_index = FALSE,
verbose = FALSE) {
if (is.null(n_threads)) {
n_threads <- default_num_threads()
}
ann <- annoy_build(X,
metric = metric, n_trees = n_trees,
verbose = verbose
)
res <- annoy_search(X,
k = k, ann = ann, search_k = search_k,
tmpdir = tmpdir,
n_threads = n_threads,
prep_data = TRUE,
grain_size = grain_size, verbose = verbose
)
nn_acc <- sum(res$idx == 1:nrow(X)) / nrow(X)
tsmessage("Annoy recall = ", formatC(nn_acc * 100.0), "%")
res <- list(idx = res$idx, dist = res$dist, recall = nn_acc)
if (ret_index) {
res$index <- ann
}
res
}
annoy_create <- function(metric, ndim) {
if (metric == "correlation") {
name <- "cosine"
}
else {
name <- metric
}
rcppannoy <- create_ann(name, ndim)
list(
ann = rcppannoy,
type = "annoyv1",
metric = metric
)
}
annoy_build <- function(X, metric = "euclidean", n_trees = 50,
verbose = FALSE) {
nr <- nrow(X)
nc <- ncol(X)
annoy <- annoy_create(metric, nc)
if (metric == "correlation") {
tsmessage("Annoy build: subtracting row means for correlation")
X <- sweep(X, 1, rowMeans(X))
}
tsmessage(
"Building Annoy index with metric = ", metric,
", n_trees = ", n_trees
)
ann <- annoy$ann
nstars <- 50
if (verbose && nr > nstars) {
progress_for(
nr, nstars,
function(chunk_start, chunk_end) {
for (i in chunk_start:chunk_end) {
ann$addItem(i - 1, X[i, , drop = FALSE])
}
}
)
}
else {
for (i in 1:nr) {
ann$addItem(i - 1, X[i, ])
}
}
# Build index
ann$build(n_trees)
annoy
}
# create RcppAnnoy class from metric name with ndim dimensions
create_ann <- function(name, ndim) {
ann <- switch(name,
cosine = methods::new(RcppAnnoy::AnnoyAngular, ndim),
manhattan = methods::new(RcppAnnoy::AnnoyManhattan, ndim),
euclidean = methods::new(RcppAnnoy::AnnoyEuclidean, ndim),
hamming = methods::new(RcppAnnoy::AnnoyHamming, ndim),
stop("BUG: unknown Annoy metric '", name, "'")
)
}
# fetch the underlying RcppAnnoy class from inside an index
get_rcppannoy <- function(nni) {
if (startsWith(class(nni), "Rcpp_Annoy")) {
rcppannoy <- nni
}
else if (nn_is_annoy(nni)) {
rcppannoy <- nni$ann
}
else {
stop("BUG: Found an unknown ann implementation of class: '",
class(nni), "'")
}
rcppannoy
}
# Search a pre-built Annoy index for neighbors of X
annoy_search <- function(X, k, ann,
search_k = 100 * k,
prep_data = FALSE,
tmpdir = tempdir(),
n_threads = NULL,
grain_size = 1,
verbose = FALSE) {
# newer NN structures hide impl in a tagged list
if (nn_is_annoy(ann)) {
lann <- ann
ann <- lann$ann
if (prep_data && lann$metric == "correlation") {
tsmessage("Annoy search: subtracting row means for correlation")
X <- sweep(X, 1, rowMeans(X))
}
}
if (is.null(n_threads)) {
n_threads <- default_num_threads()
}
if (n_threads > 0) {
annoy_res <- annoy_search_parallel(
X = X, k = k, ann = ann,
search_k = search_k,
tmpdir = tmpdir,
n_threads = n_threads,
grain_size = grain_size,
verbose = verbose
)
res <- list(idx = annoy_res$item + 1, dist = annoy_res$distance)
}
else {
res <- annoy_search_serial(
X = X, k = k, ann = ann,
search_k = search_k,
verbose = verbose
)
}
# Convert from angular distance to the UMAP/sklearn definition of cosine
# distance
# Current Annoy README defines cosine distance as sqrt(2 - 2 cos(u,v))
# where cos(u, v) is the cosine of the angle between two unit-scaled vectors
# u and v (i.e. the cosine similarity). That expression is known to be
# equivalent to the euclidean distance between u and v.
# We shall convert back to 1 - cos(u, v) which is the definition of cosine
# distance used by UMAP.
if (methods::is(ann, "Rcpp_AnnoyAngular")) {
res$dist <- 0.5 * res$dist * res$dist
}
res
}
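# Illustrative sketch (not part of the package sources): the conversion above
# follows from d_annoy = sqrt(2 - 2 * cos(u, v)) for unit vectors, so
# 0.5 * d_annoy^2 = 1 - cos(u, v), the cosine distance used by UMAP.
#
# u <- c(1, 0)
# v <- c(1, 1) / sqrt(2)
# cos_uv <- sum(u * v)
# d_annoy <- sqrt(2 - 2 * cos_uv)
# 0.5 * d_annoy^2  # equals 1 - cos_uv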
annoy_search_serial <- function(X, k, ann,
search_k = 100 * k,
verbose = FALSE) {
tsmessage("Searching Annoy index, search_k = ", search_k)
nr <- nrow(X)
idx <- matrix(nrow = nr, ncol = k)
dist <- matrix(nrow = nr, ncol = k)
nstars <- 50
if (verbose && nr > nstars) {
progress_for(
nr, nstars,
function(chunk_start, chunk_end) {
for (i in chunk_start:chunk_end) {
res <- ann$getNNsByVectorList(X[i, ], k, search_k, TRUE)
if (length(res$item) != k) {
stop(
"search_k/n_trees settings were unable to find ", k,
" neighbors for item ", i
)
}
idx[i, ] <<- res$item
dist[i, ] <<- res$distance
}
}
)
}
else {
for (i in 1:nr) {
res <- ann$getNNsByVectorList(X[i, ], k, search_k, TRUE)
if (length(res$item) != k) {
stop(
"search_k/n_trees settings were unable to find ", k,
" neighbors for item ", i
)
}
idx[i, ] <- res$item
dist[i, ] <- res$distance
}
}
list(idx = idx + 1, dist = dist)
}
annoy_search_parallel <- function(X, k, ann,
search_k = 100 * k,
tmpdir = tempdir(),
n_threads = NULL,
grain_size = 1,
verbose = FALSE) {
if (is.null(n_threads)) {
n_threads <- default_num_threads()
}
index_file <- tempfile(tmpdir = tmpdir)
tsmessage("Writing NN index file to temp file ", index_file)
ann$save(index_file)
fsize <- file.size(index_file)
tsmessage(
"Searching Annoy index using ", pluralize("thread", n_threads),
", search_k = ", search_k
)
ann_class <- class(ann)
metric <- switch(ann_class,
Rcpp_AnnoyAngular = "cosine",
Rcpp_AnnoyManhattan = "manhattan",
Rcpp_AnnoyEuclidean = "euclidean",
Rcpp_AnnoyHamming = "hamming",
stop("BUG: unknown Annoy class '", ann_class, "'")
)
res <- annoy_search_parallel_cpp(index_file,
X,
k, search_k,
metric = metric,
n_threads = n_threads,
grain_size = grain_size
)
unlink(index_file)
if (any(res$item == -1)) {
msg <- paste0(
"search_k/n_trees settings were unable to find ", k,
" neighbors for all items."
)
if (fsize > 2147483647) {
msg <- paste0(
msg, " Index file may have been too large to process.",
" Try repeating with n_threads = 0, reducing n_trees,",
" or reducing to a smaller dimensionality, e.g. pca = 50"
)
}
stop(msg)
}
res
}
FNN_nn <- function(X, k = 10, include_self = TRUE) {
if (include_self) {
k <- k - 1
}
fnn <- FNN::get.knn(X, k)
idx <- fnn$nn.index
dist <- fnn$nn.dist
if (include_self) {
idx <- cbind(seq_len(nrow(X)), idx)
dist <- cbind(rep(0, nrow(X)), dist)
}
list(idx = idx, dist = dist)
}
dist_nn <- function(X, k, include_self = TRUE) {
X <- as.matrix(X)
if (!include_self) {
k <- k + 1
}
nn_idx <- t(apply(X, 2, order))[, 1:k]
nn_dist <- matrix(0, nrow = nrow(X), ncol = k)
for (i in seq_len(nrow(nn_idx))) {
nn_dist[i, ] <- X[i, nn_idx[i, ]]
}
if (!include_self) {
nn_idx <- nn_idx[, 2:ncol(nn_idx)]
nn_dist <- nn_dist[, 2:ncol(nn_dist)]
}
attr(nn_idx, "dimnames") <- NULL
attr(nn_dist, "dimnames") <- NULL
list(idx = nn_idx, dist = nn_dist)
}
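# Illustrative sketch (not part of the package sources): for a precomputed
# stats::dist object, the k nearest neighbors (including self, at distance 0)
# are read directly off the distance matrix. For three points on a line:
#
# X <- matrix(c(0, 0,
#               1, 0,
#               10, 0), ncol = 2, byrow = TRUE)
# dist_nn(stats::dist(X), k = 2)
# # $idx has the observation itself in column 1, its nearest neighbor in column 2
# # $dist has 0 in column 1 and the neighbor distances (1, 1, 9) in column 2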
sparse_nn <- function(X, k, include_self = TRUE) {
if (include_self) {
k <- k - 1
}
n <- nrow(X)
nn_idx <- matrix(0, nrow = n, ncol = k)
nn_dist <- matrix(0, nrow = n, ncol = k)
for (i in 1:n) {
dists <- X[, i]
is_nonzero <- dists != 0
dist_nonzero <- dists[is_nonzero]
if (length(dist_nonzero) < k) {
stop(
"Row ", i, " of distance matrix has only ", length(dist_nonzero),
" defined distances"
)
}
k_order <- order(dist_nonzero)[1:k]
idx_nonzero <- which(is_nonzero, arr.ind = TRUE)
nn_idx[i, ] <- idx_nonzero[k_order]
nn_dist[i, ] <- dist_nonzero[k_order]
}
if (include_self) {
nn_idx <- cbind(1:n, nn_idx)
nn_dist <- cbind(rep(0, n), nn_dist)
}
list(idx = nn_idx, dist = nn_dist)
}
# Extract knn data from sparse lower/upper triangular matrix
sparse_tri_nn <- function(X, k, include_self = TRUE) {
if (include_self) {
k <- k - 1
}
n <- nrow(X)
nn_idx <- matrix(0, nrow = n, ncol = k)
nn_dist <- matrix(0, nrow = n, ncol = k)
# this will get the i,j,x values no matter the internal representation
Xsumm <- summary(X)
for (i in 1:n) {
# get indices where $i/j == i
idxji <- Xsumm$j == i
idxii <- Xsumm$i == i
idxi <- idxji | idxii
# find non-zero distances
dists <- Xsumm$x[idxi]
is_nonzero <- dists != 0
dist_nonzero <- dists[is_nonzero]
if (length(dist_nonzero) < k) {
stop(
"Row ", i, " of distance matrix has only ", length(dist_nonzero),
" defined distances"
)
}
# find indices of k-smallest distances
k_order <- order(dist_nonzero)[1:k]
nn_dist[i, ] <- dist_nonzero[k_order]
# get indices into original vector
isk <- which(idxi)[k_order]
Xis <- Xsumm$i[isk]
Xjs <- Xsumm$j[isk]
# We don't know if the non-i index is in the i or j column
# so do this slightly horrible logical * integer arithmetic
# which will add the correct index to 0
nn_idx[i, ] <- ((Xis != i) * Xis) + ((Xjs != i) * Xjs)
}
if (include_self) {
nn_idx <- cbind(1:n, nn_idx)
nn_dist <- cbind(rep(0, n), nn_dist)
}
list(idx = nn_idx, dist = nn_dist)
}
|
/scratch/gouwar.j/cran-all/cranData/uwot/R/neighbors.R
|
rspectra_is_installed <- function() {
is_installed("RSpectra")
}
rspectra_eigs_asym <- function(L, ndim) {
res <- NULL
suppressWarnings(res <- tryCatch(
RSpectra::eigs(
L,
k = ndim + 1,
which = "LR",
opt = list(tol = 1e-4)
),
error = function(c) {
NULL
}
))
res
}
rspectra_eigs_sym <- function(L, ndim, verbose = FALSE) {
k <- ndim + 1
opt <- list(tol = 1e-4)
suppressWarnings(res <-
tryCatch(
RSpectra::eigs_sym(L, k = k, which = "SM", opt = opt),
error = function(c) {
tsmessage("RSpectra calculation failed, retrying with shifted")
}
))
if (is.null(res) || ncol(res$vectors) < ndim) {
suppressWarnings(res <- tryCatch(
RSpectra::eigs_sym(
L,
k = k,
which = "LM",
sigma = 0,
opt = opt
),
error = function(c) {
NULL
}
))
}
res
}
|
/scratch/gouwar.j/cran-all/cranData/uwot/R/rspectra_init.R
|
# Combine a fuzzy simplicial set with another fuzzy simplicial set
# generated from categorical data using categorical distances. The target
# data is assumed to be categorical label data (a vector of labels),
# and this will update the fuzzy simplicial set to respect that label data.
# TODO: optional category cardinality based weighting of distance
# simplicial_set The input fuzzy simplicial set.
# target The categorical labels to use in the intersection.
# unknown_dist The distance an unknown label (-1) is assumed to be from any point.
# far_dist The distance between unmatched labels.
# Return The resulting intersected fuzzy simplicial set.
categorical_simplicial_set_intersection <- function(
simplicial_set, target,
unknown_dist = 1.0,
far_dist = 5.0,
verbose = FALSE) {
# Convert to dgTMatrix to get to the j indices
simplicial_set <- methods::as(simplicial_set, "TsparseMatrix")
simplicial_set@x <- fast_intersection_cpp(
simplicial_set@i,
simplicial_set@j,
simplicial_set@x,
target,
unknown_dist,
far_dist
)
# drop0 converts back to dgCMatrix
reset_local_connectivity(Matrix::drop0(simplicial_set))
}
# Reset the local connectivity requirement -- each data sample should
# have complete confidence in at least one 1-simplex in the simplicial set.
# We can enforce this by locally rescaling confidences, and then remerging the
# different local simplicial sets together.
reset_local_connectivity <-
function(simplicial_set,
reset_local_metric = FALSE,
num_local_metric_neighbors = 15,
n_threads = NULL,
verbose = FALSE) {
# Python UMAP stores graph as CSR uwot uses CSC so need to be careful about
# which axis to normalize
simplicial_set <- col_max_normalize(simplicial_set)
if (reset_local_metric) {
if (is.null(n_threads)) {
n_threads <- default_num_threads()
}
tsmessage(
"Resetting local metric", pluralize("thread", n_threads, " using")
)
metric_res <-
reset_local_metrics_parallel(simplicial_set@p, simplicial_set@x,
num_local_metric_neighbors = num_local_metric_neighbors,
n_threads = n_threads)
simplicial_set@x <- metric_res$values
# TODO: at least some failures are very typical and it doesn't seem to
# affect results, so not worth reporting this for now.
# if (metric_res$n_failures > 0) {
# tsmessage(metric_res$n_failures, " local metric reset failures")
# }
}
fuzzy_set_union(simplicial_set)
}
# Under the assumption of categorical distance for the intersecting
# simplicial set perform a fast intersection.
# This is not at all fast in R, use fast_intersection_cpp instead
fast_intersection <- function(rows, cols, values, target, unknown_dist = 1.0,
far_dist = 5.0) {
ex_unknown <- exp(-unknown_dist)
ex_far <- exp(-far_dist)
for (nz in seq_len(length(values))) {
i <- rows[nz]
j <- cols[nz]
if (is.na(target[i]) || is.na(target[j])) {
values[nz] <- values[nz] * ex_unknown
}
else if (target[i] != target[j]) {
values[nz] <- values[nz] * ex_far
}
}
values
}
general_simplicial_set_intersection <- function(left, right, weight) {
result <- methods::as(left + right, "TsparseMatrix")
result@x <- general_sset_intersection_cpp(
left@p,
left@i,
left@x,
right@p,
right@i,
right@x,
result@i,
result@j,
result@x,
weight
)
result
}
# An R translation of the Python function. Not very fast,
# so use the C++ version instead
general_sset_intersection <- function(indptr1,
indices1,
data1,
indptr2,
indices2,
data2,
result_row,
result_col,
result_val,
mix_weight = 0.5) {
left_min <- max(min(data1) / 2.0, 1.0e-8)
right_min <- max(min(data2) / 2.0, 1.0e-8)
for (idx in seq_len(length(result_row))) {
i <- result_col[idx] + 1
j <- result_row[idx]
left_val <- left_min
for (k in (indptr1[i]):(indptr1[i + 1] - 1)) {
if (indices1[k + 1] == j) {
left_val <- data1[k + 1]
}
}
right_val <- right_min
for (k in (indptr2[i]):(indptr2[i + 1] - 1)) {
if (indices2[k + 1] == j) {
right_val <- data2[k + 1]
}
}
if (left_val > left_min || right_val > right_min) {
if (mix_weight < 0.5) {
result_val[idx] <- left_val *
right_val^(mix_weight / (1.0 - mix_weight))
}
else {
result_val[idx] <- right_val *
left_val^(((1.0 - mix_weight) / mix_weight))
}
}
}
result_val
}
# Sparse Matrix functions -------------------------------------------------
# normalize each column of a dgCMatrix by its maximum
# https://stackoverflow.com/questions/39284774/column-rescaling-for-a-very-large-sparse-matrix-in-r
col_max_normalize <- function(X) {
X@x <- X@x / rep.int(colMaxs(X), diff(X@p))
X
}
# normalize each row of a dgCMatrix by its maximum
row_max_normalize <- function(X) {
Matrix::t(col_max_normalize(Matrix::t(X)))
}
col_sum_normalize <- function(X) {
X@x <- X@x / rep.int(Matrix::colSums(X), diff(X@p))
X
}
row_sum_normalize <- function(X) {
Matrix::t(col_sum_normalize(Matrix::t(X)))
}
# column maximums of a dgCMatrix
colMaxs <- function(X) {
ptr <- X@p
xs <- X@x
vapply(
1:ncol(X),
function(i) {
if (ptr[i + 1] > ptr[i]) {
max(xs[(ptr[i] + 1):ptr[i + 1]])
} else {
0
}
}, numeric(1)
)
}
# row maximums of a dgCMatrix
rowMaxs <- function(X) {
colMaxs(Matrix::t(X))
}
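# Illustrative sketch (not part of the package sources): for the non-negative
# affinity matrices used here, colMaxs agrees with the dense column-wise
# maximum (all-zero columns give 0).
#
# set.seed(42)
# X <- abs(Matrix::rsparsematrix(5, 4, density = 0.6))
# all(colMaxs(X) == apply(as.matrix(X), 2, max))  # TRUE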
|
/scratch/gouwar.j/cran-all/cranData/uwot/R/supervised.R
|
#' Add New Points to an Existing Embedding
#'
#' Carry out an embedding of new data using an existing embedding. Requires
#' using the result of calling \code{\link{umap}} or \code{\link{tumap}} with
#' \code{ret_model = TRUE}.
#'
#' Note that some settings are incompatible with the production of a UMAP model
#' via \code{\link{umap}}: external neighbor data (passed via a list to the
#' argument of the \code{nn_method} parameter), and factor columns that were
#' included in the UMAP calculation via the \code{metric} parameter. In the
#' latter case, the model produced is based only on the numeric data.
#' A transformation is possible, but factor columns in the new data are ignored.
#'
#' @param X The new data to be transformed, either a matrix or data frame. Must
#' have the same columns in the same order as the input data used to generate
#' the \code{model}.
#' @param model Data associated with an existing embedding.
#' @param nn_method Optional pre-calculated nearest neighbor data. There are
#' two supported formats. The first is a list consisting of two elements:
#' \itemize{
#' \item \code{"idx"}. A \code{n_vertices x n_neighbors} matrix where
#' \code{n_vertices} is the number of observations in \code{X}. The contents
#' of the matrix should be the integer indexes of the data used to generate
#' the \code{model}, which are the \code{n_neighbors}-nearest neighbors of
#' the data to be transformed.
#' \item \code{"dist"}. A \code{n_vertices x n_neighbors} matrix
#' containing the distances of the nearest neighbors.
#' }
#' The second supported format is a sparse distance matrix of type
#' \code{dgCMatrix}, with dimensions \code{n_model_vertices x n_vertices},
#' where \code{n_model_vertices} is the number of observations in the original
#' data that generated the model. Distances should be arranged by column, i.e.
#' a non-zero entry in row \code{j} of the \code{i}th column indicates that
#' the \code{j}th observation in the original data used to generate the
#' \code{model} is a nearest neighbor of the \code{i}th observation in the new
#' data, with the distance given by the value of that element. In this format,
#' a different number of neighbors is allowed for each observation, i.e.
#' each column can contain a different number of non-zero values.
#' Multiple nearest neighbor data (e.g. from two different pre-calculated
#' metrics) can be supplied by passing a list containing the nearest neighbor
#' data lists as items.
#' @param init_weighted If \code{TRUE}, then initialize the embedded coordinates
#' of \code{X} using a weighted average of the coordinates of the nearest
#' neighbors from the original embedding in \code{model}, where the weights
#' used are the edge weights from the UMAP smoothed knn distances. Otherwise,
#' use an un-weighted average.
#' This parameter will be deprecated and removed at version 1.0 of this
#' package. Use the \code{init} parameter as a replacement, replacing
#' \code{init_weighted = TRUE} with \code{init = "weighted"} and
#' \code{init_weighted = FALSE} with \code{init = "average"}.
#' @param search_k Number of nodes to search during the neighbor retrieval. The
#' larger \code{search_k}, the more accurate the results, but the longer the
#' search takes. The default is the value used in building the \code{model}.
#' @param tmpdir Temporary directory to store nearest neighbor indexes during
#' nearest neighbor search. Default is \code{\link{tempdir}}. The index is
#' only written to disk if \code{n_threads > 1}; otherwise, this parameter is
#' ignored.
#' @param n_epochs Number of epochs to use during the optimization of the
#' embedded coordinates. A value between \code{30 - 100} is a reasonable trade
#' off between speed and thoroughness. By default, this value is set to one
#' third the number of epochs used to build the \code{model}.
#' @param n_threads Number of threads to use (except during stochastic gradient
#' descent). Default is half the number of concurrent threads supported by the
#' system.
#' @param n_sgd_threads Number of threads to use during stochastic gradient
#' descent. If set to > 1, then be aware that if \code{batch = FALSE}, results
#' will \emph{not} be reproducible, even if \code{set.seed} is called with a
#' fixed seed before running. Set to \code{"auto"} to use the same value as
#' \code{n_threads}.
#' @param grain_size Minimum batch size for multithreading. If the number of
#' items to process in a thread falls below this number, then no threads will
#' be used. Used in conjunction with \code{n_threads} and
#' \code{n_sgd_threads}.
#' @param verbose If \code{TRUE}, log details to the console.
#' @param init how to initialize the transformed coordinates. One of:
#' \itemize{
#' \item \code{"weighted"} (The default). Use a weighted average of the
#' coordinates of the nearest neighbors from the original embedding in
#' \code{model}, where the weights used are the edge weights from the UMAP
#' smoothed knn distances. Equivalent to \code{init_weighted = TRUE}.
#' \item \code{"average"}. Use the mean average of the coordinates of
#' the nearest neighbors from the original embedding in \code{model}.
#' Equivalent to \code{init_weighted = FALSE}.
#' \item A matrix of user-specified input coordinates, which must have
#' dimensions the same as \code{(nrow(X), ncol(model$embedding))}.
#' }
#' This parameter should be used in preference to \code{init_weighted}.
#' @param batch If \code{TRUE}, then embedding coordinates are updated at the
#' end of each epoch rather than during the epoch. In batch mode, results are
#' reproducible with a fixed random seed even with \code{n_sgd_threads > 1},
#' at the cost of a slightly higher memory use. You may also have to modify
#' \code{learning_rate} and increase \code{n_epochs}, so whether this provides
#' a speed increase over the single-threaded optimization is likely to be
#' dataset and hardware-dependent. If \code{NULL}, the transform will use the
#' value provided in the \code{model}, if available. Default: \code{FALSE}.
#' @param learning_rate Initial learning rate used in optimization of the
#' coordinates. This overrides the value associated with the \code{model}.
#' This should be left unspecified under most circumstances.
#' @param opt_args A list of optimizer parameters, used when
#' \code{batch = TRUE}. The default optimization method used is Adam (Kingma
#' and Ba, 2014).
#' \itemize{
#' \item \code{method} The optimization method to use. Either \code{"adam"}
#' or \code{"sgd"} (stochastic gradient descent). Default: \code{"adam"}.
#' \item \code{beta1} (Adam only). The weighting parameter for the
#' exponential moving average of the first moment estimator. Effectively the
#' momentum parameter. Should be a floating point value between 0 and 1.
#' Higher values can smooth oscillatory updates in poorly-conditioned
#' situations and may allow for a larger \code{learning_rate} to be
#' specified, but too high can cause divergence. Default: \code{0.5}.
#' \item \code{beta2} (Adam only). The weighting parameter for the
#' exponential moving average of the uncentered second moment estimator.
#' Should be a floating point value between 0 and 1. Controls the degree of
#' adaptivity in the step-size. Higher values put more weight on previous
#' time steps. Default: \code{0.9}.
#' \item \code{eps} (Adam only). Intended to be a small value to prevent
#' division by zero, but in practice can also affect convergence due to its
#' interaction with \code{beta2}. Higher values reduce the effect of the
#' step-size adaptivity and bring the behavior closer to stochastic gradient
#' descent with momentum. Typical values are between 1e-8 and 1e-3. Default:
#' \code{1e-7}.
#' \item \code{alpha} The initial learning rate. Default: the value of the
#' \code{learning_rate} parameter.
#' }
#' If \code{NULL}, the transform will use the value provided in the
#' \code{model}, if available.
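#' For example, \code{opt_args = list(alpha = 0.3, beta1 = 0.8)} is a sketch of
#' overriding the initial learning rate and the momentum-like parameter while
#' keeping the remaining Adam defaults listed above.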
#' @param epoch_callback A function which will be invoked at the end of every
#' epoch. Its signature should be:
#' \code{(epoch, n_epochs, coords, fixed_coords)}, where:
#' \itemize{
#' \item \code{epoch} The current epoch number (between \code{1} and
#' \code{n_epochs}).
#' \item \code{n_epochs} Number of epochs to use during the optimization of
#' the embedded coordinates.
#' \item \code{coords} The embedded coordinates as of the end of the current
#' epoch, as a matrix with dimensions (N, \code{n_components}).
#' \item \code{fixed_coords} The originally embedded coordinates from the
#' \code{model}. These are fixed and do not change. A matrix with dimensions
#' (Nmodel, \code{n_components}) where \code{Nmodel} is the number of
#' observations in the original data.
#' }
#' @param ret_extra A vector indicating what extra data to return. May contain
#' any combination of the following strings:
#' \itemize{
#' \item \code{"fgraph"} the high dimensional fuzzy graph (i.e. the fuzzy
#' simplicial set of the merged local views of the input data). The graph
#' is returned as a sparse matrix of class \link[Matrix]{dgCMatrix-class}
#' with dimensions \code{NX} x \code{Nmodel}, where \code{NX} is the number
#' of items in the data to transform in \code{X}, and \code{Nmodel} is
#' the number of items in the data used to build the UMAP \code{model}.
#' A non-zero entry (i, j) gives the membership strength of the edge
#' connecting the vertex representing the ith item in \code{X} to the
#' jth item in the data used to build the \code{model}. Note that the
#' graph is further sparsified by removing edges with sufficiently low
#' membership strength that they would not be sampled by the probabilistic
#' edge sampling employed for optimization and therefore the number of
#' non-zero elements in the matrix is dependent on \code{n_epochs}. If you
#' are only interested in the fuzzy input graph (e.g. for clustering),
#' setting \code{n_epochs = 0} will avoid any further sparsifying.
#' }
#' @param seed Integer seed to use to initialize the random number generator
#' state. Combined with \code{n_sgd_threads = 1} or \code{batch = TRUE}, this
#' should give consistent output across multiple runs on a given installation.
#' Setting this value is equivalent to calling \code{\link[base]{set.seed}},
#' but it may be more convenient in some situations than having to call a
#' separate function. The default is to not set a seed, in which case this
#' function uses the behavior specified by the supplied \code{model}: If the
#' model specifies a seed, then the model seed will be used to seed the
#' random number generator, and results will still be consistent (if
#' \code{n_sgd_threads = 1}). If you want to force the seed to not be set,
#' even if it is set in \code{model}, set \code{seed = FALSE}.
#' @return A matrix of coordinates for \code{X} transformed into the space
#' of the \code{model}, or if \code{ret_extra} is specified, a list
#' containing:
#' \itemize{
#' \item \code{embedding} the matrix of optimized coordinates.
#' \item if \code{ret_extra} contains \code{"fgraph"}, an item of the same
#' name containing the high-dimensional fuzzy graph as a sparse matrix, of
#' type \link[Matrix]{dgCMatrix-class}.
#' \item if \code{ret_extra} contains \code{"sigma"}, returns a vector of
#' the smooth knn distance normalization terms for each observation as
#' \code{"sigma"} and a vector \code{"rho"} containing the largest
#' distance to the locally connected neighbors of each observation.
#' \item if \code{ret_extra} contains \code{"localr"}, an item of the same
#' name containing a vector of the estimated local radii, the sum of
#' \code{"sigma"} and \code{"rho"}.
#' \item if \code{ret_extra} contains \code{"nn"}, an item of the same name
#' containing the nearest neighbors of each item in \code{X} (with respect
#' to the items that created the \code{model}).
#' }
#' @examples
#'
#' iris_train <- iris[1:100, ]
#' iris_test <- iris[101:150, ]
#'
#' # You must set ret_model = TRUE to return extra data needed
#' iris_train_umap <- umap(iris_train, ret_model = TRUE)
#' iris_test_umap <- umap_transform(iris_test, iris_train_umap)
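#'
#' # A sketch of requesting extra output and monitoring progress with an epoch
#' # callback; the callback signature follows the documentation above
#' progress_cb <- function(epoch, n_epochs, coords, fixed_coords) {
#'   message("transform epoch ", epoch, " of ", n_epochs)
#' }
#' iris_test_umap2 <- umap_transform(iris_test, iris_train_umap,
#'   n_epochs = 20, epoch_callback = progress_cb, ret_extra = c("fgraph")
#' )
#' # iris_test_umap2 is a list with items 'embedding' and 'fgraph'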
#' @export
umap_transform <- function(X = NULL, model = NULL,
nn_method = NULL,
init_weighted = TRUE,
search_k = NULL,
tmpdir = tempdir(),
n_epochs = NULL,
n_threads = NULL,
n_sgd_threads = 0,
grain_size = 1,
verbose = FALSE,
init = "weighted",
batch = NULL,
learning_rate = NULL,
opt_args = NULL,
epoch_callback = NULL,
ret_extra = NULL,
seed = NULL
) {
if (is.null(n_threads)) {
n_threads <- default_num_threads()
}
if (is.null(nn_method)) {
if (is.null(X)) {
stop('argument "X" is missing, with no default')
}
if (is.null(model)) {
stop('argument "model" is missing, with no default')
}
if (!all_nn_indices_are_loaded(model)) {
stop("cannot use model: NN index is unloaded." ,
" Try reloading with `load_uwot`")
}
}
else {
if (!is.null(X)) {
tsmessage('argument "nn_method" is provided, ignoring argument "X"')
X <- NULL
}
}
if (is.null(n_epochs)) {
n_epochs <- model$n_epochs
if (is.null(n_epochs)) {
      # base the default on the number of observations in the model (the
      # previous check referenced `graph`, which is not defined at this point)
      if (nrow(model$embedding) <= 10000) {
n_epochs <- 100
}
else {
n_epochs <- 30
}
}
else {
n_epochs <- max(2, round(n_epochs / 3))
}
}
# Handle setting the random number seed internally:
# 1. If the user specifies seed = FALSE, definitely don't set the seed, even
# if the model has a seed.
# 2. If the user specifies seed = integer, then use that seed, even if the
# model has a seed.
# 3. If the user does not specify a seed, then use the model seed, if it
# exists. Otherwise don't set a seed. Also use this code path if the user
# sets seed = TRUE
if (is.logical(seed) && !seed) {
# do nothing
}
# handle the seed = TRUE case in this clause too
else if (is.logical(seed) || is.null(seed)) {
if (!is.null(model$seed)) {
tsmessage("Setting model random seed ", model$seed)
set.seed(model$seed)
}
# otherwise no model seed, so do nothing
}
else {
tsmessage("Setting random seed ", seed)
set.seed(seed)
}
if (is.null(search_k)) {
search_k <- model$search_k
}
nn_index <- model$nn_index
n_neighbors <- model$n_neighbors
local_connectivity <- model$local_connectivity
train_embedding <- model$embedding
if (!is.matrix(train_embedding)) {
# this should only happen if the user set
# `n_epochs = 0, init = NULL, ret_model = TRUE`
stop("Invalid embedding coordinates: should be a matrix, but got ",
paste0(class(train_embedding), collapse = " "))
}
if (any(is.na(train_embedding))) {
stop("Model embedding coordinates contains NA values")
}
n_train_vertices <- nrow(train_embedding)
ndim <- ncol(train_embedding)
row.names(train_embedding) <- NULL
# uwot model format should be changed so train embedding is stored transposed
train_embedding <- t(train_embedding)
method <- model$method
scale_info <- model$scale_info
metric <- model$metric
nblocks <- length(metric)
pca_models <- model$pca_models
if (method == "leopold") {
dens_scale <- model$dens_scale
aj <- model$ai
rad_coeff <- model$rad_coeff
}
if (is.null(batch)) {
if (!is.null(model$batch)) {
batch <- model$batch
}
else {
batch <- FALSE
}
}
if (is.null(opt_args)) {
if (!is.null(model$opt_args)) {
opt_args <- model$opt_args
}
else {
opt_args <- list()
}
}
a <- model$a
b <- model$b
gamma <- model$gamma
if (is.null(learning_rate)) {
alpha <- model$alpha
}
else {
alpha <- learning_rate
}
if (! is.numeric(alpha) || length(alpha) > 1 || alpha < 0) {
stop("learning rate should be a positive number, not ", alpha)
}
negative_sample_rate <- model$negative_sample_rate
approx_pow <- model$approx_pow
norig_col <- model$norig_col
pcg_rand <- model$pcg_rand
if (is.null(pcg_rand)) {
tsmessage("Using PCG for random number generation")
pcg_rand <- TRUE
}
num_precomputed_nns <- model$num_precomputed_nns
binary_edge_weights <- model$binary_edge_weights
if (is.null(binary_edge_weights)) {
binary_edge_weights <- FALSE
}
# the number of model vertices
n_vertices <- NULL
Xnames <- NULL
if (!is.null(X)) {
if (!(methods::is(X, "data.frame") || methods::is(X, "matrix"))) {
stop("Unknown input data format")
}
if (!is.null(norig_col) && ncol(X) != norig_col) {
stop("Incorrect dimensions: X must have ", norig_col, " columns")
}
if (methods::is(X, "data.frame")) {
indexes <- which(vapply(X, is.numeric, logical(1)))
if (length(indexes) == 0) {
stop("No numeric columns found")
}
X <- as.matrix(X[, indexes])
}
n_vertices <- nrow(X)
if (n_vertices < 1) {
stop("Not enough rows in X")
}
if (!is.null(row.names(X))) {
Xnames <- row.names(X)
}
checkna(X)
} else if (nn_is_precomputed(nn_method)) {
# https://github.com/jlmelville/uwot/issues/97
# In the case where the training model didn't use pre-computed neighbors
# we treat it like it had one block
if (num_precomputed_nns == 0) {
num_precomputed_nns <- 1
}
# store single nn graph as a one-item list
if (num_precomputed_nns == 1 && nn_is_single(nn_method)) {
nn_method <- list(nn_method)
}
if (length(nn_method) != num_precomputed_nns) {
stop("Wrong # pre-computed neighbor data blocks, expected: ",
num_precomputed_nns, " but got: ", length(nn_method))
}
if (length(n_neighbors) != num_precomputed_nns) {
stop("Wrong # n_neighbor values (one per neighbor block), expected: ",
num_precomputed_nns, " but got: ", length(n_neighbors))
}
for (i in 1:num_precomputed_nns) {
graph <- nn_method[[i]]
if (is.list(graph)) {
check_graph(graph, expected_rows = n_vertices,
expected_cols = n_neighbors[[i]], bipartite = TRUE)
if (is.null(n_vertices)) {
n_vertices <- nrow(graph$idx)
}
if (is.null(Xnames)) {
Xnames <- nn_graph_row_names(graph)
}
}
else if (is_sparse_matrix(graph)) {
# nn graph should have dims n_train_obs x n_test_obs
graph <- Matrix::drop0(graph)
if (is.null(n_vertices)) {
n_vertices <- ncol(graph)
}
if (is.null(Xnames)) {
Xnames <- colnames(graph)
}
}
else {
stop("Error: unknown neighbor graph format")
}
}
nblocks <- num_precomputed_nns
}
if (!is.null(init)) {
if (is.logical(init)) {
init_weighted <- init
}
else if (is.character(init)) {
init <- tolower(init)
if (init == "average") {
init_weighted <- FALSE
}
else if (init == "weighted") {
init_weighted <- TRUE
}
else {
stop("Unknown option for init: '", init, "'")
}
}
else if (is.matrix(init)) {
indim <- dim(init)
xdim <- c(n_vertices, ndim)
if (!all(indim == xdim)) {
stop("Initial embedding matrix has wrong dimensions, expected (",
xdim[1], ", ", xdim[2], "), but was (",
indim[1], ", ", indim[2], ")")
}
if (any(is.na(init))) {
stop("Initial embedding matrix coordinates contains NA values")
}
if (is.null(Xnames) && !is.null(row.names(init))) {
Xnames <- row.names(init)
}
init_weighted <- NULL
}
else {
stop("Invalid input format for 'init'")
}
}
if (is.null(n_vertices)) {
stop("Failed to read input correctly: invalid input format")
}
if (verbose) {
x_is_matrix <- methods::is(X, "matrix")
tsmessage("Read ", n_vertices, " rows", appendLF = !x_is_matrix)
if (x_is_matrix) {
tsmessage(" and found ", ncol(X), " numeric columns", time_stamp = FALSE)
}
}
if (!is.null(scale_info)) {
X <- apply_scaling(X, scale_info = scale_info, verbose = verbose)
}
adjusted_local_connectivity <- max(0, local_connectivity - 1.0)
graph <- NULL
embedding <- NULL
localr <- NULL
sigma <- NULL
rho <- NULL
export_nns <- NULL
ret_nn <- FALSE
if ("nn" %in% ret_extra) {
ret_nn <- TRUE
export_nns <- list()
}
need_sigma <- (method == "leopold" && nblocks == 1) || "sigma" %in% ret_extra
for (i in 1:nblocks) {
tsmessage("Processing block ", i, " of ", nblocks)
if (!is.null(X)) {
if (nblocks == 1) {
Xsub <- X
ann <- nn_index
}
else {
subset <- metric[[i]]
if (is.list(subset)) {
subset <- lsplit_unnamed(subset)$unnamed[[1]]
}
Xsub <- X[, subset, drop = FALSE]
ann <- nn_index[[i]]
}
if (!is.null(pca_models) && !is.null(pca_models[[as.character(i)]])) {
Xsub <- apply_pca(
X = Xsub, pca_res = pca_models[[as.character(i)]],
verbose = verbose
)
}
nn <- annoy_search(Xsub,
k = n_neighbors, ann = ann, search_k = search_k,
prep_data = TRUE,
tmpdir = tmpdir,
n_threads = n_threads, grain_size = grain_size,
verbose = verbose)
if (ret_nn) {
export_nns[[i]] <- nn
names(export_nns)[[i]] <- ann$metric
}
} else if (is.list(nn_method)) {
# otherwise we expect a list of NN graphs
nn <- nn_method[[i]]
if (ret_nn) {
export_nns[[i]] <- nn
names(export_nns)[[i]] <- "precomputed"
}
}
else {
stop("Can't transform new data if X is NULL ",
"and no sparse distance matrix available")
}
osparse <- NULL
if (is_sparse_matrix(nn)) {
nn <- Matrix::drop0(nn)
osparse <- order_sparse(nn)
nn_idxv <- osparse$i + 1
nn_distv <- osparse$x
nn_ptr <- osparse$p
n_nbrs <- diff(nn_ptr)
if (any(n_nbrs < 1)) {
stop("All observations need at least one neighbor")
}
target <- log2(n_nbrs)
skip_first <- TRUE
}
else {
nnt <- nn_graph_t(nn)
if (length(n_neighbors) == nblocks) {
# if model came from multiple different external neighbor data
n_nbrs <- n_neighbors[[i]]
}
else {
# multiple internal blocks
n_nbrs <- n_neighbors
}
if (is.na(n_nbrs) || n_nbrs != nrow(nnt$idx)) {
# original neighbor data was sparse, but we are using dense knn format
# or n_neighbors doesn't match
n_nbrs <- nrow(nnt$idx)
tsmessage("Possible mismatch with original vs new neighbor data ",
"format, using ", n_nbrs, " nearest neighbors")
}
target <- log2(n_nbrs)
nn_ptr <- n_nbrs
nn_distv <- as.vector(nnt$dist)
nn_idxv <- as.vector(nnt$idx)
skip_first <- TRUE
}
sknn_res <- smooth_knn(
nn_dist = nn_distv,
nn_ptr = nn_ptr,
skip_first = skip_first,
target = target,
local_connectivity = adjusted_local_connectivity,
n_threads = n_threads,
grain_size = grain_size,
verbose = verbose,
ret_sigma = TRUE
)
if (is.null(localr) && need_sigma) {
# because of the adjusted local connectivity rho is too small compared
# to that used to generate the "training" data but sigma is larger, so
# let's just stick with sigma + rho even though it tends to be an
# underestimate
sigma <- sknn_res$sigma
rho <- sknn_res$rho
localr <- sknn_res$sigma + sknn_res$rho
}
graph_blockv <- sknn_res$matrix
if (is_sparse_matrix(nn)) {
graph_block <- Matrix::sparseMatrix(j = osparse$i, p = osparse$p, x = graph_blockv,
dims = rev(osparse$dims), index1 = FALSE)
}
else {
graph_block <- nn_to_sparse(nn_idxv, n_vertices, graph_blockv,
self_nbr = FALSE,
max_nbr_id = n_train_vertices,
by_row = FALSE
)
}
if (is.logical(init_weighted)) {
embedding_block <-
init_new_embedding(
train_embedding = train_embedding,
nn_idx = nn_idxv,
n_test_vertices = n_vertices,
graph = graph_blockv,
weighted = init_weighted,
n_threads = n_threads,
grain_size = grain_size,
verbose = verbose
)
if (is.null(embedding)) {
embedding <- embedding_block
}
else {
embedding <- embedding + embedding_block
}
}
if (is.null(graph)) {
graph <- graph_block
}
else {
graph <- set_intersect(graph, graph_block, weight = 0.5,
reset_connectivity = FALSE)
}
}
if (is.logical(init_weighted)) {
if (nblocks > 1) {
embedding <- embedding / nblocks
}
}
else {
tsmessage("Initializing from user-supplied matrix")
embedding <- t(init)
}
if (binary_edge_weights) {
tsmessage("Using binary edge weights")
graph@x <- rep(1, length(graph@x))
}
if (n_epochs > 0) {
graph@x[graph@x < max(graph@x) / n_epochs] <- 0
graph <- Matrix::drop0(graph)
# Edges are (i->j) where i (head) is from the new data and j (tail) is
# in the model data
# Unlike embedding of initial data, the edge list is therefore NOT symmetric
# i.e. the presence of (i->j) does NOT mean (j->i) is also present because
# i and j now come from different data
if (batch) {
# This is the same arrangement as Python UMAP
graph <- Matrix::t(graph)
# ordered indices of the new data nodes. Coordinates are updated
# during optimization
positive_head <- Matrix::which(graph != 0, arr.ind = TRUE)[, 2] - 1
# unordered indices of the model nodes (some may not have any incoming
# edges), these coordinates will NOT update during the optimization
positive_tail <- graph@i
}
else {
# unordered indices of the new data nodes. Coordinates are updated
# during optimization
positive_head <- graph@i
# ordered indices of the model nodes (some may not have any incoming edges)
# these coordinates will NOT update during the optimization
positive_tail <- Matrix::which(graph != 0, arr.ind = TRUE)[, 2] - 1
}
n_head_vertices <- ncol(embedding)
n_tail_vertices <- n_train_vertices
# if batch = TRUE points into the head (length == n_tail_vertices)
# if batch = FALSE, points into the tail (length == n_head_vertices)
positive_ptr <- graph@p
epochs_per_sample <- make_epochs_per_sample(graph@x, n_epochs)
tsmessage(
"Commencing optimization for ", n_epochs, " epochs, with ",
length(positive_head), " positive edges",
pluralize("thread", n_sgd_threads, " using")
)
method <- tolower(method)
if (method == "leopold") {
# Use the linear model 2 log ai = -m log(localr) + c
ai <- exp(0.5 * ((-log(localr) * rad_coeff[2]) + rad_coeff[1]))
      # Prevent too-small/large ai values
min_ai <- min(sqrt(a * 10 ^ (-2 * dens_scale)), 0.1)
ai[ai < min_ai] <- min_ai
max_ai <- sqrt(a * 10 ^ (2 * dens_scale))
ai[ai > max_ai] <- max_ai
method <- "leopold2"
}
method_args <- switch(method,
umap = list(a = a, b = b, gamma = gamma, approx_pow = approx_pow),
leopold2 = list(ai = ai, aj = aj, b = b, ndim = ndim),
list()
)
full_opt_args <- get_opt_args(opt_args, alpha)
embedding <- optimize_layout_r(
head_embedding = embedding,
tail_embedding = train_embedding,
positive_head = positive_head,
positive_tail = positive_tail,
positive_ptr = positive_ptr,
n_epochs = n_epochs,
n_head_vertices = n_head_vertices,
n_tail_vertices = n_tail_vertices,
epochs_per_sample = epochs_per_sample,
method = tolower(method),
method_args = method_args,
initial_alpha = alpha / 4.0,
opt_args = full_opt_args,
negative_sample_rate = negative_sample_rate,
pcg_rand = pcg_rand,
batch = batch,
n_threads = n_sgd_threads,
grain_size = grain_size,
move_other = FALSE,
verbose = verbose,
epoch_callback = epoch_callback
)
}
embedding <- t(embedding)
tsmessage("Finished")
if (!is.null(Xnames)) {
row.names(embedding) <- Xnames
}
if (length(ret_extra) > 0) {
res <- list(embedding = embedding)
for (name in ret_extra) {
if (name == "fgraph") {
res$fgraph <- graph
}
if (name == "sigma") {
res$sigma <- sigma
res$rho <- rho
}
if (name == "localr" && !is.null(localr)) {
res$localr <- localr
}
if (ret_nn && !is.null(export_nns)) {
res$nn <- export_nns
}
}
}
else {
res <- embedding
}
res
}
init_new_embedding <-
function(train_embedding,
nn_idx,
n_test_vertices,
graph,
weighted = TRUE,
n_threads = NULL,
grain_size = 1,
verbose = FALSE) {
if (is.null(n_threads)) {
n_threads <- default_num_threads()
}
avtype <- ifelse(weighted, "weighted ", "")
tsmessage(
"Initializing by ",
avtype,
"average of neighbor coordinates",
pluralize("thread", n_threads, " using")
)
nn_weights <- NULL
if (weighted) {
nn_weights <- graph
}
init_transform_parallel(
train_embedding = train_embedding,
nn_index = nn_idx,
n_test_vertices = n_test_vertices,
nn_weights = nn_weights,
n_threads = n_threads,
grain_size = grain_size
)
}
# Pure R implementation of (weighted) average. Superseded by C++ implementations
init_transform <- function(train_embedding, nn_index, weights = NULL) {
nr <- nrow(nn_index)
nc <- ncol(train_embedding)
embedding <- matrix(nrow = nr, ncol = nc)
if (is.null(weights)) {
for (i in 1:nr) {
nbr_embedding <- train_embedding[nn_index[i, ], ]
embedding[i, ] <- apply(nbr_embedding, 2, mean)
}
}
else {
for (i in 1:nr) {
nbr_embedding <- train_embedding[nn_index[i, ], ]
nbr_weights <- weights[nn_index[i, ], i]
embedding[i, ] <- apply(
nbr_embedding, 2,
function(x) {
stats::weighted.mean(x, nbr_weights)
}
)
}
}
embedding
}
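# init_transform usage (a comment-only sketch with hypothetical toy data):
#   train_emb <- matrix(rnorm(20), ncol = 2) # 10 model coordinates in 2D
#   nbrs <- matrix(sample(10, 15, replace = TRUE), ncol = 3) # 5 items, 3 neighbors
#   init_transform(train_emb, nbrs) # 5 x 2 matrix of neighbor coordinate means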
apply_scaling <- function(X, scale_info, verbose = FALSE) {
if (!is.null(scale_info[["scaled:range:min"]])) {
tsmessage("Applying training data range scaling")
X <- X - scale_info[["scaled:range:min"]]
X <- X / scale_info[["scaled:range:max"]]
}
else if (!is.null(scale_info[["scaled:maxabs"]])) {
tsmessage("Applying training data max-abs scaling")
X <- scale(X, center = scale_info[["scaled:center"]], scale = FALSE)
X <- X / scale_info[["scaled:maxabs"]]
}
else if (!is.null(scale_info[["scaled:colrange:min"]])) {
tsmessage("Applying training data column range scaling")
X <- sweep(X, 2, scale_info[["scaled:colrange:min"]])
X <- sweep(X, 2, scale_info[["scaled:colrange:max"]], `/`)
}
else {
tsmessage("Applying training data column filtering/scaling")
X <- X[, scale_info[["scaled:nzvcols"]]]
X <- scale(X,
center = scale_info[["scaled:center"]],
scale = scale_info[["scaled:scale"]]
)
}
X
}
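# apply_scaling usage (a comment-only sketch with hypothetical values; in
# practice scale_info comes from model$scale_info):
#   scale_info <- list("scaled:range:min" = 0, "scaled:range:max" = 10)
#   apply_scaling(matrix(1:6, ncol = 2), scale_info)
# subtracts the stored minimum and then divides by the stored maximum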
# Apply a previously calculated set of PCA rotations
apply_pca <- function(X, pca_res, verbose = FALSE) {
tsmessage("Applying PCA reducing to ", ncol(X), " dimensions")
if (!is.null(pca_res$center)) {
X <- sweep(X, 2, pca_res$center)
}
X %*% pca_res$rotation
}
all_nn_indices_are_loaded <- function(model) {
if (is.null(model$nn_index)) {
stop("Invalid model: has no 'nn_index'")
}
if (is.list(model$nn_index)) {
for (i in 1:length(model$nn_index)) {
      if (model$nn_index[[i]]$getNTrees() == 0) {
return(FALSE)
}
}
}
else if (model$nn_index$getNTrees() == 0) {
return(FALSE)
}
TRUE
}
|
/scratch/gouwar.j/cran-all/cranData/uwot/R/transform.R
|
stime <- function() {
format(Sys.time(), "%T")
}
# message with a time stamp
# appears only if called from an environment where a logical verbose = TRUE
# OR force = TRUE
tsmessage <- function(..., domain = NULL, appendLF = TRUE, force = FALSE,
time_stamp = TRUE) {
verbose <- get0("verbose", envir = sys.parent())
if (force || (!is.null(verbose) && verbose)) {
msg <- ""
if (time_stamp) {
msg <- paste0(stime(), " ")
}
message(msg, ..., domain = domain, appendLF = appendLF)
utils::flush.console()
}
}
# log vector information
summarize <- function(X, msg = "") {
summary_X <- summary(X, digits = max(3, getOption("digits") - 3))
tsmessage(msg, ": ", paste(names(summary_X), ":", summary_X, "|",
collapse = ""
),
force = get0("verbose", envir = sys.parent())
)
}
# pluralize("thread", 1) => "1 thread"
# pluralize("thread", 2) => "2 threads"
pluralize <- function(str, n, prefix = NULL, inc_num = TRUE) {
if (n == 0) {
return("")
}
ret <- paste0(str, ifelse(n != 1, "s", ""))
if (inc_num) {
ret <- paste0(n, " ", ret)
}
if (!is.null(prefix)) {
ret <- paste0(prefix, " ", ret)
}
ret
}
# convert data frame to matrix using numeric columns
x2m <- function(X) {
if (!methods::is(X, "matrix")) {
m <- as.matrix(X[, which(vapply(X, is.numeric, logical(1)))])
}
else {
m <- X
}
m
}
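# e.g. x2m(iris) drops the non-numeric Species column and returns a 150 x 4
# numeric matrix; a matrix input is returned unchanged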
# given a metric argument, returns a list containing:
# metrics - the input list with any members called "categorical" removed
# categoricals - a vector of the categorical ids
find_categoricals <- function(metrics) {
res <- list(
metrics = metrics
)
if (is.list(metrics)) {
cat_pos <- grep("categorical", names(metrics))
if (length(cat_pos) > 0) {
cat_ids <- unlist(metrics[cat_pos])
names(cat_ids) <- NULL
res <- list(
metrics = metrics[-cat_pos],
categoricals = cat_ids
)
}
}
res
}
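# e.g. find_categoricals(list(euclidean = 1:4, categorical = "Species"))
# returns list(metrics = list(euclidean = 1:4), categoricals = "Species")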
# Splits a list into its named and unnamed components:
# > lsplit_unnamed(list(1:10, pca_center = FALSE))
# $named
# $named$pca_center
# [1] FALSE
#
#
# $unnamed
# $unnamed[[1]]
# [1] 1 2 3 4 5 6 7 8 9 10
lsplit_unnamed <- function(l) {
lnames <- names(l)
if (is.null(lnames)) {
return(list(unnamed = l))
}
is_named <- lnames != ""
nids <- which(is_named)
uids <- which(!is_named)
if (length(uids) == 0) {
return(list(named = l[nids]))
}
list(
named = l[nids],
unnamed = l[uids]
)
}
# Do work and update a progress bar
progress_for <- function(n, nchunks, fun) {
message("0% 10 20 30 40 50 60 70 80 90 100%")
message("[----|----|----|----|----|----|----|----|----|----|")
remaining <- n
chunk_end <- 0
for (i in 1:nchunks) {
chunk_start <- chunk_end + 1
chunk_end <- chunk_start + round(remaining / (nchunks - i + 1)) - 1
remaining <- remaining - (chunk_end - chunk_start + 1)
fun(chunk_start, chunk_end)
message("*", appendLF = FALSE)
utils::flush.console()
}
message("|")
}
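# progress_for usage (a comment-only sketch): split 100 items into 10 chunks,
# printing one '*' per processed chunk
#   progress_for(100, 10, function(chunk_start, chunk_end) {
#     # work on items chunk_start:chunk_end here
#   })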
checkna <- function(X) {
if (!is.null(X) && any(is.na(X))) {
stop("Missing values found in 'X'")
}
}
check_graph <- function(graph, expected_rows = NULL, expected_cols = NULL,
bipartite = FALSE) {
idx <- graph$idx
dist <- graph$dist
if (!methods::is(idx, "matrix")) {
stop("neighbor graph must contain an 'idx' matrix")
}
if (!methods::is(dist, "matrix")) {
stop("neighbor graph must contain a 'dist' matrix")
}
if (!all(dim(idx) == dim(dist))) {
stop("'idx' and 'dist' matrices must have identical dimensions")
}
# graph may be our only source of input data, in which case no other source
# to validate from
if (!is.null(expected_rows)) {
if (nrow(idx) != expected_rows) {
stop("idx matrix has unexpected number of rows")
}
}
if (!is.null(expected_cols) && !is.na(expected_cols)) {
if (ncol(idx) != expected_cols) {
stop("idx matrix has unexpected number of columns")
}
}
# if looking at neighbors within one graph there can't be more neighbors
# than observations
if (!bipartite) {
if (ncol(idx) > nrow(idx)) {
stop("Invalid neighbors: number exceeds number of observations")
}
if (max(idx) > nrow(idx)) {
stop("Invalid neighbors: max index exceeds number of observations")
}
}
}
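# check_graph usage (a comment-only sketch): a valid 3-observation, 2-neighbor
# graph passes silently
#   check_graph(list(idx = matrix(c(1, 2, 3, 2, 3, 1), nrow = 3),
#                    dist = matrix(0.1, nrow = 3, ncol = 2)))
# mismatched dimensions or a missing 'idx'/'dist' matrix will stop()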
check_sparse_graph <- function(graph, expected_rows = NULL,
expected_cols = NULL, bipartite = FALSE) {
if (!is.null(expected_rows)) {
if (nrow(graph) != expected_rows) {
stop("Sparse distance matrix has unexpected number of rows")
}
}
if (!is.null(expected_cols)) {
if (ncol(graph) != expected_cols) {
stop("Sparse distance matrix has unexpected number of cols")
}
}
if (!bipartite) {
if (nrow(graph) != ncol(graph)) {
stop("Sparse distance matrix must have same number of rows and cols")
}
}
}
check_graph_list <- function(graph_list, expected_rows = NULL,
expected_cols = NULL, bipartite = FALSE) {
if (nn_is_single(graph_list)) {
graph_list <- list(graph_list)
}
num_nns <- length(graph_list)
if (num_nns == 0) {
stop("precalculated graph list is empty")
}
for (i in 1:num_nns) {
graph <- graph_list[[i]]
if (is.list(graph)) {
check_graph(graph, expected_rows, expected_cols, bipartite = bipartite)
}
else if (is_sparse_matrix(graph)) {
check_sparse_graph(graph, expected_rows, expected_cols,
bipartite = bipartite)
}
else {
stop("Unknown neighbor data format")
}
}
num_nns
}
nn_graph_row_names_list <- function(graph_list) {
if (nn_is_single(graph_list)) {
graph_list <- list(graph_list)
}
xnames <- NULL
for (i in 1:length(graph_list)) {
graph <- graph_list[[i]]
if (is.list(graph)) {
xnames <- nn_graph_row_names(graph)
}
else if (is_sparse_matrix(graph)) {
xnames <- row.names(graph)
}
else {
stop("Unknown neighbor data format")
}
if (!is.null(xnames)) {
break
}
}
xnames
}
# from a nn graph (or list) get the first non-NULL row names
nn_graph_row_names <- function(graph) {
xnames <- NULL
if (!is.null(row.names(graph$idx))) {
xnames <- row.names(graph$idx)
}
if (is.null(xnames) && !is.null(row.names(graph$dist))) {
xnames <- row.names(graph$dist)
}
xnames
}
nn_graph_nbrs_list <- function(graph_list) {
if (nn_is_single(graph_list)) {
graph_list <- list(graph_list)
}
sapply(graph_list, nn_graph_nbrs)
}
# from a nn graph (or list) get the number of neighbors
nn_graph_nbrs <- function(graph) {
if (is.list(graph)) {
ncol(graph$idx)
}
else if (is_sparse_matrix(graph)) {
NA
}
else {
stop("Unknown neighbor data format")
}
}
is_sparse_matrix <- function(m) {
methods::is(m, "sparseMatrix")
}
# Add the (named) values in l2 to l1.
# Use to override default values in l1 with user-supplied values in l2
lmerge <- function(l1, l2) {
for (name in names(l2)) {
l1[[name]] <- l2[[name]]
}
l1
}
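# e.g. lmerge(list(a = 1, b = 2), list(b = 3, c = 4)) returns
# list(a = 1, b = 3, c = 4)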
range_scale <- function(x, min = 0, max = 1) {
(x - min(x)) / (max(x) - min(x)) * (max - min) + min
}
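# e.g. range_scale(c(2, 4, 6), min = 0, max = 10) returns c(0, 5, 10)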
is_installed <- function(pkgname) {
requireNamespace(pkgname,
quietly = TRUE,
warn.conflicts = FALSE)
isNamespaceLoaded(pkgname)
}
is_win7 <- function() {
sys_info <- Sys.info()
sys_info[["sysname"]] == "Windows" &&
strsplit(sys_info["release"], split = " ")$release[[1]] == "7"
}
|
/scratch/gouwar.j/cran-all/cranData/uwot/R/util.R
|
#' Dimensionality Reduction with UMAP
#'
#' Carry out dimensionality reduction of a dataset using the Uniform Manifold
#' Approximation and Projection (UMAP) method (McInnes & Healy, 2018). Some of
#' the following help text is lifted verbatim from the Python reference
#' implementation at \url{https://github.com/lmcinnes/umap}.
#'
#' @param X Input data. Can be a \code{\link{data.frame}}, \code{\link{matrix}},
#' \code{\link[stats]{dist}} object or \code{\link[Matrix]{sparseMatrix}}.
#' Matrix and data frames should contain one observation per row. Data frames
#' will have any non-numeric columns removed, although factor columns will be
#' used if explicitly included via \code{metric} (see the help for
#' \code{metric} for details). A sparse matrix is interpreted as a distance
#' matrix, and is assumed to be symmetric, so you can also pass in an
#' explicitly upper or lower triangular sparse matrix to save storage. There
#' must be at least \code{n_neighbors} non-zero distances for each row. Both
#' implicit and explicit zero entries are ignored. Set zero distances you want
#' to keep to an arbitrarily small non-zero value (e.g. \code{1e-10}).
#' \code{X} can also be \code{NULL} if pre-computed nearest neighbor data is
#' passed to \code{nn_method}, and \code{init} is not \code{"spca"} or
#' \code{"pca"}.
#' @param n_neighbors The size of local neighborhood (in terms of number of
#' neighboring sample points) used for manifold approximation. Larger values
#' result in more global views of the manifold, while smaller values result in
#' more local data being preserved. In general values should be in the range
#' \code{2} to \code{100}.
#' @param n_components The dimension of the space to embed into. This defaults
#' to \code{2} to provide easy visualization, but can reasonably be set to any
#' integer value in the range \code{2} to \code{100}.
#' @param metric Type of distance metric to use to find nearest neighbors. One
#' of:
#' \itemize{
#' \item \code{"euclidean"} (the default)
#' \item \code{"cosine"}
#' \item \code{"manhattan"}
#' \item \code{"hamming"}
#' \item \code{"correlation"} (a distance based on the Pearson correlation)
#' \item \code{"categorical"} (see below)
#' }
#' Only applies if \code{nn_method = "annoy"} (for \code{nn_method = "fnn"}, the
#' distance metric is always "euclidean").
#'
#' If \code{X} is a data frame or matrix, then multiple metrics can be
#' specified, by passing a list to this argument, where the name of each item in
#' the list is one of the metric names above. The value of each list item should
#' be a vector giving the names or integer ids of the columns to be included in
#' a calculation, e.g. \code{metric = list(euclidean = 1:4, manhattan = 5:10)}.
#'
#' Each metric calculation results in a separate fuzzy simplicial set, which are
#' intersected together to produce the final set. Metric names can be repeated.
#' Because non-numeric columns are removed from the data frame, it is safer to
#' use column names than integer ids.
#'
#' Factor columns can also be used by specifying the metric name
#' \code{"categorical"}. Factor columns are treated different from numeric
#' columns and although multiple factor columns can be specified in a vector,
#' each factor column specified is processed individually. If you specify
#' a non-factor column, it will be coerced to a factor.
#'
#' For a given data block, you may override the \code{pca} and \code{pca_center}
#' arguments for that block, by providing a list with one unnamed item
#' containing the column names or ids, and then any of the \code{pca} or
#' \code{pca_center} overrides as named items, e.g. \code{metric =
#' list(euclidean = 1:4, manhattan = list(5:10, pca_center = FALSE))}. This
#' exists to allow mixed binary and real-valued data to be included and to have
#' PCA applied to both, but with centering applied only to the real-valued data
#' (it is typical not to apply centering to binary data before PCA is applied).
#' @param n_epochs Number of epochs to use during the optimization of the
#' embedded coordinates. By default, this value is set to \code{500} for
#' datasets containing 10,000 vertices or less, and \code{200} otherwise.
#' If \code{n_epochs = 0}, then coordinates determined by \code{"init"} will
#' be returned.
#' @param scale Scaling to apply to \code{X} if it is a data frame or matrix:
#' \itemize{
#' \item{\code{"none"} or \code{FALSE} or \code{NULL}} No scaling.
#' \item{\code{"Z"} or \code{"scale"} or \code{TRUE}} Scale each column to
#' zero mean and variance 1.
#' \item{\code{"maxabs"}} Center each column to mean 0, then divide each
#' element by the maximum absolute value over the entire matrix.
#' \item{\code{"range"}} Range scale the entire matrix, so the smallest
#' element is 0 and the largest is 1.
#' \item{\code{"colrange"}} Scale each column in the range (0,1).
#' }
#' For UMAP, the default is \code{"none"}.
#' @param learning_rate Initial learning rate used in optimization of the
#' coordinates.
#' @param init Type of initialization for the coordinates. Options are:
#' \itemize{
#' \item \code{"spectral"} Spectral embedding using the normalized Laplacian
#' of the fuzzy 1-skeleton, with Gaussian noise added.
#' \item \code{"normlaplacian"}. Spectral embedding using the normalized
#' Laplacian of the fuzzy 1-skeleton, without noise.
#' \item \code{"random"}. Coordinates assigned using a uniform random
#' distribution between -10 and 10.
#' \item \code{"lvrandom"}. Coordinates assigned using a Gaussian
#' distribution with standard deviation 1e-4, as used in LargeVis
#' (Tang et al., 2016) and t-SNE.
#' \item \code{"laplacian"}. Spectral embedding using the Laplacian Eigenmap
#' (Belkin and Niyogi, 2002).
#' \item \code{"pca"}. The first two principal components from PCA of
#' \code{X} if \code{X} is a data frame, and from a 2-dimensional classical
#' MDS if \code{X} is of class \code{"dist"}.
#' \item \code{"spca"}. Like \code{"pca"}, but each dimension is then scaled
#' so the standard deviation is 1e-4, to give a distribution similar to that
#' used in t-SNE. This is an alias for \code{init = "pca", init_sdev =
#' 1e-4}.
#' \item \code{"agspectral"} An "approximate global" modification of
#' \code{"spectral"} which all edges in the graph to a value of 1, and then
#' sets a random number of edges (\code{negative_sample_rate} edges per
#' vertex) to 0.1, to approximate the effect of non-local affinities.
#' \item A matrix of initial coordinates.
#' }
#' For spectral initializations, (\code{"spectral"}, \code{"normlaplacian"},
#' \code{"laplacian"}, \code{"agspectral"}), if more than one connected
#' component is identified, no spectral initialization is attempted. Instead
#' a PCA-based initialization is attempted. If \code{verbose = TRUE} the
#' number of connected components are logged to the console. The existence of
#' multiple connected components implies that a global view of the data cannot
#' be attained with this initialization. Increasing the value of
#' \code{n_neighbors} may help.
#' @param init_sdev If non-\code{NULL}, scales each dimension of the initialized
#' coordinates (including any user-supplied matrix) to this standard
#' deviation. By default no scaling is carried out, except when \code{init =
#' "spca"}, in which case the value is \code{0.0001}. Scaling the input may
#' help if the unscaled versions result in initial coordinates with large
#' inter-point distances or outliers. This usually results in small gradients
#' during optimization and very little progress being made to the layout.
#' Shrinking the initial embedding by rescaling can help under these
#' circumstances. Scaling the result of \code{init = "pca"} is usually
#' recommended and \code{init = "spca"} exists as an alias for \code{init = "pca",
#' init_sdev = 1e-4}, but for the spectral initializations the scaled versions
#' usually aren't necessary unless you are using a large value of
#' \code{n_neighbors} (e.g. \code{n_neighbors = 150} or higher). For
#' compatibility with recent versions of the Python UMAP package, if you are
#' using \code{init = "spectral"}, then you should also set
#' \code{init_sdev = "range"}, which will range scale each of the columns
#' containing the initial data between 0-10. This is not set by default to
#' maintain backwards compatibility with previous versions of uwot.
#' @param spread The effective scale of embedded points. In combination with
#' \code{min_dist}, this determines how clustered/clumped the embedded points
#' are.
#' @param min_dist The effective minimum distance between embedded points.
#' Smaller values will result in a more clustered/clumped embedding where
#' nearby points on the manifold are drawn closer together, while larger
#' values will result on a more even dispersal of points. The value should be
#' set relative to the \code{spread} value, which determines the scale at
#' which embedded points will be spread out.
#' @param set_op_mix_ratio Interpolate between (fuzzy) union and intersection as
#' the set operation used to combine local fuzzy simplicial sets to obtain a
#' global fuzzy simplicial sets. Both fuzzy set operations use the product
#' t-norm. The value of this parameter should be between \code{0.0} and
#' \code{1.0}; a value of \code{1.0} will use a pure fuzzy union, while
#' \code{0.0} will use a pure fuzzy intersection.
#' @param local_connectivity The local connectivity required -- i.e. the number
#' of nearest neighbors that should be assumed to be connected at a local
#' level. The higher this value the more connected the manifold becomes
#' locally. In practice this should be not more than the local intrinsic
#' dimension of the manifold.
#' @param bandwidth The effective bandwidth of the kernel if we view the
#' algorithm as similar to Laplacian Eigenmaps. Larger values induce more
#' connectivity and a more global view of the data, smaller values concentrate
#' more locally.
#' @param repulsion_strength Weighting applied to negative samples in low
#' dimensional embedding optimization. Values higher than one will result in
#' greater weight being given to negative samples.
#' @param negative_sample_rate The number of negative edge/1-simplex samples to
#' use per positive edge/1-simplex sample in optimizing the low dimensional
#' embedding.
#' @param a More specific parameters controlling the embedding. If \code{NULL}
#' these values are set automatically as determined by \code{min_dist} and
#' \code{spread}.
#' @param b More specific parameters controlling the embedding. If \code{NULL}
#' these values are set automatically as determined by \code{min_dist} and
#' \code{spread}.
#' @param nn_method Method for finding nearest neighbors. Options are:
#' \itemize{
#' \item \code{"fnn"}. Use exact nearest neighbors via the
#' \href{https://cran.r-project.org/package=FNN}{FNN} package.
#' \item \code{"annoy"} Use approximate nearest neighbors via the
#' \href{https://cran.r-project.org/package=RcppAnnoy}{RcppAnnoy} package.
#' }
#' By default, if \code{X} has less than 4,096 vertices, the exact nearest
#' neighbors are found. Otherwise, approximate nearest neighbors are used.
#' You may also pass pre-calculated nearest neighbor data to this argument. It
#' must be one of two formats, either a list consisting of two elements:
#' \itemize{
#' \item \code{"idx"}. A \code{n_vertices x n_neighbors} matrix
#' containing the integer indexes of the nearest neighbors in \code{X}. Each
#' vertex is considered to be its own nearest neighbor, i.e.
#' \code{idx[, 1] == 1:n_vertices}.
#' \item \code{"dist"}. A \code{n_vertices x n_neighbors} matrix
#' containing the distances of the nearest neighbors.
#' }
#' or a sparse distance matrix of type \code{dgCMatrix}, with dimensions
#' \code{n_vertices x n_vertices}. Distances should be arranged by column,
#' i.e. a non-zero entry in row \code{j} of the \code{i}th column indicates
#' that the \code{j}th observation in \code{X} is a nearest neighbor of the
#' \code{i}th observation with the distance given by the value of that
#' element.
#' The \code{n_neighbors} parameter is ignored when using precomputed
#' nearest neighbor data. If using the sparse distance matrix input, each
#' column can contain a different number of neighbors.
#' @param n_trees Number of trees to build when constructing the nearest
#' neighbor index. The more trees specified, the larger the index, but the
#' better the results. With \code{search_k}, determines the accuracy of the
#' Annoy nearest neighbor search. Only used if the \code{nn_method} is
#' \code{"annoy"}. Sensible values are between \code{10} to \code{100}.
#' @param search_k Number of nodes to search during the neighbor retrieval. The
#' larger \code{search_k}, the more accurate the results, but the longer the
#' search takes.
#' With \code{n_trees}, determines the accuracy of the Annoy nearest neighbor
#' search. Only used if the \code{nn_method} is \code{"annoy"}.
#' @param approx_pow If \code{TRUE}, use an approximation to the power function
#' in the UMAP gradient, from
#' \url{https://martin.ankerl.com/2012/01/25/optimized-approximative-pow-in-c-and-cpp/}.
#' Ignored if \code{dens_scale} is non-\code{NULL}.
#' @param y Optional target data for supervised dimension reduction. Can be a
#' vector, matrix or data frame. Use the \code{target_metric} parameter to
#' specify the metrics to use, using the same syntax as \code{metric}. Usually
#' either a single numeric or factor column is used, but more complex formats
#' are possible. The following types are allowed:
#' \itemize{
#' \item Factor columns with the same length as \code{X}. \code{NA} is
#' allowed for any observation with an unknown level, in which case
#' UMAP operates as a form of semi-supervised learning. Each column is
#' treated separately.
#' \item Numeric data. \code{NA} is \emph{not} allowed in this case. Use the
#' parameter \code{target_n_neighbors} to set the number of neighbors used
#' with \code{y}. If unset, \code{n_neighbors} is used. Unlike factors,
#' numeric columns are grouped into one block unless \code{target_metric}
#' specifies otherwise. For example, if you wish columns \code{a} and
#' \code{b} to be treated separately, specify
#' \code{target_metric = list(euclidean = "a", euclidean = "b")}. Otherwise,
#' the data will be effectively treated as a matrix with two columns.
#' \item Nearest neighbor data, consisting of a list of two matrices,
#' \code{idx} and \code{dist}. These represent the precalculated nearest
#' neighbor indices and distances, respectively. This
#' is the same format as that expected for precalculated data in
#' \code{nn_method}. This format assumes that the underlying data was a
#' numeric vector. Any user-supplied value of the \code{target_n_neighbors}
#' parameter is ignored in this case, because the number of columns in
#' the matrices is used for the value. Multiple nearest neighbor data using
#' different metrics can be supplied by passing a list of these lists.
#' }
#' Unlike \code{X}, all factor columns included in \code{y} are automatically
#' used.
#' @param target_n_neighbors Number of nearest neighbors to use to construct the
#' target simplicial set. Default value is \code{n_neighbors}. Applies only if
#' \code{y} is non-\code{NULL} and \code{numeric}.
#' @param target_metric The metric used to measure distance for \code{y} if
#' using supervised dimension reduction. Used only if \code{y} is numeric.
#' @param target_weight Weighting factor between data topology and target
#' topology. A value of 0.0 weights entirely on data, a value of 1.0 weights
#' entirely on target. The default of 0.5 balances the weighting equally
#' between data and target. Only applies if \code{y} is non-\code{NULL}.
#' @param pca If set to a positive integer value, reduce data to this number of
#' columns using PCA. Not applied if the distance \code{metric} is
#' \code{"hamming"}, or if the dimensions of the data are not larger than the
#' number specified (i.e. the number of rows and columns must both be larger
#' than the value of this parameter). If you have > 100 columns in a data frame or
#' matrix, reducing the number of columns in this way may substantially
#' increase the performance of the nearest neighbor search at the cost of a
#' potential decrease in accuracy. In many t-SNE applications, a value of 50
#' is recommended, although there's no guarantee that this is appropriate for
#' all settings.
#' @param pca_center If \code{TRUE}, center the columns of \code{X} before
#' carrying out PCA. For binary data, it's recommended to set this to
#' \code{FALSE}.
#' @param pca_method Method to carry out any PCA dimensionality reduction when
#' the \code{pca} parameter is specified. Allowed values are:
#' \itemize{
#' \item{\code{"irlba"}}. Uses \code{\link[irlba]{prcomp_irlba}} from the
#' \href{https://cran.r-project.org/package=irlba}{irlba} package.
#' \item{\code{"rsvd"}}. Uses 5 iterations of \code{\link[irlba]{svdr}} from
#' the \href{https://cran.r-project.org/package=irlba}{irlba} package.
#' This is likely to give much faster but potentially less accurate results
#' than using \code{"irlba"}. For the purposes of nearest neighbor
#' calculation and coordinates initialization, any loss of accuracy doesn't
#' seem to matter much.
#' \item{\code{"bigstatsr"}}. Uses \code{\link[bigstatsr]{big_randomSVD}}
#' from the \href{https://cran.r-project.org/package=bigstatsr}{bigstatsr}
#' package. The SVD methods used in \code{bigstatsr} may be faster on
#' systems without access to efficient linear algebra libraries (e.g.
#' Windows). \strong{Note}: \code{bigstatsr} is \emph{not} a dependency of
#' uwot: if you choose to use this package for PCA, you \emph{must} install
#' it yourself.
#' \item{\code{"svd"}}. Uses \code{\link[base]{svd}} for the SVD. This is
#' likely to be slow for all but the smallest datasets.
#' \item{\code{"auto"}} (the default). Uses \code{"irlba"}, unless more than
#' 50\% of the full set of singular vectors would be calculated, in which
#' case \code{"svd"} is used.
#' }
#' @param pcg_rand If \code{TRUE}, use the PCG random number generator (O'Neill,
#' 2014) during optimization. Otherwise, use the faster (but probably less
#' statistically good) Tausworthe "taus88" generator. The default is
#' \code{TRUE}.
#' @param fast_sgd If \code{TRUE}, then the following combination of parameters
#' is set: \code{pcg_rand = TRUE}, \code{n_sgd_threads = "auto"} and
#' \code{approx_pow = TRUE}. The default is \code{FALSE}. Setting this to
#' \code{TRUE} will speed up the stochastic optimization phase, but give a
#' potentially less accurate embedding, and which will not be exactly
#' reproducible even with a fixed seed. For visualization, \code{fast_sgd =
#' TRUE} will give perfectly good results. For more generic dimensionality
#' reduction, it's safer to leave \code{fast_sgd = FALSE}. If \code{fast_sgd =
#' TRUE}, then user-supplied values of \code{pcg_rand}, \code{n_sgd_threads},
#' and \code{approx_pow} are ignored.
#' @param batch If \code{TRUE}, then embedding coordinates are updated at the
#' end of each epoch rather than during the epoch. In batch mode, results are
#' reproducible with a fixed random seed even with \code{n_sgd_threads > 1},
#' at the cost of a slightly higher memory use. You may also have to modify
#' \code{learning_rate} and increase \code{n_epochs}, so whether this provides
#' a speed increase over the single-threaded optimization is likely to be
#' dataset and hardware-dependent.
#' @param ret_model If \code{TRUE}, then return extra data that can be used to
#' add new data to an existing embedding via \code{\link{umap_transform}}. The
#' embedded coordinates are returned as the list item \code{embedding}. If
#' \code{FALSE}, just return the coordinates. This parameter can be used in
#' conjunction with \code{ret_nn} and \code{ret_extra}. Note that some
#' settings are incompatible with the production of a UMAP model: external
#' neighbor data (passed via a list to \code{nn_method}), and factor columns
#' that were included via the \code{metric} parameter. In the latter case, the
#' model produced is based only on the numeric data. A transformation using
#' new data is possible, but the factor columns in the new data are ignored.
#' Note that setting \code{ret_model = TRUE} forces the use of the approximate
#' nearest neighbors method. Because small datasets would otherwise use exact
#' nearest neighbor calculations, setting \code{ret_model = TRUE} means that
#' different results may be returned for small datasets in terms of both the
#' returned nearest neighbors (if requested) and the final embedded
#' coordinates, compared to \code{ret_model = FALSE}, even if the random
#' number seed is fixed. To avoid this, explicitly set
#' \code{nn_method = "annoy"} in the \code{ret_model = FALSE} case.
#' @param ret_nn If \code{TRUE}, then in addition to the embedding, also return
#' nearest neighbor data that can be used as input to \code{nn_method} to
#' avoid the overhead of repeatedly calculating the nearest neighbors when
#' manipulating unrelated parameters (e.g. \code{min_dist}, \code{n_epochs},
#' \code{init}). See the "Value" section for the names of the list items. If
#' \code{FALSE}, just return the coordinates. Note that the nearest neighbors
#' could be sensitive to data scaling, so be wary of reusing nearest neighbor
#' data if modifying the \code{scale} parameter. This parameter can be used in
#' conjunction with \code{ret_model} and \code{ret_extra}.
#' @param ret_extra A vector indicating what extra data to return. May contain
#' any combination of the following strings:
#' \itemize{
#' \item \code{"model"} Same as setting \code{ret_model = TRUE}.
#' \item \code{"nn"} Same as setting \code{ret_nn = TRUE}.
#' \item \code{"fgraph"} the high dimensional fuzzy graph (i.e. the fuzzy
#' simplicial set of the merged local views of the input data). The graph
#' is returned as a sparse symmetric N x N matrix of class
#' \link[Matrix]{dgCMatrix-class}, where a non-zero entry (i, j) gives the
#' membership strength of the edge connecting vertex i and vertex j. This
#' can be considered analogous to the input probability (or similarity or
#' affinity) used in t-SNE and LargeVis. Note that the graph is further
#' sparsified by removing edges with sufficiently low membership strength
#' that they would not be sampled by the probabilistic edge sampling
#' employed for optimization and therefore the number of non-zero elements
#' in the matrix is dependent on \code{n_epochs}. If you are only
#' interested in the fuzzy input graph (e.g. for clustering), setting
#' \code{n_epochs = 0} will avoid any further sparsifying.
#' Be aware that setting `binary_edge_weights = TRUE` will affect this
#' graph (all non-zero edge weights will be 1).
#' \item \code{"sigma"} the normalization value for each observation in the
#' dataset when constructing the smoothed distances to each of its
#' neighbors. This gives some sense of the local density of each
#' observation in the high dimensional space: higher values of
#' \code{sigma} indicate a higher dispersion or lower density.
#' }
#' @param n_threads Number of threads to use (except during stochastic gradient
#' descent). Default is half the number of concurrent threads supported by the
#' system. For nearest neighbor search, only applies if
#' \code{nn_method = "annoy"}. If \code{n_threads > 1}, then the Annoy index
#' will be temporarily written to disk in the location determined by
#' \code{\link[base]{tempfile}}.
#' @param n_sgd_threads Number of threads to use during stochastic gradient
#' descent. If set to > 1, then be aware that if \code{batch = FALSE}, results
#' will \emph{not} be reproducible, even if \code{set.seed} is called with a
#' fixed seed before running. Set to \code{"auto"} to use the same value as
#' \code{n_threads}.
#' @param grain_size The minimum amount of work to do on each thread. If this
#' value is set high enough, then less than \code{n_threads} or
#' \code{n_sgd_threads} will be used for processing, which might give a
#' performance improvement if the overhead of thread management and context
#' switching was outweighing the improvement due to concurrent processing.
#' This should be left at default (\code{1}) and work will be spread evenly
#' over all the threads specified.
#' @param tmpdir Temporary directory to store nearest neighbor indexes during
#' nearest neighbor search. Default is \code{\link{tempdir}}. The index is
#' only written to disk if \code{n_threads > 1} and
#' \code{nn_method = "annoy"}; otherwise, this parameter is ignored.
#' @param verbose If \code{TRUE}, log details to the console.
#' @param opt_args A list of optimizer parameters, used when
#' \code{batch = TRUE}. The default optimization method used is Adam (Kingma
#' and Ba, 2014).
#' \itemize{
#' \item \code{method} The optimization method to use. Either \code{"adam"}
#' or \code{"sgd"} (stochastic gradient descent). Default: \code{"adam"}.
#' \item \code{beta1} (Adam only). The weighting parameter for the
#' exponential moving average of the first moment estimator. Effectively the
#' momentum parameter. Should be a floating point value between 0 and 1.
#' Higher values can smooth oscillatory updates in poorly-conditioned
#' situations and may allow for a larger \code{learning_rate} to be
#' specified, but too high can cause divergence. Default: \code{0.5}.
#' \item \code{beta2} (Adam only). The weighting parameter for the
#' exponential moving average of the uncentered second moment estimator.
#' Should be a floating point value between 0 and 1. Controls the degree of
#' adaptivity in the step-size. Higher values put more weight on previous
#' time steps. Default: \code{0.9}.
#' \item \code{eps} (Adam only). Intended to be a small value to prevent
#' division by zero, but in practice can also affect convergence due to its
#' interaction with \code{beta2}. Higher values reduce the effect of the
#' step-size adaptivity and bring the behavior closer to stochastic gradient
#' descent with momentum. Typical values are between 1e-8 and 1e-3. Default:
#' \code{1e-7}.
#' \item \code{alpha} The initial learning rate. Default: the value of the
#' \code{learning_rate} parameter.
#' }
#' @param epoch_callback A function which will be invoked at the end of every
#' epoch. Its signature should be: \code{(epoch, n_epochs, coords)}, where:
#' \itemize{
#' \item \code{epoch} The current epoch number (between \code{1} and
#' \code{n_epochs}).
#' \item \code{n_epochs} Number of epochs to use during the optimization of
#' the embedded coordinates.
#' \item \code{coords} The embedded coordinates as of the end of the current
#' epoch, as a matrix with dimensions (N, \code{n_components}).
#' }
#' @param binary_edge_weights If \code{TRUE} then edge weights in the input
#' graph are treated as binary (0/1) rather than real valued. This affects the
#' sampling frequency of neighbors and is the strategy used by the PaCMAP
#' method (Wang and co-workers, 2020). Practical (Böhm and co-workers, 2020)
#' and theoretical (Damrich and Hamprecht, 2021) work suggests this has little
#' effect on UMAP's performance.
#' @param dens_scale A value between 0 and 1. If > 0 then the output attempts
#' to preserve relative local density around each observation. This uses an
#' approximation to the densMAP method (Narayan and co-workers, 2021). The
#' larger the value of \code{dens_scale}, the greater the range of output
#' densities that will be used to map the input densities. This option is
#' ignored if using multiple \code{metric} blocks.
#' @param seed Integer seed to use to initialize the random number generator
#' state. Combined with \code{n_sgd_threads = 1} or \code{batch = TRUE}, this
#' should give consistent output across multiple runs on a given installation.
#' Setting this value is equivalent to calling \code{\link[base]{set.seed}},
#' but it may be more convenient in some situations than having to call a
#' separate function. The default is to not set a seed. If
#' \code{ret_model = TRUE}, the seed will be stored in the output model and
#' then used to set the seed inside \code{\link{umap_transform}}.
#' @return A matrix of optimized coordinates, or:
#' \itemize{
#' \item if \code{ret_model = TRUE} (or \code{ret_extra} contains
#' \code{"model"}), returns a list containing extra information that can be
#' used to add new data to an existing embedding via
#' \code{\link{umap_transform}}. In this case, the coordinates are available
#' in the list item \code{embedding}. \bold{NOTE}: The contents of
#' the \code{model} list should \emph{not} be considered stable or part of
#' the public API, and are purposely left undocumented.
#' \item if \code{ret_nn = TRUE} (or \code{ret_extra} contains \code{"nn"}),
#' returns the nearest neighbor data as a list called \code{nn}. This
#' contains one list for each \code{metric} calculated, itself containing a
#' matrix \code{idx} with the integer ids of the neighbors; and a matrix
#' \code{dist} with the distances. The \code{nn} list (or a sub-list) can be
#' used as input to the \code{nn_method} parameter.
#' \item if \code{ret_extra} contains \code{"fgraph"}, returns the high
#' dimensional fuzzy graph as a sparse matrix called \code{fgraph}, of type
#' \link[Matrix]{dgCMatrix-class}.
#' \item if \code{ret_extra} contains \code{"sigma"}, returns a vector of the
#' smooth knn distance normalization terms for each observation as
#' \code{"sigma"} and a vector \code{"rho"} containing the largest
#' distance to the locally connected neighbors of each observation.
#' \item if \code{ret_extra} contains \code{"localr"}, returns a vector of
#' the estimated local radii, the sum of \code{"sigma"} and \code{"rho"}.
#' }
#' The returned list contains the combined data from any combination of
#' specifying \code{ret_model}, \code{ret_nn} and \code{ret_extra}.
#' @examples
#'
#' iris30 <- iris[c(1:10, 51:60, 101:110), ]
#'
#' # Non-numeric columns are automatically removed so you can pass data frames
#' # directly in a lot of cases without pre-processing
#' iris_umap <- umap(iris30, n_neighbors = 5, learning_rate = 0.5, init = "random", n_epochs = 20)
#'
#' # Faster approximation to the gradient and return nearest neighbors
#' iris_umap <- umap(iris30, n_neighbors = 5, approx_pow = TRUE, ret_nn = TRUE, n_epochs = 20)
#'
#' # Can specify min_dist and spread parameters to control separation and size
#' # of clusters and reuse nearest neighbors for efficiency
#' nn <- iris_umap$nn
#' iris_umap <- umap(iris30, n_neighbors = 5, min_dist = 1, spread = 5, nn_method = nn, n_epochs = 20)
#'
#' # Supervised dimension reduction using the 'Species' factor column
#' iris_sumap <- umap(iris30, n_neighbors = 5, min_dist = 0.001, y = iris30$Species,
#' target_weight = 0.5, n_epochs = 20)
#'
#' # Calculate Petal and Sepal neighbors separately (uses intersection of the resulting sets):
#' iris_umap <- umap(iris30, metric = list(
#' "euclidean" = c("Sepal.Length", "Sepal.Width"),
#' "euclidean" = c("Petal.Length", "Petal.Width")
#' ))
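#'
#' # A further, purely illustrative example: also return the fuzzy graph and
#' # the smooth knn normalization terms via ret_extra
#' iris_umap <- umap(iris30, n_neighbors = 5, n_epochs = 20,
#'   ret_extra = c("fgraph", "sigma"))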
#'
#' @references
#' Belkin, M., & Niyogi, P. (2002).
#' Laplacian eigenmaps and spectral techniques for embedding and clustering.
#' In \emph{Advances in neural information processing systems}
#' (pp. 585-591).
#' \url{http://papers.nips.cc/paper/1961-laplacian-eigenmaps-and-spectral-techniques-for-embedding-and-clustering.pdf}
#'
#' Böhm, J. N., Berens, P., & Kobak, D. (2020).
#' A unifying perspective on neighbor embeddings along the attraction-repulsion spectrum.
#' \emph{arXiv preprint} \emph{arXiv:2007.08902}.
#' \url{https://arxiv.org/abs/2007.08902}
#'
#' Damrich, S., & Hamprecht, F. A. (2021).
#' On UMAP's true loss function.
#' \emph{Advances in Neural Information Processing Systems}, \emph{34}.
#' \url{https://proceedings.neurips.cc/paper/2021/hash/2de5d16682c3c35007e4e92982f1a2ba-Abstract.html}
#'
#' Kingma, D. P., & Ba, J. (2014).
#' Adam: A method for stochastic optimization.
#' \emph{arXiv preprint} \emph{arXiv}:1412.6980.
#' \url{https://arxiv.org/abs/1412.6980}
#'
#' McInnes, L., & Healy, J. (2018).
#' UMAP: Uniform Manifold Approximation and Projection for Dimension Reduction
#' \emph{arXiv preprint} \emph{arXiv}:1802.03426.
#' \url{https://arxiv.org/abs/1802.03426}
#'
#' Narayan, A., Berger, B., & Cho, H. (2021).
#' Assessing single-cell transcriptomic variability through density-preserving data visualization.
#' \emph{Nature biotechnology}, \emph{39}(6), 765-774.
#' \doi{10.1038/s41587-020-00801-7}
#'
#' O'Neill, M. E. (2014).
#' \emph{PCG: A family of simple fast space-efficient statistically good
#' algorithms for random number generation}
#' (Report No. HMC-CS-2014-0905). Harvey Mudd College.
#'
#' Tang, J., Liu, J., Zhang, M., & Mei, Q. (2016, April).
#' Visualizing large-scale and high-dimensional data.
#' In \emph{Proceedings of the 25th International Conference on World Wide Web}
#' (pp. 287-297).
#' International World Wide Web Conferences Steering Committee.
#' \url{https://arxiv.org/abs/1602.00370}
#'
#' Van der Maaten, L., & Hinton, G. (2008).
#' Visualizing data using t-SNE.
#' \emph{Journal of Machine Learning Research}, \emph{9} (2579-2605).
#' \url{https://www.jmlr.org/papers/v9/vandermaaten08a.html}
#'
#' Wang, Y., Huang, H., Rudin, C., & Shaposhnik, Y. (2021).
#' Understanding How Dimension Reduction Tools Work: An Empirical Approach to Deciphering t-SNE, UMAP, TriMap, and PaCMAP for Data Visualization.
#' \emph{Journal of Machine Learning Research}, \emph{22}(201), 1-73.
#' \url{https://www.jmlr.org/papers/v22/20-1061.html}
#'
#' @export
umap <- function(X, n_neighbors = 15, n_components = 2, metric = "euclidean",
n_epochs = NULL, learning_rate = 1, scale = FALSE,
init = "spectral", init_sdev = NULL,
spread = 1, min_dist = 0.01,
set_op_mix_ratio = 1.0, local_connectivity = 1.0,
bandwidth = 1.0, repulsion_strength = 1.0,
negative_sample_rate = 5.0, a = NULL, b = NULL,
nn_method = NULL, n_trees = 50,
search_k = 2 * n_neighbors * n_trees,
approx_pow = FALSE,
y = NULL, target_n_neighbors = n_neighbors,
target_metric = "euclidean",
target_weight = 0.5,
pca = NULL, pca_center = TRUE,
pcg_rand = TRUE,
fast_sgd = FALSE,
ret_model = FALSE, ret_nn = FALSE, ret_extra = c(),
n_threads = NULL,
n_sgd_threads = 0,
grain_size = 1,
tmpdir = tempdir(),
verbose = getOption("verbose", TRUE),
batch = FALSE,
opt_args = NULL, epoch_callback = NULL, pca_method = NULL,
binary_edge_weights = FALSE,
dens_scale = NULL,
seed = NULL) {
uwot(
X = X, n_neighbors = n_neighbors, n_components = n_components,
metric = metric, n_epochs = n_epochs, alpha = learning_rate, scale = scale,
init = init, init_sdev = init_sdev,
spread = spread, min_dist = min_dist,
set_op_mix_ratio = set_op_mix_ratio,
local_connectivity = local_connectivity, bandwidth = bandwidth,
gamma = repulsion_strength, negative_sample_rate = negative_sample_rate,
a = a, b = b, nn_method = nn_method, n_trees = n_trees,
search_k = search_k,
method = "umap", approx_pow = approx_pow,
n_threads = n_threads, n_sgd_threads = n_sgd_threads,
grain_size = grain_size,
y = y, target_n_neighbors = target_n_neighbors,
target_weight = target_weight, target_metric = target_metric,
pca = pca, pca_center = pca_center, pca_method = pca_method,
pcg_rand = pcg_rand,
fast_sgd = fast_sgd,
ret_model = ret_model || "model" %in% ret_extra,
ret_nn = ret_nn || "nn" %in% ret_extra,
ret_fgraph = "fgraph" %in% ret_extra,
ret_sigma = "sigma" %in% ret_extra,
ret_localr = "localr" %in% ret_extra,
batch = batch,
opt_args = opt_args,
epoch_callback = epoch_callback,
binary_edge_weights = binary_edge_weights,
    tmpdir = tmpdir,
verbose = verbose,
dens_scale = dens_scale,
seed = seed
)
}
#' Dimensionality Reduction Using t-Distributed UMAP (t-UMAP)
#'
#' A faster (but less flexible) version of the UMAP gradient. For more detail on
#' UMAP, see the \code{\link{umap}} function.
#'
#' By setting the UMAP curve parameters \code{a} and \code{b} to \code{1}, you
#' get back the Cauchy distribution as used in t-SNE and LargeVis. It also
#' results in a substantially simplified gradient expression. This can give
#' a speed improvement of around 50\%.
#'
#' @param X Input data. Can be a \code{\link{data.frame}}, \code{\link{matrix}},
#' \code{\link[stats]{dist}} object or \code{\link[Matrix]{sparseMatrix}}.
#' Matrix and data frames should contain one observation per row. Data frames
#' will have any non-numeric columns removed, although factor columns will be
#' used if explicitly included via \code{metric} (see the help for
#' \code{metric} for details). A sparse matrix is interpreted as a distance
#' matrix, and is assumed to be symmetric, so you can also pass in an
#' explicitly upper or lower triangular sparse matrix to save storage. There
#' must be at least \code{n_neighbors} non-zero distances for each row. Both
#' implicit and explicit zero entries are ignored. Set zero distances you want
#' to keep to an arbitrarily small non-zero value (e.g. \code{1e-10}).
#' \code{X} can also be \code{NULL} if pre-computed nearest neighbor data is
#' passed to \code{nn_method}, and \code{init} is not \code{"spca"} or
#' \code{"pca"}.
#' @param n_neighbors The size of local neighborhood (in terms of number of
#' neighboring sample points) used for manifold approximation. Larger values
#' result in more global views of the manifold, while smaller values result in
#' more local data being preserved. In general values should be in the range
#' \code{2} to \code{100}.
#' @param n_components The dimension of the space to embed into. This defaults
#' to \code{2} to provide easy visualization, but can reasonably be set to any
#' integer value in the range \code{2} to \code{100}.
#' @param metric Type of distance metric to use to find nearest neighbors. One
#' of:
#' \itemize{
#' \item \code{"euclidean"} (the default)
#' \item \code{"cosine"}
#' \item \code{"manhattan"}
#' \item \code{"hamming"}
#' \item \code{"correlation"} (a distance based on the Pearson correlation)
#' \item \code{"categorical"} (see below)
#' }
#' Only applies if \code{nn_method = "annoy"} (for \code{nn_method = "fnn"}, the
#' distance metric is always "euclidean").
#'
#' If \code{X} is a data frame or matrix, then multiple metrics can be
#' specified, by passing a list to this argument, where the name of each item in
#' the list is one of the metric names above. The value of each list item should
#' be a vector giving the names or integer ids of the columns to be included in
#' a calculation, e.g. \code{metric = list(euclidean = 1:4, manhattan = 5:10)}.
#'
#' Each metric calculation results in a separate fuzzy simplicial set, which are
#' intersected together to produce the final set. Metric names can be repeated.
#' Because non-numeric columns are removed from the data frame, it is safer to
#' use column names than integer ids.
#'
#' Factor columns can also be used by specifying the metric name
#' \code{"categorical"}. Factor columns are treated differently from numeric
#' columns and although multiple factor columns can be specified in a vector,
#' each factor column specified is processed individually. If you specify
#' a non-factor column, it will be coerced to a factor.
#'
#' For a given data block, you may override the \code{pca} and \code{pca_center}
#' arguments for that block, by providing a list with one unnamed item
#' containing the column names or ids, and then any of the \code{pca} or
#' \code{pca_center} overrides as named items, e.g. \code{metric =
#' list(euclidean = 1:4, manhattan = list(5:10, pca_center = FALSE))}. This
#' exists to allow mixed binary and real-valued data to be included and to have
#' PCA applied to both, but with centering applied only to the real-valued data
#' (it is typical not to apply centering to binary data before PCA is applied).
#' @param n_epochs Number of epochs to use during the optimization of the
#' embedded coordinates. By default, this value is set to \code{500} for
#' datasets containing 10,000 vertices or less, and \code{200} otherwise.
#' If \code{n_epochs = 0}, then coordinates determined by \code{"init"} will
#' be returned.
#' @param learning_rate Initial learning rate used in optimization of the
#' coordinates.
#' @param scale Scaling to apply to \code{X} if it is a data frame or matrix:
#' \itemize{
#' \item{\code{"none"} or \code{FALSE} or \code{NULL}} No scaling.
#' \item{\code{"Z"} or \code{"scale"} or \code{TRUE}} Scale each column to
#' zero mean and variance 1.
#' \item{\code{"maxabs"}} Center each column to mean 0, then divide each
#' element by the maximum absolute value over the entire matrix.
#' \item{\code{"range"}} Range scale the entire matrix, so the smallest
#' element is 0 and the largest is 1.
#' \item{\code{"colrange"}} Scale each column in the range (0,1).
#' }
#' For t-UMAP, the default is \code{"none"}.
#' @param init Type of initialization for the coordinates. Options are:
#' \itemize{
#' \item \code{"spectral"} Spectral embedding using the normalized Laplacian
#' of the fuzzy 1-skeleton, with Gaussian noise added.
#' \item \code{"normlaplacian"}. Spectral embedding using the normalized
#' Laplacian of the fuzzy 1-skeleton, without noise.
#' \item \code{"random"}. Coordinates assigned using a uniform random
#' distribution between -10 and 10.
#' \item \code{"lvrandom"}. Coordinates assigned using a Gaussian
#' distribution with standard deviation 1e-4, as used in LargeVis
#' (Tang et al., 2016) and t-SNE.
#' \item \code{"laplacian"}. Spectral embedding using the Laplacian Eigenmap
#' (Belkin and Niyogi, 2002).
#' \item \code{"pca"}. The first two principal components from PCA of
#' \code{X} if \code{X} is a data frame, and from a 2-dimensional classical
#' MDS if \code{X} is of class \code{"dist"}.
#' \item \code{"spca"}. Like \code{"pca"}, but each dimension is then scaled
#' so the standard deviation is 1e-4, to give a distribution similar to that
#' used in t-SNE. This is an alias for \code{init = "pca", init_sdev =
#' 1e-4}.
#' \item \code{"agspectral"} An "approximate global" modification of
#'   \code{"spectral"} which sets all edges in the graph to a value of 1, and then
#' sets a random number of edges (\code{negative_sample_rate} edges per
#' vertex) to 0.1, to approximate the effect of non-local affinities.
#' \item A matrix of initial coordinates.
#' }
#' For spectral initializations, (\code{"spectral"}, \code{"normlaplacian"},
#' \code{"laplacian"}, \code{"agspectral"}), if more than one connected
#' component is identified, no spectral initialization is attempted. Instead
#' a PCA-based initialization is attempted. If \code{verbose = TRUE} the
#' number of connected components are logged to the console. The existence of
#' multiple connected components implies that a global view of the data cannot
#' be attained with this initialization. Increasing the value of
#' \code{n_neighbors} may help.
#' @param init_sdev If non-\code{NULL}, scales each dimension of the initialized
#' coordinates (including any user-supplied matrix) to this standard
#' deviation. By default no scaling is carried out, except when \code{init =
#' "spca"}, in which case the value is \code{0.0001}. Scaling the input may
#' help if the unscaled versions result in initial coordinates with large
#' inter-point distances or outliers. This usually results in small gradients
#' during optimization and very little progress being made to the layout.
#' Shrinking the initial embedding by rescaling can help under these
#' circumstances. Scaling the result of \code{init = "pca"} is usually
#'   recommended and \code{init = "spca"} exists as an alias for \code{init = "pca",
#'   init_sdev = 1e-4}, but for the spectral initializations the scaled versions
#' usually aren't necessary unless you are using a large value of
#' \code{n_neighbors} (e.g. \code{n_neighbors = 150} or higher). For
#' compatibility with recent versions of the Python UMAP package, if you are
#' using \code{init = "spectral"}, then you should also set
#' \code{init_sdev = "range"}, which will range scale each of the columns
#' containing the initial data between 0-10. This is not set by default to
#' maintain backwards compatibility with previous versions of uwot.
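#'   As a hedged, purely illustrative sketch of those Python-compatible
#'   settings: \code{tumap(X, init = "spectral", init_sdev = "range")}.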
#' @param set_op_mix_ratio Interpolate between (fuzzy) union and intersection as
#' the set operation used to combine local fuzzy simplicial sets to obtain a
#' global fuzzy simplicial sets. Both fuzzy set operations use the product
#' t-norm. The value of this parameter should be between \code{0.0} and
#' \code{1.0}; a value of \code{1.0} will use a pure fuzzy union, while
#' \code{0.0} will use a pure fuzzy intersection.
#' @param local_connectivity The local connectivity required -- i.e. the number
#' of nearest neighbors that should be assumed to be connected at a local
#' level. The higher this value the more connected the manifold becomes
#'   locally. In practice this should not be more than the local intrinsic
#' dimension of the manifold.
#' @param bandwidth The effective bandwidth of the kernel if we view the
#' algorithm as similar to Laplacian Eigenmaps. Larger values induce more
#' connectivity and a more global view of the data, smaller values concentrate
#' more locally.
#' @param repulsion_strength Weighting applied to negative samples in low
#' dimensional embedding optimization. Values higher than one will result in
#' greater weight being given to negative samples.
#' @param negative_sample_rate The number of negative edge/1-simplex samples to
#' use per positive edge/1-simplex sample in optimizing the low dimensional
#' embedding.
#' @param nn_method Method for finding nearest neighbors. Options are:
#' \itemize{
#' \item \code{"fnn"}. Use exact nearest neighbors via the
#' \href{https://cran.r-project.org/package=FNN}{FNN} package.
#' \item \code{"annoy"} Use approximate nearest neighbors via the
#' \href{https://cran.r-project.org/package=RcppAnnoy}{RcppAnnoy} package.
#' }
#' By default, if \code{X} has fewer than 4,096 vertices, the exact nearest
#' neighbors are found. Otherwise, approximate nearest neighbors are used.
#' You may also pass pre-calculated nearest neighbor data to this argument. It
#' must be one of two formats, either a list consisting of two elements:
#' \itemize{
#' \item \code{"idx"}. A \code{n_vertices x n_neighbors} matrix
#' containing the integer indexes of the nearest neighbors in \code{X}. Each
#' vertex is considered to be its own nearest neighbor, i.e.
#' \code{idx[, 1] == 1:n_vertices}.
#' \item \code{"dist"}. A \code{n_vertices x n_neighbors} matrix
#' containing the distances of the nearest neighbors.
#' }
#' or a sparse distance matrix of type \code{dgCMatrix}, with dimensions
#' \code{n_vertices x n_vertices}. Distances should be arranged by column,
#' i.e. a non-zero entry in row \code{j} of the \code{i}th column indicates
#' that the \code{j}th observation in \code{X} is a nearest neighbor of the
#' \code{i}th observation with the distance given by the value of that
#' element.
#' The \code{n_neighbors} parameter is ignored when using precomputed
#' nearest neighbor data. If using the sparse distance matrix input, each
#' column can contain a different number of neighbors.
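#'   As a hedged sketch (assuming the default \code{"euclidean"} metric, so the
#'   returned sub-list is named accordingly), neighbor data from a previous run
#'   can be reused via e.g. \code{res <- tumap(iris, ret_nn = TRUE)} followed
#'   by \code{tumap(iris, nn_method = res$nn$euclidean)}.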
#' @param n_trees Number of trees to build when constructing the nearest
#' neighbor index. The more trees specified, the larger the index, but the
#' better the results. With \code{search_k}, determines the accuracy of the
#' Annoy nearest neighbor search. Only used if the \code{nn_method} is
#' \code{"annoy"}. Sensible values are between \code{10} to \code{100}.
#' @param search_k Number of nodes to search during the neighbor retrieval. The
#'   larger \code{search_k}, the more accurate the results, but the longer the
#'   search takes.
#' With \code{n_trees}, determines the accuracy of the Annoy nearest neighbor
#' search. Only used if the \code{nn_method} is \code{"annoy"}.
#' @param y Optional target data for supervised dimension reduction. Can be a
#' vector, matrix or data frame. Use the \code{target_metric} parameter to
#' specify the metrics to use, using the same syntax as \code{metric}. Usually
#' either a single numeric or factor column is used, but more complex formats
#' are possible. The following types are allowed:
#' \itemize{
#' \item Factor columns with the same length as \code{X}. \code{NA} is
#' allowed for any observation with an unknown level, in which case
#' UMAP operates as a form of semi-supervised learning. Each column is
#' treated separately.
#' \item Numeric data. \code{NA} is \emph{not} allowed in this case. Use the
#' parameter \code{target_n_neighbors} to set the number of neighbors used
#' with \code{y}. If unset, \code{n_neighbors} is used. Unlike factors,
#' numeric columns are grouped into one block unless \code{target_metric}
#' specifies otherwise. For example, if you wish columns \code{a} and
#' \code{b} to be treated separately, specify
#' \code{target_metric = list(euclidean = "a", euclidean = "b")}. Otherwise,
#' the data will be effectively treated as a matrix with two columns.
#' \item Nearest neighbor data, consisting of a list of two matrices,
#' \code{idx} and \code{dist}. These represent the precalculated nearest
#' neighbor indices and distances, respectively. This
#' is the same format as that expected for precalculated data in
#' \code{nn_method}. This format assumes that the underlying data was a
#' numeric vector. Any user-supplied value of the \code{target_n_neighbors}
#'     parameter is ignored in this case, because the number of columns in
#' the matrices is used for the value. Multiple nearest neighbor data using
#' different metrics can be supplied by passing a list of these lists.
#' }
#' Unlike \code{X}, all factor columns included in \code{y} are automatically
#' used.
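#'   As a purely illustrative sketch of supervision with a numeric target:
#'   \code{tumap(iris[, 2:4], y = iris$Sepal.Length, target_weight = 0.3)}.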
#' @param target_n_neighbors Number of nearest neighbors to use to construct the
#' target simplicial set. Default value is \code{n_neighbors}. Applies only if
#' \code{y} is non-\code{NULL} and \code{numeric}.
#' @param target_metric The metric used to measure distance for \code{y} if
#' using supervised dimension reduction. Used only if \code{y} is numeric.
#' @param target_weight Weighting factor between data topology and target
#' topology. A value of 0.0 weights entirely on data, a value of 1.0 weights
#' entirely on target. The default of 0.5 balances the weighting equally
#' between data and target. Only applies if \code{y} is non-\code{NULL}.
#' @param pca If set to a positive integer value, reduce data to this number of
#'   columns using PCA. This is not applied if the distance \code{metric} is
#'   \code{"hamming"}, or if the dimensions of the data are not larger than the
#' number specified (i.e. number of rows and columns must be larger than the
#' value of this parameter). If you have > 100 columns in a data frame or
#' matrix, reducing the number of columns in this way may substantially
#' increase the performance of the nearest neighbor search at the cost of a
#' potential decrease in accuracy. In many t-SNE applications, a value of 50
#' is recommended, although there's no guarantee that this is appropriate for
#' all settings.
#' @param pca_center If \code{TRUE}, center the columns of \code{X} before
#' carrying out PCA. For binary data, it's recommended to set this to
#' \code{FALSE}.
#' @param pca_method Method to carry out any PCA dimensionality reduction when
#' the \code{pca} parameter is specified. Allowed values are:
#' \itemize{
#' \item{\code{"irlba"}}. Uses \code{\link[irlba]{prcomp_irlba}} from the
#' \href{https://cran.r-project.org/package=irlba}{irlba} package.
#' \item{\code{"rsvd"}}. Uses 5 iterations of \code{\link[irlba]{svdr}} from
#' the \href{https://cran.r-project.org/package=irlba}{irlba} package.
#' This is likely to give much faster but potentially less accurate results
#' than using \code{"irlba"}. For the purposes of nearest neighbor
#' calculation and coordinates initialization, any loss of accuracy doesn't
#' seem to matter much.
#' \item{\code{"bigstatsr"}}. Uses \code{\link[bigstatsr]{big_randomSVD}}
#' from the \href{https://cran.r-project.org/package=bigstatsr}{bigstatsr}
#' package. The SVD methods used in \code{bigstatsr} may be faster on
#' systems without access to efficient linear algebra libraries (e.g.
#' Windows). \strong{Note}: \code{bigstatsr} is \emph{not} a dependency of
#' uwot: if you choose to use this package for PCA, you \emph{must} install
#' it yourself.
#' \item{\code{"svd"}}. Uses \code{\link[base]{svd}} for the SVD. This is
#' likely to be slow for all but the smallest datasets.
#' \item{\code{"auto"}} (the default). Uses \code{"irlba"}, unless more than
#'     50\% of the full set of singular vectors would be calculated, in which
#' case \code{"svd"} is used.
#' }
#' @param pcg_rand If \code{TRUE}, use the PCG random number generator (O'Neill,
#' 2014) during optimization. Otherwise, use the faster (but probably less
#' statistically good) Tausworthe "taus88" generator. The default is
#' \code{TRUE}.
#' @param fast_sgd If \code{TRUE}, then the following combination of parameters
#' is set: \code{pcg_rand = TRUE} and \code{n_sgd_threads = "auto"}. The
#' default is \code{FALSE}. Setting this to \code{TRUE} will speed up the
#' stochastic optimization phase, but give a potentially less accurate
#' embedding, and which will not be exactly reproducible even with a fixed
#' seed. For visualization, \code{fast_sgd = TRUE} will give perfectly good
#' results. For more generic dimensionality reduction, it's safer to leave
#' \code{fast_sgd = FALSE}. If \code{fast_sgd = TRUE}, then user-supplied
#'   values of \code{pcg_rand} and \code{n_sgd_threads} are ignored.
#' @param batch If \code{TRUE}, then embedding coordinates are updated at the
#' end of each epoch rather than during the epoch. In batch mode, results are
#' reproducible with a fixed random seed even with \code{n_sgd_threads > 1},
#' at the cost of a slightly higher memory use. You may also have to modify
#' \code{learning_rate} and increase \code{n_epochs}, so whether this provides
#' a speed increase over the single-threaded optimization is likely to be
#' dataset and hardware-dependent.
#' @param ret_model If \code{TRUE}, then return extra data that can be used to
#' add new data to an existing embedding via \code{\link{umap_transform}}. The
#' embedded coordinates are returned as the list item \code{embedding}. If
#' \code{FALSE}, just return the coordinates. This parameter can be used in
#' conjunction with \code{ret_nn} and \code{ret_extra}. Note that some
#' settings are incompatible with the production of a UMAP model: external
#' neighbor data (passed via a list to \code{nn_method}), and factor columns
#' that were included via the \code{metric} parameter. In the latter case, the
#' model produced is based only on the numeric data. A transformation using
#' new data is possible, but the factor columns in the new data are ignored.
#' Note that setting \code{ret_model = TRUE} forces the use of the approximate
#' nearest neighbors method. Because small datasets would otherwise use exact
#' nearest neighbor calculations, setting \code{ret_model = TRUE} means that
#' different results may be returned for small datasets in terms of both the
#' returned nearest neighbors (if requested) and the final embedded
#' coordinates, compared to \code{ret_model = FALSE}, even if the random
#' number seed is fixed. To avoid this, explicitly set
#' \code{nn_method = "annoy"} in the \code{ret_model = FALSE} case.
#' @param ret_nn If \code{TRUE}, then in addition to the embedding, also return
#' nearest neighbor data that can be used as input to \code{nn_method} to
#' avoid the overhead of repeatedly calculating the nearest neighbors when
#' manipulating unrelated parameters (e.g. \code{min_dist}, \code{n_epochs},
#' \code{init}). See the "Value" section for the names of the list items. If
#' \code{FALSE}, just return the coordinates. Note that the nearest neighbors
#' could be sensitive to data scaling, so be wary of reusing nearest neighbor
#' data if modifying the \code{scale} parameter. This parameter can be used in
#' conjunction with \code{ret_model} and \code{ret_extra}.
#' @param ret_extra A vector indicating what extra data to return. May contain
#' any combination of the following strings:
#' \itemize{
#' \item \code{"model"} Same as setting \code{ret_model = TRUE}.
#' \item \code{"nn"} Same as setting \code{ret_nn = TRUE}.
#' \item \code{"fgraph"} the high dimensional fuzzy graph (i.e. the fuzzy
#' simplicial set of the merged local views of the input data). The graph
#' is returned as a sparse symmetric N x N matrix of class
#' \link[Matrix]{dgCMatrix-class}, where a non-zero entry (i, j) gives the
#' membership strength of the edge connecting vertex i and vertex j. This
#' can be considered analogous to the input probability (or similarity or
#' affinity) used in t-SNE and LargeVis. Note that the graph is further
#' sparsified by removing edges with sufficiently low membership strength
#' that they would not be sampled by the probabilistic edge sampling
#' employed for optimization and therefore the number of non-zero elements
#' in the matrix is dependent on \code{n_epochs}. If you are only
#' interested in the fuzzy input graph (e.g. for clustering), setting
#' \code{n_epochs = 0} will avoid any further sparsifying. Be aware that
#' setting \code{binary_edge_weights = TRUE} will affect this graph (all
#' non-zero edge weights will be 1).
#' \item \code{"sigma"} the normalization value for each observation in the
#' dataset when constructing the smoothed distances to each of its
#' neighbors. This gives some sense of the local density of each
#' observation in the high dimensional space: higher values of
#' \code{sigma} indicate a higher dispersion or lower density.
#' }
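#'   As a hedged, purely illustrative sketch of the clustering use case
#'   mentioned above: \code{res <- tumap(iris, ret_extra = c("fgraph"),
#'   n_epochs = 0)}, after which \code{res$fgraph} holds the fuzzy graph
#'   without the extra sparsification.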
#' @param n_threads Number of threads to use (except during stochastic gradient
#' descent). Default is half the number of concurrent threads supported by the
#' system. For nearest neighbor search, only applies if
#' \code{nn_method = "annoy"}. If \code{n_threads > 1}, then the Annoy index
#' will be temporarily written to disk in the location determined by
#' \code{\link[base]{tempfile}}.
#' @param n_sgd_threads Number of threads to use during stochastic gradient
#' descent. If set to > 1, then be aware that if \code{batch = FALSE}, results
#' will \emph{not} be reproducible, even if \code{set.seed} is called with a
#' fixed seed before running. Set to \code{"auto"} to use the same value as
#' \code{n_threads}.
#' @param grain_size The minimum amount of work to do on each thread. If this
#' value is set high enough, then less than \code{n_threads} or
#' \code{n_sgd_threads} will be used for processing, which might give a
#' performance improvement if the overhead of thread management and context
#'   switching outweighs the improvement due to concurrent processing.
#' This should be left at default (\code{1}) and work will be spread evenly
#' over all the threads specified.
#' @param tmpdir Temporary directory to store nearest neighbor indexes during
#' nearest neighbor search. Default is \code{\link{tempdir}}. The index is
#' only written to disk if \code{n_threads > 1} and
#' \code{nn_method = "annoy"}; otherwise, this parameter is ignored.
#' @param verbose If \code{TRUE}, log details to the console.
#' @param opt_args A list of optimizer parameters, used when
#' \code{batch = TRUE}. The default optimization method used is Adam (Kingma
#' and Ba, 2014).
#' \itemize{
#' \item \code{method} The optimization method to use. Either \code{"adam"}
#' or \code{"sgd"} (stochastic gradient descent). Default: \code{"adam"}.
#' \item \code{beta1} (Adam only). The weighting parameter for the
#' exponential moving average of the first moment estimator. Effectively the
#' momentum parameter. Should be a floating point value between 0 and 1.
#' Higher values can smooth oscillatory updates in poorly-conditioned
#' situations and may allow for a larger \code{learning_rate} to be
#' specified, but too high can cause divergence. Default: \code{0.5}.
#' \item \code{beta2} (Adam only). The weighting parameter for the
#' exponential moving average of the uncentered second moment estimator.
#' Should be a floating point value between 0 and 1. Controls the degree of
#' adaptivity in the step-size. Higher values put more weight on previous
#' time steps. Default: \code{0.9}.
#' \item \code{eps} (Adam only). Intended to be a small value to prevent
#' division by zero, but in practice can also affect convergence due to its
#' interaction with \code{beta2}. Higher values reduce the effect of the
#' step-size adaptivity and bring the behavior closer to stochastic gradient
#' descent with momentum. Typical values are between 1e-8 and 1e-3. Default:
#' \code{1e-7}.
#' \item \code{alpha} The initial learning rate. Default: the value of the
#' \code{learning_rate} parameter.
#' }
#' @param epoch_callback A function which will be invoked at the end of every
#' epoch. Its signature should be: \code{(epoch, n_epochs, coords)}, where:
#' \itemize{
#' \item \code{epoch} The current epoch number (between \code{1} and
#' \code{n_epochs}).
#' \item \code{n_epochs} Number of epochs to use during the optimization of
#' the embedded coordinates.
#' \item \code{coords} The embedded coordinates as of the end of the current
#' epoch, as a matrix with dimensions (N, \code{n_components}).
#' }
#' @param binary_edge_weights If \code{TRUE} then edge weights in the input
#' graph are treated as binary (0/1) rather than real valued. This affects the
#' sampling frequency of neighbors and is the strategy used by the PaCMAP
#' method (Wang and co-workers, 2020). Practical (Böhm and co-workers, 2020)
#' and theoretical (Damrich and Hamprecht, 2021) work suggests this has little
#' effect on UMAP's performance.
#' @param seed Integer seed to use to initialize the random number generator
#' state. Combined with \code{n_sgd_threads = 1} or \code{batch = TRUE}, this
#' should give consistent output across multiple runs on a given installation.
#' Setting this value is equivalent to calling \code{\link[base]{set.seed}},
#' but it may be more convenient in some situations than having to call a
#' separate function. The default is to not set a seed. If
#' \code{ret_model = TRUE}, the seed will be stored in the output model and
#' then used to set the seed inside \code{\link{umap_transform}}.
#' @return A matrix of optimized coordinates, or:
#' \itemize{
#' \item if \code{ret_model = TRUE} (or \code{ret_extra} contains
#' \code{"model"}), returns a list containing extra information that can be
#' used to add new data to an existing embedding via
#' \code{\link{umap_transform}}. In this case, the coordinates are available
#' in the list item \code{embedding}. \bold{NOTE}: The contents of
#' the \code{model} list should \emph{not} be considered stable or part of
#' the public API, and are purposely left undocumented.
#' \item if \code{ret_nn = TRUE} (or \code{ret_extra} contains \code{"nn"}),
#' returns the nearest neighbor data as a list called \code{nn}. This
#' contains one list for each \code{metric} calculated, itself containing a
#' matrix \code{idx} with the integer ids of the neighbors; and a matrix
#' \code{dist} with the distances. The \code{nn} list (or a sub-list) can be
#' used as input to the \code{nn_method} parameter.
#'   \item if \code{ret_extra} contains \code{"fgraph"}, returns the high
#' dimensional fuzzy graph as a sparse matrix called \code{fgraph}, of type
#' \link[Matrix]{dgCMatrix-class}.
#' \item if \code{ret_extra} contains \code{"sigma"}, returns a vector of the
#' smooth knn distance normalization terms for each observation as
#' \code{"sigma"} and a vector \code{"rho"} containing the largest
#' distance to the locally connected neighbors of each observation.
#' \item if \code{ret_extra} contains \code{"localr"}, returns a vector of
#' the estimated local radii, the sum of \code{"sigma"} and \code{"rho"}.
#' }
#' The returned list contains the combined data from any combination of
#' specifying \code{ret_model}, \code{ret_nn} and \code{ret_extra}.
#' @examples
#' iris_tumap <- tumap(iris, n_neighbors = 50, learning_rate = 0.5)
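#'
#' # A further, purely illustrative example: batch mode updates coordinates at
#' # the end of each epoch and is reproducible with a fixed seed even when
#' # using multiple SGD threads
#' iris_tumap <- tumap(iris, n_neighbors = 50, batch = TRUE, n_epochs = 20)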
#' @export
tumap <- function(X, n_neighbors = 15, n_components = 2, metric = "euclidean",
n_epochs = NULL,
learning_rate = 1, scale = FALSE,
init = "spectral", init_sdev = NULL,
set_op_mix_ratio = 1.0, local_connectivity = 1.0,
bandwidth = 1.0, repulsion_strength = 1.0,
negative_sample_rate = 5.0,
nn_method = NULL, n_trees = 50,
search_k = 2 * n_neighbors * n_trees,
n_threads = NULL,
n_sgd_threads = 0,
grain_size = 1,
y = NULL, target_n_neighbors = n_neighbors,
target_metric = "euclidean",
target_weight = 0.5,
pca = NULL, pca_center = TRUE,
pcg_rand = TRUE,
fast_sgd = FALSE,
ret_model = FALSE, ret_nn = FALSE, ret_extra = c(),
tmpdir = tempdir(),
verbose = getOption("verbose", TRUE),
batch = FALSE,
opt_args = NULL, epoch_callback = NULL,
pca_method = NULL,
binary_edge_weights = FALSE,
seed = NULL) {
uwot(
X = X, n_neighbors = n_neighbors, n_components = n_components,
metric = metric,
n_epochs = n_epochs, alpha = learning_rate, scale = scale,
init = init, init_sdev = init_sdev,
spread = NULL, min_dist = NULL, set_op_mix_ratio = set_op_mix_ratio,
local_connectivity = local_connectivity, bandwidth = bandwidth,
gamma = repulsion_strength, negative_sample_rate = negative_sample_rate,
a = NULL, b = NULL, nn_method = nn_method, n_trees = n_trees,
search_k = search_k,
method = "tumap",
n_threads = n_threads, n_sgd_threads = n_sgd_threads,
grain_size = grain_size,
y = y, target_n_neighbors = target_n_neighbors,
target_weight = target_weight, target_metric = target_metric,
pca = pca, pca_center = pca_center, pca_method = pca_method,
pcg_rand = pcg_rand,
fast_sgd = fast_sgd,
ret_model = ret_model || "model" %in% ret_extra,
ret_nn = ret_nn || "nn" %in% ret_extra,
ret_fgraph = "fgraph" %in% ret_extra,
ret_sigma = "sigma" %in% ret_extra,
ret_localr = "localr" %in% ret_extra,
batch = batch,
opt_args = opt_args,
epoch_callback = epoch_callback,
binary_edge_weights = binary_edge_weights,
seed = seed,
tmpdir = tmpdir,
verbose = verbose
)
}
#' Dimensionality Reduction with a LargeVis-like method
#'
#' Carry out dimensionality reduction of a dataset using a method similar to
#' LargeVis (Tang et al., 2016).
#'
#' \code{lvish} differs from the official LargeVis implementation in the
#' following:
#'
#' \itemize{
#' \item Only the nearest-neighbor index search phase is multi-threaded.
#' \item Matrix input data is not normalized.
#' \item The \code{n_trees} parameter cannot be dynamically chosen based on
#' data set size.
#' \item Nearest neighbor results are not refined via the
#' neighbor-of-my-neighbor method. The \code{search_k} parameter is twice
#'   as large as the default to compensate.
#' \item Gradient values are clipped to \code{4.0} rather than \code{5.0}.
#'   \item Negative edges are generated by uniform sampling of vertices rather
#' than their degree ^ 0.75.
#' \item The default number of samples is much reduced. The default number of
#' epochs, \code{n_epochs}, is set to \code{5000}, much larger than for
#' \code{\link{umap}}, but may need to be increased further depending on your
#' dataset. Using \code{init = "spectral"} can help.
#' }
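#'
#' As a hedged, purely illustrative sketch of a basic call:
#' \code{lvish(iris, perplexity = 25, n_epochs = 200, init = "spectral")}.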
#'
#' @param X Input data. Can be a \code{\link{data.frame}}, \code{\link{matrix}},
#' \code{\link[stats]{dist}} object or \code{\link[Matrix]{sparseMatrix}}.
#' Matrix and data frames should contain one observation per row. Data frames
#' will have any non-numeric columns removed, although factor columns will be
#' used if explicitly included via \code{metric} (see the help for
#' \code{metric} for details). A sparse matrix is interpreted as a distance
#' matrix, and is assumed to be symmetric, so you can also pass in an
#' explicitly upper or lower triangular sparse matrix to save storage. There
#' must be at least \code{n_neighbors} non-zero distances for each row. Both
#' implicit and explicit zero entries are ignored. Set zero distances you want
#' to keep to an arbitrarily small non-zero value (e.g. \code{1e-10}).
#' \code{X} can also be \code{NULL} if pre-computed nearest neighbor data is
#' passed to \code{nn_method}, and \code{init} is not \code{"spca"} or
#' \code{"pca"}.
#' @param perplexity Controls the size of the local neighborhood used for
#' manifold approximation. This is analogous to \code{n_neighbors} in
#' \code{\link{umap}}. Change this, rather than \code{n_neighbors}.
#' @param n_neighbors The number of neighbors to use when calculating the
#' \code{perplexity}. Usually set to three times the value of the
#' \code{perplexity}. Must be at least as large as \code{perplexity}.
#' @param n_components The dimension of the space to embed into. This defaults
#' to \code{2} to provide easy visualization, but can reasonably be set to any
#' integer value in the range \code{2} to \code{100}.
#' @param metric Type of distance metric to use to find nearest neighbors. One
#' of:
#' \itemize{
#' \item \code{"euclidean"} (the default)
#' \item \code{"cosine"}
#' \item \code{"manhattan"}
#' \item \code{"hamming"}
#' \item \code{"correlation"} (a distance based on the Pearson correlation)
#' \item \code{"categorical"} (see below)
#' }
#' Only applies if \code{nn_method = "annoy"} (for \code{nn_method = "fnn"}, the
#' distance metric is always "euclidean").
#'
#' If \code{X} is a data frame or matrix, then multiple metrics can be
#' specified, by passing a list to this argument, where the name of each item in
#' the list is one of the metric names above. The value of each list item should
#' be a vector giving the names or integer ids of the columns to be included in
#' a calculation, e.g. \code{metric = list(euclidean = 1:4, manhattan = 5:10)}.
#'
#' Each metric calculation results in a separate fuzzy simplicial set, which are
#' intersected together to produce the final set. Metric names can be repeated.
#' Because non-numeric columns are removed from the data frame, it is safer to
#' use column names than integer ids.
#'
#' Factor columns can also be used by specifying the metric name
#' \code{"categorical"}. Factor columns are treated differently from numeric
#' columns and although multiple factor columns can be specified in a vector,
#' each factor column specified is processed individually. If you specify
#' a non-factor column, it will be coerced to a factor.
#'
#' For a given data block, you may override the \code{pca} and \code{pca_center}
#' arguments for that block, by providing a list with one unnamed item
#' containing the column names or ids, and then any of the \code{pca} or
#' \code{pca_center} overrides as named items, e.g. \code{metric =
#' list(euclidean = 1:4, manhattan = list(5:10, pca_center = FALSE))}. This
#' exists to allow mixed binary and real-valued data to be included and to have
#' PCA applied to both, but with centering applied only to the real-valued data
#' (it is typical not to apply centering to binary data before PCA is applied).
#' @param n_epochs Number of epochs to use during the optimization of the
#' embedded coordinates. The default is to calculate the number of epochs
#' dynamically based on dataset size, to give the same number of edge samples
#' as the LargeVis defaults. This is usually substantially larger than the
#' UMAP defaults. If \code{n_epochs = 0}, then coordinates determined by
#' \code{"init"} will be returned.
#' @param learning_rate Initial learning rate used in optimization of the
#' coordinates.
#' @param scale Scaling to apply to \code{X} if it is a data frame or matrix:
#' \itemize{
#' \item{\code{"none"} or \code{FALSE} or \code{NULL}} No scaling.
#' \item{\code{"Z"} or \code{"scale"} or \code{TRUE}} Scale each column to
#' zero mean and variance 1.
#' \item{\code{"maxabs"}} Center each column to mean 0, then divide each
#' element by the maximum absolute value over the entire matrix.
#' \item{\code{"range"}} Range scale the entire matrix, so the smallest
#' element is 0 and the largest is 1.
#' \item{\code{"colrange"}} Scale each column in the range (0,1).
#' }
#' For lvish, the default is \code{"maxabs"}, for consistency with LargeVis.
#' @param init Type of initialization for the coordinates. Options are:
#' \itemize{
#' \item \code{"spectral"} Spectral embedding using the normalized Laplacian
#' of the fuzzy 1-skeleton, with Gaussian noise added.
#' \item \code{"normlaplacian"}. Spectral embedding using the normalized
#' Laplacian of the fuzzy 1-skeleton, without noise.
#' \item \code{"random"}. Coordinates assigned using a uniform random
#' distribution between -10 and 10.
#' \item \code{"lvrandom"}. Coordinates assigned using a Gaussian
#' distribution with standard deviation 1e-4, as used in LargeVis
#' (Tang et al., 2016) and t-SNE.
#' \item \code{"laplacian"}. Spectral embedding using the Laplacian Eigenmap
#' (Belkin and Niyogi, 2002).
#' \item \code{"pca"}. The first two principal components from PCA of
#' \code{X} if \code{X} is a data frame, and from a 2-dimensional classical
#' MDS if \code{X} is of class \code{"dist"}.
#' \item \code{"spca"}. Like \code{"pca"}, but each dimension is then scaled
#' so the standard deviation is 1e-4, to give a distribution similar to that
#' used in t-SNE and LargeVis. This is an alias for \code{init = "pca",
#' init_sdev = 1e-4}.
#' \item \code{"agspectral"} An "approximate global" modification of
#'   \code{"spectral"} which sets all edges in the graph to a value of 1, and then
#' sets a random number of edges (\code{negative_sample_rate} edges per
#' vertex) to 0.1, to approximate the effect of non-local affinities.
#' \item A matrix of initial coordinates.
#' }
#' For spectral initializations, (\code{"spectral"}, \code{"normlaplacian"},
#' \code{"laplacian"}, \code{"agspectral"}), if more than one connected
#' component is identified, no spectral initialization is attempted. Instead
#' a PCA-based initialization is attempted. If \code{verbose = TRUE} the
#' number of connected components are logged to the console. The existence of
#' multiple connected components implies that a global view of the data cannot
#' be attained with this initialization. Increasing the value of
#' \code{n_neighbors} may help.
#' @param init_sdev If non-\code{NULL}, scales each dimension of the initialized
#' coordinates (including any user-supplied matrix) to this standard
#' deviation. By default no scaling is carried out, except when \code{init =
#' "spca"}, in which case the value is \code{0.0001}. Scaling the input may
#' help if the unscaled versions result in initial coordinates with large
#' inter-point distances or outliers. This usually results in small gradients
#' during optimization and very little progress being made to the layout.
#' Shrinking the initial embedding by rescaling can help under these
#' circumstances. Scaling the result of \code{init = "pca"} is usually
#'   recommended and \code{init = "spca"} exists as an alias for \code{init = "pca",
#'   init_sdev = 1e-4}, but for the spectral initializations the scaled versions
#' usually aren't necessary unless you are using a large value of
#' \code{n_neighbors} (e.g. \code{n_neighbors = 150} or higher). For
#' compatibility with recent versions of the Python UMAP package, if you are
#' using \code{init = "spectral"}, then you should also set
#' \code{init_sdev = "range"}, which will range scale each of the columns
#' containing the initial data between 0-10. This is not set by default to
#' maintain backwards compatibility with previous versions of uwot.
#' @param repulsion_strength Weighting applied to negative samples in low
#' dimensional embedding optimization. Values higher than one will result in
#' greater weight being given to negative samples.
#' @param negative_sample_rate The number of negative edge/1-simplex samples to
#' use per positive edge/1-simplex sample in optimizing the low dimensional
#' embedding.
#' @param nn_method Method for finding nearest neighbors. Options are:
#' \itemize{
#' \item \code{"fnn"}. Use exact nearest neighbors via the
#' \href{https://cran.r-project.org/package=FNN}{FNN} package.
#' \item \code{"annoy"} Use approximate nearest neighbors via the
#' \href{https://cran.r-project.org/package=RcppAnnoy}{RcppAnnoy} package.
#' }
#' By default, if \code{X} has fewer than 4,096 vertices, the exact nearest
#' neighbors are found. Otherwise, approximate nearest neighbors are used.
#' You may also pass precalculated nearest neighbor data to this argument. It
#' must be a list consisting of two elements:
#' \itemize{
#' \item \code{"idx"}. A \code{n_vertices x n_neighbors} matrix
#' containing the integer indexes of the nearest neighbors in \code{X}. Each
#' vertex is considered to be its own nearest neighbor, i.e.
#' \code{idx[, 1] == 1:n_vertices}.
#' \item \code{"dist"}. A \code{n_vertices x n_neighbors} matrix
#' containing the distances of the nearest neighbors.
#' }
#' Multiple nearest neighbor data (e.g. from two different precomputed
#' metrics) can be passed by passing a list containing the nearest neighbor
#' data lists as items.
#' The \code{n_neighbors} parameter is ignored when using precomputed
#' nearest neighbor data.
#' @param n_trees Number of trees to build when constructing the nearest
#' neighbor index. The more trees specified, the larger the index, but the
#' better the results. With \code{search_k}, determines the accuracy of the
#' Annoy nearest neighbor search. Only used if the \code{nn_method} is
#' \code{"annoy"}. Sensible values are between \code{10} to \code{100}.
#' @param search_k Number of nodes to search during the neighbor retrieval. The
#'   larger \code{search_k}, the more accurate the results, but the longer the
#'   search takes.
#' With \code{n_trees}, determines the accuracy of the Annoy nearest neighbor
#' search. Only used if the \code{nn_method} is \code{"annoy"}.
#' @param n_threads Number of threads to use (except during stochastic gradient
#' descent). Default is half the number of concurrent threads supported by the
#' system. For nearest neighbor search, only applies if
#' \code{nn_method = "annoy"}. If \code{n_threads > 1}, then the Annoy index
#' will be temporarily written to disk in the location determined by
#' \code{\link[base]{tempfile}}.
#' @param n_sgd_threads Number of threads to use during stochastic gradient
#' descent. If set to > 1, then be aware that if \code{batch = FALSE}, results
#' will \emph{not} be reproducible, even if \code{set.seed} is called with a
#' fixed seed before running. Set to \code{"auto"} to use the same value as
#' \code{n_threads}.
#' @param grain_size The minimum amount of work to do on each thread. If this
#' value is set high enough, then less than \code{n_threads} or
#' \code{n_sgd_threads} will be used for processing, which might give a
#' performance improvement if the overhead of thread management and context
#'   switching outweighs the improvement due to concurrent processing.
#' This should be left at default (\code{1}) and work will be spread evenly
#' over all the threads specified.
#' @param kernel Type of kernel function to create input probabilities. Can be
#' one of \code{"gauss"} (the default) or \code{"knn"}. \code{"gauss"} uses
#' the usual Gaussian weighted similarities. \code{"knn"} assigns equal
#' probabilities to every edge in the nearest neighbor graph, and zero
#' otherwise, using \code{perplexity} nearest neighbors. The \code{n_neighbors}
#' parameter is ignored in this case.
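#'   For example (an illustrative sketch): \code{lvish(iris, perplexity = 25,
#'   kernel = "knn", n_epochs = 200)}.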
#' @param pca If set to a positive integer value, reduce data to this number of
#'   columns using PCA. This is not applied if the distance \code{metric} is
#'   \code{"hamming"}, or if the dimensions of the data are not larger than the
#' number specified (i.e. number of rows and columns must be larger than the
#' value of this parameter). If you have > 100 columns in a data frame or
#' matrix, reducing the number of columns in this way may substantially
#' increase the performance of the nearest neighbor search at the cost of a
#' potential decrease in accuracy. In many t-SNE applications, a value of 50
#' is recommended, although there's no guarantee that this is appropriate for
#' all settings.
#' @param pca_center If \code{TRUE}, center the columns of \code{X} before
#' carrying out PCA. For binary data, it's recommended to set this to
#' \code{FALSE}.
#' @param pca_method Method to carry out any PCA dimensionality reduction when
#' the \code{pca} parameter is specified. Allowed values are:
#' \itemize{
#' \item{\code{"irlba"}}. Uses \code{\link[irlba]{prcomp_irlba}} from the
#' \href{https://cran.r-project.org/package=irlba}{irlba} package.
#' \item{\code{"rsvd"}}. Uses 5 iterations of \code{\link[irlba]{svdr}} from
#' the \href{https://cran.r-project.org/package=irlba}{irlba} package.
#' This is likely to give much faster but potentially less accurate results
#' than using \code{"irlba"}. For the purposes of nearest neighbor
#' calculation and coordinates initialization, any loss of accuracy doesn't
#' seem to matter much.
#' \item{\code{"bigstatsr"}}. Uses \code{\link[bigstatsr]{big_randomSVD}}
#' from the \href{https://cran.r-project.org/package=bigstatsr}{bigstatsr}
#' package. The SVD methods used in \code{bigstatsr} may be faster on
#' systems without access to efficient linear algebra libraries (e.g.
#' Windows). \strong{Note}: \code{bigstatsr} is \emph{not} a dependency of
#' uwot: if you choose to use this package for PCA, you \emph{must} install
#' it yourself.
#' \item{\code{"svd"}}. Uses \code{\link[base]{svd}} for the SVD. This is
#' likely to be slow for all but the smallest datasets.
#' \item{\code{"auto"}} (the default). Uses \code{"irlba"}, unless more than
#'     50\% of the full set of singular vectors would be calculated, in which
#' case \code{"svd"} is used.
#' }
#' @param pcg_rand If \code{TRUE}, use the PCG random number generator (O'Neill,
#' 2014) during optimization. Otherwise, use the faster (but probably less
#' statistically good) Tausworthe "taus88" generator. The default is
#' \code{TRUE}.
#' @param fast_sgd If \code{TRUE}, then the following combination of parameters
#' is set: \code{pcg_rand = TRUE} and \code{n_sgd_threads = "auto"}. The
#' default is \code{FALSE}. Setting this to \code{TRUE} will speed up the
#' stochastic optimization phase, but give a potentially less accurate
#' embedding, and which will not be exactly reproducible even with a fixed
#' seed. For visualization, \code{fast_sgd = TRUE} will give perfectly good
#' results. For more generic dimensionality reduction, it's safer to leave
#' \code{fast_sgd = FALSE}. If \code{fast_sgd = TRUE}, then user-supplied
#'   values of \code{pcg_rand} and \code{n_sgd_threads} are ignored.
#' @param batch If \code{TRUE}, then embedding coordinates are updated at the
#' end of each epoch rather than during the epoch. In batch mode, results are
#' reproducible with a fixed random seed even with \code{n_sgd_threads > 1},
#' at the cost of a slightly higher memory use. You may also have to modify
#' \code{learning_rate} and increase \code{n_epochs}, so whether this provides
#' a speed increase over the single-threaded optimization is likely to be
#' dataset and hardware-dependent.
#' @param ret_nn If \code{TRUE}, then in addition to the embedding, also return
#' nearest neighbor data that can be used as input to \code{nn_method} to
#' avoid the overhead of repeatedly calculating the nearest neighbors when
#' manipulating unrelated parameters (e.g. \code{min_dist}, \code{n_epochs},
#' \code{init}). See the "Value" section for the names of the list items. If
#' \code{FALSE}, just return the coordinates. Note that the nearest neighbors
#' could be sensitive to data scaling, so be wary of reusing nearest neighbor
#' data if modifying the \code{scale} parameter.
#' @param ret_extra A vector indicating what extra data to return. May contain
#' any combination of the following strings:
#' \itemize{
#' \item \code{"nn"} same as setting \code{ret_nn = TRUE}.
#' \item \code{"P"} the high dimensional probability matrix. The graph
#' is returned as a sparse symmetric N x N matrix of class
#' \link[Matrix]{dgCMatrix-class}, where a non-zero entry (i, j) gives the
#' input probability (or similarity or affinity) of the edge connecting
#' vertex i and vertex j. Note that the graph is further sparsified by
#' removing edges with sufficiently low membership strength that they
#' would not be sampled by the probabilistic edge sampling employed for
#' optimization and therefore the number of non-zero elements in the
#' matrix is dependent on \code{n_epochs}. If you are only interested in
#' the fuzzy input graph (e.g. for clustering), setting
#' \code{n_epochs = 0} will avoid any further sparsifying. Be aware that
#' setting \code{binary_edge_weights = TRUE} will affect this graph (all
#' non-zero edge weights will be 1).
#' \item \code{sigma} a vector of the bandwidths used to calibrate the input
#' Gaussians to reproduce the target \code{"perplexity"}.
#' }
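#'
#'   For example, a minimal sketch of retrieving the input probability graph
#'   without running any optimization (here \code{X} stands for your input
#'   data and the settings are illustrative only):
#' \preformatted{
#' res <- lvish(X, perplexity = 50, n_epochs = 0, ret_extra = c("P"))
#' P <- res$P
#' }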
#' @param tmpdir Temporary directory to store nearest neighbor indexes during
#' nearest neighbor search. Default is \code{\link{tempdir}}. The index is
#' only written to disk if \code{n_threads > 1} and
#' \code{nn_method = "annoy"}; otherwise, this parameter is ignored.
#' @param verbose If \code{TRUE}, log details to the console.
#' @param opt_args A list of optimizer parameters, used when
#' \code{batch = TRUE}. The default optimization method used is Adam (Kingma
#' and Ba, 2014).
#' \itemize{
#' \item \code{method} The optimization method to use. Either \code{"adam"}
#' or \code{"sgd"} (stochastic gradient descent). Default: \code{"adam"}.
#' \item \code{beta1} (Adam only). The weighting parameter for the
#' exponential moving average of the first moment estimator. Effectively the
#' momentum parameter. Should be a floating point value between 0 and 1.
#' Higher values can smooth oscillatory updates in poorly-conditioned
#' situations and may allow for a larger \code{learning_rate} to be
#' specified, but too high can cause divergence. Default: \code{0.5}.
#' \item \code{beta2} (Adam only). The weighting parameter for the
#' exponential moving average of the uncentered second moment estimator.
#' Should be a floating point value between 0 and 1. Controls the degree of
#' adaptivity in the step-size. Higher values put more weight on previous
#' time steps. Default: \code{0.9}.
#' \item \code{eps} (Adam only). Intended to be a small value to prevent
#' division by zero, but in practice can also affect convergence due to its
#' interaction with \code{beta2}. Higher values reduce the effect of the
#' step-size adaptivity and bring the behavior closer to stochastic gradient
#' descent with momentum. Typical values are between 1e-8 and 1e-3. Default:
#' \code{1e-7}.
#' \item \code{alpha} The initial learning rate. Default: the value of the
#' \code{learning_rate} parameter.
#' }
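#'
#'   For example, a minimal sketch of overriding some of the Adam defaults
#'   (here \code{X} stands for your input data and the values are illustrative
#'   only):
#' \preformatted{
#' res <- lvish(X, batch = TRUE, opt_args = list(beta1 = 0.9, eps = 1e-6))
#' }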
#' @param epoch_callback A function which will be invoked at the end of every
#' epoch. Its signature should be: \code{(epoch, n_epochs, coords)}, where:
#' \itemize{
#' \item \code{epoch} The current epoch number (between \code{1} and
#' \code{n_epochs}).
#' \item \code{n_epochs} Number of epochs to use during the optimization of
#' the embedded coordinates.
#' \item \code{coords} The embedded coordinates as of the end of the current
#' epoch, as a matrix with dimensions (N, \code{n_components}).
#' }
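#'
#'   For example, a minimal sketch of a callback that just logs progress (the
#'   body is illustrative only and \code{X} stands for your input data):
#' \preformatted{
#' progress_cb <- function(epoch, n_epochs, coords) {
#'   message("completed epoch ", epoch, " of ", n_epochs)
#' }
#' res <- lvish(X, epoch_callback = progress_cb)
#' }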
#' @param binary_edge_weights If \code{TRUE} then edge weights in the input
#' graph are treated as binary (0/1) rather than real valued. This affects the
#' sampling frequency of neighbors and is the strategy used by the PaCMAP
#' method (Wang and co-workers, 2020). Practical (Bรถhm and co-workers, 2020)
#' and theoretical (Damrich and Hamprecht, 2021) work suggests this has little
#' effect on UMAP's performance.
#' @return A matrix of optimized coordinates, or:
#' \itemize{
#' \item if \code{ret_nn = TRUE} (or \code{ret_extra} contains \code{"nn"}),
#' returns the nearest neighbor data as a list called \code{nn}. This
#' contains one list for each \code{metric} calculated, itself containing a
#' matrix \code{idx} with the integer ids of the neighbors; and a matrix
#' \code{dist} with the distances. The \code{nn} list (or a sub-list) can be
#' used as input to the \code{nn_method} parameter.
#' \item if \code{ret_extra} contains \code{"P"}, returns the high
#' dimensional probability matrix as a sparse matrix called \code{P}, of
#' type \link[Matrix]{dgCMatrix-class}.
#' \item if \code{ret_extra} contains \code{"sigma"}, returns a vector of
#'   the high dimensional Gaussian bandwidths for each point, and
#'   \code{"dint"}, a vector of estimates of the intrinsic dimensionality at
#' each point, based on the method given by Lee and co-workers (2015).
#' }
#' The returned list contains the combined data from any combination of
#' specifying \code{ret_nn} and \code{ret_extra}.
#' @references
#' Tang, J., Liu, J., Zhang, M., & Mei, Q. (2016, April).
#' Visualizing large-scale and high-dimensional data.
#' In \emph{Proceedings of the 25th International Conference on World Wide Web}
#' (pp. 287-297).
#' International World Wide Web Conferences Steering Committee.
#' \url{https://arxiv.org/abs/1602.00370}
#'
#' Lee, J. A., Peluffo-Ordรณรฑez, D. H., & Verleysen, M. (2015).
#' Multi-scale similarities in stochastic neighbour embedding: Reducing
#' dimensionality while preserving both local and global structure.
#' \emph{Neurocomputing}, \emph{169}, 246-261.
#'
#' @examples
#' # Default number of epochs is much larger than for UMAP, and assumes a random
#' # initialization. Use perplexity rather than n_neighbors to control the size
#' # of the local neighborhood. 20 epochs may be too small for a random
#' # initialization.
#' iris_lvish <- lvish(iris,
#' perplexity = 50, learning_rate = 0.5,
#' init = "random", n_epochs = 20
#' )
#' @export
lvish <- function(X, perplexity = 50, n_neighbors = perplexity * 3,
n_components = 2, metric = "euclidean", n_epochs = -1,
learning_rate = 1, scale = "maxabs",
init = "lvrandom", init_sdev = NULL,
repulsion_strength = 7,
negative_sample_rate = 5.0,
nn_method = NULL, n_trees = 50,
search_k = 2 * n_neighbors * n_trees,
n_threads = NULL,
n_sgd_threads = 0,
grain_size = 1,
kernel = "gauss",
pca = NULL, pca_center = TRUE,
pcg_rand = TRUE,
fast_sgd = FALSE,
ret_nn = FALSE, ret_extra = c(),
tmpdir = tempdir(),
verbose = getOption("verbose", TRUE),
batch = FALSE,
opt_args = NULL, epoch_callback = NULL,
pca_method = NULL,
binary_edge_weights = FALSE) {
uwot(X,
n_neighbors = n_neighbors, n_components = n_components,
metric = metric,
n_epochs = n_epochs, alpha = learning_rate, scale = scale,
init = init, init_sdev = init_sdev,
gamma = repulsion_strength, negative_sample_rate = negative_sample_rate,
nn_method = nn_method,
n_trees = n_trees, search_k = search_k,
method = "largevis", perplexity = perplexity,
pca = pca, pca_center = pca_center, pca_method = pca_method,
n_threads = n_threads, n_sgd_threads = n_sgd_threads,
grain_size = grain_size,
kernel = kernel,
ret_nn = ret_nn || "nn" %in% ret_extra,
ret_fgraph = "P" %in% ret_extra,
ret_sigma = "sigma" %in% ret_extra,
pcg_rand = pcg_rand,
fast_sgd = fast_sgd,
batch = batch,
opt_args = opt_args,
epoch_callback = epoch_callback,
tmpdir = tmpdir,
binary_edge_weights = binary_edge_weights,
verbose = verbose
)
}
#' Similarity Graph
#'
#' Create a graph (as a sparse symmetric weighted adjacency matrix) representing
#' the similarities between items in a data set. No dimensionality reduction is
#' carried out. By default, the similarities are calculated using the merged
#' fuzzy simplicial set approach in the Uniform Manifold Approximation and
#' Projection (UMAP) method (McInnes et al., 2018), but the approach from
#' LargeVis (Tang et al., 2016) can also be used.
#'
#' This is equivalent to running \code{\link{umap}} with the
#' \code{ret_extra = c("fgraph")} parameter, but without the overhead of
#' calculating (or returning) the optimized low-dimensional coordinates.
#'
#' @param X Input data. Can be a \code{\link{data.frame}}, \code{\link{matrix}},
#' \code{\link[stats]{dist}} object or \code{\link[Matrix]{sparseMatrix}}.
#' Matrix and data frames should contain one observation per row. Data frames
#' will have any non-numeric columns removed, although factor columns will be
#' used if explicitly included via \code{metric} (see the help for
#' \code{metric} for details). A sparse matrix is interpreted as a distance
#' matrix, and is assumed to be symmetric, so you can also pass in an
#' explicitly upper or lower triangular sparse matrix to save storage. There
#' must be at least \code{n_neighbors} non-zero distances for each row. Both
#' implicit and explicit zero entries are ignored. Set zero distances you want
#' to keep to an arbitrarily small non-zero value (e.g. \code{1e-10}).
#' \code{X} can also be \code{NULL} if pre-computed nearest neighbor data is
#' passed to \code{nn_method}.
#' @param n_neighbors The size of local neighborhood (in terms of number of
#' neighboring sample points) used for manifold approximation. Larger values
#' result in more global views of the manifold, while smaller values result in
#' more local data being preserved. In general values should be in the range
#' \code{2} to \code{100}.
#' @param metric Type of distance metric to use to find nearest neighbors. One
#' of:
#' \itemize{
#' \item \code{"euclidean"} (the default)
#' \item \code{"cosine"}
#' \item \code{"manhattan"}
#' \item \code{"hamming"}
#' \item \code{"correlation"} (a distance based on the Pearson correlation)
#' \item \code{"categorical"} (see below)
#' }
#' Only applies if \code{nn_method = "annoy"} (for \code{nn_method = "fnn"}, the
#' distance metric is always "euclidean").
#'
#' If \code{X} is a data frame or matrix, then multiple metrics can be
#' specified, by passing a list to this argument, where the name of each item in
#' the list is one of the metric names above. The value of each list item should
#' be a vector giving the names or integer ids of the columns to be included in
#' a calculation, e.g. \code{metric = list(euclidean = 1:4, manhattan = 5:10)}.
#'
#' Each metric calculation results in a separate fuzzy simplicial set, which are
#' intersected together to produce the final set. Metric names can be repeated.
#' Because non-numeric columns are removed from the data frame, it is safer to
#' use column names than integer ids.
#'
#' Factor columns can also be used by specifying the metric name
#' \code{"categorical"}. Factor columns are treated differently from numeric
#' columns and although multiple factor columns can be specified in a vector,
#' each factor column specified is processed individually. If you specify
#' a non-factor column, it will be coerced to a factor.
#'
#' For a given data block, you may override the \code{pca} and \code{pca_center}
#' arguments for that block, by providing a list with one unnamed item
#' containing the column names or ids, and then any of the \code{pca} or
#' \code{pca_center} overrides as named items, e.g. \code{metric =
#' list(euclidean = 1:4, manhattan = list(5:10, pca_center = FALSE))}. This
#' exists to allow mixed binary and real-valued data to be included and to have
#' PCA applied to both, but with centering applied only to the real-valued data
#' (it is typical not to apply centering to binary data before PCA is applied).
#' @param scale Scaling to apply to \code{X} if it is a data frame or matrix:
#' \itemize{
#' \item{\code{"none"} or \code{FALSE} or \code{NULL}} No scaling.
#' \item{\code{"Z"} or \code{"scale"} or \code{TRUE}} Scale each column to
#' zero mean and variance 1.
#' \item{\code{"maxabs"}} Center each column to mean 0, then divide each
#' element by the maximum absolute value over the entire matrix.
#' \item{\code{"range"}} Range scale the entire matrix, so the smallest
#' element is 0 and the largest is 1.
#' \item{\code{"colrange"}} Scale each column in the range (0,1).
#' }
#' For \code{method} \code{"umap"}, the default is \code{"none"}. For
#' \code{"largevis"}, the default is \code{"maxabs"}.
#' @param set_op_mix_ratio Interpolate between (fuzzy) union and intersection as
#' the set operation used to combine local fuzzy simplicial sets to obtain a
#' global fuzzy simplicial sets. Both fuzzy set operations use the product
#' t-norm. The value of this parameter should be between \code{0.0} and
#' \code{1.0}; a value of \code{1.0} will use a pure fuzzy union, while
#' \code{0.0} will use a pure fuzzy intersection. Ignored if
#'   \code{method = "largevis"}.
#' @param local_connectivity The local connectivity required -- i.e. the number
#' of nearest neighbors that should be assumed to be connected at a local
#' level. The higher this value the more connected the manifold becomes
#' locally. In practice this should be not more than the local intrinsic
#' dimension of the manifold. Ignored if \code{method = "largevis"}.
#' @param nn_method Method for finding nearest neighbors. Options are:
#' \itemize{
#' \item \code{"fnn"}. Use exact nearest neighbors via the
#' \href{https://cran.r-project.org/package=FNN}{FNN} package.
#' \item \code{"annoy"} Use approximate nearest neighbors via the
#' \href{https://cran.r-project.org/package=RcppAnnoy}{RcppAnnoy} package.
#' }
#' By default, if \code{X} has fewer than 4,096 vertices, the exact nearest
#' neighbors are found. Otherwise, approximate nearest neighbors are used.
#' You may also pass pre-calculated nearest neighbor data to this argument. It
#' must be one of two formats, either a list consisting of two elements:
#' \itemize{
#' \item \code{"idx"}. A \code{n_vertices x n_neighbors} matrix
#' containing the integer indexes of the nearest neighbors in \code{X}. Each
#' vertex is considered to be its own nearest neighbor, i.e.
#' \code{idx[, 1] == 1:n_vertices}.
#' \item \code{"dist"}. A \code{n_vertices x n_neighbors} matrix
#' containing the distances of the nearest neighbors.
#' }
#' or a sparse distance matrix of type \code{dgCMatrix}, with dimensions
#' \code{n_vertices x n_vertices}. Distances should be arranged by column,
#' i.e. a non-zero entry in row \code{j} of the \code{i}th column indicates
#' that the \code{j}th observation in \code{X} is a nearest neighbor of the
#' \code{i}th observation with the distance given by the value of that
#' element.
#' The \code{n_neighbors} parameter is ignored when using precomputed
#' nearest neighbor data. If using the sparse distance matrix input, each
#' column can contain a different number of neighbors.
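#'
#'   For example, a minimal sketch of supplying pre-computed neighbors in the
#'   list format (here \code{knn_idx} and \code{knn_dist} are assumed to be
#'   pre-computed \code{n_vertices x n_neighbors} matrices):
#' \preformatted{
#' nn_data <- list(idx = knn_idx, dist = knn_dist)
#' sg <- similarity_graph(nn_method = nn_data)
#' }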
#' @param n_trees Number of trees to build when constructing the nearest
#' neighbor index. The more trees specified, the larger the index, but the
#' better the results. With \code{search_k}, determines the accuracy of the
#' Annoy nearest neighbor search. Only used if the \code{nn_method} is
#' \code{"annoy"}. Sensible values are between \code{10} to \code{100}.
#' @param search_k Number of nodes to search during the neighbor retrieval.
#'   The larger \code{search_k}, the more accurate the results, but the longer
#'   the search takes.
#' With \code{n_trees}, determines the accuracy of the Annoy nearest neighbor
#' search. Only used if the \code{nn_method} is \code{"annoy"}.
#' @param perplexity Used only if \code{method = "largevis"}. Controls the size
#' of the local neighborhood used for manifold approximation. Should be a
#' value between 1 and one less than the number of items in \code{X}. If
#' specified, you should \emph{not} specify a value for \code{n_neighbors}
#' unless you know what you are doing.
#' @param kernel Used only if \code{method = "largevis"}. Type of kernel
#'   function to create input similarities. Can be one of \code{"gauss"} (the
#'   default) or \code{"knn"}. \code{"gauss"} uses the usual Gaussian weighted
#'   similarities. \code{"knn"} assigns equal similarities to every edge in the
#' nearest neighbor graph, and zero otherwise, using \code{perplexity} nearest
#' neighbors. The \code{n_neighbors} parameter is ignored in this case.
#' @param method How to generate the similarities between items. One of:
#' \itemize{
#' \item \code{"umap"} The UMAP method of McInnes et al. (2018).
#' \item \code{"largevis"} The LargeVis method of Tang et al. (2016).
#' }
#' @param y Optional target data to add supervised or semi-supervised weighting
#'   to the similarity graph. Can be a vector, matrix or data frame. Use the
#' \code{target_metric} parameter to specify the metrics to use, using the
#' same syntax as \code{metric}. Usually either a single numeric or factor
#' column is used, but more complex formats are possible. The following types
#' are allowed:
#' \itemize{
#' \item Factor columns with the same length as \code{X}. \code{NA} is
#' allowed for any observation with an unknown level, in which case
#' UMAP operates as a form of semi-supervised learning. Each column is
#' treated separately.
#' \item Numeric data. \code{NA} is \emph{not} allowed in this case. Use the
#' parameter \code{target_n_neighbors} to set the number of neighbors used
#' with \code{y}. If unset, \code{n_neighbors} is used. Unlike factors,
#' numeric columns are grouped into one block unless \code{target_metric}
#' specifies otherwise. For example, if you wish columns \code{a} and
#' \code{b} to be treated separately, specify
#' \code{target_metric = list(euclidean = "a", euclidean = "b")}. Otherwise,
#' the data will be effectively treated as a matrix with two columns.
#' \item Nearest neighbor data, consisting of a list of two matrices,
#' \code{idx} and \code{dist}. These represent the precalculated nearest
#' neighbor indices and distances, respectively. This
#' is the same format as that expected for precalculated data in
#' \code{nn_method}. This format assumes that the underlying data was a
#' numeric vector. Any user-supplied value of the \code{target_n_neighbors}
#'   parameter is ignored in this case, because the number of columns in
#' the matrices is used for the value. Multiple nearest neighbor data using
#' different metrics can be supplied by passing a list of these lists.
#' }
#' Unlike \code{X}, all factor columns included in \code{y} are automatically
#' used. This parameter is ignored if \code{method = "largevis"}.
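#'
#'   For example, a minimal sketch of adding supervised weighting from a factor
#'   column (settings are illustrative only):
#' \preformatted{
#' sg_sup <- similarity_graph(iris[, 1:4], n_neighbors = 10, y = iris$Species)
#' }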
#' @param target_n_neighbors Number of nearest neighbors to use to construct the
#' target simplicial set. Default value is \code{n_neighbors}. Applies only if
#' \code{y} is non-\code{NULL} and \code{numeric}. This parameter is ignored
#' if \code{method = "largevis"}.
#' @param target_metric The metric used to measure distance for \code{y} if
#' using supervised dimension reduction. Used only if \code{y} is numeric.
#' This parameter is ignored if \code{method = "largevis"}.
#' @param target_weight Weighting factor between data topology and target
#' topology. A value of 0.0 weights entirely on data, a value of 1.0 weights
#' entirely on target. The default of 0.5 balances the weighting equally
#' between data and target. Only applies if \code{y} is non-\code{NULL}. This
#' parameter is ignored if \code{method = "largevis"}.
#' @param pca If set to a positive integer value, reduce data to this number of
#'   columns using PCA. Not applied if the distance \code{metric} is
#'   \code{"hamming"}, or if the dimensions of the data are not larger than the
#'   number specified (i.e. the number of rows and columns must both be larger
#'   than the value of this parameter). If you have > 100 columns in a data frame or
#' matrix, reducing the number of columns in this way may substantially
#' increase the performance of the nearest neighbor search at the cost of a
#' potential decrease in accuracy. In many t-SNE applications, a value of 50
#' is recommended, although there's no guarantee that this is appropriate for
#' all settings.
#' @param pca_center If \code{TRUE}, center the columns of \code{X} before
#' carrying out PCA. For binary data, it's recommended to set this to
#' \code{FALSE}.
#' @param pca_method Method to carry out any PCA dimensionality reduction when
#' the \code{pca} parameter is specified. Allowed values are:
#' \itemize{
#' \item{\code{"irlba"}}. Uses \code{\link[irlba]{prcomp_irlba}} from the
#' \href{https://cran.r-project.org/package=irlba}{irlba} package.
#' \item{\code{"rsvd"}}. Uses 5 iterations of \code{\link[irlba]{svdr}} from
#' the \href{https://cran.r-project.org/package=irlba}{irlba} package.
#' This is likely to give much faster but potentially less accurate results
#' than using \code{"irlba"}. For the purposes of nearest neighbor
#' calculation and coordinates initialization, any loss of accuracy doesn't
#' seem to matter much.
#' \item{\code{"bigstatsr"}}. Uses \code{\link[bigstatsr]{big_randomSVD}}
#' from the \href{https://cran.r-project.org/package=bigstatsr}{bigstatsr}
#' package. The SVD methods used in \code{bigstatsr} may be faster on
#' systems without access to efficient linear algebra libraries (e.g.
#' Windows). \strong{Note}: \code{bigstatsr} is \emph{not} a dependency of
#' uwot: if you choose to use this package for PCA, you \emph{must} install
#' it yourself.
#' \item{\code{"svd"}}. Uses \code{\link[base]{svd}} for the SVD. This is
#' likely to be slow for all but the smallest datasets.
#' \item{\code{"auto"}} (the default). Uses \code{"irlba"}, unless more than
#'    50\% of the full set of singular vectors would be calculated, in which
#' case \code{"svd"} is used.
#' }
#' @param ret_extra A vector indicating what extra data to return. May contain
#' any combination of the following strings:
#' \itemize{
#' \item \code{"nn"} nearest neighbor data that can be used as input to
#' \code{nn_method} to avoid the overhead of repeatedly calculating the
#' nearest neighbors when manipulating unrelated parameters. See the
#' "Value" section for the names of the list items. Note that the nearest
#' neighbors could be sensitive to data scaling, so be wary of reusing
#' nearest neighbor data if modifying the \code{scale} parameter.
#' \item \code{"sigma"} the normalization value for each observation in the
#' dataset when constructing the smoothed distances to each of its
#' neighbors. This gives some sense of the local density of each
#' observation in the high dimensional space: higher values of
#' \code{sigma} indicate a higher dispersion or lower density.
#' }
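#'
#'   For example, a minimal sketch of returning the bandwidths alongside the
#'   graph (here \code{X} stands for your input data; the result is then a
#'   list):
#' \preformatted{
#' res <- similarity_graph(X, ret_extra = c("sigma"))
#' summary(res$sigma)
#' }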
#' @param n_threads Number of threads to use. Default is half the number of
#' concurrent threads supported by the system. For nearest neighbor search,
#' only applies if \code{nn_method = "annoy"}. If \code{n_threads > 1}, then
#' the Annoy index will be temporarily written to disk in the location
#' determined by \code{\link[base]{tempfile}}.
#' @param grain_size The minimum amount of work to do on each thread. If this
#'   value is set high enough, then fewer than \code{n_threads} threads will be used for
#' processing, which might give a performance improvement if the overhead of
#' thread management and context switching was outweighing the improvement due
#' to concurrent processing. This should be left at default (\code{1}) and
#' work will be spread evenly over all the threads specified.
#' @param tmpdir Temporary directory to store nearest neighbor indexes during
#' nearest neighbor search. Default is \code{\link{tempdir}}. The index is
#' only written to disk if \code{n_threads > 1} and
#' \code{nn_method = "annoy"}; otherwise, this parameter is ignored.
#' @param verbose If \code{TRUE}, log details to the console.
#' @param binary_edge_weights If \code{TRUE} then edge weights of the returned
#' graph are binary (0/1) rather than reflecting the degree of similarity.
#' @return A sparse symmetrized matrix of the similarities between the items in
#' \code{X} or if \code{nn_method} contains pre-computed nearest neighbor
#' data, the items in \code{nn_method}. Because of the symmetrization, there
#' may be more non-zero items in each column than the specified value of
#' \code{n_neighbors} (or pre-computed neighbors in \code{nn_method}).
#' If \code{ret_extra} is specified then the return value will be a list
#' containing:
#' \itemize{
#' \item \code{similarity_graph} the similarity graph as a sparse matrix
#' as described above.
#' \item \code{nn} (if \code{ret_extra} contained \code{"nn"}) the nearest
#' neighbor data as a list called \code{nn}. This contains one list for each
#' \code{metric} calculated, itself containing a matrix \code{idx} with the
#' integer ids of the neighbors; and a matrix \code{dist} with the
#' distances. The \code{nn} list (or a sub-list) can be used as input to the
#' \code{nn_method} parameter.
#' \item \code{sigma} (if \code{ret_extra} contains \code{"sigma"}),
#' a vector of calibrated parameters, one for each item in the input data,
#' reflecting the local data density for that item. The exact definition of
#' the values depends on the choice of the \code{method} parameter.
#' \item \code{rho} (if \code{ret_extra} contains \code{"sigma"}), a
#' vector containing the largest distance to the locally connected neighbors
#' of each item in the input data. This will exist only if
#' \code{method = "umap"}.
#' \item \code{localr} (if \code{ret_extra} contains \code{"localr"}) a
#' vector of the estimated local radii, the sum of \code{"sigma"} and
#' \code{"rho"}. This will exist only if \code{method = "umap"}.
#' }
#' @examples
#'
#' iris30 <- iris[c(1:10, 51:60, 101:110), ]
#'
#' # return a 30 x 30 sparse matrix with similarity data based on 10 nearest
#' # neighbors per item
#' iris30_sim_graph <- similarity_graph(iris30, n_neighbors = 10)
#'
#' # Default is to use the UMAP method of calculating similarities, but LargeVis
#' # is also available: for that method, use perplexity instead of n_neighbors
#' # to control neighborhood size. Use ret_extra = "nn" to return nearest
#' # neighbor data as well as the similarity graph. Return value is a list
#' # containing 'similarity_graph' and 'nn' items.
#' iris30_lv_graph <- similarity_graph(iris30, perplexity = 10,
#' method = "largevis", ret_extra = "nn")
#' # If you have the neighbor information you don't need the original data
#' iris30_lv_graph_nn <- similarity_graph(nn_method = iris30_lv_graph$nn,
#' perplexity = 10, method = "largevis")
#' all(iris30_lv_graph_nn == iris30_lv_graph$similarity_graph)
#'
#' @references
#' McInnes, L., Healy, J., & Melville, J. (2018).
#' UMAP: Uniform Manifold Approximation and Projection for Dimension Reduction
#' \emph{arXiv preprint} \emph{arXiv}:1802.03426.
#' \url{https://arxiv.org/abs/1802.03426}
#'
#' Tang, J., Liu, J., Zhang, M., & Mei, Q. (2016, April).
#' Visualizing large-scale and high-dimensional data.
#' In \emph{Proceedings of the 25th International Conference on World Wide Web}
#' (pp. 287-297).
#' International World Wide Web Conferences Steering Committee.
#' \url{https://arxiv.org/abs/1602.00370}
#'
#' @export
similarity_graph <- function(X = NULL, n_neighbors = NULL, metric = "euclidean",
scale = NULL,
set_op_mix_ratio = 1.0, local_connectivity = 1.0,
nn_method = NULL, n_trees = 50,
search_k = 2 * n_neighbors * n_trees,
perplexity = 50,
method = "umap",
y = NULL, target_n_neighbors = n_neighbors,
target_metric = "euclidean",
target_weight = 0.5,
pca = NULL, pca_center = TRUE,
ret_extra = c(),
n_threads = NULL,
grain_size = 1,
kernel = "gauss",
tmpdir = tempdir(),
verbose = getOption("verbose", TRUE),
pca_method = NULL,
binary_edge_weights = FALSE) {
if (is.null(n_neighbors)) {
if (method == "largevis") {
n_neighbors <- perplexity * 3
scale <- "maxabs"
}
else {
n_neighbors <- 15
scale <- FALSE
}
}
uwot_res <- uwot(
X = X, n_neighbors = n_neighbors,
metric = metric, n_epochs = 0, scale = scale,
init = NULL,
set_op_mix_ratio = set_op_mix_ratio,
local_connectivity = local_connectivity,
nn_method = nn_method, n_trees = n_trees,
search_k = search_k,
method = method,
n_threads = n_threads,
grain_size = grain_size,
kernel = kernel, perplexity = perplexity,
y = y, target_n_neighbors = target_n_neighbors,
target_weight = target_weight, target_metric = target_metric,
pca = pca, pca_center = pca_center, pca_method = pca_method,
ret_model = FALSE,
ret_nn = "nn" %in% ret_extra,
ret_fgraph = TRUE,
ret_sigma = "sigma" %in% ret_extra,
ret_localr = "localr" %in% ret_extra,
binary_edge_weights = binary_edge_weights,
    tmpdir = tmpdir,
verbose = verbose
)
res <- list()
for (name in names(uwot_res)) {
if (name == "embedding") {
# embedding will be NULL so remove it
next
}
if (name == "P" || name == "fgraph") {
res$similarity_graph <- uwot_res[[name]]
}
else {
res[[name]] <- uwot_res[[name]]
}
}
if (length(names(res)) == 1 && !is.null(res$similarity_graph)) {
# return just the similarity graph if no extras were requested
res <- res$similarity_graph
}
res
}
#' Optimize Graph Layout
#'
#' Carry out dimensionality reduction on an input graph, where the distances in
#' the low dimensional space attempt to reproduce the neighbor relations in the
#' input data. By default, the cost function used to optimize the output
#' coordinates use the Uniform Manifold Approximation and Projection (UMAP)
#' method (McInnes et al., 2018), but the approach from LargeVis (Tang et al.,
#' 2016) can also be used. This function can be used to produce a low
#' dimensional representation of the graph produced by
#' \code{\link{similarity_graph}}.
#'
#' @param graph A sparse, symmetric N x N weighted adjacency matrix
#' representing a graph. Non-zero entries indicate an edge between two nodes
#' with a given edge weight. There can be a varying number of non-zero entries
#' in each row/column.
#' @param X Optional input data. Used only for PCA-based initialization.
#' @param n_components The dimension of the space to embed into. This defaults
#' to \code{2} to provide easy visualization, but can reasonably be set to any
#' integer value in the range \code{2} to \code{100}.
#' @param n_epochs Number of epochs to use during the optimization of the
#'   embedded coordinates. By default, this value is set to \code{500} for
#'   datasets containing 10,000 vertices or less, and \code{200} otherwise.
#'   If \code{n_epochs = 0}, then coordinates determined by \code{"init"} will
#'   be returned.
#' @param learning_rate Initial learning rate used in optimization of the
#' coordinates.
#' @param init Type of initialization for the coordinates. Options are:
#' \itemize{
#' \item \code{"spectral"} Spectral embedding using the normalized Laplacian
#' of the fuzzy 1-skeleton, with Gaussian noise added.
#' \item \code{"normlaplacian"}. Spectral embedding using the normalized
#' Laplacian of the fuzzy 1-skeleton, without noise.
#' \item \code{"random"}. Coordinates assigned using a uniform random
#' distribution between -10 and 10.
#' \item \code{"lvrandom"}. Coordinates assigned using a Gaussian
#' distribution with standard deviation 1e-4, as used in LargeVis
#' (Tang et al., 2016) and t-SNE.
#' \item \code{"laplacian"}. Spectral embedding using the Laplacian Eigenmap.
#' \item \code{"pca"}. The first two principal components from PCA of
#' \code{X} if \code{X} is a data frame, and from a 2-dimensional classical
#' MDS if \code{X} is of class \code{"dist"}.
#' \item \code{"spca"}. Like \code{"pca"}, but each dimension is then scaled
#' so the standard deviation is 1e-4, to give a distribution similar to that
#' used in t-SNE. This is an alias for \code{init = "pca", init_sdev =
#' 1e-4}.
#' \item \code{"agspectral"} An "approximate global" modification of
#'     \code{"spectral"} which sets all edges in the graph to a value of 1, and then
#' sets a random number of edges (\code{negative_sample_rate} edges per
#' vertex) to 0.1, to approximate the effect of non-local affinities.
#' \item A matrix of initial coordinates.
#' }
#' For spectral initializations, (\code{"spectral"}, \code{"normlaplacian"},
#' \code{"laplacian"}, \code{"agspectral"}), if more than one connected
#' component is identified, no spectral initialization is attempted. Instead
#' a PCA-based initialization is attempted. If \code{verbose = TRUE} the
#' number of connected components are logged to the console. The existence of
#' multiple connected components implies that a global view of the data cannot
#' be attained with this initialization. Increasing the value of
#' \code{n_neighbors} may help.
#' @param init_sdev If non-\code{NULL}, scales each dimension of the initialized
#' coordinates (including any user-supplied matrix) to this standard
#' deviation. By default no scaling is carried out, except when \code{init =
#' "spca"}, in which case the value is \code{0.0001}. Scaling the input may
#' help if the unscaled versions result in initial coordinates with large
#' inter-point distances or outliers. This usually results in small gradients
#' during optimization and very little progress being made to the layout.
#' Shrinking the initial embedding by rescaling can help under these
#'   circumstances. Scaling the result of \code{init = "pca"} is usually
#'   recommended, and \code{init = "spca"} can be used as an alias for
#'   \code{init = "pca", init_sdev = 1e-4}, but for the spectral
#'   initializations the scaled versions
#' usually aren't necessary unless you are using a large value of
#' \code{n_neighbors} (e.g. \code{n_neighbors = 150} or higher). For
#' compatibility with recent versions of the Python UMAP package, if you are
#' using \code{init = "spectral"}, then you should also set
#' \code{init_sdev = "range"}, which will range scale each of the columns
#' containing the initial data between 0-10. This is not set by default to
#' maintain backwards compatibility with previous versions of uwot.
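#'
#'   As a minimal sketch of the Python-compatible settings described above
#'   (here \code{graph} is assumed to be a pre-computed similarity graph):
#' \preformatted{
#' coords <- optimize_graph_layout(graph, init = "spectral",
#'                                 init_sdev = "range")
#' }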
#' @param spread The effective scale of embedded points. In combination with
#' \code{min_dist}, this determines how clustered/clumped the embedded points
#' are.
#' @param min_dist The effective minimum distance between embedded points.
#' Smaller values will result in a more clustered/clumped embedding where
#' nearby points on the manifold are drawn closer together, while larger
#' values will result on a more even dispersal of points. The value should be
#' set relative to the \code{spread} value, which determines the scale at
#' which embedded points will be spread out.
#' @param repulsion_strength Weighting applied to negative samples in low
#' dimensional embedding optimization. Values higher than one will result in
#' greater weight being given to negative samples.
#' @param negative_sample_rate The number of negative edge/1-simplex samples to
#' use per positive edge/1-simplex sample in optimizing the low dimensional
#' embedding.
#' @param a More specific parameters controlling the embedding. If \code{NULL}
#' these values are set automatically as determined by \code{min_dist} and
#' \code{spread}.
#' @param b More specific parameters controlling the embedding. If \code{NULL}
#' these values are set automatically as determined by \code{min_dist} and
#' \code{spread}.
#' @param approx_pow If \code{TRUE}, use an approximation to the power function
#' in the UMAP gradient, from
#' \url{https://martin.ankerl.com/2012/01/25/optimized-approximative-pow-in-c-and-cpp/}.
#' @param method Cost function to optimize. One of:
#' \itemize{
#' \item{\code{"umap"}}. The UMAP method of McInnes and co-workers (2018).
#' \item{\code{"tumap"}}. UMAP with the \code{a} and \code{b} parameters fixed
#' to 1.
#'   \item{\code{"largevis"}}. The LargeVis method of Tang and co-workers (2016).
#' }
#' @param pca_method Method to carry out any PCA dimensionality reduction when
#' the \code{pca} parameter is specified. Allowed values are:
#' \itemize{
#' \item{\code{"irlba"}}. Uses \code{\link[irlba]{prcomp_irlba}} from the
#' \href{https://cran.r-project.org/package=irlba}{irlba} package.
#' \item{\code{"rsvd"}}. Uses 5 iterations of \code{\link[irlba]{svdr}} from
#' the \href{https://cran.r-project.org/package=irlba}{irlba} package.
#' This is likely to give much faster but potentially less accurate results
#' than using \code{"irlba"}. For the purposes of nearest neighbor
#' calculation and coordinates initialization, any loss of accuracy doesn't
#' seem to matter much.
#' \item{\code{"bigstatsr"}}. Uses \code{\link[bigstatsr]{big_randomSVD}}
#' from the \href{https://cran.r-project.org/package=bigstatsr}{bigstatsr}
#' package. The SVD methods used in \code{bigstatsr} may be faster on
#' systems without access to efficient linear algebra libraries (e.g.
#' Windows). \strong{Note}: \code{bigstatsr} is \emph{not} a dependency of
#' uwot: if you choose to use this package for PCA, you \emph{must} install
#' it yourself.
#' \item{\code{"svd"}}. Uses \code{\link[base]{svd}} for the SVD. This is
#' likely to be slow for all but the smallest datasets.
#' \item{\code{"auto"}} (the default). Uses \code{"irlba"}, unless more than
#'    50\% of the full set of singular vectors would be calculated, in which
#' case \code{"svd"} is used.
#' }
#' @param pcg_rand If \code{TRUE}, use the PCG random number generator (O'Neill,
#' 2014) during optimization. Otherwise, use the faster (but probably less
#' statistically good) Tausworthe "taus88" generator. The default is
#' \code{TRUE}.
#' @param fast_sgd If \code{TRUE}, then the following combination of parameters
#'   is set: \code{pcg_rand = FALSE}, \code{n_sgd_threads = "auto"} and
#'   \code{approx_pow = TRUE}. The default is \code{FALSE}. Setting this to
#'   \code{TRUE} will speed up the stochastic optimization phase, but give a
#'   potentially less accurate embedding, and one which will not be exactly
#' reproducible even with a fixed seed. For visualization, \code{fast_sgd =
#' TRUE} will give perfectly good results. For more generic dimensionality
#' reduction, it's safer to leave \code{fast_sgd = FALSE}. If \code{fast_sgd =
#' TRUE}, then user-supplied values of \code{pcg_rand}, \code{n_sgd_threads},
#' and \code{approx_pow} are ignored.
#' @param batch If \code{TRUE}, then embedding coordinates are updated at the
#' end of each epoch rather than during the epoch. In batch mode, results are
#' reproducible with a fixed random seed even with \code{n_sgd_threads > 1},
#' at the cost of a slightly higher memory use. You may also have to modify
#' \code{learning_rate} and increase \code{n_epochs}, so whether this provides
#' a speed increase over the single-threaded optimization is likely to be
#' dataset and hardware-dependent.
#' @param n_sgd_threads Number of threads to use during stochastic gradient
#' descent. If set to > 1, then be aware that if \code{batch = FALSE}, results
#' will \emph{not} be reproducible, even if \code{set.seed} is called with a
#' fixed seed before running. If set to \code{"auto"} then half the number of
#' concurrent threads supported by the system will be used.
#' @param grain_size The minimum amount of work to do on each thread. If this
#'   value is set high enough, then fewer than \code{n_threads} or
#'   \code{n_sgd_threads} threads will be used for processing, which might give a
#' performance improvement if the overhead of thread management and context
#' switching was outweighing the improvement due to concurrent processing.
#' This should be left at default (\code{1}) and work will be spread evenly
#' over all the threads specified.
#' @param verbose If \code{TRUE}, log details to the console.
#' @param opt_args A list of optimizer parameters, used when
#' \code{batch = TRUE}. The default optimization method used is Adam (Kingma
#' and Ba, 2014).
#' \itemize{
#' \item \code{method} The optimization method to use. Either \code{"adam"}
#' or \code{"sgd"} (stochastic gradient descent). Default: \code{"adam"}.
#' \item \code{beta1} (Adam only). The weighting parameter for the
#' exponential moving average of the first moment estimator. Effectively the
#' momentum parameter. Should be a floating point value between 0 and 1.
#' Higher values can smooth oscillatory updates in poorly-conditioned
#' situations and may allow for a larger \code{learning_rate} to be
#' specified, but too high can cause divergence. Default: \code{0.5}.
#' \item \code{beta2} (Adam only). The weighting parameter for the
#' exponential moving average of the uncentered second moment estimator.
#' Should be a floating point value between 0 and 1. Controls the degree of
#' adaptivity in the step-size. Higher values put more weight on previous
#' time steps. Default: \code{0.9}.
#' \item \code{eps} (Adam only). Intended to be a small value to prevent
#' division by zero, but in practice can also affect convergence due to its
#' interaction with \code{beta2}. Higher values reduce the effect of the
#' step-size adaptivity and bring the behavior closer to stochastic gradient
#' descent with momentum. Typical values are between 1e-8 and 1e-3. Default:
#' \code{1e-7}.
#' \item \code{alpha} The initial learning rate. Default: the value of the
#' \code{learning_rate} parameter.
#' }
#' @param epoch_callback A function which will be invoked at the end of every
#' epoch. Its signature should be: \code{(epoch, n_epochs, coords)}, where:
#' \itemize{
#' \item \code{epoch} The current epoch number (between \code{1} and
#' \code{n_epochs}).
#' \item \code{n_epochs} Number of epochs to use during the optimization of
#' the embedded coordinates.
#' \item \code{coords} The embedded coordinates as of the end of the current
#' epoch, as a matrix with dimensions (N, \code{n_components}).
#' }
#' @param binary_edge_weights If \code{TRUE} then edge weights in the input
#' graph are treated as binary (0/1) rather than real valued.
#' @return A matrix of optimized coordinates.
#'
#' @examples
#'
#' iris30 <- iris[c(1:10, 51:60, 101:110), ]
#'
#' # return a 30 x 30 sparse matrix with similarity data based on 10 nearest
#' # neighbors per item
#' iris30_sim_graph <- similarity_graph(iris30, n_neighbors = 10)
#' # produce 2D coordinates replicating the neighbor relations in the similarity
#' # graph
#' set.seed(42)
#' iris30_opt <- optimize_graph_layout(iris30_sim_graph, X = iris30)
#'
#' # the above two steps are the same as:
#' # set.seed(42); iris_umap <- umap(iris30, n_neighbors = 10)
#'
#' @references
#' Kingma, D. P., & Ba, J. (2014).
#' Adam: A method for stochastic optimization.
#' \emph{arXiv preprint} \emph{arXiv}:1412.6980.
#' \url{https://arxiv.org/abs/1412.6980}
#'
#' McInnes, L., Healy, J., & Melville, J. (2018).
#' UMAP: Uniform Manifold Approximation and Projection for Dimension Reduction
#' \emph{arXiv preprint} \emph{arXiv}:1802.03426.
#' \url{https://arxiv.org/abs/1802.03426}
#'
#' Tang, J., Liu, J., Zhang, M., & Mei, Q. (2016, April).
#' Visualizing large-scale and high-dimensional data.
#' In \emph{Proceedings of the 25th International Conference on World Wide Web}
#' (pp. 287-297).
#' International World Wide Web Conferences Steering Committee.
#' \url{https://arxiv.org/abs/1602.00370}
#'
#' @export
optimize_graph_layout <-
function(graph,
X = NULL,
n_components = 2,
n_epochs = NULL,
learning_rate = 1,
init = "spectral",
init_sdev = NULL,
spread = 1,
min_dist = 0.01,
repulsion_strength = 1.0,
negative_sample_rate = 5.0,
a = NULL,
b = NULL,
method = "umap",
approx_pow = FALSE,
pcg_rand = TRUE,
fast_sgd = FALSE,
n_sgd_threads = 0,
grain_size = 1,
verbose = getOption("verbose", TRUE),
batch = FALSE,
opt_args = NULL,
epoch_callback = NULL,
pca_method = NULL,
binary_edge_weights = FALSE) {
if (!is_sparse_matrix(graph)) {
stop("graph should be a sparse matrix")
}
if (nrow(graph) != ncol(graph)) {
stop("graph should be a square matrix")
}
if (!Matrix::isSymmetric(graph)) {
stop("graph should be symmetric")
}
if (!all(diff(graph@p) > 0)) {
stop("All items must have at least one neighbor similarity defined")
}
# Just do things the UMAP way or we will have a very slow largevis
# optimization
if (is.null(n_epochs)) {
n_vertices <- nrow(graph)
if (n_vertices <= 10000) {
n_epochs <- 500
}
else {
n_epochs <- 200
}
}
uwot(
X = X,
nn_method = graph,
is_similarity_graph = TRUE,
n_components = n_components,
n_epochs = n_epochs,
alpha = learning_rate,
init = init,
init_sdev = init_sdev,
spread = spread,
min_dist = min_dist,
gamma = repulsion_strength,
negative_sample_rate = negative_sample_rate,
a = a,
b = b,
method = method,
approx_pow = approx_pow,
pcg_rand = pcg_rand,
fast_sgd = fast_sgd,
n_sgd_threads = n_sgd_threads,
grain_size = grain_size,
verbose = verbose,
batch = batch,
opt_args = opt_args,
epoch_callback = epoch_callback,
pca_method = pca_method
)
}
#' Merge Similarity Graph by Simplicial Set Union
#'
#' Combine two similarity graphs by treating them as fuzzy topological sets and
#' forming the union.
#'
#' @param x A sparse matrix representing the first similarity graph in the union
#' operation.
#' @param y A sparse matrix representing the second similarity graph in the
#' union operation.
#' @param n_threads Number of threads to use when resetting the local metric.
#' Default is half the number of concurrent threads supported by the system.
#' @param verbose If \code{TRUE}, log progress to the console.
#' @returns A sparse matrix containing the union of \code{x} and \code{y}.
#' @examples
#'
#' # Form two different "views" of the same data
#' iris30 <- iris[c(1:10, 51:60, 101:110), ]
#' iris_sg12 <- similarity_graph(iris30[, 1:2], n_neighbors = 5)
#' iris_sg34 <- similarity_graph(iris30[, 3:4], n_neighbors = 5)
#'
#' # Combine the two representations into one
#' iris_combined <- simplicial_set_union(iris_sg12, iris_sg34)
#'
#' # Optimize the layout based on the combined view
#' iris_combined_umap <- optimize_graph_layout(iris_combined, n_epochs = 100)
#' @export
simplicial_set_union <-
function(x,
y,
n_threads = NULL,
verbose = FALSE) {
if (!is_sparse_matrix(x)) {
stop("similarity graph x must be a sparse matrix")
}
if (!is_sparse_matrix(y)) {
stop("similarity graph y must be a sparse matrix")
}
if (!all(dim(x) == dim(y))) {
stop("x and y must have identical dimensions")
}
z <- methods::as(x + y, "TsparseMatrix")
z@x <- general_sset_union_cpp(
x@p,
x@i,
x@x,
y@p,
y@i,
y@x,
z@i,
z@j,
z@x
)
z <- Matrix::drop0(z)
reset_local_connectivity(
z,
reset_local_metric = TRUE,
n_threads = n_threads,
verbose = verbose
)
}
#' Merge Similarity Graph by Simplicial Set Intersection
#'
#' Combine two similarity graphs by treating them as fuzzy topological sets and
#' forming the intersection.
#'
#' @param x A sparse matrix representing the first similarity graph in the
#' intersection operation.
#' @param y A sparse matrix representing the second similarity graph in the
#' intersection operation.
#' @param weight A value between \code{0 - 1}, controlling the relative
#' influence of \code{x} and \code{y} in the intersection. Default
#' (\code{0.5}) gives equal influence. Values smaller than \code{0.5} put more
#' weight on \code{x}. Values greater than \code{0.5} put more weight on
#' \code{y}.
#' @param n_threads Number of threads to use when resetting the local metric.
#' Default is half the number of concurrent threads supported by the system.
#' @param verbose If \code{TRUE}, log progress to the console.
#' @returns A sparse matrix containing the intersection of \code{x} and
#' \code{y}.
#' @examples
#'
#' # Form two different "views" of the same data
#' iris30 <- iris[c(1:10, 51:60, 101:110), ]
#' iris_sg12 <- similarity_graph(iris30[, 1:2], n_neighbors = 5)
#' iris_sg34 <- similarity_graph(iris30[, 3:4], n_neighbors = 5)
#'
#' # Combine the two representations into one
#' iris_combined <- simplicial_set_intersect(iris_sg12, iris_sg34)
#'
#' # Optimize the layout based on the combined view
#' iris_combined_umap <- optimize_graph_layout(iris_combined, n_epochs = 100)
#' @export
simplicial_set_intersect <- function(x, y, weight = 0.5, n_threads = NULL,
verbose = FALSE) {
if (weight < 0 || weight > 1) {
stop("weight must be between 0-1")
}
if (!is_sparse_matrix(x)) {
stop("similarity graph x must be a sparse matrix")
}
if (!is_sparse_matrix(y)) {
stop("similarity graph y must be a sparse matrix")
}
if (!all(dim(x) == dim(y))) {
stop("x and y must have identical dimensions")
}
set_intersect(A = x, B = y, weight = weight, reset_connectivity = TRUE,
reset_local_metric = TRUE, n_threads = n_threads,
verbose = verbose)
}
# Function that does all the real work
uwot <- function(X, n_neighbors = 15, n_components = 2, metric = "euclidean",
n_epochs = NULL,
alpha = 1, scale = FALSE,
init = "spectral", init_sdev = NULL,
spread = 1, min_dist = 0.01,
set_op_mix_ratio = 1.0, local_connectivity = 1.0,
bandwidth = 1.0, gamma = 1.0,
negative_sample_rate = 5.0, a = NULL, b = NULL,
nn_method = NULL, n_trees = 50,
search_k = 2 * n_neighbors * n_trees,
method = "umap",
perplexity = 50, approx_pow = FALSE,
y = NULL, target_n_neighbors = n_neighbors,
target_metric = "euclidean",
target_weight = 0.5,
n_threads = NULL,
n_sgd_threads = 0,
grain_size = 1,
kernel = "gauss",
ret_model = FALSE, ret_nn = FALSE, ret_fgraph = FALSE,
ret_sigma = FALSE, ret_localr = FALSE,
pca = NULL, pca_center = TRUE, pca_method = NULL,
pcg_rand = TRUE,
fast_sgd = FALSE,
batch = FALSE,
opt_args = NULL,
tmpdir = tempdir(),
verbose = getOption("verbose", TRUE),
epoch_callback = NULL,
binary_edge_weights = FALSE,
dens_scale = NULL,
is_similarity_graph = FALSE,
seed = NULL) {
if (is.null(n_threads)) {
n_threads <- default_num_threads()
}
method <- match.arg(tolower(method), c("umap", "tumap", "largevis"))
if (method == "umap") {
if (is.null(a) || is.null(b)) {
ab_res <- find_ab_params(spread = spread, min_dist = min_dist)
a <- ab_res[1]
b <- ab_res[2]
tsmessage("UMAP embedding parameters a = ", formatC(a), " b = ", formatC(b))
}
else {
# set min_dist and spread to NULL so if ret_model = TRUE, their default
# values are not mistaken for having been used for anything
min_dist <- NULL
spread <- NULL
}
}
if (n_neighbors < 2) {
stop("n_neighbors must be >= 2")
}
if (set_op_mix_ratio < 0.0 || set_op_mix_ratio > 1.0) {
stop("set_op_mix_ratio must be between 0.0 and 1.0")
}
if (local_connectivity < 1.0) {
stop("local_connectivity cannot be < 1.0")
}
if (!is.null(y) && is.numeric(y) && any(is.na(y))) {
stop("numeric y cannot contain NA")
}
if (!is.numeric(n_components) || n_components < 1) {
stop("'n_components' must be a positive integer")
}
if (!is.null(pca)) {
if (!is.numeric(pca) || pca < 1) {
stop("'pca' must be a positive integer")
}
if (pca < n_components) {
stop("'pca' must be >= n_components")
}
    if (pca > min(nrow(X), ncol(X))) {
stop("'pca' must be <= min(nrow(X), ncol(X))")
}
}
if (is.null(pca_method)) {
pca_method <- "auto"
}
pca_method <-
match.arg(pca_method,
choices = c("irlba", "svdr", "bigstatsr", "svd", "auto"))
if (fast_sgd) {
n_sgd_threads <- "auto"
pcg_rand <- FALSE
approx_pow <- TRUE
}
if (n_threads < 0) {
stop("n_threads cannot be < 0")
}
if (n_threads %% 1 != 0) {
n_threads <- round(n_threads)
tsmessage("Non-integer 'n_threads' provided. Setting to ", n_threads)
}
if (n_sgd_threads == "auto") {
n_sgd_threads <- n_threads
}
if (n_sgd_threads < 0) {
stop("n_sgd_threads cannot be < 0")
}
if (n_sgd_threads %% 1 != 0) {
n_sgd_threads <- round(n_sgd_threads)
tsmessage("Non-integer 'n_sgd_threads' provided. Setting to ", n_sgd_threads)
}
if (!is.null(dens_scale) && approx_pow) {
warning("approx_pow parameter is ignored when using dens_scale")
approx_pow <- FALSE
}
# 110: for more consistent reproducibility set a user-supplied seed
if (!is.null(seed)) {
tsmessage("Setting random seed ", seed)
set.seed(seed)
}
ret_extra <- ret_model || ret_nn || ret_fgraph || ret_sigma || ret_localr
# Store categorical columns to be used to generate the graph
Xcat <- NULL
# number of original columns in data frame (or matrix)
# will be used only if using df or matrix and ret_model = TRUE
norig_col <- NULL
# row names for the input data, which we will apply to the embedding if
# needed
Xnames <- NULL
num_precomputed_nns <- 0
if (is.null(X)) {
if (!nn_is_precomputed(nn_method)) {
stop("If X is NULL, must provide NN data in nn_method")
}
if (is.character(init) && tolower(init) %in% c("spca", "pca")) {
stop("init = 'pca' and 'spca' can't be used with X = NULL")
}
if (length(nn_method) == 0) {
stop("Incorrect format for precalculated neighbor data")
}
n_vertices <- x2nv(nn_method)
stopifnot(n_vertices > 0)
num_precomputed_nns <- check_graph_list(nn_method, n_vertices,
bipartite = FALSE)
Xnames <- nn_graph_row_names_list(nn_method)
}
else if (methods::is(X, "dist")) {
if (ret_model) {
stop("Can only create models with dense matrix or data frame input")
}
checkna(X)
n_vertices <- attr(X, "Size")
tsmessage("Read ", n_vertices, " rows")
Xnames <- labels(X)
}
else if (is_sparse_matrix(X)) {
if (ret_model) {
stop("Can only create models with dense matrix or data frame input")
}
checkna(X)
n_vertices <- nrow(X)
if (ncol(X) != n_vertices) {
stop("Sparse matrices are only supported as distance matrices")
}
tsmessage("Read ", n_vertices, " rows of sparse distance matrix")
Xnames <- row.names(X)
}
else {
cat_ids <- NULL
norig_col <- ncol(X)
if (methods::is(X, "data.frame") || methods::is(X, "matrix")) {
cat_res <- find_categoricals(metric)
metric <- cat_res$metrics
cat_ids <- cat_res$categoricals
# Convert categorical columns to factors if they aren't already
if (!is.null(cat_ids)) {
X[, cat_ids] <- sapply(X[, cat_ids, drop = FALSE], factor,
simplify = methods::is(X, "matrix"))
Xcat <- X[, cat_ids, drop = FALSE]
}
if (methods::is(X, "data.frame")) {
indexes <- which(vapply(X, is.numeric, logical(1)))
if (length(indexes) == 0) {
stop("No numeric columns found")
}
tsmessage("Converting dataframe to numerical matrix")
if (length(indexes) != ncol(X)) {
X <- X[, indexes]
}
X <- as.matrix(X)
}
if (n_components > ncol(X)) {
warning(
"n_components ",
"> number of columns in input data: ",
n_components,
" > ",
ncol(X),
", this may give poor or unexpected results"
)
}
}
else {
stop("Unknown input data format")
}
checkna(X)
n_vertices <- nrow(X)
tsmessage(
"Read ", n_vertices, " rows and found ", ncol(X),
" numeric columns",
appendLF = is.null(cat_ids)
)
if (length(cat_ids) > 0) {
tsmessage(" and ", pluralize("categorical column", length(cat_ids)),
time_stamp = FALSE
)
}
Xnames <- row.names(X)
X <- scale_input(X,
scale_type = scale, ret_model = ret_model,
verbose = verbose
)
}
# Store number of precomputed nn if X is non-NULL (NULL X case handled above)
if (nn_is_precomputed(nn_method) && num_precomputed_nns == 0) {
num_precomputed_nns <- check_graph_list(nn_method, n_vertices,
bipartite = FALSE)
if (is.null(Xnames)) {
Xnames <- nn_graph_row_names_list(nn_method)
}
}
if (method == "largevis" && kernel == "knn") {
n_neighbors <- perplexity
}
if (max(n_neighbors) > n_vertices) {
# pre-calculated nearest neighbors ignores the user-supplied n_neighbors
# which is handled later
if (!is.list(nn_method)) {
if (method == "largevis") {
# for LargeVis, n_neighbors normally determined from perplexity not an
# error to be too large
tsmessage("Setting n_neighbors to ", n_vertices)
n_neighbors <- n_vertices
}
else {
stop("n_neighbors must be smaller than the dataset size")
}
}
}
if (!is.list(metric)) {
metrics <- list(c())
names(metrics) <- metric
}
else {
metrics <- metric
}
# For typical case of numeric matrix X and not using hamming distance, save
# PCA results here in case initialization uses PCA too
pca_models <- NULL
pca_shortcut <- FALSE
if (!is.null(pca) && length(metric) == 1 && metric != "hamming" &&
is.matrix(X) && ncol(X) > pca) {
tsmessage("Reducing X column dimension to ", pca, " via PCA")
pca_res <- pca_init(X,
ndim = pca, center = pca_center, pca_method = pca_method,
ret_extra = ret_model, verbose = verbose
)
if (ret_model) {
X <- pca_res$scores
pca_models[["1"]] <- pca_res[c("center", "rotation")]
pca_res <- NULL
}
else {
X <- pca_res
}
pca_shortcut <- TRUE
}
if (is_similarity_graph) {
d2sr <-
list(
V = nn_method,
nns = NULL,
pca_models = NULL,
sigma = NULL,
rho = NULL
)
need_sigma <- FALSE
}
else {
need_sigma <- ret_sigma || ret_localr || !is.null(dens_scale)
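    # data2set builds the affinity matrix V from the input data: the fuzzy
    # simplicial set for UMAP/tumap, or the perplexity-calibrated
    # probabilities for LargeVis. It also returns the nearest neighbor data,
    # any PCA models used for input reduction, and (when need_sigma is TRUE)
    # the smooth knn calibration results (sigma, rho, dint).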
d2sr <- data2set(X, Xcat, n_neighbors, metrics, nn_method,
n_trees, search_k,
method,
set_op_mix_ratio, local_connectivity, bandwidth,
perplexity, kernel, need_sigma,
n_threads, grain_size,
ret_model,
pca = pca, pca_center = pca_center, pca_method = pca_method,
n_vertices = n_vertices,
tmpdir = tmpdir,
verbose = verbose
)
}
V <- d2sr$V
nns <- d2sr$nns
if (is.null(pca_models)) {
pca_models <- d2sr$pca_models
}
# Calculate approximate local radii
sigma <- NULL
rho <- NULL
localr <- NULL
dint <- NULL
if (need_sigma) {
sigma <- d2sr$sigma
rho <- d2sr$rho
dint <- d2sr$dint
}
if (!is.null(dens_scale) || ret_localr) {
localr <- sigma + rho
}
if (!is.null(y)) {
tsmessage("Processing y data")
if (!is.list(target_metric)) {
target_metrics <- list(c())
names(target_metrics) <- target_metric
}
else {
target_metrics <- target_metric
}
ycat <- NULL
ycat_ids <- NULL
if (methods::is(y, "data.frame")) {
ycat_res <- find_categoricals(target_metric)
target_metric <- ycat_res$metrics
ycat_ids <- ycat_res$categoricals
if (!is.null(ycat_ids)) {
ycat <- y[, ycat_ids, drop = FALSE]
}
else {
ycindexes <- which(vapply(y, is.factor, logical(1)))
if (length(ycindexes) > 0) {
ycat <- (y[, ycindexes, drop = FALSE])
}
}
yindexes <- which(vapply(y, is.numeric, logical(1)))
if (length(yindexes) > 0) {
y <- as.matrix(y[, yindexes])
}
else {
y <- NULL
}
}
else if (is.list(y)) {
nn_method <- y
}
else if (is.numeric(y)) {
y <- as.matrix(y)
}
else if (is.factor(y)) {
ycat <- data.frame(y)
y <- NULL
}
if (!is.null(y)) {
yd2sr <- data2set(y, ycat, target_n_neighbors, target_metrics, nn_method,
n_trees, search_k,
method,
set_op_mix_ratio = 1.0,
local_connectivity = 1.0,
bandwidth = 1.0,
perplexity = perplexity, kernel = kernel,
ret_sigma = FALSE,
n_threads = n_threads, grain_size = grain_size,
ret_model = FALSE,
pca = pca, pca_center = TRUE, pca_method = pca_method,
n_vertices = n_vertices,
tmpdir = tmpdir,
verbose = verbose
)
tsmessage(
"Intersecting X and Y sets with target weight = ",
formatC(target_weight)
)
# behavior for supervised UMAP: do reset local connectivity
# don't reset metric (same as Python UMAP as of 0.5.3)
V <- set_intersect(V, yd2sr$V, target_weight, reset_connectivity = TRUE)
yd2sr$V <- NULL
yd2sr$nns <- NULL
}
else if (!is.null(ycat)) {
V <- categorical_intersection_df(ycat, V,
weight = target_weight,
verbose = verbose
)
}
}
if (!(ret_model || ret_nn)) {
nns <- NULL
gc()
}
if (methods::is(init, "matrix")) {
if (nrow(init) != n_vertices || ncol(init) != n_components) {
stop("init matrix does not match necessary configuration for X: ", "should
have dimensions (", n_vertices, ", ", n_components, ")")
}
tsmessage("Initializing from user-supplied matrix")
embedding <- scale_coords(init, init_sdev, verbose = verbose)
}
else if (!(methods::is(init, "character") && length(init) == 1)) {
if (is.null(init) && !is.null(n_epochs) && n_epochs == 0) {
embedding <- NULL
if (!ret_extra) {
warning("Neither high-dimensional nor low-dimensional data will be ",
"returned with this combination of settings")
}
if (ret_model) {
warning("Returning a model but it will not be valid for transforming ",
"new data")
}
}
else {
stop("init should be either a matrix or string describing the ",
"initialization method")
}
}
else {
init <- match.arg(tolower(init), c(
"spectral", "random", "lvrandom", "normlaplacian",
"laplacian", "spca", "pca", "inormlaplacian", "ispectral",
"agspectral", "irlba_spectral", "irlba_laplacian", "pacpca"
))
if (init_is_spectral(init)) {
connected <- connected_components(V)
if (connected$n_components > 1) {
tsmessage("Found ", connected$n_components, " connected components, ", appendLF = FALSE)
if (is.null(X)) {
tsmessage("falling back to random initialization", time_stamp = FALSE)
init <- "random"
}
else {
tsmessage("falling back to 'spca' initialization with init_sdev = 1",
time_stamp = FALSE)
init <- "spca"
init_sdev <- 1
}
}
}
# Don't repeat PCA initialization if we've already done it once
if (pca_shortcut && init %in% c("spca", "pca", "pacpca") && pca >= n_components) {
embedding <- X[, 1:n_components]
switch (init,
spca = tsmessage("Initializing from scaled PCA"),
pca = tsmessage("Initializing from PCA"),
pacpca = tsmessage("Initializing from PaCMAP-style PCA"),
stop("Unknown init method '", init, "'")
)
}
else {
embedding <- switch(init,
spectral = spectral_init(V, ndim = n_components, verbose = verbose),
random = rand_init(n_vertices, n_components, verbose = verbose),
lvrandom = rand_init_lv(n_vertices, n_components, verbose = verbose),
normlaplacian = normalized_laplacian_init(V,
ndim = n_components,
verbose = verbose
),
laplacian = laplacian_eigenmap(V, ndim = n_components, verbose = verbose),
# we handle scaling pca below
spca = pca_init(X, ndim = n_components, pca_method = pca_method,
verbose = verbose),
pca = pca_init(X, ndim = n_components, pca_method = pca_method,
verbose = verbose),
pacpca = pca_init(X, ndim = n_components, pca_method = pca_method,
verbose = verbose),
ispectral = irlba_spectral_init(V, ndim = n_components, verbose = verbose),
inormlaplacian = irlba_normalized_laplacian_init(V,
ndim = n_components,
verbose = verbose
),
agspectral = agspectral_init(V,
n_neg_nbrs = negative_sample_rate,
ndim = n_components, verbose = verbose
),
irlba_spectral = spectral_init(V, ndim = n_components, verbose = verbose, force_irlba = TRUE),
irlba_laplacian = laplacian_eigenmap(V, ndim = n_components, verbose = verbose, force_irlba = TRUE),
stop("Unknown initialization method: '", init, "'")
)
}
if (init == "pacpca") {
embedding <- 0.01 * embedding
}
if (!is.null(init_sdev) || init == "spca") {
if (is.null(init_sdev)) {
init_sdev <- 1e-4
}
if (is.numeric(init_sdev)) {
embedding <- scale_coords(embedding, init_sdev, verbose = verbose)
}
else if (is.character(init_sdev) && init_sdev == "range") {
# #99: range scale coordinates like python UMAP does
tsmessage("Range-scaling initial input columns to 0-10")
embedding <- apply(embedding, 2, range_scale, max = 10.0)
}
}
}
if (any(is.na(embedding))) {
stop("Initial data contains NA values: is n_components too high?")
}
if (is.null(n_epochs) || n_epochs < 0) {
if (method == "largevis") {
n_epochs <- lvish_epochs(n_vertices, V)
}
else {
if (n_vertices <= 10000) {
n_epochs <- 500
}
else {
n_epochs <- 200
}
}
}
full_opt_args <- get_opt_args(opt_args, alpha)
if (binary_edge_weights) {
V@x <- rep(1, length(V@x))
}
if (n_epochs > 0) {
if (any(apply(embedding, 2, stats::sd) > 10.0)) {
warning("Initial embedding standard deviation > 10.0, this can lead to ",
"poor optimization")
}
# remove edges which can't be sampled due to n_epochs
V@x[V@x < max(V@x) / n_epochs] <- 0
V <- Matrix::drop0(V)
# Create the (0-indexed) indices of the head and tail of each directed edge
# in V. Graph is symmetric, so both (i->j) and (j->i) are present
if (batch) {
V <- Matrix::t(V)
# head is ordered in non-decreasing order of node index
positive_head <- Matrix::which(V != 0, arr.ind = TRUE)[, 2] - 1
# tail is unordered
positive_tail <- V@i
}
else {
# Use the Python UMAP ordering
# head is unordered
positive_head <- V@i
# tail is ordered in non-decreasing order of node index
positive_tail <- Matrix::which(V != 0, arr.ind = TRUE)[, 2] - 1
}
# start/end pointers into the ordered vector
positive_ptr <- V@p
epochs_per_sample <- make_epochs_per_sample(V@x, n_epochs)
tsmessage(
"Commencing optimization for ", n_epochs, " epochs, with ",
length(positive_head), " positive edges",
pluralize("thread", n_sgd_threads, " using")
)
ai <- NULL
if (!is.null(dens_scale)) {
ai <- scale_radii(localr, dens_scale, a)
method <- "leopold"
if (ret_model) {
# store the linear transform from localr to ai for transforming new data
lai2 <- 2 * log(range(ai))
llr <- -log(rev(range(localr)))
rad_coeff <- stats::lm(lai2 ~ llr)$coefficients
}
}
method <- tolower(method)
method_args <- switch(method,
umap = list(a = a, b = b, gamma = gamma, approx_pow = approx_pow),
tumap = list(),
# a = 1 b = 10 for final phase of PaCMAP optimization
pacmap = list(a = a, b = b),
largevis = list(gamma = gamma),
leopold = list(ai = ai, b = b, ndim = n_components),
stop("Unknown dimensionality reduction method '", method, "'")
)
embedding <- t(embedding)
embedding <- optimize_layout_r(
head_embedding = embedding,
tail_embedding = NULL,
positive_head = positive_head,
positive_tail = positive_tail,
positive_ptr = positive_ptr,
n_epochs = n_epochs,
n_head_vertices = n_vertices,
n_tail_vertices = n_vertices,
epochs_per_sample = epochs_per_sample,
method = method,
method_args = method_args,
initial_alpha = alpha,
opt_args = full_opt_args,
negative_sample_rate = negative_sample_rate,
pcg_rand = pcg_rand,
batch = batch,
n_threads = n_sgd_threads,
grain_size = grain_size,
move_other = TRUE,
epoch_callback = epoch_callback,
verbose = verbose
)
embedding <- t(embedding)
gc()
# Center the points before returning
embedding <- scale(embedding, center = TRUE, scale = FALSE)
if (is.null(row.names(embedding)) &&
!is.null(Xnames) && length(Xnames) == nrow(embedding)) {
row.names(embedding) <- Xnames
}
tsmessage("Optimization finished")
}
if (ret_extra) {
nblocks <- length(nns)
res <- list(embedding = embedding)
if (ret_model) {
res <- append(res, list(
scale_info = if (!is.null(X)) { attr_to_scale_info(X) } else { NULL },
search_k = search_k,
local_connectivity = local_connectivity,
n_epochs = n_epochs,
alpha = alpha,
negative_sample_rate = negative_sample_rate,
method = method,
a = a,
b = b,
gamma = gamma,
approx_pow = approx_pow,
metric = metrics,
norig_col = norig_col,
pcg_rand = pcg_rand,
batch = batch,
opt_args = full_opt_args,
num_precomputed_nns = num_precomputed_nns,
# #95: min_dist and spread are exported for documentation purposes only
min_dist = min_dist,
spread = spread,
binary_edge_weights = binary_edge_weights,
seed = seed
))
if (nn_is_precomputed(nn_method)) {
res$n_neighbors <- nn_graph_nbrs_list(nn_method)
}
else {
res$n_neighbors <- n_neighbors
}
if (method == "leopold") {
res$dens_scale <- dens_scale
res$ai <- ai
res$rad_coeff <- rad_coeff
}
if (nblocks > 1) {
if (!nn_is_precomputed(nn_method)) {
res$nn_index <- list()
for (i in 1:nblocks) {
res$nn_index[[i]] <- nns[[i]]$index
}
}
}
else {
if (!is.null(nns[[1]]$index)) {
res$nn_index <- nns[[1]]$index
if (is.null(res$metric[[1]])) {
# 31: Metric usually lists column indices or names, NULL means use all
# of them, but for loading the NN index we need the number of
# columns explicitly (we don't have access to the column dimension of
# the input data at load time)
# To be sure of the dimensionality, fetch the first item from the
# index and see how many elements are in the returned vector.
          if (!is.null(X)) {
rcppannoy <- get_rcppannoy(res$nn_index)
res$metric[[1]] <- list(ndim = length(rcppannoy$getItemsVector(0)))
} else {
res$metric[[1]] <- list()
}
}
}
else {
if (nn_is_precomputed(nn_method)) {
tsmessage("Note: model requested with precomputed neighbors. ",
"For transforming new data, distance data must be ",
"provided separately")
}
}
}
if (!is.null(pca_models)) {
res$pca_models <- pca_models
}
}
if (ret_nn) {
res$nn <- list()
for (i in 1:nblocks) {
if (is.list(nns[[i]])) {
res$nn[[i]] <- list(idx = nns[[i]]$idx, dist = nns[[i]]$dist)
if (!is.null(Xnames) && nrow(res$nn[[i]]$idx) == length(Xnames)) {
row.names(res$nn[[i]]$idx) <- Xnames
row.names(res$nn[[i]]$dist) <- Xnames
}
}
else if (is_sparse_matrix(nns[[i]])) {
res$nn[[i]] <- nns[[i]]
if (!is.null(Xnames) && nrow(res$nn[[i]]) == length(Xnames)) {
row.names(res$nn[[i]]) <- Xnames
colnames(res$nn[[i]]) <- Xnames
}
}
}
names(res$nn) <- names(nns)
}
if (ret_fgraph) {
if (method == "largevis") {
res$P <- V
}
else {
res$fgraph <- V
}
}
if (ret_sigma) {
res$sigma <- sigma
res$rho <- rho
res$dint <- dint
}
if (ret_localr && !is.null(localr)) {
res$localr <- localr
}
}
else {
res <- embedding
}
res
}
#' Save or Load a Model
#'
#' Functions to write a UMAP model to a file, and to restore it.
#'
#' @param model a UMAP model created by \code{\link{umap}}.
#' @param file name of the file where the model is to be saved or read from.
#' @param unload if \code{TRUE}, unload all nearest neighbor indexes for the
#' model. The \code{model} will no longer be valid for use in
#' \code{\link{umap_transform}} and the temporary working directory used
#' during model saving will be deleted. You will need to reload the model with
#' \code{load_uwot} to use the model. If \code{FALSE}, then the model can be
#' re-used without reloading, but you must manually unload the NN index when
#' you are finished using it if you want to delete the temporary working
#' directory. To unload manually, use \code{\link{unload_uwot}}. The absolute
#' path of the working directory is found in the \code{mod_dir} item of the
#' return value.
#' @param verbose if \code{TRUE}, log information to the console.
#' @return \code{model} with one extra item: \code{mod_dir}, which contains the
#' path to the working directory. If \code{unload = FALSE} then this directory
#' still exists after this function returns, and can be cleaned up with
#' \code{\link{unload_uwot}}. If you don't care about cleaning up this
#' directory, or \code{unload = TRUE}, then you can ignore the return value.
#' @examples
#' iris_train <- iris[c(1:10, 51:60), ]
#' iris_test <- iris[100:110, ]
#'
#' # create model
#' model <- umap(iris_train, ret_model = TRUE, n_epochs = 20)
#'
#' # save without unloading: this leaves behind a temporary working directory
#' model_file <- tempfile("iris_umap")
#' model <- save_uwot(model, file = model_file)
#'
#' # The model can continue to be used
#' test_embedding <- umap_transform(iris_test, model)
#'
#' # To manually unload the model from memory when finished and to clean up
#' # the working directory (this doesn't touch your model file)
#' unload_uwot(model)
#'
#' # At this point, model cannot be used with umap_transform, this would fail:
#' # test_embedding2 <- umap_transform(iris_test, model)
#'
#' # restore the model: this also creates a temporary working directory
#' model2 <- load_uwot(file = model_file)
#' test_embedding2 <- umap_transform(iris_test, model2)
#'
#' # Unload and clean up the loaded model temp directory
#' unload_uwot(model2)
#'
#' # clean up the model file
#' unlink(model_file)
#'
#' # save with unloading: this deletes the temporary working directory but
#' # doesn't allow the model to be re-used
#' model3 <- umap(iris_train, ret_model = TRUE, n_epochs = 20)
#' model_file3 <- tempfile("iris_umap")
#' model3 <- save_uwot(model3, file = model_file3, unload = TRUE)
#'
#' @seealso \code{\link{load_uwot}}, \code{\link{unload_uwot}}
#' @export
save_uwot <- function(model, file, unload = FALSE, verbose = FALSE) {
if (!all_nn_indices_are_loaded(model)) {
stop("cannot save: NN index is unloaded")
}
wd <- getwd()
model_file <- abspath(file)
if (file.exists(model_file)) {
stop("model file ", model_file, " already exists")
}
tmp_model_file <- NULL
tryCatch(
{
# create directory to store files in
mod_dir <- tempfile(pattern = "dir")
tsmessage("Creating temp model dir ", mod_dir)
dir.create(mod_dir)
# create the tempdir/uwot subdirectory
uwot_dir <- file.path(mod_dir, "uwot")
tsmessage("Creating dir ", mod_dir)
dir.create(uwot_dir)
# save model file to tempdir/uwot/model
model_tmpfname <- file.path(uwot_dir, "model")
saveRDS(model, file = model_tmpfname)
# save each nn index inside tempdir/uwot/model
metrics <- names(model$metric)
n_metrics <- length(metrics)
for (i in 1:n_metrics) {
nn_tmpfname <- file.path(uwot_dir, paste0("nn", i))
if (n_metrics == 1) {
model$nn_index$ann$save(nn_tmpfname)
}
else {
model$nn_index[[i]]$ann$save(nn_tmpfname)
}
}
# archive the files under the temp dir into the single target file
# change directory so the archive only contains one directory
tsmessage("Changing to ", mod_dir)
setwd(mod_dir)
tmp_model_file <- abspath(file)
tsmessage("Creating ", tmp_model_file)
# #109: Windows 7 tar needs "--force-local" to avoid interpreting colon
# as indicating a remote machine
extra_flags <- ""
if (is_win7()) {
extra_flags <- "--force-local"
}
utils::tar(tarfile = tmp_model_file,
extra_flags = extra_flags,
files = "uwot/")
},
finally = {
setwd(wd)
if (!is.null(tmp_model_file) && model_file != tmp_model_file) {
tsmessage("Copying ", tmp_model_file, " to ", model_file)
file.copy(from = tmp_model_file, to = model_file)
}
model$mod_dir <- mod_dir
if (unload) {
unload_uwot(model, cleanup = TRUE, verbose = verbose)
}
}
)
model
}
#' Save or Load a Model
#'
#' Functions to write a UMAP model to a file, and to restore it.
#'
#' @param file name of the file where the model is to be saved or read from.
#' @param verbose if \code{TRUE}, log information to the console.
#' @return The model saved at \code{file}, for use with
#' \code{\link{umap_transform}}. Additionally, it contains an extra item:
#' \code{mod_dir}, which contains the path to the temporary working directory
#' used during loading of the model. This directory cannot be removed until
#' this model has been unloaded by using \code{\link{unload_uwot}}.
#' @examples
#' iris_train <- iris[c(1:10, 51:60), ]
#' iris_test <- iris[100:110, ]
#'
#' # create model
#' model <- umap(iris_train, ret_model = TRUE, n_epochs = 20)
#'
#' # save without unloading: this leaves behind a temporary working directory
#' model_file <- tempfile("iris_umap")
#' model <- save_uwot(model, file = model_file)
#'
#' # The model can continue to be used
#' test_embedding <- umap_transform(iris_test, model)
#'
#' # To manually unload the model from memory when finished and to clean up
#' # the working directory (this doesn't touch your model file)
#' unload_uwot(model)
#'
#' # At this point, model cannot be used with umap_transform, this would fail:
#' # test_embedding2 <- umap_transform(iris_test, model)
#'
#' # restore the model: this also creates a temporary working directory
#' model2 <- load_uwot(file = model_file)
#' test_embedding2 <- umap_transform(iris_test, model2)
#'
#' # Unload and clean up the loaded model temp directory
#' unload_uwot(model2)
#'
#' # clean up the model file
#' unlink(model_file)
#'
#' # save with unloading: this deletes the temporary working directory but
#' # doesn't allow the model to be re-used
#' model3 <- umap(iris_train, ret_model = TRUE, n_epochs = 20)
#' model_file3 <- tempfile("iris_umap")
#' model3 <- save_uwot(model3, file = model_file3, unload = TRUE)
#'
#' @seealso \code{\link{save_uwot}}, \code{\link{unload_uwot}}
#' @export
load_uwot <- function(file, verbose = FALSE) {
# create directory to store files in
mod_dir <- tempfile(pattern = "dir")
tsmessage("Creating temp directory ", mod_dir)
dir.create(mod_dir)
# #109: Windows 7 tar needs "--force-local" to avoid interpreting colon
# as indicating a remote machine
extras <- NULL
if (is_win7()) {
extras <- "--force-local"
}
utils::untar(abspath(file),
exdir = mod_dir,
extras = extras,
verbose = verbose)
model_fname <- file.path(mod_dir, "uwot/model")
if (!file.exists(model_fname)) {
stop("Can't find model in ", file)
}
model <- readRDS(file = model_fname)
metrics <- names(model$metric)
n_metrics <- length(metrics)
for (i in 1:n_metrics) {
nn_fname <- file.path(mod_dir, paste0("uwot/nn", i))
if (!file.exists(nn_fname)) {
stop("Can't find nearest neighbor index ", nn_fname, " in ", file)
}
metric <- metrics[[i]]
# 31: need to specify the index dimensionality when creating the index
if (is.list(model$metric[[i]])) {
# in case where there is only one metric, the value is a one-item list
# named 'ndim' giving the number of dimensions directly: all columns
# are used in this metric
ndim <- model$metric[[i]]$ndim
}
else {
# otherwise, metric specifies the name or index used for each metric,
# so the dimension is the number of them
      ndim <- length(model$metric[[i]])
}
annoy_metric <- metric
if (metric == "correlation") {
annoy_metric <- "cosine"
}
ann <- create_ann(annoy_metric, ndim = ndim)
ann$load(nn_fname)
if (n_metrics == 1) {
model$nn_index <- list(ann = ann, type = "annoyv1", metric = annoy_metric)
}
else {
model$nn_index[[i]] <- list(ann = ann, type = "annoyv1", metric = annoy_metric)
}
}
model$mod_dir <- mod_dir
model
}
#' Unload a Model
#'
#' Unloads the UMAP model. This prevents the model being used with
#' \code{\link{umap_transform}}, but allows the temporary working directory
#' associated with saving or loading the model to be removed.
#'
#' @param model a UMAP model created by \code{\link{umap}}.
#' @param cleanup if \code{TRUE}, attempt to delete the temporary working
#' directory that was used in either the save or load of the model.
#' @param verbose if \code{TRUE}, log information to the console.
#'
#' @examples
#' iris_train <- iris[c(1:10, 51:60), ]
#' iris_test <- iris[100:110, ]
#'
#' # create model
#' model <- umap(iris_train, ret_model = TRUE, n_epochs = 20)
#'
#' # save without unloading: this leaves behind a temporary working directory
#' model_file <- tempfile("iris_umap")
#' model <- save_uwot(model, file = model_file)
#'
#' # The model can continue to be used
#' test_embedding <- umap_transform(iris_test, model)
#'
#' # To manually unload the model from memory when finished and to clean up
#' # the working directory (this doesn't touch your model file)
#' unload_uwot(model)
#'
#' # At this point, model cannot be used with umap_transform, this would fail:
#' # test_embedding2 <- umap_transform(iris_test, model)
#'
#' # restore the model: this also creates a temporary working directory
#' model2 <- load_uwot(file = model_file)
#' test_embedding2 <- umap_transform(iris_test, model2)
#'
#' # Unload and clean up the loaded model temp directory
#' unload_uwot(model2)
#'
#' # clean up the model file
#' unlink(model_file)
#'
#' # save with unloading: this deletes the temporary working directory but
#' # doesn't allow the model to be re-used
#' model3 <- umap(iris_train, ret_model = TRUE, n_epochs = 20)
#' model_file3 <- tempfile("iris_umap")
#' model3 <- save_uwot(model3, file = model_file3, unload = TRUE)
#'
#' @seealso \code{\link{save_uwot}}, \code{\link{load_uwot}}
#' @export
unload_uwot <- function(model, cleanup = TRUE, verbose = FALSE) {
tsmessage("Unloading NN index: model will be invalid")
metrics <- names(model$metric)
n_metrics <- length(metrics)
for (i in 1:n_metrics) {
if (n_metrics == 1) {
rcppannoy <- get_rcppannoy(model$nn_index)
rcppannoy$unload()
}
else {
rcppannoy <- get_rcppannoy(model$nn_index[[i]])
rcppannoy$unload()
}
}
if (cleanup) {
if (is.null(model$mod_dir)) {
tsmessage("Model is missing temp dir location, can't clean up")
return()
}
else {
mod_dir <- model$mod_dir
if (!file.exists(mod_dir)) {
tsmessage("model temp dir location '", mod_dir, "' no longer exists")
return()
}
tsmessage("Deleting temp model dir ", mod_dir)
res <- unlink(mod_dir, recursive = TRUE)
if (res != 0) {
tsmessage("Unable to delete tempdir ", mod_dir)
}
}
}
}
all_nn_indices_are_loaded <- function(model) {
if (is.null(model$nn_index)) {
stop("Invalid model: has no 'nn_index'")
}
if (is.list(model$nn_index) && is.null(model$nn_index$type)) {
for (i in 1:length(model$nn_index)) {
rcppannoy <- get_rcppannoy(model$nn_index[[i]])
if (rcppannoy$getNTrees() == 0) {
return(FALSE)
}
}
}
else {
rcppannoy <- get_rcppannoy(model$nn_index)
if (rcppannoy$getNTrees() == 0) {
return(FALSE)
}
}
TRUE
}
abspath <- function(filename) {
file.path(normalizePath(dirname(filename)), basename(filename))
}
# Half of whatever the C++ implementation thinks are the number of concurrent
# threads supported, but at least 1
default_num_threads <- function() {
max(1, hardware_concurrency() / 2)
}
# Get the number of vertices in X
x2nv <- function(X) {
if (is.list(X)) {
if (!is.null(X$idx)) {
n_vertices <- x2nv(X$idx)
}
else {
if (length(X) > 0) {
n_vertices <- x2nv(X[[1]])
}
else {
stop("Can't find n_vertices for list X")
}
}
}
else if (methods::is(X, "dist")) {
n_vertices <- attr(X, "Size")
}
else if (is_sparse_matrix(X)) {
# older code path where distance matrix was part of X rather than nn_method
# used nrow, but transform was not supported so nrow == ncol
n_vertices <- ncol(X)
}
else if (methods::is(X, "data.frame") || methods::is(X, "matrix")) {
n_vertices <- nrow(X)
}
else if (is.numeric(X)) {
n_vertices <- length(X)
}
else {
stop("Can't find number of vertices for X of type '", class(X)[1], "'")
}
n_vertices
}
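# Illustrative sketch of x2nv on some toy inputs (comments only, not executed
# at package load; the values follow directly from the branches above):
#   x2nv(matrix(0, nrow = 5, ncol = 3))              # 5  (rows of a matrix)
#   x2nv(dist(matrix(0, nrow = 7, ncol = 2)))        # 7  ("Size" attribute of a dist)
#   x2nv(1:10)                                       # 10 (length of a numeric vector)
#   x2nv(list(idx = matrix(0, nrow = 4, ncol = 2)))  # 4  (rows of the $idx matrix)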
data2set <- function(X, Xcat, n_neighbors, metrics, nn_method,
n_trees, search_k,
method,
set_op_mix_ratio, local_connectivity, bandwidth,
perplexity, kernel, ret_sigma,
n_threads, grain_size,
ret_model,
n_vertices = x2nv(X),
tmpdir = tempdir(),
pca = NULL, pca_center = TRUE, pca_method = "irlba",
verbose = FALSE) {
V <- NULL
nns <- list()
nblocks <- length(metrics)
sigma <- NULL
# Check for precalculated NN data in nn_method
if (is.list(nn_method)) {
if (is.null(nn_method$idx)) {
nblocks <- length(nn_method)
if (nblocks == 0) {
stop("Incorrect format for precalculated neighbor data")
}
}
else {
nblocks <- 1
# wrap nn data in a list so data is always a list of lists
nn_method <- list(nn_method)
}
metrics <- replicate(nblocks, NULL, simplify = FALSE)
names(metrics) <- rep("precomputed", nblocks)
}
if (nblocks > 1) {
tsmessage("Found ", nblocks, " blocks of data")
}
mnames <- tolower(names(metrics))
if (is.null(nn_method)) {
if (n_vertices < 4096 && !ret_model && all(mnames == "euclidean")) {
tsmessage("Using FNN for neighbor search, n_neighbors = ", n_neighbors)
nn_method <- "fnn"
}
else {
tsmessage("Using Annoy for neighbor search, n_neighbors = ", n_neighbors)
nn_method <- "annoy"
}
}
pca_models <- list()
for (i in 1:nblocks) {
metric <- mnames[[i]]
metric <- match.arg(metric, c(
"euclidean", "cosine", "manhattan",
"hamming", "correlation", "precomputed"
))
# Defaults for this block which can be overridden
pca_i <- pca
pca_center_i <- pca_center
subset <- metrics[[i]]
if (is.null(subset)) {
Xsub <- X
}
else if (is.list(subset)) {
# e.g. "euclidean" = list(1:10, pca_center = FALSE),
lsres <- lsplit_unnamed(subset)
if (is.null(lsres$unnamed)) {
stop("Error: no subset provided for block ", i)
}
if (length(lsres$unnamed) != 1) {
stop("Error: only one unnamed item should be provided for block ", i)
}
subset <- lsres$unnamed[[1]]
# possible overrides
if (!is.null(lsres$named)) {
lsnamed <- lsres$named
lsnames <- names(lsnamed)
if (!is.null(lsnamed$pca_center)) {
pca_center_i <- lsnamed$pca_center
}
# PCA argument can be NULL, so need to check if it was explicitly provided
if ("pca" %in% lsnames) {
pca_i <- lsnamed$pca
}
}
Xsub <- X[, subset, drop = FALSE]
}
else {
Xsub <- X[, subset, drop = FALSE]
}
if (!is.null(X) && is.matrix(X)) {
block_size <- ncol(Xsub)
if (block_size == 0) {
stop("Block ", i, " has zero size")
}
if (nblocks > 1) {
tsmessage(
"Processing block ", i, " of ", nblocks,
" with size ", block_size,
" using metric '", metric, "'"
)
}
}
else {
# X is NULL or dist or something like that
if (nblocks > 1) {
tsmessage(
"Processing block ", i, " of ", nblocks,
" using metric '", metric, "'"
)
}
}
if (!is.null(pca_i) && is.matrix(X) && metric != "hamming" &&
ncol(X) > pca_i && nrow(X) > pca_i) {
tsmessage("Reducing column dimension to ", pca_i, " via PCA")
pca_res <- pca_init(Xsub, pca_i,
ret_extra = ret_model,
center = pca_center_i,
pca_method = pca_method,
verbose = verbose
)
if (ret_model) {
Xsub <- pca_res$scores
pca_models[[as.character(i)]] <- pca_res[c("center", "rotation")]
pca_res <- NULL
}
else {
Xsub <- pca_res
}
}
nn_sub <- nn_method
# Extract this block of nn data from list of lists
if (metric == "precomputed") {
nn_sub <- nn_method[[i]]
n_neighbors <- NULL
}
x2set_res <- x2set(Xsub, n_neighbors, metric,
nn_method = nn_sub,
n_trees, search_k,
method,
set_op_mix_ratio, local_connectivity, bandwidth,
perplexity, kernel,
ret_sigma,
n_threads, grain_size,
ret_model,
n_vertices = n_vertices,
tmpdir = tmpdir,
verbose = verbose
)
Vblock <- x2set_res$V
nn <- x2set_res$nn
nns[[i]] <- nn
names(nns)[[i]] <- metric
if (is.null(V)) {
V <- Vblock
}
else {
# TODO: should at least offer the option to reset the local metric here
# TODO: make this the default (breaking change)
V <- set_intersect(V, Vblock, weight = 0.5, reset_connectivity = TRUE)
}
if (ret_sigma && is.null(sigma)) {
# No idea how to combine different neighborhood sizes so just return the
# first set
sigma <- x2set_res$sigma
rho <- x2set_res$rho
dint <- x2set_res$dint
}
}
if (!is.null(Xcat)) {
V <- categorical_intersection_df(Xcat, V, weight = 0.5, verbose = verbose)
}
res <- list(V = V, nns = nns, pca_models = pca_models)
if (!is.null(sigma)) {
res$sigma <- sigma
res$rho <- rho
res$dint <- dint
}
res
}
x2nn <- function(X, n_neighbors, metric, nn_method,
n_trees, search_k,
tmpdir = tempdir(),
n_threads, grain_size,
ret_model,
n_vertices = x2nv(X),
verbose = FALSE) {
if (is.list(nn_method)) {
validate_nn(nn_method, n_vertices)
nn <- nn_method
}
else {
nn_method <- match.arg(tolower(nn_method), c("annoy", "fnn"))
if (nn_method == "fnn" && metric != "euclidean") {
stop(
"nn_method = 'FNN' is only compatible with distance metric ",
"'euclidean'"
)
}
if (nn_method == "fnn" && ret_model) {
stop("nn_method = 'FNN' is incompatible with ret_model = TRUE")
}
nn <- find_nn(X, n_neighbors,
method = nn_method, metric = metric,
n_trees = n_trees, search_k = search_k,
tmpdir = tmpdir,
n_threads = n_threads, grain_size = grain_size,
ret_index = ret_model, verbose = verbose
)
}
nn
}
validate_nn <- function(nn_method, n_vertices) {
if (!is.matrix(nn_method$idx)) {
stop("Couldn't find precalculated 'idx' matrix")
}
if (nrow(nn_method$idx) != n_vertices) {
stop(
"Precalculated 'idx' matrix must have ", n_vertices,
" rows, but found ", nrow(nn_method$idx)
)
}
if (!is.matrix(nn_method$dist)) {
stop("Couldn't find precalculated 'dist' matrix")
}
  if (nrow(nn_method$dist) != n_vertices) {
    stop(
      "Precalculated 'dist' matrix must have ", n_vertices,
      " rows, but found ", nrow(nn_method$dist)
    )
  }
if (ncol(nn_method$dist) != ncol(nn_method$idx)) {
stop("Precalculated 'dist' matrix must have ", ncol(nn_method$idx), " cols, but
found ", ncol(nn_method$dist))
}
}
nn2set <- function(method, nn,
set_op_mix_ratio, local_connectivity, bandwidth,
perplexity, kernel,
ret_sigma,
n_threads, grain_size,
verbose = FALSE) {
sigma <- NULL
res <- list()
if (method == "largevis") {
n_vertices <- nrow(nn$dist)
if (perplexity >= n_vertices) {
stop("perplexity can be no larger than ", n_vertices - 1)
}
Vres <- perplexity_similarities(
nn = nn, perplexity = perplexity,
ret_sigma = ret_sigma,
n_threads = n_threads,
grain_size = grain_size,
kernel = kernel,
verbose = verbose
)
res$V <- Vres$matrix
if (ret_sigma && !is.null(Vres$sigma)) {
res$sigma <- Vres$sigma
res$dint <- Vres$dint
}
}
else {
Vres <- fuzzy_simplicial_set(
nn = nn,
set_op_mix_ratio = set_op_mix_ratio,
local_connectivity = local_connectivity,
bandwidth = bandwidth,
ret_sigma = ret_sigma,
n_threads = n_threads,
grain_size = grain_size,
verbose = verbose
)
if (ret_sigma) {
res$V <- Vres$matrix
res$sigma <- Vres$sigma
res$rho <- Vres$rho
}
else {
res$V <- Vres
}
}
res
}
x2set <- function(X, n_neighbors, metric, nn_method,
n_trees, search_k,
method,
set_op_mix_ratio, local_connectivity, bandwidth,
perplexity, kernel,
ret_sigma,
n_threads, grain_size,
ret_model,
n_vertices = x2nv(X),
tmpdir = tempdir(),
verbose = FALSE) {
if (is_sparse_matrix(nn_method)) {
nn <- nn_method
if (nrow(nn) != ncol(nn)) {
stop("Sparse distance matrix must have same number of rows and cols")
}
if (nrow(nn) != n_vertices) {
stop("Sparse distance matrix must have same dimensions as input data")
}
}
else {
nn <- x2nn(X,
n_neighbors = n_neighbors,
metric = metric,
nn_method = nn_method,
n_trees = n_trees, search_k = search_k,
tmpdir = tmpdir,
n_threads = n_threads, grain_size = grain_size,
ret_model = ret_model,
n_vertices = n_vertices,
verbose = verbose
)
if (any(is.infinite(nn$dist))) {
stop("Infinite distances found in nearest neighbors")
}
}
gc()
nn2set_res <- nn2set(method, nn,
set_op_mix_ratio, local_connectivity, bandwidth,
perplexity, kernel, ret_sigma,
n_threads, grain_size,
verbose = verbose
)
V <- nn2set_res$V
if (any(is.na(V))) {
stop("Non-finite entries in the input matrix")
}
gc()
res <- list(
nn = nn,
V = V
)
if (ret_sigma && !is.null(nn2set_res$sigma)) {
res$sigma <- nn2set_res$sigma
res$rho <- nn2set_res$rho
res$dint <- nn2set_res$dint
}
res
}
set_intersect <- function(A, B, weight = 0.5, reset_connectivity = TRUE,
reset_local_metric = FALSE, n_threads = NULL,
verbose = FALSE) {
A <- general_simplicial_set_intersection(
A, B, weight
)
A <- Matrix::drop0(A)
# https://github.com/lmcinnes/umap/issues/58#issuecomment-437633658
# For now always reset
if (reset_connectivity) {
A <- reset_local_connectivity(A, reset_local_metric = reset_local_metric,
n_threads = n_threads, verbose = verbose)
}
A
}
categorical_intersection_df <- function(X, V, weight = 0.5, verbose = FALSE) {
tsmessage(
"Carrying out categorical intersection for ",
pluralize("column", ncol(X))
)
for (i in 1:ncol(X)) {
V <- categorical_intersection(X[, i], V,
weight = weight,
verbose = (verbose && i == 1)
)
}
V
}
categorical_intersection <- function(x, V, weight, verbose = FALSE) {
if (is.null(V)) {
stop("V cannot be null for categorical intersection")
}
if (weight < 1.0) {
far_dist <- 2.5 * (1.0 / (1.0 - weight))
}
else {
far_dist <- 1.0e12
}
tsmessage(
"Applying categorical set intersection, weight = ", formatC(weight),
" far distance = ", formatC(far_dist)
)
V <- categorical_simplicial_set_intersection(V, x,
far_dist = far_dist,
verbose = verbose
)
V
}
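# Minimal sketch of the far_dist mapping above (illustrative only):
#   weight = 0.5  ->  far_dist = 2.5 * (1 / (1 - 0.5)) = 5
#   weight = 0.8  ->  far_dist = 2.5 * (1 / (1 - 0.8)) = 12.5
#   weight = 1.0  ->  far_dist = 1e12 (effectively infinite separation)
# Larger target weights push items with mismatched categories further apart.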
# Creates the number of epochs per sample for each weight
# weights are the non-zero input affinities (1-simplex)
# n_epochs is the total number of epochs
# There is an inverse relationship between the weights and the return vector.
make_epochs_per_sample <- function(weights, n_epochs) {
result <- rep(-1, length(weights))
n_samples <- n_epochs * (weights / max(weights))
result[n_samples > 0] <- n_epochs / n_samples[n_samples > 0]
result
}
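# Worked example (comments only, not executed at package load): with
# n_epochs = 200 and weights c(1, 0.5, 0.25),
#   n_samples = 200 * c(1, 0.5, 0.25) / 1 = c(200, 100, 50)
#   result    = 200 / c(200, 100, 50)    = c(1, 2, 4)
# so the heaviest edge is sampled every epoch and the lightest every 4 epochs;
# zero-weight edges keep the sentinel value -1 and are never sampled.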
# Create the a/b parameters from spread and min_dist
find_ab_params <- function(spread = 1, min_dist = 0.001) {
xv <- seq(from = 0, to = spread * 3, length.out = 300)
yv <- rep(0, length(xv))
yv[xv < min_dist] <- 1
yv[xv >= min_dist] <- exp(-(xv[xv >= min_dist] - min_dist) / spread)
result <- try(
{
stats::nls(yv ~ 1 / (1 + a * xv^(2 * b)),
start = list(a = 1, b = 1)
)$m$getPars()
},
silent = TRUE
)
if (inherits(result, "try-error")) {
stop(
"Can't find a, b for provided spread = ", spread,
" min_dist = ", min_dist
)
}
result
}
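# Usage sketch (approximate values obtained from the curve fit above):
#   find_ab_params(spread = 1, min_dist = 0.1)
#   # a ~= 1.58, b ~= 0.90, the familiar UMAP defaults for the output weight
#   # curve 1 / (1 + a * d^(2 * b))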
# The default number of edge samples used by LargeVis
lvish_samples <- function(n_vertices) {
n_samples <- 0
if (n_vertices < 10000) {
n_samples <- 1000
}
else if (n_vertices < 1000000) {
n_samples <- (n_vertices - 10000) * 9000 / (1000000 - 10000) + 1000
}
else {
n_samples <- n_vertices / 100
}
round(n_samples * 1000000)
}
# Returns the number of epochs required to generate the default number of edge samples
# used in LargeVis
lvish_epochs <- function(n_vertices, V) {
n_samples <- lvish_samples(n_vertices)
round(n_samples * max(V) / sum(V))
}
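# Rough magnitudes implied by lvish_samples (comments only, not executed):
#   lvish_samples(5000)      # 1e9 edge samples (the small-dataset floor)
#   lvish_samples(100000)    # ~1.82e9 (interpolating between 1e9 and 1e10)
#   lvish_samples(2000000)   # 2e10 (n_vertices / 100, in millions)
# lvish_epochs then converts this sample budget into an epoch count by
# weighting it with max(V) / sum(V).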
# Scale X according to various strategies
scale_input <- function(X, scale_type, ret_model = FALSE, verbose = FALSE) {
if (is.null(scale_type)) {
scale_type <- "none"
}
else if (is.logical(scale_type)) {
scale_type <- ifelse(scale_type, "scale", "none")
}
else if (tolower(scale_type) == "z") {
scale_type <- "scale"
}
scale_type <- match.arg(
tolower(scale_type),
c("none", "scale", "range", "colrange", "maxabs")
)
switch(scale_type,
range = {
tsmessage("Range scaling X")
min_X <- min(X)
X <- X - min_X
max_X <- max(X)
X <- X / max_X
if (ret_model) {
attr(X, "scaled:range:min") <- min_X
attr(X, "scaled:range:max") <- max_X
}
},
colrange = {
tsmessage("Column range scaling X")
min_X <- apply(X, 2, min)
X <- sweep(X, 2, min_X)
max_X <- apply(X, 2, max)
X <- sweep(X, 2, max_X, `/`)
if (ret_model) {
attr(X, "scaled:colrange:min") <- min_X
attr(X, "scaled:colrange:max") <- max_X
}
},
maxabs = {
tsmessage("Normalizing by max-abs")
X <- base::scale(X, scale = FALSE)
max_abs <- max(abs(X))
X <- X / max_abs
if (ret_model) {
attr(X, "scaled:maxabs") <- max_abs
}
},
scale = {
tsmessage("Scaling to zero mean and unit variance")
varf <- function(x) {
sum((x - sum(x) / length(x))^2)
}
non_zero_var_cols <- apply(X, 2, varf) >= .Machine$double.xmin
      if (!any(non_zero_var_cols)) {
stop("Matrix has zero variance")
}
X <- X[, non_zero_var_cols]
tsmessage("Kept ", ncol(X), " non-zero-variance columns")
X <- base::scale(X, scale = TRUE)
if (ret_model) {
attr(X, "scaled:nzvcols") <- which(non_zero_var_cols)
}
}
)
X
}
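# Minimal sketch of the scaling strategies (comments only, not executed):
#   X <- matrix(c(1, 2, 3, 10, 20, 30), ncol = 2)
#   scale_input(X, "range")     # (X - min(X)) / max(X - min(X)): all values in [0, 1]
#   scale_input(X, "colrange")  # each column independently mapped to [0, 1]
#   scale_input(X, "maxabs")    # center each column, then divide by max(abs(X))
#   scale_input(X, TRUE)        # same as "scale": drop zero-variance columns,
#                               # then z-scale the remainder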
attr_to_scale_info <- function(X) {
Xattr <- attributes(X)
Xattr <- Xattr[startsWith(names(Xattr), "scaled:")]
if (length(Xattr) == 0) {
Xattr <- NULL
}
Xattr
}
get_opt_args <- function(opt_args, alpha) {
default_batch_opt = "adam"
default_opt_args <- list(
sgd = list(alpha = alpha),
adam = list(alpha = alpha, beta1 = 0.5, beta2 = 0.9, eps = 1e-7)
)
if (is.null(opt_args)) {
opt_args <- list()
}
if (is.null(opt_args$method)) {
opt_args$method <- "adam"
}
if (!(opt_args$method %in% names(default_opt_args))) {
stop("Unknown optimization method '", opt_args$method, "'")
}
lmerge(default_opt_args[[opt_args$method]], opt_args)
}
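# Sketch of how the optimizer defaults combine (assuming lmerge fills in the
# defaults and lets user-supplied values take precedence; illustrative only):
#   get_opt_args(NULL, alpha = 1)
#   # -> Adam with alpha = 1, beta1 = 0.5, beta2 = 0.9, eps = 1e-7
#   get_opt_args(list(method = "sgd"), alpha = 1)
#   # -> plain SGD with alpha = 1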
# Takes local radii from the input dimension and converts them to approximate
# densities in the output space by mapping them to a vector of a parameters
# as used in the UMAP output weight: 1 / (1 + a * d^(2b)).
# Based on testing, a rough range of usable a values is 0.01-100. To get that,
# we want each pairwise a value to be the product of the local densities of i
# and j, i.e. a = sqrt(a_i * a_j).
# Also, we want dens_scale to control the spread of a values, and for
# dens_scale = 0 the vector of a_i to give back the user-selected scalar value
# of a, so we scale the log of the reciprocal of localr to lie within
# [log(a * 1e-(2 * dens_scale)) ... log(a * 1e(2 * dens_scale))]. We take the
# sqrt of the a_i in this function to avoid repeatedly calling sqrt inside the
# optimization loop.
scale_radii <- function(localr, dens_scale, a) {
log_denso <- -log(localr)
min_densl <- a * (10 ^ (-2 * dens_scale))
log_min_densl <- log(min_densl)
max_densl <- a * (10 ^ (2 * dens_scale))
log_max_densl <- log(max_densl)
log_denso_scale <- range_scale(log_denso, log_min_densl, log_max_densl)
sqrt(exp(log_denso_scale))
}
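# Worked example (comments only; assumes range_scale maps its input linearly
# onto the closed interval given by its second and third arguments): with
# a = 1 and dens_scale = 1,
#   min_densl = 1 * 10^-2 = 0.01, max_densl = 1 * 10^2 = 100
# so the returned values (which are square roots) lie in [0.1, 10] and the
# pairwise products sqrt(a_i) * sqrt(a_j) span the intended [0.01, 100].
# With dens_scale = 0 every returned value is sqrt(a), so each pair recovers
# the user-supplied a exactly.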
#' @useDynLib uwot, .registration=TRUE
#' @importFrom Rcpp sourceCpp
.onUnload <- function(libpath) {
library.dynam.unload("uwot", libpath)
}