#!/usr/bin/env Rscript
args <- commandArgs(trailingOnly = TRUE)
source_file <- args[[1]]
out_file <- args[[2]]
file <- args[-c(1:2)]

library(vroom)

fields <- vroom(
  col_names = c("begin", "end", "width", "col_names"),
  delim = "\t",
"1	1	1	RECTYPE
2	8	7	SERIALNO
9	9	1	SAMPLE
10	11	2	STATE
12	12	1	REGION
13	13	1	DIVISION
14	18	5	PUMA5
19	23	5	PUMA1
24	27	4	MSACMSA5
28	31	4	MSAPMSA5
32	35	4	MSACMSA1
36	39	4	MSAPMSA1
40	41	2	AREATYP5
42	43	2	AREATYP1
44	57	14	TOTPUMA5
58	71	14	LNDPUMA5
72	85	14	TOTPUMA1
86	99	14	LNDPUMA1
100	101	2	SUBSAMPL
102	105	4	HWEIGHT
106	107	2	PERSONS
108	108	1	UNITTYPE
109	109	1	HSUB
110	110	1	HAUG
111	111	1	VACSTAT
112	112	1	VACSTATA
113	113	1	TENURE
114	114	1	TENUREA
115	116	2	BLDGSZ
117	117	1	BLDGSZA
118	118	1	YRBUILT
119	119	1	YRBUILTA
120	120	1	YRMOVED
121	121	1	YRMOVEDA
122	122	1	ROOMS
123	123	1	ROOMSA
124	124	1	BEDRMS
125	125	1	BEDRMSA
126	126	1	CPLUMB
127	127	1	CPLUMBA
128	128	1	CKITCH
129	129	1	CKITCHA
130	130	1	PHONE
131	131	1	PHONEA
132	132	1	FUEL
133	133	1	FUELA
134	134	1	VEHICL
135	135	1	VEHICLA
136	136	1	BUSINES
137	137	1	BUSINESA
138	138	1	ACRES
139	139	1	ACRESA
140	140	1	AGSALES
141	141	1	AGSALESA
142	145	4	ELEC
146	146	1	ELECA
147	150	4	GAS
151	151	1	GASA
152	155	4	WATER
156	156	1	WATERA
157	160	4	OIL
161	161	1	OILA
162	165	4	RENT
166	166	1	RENTA
167	167	1	MEALS
168	168	1	MEALSA
169	169	1	MORTG1
170	170	1	MORTG1A
171	175	5	MRT1AMT
176	176	1	MRT1AMTA
177	177	1	MORTG2
178	178	1	MORTG2A
179	183	5	MRT2AMT
184	184	1	MRT2AMTA
185	185	1	TAXINCL
186	186	1	TAXINCLA
187	188	2	TAXAMT
189	189	1	TAXAMTA
190	190	1	INSINCL
191	191	1	INSINCLA
192	195	4	INSAMT
196	196	1	INSAMTA
197	200	4	CONDFEE
201	201	1	CONDFEEA
202	203	2	VALUE
204	204	1	VALUEA
205	205	1	MHLOAN
206	206	1	MHLOANA
207	211	5	MHCOST
212	212	1	MHCOSTA
213	213	1	HHT
214	215	2	P65
216	217	2	P18
218	219	2	NPF
220	221	2	NOC
222	223	2	NRC
224	224	1	PSF
225	225	1	PAOC
226	226	1	PARC
227	227	1	SVAL
228	232	5	SMOC
233	235	3	SMOCAPI
236	236	1	SRNT
237	240	4	GRENT
241	243	3	GRAPI
244	244	1	FNF
245	245	1	HHL
246	246	1	LNGI
247	247	1	WIF
248	248	1	EMPSTAT
249	250	2	WORKEXP
251	258	8	HINC
259	266	8	FINC
")
fields$begin <- fields$begin - 1

types <- cols(
  .default = col_double(),
  RECTYPE = col_character(),
  SERIALNO = col_character(),
  STATE = col_character(),
  PUMA5 = col_character(),
  PUMA1 = col_character(),
  MSACMSA5 = col_character(),
  MSAPMSA5 = col_character(),
  MSACMSA1 = col_character(),
  MSAPMSA1 = col_character(),
  AREATYP5 = col_character(),
  AREATYP1 = col_character(),
  LNDPUMA5 = col_character(),
  TOTPUMA1 = col_character(),
  LNDPUMA1 = col_character(),
  SUBSAMPL = col_character(),
  HWEIGHT = col_character(),
  PERSONS = col_character(),
  BLDGSZ = col_character(),
  ELEC = col_character(),
  GAS = col_character(),
  WATER = col_character(),
  OIL = col_character(),
  RENT = col_character(),
  MRT1AMT = col_character(),
  MRT2AMT = col_character(),
  TAXAMT = col_character(),
  INSAMT = col_character(),
  CONDFEE = col_character(),
  VALUE = col_character(),
  MHCOST = col_character(),
  P65 = col_character(),
  P18 = col_character(),
  NPF = col_character(),
  NOC = col_character(),
  NRC = col_character(),
  SMOC = col_character(),
  SMOCAPI = col_character(),
  GRENT = col_character(),
  GRAPI = col_character(),
  WORKEXP = col_character(),
  HINC = col_character(),
  FINC = col_character()
)

cat(source_file, "\n")
out <- bench::workout_expressions(as.list(parse(source_file, keep.source = FALSE)))

x <- vroom::vroom(file, col_types = list())
out$size <- sum(file.size(file))
out$rows <- nrow(x)
out$cols <- ncol(x)
out$process <- as.numeric(out$process)
out$real <- as.numeric(out$real)
out$max_memory <- as.numeric(bench::bench_process_memory()[["max"]])

vroom::vroom_write(out, out_file)
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/run-bench-fwf.R
#!/usr/bin/env Rscript
args <- commandArgs(trailingOnly = TRUE)
source_file <- args[[1]]
out_file <- args[[2]]
file <- args[-c(1:2)]

cat(source_file, "\n")
out <- bench::workout_expressions(as.list(parse(source_file, keep.source = FALSE)))

x <- vroom::vroom(file, col_types = list())
out$size <- sum(file.size(file))
out$rows <- nrow(x)
out$cols <- ncol(x)
out$process <- as.numeric(out$process)
out$real <- as.numeric(out$real)
out$max_memory <- as.numeric(bench::bench_process_memory()[["max"]])

vroom::vroom_write(out, out_file)
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/run-bench.R
#!/usr/bin/env Rscript
vroom::vroom_write(
  sessioninfo::package_info(
    c("vroom", "readr", "dplyr", "data.table", "base"),
    dependencies = FALSE,
    include_base = TRUE
  ),
  here::here("inst", "bench", "session_info.tsv"),
  delim = "\t"
)
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/session_info.R
library(vroom)
library(dplyr)
library(fs)
library(purrr)
library(tidyr)

summarise_dir <- function(dir, desc) {
  out_file <- path(path_dir(dir), path_ext_set(path_file(dir), "tsv"))
  col_types <- cols(
    exprs = col_character(),
    process = col_character(),
    real = col_character(),
    size = col_double(),
    rows = col_double(),
    cols = col_double()
  )
  dir_ls(dir, glob = "*tsv") %>%
    discard(~ endsWith(.x, "input.tsv")) %>%
    vroom(id = "path", col_types = col_types) %>%
    mutate(path = path_ext_remove(path_file(path))) %>%
    group_by(path) %>%
    mutate(op = desc) %>%
    separate(path, c("reading_package", "manip_package"), "-") %>%
    pivot_longer(., cols = c(process, real), names_to = "type", values_to = "time") %>%
    select(reading_package, manip_package, op, type, time, size, max_memory, rows, cols) %>%
    vroom_write(out_file, delim = "\t")
}

summarise_dir(here::here("inst/bench/all_numeric-long"), c("setup", "read", "print", "head", "tail", "sample", "filter", "aggregate"))
summarise_dir(here::here("inst/bench/all_numeric-wide"), c("setup", "read", "print", "head", "tail", "sample", "filter", "aggregate"))
summarise_dir(here::here("inst/bench/all_character-long"), c("setup", "read", "print", "head", "tail", "sample", "filter", "aggregate"))
summarise_dir(here::here("inst/bench/all_character-wide"), c("setup", "read", "print", "head", "tail", "sample", "filter", "aggregate"))
summarise_dir(here::here("inst/bench/taxi"), c("setup", "read", "print", "head", "tail", "sample", "filter", "aggregate"))
summarise_dir(here::here("inst/bench/taxi_multiple"), c("setup", "read", "print", "head", "tail", "sample", "filter", "aggregate"))
summarise_dir(here::here("inst/bench/taxi_writing"), c("setup", "writing"))
summarise_dir(here::here("inst/bench/fwf"), c("setup", "read", "print", "head", "tail", "sample", "filter", "aggregate"))
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/summarise-benchmarks.R
library(data.table)

x <- fread(file, sep = ",", quote = "", strip.white = FALSE, na.strings = NULL)

print(x)
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[payment_type == "UNK", ]
e <- x[, .(mean(tip_amount)), by = payment_type]
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi/data.table-data.table.R
# empty setup expression; the other benchmark scripts load their packages here
({})

x <- read.delim(file, sep = ",", quote = "", na.strings = NULL, stringsAsFactors = FALSE)

print(head(x, 10))
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[x$payment_type == "UNK", ]
e <- tapply(x$tip_amount, x$payment_type, mean)
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi/read.delim-base.R
({ library(readr); library(dplyr) })

x <- read_csv(file, col_types = c(pickup_datetime = "c"), quote = "", trim_ws = FALSE, na = character())

print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, payment_type == "UNK")
e <- group_by(x, payment_type) %>% summarise(avg_tip = mean(tip_amount))
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi/readr-dplyr.R
library(vroom)

x <- vroom(file, col_types = c(pickup_datetime = "c"), trim_ws = FALSE, quote = "", escape_double = FALSE, na = character())

print(x)
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[x$payment_type == "UNK", ]
e <- tapply(x$tip_amount, x$payment_type, mean)
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi/vroom-base.R
({ library(vroom); library(dplyr) })

x <- vroom(file, col_types = c(pickup_datetime = "c"), trim_ws = FALSE, quote = "", escape_double = FALSE, na = character())

print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, payment_type == "UNK")
e <- group_by(x, payment_type) %>% summarise(avg_tip = mean(tip_amount))
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi/vroom-dplyr.R
({ library(vroom); library(dplyr) })

x <- vroom(file, col_types = c(pickup_datetime = "c"), trim_ws = FALSE, quote = "", escape_double = FALSE, na = character(), altrep = FALSE)

print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, payment_type == "UNK")
e <- group_by(x, payment_type) %>% summarise(avg_tip = mean(tip_amount))
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi/vroom_no_altrep-dplyr.R
library(data.table)

x <- rbindlist(
  idcol = "path",
  lapply(
    stats::setNames(file, file),
    fread,
    sep = ",", quote = "", strip.white = FALSE, na.strings = NULL
  )
)

print(x)
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[payment_type == "UNK", ]
e <- x[, .(mean(tip_amount)), by = payment_type]
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_multiple/data.table-data.table.R
({ library(readr); library(dplyr); library(purrr) })

x <- map_dfr(
  set_names(file),
  .id = "path",
  ~ read_csv(.x, col_types = c(pickup_datetime = "c"), quote = "", trim_ws = FALSE, na = character())
)

print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, payment_type == "UNK")
e <- group_by(x, payment_type) %>% summarise(avg_tip = mean(tip_amount))
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_multiple/readr-dplyr.R
library(vroom)

x <- vroom(file, id = "path", col_types = c(pickup_datetime = "c"), trim_ws = FALSE, quote = "", escape_double = FALSE, na = character(), altrep = TRUE)

print(head(x, 10))
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[x$payment_type == "UNK", ]
e <- tapply(x$tip_amount, x$payment_type, mean)
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_multiple/vroom-base.R
({ library(vroom); library(dplyr) })

x <- vroom(file, id = "path", col_types = c(pickup_datetime = "c"), trim_ws = FALSE, quote = "", escape_double = FALSE, na = character(), altrep = TRUE)

print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, payment_type == "UNK")
e <- group_by(x, payment_type) %>% summarise(avg_tip = mean(tip_amount))
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_multiple/vroom-dplyr.R
({ library(vroom); library(dplyr) })

x <- vroom(file, id = "path", col_types = c(pickup_datetime = "c"), trim_ws = FALSE, quote = "", escape_double = FALSE, na = character(), altrep = FALSE)

print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, payment_type == "UNK")
e <- group_by(x, payment_type) %>% summarise(avg_tip = mean(tip_amount))
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_multiple/vroom_no_altrep-dplyr.R
{
  library(vroom)
  data <- vroom(file, col_types = c(pickup_datetime = "c"))
  vroom:::vroom_materialize(data, replace = TRUE)
}

{
  con <- gzfile(tempfile(fileext = ".gz"), "wb")
  write.table(data, con, sep = "\t", quote = FALSE, row.names = FALSE)
  close(con)
}
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_writing/base-gzip.R
{
  library(vroom)
  data <- vroom(file, col_types = c(pickup_datetime = "c"))
  vroom:::vroom_materialize(data, replace = TRUE)
}

{
  con <- pipe(sprintf("pigz > %s", tempfile(fileext = ".gz")), "wb")
  write.table(data, con, sep = "\t", quote = FALSE, row.names = FALSE)
  close(con)
}
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_writing/base-multithreaded_gzip.R
{
  library(vroom)
  data <- vroom(file, col_types = c(pickup_datetime = "c"))
  vroom:::vroom_materialize(data, replace = TRUE)
}

write.table(data, tempfile(fileext = ".tsv"), sep = "\t", quote = FALSE, row.names = FALSE)
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_writing/base-uncompressed.R
{
  library(vroom)
  data <- vroom(file, col_types = c(pickup_datetime = "c"))
  vroom:::vroom_materialize(data, replace = TRUE)
}

{
  con <- pipe(sprintf("zstd > %s", tempfile(fileext = ".zst")), "wb")
  write.table(data, con, sep = "\t", quote = FALSE, row.names = FALSE)
  close(con)
}
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_writing/base-zstandard.R
{
  library(vroom)
  data <- vroom(file, col_types = c(pickup_datetime = "c"))
  vroom:::vroom_materialize(data, replace = TRUE)
}

data.table::fwrite(data, tempfile(fileext = ".gz"), sep = "\t", nThread = 1)
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_writing/data.table-gzip.R
{
  library(vroom)
  data <- vroom(file, col_types = c(pickup_datetime = "c"))
  vroom:::vroom_materialize(data, replace = TRUE)
}

data.table::fwrite(data, tempfile(fileext = ".gz"), sep = "\t")
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_writing/data.table-multithreaded_gzip.R
{
  library(vroom)
  data <- vroom(file, col_types = c(pickup_datetime = "c"))
  vroom:::vroom_materialize(data, replace = TRUE)
}

data.table::fwrite(data, tempfile(fileext = ".tsv"), sep = "\t")
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_writing/data.table-uncompressed.R
{
  library(vroom)
  data <- vroom(file, col_types = c(pickup_datetime = "c"))
  vroom:::vroom_materialize(data, replace = TRUE)
}

readr::write_tsv(data, tempfile(fileext = ".gz"))
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_writing/readr-gzip.R
{
  library(vroom)
  data <- vroom(file, col_types = c(pickup_datetime = "c"))
  vroom:::vroom_materialize(data, replace = TRUE)
}

readr::write_tsv(data, pipe(sprintf("pigz > %s", tempfile(fileext = ".gz"))))
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_writing/readr-multithreaded_gzip.R
{
  library(vroom)
  data <- vroom(file, col_types = c(pickup_datetime = "c"))
  vroom:::vroom_materialize(data, replace = TRUE)
}

readr::write_tsv(data, tempfile(fileext = ".tsv"))
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_writing/readr-uncompressed.R
{
  library(vroom)
  data <- vroom(file, col_types = c(pickup_datetime = "c"))
  vroom:::vroom_materialize(data, replace = TRUE)
}

readr::write_tsv(data, pipe(sprintf("zstd > %s", tempfile(fileext = ".zst"))))
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_writing/readr-zstandard.R
{
  library(vroom)
  data <- vroom(file, col_types = c(pickup_datetime = "c"))
  vroom:::vroom_materialize(data, replace = TRUE)
}

vroom_write(data, tempfile(fileext = ".gz"), delim = "\t")
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_writing/vroom-gzip.R
{
  library(vroom)
  data <- vroom(file, col_types = c(pickup_datetime = "c"))
  vroom:::vroom_materialize(data, replace = TRUE)
}

vroom_write(data, pipe(sprintf("pigz > %s", tempfile(fileext = ".gz"))), delim = "\t")
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_writing/vroom-multithreaded_gzip.R
{
  library(vroom)
  data <- vroom(file, col_types = c(pickup_datetime = "c"))
  vroom:::vroom_materialize(data, replace = TRUE)
}

vroom_write(data, tempfile(fileext = ".tsv"), delim = "\t")
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_writing/vroom-uncompressed.R
{
  library(vroom)
  data <- vroom(file, col_types = c(pickup_datetime = "c"))
  vroom:::vroom_materialize(data, replace = TRUE)
}

vroom_write(data, pipe(sprintf("zstd > %s", tempfile(fileext = ".zst"))), delim = "\t")
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/taxi_writing/vroom-zstandard.R
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

library(ggplot2)
library(forcats)
library(dplyr)
library(tidyr)
library(fs)

pretty_sec <- function(x) {
  x[!is.na(x)] <- prettyunits::pretty_sec(x[!is.na(x)])
  x
}

pretty_lgl <- function(x) {
  case_when(
    x == TRUE ~ "TRUE",
    x == FALSE ~ "FALSE",
    TRUE ~ ""
  )
}

read_benchmark <- function(file, desc) {
  vroom::vroom(file, col_types = c("ccccddddd")) %>%
    filter(op != "setup") %>%
    mutate(
      altrep = case_when(
        grepl("^vroom_no_altrep", reading_package) ~ FALSE,
        grepl("^vroom", reading_package) ~ TRUE,
        TRUE ~ NA
      ),
      reading_package = case_when(
        grepl("^vroom", reading_package) ~ "vroom",
        TRUE ~ reading_package
      ),
      label = fct_reorder(
        glue::glue("{reading_package}{altrep}\n{manip_package}",
          altrep = ifelse(is.na(altrep), "", glue::glue("(altrep = {altrep})"))
        ),
        case_when(type == "real" ~ time, TRUE ~ 0),
        sum
      ),
      op = factor(op, desc)
    )
}

generate_subtitle <- function(data) {
  rows <- scales::comma(data$rows[[1]])
  cols <- scales::comma(data$cols[[1]])
  size <- fs_bytes(data$size[[1]])
  glue::glue("{rows} x {cols} - {size}B")
}

plot_benchmark <- function(data, title) {
  subtitle <- generate_subtitle(data)
  data <- data %>%
    filter(reading_package != "read.delim", type == "real")
  p1 <- data %>%
    ggplot() +
    geom_bar(aes(x = label, y = time, fill = op, group = label), stat = "identity") +
    scale_fill_brewer(type = "qual", palette = "Set2") +
    scale_y_continuous(labels = scales::number_format(suffix = "s")) +
    coord_flip() +
    labs(title = title, subtitle = subtitle, x = NULL, y = NULL, fill = NULL) +
    theme(legend.position = "bottom")
  p2 <- data %>%
    group_by(label) %>%
    summarise(max_memory = max(max_memory)) %>%
    ggplot() +
    geom_bar(aes(x = label, y = max_memory / (1024 * 1024)), stat = "identity") +
    scale_y_continuous(labels = scales::number_format(suffix = "Mb")) +
    coord_flip() +
    labs(title = "Maximum memory usage", x = NULL, y = NULL) +
    theme(axis.text.y = element_blank(), axis.ticks.y = element_blank())
  library(patchwork)
  p1 + p2 + plot_layout(widths = c(2, 1))
}

make_table <- function(data) {
  data %>%
    filter(type == "real") %>%
    select(-label, -size, -type, -rows, -cols) %>%
    spread(op, time) %>%
    mutate(
      total = read + print + head + tail + sample + filter + aggregate,
      max_memory = as.character(bench::as_bench_bytes(max_memory))
    ) %>%
    arrange(desc(total)) %>%
    mutate_if(is.numeric, pretty_sec) %>%
    mutate_if(is.logical, pretty_lgl) %>%
    select(reading_package, manip_package, altrep, max_memory, everything()) %>%
    rename(
      "reading\npackage" = reading_package,
      "manipulating\npackage" = manip_package,
      memory = max_memory
    ) %>%
    knitr::kable(digits = 2, align = "r", format = "html")
}

desc <- c("setup", "read", "print", "head", "tail", "sample", "filter", "aggregate")

## ----fig.height = 8, fig.width=10, warning = FALSE, echo = FALSE, message = FALSE----
taxi <- read_benchmark(path_package("vroom", "bench", "taxi.tsv"), desc)
plot_benchmark(taxi, "Time to analyze taxi trip data")
make_table(taxi)

## ----fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE----
all_num <- read_benchmark(path_package("vroom", "bench", "all_numeric-long.tsv"), desc)
plot_benchmark(all_num, "Time to analyze long all numeric data")
make_table(all_num)

## ----fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE----
all_num_wide <- read_benchmark(path_package("vroom", "bench", "all_numeric-wide.tsv"), desc)
plot_benchmark(all_num_wide, "Time to analyze wide all numeric data")
make_table(all_num_wide)

## ----fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE----
all_chr <- read_benchmark(path_package("vroom", "bench", "all_character-long.tsv"), desc)
plot_benchmark(all_chr, "Time to analyze long all character data")
make_table(all_chr)

## ----fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE----
all_chr_wide <- read_benchmark(path_package("vroom", "bench", "all_character-wide.tsv"), desc)
plot_benchmark(all_chr_wide, "Time to analyze wide all character data")
make_table(all_chr_wide)

## ----echo = FALSE, message = FALSE, eval = TRUE-------------------------------
mult <- read_benchmark(path_package("vroom", "bench", "taxi_multiple.tsv"), desc)

## ----fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE----
plot_benchmark(mult, "Time to analyze multiple file data")
make_table(mult)

## ----echo = FALSE, message = FALSE, eval = TRUE-------------------------------
fwf <- read_benchmark(path_package("vroom", "bench", "fwf.tsv"), desc)

## ----fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE----
plot_benchmark(fwf, "Time to analyze fixed width data")
make_table(fwf)

## ----fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE----
taxi_writing <- read_benchmark(path_package("vroom", "bench", "taxi_writing.tsv"), c("setup", "writing")) %>%
  rename(
    package = reading_package,
    compression = manip_package
  ) %>%
  mutate(
    package = factor(package, c("base", "readr", "data.table", "vroom")),
    compression = factor(compression, rev(c("gzip", "multithreaded_gzip", "zstandard", "uncompressed")))
  ) %>%
  filter(type == "real")

subtitle <- generate_subtitle(taxi_writing)

taxi_writing %>%
  ggplot(aes(x = compression, y = time, fill = package)) +
  geom_bar(stat = "identity", position = position_dodge2(reverse = TRUE, padding = .05)) +
  scale_fill_brewer(type = "qual", palette = "Set2") +
  scale_y_continuous(labels = scales::number_format(suffix = "s")) +
  theme(legend.position = "bottom") +
  coord_flip() +
  labs(title = "Writing taxi trip data", subtitle = subtitle, x = NULL, y = NULL, fill = NULL)

taxi_writing %>%
  select(-size, -op, -rows, -cols, -type, -altrep, -label, -max_memory) %>%
  mutate_if(is.numeric, pretty_sec) %>%
  pivot_wider(names_from = package, values_from = time) %>%
  arrange(desc(compression)) %>%
  knitr::kable(digits = 2, align = "r", format = "html")

## ----echo = FALSE, warning = FALSE, message = FALSE---------------------------
si <- vroom::vroom(path_package("vroom", "bench", "session_info.tsv"))
class(si) <- c("packages_info", "data.frame")
select(as.data.frame(si), package, version = ondiskversion, date, source) %>%
  knitr::kable()
/scratch/gouwar.j/cran-all/cranData/vroom/inst/doc/benchmarks.R
---
title: "Vroom Benchmarks"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Vroom Benchmarks}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

library(ggplot2)
library(forcats)
library(dplyr)
library(tidyr)
library(fs)

pretty_sec <- function(x) {
  x[!is.na(x)] <- prettyunits::pretty_sec(x[!is.na(x)])
  x
}

pretty_lgl <- function(x) {
  case_when(
    x == TRUE ~ "TRUE",
    x == FALSE ~ "FALSE",
    TRUE ~ ""
  )
}

read_benchmark <- function(file, desc) {
  vroom::vroom(file, col_types = c("ccccddddd")) %>%
    filter(op != "setup") %>%
    mutate(
      altrep = case_when(
        grepl("^vroom_no_altrep", reading_package) ~ FALSE,
        grepl("^vroom", reading_package) ~ TRUE,
        TRUE ~ NA
      ),
      reading_package = case_when(
        grepl("^vroom", reading_package) ~ "vroom",
        TRUE ~ reading_package
      ),
      label = fct_reorder(
        glue::glue("{reading_package}{altrep}\n{manip_package}",
          altrep = ifelse(is.na(altrep), "", glue::glue("(altrep = {altrep})"))
        ),
        case_when(type == "real" ~ time, TRUE ~ 0),
        sum
      ),
      op = factor(op, desc)
    )
}

generate_subtitle <- function(data) {
  rows <- scales::comma(data$rows[[1]])
  cols <- scales::comma(data$cols[[1]])
  size <- fs_bytes(data$size[[1]])
  glue::glue("{rows} x {cols} - {size}B")
}

plot_benchmark <- function(data, title) {
  subtitle <- generate_subtitle(data)
  data <- data %>%
    filter(reading_package != "read.delim", type == "real")
  p1 <- data %>%
    ggplot() +
    geom_bar(aes(x = label, y = time, fill = op, group = label), stat = "identity") +
    scale_fill_brewer(type = "qual", palette = "Set2") +
    scale_y_continuous(labels = scales::number_format(suffix = "s")) +
    coord_flip() +
    labs(title = title, subtitle = subtitle, x = NULL, y = NULL, fill = NULL) +
    theme(legend.position = "bottom")
  p2 <- data %>%
    group_by(label) %>%
    summarise(max_memory = max(max_memory)) %>%
    ggplot() +
    geom_bar(aes(x = label, y = max_memory / (1024 * 1024)), stat = "identity") +
    scale_y_continuous(labels = scales::number_format(suffix = "Mb")) +
    coord_flip() +
    labs(title = "Maximum memory usage", x = NULL, y = NULL) +
    theme(axis.text.y = element_blank(), axis.ticks.y = element_blank())
  library(patchwork)
  p1 + p2 + plot_layout(widths = c(2, 1))
}

make_table <- function(data) {
  data %>%
    filter(type == "real") %>%
    select(-label, -size, -type, -rows, -cols) %>%
    spread(op, time) %>%
    mutate(
      total = read + print + head + tail + sample + filter + aggregate,
      max_memory = as.character(bench::as_bench_bytes(max_memory))
    ) %>%
    arrange(desc(total)) %>%
    mutate_if(is.numeric, pretty_sec) %>%
    mutate_if(is.logical, pretty_lgl) %>%
    select(reading_package, manip_package, altrep, max_memory, everything()) %>%
    rename(
      "reading\npackage" = reading_package,
      "manipulating\npackage" = manip_package,
      memory = max_memory
    ) %>%
    knitr::kable(digits = 2, align = "r", format = "html")
}

desc <- c("setup", "read", "print", "head", "tail", "sample", "filter", "aggregate")
```

vroom is a new approach to reading delimited and fixed width data into R. It
stems from the observation that, when parsing files, reading data from disk
and finding the delimiters is generally not the main bottleneck. Instead,
(re)-allocating memory and parsing the values into R data types (particularly
for characters) takes the bulk of the time. Therefore you can obtain very
rapid input by first performing a fast indexing step and then using the Altrep
framework available in R versions 3.5+ to access the values in a lazy /
delayed fashion.
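As a minimal sketch of what this laziness means in practice (illustrative
only, using vroom's small bundled example file rather than one of the
benchmark datasets below):

```{r, eval = FALSE}
library(vroom)

file <- vroom_example("mtcars.csv")

# The initial call mostly builds an index of record locations;
# the values themselves are not yet parsed into R.
x <- vroom(file, delim = ",")

# Touching a column materializes just that column's Altrep vector.
mean(x$mpg)
```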
## How it works

The initial reading of the file simply records the locations of each
individual record; the actual values are not read into R. Altrep vectors are
created for each column in the data, which hold a pointer to the index and the
memory mapped file. When these vectors are indexed the value is read from the
memory mapping.

This means initial reading is extremely fast; in the real world dataset below
it is ~ 1/4 the time of the multi-threaded `data.table::fread()`. Sampling
operations are likewise extremely fast, as only the data actually included in
the sample is read. This means things like the tibble print method, calling
`head()`, `tail()`, `x[sample(), ]`, etc. have very low overhead.

Filtering can also be fast: only the columns included in the filter selection
have to be fully read, and from the remaining columns only the data in the
filtered rows needs to be read. Grouped aggregations likewise only need to
read the grouping variables and the variables aggregated.

Once a particular vector is fully materialized the speed for all subsequent
operations should be identical to a normal R vector.

This approach potentially also allows you to work with data that is larger
than memory. As long as you are careful to avoid materializing the entire
dataset at once it can be efficiently queried and subset.

# Reading delimited files

The following benchmarks all measure reading delimited files of various sizes
and data types. Because vroom delays reading, the benchmarks also do some
manipulation of the data afterwards to try and provide a more realistic
performance comparison.

Because the `read.delim` results are so much slower than the others, they are
excluded from the plots, but are retained in the tables.

## Taxi Trip Dataset

This real world dataset is from Freedom of Information Law (FOIL) Taxi Trip
Data from the NYC Taxi and Limousine Commission 2013, originally posted at
<https://chriswhong.com/open-data/foil_nyc_taxi/>. It is also hosted on
[archive.org](https://archive.org/details/nycTaxiTripData2013).

The first table, trip_fare_1.csv, is 1.55G in size.

    #> Observations: 14,776,615
    #> Variables: 11
    #> $ medallion       <chr> "89D227B655E5C82AECF13C3F540D4CF4", "0BD7C8F5B...
    #> $ hack_license    <chr> "BA96DE419E711691B9445D6A6307C170", "9FD8F69F0...
    #> $ vendor_id       <chr> "CMT", "CMT", "CMT", "CMT", "CMT", "CMT", "CMT...
    #> $ pickup_datetime <chr> "2013-01-01 15:11:48", "2013-01-06 00:18:35", ...
    #> $ payment_type    <chr> "CSH", "CSH", "CSH", "CSH", "CSH", "CSH", "CSH...
    #> $ fare_amount     <dbl> 6.5, 6.0, 5.5, 5.0, 9.5, 9.5, 6.0, 34.0, 5.5, ...
    #> $ surcharge       <dbl> 0.0, 0.5, 1.0, 0.5, 0.5, 0.0, 0.0, 0.0, 1.0, 0...
    #> $ mta_tax         <dbl> 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0...
    #> $ tip_amount      <int> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
    #> $ tolls_amount    <dbl> 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.8, 0.0, 0...
    #> $ total_amount    <dbl> 7.0, 7.0, 7.0, 6.0, 10.5, 10.0, 6.5, 39.3, 7.0...

### Taxi Benchmarks

code: [bench/taxi](https://github.com/tidyverse/vroom/tree/main/inst/bench/taxi)

All benchmarks were run on an Amazon EC2
[m5.4xlarge](https://aws.amazon.com/ec2/instance-types/m5/) instance with 16
vCPUs and an [EBS](https://aws.amazon.com/ebs/) volume.

The benchmarks labeled `vroom_base` use `vroom` with base functions for
manipulation. `vroom_dplyr` uses `vroom` to read the file and dplyr functions
to manipulate. `data.table` uses `fread()` to read the file and `data.table`
functions to manipulate, and `readr` uses `readr` to read the file and `dplyr`
to manipulate.
By default vroom only uses Altrep for character vectors; these are labeled
`vroom(altrep: normal)`. The benchmarks labeled `vroom(altrep: full)` instead
use Altrep vectors for all supported types, and `vroom(altrep: none)` disables
Altrep entirely.

The following operations are performed.

  - The data is read
  - `print()` - *N.B. read.delim uses `print(head(x, 10))` because printing
    the whole dataset takes \> 10 minutes*
  - `head()`
  - `tail()`
  - Sampling 100 random rows
  - Filtering for "UNK" payment, this is 6434 rows (0.0435% of total).
  - Aggregation of mean fare amount per payment type.

```{r, fig.height = 8, fig.width=10, warning = FALSE, echo = FALSE, message = FALSE}
taxi <- read_benchmark(path_package("vroom", "bench", "taxi.tsv"), desc)

plot_benchmark(taxi, "Time to analyze taxi trip data")

make_table(taxi)
```

(*N.B. Rcpp used in the dplyr implementation fully materializes all the Altrep
numeric vectors when using `filter()` or `sample_n()`, which is why the first
of these cases has additional overhead when using full Altrep.*)

## All numeric data

All numeric data is really a worst case scenario for vroom. The index takes
about as much memory as the parsed data. Also, because parsing doubles can be
done quickly in parallel and text representations of doubles are only ~25
characters at most, there isn't a great deal of savings for delayed parsing.

For these reasons (and because the data.table implementation is very fast)
vroom is a bit slower than fread for pure numeric data. However, because vroom
is multi-threaded, it is a bit quicker than readr and read.delim for this type
of data.

### Long

code: [bench/all_numeric-long](https://github.com/tidyverse/vroom/tree/main/inst/bench/all_numeric-long)

```{r, fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE}
all_num <- read_benchmark(path_package("vroom", "bench", "all_numeric-long.tsv"), desc)

plot_benchmark(all_num, "Time to analyze long all numeric data")

make_table(all_num)
```

### Wide

code: [bench/all_numeric-wide](https://github.com/tidyverse/vroom/tree/main/inst/bench/all_numeric-wide)

```{r, fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE}
all_num_wide <- read_benchmark(path_package("vroom", "bench", "all_numeric-wide.tsv"), desc)

plot_benchmark(all_num_wide, "Time to analyze wide all numeric data")

make_table(all_num_wide)
```

## All character data

code: [bench/all_character-long](https://github.com/tidyverse/vroom/tree/main/inst/bench/all_character-long)

All character data is a best case scenario for vroom when using Altrep, as it
takes full advantage of the lazy reading.
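You can toggle this behaviour yourself with the `altrep` argument; a minimal
sketch (shown with vroom's small example file, so a much larger file is needed
to see meaningful timing differences):

```{r, eval = FALSE}
library(vroom)

file <- vroom_example("mtcars.csv")

# Default: character columns are lazy Altrep vectors, materialized on access.
lazy <- vroom(file)

# altrep = FALSE parses every value eagerly during the initial read.
eager <- vroom(file, altrep = FALSE)
```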
### Long

```{r, fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE}
all_chr <- read_benchmark(path_package("vroom", "bench", "all_character-long.tsv"), desc)

plot_benchmark(all_chr, "Time to analyze long all character data")

make_table(all_chr)
```

### Wide

code: [bench/all_character-wide](https://github.com/tidyverse/vroom/tree/main/inst/bench/all_character-wide)

```{r, fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE}
all_chr_wide <- read_benchmark(path_package("vroom", "bench", "all_character-wide.tsv"), desc)

plot_benchmark(all_chr_wide, "Time to analyze wide all character data")

make_table(all_chr_wide)
```

# Reading multiple delimited files

code: [bench/taxi_multiple](https://github.com/tidyverse/vroom/tree/main/inst/bench/taxi_multiple)

```{r, echo = FALSE, message = FALSE, eval = TRUE}
mult <- read_benchmark(path_package("vroom", "bench", "taxi_multiple.tsv"), desc)
```

The benchmark reads all 12 files in the taxi trip fare data, totaling
`r scales::comma(mult$rows[[1]])` rows and `r mult$cols[[1]]` columns, for a
total file size of `r format(fs_bytes(mult$size[[1]]))`.

```{r, fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE}
plot_benchmark(mult, "Time to analyze multiple file data")

make_table(mult)
```

# Reading fixed width files

## United States Census 5-Percent Public Use Microdata Sample files

```{r, echo = FALSE, message = FALSE, eval = TRUE}
fwf <- read_benchmark(path_package("vroom", "bench", "fwf.tsv"), desc)
```

This fixed width dataset contains individual records of the characteristics
of a 5 percent sample of people and housing units from the year 2000, and is
freely available at
<https://web.archive.org/web/20150908055439/https://www2.census.gov/census_2000/datasets/PUMS/FivePercent/California/all_California.zip>.
The data is split into files by state, and the state of California was used
in this benchmark.

The data totals `r scales::comma(fwf$rows[[1]])` rows and `r fwf$cols[[1]]`
columns, with a total file size of `r format(fs_bytes(fwf$size[[1]]))`.

## Census data benchmarks

code: [bench/fwf](https://github.com/tidyverse/vroom/tree/main/inst/bench/fwf)

```{r, fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE}
plot_benchmark(fwf, "Time to analyze fixed width data")

make_table(fwf)
```

# Writing delimited files

code: [bench/taxi_writing](https://github.com/tidyverse/vroom/tree/main/inst/bench/taxi_writing)

The benchmarks write out the taxi trip dataset in a few different ways.

  - An uncompressed file
  - A gzip compressed file using `gzfile()` *(readr and vroom do this
    automatically for files ending in `.gz`)*
  - A gzip compressed file compressed with multiple threads (natively for
    data.table and using a `pipe()` connection to
    [pigz](https://zlib.net/pigz/) for the rest).
  - A [Zstandard](https://facebook.github.io/zstd/) compressed file
    (data.table does not support this format).
```{r, fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE}
taxi_writing <- read_benchmark(path_package("vroom", "bench", "taxi_writing.tsv"), c("setup", "writing")) %>%
  rename(
    package = reading_package,
    compression = manip_package
  ) %>%
  mutate(
    package = factor(package, c("base", "readr", "data.table", "vroom")),
    compression = factor(compression, rev(c("gzip", "multithreaded_gzip", "zstandard", "uncompressed")))
  ) %>%
  filter(type == "real")

subtitle <- generate_subtitle(taxi_writing)

taxi_writing %>%
  ggplot(aes(x = compression, y = time, fill = package)) +
  geom_bar(stat = "identity", position = position_dodge2(reverse = TRUE, padding = .05)) +
  scale_fill_brewer(type = "qual", palette = "Set2") +
  scale_y_continuous(labels = scales::number_format(suffix = "s")) +
  theme(legend.position = "bottom") +
  coord_flip() +
  labs(title = "Writing taxi trip data", subtitle = subtitle, x = NULL, y = NULL, fill = NULL)

taxi_writing %>%
  select(-size, -op, -rows, -cols, -type, -altrep, -label, -max_memory) %>%
  mutate_if(is.numeric, pretty_sec) %>%
  pivot_wider(names_from = package, values_from = time) %>%
  arrange(desc(compression)) %>%
  knitr::kable(digits = 2, align = "r", format = "html")
```

## Session and package information

```{r, echo = FALSE, warning = FALSE, message = FALSE}
si <- vroom::vroom(path_package("vroom", "bench", "session_info.tsv"))
class(si) <- c("packages_info", "data.frame")
select(as.data.frame(si), package, version = ondiskversion, date, source) %>%
  knitr::kable()
```
/scratch/gouwar.j/cran-all/cranData/vroom/inst/doc/benchmarks.Rmd
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_knit$set(root.dir = tempdir())

knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

options(tibble.print_min = 3)

## -----------------------------------------------------------------------------
library(vroom)

## -----------------------------------------------------------------------------
# See where the example file is stored on your machine
file <- vroom_example("mtcars.csv")
file

# Read the file, by default vroom will guess the delimiter automatically.
vroom(file)

# You can also specify it explicitly, which is (slightly) faster, and safer if
# you know how the file is delimited.
vroom(file, delim = ",")

## -----------------------------------------------------------------------------
ve <- grep("mtcars-[0-9].csv", vroom_examples(), value = TRUE)
files <- sapply(ve, vroom_example)
files

## -----------------------------------------------------------------------------
vroom(files)

## -----------------------------------------------------------------------------
vroom(files, id = "path")

## -----------------------------------------------------------------------------
file <- vroom_example("mtcars.csv.gz")
vroom(file)

## -----------------------------------------------------------------------------
zip_file <- vroom_example("mtcars-multi-cyl.zip")
filenames <- unzip(zip_file, list = TRUE)$Name
filenames

# imagine we only want to read 2 of the 3 files
vroom(purrr::map(filenames[c(1, 3)], ~ unz(zip_file, .x)))

## ----eval = as.logical(Sys.getenv("NOT_CRAN", "false"))-----------------------
# file <- "https://raw.githubusercontent.com/tidyverse/vroom/main/inst/extdata/mtcars.csv"
# vroom(file)

## ----eval = as.logical(Sys.getenv("NOT_CRAN", "false"))-----------------------
# file <- "https://raw.githubusercontent.com/tidyverse/vroom/main/inst/extdata/mtcars.csv.gz"
# vroom(file)

## -----------------------------------------------------------------------------
file <- vroom_example("mtcars.csv.gz")
vroom(file, col_select = c(model, cyl, gear))

## -----------------------------------------------------------------------------
vroom(file, col_select = c(1, 3, 11))

## -----------------------------------------------------------------------------
vroom(file, col_select = starts_with("d"))

## -----------------------------------------------------------------------------
vroom(file, col_select = c(car = model, everything()))

## -----------------------------------------------------------------------------
fwf_sample <- vroom_example("fwf-sample.txt")
cat(readLines(fwf_sample))

## -----------------------------------------------------------------------------
vroom_fwf(fwf_sample, fwf_empty(fwf_sample, col_names = c("first", "last", "state", "ssn")))

## -----------------------------------------------------------------------------
vroom_fwf(fwf_sample, fwf_widths(c(20, 10, 12), c("name", "state", "ssn")))

## -----------------------------------------------------------------------------
vroom_fwf(fwf_sample, fwf_positions(c(1, 30), c(20, 42), c("name", "ssn")))

## -----------------------------------------------------------------------------
vroom_fwf(fwf_sample, fwf_cols(name = 20, state = 10, ssn = 12))

## -----------------------------------------------------------------------------
vroom_fwf(fwf_sample, fwf_cols(name = c(1, 20), ssn = c(30, 42)))

## -----------------------------------------------------------------------------
# read the 'hp' columns as an integer
vroom(vroom_example("mtcars.csv"), col_types = c(hp = "i"))

# also skip reading the 'cyl' column
vroom(vroom_example("mtcars.csv"), col_types = c(hp = "i", cyl = "_"))

# also read the gears as a factor
vroom(vroom_example("mtcars.csv"), col_types = c(hp = "i", cyl = "_", gear = "f"))

## -----------------------------------------------------------------------------
vroom(vroom_example("mtcars.csv"), col_types = c(.default = "c"))

## -----------------------------------------------------------------------------
vroom(
  vroom_example("mtcars.csv"),
  col_types = list(hp = col_integer(), cyl = col_skip(), gear = col_factor())
)

## -----------------------------------------------------------------------------
vroom(
  vroom_example("mtcars.csv"),
  col_types = list(gear = col_factor(levels = c(gear = c("3", "4", "5"))))
)

## ----eval = FALSE--------------------------------------------------------------
# vroom(
#   vroom_example("mtcars.csv"),
#   .name_repair = ~ janitor::make_clean_names(., case = "all_caps")
# )

## -----------------------------------------------------------------------------
vroom_write(mtcars, "mtcars.tsv")

## ----include = FALSE-----------------------------------------------------------
unlink("mtcars.tsv")

## -----------------------------------------------------------------------------
vroom_write(mtcars, "mtcars.csv", delim = ",")

## ----include = FALSE-----------------------------------------------------------
unlink("mtcars.csv")

## -----------------------------------------------------------------------------
vroom_write(mtcars, "mtcars.tsv.gz")
vroom_write(mtcars, "mtcars.tsv.bz2")
vroom_write(mtcars, "mtcars.tsv.xz")

## ----include = FALSE-----------------------------------------------------------
unlink(c("mtcars.tsv.gz", "mtcars.tsv.bz2", "mtcars.tsv.xz"))

## ----eval = nzchar(Sys.which("pigz"))------------------------------------------
# vroom_write(mtcars, pipe("pigz > mtcars.tsv.gz"))

## ----include = FALSE-----------------------------------------------------------
unlink("mtcars.tsv.gz")
/scratch/gouwar.j/cran-all/cranData/vroom/inst/doc/vroom.R
---
title: "Get started with vroom"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Get started with vroom}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r setup, include = FALSE}
knitr::opts_knit$set(root.dir = tempdir())

knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

options(tibble.print_min = 3)
```

The vroom package contains one main function, `vroom()`, which is used to
read all types of delimited files. A delimited file is any file in which the
data is separated (delimited) by one or more characters. The most common
types of delimited files are CSV (Comma Separated Values) and TSV (Tab
Separated Values) files; typically these files have a `.csv` or `.tsv`
suffix, respectively.

```{r}
library(vroom)
```

This vignette covers the following topics:

- The basics of reading files, including
  - single files
  - multiple files
  - compressed files
  - remote files
- Skipping particular columns.
- Specifying column types, for additional safety and when the automatic
  guessing fails.
- Writing regular and compressed files

## Reading files

To read a CSV, or other type of delimited file with vroom, pass the file to
`vroom()`. The delimiter will be automatically guessed if it is a common
delimiter; e.g. `","` `"\t"` `" "` `"|"` `":"` `";"`. If the guessing fails
or you are using a less common delimiter, specify it with the `delim`
parameter (e.g. `delim = ","`).

We have included an example CSV file in the vroom package for use in examples
and tests. Access it with `vroom_example("mtcars.csv")`.

```{r}
# See where the example file is stored on your machine
file <- vroom_example("mtcars.csv")
file

# Read the file, by default vroom will guess the delimiter automatically.
vroom(file)

# You can also specify it explicitly, which is (slightly) faster, and safer if
# you know how the file is delimited.
vroom(file, delim = ",")
```

## Reading multiple files

If you are reading a set of files which all have the same columns (as in,
names and types), you can pass the filenames directly to `vroom()` and it
will combine them into one result. vroom's example datasets include several
files named like `mtcars-i.csv`. These files contain subsets of the `mtcars`
data, for cars with different numbers of cylinders.

First, we get a character vector of these filepaths.

```{r}
ve <- grep("mtcars-[0-9].csv", vroom_examples(), value = TRUE)
files <- sapply(ve, vroom_example)
files
```

Now we can efficiently read them into one table by passing the filenames
directly to vroom.

```{r}
vroom(files)
```

Often the filename or directory where the files are stored contains
information. The `id` parameter can be used to add an extra column to the
result with the full path to each file (in this case we name the column
`path`).

```{r}
vroom(files, id = "path")
```

## Reading compressed files

vroom supports reading zip, gz, bz2 and xz compressed files automatically;
just pass the filename of the compressed file to vroom.

```{r}
file <- vroom_example("mtcars.csv.gz")
vroom(file)
```

`vroom()` decompresses, indexes and writes the decompressed data to a file in
the temp directory in a single stream. The temporary file is used to lazily
look up the values and will be automatically cleaned up when all values in
the object have been fully read, the object is removed, or the R session
ends.
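If you want to manage the decompression yourself, you can also pass a
connection object instead of a filename; a minimal sketch (equivalent to
passing the path directly):

```{r, eval = FALSE}
file <- vroom_example("mtcars.csv.gz")

# vroom reads from the connection stream; the result matches vroom(file).
vroom(gzfile(file), delim = ",")
```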
### Reading individual files from a multi-file zip archive

If you are reading a zip file that contains multiple files with the same
format, you can read a subset of the files at once like so:

```{r}
zip_file <- vroom_example("mtcars-multi-cyl.zip")
filenames <- unzip(zip_file, list = TRUE)$Name
filenames

# imagine we only want to read 2 of the 3 files
vroom(purrr::map(filenames[c(1, 3)], ~ unz(zip_file, .x)))
```

## Reading remote files

vroom can read files directly from the internet as well, by passing the URL
of the file to vroom.

```{r, eval = as.logical(Sys.getenv("NOT_CRAN", "false"))}
file <- "https://raw.githubusercontent.com/tidyverse/vroom/main/inst/extdata/mtcars.csv"
vroom(file)
```

It can even read gzipped files from the internet (although not the other
compressed formats).

```{r, eval = as.logical(Sys.getenv("NOT_CRAN", "false"))}
file <- "https://raw.githubusercontent.com/tidyverse/vroom/main/inst/extdata/mtcars.csv.gz"
vroom(file)
```

## Column selection

vroom provides the same interface for column selection and renaming as
[dplyr::select()](https://dplyr.tidyverse.org/reference/select.html). This
provides very efficient, flexible and readable selections. For example you
can select by:

- A character vector of column names

```{r}
file <- vroom_example("mtcars.csv.gz")
vroom(file, col_select = c(model, cyl, gear))
```

- A numeric vector of column indexes, e.g. `c(1, 2, 5)`

```{r}
vroom(file, col_select = c(1, 3, 11))
```

- Using the selection helpers such as `starts_with()` and `ends_with()`

```{r}
vroom(file, col_select = starts_with("d"))
```

- You can also rename columns

```{r}
vroom(file, col_select = c(car = model, everything()))
```

## Reading fixed width files

A fixed width file can be a very compact representation of numeric data.
Unfortunately, it's also often painful to read because you need to describe
the length of every field. vroom aims to make it as easy as possible by
providing a number of different ways to describe the field structure. Use
`vroom_fwf()` in conjunction with one of the following helper functions to
read the file.

```{r}
fwf_sample <- vroom_example("fwf-sample.txt")
cat(readLines(fwf_sample))
```

- `fwf_empty()` - Guess based on the position of empty columns.

```{r}
vroom_fwf(fwf_sample, fwf_empty(fwf_sample, col_names = c("first", "last", "state", "ssn")))
```

- `fwf_widths()` - Use user provided set of field widths.

```{r}
vroom_fwf(fwf_sample, fwf_widths(c(20, 10, 12), c("name", "state", "ssn")))
```

- `fwf_positions()` - Use user provided sets of start and end positions.

```{r}
vroom_fwf(fwf_sample, fwf_positions(c(1, 30), c(20, 42), c("name", "ssn")))
```

- `fwf_cols()` - Use user provided named widths.

```{r}
vroom_fwf(fwf_sample, fwf_cols(name = 20, state = 10, ssn = 12))
```

- `fwf_cols()` - Use user provided named pairs of positions.

```{r}
vroom_fwf(fwf_sample, fwf_cols(name = c(1, 20), ssn = c(30, 42)))
```

## Column types

vroom guesses the data types of columns as they are read, however sometimes
the guessing fails and it is necessary to explicitly set the type of one or
more columns. The available specifications are: (with single letter
abbreviations in quotes)

* `col_logical()` 'l', containing only `T`, `F`, `TRUE`, `FALSE`, `1` or `0`.
* `col_integer()` 'i', integer values.
* `col_big_integer()` 'I', big integer values (64-bit integers).
* `col_double()` 'd', floating point values.
* `col_number()` 'n', numbers containing the `grouping_mark`.
* `col_date(format = "")` 'D': with the locale's `date_format`.
* `col_time(format = "")` 't': with the locale's `time_format`.
* `col_datetime(format = "")` 'T': ISO8601 date times.
* `col_factor(levels, ordered)` 'f', a fixed set of values.
* `col_character()` 'c', everything else.
* `col_skip()` '_, -', don't import this column.
* `col_guess()` '?', parse using the "best" type based on the input.

You can tell vroom what types to use for the columns with the `col_types`
argument in a number of ways. If you only need to override a single column,
the most concise way is to use a named vector.

```{r}
# read the 'hp' columns as an integer
vroom(vroom_example("mtcars.csv"), col_types = c(hp = "i"))

# also skip reading the 'cyl' column
vroom(vroom_example("mtcars.csv"), col_types = c(hp = "i", cyl = "_"))

# also read the gears as a factor
vroom(vroom_example("mtcars.csv"), col_types = c(hp = "i", cyl = "_", gear = "f"))
```

You can read all the columns as the same type by using the `.default`
argument, for example reading everything as character.

```{r}
vroom(vroom_example("mtcars.csv"), col_types = c(.default = "c"))
```

However, you can also use the `col_*()` functions in a list.

```{r}
vroom(
  vroom_example("mtcars.csv"),
  col_types = list(hp = col_integer(), cyl = col_skip(), gear = col_factor())
)
```

This is most useful when a column type needs additional information, such as
for categorical data when you know all of the levels of a factor.

```{r}
vroom(
  vroom_example("mtcars.csv"),
  col_types = list(gear = col_factor(levels = c(gear = c("3", "4", "5"))))
)
```

## Name repair

Often the names of columns in the original dataset are not ideal to work
with. `vroom()` uses the same `.name_repair` argument as tibble, so you can
use one of the default name repair strategies or provide a custom function. A
great approach is to use the
[`janitor::make_clean_names()`](https://sfirke.github.io/janitor/reference/make_clean_names.html)
function as the input. This will automatically clean the names to use
whatever case you specify; here I am setting it to use `ALLCAPS` names.

```{r, eval = FALSE}
vroom(
  vroom_example("mtcars.csv"),
  .name_repair = ~ janitor::make_clean_names(., case = "all_caps")
)
```

## Writing delimited files

Use `vroom_write()` to write delimited files; the default delimiter is a tab,
which writes TSV files. Writing to TSV by default has the following benefits:

- Avoids the issue of whether to use `;` (common in Europe) or `,` (common in
  the US)
- Unlikely to require quoting in fields, as very few fields contain tabs
- More easily and efficiently ingested by Unix command line tools such as
  `cut`, `perl` and `awk`.

```{r}
vroom_write(mtcars, "mtcars.tsv")
```

```{r, include = FALSE}
unlink("mtcars.tsv")
```

### Writing CSV delimited files

However, you can also use `delim = ','` to write CSV files, which are common
as inputs to GUI spreadsheet tools like Excel or Google Sheets.

```{r}
vroom_write(mtcars, "mtcars.csv", delim = ",")
```

```{r, include = FALSE}
unlink("mtcars.csv")
```

### Writing compressed files

For gzip, bzip2 and xz compression the outputs will be automatically
compressed if the filename ends in `.gz`, `.bz2` or `.xz`.
```{r}
vroom_write(mtcars, "mtcars.tsv.gz")
vroom_write(mtcars, "mtcars.tsv.bz2")
vroom_write(mtcars, "mtcars.tsv.xz")
```

```{r, include = FALSE}
unlink(c("mtcars.tsv.gz", "mtcars.tsv.bz2", "mtcars.tsv.xz"))
```

It is also possible to use other compressors by using `pipe()` with
`vroom_write()` to create a pipe connection to command line utilities, such
as

- [pigz](https://zlib.net/pigz/), a parallel gzip implementation
- lbzip2, a parallel bzip2 implementation
- [pixz](https://github.com/vasi/pixz), a parallel xz implementation
- [Zstandard](https://facebook.github.io/zstd/), a modern real-time
  compression algorithm.

The parallel compression versions can be considerably faster for large output
files, and generally `vroom_write()` is fast enough that the compression
speed becomes the bottleneck when writing.

```{r, eval = nzchar(Sys.which("pigz"))}
vroom_write(mtcars, pipe("pigz > mtcars.tsv.gz"))
```

```{r, include = FALSE}
unlink("mtcars.tsv.gz")
```

### Reading and writing from standard input and output

vroom supports reading and writing to the C-level `stdin` and `stdout` of the
R process by using `stdin()` and `stdout()`. For example, from a shell prompt
you can pipe to and from vroom directly.

```shell
cat inst/extdata/mtcars.csv | Rscript -e 'vroom::vroom(stdin())'

Rscript -e 'vroom::vroom_write(iris, stdout())' | head
```

Note this interpretation of `stdin()` and `stdout()` differs from that used
elsewhere by R; however, we believe it better matches most users'
expectations for this use case.

## Further reading

- `vignette("benchmarks")` discusses the performance of vroom, how it
  compares to alternatives and how it achieves its results.
- [📽 vroom: Because Life is too short to read slow](https://www.youtube.com/watch?v=RA9AjqZXxMU&t=10s) -
  Presentation of vroom at UseR!2019
  ([slides](https://speakerdeck.com/jimhester/vroom))
- [📹 vroom: Read and write rectangular data quickly](https://www.youtube.com/watch?v=ZP_y5eaAc60) -
  a video tour of the vroom features.
/scratch/gouwar.j/cran-all/cranData/vroom/inst/doc/vroom.Rmd
--- title: "Vroom Benchmarks" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Vroom Benchmarks} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) library(ggplot2) library(forcats) library(dplyr) library(tidyr) library(fs) pretty_sec <- function(x) { x[!is.na(x)] <- prettyunits::pretty_sec(x[!is.na(x)]) x } pretty_lgl <- function(x) { case_when( x == TRUE ~ "TRUE", x == FALSE ~ "FALSE", TRUE ~ "" ) } read_benchmark <- function(file, desc) { vroom::vroom(file, col_types = c("ccccddddd")) %>% filter(op != "setup") %>% mutate( altrep = case_when( grepl("^vroom_no_altrep", reading_package) ~ FALSE, grepl("^vroom", reading_package) ~ TRUE, TRUE ~ NA ), reading_package = case_when( grepl("^vroom", reading_package) ~ "vroom", TRUE ~ reading_package ), label = fct_reorder( glue::glue("{reading_package}{altrep}\n{manip_package}", altrep = ifelse(is.na(altrep), "", glue::glue("(altrep = {altrep})")) ), case_when(type == "real" ~ time, TRUE ~ 0), sum), op = factor(op, desc) ) } generate_subtitle <- function(data) { rows <- scales::comma(data$rows[[1]]) cols <- scales::comma(data$cols[[1]]) size <- fs_bytes(data$size[[1]]) glue::glue("{rows} x {cols} - {size}B") } plot_benchmark <- function(data, title) { subtitle <- generate_subtitle(data) data <- data %>% filter(reading_package != "read.delim", type == "real") p1 <- data %>% ggplot() + geom_bar(aes(x = label, y = time, fill = op, group = label), stat = "identity") + scale_fill_brewer(type = "qual", palette = "Set2") + scale_y_continuous(labels = scales::number_format(suffix = "s")) + coord_flip() + labs(title = title, subtitle = subtitle, x = NULL, y = NULL, fill = NULL) + theme(legend.position = "bottom") p2 <- data %>% group_by(label) %>% summarise(max_memory = max(max_memory)) %>% ggplot() + geom_bar(aes(x = label, y = max_memory / (1024 * 1024)), stat = "identity") + scale_y_continuous(labels = scales::number_format(suffix = "Mb")) + coord_flip() + labs(title = "Maximum memory usage", x = NULL, y = NULL) + theme(axis.text.y = element_blank(), axis.ticks.y = element_blank()) library(patchwork) p1 + p2 + plot_layout(widths = c(2, 1)) } make_table <- function(data) { data %>% filter(type == "real") %>% select(-label, -size, -type, -rows, -cols) %>% spread(op, time) %>% mutate( total = read + print + head + tail + sample + filter + aggregate, max_memory = as.character(bench::as_bench_bytes(max_memory)) ) %>% arrange(desc(total)) %>% mutate_if(is.numeric, pretty_sec) %>% mutate_if(is.logical, pretty_lgl) %>% select(reading_package, manip_package, altrep, max_memory, everything()) %>% rename( "reading\npackage" = reading_package, "manipulating\npackage" = manip_package, memory = max_memory ) %>% knitr::kable(digits = 2, align = "r", format = "html") } desc <- c("setup", "read", "print", "head", "tail", "sample", "filter", "aggregate") ``` vroom is a new approach to reading delimited and fixed width data into R. It stems from the observation that when parsing files reading data from disk and finding the delimiters is generally not the main bottle neck. Instead (re)-allocating memory and parsing the values into R data types (particularly for characters) takes the bulk of the time. Therefore you can obtain very rapid input by first performing a fast indexing step and then using the Altrep framework available in R versions 3.5+ to access the values in a lazy / delayed fashion. 
## How it works

The initial reading of the file simply records the locations of each individual record; the actual values are not read into R. Altrep vectors are created for each column in the data, which hold a pointer to the index and the memory-mapped file. When these vectors are indexed the value is read from the memory mapping.

This means initial reading is extremely fast; in the real-world dataset below it is ~ 1/4 the time of the multi-threaded `data.table::fread()`. Sampling operations are likewise extremely fast, as only the data actually included in the sample is read. This means things like the tibble print method, calling `head()`, `tail()`, `x[sample(), ]`, etc. have very low overhead.

Filtering can also be fast: only the columns included in the filter selection have to be fully read, and only the data in the filtered rows needs to be read from the remaining columns. Grouped aggregations likewise only need to read the grouping variables and the variables aggregated.

Once a particular vector is fully materialized, the speed for all subsequent operations should be identical to that of a normal R vector.

This approach potentially also allows you to work with data that is larger than memory. As long as you are careful to avoid materializing the entire dataset at once, it can be efficiently queried and subset.

# Reading delimited files

The following benchmarks all measure reading delimited files of various sizes and data types. Because vroom delays reading, the benchmarks also do some manipulation of the data afterwards to provide a more realistic performance comparison.

Because the `read.delim` results are so much slower than the others, they are excluded from the plots but retained in the tables.

## Taxi Trip Dataset

This real-world dataset is from the Freedom of Information Law (FOIL) Taxi Trip Data from the NYC Taxi and Limousine Commission 2013, originally posted at <https://chriswhong.com/open-data/foil_nyc_taxi/>. It is also hosted on [archive.org](https://archive.org/details/nycTaxiTripData2013).

The first table, trip_fare_1.csv, is 1.55G in size.

    #> Observations: 14,776,615
    #> Variables: 11
    #> $ medallion       <chr> "89D227B655E5C82AECF13C3F540D4CF4", "0BD7C8F5B...
    #> $ hack_license    <chr> "BA96DE419E711691B9445D6A6307C170", "9FD8F69F0...
    #> $ vendor_id       <chr> "CMT", "CMT", "CMT", "CMT", "CMT", "CMT", "CMT...
    #> $ pickup_datetime <chr> "2013-01-01 15:11:48", "2013-01-06 00:18:35", ...
    #> $ payment_type    <chr> "CSH", "CSH", "CSH", "CSH", "CSH", "CSH", "CSH...
    #> $ fare_amount     <dbl> 6.5, 6.0, 5.5, 5.0, 9.5, 9.5, 6.0, 34.0, 5.5, ...
    #> $ surcharge       <dbl> 0.0, 0.5, 1.0, 0.5, 0.5, 0.0, 0.0, 0.0, 1.0, 0...
    #> $ mta_tax         <dbl> 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0...
    #> $ tip_amount      <int> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
    #> $ tolls_amount    <dbl> 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.8, 0.0, 0...
    #> $ total_amount    <dbl> 7.0, 7.0, 7.0, 6.0, 10.5, 10.0, 6.5, 39.3, 7.0...

### Taxi Benchmarks

code: [bench/taxi](https://github.com/tidyverse/vroom/tree/main/inst/bench/taxi)

All benchmarks were run on an Amazon EC2 [m5.4xlarge](https://aws.amazon.com/ec2/instance-types/m5/) instance with 16 vCPUs and an [EBS](https://aws.amazon.com/ebs/) volume.

The benchmarks labeled `vroom_base` use `vroom` with base functions for manipulation. `vroom_dplyr` uses `vroom` to read the file and dplyr functions to manipulate. `data.table` uses `fread()` to read the file and `data.table` functions to manipulate, and `readr` uses `readr` to read the file and `dplyr` to manipulate.
By default vroom only uses Altrep for character vectors; these are labeled `vroom(altrep: normal)`. The benchmarks labeled `vroom(altrep: full)` instead use Altrep vectors for all supported types, and `vroom(altrep: none)` disables Altrep entirely.

The following operations are performed.

- The data is read
- `print()` - _N.B. read.delim uses `print(head(x, 10))` because printing the whole dataset takes > 10 minutes_
- `head()`
- `tail()`
- Sampling 100 random rows
- Filtering for "UNK" payment type, which is 6,434 rows (0.0435% of the total).
- Aggregation of mean fare amount per payment type.

```{r, fig.height = 8, fig.width=10, warning = FALSE, echo = FALSE, message = FALSE}
taxi <- read_benchmark(path_package("vroom", "bench", "taxi.tsv"), desc)

plot_benchmark(taxi, "Time to analyze taxi trip data")

make_table(taxi)
```

(*N.B. Rcpp used in the dplyr implementation fully materializes all the Altrep numeric vectors when using `filter()` or `sample_n()`, which is why the first of these cases has additional overhead when using full Altrep.*)

## All numeric data

All numeric data is really a worst-case scenario for vroom. The index takes about as much memory as the parsed data. Also, because parsing doubles can be done quickly in parallel and text representations of doubles are only ~25 characters at most, there isn't a great deal of savings from delayed parsing.

For these reasons (and because the data.table implementation is very fast) vroom is a bit slower than fread for pure numeric data. However, because vroom is multi-threaded it is a bit quicker than readr and read.delim for this type of data.

### Long

code: [bench/all_numeric-long](https://github.com/tidyverse/vroom/tree/main/inst/bench/all_numeric-long)

```{r, fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE}
all_num <- read_benchmark(path_package("vroom", "bench", "all_numeric-long.tsv"), desc)

plot_benchmark(all_num, "Time to analyze long all numeric data")

make_table(all_num)
```

### Wide

code: [bench/all_numeric-wide](https://github.com/tidyverse/vroom/tree/main/inst/bench/all_numeric-wide)

```{r, fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE}
all_num_wide <- read_benchmark(path_package("vroom", "bench", "all_numeric-wide.tsv"), desc)

plot_benchmark(all_num_wide, "Time to analyze wide all numeric data")

make_table(all_num_wide)
```

## All character data

code: [bench/all_character-long](https://github.com/tidyverse/vroom/tree/main/inst/bench/all_character-long)

All character data is a best-case scenario for vroom when using Altrep, as it takes full advantage of the lazy reading.
### Long

```{r, fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE}
all_chr <- read_benchmark(path_package("vroom", "bench", "all_character-long.tsv"), desc)

plot_benchmark(all_chr, "Time to analyze long all character data")

make_table(all_chr)
```

### Wide

code: [bench/all_character-wide](https://github.com/tidyverse/vroom/tree/main/inst/bench/all_character-wide)

```{r, fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE}
all_chr_wide <- read_benchmark(path_package("vroom", "bench", "all_character-wide.tsv"), desc)

plot_benchmark(all_chr_wide, "Time to analyze wide all character data")

make_table(all_chr_wide)
```

# Reading multiple delimited files

code: [bench/taxi_multiple](https://github.com/tidyverse/vroom/tree/main/inst/bench/taxi_multiple)

```{r, echo = FALSE, message = FALSE, eval = TRUE}
mult <- read_benchmark(path_package("vroom", "bench", "taxi_multiple.tsv"), desc)
```

The benchmark reads all 12 files in the taxi trip fare data, totaling `r scales::comma(mult$rows[[1]])` rows and `r mult$cols[[1]]` columns, for a total file size of `r format(fs_bytes(mult$size[[1]]))`.

```{r, fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE}
plot_benchmark(mult, "Time to analyze multiple file data")

make_table(mult)
```

# Reading fixed width files

## United States Census 5-Percent Public Use Microdata Sample files

```{r, echo = FALSE, message = FALSE, eval = TRUE}
fwf <- read_benchmark(path_package("vroom", "bench", "fwf.tsv"), desc)
```

This fixed width dataset contains individual records of the characteristics of a 5 percent sample of people and housing units from the year 2000 and is freely available at <https://web.archive.org/web/20150908055439/https://www2.census.gov/census_2000/datasets/PUMS/FivePercent/California/all_California.zip>. The data is split into files by state, and the state of California was used in this benchmark. The data totals `r scales::comma(fwf$rows[[1]])` rows and `r fwf$cols[[1]]` columns with a total file size of `r format(fs_bytes(fwf$size[[1]]))`.

## Census data benchmarks

code: [bench/fwf](https://github.com/tidyverse/vroom/tree/main/inst/bench/fwf)

```{r, fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE}
plot_benchmark(fwf, "Time to analyze fixed width data")

make_table(fwf)
```

# Writing delimited files

code: [bench/taxi_writing](https://github.com/tidyverse/vroom/tree/main/inst/bench/taxi_writing)

The benchmarks write out the taxi trip dataset in a few different ways.

- An uncompressed file
- A gzip-compressed file using `gzfile()` _(readr and vroom do this automatically for files ending in `.gz`)_
- A gzip-compressed file written with multiple threads (natively for data.table, and using a `pipe()` connection to [pigz](https://zlib.net/pigz/) for the rest).
- A [Zstandard](https://facebook.github.io/zstd/) compressed file (data.table does not support this format).
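For vroom, these strategies correspond roughly to the calls sketched below (an illustration, not the benchmark code itself; `trips` stands in for the taxi data, and `pigz`/`zstd` must be installed and on the `PATH`):

```{r, eval = FALSE}
vroom::vroom_write(trips, "trips.tsv")                  # uncompressed
vroom::vroom_write(trips, "trips.tsv.gz")               # gzip, via the .gz suffix
vroom::vroom_write(trips, pipe("pigz > trips.tsv.gz"))  # multithreaded gzip
vroom::vroom_write(trips, pipe("zstd > trips.tsv.zst")) # Zstandard
```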
```{r, fig.height = 8, fig.width=10, warning = FALSE, message = FALSE, echo = FALSE} taxi_writing <- read_benchmark(path_package("vroom", "bench", "taxi_writing.tsv"), c("setup", "writing")) %>% rename( package = reading_package, compression = manip_package ) %>% mutate( package = factor(package, c("base", "readr", "data.table", "vroom")), compression = factor(compression, rev(c("gzip", "multithreaded_gzip", "zstandard", "uncompressed"))) ) %>% filter(type == "real") subtitle <- generate_subtitle(taxi_writing) taxi_writing %>% ggplot(aes(x = compression, y = time, fill = package)) + geom_bar(stat = "identity", position = position_dodge2(reverse = TRUE, padding = .05)) + scale_fill_brewer(type = "qual", palette = "Set2") + scale_y_continuous(labels = scales::number_format(suffix = "s")) + theme(legend.position = "bottom") + coord_flip() + labs(title = "Writing taxi trip data", subtitle = subtitle, x = NULL, y = NULL, fill = NULL) taxi_writing %>% select(-size, -op, -rows, -cols, -type, -altrep, -label, -max_memory) %>% mutate_if(is.numeric, pretty_sec) %>% pivot_wider(names_from = package, values_from = time) %>% arrange(desc(compression)) %>% knitr::kable(digits = 2, align = "r", format = "html") ``` ## Session and package information ```{r, echo = FALSE, warning = FALSE, message = FALSE} si <- vroom::vroom(path_package("vroom", "bench", "session_info.tsv")) class(si) <- c("packages_info", "data.frame") select(as.data.frame(si), package, version = ondiskversion, date, source) %>% knitr::kable() ```
/scratch/gouwar.j/cran-all/cranData/vroom/vignettes/benchmarks.Rmd
--- title: "Get started with vroom" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Get started with vroom} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include = FALSE} knitr::opts_knit$set(root.dir = tempdir()) knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) options(tibble.print_min = 3) ``` The vroom package contains one main function `vroom()` which is used to read all types of delimited files. A delimited file is any file in which the data is separated (delimited) by one or more characters. The most common type of delimited files are CSV (Comma Separated Values) or TSV (Tab Separated Values) files, typically these files have a `.csv` and `.tsv` suffix respectively. ```{r} library(vroom) ``` This vignette covers the following topics: - The basics of reading files, including - single files - multiple files - compressed files - remote files - Skipping particular columns. - Specifying column types, for additional safety and when the automatic guessing fails. - Writing regular and compressed files ## Reading files To read a CSV, or other type of delimited file with vroom pass the file to `vroom()`. The delimiter will be automatically guessed if it is a common delimiter; e.g. ("," "\t" " " "|" ":" ";"). If the guessing fails or you are using a less common delimiter specify it with the `delim` parameter. (e.g. `delim = ","`). We have included an example CSV file in the vroom package for use in examples and tests. Access it with `vroom_example("mtcars.csv")` ```{r} # See where the example file is stored on your machine file <- vroom_example("mtcars.csv") file # Read the file, by default vroom will guess the delimiter automatically. vroom(file) # You can also specify it explicitly, which is (slightly) faster, and safer if # you know how the file is delimited. vroom(file, delim = ",") ``` ## Reading multiple files If you are reading a set of files which all have the same columns (as in, names and types), you can pass the filenames directly to `vroom()` and it will combine them into one result. vroom's example datasets include several files named like `mtcars-i.csv`. These files contain subsets of the `mtcars` data, for cars with different numbers of cylinders. First, we get a character vector of these filepaths. ```{r} ve <- grep("mtcars-[0-9].csv", vroom_examples(), value = TRUE) files <- sapply(ve, vroom_example) files ``` Now we can efficiently read them into one table by passing the filenames directly to vroom. ```{r} vroom(files) ``` Often the filename or directory where the files are stored contains information. The `id` parameter can be used to add an extra column to the result with the full path to each file. (in this case we name the column `path`). ```{r} vroom(files, id = "path") ``` ## Reading compressed files vroom supports reading zip, gz, bz2 and xz compressed files automatically, just pass the filename of the compressed file to vroom. ```{r} file <- vroom_example("mtcars.csv.gz") vroom(file) ``` `vroom()` decompresses, indexes and writes the decompressed data to a file in the temp directory in a single stream. The temporary file is used to lazily look up the values and will be automatically cleaned up when all values in the object have been fully read, the object is removed, or the R session ends. 
### Reading individual files from a multi-file zip archive

If you are reading a zip file that contains multiple files with the same format, you can read a subset of the files at once like so:

```{r}
zip_file <- vroom_example("mtcars-multi-cyl.zip")
filenames <- unzip(zip_file, list = TRUE)$Name
filenames

# imagine we only want to read 2 of the 3 files
vroom(purrr::map(filenames[c(1, 3)], ~ unz(zip_file, .x)))
```

## Reading remote files

vroom can also read files directly from the internet, by passing the URL of the file to vroom.

```{r, eval = as.logical(Sys.getenv("NOT_CRAN", "false"))}
file <- "https://raw.githubusercontent.com/tidyverse/vroom/main/inst/extdata/mtcars.csv"
vroom(file)
```

It can even read gzipped files from the internet (although not the other compressed formats).

```{r, eval = as.logical(Sys.getenv("NOT_CRAN", "false"))}
file <- "https://raw.githubusercontent.com/tidyverse/vroom/main/inst/extdata/mtcars.csv.gz"
vroom(file)
```

## Column selection

vroom provides the same interface for column selection and renaming as [dplyr::select()](https://dplyr.tidyverse.org/reference/select.html). This provides very efficient, flexible and readable selections. For example you can select by:

- A character vector of column names

```{r}
file <- vroom_example("mtcars.csv.gz")
vroom(file, col_select = c(model, cyl, gear))
```

- A numeric vector of column indexes, e.g. `c(1, 2, 5)`

```{r}
vroom(file, col_select = c(1, 3, 11))
```

- Using the selection helpers such as `starts_with()` and `ends_with()`

```{r}
vroom(file, col_select = starts_with("d"))
```

- You can also rename columns

```{r}
vroom(file, col_select = c(car = model, everything()))
```

## Reading fixed width files

A fixed width file can be a very compact representation of numeric data. Unfortunately, it's also often painful to read because you need to describe the length of every field. vroom aims to make it as easy as possible by providing a number of different ways to describe the field structure. Use `vroom_fwf()` in conjunction with one of the following helper functions to read the file.

```{r}
fwf_sample <- vroom_example("fwf-sample.txt")
cat(readLines(fwf_sample))
```

- `fwf_empty()` - Guess based on the position of empty columns.

```{r}
vroom_fwf(fwf_sample, fwf_empty(fwf_sample, col_names = c("first", "last", "state", "ssn")))
```

- `fwf_widths()` - Use a user-provided set of field widths.

```{r}
vroom_fwf(fwf_sample, fwf_widths(c(20, 10, 12), c("name", "state", "ssn")))
```

- `fwf_positions()` - Use user-provided sets of start and end positions.

```{r}
vroom_fwf(fwf_sample, fwf_positions(c(1, 30), c(20, 42), c("name", "ssn")))
```

- `fwf_cols()` - Use user-provided named widths.

```{r}
vroom_fwf(fwf_sample, fwf_cols(name = 20, state = 10, ssn = 12))
```

- `fwf_cols()` - Use user-provided named pairs of positions.

```{r}
vroom_fwf(fwf_sample, fwf_cols(name = c(1, 20), ssn = c(30, 42)))
```

## Column types

vroom guesses the data types of columns as they are read; however, sometimes the guessing fails and it is necessary to explicitly set the type of one or more columns. The available specifications are (with single-letter abbreviations in quotes):

* `col_logical()` 'l', containing only `T`, `F`, `TRUE`, `FALSE`, `1` or `0`.
* `col_integer()` 'i', integer values.
* `col_big_integer()` 'I', big integer values (64-bit integers).
* `col_double()` 'd', floating point values.
* `col_number()` 'n', numbers containing the `grouping_mark`
* `col_date(format = "")` 'D': with the locale's `date_format`.
* `col_time(format = "")` 't': with the locale's `time_format`. * `col_datetime(format = "")` 'T': ISO8601 date times. * `col_factor(levels, ordered)` 'f', a fixed set of values. * `col_character()` 'c', everything else. * `col_skip()` '_, -', don't import this column. * `col_guess()` '?', parse using the "best" type based on the input. You can tell vroom what columns to use with the `col_types()` argument in a number of ways. If you only need to override a single column the most concise way is to use a named vector. ```{r} # read the 'hp' columns as an integer vroom(vroom_example("mtcars.csv"), col_types = c(hp = "i")) # also skip reading the 'cyl' column vroom(vroom_example("mtcars.csv"), col_types = c(hp = "i", cyl = "_")) # also read the gears as a factor vroom(vroom_example("mtcars.csv"), col_types = c(hp = "i", cyl = "_", gear = "f")) ``` You can read all the columns with the same type, by using the `.default` argument. For example reading everything as a character. ```{r} vroom(vroom_example("mtcars.csv"), col_types = c(.default = "c")) ``` However you can also use the `col_*()` functions in a list. ```{r} vroom( vroom_example("mtcars.csv"), col_types = list(hp = col_integer(), cyl = col_skip(), gear = col_factor()) ) ``` This is most useful when a column type needs additional information, such as for categorical data when you know all of the levels of a factor. ```{r} vroom( vroom_example("mtcars.csv"), col_types = list(gear = col_factor(levels = c(gear = c("3", "4", "5")))) ) ``` ## Name repair Often the names of columns in the original dataset are not ideal to work with. `vroom()` uses the same `.name_repair` argument as tibble, so you can use one of the default name repair strategies or provide a custom function. A great approach is to use the [`janitor::make_clean_names()`](https://sfirke.github.io/janitor/reference/make_clean_names.html) function as the input. This will automatically clean the names to use whatever case you specify, here I am setting it to use `ALLCAPS` names. ```{r, eval = FALSE} vroom( vroom_example("mtcars.csv"), .name_repair = ~ janitor::make_clean_names(., case = "all_caps") ) ``` ## Writing delimited files Use `vroom_write()` to write delimited files, the default delimiter is tab, to write TSV files. Writing to TSV by default has the following benefits: - Avoids the issue of whether to use `;` (common in Europe) or `,` (common in the US) - Unlikely to require quoting in fields, as very few fields contain tabs - More easily and efficiently ingested by Unix command line tools such as `cut`, `perl` and `awk`. ```{r} vroom_write(mtcars, "mtcars.tsv") ``` ```{r, include = FALSE} unlink("mtcars.tsv") ``` ### Writing CSV delimited files However you can also use `delim = ','` to write CSV files, which are common as inputs to GUI spreadsheet tools like Excel or Google Sheets. ```{r} vroom_write(mtcars, "mtcars.csv", delim = ",") ``` ```{r, include = FALSE} unlink("mtcars.csv") ``` ### Writing compressed files For gzip, bzip2 and xz compression the outputs will be automatically compressed if the filename ends in `.gz`, `.bz2` or `.xz`. 
```{r}
vroom_write(mtcars, "mtcars.tsv.gz")
vroom_write(mtcars, "mtcars.tsv.bz2")
vroom_write(mtcars, "mtcars.tsv.xz")
```

```{r, include = FALSE}
unlink(c("mtcars.tsv.gz", "mtcars.tsv.bz2", "mtcars.tsv.xz"))
```

It is also possible to use other compressors by using `pipe()` with `vroom_write()` to create a pipe connection to command line utilities, such as

- [pigz](https://zlib.net/pigz/), a parallel gzip implementation
- lbzip2, a parallel bzip2 implementation
- [pixz](https://github.com/vasi/pixz), a parallel xz implementation
- [Zstandard](https://facebook.github.io/zstd/), a modern real-time compression algorithm.

The parallel compression versions can be considerably faster for large output files, and generally `vroom_write()` is fast enough that the compression speed becomes the bottleneck when writing.

```{r, eval = nzchar(Sys.which("pigz"))}
vroom_write(mtcars, pipe("pigz > mtcars.tsv.gz"))
```

```{r, include = FALSE}
unlink("mtcars.tsv.gz")
```

### Reading and writing from standard input and output

vroom supports reading and writing to the C-level `stdin` and `stdout` of the R process by using `stdin()` and `stdout()`. E.g. from a shell prompt you can pipe to and from vroom directly.

```shell
cat inst/extdata/mtcars.csv | Rscript -e 'vroom::vroom(stdin())'

Rscript -e 'vroom::vroom_write(iris, stdout())' | head
```

Note that this interpretation of `stdin()` and `stdout()` differs from that used elsewhere by R; however, we believe it better matches most users' expectations for this use case.

## Further reading

- `vignette("benchmarks")` discusses the performance of vroom, how it compares to alternatives, and how it achieves its results.
- [📽 vroom: Because Life is too short to read slow](https://www.youtube.com/watch?v=RA9AjqZXxMU&t=10s) - Presentation of vroom at UseR!2019 ([slides](https://speakerdeck.com/jimhester/vroom))
- [📹 vroom: Read and write rectangular data quickly](https://www.youtube.com/watch?v=ZP_y5eaAc60) - a video tour of the vroom features.
/scratch/gouwar.j/cran-all/cranData/vroom/vignettes/vroom.Rmd
ABEL1Q <- function(T,C) { ALPHA <- 4 * (C[1]^2)/((1-C[1])^4) ALPHA <- 1.3221*((ALPHA*T)^.2) return(ALPHA) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/ABEL1Q.R
AR1 <- function(x) { T <- length(x) -1 Y <- x[2:T] Y_ <- x[1:(T-1)] ALPHA <- solve(t(Y_) %*% Y_ ) %*% t(Y_) %*% Y RE <- Y - Y_ %*% ALPHA SIGMAS <- sum(RE^2) / (T-1) STDA <- sqrt( SIGMAS * solve(t(Y_) %*% Y_ )) return(list(ALPHA=ALPHA,STDA=STDA)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/AR1.R
Adjust.thin <- function(y) { n <- length(y) m <- ar.ols(y,aic=F,order.max=1) b <- m$ar e <- m$resid e <- matrix(e[!is.na(e)],nrow=n-1) r <- 1/(1-b[1]) * e return(r) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Adjust.thin.R
Auto.Q <- function(y,lags=10) { data <- y - mean(y) T <- length(data) ac <- matrix(acf(data,lag.max=lags,plot=F)$acf[, , 1]) ac <- ac[2:(lags+1),1] ac1 <- matrix(acf(data,lag.max=lags,type="covariance",plot=F)$acf[, , 1]) ac2 <- matrix(NA,nrow=lags) for( i in 1:lags){ y <- data[(i+1):T]^2 x <- data[1:(T-i)]^2 t <- length(y) ac2[i] <- crossprod(x,y)/t } ac3 <- (ac1[2:(lags+1),1]^2) /ac2 #Qps <- T*sum(ac3) BP<- T * cumsum(ac3) aux <- matrix(1:lags) q <- 2.4 maxro <- sqrt(T)*max(sqrt(ac3)) pin <- 2 if(maxro <= sqrt(q*log(T)) ) pin <-log(T) Lp <- BP-aux*pin; phat <- which.max(Lp) Tn <- BP[phat] pvalue <- 1-pchisq(Tn,1) return(list(Stat=Tn,Pvalue=pvalue)) }
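# Usage sketch (illustrative; not part of the original source). Auto.Q()
# returns the automatic portmanteau statistic, with a data-driven number of
# autocorrelations, and its asymptotic chi-squared(1) p-value:
# set.seed(1); Auto.Q(rnorm(500), lags = 10)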
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Auto.Q.R
Auto.VR <- function(y) { coe <- AR1(y)$ALPHA T <- length(y) lq <- ABEL1Q(T,coe) vrsum <- 1 for (i in 1:(T-1)) { sum1 <- sum(y[1:(T-i)] * y[(1+i):T]) sum1 <- sum1/(sum(y^2)) vrsum <- vrsum + 2*kfunc(i/lq)*sum1 } vr <- sqrt(T/lq)*(vrsum - 1)/sqrt(2) return(list(stat=vr,sum=vrsum)) }
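# Usage sketch (illustrative; not part of the original source). Auto.VR()
# returns the standardized automatic variance-ratio statistic ($stat) and the
# underlying variance-ratio sum ($sum), with the bandwidth chosen via an AR(1) fit:
# set.seed(1); Auto.VR(rnorm(500))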
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Auto.VR.R
AutoBoot.test <- function(y,nboot,wild,prob=c(0.025,0.975)) { y <- as.matrix(y) test <- Auto.VR(y); LC=test$stat statmat1 <- matrix(NA, nrow=nboot, ncol=1) statmat2 <- matrix(NA, nrow=nboot, ncol=1) if (wild == "Normal") { for (i in 1:nboot) { ys <- y * rnorm(nrow(y)) M=Auto.VR(ys) statmat1[i,] <- M$stat; statmat2[i,]=M$sum } } if (wild == "Mammen") { for (i in 1:nboot) { ys <- y * Mammen(nrow(y)) M=Auto.VR(ys) statmat1[i,] <- M$stat; statmat2[i,]=M$sum } } if (wild == "Rademacher") { for (i in 1:nboot) { ys <- y * Rademacher(nrow(y)) M=Auto.VR(ys) statmat1[i,] <- M$stat; statmat2[i,]=M$sum } } tem <- abs(statmat1) > abs(LC) tem[tem == "TRUE"] <- 1 p <- mean(tem) CI1 <- quantile(statmat1,prob); CI2 <- quantile(statmat2,prob) return(list(test.stat=LC,VRsum=test$sum,pval=p,CI.stat=CI1,CI.VRsum=CI2)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/AutoBoot.test.R
Ave.Ex <- function(y) { T <- length(y) B <- rep(0,T-1) SIGMAT <- sum(y^2)/(T-1) CPI <- seq(-0.8,0.8,0.01) EXLMINF <- 0 EXLRINF <- 0 for (j in 1:161) { B[1] <- y[1] for (k in 2:(T-1)) { B[k] <- y[k] + CPI[j]*B[k-1] } SIGMAH <- SIGMAT - (( sum(y[2:T]*B) )^2)/T/(sum(B^2)) LM <- (sum(y[2:T]*B))^2/T*(1 - CPI[j]^2)/SIGMAT/SIGMAT LR <- T*log(SIGMAT/SIGMAH) EXLMINF <- EXLMINF + exp(LM/2)/161; EXLRINF <- EXLRINF + exp(LR/2)/161; } EXLMINF <- log(EXLMINF) EXLRINF <- log(EXLRINF) return(list(Ex.LM=EXLMINF,Ex.LR=EXLRINF)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Ave.Ex.R
Boot.test <- function(y,kvec,nboot,wild,prob=c(0.025,0.975)) { y <- as.matrix(y) LC <- FastLMCD(y,kvec) statmat <- matrix(NA, nrow=nboot, ncol=length(kvec)+1) if (wild == "Normal") { stat <- matrix(c(LC$M2,LC$CD2)) for (i in 1:nboot) { ys <- y * rnorm(nrow(y)) LCs <- FastLMCD(ys,kvec) statmat[i,] <- c(LCs$M2,LCs$CD2) } } if (wild == "No") { stat <- matrix(c(LC$M1,LC$CD1)) for (i in 1:nboot) { index <- as.integer(runif(nrow(y), min=1, max=nrow(y))) ys <- as.matrix(y[index]) LCs <- FastLMCD(ys,kvec) statmat[i,] <- c(LCs$M1,LCs$CD1) } } if (wild == "Mammen") { stat <- matrix(c(LC$M2,LC$CD2)) for (i in 1:nboot) { ys <- y * Mammen(nrow(y)) LCs <- FastLMCD(ys,kvec) statmat[i,] <- c(LCs$M2,LCs$CD2) } } if (wild == "Rademacher") { stat <- matrix(c(LC$M2,LC$CD2)) for (i in 1:nboot) { ys <- y * Rademacher(nrow(y)) LCs <- FastLMCD(ys,kvec) statmat[i,] <- c(LCs$M2,LCs$CD2) } } p <- matrix(NA,nrow = ncol(statmat), ncol=1) CI <- matrix(NA,nrow = ncol(statmat), ncol=length(prob)) for (i in 1:ncol(statmat)) { tem <- abs(statmat[,i]) > abs(stat[i]) tem[tem == "TRUE"] <- 1 p[i] <- mean(tem) CI[i,] <- quantile(statmat[,i],prob) } CI <- CI[1:length(kvec),] colnames(CI) <- paste(100*prob,"%",sep="") rownames(CI) <- paste("k=",kvec,sep="") return(list(Holding.Period=kvec,LM.pval=as.numeric(p[1:length(kvec)]),CD.pval=as.numeric(p[length(kvec)+1]),CI=CI)) }
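# Usage sketch (illustrative; not part of the original source): wild-bootstrap
# p-values and confidence intervals for the Lo-MacKinlay (LM) and
# Chow-Denning (CD) statistics:
# set.seed(1); Boot.test(rnorm(300), kvec = c(2, 5, 10), nboot = 200, wild = "Normal")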
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Boot.test.R
Chen.Deo <- function(x,kvec) { ks <- max(kvec) sig2 <- sd(x)^2 z <- as.matrix(x - mean(x)) n <- nrow(z) lamda <- as.matrix(2*pi/n * (1:as.integer((n-1)/2))) #@ equation 9 of Chen and Deo @ wmat <- matrix(NA,nrow=nrow(lamda),ncol=length(kvec)) for(i in 1:nrow(lamda)) { wmat[i,] = (kvec^(-1) * ( sin( kvec*lamda[i]/2 ) /sin( lamda[i]/2 ) )^2 ) } w1 <-colSums(wmat) w2 <-colSums(wmat^2) w3 <-colSums(wmat^3) beta <- 1 - 2/3*( w1*w3) /(w2^2) #@ equation 10 of Chen and Deo @ tem <- complex(imaginary=-lamda) Ilamda <- matrix(NA,nrow=nrow(lamda),1) for( j in 1:nrow(lamda) ) { sum1 <- 0 for( i in 1:n ) { sum1 <- sum1 + z[i] * exp(tem[j]*i) } Ilamda[j] <- (2*pi*n)^(-1) * Mod(sum1)^2 } sum1 <- numeric(0) for( j in 1:length(kvec) ) { sum1 <- c(sum1,colSums(Ilamda * wmat[,j]) * (1 - kvec[j]/n)^(-1) * (4*pi) / (n*sig2) ) } Vp <- sum1^(beta) #@ tau values on page 215 @ tauvec <- numeric(0) for( j in 1:ks) { tauvec <- c(tauvec, sum( (z[(j+1):n]^2)*(z[1:(n-j)]^2))*sig2^(-2)/(n-j-4)) } cnk <- n*(n-kvec)^(-1) #@ Matrices in equation 11 @ lmat <- matrix(0,nrow=ks+1,ncol=length(kvec)) for( i in 1:length(kvec)) { for( j in 1:(kvec[i]-1)) {lmat[j,i] <- 2*cnk[i]*(1-j/kvec[i])} } lmat[ks+1,] <- -(kvec * cnk - n/(n-1)) bvec <- matrix(0,nrow=ks,ncol=1) for(j in 1:ks) {bvec[j] <- 2*(n-j)*n^(-3)*tauvec[j] + 2*j*n^(-3)} avec <- ( ( n- 1:ks ) * tauvec )/n^2 + (1:ks)/n^2 amat <- diag(avec) sigmat1 <- cbind(amat,bvec) sigmat2 <- cbind(t(bvec),2*n^(-2)) sigmat3 <- rbind(sigmat1,sigmat2) sigmat <- t(lmat) %*% sigmat3 %*% lmat # mu vector in Theorem 5@ mubeta <- 1 + 0.5*beta * (beta-1) * diag(sigmat) #@ Sigma matrix vector in Theorem 5@ sigbeta <- matrix(0,nrow=nrow(sigmat),ncol=ncol(sigmat)) for( i in 1:nrow(sigmat)) { for( j in 1:ncol(sigmat)) {sigbeta[i,j] <- beta[i]*beta[j]*sigmat[i,j]} } #@ Sum stat in equation 15 @ stat1 <- sum(Vp-1) #@ QP stat in equation 16 @ QP <- t(Vp-mubeta) %*% solve(sigbeta) %*% (Vp-mubeta) crit <- qchisq(c(0.01,0.02,0.05,0.10,0.20),df=length(kvec),lower.tail=FALSE) return(list(Holding.Period=kvec,VRsum=stat1,QPn=QP,ChiSQ.Quantiles_1_2_5_10_20_percent=crit)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Chen.Deo.R
Chow.Denning <- function(y,kvec) { y <- as.matrix(y) n <- nrow(y) mq <- matrix(NA, nrow=length(kvec), ncol=2) for (i in 1:length(kvec)) { k <- kvec[i] LM <- LM_stat(y,k) mq[i,] <- cbind(LM$LM1,LM$LM2) } mv1 <- max(abs(mq[,1])) mv2 <- max(abs(mq[,2])) alpha <- c(0.1,0.05,0.01) per <- 0.5*( 1-(1-alpha)^(1/length(kvec))) crit <- qnorm(1-per) return(list(Holding.Periods=kvec,CD1=mv1,CD2=mv2,Critical.Values_10_5_1_percent=crit)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Chow.Denning.R
DL.test <- function(y,B = 300,p=1) { y <- as.matrix(y) n <- nrow(y) Stat <- DLtest(y,p) statmat1 <- matrix(NA, nrow=B, ncol=1) statmat2 <- matrix(NA, nrow=B, ncol=1) for(i in 1:B){ m <- Mammen(n) ys <- (y-mean(y))*(m-mean(m)) Stats <- DLtest(ys,p) statmat1[i,1] = Stats$Cpstat statmat2[i,1] = Stats$Kpstat } tem <- abs(statmat1) > abs(Stat$Cpstat) tem[tem == "TRUE"] <- 1 p1 <- mean(tem) tem <- abs(statmat2) > abs(Stat$Kpstat) tem[tem == "TRUE"] <- 1 p2 <- mean(tem) return(list(Cp=Stat$Cpstat,Kp=Stat$Kpstat,Cp_pval=p1,Kp_pval=p2)) }
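# Usage sketch (illustrative; not part of the original source): wild-bootstrap
# p-values for the Cp (Cramer-von Mises) and Kp (Kolmogorov-Smirnov) statistics
# computed by DLtest():
# set.seed(1); DL.test(rnorm(200), B = 100, p = 1)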
/scratch/gouwar.j/cran-all/cranData/vrtest/R/DL.test.R
DLtest <- function(y,p) { ym <- as.matrix(y-mean(y)) n <- nrow(ym); s2 <- sum(ym^2)/(n-p) sum3 <- numeric(n-p) sum2 <- 0 for(j in (p+1):n) { sum1 <- 0 for(i in (p+1):n){ indicate <- 0 zi <- ym[(i-1):(i-p),1] zj <- ym[(j-1):(j-p),1] tem1 <- as.numeric(zi <= zj) if( prod(tem1) == 1) indicate <- 1 sum1 <- sum1 + ym[i,1]*indicate } sum2 <- sum2 + sum1^2 sum3[j-p] <- abs(sum1/sqrt(n-p)) } Cp <- sum2/(s2*(n-p)^2) Kp <- max(sum3)/sqrt(s2) return(list(Cpstat=Cp,Kpstat=Kp)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/DLtest.R
FastLMCD <- function(y,kvec) { y <- as.matrix(y) n <- nrow(y) mq <- matrix(NA, nrow=length(kvec), ncol=2) for (i in 1:length(kvec)) { k <- kvec[i] LM <- FastLM_stat(y,k) mq[i,] <- cbind(LM$LM1,LM$LM2) } mv1 <- max(abs(mq[,1])) mv2 <- max(abs(mq[,2])) return(list(M1=mq[,1],M2=mq[,2],CD1=mv1,CD2=mv2)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/FastLMCD.R
FastLM_stat <- function(y,k) { y <- as.matrix(y); y1 <- (y-mean(y))^2; n <- nrow(y) vr <- FastVR(y,k) tem1 <- 2*(2*k-1)*(k-1); tem2 <- 3*k m1 <- sqrt(n)*(vr-1)/sqrt( tem1/tem2 ) w <- 4*as.matrix((1-(1:(k-1))/k)^2,nrow=k-1) dvec <- matrix(NA, nrow=(k-1), ncol=1) for (j in 1:(k-1)) { dvec[j] <- sum(y1[(j+1):n] * y1[1:(n-j)])/( sum(y1)^2 ) } summ <- crossprod(w,dvec) m2 <- sqrt(n)*(vr-1)*((n*summ)^(-.5) ) return(list(LM1=m1,LM2=m2)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/FastLM_stat.R
FastVR <- function(x,kvec) { x <- as.matrix(x) m <- acf(x,lag.max = max(kvec),plot=FALSE)$acf[2:(max(kvec)+1)] VR <- matrix(NA,nrow=length(kvec)) for(i in 1:length(kvec)) { k <- kvec[i] w <- (1-(1:(k-1))/k) VR[i] <- 1+ 2*sum(w*m[1:(k-1)]) } return(VR) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/FastVR.R
Gen.Spec.Test <- function(y,B=300) { n<- length(y) e <- y - mean(y) v <- var(e) y1 <- y[1:(n-1)] weiexp <- compweexp(y1) CvMexp <- 0 for(j in 1:(n-1)) { aux2 <- 1/((j*pi)^2) aux2 <- aux2/(n-j+1) CvMexp <- CvMexp+ aux2* t(e[(1+j):n]) %*% weiexp[1:(n-j),1:(n-j)] %*% e[(1+j):n] } CvMexp <- CvMexp/v CvMexpb <- matrix(0,nrow=B,ncol=2) for(k in 1:B) { eb <- e * Mammen(n) eb <- eb - mean(eb) tem <- 0 for( j in 1:(n-1) ){ aux2 <- 1/((j*pi)^2) aux2 <- aux2/(n-j+1) tem <- tem+aux2* t(eb[(1+j):n]) %*% weiexp[1:(n-j),1:(n-j)] %*% eb[(1+j):n] } CvMexpb[k,] <- cbind(tem/v > CvMexp,tem/v) } pboot <- mean(CvMexpb[,1]) Critboot <- quantile(CvMexpb[,2],c(0.9,0.95,0.99)) return(pboot) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Gen.Spec.Test.R
IACM <- function(r,uhat) { T <- length(uhat) iacm <- numeric(3) seq <- 0:(T-1) gamz <- sum(uhat^2)/T fhat <- 0 if (r > 0) { for (s in 1:as.integer(T*r/2)) { lamda <- 2*pi*s/T co <- cos(lamda*seq) si <- sin(lamda*seq) perio <- ( (sum(co*uhat) )^2 + ( sum(si*uhat) )^2 )/(2*pi*T) fhat <- fhat + perio } } fhat <- 2*pi*fhat/T ut <- fhat - gamz*r/2 uts <- sqrt(2*T)*ut/gamz iacm[1] <- uts^2/(r*(1-r)) if (r == 0 | r ==1 ) iacm[1] <- 0 iacm[2] <- uts^2 iacm[3] <- abs(uts) return(iacm) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/IACM.R
ISIMP <- function(a,b,f) { # if an even number take the first obs out n <- (length(f)-1)/2 sum <- f[1] + 4*f[2*n] + f[2*n+1] for (i in 2:(2*n-1)) { if (i/2 - as.integer(i/2) == 0) c <- 4 else c <- 2 sum <- sum + c*f[i] } h <- (b - a)/(2*n) sum <- h*sum/3 return(sum) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/ISIMP.R
JWright.crit <- function(n,kvec,nit) { mat <- matrix(NA,nrow=nit,ncol=3) for (i in 1:nit) { ranking <- as.matrix(sample(1:n,n,replace=FALSE)) r1 <- (ranking - 0.5*(n+1) )/sqrt((n-1)*(n+1)/12) r2 <- qnorm(ranking/(n+1)) y <- as.matrix(rnorm(n)) s <- sign(y) s[ s == 0] <- -1 statmat <- matrix(NA, nrow=length(kvec), ncol=3) for (j in 1:length(kvec)) { k <- kvec[j] statmat[j,] <- cbind(stat(r1,k),stat(r2,k),stat(s,k)) } R1 <- max(abs(statmat[,1])) R2 <- max(abs(statmat[,2])) S1 <- max(abs(statmat[,3])) mat[i,] <- c(R1,R2,S1) } alpha <- c(0.90,0.95,0.99) R1crit <- quantile(mat[,1],alpha ) R2crit <- quantile(mat[,2],alpha ) S1crit <- quantile(mat[,3],alpha ) return(list(Holding.Period=kvec,JR1.crit=R1crit,JR2.crit=R2crit,JS1.crit=S1crit)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/JWright.crit.R
Joint.Wright <- function(y,kvec) { y <- as.matrix(y) n <- nrow(y) W_mat <- matrix(NA, nrow=length(kvec), ncol=3) for (i in 1:length(kvec)) { k <- kvec[i] W <- Wright_stat(y,k) W_mat[i,] <- cbind(W$WR1,W$WR2,W$WS1) } jr1 <- max(abs(W_mat[,1])) jr2 <- max(abs(W_mat[,2])) js1 <- max(abs(W_mat[,3])) return(list(Holding.Period=kvec,JR1=jr1,JR2=jr2,JS1=js1)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Joint.Wright.R
LMCD <- function(y,kvec) { y <- as.matrix(y) n <- nrow(y) mq <- matrix(NA, nrow=length(kvec), ncol=2) for (i in 1:length(kvec)) { k <- kvec[i] LM <- LM_stat(y,k) mq[i,] <- cbind(LM$LM1,LM$LM2) } mv1 <- max(abs(mq[,1])) mv2 <- max(abs(mq[,2])) return(list(M1=mq[,1],M2=mq[,2],CD1=mv1,CD2=mv2)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/LMCD.R
LM_stat <- function(y,k) { y <- as.matrix(y); y1 <- (y-mean(y))^2; n <- nrow(y); m <- mean(y) vr1 <- sum( (y-m)^2 )/n # use the filter function flt = filter(y, rep(1,k), method = "convolution") flt = flt[!is.na(flt)] summ = sum((flt - k * m)^2) vr2 <- summ/(n*k) vr <- vr2/vr1 tem1 <- 2*(2*k-1)*(k-1) tem2 <- 3*k m1 <- sqrt(n)*(vr-1)/sqrt( tem1/tem2 ) w <- 4*as.matrix((1-(1:(k-1))/k)^2,nrow=k-1) dvec <- matrix(NA, nrow=(k-1), ncol=1) for (j in 1:(k-1)) { dvec[j] <- sum(y1[(j+1):n] * y1[1:(n-j)])/( sum(y1)^2 ) } summ <- crossprod(w,dvec) m2 <- sqrt(n)*(vr-1)*((n*summ)^(-.5) ) return(list(LM1=m1,LM2=m2)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/LM_stat.R
Lo.Mac <- function(y,kvec) { y <- as.matrix(y) n <- nrow(y) mq <- matrix(NA, nrow=length(kvec), ncol=2) for (i in 1:length(kvec)) { k <- kvec[i] LM <- LM_stat(y,k) mq[i,] <- cbind(LM$LM1,LM$LM2) } VR <- mq rownames(VR) <- paste("k=",kvec,sep="") colnames(VR) <- c("M1","M2") return(list(Stats=VR)) #return(list(Holding.Periods=kvec,M1.stat=mq[,1],M2.stat=mq[,2])) }
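# Usage sketch (illustrative; not part of the original source): M1 assumes
# i.i.d. increments, M2 is robust to conditional heteroskedasticity; both are
# asymptotically standard normal under the null of no return predictability:
# set.seed(1); Lo.Mac(rnorm(500), kvec = c(2, 5, 10))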
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Lo.Mac.R
Mammen <- function(n) { p <- (sqrt(5)+1)/(2*sqrt(5)) zmat <- rep(1,n)*(-(sqrt(5)-1)/2); u <- runif(n,0,1) zmat[u > p] <- (sqrt(5)+1)/2 return(zmat) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Mammen.R
Panel.VR <- function(dat, nboot = 500){
  k <- ncol(dat)
  vrstat <- matrix(NA, nrow = k)
  for (i in 1:k) vrstat[i, ] <- Auto.VR(dat[, i])$stat
  vr1 <- max(abs(vrstat)); vr2 <- sum(vrstat^2); vr3 <- sqrt(k) * mean(vrstat)
  stats1 <- matrix(NA, nrow = nboot)
  stats2 <- matrix(NA, nrow = nboot)
  stats3 <- matrix(NA, nrow = nboot)
  for (ii in 1:nboot){
    ys <- dat * Mammen(nrow(dat))
    vrstats <- matrix(NA, nrow = k)
    for (jj in 1:k) vrstats[jj, ] <- Auto.VR(ys[, jj])$stat
    stats1[ii, ] <- max(abs(vrstats))
    stats2[ii, ] <- sum(vrstats^2)
    stats3[ii, ] <- sqrt(k) * mean(vrstats)
  }
  pboot1 <- mean(stats1 > vr1)
  pboot2 <- mean(stats2 > vr2)
  pboot3 <- mean(abs(stats3) > abs(vr3))
  return(list(MaxAbs.stat = vr1, SumSquare.stat = vr2, Mean.stat = vr3,
              MaxAbs.pval = pboot1, SumSquare.pval = pboot2, Mean.pval = pboot3))
}
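# Usage sketch (illustrative; not part of the original source): dat is a
# T x k matrix holding one return series per column:
# set.seed(1); Panel.VR(matrix(rnorm(200 * 4), 200, 4), nboot = 100)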
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Panel.VR.R
Rademacher <- function(n) { p <- 0.5 zmat <- rep(1,n); u <- runif(n,0,1) zmat[u > p] <- -1 return(zmat) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Rademacher.R
Spec.shape <- function(x) { iad <- numeric(11) icvm <- numeric(11) ima <- numeric(11) for (j in 1:11) { iacm <- IACM( (j-1)*0.1,x) iad[j] <- iacm[1] icvm[j] <- iacm[2] ima[j] <- iacm[3] } ad <- ISIMP(0,1,iad) cvm <- ISIMP(0,1,icvm) ma <- ISIMP(0,1,ima) return(list(AD=ad,CVM=cvm,M=ma)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Spec.shape.R
Subsample.test <- function(y,kvec) { y <- as.matrix(y) n <- nrow(y) b1 <- as.integer(2.5*n^(0.3)) b2 <- as.integer(3.5*n^(0.6)) term <- as.integer( (b2-b1)/7 ) b1vec <- as.matrix(seq(b1,b2,term)[2:7]) p <- matrix(NA,nrow = nrow(b1vec), ncol=1) for (i in 1:nrow(b1vec)) { mv <- WK_stat2(y,kvec) b1 <- b1vec[i] mvsamp <- matrix(NA,nrow=(n-b1+1),ncol=1) index <- 1:b1 for (j in 1:(n-b1+1)) { xsub <- as.matrix(y[index]) mvsamp[j] <- WK_stat2(xsub,kvec) index <- index+1 } tem <- mvsamp > mv tem[tem == "TRUE"] <- 1 p[i] <- mean(tem) } rownames(p) <- paste("bl=",b1vec,sep="") colnames(p) <- c("pval") return(list(Holding.Period=kvec,Pval=p,Block.length=as.numeric(b1vec))) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Subsample.test.R
VR.minus.1 <- function(y,kvec) { coe <- AR1(y)$ALPHA T <- length(y) lq <- ABEL1Q(T,coe) vrsum <- 1 for (i in 1:(T-1)) { sum1 <- 0 for (t in 1:(T-i)) { sum1 <- sum1 + y[t]*y[t+i] } sum1 <- sum1/(sum(y^2)) vrsum <- vrsum + 2*kfunc(i/lq)*sum1 } vr.auto <- (vrsum - 1) y <- as.matrix(y) n <- nrow(y) m <- mean(y) vr1 <- sum( (y-m)^2 )/n mq <- numeric() for (i in 1:length(kvec)) { k <- kvec[i] # use the filter function flt = filter(y, rep(1,k), method = "convolution") flt = flt[!is.na(flt)] summ = sum((flt - k * m)^2) vr2 <- summ/(n*k) vr <- vr2/vr1 mq <- c(mq,(vr-1)) } # rownames(mq) <- paste("k",kvec,sep=""); colnames(mq) <- "|VR-1|" return(list(VR.auto=vr.auto,Holding.Periods=kvec,VR.kvec=mq)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/VR.minus.1.R
VR.plot <- function(y,kvec) { val <- matrix(NA,nrow=max(kvec),ncol=3) for( i in 2:max(kvec)) { tem1 <- stat.plot(y,i)$vr tem2 <- stat.plot(y,i)$se val[i,] <- c(tem1,1-1.96*tem2,1+1.96*tem2)} matplot(val,type="l",col=c(2,4,4),xlab="holding period",ylab="variance ratio",lwd=c(5,2,2)) abline(h=1) grid(nx=max(kvec),lwd=1) title(main = "Variance Ratios and 95% confidence band") VAL <- as.matrix(val[2:max(kvec),1]) rownames(VAL) <- paste("k=",2:max(kvec),sep="") colnames(VAL) <- "VR" return(list(VR=VAL)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/VR.plot.R
WK_stat1 <- function(y,k) { y <- as.matrix(y) n <- nrow(y) m <- mean(y) vr1 <- sum( (y-m)^2 )/n # use the filter function flt = filter(y, rep(1,k), method = "convolution") flt = flt[!is.na(flt)] summ = sum((flt - k * m)^2) vr2 <- summ/(n*k) vr <- vr2/vr1 m1 <- sqrt(n)*(vr-1) return(m1) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/WK_stat1.R
WK_stat2 <- function(y,kvec) { y <- as.matrix(y) n <- nrow(y) mq <- matrix(NA, nrow=length(kvec), ncol=1) for (i in 1:length(kvec)) { k <- kvec[i] mq[i,] <- WK_stat1(y,k) } return(max(abs(mq))) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/WK_stat2.R
Wald <- function(y,kvec) { y <- as.matrix(y) n <- nrow(y) mvr <- matrix(NA, nrow=length(kvec), ncol=1) for (i in 1:length(kvec)) { k <- kvec[i] VR <- Wald1(y,k) mvr[i,] <- cbind(VR) } mat <-covmat(kvec) w <- n* t(mvr) %*% solve(mat) %*% mvr alpha <- c(0.1,0.05,0.01) cr <- qchisq(1-alpha,length(kvec)) return(list(Holding.Period=kvec,Wald.stat=as.numeric(w),Critical.Values_10_5_1_percent=cr)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Wald.R
Wald1 <- function(y,k) { y <- as.matrix(y) n <- nrow(y) m <- mean(y) vr1 <- sum( (y-m)^2 )/n # use the filter function flt = filter(y, rep(1,k), method = "convolution") flt = flt[!is.na(flt)] summ = sum((flt - k * m)^2) vr2 <- summ/(n*k) vr <- vr2/vr1 -1 return(vr) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Wald1.R
Wright <- function(y,kvec) { y <- as.matrix(y) n <- nrow(y) W_mat <- matrix(NA, nrow=length(kvec), ncol=3) for (i in 1:length(kvec)) { k <- kvec[i] W <- Wright_stat(y,k) W_mat[i,] <- cbind(W$WR1,W$WR2,W$WS1) } VR <- W_mat rownames(VR) <- paste("k=",kvec,sep="") colnames(VR) <- c("R1","R2","S1") return(list(Stats=VR)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Wright.R
Wright.crit <- function(n,k,nit) { mat <- matrix(NA,nrow=nit,ncol=3) for (i in 1:nit) { ranking <- as.matrix(sample(1:n,n,replace=FALSE)) r1 <- (ranking - 0.5*(n+1) )/sqrt((n-1)*(n+1)/12) r2 <- qnorm(ranking/(n+1)) y <- as.matrix(rnorm(n)) s <- sign(y) s[ s == 0] <- -1 R1 <- stat(r1,k) R2 <- stat(r2,k) S1 <- stat(s,k) mat[i,] <- c(R1,R2,S1) } alpha <- c(0.01,0.05,0.1) R1crit <- quantile(mat[,1],c(0.5*alpha, rev(1-0.5*alpha)) ) R2crit <- quantile(mat[,2],c(0.5*alpha, rev(1-0.5*alpha)) ) S1crit <- quantile(mat[,3],c(0.5*alpha, rev(1-0.5*alpha)) ) return(list(Holding.Period=k,R1.crit=R1crit,R2.crit=R2crit,S1.crit=S1crit)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Wright.crit.R
Wright_stat <- function(y,k) { y <- as.matrix(y) n <- nrow(y) ranking <- as.matrix(rank(y)) r1 <- (ranking - 0.5*(n+1) )/sqrt((n-1)*(n+1)/12) r2 <- qnorm(ranking/(n+1)) s <- sign(y) s[ s == 0] <- -1 R1 <- stat(r1,k) R2 <- stat(r2,k) S1 <- stat(s,k) return(list(WR1=R1,WR2=R2,WS1=S1)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/Wright_stat.R
compweexp <- function(inf) { n <- length(inf) weiexp <- matrix(1,nrow=n,ncol=n) for(i in 1:n) { for(j in (i+1):n) { if(j > n) break aux1 <- (inf[i]-inf[j]) %*% t(inf[i]-inf[j]) weiexp[i,j] <- exp(-0.5*aux1) weiexp[j,i] <- weiexp[i,j] } } return(weiexp) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/compweexp.R
covmat <- function(kvec)
{
# Asymptotic covariance matrix of the variance-ratio statistics
# for the holding periods in kvec
d <- length(kvec)
mat <- matrix(0,nrow=d,ncol=d)
dvec <- (2*(2*kvec-1) * (kvec-1)) / (3*kvec)
diag(mat) <- dvec
for (i in 1:d)
{
  for (j in 1:d)
  {
  mat[i,j] <- (2*(3*kvec[i]-kvec[j]-1)*(kvec[j]-1))/(3*kvec[i])
  mat[j,i] <- mat[i,j]
  }
}
return(mat)
}
/scratch/gouwar.j/cran-all/cranData/vrtest/R/covmat.R
kfunc <- function(X) { WS <- (25/(12*pi^2*X^2))*( sin(6*pi*X/5)/(6*pi*X/5) - cos(6*pi*X/5) ) return(WS) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/kfunc.R
stat <- function(x,k)
{
# Standardized variance-ratio statistic for the series x at holding period k
x <- as.matrix(x)
n <- nrow(x)
index <- 1:k
summ <- 0
for (i in k:n)
{
summ <- summ + sum(x[index])^2
index <- index+1
}
vr1 <- sum(x^2)/n
vr2 <- summ/(n*k)
vr <- vr2/vr1
tem1 <- 2*(2*k-1)*(k-1)
tem2 <- 3*k*n
vrstat <- (vr-1)/sqrt( tem1/tem2 )
return(vrstat)
}
/scratch/gouwar.j/cran-all/cranData/vrtest/R/stat.R
stat.plot <- function (y, k) { y <- as.matrix(y) n <- nrow(y) m <- mean(y) vr1 <- sum((y - m)^2)/n # use the filter function flt = filter(y, rep(1,k), method = "convolution") flt = flt[!is.na(flt)] summ = sum((flt - k * m)^2) vr2 <- summ/(n * k) vr <- vr2/vr1 tem1 <- 2 * (2 * k - 1) * (k - 1) tem2 <- 3 * k * n se <- sqrt(tem1/tem2) return(list(vr = vr, se = se)) }
/scratch/gouwar.j/cran-all/cranData/vrtest/R/stat.plot.R
plot.vscc <- function(x, ...){
  classcolours <- rainbow(length(unique(x$bestmodel$classification)))
  pairs(x$topselected, col = classcolours[x$bestmodel$classification])
}
/scratch/gouwar.j/cran-all/cranData/vscc/R/plot.vscc.R
print.vscc <- function(x, ...){ summary(x) }
/scratch/gouwar.j/cran-all/cranData/vscc/R/print.vscc.R
summary.vscc <- function(object, ...){ x <- object cat("---------- Summary for VSCC ----------", "\n\n") cat(" ---- RESULTS ---- ", "\n") cat("# Vars: ", ncol(x$top), "\n") cat("Relation: ", x$chosen, "\n") cat("BIC: ", x$bestmod$bic, "\n") cat("Model: ", x$bestmod$model, "\n") cat("Family: ", x$family, "\n") cat("# Groups: ", x$bestmod$G, "\n") }
/scratch/gouwar.j/cran-all/cranData/vscc/R/summary.vscc.R
vscc <-function(x, G=1:9, automate="mclust", initial=NULL, initunc=NULL, train=NULL, forcereduction=FALSE){ origx <- x origG <- G x <- scale(x) p <- ncol(x) originit <- initial if(is.null(train)){ if(is.null(automate)){ if(is.null(initial)){stop("If an initial clustering vector is not supplied, automate='teigen' or 'mclust' must be specified")} G <- length(unique(initial)) n <- nrow(x) zmat <- matrix(0,n,G) for(i in 1:G){ zmat[initial==i, i]<-1 } } else{ if(automate=="teigen"){ # require("teigen") if(as.numeric(substr(packageVersion("teigen"), 1,3))<1.9){ warning(paste("The 'vscc' package requires 'teigen' version 1.9 or higher, version", packageVersion("teigen"), "is currently installed: issues may arise.")) } if(is.null(initial)){ mclinit1 <- hc("VVV",x) mclinit2 <- hclass(mclinit1, G) mclist <- list() for(g in length(G)){ mclist[[G[g]]] <- mclinit2[,g] } initrun <- teigen(x, G, init=mclist, training=train, verbose=FALSE) if(initrun$G==1){stop("teigen initialization gives G=1 solution...please use an initialization where G>1")} initial <- initrun$classification initunc <- sum(1-apply(initrun$fuzzy,1,max)) } G <- length(unique(initial)) n <- nrow(x) zmat <- matrix(0,n,G) for(i in 1:G){ zmat[initial==i, i]<-1 } } else{ if(automate=="mclust"){ # require("mclust") if(as.numeric(substr(packageVersion("mclust"), 1,3))<4.0){ warning(paste("VSCC requires 'mclust' version 4.0 or higher, version", packageVersion("mclust"), "is currently installed: issues may arise.")) } if(is.null(initial)){ initrun <- Mclust(x, G) if(initrun$G==1){stop("mclust initialization gives G=1 solution...please use an initialization where G>1")} initial <- initrun$classification initunc <- sum(initrun$unc) } G <- length(unique(initial)) n <- nrow(x) zmat <- matrix(0,n,G) for(i in 1:G){ zmat[initial==i, i]<-1 } } } } } else{ if(is.null(initial)){ stop("If using 'train', 'initial' vector must also be given") } origx <- x x <- x[train,] G <- length(unique(initial[train])) n <- nrow(x) zmat <- matrix(0,n,G) for(i in 1:G){ zmat[initial[train]==i, i]<-1 } } if(is.null(colnames(x))){ colnames(x) <- 1:p colnames(origx) <- 1:p } ng <- colSums(zmat) mug <- matrix(0,G,p) for(g in 1:G){ mug[g,] <- colSums(zmat[,g]*x)/ng[g] } # mug <- muginit(G,p,x,zmat,ng) mugarr <- array(0,dim=c(n,p,G)) for(g in 1:G){ mugarr[,,g] <- t(mug[g,] * t(matrix(1,n,p))) } mugmat <- matrix(0,n,p) for(g in 1:G){ mugmat <- mugmat + zmat[,g] * mugarr[,,g] } xminusmug <- x - mugmat ss <- xminusmug * xminusmug ssbyvar <- colSums(ss)/n # bssmugmat <- array(0,dim=c(n,p,G)) # for(g in 1:G){ # bssmugmat[,,g] <- bssmugmat[,,g] + (1-zmat[,g]) * mugarr[,,g] # } # bssxminusmug <- array(0,dim=c(n,p,G)) # for(g in 1:G){ # bssxminusmug[,,g] <- (x-bssmugmat[,,g])^2/(n-ng[g]) # } # bssbyvar <- rep(0,p) # for(g in 1:G){ # bssbyvar <- bssbyvar + colSums(bssxminusmug[,,g]) # } # sortbss <- sort(bssbyvar) sorted <- t(as.matrix(sort(ssbyvar))) select <- list() useselect <- list() varnames <- list() trun <- list() numvars <- NA for(i in 1:5){ select[[i]] <- matrix(data=origx[,colnames(sorted)[1]]) useselect[[i]] <- matrix(data=x[,colnames(sorted)[1]]) varnames[[i]] <- colnames(sorted)[1] } counts <- rep(2,5) for(k in 2:p){ curname <- colnames(sorted)[k] for(i in 1:5){ curcor <- cor(cbind(x[,curname],useselect[[i]])) if(all(abs(curcor[1,-1])<=(1-sorted[1,k]^i))){ select[[i]] <- cbind(select[[i]],origx[,curname]) useselect[[i]] <- cbind(useselect[[i]],x[,curname]) varnames[[i]][counts[i]] <- curname counts[i] <- counts[i]+1 } } } for(i in 1:5){ colnames(select[[i]]) <- varnames[[i]] 
} if(!is.null(automate)){ tuncs <- Inf numvars <- counts-1 counttab <- table(counts-1) runteig <- rep(TRUE,5) if(any(counttab>1)){ # dubvars <- as.numeric(names(which(counttab>1))) # for(j in 1:length(dubvars)){ # relneedcheck <- which(numvars==dubvars[j]) # k <- 1 # while(k < length(relneedcheck)){ # for(i in (k+1):length(relneedcheck)){ # if(all(varnames[[relneedcheck[k]]] %in% varnames[[relneedcheck[i]]])){ # runteig[relneedcheck[i]] <- FALSE # } # } # k <- k+1 # } # } #This could be improved for(i in 1:4){ for(j in (i+1):5){ if(length(varnames[[i]])==length(varnames[[j]])){ if(all(varnames[[i]] %in% varnames[[j]])){ runteig[j] <- FALSE } } } } } if(automate=="teigen"){ for(i in 1:5){ if(runteig[i]){ G <- origG mclinit1 <- hc("VVV",x) mclinit2 <- hclass(mclinit1, G) mclist <- list() for(g in length(G)){ mclist[[G[g]]] <- mclinit2[,g] } trun[[i]] <- teigen(select[[i]], G, training=train, init=mclist, verbose=FALSE) if(trun[[i]]$G>1){ tuncs[i] <- sum(1-apply(trun[[i]]$fuzzy,1,max)) } else{ tuncs[i] <- Inf } } else{ trun[[i]] <- "Same as simpler relation" tuncs[i] <- Inf } } } else{ if(is.null(train)){ for(i in 1:5){ if(runteig[i]){ G <- origG trun[[i]] <- Mclust(scale(select[[i]]), G) if(trun[[i]]$G>1){ tuncs[i] <- sum(trun[[i]]$unc) } else{ tuncs[i] <- Inf } } else{ trun[[i]] <- "Same as simpler relation" tuncs[i] <- Inf } } } else{ for(i in 1:5){ if(runteig[i]){ G <- origG trun[[i]] <- teigen(select[[i]], G, models="mclust", training=train, init="uniform", verbose=FALSE, known=initial) if(trun[[i]]$G>1){ tuncs[i] <- sum(1-apply(trun[[i]]$fuzzy,1,max)) } else{ tuncs[i] <- Inf } } else{ trun[[i]] <- "Same as simpler relation" tuncs[i] <- Inf } } } } } store <- list() store[["selected"]] <- select if(!is.null(automate)){ # if(is.null(initial)){ if(is.null(train)&&is.null(originit)){ store[["initialrun"]] <- initrun } else{ if(is.null(initunc)){ initunc <- Inf } } if(forcereduction){ store[["bestmodel"]] <- trun[[which.min(tuncs)]] store[["chosenrelation"]] <- which.min(tuncs) } else{ if(min(tuncs)<initunc){ store[["bestmodel"]] <- trun[[which.min(tuncs)]] store[["chosenrelation"]] <- which.min(tuncs) store[["topselected"]] <- select[[which.min(tuncs)]] store[["uncertainty"]] <- min(tuncs) } else{ if(is.null(originit)){ store[["bestmodel"]] <- initrun store[["uncertainty"]] <- initunc } else{ bm <- list() bm$classification <- initial store[["bestmodel"]] <- bm store[["uncertainty"]] <- initunc } store[["chosenrelation"]] <- "Full dataset" store[["topselected"]] <- origx } } # } # else{ # store[["bestmodel"]] <- trun[[which.min(tuncs)]] # store[["chosenrelation"]] <- which.min(tuncs) # store[["topselected"]] <- select[[which.min(tuncs)]] # store[["uncertainty"]] <- min(tuncs) # } store[["allmodelfit"]] <- trun } store[["family"]] <- automate store[["wss"]] <- sorted class(store) <- "vscc" store }
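# Usage sketch (illustrative; not part of the original source):
# out <- vscc(iris[, -5])   # variable selection with the default mclust backend
# summary(out); plot(out)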
/scratch/gouwar.j/cran-all/cranData/vscc/R/vscc.R
vsccmanly <-function(x, G=2:9, numstart=100, selection="backward",forcereduction=FALSE, initstart="k-means", seedval=2354){ if(initstart!="k-means" & initstart!="hierarchical") { stop("No valid initialization method has been provided.") } if(selection!="forward" & selection!="backward" & selection!="none"){ stop("No valid selection method has been provided.") } origx <- x origG <- G x <- x p <- ncol(x) initruns=list() Gmod=list() initbic=vector(length=length(G)) Mmod=list() initbic2=vector(length=length(G)) listfcn=function(dim1){ nameslist=c("la","tau","Mu","S","gamma","id","ll","bic","iter","flag" ) list1 <- as.list(rep(NA, length(nameslist))) names(list1) <- nameslist return(list1) } listfcn2=function(dim1){ nameslist=c("la","Mu","S","id","iter","flag" ) list1 <- as.list(rep(NA, length(nameslist))) names(list1) <- nameslist return(list1) } if(selection=="backward"){ for(j in 1:length(G)){ Gtest=G[j] if(initstart=="k-means"){ set.seed(seedval) id.start <- kmeans(x, Gtest, nstart=numstart)$cluster } else if(initstart=="hierarchical"){ H <- hclust(dist(x), method = "ward.D") id.start<-cutree(H, Gtest) } #Following initialization steps aid in convergence la <- matrix(0.1, Gtest, p) C <- tryCatch({Manly.Kmeans(x, id = id.start, la = la) }, error=function(e) listfcn2(6)) id.CK<-C$id suppressWarnings({ Gmod[[j]] <- tryCatch({Manly.EM(x,id=id.CK,la=la) },error=function(e) listfcn(10)) }) initbic[j]<-Gmod[[j]]$bic } G=origG[which.min(initbic)] Gfinmod=Gmod[[which.min(initbic)]] initrun <-tryCatch({ Manly.select(x, model = Gfinmod, method = "backward") #transformation parameter selection },error=function(e) listfcn(10)) x$guessid=initrun$id } else if (selection=="forward"){ for(j in 1:length(G)){ Gtest=G[j] if(initstart=="k-means"){ set.seed(seedval) id.start <- kmeans(x, Gtest, nstart=numstart)$cluster } else if(initstart=="hierarchical"){ H <- hclust(dist(x), method = "ward.D") id.start<-cutree(H, Gtest) } #Following initialization steps aid in convergence id.CK=id.start suppressWarnings({ Gmod[[j]] <- tryCatch({Manly.EM(x,id=id.CK,la=matrix(0, Gtest, p)) }, error=function(e) listfcn(10)) }) initbic[j]<-Gmod[[j]]$bic } G=origG[which.min(initbic)] Gfinmod=Gmod[[which.min(initbic)]] initrun <- tryCatch({ Manly.select(x, model = Gfinmod, method = "forward") }, error=function(e) listfcn(10)) x$guessid=initrun$id } else if (selection=="none"){ #full Manly for(j in 1:length(G)){ Gtest=G[j] if(initstart=="k-means"){ set.seed(seedval) id.start <- kmeans(x, Gtest, nstart=numstart)$cluster } else if(initstart=="hierarchical"){ H <- hclust(dist(x), method = "ward.D") id.start<-cutree(H, Gtest) } #Following initialization steps aid in convergence la <- matrix(0.1, Gtest, p) C <- tryCatch({Manly.Kmeans(x, id = id.start, la = la) }, error=function(e) listfcn2(6)) id.CK<-C$id Gmod[[j]] <- tryCatch({Manly.EM(x,id=id.CK,la=la) },error=function(e) listfcn(10)) initbic[j]<-Gmod[[j]]$bic } G=origG[which.min(initbic)] initrun=Gmod[[which.min(initbic)]] #Gmod is used as there is no transformation parameter selection for the full model x$guessid=initrun$id } #Transforming data Xnew=as.vector(NA) transdata=matrix(NA,nrow=nrow(x),ncol=p) x$guessid=initrun$id for(j in 1:p){ Xnew=as.vector(NA) for(i in 1:G){ groupD=x[x$guessid==i,j] varlambda=initrun$la[i,j] if(varlambda==0){ Xtrans=groupD } else{ Xtrans=(exp(varlambda*groupD)-1)/varlambda } Xnew=c(Xnew,Xtrans) } Xnew=Xnew[-1] transdata[,j]<-Xnew } transdata<-as.data.frame(transdata) transdata<-scale(transdata) colnames(transdata)<-colnames(origx) #End of transformation 
initial <- initrun$id initunc=sum(1-apply(initrun$gamma,1,max)) #uncertainty G <- length(unique(initial)) n <- nrow(x) zmat <- matrix(0,n,G) for(i in 1:G){ for(j in 1:n){ if(initial[j]==i){ zmat[j, i]<-1 } } } ng <- colSums(zmat) meang <- matrix(0,G,p) #Mean for group and variable for(g in 1:G){ meang[g,] <- colSums(zmat[,g]*transdata)/ng[g] } wvar=matrix(0,1,p) #Within-group variance for each variable. for(j in 1:p){ for(g in 1:G){ for(i in 1:n){ wvar[,j] = wvar[,j]+zmat[i,g]*(transdata[i,j] - meang[g,j])^2 } } wvar[,j]=wvar[,j]/n } colnames(wvar)<-colnames(transdata) #Lines 189-220 come from vscc function in the vscc package (Andrews and McNicholas, 2013). #The only difference is that selection occurs on the transformed variables and the original #variables are stored in 'select' as the Manly models are fit to the original data. sorted <- t(as.matrix(sort(wvar[1,]))) select <- list() transelect <- list() varnames <- list() modrun <- list() numvars <- NA for(i in 1:5){ #Automatically select the variable that minimizes wmat into each set select[[i]] <- matrix(data=origx[,colnames(sorted)[1]]) transelect[[i]] <- matrix(data=transdata[,colnames(sorted)[1]]) varnames[[i]] <- colnames(sorted)[1] } counts <- rep(2,5) for(k in 2:p){ curname <- colnames(sorted)[k] for(i in 1:5){ curcor <- cor(cbind(transdata[,curname],transelect[[i]])) if(all(abs(curcor[upper.tri(curcor)])<=(1-sorted[1,k]^i))){ #Selection criteria select[[i]] <- cbind(select[[i]],origx[,curname]) transelect[[i]] <- cbind(transelect[[i]],transdata[,curname]) varnames[[i]][counts[i]] <- curname counts[i] <- counts[i]+1 } } } for(i in 1:5){ colnames(select[[i]]) <- varnames[[i]] } moduncs <- Inf runif <- rep(TRUE,5) for(i in 1:4){ for(j in (i+1):5){ if(runif[j]==TRUE && identical(varnames[[i]],varnames[[j]])){ runif[j]=FALSE # If subset contains same variables as another subset assign runif FALSE } } } for(i in 1:5){ if(runif[i]){ G=origG if(selection=="backward"){ for(j in 1:length(G)){ Gtest=G[j] if(initstart=="k-means"){ set.seed(seedval) id.start <- kmeans(select[[i]], Gtest,nstart=numstart)$cluster } else if(initstart=="hierarchical"){ H <- hclust(dist(select[[i]]), method = "ward.D") id.start<-cutree(H, Gtest) } #Following initialization steps aid in convergence p=ncol(select[[i]]) la <- matrix(0.1, Gtest, p) C <- tryCatch({Manly.Kmeans(select[[i]], id = id.start, la = la) }, error=function(e) listfcn2(6)) id.CK<-C$id suppressWarnings({ Mmod[[j]] <- tryCatch({Manly.EM(select[[i]], id = id.CK,la=la) },error=function(e) listfcn(10)) }) initbic2[j]<-Mmod[[j]]$bic } Gnew=origG[which.min(initbic2)] Mfinmod=Mmod[[which.min(initbic2)]] modrun[[i]] <- tryCatch({Manly.select(select[[i]], model = Mfinmod, method = "backward") }, error=function(e) listfcn(10)) } else if (selection=="forward"){ for(j in 1:length(G)){ Gtest=G[j] if(initstart=="k-means"){ set.seed(seedval) id.start <- kmeans(select[[i]], Gtest,nstart=numstart)$cluster } else if(initstart=="hierarchical"){ H <- hclust(dist(select[[i]]), method = "ward.D") id.start<-cutree(H, Gtest) } p=ncol(select[[i]]) id.CK=id.start #Following initialization steps aid in convergence suppressWarnings({ Mmod[[j]] <- tryCatch({Manly.EM(select[[i]], id = id.CK,la = matrix(0, Gtest, p)) }, error=function(e) listfcn(10)) }) id.G <- Gmod$id initbic2[j]<-Mmod[[j]]$bic } Gnew=origG[which.min(initbic2)] Mfinmod=Mmod[[which.min(initbic2)]] modrun[[i]] <- tryCatch({ inittest <- Manly.select(select[[i]], model = Mfinmod, method = "forward") }, error=function(e) listfcn(10)) } else if 
(selection=="none"){ for(j in 1:length(G)){ Gtest=G[j] if(initstart=="k-means"){ set.seed(seedval) id.start <- kmeans(select[[i]], Gtest,nstart=numstart)$cluster } else if(initstart=="hierarchical"){ H <- hclust(dist(select[[i]]), method = "ward.D") id.start<-cutree(H, Gtest) } #Following initialization steps aid in convergence p=ncol(select[[i]]) la <- matrix(0.1, Gtest, p) C <- tryCatch({Manly.Kmeans(select[[i]], id = id.start, la = la) }, error=function(e) listfcn2(6)) id.CK<-C$id Mmod[[j]] <- tryCatch({Manly.EM(select[[i]], id = id.CK,la = la) }, error=function(e) listfcn(10)) id.G <- Gmod$id initbic2[j]<-Mmod[[j]]$bic } Gnew=origG[which.min(initbic2)] modrun[[i]]=Mmod[[which.min(initbic2)]] } moduncs[i]=tryCatch({sum(1-apply(modrun[[i]]$gamma,1,max)) }, error=function(e) NA) } else{ modrun[[i]] <- "Same as simpler relation" moduncs[i] <- Inf } } #change bic to bigger is better suppressWarnings({ for(i in 1:5){ modrun[[i]]$bic=tryCatch({-1*modrun[[i]]$bic}, error=function(e) NA) } }) #The following code, used to output results, comes from the vscc function in the vscc package (Andrews and McNicholas, 2013) #to ensure consistency between functions. store <- list() store[["selected"]] <- select store[["initialrun"]] <- initrun if(forcereduction){ store[["bestmodel"]] <- modrun[[which.min(moduncs)]] store[["chosenrelation"]] <- which.min(moduncs) store[["variables"]]<-names(as.data.frame(select[[which.min(moduncs)]])) store[["topselected"]] <- select[[which.min(moduncs)]] store[["uncertainty"]] <- min(moduncs) } else{ if(min(moduncs)<initunc){ store[["bestmodel"]] <- modrun[[which.min(moduncs)]] store[["chosenrelation"]] <- which.min(moduncs) store[["topselected"]] <- select[[which.min(moduncs)]] store[["uncertainty"]] <- min(moduncs) store[["variables"]]<-names(as.data.frame(select[[which.min(moduncs)]])) } else{ store[["bestmodel"]] <- initrun store[["chosenrelation"]] <- "Full dataset" store[["topselected"]] <- origx store[["uncertainty"]] <- initunc } } store[["allmodelfit"]] <- modrun store[["wss"]] <- sorted class(store) <- "vsccmanly" store }
/scratch/gouwar.j/cran-all/cranData/vscc/R/vsccmanly.R
#' VSD Options (Internal) #' #' Generates lists of lists of options for each graph type, from function call #' #' @param arguments Graph-specific arguments #' @param ... General arguments #' @keywords internal #' #' @return List of list of graph options .options <- function(arguments = list(), ...) { preset_ggpar <- list( main = NULL, title = NULL, submain = NULL, subtitle = NULL, xlab = "Time", legend.title = "Strata", size = 1, linetype = NULL, alpha = 1, color = NULL, palette = NULL, ggtheme = ggpubr::theme_pubr() ) preset_ggsurv <- append(preset_ggpar, list( ylab = NULL, censor = NULL, censor.shape = NULL, censor.size = 4.5, conf.int = NULL )) preset_fit <- append(preset_ggsurv, list( conf.int.style = NULL )) preset_parametric <- append(preset_ggsurv, list( conf.int.km = FALSE )) preset_forest <- list( main = "Hazard ratio", title = NULL, cpositions = NULL, fontsize = NULL, refLabel = NULL, noDigits = NULL ) preset_residuals <- append( preset_ggpar, list( ylab = NULL, resid = NULL, se = F, df = NULL, nsmo = NULL, var = NULL, point.col = NULL, point.size = NULL, point.shape = NULL, point.alpha = NULL, caption = NULL ) ) preset_hazard <- append(preset_ggpar, list(ylab = "Hazard rate")) preset <- list( fit = preset_fit, parametric = preset_parametric, forest = preset_forest, residuals = preset_residuals, hazard = preset_hazard ) ellipsis <- list(...) for (type in names(preset)) { preset[[type]] <- .subset_options(preset[[type]], ellipsis, arguments[[type]]) } return(preset) } #' VSD (Sub)Options (Internal) #' #' Agglutinates preset, ellipsis, and arguments under a graph type #' #' @param subset Graph-specific preset (and allowed) values #' @param ellipsis General arguments #' @param subarguments Graph-specific arguments #' @keywords internal #' #' @return subset .subset_options <- function(subset, ellipsis, subarguments = NULL) { # replaces preset with ellipsis arguments, only if they're already named with presets to_replace <- ellipsis[names(ellipsis) %in% names(subset)] subset[names(to_replace)] <- to_replace # does the same for the sublist in arguments named after the object if (is.list(subarguments)) { to_replace <- subarguments[names(subarguments) %in% names(subset)] subset[names(to_replace)] <- to_replace } # cleanup: main/submain to title/subtitle # ggsurv requires it to be title, ggpar allows title by default if (!is.null(subset$main) && "title" %in% names(subset)) { subset$title <- subset$main subset$main <- NULL } if (!is.null(subset$submain) && "subtitle" %in% names(subset)) { subset$subtitle <- subset$submain subset$submain <- NULL } # cleanup: remove NULL values subset[sapply(subset, is.null)] <- NULL return(subset) }
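# --- Illustrative usage (editor's sketch, not part of the package) ---
# How generic and graph-specific arguments combine in .options(): a generic
# `xlab` is applied to every graph type that accepts it, while an entry under
# `arguments$fit` overrides it for the fit graph only.
if (interactive() && requireNamespace("ggpubr", quietly = TRUE)) {
  opts <- .options(arguments = list(fit = list(xlab = "Weeks")), xlab = "Days")
  stopifnot(opts$fit$xlab == "Weeks")   # graph-specific value wins for `fit`
  stopifnot(opts$hazard$xlab == "Days") # generic value applies elsewhere
}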
/scratch/gouwar.j/cran-all/cranData/vsd/R/options.R
# Generates forest plots for a coxph model (more than one if strata isn't NULL)
plot_forest <- function(formula, data, strata = NULL, title, ...) {
  plots <- list()

  if (!is.null(strata)) {
    # strategy: rebuild the coxph call with the strata() term removed
    # (deparse the call and strip 'strata(...)' with a regex), then draw
    # one ggforest per stratum, using the strata factor to filter the *data*
    fit_expression <- deparse(formula$call)
    fit_expression <- str2expression(gsub("\\+?\\s?(strata\\(.+\\)) ", "", fit_expression))
    fit_strataless <- eval(fit_expression, data)

    forest_plots <- list()
    class(forest_plots) <- "vsdstrata"

    forest_plots$all <- survminer::ggforest(fit_strataless, data, main = title, ...)
    forest_plots$strata <- list()

    for (i in levels(strata)) {
      # draws a forest plot for each stratum, separately
      subdata <- data[strata == i, ]
      fit_expression[[1]]$data <- subdata
      forest_plots$strata[[i]] <- survminer::ggforest(
        eval(fit_expression), subdata,
        main = paste(title, i, sep = ", "), ...
      )
    }

    plots$forest <- forest_plots
  } else {
    # no strata: a single forest plot, with the extra options passed along too
    plots$forest <- survminer::ggforest(formula, data, main = title, ...)
  }

  return(plots)
}
/scratch/gouwar.j/cran-all/cranData/vsd/R/plot_forest.R
# Generates hazard plot plot_hazard <- function(surv, strata = NULL, size, ...) { plots <- list() if (is.null(strata)) { # make a simple muhaz graphic hazard <- muhaz::muhaz(surv$time, surv$status) hazard_df <- data.frame( x = hazard$est.grid, y = hazard$haz.est, strata = factor(rep("All", length( hazard$est.grid ))) ) } else { # make several separate hazard maps hazard_df <- data.frame(x = numeric(), y = numeric(), strata = numeric()) hazard_count <- table(strata) for (i in levels(strata)) { # TODO: is it always ten? if (hazard_count[[i]] < 10) { warning( "Level ", i, " doesn't have enough datapoints to estimate the hazard function", call. = FALSE, immediate. = TRUE ) } else { # creates a sub-table with each muhaz graphic, appends the corresponding strata hazard <- muhaz::muhaz(surv$time, surv$status, strata == i) hazard_df_level <- data.frame( x = hazard$est.grid, y = hazard$haz.est, strata = rep(i, length(hazard$est.grid)) ) hazard_df <- rbind(hazard_df, hazard_df_level) } } hazard_df$strata <- factor(hazard_df$strata, levels(strata)) } plot <- ggplot(hazard_df, aes(.data$x, .data$y, color = .data$strata)) + geom_line(size = size) plots$hazard <- ggpubr::ggpar(plot, ...) return(plots) }
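# --- Illustrative usage (editor's sketch, not part of the package) ---
# plot_hazard() expects a data frame with `time` and `status` (0/1) columns,
# i.e. the matrix form of a Surv object, plus an optional strata factor.
# The survival::lung data here is purely illustrative; its 1/2 status coding
# is recoded to the 0/1 coding muhaz expects.
if (interactive() && requireNamespace("muhaz", quietly = TRUE)) {
  lung <- survival::lung
  surv <- data.frame(time = lung$time, status = lung$status - 1)
  plot_hazard(surv, strata = factor(lung$sex), size = 1)$hazard
}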
/scratch/gouwar.j/cran-all/cranData/vsd/R/plot_hazard.R
# Generates parametric graph, with KM on the background #' @importFrom magrittr %>% plot_parametric <- function(model, km_fit, strata = NULL, data, alpha, conf.int, conf.int.km, size, ...) { if (missing(conf.int)) { conf.int <- TRUE } plots <- list() summary <- summary(model) if (!is.factor(strata)) { plots$parametric <- do.call(survminer::ggflexsurvplot, append( list( model, data, size = size, alpha = alpha, conf.int = conf.int, conf.int.km = conf.int.km ), list(...) )) # summary <- summary[[1]] %>% dplyr::mutate(strata = "All") } else { for (level in levels(strata)) { summary[[level]] <- summary[[level]] %>% dplyr::mutate(strata = level) } summary <- do.call(rbind, summary) plot_fit <- do.call(survminer::ggsurvplot, append( list( km_fit, data, alpha = alpha / 2, size = size, conf.int = conf.int.km ), list(...) ))$plot plot_parametric <- plot_fit + geom_line(aes(.data$time, .data$est, color = .data$strata), data = summary, size = size) if (conf.int) { plot_parametric <- plot_parametric + geom_line( aes(.data$time, .data$lcl, color = .data$strata), data = summary, size = size / 2, linetype = "dashed" ) + geom_line( aes(.data$time, .data$ucl, color = .data$strata), data = summary, size = size / 2, linetype = "dashed" ) } plots$parametric <- plot_parametric } return(plots) }
/scratch/gouwar.j/cran-all/cranData/vsd/R/plot_parametric.R
# gets strata factor from formula and its respective model .get_strata <- function(formula, model) { if (inherits(model, "Surv")) { # in case the model is JUST a surv object (hey! it happens) return() } if (inherits(formula, "coxph")) { # discards any columns not starting with strata() assumedly it's only one but... columns <- which(grepl("^strata\\(", colnames(model))) if (length(columns) > 1) { return(survival::strata(model[, columns])) } else if (length(columns) == 1) { return(model[, columns]) } } else { if (ncol(model) >= 2) { # the whole right side of the formula IS the strata discard the left side # (which is always the Surv object) if (ncol(model) == 2 && !is.factor(model[, 2])) { return(as.factor(model[, 2])) } return(survival::strata(model[, -1, drop = FALSE])) } } } # flattens list of graphs (as ggplots are also lists, can't just use unlist) .unlist_plots <- function(plots) { result <- list() for (type in names(plots)) { item <- plots[[type]] name <- type sublist <- NULL if (is.list(item) && !inherits(item, c("ggplot", "ggsurvplot"))) { if (inherits(item, "vsdstrata")) { sublist <- list(all = list(item$all), strata = item$strata) sublist <- unlist(sublist, recursive = FALSE) } else { sublist <- item } } if (is.list(sublist)) { for (subname in names(sublist)) { result[[paste(name, subname, sep = "$")]] <- list(plot = sublist[[subname]], type = type) } } else { result[[name]] <- list(plot = item, type = type) } } return(result) }
/scratch/gouwar.j/cran-all/cranData/vsd/R/util.R
#' @details The only function you're likely to need is [vsd], all other #' functions are either private or ones that shouldn't be called #' directly. #' #' @include options.R #' @include util.R #' @keywords internal #' @aliases vsd-package "_PACKAGE"
/scratch/gouwar.j/cran-all/cranData/vsd/R/vsd-package.R
#' Visualizing Survival Data
#'
#' Renders the supplied survival data and/or model, and their components,
#' as a set of graphically pleasing outputs in the [ggplot2] format.
#'
#' Depending on the kind of model passed to the function, the kinds of
#' generated graphics vary, but usually an estimate of the survival and risk
#' curves (depending on whether the model has covariates) is expected. The
#' kinds of graphics that can be created for a specific R object are detailed
#' in Usage under `.include`'s definition; non-relevant graphics can be
#' requested without error, as the function ignores them silently.
#'
#' Extra options for each graph kind can be passed either to all created
#' graphics, by supplying them as generic arguments, or to specific graphic
#' types, using a list in `.arguments`. Arguments are filtered, so that
#' generic arguments aren't applied if a graphic kind wouldn't use them. As
#' an example,
#' ```
#' vsd(model, data,
#'     .arguments = list(fit = list(size = 3, xlab = "Weeks")),
#'     xlab = "Days")
#' ```
#' would set the x-axis label of all graphics to "Days", except for the
#' `fit` graph, which would use "Weeks" instead.
#'
#' # Generic graphical arguments
#'
#' Unless specified, all graphics are created under [ggpubr::ggpar()] and have
#' as additional options `palette`, `main`, `submain`, `xlab`, `ylab`,
#' `legend.title` and `ggtheme`. Most line graphics also allow the options
#' `size`, `linetype`, `alpha` and `color` to determine line styles, as
#' detailed in [survminer::ggsurvplot()].
#'
#' ## fit
#'
#' Line graphic, with a further subset of the options present in
#' [survminer::ggsurvplot()]: `censor`, `censor.shape`, `censor.size`,
#' `conf.int`, `conf.int.style`.
#'
#' ## parametric
#'
#' Line graphic, with a further subset of the options present in
#' [survminer::ggflexsurvplot()]: `conf.int.km`.
#'
#' ## forest
#'
#' Non-standard graphic(s), using all options within [survminer::ggforest()]:
#' `main`, `cpositions`, `fontsize`, `refLabel`, `noDigits`.
#'
#' ## residuals
#'
#' Line graphic(s), with a further subset of options present in
#' [survminer::ggcoxzph()]: `resid`, `se`, `df`, `nsmo`, `var`, `caption`; and
#' point style customization options as `point.col`, `point.size`,
#' `point.shape`, and `point.alpha`.
#'
#' ## hazard
#'
#' Line graphics, using the generic graphical arguments.
#'
#' @param model The survival model, or data structure, to generate graphics from
#' @param data Data frame from which the model fetches its variables; if left
#' blank, it will be extracted from the model when possible
#' @param .interactive Allows exploring the generated graphs before returning
#' (use with the [plotly](https://plotly.com/r/) package for best results)
#' @param .include Graph types to output if relevant, defaults to all possible
#' @param .arguments Collection of lists of arguments, indexed by the specific
#' type of graph they should be passed to; has priority over \dots
#' @param ... Miscellaneous arguments, passed to ALL graphs
#'
#' @import survival
#' @import ggplot2
#' @importFrom stats model.frame
#' @export
#' @return A list of ggplot2 graphs and/or lists of graphs, relevant to the model
#'
#' @examples
#' # non-survfit objects are coerced into a survfit object with default arguments
#' vsd(coxph(Surv(time, status) ~ sex + strata(rx) + adhere, data = colon),
#'     .include = c("haz"))
#'
#' # parametric models are also supported with flexsurv
#' vsd(flexsurv::flexsurvreg(Surv(rectime, censrec) ~ group,
#'     data = flexsurv::bc, dist = 'gengamma'),
#'     .include = c("par"))
#'
vsd <- function(model,
                data = NULL,
                .interactive = FALSE,
                .include = c("fit", "parametric", "forest", "residuals", "hazard"),
                .arguments = list(),
                ...) {
  UseMethod("vsd")
}

#' @describeIn vsd Wraps `Surv(...) ~ (...)` in a survfit object (Kaplan-Meier
#'   model)
#' @export
vsd.formula <- function(model,
                        data = NULL,
                        .interactive = FALSE,
                        .include = c("fit", "hazard"),
                        .arguments = list(),
                        ...) {
  # (Assumedly) '~' call (TODO: fail first?)
  if (is.null(data)) {
    stop("Data structure required with fit object of type call.")
  }
  new_model <- survfit(model, data)
  new_model$call$formula <- eval(model, data)
  vsd(new_model, data, .interactive, .include, .arguments, ...)
}

#' @describeIn vsd Wraps [Surv()] in a survfit object (Kaplan-Meier model)
#' @export
vsd.Surv <- function(model,
                     data = NULL,
                     .interactive = FALSE,
                     .include = c("fit", "hazard"),
                     .arguments = list(),
                     ...) {
  # Surv object; TODO: handle more than just right-censored survival
  # the data frame is reconstructed from the Surv object itself
  data <- as.data.frame(as.matrix(model))
  new_model <- survfit(Surv(time, status) ~ 1, data)
  vsd(new_model, data, .interactive, .include, .arguments, ...)
}

#' @describeIn vsd Wraps \code{coxph(...)} in a survfit object (Cox model)
#' @export
vsd.coxph <- function(model,
                      data = NULL,
                      .interactive = FALSE,
                      .include = c("fit", "forest", "residuals", "hazard"),
                      .arguments = list(),
                      ...) {
  if (is.null(data)) {
    data <- eval(model$call$data)
    if (is.null(data)) {
      stop("Original data structure couldn't be extracted, ",
           "supply it to function call instead")
    }
  }
  # http://adv-r.had.co.nz/Expressions.html#capturing-call
  new_model <- survfit(model)
  new_model$call$formula <- substitute(model)
  vsd(new_model, data, .interactive, .include, .arguments, ...)
}

#' @describeIn vsd Graphical output for survfit objects (Kaplan-Meier model)
#' @export
vsd.survfit <- function(model,
                        data = NULL,
                        .interactive = FALSE,
                        .include = c("fit", "hazard"),
                        .arguments = list(),
                        ...) {
  plots <- list()
  options <- .options(.arguments, ...)
include <- as.vector(match.arg(.include, several.ok = TRUE)) # retrieving mid-objects if (is.null(data)) { if (!is.null(model$call$data)) { data <- eval(model$call$data) } else if (is.call(model$call$formula) && model$call$formula[[1]] == "coxph") { data <- eval(eval(model$call$formula)$call$data) } if (is.null(data)) { stop("Original data structure couldn't be extracted,", " supply it to function call instead") } } formula <- model$call$formula model_frame <- model.frame(formula, data) strata <- .get_strata(formula, model_frame) surv <- as.data.frame(as.matrix(model_frame[, 1])) #### PLOT$FIT if (("fit" %in% include)) { fit_plots <- do.call(survminer::ggsurvplot, append( list(model, data), options$fit)) plots$fit <- fit_plots$plot } #### PLOT$HAZARD if (("hazard" %in% include)) { hazard_plots <- do.call(plot_hazard, append(list(surv, strata), options$hazard)) plots <- append(plots, hazard_plots) } if (.interactive && interactive()) { .do_interactive(plots) } return(plots) } #' @describeIn vsd Graphical output for survfit objects (Cox model) #' @export vsd.survfitcox <- function(model, data = NULL, .interactive = FALSE, .include = c("fit", "forest", "residuals", "hazard"), .arguments = list(), ...) { plots <- list() options <- .options(.arguments, ...) include <- as.vector(match.arg(.include, several.ok = TRUE)) # retrieving mid-objects if (is.null(data)) { data <- eval(eval(model$call$formula)$call$data) if (is.null(data)) { stop("Original data structure couldn't be extracted, ", "supply it to function call instead") } } cox_model <- eval(model$call$formula, data) formula <- cox_model$formula model_frame <- model.frame(formula, data) strata <- .get_strata(cox_model, model_frame) surv <- as.data.frame(as.matrix(model_frame[, 1])) #### PLOT$FIT if (("fit" %in% include)) { fit_plots <- do.call(survminer::ggsurvplot, append( list(model, data), options$fit)) plots$fit <- fit_plots$plot } #### PLOT$FOREST if (("forest" %in% include)) { forest_plots <- do.call(plot_forest, append( list(cox_model, data, strata), options$forest)) plots <- append(plots, forest_plots) } #### PLOT$RESIDUALS (for coxph) if (("residuals" %in% include)) { plots$residuals <- do.call(survminer::ggcoxzph, append( list(cox.zph(cox_model)), options$residuals)) } #### PLOT$HAZARD if (("hazard" %in% include)) { hazard_plots <- do.call(plot_hazard, append( list(surv, strata), options$hazard)) plots <- append(plots, hazard_plots) } if (.interactive && interactive()) { .do_interactive(plots) } return(plots) } #' @describeIn vsd Graphical output for flexsurvreg objects (various parametric models) #' @export vsd.flexsurvreg <- function(model, data = NULL, .interactive = FALSE, .include = c("fit", "parametric", "hazard"), .arguments = list(), ...) { plots <- list() options <- .options(.arguments, ...) 
include <- as.vector(match.arg(.include, several.ok = TRUE)) if (is.null(data)) { if (!is.null(model$call$data)) { data <- eval(model$call$data) } if (is.null(data)) { stop("Original data structure couldn't be extracted, ", "supply it to function call instead") } } formula <- eval(model$call$formula, data) model_frame <- model.frame(model) strata <- .get_strata(formula, model_frame[, !(names(model_frame) == "(weights)")]) surv <- as.data.frame(as.matrix(model_frame[, 1])) km_fit <- survfit(formula, data) km_fit$call$formula <- eval(km_fit$call$formula, data) #### PLOT$FIT if ("fit" %in% include) { plot_fit <- do.call(survminer::ggsurvplot, append( list(km_fit, data), options$fit)) plots$fit <- plot_fit$plot } #### PLOT$PARAMETRIC if (("parametric" %in% include)) { parametric_plots <- do.call(plot_parametric, append( list(model, km_fit, strata, data), options$parametric)) plots <- append(plots, parametric_plots) } #### PLOT$HAZARD if (("hazard" %in% include)) { hazard_plots <- do.call(plot_hazard, append( list(surv, strata), options$hazard)) plots <- append(plots, hazard_plots) } if (.interactive && interactive()) { .do_interactive(plots) } return(plots) } .do_interactive <- function(plots) { # TODO: make choices into two lists: plots, types ? choices <- .unlist_plots(plots) whitelist <- c("fit", "parametric", "residuals", "hazard") repeat { choice <- utils::menu(names(choices), title = "Pick a graphic (or 0 to exit)") if (choice <= 0) break choice <- choices[[choice]] plot <- choice$plot type <- choice$type if (type %in% whitelist) { if (requireNamespace("plotly", quietly = TRUE)) { if (inherits(plot, "ggsurvplot")) { print(plotly::ggplotly(plot$plot)) } else { print(plotly::ggplotly(plot)) } } else { print(plot) } } else { print(plot) } } }
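# --- Illustrative usage (editor's sketch, not part of the package) ---
# The generic dispatches on the model class; a plain survfit object yields
# the fit and hazard graphs. This mirrors the roxygen examples above and
# assumes the survival and survminer packages are available.
if (interactive()) {
  fit <- survfit(Surv(time, status) ~ sex, data = survival::lung)
  plots <- vsd(fit, survival::lung, .include = c("fit", "hazard"))
  plots$fit # the Kaplan-Meier curve as a ggplot object
}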
/scratch/gouwar.j/cran-all/cranData/vsd/R/vsd.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 moy <- function(x) { .Call('_vsgoftest_moy', PACKAGE = 'vsgoftest', x) } vestimates <- function(x, binf, bsup) { .Call('_vsgoftest_vestimates', PACKAGE = 'vsgoftest', x, binf, bsup) }
/scratch/gouwar.j/cran-all/cranData/vsgoftest/R/RcppExports.R
#### Heading ####
## File: external.R
## Desc: external (user-available) functions of the package vsgoftest
## Date: 2017-12-21
## R version: 3.4.3
## Required packages: fitdistrplus, Rcpp
## source internal.R before sourcing external.R
## Script designed with and optimized for RStudio

#### Pareto distributions: related functions ####

## dpareto (function): density function of the Pareto distribution.
dpareto <- function(x, mu, c, log = FALSE) {
  ## BEGIN CHECKING ARGUMENTS VALIDITY
  if (any(mu <= 0)) stop("mu must be positive")
  if (any(c <= 0)) stop("c must be positive")
  ## END CHECKING
  tmp <- (mu * c^mu * x^(-1 - mu)) * (x >= c)
  if (log) {tmp <- log(tmp)}
  return(tmp)
}

## ppareto (function): distribution function of the Pareto distribution.
ppareto <- function(q, mu, c, lower.tail = TRUE, log.p = FALSE) {
  ## BEGIN CHECKING ARGUMENTS VALIDITY
  if (any(mu <= 0)) stop("mu must be positive")
  if (any(c <= 0)) stop("c must be positive")
  ## END CHECKING
  if (lower.tail) {
    res <- (1 - (c/q)^mu) * (q >= c)
  } else {
    res <- 1 - (1 - (c/q)^mu) * (q >= c)
  }
  if (log.p) {res <- log(res)}
  return(res)
}

## qpareto (function): quantile function of the Pareto distribution.
qpareto <- function(p, mu, c, lower.tail = TRUE, log.p = FALSE) {
  ## BEGIN CHECKING ARGUMENTS VALIDITY
  if (any(mu <= 0)) stop("mu must be positive")
  if (any(c <= 0)) stop("c must be positive")
  ## END CHECKING
  if (log.p) {p <- exp(p)} # per R convention, log.p means p is supplied on the log scale
  if (lower.tail) {
    res <- c * (1 - p)^(-1/mu)
  } else {
    res <- c * p^(-1/mu)
  }
  return(res)
}

## rpareto (function): pseudo-random generator for the Pareto distribution.
rpareto <- function(n, mu, c) {
  ## BEGIN CHECKING ARGUMENTS VALIDITY
  if (any(mu <= 0)) stop("mu must be positive")
  if (any(c <= 0)) stop("c must be positive")
  ## END CHECKING
  u <- runif(n)
  res <- c * (1 - u)^(-1/mu)
  return(res)
}

#### Laplace distributions: related functions ####

dlaplace <- function(x, mu, b, log = FALSE) {
  ## BEGIN CHECKING ARGUMENTS VALIDITY
  if (!is.numeric(mu)) {stop("mu must be numeric")}
  if (any(b <= 0)) {stop("b must be positive")}
  ## END CHECKING
  tmp <- exp(-abs(x - mu)/b) / (2*b)
  if (log) {tmp <- log(tmp)}
  return(tmp)
}

plaplace <- function(q, mu, b, lower.tail = TRUE, log.p = FALSE) {
  ## BEGIN CHECKING ARGUMENTS VALIDITY
  if (!is.numeric(mu)) {stop("mu must be numeric")}
  if (any(b <= 0)) {stop("b must be positive")}
  ## END CHECKING
  cd <- (q < mu)
  res <- (!cd) + (-1)^(!cd) * exp((-1)^(!cd) * (q - mu)/b) / 2
  if (!lower.tail) {res <- 1 - res}
  if (log.p) {res <- log(res)}
  return(res)
}

qlaplace <- function(p, mu, b, lower.tail = TRUE, log.p = FALSE) {
  ## BEGIN CHECKING ARGUMENTS VALIDITY
  if (!is.numeric(mu)) {stop("mu must be numeric")}
  if (any(b <= 0)) {stop("b must be positive")}
  ## END CHECKING
  if (log.p) {p <- exp(p)} # per R convention, log.p means p is supplied on the log scale
  if (!lower.tail) {p <- 1 - p} # upper-tail quantile is Q(1 - p)
  cd <- (p > 1/2)
  res <- mu + (-1)^(cd) * b * log(2 * (cd + (-1)^(cd) * p))
  return(res)
}

rlaplace <- function(n, mu, b) {
  ## BEGIN CHECKING ARGUMENTS VALIDITY
  if (!is.numeric(mu)) {stop("mu must be numeric")}
  if (any(b <= 0)) {stop("b must be positive")}
  ## END CHECKING
  u <- runif(n)
  res <- qlaplace(p = u, mu, b)
  return(res)
}

#### Vasicek estimator of Shannon differential entropy ####
## entropy.estimate (function): computes the Vasicek estimate of Shannon differential entropy from a numeric sample
## Arguments:
## x : a numeric vector
## window : a numeric value specifying the window size; see details.
## Value: entropy.estimate returns a numeric value, the Vasicek estimate with window size 'window' of the differential entropy of the sample x.
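# --- Quick sanity check (editor's sketch, not part of the package) ---
# The Pareto quantile and distribution functions above should invert each
# other on (c, Inf); a round-trip check exercises both tails.
if (interactive()) {
  p <- seq(0.05, 0.95, by = 0.1)
  q <- qpareto(p, mu = 2, c = 1)
  stopifnot(all.equal(ppareto(q, mu = 2, c = 1), p))                           # lower tail
  stopifnot(all.equal(ppareto(q, mu = 2, c = 1, lower.tail = FALSE), 1 - p))   # upper tail
}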
entropy.estimate <- function(x, window) {
  if (missing(x)) stop('Argument "x" is missing, with no default value.')
  if (!is.vector(x) | !is.numeric(x)) stop('Argument "x" must be a numeric vector')
  if (missing(window)) stop('Argument "window" is missing, with no default value.')
  if (!is.numeric(window) | !(length(window) == 1)) stop('Argument "window" must be a single numeric value.')
  window <- ceiling(window) # a non-integer value is transformed into an integer
  if (window < 0 | window > length(x)/2) stop('Argument "window" must be positive and smaller than half of the sample length.')
  n <- length(x) # sample length
  TIES <- FALSE
  if (length(unique(x)) < n) {
    warning('Ties hinder the Vasicek-Song test')
    TIES <- TRUE
  }
  xord <- sort(x) # order statistics
  m <- window
  if (TIES) {
    NTmax <- max(table(x))
    # if the window is too small and there are too many ties, the Vasicek estimate cannot be computed
    if (m < NTmax) {
      stop('Too many ties for computing Vasicek estimate.')
    }
  }
  # Vasicek's estimator
  A <- numeric(n)
  A[1:m] <- log(n*(xord[1:m + m] - xord[1])/(2*m))
  A[(m+1):(n-m-1)] <- log(n*(xord[(m+1):(n-m-1) + m] - xord[(m+1):(n-m-1) - m])/(2*m))
  A[(n-m):n] <- log(n*(xord[n] - xord[(n-m):n - m])/(2*m))
  V <- mean(A)
  return(V)
}

#### Vasicek-Song GOF tests ####
## vs.test (function): performs the Vasicek-Song test
vs.test <- function(x, densfun, param = NULL, simulate.p.value = NULL, B = 5000,
                    delta = NULL, extend = FALSE, relax = FALSE) {
  ### CHECKING FOR ARGUMENT VALIDITY
  # densfun
  if (missing(densfun) | !is.character(densfun)) stop("'densfun' must be supplied as a function or name")
  if (!(densfun %in% c("dunif", "dnorm", "dlnorm", "dexp", "dgamma", "dweibull",
                       "dpareto", "df", "dlaplace", "dbeta"))) {
    stop("Unsupported distribution.
Please, report to the help file of function \"vs.test\" to get the list of available distributions.")
  }
  # x
  if (missing(x)) stop('Argument "x" is missing, with no default value.')
  if (!is.vector(x) | !is.numeric(x)) stop('Argument "x" must be a numeric vector')
  n <- length(x) # sample size
  Cond <- switch(densfun,
                 "dunif" = FALSE,
                 "dlnorm" = any(x < 0),
                 "dnorm" = FALSE,
                 "dexp" = any(x < 0),
                 "dgamma" = any(x < 0),
                 "dweibull" = any(x < 0),
                 "dpareto" = FALSE,
                 "df" = any(x < 0),
                 "dlaplace" = FALSE,
                 "dbeta" = any(x <= 0) | any(x >= 1))
  if (Cond) stop("\"x\": invalid values (not compatible with the specified distribution)")
  # param
  if (!is.null(param) & !is.numeric(param)) {
    stop(paste("\"param\" must be a numeric vector specifying parameters for", densfun, sep = ' '))
  }
  if (is.null(param)) {
    ESTIM <- suppressWarnings(
      tryCatch(MLE.param(x, densfun),
               error = function(err) {stop('Unable to compute the MLE for this model; see the help file of fitdistrplus::fitdist for details.\n')}
      ))
    METHOD <- 'family'
  } else {
    ESTIM <- param
  }
  if (is.numeric(ESTIM)) {
    Cond <- switch(densfun,
                   "dunif" = (length(ESTIM) != 2) | (ESTIM[2] <= ESTIM[1]) | any(x < ESTIM[1]) | any(x > ESTIM[2]),
                   "dnorm" = (length(ESTIM) != 2) | (ESTIM[2] <= 0),
                   "dlnorm" = (length(ESTIM) != 2) | (ESTIM[2] <= 0),
                   "dexp" = (length(ESTIM) != 1) | (ESTIM <= 0),
                   "dgamma" = (length(ESTIM) != 2) | any(ESTIM <= 0),
                   "dweibull" = (length(ESTIM) != 2) | any(ESTIM <= 0),
                   "dpareto" = (length(ESTIM) != 2) | any(ESTIM <= 0),
                   "df" = (length(ESTIM) != 3) | any(ESTIM <= 0),
                   "dlaplace" = (length(ESTIM) != 2) | (ESTIM[2] <= 0),
                   "dbeta" = (length(ESTIM) != 2) | (ESTIM[1] <= 0) | (ESTIM[2] <= 0))
    if (Cond) stop("\"param\": invalid parameter (not consistent with the specified distribution)")
  }
  names(ESTIM) <- switch(densfun,
                         'dunif' = c('Min', 'Max'),
                         'dnorm' = c('Mean', 'St. dev.'),
                         'dlnorm' = c('Location', 'Scale'),
                         'dexp' = 'Rate',
                         'dgamma' = c('Shape', 'Rate'),
                         'dweibull' = c('Shape', 'Scale'),
                         'dpareto' = c('mu', 'c'),
                         'df' = c('df1', 'df2', 'ncp'),
                         'dlaplace' = c('Shape', 'Scale'),
                         'dbeta' = c('Shape1', 'Shape2'))
  # simulate.p.value
  if (!is.null(simulate.p.value) & !is.logical(simulate.p.value)) {
    warning('Invalid value for simulate.p.value. Reset to default: NULL.')
    simulate.p.value <- NULL
  }
  if (is.null(simulate.p.value)) {
    if (n < 80) {simulate.p.value <- TRUE} else {simulate.p.value <- FALSE}
  }
  # B
  if (!is.numeric(B) | !(length(B) == 1) | B <= 0) {
    stop("B must be a positive integer")
  } else {
    # warning('B converted to a positive integer')
    B <- ceiling(B) # converts B into an integer
  }
  # delta
  if (!is.null(delta) & (!is.numeric(delta) | !(length(delta) == 1))) {
    warning('Argument delta must be a numeric value. Reset to default value.')
    delta <- NULL
  }
  if (is.numeric(delta)) {
    if (delta >= 1/3) {
      warning('Argument delta must be smaller than 1/3. Reset to default value.')
      delta <- NULL
    }
  }
  # extend
  if (!is.logical(extend) | !length(extend) == 1) {
    warning('Argument extend must be a logical value (TRUE or FALSE). Reset to default value.')
    extend <- FALSE
  }
  # relax
  if (!is.logical(relax) | !length(relax) == 1) {
    warning('Argument relax must be a logical value (TRUE or FALSE).
Reset to default value.') relax <- FALSE } ### END CHECKING DNAME <- deparse(substitute(x)) NVALUE <- switch(densfun, 'dunif'='the uniform distribution', 'dnorm'='the normal distribution', 'dlnorm'='the log-normal distribution', 'dexp'='the exponential distribution', 'dgamma'='the gamma distribution', 'dweibull'='the Weibull distribution', 'dpareto'='the Pareto distribution', 'df'='the Fisher distribution', 'dlaplace'='the Laplace distribution', 'dbeta'='the Beta distribution') names(NVALUE) <- 'Distribution under null hypothesis' resVE <- vs.estimate(x, densfun, ESTIM, extend, delta, relax) STAT <- -resVE$estimate - mean(log(likelihood(x, densfun, ESTIM))) names(STAT) <- "Test statistic" PARAM <- resVE$window names(PARAM) <- 'Optimal window' #p-value computed by Monte Carlo simulation or asymptotic normality if (simulate.p.value){ dist <- suppressWarnings(simulate.vs.dist(n, densfun, ESTIM, B, extend, delta, relax)) if (any(is.na(dist))) { warning(paste('For', sum(is.na(dist)), 'simulations (over', B, '), entropy estimate is greater than empirical maximum entropy for all window sizes.')) } PVAL <- mean(dist>STAT, na.rm = TRUE) if (is.nan(PVAL)) { #It happens when all components of dist are NA. stop('Unable to compute Monte-Carlo estimate of the p-value.') } } else { bias <- log(2*PARAM) - log(n) + digamma(n+1) -digamma(2*PARAM) + 2*PARAM/n * sum(1/(1:(2*PARAM-1))) - 2*PARAM*sum(1/(1:(PARAM-1)))/n - 2*sum(((PARAM-1):1)/ (PARAM:(2*PARAM-2)))/n PVAL <- 1 -pnorm( (6*PARAM*n)^(1/2)*(STAT-bias )) } names(PVAL) <- 'p-value' if (is.null(param)) { METHOD <- paste("Vasicek-Song GOF test for",NVALUE, sep=' ') structure(list(statistic=STAT, null.value=NVALUE, parameter=PARAM, estimate=ESTIM, p.value=PVAL, method=METHOD, data.name=DNAME, observed=x), class="htest") } else{ METHOD <- paste("Vasicek-Song GOF test for",NVALUE, paste('with ', myprint(names(ESTIM), param), sep=''), sep=' ') structure(list(statistic=STAT, null.value=NVALUE, parameter=PARAM, p.value=PVAL, method=METHOD, data.name=DNAME, observed=x), class="htest") } }
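# --- Illustrative usage (editor's sketch, not part of the package) ---
# A small end-to-end call: test a normal sample for normality with the
# composite null (parameters estimated by ML) and with a fixed simple null.
if (interactive()) {
  set.seed(1)
  samp <- rnorm(100, mean = 0, sd = 1)
  vs.test(x = samp, densfun = 'dnorm')                  # parameters estimated
  vs.test(x = samp, densfun = 'dnorm', param = c(0, 1)) # simple null hypothesis
}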
/scratch/gouwar.j/cran-all/cranData/vsgoftest/R/external.R
#### Heading ####
## File: internal.R
## Desc: internal functions of the package vsgoftest
## Date: 2017-12-21
## R version: 3.4.3
## Required packages: fitdistrplus, Rcpp
## Script designed with and optimized for RStudio

#### Optimal estimation of differential Shannon entropy for a specified model ####

## likelihood (function): a wrapper computing the likelihood of the sample under the null distribution the test is applied to
likelihood <- function(x, densfun, param) {
  res <- switch(densfun,
                "dunif" = dunif(x, min = param[1], max = param[2]),
                "dnorm" = dnorm(x, mean = param[1], sd = param[2]),
                "dlnorm" = dlnorm(x, meanlog = param[1], sdlog = param[2]),
                "dexp" = dexp(x, rate = param[1]),
                "dgamma" = dgamma(x, shape = param[1], rate = param[2]),
                "dweibull" = dweibull(x, shape = param[1], scale = param[2]),
                "dpareto" = dpareto(x, param[1], param[2]),
                "df" = df(x, df1 = param[1], df2 = param[2], ncp = param[3]),
                "dlaplace" = dlaplace(x, mu = param[1], b = param[2]),
                "dbeta" = dbeta(x, shape1 = param[1], shape2 = param[2]))
  return(res)
}

## vs.estimate (function): Vasicek estimate (with the optimal window) of Shannon differential entropy from a numeric sample.
vs.estimate <- function(x, densfun = NULL, param, extend, delta, relax, suppress.error = FALSE) {
  n <- length(x) # sample length
  TIES <- FALSE
  if (length(unique(x)) < n) {
    warning('Ties should not be present for the Vasicek-Song test')
    TIES <- TRUE
  }
  xord <- sort(x) # order statistics
  NTmax <- max(table(x)) # maximum number of ties in the sample
  if (is.null(delta)) {
    delta <- switch(densfun,
                    'dunif' = 1/12, 'dnorm' = 1/12, 'dlnorm' = 1/12,
                    'dexp' = 1/12, 'dgamma' = 2/15, 'dweibull' = 2/15,
                    'dpareto' = 1/12, 'df' = 2/15, 'dlaplace' = 1/12,
                    'dbeta' = 2/15)
  }
  if (extend) {
    bound <- floor(n/2)
  } else {
    bound <- min(max(1, floor(n^(1/3 - delta))), n/2)
  }
  if (!TIES) {
    V <- vestimates(xord, 1, bound)
  } else {
    if (bound <= NTmax - 1) {
      stop('Too many ties to compute Vasicek estimate.')
    } else {
      V <- vestimates(xord, NTmax, bound)
    }
  }
  if (relax) { # if relax = TRUE, the constraint Vmn < empirical maximum entropy is dropped
    res <- V
  } else { # if relax = FALSE, the constraint Vmn < empirical maximum entropy is active
    res <- V[V < -mean(log(likelihood(x, densfun, param)))] # keep only estimates smaller than the empirical maximum entropy
  }
  if (length(res) == 0 & !suppress.error) {
    stop("The sample entropy is greater than empirical maximum entropy for all possible window sizes; the sample may be too small or is unlikely to be drawn from the null distribution.")
  }
  if (length(res) == 0 & suppress.error) {
    Vopt <- NA
    mopt <- NA
  } else {
    Vopt <- max(res) # final estimate V
    mopt <- min(which(V == Vopt)) + NTmax - 1 # optimal window size m
  }
  return(list(estimate = Vopt, window = mopt, ties = TIES))
}

#### Monte-Carlo simulation of the distribution of the Vasicek estimate under the null hypothesis ####
## simulate.vs.dist (function): simulates a sample of the Vasicek test statistic for GOF to a given parametric distribution (for later use in Monte-Carlo methods).
simulate.vs.dist <- function(n, densfun, param, B, extend, delta, relax) {
  tmp <- function() {
    ech <- switch(densfun,
                  "dunif" = runif(n, min = param[1], max = param[2]),
                  "dnorm" = rnorm(n, mean = param[1], sd = param[2]),
                  "dlnorm" = rlnorm(n, meanlog = param[1], sdlog = param[2]),
                  "dexp" = rexp(n, rate = param),
                  "dgamma" = rgamma(n, shape = param[1], rate = param[2]),
                  "dweibull" = rweibull(n, shape = param[1], scale = param[2]),
                  "dpareto" = rpareto(n, param[1], param[2]),
                  "df" = rf(n, df1 = param[1], df2 = param[2], ncp = param[3]),
                  "dlaplace" = rlaplace(n, mu = param[1], b = param[2]),
                  "dbeta" = rbeta(n, shape1 = param[1], shape2 = param[2]))
    est <- -vs.estimate(ech, densfun, param, extend, delta, relax, suppress.error = TRUE)$estimate
    loglik <- mean(log(likelihood(ech, densfun, param)))
    res <- est - loglik
  }
  return(replicate(B, tmp()))
}

#### MLE of the parameters of the distribution ####

## MLE.fisher (function): returns maximum-likelihood estimates of the parameters of the Fisher distribution, with specified starting values.
MLE.fisher <- function(x) {
  dens <- density(x)
  mode <- dens$x[which(dens$y == max(dens$y))]
  if ((mean(x) > 1) & (mode < (mean(x)/(2*mean(x) - 1)))) {
    d2start <- 2*mean(x)/(mean(x) - 1) # d2start must be computed first: d1start depends on it
    d1start <- -2/(((d2start + 2)/d2start)*mode - 1)
    fitdist(x, "f", start = list(df1 = d1start, df2 = d2start, ncp = 5))
  } else {
    fitdist(x, "f", start = list(df1 = 1, df2 = 1, ncp = 1), lower = c(0, 0, 0))
  }
}

## MLE.param (function): returns maximum-likelihood estimates of the parameters of the specified distribution.
MLE.param <- function(x, densfun) {
  n <- length(x)
  res <- switch(densfun,
                "dunif" = c(min(x), max(x)),
                "dnorm" = c(mean(x), sqrt((n - 1)*var(x)/n)),
                "dlnorm" = c(mean(log(x)), sqrt((n - 1)*var(log(x))/n)),
                "dexp" = 1/mean(x),
                "dgamma" = suppressWarnings(
                  tryCatch(fitdist(x, "gamma", lower = c(0.0001, 0.0001))$estimate,
                           error = function(err) {cat('Unable to compute the MLE of the sample for the Gamma distribution.\n')})),
                "dweibull" = suppressWarnings(
                  tryCatch(fitdist(x, "weibull", lower = c(0.0001, 0.0001))$estimate,
                           error = function(err) {cat('Unable to compute the MLE of the sample for the Weibull distribution.\n')})),
                "dpareto" = c(1/log(exp(mean(log(x)))/min(x)), min(x)),
                "df" = MLE.fisher(x)$estimate,
                "dlaplace" = c(mean(x), mean(abs(x - mean(x)))),
                "dbeta" = suppressWarnings(
                  tryCatch(fitdist(x, "beta", lower = c(0.0001, 0.0001))$estimate,
                           error = function(err) {cat('Unable to compute the MLE of the sample for the Beta distribution.\n')})))
  return(res)
}

#### Miscellaneous ####

## myprint (function): formats parameter names and values as a string printed when vs.test is executed
myprint <- function(x, y) {
  n <- length(x)
  tmp <- character(n)
  for (i in 1:n) {
    tmp[i] <- paste(x[i], '=', y[i], sep = '')
  }
  return(paste(tmp, collapse = ', '))
}
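# --- Reference sketch (editor's note, not part of the package) ---
# vestimates() is implemented in C++ (see RcppExports.R). Judging from its use
# in vs.estimate() above and the formula in entropy.estimate(), it returns the
# Vasicek spacing estimates for every window size from `binf` to `bsup`. A
# plain-R equivalent, under that reading of the interface, would be:
vestimates_r <- function(xord, binf, bsup) {
  n <- length(xord)
  sapply(binf:bsup, function(m) {
    A <- numeric(n)
    A[1:m] <- log(n*(xord[1:m + m] - xord[1])/(2*m))
    A[(m+1):(n-m-1)] <- log(n*(xord[(m+1):(n-m-1) + m] - xord[(m+1):(n-m-1) - m])/(2*m))
    A[(n-m):n] <- log(n*(xord[n] - xord[(n-m):n - m])/(2*m))
    mean(A)
  })
}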
/scratch/gouwar.j/cran-all/cranData/vsgoftest/R/internal.R
#### header #### ## vsgoftest_performances.R ## Code chunks for generating outputs presented in Section "Performances of Vasicek-Song tests" of ## J Lequesne, P Regnault (2018). Package vsgoftest for R: goodness-of-fit tests based on Kullback-Leibler divergence. #### List of required packages #### library(vsgoftest) library(dbEmpLikeGOF) library(ggplot2) library(dplyr) library(knitr) library(microbenchmark) library(goftest) #### vsgoftest versus dbEmpLikeGOF #### ## Comparison of computation times ## # For normal distribution set.seed(1) sample <- rnorm(n = 50) bm <- microbenchmark(vs.test = vs.test(x = sample, densfun = 'dnorm', simulate.p.value = TRUE, B = 1000, delta = -1/6), dbEmpLikeGOF = dbEmpLikeGOF(x = sample, testcall = "normal", pvl.Table = FALSE, num.mc = 1000, vrb = FALSE), times = 100L) bm %>% rename(`function` = expr) %>% group_by(`function`) %>% summarize(min = min(time/10^6), `1stQ` = quantile(time/10^6,0.25), median = median(time/10^6), mean = mean(time/10^6), `3rdQ` = quantile(time/10^6,0.75), max = max(time/10^6), sd = sd(time/10^6)) %>% kable() # Violin plots for comparing computation times of vs.test and dbEmpLikeGOF # postscript(file = '../../papier/PKGFigCompTimeNorm.ps') bm %>% ggplot(mapping = aes(x = expr, y = time/10^6)) + geom_violin() + coord_flip() + stat_summary(fun = "median", geom = "errorbar", mapping = aes(ymax = ..y.., ymin = ..y..), linetype = "dashed", col = 'red', show.legend = TRUE) + labs(x = '', y = 'Computation time (ms)') + theme(text = element_text(size = 18)) # dev.off() # For uniform distribution set.seed(1) sample <- runif(n = 50, min = 1, max = 3) bm <- microbenchmark(vs.test = vs.test(x = sample, densfun = 'dunif', simulate.p.value = TRUE, B = 1000, delta = -1/6), dbEmpLikeGOF = dbEmpLikeGOF(x = sample, testcall = "uniform", pvl.Table = FALSE, num.mc = 1000, vrb = FALSE), times = 100L) bm %>% rename(`function` = expr) %>% group_by(`function`) %>% summarize(min = min(time/10^6), `1stQ` = quantile(time/10^6,0.25), median = median(time/10^6), mean = mean(time/10^6), `3rdQ` = quantile(time/10^6,0.75), max = max(time/10^6), sd = sd(time/10^6)) %>% kable() # Violin plots for comparing computation times of vs.test and dbEmpLikeGOF # postscript(file = '../../papier/PKGFigCompTimeUnif.ps') bm %>% ggplot(mapping = aes(x = expr, y = time/10^6)) + geom_violin() + coord_flip() + stat_summary(fun.y = "median", geom = "errorbar", mapping = aes(ymax = ..y.., ymin = ..y..), linetype = "dashed", col = 'red', show.legend = TRUE) + labs(x = '', y = 'Computation time (ms)') + theme(text = element_text(size = 18)) # dev.off() ## Power comparison when applied to heavy tailed samples ## ##With moderate sample size (n = 50) #For laplace samples tmp <- function(n=50) { samp <- rlaplace(n, mu=0, b= 1) pvs <- vs.test(x = samp, densfun = "dnorm")$p.value pelr <- dbEmpLikeGOF(x = samp, testcall = "normal", vrb = FALSE)$pvalue return(c(VS = pvs, ELR = pelr)) } set.seed(seed = 3) res <- replicate(n = 1000, expr = tmp(50)) apply(res < 0.05, 1, mean) #For student samples tmp2 <- function(n = 50) { samp <- rt(n, df = 4, ncp = 0) pvs <- vs.test(x = samp, densfun = "dnorm")$p.value pelr <- dbEmpLikeGOF(x = samp, testcall = "normal", vrb = FALSE)$pvalue return(c(VS = pvs, ELR = pelr)) } set.seed(seed = 4) res2 <- replicate(n = 1000, expr = tmp2(50)) apply(res2 < 0.05, 1, mean) ##With large samples (n = 200) #Laplace set.seed(seed = 5) res3 <- replicate(n = 1000, expr = tmp(200)) apply(res3 < 0.05, 1, mean) #Student set.seed(seed = 6) res4 <- replicate(n = 1000, expr = 
tmp2(200))
apply(res4 < 0.05, 1, mean)

#### Power comparisons ####
lstn <- c(20, 30, 50, 100) # list of sample sizes
N <- 10000 # number of replicates for MC simulations

## Pareto against log-normal
# auxiliary function: simulates a sample, applies the GOF tests and gathers their p-values
pop <- function(n, m = 0, s) {
  ech <- 1 + rlnorm(n, meanlog = m, sdlog = s)
  pvs <- vs.test(x = ech, densfun = 'dpareto', param = c(1/s, 1), B = 1000)$p.value
  pks <- ks.test(x = ech, y = 'ppareto', mu = 1/s, c = 1)$p.value
  pad <- ad.test(x = ech, null = 'ppareto', mu = 1/s, c = 1)$p.value
  pcvm <- cvm.test(x = ech, null = 'ppareto', mu = 1/s, c = 1)$p.value
  return(c(pvs, pks, pad, pcvm))
}

# Pareto with c = 1, mu = 1 against shifted log-normal with meanlog = 0, sdlog = 1
# row i of powers: sample size lstn[i]; columns: VS, KS, AD, CvM powers
mu <- 1
powers1 <- matrix(0, nrow = 4, ncol = length(lstn))
set.seed(54)
for (i in seq_along(lstn)) {
  res.pow <- replicate(n = N, expr = pop(lstn[i], 0, 1/mu))
  powers1[i, ] <- apply(res.pow < 0.05, 1, mean)
}

# Pareto with c = 1, mu = 0.8 against shifted log-normal with meanlog = 0, sdlog = 1.25
mu <- 4/5
powers2 <- matrix(0, nrow = 4, ncol = length(lstn))
set.seed(32)
for (i in seq_along(lstn)) {
  res.pow <- replicate(n = N, expr = pop(lstn[i], 0, 1/mu))
  powers2[i, ] <- apply(res.pow < 0.05, 1, mean)
}

## Exponential against Weibull
# auxiliary function
pop <- function(n, sh) {
  ech <- rweibull(n, shape = sh, scale = 1)
  pvs <- vs.test(x = ech, densfun = 'dexp', param = 1, B = 1000)$p.value
  pks <- ks.test(x = ech, y = 'pexp', rate = 1)$p.value
  pad <- ad.test(x = ech, null = 'pexp', rate = 1)$p.value
  pcvm <- cvm.test(x = ech, null = 'pexp', rate = 1)$p.value
  return(c(pvs, pks, pad, pcvm))
}

# Exp(1) against Weibull(shape = 1.2, scale = 1)
powers3 <- matrix(0, nrow = 4, ncol = length(lstn))
set.seed(12)
for (i in seq_along(lstn)) {
  res.pow <- replicate(n = N, expr = pop(lstn[i], 1.2))
  powers3[i, ] <- apply(res.pow < 0.05, 1, mean)
}

# Exp(1) against Weibull(shape = 1.3, scale = 1)
powers4 <- matrix(0, nrow = 4, ncol = length(lstn))
set.seed(23)
for (i in seq_along(lstn)) {
  res.pow <- replicate(n = N, expr = pop(lstn[i], 1.3))
  powers4[i, ] <- apply(res.pow < 0.05, 1, mean)
}
/scratch/gouwar.j/cran-all/cranData/vsgoftest/inst/doc/vsgoftest_performances.R
## ----echo = FALSE------------------------------------------------------------- knitr::opts_chunk$set(comment = '') ## ----eval = FALSE------------------------------------------------------------- # install.packages('vsgoftest') ## ----echo = TRUE, eval = FALSE------------------------------------------------ # #Package devtools must be installed # devtools::install_github('pregnault/vsgoftest') ## ----------------------------------------------------------------------------- library('vsgoftest') set.seed(2) #set seed of PRNG samp <- rnorm(n = 100, mean = 0, sd = 1) #sampling from normal distribution entropy.estimate(x = samp, window = 8) #estimating entropy with window = 8 log(2*pi*exp(1))/2 #the exact value of entropy ## ----------------------------------------------------------------------------- sapply(1:10, function(w) entropy.estimate(x = samp, window =w)) ## ----------------------------------------------------------------------------- n <- 100 #sample size V <- sapply(1:(n/2 - 1), function(w) entropy.estimate(x = samp, window =w)) which.max(V) #Choose window that maximizes entropy ## ----------------------------------------------------------------------------- set.seed(5) n <- 100 #Sample size samp <- rpareto(n, c = 1, mu = 2) #sampling from Pareto distribution entropy.estimate(x = samp, window = 3) -log(2) + 3/2 #Exact value of entropy ## ----------------------------------------------------------------------------- set.seed(5) samp <- rnorm(50,2,3) vs.test(x = samp, densfun = 'dlaplace') ## ----------------------------------------------------------------------------- set.seed(4) vs.test(x = samp, densfun = 'dnorm') ## ----------------------------------------------------------------------------- set.seed(26) vs.test(x = samp, densfun = 'dnorm', param = c(2,3)) ## ----echo = TRUE, eval = TRUE, error = TRUE----------------------------------- set.seed(2) samp <- rnorm(50, -2, 1) vs.test(samp, densfun = 'dnorm', param = -2) ## ----------------------------------------------------------------------------- set.seed(1) samp <- rweibull(200, shape = 1.05, scale = 1) vs.test(samp, densfun = 'dexp') ## ----------------------------------------------------------------------------- set.seed(2) vs.test(samp, densfun = 'dexp', simulate.p.value = TRUE, B = 10000) ## ----------------------------------------------------------------------------- set.seed(63) vs.test(samp, densfun = 'dexp', delta = 5/30) ## ----------------------------------------------------------------------------- set.seed(8) samp <- rexp(30, rate = 3) vs.test(x = samp, densfun = "dlnorm") ## ----------------------------------------------------------------------------- vs.test(x = samp, densfun = "dlnorm", extend = TRUE) ## ----echo = TRUE, eval = TRUE, error = TRUE----------------------------------- samp <- c(samp, rep(4,3)) #add ties in the previous sample vs.test(x = samp, densfun = "dexp") ## ----------------------------------------------------------------------------- vs.test(x = samp, densfun = "dexp", extend = TRUE) ## ----echo = TRUE, eval = TRUE, error = TRUE----------------------------------- set.seed(84) ech <- rpareto(20, mu = 1/2, c = 1) vs.test(x = ech, densfun = 'dpareto', param = c(1/2, 1)) ## ----------------------------------------------------------------------------- data(contaminants) set.seed(1) vs.test(x = aluminium2, densfun = 'dpareto') ## ----echo = FALSE------------------------------------------------------------- knitr::opts_chunk$set(warning = FALSE) ## 
----------------------------------------------------------------------------- set.seed(1) vs.test(x = aluminium1, densfun = 'dlnorm') ## ----------------------------------------------------------------------------- set.seed(1) vs.test(x = aluminium2, densfun = 'dlnorm') ## ----------------------------------------------------------------------------- set.seed(1) vs.test(x = toluene, densfun = 'dlnorm', extend = TRUE, relax = TRUE) ## ----------------------------------------------------------------------------- set.seed(1) vs.test(x = log(toluene), densfun ='dnorm', extend = TRUE) ## ----------------------------------------------------------------------------- set.seed(1) vs.test(x = aluminium2, densfun = 'dpareto') ## ----------------------------------------------------------------------------- #Compute the MLE of parameters of Pareto dist. res.test <- vs.test(x = toluene, densfun = 'dpareto', extend = TRUE, relax = TRUE) #Test uniformity of transformed data set.seed(5) vs.test(x = ppareto(toluene, mu = res.test$estimate[1], c = res.test$estimate[2]), densfun ='dunif', param = c(0,1), extend = TRUE)
/scratch/gouwar.j/cran-all/cranData/vsgoftest/inst/doc/vsgoftest_tutorial.R
#' Get left singular vectors in a tibble
#'
#' @param fa A [vsp_fa()] object.
#' @param factors The specific columns to index into. The most reliable
#'   option here is to index with an integer vector of column indices,
#'   but you could also use a character vector if columns have been named.
#'   By default returns all factors/singular vectors.
#'
#' @return A [tibble::tibble()] with one row for each node, one column for
#'   each of the requested factors or singular vectors, plus an additional
#'   `id` column.
#'
#' @export
get_svd_u <- function(fa, factors = 1:fa$rank) {
  as_tibble(as.matrix(fa$u[, factors, drop = FALSE]), rownames = "id")
}

#' @export
#' @describeIn get_svd_u Get right singular vectors in a tibble
get_svd_v <- function(fa, factors = 1:fa$rank) {
  as_tibble(as.matrix(fa$v[, factors, drop = FALSE]), rownames = "id")
}

#' @export
#' @describeIn get_svd_u Get varimax Z factors in a tibble
get_varimax_z <- function(fa, factors = 1:fa$rank) {
  as_tibble(as.matrix(fa$Z[, factors, drop = FALSE]), rownames = "id")
}

#' @export
#' @describeIn get_svd_u Get varimax Y factors in a tibble
get_varimax_y <- function(fa, factors = 1:fa$rank) {
  as_tibble(as.matrix(fa$Y[, factors, drop = FALSE]), rownames = "id")
}

#' Get most important hubs for each Z factor
#'
#' @param hubs_per_factor The number of important nodes to get per
#'   latent factor. Defaults to `10`.
#'
#' @inheritParams get_svd_u
#'
#' @return A [tibble::tibble()] where each row corresponds to a single
#'   hub, with three columns:
#'
#'   - `id`: Node id of hub node
#'   - `factor`: Which factor that node is a hub for. Nodes can be hubs
#'     of multiple factors.
#'   - `loading`: The actual value of the hub's factor loading for that factor.
#'
#' @export
get_z_hubs <- function(fa, hubs_per_factor = 10, factors = 1:fa$rank) {
  stop_if_not_installed("dplyr")
  stop_if_not_installed("tidyr")

  fa %>%
    get_varimax_z(factors) %>%
    tidyr::gather(factor, loading, dplyr::contains("z"), -id) %>%
    dplyr::group_by(factor) %>%
    dplyr::slice_max(order_by = abs(loading), n = hubs_per_factor, with_ties = FALSE)
}

#' @export
#' @describeIn get_z_hubs Get most important hubs for each Y factor
get_y_hubs <- function(fa, hubs_per_factor = 10, factors = 1:fa$rank) {
  stop_if_not_installed("dplyr")
  stop_if_not_installed("tidyr")

  fa %>%
    get_varimax_y(factors) %>%
    tidyr::gather(factor, loading, dplyr::contains("y"), -id) %>%
    dplyr::group_by(factor) %>%
    dplyr::slice_max(order_by = abs(loading), n = hubs_per_factor, with_ties = FALSE)
}

#' Add Z factor loadings to node table of tidygraph
#'
#' @param graph A [tidygraph::tbl_graph] object.
#' @param fa A [vsp] object to extract varimax loadings from.
#' @param ... Ignored, included only for consistency.
#'
#' @return The same `graph` object with columns `factor1`, ..., `factor{rank}`
#'   in the table of node information.
#'
#' @export
bind_varimax_z <- function(graph, fa, ...) {
  stopifnot(inherits(graph, "tbl_graph"))
  graph <- graph %>%
    activate(nodes) %>%
    mutate(!!!get_varimax_z(fa))
  graph
}

#' @export
#' @describeIn bind_varimax_z Add Y factor loadings to node table of tidygraph
bind_varimax_y <- function(graph, fa, ...) {
  stopifnot(inherits(graph, "tbl_graph"))
  graph <- graph %>%
    activate(nodes) %>%
    mutate(!!!get_varimax_y(fa))
  graph
}

#' @export
#' @describeIn bind_varimax_z Add left singular vectors to node table of tidygraph
bind_svd_u <- function(graph, fa, ...) {
  stopifnot(inherits(graph, "tbl_graph"))
  graph <- graph %>%
    activate(nodes) %>%
    mutate(!!!get_svd_u(fa))
  graph
}

#' @export
#' @describeIn bind_varimax_z Add right singular vectors to node table of tidygraph
bind_svd_v <- function(graph, fa, ...) {
  stopifnot(inherits(graph, "tbl_graph"))
  graph <- graph %>%
    activate(nodes) %>%
    mutate(!!!get_svd_v(fa))
  graph
}
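# --- Illustrative usage (editor's sketch, not part of the package) ---
# Typical flow: fit vsp() on an adjacency matrix, then pull factors out as
# tibbles. The random graph here is purely illustrative.
if (interactive()) {
  set.seed(27)
  A <- matrix(rbinom(100^2, 1, 0.05), nrow = 100)
  fa <- vsp(A, rank = 3)
  get_varimax_z(fa)                   # all Z factors, one row per node
  get_z_hubs(fa, hubs_per_factor = 5) # top-5 hub nodes per Z factor
}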
/scratch/gouwar.j/cran-all/cranData/vsp/R/accessors.R
#' Find features most associated with cluster membership
#'
#' @param loadings An `n` by `k` matrix of weights that indicates how
#'   important the ith user is to the jth cluster, i.e., the `Z` or `Y`
#'   matrix calculated by [vsp()].
#'
#' @param features An `n` by `d` matrix of features measured for each
#'   node in the network.
#'
#' @param num_best An integer indicating how many of the top features
#'   for differentiating between loadings you want.
#'
#' @return A [tibble::tibble()] with one row per factor and columns
#'   `word1`, ..., `word{num_best}`, where the entry in row j and column i
#'   is the ith "most important" feature for factor j.
#'
#' @details See `vignette("bff")`.
#'
#' @export
bff <- function(loadings, features, num_best) {
  l1_normalize <- function(x) x / sum(x)

  # clip negative loadings: the contrast below treats loadings as
  # non-negative membership weights
  loadings[loadings < 0] <- 0
  k <- ncol(loadings)

  # normalize each cluster to a probability distribution (select one member at random)
  nc <- apply(loadings, 2, l1_normalize)
  nOutC <- apply(loadings == 0, 2, l1_normalize)

  inCluster <- sqrt(crossprod(features, nc))
  outCluster <- sqrt(crossprod(features, nOutC))

  # diff is d x k; element [i, j] indicates the importance of the i-th feature to the j-th block
  # contrast: sqrt(A) - sqrt(B), a variance-stabilizing transformation
  diff <- inCluster - outCluster

  diff %>%
    as.matrix() %>%
    as_tibble(rownames = "word") %>%
    pivot_longer(
      -word,
      names_to = "factor",
      values_to = "importance"
    ) %>%
    group_by(factor) %>%
    top_n(num_best, importance) %>%
    arrange(factor, desc(importance)) %>%
    mutate(
      rank = row_number()
    ) %>%
    ungroup() %>%
    pivot_wider(
      id_cols = factor,
      names_from = rank,
      names_prefix = "word",
      values_from = word
    )
}
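# --- Illustrative usage (editor's sketch, not part of the package) ---
# bff() pairs factor loadings with a node-level feature matrix, e.g. a
# document-term matrix for a document network. Both objects below are random
# stand-ins for illustration; the loadings include zeros so that the
# out-of-cluster distribution is well defined.
if (interactive()) {
  set.seed(17)
  loadings <- matrix(pmax(rnorm(100 * 3), 0), ncol = 3)  # e.g. Z from vsp()
  features <- matrix(rbinom(100 * 20, 1, 0.2), ncol = 20,
                     dimnames = list(NULL, paste0("term", 1:20)))
  bff(loadings, features, num_best = 5) # one row per factor, top-5 terms wide
}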
/scratch/gouwar.j/cran-all/cranData/vsp/R/bff.R
#' Create a vintage sparse factor analysis object
#'
#' `vsp_fa` objects are a subclass of [LRMF3::fa_like()], with additional
#' fields `u`, `d`, `v`, `transformers`, `R_U`, and `R_V`.
#'
#' @inheritParams LRMF3::fa_like
#'
#' @param u A [matrix()] of "left singular-ish" vectors.
#'
#' @param d A [numeric()] vector of "singular-ish" values.
#'
#' @param v A [matrix()] of "right singular-ish" vectors.
#'
#' @param transformers A list of transformations from the [invertiforms]
#'   package.
#'
#' @param R_U Varimax rotation matrix used to transform `u` into `Z`.
#' @param R_V Varimax rotation matrix used to transform `v` into `Y`.
#' @param rownames Identifying names for each row of the original
#'   data. Defaults to `NULL`, in which case each row is given its
#'   row number, left-padded with zeros, as a name.
#' @param colnames Identifying names for each column of the original
#'   data. Defaults to `NULL`, in which case each column is given its
#'   column number, left-padded with zeros, as a name.
#'
#' @return A `vsp_fa` object.
#'
vsp_fa <- function(
    u, d, v,
    Z, B, Y,
    transformers,
    R_U, R_V,
    rownames = NULL,
    colnames = NULL) {
  fa <- new_vsp_fa(
    Z = as.matrix(Z),
    B = as.matrix(B),
    Y = as.matrix(Y),
    u = as.matrix(u),
    d = d,
    v = as.matrix(v),
    transformers = transformers,
    R_U = as.matrix(R_U),
    R_V = as.matrix(R_V)
  )

  if (is.null(rownames)) {
    rownames <- paste0("row", left_padded_sequence(1:nrow(fa$u)))
  }

  if (is.null(colnames)) {
    colnames <- paste0("col", left_padded_sequence(1:nrow(fa$v)))
  }

  rownames(fa$Z) <- rownames
  rownames(fa$u) <- rownames

  colnames(fa$Z) <- paste0("z", left_padded_sequence(1:fa$rank))
  colnames(fa$u) <- paste0("u", left_padded_sequence(1:fa$rank))

  rownames(fa$B) <- paste0("z", left_padded_sequence(1:fa$rank))
  colnames(fa$B) <- paste0("y", left_padded_sequence(1:fa$rank))

  rownames(fa$Y) <- colnames
  rownames(fa$v) <- colnames

  colnames(fa$Y) <- paste0("y", left_padded_sequence(1:fa$rank))
  colnames(fa$v) <- paste0("v", left_padded_sequence(1:fa$rank))

  fa
}

#' Give the dimensions of Z factors informative names
#'
#' @param fa A [vsp_fa()] object.
#' @param names A character vector of new names for the Z/Y factors.
#'
#' @return A new [vsp_fa()] object, where the column names of `Z` and the
#'   row names of `B` have been set to `names` (for `set_z_factor_names`),
#'   and the column names of `B` and the column names of `Y` have been
#'   set to `names` (for `set_y_factor_names`).
#'
#' @export
set_z_factor_names <- function(fa, names) {
  if (length(names) != fa$rank) {
    stop(
      glue(
        "Incorrect number of Z factor names. Got {length(names)} but needed {fa$rank}."
      ),
      call. = FALSE
    )
  }
  colnames(fa$Z) <- names
  rownames(fa$B) <- names
  fa
}

#' @export
#' @describeIn set_z_factor_names Give the dimensions of Y factors informative names
set_y_factor_names <- function(fa, names) {
  if (length(names) != fa$rank) {
    stop(
      glue(
        "Incorrect number of Y factor names. Got {length(names)} but needed {fa$rank}."
      ),
      call. = FALSE
    )
  }
  colnames(fa$B) <- names
  colnames(fa$Y) <- names
  fa
}

new_vsp_fa <- function(u, d, v, Z, B, Y, transformers, R_U, R_V) {
  fa_like(
    Z = Z,
    B = B,
    Y = Y,
    subclasses = "vsp_fa",
    u = u,
    d = d,
    v = v,
    transformers = transformers,
    R_U = R_U,
    R_V = R_V
  )
}

#' @importFrom LRMF3 dim_and_class
#' @method print vsp_fa
#' @export
print.vsp_fa <- function(x, ...) {
  cat("Vintage Sparse PCA Factor Analysis\n\n")

  cat(glue("Rows (n): {nrow(x$u)}"), sep = "\n")
  cat(glue("Cols (d): {nrow(x$v)}"), sep = "\n")
  cat(glue("Factors (rank): {x$rank}"), sep = "\n")
  cat(glue("Lambda[rank]: {round(x$d[x$rank], 4)}"), sep = "\n")

  # cat("\nPre-Processing Options (TODO) \n\n")

  cat("Components\n\n")

  # keep the class printing lined up
  cat("Z:", dim_and_class(x$Z), "\n")
  cat("B:", dim_and_class(x$B), "\n")
  cat("Y:", dim_and_class(x$Y), "\n")
  cat("u:", dim_and_class(x$u), "\n")
  cat("d:", dim_and_class(x$d), "\n")
  cat("v:", dim_and_class(x$v), "\n\n")
}
/scratch/gouwar.j/cran-all/cranData/vsp/R/object.R
#' Create a pairs plot of select Z factors
#'
#' To avoid overplotting, plots data for a maximum of 1000 nodes. If there
#' are more than 1000 nodes, samples 1000 nodes randomly, proportional to
#' row norms (i.e. nodes with embeddings larger in magnitude are more likely
#' to be sampled).
#'
#' @inheritParams get_svd_u
#' @inheritDotParams GGally::ggpairs
#'
#' @import ggplot2
#'
#' @return A [ggplot2::ggplot()] plot or [GGally::ggpairs()] plot.
#'
#' @export
#'
plot_varimax_z_pairs <- function(fa, factors = 1:min(5, fa$rank), ...) {
  stop_if_not_installed("dplyr")
  stop_if_not_installed("GGally")
  stop_if_not_installed("purrr")

  fa %>%
    get_varimax_z(factors) %>%
    dplyr::select(-id) %>%
    dplyr::mutate(
      leverage = purrr::pmap_dbl(., sum)
    ) %>%
    dplyr::sample_n(min(nrow(.), 1000), weight = leverage^2) %>%
    dplyr::select(-leverage) %>%
    GGally::ggpairs(ggplot2::aes(alpha = 0.001), ...) +
    ggplot2::theme_minimal()
}

#' @describeIn plot_varimax_z_pairs Create a pairs plot of select Y factors
#' @export
plot_varimax_y_pairs <- function(fa, factors = 1:min(5, fa$rank), ...) {
  stop_if_not_installed("dplyr")
  stop_if_not_installed("GGally")
  stop_if_not_installed("purrr")

  fa %>%
    get_varimax_y(factors) %>%
    dplyr::select(-id) %>%
    dplyr::mutate(
      leverage = purrr::pmap_dbl(., sum)
    ) %>%
    dplyr::sample_n(min(nrow(.), 1000), weight = leverage^2) %>%
    dplyr::select(-leverage) %>%
    GGally::ggpairs(ggplot2::aes(alpha = 0.001), ...) +
    ggplot2::theme_minimal()
}

#' @describeIn plot_varimax_z_pairs Plot select left singular vectors
#' @export
plot_svd_u <- function(fa, factors = 1:min(5, fa$rank)) {
  stop_if_not_installed("dplyr")
  stop_if_not_installed("ggplot2")
  stop_if_not_installed("purrr")   # pmap_dbl() below
  stop_if_not_installed("scales")  # pretty_breaks() below
  stop_if_not_installed("tidyr")

  fa %>%
    get_svd_u(factors) %>%
    dplyr::select(-id) %>%
    dplyr::mutate(
      leverage = purrr::pmap_dbl(., sum)
    ) %>%
    dplyr::sample_n(min(nrow(.), 1000), weight = leverage^2) %>%
    dplyr::select(-leverage) %>%  # drop the sampling weights before plotting
    dplyr::mutate(node = row_number()) %>%
    tidyr::gather(eigen, value, -node) %>%
    ggplot2::ggplot(ggplot2::aes(node, value)) +
    ggplot2::geom_line() +
    ggplot2::facet_wrap(~eigen) +
    ggplot2::theme_minimal() +
    ggplot2::scale_x_continuous(breaks = scales::pretty_breaks())
}

#' @describeIn plot_varimax_z_pairs Plot select right singular vectors
#' @export
plot_svd_v <- function(fa, factors = 1:min(5, fa$rank)) {
  stop_if_not_installed("dplyr")
  stop_if_not_installed("ggplot2")
  stop_if_not_installed("purrr")   # pmap_dbl() below
  stop_if_not_installed("scales")
  stop_if_not_installed("tidyr")

  fa %>%
    get_svd_v(factors) %>%
    dplyr::select(-id) %>%
    dplyr::mutate(
      leverage = purrr::pmap_dbl(., sum)
    ) %>%
    dplyr::sample_n(min(nrow(.), 1000), weight = leverage^2) %>%
    dplyr::select(-leverage) %>%  # drop the sampling weights before plotting
    dplyr::mutate(node = row_number()) %>%
    tidyr::gather(eigen, value, -node) %>%
    ggplot2::ggplot(ggplot2::aes(node, value)) +
    ggplot2::geom_line() +
    ggplot2::facet_wrap(~eigen) +
    ggplot2::theme_minimal() +
    ggplot2::scale_x_continuous(breaks = scales::pretty_breaks())
}

#' Create a screeplot from a factor analysis object
#'
#' @param x A [vsp_fa()] object.
#' @inherit get_svd_u return
#' @param ... Ignored, included only for consistency with the S3 generic.
#'
#' @method screeplot vsp_fa
#' @export
#' @import ggplot2
#' @importFrom stats screeplot
screeplot.vsp_fa <- function(x, ...) {
  ggplot2::ggplot(data = NULL, ggplot2::aes(1:x$rank, x$d)) +
    ggplot2::geom_point() +
    ggplot2::labs(
      title = "Screeplot of graph spectrum",
      x = "Index",
      y = "Singular value"
    ) +
    ggplot2::expand_limits(x = 1, y = 0) +
    ggplot2::theme_minimal()
}

#' Plot the mixing matrix B
#'
#' @inherit get_svd_u params return
#'
#' @export
plot_mixing_matrix <- function(fa) {
  as_tibble(as.matrix(fa$B), rownames = "row") %>%
    tidyr::gather(col, value, -row) %>%
    ggplot2::ggplot(ggplot2::aes(x = col, y = row, fill = value)) +
    ggplot2::geom_tile() +
    ggplot2::scale_fill_gradient2() +
    ggplot2::theme_minimal()
}

#' Plot pairs of inverse participation ratios for singular vectors
#'
#' When the IPR of a given singular vector is O(1) rather than O(1 / n)
#' (the order expected when a unit-norm vector spreads its mass evenly
#' across n entries), this can indicate that the singular vector is
#' localizing on a small subset of nodes. Oftentimes this localization
#' indicates overfitting. If you see IPR values that are not close to zero
#' (where "close to zero" is something you sort of have to pick up over
#' time), then you need to do some further investigation to see if you have
#' localization, and whether that localization corresponds to overfitting.
#' Note, however, that not all localization is overfitting.
#'
#' @inherit get_svd_u params return
#'
#' @export
plot_ipr_pairs <- function(fa) {
  # inverse participation ratio of a unit-norm vector: the sum of its
  # entries raised to the fourth power
  ipr <- function(x) sum(x^4)

  ipr_u <- apply(fa$u, 2, ipr)
  ipr_v <- apply(fa$v, 2, ipr)

  ggplot2::ggplot(data = NULL) +
    ggplot2::aes(x = ipr_u, y = ipr_v) +
    ggplot2::geom_point(alpha = 0.5) +
    ggplot2::expand_limits(x = 0, y = 0) +
    ggplot2::labs(
      title = "Inverse participation ratios of singular vectors",
      x = "U (left singular vectors)",
      y = "V (right singular vectors)"
    ) +
    ggplot2::theme_minimal()
}
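# A minimal sketch of how these plotting helpers compose (not run; `fa` is
# again a hypothetical object fitted with vsp()). screeplot() and
# plot_ipr_pairs() return plain ggplot2 objects, so further layers can be
# added with `+`; the pairs helpers return GGally objects:
#
#   fa <- vsp(A, rank = 5)
#   screeplot(fa)                  # how quickly do the singular values decay?
#   plot_varimax_z_pairs(fa, 1:3)  # pairwise views of the first three Z factors
#   plot_mixing_matrix(fa)         # heatmap of the mixing matrix B
#   plot_ipr_pairs(fa) +           # points near the origin suggest no localization
#     ggplot2::labs(subtitle = "Check for localized singular vectors")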
/scratch/gouwar.j/cran-all/cranData/vsp/R/plots.R
#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
NULL

left_padded_sequence <- function(x) {
  original <- withr::with_options(
    c(scipen = 999),
    as.character(x)
  )

  max_digits <- max(vapply(original, nchar, integer(1)))
  formatC(x, width = max_digits, format = "d", flag = "0")
}

# returns +1 when skew is positive, -1 when skew is negative
skew_sign <- function(x) {
  sign(sum((x - mean(x))^3))
}

#' Make factors columnwise skew positive
#'
#' Given a factor-analysis-like object, flip
#' signs so that the columns of `Z` and `Y` are
#' skew positive. Note that this also causes
#' corresponding sign flips in `B`. This
#' helps with interpretability of factors.
#'
#' @param fa A [fa_like()] object.
#'
#' @return A new [fa_like()] object where the columns
#'   of `Z` and `Y` have positive skew, and that is otherwise
#'   equivalent to the original object.
#'
#' @export
#' @keywords internal
make_skew_positive <- function(fa) {
  if (!inherits(fa, "fa_like"))
    stop("`make_skew_positive` is only intended for `fa_like` objects.")

  Z_column_skew_signs <- apply(fa$Z, 2, skew_sign)
  Y_column_skew_signs <- apply(fa$Y, 2, skew_sign)

  S_Z <- Diagonal(n = ncol(fa$Z), x = Z_column_skew_signs)
  S_Y <- Diagonal(n = ncol(fa$Y), x = Y_column_skew_signs)

  # note that S_Z and S_Y are their own inverses

  fa$Z <- fa$Z %*% S_Z
  fa$B <- S_Z %*% fa$B %*% S_Y
  fa$Y <- fa$Y %*% S_Y

  # update the rotation matrices so that we still have
  # Z = sqrt(n) * U %*% R_U, etc

  fa$R_U <- fa$R_U %*% S_Z
  fa$R_V <- fa$R_V %*% S_Y

  # in some cases (e.g. when columns of Y or Z are constant) the skew
  # is zero, so check for >= 0 rather than > 0

  stopifnot(all(apply(fa$Z, 2, skew_sign) >= 0))
  stopifnot(all(apply(fa$Y, 2, skew_sign) >= 0))

  fa
}

stop_if_not_installed <- function(package) {
  if (!requireNamespace(package, quietly = TRUE)) {
    # `call. = FALSE` is an argument to stop(), not glue()
    stop(glue("Must install {package} for this functionality."), call. = FALSE)
  }
}

#' Safe L2 row normalization
#'
#' Helper function for Kaiser normalization that handles rows with zero (or
#' numerically zero) norm, which otherwise cause a divide-by-zero error
#' in the `stats::varimax()` implementation.
#'
#' @param x A matrix to row normalize.
#' @param eps Tolerance to use when assessing whether a squared L2 row norm
#'   is numerically larger or smaller than zero.
#'
#' @keywords internal
#'
#' @return The row-rescaled matrix.
#'
safe_row_l2_normalize <- function(x, eps = 1e-10) {
  sc <- drop(apply(x, 1L, function(y) sum(y^2)))
  sc[sc < eps] <- 1  # leave (numerically) zero-norm rows unscaled
  x / sqrt(sc)
}

utils::globalVariables(
  c(
    ".", "activate", "arrange", "desc", "element", "gather", "group_by",
    "id", "importance", "leverage", "loading", "mutate", "node", "nodes",
    "pivot_longer", "pivot_wider", "row_number", "sample_n", "select",
    "top_n", "ungroup", "value", "word"
  )
)
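# Quick illustrations of the helpers above (a sketch, shown as comments;
# both calls are safe to run interactively):
#
#   left_padded_sequence(c(1, 10, 100))
#   #> "001" "010" "100"
#
#   m <- matrix(c(3, 4, 0, 0), nrow = 2, byrow = TRUE)
#   safe_row_l2_normalize(m)
#   #> row 1 is rescaled to unit norm (0.6, 0.8); the all-zero row 2 passes
#   #> through unchanged instead of becoming NaN from division by zero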
/scratch/gouwar.j/cran-all/cranData/vsp/R/utils.R