#' Run a Linear Mixed Effects Model on all voxels of a NIfTI image within a mask and return parametric coefficient tables
#'
#'
#' This function runs a Linear Mixed Effects Model using the lmer() function.
#' The analysis will run in all voxels within the mask and will return the model fit for each voxel.
#' The function relies on lmerTest to create p-values using the Satterthwaite approximation.
#'
#'
#' @param image Input image of type 'nifti' or vector of path(s) to images. If multiple paths, the script will call mergeNiftis() and merge across time.
#' @param mask Input mask of type 'nifti' or path to mask. Must be a binary mask
#' @param fourdOut To be passed to mergeNiftis(); the path and file name (without suffix) at which to save the merged 4D file. Default (NULL) means the script won't write out a 4D image.
#' @param formula Must be a formula passed to lmer()
#' @param subjData Dataframe containing all the covariates used for the analysis
#' @param mc.preschedule Argument to be passed to mclapply, whether or not to preschedule the jobs. More info in parallel::mclapply
#' @param ncores Number of cores to use
#' @param ... Additional arguments passed to lmer()
#'
#' @keywords internal
#' @return Returns a list of coefficient tables (including standard errors and p-values) fitted to each voxel over the masked images passed to the function.
#' @export
#'
#'
#'
#' @examples
#'
#'
#' image <- oro.nifti::nifti(img = array(1:1600, dim =c(4,4,4,25)))
#' mask <- oro.nifti::nifti(img = array(c(rep(0,15),1), dim = c(4,4,4,1)))
#' set.seed(1)
#' covs <- data.frame(x = runif(25), id = rep(1:5,5))
#' fm1 <- "~ x + (1|id)"
#' models <- vlmerParam(image, mask, formula = fm1, subjData = covs, ncores = 1)
#'
vlmerParam <- function(image, mask , fourdOut = NULL, formula, subjData, mc.preschedule = TRUE, ncores = 1, ...) {
if (missing(image)) { stop("image is missing")}
if (missing(mask)) { stop("mask is missing")}
if (missing(formula)) { stop("formula is missing")}
if (missing(subjData)) { stop("subjData is missing")}
if (class(formula) != "character") { stop("formula class must be character")}
if (class(image) == "character" & length(image) == 1) {
image <- oro.nifti::readNIfTI(fname=image)
} else if (class(image) == "character" & length(image) > 1) {
image <- mergeNiftis(inputPaths = image, direction = "t", outfile = fourdOut)
}
if (class(mask) == "character" & length(mask) == 1) {
mask <- oro.nifti::readNIfTI(fname=mask)
}
imageMat <- ts2matrix(image, mask)
voxNames <- as.character(names(imageMat))
rm(image)
rm(mask)
gc()
print("Created time series to matrix")
m <- parallel::mclapply(voxNames,
FUN = listFormula, formula, mc.cores = ncores)
print("Created formula list")
timeIn <- proc.time()
imageMat <- cbind(imageMat, subjData)
print("Running test model")
foo <- base::do.call(lmerTest::lmer, list(formula = m[[1]], data=imageMat, ...))
print("Running parallel models")
model <- parallel::mclapply(m,
FUN = function(x, data, ...) {
foo <- base::do.call(lmerTest::lmer, list(formula = x, data=data, ...))
return(summary(foo)$coefficients)
}, data=imageMat, ..., mc.preschedule = mc.preschedule, mc.cores = ncores)
timeOut <- proc.time() - timeIn
print(timeOut[3])
print("Parallel Models Ran")
return(model)
}
|
/scratch/gouwar.j/cran-all/cranData/voxel/R/vlmerParam.R
|
#' Add noise / residual error to data
#'
#' @param x data
#' @param ruv list describing the magnitude of errors. List arguments: "proportional", "additive", "exponential".
#' @export
#' @examples
#' library(dplyr)
#' ipred <- c(10, 8, 6, 4, 2, 0) %>% add_noise(ruv = list(proportional = 0.1, additive = 0.2))
add_noise <- function(x, ruv = list(proportional = 0, additive = 0, exponential = 0)) {
if (is.null(ruv$proportional)) { ruv$proportional <- 0 }
if (is.null(ruv$additive)) { ruv$additive <- 0 }
if (is.null(ruv$exponential)) { ruv$exponential <- 0 }
x * (1 + rnorm(length(x), 0, ruv$proportional)) + rnorm(length(x), 0, ruv$additive) * exp(rnorm(length(x), 0, ruv$exponential))
}
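# A minimal usage sketch (not part of the package): with only a proportional
# term the noise scales with the prediction, with only an additive term the
# noise has a constant standard deviation.
# set.seed(42)
# pred <- c(100, 10, 1)
# add_noise(pred, ruv = list(proportional = 0.1))  # ~10% CV around pred
# add_noise(pred, ruv = list(additive = 0.5))      # constant SD of 0.5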
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/add_noise.R
|
#' Add sim index number
#'
#' @description Add simulation index number to simulation when not present
#' @param sim a data.frame containing the simulation data
#' @param id character specifying the column name in the data.frame
#' @param sim_label label to indicate simulation index (if available)
add_sim_index_number <- function (sim, id = "id", sim_label = "sim") { # for multiple simulations in a single dataframe, add an index number for every simulation
if(!is.null(sim_label) && sim_label %in% colnames(sim)) { # Keep simulation index column if already present
return(sim[[sim_label]])
}
sim[[id]] <- as.num(sim[[id]])
sim_id <- cumsum(unlist(sapply(rle(sim[[id]])$lengths, FUN = function(w) seq(length(w), w, 1))) < 2)
sim$sim <- 1
for(i in unique(sim[[id]])){
sim$sim[sim[[id]]==i] <- cumsum(stats::ave(1:length(sim_id[sim[[id]]==i]),
sim_id[sim[[id]]==i], FUN = function(h) 1:length(h))<2)
}
return(sim$sim)
}
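# A minimal sketch (not part of the package) illustrating the behaviour: when
# the id column restarts (1, 2, 1, 2, ...) a new simulation index is assigned,
# while an existing `sim` column would simply be returned as-is.
# sim <- data.frame(id = rep(c(1, 2), times = 3), dv = rnorm(6))
# add_sim_index_number(sim, id = "id")   # 1 1 2 2 3 3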
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/add_sim_index_number.R
|
#' Adds stratification to data set
#'
#' @param dat data.frame
#' @param strat vector of stratification variables
#' @param verbose verbosity (`TRUE` or `FALSE`)
add_stratification <- function (dat, strat, verbose = FALSE) {
if(is.null(strat)) {
dat$strat <- 1
} else {
if (all(strat %in% colnames(dat))) {
if(length(strat) == 1) {
dat$strat <- data.frame(dat)[,strat]
} else {
dat$strat <- ""
for(i in seq(strat)) {
if(i > 1) {
dat$strat <- paste0(dat$strat, ", ")
}
dat$strat <- paste0(dat$strat, data.frame(dat)[,strat[i]])
}
}
} else {
dat$strat <- 1
msg("Specified stratification column name not found, not performing stratification.", verbose)
}
}
if(class(dat$strat) != "factor") {
dat$strat <- as.factor(dat$strat)
}
return(dat)
}
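# A minimal sketch (not part of the package): one stratification column is used
# directly, two stratification columns are pasted together.
# dat <- data.frame(id = 1:4, sex = c("M", "F", "M", "F"), dose = c(10, 10, 20, 20))
# levels(add_stratification(dat, strat = "sex")$strat)            # "F" "M"
# levels(add_stratification(dat, strat = c("sex", "dose"))$strat) # "F, 10" "F, 20" "M, 10" "M, 20"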
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/add_stratification.R
|
#' Calculate appropriate bin separators for vpc
#'
#' This function calculates bin separators either using one of the binning
#' approaches available in the classInt package, such as `kmeans`, `jenks`,
#' `pretty` etc. Alternatively, a custom approach ("density") is available which
#' is based on finding the nadirs in the density function of the independent
#' variable. The default approach is k-means clustering.
#'
#' @param dat data frame
#' @param type binning type: one of the classInt styles ("jenks", "kmeans", "pretty",
#' "quantile", "hclust", "sd", "bclust", "fisher"), or "density", "time", "data",
#' "percentiles", or "none"
#' @param n_bins number of bins to use; either a positive integer or "auto". For
#' "density" the function might not return a solution with the exact number of
#' bins.
#' @param verbose show warnings and other messages (TRUE or FALSE)
#' @param ... arguments passed on to underlying binning functions
#' @return A vector of bin separators
#' @export
auto_bin <- function(dat, type = "kmeans", n_bins = 8, verbose = FALSE, ...) {
UseMethod("auto_bin")
}
#' @rdname auto_bin
#' @export
auto_bin.numeric <- function(dat, type = "kmeans", n_bins = 8, verbose = FALSE, ...) {
all_bins <- list()
l_bins <- c()
if (is.null(type) || type == "none") {
msg("No binning performed.", verbose)
return(unique(dat))
}
# use R's native binning approaches?
if(!is.null(type) && type %in% c("jenks", "kmeans", "pretty", "quantile", "hclust", "sd", "bclust", "fisher")) {
suppressWarnings({
if(class(n_bins) != "numeric" | is.null(n_bins)) {
bins <- classInt::classIntervals(dat, style = type)
} else {
bins <- classInt::classIntervals(dat, n = n_bins, style = type)
}
})
return(bins$brks)
}
if (n_bins == "auto") {
if(type == "percentiles") n_bins <- min(max(3, ceiling(length(dat)/40)), 15)
else {
msg("Automatic optimization of bin number is not available for this binning method, reverting to 8 bins.", verbose)
n_bins <- 8
}
}
n_bins <- n_bins + 1 # bin_separators
if(type != "time" & type != "data" & type != "percentiles") {
if (type == "density" || type == "auto") {
bws <- diff(range(dat)) * seq(from=0.01, to = .25, by=0.01)
for (i in seq(bws)) {
d <- density(dat, bw=bws[i])
all_bins[[i]] <- c(0, d$x[find_nadirs(d$y)], max(dat)*1.01)
l_bins[i] <- length(all_bins[[i]])
}
return(all_bins[[order(abs(l_bins - n_bins))[1]]]) # return closest to requested bins
}
stop("Specified binning method not recognized!")
} else {
if (type == "time") {
tmp <- levels(cut(x = unique(dat), breaks = n_bins, right = TRUE))
tmp <- gsub("\\(", "", tmp)
tmp <- gsub("\\]", "", tmp)
tmp2 <- unlist(strsplit(tmp, ","))
sel <- 1:(length(tmp2)/2)*2 - 1
bins <- c(as.num(tmp2[sel]), max(dat)*1.001)
return(bins)
}
if (type == "data") {
sorted <- sort(dat)
tmp <- levels(cut(x = 1:length(sorted), breaks = n_bins, right = TRUE))
tmp <- gsub("\\(", "", tmp)
tmp <- gsub("\\]", "", tmp)
tmp2 <- unlist(strsplit(tmp, ","))
sel <- 1:(length(tmp2)/2)*2 - 1
idx <- as.num(tmp2[sel])
idx[idx < 0] <- 0
bins <- c(sorted[idx], max(dat)*1.001)
return(bins)
}
if (type == "percentiles") {
bins <- quantile(dat, probs = seq(0,1,length.out = n_bins))
return(bins)
}
}
stop(paste0("Binning method ", type, " not implemented yet!"))
}
#' @rdname auto_bin
#' @export
auto_bin.data.frame <- function(dat, type = "kmeans", n_bins = 8, verbose = FALSE, ...) {
auto_bin(dat=dat[["idv"]], type, n_bins, verbose, ...)
}
find_nadirs <- function (x, thresh = 0) {
pks <- which(diff(sign(diff(x, na.pad = FALSE)), na.pad = FALSE) > 0) + 2
if (!missing(thresh)) {
pks[x[pks - 1] - x[pks] > thresh]
}
else pks
}
#' Function to bin data based on a vector of bin separators, e.g. for use in VPC
#'
#' @param x data
#' @param bins numeric vector specifying bin separators
#' @param idv variable in the data that specifies the independent variable (e.g. "time")
#' @param labeled whether a labeled factor instead of integers should be returned
#' @export
bin_data <- function(x, bins = c(0, 3, 5, 7), idv = "time", labeled = F) {
if(!labeled) {
x$bin <- cut(x[[idv]], bins, labels = FALSE, right=FALSE, include.lowest = TRUE)
} else {
x$bin <- cut(x[[idv]], bins, right=FALSE, include.lowest = TRUE)
}
return(x)
}
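# A minimal sketch (not part of the package) of how the two functions combine:
# compute bin separators from the independent variable, then assign each record
# to a bin.
# dat <- data.frame(time = rep(c(0.5, 1, 2, 4, 8, 12, 24), 10), dv = rlnorm(70))
# seps <- auto_bin(dat$time, type = "kmeans", n_bins = 4)
# binned <- bin_data(dat, bins = seps, idv = "time")
# table(binned$bin)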
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/binning.R
|
#' Check whether stratification columns are available
#'
#' @param data `data.frame` with observation or simulation data
#' @param stratify vector of stratification columns
#' @param type either `observation` or `simulation`
check_stratification_columns_available <- function(data, stratify, type = "observation") {
diffs <- setdiff(stratify, names(data))
if(length(diffs) >=1) {
stop(paste0("The following specified stratification columns were NOT found in ",type, " data: \n",
paste(diffs, collapse = ",")))
}
return(TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/check_stratification_columns_available.R
|
#' Compute Kaplan-Meier statistics
#'
#' @param dat data.frame with events
#' @param strat vector of stratification variables
#' @param reverse_prob reverse the probability (i.e. return `1-probability`)?
#' @param rtte_conditional `TRUE` (default) or `FALSE`. Compute the probability for each event newly (`TRUE`), or calculate the absolute probability (`FALSE`, i.e. the "probability of a 1st, 2nd, 3rd event etc" rather than the "probability of an event happening").
#' @param ci confidence interval to calculate, numeric vector of length 2
compute_kaplan <- function(
dat,
strat = "strat",
reverse_prob = FALSE,
rtte_conditional = TRUE,
ci = NULL) {
if(length(dat[[strat]]) == 0) {
dat$strat <- 1
}
strats <- unique(dat[[strat]])
tmp <- c()
include_ci <- FALSE
if(!is.null(ci)) {
include_ci <- TRUE
if(length(ci) == 2) { # when specified as c(0.05, 0.95)
ci = diff(ci)
}
} else {
ci = 0.95
}
for (i in seq(strats)) {
if(rtte_conditional) {
tmp1 <- dat[dat[[strat]] == strats[i],]
} else {
tmp1 <- dat
tmp1[[strat]] <- as.num(tmp1[[strat]])
idx1 <- tmp1[[strat]] < as.num(strats[i])
if(sum(idx1) > 0) tmp1[idx1,]$dv <- 0
idx2 <- tmp1[[strat]] <= as.num(strats[i])
if(sum(idx2) > 0) tmp1 <- tmp1[idx2,]
}
if(length(grep( "rtte", strats[1])) > 0 & i > 1) {
for (j in 1:i) {
tmp_j <- dat[dat[[strat]] == strats[j] & dat$dv == 0,]
tmp1 <- rbind(tmp1, tmp_j)
}
}
km_fit <- survival::survfit(survival::Surv(time = time, dv != 0) ~ 1, data = tmp1, conf.int = ci)
km_dat <- data.frame(time = km_fit$time, surv = km_fit$surv, strat = strats[i])
if(include_ci) {
km_dat <- data.frame(cbind(km_dat, lower=km_fit$lower, upper=km_fit$upper))
}
if(reverse_prob) {
km_dat$surv <- 1 - km_dat$surv
if(!is.null(ci)) {
km_dat$upper <- 1 - km_dat$upper
km_dat$lower <- 1 - km_dat$lower
}
}
first_rec <- data.frame(time=0, surv=1-as.numeric(reverse_prob), strat = strats[i])
if(include_ci) {
first_rec <- data.frame(cbind(first_rec, lower=1-as.numeric(reverse_prob), upper=1-as.numeric(reverse_prob)))
}
tmp <- rbind(tmp, rbind(first_rec, km_dat))
}
tmp$qmed <- tmp$surv
return(tmp)
}
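# A minimal sketch (not part of the package); compute_kaplan() expects columns
# `time`, `dv` (event indicator) and a stratification column (default "strat"),
# e.g. as created by add_stratification().
# events <- data.frame(time = c(1, 3, 4, 6, 8, 9), dv = c(1, 1, 0, 1, 0, 1))
# events <- add_stratification(events, strat = NULL)   # single stratum
# compute_kaplan(events, ci = c(0.05, 0.95))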
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/compute_kaplan.R
|
#' Compute KMMC statistics
#'
#' Kaplan-Meier Mean Covariate plots are a simulation-based diagnostic to study the influence of covariates and identify potential model misspecification.
#'
#' @param dat data.frame with events
#' @param strat vector of stratification variables
#' @param reverse_prob reverse the probability (i.e. return `1-probability`)?
#' @param kmmc variable to create the KMMC plot for.
compute_kmmc <- function(dat, strat = NULL, reverse_prob = FALSE, kmmc = "DOSE") {
# mostly kept variable names intact to maintain similarity to compute_kaplan
dat$kmmc <- dat[[kmmc]]
if (!is.null(strat)) {
strats <- unique(dat[[strat]])
tmp <- c()
for (i in seq(strats)) {
summid <- dat %>% dplyr::group_by_("id") %>% dplyr::mutate(covt = mean(kmmc))
t <- unique(summid$time)
km_fit <- data.frame(time = t[order(t)], surv=1)
for (j in seq(km_fit$time)) {
km_fit$surv[j] <- mean(summid[summid$time >= km_fit$time[j],]$covt)
}
if(reverse_prob) {
tmp <- rbind(tmp, data.frame(time = km_fit$time, surv = 1-km_fit$surv, strat = strats[i]))
} else {
tmp <- rbind(tmp, data.frame(time = km_fit$time, surv = km_fit$surv, strat = strats[i]))
}
}
return(tmp)
} else {
km_fit <- dat %>% dplyr::group_by(time) %>% dplyr::mutate(tmp = kmmc) %>% dplyr::summarise(surv=mean(tmp))
if(reverse_prob) {
data.frame(time = km_fit$time, surv = 1-km_fit$surv)
} else {
data.frame(time = km_fit$time, surv = km_fit$surv)
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/compute_kmmc.R
|
#' Create new vpc theme
#'
#' @param ... pass arguments to `new_vpc_theme`
#' @export
create_vpc_theme <- function(...) {
message("Note: the `create_vpc_theme()` function has been renamed to `new_vpc_theme()`. Please update your scripts, `create_vpc_theme()` will be deprecated in future releases.")
return(new_vpc_theme(...))
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/create_vpc_theme.R
|
#' A small rich dataset
#' @details
#' a list containing the obs and sim data for an example dataset to run a
#' simple vpc.
#' @examples
#' \dontrun{
#' vpc(simple_data$sim, simple_data$obs)
#' }
#' @docType data
"simple_data"
#' Simulated RTTE data (100x)
#'
#' An example dataset with simulated repeated time-to-event data (100 simulations)
"rtte_sim_nm"
#' Simulated RTTE data (1x)
#'
#' An example dataset with simulated repeated time-to-event data
#' @name rtte_obs_nm
"rtte_obs_nm"
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/datasets.R
|
#' Define data column defaults for various softwares
#'
#' @param sim simulated data
#' @param obs observed data
#' @param sim_cols list for mapping simulation data columns, e.g. `list(dv = "DV", id = "ID", idv = "TIME", pred="PRED")`
#' @param obs_cols list for mapping observation data columns, e.g. `list(dv = "DV", id = "ID", idv = "TIME", pred="PRED")`
#' @param software_type software type, one of `nonmem`, `phoenix`, `PKPDsim`
define_data_columns <- function(sim, obs, sim_cols, obs_cols, software_type) {
software_types <- c("nonmem", "phoenix", "PKPDsim")
if(software_type %in% software_types) {
if (software_type == "nonmem") {
obs_cols_default <- list(dv = "DV", id = "ID", idv = "TIME", pred = "PRED")
sim_cols_default <- list(dv = "DV", id = "ID", idv = "TIME", pred = "PRED", sim = "NSIM")
}
if (software_type == "phoenix") {
obs_cols_default <- list(dv = "COBS", id = "ID", idv = "TIME", pred = "PRED")
sim_cols_default <- list(dv = "COBS", id = "ID", idv = "TIME", pred = "PRED")
}
if (software_type == "PKPDsim") {
obs_cols_default <- list(dv = "y", id = "id", idv = "t")
sim_cols_default <- list(dv = "y", id = "id", idv = "t", pred="pred")
}
} else {
obs_cols_default <- list(dv = "dv", id = "id", idv = "time", pred = "pred")
sim_cols_default <- list(dv = "dv", id = "id", idv = "time", pred = "pred", sim = "sim")
}
obs_cols <- replace_list_elements(obs_cols_default, obs_cols)
sim_cols <- replace_list_elements(sim_cols_default, sim_cols)
return(list(sim = sim_cols, obs = obs_cols))
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/define_data_columns.R
|
#' Draw parameters from multivariate distribution
#'
#' @param ids vector of id numbers
#' @param n_sim number of simulations
#' @param theta theta vector
#' @param omega_mat omega matrix
#' @param par_names parameter names vector
draw_params_mvr <- function(ids, n_sim, theta, omega_mat, par_names = NULL) {
n_ids <- length(ids)
if (!is.null(par_names)) {
par <- data.frame(cbind(sim = rep(1:n_sim, each=n_ids),
id = rep(ids, n_sim),
rep(theta, each=n_sim*n_ids) * exp (MASS::mvrnorm(n=n_sim*n_ids, c(0,0,0), omega_mat))))
colnames(par) <- c("sim", "id", par_names)
return(par)
} else {
cat("Parameter names have to be supplied!")
}
}
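# A minimal sketch (not part of the package); note that the zero mean vector of
# the multivariate normal is hard-coded for three parameters, so theta,
# omega_mat and par_names (illustrative names below) should describe exactly
# three parameters.
# omega <- triangle_to_full(c(0.1, 0.05, 0.1, 0.01, 0.01, 0.1))
# pars <- draw_params_mvr(ids = 1:5, n_sim = 10,
#                         theta = c(5, 50, 1),
#                         omega_mat = omega,
#                         par_names = c("CL", "V", "KA"))
# head(pars)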
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/draw_params_mvr.R
|
filter_dv <- function(x, verbose = FALSE, ...) {
available_methods <- c("phx", "nonmem")
if(sum(available_methods %in% class(x)) > 0) {
f <- filter_dv_software[[class(x)[1]]]
do.call(f, args = list(x = x, verbose = verbose, ...))
} else {
x
}
}
filter_dv_software <- list(
"phx" = function(x, dv, verbose = FALSE, ...) {
msg("Filtering rows with no DV values", verbose)
x[!is.na(x[[dv]]),]
},
"nonmem" = function(x, verbose = FALSE, ...) {
if ("EVID" %in% names(x)){
msg("Filtering rows where EVID not 0", verbose)
x <- x[x[["EVID"]] == 0,]
}
if("MDV" %in% names(x)) {
msg("Filtering rows where MDV not 0", verbose)
x <- x[x[["MDV"]] == 0,]
}
if(sum(c("EVID", "MDV") %in% names(x)) == 0) {
msg("No MDV or EVID columns found to filter on", verbose)
}
return(x)
}
)
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/filter_dv.R
|
format_vpc_input_data <- function(dat, cols, lloq, uloq, strat, bins, log_y, log_y_min, what = "observed", verbose = FALSE, pred_corr = FALSE) {
if(cols[["id"]] %in% colnames(dat)) {
if ("id" %in% colnames(dat) &! cols$id == "id") {
colnames(dat)[match("id", colnames(dat))] <- "id.old"
}
colnames(dat)[match(cols$id, colnames(dat))] <- "id"
}
if(is.na(match("id", colnames(dat)))[1]) {
stop (paste0("No column for id indicator found in ", what, " data, can't continue! Available columns: ", paste(colnames(dat), collapse = " ")))
}
if(cols$dv %in% colnames(dat)) {
if ("dv" %in% colnames(dat) &! cols$dv == "dv") {
colnames(dat)[match("dv", colnames(dat))] <- "dv.old"
}
colnames(dat)[match(cols$dv, colnames(dat))] <- "dv"
}
if(is.na(match("dv", colnames(dat)))[1]) {
stop (paste0("No column for dependent variable found in ", what, " data, can't continue! Available columns: ", paste(colnames(dat), collapse = " ")))
}
if(cols$idv %in% colnames(dat)) {
if ("idv" %in% colnames(dat) &! cols$idv == "idv") {
colnames(dat)[match("idv", colnames(dat))] <- "idv.old"
}
colnames(dat)[match(cols$idv, colnames(dat))] <- "idv"
}
if(is.na(match("idv", colnames(dat)))[1]) {
stop (paste0("No column for indepentent variable found in ", what, " data, can't continue! Available columns: ", paste(colnames(dat), collapse = " ")))
}
if(pred_corr) {
if (!is.null(uloq)) { dat$dv[dat$dv > uloq] <- uloq }
if (!is.null(lloq)) { dat$dv[dat$dv < lloq] <- lloq }
} else {
if (!is.null(uloq)) { dat$dv[dat$dv > uloq] <- NA }
if (!is.null(lloq)) { dat$dv[dat$dv < lloq] <- NA }
}
if (log_y) {
dat$dv[dat$dv < log_y_min] <- log_y_min
}
dat <- add_stratification(dat, strat)
return(dat)
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/format_vpc_input_data.R
|
msg <- function(txt, verbose = FALSE) {
if(verbose) {
message(txt)
}
}
add_recurs <- function(x, n, max) {
x <- x + n
n <- n + 1
if (n <= max) {
x <- add_recurs(x, n, max)
}
x
}
locf <- function(S) {
L <- !is.na(S)
c(S[L][1], S[L])[cumsum(L)+1]
}
add_step <- function(dat = ., vars) {
dat$step <- 0
tmp <- dat[-1,]
tmp$step <- 1
tmp[,vars] <- dat[-length(dat[,1]), vars]
newdat <- data.frame(rbind(dat, tmp))
newdat %>% dplyr::arrange(bin, -step)
}
as.num <- function(x) { as.numeric(as.character(x)) }
convert_to_dense_grid <- function(dat, t = "t", id = "id", t_start = 0, t_step = 1, add = NULL) {
t = seq(from=t_start, to=max(dat$t), by=t_step)
tmp <- data.frame(cbind(id = rep(unique(dat$id), each = length(t)),
t = rep(t, times = length(unique(dat$id))) ) )
tmp$dv <- 0
id_t <- paste0(dat$id, "-", dat$t)
tmp[match(id_t, paste0(tmp$id,"-",tmp$t)),]$dv <- dat$dv
tmp$rtte <- 0
tmp[match(id_t, paste0(tmp$id,"-",tmp$t)),]$rtte <- 1
if (!is.null(add)) {
tmp2 <- merge(tmp, dat[,c("id", add)] %>% dplyr::group_by_("id") %>% dplyr::do(.[1,]), by = "id", all.y = FALSE)
} else {
tmp2 <- tmp
}
return(tmp2)
}
relative_times <- function (dat, simulation = FALSE) {
if (simulation) {
tmp <- dat %>% dplyr::group_by_("sim", "id")
} else {
tmp <- dat %>% dplyr::group_by_("id")
}
tmp2 <- tmp %>% dplyr::arrange_("time") %>% dplyr::mutate(time = c(time[1], diff(time)))
if (simulation) {
return(tmp2 %>% dplyr::arrange_("sim", "id", "time"))
} else {
return(tmp2 %>% dplyr::arrange_("id", "time"))
}
}
convert_from_dense_grid <- function (dat) {
## Note RK: only for a single trial, requires a loop or ddply for multiple subproblems
tmp <- dat %>% dplyr::group_by_("id")
if("rtte" %in% names(dat)) {
tmp <- tmp %>% dplyr::filter(rtte == 1)
}
tmp2 <- rbind(tmp %>% dplyr::filter(length(time) > 1) %>% dplyr::mutate(time = time - c(0,time[1:(length(time)-1)])),
tmp %>% dplyr::filter(length(time) == 1) )
return(tmp2 %>% dplyr::arrange_("id", "time"))
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/functions.R
|
# guess software package based on data
# @param software string specifying type of software
# @param x analysis data from software
# don't document with Roxygen, as this is an internal function
guess_software <- function(software, x) {
options <- c("auto", "nonmem", "phoenix", "PKPDsim")
software <- tolower(software)
if(!software %in% options) {
stop(paste("Please define one of the following software types:", paste(options, collapse=", ")))
}
if(software == "nonmem" | software == "phoenix") return(software)
# nonmem typically will have MDV and DV
software <- "other"
if(all(c("ID", "TIME") %in% names(x)) || all(c("ID", "DV") %in% names(x)) || all(c("MDV", "DV") %in% names(x)) || all(c("EVID", "DV") %in% names(x))) {
software <- "nonmem"
}
if("COBS" %in% names(x)) {
software <- "phoenix"
}
if("PKPDsim" %in% class(x)) {
software <- "PKPDsim"
}
return(software)
}
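# A minimal sketch (not part of the package): with software = "auto" the column
# names of the data determine the guess, otherwise the user-specified type wins.
# guess_software("auto", data.frame(ID = 1, TIME = 0, DV = 1, MDV = 0))  # "nonmem"
# guess_software("auto", data.frame(id = 1, time = 0, COBS = 1))         # "phoenix"
# guess_software("phoenix", data.frame(ID = 1, DV = 1))                  # "phoenix"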
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/guess_software.R
|
#' Create a customized VPC theme
#'
#' @param update list containing the plot elements to be updated. Run `new_vpc_theme()` with no arguments to show an overview of available plot elements.
#'
#' @details
#' This function creates a theme that customizes how the VPC looks, i.e. colors, fills, transparencies, linetypes and sizes, etc. The following arguments can be specified in the input list:
#' \itemize{
#' \item{obs_color}: {color for observations points}
#' \item{obs_size}: {size for observation points}
#' \item{obs_median_color}: {color for median observation line}
#' \item{obs_median_linetype}: {linetype for median observation line}
#' \item{obs_median_size}: {size for median observation line}
#' \item{obs_ci_fill}: {color for observation CI fill}
#' \item{obs_ci_color}: {color for observation CI lines}
#' \item{obs_ci_linetype}: {linetype for observation CI lines}
#' \item{obs_ci_size}: {size for observations CI lines}
#' \item{sim_pi_fill}: {fill color for simulated prediction interval areas}
#' \item{sim_pi_alpha}: {transparency for simulated prediction interval areas}
#' \item{sim_pi_color}: {color for simulated prediction interval lines}
#' \item{sim_pi_linetype}: {linetype for simulated prediction interval lines}
#' \item{sim_pi_size}: {size for simulated prediction interval lines}
#' \item{sim_median_fill}: {fill color for simulated median area}
#' \item{sim_median_alpha}: {transparency for simulated median area}
#' \item{sim_median_color}: {color for simulated median line}
#' \item{sim_median_linetype}: {linetype for simulated median line}
#' \item{sim_median_size}: {size for simulated median line}
#' \item{bin_separators_color}: {color for bin separator lines, NA for don't plot}
#' \item{bin_separators_location}: {where to plot bin separators ("t" for top, "b" for bottom)}
#' \item{loq_color}: {color of line showing limit of quantification}
#' }
#' @return A list with vpc theme specifiers
#' @export
#' @examples
#' theme1 <- new_vpc_theme(update = list(
#' obs_color = "red",
#' obs_ci_color = "#aa0000",
#' obs_alpha = .3,
#' sim_pi_fill = "#cc8833",
#' sim_pi_size = 2
#' ))
#' vpc(simple_data$sim, simple_data$obs, vpc_theme = theme1)
#'
new_vpc_theme <- function (update = NULL) {
tmp <- structure(list(
obs_color = "#000000",
obs_size = 1,
obs_median_color = "#000000",
obs_median_linetype = "solid",
obs_median_size = 1,
obs_alpha = .7,
obs_shape = 1,
obs_ci_color = "#000000",
obs_ci_linetype = "dashed",
obs_ci_fill = grDevices::rgb(0.5,0.5,0.5,0.2), ## only for TTE
obs_ci_size = .5,
sim_pi_fill = "#3388cc",
sim_pi_alpha = 0.15,
sim_pi_color = "#000000",
sim_pi_linetype = 'dotted',
sim_pi_size = 1,
sim_median_fill = "#3388cc",
sim_median_alpha = 0.3,
sim_median_color = "#000000",
sim_median_linetype = "dashed",
sim_median_size = 1,
bin_separators_color = "#000000",
loq_color = "#990000"
), class = "vpc_theme")
n <- names(tmp)
if(is.null(update)) {
# stop(paste0("Please specify a list with plot elements to update. Available elements: \n - ", paste(n, collapse="\n - ")))
return(tmp)
}
if(!is.null(update) & length(names(update)) > 0) {
for(i in seq(names(update))) {
if(names(update)[i] %in% n) {
tmp[[names(update)[i]]] <- update[[names(update)[i]]]
} else {
warning(paste0("`", names(update)[i],"` is not recognized as a plot element, ignoring."))
}
}
}
tmp
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/new_vpc_theme.R
|
#' Simulate PK data from a 1-compartment oral model
#'
#' @param t Time after dose
#' @param tau Dosing interval
#' @param dose Dose
#' @param ka Absorption rate
#' @param ke Elimination rate
#' @param cl Clearance
#' @param ruv Residual variability
#' @return A vector of predicted values, with or without added residual variability
#' @examples
#' dat1 <- vpc:::pk_oral_1cmt(t = c(0:72), tau = 24, dose = 120,
#' ka = 1, ke = 1, cl = 10)
#' dat2 <- vpc:::pk_oral_1cmt(t = c(0:72), tau = 24, dose = 120,
#' ka = 1, ke = 1, cl = 10,
#' ruv = list(proportional = 0.1, additive = 0.1))
pk_oral_1cmt <- function (t, tau = 24, dose=120, ka = 1, ke = 1, cl = 10, ruv = NULL) {
v = cl / ke
tmp <- (dose/v) * (ka/(ka-ke)) * (exp(-ke*t) - exp(-ka*(t)))
if(!is.null(ruv)) {
tmp <- add_noise (tmp, ruv)
}
tmp
}
#' Simulate PK data from a 1-compartment iv model
#'
#' @param t Time after dose
#' @param t_inf Infusion length
#' @param tau Dosing interval
#' @param dose Dose
#' @param CL Clearance
#' @param Vc Volume of distribution
#' @param ruv Residual variability
#' @return A vector of predicted values, with or without added residual variability
#' @examples
#' dat1 <- vpc:::pk_iv_1cmt(t = c(0:72), tau = 24, dose = 120,
#' CL = 5, Vc = 50)
#' dat2 <- vpc:::pk_iv_1cmt(t = c(0:72), tau = 24, dose = 120,
#' CL = 5, Vc = 50,
#' ruv = list(proportional = 0.1, additive = 0.1))
pk_iv_1cmt <- function (t, t_inf = 1, tau = 24, dose=120, CL = 0.345, Vc = 1.75, ruv = NULL) {
k <- CL / Vc
tmp <- c()
tmp <- c(tmp, (dose / (CL * t_inf)) * (1-exp(-k*t[t < t_inf])) )
tmp <- c(tmp, (dose / (CL * t_inf)) * (1-exp(-k*t_inf)) * exp(-k*(t[t >= t_inf] - t_inf)) )
if(!is.null(ruv)) {
tmp <- add_noise (tmp, ruv)
}
tmp
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/pk_functions.R
|
#' VPC plotting function
#'
#' This function performs no parsing of data; it just plots the already calculated statistics generated using one of the
#' `vpc` functions.
#'
#' @param db object created using the `vpc` function
#' @param show what to show in VPC (obs_dv, obs_ci, pi, pi_as_area, pi_ci, obs_median, sim_median, sim_median_ci)
#' @param vpc_theme theme to be used in VPC. Expects list of class vpc_theme created with function vpc_theme()
#' @param smooth "smooth" the VPC (connect bin midpoints) or show bins as rectangular boxes. Default is TRUE.
#' @param log_x Boolean indicating whether x-axis should be shown as logarithmic. Default is FALSE.
#' @param log_y Boolean indicating whether y-axis should be shown as logarithmic. Default is FALSE.
#' @param title title
#' @param xlab label for x axis
#' @param ylab label for y axis
#' @param verbose verbosity (T/F)
#' @export
#' @seealso \link{sim_data}, \link{vpc_cens}, \link{vpc_tte}, \link{vpc_cat}
#' @examples
#' ## See vpc.ronkeizer.com for more documentation and examples
#'
#' library(vpc)
#' vpc_db <- vpc(sim = simple_data$sim, obs = simple_data$obs, vpcdb = TRUE)
#' plot_vpc(vpc_db, title = "My new vpc", xlab = "Custom x label")
plot_vpc <- function(db,
show = NULL,
vpc_theme = NULL,
smooth = TRUE,
log_x = FALSE,
log_y = FALSE,
xlab = NULL,
ylab = NULL,
title = NULL,
verbose = FALSE) {
if(is.null(vpc_theme) || (class(vpc_theme) != "vpc_theme")) {
vpc_theme <- new_vpc_theme()
}
idv_as_factor <- is.factor(db$vpc_dat$bin)
if(db$type != "time-to-event") {
show <- replace_list_elements(show_default, show)
if(!is.null(db$stratify)) {
## rename "strat" to original stratification variable names
if(length(db$stratify) == 1) {
if(!is.null(db$aggr_obs)) colnames(db$aggr_obs)[match("strat", colnames(db$aggr_obs))] <- db$stratify[1]
if(!is.null(db$vpc_dat)) colnames(db$vpc_dat)[match("strat", colnames(db$vpc_dat))] <- db$stratify[1]
}
if(length(db$stratify) == 2) {
if(!is.null(db$aggr_obs)) {
colnames(db$aggr_obs)[match("strat1", colnames(db$aggr_obs))] <- db$stratify[1]
colnames(db$aggr_obs)[match("strat2", colnames(db$aggr_obs))] <- db$stratify[2]
}
if(!is.null(db$vpc_dat)) {
colnames(db$vpc_dat)[match("strat1", colnames(db$vpc_dat))] <- db$stratify[1]
colnames(db$vpc_dat)[match("strat2", colnames(db$vpc_dat))] <- db$stratify[2]
}
}
}
################################################################
## VPC for continuous, censored or categorical
## note: for now, tte-vpc is separated off, but need to unify
## this with generic plotting.
################################################################
if (!is.null(db$sim)) {
if(idv_as_factor) db$vpc_dat$bin_mid <- db$vpc_dat$bin
pl <- ggplot2::ggplot(db$vpc_dat, ggplot2::aes(x=bin_mid, group=1))
if(show$sim_median) {
pl <- pl + ggplot2::geom_line(ggplot2::aes(y=q50.med), colour=vpc_theme$sim_median_color, linetype=vpc_theme$sim_median_linetype, size=vpc_theme$sim_median_size)
}
if(show$pi_as_area) {
if (smooth) {
pl <- pl +
ggplot2::geom_ribbon(ggplot2::aes(x=bin_mid, ymin=q5.med, ymax=q95.med), alpha=vpc_theme$sim_median_alpha, fill = vpc_theme$sim_median_fill)
} else {
pl <- pl +
ggplot2::geom_rect(ggplot2::aes(xmin=bin_min, xmax=bin_max, ymin=q5.med, ymax=q95.med), alpha=vpc_theme$sim_median_alpha, fill = vpc_theme$sim_median_fill)
}
} else {
if(show$sim_median_ci) {
if (smooth) {
pl <- pl +
ggplot2::geom_ribbon(ggplot2::aes(x=bin_mid, ymin=q50.low, ymax=q50.up), alpha=vpc_theme$sim_median_alpha, fill = vpc_theme$sim_median_fill)
} else {
pl <- pl +
ggplot2::geom_rect(ggplot2::aes(xmin=bin_min, xmax=bin_max, ymin=q50.low, ymax=q50.up), alpha=vpc_theme$sim_median_alpha, fill = vpc_theme$sim_median_fill)
}
}
if (show$pi) {
pl <- pl +
ggplot2::geom_line(ggplot2::aes(x=bin_mid, y=q5.med), colour=vpc_theme$sim_pi_color, linetype=vpc_theme$sim_pi_linetype, size=vpc_theme$sim_pi_size) +
ggplot2::geom_line(ggplot2::aes(x=bin_mid, y=q95.med), colour=vpc_theme$sim_pi_color, linetype=vpc_theme$sim_pi_linetype, size=vpc_theme$sim_pi_size)
}
if(show$pi_ci && "q5.low" %in% names(db$vpc_dat)) {
if (smooth) {
pl <- pl +
ggplot2::geom_ribbon(ggplot2::aes(x=bin_mid, ymin=q5.low, ymax=q5.up), alpha=vpc_theme$sim_pi_alpha, fill = vpc_theme$sim_pi_fill) +
ggplot2::geom_ribbon(ggplot2::aes(x=bin_mid, ymin=q95.low, ymax=q95.up), alpha=vpc_theme$sim_pi_alpha, fill = vpc_theme$sim_pi_fill)
} else {
pl <- pl +
ggplot2::geom_rect(ggplot2::aes(xmin=bin_min, xmax=bin_max, y=q5.low, ymin=q5.low, ymax=q5.up), alpha=vpc_theme$sim_pi_alpha, fill = vpc_theme$sim_pi_fill) +
ggplot2::geom_rect(ggplot2::aes(xmin=bin_min, xmax=bin_max, y=q95.low, ymin=q95.low, ymax=q95.up), alpha=vpc_theme$sim_pi_alpha, fill = vpc_theme$sim_pi_fill)
}
}
}
} else {
pl <- ggplot2::ggplot(db$aggr_obs)
}
if(!is.null(db$obs)) {
if(idv_as_factor) db$aggr_obs$bin_mid <- db$aggr_obs$bin
if (show$obs_median) {
pl <- pl +
ggplot2::geom_line(data=db$aggr_obs, ggplot2::aes(x=bin_mid, y=obs50),
linetype=vpc_theme$obs_median_linetype,
colour=vpc_theme$obs_median_color,
size=vpc_theme$obs_median_size)
}
if(show$obs_ci && !is.null(db$aggr_obs[["obs5"]])) {
pl <- pl +
ggplot2::geom_line(data=db$aggr_obs, ggplot2::aes(x=bin_mid, y=obs5), linetype=vpc_theme$obs_ci_linetype, colour=vpc_theme$obs_ci_color, size=vpc_theme$obs_ci_size) +
ggplot2::geom_line(data=db$aggr_obs, ggplot2::aes(x=bin_mid, y=obs95), linetype=vpc_theme$obs_ci_linetype, colour=vpc_theme$obs_ci_color, size=vpc_theme$obs_ci_size)
}
if(show$obs_dv) {
pl <- pl + ggplot2::geom_point(data=db$obs, ggplot2::aes(x=idv, y = dv), size=vpc_theme$obs_size, colour=vpc_theme$obs_color, alpha = vpc_theme$obs_alpha, shape = vpc_theme$obs_shape)
}
}
bdat <- data.frame(cbind(x=db$bins, y=NA))
if(show$bin_sep && !idv_as_factor) {
pl <- pl +
ggplot2::geom_rug(data=bdat, sides = "t", ggplot2::aes(x = x, y = y), colour=vpc_theme$bin_separators_color)
}
if(!is.null(xlab)) {
pl <- pl + ggplot2::xlab(xlab)
} else {
pl <- pl + ggplot2::xlab(db$xlab)
}
if(!is.null(ylab)) {
pl <- pl + ggplot2::ylab(ylab)
} else {
pl <- pl + ggplot2::ylab(db$ylab)
}
if (log_x) {
if(!idv_as_factor) pl <- pl + ggplot2::scale_x_log10()
else warning("log_x option has no effect when the IDV is a factor ")
}
if (log_y) {
pl <- pl + ggplot2::scale_y_log10()
}
if(!is.null(db$stratify)) {
if(is.null(db$labeller)) db$labeller <- ggplot2::label_both
if(length(db$stratify) == 1) {
if (db$facet == "wrap") {
pl <- pl + ggplot2::facet_wrap(stats::reformulate(db$stratify[1], NULL), scales = db$scales,
labeller = db$labeller)
} else {
if(length(grep("row", db$facet))>0) {
pl <- pl + ggplot2::facet_grid(stats::reformulate(db$stratify[1], NULL), scales = db$scales,
labeller = db$labeller)
} else {
pl <- pl + ggplot2::facet_grid(stats::reformulate(".", db$stratify[1]), scales = db$scales,
labeller = db$labeller)
}
}
} else { # 2 grid-stratification
if (db$stratify[1] %in% c(colnames(db$vpc_dat), colnames(db$aggr_obs))) {
if(length(grep("row", db$facet))>0) {
pl <- pl + ggplot2::facet_grid(stats::reformulate(db$stratify[1], db$stratify[2]), scales = db$scales,
labeller = db$labeller)
} else {
pl <- pl + ggplot2::facet_grid(stats::reformulate(db$stratify[2], db$stratify[1]), scales = db$scales,
labeller = db$labeller)
}
} else { # only color stratification
if ("strat" %in% c(colnames(db$vpc_dat), colnames(db$aggr_obs))) {
# color stratification only
} else {
stop ("Stratification unsuccesful.")
}
}
}
}
if(!is.null(db$lloq)) {
pl <- pl + ggplot2::geom_hline(yintercept = db$lloq, colour=vpc_theme$loq_color)
}
if(!is.null(db$uloq)) {
pl <- pl + ggplot2::geom_hline(yintercept = db$uloq, colour=vpc_theme$loq_color)
}
if (!is.null(title)) {
pl <- pl + ggplot2::ggtitle(title)
}
pl <- pl + theme_plain()
return(pl)
} else {
################################################################
## VPC for time-to-event data
################################################################
show <- replace_list_elements(show_default_tte, show)
if(!is.null(db$stratify_pars)) {
## rename "strat" to original stratification variable names
if(length(db$stratify_pars) == 1) {
# "strat1" ==> "rtte"
if(!is.null(db$obs_km)) db$obs_km[[db$stratify_pars[1]]] <- as.factor(db$obs_km$strat)
if(!is.null(db$sim_km)) db$sim_km[[db$stratify_pars[1]]] <- as.factor(db$sim_km$strat)
if(!is.null(db$all_dat)) db$all_dat[[db$stratify_pars[1]]] <- as.factor(db$all_dat$strat)
}
if(length(db$stratify_pars) == 2) {
if(!is.null(db$obs_km)) {
db$obs_km[[db$stratify_pars[1]]] <- as.factor(db$obs_km$strat1)
db$obs_km[[db$stratify_pars[2]]] <- as.factor(db$obs_km$strat2)
}
if(!is.null(db$sim_km)) {
db$sim_km[[db$stratify_pars[1]]] <- as.factor(db$sim_km$strat1)
db$sim_km[[db$stratify_pars[2]]] <- as.factor(db$sim_km$strat2)
}
}
}
if(!is.null(db$obs_km)) db$obs_km$bin_mid <- c(0, diff(db$obs_km$time))
show$pi_as_area <- TRUE
if(!is.null(db$sim_km)) {
pl <- ggplot2::ggplot(db$sim_km, ggplot2::aes(x=bin_mid, y=qmed))
} else {
pl <- ggplot2::ggplot(db$obs_km, ggplot2::aes(x=bin_mid, y=qmed))
show$sim_median <- FALSE
show$sim_median_ci <- FALSE
show$pi_ci <- FALSE
show$pi_as_area <- FALSE
show$sim_km <- FALSE
}
if(show$sim_km) {
db$all_dat$strat_sim <- paste0(db$all_dat$strat, "_", db$all_dat$i)
transp <- min(.1, 20*(1/length(unique(db$all_dat$i))))
pl <- pl + ggplot2::geom_step(data = db$all_dat, ggplot2::aes(x=bin_mid, y=surv, group=strat_sim), colour=grDevices::rgb(0.2,.53,0.796, transp))
}
if(show$pi_as_area) {
if(smooth) {
if(!is.null(db$stratify_color)) {
pl <- pl + ggplot2::geom_ribbon(data = db$sim_km,
ggplot2::aes(ymin = qmin, ymax = qmax, fill = get(db$stratify_color[1])),
alpha=vpc_theme$sim_median_alpha)
} else {
pl <- pl + ggplot2::geom_ribbon(data = db$sim_km,
ggplot2::aes(ymin = qmin, ymax=qmax),
fill = vpc_theme$sim_median_fill,
alpha=vpc_theme$sim_median_alpha)
}
} else {
if(!is.null(db$stratify_color)) {
pl <- pl + ggplot2::geom_rect(data = db$sim_km,
ggplot2::aes(xmin=bin_min, xmax=bin_max, ymin=qmin, ymax=qmax, fill = get(db$stratify_color[1])),
alpha=vpc_theme$sim_median_alpha)
} else {
pl <- pl + ggplot2::geom_rect(data = db$sim_km,
ggplot2::aes(xmin=bin_min, xmax=bin_max, ymin=qmin, ymax=qmax),
alpha=vpc_theme$sim_median_alpha,
fill = vpc_theme$sim_median_fill)
}
}
} else {
if(!is.null(db$obs)) {
pl <- ggplot2::ggplot(db$obs_km)
}
}
if(!is.null(db$cens_dat) && nrow(db$cens_dat)>0) {
pl <- pl + ggplot2::geom_point(data=db$cens_dat,
ggplot2::aes(x=time, y = y), shape="|", size=2.5)
}
if(show$sim_median) {
if (smooth) {
geom_line_custom <- ggplot2::geom_line
} else {
geom_line_custom <- ggplot2::geom_step
}
pl <- pl + geom_line_custom(linetype="dashed")
}
if(!is.null(db$obs) && show$obs_ci) {
pl <- pl + ggplot2::geom_ribbon(
data=db$obs_km,
ggplot2::aes(x=time, ymin=lower, ymax=upper, group=strat),
fill=vpc_theme$obs_ci_fill, colour = NA)
}
if (!is.null(db$obs)) {
chk_tbl <- db$obs_km %>%
dplyr::group_by(strat) %>%
dplyr::summarise(t = length(time))
geom_step <- ggplot2::geom_step
if (sum(chk_tbl$t <= 1) > 0) { # it is not safe to use geom_step, so fall back to geom_line
geom_step <- ggplot2::geom_line
msg("Warning: some strata in the observed data had zero or one observations, using line instead of step plot. Consider using fewer strata (e.g. using the 'events' argument).", verbose)
}
if(!is.null(db$stratify_color)) {
pl <- pl + geom_step(data = db$obs_km,
ggplot2::aes(x=time, y=surv, colour=get(db$stratify_color[1])), size=.8)
} else {
pl <- pl + geom_step(data = db$obs_km,
ggplot2::aes(x=time, y=surv, group=strat), size=.8)
}
}
if(!is.null(db$stratify) || db$rtte) {
if(is.null(db$labeller)) db$labeller <- ggplot2::label_both
if (length(db$stratify_pars) == 1 | db$rtte) {
if (db$facet == "wrap") {
pl <- pl + ggplot2::facet_wrap(stats::reformulate(db$stratify_pars[1], NULL), scales = db$scales,
labeller = db$labeller)
} else {
if(length(grep("row", db$facet)) > 0) {
pl <- pl + ggplot2::facet_grid(stats::reformulate(db$stratify_pars[1], NULL), scales = db$scales,
labeller = db$labeller)
} else {
pl <- pl + ggplot2::facet_grid(stats::reformulate(".", db$stratify_pars[1]), scales = db$scales,
labeller = db$labeller)
}
}
} else {
if(length(grep("row", db$facet)) > 0) {
pl <- pl + ggplot2::facet_grid(stats::reformulate(db$stratify_pars[1], db$stratify_pars[2]), scales = db$scales,
labeller = db$labeller)
} else {
pl <- pl + ggplot2::facet_grid(stats::reformulate(db$stratify_pars[2], db$stratify_pars[1]), scales = db$scales,
labeller = db$labeller)
}
}
}
if(show$bin_sep) {
if(!(class(db$bins) == "logical" && db$bins == FALSE)) {
bdat <- data.frame(cbind(x = db$tmp_bins, y = NA))
pl <- pl + ggplot2::geom_rug(data=bdat, sides = "t", ggplot2::aes(x = x, y = y, group=NA), colour=vpc_theme$bin_separators_color)
}
}
if(!is.null(db$stratify_color)) {
pl <- pl + ggplot2::guides(fill = ggplot2::guide_legend(title=db$stratify_color[1]),
colour = ggplot2::guide_legend(title=db$stratify_color[1]))
}
if(!is.null(xlab)) {
pl <- pl + ggplot2::xlab(xlab)
} else {
pl <- pl + ggplot2::xlab(db$xlab)
}
if(!is.null(ylab)) {
pl <- pl + ggplot2::ylab(ylab)
} else {
pl <- pl + ggplot2::ylab(db$ylab)
}
return(pl)
}
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/plot_vpc.R
|
#' Calculate quantiles respecting the censored data
#'
#' @param x data
#' @param p quantile
#' @param limit censoring limit
#' @param cens censoring direction (left/right)
#'
#' @export
quantile_cens <- function(x, p = 0.5, limit = 1, cens = "left") {
if(cens %in% c("left", "lower", "bloq", "loq", "lloq")) {
x[is.na(x)] <- -Inf
x[x<limit] <- -Inf
} else {
x[is.na(x)] <- Inf
x[x>limit] <- Inf
}
q <- quantile(x, p)
ifelse(q %in% c(Inf, -Inf), NA, q)
}
#' Calculate percentiles below / above lloq / uloq
#'
#' @param x data
#' @param limit censoring limit
#' @param cens censoring direction (left/right)
loq_perc <- function(x, limit = 1, cens = "left") {
if(cens %in% c("left", "lower", "bloq", "loq", "lloq")) {
(sum(x < limit, na.rm=TRUE) + sum(is.na(x))) / length(x)
} else {
(sum(x > limit, na.rm=TRUE) + sum(is.na(x))) / length(x)
}
}
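# A minimal sketch (not part of the package): left-censored values (below the
# limit, or NA) are set to -Inf before taking the quantile, and a quantile that
# ends up at -Inf/Inf is reported as NA rather than an imputed value.
# x <- c(NA, 0.3, 2, 5, 8, 10)
# quantile_cens(x, p = 0.5, limit = 1, cens = "left")   # 3.5 (uncensored region)
# quantile_cens(x, p = 0.2, limit = 1, cens = "left")   # NA  (censored region)
# loq_perc(x, limit = 1, cens = "left")                 # 1/3 of records censored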
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/quantile_cens.R
|
#' NONMEM output table import function
#'
#' @description Quickly import NONMEM output tables into R.
#' Function taken from the `modelviz` package by Benjamin Guiastrennec.
#' When both \code{skip} and \code{header} are \code{NULL},
#' \code{read_table_nm} will automatically detect the optimal
#' settings to import the tables. When more than one file is
#' provided for the same NONMEM run, they will be combined into
#' a single \code{data.frame}.
#'
#' @param file full file name
#' @param skip number of lines to skip before reading data
#' @param header logical value indicating whether the file contains the names
#' of the variables as its first line
#' @param rm_duplicates logical value indicating whether duplicated columns should be removed
#' @param nonmem_tab logical value indicating to the function whether the file is a
#' table or a nonmem additional output file.
#'
#' @return A \code{data.frame}
#' @examples
#' \dontrun{
#' data <- read_table_nm(file = '../models/pk/sdtab101')
#' }
#' @export
read_table_nm <- function(
file = NULL,
skip = NULL,
header = NULL,
rm_duplicates = FALSE,
nonmem_tab = TRUE) {
# Check inputs
if(is.null(file)) {
stop('Argument \"file\" required.')
}
if(!any(file.exists(file))) {
stop('File not found.')
} else {
file <- file[file.exists(file)]
}
if(nonmem_tab) {
# If auto mode required
if(is.null(skip) & is.null(header)) {
test <- readLines(file[1], n = 3)
skip <- ifelse(grepl('TABLE NO', test[1]), 1, 0)
header <- ifelse(grepl('[a-zA-Z]', test[2]), TRUE, FALSE)
}
# Import data
tab_file <- do.call('cbind', lapply(file, readr::read_table,
skip = skip, col_names = header))
tab_file <- as.data.frame(apply(tab_file, MARGIN = 2, FUN = as.numeric))
# Drop rows with NA (in simtab)
tab_file <- na.omit(tab_file)
# Correct bug in the headers
if(header) {
colnames(tab_file)[grepl('\n',colnames(tab_file))] <-
gsub('\n.+', '', colnames(tab_file)[grepl('\n', colnames(tab_file))])
}
} else {
# Search for final results only
skip <- max(grep('TABLE NO', readLines(file[1])))
# Import all files
tab_file <- do.call('cbind', lapply(file, read.table, skip = skip,
header = FALSE, fill = TRUE, as.is = TRUE))
colnames(tab_file) <- tab_file[1, ]
tab_file <- suppressWarnings(as.data.frame(apply(tab_file[-1, ], 2, as.numeric)))
}
if(rm_duplicates) {
tab_file <- tab_file[, !duplicated(colnames(tab_file))]
}
return(tab_file)
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/read_table_nm.R
|
#' Replace list elements by name
#'
#' @param list original list
#' @param replacement replacement list
#' @details
#' Finds and replaces list elements by name and issues a warning if an
#' element is not available in the original list. This is a local duplicate
#' of the PKPDmisc copy for the VPC package to reduce dependency on PKPDmisc
#' at this time.
#' @examples
#' \dontrun{
#' list <- list(ipred = "ipred", dv = "dv", idv = "idv", "pred" = "pred")
#' replacement <- list(dv = "conc", idv = "time")
#' list <- replace_list_elements(list, replacement)
#' }
#' @export
replace_list_elements <- function(list, replacement) {
missing <- which(!names(replacement) %in% names(list))
if(length(missing) != 0) {
warning(paste("Nothing named: ", paste(names(replacement)[missing], collapse= ", ", "found to replace") ))
replacement <- replacement[-missing]
}
list[names(replacement)] <- lapply(names(replacement), function(x) list[[x]] <- replacement[[x]])
return(list)
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/replace_list_elements.R
|
#' Defaults for show argument
#'
#' @export
show_default <- list (
obs_dv = FALSE,
obs_ci = TRUE,
obs_median = TRUE,
sim_median = FALSE,
sim_median_ci = TRUE,
pi = FALSE,
pi_ci = TRUE,
pi_as_area = FALSE,
bin_sep = TRUE,
sim_km = FALSE,
obs_cens = TRUE
)
#' Defaults for show argument for TTE VPC
#'
#' @export
show_default_tte <- list (
obs_dv = FALSE,
obs_ci = FALSE,
obs_median = TRUE,
sim_median = FALSE,
sim_median_ci = TRUE,
pi = FALSE,
pi_ci = TRUE,
pi_as_area = FALSE,
bin_sep = TRUE,
sim_km = FALSE,
obs_cens = TRUE
)
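# A minimal sketch (not part of the package): individual elements can be
# overridden via the `show` argument of the plotting functions; the remaining
# entries fall back to these defaults through replace_list_elements().
# show <- replace_list_elements(show_default, list(obs_dv = TRUE, pi_ci = FALSE))
# show$obs_dv   # TRUE
# show$obs_ci   # TRUE (unchanged default)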
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/show_default.R
|
#' Simulate data based on a model and parameter distributions
#'
#' @param design a design dataset. See example
#' @param model A function with the first argument the simulation design, i.e. a dataset with the columns ... The second argument to this function is a dataset with parameters for every individual. This can be supplied by the user, or generated by this sim_data if theta and omega_mat are supplied.
#' @param theta vector of fixed effect parameters
#' @param omega_mat vector of between subject random effects, specified as lower triangle
#' @param par_names A character vector linking the parameters in the model to the variables in the dataset. See example.
#' @param par_values parameter values
#' @param draw_iiv draw between subject random effects?
#' @param error see example
#' @param n number of simulations to perform
#' @return a vector of simulated dependent variables (for use in the VPC plotting function)
#' @family aggregate functions
#' @seealso \code{\link{vpc}}
#' @export
#' @details
#' This function generates the simulated dependent values for use in the VPC plotting function.
sim_data <- function (design = cbind(id = c(1,1,1), idv = c(0,1,2)),
model = function(x) { return(x$alpha + x$beta) },
theta,
omega_mat,
par_names,
par_values = NULL,
draw_iiv = "mvrnorm",
error = list(proportional = 0, additive = 0, exponential = 0),
n=100) {
if (is.null(par_values)) {
param <- draw_params_mvr( # draw parameter values. can also be just population values, or specified manually ()
ids = 1:n,
n_sim = n,
theta,
omega_mat = triangle_to_full(omega_mat),
par_names = par_names)
} else {
param <- par_values
}
sim_des <- do.call("rbind", replicate(n, design, simplify = FALSE))
sim_des$sim <- rep(1:n, each=nrow(design))
sim_des$join <- paste(sim_des$sim, sim_des$id, sep="_")
param$join <- paste(param$sim, param$id, sep="_")
tmp <- dplyr::as_tibble(merge(sim_des, param,
by.x="join", by.y="join"))
tmp_pred <- cbind(data.frame(design), matrix(rep(theta, each=nrow(design)), ncol=length(theta)))
colnames(tmp_pred)[(length(tmp_pred)-length(par_names)) + 1:length(par_names)] <- par_names
tmp$dv <- add_noise(model(tmp), ruv = error)
tmp$pred <- rep(model(tmp_pred), n)
colnames(tmp) <- gsub("\\.x", "", colnames(tmp))
tmp %>%
dplyr::arrange_("sim", "id", "time")
}
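# A minimal sketch (not part of the package), assuming a design with columns
# `id` and `time` and using pk_oral_1cmt() defined elsewhere in this package;
# the parameter names (ka, ke, cl) are illustrative only.
# design <- data.frame(id = rep(1:5, each = 7),
#                      time = rep(c(0, 1, 2, 4, 8, 12, 24), 5))
# sim <- sim_data(design,
#                 model = function(x) pk_oral_1cmt(t = x$time, dose = 100,
#                                                  ka = x$ka, ke = x$ke, cl = x$cl),
#                 theta = c(2.77, 0.07, 0.04),
#                 omega_mat = c(0.09,
#                               0.02, 0.02,
#                               0.01, 0.01, 0.03),   # lower triangle
#                 par_names = c("ka", "ke", "cl"),
#                 error = list(additive = 0.1),
#                 n = 100)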
sim_data_tte <- function (fit, t_cens = NULL, n = 100) {
fit$coefficients <- as.list(fit$coefficients)
dat <- data.frame(model.matrix(fit))
for (i in seq(fit$coefficients)) { fit$coefficients[[i]] <- as.numeric (fit$coefficients[[i]]) }
fact <- as.matrix(attr(fit$terms, "factors"))
parm <- t(fact) %*% as.numeric(fit$coefficients)
tmp.single <- data.frame (
par = exp(as.numeric(fit$coefficients[1]) + as.matrix(dat[,rownames(parm)]) %*% parm),
dv = 1
)
tmp <- do.call("rbind", replicate(n, tmp.single, simplify = FALSE))
tmp$sim <- rep(1:n, each=nrow(tmp.single))
if (!fit$dist %in% c("exponential", "weibull")) {
cat (paste("Simulation of ", fit$dist, "distribution not yet implemented, sorry."))
return()
}
if (fit$dist == "exponential") {
tmp$t = rweibull(nrow(dat) * n, shape = 1, scale = tmp$par)
# or using: tmp$t = rexp(length(design$id), 1/tmp$par)
}
if (fit$dist == "weibull") {
# annoyingly, survreg and rweibull mix up the shape/scale parameter names and also take the inverse!!!
tmp$t = rweibull(nrow(dat) * n, shape = 1/fit$scale, scale = tmp$par)
}
if (sum(tmp$t > t_cens) > 0) {
tmp[tmp$t > t_cens,]$t <- t_cens
}
out <- c()
for (i in 1:n) {
km_fit <- compute_kaplan(tmp[tmp$sim == i,])
out <- rbind(out, cbind(i, km_fit$time, km_fit$surv))
}
colnames(out) <- c("sim", "time", "dv")
return(dplyr::as_tibble(data.frame(out)))
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/sim_data.R
|
#' A nicer default theme for ggplot2
#'
#' @examples
#' vpc(simple_data$sim, simple_data$obs) + theme_plain()
#'
#' @export
theme_plain <- function () {
ggplot2::theme(
text = ggplot2::element_text(family="mono"),
plot.title = ggplot2::element_text(family="sans", size = 16, vjust = 1.5),
axis.title.x = ggplot2::element_text(family="sans",vjust=-0.25),
axis.title.y = ggplot2::element_text(family="sans"),
legend.background = ggplot2::element_rect(fill = "white"),
#legend.position = c(0.14, 0.80),
panel.grid.major = ggplot2::element_line(colour = "#e5e5e5"),
panel.grid.minor = ggplot2::element_blank(),
panel.background = ggplot2::element_rect(fill = "#efefef", colour = NA),
strip.background = ggplot2::element_rect(fill = "#444444", colour = NA),
strip.text = ggplot2::element_text(face="bold", colour = "white")
)
}
#' Empty ggplot2 theme
#'
#' @examples
#' vpc(simple_data$sim, simple_data$obs) + theme_empty()
#'
#' @export
theme_empty <- function () {
ggplot2::theme(panel.grid.major = ggplot2::element_blank(),
panel.grid.minor = ggplot2::element_blank(),
panel.background = ggplot2::element_blank(),
axis.line = ggplot2::element_line(colour = "black"))
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/themes.R
|
#' Lower to full triangle
#'
#' @description Convert the lower triangle of a covariance matrix to a full matrix object
#' @param vect the lower triangle of a covariance matrix
triangle_to_full <- function (vect) {
for (i in 1:100) { # find the size of the matrix
if (length(vect) == add_recurs(0,0,i)) {
nr = i
}
}
add_recurs <- function(x, n, max) {
x <- x + n
n <- n + 1
if (n <= max) {
x <- add_recurs(x, n, max)
}
x
}
k_given_i_j <- function(x , y ) ifelse( y<x, x*(x-1)/2 + y, y*(y-1)/2 + x )
k_mat <- function(p) outer( 1:p, 1:p, k_given_i_j )
return (matrix(vect[ k_mat( nr ) ] , nrow = nr ))
}
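# A minimal sketch (not part of the package): the lower triangle is supplied
# row-wise, i.e. (1,1), (2,1), (2,2), (3,1), (3,2), (3,3), ...
# triangle_to_full(c(0.1,
#                    0.05, 0.2))          # 2 x 2 covariance matrix
# triangle_to_full(c(0.1,
#                    0.05, 0.2,
#                    0.01, 0.02, 0.3))    # 3 x 3 covariance matrix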
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/triangle_to_full.R
|
#' VPC package
#'
#' Create Visual Predictive Checks in R
#'
#' @docType package
#' @name vpc-package
#' @author Ron Keizer \email{ronkeizer@@gmail.com}
#' @importFrom stats density median model.matrix na.omit quantile rnorm rweibull step time
#' @importFrom utils head read.table tail
## to avoid warnings related to dplyr/ggplot usage:
globalVariables(c(".", "comp", "strat", "strat2", "bin", "pred", "dv", "idv", "q5", "q50",
"q95", "q5.med", "q5.low", "q5.up", "q50.med", "q50.low", "q50.up",
"q95.med", "q95.low", "q95.up", "obs.med", "obs.low", "obs.up",
"obs5", "obs50", "obs95", "last_obs", "id",
"mn_idv", "value", "mn_idv", "ploq", "bin_mid", "bin_min",
"bin_max", "surv", "qmed", "strat_sim", "qmin", "qmax", "strat_color",
"y", "lower", "upper", "x", "classIntervals", "idx", "obs", "rtte",
"sim"))
NULL
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/vpc-package.R
|
#' VPC function
#'
#' Creates a VPC plot from observed and simulation data
#' @param sim this is usually a data.frame with simulated data, containing the independent and dependent variable, a column indicating the individual, and possibly covariates. E.g. load in from NONMEM using \link{read_table_nm}. However it can also be an object like an nlmixr or xpose object
#' @param obs a data.frame with observed data, containing the independent and dependent variable, a column indicating the individual, and possibly covariates. E.g. load in from NONMEM using \link{read_table_nm}
#' @param psn_folder instead of specifying "sim" and "obs", specify a PsN-generated VPC-folder
#' @param bins either "density", "time", or "data", "none", or one of the approaches available in classInt::classIntervals() such as "jenks" (default) or "pretty", or a numeric vector specifying the bin separators.
#' @param n_bins when using the "auto" binning method, what number of bins to aim for
#' @param bin_mid either "mean" for the mean of all timepoints (default) or "middle" to use the average of the bin boundaries.
#' @param obs_cols observation dataset column names (list elements: "dv", "idv", "id", "pred")
#' @param sim_cols simulation dataset column names (list elements: "dv", "idv", "id", "pred", "sim")
#' @param show what to show in VPC (obs_dv, obs_ci, pi, pi_as_area, pi_ci, obs_median, sim_median, sim_median_ci)
#' @param software name of software platform using (e.g. nonmem, phoenix)
#' @param stratify character vector of stratification variables. Only 1 or 2 stratification variables can be supplied.
#' @param pred_corr perform prediction-correction?
#' @param pred_corr_lower_bnd lower bound for the prediction-correction
#' @param pi simulated prediction interval to plot. Default is c(0.05, 0.95),
#' @param ci confidence interval to plot. Default is (0.05, 0.95)
#' @param uloq Number or NULL indicating upper limit of quantification. Default is NULL.
#' @param lloq Number or NULL indicating lower limit of quantification. Default is NULL.
#' @param log_y Boolean indicating whether y-axis should be shown as logarithmic. Default is FALSE.
#' @param log_y_min minimal value when using log_y argument. Default is 1e-3.
#' @param xlab label for x axis
#' @param ylab label for y axis
#' @param title title
#' @param smooth "smooth" the VPC (connect bin midpoints) or show bins as rectangular boxes. Default is TRUE.
#' @param vpc_theme theme to be used in VPC. Expects list of class vpc_theme created with function vpc_theme()
#' @param facet either "wrap", "columns", or "rows"
#' @param scales either "fixed" (default), "free_y", "free_x" or "free"
#' @param labeller ggplot2 labeller function to be passed to underlying ggplot object
#' @param vpcdb Boolean whether to return the underlying vpcdb rather than the plot
#' @param verbose show debugging information (TRUE or FALSE)
#' @param ... Other arguments sent to other methods (like xpose or nlmixr); Note these arguments are not used in the default vpc and are ignored by the default method.
#' @return a list containing calculated VPC information (when vpcdb=TRUE), or a ggplot2 object (default)
#' @export
#' @seealso \link{sim_data}, \link{vpc_cens}, \link{vpc_tte}, \link{vpc_cat}
#' @examples
#'
#' ## See vpc.ronkeizer.com for more documentation and examples
#' library(vpc)
#'
#' # Basic commands:
#' vpc(sim = simple_data$sim, obs = simple_data$obs)
#' vpc(sim = simple_data$sim, obs = simple_data$obs, lloq = 20)
#'
#'@export
vpc <- function(sim, ...){
UseMethod("vpc")
}
#' @rdname vpc
#'@export
vpc.default <- function(sim, ...){
call <- as.list(match.call(expand.dots=TRUE))[-1];
do.call(utils::getFromNamespace("vpc_vpc","vpc"), call, envir = parent.frame(1))
}
#' @rdname vpc
#'@export
vpc_vpc <- function(sim = NULL,
obs = NULL,
psn_folder = NULL,
bins = "jenks",
n_bins = "auto",
bin_mid = "mean",
obs_cols = NULL,
sim_cols = NULL,
software = "auto",
show = NULL,
stratify = NULL,
pred_corr = FALSE,
pred_corr_lower_bnd = 0,
pi = c(0.05, 0.95),
ci = c(0.05, 0.95),
uloq = NULL,
lloq = NULL,
log_y = FALSE,
log_y_min = 1e-3,
xlab = NULL,
ylab = NULL,
title = NULL,
smooth = TRUE,
vpc_theme = NULL,
facet = "wrap",
scales = "fixed",
labeller = NULL,
vpcdb = FALSE,
verbose = FALSE, ...) {
if(!is.null(psn_folder)) {
if(is.null(obs)) {
if(verbose) {
message("Reading oberved data...")
}
obs <- read_table_nm(paste0(psn_folder, "/m1/", dir(paste0(psn_folder, "/m1"), pattern="original.npctab")[1]))
}
if(is.null(sim)) {
if(verbose) {
message("Reading simulated data...")
}
sim <- read_table_nm(paste0(psn_folder, "/m1/", dir(paste0(psn_folder, "/m1"), pattern="simulation.1.npctab")[1]))
}
software <- "nonmem"
}
if(is.null(obs) & is.null(sim)) {
stop("At least a simulation or an observation dataset are required to create a plot!")
}
if(verbose) {
message("Configuring and initializing...")
}
if (!is.null(obs)) {
software_type <- guess_software(software, obs)
} else {
software_type <- guess_software(software, sim)
}
if(!is.null(facet)) {
if(! facet %in% c("wrap", "grid", "columns", "rows")) {
stop("`facet` argument needs to be one of `wrap`, `columns`, or `rows`.")
}
if(facet == "grid") facet <- "rows"
}
### Added by Satyaprakash Nayak ---
if(!is.null(scales)) {
if(! scales %in% c("fixed", "free_x", "free_y", "free")) {
stop("`scales` argument needs to be one of `fixed`, `free_y`, `free_x` or `free`.")
}
if(scales == "fixed") scales <- "fixed"
}
###---
## software specific parsing, if necessary
if (software_type == "PKPDsim") {
if (!is.null(obs)) {
if("obs" %in% obs$comp) {
obs <- obs %>% dplyr::filter(comp == "obs")
}
obs <- data.frame(obs)
}
if (!is.null(sim)) {
if("obs" %in% sim$comp) {
sim <- sim %>% dplyr::filter(comp == "obs")
}
sim <- data.frame(sim)
}
}
## define what to show in plot
show <- replace_list_elements(show_default, show)
## define column names
cols <- define_data_columns(sim, obs, sim_cols, obs_cols, software_type)
if(!is.null(obs)) {
old_class <- class(obs)
class(obs) <- c(software_type, old_class)
}
if(!is.null(sim)) {
old_class <- class(sim)
class(sim) <- c(software_type, old_class)
}
## checking whether stratification columns are available
if(!is.null(stratify)) {
if(verbose) {
message("Stratifying oberved data...")
}
if(!is.null(obs)) {
check_stratification_columns_available(obs, stratify, "observation")
}
if(!is.null(sim)) {
check_stratification_columns_available(sim, stratify, "simulation")
}
}
## Currently we can't handle both LLOQ and ULOQ
if(!is.null(uloq) && !is.null(lloq)) {
stop("Sorry, currently the vpc function cannot handle both upper and lower limit of quantification. Please specify either `lloq` or `uloq`.")
}
## parse data into specific format
if(!is.null(obs)) {
if(verbose) {
message("Parsing observed data...")
}
obs <- filter_dv(obs, verbose)
obs <- format_vpc_input_data(obs, cols$obs, lloq, uloq, stratify, bins, log_y, log_y_min, "observed", verbose, pred_corr)
}
if(!is.null(sim)) {
if(verbose) {
message("Parsing simulated data...")
}
sim <- filter_dv(sim, verbose)
if((!is.null(lloq) || !is.null(uloq)) && pred_corr) {
message("Prediction-correction cannot be used together with censored data (<LLOQ or >ULOQ). VPC plot will be shown for non-censored data only!")
sim <- format_vpc_input_data(sim, cols$sim, lloq, uloq, stratify, bins, log_y, log_y_min, "simulated", verbose, pred_corr)
} else {
sim <- format_vpc_input_data(sim, cols$sim, NULL, NULL, stratify, bins, log_y, log_y_min, "simulated", verbose, pred_corr)
}
}
if(pred_corr) {
uloq <- NULL
lloq <- NULL
}
labeled_bins <- bins[1] == "percentiles"
if (class(bins) != "numeric") {
if(!is.null(obs)) {
bins <- auto_bin(obs, bins, n_bins)
} else { # get from sim
bins <- auto_bin(sim, bins, n_bins)
}
if (is.null(bins)) {
msg("Automatic binning unsuccessful, try increasing the number of bins, or specify vector of bin separators manually.", verbose)
}
}
bins <- unique(bins)
if(verbose) message(paste0("Binning: ", paste(bins, collapse=' ')))
if(!is.null(obs)) {
obs <- bin_data(obs, bins, "idv", labeled = labeled_bins)
}
if(!is.null(sim)) {
sim <- bin_data(sim, bins, "idv", labeled = labeled_bins)
}
if(pred_corr) {
if(!is.null(obs) & !cols$obs$pred %in% names(obs)) {
msg("Warning: Prediction-correction: specified pred-variable not found in observation dataset, trying to get from simulated dataset...", verbose)
if (!cols$obs$pred %in% names(sim)) {
stop("Error: Prediction-correction: specified pred-variable not found in simulated dataset, not able to perform pred-correction!")
} else {
obs <- obs %>% dplyr::ungroup()
obs[[cols$obs$pred]] <- unlist(sim[1:length(obs$id), cols$sim$pred])
msg ("OK", verbose)
}
} else {
if (!cols$sim$pred %in% names(sim)) {
stop("Warning: Prediction-correction: specified pred-variable not found in simulated dataset, not able to perform pred-correction!")
}
}
if(!is.null(obs)) {
obs$pred <- obs[[cols$obs$pred]]
}
if(!is.null(sim)) {
sim$pred <- sim[[cols$sim$pred]]
}
}
if(!is.null(obs)) {
if(pred_corr) {
if(verbose) message("Performing prediction-correction on observed data...")
obs <- obs %>% dplyr::group_by(strat, bin) %>% dplyr::mutate(pred_bin = median(as.num(pred)))
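      ## prediction-correction: rescale each non-zero-PRED observation, relative to
      ## pred_corr_lower_bnd, by the ratio of the bin-median prediction (pred_bin)
      ## to the individual prediction (pred)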
obs[obs$pred != 0,]$dv <- pred_corr_lower_bnd + (obs[obs$pred != 0,]$dv - pred_corr_lower_bnd) * (obs[obs$pred != 0,]$pred_bin - pred_corr_lower_bnd) / (obs[obs$pred != 0,]$pred - pred_corr_lower_bnd)
}
}
if(!is.null(sim)) {
sim$sim <- add_sim_index_number(sim, id = "id", sim_label=sim_cols$sim)
if(pred_corr) {
if(verbose) message("Performing prediction-correction on simulated data...")
sim <- sim %>% dplyr::group_by(strat, bin) %>% dplyr::mutate(pred_bin = median(pred))
sim[sim$pred != 0,]$dv <- pred_corr_lower_bnd + (sim[sim$pred != 0,]$dv - pred_corr_lower_bnd) * (sim[sim$pred != 0,]$pred_bin - pred_corr_lower_bnd) / (sim[sim$pred != 0,]$pred - pred_corr_lower_bnd)
}
}
if(!is.null(sim)) {
if(verbose) message("Calculating statistics for simulated data...")
aggr_sim <- sim %>%
dplyr::group_by(strat, sim, bin) %>%
dplyr::summarise(
q5 = quantile(dv, pi[1]),
q50 = quantile(dv, 0.5),
q95 = quantile(dv, pi[2]),
mn_idv = mean(idv)
)
vpc_dat <- aggr_sim %>% dplyr::group_by(strat, bin) %>%
dplyr::summarise(
q5.low = quantile(q5, ci[1]),
q5.med = quantile(q5, 0.5),
q5.up = quantile(q5, ci[2]),
q50.low = quantile(q50, ci[1]),
q50.med = quantile(q50, 0.5),
q50.up = quantile(q50, ci[2]),
q95.low = quantile(q95, ci[1]),
q95.med = quantile(q95, 0.5),
q95.up = quantile(q95, ci[2]),
bin_mid = mean(mn_idv)
)
vpc_dat$bin_min <- rep(bins[1:(length(bins)-1)], length(unique(vpc_dat$strat)))[vpc_dat$bin]
vpc_dat$bin_max <- rep(bins[2:length(bins)], length(unique(vpc_dat$strat)))[vpc_dat$bin]
if(bin_mid == "middle") {
vpc_dat$bin_mid <- apply(cbind(vpc_dat$bin_min, vpc_dat$bin_max), 1, mean)
}
} else {
vpc_dat <- NULL
}
if(!is.null(obs)) {
if(verbose) {
message("Calculating statistics for observed data...")
}
tmp1 <- obs %>% dplyr::group_by(strat,bin)
if(!is.null(lloq) || !is.null(uloq)) {
if(!is.null(uloq)) { limit <- uloq; cens = "right" }
if(!is.null(lloq)) { limit <- lloq; cens = "left" }
aggr_obs <- tmp1 %>%
dplyr::summarise(
obs5 = quantile_cens(dv, pi[1], limit = limit, cens = cens),
obs50 = quantile_cens(dv, 0.5, limit = limit, cens = cens),
obs95 = quantile_cens(dv, pi[2], limit = limit, cens = cens),
bin_mid = mean(idv)
)
} else {
aggr_obs <- tmp1 %>%
dplyr::summarise(
obs5 = quantile(dv, pi[1]),
obs50 = quantile(dv, 0.5),
obs95 = quantile(dv, pi[2]),
bin_mid = mean(idv)
)
}
aggr_obs$bin_min <- rep(bins[1:(length(bins)-1)], length(unique(aggr_obs$strat)) )[aggr_obs$bin]
aggr_obs$bin_max <- rep(bins[2:length(bins)], length(unique(aggr_obs$strat)) )[aggr_obs$bin]
if(bin_mid == "middle") {
aggr_obs$bin_mid <- apply(cbind(aggr_obs$bin_min, aggr_obs$bin_max), 1, mean)
}
} else {
aggr_obs <- NULL
}
if(is.null(xlab)) {
xlab <- cols$obs$idv
}
if(is.null(ylab)) {
ylab <- cols$obs$dv
}
if(!is.null(stratify)) {
if(length(stratify) == 2) {
vpc_dat$strat1 <- unlist(strsplit(as.character(vpc_dat$strat), ", "))[(1:length(vpc_dat$strat)*2)-1]
vpc_dat$strat2 <- unlist(strsplit(as.character(vpc_dat$strat), ", "))[(1:length(vpc_dat$strat)*2)]
aggr_obs$strat1 <- unlist(strsplit(as.character(aggr_obs$strat), ", "))[(1:length(aggr_obs$strat)*2)-1]
aggr_obs$strat2 <- unlist(strsplit(as.character(aggr_obs$strat), ", "))[(1:length(aggr_obs$strat)*2)]
}
}
# data combined and handed off to separate plotting function
if(verbose & !vpcdb) {
message("Creating plot...")
}
vpc_db <- list(sim = sim,
vpc_dat = vpc_dat,
smooth = smooth,
stratify = stratify,
aggr_obs = aggr_obs,
obs = obs,
bins = bins,
facet = facet,
scales = scales,
labeller = labeller,
lloq = lloq,
uloq = uloq,
type = "continuous",
xlab = xlab,
ylab = ylab)
if(vpcdb) {
return(vpc_db)
} else {
pl <- plot_vpc(vpc_db,
show = show,
vpc_theme = vpc_theme,
smooth = smooth,
log_y = log_y,
title = title)
return(pl)
}
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/vpc.R
|
#' VPC function for categorical
#'
#' Creates a VPC plot from observed and simulation data for categorical variables.
#'
#' @param sim a data.frame with observed data, containing the independent and dependent variable, a column indicating the individual, and possibly covariates. E.g. load in from NONMEM using \link{read_table_nm}
#' @param obs a data.frame with observed data, containing the independent and dependent variable, a column indicating the individual, and possibly covariates. E.g. load in from NONMEM using \link{read_table_nm}
#' @param psn_folder instead of specifying "sim" and "obs", specify a PsN-generated VPC-folder
#' @param bins either "density", "time", or "data", "none", or one of the approaches available in classInterval() such as "jenks" (default) or "pretty", or a numeric vector specifying the bin separators.
#' @param n_bins when using the "auto" binning method, what number of bins to aim for
#' @param bin_mid either "mean" for the mean of all timepoints (default) or "middle" to use the average of the bin boundaries.
#' @param obs_cols observation dataset column names (list elements: "dv", "idv", "id", "pred")
#' @param sim_cols simulation dataset column names (list elements: "dv", "idv", "id", "pred")
#' @param show what to show in VPC (obs_ci, pi, pi_as_area, pi_ci, obs_median, sim_median, sim_median_ci)
#' @param software name of software platform using (e.g. nonmem, phoenix)
#' @param ci confidence interval to plot. Default is (0.05, 0.95)
#' @param uloq Number or NULL indicating upper limit of quantification. Default is NULL.
#' @param lloq Number or NULL indicating lower limit of quantification. Default is NULL.
#' @param plot Boolean indicating whether to plot the ggplot2 object after creation. Default is TRUE.
#' @param xlab label for x-axis
#' @param ylab label for y-axis
#' @param title title
#' @param smooth "smooth" the VPC (connect bin midpoints) or show bins as rectangular boxes. Default is TRUE.
#' @param vpc_theme theme to be used in VPC. Expects list of class vpc_theme created with function vpc_theme()
#' @param facet either "wrap", "columns", or "rows"
#' @param labeller ggplot2 labeller function to be passed to underlying ggplot object
#' @param vpcdb boolean whether to return the underlying vpcdb rather than the plot
#' @param verbose show debugging information (TRUE or FALSE)
#' @return a list containing calculated VPC information (when vpcdb=TRUE), or a ggplot2 object (default)
#' @export
#' @seealso \link{sim_data}, \link{vpc}, \link{vpc_tte}, \link{vpc_cens}
#' @examples
#'
#' ## See vpc.ronkeizer.com for more documentation and examples
#' library(vpc)
#'
#' # simple function to simulate categorical data for single individual
#' sim_id <- function(id = 1) {
#' n <- 10
#' logit <- function(x) exp(x) / (1+exp(x))
#' data.frame(id = id, time = seq(1, n, length.out = n),
#' dv = round(logit((1:n) - n/2 + rnorm(n, 0, 1.5))) )
#' }
#' ## simple function to simulate categorical data for a trial
#' sim_trial <- function(i = 1, n = 20) { # function to simulate categorical data for a trial
#' data.frame(sim = i, do.call("rbind", lapply(1:n, sim_id)))
#' }
#'
#' ## simulate single trial for 20 individuals
#' obs <- sim_trial(n = 20)
#'
#' ## simulate 200 trials of 20 individuals
#' sim <- do.call("rbind", lapply(1:200, sim_trial, n = 20))
#'
#' ## Plot categorical VPC
#' vpc_cat(sim = sim, obs = obs)
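#'
#' ## A hedged sketch (not run): return the underlying VPC data instead of a plot
#' \dontrun{
#' db <- vpc_cat(sim = sim, obs = obs, vpcdb = TRUE)
#' head(db$vpc_dat)
#' }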
vpc_cat <- function(sim = NULL,
obs = NULL,
psn_folder = NULL,
bins = "jenks",
n_bins = "auto",
bin_mid = "mean",
obs_cols = NULL,
sim_cols = NULL,
software = "auto",
show = NULL,
ci = c(0.05, 0.95),
uloq = NULL,
lloq = NULL,
xlab = NULL,
ylab = NULL,
title = NULL,
smooth = TRUE,
vpc_theme = NULL,
facet = "wrap",
labeller = NULL,
plot = TRUE,
vpcdb = FALSE,
verbose = FALSE) {
if(is.null(obs) & is.null(sim)) {
stop("At least a simulation or an observation dataset are required to create a plot!")
}
if(!is.null(psn_folder)) {
    if(is.null(obs)) {
obs <- read_table_nm(paste0(psn_folder, "/m1/", dir(paste0(psn_folder, "/m1"), pattern="original.npctab")[1]))
}
    if(is.null(sim)) {
sim <- read_table_nm(paste0(psn_folder, "/m1/", dir(paste0(psn_folder, "/m1"), pattern="simulation.1.npctab")[1]))
}
software = "nonmem"
}
if (!is.null(obs)) {
software_type <- guess_software(software, obs)
} else {
software_type <- guess_software(software, sim)
}
## define what to show in plot
show <- replace_list_elements(show_default, show)
## define column names
cols <- define_data_columns(sim, obs, sim_cols, obs_cols, software_type)
if(!is.null(obs)) {
old_class <- class(obs)
class(obs) <- c(software_type, old_class)
}
if(!is.null(sim)) {
old_class <- class(sim)
class(sim) <- c(software_type, old_class)
}
## parse data into specific format
if(!is.null(obs)) {
obs <- filter_dv(obs, verbose)
obs <- format_vpc_input_data(obs, cols$obs, lloq, uloq, strat = NULL, bins, FALSE, 0, "observed", verbose)
}
if(!is.null(sim)) {
sim <- filter_dv(sim, verbose)
sim <- format_vpc_input_data(sim, cols$sim, lloq, uloq, strat = NULL, bins, FALSE, 0, "simulated", verbose)
sim$sim <- add_sim_index_number(sim, id = "id")
}
if (class(bins) != "numeric") {
if(!is.null(obs)) {
bins <- auto_bin(obs, bins, n_bins)
} else { # get from sim
bins <- auto_bin(sim, bins, n_bins)
}
if (is.null(bins)) {
msg("Automatic binning unsuccessful, try increasing the number of bins, or specify vector of bin separators manually.", verbose)
}
}
bins <- unique(bins)
if(!is.null(obs)) {
obs <- bin_data(obs, bins, "idv")
}
if(!is.null(sim)) {
sim <- bin_data(sim, bins, "idv")
}
## parsing
fact_perc <- function(x, fact) { sum(x == fact) / length(x) } # below lloq, default
obs$dv <- as.factor(obs$dv)
lev <- levels(obs$dv)
if (!is.null(sim)) {
tmp1 <- sim %>%
dplyr::group_by(sim, bin)
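    ## per simulation and bin, compute the proportion of observations in each category
    ## (each level of dv); the categories are later relabelled as "strata" so each one
    ## is plotted as its own line/panel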
for (i in seq(lev)) {
if (i == 1) {
aggr_sim <- tmp1 %>%
dplyr::summarise(fact_perc(dv, lev[i]))
} else {
suppressMessages({
aggr_sim <- dplyr::bind_cols(aggr_sim, tmp1 %>%
dplyr::summarise(fact_perc(dv, lev[i])) %>%
dplyr::ungroup() %>%
dplyr::select(-sim, -bin))
})
}
}
aggr_sim <- dplyr::bind_cols(aggr_sim, tmp1 %>%
dplyr::summarise(mean(idv)) %>%
dplyr::ungroup() %>%
dplyr::select(-sim, -bin))
colnames(aggr_sim) <- c("sim", "bin", paste0("fact_", lev), "mn_idv")
tmp3 <- tidyr::pivot_longer(aggr_sim, names_to = "strat", cols = paste0("fact_", lev)) %>%
dplyr::arrange(sim, strat, bin) %>%
dplyr::mutate(strat = stringr::str_replace(strat, "fact_", ""))
vpc_dat <- tmp3 %>%
dplyr::group_by(strat, bin) %>%
dplyr::summarise(q50.low = quantile(value, ci[1]),
q50.med = quantile(value, 0.5),
q50.up = quantile(value, ci[2]),
bin_mid = mean(mn_idv)) %>%
dplyr::ungroup()
vpc_dat$bin_min <- rep(bins[1:(length(bins)-1)], length(unique(vpc_dat$strat)))[vpc_dat$bin]
vpc_dat$bin_max <- rep(bins[2:length(bins)], length(unique(vpc_dat$strat)))[vpc_dat$bin]
if(bin_mid == "middle") {
vpc_dat$bin_mid <- apply(cbind(vpc_dat$bin_min, vpc_dat$bin_max), 1, mean)
}
} else {
vpc_dat <- NULL
}
if(!is.null(obs)) {
tmp <- obs %>% dplyr::group_by(bin)
for (i in seq(lev)) {
if (i == 1) {
aggr_obs <- tmp %>%
dplyr::summarise(fact_perc(dv, lev[i]))
} else {
aggr_obs <- cbind(aggr_obs, tmp %>%
dplyr::summarise(fact_perc(dv, lev[i])) %>%
dplyr::ungroup() %>%
dplyr::select(-bin) )
}
}
tmp1 <- cbind(aggr_obs, tmp %>%
dplyr::summarise(mean(idv)) %>%
dplyr::ungroup() %>%
dplyr::select(-bin))
colnames(tmp1) <- c("bin", paste0("fact_", lev), "bin_mid")
tmp2 <- tidyr::pivot_longer(tmp1, names_to = "strat", cols = paste0("fact_", lev)) %>%
dplyr::arrange(strat, bin) %>%
dplyr::mutate(strat = stringr::str_replace(strat, "fact_", ""))
tmp2$bin_min <- rep(bins[1:(length(bins)-1)], length(unique(tmp2$strat)) )[tmp2$bin]
tmp2$bin_max <- rep(bins[2:length(bins)], length(unique(tmp2$strat)) )[tmp2$bin]
if(bin_mid == "middle") {
tmp2$bin_mid <- apply(dplyr::bind_cols(tmp2$bin_min, tmp2$bin_max), 1, mean)
}
aggr_obs <- tmp2
colnames(aggr_obs)[4] <- "obs50"
} else {
aggr_obs <- NULL
}
## plotting starts here
show$median_ci = FALSE
show$obs_dv = FALSE
show$obs_ci = FALSE
show$sim_median = TRUE
show$sim_median_ci = TRUE
show$pi_as_area = FALSE
show$pi_ci = FALSE
show$pi = FALSE
vpc_db <- list(sim = sim,
vpc_dat = vpc_dat,
stratify = "strat", # the stratification is the various categories!
stratify_original = "strat",
aggr_obs = aggr_obs,
obs = obs,
bins = bins,
facet = facet,
labeller = labeller,
type = "categorical",
xlab = xlab,
ylab = ylab)
if(vpcdb) {
return(vpc_db)
} else {
pl <- plot_vpc(db = vpc_db,
show = show,
vpc_theme = vpc_theme,
smooth = smooth,
log_y = FALSE,
title = title)
pl <- pl + ggplot2::ylim(c(0,1))
return(pl)
}
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/vpc_cat.R
|
#' VPC function for left- or right-censored data (e.g. BLOQ data)
#'
#' Creates a VPC plot from observed and simulation data for censored data. Function can handle both left- (below lower limit of quantification) and right-censored (above upper limit of quantification) data.
#'
#' @param sim a data.frame with observed data, containing the independent and dependent variable, a column indicating the individual, and possibly covariates. E.g. load in from NONMEM using \link{read_table_nm}
#' @param obs a data.frame with observed data, containing the independent and dependent variable, a column indicating the individual, and possibly covariates. E.g. load in from NONMEM using \link{read_table_nm}
#' @param psn_folder instead of specifying "sim" and "obs", specify a PsN-generated VPC-folder
#' @param bins either "density", "time", or "data", or a numeric vector specifying the bin separators.
#' @param n_bins number of bins
#' @param bin_mid either "mean" for the mean of all timepoints (default) or "middle" to use the average of the bin boundaries.
#' @param obs_cols observation dataset column names (list elements: "dv", "idv", "id", "pred")
#' @param sim_cols simulation dataset column names (list elements: "dv", "idv", "id", "pred")
#' @param show what to show in VPC (obs_ci, pi, pi_as_area, pi_ci, obs_median, sim_median, sim_median_ci)
#' @param software name of software platform using (e.g. nonmem, phoenix)
#' @param stratify character vector of stratification variables. Only 1 or 2 stratification variables can be supplied.
#' @param stratify_color variable to stratify and color lines for observed data. Only 1 stratification variable can be supplied.
#' @param ci confidence interval to plot. Default is (0.05, 0.95)
#' @param uloq Number or NULL indicating upper limit of quantification. Default is NULL.
#' @param lloq Number or NULL indicating lower limit of quantification. Default is NULL.
#' @param plot Boolean indicating whether to plot the ggplot2 object after creation. Default is FALSE.
#' @param xlab label for x-axis
#' @param ylab label for y-axis
#' @param title title
#' @param smooth "smooth" the VPC (connect bin midpoints) or show bins as rectangular boxes. Default is TRUE.
#' @param vpc_theme theme to be used in VPC. Expects list of class vpc_theme created with function vpc_theme()
#' @param facet either "wrap", "columns", or "rows"
#' @param labeller ggplot2 labeller function to be passed to underlying ggplot object
#' @param vpcdb boolean whether to return the underlying vpcdb rather than the plot
#' @param verbose show debugging information (TRUE or FALSE)
#' @return a list containing calculated VPC information, and a ggplot2 object
#' @export
#' @seealso \link{sim_data}, \link{vpc}, \link{vpc_tte}, \link{vpc_cat}
#' @examples
#'
#' ## See vpc.ronkeizer.com for more documentation and examples
#' library(vpc)
#'
#' vpc_cens(sim = simple_data$sim, obs = simple_data$obs, lloq = 30)
#' vpc_cens(sim = simple_data$sim, obs = simple_data$obs, uloq = 120)
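#'
#' # A hedged sketch (not run): return the calculated censoring statistics instead of a plot
#' \dontrun{
#' db <- vpc_cens(sim = simple_data$sim, obs = simple_data$obs, lloq = 30, vpcdb = TRUE)
#' head(db$vpc_dat)
#' }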
#'
vpc_cens <- function(sim = NULL,
obs = NULL,
psn_folder = NULL,
bins = "jenks",
n_bins = 8,
bin_mid = "mean",
obs_cols = NULL,
sim_cols = NULL,
software = "auto",
show = NULL,
stratify = NULL,
stratify_color = NULL,
ci = c(0.05, 0.95),
uloq = NULL,
lloq = NULL,
plot = FALSE,
xlab = "Time",
ylab = "Probability of <LOQ",
title = NULL,
smooth = TRUE,
vpc_theme = NULL,
facet = "wrap",
labeller = NULL,
vpcdb = FALSE,
verbose = FALSE) {
if(is.null(uloq) & is.null(lloq)) {
stop("You have to specify either a lower limit of quantification (lloq=...) or an upper limit (uloq=...).")
}
if(!is.null(uloq) & !is.null(lloq)) {
stop("You have to specify either a lower limit of quantification (lloq=...) or an upper limit (uloq=...), but you can't specify both.")
}
if(is.null(lloq)) {
type <- "right-censored"
}
if(is.null(uloq)) {
type <- "left-censored"
}
if(is.null(obs) & is.null(sim)) {
stop("At least a simulation or an observation dataset are required to create a plot!")
}
if(!is.null(psn_folder)) {
    if(is.null(obs)) {
obs <- read_table_nm(paste0(psn_folder, "/m1/", dir(paste0(psn_folder, "/m1"), pattern="original.npctab")[1]))
}
    if(is.null(sim)) {
sim <- read_table_nm(paste0(psn_folder, "/m1/", dir(paste0(psn_folder, "/m1"), pattern="simulation.1.npctab")[1]))
}
software = "nonmem"
}
if (!is.null(obs)) {
software_type <- guess_software(software, obs)
} else {
software_type <- guess_software(software, sim)
}
## checking whether stratification columns are available
if(!is.null(stratify)) {
if(!is.null(obs)) {
check_stratification_columns_available(obs, stratify, "observation")
}
if(!is.null(sim)) {
check_stratification_columns_available(sim, stratify, "simulation")
}
}
if(!is.null(stratify_color)) {
if(!is.null(obs)) {
check_stratification_columns_available(obs, stratify_color, "observation")
}
    if(!is.null(sim)) {
check_stratification_columns_available(sim, stratify_color, "simulation")
}
}
## define what to show in plot
show <- replace_list_elements(show_default, show)
## define column names
cols <- define_data_columns(sim, obs, sim_cols, obs_cols, software_type)
if(!is.null(obs)) {
old_class <- class(obs)
class(obs) <- c(software_type, old_class)
}
if(!is.null(sim)) {
old_class <- class(sim)
class(sim) <- c(software_type, old_class)
}
## parse data into specific format
if(!is.null(obs)) {
obs <- filter_dv(obs, verbose)
obs <- format_vpc_input_data(obs, cols$obs, lloq, uloq, stratify, bins, FALSE, 0, "observed", verbose)
}
if(!is.null(sim)) {
sim <- filter_dv(sim, verbose)
sim <- format_vpc_input_data(sim, cols$sim, NULL, NULL, stratify, bins, FALSE, 0, "simulated", verbose)
# add sim index number
sim$sim <- add_sim_index_number(sim)
}
stratify_original <- stratify
if(!is.null(stratify_color)) {
if (is.null(stratify)) {
stratify <- stratify_color
}
if (length(stratify_color) > 1) {
stop("Error: please specify only 1 stratification variable for color!")
}
if (!stratify_color %in% stratify) {
stratify_original <- stratify
stratify <- c(stratify, stratify_color)
}
}
if (class(bins) != "numeric") {
if(!is.null(obs)) {
bins <- auto_bin(obs, type = bins, n_bins = n_bins)
} else { # get from sim
bins <- auto_bin(sim, type = bins, n_bins = n_bins)
}
if (is.null(bins)) {
msg("Automatic binning unsuccessful, try increasing the number of bins, or specify vector of bin separators manually.", verbose)
}
}
bins <- unique(bins)
if(!is.null(obs)) {
obs <- bin_data(obs, bins, "idv")
}
if(!is.null(sim)) {
sim <- bin_data(sim, bins, "idv")
}
if(!is.null(lloq)) {
cens <- "left"
limit <- lloq
} else {
cens <- "right"
limit <- uloq
}
## Parsing data to get the quantiles for the VPC
if (!is.null(sim)) {
tmp1 <- sim %>%
dplyr::group_by(strat, sim, bin)
vpc_dat <- tmp1 %>%
dplyr::summarise(ploq = loq_perc(dv, limit = limit, cens = cens),
mn_idv = mean(idv)) %>%
dplyr::group_by(strat, bin) %>%
dplyr::summarise(q50.low = quantile(ploq, ci[1]),
q50.med = quantile(ploq, 0.5),
q50.up = quantile(ploq, ci[2]),
bin_mid = mean(mn_idv)) %>%
dplyr::ungroup()
vpc_dat$bin_min <- rep(bins[1:(length(bins)-1)], length(unique(vpc_dat$strat)))[vpc_dat$bin]
vpc_dat$bin_max <- rep(bins[2:length(bins)], length(unique(vpc_dat$strat)))[vpc_dat$bin]
if(bin_mid == "middle") {
vpc_dat$bin_mid <- apply(dplyr::bind_cols(vpc_dat$bin_min, vpc_dat$bin_max), 1, mean)
}
} else {
vpc_dat <- NULL
}
if(!is.null(obs)) {
tmp <- obs %>%
dplyr::group_by(strat,bin)
aggr_obs <- tmp %>%
      dplyr::summarise(obs50 = loq_perc(dv, limit = limit, cens = cens),
bin_mid = mean(idv)) %>%
dplyr::ungroup()
aggr_obs$bin_min <- rep(bins[1:(length(bins)-1)], length(unique(aggr_obs$strat)) )[aggr_obs$bin]
aggr_obs$bin_max <- rep(bins[2:length(bins)], length(unique(aggr_obs$strat)) )[aggr_obs$bin]
if(bin_mid == "middle") {
aggr_obs$bin_mid <- apply(dplyr::bind_cols(aggr_obs$bin_min, aggr_obs$bin_max), 1, mean)
}
} else {
aggr_obs <- NULL
}
if (!is.null(stratify_original)) {
if (length(stratify) == 2) {
vpc_dat$strat1 <- unlist(strsplit(as.character(vpc_dat$strat), ", "))[(1:length(vpc_dat$strat)*2)-1]
vpc_dat$strat2 <- unlist(strsplit(as.character(vpc_dat$strat), ", "))[(1:length(vpc_dat$strat)*2)]
aggr_obs$strat1 <- unlist(strsplit(as.character(aggr_obs$strat), ", "))[(1:length(aggr_obs$strat)*2)-1]
aggr_obs$strat2 <- unlist(strsplit(as.character(aggr_obs$strat), ", "))[(1:length(aggr_obs$strat)*2)]
}
}
## plotting starts here
show$obs_dv = FALSE
show$obs_ci = FALSE
show$obs_median = TRUE
show$sim_median = FALSE
show$sim_median_ci = TRUE
show$pi_as_area = FALSE
show$pi_ci = FALSE
show$pi = FALSE
vpc_db <- list(sim = sim,
vpc_dat = vpc_dat,
stratify = stratify,
stratify_original = stratify_original,
stratify_color = stratify_color,
aggr_obs = aggr_obs,
obs = obs,
bins = bins,
facet = facet,
labeller = labeller,
type = "censored",
xlab = xlab,
ylab = ylab)
if(vpcdb) {
return(vpc_db)
} else {
pl <- plot_vpc(db = vpc_db,
show = show,
vpc_theme = vpc_theme,
smooth = smooth,
log_y = FALSE,
title = title)
return(pl)
}
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/vpc_cens.R
|
#' VPC function for time-to-event (survival) data
#'
#' This function can be used for either single time-to-event (TTE) or repeated time-to-event (RTTE) data.
#'
#' Creates a VPC plot from observed and simulation survival data
#' @param sim a data.frame with observed data, containing the independent and dependent variable, a column indicating the individual, and possibly covariates. E.g. load in from NONMEM using \link{read_table_nm}
#' @param obs a data.frame with observed data, containing the independent and dependent variable, a column indicating the individual, and possibly covariates. E.g. load in from NONMEM using \link{read_table_nm}
#' @param psn_folder instead of specifying "sim" and "obs", specify a PsN-generated VPC-folder
#' @param bins either "density", "time", or "data", or a numeric vector specifying the bin separators.
#' @param n_bins number of bins
#' @param obs_cols observation dataset column names (list elements: "dv", "idv", "id", "pred")
#' @param sim_cols simulation dataset column names (list elements: "dv", "idv", "id", "pred", "sim")
#' @param software name of software platform using (e.g. nonmem, phoenix)
#' @param show what to show in VPC (obs_ci, obs_median, sim_median, sim_median_ci)
#' @param rtte repeated time-to-event data? Default is FALSE (treat as single-event TTE)
#' @param rtte_calc_diff recalculate time (T/F)? When simulating in NONMEM, you will probably need to set this to TRUE to recalculate the TIME to relative times between events (unless you output the time difference between events and specify that as the independent variable to the vpc_tte() function).
#' @param rtte_conditional `TRUE` (default) or `FALSE`. Compute the probability for each event newly (`TRUE`), or calculate the absolute probability (`FALSE`, i.e. the "probability of a 1st, 2nd, 3rd event etc" rather than the "probability of an event happening").
#' @param kmmc either NULL (for regular TTE vpc, default), or a variable name for a KMMC plot (e.g. "WT")
#' @param events numeric vector describing which events to show a VPC for when repeated TTE data, e.g. c(1:4). Default is NULL, which shows all events.
#' @param reverse_prob reverse the probability scale (i.e. plot 1-probability)
#' @param stratify character vector of stratification variables. Only 1 or 2 stratification variables can be supplied.
#' @param stratify_color character vector of stratification variables. Only 1 stratification variable can be supplied, cannot be used in conjunction with `stratify`.
#' @param ci confidence interval to plot. Default is (0.05, 0.95)
#' @param plot Boolean indicating whether to plot the ggplot2 object after creation. Default is FALSE.
#' @param as_percentage Show y-scale from 0-100 percent? TRUE by default, if FALSE then scale from 0-1.
#' @param xlab label for x-axis
#' @param ylab label for y-axis
#' @param title title
#' @param smooth "smooth" the VPC (connect bin midpoints) or show bins as rectangular boxes. Default is TRUE.
#' @param vpc_theme theme to be used in VPC. Expects list of class vpc_theme created with function vpc_theme()
#' @param facet either "wrap", "columns", or "rows"
#' @param labeller ggplot2 labeller function to be passed to underlying ggplot object
#' @param verbose TRUE or FALSE (default)
#' @param vpcdb Boolean whether to return the underlying vpcdb rather than the plot
#' @return a list containing calculated VPC information, and a ggplot2 object
#' @export
#' @seealso \link{sim_data}, \link{vpc}, \link{vpc_tte}, \link{vpc_cens}
#' @examples
#' ## See vpc-docs.ronkeizer.com for more documentation and examples.
#'
#' ## Example for (repeated) time-to-event data
#' ## with NONMEM-like data (e.g. simulated using a dense grid)
#'
#' data(rtte_obs_nm)
#' data(rtte_sim_nm)
#'
#' # treat RTTE as TTE, no stratification
#' vpc_tte(sim = rtte_sim_nm[rtte_sim_nm$sim <= 20,],
#' obs = rtte_obs_nm,
#' rtte = FALSE,
#' sim_cols=list(dv = "dv", idv = "t"), obs_cols=list(idv = "t"))
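#'
#' # A hedged sketch (not run): treat the data as repeated time-to-event and
#' # show only the first two events
#' \dontrun{
#' vpc_tte(sim = rtte_sim_nm[rtte_sim_nm$sim <= 20,],
#'         obs = rtte_obs_nm,
#'         rtte = TRUE, events = c(1, 2),
#'         sim_cols = list(dv = "dv", idv = "t"), obs_cols = list(idv = "t"))
#' }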
#'
vpc_tte <- function(sim = NULL,
obs = NULL,
psn_folder = NULL,
rtte = FALSE,
rtte_calc_diff = TRUE,
rtte_conditional = TRUE,
events = NULL,
bins = FALSE,
n_bins = 10,
software = "auto",
obs_cols = NULL,
sim_cols = NULL,
kmmc = NULL,
reverse_prob = FALSE,
stratify = NULL,
stratify_color = NULL,
ci = c(0.05, 0.95),
plot = FALSE,
xlab = "Time",
ylab = "Survival (%)",
show = NULL,
as_percentage = TRUE,
title = NULL,
smooth = FALSE,
vpc_theme = NULL,
facet = "wrap",
labeller = NULL,
verbose = FALSE,
vpcdb = FALSE) {
if(is.null(obs) & is.null(sim)) {
stop("At least a simulation or an observation dataset are required to create a plot!")
}
  if(!is.null(bins) && !isFALSE(bins)) {
message("Binning is not recommended for `vpc_tte()`, plot might not show correctly!")
}
if(!is.null(kmmc)) {
if(!kmmc %in% names(obs)) {
stop(paste0("Specified covariate ", kmmc, " not found among column names in observed data."))
}
}
if(!is.null(kmmc)) {
if(!kmmc %in% names(sim)) {
stop(paste0("Specified covariate ", kmmc, " not found among column names in simulated data."))
}
}
message("Initializing.")
if(!is.null(psn_folder)) {
    if(is.null(obs)) {
obs <- vpc::read_table_nm(paste0(psn_folder, "/m1/", dir(paste0(psn_folder, "/m1"), pattern="original.npctab")[1]))
}
    if(is.null(sim)) {
sim <- vpc::read_table_nm(paste0(psn_folder, "/m1/", dir(paste0(psn_folder, "/m1"), pattern="simulation.1.npctab")[1]))
}
software = "nonmem"
}
if (!is.null(obs)) {
software_type <- guess_software(software, obs)
} else {
software_type <- guess_software(software, sim)
}
if(is.null(sim)) {
show_default$obs_ci <- TRUE
}
## define what to show in plot
show <- vpc::replace_list_elements(show_default_tte, show)
## checking whether stratification columns are available
stratify_pars <- NULL
if(!is.null(stratify)) stratify_pars <- stratify
if(!is.null(stratify_color)) {
if(!is.null(stratify)) stop("Sorry, stratification using both facets and color is currently not supported, use either `stratify` or `stratify_color`.")
if(length(stratify_color) != 1) {
stop("Sorry, please specify only a single stratification variable for `stratify_color`.")
}
stratify_pars <- stratify_color
}
if(!is.null(stratify_pars)) {
if(!is.null(obs)) {
check_stratification_columns_available(obs, stratify_pars, "observation")
}
if(!is.null(sim)) {
check_stratification_columns_available(sim, stratify_pars, "simulation")
}
}
## redefine strat column in case of "strat"
if(!is.null(stratify_pars) && !is.null(obs)) {
if(stratify_pars[1] == "strat") {
if(!is.null(obs)) {
obs$strat_orig = obs$strat
} else if (!is.null(sim)){
sim$strat_orig = sim$strat
}
stratify <- "strat_orig"
}
}
## define column names
cols <- define_data_columns(sim, obs, sim_cols, obs_cols, software_type)
if(!is.null(obs)) {
old_class <- class(obs)
class(obs) <- c(software_type, old_class)
}
if(!is.null(sim)) {
old_class <- class(sim)
class(sim) <- c(software_type, old_class)
}
## remove EVID != 0 / MDV != 0
if(!is.null(obs)) {
obs <- filter_dv(obs, verbose)
}
if(!is.null(sim)) {
sim <- filter_dv(sim, verbose)
}
## stratification
stratify_original <- stratify_pars
if(!is.null(stratify_pars)) {
if(rtte) {
if (length(stratify_pars) > 1) {
stop ("Sorry, with repeated time-to-event data, stratification on more than 1 variables is currently not supported!")
invisible()
}
} else {
if (length(stratify_pars) > 2) {
stop ("Sorry, stratification on more than 2 variables is currently not supported!")
invisible()
}
}
}
## format obs data
if(!is.null(obs)) {
obs$id <- obs[[cols$obs$id]]
if (length(obs[[cols$obs$id]]) == 0) {
msg("Warning: No ID column found, assuming 1 row per ID.", verbose)
obs$id <- 1:length(obs[,1])
}
obs$time <- as.num(obs[[cols$obs$idv]])
obs$dv <- as.num(obs[[cols$obs$dv]])
if(max(obs$dv) > 1) { # guessing DV definition if not just 0/1
if(max(obs$dv) == 2) { # common approach in NONMEM, 2 = censored
obs[obs$dv != 1,]$dv <- 0
msg("Warning: vpc_tte() expected the observed dependent variable to contain only 0 (censored, or no event observed) or 1 (event observed). Setting all observations != 1 to 0.", verbose)
} else {
obs[obs$dv != 1,]$dv <- 1 # some people use DV to indicate the event time.
msg("Warning: vpc_tte() expected the dependent variable to contain only 0 (censored, or no event observed) or 1 (event observed). Setting all observations != 1 to 1.", verbose)
}
}
if("cens" %in% tolower(colnames(obs))) { # some people use a 'cens' column to indicate censoring
msg(paste0("Detected column '",colnames(obs)[match("cens", tolower(colnames(obs)))],"' with censoring information in observation data, assuming 1=censored event, 0=observed event. Please transpose data if assumption not correct."), TRUE)
colnames(obs)[match("cens", tolower(colnames(obs)))] <- "cens"
obs[obs$cens == 1,]$dv <- 0
}
if(rtte) {
if(rtte_calc_diff) {
obs <- relative_times(obs)
}
obs <- obs %>%
dplyr::group_by_("id") %>%
dplyr::arrange_("id", "t") %>%
dplyr::mutate(rtte = 1:length(dv))
# obs %>% dplyr::group_by(id) %>% dplyr::mutate(rtte = cumsum(dv != 0))
# obs[obs$dv == 0,]$rtte <- obs[obs$dv == 0,]$rtte + 1 # these censored points actually "belong" to the next rtte strata
stratify_pars <- c(stratify_pars, "rtte")
} else {
obs <- obs %>%
dplyr::group_by_("id") %>%
dplyr::mutate(last_obs = 1*(1:length(time) == length(time)), repeat_obs = 1*(cumsum(dv) > 1)) %>%
dplyr::filter(dv == 1 | last_obs == 1) %>%
dplyr::filter(!duplicated(id))
obs$rtte <- 1
}
# add stratification column and comput KM curve for observations
obs <- add_stratification(obs, stratify_pars)
if(!is.null(kmmc) && kmmc %in% names(obs)) {
obs_km <- compute_kmmc(obs, strat = "strat", reverse_prob = reverse_prob, kmmc=kmmc)
} else {
if(show$obs_ci) {
if(length(ci) == 2 && (round(ci[1],3) != round((1-ci[2]),3))) {
stop("Sorry, only symmetric confidence intervals can be computed. Please adjust the ci argument.")
}
obs_km <- compute_kaplan(obs, strat = "strat", reverse_prob = reverse_prob, rtte_conditional = rtte_conditional, ci = ci)
} else {
obs_km <- compute_kaplan(obs, strat = "strat", reverse_prob = reverse_prob, rtte_conditional = rtte_conditional)
}
}
} else { # get bins from sim
obs_km <- NULL
}
if(!is.null(kmmc) & (class(bins) == "logical" && bins == FALSE)) {
msg("Tip: with KMMC-type plots, binning of simulated data is recommended. See documentation for the 'bins' argument for more information.", verbose)
}
all_dat <- c()
if(!is.null(sim)) {
# format sim data and compute KM curve CI for simulations
if (all(c(cols$sim$idv, cols$sim$id, cols$sim$dv) %in% names(sim))) {
sim$id <- sim[[cols$sim$id]]
sim$dv <- sim[[cols$sim$dv]]
sim$time <- sim[[cols$sim$idv]]
} else {
stop("Not all required variables were found, please check column definitions for id, dv and time.")
}
    if(max(sim$dv) > 1) { # guessing DV definition if not just 0/1
      if(max(sim$dv) == 2) { # common approach in NONMEM, 2 = censored
        sim[sim$dv != 1,]$dv <- 0
        msg("Warning: Expected simulated dependent variable to contain only 0 (censored, or no event observed) or 1 (event observed). Setting all simulated observations != 1 to 0.", verbose)
      } else {
        sim[sim$dv != 1,]$dv <- 1 # some people use DV to indicate the event time.
        msg("Warning: Expected simulated dependent variable to contain only 0 (censored, or no event observed) or 1 (event observed). Setting all simulated observations != 1 to 1.", verbose)
}
}
if("nonmem" %in% class(sim)) { # necessary due to a bug in NONMEM simulation
sim <- sim[!(sim$time == 0 & sim$dv == 1),]
}
if(max(sim$dv) == 1) {
if (sum(sim$dv > 0 & sim$dv < 1) > 0) {
sim[sim$dv > 0 & sim$dv < 1,]$dv <- 0
}
}
if("cens" %in% tolower(names(sim$cens))) { # some people use a 'cens' column to indicate censoring
cat("Detected extra column with censoring information in simulation data.")
colnames(sim)[match("cens", tolower(colnames(sim)))] <- "cens"
sim[sim$cens == 1,]$dv <- 0
}
# add sim index number
sim$sim <- add_sim_index_number(sim, id = cols$sim$id, sim_label = cols$sim$sim)
# set last_observation and repeat_obs per sim&id
sim <- sim %>%
dplyr::group_by_("sim", "id") %>%
dplyr::mutate(last_obs = 1*(1:length(time) == length(time)), repeat_obs = 1*(cumsum(dv) > 1))
# filter out stuff and recalculate rtte times
sim <- sim[sim$dv == 1 | (sim$last_obs == 1 & sim$dv == 0),]
if(rtte) {
sim <- sim %>%
dplyr::group_by_("sim", "id") %>%
dplyr::arrange_("sim", "id", "time") %>%
dplyr::mutate(rtte = 1:length(dv)) %>%
dplyr::arrange_("sim", "id")
if(rtte_calc_diff) {
sim <- relative_times(sim, simulation=TRUE)
}
} else {
sim$sim_id <- paste0(sim$sim, "_", sim$id) # remove duplicate observations rows per id to filter out repeated obs
sim <- sim[!duplicated(sim$sim_id),]
}
tmp_bins <- unique(c(0, sort(unique(sim$time)), max(sim$time)))
n_sim <- length(unique(sim$sim))
if(n_sim <= 1) {
stop(paste0("Something seems wrong with your simulation dataset, only ", n_sim, " iterations of the simulation were identified."))
}
all_dat <- c()
if(!(class(bins) == "logical" && bins == FALSE)) {
if(class(bins) == "logical" && bins == TRUE) {
bins <- "time"
}
if(class(bins) == "character") {
if (bins == "obs") {
tmp_bins <- unique(c(0, sort(unique(obs$time)), max(obs$time)))
} else {
if (!(bins %in% c("time","data"))) {
msg(paste0("Note: bining method ", bins," might be slow. Consider using method 'time', or specify 'bins' as numeric vector"), verbose)
}
tmp_bins <- unique(c(0, auto_bin(sim %>% dplyr::mutate(idv=time), type=bins, n_bins = n_bins-1), max(sim$time)))
}
}
if(class(bins) == "numeric") {
tmp_bins <- unique(c(0, bins, max(obs$time)))
}
}
message("Calculating simulation stats.")
pb <- utils::txtProgressBar(min = 1, max = n_sim)
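    ## for each simulated dataset: compute a Kaplan-Meier (or KMMC) curve per stratum,
    ## expand it onto a common grid of event times (carrying the last value forward with
    ## locf()), and assign time bins; quantiles across simulations are taken below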
for (i in 1:n_sim) {
utils::setTxtProgressBar(pb, i)
tmp <- sim %>% dplyr::filter(sim == i)
tmp2 <- add_stratification(tmp %>%
dplyr::arrange_("id", "time"), stratify_pars)
if(!is.null(kmmc) && kmmc %in% names(obs)) {
tmp3 <- compute_kmmc(tmp2, strat = "strat", reverse_prob = reverse_prob, kmmc = kmmc)
} else {
tmp3 <- compute_kaplan(tmp2, strat = "strat", reverse_prob = reverse_prob, rtte_conditional = rtte_conditional)
}
tmp3$time_strat <- paste0(tmp3$time, "_", tmp3$strat)
tmp4 <- expand.grid(time = c(0, unique(sim$time)), surv=NA, lower=NA, upper=NA,
strat = unique(tmp3$strat))
tmp4$time_strat <- paste0(tmp4$time, "_", tmp4$strat)
tmp4[match(tmp3$time_strat, tmp4$time_strat),]$surv <- tmp3$surv
# tmp4[match(tmp3$time_strat, tmp4$time_strat),]$lower <- tmp3$lower
# tmp4[match(tmp3$time_strat, tmp4$time_strat),]$upper <- tmp3$upper
tmp4 <- tmp4 %>%
dplyr::arrange(strat, time)
tmp4$surv <- locf(tmp4$surv)
tmp4[,c("bin", "bin_min", "bin_max", "bin_mid")] <- 0
tmp4$bin <- cut(tmp4$time, breaks = tmp_bins, labels = FALSE, right = TRUE)
tmp4$bin_min <- tmp_bins[tmp4$bin]
tmp4$bin_max <- tmp_bins[tmp4$bin+1]
tmp4$bin_mid <- (tmp4$bin_min + tmp4$bin_max) / 2
all_dat <- dplyr::bind_rows(all_dat, cbind(i, tmp4)) ## RK: this can be done more efficient!
}
sim_km <- all_dat %>%
dplyr::group_by_("strat", "bin") %>%
dplyr::summarise (bin_mid = head(bin_mid,1),
bin_min = head(bin_min,1),
bin_max = head(bin_max,1),
qmin = quantile(surv, 0.05),
qmax = quantile(surv, 0.95),
qmed = median(surv),
# lower_med = median(lower, 0.05),
# upper_med = median(upper, 0.05),
step = 0)
} else {
sim_km <- NULL
tmp_bins <- unique(c(0, sort(unique(obs$time)), max(obs$time)))
}
if (rtte) {
if(!is.null(sim)) {
sim_km$rtte <- as.num(gsub(".*, (\\d)", "\\1", sim_km$strat, perl = TRUE))
if (!is.null(events)) {
sim_km <- sim_km %>%
dplyr::filter(rtte %in% events)
# redefine strat factors, since otherwise empty panels will be shown
sim_km$strat <- factor(sim_km$strat, levels = unique(sim_km$strat))
}
}
if(!is.null(obs)) {
obs_km$rtte <- as.num(gsub(".*, (\\d)", "\\1", obs_km$strat, perl = TRUE))
if (!is.null(events)) {
obs_km <- obs_km %>%
dplyr::filter(rtte %in% events)
obs_km$strat <- factor(obs_km$strat, levels = unique(obs_km$strat))
}
}
}
cens_dat <- NULL
if(show$obs_cens && !is.null(obs)) {
cens_dat <- obs
if(rtte) {
if(!rtte_conditional || !rtte_calc_diff) {
cens_dat <- cens_dat %>%
dplyr::mutate(time = t)
}
}
cens_dat <- cens_dat %>%
dplyr::filter(dv == 0, time > 0)
}
if (!is.null(stratify_original)) {
if (length(stratify_pars) == 2) {
if(!is.null(sim_km)) {
sim_km$strat1 <- unlist(strsplit(as.character(sim_km$strat), ", "))[(1:length(sim_km$strat)*2)-1]
sim_km$strat2 <- unlist(strsplit(as.character(sim_km$strat), ", "))[(1:length(sim_km$strat)*2)]
}
if(!is.null(obs_km)) {
obs_km$strat1 <- unlist(strsplit(as.character(obs_km$strat), ", "))[(1:length(obs_km$strat)*2)-1]
obs_km$strat2 <- unlist(strsplit(as.character(obs_km$strat), ", "))[(1:length(obs_km$strat)*2)]
}
if(!is.null(cens_dat)) {
cens_dat$strat1 <- unlist(strsplit(as.character(cens_dat$strat), ", "))[(1:length(cens_dat$strat)*2)-1]
cens_dat$strat2 <- unlist(strsplit(as.character(cens_dat$strat), ", "))[(1:length(cens_dat$strat)*2)]
}
}
}
if (!is.null(obs)) {
if (show$obs_cens) {
if(nrow(cens_dat)>0) {
cens_dat$y <- 1
cens_dat$strat1 <- NA
cens_dat$strat2 <- NA
for (j in 1:nrow(cens_dat[,1])) {
tmp <- obs_km[as.character(obs_km$strat) == as.character(cens_dat$strat[j]),]
cens_dat$y[j] <- rev(tmp$surv[(cens_dat$time[j] - tmp$time) > 0])[1]
if ("strat1" %in% names(tmp)) {
cens_dat$strat1[j] <- rev(tmp$strat1[(cens_dat$time[j] - tmp$time) > 0])[1]
}
if ("strat2" %in% names(tmp)) {
cens_dat$strat2[j] <- rev(tmp$strat2[(cens_dat$time[j] - tmp$time) > 0])[1]
}
}
cens_dat <- cens_dat[!is.na(cens_dat$y),]
}
}
}
if(!is.null(obs)) {
show$obs_dv <- TRUE
} else {
show$obs_dv <- FALSE
}
show$pi <- TRUE
if(!is.null(kmmc)) {
ylab <- paste0("Mean (", kmmc, ")")
}
# plotting starts here
vpc_db <- list(sim = sim,
sim_km = sim_km,
obs = obs,
obs_km = obs_km,
all_dat = all_dat,
stratify_pars = stratify_pars,
stratify = stratify,
stratify_color = stratify_color,
stratify_original = stratify_original,
bins = bins,
facet = facet,
labeller = labeller,
kmmc = kmmc,
cens_dat = cens_dat,
rtte = rtte,
type = "time-to-event",
as_percentage = as_percentage,
tmp_bins = tmp_bins,
xlab = xlab,
ylab = ylab)
if(is.null(xlab)) {
xlab <- "Time (days)"
}
if(vpcdb) {
return(vpc_db)
} else {
message("\nPlotting.")
pl <- plot_vpc(vpc_db,
show = show,
vpc_theme = vpc_theme,
smooth = smooth,
log_y = FALSE,
title = title)
return(pl)
}
}
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/vpc_tte.R
|
#' @importFrom dplyr "%>%"
NULL
is_equal <- function(test, ref, tol = 1e-3, relative=TRUE) {
if(relative) {
res <- abs((test - ref) / ref) < tol
} else {
res <- abs(test - ref) < tol
}
if(any(!res)) message(paste0("Test: ", test, " / Ref: ", ref, "\n"))
return(all(res))
}
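# Example (informal): with the default 0.1% relative tolerance,
# is_equal(1.0005, 1) returns TRUE, while is_equal(1.01, 1) returns FALSE.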
|
/scratch/gouwar.j/cran-all/cranData/vpc/R/zzz.R
|
##### Import packages ####
#' Packages
#' VPR processing functions depend on these packages
#'
#' These packages are needed!
#'
#' @import dplyr ggplot2 oce
#' @importFrom graphics hist par plot.new
#' @importFrom stats aggregate median quantile
#' @importFrom utils menu read.csv read.table write.table
#'
#' @rawNamespace import(gridExtra, except = combine)
#' @rawNamespace import(metR, except = coriolis)
#'
NULL
options(dplyr.summarise.inform = FALSE) # TODO: is this needed?
#### PROCESSING FUNCTIONS ####
#' Read prediction output from a CNN model
#'
#' @param filename model prediction output file (.txt) from `vpr_transferlearn::save_output()`
#'
#' @return a list with two elements: `metadata`, a named character vector of header information, and `data`, a data frame of predictions
#' @export
#'
#'
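#' @examples
#' # A hedged sketch (not run): 'vpr_predictions.txt' is a hypothetical output file
#' # produced by vpr_transferlearn::save_output()
#' \dontrun{
#' dat <- vpr_pred_read('vpr_predictions.txt')
#' dat$metadata
#' head(dat$data)
#' }
#'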
vpr_pred_read <- function(filename){
# do some checks on the file
# check for .txt file
# check that data index exists
all_lines <- readLines(filename)
dat_index <- grep(all_lines, pattern = 'DATA ----')
dat_tb <- read.table(filename, header = TRUE, sep = ',', skip = dat_index)
dat <- list()
dat$metadata <- as.list(all_lines[seq_len(dat_index -1)])
dat$data <- dat_tb
md_names <- stringr::str_split_fixed(dat$metadata, pattern = ':', 2)[,1]
md_values <- stringr::str_split_fixed(dat$metadata, pattern = ':', 2)[,2]
dat$metadata <- md_values
names(dat$metadata) <- md_names
return(dat)
}
#' Save VPR data as an \link[oce]{as.oce} object
#'
#' @details This function converts a VPR data frame into an `oce` object.
#' Using an `oce` object as the default export format for VPR data allows
#' metadata and data to be kept in the same, space-efficient file, and avoids
#' redundancy in the data frame. The function checks for data parameters that
#' may actually be metadata parameters (columns in which the same value is
#' repeated for every observation). These parameters will automatically be
#' copied into the metadata slot of the `oce` object. The function will also
#' prompt for a variety of required metadata fields. Depending on specific
#' research / archiving requirements, these metadata parameters could be
#' updated by providing the argument `metadata`.
#'
#' Default metadata parameters include 'deploymentType', 'waterDepth',
#' 'serialNumber', 'latitude', 'longitude', 'castDate', 'castStartTime',
#' 'castEndTime', 'processedBy', 'opticalSetting', 'imageVolume', 'comment'.
#'
#'
#' @param data a VPR data frame
#' @param metadata (optional) a named list of character values giving metadata
#' values. If this argument is not provided user will be prompted for a few
#' generic metadata requirements.
#'
#'
#' @return an oce CTD object with all VPR data as well as metadata
#' @export
#'
#' @examples
#' data("taxa_conc_n")
#' metadata <- c('deploymentType' = 'towyo', 'waterDepth' =
#' max(ctd_roi_merge$pressure), 'serialNumber' = NA, 'latitude' = 47,
#' 'longitude' = -65, 'castDate' = '2019-08-11', 'castStartTime'= '00:00',
#' 'castEndTime' = '01:00', 'processedBy' = 'E. Chisholm', 'opticalSetting' =
#' 'S2', 'imageVolume' = 83663, 'comment' = 'test data')
#'
#' oce_dat <- vpr_save(taxa_conc_n, metadata)
#' # save(oce_dat, file = 'vpr_save.RData') # save data
#'
vpr_save <- function(data, metadata){
# create oce objects
oce_data <- as.oce(data)
# check for metadata in dataframe
rem_list <- list()
for(i in seq_len(length(oce_data@data))){
if(length(unique(oce_data@data[[i]])) == 1){
print(paste('Metadata parameter found in Data object! ', names(oce_data@data)[[i]], 'value of' , unique(oce_data@data[[i]]), 'moved to metadata slot. '))
      # add as metadata parameter
oce_data <- oceSetMetadata(oce_data, name = names(oce_data@data)[[i]], value = unique(oce_data@data[[i]]))
rem_list[[i]] <- i
}
}
# remove data lines
oce_data@data <- oce_data@data[-unlist(rem_list)]
# check for other metadata and ask user to supply
if(missing(metadata)){
req_meta <- c('deploymentType', 'waterDepth', 'serialNumber', 'latitude', 'longitude', 'castDate', 'castStartTime', 'castEndTime', 'processedBy', 'opticalSetting', 'imageVolume', 'comment')
# TODO include metadata examples or skip
# clarify serial number, water depth?
for(rm in req_meta){
if(is.null(oce_data@metadata[[rm]])){
print(paste('Please provide value for Metadata parameter', rm))
rm_val <- readline(paste('Metadata slot, ', rm, ': '))
oce_data <- oceSetMetadata(oce_data, name = rm , value = rm_val, note = NULL)
}
# TODO : add possibility of value existing as 'unknown or some other placeholder which should be overwritten
}
}else{
# if metadata names and values are provided as list
for(rm in names(metadata)){
rm_val <- metadata[[rm]]
oce_data <- oceSetMetadata(oce_data, name = rm , value = rm_val, note = NULL)
}
}
return(oce_data)
}
#' Add Year/ month/ day hour:minute:second information
#'
#' Calculate and record calendar dates for vpr data from day-of-year, hour, and time (in milliseconds) info.
#' Will also add 'time_hr' parameter if not already present.
#'
#' @param data VPR data frame from \code{\link{vpr_ctdroi_merge}}
#' @param year Year of data collection
#' @param offset time offset in hours between VPR CPU and processed data times (optional)
#'
#' @return a VPR data frame with complete date/time information in a new column named 'ymdhms'
#'
#' @examples
#' year <- 2019
#' data('ctd_roi_merge')
#' dat <- vpr_ctd_ymd(ctd_roi_merge, year)
#'
#'
#' @export
vpr_ctd_ymd <- function(data, year, offset){
# avoid CRAN notes
. <- time_ms <- NA
d <- grep(names(data), pattern = 'time_hr')
if(length(d) == 0){
data <- data %>%
dplyr::mutate(., time_hr = time_ms / 3.6e+06)
}
day_num <- substr(data$day, 2, 4)
hour_num <- substr(data$hour, 2, 3)
ymdd <- as.Date(as.numeric(day_num), origin = paste0(year,'-01-01'))
l_per <- round(lubridate::seconds_to_period(data$time_ms/1000),0)
ymdhms_obj <- as.POSIXct(l_per, origin = ymdd, tz = 'UTC')
if(!missing(offset)){
ymdhms_obj <- ymdhms_obj + offset*3600 # convert hour offset to seconds and adjust times
}
data <- data %>%
dplyr::mutate(., ymdhms = ymdhms_obj)
return(data)
}
#' Bin VPR size data
#'
#' Calculates statistics for VPR measurement data in depth averaged bins for analysis and visualization
#'
#' @param data_all a VPR CTD and measurement dataframe from \code{\link{vpr_ctdroisize_merge}}
#' @param bin_mea Numerical value representing size of depth bins over which data will be combined, unit is metres, typical values range from 1 - 5
#'
#' @return a dataframe of binned VPR size data statistics including number of observations, median, interquartile ranges, salinity and pressure, useful for making boxplots
#'
#' @examples
#'
#' data('size_df_f')
#' vpr_size_bin(size_df_f, bin_mea = 5)
#'
#' @export
#'
#'
#'
vpr_size_bin <- function(data_all, bin_mea){
#Bin by depth
p <- data_all$pressure
max_pressure <- max(p, na.rm = TRUE)
min_pressure <- min(p, na.rm = TRUE)
x_breaks <- seq(from = floor(min_pressure), to = ceiling(max_pressure), by = bin_mea)
#Get variables of interest using oce bin functions
med <- oce::binApply1D(p, data_all$long_axis_length, xbreaks = x_breaks, median)$result
iqr3 <- oce::binApply1D(p, data_all$long_axis_length, xbreaks = x_breaks, quantile, probs = 0.75)$result
iqr1 <- oce::binApply1D(p, data_all$long_axis_length, xbreaks = x_breaks, quantile, probs = 0.25)$result
n_obs <- oce::binApply1D(p, data_all$long_axis_length, xbreaks = x_breaks, length)$result
temperature <- oce::binApply1D(p, data_all$temperature, xbreaks = x_breaks, mean)$result
  salinity <- oce::binApply1D(p, data_all$salinity, xbreaks = x_breaks, mean)$result
  pressure <- oce::binApply1D(p, data_all$salinity, xbreaks = x_breaks, mean)$xmids #Could be any of the variables computed, but I just went with salinity
if (!(length(pressure) == length(salinity))) {
salinity_mean <- binMean1D(p, data_all$salinity, xbreaks = x_breaks)$result
idx_rm <- which(is.na(salinity_mean))
#informs user where bins were removed due to NAs
#note if a bin is 'NA' typically because there is no valid data in that depth range,
#if you have a lot of NA bins, think about increasing your binSize
print(paste('Removed bins at', pressure[idx_rm]))
lp <- length(pressure)
pressure <- pressure[-idx_rm]
}
  station_id <- unique(data_all$station)
dfs <- data.frame('median' = med, 'IQR1' = iqr1,
'IQR3' = iqr3, 'n_obs' = n_obs,
'temperature' = temperature, 'salinity' = salinity,
'pressure' = pressure)
return(dfs)
}
#' Format CTD and Size data from VPR
#'
#' Format CTD and Meas data frames into combined data frame for analysis and plotting of size data
#'
#' @param data VPR dataframe from \code{\link{vpr_ctdroi_merge}}, with calculated variable sigmaT
#' @param data_mea VPR size data frame from \code{\link{vpr_autoid_read}}
#' @param taxa_of_interest a list of taxa of interest to be included in output dataframe
#'
#' @return A dataframe containing VPR CTD and size data
#'
#' @examples
#'
#' data("ctd_roi_merge")
#' data("roimeas_dat_combine")
#' category_of_interest = 'Calanus'
#'
#'ctd_roi_merge$time_hr <- ctd_roi_merge$time_ms /3.6e+06
#'
#' size_df_f <- vpr_ctdroisize_merge(ctd_roi_merge, data_mea = roimeas_dat_combine,
#' taxa_of_interest = category_of_interest)
#'
#' @export
#'
vpr_ctdroisize_merge <- function(data, data_mea, taxa_of_interest){
# avoid CRAN notes
. <- time_ms <- day <- hour <- roi_ID <- day_hour <- frame_ID <- pressure <- temperature <- salinity <- sigmaT <- fluorescence_mv <- turbidity_mv <- Perimeter <- Area <- width1 <- width2 <- width3 <- short_axis_length <- long_axis_length <- taxa <- NA
data <- data[!duplicated(data$time_ms),]
#get CTD data
data_ctd <- data %>%
dplyr::mutate(., roi_ID = as.character(time_ms)) %>%
dplyr::mutate(., day_hour = paste(day, hour, sep = ".")) %>%
dplyr::mutate(., frame_ID = paste(roi_ID, day_hour, sep = "_")) %>%
dplyr::select(., frame_ID, pressure, temperature, salinity, sigmaT, fluorescence_mv, turbidity_mv)
#get measurement data
data_mea <- data_mea %>%
dplyr::mutate(., roi_ID = as.character(time_ms)) %>%
dplyr::mutate(., frame_ID = paste(roi_ID, day_hour, sep = "_")) %>%
dplyr::select(., -Perimeter, -Area, -width1, -width2, -width3, -short_axis_length)
#combine measurement and ctd data
data_all <- right_join(data_ctd, data_mea) %>%
dplyr::filter(., !(is.na(pressure))) %>% #There are NAs at the beginning of CAP3.1 (i.e. measurements that are not in the ctd data)
dplyr::mutate(., long_axis_length = as.numeric(long_axis_length)) %>%
dplyr::filter(., taxa %in% taxa_of_interest)
#cut off data below maximum pressure to maintain consistent analysis between stations with varying depths
#data_all <- data_all %>%
#dplyr::filter(., pressure <= max_pressure)
return(data_all)
}
#' Copy VPR images into folders
#'
#' Organize VPR images into folders based on classifications provided by visual plankton
#'
#' @param basepath A file path to your autoid folder where VP data is stored eg. "C:\\\\data\\\\cruise_XXXXXXXXXXXXXXX\\\\autoid\\\\"
#' @param day character string representing numeric day of interest
#' @param hour character string representing hour of interest
#' @param classifier_type character string representing the type of classifier (either 'svm', 'nn' or 'dual') from Visual Plankton
#' @param classifier_name character string representing name of Visual Plankton classifier
#' @param taxa optional list of character strings if you wish to only copy images from specific classification groups
#'
#' @return organized file directory where VPR images are contained with folders, organized by day, hour and classification,
#' inside your basepath/autoid folder
#'
#' @export
vpr_autoid_copy <- function(basepath, day, hour, classifier_type, classifier_name, taxa){
folder_names <- list.files(basepath)
if(!missing(taxa)){
folder_names <- folder_names[folder_names %in% taxa]
}
#check valid folders
if(length(folder_names) < 1){
stop('No valid taxa folders found in basepath!')
}
day_hour <- paste0('d', day, '.h', hour)
if(!missing(classifier_type)){
type_day_hour <- paste0(classifier_type,'aid.', day_hour)
} else{
warning('No classifier information provided, attempting to pull ROIs based only on day/hour, please check there is only one aid file for each category!')
type_day_hour <- day_hour
}
for (i in folder_names) {
#Get name of folder containing .txt files with roi paths within a category
dir_roi <- paste(basepath, i, "/", "aid", sep = "")
#Get names of text files
txt_roi <- list.files(dir_roi)
subtxt <- grep(txt_roi, pattern = type_day_hour, value = TRUE)
txt_roi <- subtxt
if(!missing(classifier_name)){
subtxt2 <- grep(txt_roi, pattern = classifier_name, value = TRUE)
txt_roi <- subtxt2
}
for(ii in txt_roi) {
# setwd(dir_roi)
withr::with_dir(dir_roi, code = {
roi_path_str <- read.table(ii, stringsAsFactors = FALSE)
#Create a new folder for autoid rois
roi_folder <- paste(basepath, i, "\\", ii, "_ROIS", sep = "")
command1 <- paste('mkdir', roi_folder, sep = " ")
shell(command1)
#Copy rois to this directory
for (iii in seq_len(nrow(roi_path_str))) {
dir_tmp <- as.character(roi_path_str[iii,1])
command2 <- paste("copy", dir_tmp, roi_folder, sep = " ")
shell(command2)
print(paste(iii, '/', nrow(roi_path_str),' completed!'))
}
})
}
print(paste(i, 'completed!'))
}
print(paste('Day ', day, ', Hour ', hour, 'completed!'))
}
#'Calculate VPR concentrations
#'
#' Calculates concentrations for each named taxa in dataframe
#'
#' @param data a VPR dataframe as produced by \code{\link{vpr_ctdroi_merge}}
#' @param taxas_list a list of character strings representing taxa present in the station being processed
#' @param station_of_interest The station being processed
#' @param binSize passed to \code{\link{bin_calculate}}, determines size of depth bins over which data is averaged
#' @param imageVolume the volume of VPR images used for calculating concentrations (mm^3)
#'
#' @examples
#'
#' data('ctd_roi_merge')
#' ctd_roi_merge$time_hr <- ctd_roi_merge$time_ms /3.6e+06
#'
#' taxas_list <- c('Calanus', 'krill')
#' binSize <- 5
#' station_of_interest <- 'test'
#' imageVolume <- 83663
#'
#' taxa_conc_n <- vpr_roi_concentration(ctd_roi_merge, taxas_list,
#' station_of_interest, binSize, imageVolume)
#'
#'@export
#'
#'
vpr_roi_concentration <- function(data, taxas_list, station_of_interest, binSize, imageVolume){
# avoid CRAN notes
. <- NA
# check that taxa exist for this station
taxa_in_data <- names(data) %in% taxas_list
valid_taxa <- names(data)[taxa_in_data == TRUE]
# calculate concentrations
conc_dat <- list()
for ( ii in seq_len(length(valid_taxa))){
conc_dat[[ii]] <- concentration_category(data, valid_taxa[ii], binSize, imageVolume) %>%
dplyr::mutate(., taxa = valid_taxa[ii])
}
names(conc_dat) <- valid_taxa
taxa_conc <- do.call(rbind, conc_dat)
taxa_conc_n <- taxa_conc %>%
dplyr::mutate(., station = station_of_interest)
return(taxa_conc_n)
}
#' Binned concentrations
#'
#' This function produces depth binned concentrations for a specified taxa. Similar to \code{\link{bin_cast}} but calculates concentrations for only one taxa.
#' Used inside \code{\link{vpr_roi_concentration}}
#'
#'
#' @param data dataframe produced by processing internal to vpr_roi_concentration
#' @param taxa name of taxa isolated
#' @param binSize passed to \code{\link{bin_calculate}}, determines size of depth bins over which data is averaged
#' @param imageVolume the volume of VPR images used for calculating concentrations (mm^3)
#' @param rev Logical value defining direction of binning, FALSE - bins will be
#' calculated from surface to bottom, TRUE- bins will be calculated bottom to
#' surface
#'
#' @details Image volume calculations can change based on optical setting of VPR as well as autodeck setting used to process images
#' For IML2018051 (S2) image volume was calculated as 108155 mm^3 by seascan (6.6 cubic inches)
#' For COR2019002 S2 image volume was calculated as 83663 mm^3 and S3 image volume was calculated as 366082 mm^3
#'
#'
#' @author E. Chisholm
#'
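#' @examples
#' \dontrun{
#' # Minimal sketch (not run): assumes the packaged 'ctd_roi_merge' example data,
#' # which contains a 'Calanus' count column
#' data('ctd_roi_merge')
#' ctd_roi_merge$time_hr <- ctd_roi_merge$time_ms / 3.6e+06
#' calanus_conc <- concentration_category(ctd_roi_merge, taxa = 'Calanus',
#'                                        binSize = 5, imageVolume = 83663)
#' }
#'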
#' @export
concentration_category <- function(data, taxa, binSize, imageVolume, rev = FALSE){
. <- NA # avoid CRAN notes
# remove other data rows #ADDED BY KS, DAY HOUR CHANGED TO DAY, HOUR
nontaxa <-
c(
'time_ms',
'conductivity',
'temperature',
'pressure',
'salinity',
'sigmaT',
'fluor_ref',
'fluorescence_mv',
'turbidity_ref',
'turbidity_mv',
'altitude_NA',
'day',
'hour',
'station',
'time_hr',
'roi',
'depth'
)
dt <- data %>%
dplyr::select(., nontaxa, taxa)
# get n_roi of only one taxa
names(dt) <-
gsub(names(dt), pattern = taxa, replacement = 'n_roi')
# format into oce ctd
ctd_roi_oce <- vpr_oce_create(dt)
# bin data
final <- bin_cast(ctd_roi_oce = ctd_roi_oce, imageVolume = imageVolume, binSize = binSize, rev = rev)
return(final)
}
#' Bin vpr data
#'
#' Formats \code{oce} style VPR data into depth averaged bins using \code{\link{ctd_cast}} and \code{\link{bin_calculate}}
#' This function is used inside \code{\link{concentration_category}}
#'
#'
#' @param ctd_roi_oce \code{oce} ctd format VPR data from \code{\link{vpr_oce_create}}
#' @param binSize passed to \code{\link{bin_calculate}}, determines size of depth bins over which data is averaged
#' @param imageVolume the volume of VPR images used for calculating concentrations (mm^3)
#' @param rev logical value,passed to \code{\link{bin_calculate}} if TRUE, binning will begin at bottom of each cast,
#' this controls data loss due to uneven binning over depth. If bins begin at
#' bottom, small amounts of data may be lost at the surface of each cast, if
#' binning begins at surface (rev = FALSE), small amounts of data may be lost
#' at bottom of each cast
#'
#' @details Image volume calculations can change based on optical setting of VPR as well as autodeck setting used to process images
#' For IML2018051 (S2) image volume was calculated as 108155 mm^3 by seascan (6.6 cubic inches)
#' For COR2019002 S2 image volume was calculated as 83663 mm^3 and S3 image volume was calculated as 366082 mm^3
#'
#'
#'@return A dataframe of depth averaged bins of VPR data over an entire cast with calculated concentration values
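#' @examples
#' \dontrun{
#' # Minimal sketch (not run): bin Calanus counts from the packaged example data;
#' # the rename to 'n_roi' mirrors what concentration_category() does internally
#' data('ctd_roi_merge')
#' ctd_roi_merge$time_hr <- ctd_roi_merge$time_ms / 3.6e+06
#' dt <- ctd_roi_merge
#' names(dt) <- gsub(names(dt), pattern = 'Calanus', replacement = 'n_roi')
#' oce_dat <- vpr_oce_create(dt)
#' vpr_depth_bin <- bin_cast(oce_dat, imageVolume = 83663, binSize = 5)
#' }
#'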
#' @export
#'
#'
bin_cast <- function(ctd_roi_oce, imageVolume, binSize, rev = FALSE){
. <- time_hr <- conc_m3 <- NA
#find upcasts
upcast <- ctd_cast(data = ctd_roi_oce, cast_direction = 'ascending', data_type = 'df')
upcast2 <- lapply(X = upcast, FUN = bin_calculate, binSize = binSize, imageVolume = imageVolume, rev = rev)
upcast_df <- do.call(rbind, upcast2)
#find downcasts
downcast <- ctd_cast(ctd_roi_oce, cast_direction = "descending", data_type = "df")
downcast2 <- lapply(X = downcast, FUN = bin_calculate, binSize = binSize, imageVolume = imageVolume, rev = rev)
downcast_df <- do.call(rbind, downcast2)
#combine_data in bins
vpr_depth_bin <- rbind(upcast_df, downcast_df)
vpr_depth_bin <- data.frame(vpr_depth_bin)
#Remove infinite concentrations (why do these occur again?)
vpr_depth_bin <- vpr_depth_bin %>%
# dplyr::mutate(., time_hr = time_hr - min(time_hr)) %>% # this is potentially creating issues where time is not aligned in plots
dplyr::filter(., is.finite(conc_m3))
return(vpr_depth_bin)
}
#' Create ctd oce object with vpr data
#'
#' Formats VPR data frame into \code{oce} format CTD object
#'
#' @author E. Chisholm
#'
#' @param data data frame of vpr data with variable names \itemize{'time_ms', 'fluorescence_mv', 'turbidity_mv', 'n_roi', 'sigmaT'}
#'
#' @examples
#' data('ctd_roi_merge')
#' oce_dat <- vpr_oce_create(ctd_roi_merge)
#'
#' @export
vpr_oce_create <- function(data){
# create oce objects
ctd_roi_oce <- oce::as.ctd(data)
otherVars<- c('time_ms', 'fluorescence_mv', 'turbidity_mv', 'n_roi', 'sigmaT', 'depth', 'time_hr') # TODO edit to avoid hard coding variable names
for ( o in otherVars){
eval(parse(text = paste0("ctd_roi_oce <- oce::oceSetData(ctd_roi_oce, name = '",o,"', value = data$",o,")")))
}
return(ctd_roi_oce)
}
#' Read and format CTD VPR data
#'
#' Acts as a wrapper for \code{\link{ctd_df_cols}}
#'
#' Reads CTD data and adds day, hour, and station information.
#' Calculates sigma T and depth variables from existing CTD data to supplement raw data.
#' If there are multiple hours of CTD data, combines them into single dataframe.
#'
#' **WARNING** \code{\link{ctd_df_cols}} is hard coded to accept a specific
#' order of CTD data columns. The names and values in these columns can change
#' based on the specific instrument and should be updated/confirmed before processing data
#' from a new VPR.
#'
#' @author E. Chisholm & K. Sorochan
#'
#'
#' @param ctd_files full file paths to vpr ctd \code{.dat} files
#' @param station_of_interest VPR station name
#' @param day Day of interest, if not provided will be pulled from file path
#' @param hour Hour of interest, if not provided will be pulled from file path
#' @param col_list Optional list of CTD data column names
#'
#' @examples
#'
#' station_of_interest <- 'test'
#'
#' ctd_files <- system.file("extdata/COR2019002/rois/vpr5/d222", "h03ctd.dat",
#' package = "vprr", mustWork = TRUE)
#'
#' ctd_dat_combine <- vpr_ctd_read(ctd_files, station_of_interest)
#'
#' @export
vpr_ctd_read <- function(ctd_files, station_of_interest, day, hour, col_list){
# avoid CRAN notes
. <- NA
if(length(ctd_files) == 0){
stop('No CTD files provided!')
}
ctd_dat <- list()
for (i in seq_len(length(ctd_files))){
if(missing(day)){
day_id <- unlist(vpr_day(ctd_files[i]))
}else{
day_id <- day
}
if(missing(hour)){
hour_id <- unlist(vpr_hour(ctd_files[i]))
}else{
hour_id <- hour
}
station_id <- station_of_interest
if(missing(col_list)){
ctd_dat_tmp <- ctd_df_cols(ctd_files[i])
}else{
ctd_dat_tmp <- ctd_df_cols(ctd_files[i], col_list)
}
ctd_dat[[i]] <- data.frame(ctd_dat_tmp,
day = day_id,
hour = hour_id,
station = station_id,
stringsAsFactors = FALSE)
}
# combine ctd dat
ctd_dat_combine <- do.call(rbind, ctd_dat)
# add calculated vars as default
# sigma t, depth
ctd_dat_combine <- ctd_dat_combine %>%
dplyr::mutate(., sigmaT = oce::swSigmaT(
ctd_dat_combine$salinity,
ctd_dat_combine$temperature,
ctd_dat_combine$pressure
)) %>%
dplyr::mutate(., depth = oce::swDepth(ctd_dat_combine$pressure)) # note that default latitude is used (45)
return(ctd_dat_combine)
}
#'Merge CTD and ROI data from VPR
#'
#'Combines CTD data (time, hydrographic parameters), with ROI information
#'(identification number) into single dataframe, aligning ROI identification
#'numbers and taxa classifications with time and hydrographic parameters
#'
#'@author E. Chisholm & K. Sorochan
#'
#'@param ctd_dat_combine a CTD dataframe from VPR processing from \code{\link{vpr_ctd_read}}
#'@param roi_dat_combine a data frame of roi aid data from \code{\link{vpr_autoid_read}}
#'
#'
#' @examples
#' data('ctd_dat_combine')
#' data('roi_dat_combine')
#'
#' ctd_roi_merge <- vpr_ctdroi_merge(ctd_dat_combine, roi_dat_combine)
#'@export
#'
vpr_ctdroi_merge <- function(ctd_dat_combine, roi_dat_combine){
# avoid CRAN notes
. <- roi <- NA
# First subset ctd data by roi id
ctd_time <- ctd_dat_combine$time_ms
roi_time <- as.numeric(roi_dat_combine$time_ms)
roi_index <- which(ctd_time %in% roi_time)
ctd_subset <- data.frame(ctd_dat_combine[roi_index, ])
# Get total number of rois per frame
taxas <- colnames(roi_dat_combine)[!(colnames(roi_dat_combine) %in% c('time_ms', 'roi'))]
taxa_col_id <- which(colnames(roi_dat_combine) %in% taxas)
taxa_subset <- roi_dat_combine[,taxa_col_id]
n_roi_total <- base::rowSums(taxa_subset)
roi_dat_2 <- data.frame(roi_dat_combine, n_roi_total)
# Combine subsetted CTD and roi data
ctd_subset_roi <- full_join(ctd_subset, roi_dat_2)
# combine subsetted roi data and all CTD data such that frames with zero rois are included
ctd_roi_merge <- ctd_subset_roi %>%
dplyr::right_join(., ctd_dat_combine)
ctd_roi_merge[is.na(ctd_roi_merge)] <- 0
ctd_roi_merge <- ctd_roi_merge %>%
dplyr::mutate(., roi = ifelse(roi == 0, NA, roi))
return (ctd_roi_merge)
}
#'Read VPR aid files
#'
#'Read aid text files containing ROI string information or measurement data and output as a dataframe
#'
#'Only outputs either ROI string information OR measurement data
#'
#'
#' @author E. Chisholm & K. Sorochan
#'
#'@param file_list_aid a list object of aid text files, containing roi strings. Output from matlab Visual Plankton software.
#'@param file_list_aidmeas a list object of aidmea text files, containing ROI measurements. Output from matlab Visual Plankton software.
#'@param export a character string specifying which type of data to output, either 'aid' (roi strings) or 'aidmeas' (measurement data)
#'@param station_of_interest Station information to be added to ROI data output, use NA if irrelevant
#'@param opticalSetting Optional argument specifying VPR optical setting. If provided will be used to convert size data into mm from pixels, if missing size data will be output in pixels
#'@param warn Logical, FALSE silences size data unit warnings
#'
#'@note Full paths to each file should be specified
#'
#' @examples
#'
#' station_of_interest <- 'test'
#' dayhour <- c('d222.h03', 'd222.h04')
#'
#' # VPR OPTICAL SETTING (S0, S1, S2 OR S3)
#' opticalSetting <- "S2"
#' imageVolume <- 83663 #mm^3
#'
#' auto_id_folder <- system.file('extdata/COR2019002/autoid/', package = 'vprr', mustWork = TRUE)
#' auto_id_path <- list.files(paste0(auto_id_folder, "/"), full.names = TRUE)
#'
#' # Path to aid for each taxa
#' aid_path <- paste0(auto_id_path, '/aid/')
#' # Path to mea for each taxa
#' aidmea_path <- paste0(auto_id_path, '/aidmea/')
#'
#' # AUTO ID FILES
#' aid_file_list <- list()
#' aidmea_file_list <- list()
#' for (i in 1:length(dayhour)) {
#' aid_file_list[[i]] <-
#' list.files(aid_path, pattern = dayhour[[i]], full.names = TRUE)
#' # SIZE DATA FILES
#' aidmea_file_list[[i]] <-
#' list.files(aidmea_path, pattern = dayhour[[i]], full.names = TRUE)
#' }
#'
#' aid_file_list_all <- unlist(aid_file_list)
#' aidmea_file_list_all <- unlist(aidmea_file_list)
#'
#' # ROIs
#' roi_dat_combine <-
#' vpr_autoid_read(
#' file_list_aid = aid_file_list_all,
#' file_list_aidmeas = aidmea_file_list_all,
#' export = 'aid',
#' station_of_interest = station_of_interest,
#' opticalSetting = opticalSetting,
#' warn = FALSE
#' )
#'
#' # MEASUREMENTS
#' roimeas_dat_combine <-
#' vpr_autoid_read(
#' file_list_aid = aid_file_list_all,
#' file_list_aidmeas = aidmea_file_list_all,
#' export = 'aidmeas',
#' station_of_interest = station_of_interest,
#' opticalSetting = opticalSetting,
#' warn = FALSE
#' )
#'
#' @export
vpr_autoid_read <- function(file_list_aid, file_list_aidmeas, export, station_of_interest, opticalSetting, warn = TRUE){
# set-up for only processing aid data
if(missing(file_list_aidmeas)){export <- 'aid'}
# avoid CRAN notes
. <- roi <- taxa <- n_roi <- day_hour <- Perimeter <- Area <- width1 <- width2 <- width3 <- short_axis_length <- long_axis_length <- NA
if( export == 'aidmeas'){
if (missing(opticalSetting)){
opticalSetting <- NA
if(warn != FALSE){
warning('No optical setting provided, size data output in pixels!!!')
}
}
}
# aid
col_names <- "roi"
dat <- list()
for(i in seq_len(length(file_list_aid))) {
data_tmp <- read.table(file = file_list_aid[i], stringsAsFactors = FALSE, col.names = col_names)
data_tmp$roi <- unlist(vpr_roi(data_tmp$roi))
data_tmp$taxa <- unlist(unique(vpr_category(file_list_aid[i])[[1]]))
day <- unlist(vpr_day(file_list_aid[i]))
hour <- unlist(vpr_hour(file_list_aid[i]))
if(length(day) >1 | length(hour) >1){
stop('Problem detecting day/hour values!')
}
data_tmp$day_hour <- paste(day, hour, sep = ".")
dat[[i]]<- data_tmp
}
dat_combine_aid <- do.call(rbind, dat)
#browser()
remove(dat, data_tmp, day, hour)
# format
dat_combine_aid$id <- row.names(dat_combine_aid)
# Get tabulated rois per time by taxa
roi_df <- dat_combine_aid %>%
dplyr::mutate(., roi = substr(roi, 1, 8)) %>%
dplyr::group_by(., taxa, roi) %>%
dplyr::summarise(., n_roi = dplyr::n(), .groups = NULL) %>%
tidyr::spread(., taxa, n_roi) %>%
dplyr::mutate(., time_ms = as.numeric(roi))
roi_dat <- data.frame(roi_df)
roi_dat[is.na(roi_dat)] <- 0
# aidmeas
# TODO: update code so it can run without measurement input
if(export == 'aidmeas'){
dat <- list()
col_names <- c('Perimeter','Area','width1','width2','width3','short_axis_length','long_axis_length')
for(i in seq_len(length(file_list_aidmeas))) {
data_tmp <- read.table(file_list_aidmeas[i], stringsAsFactors = FALSE, col.names = col_names)
if(!is.na(opticalSetting)){
data_tmp <- px_to_mm(data_tmp, opticalSetting)
}
data_tmp$taxa <- unlist(vpr_category(file_list_aidmeas[i]))
day <- unlist(vpr_day(file_list_aidmeas[i]))
hour <- unlist(vpr_hour(file_list_aidmeas[i]))
data_tmp$day_hour <- paste(day, hour, sep = ".")
dat[[i]]<- data_tmp
}
dat_combine_aidmeas <- do.call(rbind, dat)
# remove(dat, data_tmp, day, hour)
dat_combine_aidmeas$id <- row.names(dat_combine_aidmeas)
# Get roi measurement data frame
dat_combine_selected <- dat_combine_aidmeas %>%
dplyr::select(., taxa, day_hour, id, Perimeter, Area, width1, width2, width3, short_axis_length, long_axis_length) #added all measurement columns EC Jan 28 2020
roimeas_dat_combine <- right_join(dat_combine_aid, dat_combine_selected, by = c('taxa', 'day_hour', 'id') ) %>%
dplyr::select(., - id) %>%
dplyr::mutate(., station = station_of_interest) %>%
dplyr::mutate(., long_axis_length = as.numeric(long_axis_length)) %>%
dplyr::mutate(., time_ms = as.numeric(substr(roi, 1, 8)))
} # end aidmeas section
# browser()
# export
if (export == 'aid'){
return(roi_dat)
}
if (export == 'aidmeas'){
return(roimeas_dat_combine)
}
}
#'Get conversion factor for pixels to mm for roi measurements
#'
#'Used internally
#'
#' @details converts pixels to mm using conversion factor specific to optical setting
#'
#' @param x an aidmea data frame (standard) to be converted into mm from pixels
#' @param opticalSetting the VPR setting determining the field of view and conversion factor between mm and pixels
#'
#' @details Options for opticalSetting are 'S0', 'S1', 'S2', or 'S3'
#'
#' @export
px_to_mm <- function(x, opticalSetting) {
#find correct conversion factor based on VPR optical setting
if (opticalSetting == 'S0'){
#px to mm conversion factor
frame_mm <- 7
mm_px <- frame_mm/1024 #1024 is resolution of VPR images (p.4 DAVPR manual)
}
if (opticalSetting == 'S1'){
#px to mm conversion factor
frame_mm <- 14
mm_px <- frame_mm/1024 #1024 is resolution of VPR images (p.4 DAVPR manual)
}
if (opticalSetting == 'S2'){
#px to mm conversion factor
frame_mm <- 24
mm_px <- frame_mm/1024 #1024 is resolution of VPR images (p.4 DAVPR manual)
}
if (opticalSetting == 'S3'){
#px to mm conversion factor
frame_mm <- 48
mm_px <- frame_mm/1024 #1024 is resolution of VPR images (p.4 DAVPR manual)
}
#original default to S2 setting
#mm_px <- 24/1024 #mm/pixel
mm2_px2 <- (mm_px)^2
x[, c(1, 3:7)] <- x[, c(1, 3:7)]*mm_px
x[,2] <- x[,2]*mm2_px2
return(x)
}
#'Read CTD data (SBE49) and Fluorometer data from CTD- VPR package
#'
#'Internal use \code{\link{vpr_ctd_read}}
#'
#'**WARNING** This is hard coded to accept a specific
#' order of CTD data columns. The names and values in these columns can change
#' based on the specific instrument and should be updated before processing data
#' from a new VPR.
#'
#'Text file format .dat file
#'Outputs ctd dataframe with variables time_ms, conductivity, temperature,
#'pressure, salinity, fluor_ref, fluorescence_mv, turbidity_ref,
#'turbidity_mv, altitude_NA
#' @author K. Sorochan, E. Chisholm
#'
#'
#'
#'@param x full filename (ctd .dat file)
#' @param col_list list of CTD data column names
#'
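#' @examples
#' # Minimal sketch: read the packaged example CTD file using the default column names
#' ctd_file <- system.file("extdata/COR2019002/rois/vpr5/d222", "h03ctd.dat",
#'                         package = "vprr", mustWork = TRUE)
#' ctd_dat <- ctd_df_cols(ctd_file)
#'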
#'@export
ctd_df_cols <- function(x, col_list) {
if(missing(col_list)){
col_list <- c("time_ms", "conductivity", "temperature", "pressure", "salinity", "fluor_ref", "fluorescence_mv",
"turbidity_ref", "turbidity_mv", "altitude_NA")
warning('CTD data columns named based on defaults!')
}
data <- read.table(textConnection(gsub(":", ",", readLines(x))), sep = ",")
time <- data[,1]
time <- as.numeric(gsub("[^[:digit:]]", "", time))
data2 <- cbind(time, data[,-1])
colnames(data2) <- col_list
data2 <- data2[!duplicated(data2), ]
data2
}
#' Normalize a matrix
#'
#' Divides each element of a matrix by its column total
#'
#' Make sure to remove total rows before using with VP data
#'
#' @note used internally for visualization of confusion matrices
#'
#' @param mat a matrix to normalize
#'
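#' @examples
#' \dontrun{
#' # Minimal sketch (internal helper, not run): divide each column of a toy
#' # confusion matrix by its column total
#' m <- matrix(c(8, 2, 1, 9), nrow = 2)
#' normalize_matrix(m)
#' }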
#'
normalize_matrix <- function(mat){
nm <- matrix(nrow = dim(mat)[1], ncol = dim(mat)[2])
for(i in seq_len(length(nm[,1]))){ # 1:length(nm[,1])
for (j in seq_len(length(nm[1,]))){ # 1:length(nm[1,])
nm[i,j] <- mat[i,j]/colSums(mat)[j]
}
}
return(nm)
}
#' Get size data from idsize files
#'
#'
#' useful for getting size distribution of known rois from each taxa. gathers
#' size information from idsize text files produced when training a new
#' classifier in VP (Visual Plankton)
#'
#'
#'@param directory cruise directory eg. 'C:/data/IML2018051/'
#'@param taxa list of character elements containing taxa of interest
#'@param opticalSetting VPR optical setting determining conversion between pixels and millimetres (options are 'S0', 'S1', 'S2', or 'S3')
#'
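#' @examples
#' \dontrun{
#' # Minimal sketch (not run): assumes a hypothetical cruise directory containing
#' # an 'idsize' folder with 'mea.' size files from classifier training
#' size_dat <- vpr_trrois_size('C:/data/IML2018051', taxa = 'Calanus',
#'                             opticalSetting = 'S2')
#' }
#'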
#' @export
vpr_trrois_size <- function(directory, taxa, opticalSetting){
#loop for each taxa of interest
for (t in taxa){
#check
# g <- grep(taxa_names, pattern = t)
# if (length(g) == 0){
# stop(paste('Taxa of interest, ', t, 'not found in data provided!'))
# }
#
size_file <- list.files(path = paste0(directory,'/idsize'), pattern = paste0('mea.', t))
#roi_file <- list.files(path = paste0(directory,'/idsize'), pattern = paste0('hid.v0.',t))
#Get info
#roi_ID <- read.table(paste0(directory,'/idsize/', roi_file), stringsAsFactors = FALSE)
auto_measure_px <- read.table(paste0(directory, '/idsize/', size_file), stringsAsFactors = FALSE, col.names = c('Perimeter','Area','width1','width2','width3','short_axis_length','long_axis_length'))
eval(parse(text = paste0('auto_measure_', t,'_mm <- px_to_mm(auto_measure_px, opticalSetting )'))) #Convert to mm
}
#returns a data frame with size information and named columns
eval(parse(text = paste0('return(auto_measure_', t,'_mm)')))
}
#' Get bin averages for VPR and CTD data
#'
#' Bins CTD data for an individual cast to avoid depth averaging across tow-yo's
#'
#' @author E. Chisholm, K. Sorochan
#'
#'
#' @param data ctd data frame object including scan, salinity, temperature,
#' depth, conductivity, time, fluor_ref, turbidity_ref, turbidity_mv,
#' altitude, cast_id, n_roi
#' @param binSize the height of bins over which to average, default is 1 metre
#' @param imageVolume the volume of VPR images used for calculating concentrations (mm^3)
#' @param rev logical value, if TRUE, binning will begin at bottom of each cast,
#' this controls data loss due to uneven binning over depth. If bins begin at
#' bottom, small amounts of data may be lost at the surface of each cast, if
#' binning begins at surface (rev = FALSE), small amounts of data may be lost
#' at bottom of each cast
#'
#'
#' @details Image volume calculations can change based on optical setting of VPR as well as autodeck setting used to process images
#' For IML2018051 (S2) image volume was calculated as 108155 mm^3 by seascan (6.6 cubic inches)
#' For COR2019002 S2 image volume was calculated as 83663 mm^3 and S3 image volume was calculated as 366082 mm^3.
#' Used internally ( \code{\link{bin_cast}} ) after \code{\link{ctd_cast}} on a single ascending or descending section of VPR cast
#'
#'
#'
#' @note binSize should be carefully considered for best results
#' @note Depth is used for calculations! Please ensure depth is included in data frame using \link[oce]{swDepth}
#'
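#' @examples
#' \dontrun{
#' # Minimal sketch (not run): bin a single ascending cast of the packaged example
#' # data; the rename to 'n_roi' mirrors what concentration_category() does internally
#' data('ctd_roi_merge')
#' ctd_roi_merge$time_hr <- ctd_roi_merge$time_ms / 3.6e+06
#' dt <- ctd_roi_merge
#' names(dt) <- gsub(names(dt), pattern = 'Calanus', replacement = 'n_roi')
#' oce_dat <- vpr_oce_create(dt)
#' casts <- ctd_cast(oce_dat, cast_direction = 'ascending', data_type = 'df')
#' binned <- bin_calculate(casts[[1]], binSize = 5, imageVolume = 83663)
#' }
#'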
#' @export
#'
bin_calculate <- function(data, binSize = 1, imageVolume, rev = FALSE){
# browser()
  cast_id <- unique(data$cast_id)
max_cast_depth <- max(data$depth) # ADDED BY KS TO IDENTIFY EACH TOWYO CHUNK
p <- data$depth
max_depth <- max(p, na.rm = TRUE)
min_depth <- min(p, na.rm = TRUE)
x_breaks <- seq(from = floor(min_depth), to = ceiling(max_depth), by = binSize)
if (rev == TRUE){
x_breaks <- seq(from = ceiling(max_depth), to = floor(min_depth), by = - binSize) #reversed by KS
}
# error when cast is too small
if(max_depth - min_depth < binSize){
warning(paste('Cast', cast_id, 'is too small to calculate information for bins of size', binSize))
data.frame(NULL)
}else{
# Get variables of interest using oce bin functions
min_time_s <- oce::binApply1D(p, data$time_ms/1000, xbreaks = x_breaks, min)$result
max_time_s <- oce::binApply1D(p, data$time_ms/1000, xbreaks = x_breaks, max)$result
min_depth <- oce::binApply1D(p, data$depth, xbreaks = x_breaks, min)$result
max_depth <- oce::binApply1D(p, data$depth, xbreaks = x_breaks, max)$result
n_roi_bin <- oce::binApply1D(p, data$n_roi, xbreaks = x_breaks, sum)$result
temperature <- oce::binApply1D(p, data$temperature, xbreaks = x_breaks, mean)$result
salinity <- oce::binApply1D(p, data$salinity, xbreaks = x_breaks, mean)$result
density <- oce::binApply1D(p, data$sigmaT, xbreaks = x_breaks, mean)$result
fluorescence <- oce::binApply1D(p, data$fluorescence_mv, xbreaks = x_breaks, mean)$result
turbidity <- oce::binApply1D(p, data$turbidity_mv, xbreaks = x_breaks, mean)$result
time_ms <- oce::binApply1D(p, data$time_ms, xbreaks = x_breaks, mean)$result
time_hr <- oce::binApply1D(p, data$time_ms/(1000*3600), xbreaks = x_breaks, mean)$result # update time naming scheme May 2022
if (rev == TRUE){
depth <- rev(oce::binApply1D(p, data$depth, xbreaks = x_breaks, mean)$xmids)
}else{ # simplify?
depth <- oce::binApply1D(p, data$salinity, xbreaks = x_breaks, mean)$xmids
}
# calculates number of frames captured per depth bin by counting number of pressure observations per bin
n_frames <- oce::binApply1D(p, data$depth, xbreaks = x_breaks, length)$result # KS edit 10/9/19
    # WARNING
    # binApply1D does not produce NAs: if a binned depth range contains no data,
    # it will not create an empty or NA placeholder bin, so the result can be
    # shorter than the vector of bin midpoints. Because "depth" here is derived
    # from the midpoint calculation, it is used to test for missing (empty) bins.
    # binMean1D, in contrast, calculates empty bins as NA, so this block finds
    # where those bins would have been and removes the corresponding indices
    # from the depth vector so that all variables have identical lengths.
    if (!(length(depth) == length(salinity))) {
      salinity_mean <- oce::binMean1D(p, data$salinity, xbreaks = x_breaks)$result
idx_rm <- which(is.na(salinity_mean))
# informs user where bins were removed due to NAs
# note if a bin is 'NA' typically because there is no valid data in that depth range,
# if you have a lot of NA bins, think about increasing your binSize
message(paste('Removed bins at', depth[idx_rm]))
lp <- length(depth)
depth <- depth[-idx_rm]
if (length(n_frames) == lp){
n_frames <- n_frames[-idx_rm]
}
}
# make sure n_frames matches the length of other data frame rows
if (length(n_frames) > length(depth)){
n_frames <- n_frames[-length(n_frames)]
}
if( length(n_frames) < length(depth)){
n_frames <- c(n_frames, 0)
}
if (length(n_frames) != length(depth)){
length(n_frames) <- length(depth)
}
# Get derived variables
time_diff_s <- max_time_s - min_time_s
# calculate concentration based on opticalSetting
# "Old way" of calculating concentration assuming constant frame rate of 15 fps
# conc_m3 <- n_roi_bin/((imageVolume/1e09)*(15)*(time_diff_s)) #
# "New way" of calculating concentration by summing volume associated with frames over depth bin
vol_sampled_bin_m3 <- (imageVolume/1e09)*n_frames
conc_m3 <- n_roi_bin/(vol_sampled_bin_m3) # KS edit 10/9/19
depth_diff <- max_depth - min_depth
# Output
data.frame(depth, min_depth, max_depth, depth_diff, min_time_s, max_time_s, time_diff_s,
n_roi_bin, conc_m3,
temperature, salinity, density, fluorescence, turbidity,
time_hr, n_frames, vol_sampled_bin_m3, time_ms,
towyo = cast_id, max_cast_depth) # MAX CAST PRESSURE ADDED BY KS
} # end else loop for size error
}
#' Isolate ascending or descending section of ctd cast
#'
#' This is an internal step required to bin data
#'
#'
#' @author K Sorochan, E Chisholm
#'
#' @param data an \code{oce} ctd object
#' @param cast_direction 'ascending' or 'descending' depending on desired section
#' @param data_type specify 'oce' or 'df' depending on class of desired output
#' @param cutoff Argument passed to \link[oce]{ctdFindProfiles}
#' @param breaks Argument passed to \link[oce]{ctdFindProfiles}
#' @return Outputs either data frame or oce ctd object
#'
#'
#'
#' @note \code{\link{ctdFindProfiles}} arguments for \code{minLength} and \code{cutOff} were updated to
#' prevent losing data (EC 2019/07/23)
#'
#'
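#' @examples
#' \dontrun{
#' # Minimal sketch (not run): split the packaged example data into ascending profiles
#' data('ctd_roi_merge')
#' oce_dat <- vpr_oce_create(ctd_roi_merge)
#' upcasts <- ctd_cast(oce_dat, cast_direction = 'ascending', data_type = 'df')
#' }
#'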
#' @export
#'
ctd_cast <- function(data, cast_direction = 'ascending', data_type, cutoff = 0.1, breaks = NULL) {
cast_updated <- list()
if (is.null(breaks)){
cast <- oce::ctdFindProfiles(data, direction = cast_direction, minLength = 0, cutoff = cutoff)
}else{
cast <- oce::ctdFindProfiles(data, breaks = breaks, direction = cast_direction)
}
# append data with 'cast_id' to be able to identify/ combine data frames
for(i in seq_len(length(cast))) {
data <- cast[[i]]
n_obs <- length(data@data$pressure)
cast_number <- i
cast_id <- paste(cast_direction, i, sep = "_")
cast_id_vec <- rep(cast_id, n_obs)
cast_updated[[i]] <- oce::oceSetData(data, "cast_id", cast_id_vec, "no_unit")
}
  # output in oce format
  if(data_type == "oce") {
    return(cast_updated)
  }
  # output in dataframe
  if(data_type == "df") {
    getDf <- function(x) {
      data.frame(x@data, stringsAsFactors = FALSE)
    }
    return(lapply(cast_updated, getDf))
  }
}
#' Find day & hour info to match each station of interest for processing
#'
#'
#' @author E. Chisholm and K. Sorochan
#'
#' @param stations a vector of character values naming stations of interest
#' @param file CSV file containing 'day', 'hour', 'station', and 'day_hour' columns
#'
#' @return Vector of day-hour combinations corresponding to stations of interest
#'
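#' @examples
#' \dontrun{
#' # Minimal sketch (not run): 'station_info.csv' is a hypothetical file containing
#' # 'station' and 'day_hour' columns
#' dayhour <- vpr_dayhour(stations = c('Example_station'), file = 'station_info.csv')
#' }
#'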
#' @export
vpr_dayhour <- function(stations, file) {
# avoid CRAN notes
. <- station <- NA
#####DEFINE FOR MULTIPLE STATIONS
stations_of_interest <- stations
#USE STATION LIST WITH CORRESPONDING DAY AND HOUR TO MATCH AREA OF INTEREST
#match hour and day to station
station_info <- read.csv(file, stringsAsFactors = FALSE)
soi_info <- station_info %>%
dplyr::filter(., station %in% stations_of_interest)
dayhour <- unique(soi_info$day_hour)
return(dayhour)
}
#' Create a list of ctd files to be read
#'
#' Searches through typical VP directory structure
#'
#' Use with caution
#'
#'
#' @param castdir root directory for ctd cast files
#' @param cruise cruise name (exactly as in directory structure)
#' @param day_hour vector of day-hour combinations (e.g, dXXX.hXX)
#' @author E. Chisholm and K. Sorochan
#'
#' @return vector of ctd file paths matching days-hour combinations provided
#'
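#' @examples
#' \dontrun{
#' # Minimal sketch (not run): assumes a hypothetical cast directory following the
#' # typical VP structure (folders matching 'VPR.<cruise>*')
#' ctd_files <- vpr_ctd_files(castdir = 'C:/data/cruise_COR2019002/rois/',
#'                            cruise = 'COR2019002',
#'                            day_hour = c('d222.h03', 'd222.h04'))
#' }
#'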
#' @export
vpr_ctd_files <- function(castdir, cruise, day_hour) {
# ADDED BY KS
vpr_cast_folders <- list.files(castdir, pattern = '')
# find right vpr cast -- subset by tow number #
# folder <- grep(vpr_cast_folders, pattern = paste0('AD_', vprnum,'.VPR.', cruise,'*'), value = T) #ADDED BY KS, AD PATTERN IS NOT CONSISTENT?
# not subset by tow number
folder <- grep(vpr_cast_folders, pattern = paste0('VPR.', cruise,'*'), value = TRUE) # removed leading period before VPR to fit file naming scheme in COR2019002
if (length(folder) == 0){stop("No CTD files found!")}
folder_path <- paste0(castdir, folder)
# grab all days
full_path <- list.files(folder_path, full.names = TRUE)
# extract for only specific days
ctd_files_all <- list.files(full_path, pattern = '*ctd*', full.names = TRUE)
day_id <- vpr_day(ctd_files_all)
hour_id <- vpr_hour(ctd_files_all)
day_hour_id <- paste(day_id, hour_id, sep = ".")
ctd_files_idx <- which(day_hour_id %in% day_hour)
ctd_files <- ctd_files_all[ctd_files_idx]
return(ctd_files)
}
#' Get roi ids from string
#'
#' @author K Sorochan
#' @param x A string specifying directory and file name of roi
#'
#' @return A string of only the 10 digit roi identifier
#'
#' @examples
#'
#' roi_string <- 'roi.0100000000.tif'
#' vpr_roi(roi_string)
#'
#' @seealso \code{\link{vpr_hour}}, \code{\link{vpr_day}}, \code{\link{vpr_category}}
#' @export
#'
#'
vpr_roi <- function(x) {
m <- gregexpr("\\d{10}", x)
y <- regmatches(x, m)
return(y)
}
#' Get taxa ids from string
#'
#' @author K Sorochan
#'
#' @param x A string specifying the directory of the "taxafolder", containing the taxa id
#'
#' @return A string of only the taxa id
#'
#' @examples
#' taxa_string <- 'C:/data/cruise/autoid/Calanus/d000/h00'
#' vpr_category(taxa_string)
#'
#' @seealso \code{\link{vpr_hour}}, \code{\link{vpr_day}}, \code{\link{vpr_roi}}
#' @export
#'
#'
vpr_category <- function(x) {
# TODO if x is a list
taxa_ids <- c(
"bad_image_blurry",
"bad_image_malfunction",
"bad_image_strobe",
"Calanus",
"chaetognaths",
"ctenophores",
"Echinoderm_larvae",
"krill",
"marine_snow",
"Other",
"small_copepod",
"stick",
"larval_fish",
'other_copepods',
'larval_crab',
'amphipod',
'Metridia',
'Paraeuchaeta',
'cnidarians'
)
for(i in seq_len(length(taxa_ids))) {
taxa_id <- taxa_ids[i]
m_tmp <- gregexpr(taxa_id, x)
if (m_tmp[[1]][1] > 0) {
m <- m_tmp
} else{
# stop('Taxa ID not found! Check internal list of taxa options!')
}
}
y <- regmatches(x, m)
return(y)
}
#' Get day identifier
#'
#' @author K Sorochan
#' @param x A string specifying the directory and file name of the size file
#'
#' @return A string of only the day identifier (i.e., "dXXX")
#'
#' @examples
#' day_string <- 'C:/data/cruise/autoid/Calanus/d000/h00'
#' vpr_day(day_string)
#'
#' @seealso \code{\link{vpr_hour}}, \code{\link{vpr_roi}}, \code{\link{vpr_category}}
#' @export
#'
#'
vpr_day <- function(x) {
m <- gregexpr("[d]+\\d{3}", x)
y <- regmatches(x, m)
return(y)
}
vpr_hour <- function(x) {
#' Get hour identifier
#'
#' @author K Sorochan
#' @param x A string specifying the directory and file name of the size file
#'
#' @return A string of only the hour identifier (i.e., "hXX")
#' @seealso \code{\link{vpr_day}}, \code{\link{vpr_roi}}, \code{\link{vpr_category}}
#'
#' @examples
#' hour_string <- 'C:/data/cruise/autoid/Calanus/d000/h00'
#' vpr_hour(hour_string)
#'
#' @export
#'
#'
m <- gregexpr("[h]+\\d{2}", x)
y <- regmatches(x, m)
return(y)
}
vpr_summary <- function(all_dat, fn, tow = tow, day = day, hour = hour){
#' Data Summary Report
#'
#' Part of VP easy plot processing, prints data summary report to give quantitative, exploratory analysis of data
#'
#' @author E Chisholm
#' @param all_dat data frame containing VPR and CTD data including time_ms,
#' time_hr, conductivity, temperature, pressure, salinity, fluorescence_mv,
#' turbidity_mv, sigmaT
#' @param fn file name to save data summary, if not provided, summary will print to console
#' @param tow VPR tow number
#' @param day julian day
#' @param hour two digit hour (24 hr clock)
#'
#'
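#' @examples
#' \dontrun{
#' # Minimal sketch (not run): 'all_dat' is assumed to be a merged VPR/CTD data
#' # frame that includes an n_roi column; omitting 'fn' prints the report to console
#' vpr_summary(all_dat, tow = 5, day = '222', hour = '03')
#' }
#'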
#' @export
#prints a data summary report, part of VP easyPlot
if(!missing(fn)){sink(fn)} # TODO: update to use withr conventions
cat(' Data Summary Report \n')
cat('Report processed:', as.character(Sys.time()), '\n')
cat('Cast: ', tow, ' Day: ', day, ' Hour: ', hour, '\n')
cat('\n')
cat('\n')
cat(' >>>> Time \n')
cat('Data points: ', length(all_dat$time_ms),'\n')
cat('Range: ', min(all_dat$time_ms),' - ', max(all_dat$time_ms), ' (ms) \n')
cat('Range: ', min(all_dat$time_hr),' - ', max(all_dat$time_hr), ' (hr) \n')
cat('\n')
cat('\n')
cat(' >>>> Conductivity \n')
cat('Data points: ', length(all_dat$conductivity),'\n')
cat('Range: ', min(all_dat$conductivity),' - ', max(all_dat$conductivity), ' \n')
cat('\n')
cat('\n')
cat(' >>>> Temperature \n')
cat('Data points: ', length(all_dat$temperature),'\n')
cat('Range: ', min(all_dat$temperature),' - ', max(all_dat$temperature), ' (c) \n')
  cat('QC: ', sum(all_dat$temperature < 0, na.rm = TRUE), 'points below zero deg c \n')
  cat('QC: ', sum(all_dat$temperature > 10, na.rm = TRUE), 'points above ten deg c \n')
cat('\n')
cat('\n')
cat(' >>>> Pressure \n')
cat('Data points: ', length(all_dat$pressure),'\n')
cat('Range: ', min(all_dat$pressure),' - ', max(all_dat$pressure), ' (db) \n')
  cat('QC: ', sum(all_dat$pressure < 0, na.rm = TRUE), 'below zero db \n')
cat('\n')
cat('\n')
cat(' >>>> Salinity \n')
cat('Data points: ', length(all_dat$salinity),'\n')
cat('Range: ', min(all_dat$salinity),' - ', max(all_dat$salinity), ' (PSU) \n')
  cat('QC: ', sum(all_dat$salinity < 28, na.rm = TRUE), 'points below twenty-eight PSU \n')
  cat('QC: ', sum(all_dat$salinity > 34, na.rm = TRUE), 'points above thirty-four PSU \n')
cat('\n')
cat('\n')
cat(' >>>> Fluorescence \n')
cat('Data points: ', length(all_dat$fluorescence_mv),'\n')
cat('Range: ', min(all_dat$fluorescence_mv),' - ', max(all_dat$fluorescence_mv), ' (mv) \n')
cat('\n')
cat('\n')
cat(' >>>> Turbidity \n')
cat('Data points: ', length(all_dat$turbidity_mv),'\n')
cat('Range: ', min(all_dat$turbidity_mv),' - ', max(all_dat$turbidity_mv), ' (mv) \n')
cat('\n')
cat('\n')
cat(' >>>> ROI count \n')
cat('Data points: ', length(all_dat$n_roi),'\n')
cat('Range: ', min(all_dat$n_roi),' - ', max(all_dat$n_roi), ' (counts) \n')
cat('\n')
cat('\n')
cat(' >>>> Sigma T \n')
cat('Data points: ', length(all_dat$sigmaT),'\n')
cat('Range: ', min(all_dat$sigmaT),' - ', max(all_dat$sigmaT), ' \n')
  cat('QC: ', sum(all_dat$sigmaT < 22, na.rm = TRUE), 'points below twenty-two \n')
  cat('QC: ', sum(all_dat$sigmaT > 28, na.rm = TRUE), 'points above twenty-eight \n')
if(!missing(fn)){sink()}
}
#' INTERNAL USE ONLY
#' quick data frame function from github to insert row inside dat frame
#'
#'
#' @param existingDF data frame
#' @param newrow new row of data
#' @param r index of new row
#'
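#' @examples
#' \dontrun{
#' # Minimal sketch (internal helper, not run): insert a row into a small data frame
#' df <- data.frame(x = 1:3, y = c('a', 'b', 'c'))
#' insertRow(df, newrow = list(9L, 'z'), r = 2)
#' }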
insertRow <- function(existingDF, newrow, r) {
existingDF[seq(r+1,nrow(existingDF)+1),] <- existingDF[seq(r,nrow(existingDF)),]
existingDF[r,] <- newrow
existingDF
}
##check through new aid files and fix errors
#outputs vpr_autoid_check report with any errors
#' Checks manually created aid files for errors
#'
#' Removes any empty aid files after manual reclassification, checks for tow
#' numbers and other metadata to match. Performs check to ensure measurement and
#' ROI files are the same length
#'
#' WARNING: This function will delete empty aid and aidmeas files, permanently changing your directory. Consider making a back up copy before running this function.
#'
#' @author E Chisholm
#'
#' @param basepath basepath to autoid folder eg. C:/data/CRUISENAME/autoid/
#' @param cruise name of cruise which is being checked
#' @param del Logical value, if `TRUE`, empty files will be deleted (see warning), if `FALSE`, files WILL NOT be deleted (they will be listed in output)
#'
#' @return text file (saved in working directory) named CRUISENAME_aid_file_check.txt
#'
#'
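#' @examples
#' \dontrun{
#' # Minimal sketch (not run): assumes a hypothetical autoid folder; writes
#' # 'COR2019002_aid_file_check.txt' to the working directory; del = FALSE lists
#' # (rather than deletes) any empty files found
#' vpr_autoid_check('C:/data/cruise_COR2019002/autoid/', cruise = 'COR2019002',
#'                  del = FALSE)
#' }
#'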
#' @export
#'
#'
vpr_autoid_check <- function(basepath, cruise, del){
on.exit(closeAllConnections()) # make sure text file gets closed
taxa_folders <- list.files(basepath, full.names = TRUE)
withr::with_output_sink(paste0(cruise,'_aid_file_check.txt'), code = {
# sink(paste0(cruise,'_aid_file_check.txt'))
# loop through each taxa
for (i in seq_len(length(taxa_folders))){
path <- taxa_folders[i]
# get all files (aid )
aid_fns <- list.files(file.path(path, 'aid'), full.names = TRUE)
#### EMPTY FILE CHECK
# check for empty files
empty_ind <- list()
for (ii in seq_len(length(aid_fns))){
fn <- readLines(aid_fns[ii])
if(length(fn) == 0){
cat('\n')
cat(aid_fns[ii], '\n')
cat('File is empty, please delete! \n')
cat('\n')
empty_ind[ii] <- TRUE
}else{
empty_ind[ii] <- FALSE
}
}
cat('Empty file check complete for', taxa_folders[i], '\n')
if (del == TRUE){
      # automated deletion of empty files
empty_aids <- aid_fns[empty_ind == TRUE]
# find corresponding aid meas files
# get all files (aidmea )
aidmea_fns <- list.files(file.path(path, 'aidmea'), full.names = TRUE)
empty_aidmeas <- aidmea_fns[empty_ind == TRUE]
# double check that files are empty
empty_files <- c(empty_aids, empty_aidmeas)
if (length(empty_files) != 0){ # only if empty files exist
for (ii in seq_len(length(empty_files))){
check <- readLines(empty_files[ii])
if (length(check) != 0){
stop('Attempting to delete file which is not empty!')
}else{
cat('\n')
            cat('Deleting empty aid and aidmea files! \n')
cat(empty_files[ii], 'deleted! \n')
unlink(empty_files[ii])
}
}
}
}
# remove any empty files from data frame before running next check
aid_fns <- aid_fns[empty_ind == FALSE]
if(length(aid_fns) != 0){
#### VPR TOW NUMBER CHECK
      # check that all vpr tow numbers are the same
# read each aid file
for (ii in seq_len(length(aid_fns))){
fn <- readLines(aid_fns[ii])
# check vpr tow number
v_loc <- stringr::str_locate(fn, 'vpr')
vpr_num <- list()
for (iii in seq_len(length(v_loc[,1]))){
fn_str <- fn[iii]
fn_str_split <- stringr::str_split(fn_str, pattern = '\\\\')
vpr_num[iii] <- fn_str_split[[1]][5]
}
# test that they are all the same
un_num_vpr <- unique(vpr_num)
if(length(un_num_vpr) > 1){
cat('\n')
cat('Warning, multiple vpr tow numbers present in', aid_fns[ii], '\n')
cat(unlist(un_num_vpr), '\n')
# get numeric tow numbers
tow_num <- as.numeric(substr(un_num_vpr,4, nchar(un_num_vpr)))
# should be less than 14 if duplicated
tow_num_final <- tow_num[tow_num <14]
final_vpr <- paste0('vpr', tow_num_final)
cat('Changing', aid_fns[ii], '\n')
cat(final_vpr, '\n')
cat('\n')
# put strings back together and save file
for(iii in seq_len(length(fn))){
fn_str <- fn[iii]
fn_str_split <- stringr::str_split(fn_str, pattern = '\\\\')
fn_str_split[[1]][5] <- final_vpr
# paste string back together
s_str <- paste(fn_str_split[[1]][1], fn_str_split[[1]][2], fn_str_split[[1]][3], fn_str_split[[1]][4], fn_str_split[[1]][5], fn_str_split[[1]][6], fn_str_split[[1]][7], fn_str_split[[1]][8], sep = '\\')
fn[iii] <- s_str
}
write.table(file = aid_fns[ii], fn, quote = FALSE, col.names = FALSE, row.names = FALSE)
}
}
cat('VPR tow number check complete, ', taxa_folders[i], '\n')
#### SIZE / ROI FILE LENGTH CHECK
# check that aid and aid mea files are same length
# find files
sizefiles <- list.files(paste(taxa_folders[i],'aidmea',sep='\\'), full.names = TRUE)
roifiles <- list.files(paste(taxa_folders[i],'aid',sep='\\'), full.names=TRUE)
if(length(sizefiles) != length(roifiles)){
cat('Mismatched number of size and roi files! \n')
}
for (ii in seq_len(length(sizefiles))){
s_fn <- readLines(sizefiles[[ii]])
r_fn <- readLines(roifiles[[ii]])
      if (length(s_fn) > 0){
        if (length(s_fn) != length(r_fn)){
          cat('Warning mismatched file lengths! \n')
          cat(sizefiles[[ii]], ',', roifiles[[ii]], '\n')
          cat(length(s_fn), ',', length(r_fn), '\n')
        }
}
}
}
cat('File size check complete, ', taxa_folders[i], '\n')
cat('\n')
cat('------------ \n')
cat('\n')
}else{
cat('All files are empty, ', taxa_folders[i], '\n')
cat('No other checks completed \n')
cat('\n')
cat('------------ \n')
cat('\n')
} # end if all files are empty for taxa
}
# sink()
}) # end sink output
}
#deprecated ----------------------------------------------------------------------------------------------------------------------
getRoiMeasurements <- function(taxafolder, nchar_folder, unit = 'mm', opticalSetting) {
#' THIS FUNCTION HAS BEEN DEPRECATED
#'
#' pull roi measurements from all taxa, all files
#'
#' @param taxafolder path to taxa folder (base -- autoid folder)
#' @param nchar_folder number of characters in basepath
#' @param unit unit data will be output in, 'mm' (default -- millimetres) or 'px' (pixels)
#' @param opticalSetting VPR optical setting determining conversion between pixels and millimetres (options are 'S0', 'S1', 'S2', or 'S3')
#'
#' @note This function is very finicky, easily broken because it relies on character string splitting.
#' taxaFolder argument should not end in a backslash, please check output carefully to
#' ensure taxa names or ROI numbers have been properly sub string'd
#' @export
#browser()
# avoid CRAN notes
. <- day <- hour <- NA
.Deprecated('vpr_autoid_read')
auto_measure_mm_alltaxa_ls <- list()
# browser()
for (i in seq_len(length(taxafolder))) {
# print(paste( 'i = ',i))
#find files
sizefiles <- list.files(paste(taxafolder[i],'aidmea',sep='\\'), full.names = TRUE)
roifiles <- list.files(paste(taxafolder[i],'aid',sep='\\'), full.names=TRUE)
#remove dummy files for vpr_manual_classification
#check for dummy files
sfd <- grep(sizefiles, pattern = 'dummy')
rfd <- grep(roifiles, pattern = 'dummy')
if (length(rfd) != 0){
# print('dummy files removed')
#remove dummy files from meas consideration to avoid error
sizefiles <- sizefiles[-sfd]
roifiles <- roifiles[-rfd]
}
#skip for blank taxa
#browser()
if(length(roifiles) == 0){
SKIP = TRUE
# print(paste(i , ': SKIP == TRUE'))
#browser()
#i = i+1
#find files
# sizefiles <- list.files(paste(taxafolder[i],'aidmea',sep='\\'), full.names = T)
# roifiles <- list.files(paste(taxafolder[i],'aid',sep='\\'), full.names=T)
} else{
SKIP = FALSE
# print(paste(i, 'SKIP == FALSE'))
      #prevent mixing of taxa in same list where some hours were not properly being overwritten
auto_measure_mm_ls <- list() #moved from before i loop, attempt to correct bug
for(j in seq_len(length(sizefiles))) {
#print(paste('j = ', j))
sizefile <- sizefiles[j]
roifile <- roifiles[j]
##make sure file will not produce error
mtry <- try(read.table(sizefile, sep = ",", header = TRUE),
silent = TRUE)
if (inherits(mtry, what = 'try-error')) {
# print('try error == FALSE')
#Get info
roi_ID <- read.table(roifile, stringsAsFactors = FALSE)
auto_measure_px <- read.table(sizefile, stringsAsFactors = FALSE, col.names = c('Perimeter','Area','width1','width2','width3','short_axis_length','long_axis_length'))
} else {
# print(paste('cannot open roi file from ', taxafolder[i]))
# print(roifiles)
stop(paste("File [", roifile, "] doesn't exist or is empty, please check!"))
}
#convert to mm
if (unit == 'mm'){
auto_measure_mm_tmp <- px_to_mm(auto_measure_px, opticalSetting) #Convert to mm
}else{
#or leave in pixels
auto_measure_mm_tmp <- auto_measure_px
}
#auto_measure_mm$roi_ID <- (roi_ID$V1) #Get roi ids
#!!!!!!!!!!!!!!!!!!!!!!!!!#
#!! WARNING HARD CODING !!#
#!!!!!!!!!!!!!!!!!!!!!!!!!#
#auto_measure_mm$roi_ID <- substr(auto_measure_mm$roi_ID, nchar(auto_measure_mm$roi_ID)-13, nchar(auto_measure_mm$roi_ID)-4) #Remove path information for rois
#auto_measure_mm$roi_ID <- lapply(auto_measure_mm$roi_ID, vpr_roi)
#taxa <- substr(taxafolder[i], nchar_folder + 2, nchar(taxafolder[i])) #Get taxa label
#taxa <- substr(taxafolder[i], nchar_folder + 1, nchar(taxafolder[i])) #Get taxa label
#auto_measure_mm$taxa <- rep(taxa, nrow(auto_measure_mm)) #add taxa to dataset
#day_hour <- substr(sizefile, nchar(sizefile) - 7, nchar(sizefile))
#auto_measure_mm$day_hour <- rep(day_hour, nrow(auto_measure_mm))
#saveRDS(auto_measure_mm, paste(taxafolder, "/", "measurements_mm_", taxa[i], ".RDS", sep=""))
taxafolder_tmp <- taxafolder[i]
# browser()
auto_measure_mm <- auto_measure_mm_tmp %>%
dplyr::mutate(., roi_ID = unlist(lapply(roi_ID$V1, vpr_roi))) %>%
dplyr::mutate(., taxa = unlist(lapply(taxafolder_tmp, vpr_category))) %>%
dplyr::mutate(., day = unlist(lapply(sizefile, vpr_day))) %>%
dplyr::mutate(., hour = unlist(lapply(sizefile, vpr_hour))) %>%
dplyr::mutate(., day_hour = paste(as.character(day), as.character(hour), sep = ".")) %>%
dplyr::select(., -day, -hour)
auto_measure_mm_ls[[j]] <- auto_measure_mm
#print(paste('completed', roifiles))
}
auto_measure_mm_alltaxa_ls[[i]] <- do.call(rbind, auto_measure_mm_ls)
#browser()
# print(paste('completed', taxafolder[i]))
}
}
auto_measure_mm_alltaxa_df <- do.call(rbind, auto_measure_mm_alltaxa_ls)
#browser()
return(auto_measure_mm_alltaxa_df)
}
# deprecated ? ----------------------------------------------------------------------------------------------------------
#####PLOTTING FUNCTIONS#####
#' Size Frequency plots for VPR data
#'
#' This uses the \code{\link{hist}} plot function in base R to give a histogram of size (long axis length) frequency within a taxa.
#' \strong{!!WARNING:} this function uses hard coded plot attributes
#'
#' @author K. Sorochan
#'
#' @param x a data frame with columns 'taxa', 'long_axis_length'
#' @param number_of_classes numeric value passed to nclass argument in hist()
#' @param colour_of_bar character value defining colour of plotted bars
#'
#'
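#' @examples
#' \dontrun{
#' # Minimal sketch (not run): size-frequency histogram from the packaged
#' # measurement data
#' data('roimeas_dat_combine')
#' vpr_plot_sizefreq(roimeas_dat_combine, number_of_classes = 10,
#'                   colour_of_bar = 'grey')
#' }
#'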
#' @export
#'
#'
vpr_plot_sizefreq <- function(x, number_of_classes, colour_of_bar) {
#oldpar <- par(no.readonly = TRUE)
#on.exit(par(oldpar))
# avoid CRAN notes
. <- NA
data <- x
taxa <- unique(data$taxa)
for(i in seq_len(length(taxa))) {
# par(mfrow = c(1,2))
    withr::with_par(list(mfrow = c(1, 2)), code = {
taxa_id <- taxa[i]
data_hist <- data %>%
dplyr::filter(., taxa == taxa_id)
data_hist2 <- data_hist$long_axis_length
hist(data_hist2, nclass = number_of_classes, col = colour_of_bar, xlab = "Long axis of bug (mm)", main = taxa_id) #Eventually you will want to loop through taxa
if(length(taxa) == 1) {
plot.new()
}
})
}
}
#balloon plot with isopycnals final
#create TS data frame
isopycnal_calculate<- function(sal, pot.temp, reference.p = 0){
#' Get vector to draw isopycnal lines on TS plot
#' Used internally to create TS plots
#' @author E. Chisholm
#'
#' @param sal salinity vector
#' @param pot.temp temperature vector in deg C
#' @param reference.p reference pressure for calculation, set to 0
#'
#'
#' @note: modified from source:\url{https://github.com/Davidatlarge/ggTS/blob/master/ggTS_DK.R}
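#' @examples
#' \dontrun{
#' # Minimal sketch (not run): density grid for a toy range of salinity and temperature
#' TS <- isopycnal_calculate(sal = c(31, 32, 33), pot.temp = c(4, 6, 8))
#' }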
#' @export
# avoid CRAN notes
density <- NA
TS <- expand.grid(
sal = seq(floor(min(sal, na.rm = TRUE)), ceiling(max(sal, na.rm = TRUE)), length.out = 100),
pot.temp = seq(floor(min(pot.temp, na.rm = TRUE)), ceiling(max(pot.temp, na.rm = TRUE)), length.out = 100)
)
TS$density <- gsw::gsw_rho_t_exact(SA = TS$sal, t = TS$pot.temp, p = reference.p) - 1000 # the function calculates in-situ density, but because potential temperature and a single reference pressure is used the result equals potential density at reference pressure
# isopycnal labels
# +- horizontal isopycnals
h.isopycnals <- subset(TS,
sal == ceiling(max(TS$sal)) & # selects all rows where "sal" is the max limit of the x axis
round(density,1) %in% seq(min(round(TS$density*2)/2, na.rm = TRUE),
max(round(TS$density*2)/2, na.rm = TRUE),
                                   by = .5)) # selects any line where the rounded density is equal to density represented by any isopycnal in the plot
if(nrow(h.isopycnals)>0){
h.isopycnals$density <- round(h.isopycnals$density, 1) # rounds the density
h.isopycnals <- aggregate(pot.temp~density, h.isopycnals, mean) # reduces number of "pot.temp" values to 1 per each unique "density" value
}
# +- vertical isopycnals
if(nrow(h.isopycnals)==0){ # if the isopycnals are not +- horizontal then the df will have no rows
rm(h.isopycnals) # remove the no-line df
v.isopycnals <- subset(TS, # make a df for labeling vertical isopycnals
pot.temp == ceiling(max(TS$pot.temp)) & # selects all rows where "sal" is the max limit of the x axis
round(density,1) %in% seq(min(round(TS$density*2)/2),
max(round(TS$density*2)/2),
                                     by = .5)) # selects any line where the rounded density is equal to density represented by any isopycnal in the plot
v.isopycnals$density <- round(v.isopycnals$density, 1) # rounds the density
v.isopycnals <- aggregate(sal~density, v.isopycnals, mean) # reduces number of "pot.temp" values to 1 per each unique "density" value
}
return(TS)
}
vpr_plot_TS <- function(x, reference.p = 0, var){
#' Make a balloon plot against a TS plot
#'
#' TS balloon plot with ROI concentration, sorted by taxa
#' includes isopycnal line calculations
#'
#' @author E. Chisholm
#'
#' @param x dataframe with temperature, salinity, number of rois (n_roi_bin)
#' @param reference.p reference pressure (default at 0 for surface)- used to calculate isopycnals
#' @param var variable on which size of points will be based, eg conc_m3 or n_roi_bin
#'
#'
#'
#' @note modified from source: \url{https://github.com/Davidatlarge/ggTS/blob/master/ggTS_DK.R}
#'
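#' @examples
#' \dontrun{
#' # Minimal sketch (not run): 'vpr_depth_bin' is assumed to be binned output from
#' # bin_cast() containing a 'conc_m3' column
#' vpr_plot_TS(vpr_depth_bin, var = 'conc_m3')
#' }
#'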
#'@export
# avoid CRAN notes
p <- NA
#get isopycnal lines
sal <- x$salinity
pot.temp <- x$temperature
# make TS long table
TS <- expand.grid(
sal = seq(floor(min(sal, na.rm = TRUE)), ceiling(max(sal, na.rm = TRUE)), length.out = 100),
pot.temp = seq(floor(min(pot.temp, na.rm = TRUE)), ceiling(max(pot.temp, na.rm = TRUE)), length.out = 100)
)
TS$density <- gsw::gsw_rho_t_exact(SA = TS$sal, t = TS$pot.temp, p = reference.p) - 1000 # the function calculates in-situ density, but because potential temperature and a single reference pressure is used the result equals potential density at reference pressure
#removed isopycnal line labelling scheme so that every isopycnal line could be labelled using different method
# isopycnal labels for plotting
# +- horizontal isopycnals
# h.isopycnals <- subset(TS,
# sal == ceiling(max(TS$sal)) & # selects all rows where "sal" is the max limit of the x axis
# round(density,2) %in% seq(min(round(TS$density*2)/2, na.rm = TRUE),
# max(round(TS$density*2)/2, na.rm = TRUE),
# by = .5)) # selects any line where the rounded denisty is equal to density represented by any isopycnal in the plot
# if(nrow(h.isopycnals)>0){
# h.isopycnals$density <- round(h.isopycnals$density, 2) # rounds the density
# h.isopycnals <- aggregate(pot.temp~density, h.isopycnals, mean) # reduces number of "pot.temp" values to 1 per each unique "density" value
# }
#
# #removing this calculates more isopycnals to be labelled
# #otherwsie only labels one isopycnal line on plot
# # +- vertical isopycnals (labels for plotting)
# #if(nrow(h.isopycnals)==0){ # if the isopycnals are not +- horizontal then the df will have no rows
# # rm(h.isopycnals) # remove the no-line df
#
# v.isopycnals <- subset(TS, # make a df for labeling vertical isopycnals
# pot.temp == ceiling(max(TS$pot.temp)) & # selects all rows where "sal" is the max limit of the x axis
# round(density,2) %in% seq(min(round(TS$density*2)/2),
# max(round(TS$density*2)/2),
# by = .5)) # selects any line where the rounded denisty is equal to density represented by any isopycnal in the plot
# v.isopycnals$density <- round(v.isopycnals$density, 2) # rounds the density
# v.isopycnals <- aggregate(sal~density, v.isopycnals, mean) # reduces number of "pot.temp" values to 1 per each unique "density" value
# #}
#plot
eval(parse(text = paste0("p <- ggplot()+
#isopycnal lines
geom_contour(data = TS, aes(x = sal, y = pot.temp, z = density), col = 'grey', linetype = 'solid',
               breaks = seq(min(round(TS$density*2)/2, na.rm = TRUE), # taking density times 2, rounding and dividing by 2 rounds it to the nearest 0.5
max(round(TS$density*2)/2, na.rm = TRUE),
by = .5)) +
geom_text_contour(data = TS, aes(x = sal, y =pot.temp, z= density),binwidth = 0.5, col = 'grey', nudge_x = 0.1)+ #CONTOUR LABELS
#roi data sorted by number of rois and taxa
geom_point(data = x, aes(x = salinity, y = temperature, size = ", var, "), shape = 21) +
scale_size_area(max_size=10)+ #make balloons bigger
#label legends
labs(size = expression('Concentration /m'^3)) +
labs(col = 'Taxa')+
#set x axis (ensure scaling to data)
scale_x_continuous(name = 'Salinity [PSU]', expand = c(0,0),
limits = c(floor(min(x$salinity, na.rm = TRUE)), ceiling(max(x$salinity, na.rm = TRUE)))) + # use range of 'sal' for x axis
  #set y axis (ensure scaled to data)
scale_y_continuous(name = expression(paste('potential temperature [ ',degree,' C]')),
limits = c(floor(min(x$temperature, na.rm = TRUE)), ceiling(max(x$temperature, na.rm = TRUE)))) +
#get rid of grid lines and text formatting
theme_classic() + theme(text = element_text(size=14)) "
)))
#using geom_text_contour to label isopycnals instead
# add isopycnal labels if isopycnals run +- horizontal
# if(exists("h.isopycnals")){
# p <- p + geom_text(data = h.isopycnals,
# aes(x = ceiling(max(TS$sal)), y = pot.temp, label = density),
# hjust = "inward", vjust = 0, col = "grey")
# }
#
# # add isopycnal labels if isopycnals run +- vertical
# if(exists("v.isopycnals")){
# p <- p + geom_text(data = v.isopycnals,
# aes(x = sal, y = ceiling(max(TS$pot.temp)), label = density),
# vjust = "inward", hjust = 0, col = "grey")
# }
return(p)
}
vpr_plot_TScat <- function(x, reference.p = 0){
#' Make a balloon plot
#'
#' Balloon plot against a TS plot with ROI concentration and sorted by taxa
#' includes isopycnal line calculations. Version of \code{\link{vpr_plot_TS}}, with only relevant* taxa specified.
#' *to current analysis and research objectives (See note).
#'
#'
#' @note \strong{WARNING HARD CODED} FOR 5 TAXA, CALANUS, KRILL, ECHINODERM LARVAE, SMALL COPEPOD, CHAETOGNATHS
#' !! Uses isopycnal labelling method which does not label every contour
#'
#' @param x dataframe with temperature, salinity, number of rois named by taxa
#' @param reference.p reference pressure (default at 0 for surface)- used to calculate isopycnals
#'
#'
#'
#' @note modified from source: \url{https://github.com/Davidatlarge/ggTS/blob/master/ggTS_DK.R}
#'
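#' @examples
#' \dontrun{
#' # Minimal sketch (not run): 'taxa_wide' is assumed to be a data frame with
#' # salinity, temperature and ROI counts in columns named calanus, chaetognaths,
#' # small_copepod, krill and echinoderm_larvae
#' vpr_plot_TScat(taxa_wide)
#' }
#'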
#'@export
# avoid CRAN notes
density <- salinity <- temperature <- calanus <- chaetognaths <- small_copepod <- krill <- echinoderm_larvae <- NA
#get isopycnal lines
sal <- x$salinity
pot.temp <- x$temperature
# make TS long table
TS <- expand.grid(
sal = seq(floor(min(sal, na.rm = TRUE)), ceiling(max(sal, na.rm = TRUE)), length.out = 100),
pot.temp = seq(floor(min(pot.temp, na.rm = TRUE)), ceiling(max(pot.temp, na.rm = TRUE)), length.out = 100)
)
TS$density <- gsw::gsw_rho_t_exact(SA = TS$sal, t = TS$pot.temp, p = reference.p) - 1000 # the function calculates in-situ density, but because potential temperature and a single reference pressure is used the result equals potential density at reference pressure
# isopycnal labels for plotting
# +- horizontal isopycnals
h.isopycnals <- subset(TS,
sal == ceiling(max(TS$sal)) & # selects all rows where "sal" is the max limit of the x axis
round(density,1) %in% seq(min(round(TS$density*2)/2, na.rm = TRUE),
max(round(TS$density*2)/2, na.rm = TRUE),
                             by = .5)) # selects any line where the rounded density is equal to a density represented by an isopycnal in the plot
if(nrow(h.isopycnals)>0){
h.isopycnals$density <- round(h.isopycnals$density, 1) # rounds the density
h.isopycnals <- aggregate(pot.temp~density, h.isopycnals, mean) # reduces number of "pot.temp" values to 1 per each unique "density" value
}
# +- vertical isopycnals (labels for plotting)
if(nrow(h.isopycnals)==0){ # if the isopycnals are not +- horizontal then the df will have no rows
rm(h.isopycnals) # remove the no-line df
v.isopycnals <- subset(TS, # make a df for labeling vertical isopycnals
                           pot.temp == ceiling(max(TS$pot.temp)) & # selects all rows where "pot.temp" is the max limit of the y axis
                             round(density,1) %in% seq(min(round(TS$density*2)/2, na.rm = TRUE),
                                                       max(round(TS$density*2)/2, na.rm = TRUE),
                                                       by = .5)) # selects any line where the rounded density is equal to a density represented by an isopycnal in the plot
    v.isopycnals$density <- round(v.isopycnals$density, 1) # rounds the density
    v.isopycnals <- aggregate(sal~density, v.isopycnals, mean) # reduces number of "sal" values to 1 per each unique "density" value
}
#initialize taxa
#WARNING HARD CODING, 5 TAXA
cols <- c('t1' = 'darkorchid3', 't2' = 'deeppink3', 't3' = 'dodgerblue3', 't4' = 'tomato3', 't5' = 'gold3')
taxas <- c('calanus', 'chaetognaths', 'small_copepod', 'krill', 'echinoderm_larvae')
#plot
p <- ggplot()+
#isopycnal lines
geom_contour(data = TS, aes(x = sal, y = pot.temp, z = density), col = "grey", linetype = "solid",
                 breaks = seq(min(round(TS$density*2)/2, na.rm = TRUE), # taking density times 2, rounding and dividing by 2 rounds it to the nearest 0.5
max(round(TS$density*2)/2, na.rm = TRUE),
by = .5)) +
#roi data sorted by number of rois and taxa
geom_point(data = x, aes(x = salinity, y = temperature, size = calanus, col = 't1' ), shape = 21) +
#geom_point(data = x, aes(x = salinity, y = temperature, size = marine_snow), shape = 21) +
#geom_point(data = x, aes(x = salinity, y = temperature, size = stick), shape = 21) +
geom_point(data = x, aes(x = salinity, y = temperature, size = chaetognaths, col = 't2'), shape = 21) +
geom_point(data = x, aes(x = salinity, y = temperature, size = small_copepod, col = 't3'), shape = 21) +
geom_point(data = x, aes(x = salinity, y = temperature, size = krill, col = 't4'), shape = 21) +
geom_point(data = x, aes(x = salinity, y = temperature, size = echinoderm_larvae, col = 't5'), shape = 21) +
    scale_colour_manual(name = 'Taxa', values = cols, guide = guide_legend(), labels = taxas) +
scale_size_area(max_size=10)+ #make balloons bigger
#label legends
labs(size = 'Number of \n ROIs') +
#labs(col = 'Taxa')+
#set x axis (ensure scaling to data)
scale_x_continuous(name = "salinity", expand = c(0,0),
limits = c(floor(min(x$salinity, na.rm = TRUE)), ceiling(max(x$salinity, na.rm = TRUE)))) + # use range of "sal" for x axis
    #set y axis (ensure scaled to data)
scale_y_continuous(name = "potential temperature [C]",
limits = c(floor(min(x$temperature, na.rm = TRUE)), ceiling(max(x$temperature, na.rm = TRUE)))) +
#get rid of grid lines and text formatting
theme_classic() + theme(text = element_text(size=14))
# add isopycnal labels if isopycnals run +- horizontal
if(exists("h.isopycnals")){
p <- p + geom_text(data = h.isopycnals,
aes(x = ceiling(max(TS$sal)), y = pot.temp, label = density),
hjust = "inward", vjust = 0, col = "grey")
}
# add isopycnal labels if isopycnals run +- vertical
if(exists("v.isopycnals")){
p <- p + geom_text(data = v.isopycnals,
aes(x = sal, y = ceiling(max(TS$pot.temp)), label = density),
vjust = "inward", hjust = 0, col = "grey")
}
return(p)
}
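# ### example (hedged sketch, not run)
# vpr_plot_TScat() expects columns named salinity, temperature and the five
# hard-coded taxa (calanus, chaetognaths, small_copepod, krill,
# echinoderm_larvae); the data frame below is illustrative only
# dat <- data.frame(salinity = runif(20, 30, 32),
#                   temperature = runif(20, 2, 8),
#                   calanus = rpois(20, 5), chaetognaths = rpois(20, 2),
#                   small_copepod = rpois(20, 10), krill = rpois(20, 1),
#                   echinoderm_larvae = rpois(20, 3))
# vpr_plot_TScat(dat, reference.p = 0)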
vp_plot_matrix <- function(cm, classes, type, addLabels = TRUE, threshold = NULL){
#' Plots normalized confusion matrix
#'
#' @author E. Chisholm
#'
#' @param cm Confusion matrix (numeric)
#' @param classes character list of classes present in confusion matrix
#' (ordered)
#' @param type character value 'NN', 'SVM' or 'Dual', appended to 'Confusion
#' Matrix' to create title
#' @param addLabels logical value whether to add percentage accuracy labels to
#' plot (defaults to TRUE)
#' @param threshold numeric value which determines the minimum value of
#' frequency labelled on the plot on a normalized scale of 0-1 (useful for highlighting significant
#' disagreement)
#'
#' @return a visualization of the confusion matrix, normalized
#'
#' @export
#check dimensions
# avoid CRAN notes
Var1 <- Var2 <- Freq <- NA
# TODO check that this function runs , removed require(stringr), cant find stringr function
dimcm <- dim(cm)
if (dimcm[1] != length(classes) +1){
stop(' Incorrect dimensions, matrix does not match classes given!')
}
#remove total columns
  conf <- cm[1:(dimcm[1] - 1), 1:(dimcm[2] - 1)]
#create matrix and normalize
input.matrix.normalized <- data.matrix(normalize_matrix(conf))
#add labels
colnames(input.matrix.normalized) = classes
rownames(input.matrix.normalized) = classes
#build conf mat
confusion <- as.data.frame(as.table(input.matrix.normalized))
#basic plot
plot <- ggplot(confusion)
#add data
p <- plot + geom_tile(aes(x=Var1, y=Var2, fill=Freq)) + #adds data fill
theme(axis.text.x=element_text(angle=45, hjust=1)) + #fixes x axis labels
scale_x_discrete(name="Actual Class") + #names x axis
scale_y_discrete(name="Predicted Class") + #names y axis
scale_fill_gradient(breaks=seq(from=-.5, to=4, by=.2)) + #creates colour scale
labs(fill="Normalized\nFrequency") + #legend title
#theme(legend.position = "none" ) + #removes legend
ggtitle(label = paste(type, 'Confusion Matrix'))
#accuracy labels along diagonal
if (addLabels == TRUE){
#find diagonal values
acc <- confusion$Freq[confusion$Var1 == confusion$Var2]
#for each taxa
for (i in seq_len(length(unique(confusion$Var1)))){
#add text label
p <- p + annotate('text', x = i, y = i, #position on diagonal
#label with frequency as percent rounded to 2 digits
label = paste0(round(acc[i], digits = 2)*100, '%'),
#text formatting
colour = 'white',
size = 3)
}
}
#threshold labels
if ( !is.null(threshold)){
for (i in seq_len(length(confusion$Var1))){
#if frequency is above threshold
if (confusion$Freq[i] > threshold ){
#find x and y locations to plot
x <- grep(levels(confusion$Var1), pattern = as.character(confusion$Var1[i]) )
y <- grep(levels(confusion$Var2), pattern = as.character(confusion$Var2[i]) )
#not already labelled on diagonal
if( x != y){
#add text
p <- p + annotate('text', x = x, y = y,
#label - frequency as percent, rounded
label = paste0(round(confusion$Freq[i], digits = 2)*100, '%'),
#text formatting
colour = 'white',
size = 3)
}
}
}
}
return(p)
#end of function
}
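# ### example (hedged sketch, not run)
# vp_plot_matrix() expects a matrix with an extra "total" row and column;
# the 3x3 matrix below (2 classes plus totals) is illustrative only
# cm <- matrix(c(40, 5, 45,
#                10, 45, 55,
#                50, 50, 100), nrow = 3, byrow = TRUE)
# vp_plot_matrix(cm, classes = c('Calanus', 'krill'), type = 'Dual')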
vpr_plot_histsize <- function(data, param, title = NULL , bw = 0.1, xlim = NULL){
#' Plot size frequency histogram
#'
#'@author E. Chisholm
#' @param param size parameter of interest (corresponds to sub lists within data argument)
  #' @param data size data from auto_measure_mm subset into taxa groups
#' @param title main title for plot, if left null will default based on parameter and taxa
#' @param bw bin width, defines width of bars on histogram, defaults to 0.1, decrease for more detail
#' @param xlim plot xlimit, defaults to min max of data if not provided
#'
#'
#' @note param options are typically 'Perimeter', 'Area', 'width1','width2',
#' 'width3', 'short_axis_length', 'long_axis_length'
#'
#' @export
#'
if (is.null(title)){
title <- paste(param , ':', data$taxa[1])
}
if(is.null(xlim)){
xlim <- c(min(data[[param]]), max(data[[param]]))
}
qplot(data[[param]], geom = 'histogram',
binwidth = bw,
main = title,
xlab = 'length (mm)',
ylab = 'Frequency',
fill = I('green'),
col = I('green'),
alpha = I(.2),
xlim = xlim)
}
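# ### example (hedged sketch, not run)
# assumes a data frame with a 'taxa' element and a numeric size parameter
# (e.g. 'long_axis_length' in mm); the values below are made up
# size_dat <- data.frame(taxa = 'krill', long_axis_length = rnorm(100, 5, 1))
# vpr_plot_histsize(size_dat, param = 'long_axis_length', bw = 0.25)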
vp_plot_unkn <- function(cm, classes, threshold = 0, summary = TRUE, sample_size = NULL){
#' Function to visualize losses to unknown category due to disagreement in Dual classifier
#'
#' Makes confusion matrix like plot, where x axis represent SVM classification, y axis represent NN classification
#' Allows visual summary of data lost to unknown category
#'
#'
#' @param cm dual unknown confusion matrix from VP
#' @param classes taxa groups in order, from VP
#' @param threshold minimum value which will be labelled in plot
#' @param sample_size character string describes the sample size used to train the model being plotted (optional)
#' @param summary logical to add text summary to plot
#' E. Chisholm May 2019
#'
#'
#' @export
# avoid CRAN notes
Var1 <- Var2 <- Freq <- taxas <- NA
dimcm <- dim(cm)
#remove total columns
  conf <- cm[1:(dimcm[1] - 1), 1:(dimcm[2] - 1)]
#create matrix and normalize
input.matrix<- data.matrix(conf)
#add labels
colnames(input.matrix) = classes
rownames(input.matrix) = classes
#build conf mat
confusion <- as.data.frame(as.table(input.matrix))
#basic plot
plot <- ggplot(confusion)
#add data
  p<- plot + geom_tile(aes(x=Var1, y=Var2, fill=Freq), col = 'black') + #adds data fill
theme(axis.text.x=element_text(angle=90, hjust=1)) + #fixes x axis labels
scale_x_discrete(name="SVM Class") + #names x axis
scale_y_discrete(name="NN Class") + #names y axis
scale_fill_gradient(low = 'orchid', high = 'darkorchid4',breaks=seq(from=-.5, to=4, by=.2)) + #creates colour scale
#labs(fill="Normalized\nFrequency") + #legend title
theme(legend.position = "none" ) + #removes legend
ggtitle(label = 'Disagreement in Dual Classifier')
#labels
for (i in seq_len(length(confusion$Var1))){
#if frequency is above threshold
if (confusion$Freq[i] > threshold ){
#find x and y locations to plot
x <- grep(levels(confusion$Var1), pattern = as.character(confusion$Var1[i]) )
y <- grep(levels(confusion$Var2), pattern = as.character(confusion$Var2[i]) )
#not already labelled on diagonal
#add text
p <- p + annotate('text', x = x, y = y,
#label - frequency as percent, rounded
label = round(confusion$Freq[i], digits = 2),
#text formatting
colour = 'white',
size = 3)
}
}
#add summary text
if (summary == TRUE){
tab <- as.data.frame(
c(
'Sample Size' = sample_size , #update for different sizes
'Total Disagreement' = sum(confusion$Freq),
        'Average loss per taxa' = round(sum(confusion$Freq)/length(classes), digits = 0)
)
)
# using gridExtra
p_tab <- tableGrob(unname(tab))
grid.arrange(p, p_tab, heights = c(1, 0.2))
}
return(p)
}
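# ### example (hedged sketch, not run)
# cm here is a dual-classifier disagreement matrix with a trailing total
# row/column, as expected by the indexing above; the values are illustrative
# cm <- matrix(c(0, 3, 3,
#                7, 0, 7,
#                7, 3, 10), nrow = 3, byrow = TRUE)
# vp_plot_unkn(cm, classes = c('Calanus', 'krill'), summary = FALSE)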
interp2xyz <- function(al, data.frame = FALSE) {
stopifnot(is.list(al), identical(names(al), c("x","y","z")))
xy <- expand.grid(x = al[["x"]], y = al[["y"]], KEEP.OUT.ATTRS=FALSE)
cbind(if(!data.frame) data.matrix(xy) else xy,
z = as.vector(al[["z"]]))
}
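# ### example (hedged sketch)
# interp2xyz() flattens an interp-style list (x, y, z-matrix) into xyz columns
# al <- list(x = 1:3, y = 1:2, z = matrix(seq_len(6), nrow = 3))
# interp2xyz(al, data.frame = TRUE)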
#contour plot with interpolation
vpr_plot_contour <- function(data, var, dup= 'mean', method = 'interp', labels = TRUE, bw = 1, cmo){
#' Interpolated contour plot of particular variable
#'
#' Creates interpolated contour plot, can be used as a background for ROI or tow yo information
#'
#' @author E. Chisholm & Kevin Sorochan
#'
#' @param data data frame needs to include time_hr, depth, and variable of
#' choice (var)
#' @param var variable in dataframe which will be interpolated and plotted
#' @param dup if method == 'interp'. Method of handling duplicates in interpolation, passed to interp function (options: 'mean', 'strip', 'error')
#' @param method Specifies interpolation method, options are 'interp' or
#' 'oce', oce uses slightly different method (oce is least error prone)
#' @param labels logical value indicating whether or not to plot contour labels
#' @param bw bin width defining interval at which contours are labelled
#' @param cmo name of a `cmocean` plotting theme, see `?cmocean` for more information
#'
#' @export
# avoid CRAN notes
x <- y <- z <- NA
#interpolate
#use interp package rather than akima to avoid breaking R
#ref: https://www.user2017.brussels/uploads/bivand_gebhardt_user17_a0.pdf
# browser()
# akima method deprecated 2022 - due to licensing issues
# if(method == 'akima'){
# interpdf <- akima::interp(x = data$time_hr, y = data$depth, z = data[[var]], duplicate = dup ,linear = TRUE )
# }
if(method == 'interp'){
interpdf <- try(interp::interp(x = data$time_hr, y = data$depth, z = data[[var]], duplicate = dup ,linear = TRUE ))
if(inherits(interpdf, 'try-error'))
stop("Interpolation failed, try method = 'oce'")
}
if(method == 'oce'){
interpdf_oce <- oce::interpBarnes(x = data$time_hr, y = data$depth, z = data[[var]] )
interpdf <- NULL
interpdf$x <- interpdf_oce$xg
interpdf$y <- interpdf_oce$yg
interpdf$z <- interpdf_oce$zg
}
#convert to dataframe
df <- interp2xyz(interpdf, data.frame = TRUE)
#zero time values
df$x <- df$x - min(df$x)
if(missing(cmo)){
# default to gray
cmof <- cmocean::cmocean('gray')
# set default col scheme based on variable name
if(var %in% c('temperature', 'conc_m3')){
cmof <- cmocean::cmocean('thermal')
}
if(var == 'salinity') {
cmof <- cmocean::cmocean('haline')
}
if(var == 'density') {
cmof <- cmocean::cmocean('dense')
}
if(var == 'fluorescence') {
cmof <- cmocean::cmocean('algae')
}
    if(var == 'turbidity') {
      cmof <- cmocean::cmocean('turbid')
    }
  } else {
    cmof <- cmocean::cmocean(cmo)
  }
# theme_col <- cmocean::cmocean(cmo)
cmo_data <- cmof(100)
if(labels == TRUE){
# updated plotting from KS
p <- ggplot(df) +
geom_tile(aes(x = x, y = y, fill = z)) +
labs(fill = var) +
scale_y_reverse(name = "Depth [m]") +
scale_x_continuous(name = "Time [h]") +
theme_classic() +
geom_contour(aes(x = x, y = y, z = z), col = "black") +
geom_text_contour(aes(x = x, y = y, z = z), binwidth = bw,
col = "white", check_overlap = TRUE, size = 8) +
scale_fill_gradientn(colours = cmo_data, na.value = 'gray')
# p <- ggplot(df) +
# geom_tile(aes(x = x, y = y, fill = z)) +
# labs(fill = var) +
# scale_y_reverse(name = 'Depth [m]') +
# scale_x_continuous(name = 'Time [h]') +
# theme_classic() +
# geom_contour(aes(x = x, y = y, z= z), col = 'black') +
# geom_text_contour(aes(x = x, y = y, z= z),binwidth = bw, col = 'white', check_overlap = TRUE, size = 8)+ #CONTOUR LABELS
# scale_fill_continuous(na.value = 'white')
}else{
# updated plotting from KS
p <- ggplot(df) +
geom_tile(aes(x = x, y = y, fill = z)) +
labs(fill = var) + scale_y_reverse(name = "Depth [m]") +
scale_x_continuous(name = "Time [h]") + theme_classic() +
geom_contour(aes(x = x, y = y, z = z), col = "black") +
scale_fill_gradientn(colours = cmo_data, na.value = "gray")
# p <- ggplot(df) +
# geom_tile(aes(x = x, y = y, fill = z)) +
# labs(fill = var) +
# scale_y_reverse(name = 'Depth [m]') +
# scale_x_continuous(name = 'Time [h]') +
# theme_classic() +
# geom_contour(aes(x = x, y = y, z= z), col = 'black') +
# scale_fill_continuous(na.value = 'white')
}
return(p)
}
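# ### example (hedged sketch, not run)
# requires columns time_hr, depth and the variable named in 'var'; the small
# synthetic tow-yo below is illustrative only
# dat <- expand.grid(time_hr = seq(0, 2, by = 0.1), depth = seq(1, 50, by = 1))
# dat$temperature <- 10 - 0.1 * dat$depth + rnorm(nrow(dat), sd = 0.1)
# vpr_plot_contour(dat, var = 'temperature', method = 'oce', labels = FALSE)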
# profile plotting
#' Plots VPR profiles of temperature, salinity, density, fluorescence and concentration (by classification group)
#'
#'
#' This plot allows a good overview of vertical distribution of individual classification groups along with reference to hydrographic parameters.
#' Facet wrap is used to create distinct panels for each taxa provided
#'
#' @param taxa_conc_n A VPR data frame with hydrographic and concentration data separated by taxa (from \code{\link{vpr_roi_concentration}})
#' @param taxa_to_plot The specific classification groups which will be plotted, if NULL, will plot all taxa combined
#' @param plot_conc Logical value whether or not to include a concentration plot (FALSE just shows CTD data)
#'
#' @return A gridded object of at least 3 ggplot objects
#' @export
vpr_plot_profile <- function(taxa_conc_n, taxa_to_plot, plot_conc){
# check that depth is present
if(!'depth' %in% names(taxa_conc_n)){
stop("These plots require a 'depth' variable!")
}
# avoid CRAN notes
temperature <- depth <- salinity <- fluorescence <- density <- conc_m3 <- pressure <- NA
# plot temp
p <- ggplot(taxa_conc_n) +
geom_point(aes(x = temperature, y = depth), col = 'red') +
scale_y_reverse(name = 'Depth [m]')
# plot salinity
p_TS <- p + geom_point(aes(x = (salinity -25), y = depth), col = 'blue') +
scale_x_continuous(name = expression(paste("Temperature [",degree,"C]")),sec.axis = sec_axis(~ . +25, name = 'Salinity [PSU]')) +
theme(axis.line.x.bottom = element_line(colour = 'red'),
axis.ticks.x.bottom = element_line(colour = 'red'),
panel.background = element_blank(),
panel.grid = element_blank(),
axis.line.y = element_line(linetype = 'solid'),
axis.line.x.top = element_line(colour = 'blue'),
axis.ticks.x.top = element_line(colour = 'blue'),
axis.title = element_text(size = 20)
)
# have to force rescale for multi axes ggplot
# plot fluorescence
p <- ggplot(taxa_conc_n) +
geom_point(aes(x = fluorescence, y = depth), col = 'green') +
scale_y_reverse(name = 'Depth [m]')
# plot density
p_FD <- p + geom_point(aes(x = (density -20) *20, y = depth)) +
scale_x_continuous(name = 'Fluorescence [mv]',sec.axis = sec_axis(~. /20 +20, name = 'Density')) +
theme(axis.line.x.bottom = element_line(colour = 'green'),
axis.ticks.x.bottom = element_line(colour = 'green'),
panel.background = element_blank(),
panel.grid = element_blank(),
axis.line.y = element_line(linetype = 'solid'),
axis.line.x.top = element_line(colour = 'black'),
axis.title = element_text(size = 20)
)
# manual rescale
if(is.null(taxa_to_plot)){
pp <- ggplot(taxa_conc_n) +
      geom_point(aes(x = depth, y = conc_m3/1000)) + # convert concentration from per m^3 to per litre
stat_summary_bin(aes(x = depth, y = conc_m3/1000), fun = 'mean', col = 'red', geom = 'line', size = 3) +
scale_x_reverse(name = 'Depth [m]') +
scale_y_continuous(name = expression('ROI L'^'-1')) +
# ggtitle('Concentrations') +
theme_classic() +
theme(strip.text = element_text(size = 18),
plot.title = element_text(size = 25),
axis.title = element_text(size = 20))+
coord_flip()
}else{
# facet wrap plot all taxa, if only one taxa, comment out facet wrap
pp <- ggplot(taxa_conc_n[taxa_conc_n$taxa %in% c(taxa_to_plot),]) +
      geom_point(aes(x = depth, y = conc_m3/1000)) + # convert concentration from per m^3 to per litre
stat_summary_bin(aes(x = depth, y = conc_m3/1000), fun = 'mean', col = 'red', geom = 'line', size = 3) +
scale_x_reverse(name = 'Depth [m]') +
scale_y_continuous(name = expression('ROI L'^'-1')) +
# ggtitle('Concentrations') +
facet_wrap(~taxa, nrow = 1, ncol = length(unique(taxa_conc_n$taxa)), scales = 'free_x')+
theme_classic() +
theme(strip.text = element_text(size = 18),
plot.title = element_text(size = 25),
axis.title = element_text(size = 20))+
coord_flip()
}
if(plot_conc == TRUE){
p <- grid.arrange(p_TS, p_FD, pp , widths = c(1, 1, 2), heights = 2, nrow = 1, ncol = 3)
}else{
p <- grid.arrange(p_TS, p_FD, widths = c(1, 1), heights = 2, nrow = 1, ncol = 2)
}
return(p)
}
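# ### example (hedged sketch, not run)
# taxa_conc_n would normally come from vpr_roi_concentration(); it needs
# depth, temperature, salinity, density, fluorescence, conc_m3 and taxa
# vpr_plot_profile(taxa_conc_n, taxa_to_plot = c('Calanus', 'krill'),
#                  plot_conc = TRUE)
# vpr_plot_profile(taxa_conc_n, taxa_to_plot = NULL, plot_conc = TRUE)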
#### IMAGE ANALYSIS FUNCTIONS ####
#' Explore reclassified images
#'
#' Pull image from reclassified or misclassified files produced during \code{\link{vpr_manual_classification}}
#'
#' @param day Character string, 3 digit day of interest of VPR data
#' @param hour Character string, 2 digit hour of interest of VPR data
#' @param base_dir directory path to folder containing day/hour folders in which misclassified and reclassified files are organized (eg.'C:/VPR_PROJECT/r_project_data_vis/classification files/') which would contain 'd123.h01/reclassified_krill.txt' )
#' @param taxa_of_interest Classification group from which to pull images
#' @param image_dir directory path to ROI images, eg. "E:\\\\data\\\\cruise_IML2018051\\\\", file separator MUST BE "\\\\" in order to be recognized
#'
#' @return folders of misclassified or reclassified images inside image_dir
#' @export
#'
#'
vpr_img_reclassified <- function(day, hour, base_dir, taxa_of_interest, image_dir){
####directory where misclassified/reclassified files
####base_dir <- 'C:/VPR_PROJECT/r_project_data_vis/classification files/'
#basepath where ROI images are
##image_dir <- 'E:\\data\\cruise_IML2018051\\'
##setwd(image_dir)
#get misclassified/ reclassified images
day_hour <- paste0('d', day, '.h', hour)
folder <- list.files(base_dir, pattern = day_hour, full.names = TRUE)
files <- list.files(folder, pattern = taxa_of_interest)
  #pulls out misclassified files
#(you only want to look at images
#you have reclassified as specific taxa - select "n" if pop up appears)
#only exception (reason to select "y",
#would be to look specifically at images that you pulled out of a taxa,
#for example to check the accuracy of an automatic ID scheme)
tt <- grep(files, pattern = 'misclassified')
if (length(tt) > 0 ){
warning(paste('misclassified files found for ', taxa_of_interest))
ans <- readline('Would you like to include these files? (y/n)
                    *NOTE, looking at misclassified images will show you images that were removed from your taxa of interest
during reclassification, this may be useful to get an idea of the accuracy of your automatic
classification or which images are confusing your automatic classification.')
if(ans == 'n'){
files <- files[-tt]
folder_name <- "reclassified_ROIS"
}else{
files <- files[tt]
folder_name <- "misclassified_ROIS"
}
}
message(paste('>>>>>', files, 'found for', taxa_of_interest, ' in ', day_hour, '!'))
message('>>>>> Copying images now!')
#runs through reclassified files (should only be one)
for(ii in files) {
#reads in roi strings
roi_path_str <- read.table(paste0(base_dir, '/', day_hour, '/', ii), stringsAsFactors = FALSE)
#sub out for new basepath where rois are located
#note this is an extra step because I moved the "data" folder from my C drive
tt<- stringr::str_locate(string = roi_path_str$V1[1], pattern = 'rois')
sub_roi_path <- substr(roi_path_str$V1, tt[1], nchar(roi_path_str))
new_roi_path <- file.path(image_dir, sub_roi_path, fsep = "\\")
#Create a new folder for autoid rois (where images will be stored)
roi_folder <- file.path(image_dir, taxa_of_interest, folder_name, fsep = "\\")
command1 <- paste('mkdir', roi_folder, sep = " ")
shell(command1)
#Copy rois to this directory
for (iii in seq_len(length(new_roi_path))) {
dir_tmp <- as.character(new_roi_path[iii])
command2 <- paste("copy", dir_tmp, roi_folder, sep = " ")
shell(command2)
print(paste(iii, '/', length(new_roi_path),' completed!'))
}
message(paste('Images saved to ', roi_folder))
}
}
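# ### example (hedged sketch, not run; Windows-only, uses shell())
# paths below are placeholders for a local VPR project, matching the
# directory layout described in the parameter documentation above
# vpr_img_reclassified(day = '222', hour = '03',
#                      base_dir = 'C:/VPR_PROJECT/r_project_data_vis/classification files/',
#                      taxa_of_interest = 'krill',
#                      image_dir = 'E:\\data\\cruise_IML2018051\\')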
vpr_img_depth <- function(data, min.depth , max.depth, roiFolder , format = 'list'){
#' Explore VPR images by depth bin
#'
#' Allows user to pull VPR images from specific depth ranges, to investigate
#' trends before classification of images into taxa groups
#'
#'
#'
#' @param data data frame containing CTD and ROI data from
#' \code{\link{vpr_ctdroi_merge}}, which also contains calculated variables
#' sigmaT and time_hr
#' @param min.depth minimum depth of ROIs you are interested in looking at
#' @param max.depth maximum depth of ROIs you are interested in exploring
#' @param roiFolder directory that ROIs are within (can be very general eg.
#' C:/data, but will be quicker to process with more specific file path)
#' @param format option of how images will be output, either as 'list' a list
#' of file names or 'image' where images will be displayed
#'
#' @export
#'
# #### examples
# #determine range of interest
# mid <- as.numeric(readline('Minimum depth of interest? '))
# mad <- as.numeric(readline('Maximum depth of interest? '))
# #run image exploration
# roi_files <- vpr_img_depth(all_dat, min.depth = mid, max.depth = mad,
# roiFolder = paste0('E:/data/IML2018051/rois/vpr', tow ), format = 'list')
#
# #copy image files into new directory to be browsed
# roi_file_unlist <- unlist(roi_files)
# newdir <- file.path(plotdir, paste0('vpr', tow, 'images_', mid, '_', mad, ''))
# dir.create(newdir)
# file.copy(roi_file_unlist, newdir)
#
# avoid CRAN notes
. <- pressure <- roi <- NA
data_filtered <- data %>%
dplyr::filter(., pressure >= min.depth) %>%
dplyr::filter(., pressure <= max.depth)
if(length(data_filtered$roi) < 1){
stop('No data exists within this depth range!')
}
#search for ROI files based on data
roi_files <- paste0('roi.', sprintf('%08d', data_filtered$roi), '*')
roi_file_list <- list()
#options(warn = 1)
for (i in seq_len(length(roi_files))){
roi_file_list[[i]] <- list.files(roiFolder, pattern = roi_files[i], recursive = TRUE, full.names = TRUE)
if (length(roi_file_list[[i]]) >= 1){
message(paste('Found', length(roi_file_list[[i]]),' files for ',roi_files[i] ))
}else{
warning('No file found in directory (', roiFolder, ') matching ', roi_files[i])
}
}
if( format == 'list'){
return(roi_file_list)
}
if (format == 'image'){
for(i in seq_len(length(roi_file_list))){
for(ii in seq_len(length(roi_file_list[[i]]))){
data_roi <- data_filtered %>%
dplyr::filter(., roi == roi[i])
meta_str <- paste0('time (hr): ', data_roi$time_hr[1], '\n temperature: ', data_roi$temperature[1], '\n pressure: ', data_roi$pressure[1], '\n salinity: ', data_roi$salinity[1], '\n')
pp <- magick::image_read(roi_file_list[[i]][ii]) %>%
#print metadata on image
#image_annotate(text = roi_files[i], color = 'white', size = 10) %>%
#image_annotate(text = meta_str, color = 'white', location = '-100') %>%
magick::image_scale(geometry = 'x300')
print(pp)
#print metadata
#cat(paste0(roi_files[i], '\n'))
#cat( paste0('time (hr): ', data_roi$time_hr, '\n temperature: ', data_roi$temperature, '\n pressure: ', data_roi$pressure, '\n salinity: ', data_roi$salinity, '\n'))
}
}
}
#
}
vpr_img_category <- function(data, min.depth , max.depth, roiFolder , format = 'list', taxa_of_interest){
#' Explore images by depth and classification
#'
#' Pulls images from specific depth ranges in specific classification group
#'
#'
#'
#'
#'
#' @param data data frame containing CTD and ROI data from
#' \code{\link{vpr_ctdroi_merge}}, which also contains calculated variables
#' sigmaT and time_hr
#' @param min.depth minimum depth of ROIs you are interested in looking at
#' @param max.depth maximum depth of ROIs you are interested in exploring
#' @param roiFolder directory that ROIs are within (can be very general eg.
#' C:/data, but will be quicker to process with more specific file path)
#' @param format option of how images will be output, either as 'list' a list
#' of file names or 'image' where images will be displayed
#' @param taxa_of_interest character string of classification group from which
#' to pull images
#'
#' @export
#'
# ### examples
# #determine range of interest
# mid <- as.numeric(readline('Minimum depth of interest? '))
# mad <- as.numeric(readline('Maximum depth of interest? '))
# #run image exploration
# roi_files <- vpr_img_category(all_dat, min.depth = mid, max.depth = mad,
# roiFolder = paste0('E:/data/IML2018051/rois/vpr', tow ), format = 'list',
# taxa = 'Calanus')
#
# #copy image files into new directory to be browsed
# roi_file_unlist <- unlist(roi_files)
# newdir <- file.path(plotdir, paste0('vpr', tow, 'images_', mid, '_', mad, '_', taxa))
# dir.create(newdir)
# file.copy(roi_file_unlist, newdir)
#
#
# avoid CRAN notes
. <- pressure <- taxa <- roi <- NA
if(length(taxa_of_interest) > 1){
stop("Only explore one taxa at a time!")
}
data_filtered <- data %>%
dplyr::filter(., pressure >= min.depth) %>%
dplyr::filter(., pressure <= max.depth) %>%
dplyr::filter(., taxa %in% taxa_of_interest)
if(length(data_filtered$roi) < 1){
stop('No data exists within this depth and taxa range!')
}
#search for ROI files based on data
roi_files <- paste0('roi.', sprintf('%08d', data_filtered$roi), '*')
roi_file_list <- list()
# options(warn = 1) # is this needed?
for (i in seq_len(length(roi_files))){
roi_file_list[[i]] <- list.files(roiFolder, pattern = roi_files[i], recursive = TRUE, full.names = TRUE)
if (length(roi_file_list[[i]]) >= 1){
print(paste('Found', length(roi_file_list[[i]]),' files for ',roi_files[i] ))
}else{
warning('No file found in directory (', roiFolder, ') matching ', roi_files[i])
}
}
if( format == 'list'){
return(roi_file_list)
}
if (format == 'image'){
for(i in seq_len(length(roi_file_list))){
for(ii in seq_len(length(roi_file_list[[i]]))){
data_roi <- data_filtered %>%
dplyr::filter(., roi == roi[i])
meta_str <- paste0('time (hr): ', data_roi$time_hr[1], '\n temperature: ', data_roi$temperature[1], '\n pressure: ', data_roi$pressure[1], '\n salinity: ', data_roi$salinity[1], '\n')
pp <- magick::image_read(roi_file_list[[i]][ii]) %>%
magick::image_scale(geometry = 'x300')
print(pp)
#print metadata
cat(paste0(roi_files[i], '\n'))
cat( paste0('time (hr): ', data_roi$time_hr[1], '\n temperature: ', data_roi$temperature[1], '\n pressure: ', data_roi$pressure[1], '\n salinity: ', data_roi$salinity[1], '\n'))
}
}
}
#
}
vpr_img_copy <- function(auto_id_folder, taxas.of.interest, day, hour){
#' Image copying function for specific taxa of interest
#'
#' This function can be used to copy images from a particular taxa, day and hour into distinct folders within the auto id directory
#' This is useful for visualizing the ROIs of a particular classification group or for performing manual tertiary checks to remove
#' images not matching classification group descriptions.
#'
#'
#'
#' @param auto_id_folder eg "D:/VP_data/IML2018051/autoid"
#' @param taxas.of.interest eg. taxas.of.interest <- c('Calanus')
#' @param day character, day of interest
#' @param hour character, hour of interest
#'
#' @export
#This code extracts ROIs from the VPR cast folder into folders corresponding to their autoID (from Visual Plankton)
  #modified for pulling reclassified images
folder_names <- list.files(auto_id_folder)
folder_names <- folder_names[folder_names %in% taxas.of.interest]
day_hour <- paste0('d', day, '.h', hour)
aid_day_hour <- paste0('aid.', day_hour)
#read all days and hours
# st_dat <- read.csv('C:/VPR_PROJECT/vp_info/station_names_IML2018051.csv')
#
# day_list <- st_dat$day
# hour_list <- st_dat$hour
#
#
# for(j in 1:length(day_list)){
#
# day <- day_list[[j]]
# hour <- hour_list[[j]]
#
# day_hour <- paste0('d', day, '.h', hour)
# aid_day_hour <- paste0('aid.', day_hour)
for (i in folder_names) {
#Get name of folder containing .txt files with roi paths within a category
dir_roi <- file.path(auto_id_folder, i, "aid", fsep = "\\")
#Get names of text files
txt_roi <- list.files(dir_roi)
subtxt <- grep(txt_roi, pattern = aid_day_hour, value = TRUE)
txt_roi <- subtxt
#subtxt2 <- grep(txt_roi, pattern = clf_name, value = TRUE)
#txt_roi <- subtxt2
for(ii in txt_roi) {
# setwd(dir_roi)
withr::with_dir(dir_roi, code = {
roi_path_str <- read.table(ii, stringsAsFactors = FALSE)
path_parts <- stringr::str_split(auto_id_folder, pattern = '/')
auto_ind <- grep(path_parts[[1]], pattern = 'autoid')
base_path_parts <- path_parts[[1]][-auto_ind]
basepath <- stringr::str_c(base_path_parts, collapse = '\\')
auto_path <- stringr::str_c(path_parts[[1]], collapse = '\\')
tt<- stringr::str_locate(string = roi_path_str$V1[1], pattern = 'rois')
sub_roi_path <- substr(roi_path_str$V1, tt[1], nchar(roi_path_str))
new_roi_path <- file.path(basepath, sub_roi_path, fsep = '\\')
#Create a new folder for autoid rois
roi_folder <- file.path(auto_path, i, paste0(ii, "_ROIS"), fsep = "\\")
command1 <- paste('mkdir', roi_folder, sep = " ")
shell(command1)
#Copy rois to this directory
for (iii in seq_len(length(new_roi_path))) {
dir_tmp <- as.character(new_roi_path[iii])
command2 <- paste("copy", dir_tmp, roi_folder, sep = " ")
shell(command2)
print(paste(iii, '/', length(new_roi_path),' completed!'))
}
}) # close dir
}
print(paste(i, 'completed!'))
}
print(paste('Day ', day, ', Hour ', hour, 'completed!'))
# }
}
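# ### example (hedged sketch, not run; Windows-only, uses shell())
# paths and values follow the examples given in the parameter documentation
# vpr_img_copy(auto_id_folder = 'D:/VP_data/IML2018051/autoid',
#              taxas.of.interest = c('Calanus'),
#              day = '289', hour = '08')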
vpr_img_check <- function(folder_dir, basepath){
#' Remove ROI strings from aid and aidmeas files based on a manually organized folder of images
#'
#' Should be used after \code{\link{vpr_img_copy}}, and manual image removal from created folders
#'
#'
#'
#' @param folder_dir directory path to day hour folders containing manually
#' reorganized images of a specific taxa eg.
#' 'C:/data/cruise_IML2018051/krill/images/' where that folder contains
#' '......d123.h01/' which contains manually sorted images of krill
#' @param basepath directory path to original Visual Plankton files, specified
#' down to the classification group. eg.
#' 'C:/data/cruise_IML2018051/autoid/krill'
#'@export
#'
# this function can be used to edit aid and aidmeas files based on the images contained in a folder
  #useful if images were reorganized into classification groups manually in file explorer and then
  #user wants to translate this reorganization into data files
  ##part two of krill check to remove erroneous images
#after having copied krill images into folder using get_autoid_mod.R
#then manually removing any images which were not Krill
#once this is run, processing and plotting can be done
#E. Chisholm Sept 2019
#if not supplied, assume folders are the same
if(missing(basepath)){basepath <- folder_dir}
stfolders <- list.files(folder_dir, full.names = TRUE)
for (i in seq_len(length(stfolders))){ #for each day/hour loop
krill_ver <- list.files(stfolders[i])
#find matching original files
#dh_ind <- stringr::str_locate(stfolders[i], pattern = 'aid.d')
#dayhr <- substr(stfolders[i], dh_ind[2], nchar(stfolders[i])-5 )
day <- vpr_day(stfolders[i])
hour <- vpr_hour(stfolders[i])
dayhr <- paste0('d', day, '.h', hour)
#aid and aidmea
aid_fns <- list.files(paste0(basepath, '/aid'), pattern = dayhr, full.names = TRUE)
aidmeas_fns <- list.files(paste0(basepath, '/aidmea'), pattern = dayhr, full.names = TRUE)
#get indexes of aids which are not matched in krill_check and remove
#read in aid file
aid <- read.table(aid_fns ,stringsAsFactors = FALSE)
#get roi substring
#aid_old_gen <- substr(aid$V1, nchar(aid$V1) - 17, nchar(aid$V1))
#aid_old_gen <- trimws(aid_old_gen, which = 'both')
aid_old_gen <- vpr_roi(aid$V1)
#find index of images which are in maually verified folder
ver_ind <- aid_old_gen %in% krill_ver
ver_ind_unl <- grep(ver_ind, pattern = 'FALSE')
#remove any ROI number which do not match verified index
aid_new <- aid$V1[ -ver_ind_unl]
#read in aidmea file
aidMea_old <- read.table(aidmeas_fns)
#use same verified index to subset aidmea file
aidMea_new <- aidMea_old[-ver_ind_unl, ]
#check that files match
if( length(aidMea_new$V1) != length(aid_new)){
stop('roi and size files do not match!')
}
#print new aid and aidmea files
write.table(file = aidmeas_fns, aidMea_new, sep = " ", quote = FALSE, col.names = FALSE, row.names = FALSE)
write.table(file = aid_fns, aid_new, quote = FALSE, col.names = FALSE, row.names = FALSE)
cat(' \n')
cat(aidmeas_fns, 'updated! \n')
cat(aid_fns, 'updated! \n')
cat('\n')
}
}
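# ### example (hedged sketch, not run)
# folder_dir holds day/hour folders of manually vetted images; basepath is
# the matching autoid taxa folder containing aid/ and aidmea/ subfolders
# (paths follow the examples in the parameter documentation above)
# vpr_img_check(folder_dir = 'C:/data/cruise_IML2018051/krill/images/',
#               basepath = 'C:/data/cruise_IML2018051/autoid/krill')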
# ---- end of source file: vprr/R/EC_functions.R ----
## classifier check
# E. Chisholm
# edited by K. Sorochan
## January 6th, 2020
## version 3
vpr_manual_classification <-
function(day,
hour,
basepath,
taxa_of_interest,
gr = TRUE,
scale = 'x300',
opticalSetting = 'S2',
img_bright = TRUE) {
#' Function to check results of classification manually
#'
#'
#' Displays each image in day hour specified,
#' prompts user to confirm or deny classification.
#' If classification is denied, asks for a reclassification
#' value based on available taxa
#'
#' @param day day of interest in autoid
#' @param hour hour of interest in autoid
#' @param basepath file path to auto id folder eg 'E:/autoID_EC_07032019/'
#' @param taxa_of_interest list of taxa folders you wish you sort through
#' @param gr logical indicating whether pop up graphic menus are used (user preference - defaults to TRUE)
#' @param scale argument passed to \code{\link{image_scale}}, default = 'x300'
#' @param opticalSetting specifies optical setting of VPR, defining image frame
#' size, current options are 'S0', 'S1', 'S2' (default), 'S3', see further
#' info in details
#'@param img_bright logical value indicating whether or not to include a blown
#' out high brightness version of image (can be helpful for viewing dark field
#' fine appendages)
#'
#' @details Optical Setting frame sizes: S0 = 7x7 mm, S1 = 14x14mm, S2 =
#' 24x24mm, S3 = 48x48 mm. These settings define the conversion factor from
#' pixels to millimetres and calculate image size for classification
#' reference
#'
#'
#' @section Development:
#' \itemize{
#' \item Add "undo" functionality to go back on a typing mistake
#' \item Fix scaling/ size issue so images are consistently sized
#' \item show ROI number for image somewhere for reference when in doubt of classification
#' }
#'
#'@export
day_hour <- paste0('d', day, '.h', hour)
dirpath <- file.path("manual_reclassification_record",day_hour)
dir.create(path = dirpath, showWarnings = FALSE, recursive = TRUE)
existingFiles <- list.files(dirpath, full.names = TRUE)
ans <-
menu(
c('Yes', 'No'),
graphics = FALSE,
title = paste(
          'WARNING!!! ALL EXISTING FILES IN', day_hour,
'ARE ABOUT TO BE DELETED. DO YOU WISH TO PROCEED?'
)
)
if (ans == 1) {
file.remove(existingFiles)
} else{
warning(immediate. = TRUE,
paste('CAUTION, FILES FOR', day_hour, 'ARE BEING APPENDED!!'))
}
taxaFolders_og <- list.files(basepath, full.names = TRUE)
taxaNames <- list.files(basepath)
allTaxa <- list.files(basepath)
taxaFolders <- taxaFolders_og[taxaNames %in% taxa_of_interest]
taxaNames <- taxaNames[taxaNames %in% taxa_of_interest]
if (length(taxaFolders) == 0) {
      stop('No taxa folders match taxa of interest!
           Check capitalization!')
}
t_f <- dir.exists(taxaFolders)
# Make an empty list for reclassficiations with named elements for each taxa
reclassified <- vector("list", length(allTaxa))
names(reclassified) <- allTaxa
for (i in seq_len(length(taxaFolders))) {
misclassified <- vector()
print(paste('TAXA START : ', taxaFolders[i]))
y <- readline(paste('CONFIRM NEW TAXA : ', taxaFolders[i]))
# clear existing files
path <- taxaFolders[i]
if (t_f[i] == FALSE) {
print(paste('TAXA : ', taxaFolders[i], 'DOES NOT EXIST!'))
SKIP = TRUE
} else{
dayHrFolders <- list.files(path, full.names = TRUE)
dayHrFolder <-
grep(dayHrFolders, pattern = day_hour, value = TRUE)
if (length(dayHrFolder) == 0) {
print(paste('TAXA : ', taxaFolders[i], 'DOES NOT EXIST IN ', day_hour, '!'))
SKIP = TRUE
} else{
SKIP = FALSE
# grab aid file info
aidFolder <-
grep(dayHrFolders, pattern = 'aid$', value = TRUE)
aidFile <-
list.files(aidFolder, pattern = day_hour, full.names = TRUE)
aid_dat <- read.table(aidFile, stringsAsFactors = FALSE) # TODO read in pred_results instead of aid
aid_dat <- unique(aid_dat$V1) # KS added unique to duplicate bug fix
rois <- list.files(dayHrFolder, full.names = TRUE)
# find correct conversion factor based on VPR optical setting
if (opticalSetting == 'S0') {
# px to mm conversion factor
frame_mm <- 7
mm_px <-
frame_mm / 1024 # 1024 is resolution of VPR images (p.4 DAVPR manual)
pxtomm <- 1 / mm_px
}
if (opticalSetting == 'S1') {
# px to mm conversion factor
frame_mm <- 14
mm_px <-
frame_mm / 1024 # 1024 is resolution of VPR images (p.4 DAVPR manual)
pxtomm <- 1 / mm_px
}
if (opticalSetting == 'S2') {
# px to mm conversion factor
frame_mm <- 24
mm_px <-
frame_mm / 1024 # 1024 is resolution of VPR images (p.4 DAVPR manual)
pxtomm <- 1 / mm_px
}
if (opticalSetting == 'S3') {
# px to mm conversion factor
frame_mm <- 42 # correct conversion factor (7/11/2022)
mm_px <-
frame_mm / 1024 # 1024 is resolution of VPR images (p.4 DAVPR manual)
pxtomm <- 1 / mm_px
}
for (ii in seq_len(length(rois))) {
print(paste(ii, '/', length(rois)))
img <- magick::image_read(rois[ii], strip = FALSE) %>%
magick::image_scale(scale) %>%
magick::image_annotate(taxaNames[i], color = 'red', size = 12)
# read in original image without scaling
img_o <- magick::image_read(rois[ii])
imgdat <- magick::image_info(img_o)
# annotate original image size
img <-
magick::image_annotate(
img,
text = paste(
round(imgdat$width / pxtomm, digits = 2),
'x',
round(imgdat$height / pxtomm, digits = 2),
'mm'
),
location = '+0+10',
color = 'red'
)
if (img_bright == TRUE) {
img_n <- magick::image_modulate(img, brightness = 500)
img_f <- magick::image_append(c(img, img_n))
print(img_f)
} else{
print(img)
}
#pop up menu
ans <-
menu(
choices = c('Yes', 'No'),
graphics = gr,
title = paste(
"Is the classification, ",
taxaNames[i],
", accurate? (y/n)"
)
)
if (ans == 1) {
} else{
# original method
# sink(file = paste0(day_hour,'/misclassified_', taxaNames[i], '.txt'), append = TRUE)
# cat(aid_dat[[ii]], '\n')
# sink()
misclassified <- c(misclassified, aid_dat[[ii]])
# update to create generic taxa options
# EC 2019 October 30
ans <-
menu(c(allTaxa),
graphics = gr,
title = "Appropriate Taxa Classification?")
reclassified[[ans]] <-
c(reclassified[[ans]], aid_dat[[ii]])
# original method
# sink(file = paste0(day_hour,'/reclassify_', allTaxa[[ans]], '.txt'), append = TRUE)
# cat(aid_dat[ii], '\n')
# sink()
}
}
# Write information to file
# sink(
# file = paste0(day_hour, '/misclassified_', taxaNames[i], '.txt'),
# append = T
# )
withr::with_output_sink(paste0(dirpath, '/misclassified_', taxaNames[i], '.txt'),
append = TRUE,
code = {
cat(misclassified, sep = '\n')
})
#sink()
}# skip = TRUE loop (taxa)
}# skip = TRUE loop (dayhr)
if (SKIP == TRUE) {
# creates blank misclassified file if taxa of interest is not present in specified hour (so images reclassified as this taxa will be moved)
# sink(
# file = paste0(day_hour, '/misclassified_', taxaNames[i], '.txt'),
# append = TRUE
# )
# sink()
withr::with_output_sink(paste0(dirpath, '/misclassified_', taxaNames[i], '.txt'),
append = TRUE,
code = {
cat('\n')
})
}
}
# Write reclassified files for each taxa
for (i in seq_len(length(reclassified))) {
taxa_id <- names(reclassified[i])
recl_tmp <- reclassified[[i]]
# Make a reclassify file only for taxa that need to be reclassified
if (length(recl_tmp != 0)) {
# sink(
# file = paste0(day_hour, '/reclassify_', taxa_id, '.txt'),
# append = TRUE
# )
withr::with_output_sink(paste0(dirpath, '/reclassify_', taxa_id, '.txt'), append = TRUE, code = {
cat(recl_tmp, sep = '\n')
})
# sink()
}
}
}
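# ### example (hedged sketch, not run; interactive)
# steps through each ROI image in the specified day/hour and prompts for
# confirmation or reclassification; paths and values are placeholders
# vpr_manual_classification(day = '222', hour = '03',
#                           basepath = 'E:/autoID_EC_07032019/',
#                           taxa_of_interest = c('Calanus', 'krill'),
#                           gr = FALSE, opticalSetting = 'S2')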
vpr_autoid_create <- function(reclassify, misclassified, basepath, day, hour, mea = TRUE) {
#' Modifies aid and aid mea files based on manual reclassification
#' @author E. Chisholm
#'
#'@param reclassify list of reclassify files (output from vpr_manual_classification())
#'@param misclassified list misclassify files (output from vpr_manual_classification())
#'@param basepath base path to auto ID folder eg 'E:/autoID_EC_07032019/'
#'@param day day identifier for relevant aid & aidmeas files
#'@param hour hour identifier for relevant aid & aidmeas files
#'@param mea logical indicating whether or not there are accompanying measurement files to be created
#'
#'
#' ### examples
#'basepath <- 'E:/autoID_EC_07032019/'
#'day <- '289'
#'hr <- '08'
#'day_hour_files <- paste0('d', day, '.h', hr)
#'misclassified <- list.files(day_hour_files, pattern = 'misclassified_', full.names = TRUE)
#'reclassify <- list.files(day_hour_files, pattern = 'reclassify_', full.names = TRUE)
#'vpr_autoid_create(reclassify, misclassified, basepath)
#'
#'@export
# avoid CRAN notes
  . <- NA # note: day and hour are function arguments and must not be masked here
taxaNames <- list.files(basepath)
# find aid txt files
taxaFolders <- list.files(basepath, full.names = TRUE)
# remove misclassified ROIS
for (i in seq_len(length(misclassified))) {
# TODO: generalize solution, remove hardcoding
# TODO make sure this works with new directory structure
taxa <- vpr_category(misclassified[i])
# if (taxa == 'ctenophores'){ browser()}
# <- substr(misclassified[i], 24, nchar(misclassified[i]) - 4)
taxaFolder <- grep(taxaFolders, pattern = taxa, value = TRUE)
if (!taxa %in% taxaNames) {
stop(
paste(
taxa,
'is not a valid taxa name. Please run vpr_category_create() to create proper file structure within basepath'
)
)
}
mis_roi <- readLines(misclassified[i])
if (length(mis_roi) != 0) {
day_hour <-
unique(substr(sub(
mis_roi, pattern = '.*d', replacement = 'd'
), 1, 8))
day_hour <-
gsub(pattern = "\\\\",
replacement = '.',
x = day_hour)
# mis_roi_gen <- substr(mis_roi, nchar(mis_roi) - 18, nchar(mis_roi))
mis_roi_gen <- unlist(vpr_roi(mis_roi))
mis_roi_df <-
data.frame(mis_roi_gen, day_hour, taxa, stringsAsFactors = FALSE)
aidFolder <-
list.files(taxaFolder, pattern = '^aid$', full.names = TRUE)
mis_roi_df <- mis_roi_df %>%
dplyr::group_by(., day_hour)
if (length(unique(mis_roi_df$day_hour)) > 1) {
stop('MULTIPLE HOURS IN ONE FILE, PLEASE CORRECT.')
}
} else{
# if there is no misclassified information
print(paste('Blank misclassified file found for', taxa, '!'))
# browser()
day_n <- vpr_day(misclassified[i])
hr_n <- vpr_hour(misclassified[i])
day_hour <- paste0('d', day, '.h', hour)
# day_hour <- unique(substr(misclassified[i], 1, 8))
aidFolder <-
list.files(taxaFolder, pattern = '^aid$', full.names = TRUE)
}
# open correct day hour aid file
aids <- list.files(aidFolder, full.names = TRUE)
aid_list_old_fn <- grep(aids, pattern = day_hour , value = TRUE)
# changed day hour pattern to accomodate lack of mis_roi_df in dummy = TRUE scenario
# possibility that original data file does not exist
if (length(aid_list_old_fn) == 0) {
# make blank dummy data file to insert reclassified info into
# needs file path (aidFolder)
aid_list_old_fn <-
paste0(aidFolder, '/dummy_svmaid.', day_hour)
#sink(aid_list_old_fn)
withr::with_output_sink(aid_list_old_fn, code = {
cat('\n')
})
#sink()
print(paste('DUMMY FILE CREATED FOR', taxa, ' : ', aid_list_old_fn))
DUMMY = TRUE
# blank file to be appended
aid_new <- NULL
} else{
aid_list_old <- readLines(aid_list_old_fn)
# BUG FIX 01/16/2020
# issue where duplicated ROIs in original aid files were not getting removed with misclassified/ reclassified data
#browser()
aid_list_old <- unique(aid_list_old)
aid_old_gen <- unlist(vpr_roi(aid_list_old))
# KS big fix: issue #24
if(length(mis_roi) == 0) {
aid_new <- aid_list_old
} else {
sub_mis_roi <- mis_roi_df %>%
dplyr::filter(., day_hour == unique(mis_roi_df$day_hour))
#%>%
# dplyr::filter(.,!duplicated(mis_roi_gen)) #remove duplicates #BUG FIX 01/16/20
mm <- sub_mis_roi$mis_roi_gen %in% aid_old_gen #switched order to prevent error (EC: 01/16/2020)
ind <- grep(mm , pattern = 'TRUE')
# new list with misclassified rois removed
aid_new <- aid_list_old[-ind]
cat(
paste(
'>>>>',
length(ind),
'ROIs removed from',
taxa ,
'in',
unique(day_hour),
'\n>>>> File:',
aid_list_old_fn,
'\n'
)
)
}
DUMMY <- FALSE
}
# FIX MEAS FILE TO MATCH
if(mea == TRUE){
aidMeaFolder <-
list.files(taxaFolder, pattern = '^aidmea$', full.names = TRUE)
aidMeaFile <-
list.files(aidMeaFolder,
pattern = paste0('*', day_hour),
full.names = TRUE)
# if there is no original data file
if (length(aidMeaFile) == 0) {
# make dummy data file
# needs file path
aidMeaFile <-
paste0(aidMeaFolder, "/dummy_svmaid.mea.", day_hour)
# sink(aidMeaFile)
withr::with_output_sink(aidMeaFile, code = {
cat('\n')
})
# sink()
print(paste('DUMMY FILE CREATED FOR MEAS OF', taxa, ' : ', aidMeaFile))
aidMea_new <- NULL
DUMMY = TRUE
} else{
aidMea_old <- read.table(aidMeaFile)
aidMea_old <- unique(aidMea_old) # KS fix for bug duplicates
# KS bug fix: Issue #24
if(length(mis_roi) == 0) {
aidMea_new <- aidMea_old
} else { aidMea_new <- aidMea_old[-ind,]
cat(
paste(
'>>>>',
length(ind),
'Measurements removed from',
taxa ,
'in',
unique(day_hour),
'\n>>>> File:',
aidMeaFile,
'\n'
)
)
}
DUMMY = FALSE
}
}
# add reclassified rois
# to specific taxa
recl <- grep(reclassify, pattern = taxa)
if (length(recl) == 0) {
print(paste('No', taxa, 'to be reclassified'))
# final files only have rois removed
if(mea == TRUE){aidMea_final <- aidMea_new}
aid_final <- aid_new
if (DUMMY == TRUE) {
warning(print(
'No original data and no reclassified data, consider removing taxa.'
))
}
} else{
# loop should end right before files are saved
reclassify_taxa <-
grep(reclassify, pattern = taxa, value = TRUE)
# pull one reclassify file at a time
recl_roi <- readLines(reclassify_taxa)
# get day.hour info
day_hour_re <- paste(day, hour, sep = ".") # arguments now given to function no need to find them in file names
# day_hour_re <-
# substr(sub(recl_roi, pattern = '.*d', replacement = 'd'), 1, 8)
# day_hour_re <-
# gsub(pattern = "\\\\",
# replacement = '.',
# x = day_hour_re)
# get generic roi string
recl_roi_gen <- unlist(vpr_roi(recl_roi))
# which taxa to add recl rois to
# check only one hour present in file
if (length(unique(day_hour_re)) > 1) {
stop(
paste(
reclassify_taxa,
'has more than one unique hour value!
Please double check file.'
)
)
}
recl_roi_df <-
data.frame(recl_roi_gen, day_hour_re, recl_roi, stringsAsFactors = FALSE)
recl_roi_df <- recl_roi_df %>%
dplyr::filter(.,!duplicated(recl_roi_gen))
# filter to remove duplicates causing errors in size and aid files being different lengths
# script was not catching duplicates because of different vpr tow numbers
# add reclassified rois
aid_final <- c(aid_new, recl_roi_df$recl_roi)
cat(paste(
'>>>>',
length(recl_roi_df$recl_roi),
'ROIs added to',
taxa ,
'in',
unique(day_hour),
'\n'
))
# ADD RECLASSIFIED ROI MEAS TO MEA FILE
# find original meas file
# need original taxa info
# find original taxa data
# read in all roi folders
# weird folder character cut off problems
#deprecating getRoiMeasurements
# bp <- substr(basepath, 1, nchar(basepath) - 1)
# auto_id_folder <- bp
# nchar_folder <- nchar(auto_id_folder)
# taxafolder <- list.files(auto_id_folder, full.names = T)
# auto_measure_px <-
# getRoiMeasurements(taxafolder, nchar_folder, unit = 'px')
# DONE : Edit so that only required size data is loaded, without using getRoiMeasurements [ EC 28-01-2020 ]
#get all taxa aid and aidmea files for day/hour of interest
aid_fn_list <- list()
for(l in seq_len(length(taxaFolders))){
all_aids <- list.files(file.path(taxaFolders[[l]], 'aid'), full.names = TRUE)
aid_fn_list[[l]] <- grep(all_aids, pattern = day_hour, value = TRUE)
}
if(mea == TRUE){
aidm_fn_list <- list()
for(l in seq_len(length(taxaFolders))){
all_aidms <- list.files(file.path(taxaFolders[[l]], 'aidmea'), full.names = TRUE)
aidm_fn_list[[l]] <- grep(all_aidms, pattern = day_hour, value = TRUE)
}
# MEASUREMENTS
# browser()
roimeas_dat_combine <-
vpr_autoid_read(
file_list_aid = unlist(aid_fn_list),
file_list_aidmeas = unlist(aidm_fn_list),
export = 'aidmeas',
station_of_interest = NA,
warn = FALSE
)
# find roi ids in meas
recl_roi_num <- recl_roi_df$recl_roi_gen
# day_hour_var <- day_hour #deprecated with getroiMeas
# subset auto measure to correct day hour
#deprecated with getRoiMeas
# auto_measure_px <- auto_measure_px %>%
# dplyr::filter(., day_hour == day_hour_var)
# find index of recl rois in auto measure
recl_roi_meas <-
roimeas_dat_combine[roimeas_dat_combine$roi %in% recl_roi_num ,]
# getRoiMeas method
# recl_roi_meas <-
# auto_measure_px[auto_measure_px$roi_ID %in% recl_roi_num ,]
# check for duplicate ROI IDs
          if (length(recl_roi_meas$roi) > length(recl_roi)) {
            print(paste(
              'Warning, duplicate ROI detected! Removing automatically'
            ))
            print(recl_roi_meas[duplicated(recl_roi_meas$roi), ])
            recl_roi_meas <- recl_roi_meas %>%
              dplyr::filter(.,!duplicated(recl_roi_meas$roi))
}
# append old aidmea file with mis rois removed
#find measurement columns
col_names <- c('Perimeter','Area','width1','width2','width3','short_axis_length','long_axis_length')
recl_roi_meas <- recl_roi_meas %>%
dplyr::select(., col_names)
#combine new reclassifed meas data with original meas data
aidMea_list <- list()
for (iii in 1:7) {
aidMea_list[[iii]] <-
c(aidMea_new[, iii], unname(recl_roi_meas[, iii]))
}
# get into data frame format
aidMea_final <-
data.frame(matrix(unlist(aidMea_list), ncol = length(aidMea_list)))
cat(paste(
'>>>>',
length(recl_roi_meas$Perimeter),
'Measurements added to',
taxa ,
'in',
unique(day_hour),
'\n'
))
if (length(recl_roi_meas$Perimeter) != length(recl_roi_df$recl_roi_gen)) {
warning("Measurements and ROI numbers in reclassification do not match!!!")
}
}# end reclassified loop
}
# save files
dirpath <- file.path('new_autoid', taxa[[1]])
dir.create(dirpath, showWarnings = FALSE, recursive = TRUE)
if(mea == TRUE){
aidMea_final_nm <- paste0('new_aid.mea.', unique(day_hour))
aidMea_final_fn <- file.path(dirpath, 'aidmea', aidMea_final_nm)
      dir.create(file.path(dirpath, 'aidmea'),
showWarnings = FALSE,
recursive = TRUE)
write.table(
file = aidMea_final_fn,
aidMea_final,
sep = " ",
quote = FALSE,
col.names = FALSE,
row.names = FALSE
)
}
# note output could be better formatted to match line width in original files
aid_final_nm <- paste0('new_aid.', unique(day_hour))
aid_final_fn <- file.path(dirpath, 'aid', aid_final_nm)
dir.create(file.path(dirpath, 'aid'),
showWarnings = FALSE,
recursive = TRUE)
write.table(
file = aid_final_fn,
aid_final,
quote = FALSE,
col.names = FALSE,
row.names = FALSE
)
cat(paste(
'>>>> New aid and aid.mea files created for',
taxa,
'in',
unique(day_hour),
'\n'
))
# remove dummy files if they exist
if (DUMMY == TRUE) {
atf <- grep(aid_list_old_fn, pattern = 'dummy')
amtf <- grep(aidMeaFile, pattern = 'dummy')
if (length(atf) != 0 & length(amtf) != 0) {
print(paste('Deleting dummy files!'))
print(paste(aidMeaFile, ' & ', aid_list_old_fn))
unlink(aid_list_old_fn)
unlink(aidMeaFile)
}
}
}
}
# function to create new taxa within data structure post VP output
#' Create a new taxa to be considered for classification after processing with VP
#'
#' creates empty directory structure to allow consideration of new taxa during vpr_manual_classification()
#'
#' @param taxa new taxa name to be added (can be a list of multiple taxa names)
#' @param basepath basepath used for vpr_manual_classification
#'
#' @return empty directory structure using new taxa name inside basepath
#' @export
#'
#'
#'
#'
vpr_category_create <- function(taxa, basepath) {
for (i in seq_len(length(taxa))) {
# create new taxa folder
newtaxapath <- file.path(basepath, taxa[[i]])
dir.create(newtaxapath)
# create blank aid and aidmeas folders
dir.create(paste0(newtaxapath, '/aid'), showWarnings = FALSE)
dir.create(paste0(newtaxapath, '/aidmea'), showWarnings = FALSE)
}
}
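# ### example (hedged sketch, not run)
# creates empty aid/ and aidmea/ folders for a new category inside basepath;
# the taxa name and basepath below are placeholders
# vpr_category_create(taxa = 'larval_fish', basepath = 'E:/autoID_EC_07032019/')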
# ---- end of source file: vprr/R/clf_check_test.R ----
# E. chisholm
# March 23, 2020
# creating sample data for use in examples and tests for vprr
# THIS SHOULD ONLY BE RUN BY DEVELOPERS WHEN UPDATING DATA FOR TESTING
# set up data directory
# save_dir <- 'data/processed/'
## load raw files
## PROCESS
# process raw data saving a new data object after the use of each function
##### PROCESSING --------------------------------------------------------------------------------------------------------------------
#library(vprr)
#library(dplyr)
# source('R/EC_functions.R')
#### FILE PATHS & SETTINGS --------------------------------------------------------------------------------------------------------------------
# loads processing environment specific to user
cruise <- 'COR2019002'
station_of_interest <- 'test'
day_of_interest <- '222'
hour_of_interest <- c('03', '04')
dayhour <- paste0('d', day_of_interest, '.h', hour_of_interest)
year <- '2019'
binSize <- 5
category_of_interest <- c('Calanus', 'krill')
#VPR OPTICAL SETTING (S0, S1, S2 OR S3)
opticalSetting <- "S2"
imageVolume <- 83663 #mm^3
castdir <- 'inst/extdata/COR2019002/rois/vpr5/d222/'
auto_id_folder <- 'inst/extdata/COR2019002/autoid/'
auto_id_path <- list.files(paste0(auto_id_folder, "/"), full.names = T)
# TODO: include station names file
# get day and hour info from station names list
# dayhour <- vpr_dayhour(station_of_interest, file = station_names_file)
##### PULL CTD CASTS ----------------------------------------------------------------------------------------------------------------------------
# get file path for ctd data
# list ctd files for desired day.hours
# ctd_files <- vpr_ctd_files(castdir, cruise, dayhour)
ctd_files <- list.files(path = castdir, pattern = '.dat', full.names = TRUE)
#ctd_files <- list()
#ctd_files[[1]] <- system.file('extdata/COR2019002/rois/vpr5/d222', 'h03ctd.dat', package = 'vprr', mustWork = TRUE)
#ctd_files[[2]] <- system.file('extdata/COR2019002/rois/vpr5/d222', 'h04ctd.dat', package = 'vprr', mustWork = TRUE)
##### READ CTD DATA ----------------------------------------------------------------------------------------------------------------------------
ctd_dat_combine <- vpr_ctd_read(ctd_files, station_of_interest)
# subset data for size concerns
ctd_dat_combine <- ctd_dat_combine[1:1000,]
# save(ctd_dat_combine, file = paste0(save_dir, 'vpr_ctd_read.RData'))
usethis::use_data(ctd_dat_combine, overwrite = TRUE)
##### FIND VPR DATA FILES ----------------------------------------------------------------------------------------------------------------------
# Path to aid for each taxa
aid_path <- paste0(auto_id_path, '/aid/')
# Path to mea for each taxa
aidmea_path <- paste0(auto_id_path, '/aidmea/')
# AUTO ID FILES
aid_file_list <- list()
aidmea_file_list <- list()
for (i in seq_len(length(dayhour))) {
aid_file_list[[i]] <-
list.files(aid_path, pattern = dayhour[[i]], full.names = TRUE)
# SIZE DATA FILES
aidmea_file_list[[i]] <-
list.files(aidmea_path, pattern = dayhour[[i]], full.names = TRUE)
}
aid_file_list_all <- unlist(aid_file_list)
aidmea_file_list_all <- unlist(aidmea_file_list)
# save(aid_file_list_all, file = paste0(save_dir,'aid_files.RData'))
# save(aidmea_file_list_all, file = paste0(save_dir, 'aidmea_files.RData'))
# usethis::use_data( aid_file_list_all, overwrite = TRUE)
# usethis::use_data(aidmea_file_list_all, overwrite = TRUE)
##### READ ROI AND MEASUREMENT DATA ------------------------------------------------------------------------------------------------------------
# ROIs
roi_dat_combine <-
vpr_autoid_read(
file_list_aid = aid_file_list_all,
file_list_aidmeas = aidmea_file_list_all,
export = 'aid',
station_of_interest = station_of_interest,
opticalSetting = opticalSetting,
warn = FALSE
)
# subset for size concerns
roi_dat_combine <- roi_dat_combine[1:1000,]
# save(roi_dat_combine, file = paste0(save_dir, 'vpr_autoid_read_aid.RData'))
usethis::use_data(roi_dat_combine, overwrite = TRUE)
# MEASUREMENTS
roimeas_dat_combine <-
vpr_autoid_read(
file_list_aid = aid_file_list_all,
file_list_aidmeas = aidmea_file_list_all,
export = 'aidmeas',
station_of_interest = station_of_interest,
opticalSetting = opticalSetting,
warn = FALSE
)
# subset for size concerns
roimeas_dat_combine <- roimeas_dat_combine[1:1000,]
# save(roimeas_dat_combine, file = paste0(save_dir, 'vpr_autoid_read_aidmeas.RData'))
usethis::use_data(roimeas_dat_combine, overwrite = TRUE)
##### MERGE CTD AND ROI DATA ---------------------------------------------------------------------------------------------------------------------
ctd_roi_merge <- vpr_ctdroi_merge(ctd_dat_combine, roi_dat_combine)
# save(ctd_roi_merge, file = paste0(save_dir, 'vpr_ctdroi_merge.RData'))
usethis::use_data(ctd_roi_merge, overwrite = TRUE)
##### CALCULATED VARS ----------------------------------------------------------------------------------------------------------------------------
# add avg hr and sigma T data and depth
data <- ctd_roi_merge %>%
dplyr::mutate(., avg_hr = time_ms / 3.6e+06)
data <- vpr_ctd_ymd(data, year)
##### BIN DATA AND DERIVE CONCENTRATION ----------------------------------------------------------------------------------------------------------
ctd_roi_oce <- vpr_oce_create(data)
# save(ctd_roi_oce, file = paste0(save_dir, 'vpr_oce_create.RData'))
usethis::use_data(ctd_roi_oce, overwrite = TRUE)
# bin and calculate concentration for all taxa (combined)
# vpr_depth_bin <- bin_cast(ctd_roi_oce = ctd_roi_oce, binSize = binSize, imageVolume = imageVolume)
# save(vpr_depth_bin, file = paste0(save_dir, 'bin_vpr_data.RData'))
# usethis::use_data(vpr_depth_bin, overwrite = TRUE)
# get list of valid taxa
taxas_list <- unique(roimeas_dat_combine$taxa)
# bin and calculate concentrations for each category
# taxa_conc_n <- vpr_roi_concentration(data, taxas_list, station_of_interest, binSize, imageVolume)
# save(taxa_conc_n, file = paste0(save_dir, 'vpr_roi_concentration.RData'))
# usethis::use_data(taxa_conc_n, overwrite = TRUE)
# bin size data
# size_df_f <- vpr_ctdroisize_merge(data, data_mea = roimeas_dat_combine, taxa_of_interest = category_of_interest)
# save(size_df_f, file = paste0(save_dir, 'vpr_ctdroisize_merge.RData'))
# usethis::use_data(size_df_f, overwrite = TRUE)
##### SAVE DATA ---------------------------------------------------------------------------------------------------------------------------------
# Save oce object
# oce_dat <- vpr_save(taxa_conc_n)
# save(oce_dat, file = paste0(save_dir, 'vpr_save.RData'))
# usethis::use_data(oce_dat, overwrite = TRUE)
# Save RData files
# save(file = paste0(savedir, '/ctdData_', station_of_interest,'.RData'), ctd_dat_combine) #CTD data
# save(file = paste0(savedir, '/stationData_', station_of_interest,'.RData'), data) # VPR and CTD data
# save(file = paste0(savedir, '/meas_dat_', station_of_interest,'.RData'), roimeas_dat_combine) #measurement data
# save(file = paste0(savedir, '/bin_dat_', station_of_interest,'.RData'), vpr_depth_bin) # binned data with cumulative concentrations
# save(file = paste0(savedir, '/bin_size_dat_', station_of_interest,'.RData'), size_df_b) # binned data including measurements
|
/scratch/gouwar.j/cran-all/cranData/vprr/R/create_sample_data.R
|
# data documentation
# march 2020
#' VPR data including CTD and ROI information
#'
#' An oce formatted CTD object with VPR CTD and ROI data from package example
#' data set.
#'
#' @format An oce package format, a 'CTD' object with VPR CTD and ROI data (1000
#' data rows)
#'
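#' @examples
#' # A minimal usage sketch: load the packaged sample object and print an
#' # oce summary of it
#' data("ctd_roi_oce", package = "vprr")
#' summary(ctd_roi_oce)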
"ctd_roi_oce"
#' VPR size information dataframe
#'
#' A sample data frame of size information from Visual Plankton outputs,
#' processed using \code{\link{vpr_ctdroisize_merge}}
#'
#' @format A dataframe with 14 variables including
#' \describe{
#' \item{frame_ID}{Unique identifier for each VPR frame}
#' \item{pressure}{Pressure measured from the VPR CTD in decibars}
#' \item{temperature}{Temperature measured from the VPR CTD in celsius}
#' \item{salinity}{Salinity measured from the VPR CTD}
#' \item{sigmaT}{Density calculated from temperature, salinity and pressure}
#' \item{fluorescence_mv}{Fluorescence measured by the VPR CTD in millivolts
#' (uncalibrated)}
#' \item{turbidity_mv}{Turbidity measured by the VPR CTD in
#' millivolts (uncalibrated)}
#' \item{roi}{Unique ROI identification number - 10
#' digits, 8 digit millisecond time stamp and two unique digits to denote
#' multiple ROIs within a millisecond}
#' \item{taxa}{Category in which ROI has
#' been classified by Visual Plankton}
#' \item{day_hour}{Day and hour in which
#' data was collected, from AutoDeck processing}
#' \item{long_axis_length}{The
#' length of the longest axis of the ROI image, measured by Visual Plankton}
#' \item{station}{Station identifier provided during processing}
#' \item{time_ms}{Time stamp when ROI was collected (milliseconds)}
#' \item{roi_ID}{ROI identification number- 8 digit time stamp, without unique
#' 2 digit ending}
#'}
#'
"size_df_f"
#' A binned data frame of concentration data per category
#'
#' A 'binned' dataframe from sample VPR data, including concentrations
#' of each category, where each data point represents a 5 metre bin of
#' averaged VPR data. Produced using \code{\link{vpr_roi_concentration}}
#'
#' @format A dataframe with 21 variables
#' \describe{
#' \item{depth}{Depth calculated from pressure in metres}
#' \item{min_depth}{The minimum depth of the bin in metres}
#' \item{max_depth}{The maximum depth of the bin in metres}
#' \item{depth_diff}{The difference between minimum and maximum bin depth in metres}
#' \item{min_time_s}{The minimum time in seconds of the bin}
#' \item{max_time_s}{The maximum time in seconds of the bin}
#' \item{time_diff_s}{The difference between minimum and maximum time in a bin, in seconds}
#' \item{n_roi_bin}{The number of ROI observations in a bin}
#' \item{conc_m3}{The concentration of ROIs in a bin, calculated based on image volume and number of frames per bin}
#' \item{temperature}{Temperature measured from the VPR CTD in celsius (averaged within the bin)}
#' \item{salinity}{Salinity measured from the VPR CTD (averaged within the bin)}
#' \item{density}{sigma T density calculated from temperature, salinity and pressure (averaged within the bin)}
#' \item{fluorescence}{Fluorescence measured by the VPR CTD in millivolts
#' (uncalibrated) (averaged within the bin)}
#' \item{turbidity}{Turbidity measured by the VPR CTD in
#' millivolts (uncalibrated) (averaged within the bin)}
#' \item{avg_hr}{The mean time in which bin data was collected, in hours}
#' \item{n_frames}{The number of frames captured within a bin}
#' \item{vol_sampled_bin_m3}{The volume of the bin sampled in metres cubed}
#' \item{toyo}{Identifier of the tow-yo section which bin is a part of, either ascending or descending, appended by a number}
#' \item{max_cast_depth}{The maximum depth of the entire VPR cast}
#' \item{taxa}{The category in which ROIs in bin have been classified by Visual Plankton}
#' \item{station}{Station identifier provided during processing}
#' }
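#' @examples
#' # A minimal usage sketch: load the packaged sample object and inspect the
#' # binned concentration columns documented above
#' data("taxa_conc_n", package = "vprr")
#' head(taxa_conc_n[, c("taxa", "depth", "conc_m3")])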
"taxa_conc_n"
#' VPR CTD data combined with tabulated ROIs
#'
#' A dataframe representing CTD data which has been merged with tabulated
#' ROIs in each category, produced by \code{\link{vpr_ctdroi_merge}}
#'
#' @format A dataframe with 28 variables
#' \describe{
#' \item{time_ms}{Time stamp when ROI was collected (milliseconds)}
#' \item{conductivity}{Conductivity collected by the VPR CTD}
#' \item{pressure}{Pressure measured from the VPR CTD in decibars}
#' \item{temperature}{Temperature measured from the VPR CTD in celsius}
#' \item{salinity}{Salinity measured from the VPR CTD}
#' \item{fluor_ref}{A reference fluorescence baseline provided in millivolts by the VPR CTD for calibrating fluorescence_mv data}
#' \item{fluorescence_mv}{Fluorescence in millivolts from the VPR CTD (uncalibrated)}
#' \item{turbidity_ref}{A reference turbidity baseline provided in millivolts for calibrating turbidity_mv}
#' \item{turbidity_mv}{Turbidity in millivolts from the VPR CTD (uncalibrated)}
#' \item{altitude_NA}{Altitude data from the VPR CTD}
#' \item{day}{Day on which VPR data was collected (from AutoDeck)}
#' \item{hour}{Hour during which VPR data was collected (from AutoDeck)}
#' \item{station}{Station identifier provided during processing}
#' \item{sigmaT}{Density calculated from temperature, pressure and salinity data}
#' \item{depth}{Depth in metres calculated from pressure}
#' \item{roi}{ROI identification number}
#' \item{categories}{For each category name (eg. bad_image_blurry, Calanus, krill), there is a line in the dataframe representing the number of ROIs identified in this category}
#' \item{n_roi_total}{Total number of ROIs in all categories for each CTD data point}
#'
#' }
"ctd_roi_merge"
#' VPR measurement data calculated by Visual Plankton
#'
#' A data frame of measurement information for each ROI in the sample
#' data set including long axis length, perimeter and area, produced by
#' \code{\link{vpr_autoid_read}}
#'
#' @format A data frame with 12 variables
#' \describe{
#' \item{roi}{Unique ROI identifier - 10 digit}
#' \item{taxa}{Category in which ROI has been classified by Visual Plankton}
#' \item{day_hour}{day and hour in which data was collected (from Autodeck)}
#' \item{Perimeter}{The perimeter of the ROI in millimeters}
#' \item{Area}{The area of the ROI in square millimeters}
#' \item{width1}{Width at a first point of the ROI in millimetres (defined in more detail in VPR manual)}
#' \item{width2}{Width at a second point of the ROI in millimetres (defined in more detail in VPR manual)}
#' \item{width3}{Width at a third point of the ROI in millimetres (defined in more detail in VPR manual)}
#' \item{short_axis_length}{The length in millimeters of the ROI along the shorter axis}
#' \item{long_axis_length}{The length in millimeters of the ROI along the longer axis}
#' \item{station}{Station identifier provided in processing}
#' \item{time_ms}{Time stamp when ROI was collected in milliseconds}
#' }
"roimeas_dat_combine"
#' VPR CTD data
#'
#' A dataframe including all CTD parameters from the VPR CTD,
#' produced by \code{\link{vpr_ctd_read}}
#'
#' @format A dataframe with 15 variables
#' \describe{
#' \item{time_ms}{Time stamp when ROI was collected (milliseconds)}
#' \item{conductivity}{Conductivity collected by the VPR CTD}
#' \item{pressure}{Pressure measured from the VPR CTD in decibars}
#' \item{temperature}{Temperature measured from the VPR CTD in celsius}
#' \item{salinity}{Salinity measured from the VPR CTD}
#' \item{fluor_ref}{A reference fluorescence baseline provided in millivolts by the VPR CTD for calibrating fluorescence_mv data}
#' \item{fluorescence_mv}{Fluorescence in millivolts from the VPR CTD (uncalibrated)}
#' \item{turbidity_ref}{A reference turbidity baseline provided in millivolts for calibrating turbidity_mv}
#' \item{turbidity_mv}{Turbidity in millivolts from the VPR CTD (uncalibrated)}
#' \item{altitude_NA}{Altitude data from the VPR CTD}
#' \item{day}{Day on which VPR data was collected (from AutoDeck)}
#' \item{hour}{Hour during which VPR data was collected (from AutoDeck)}
#' \item{station}{Station identifier provided during processing}
#' \item{sigmaT}{Density calculated from temperature, pressure and salinity data}
#' \item{depth}{Depth in metres calculated from pressure}
#' }
"ctd_dat_combine"
#' VPR ROI data
#'
#' A dataframe including VPR ROI data from the sample dataset, produced by
#' \code{\link{vpr_autoid_read}}
#'
#'
#' @format A dataframe with 13 variables
#' \describe{
#' \item{roi}{Unique ROI identifier - 8 digit}
#' \item{categories}{For each category name (eg. bad_image_blurry, Calanus, krill), there is a line in the dataframe representing the number of ROIs identified in this category}
#' \item{time_ms}{Time stamp when ROI was collected (milliseconds)}
#' }
#'
"roi_dat_combine"
|
/scratch/gouwar.j/cran-all/cranData/vprr/R/data.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = FALSE
)
## ----setup--------------------------------------------------------------------
# library(vprr)
## ---- eval = TRUE-------------------------------------------------------------
csv <- read.csv('station_names_COR2019002.csv')
head(csv)
## ---- eval = TRUE------------------------------------------------------------
aid <- read.table(file = system.file("extdata/COR2019002/autoid/bad_image_blurry/aid/sep20_2svmaid.d222.h04", package = 'vprr', mustWork = TRUE))
head(aid)
aidmeas <- readLines(system.file("extdata/COR2019002/autoid/bad_image_blurry/aidmea/sep20_2svmaid.mea.d222.h04", package = 'vprr', mustWork = TRUE))
head(aidmeas)
|
/scratch/gouwar.j/cran-all/cranData/vprr/inst/doc/VPR_processing.R
|
---
title: "VPR_processing"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{VPR_processing}
\usepackage[utf8]{inputenc}
%\VignetteEngine{knitr::knitr}
author: " Emily O'Grady, Kevin Sorochan, Catherine Johnson"
editor_options:
markdown:
wrap: 72
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = FALSE
)
```
```{r setup}
library(vprr)
```
# Section 1: Background
This document was produced at Bedford Institute of Oceanography (BIO) to
accompany the vprr package, a processing and visualization package for
data obtained from the Digital Auto Video Plankton Recorder (VPR)
produced by SeaScan Inc. The VPR consists of a CPU, CTD, and camera
system with different optical settings (i.e., magnifications). It
captures underwater images and records their corresponding salinity,
temperature, and depth. The vprr package functions to join environmental
and plankton data derived from the CTD and camera, respectively, and
calculate plankton concentration and averaged environmental variables
along the path of the VPR. The package does not include automated image
classification; however, there is an optional manual classification
module, which can be used to review and correct outputs from automated
image classification while providing a record of any
(re)classifications.
The VPR outputs two raw files (.dat and .idx) for a given time period in
a deployment. These files are processed together in a software provided
with the VPR (i.e., AutoDeck), which decompresses the images, extracts
"regions of interest" (ROIs), and outputs ROI image files and a
corresponding CTD data file (.dat). The ROI file names are numeric
consisting of 10 digits. The first 8 digits correspond to the number of
milliseconds elapsed in the day at the time the image was captured. The
last two digits correspond to the ROI identifier (01-99). The ROIs and
corresponding CTD data are linked by their 8 digit time stamp. After the
ROIs have been extracted from the raw files they may be sorted into
categories manually or by an automated classification procedure. In
vprr, file naming conventions and directory structures are inherited
from a VPR image classification and analysis software, Visual Plankton.
However, the functionality of vprr is not dependent on the use of VP.
The data inputs for processing in vprr consist of the following file
types: aid (.txt), aidmeas (.txt), and CTD (.dat). The aid and aidmeas
files are derived from separate image classification and measurement
steps outside of vprr. Each "aid" (i.e., autoid) file contains file
paths to individual ROIs that have been classified to the category of
interest. The corresponding "aidmeas" file contains morphological data
for the ROIs (e.g., long axis length, perimeter, etc.).

# Section 2: Summary of vprr data processing steps
The processing steps in R from the 'Post Processing and Visualization'
box in Figure 1 are detailed in Figure 2.

## Section 2.1: Processing Environment
Before beginning data processing with vprr, it is recommended that a
processing environment be created containing commonly used variables and
file paths. The simplest and most reproducible way to achieve this is to
write an R script where all the mission and system specific variables
are contained, then save the environment as a RData file to be loaded at
the start of any processing scripts. This processing environment
contains reference to a station identifier csv file which should be
created for each mission. This file links station names from deck sheets
to the day and hour values assigned by AutoDeck. Day and hour values
represent the Julian day (3 digit) and two digit hour (24 hour clock)
when sampling was done. Note that the day and hour values will be in the
time zone of the computer used to run AutoDeck. Ensure that this matches
the time zone of the VPR CPU at the time of data collection to avoid a
time offset between data sources.
Another important part of setting up the processing environment is
ensuring the proper directory structure is in place, see Appendix 1 for
details on the required directory structure.
# set VPR processing environment
# WORKING DIRECTORY
wd <- "C:/VPR_PROJECT/"
setwd(wd)
# MISSION
cruise <- 'COR2019002'
year <- 2019
# CSV FILE WITH STATION NAMES AND CORRESPONDING DAY/HOUR INFO
station_names_file <- paste0("station_names_", cruise, ".csv")
# note columns should be labeled : station, day, hour
# DIRECTORY FOR CTD DATA (output from AutoDeck)
castdir <- paste0('D:/', cruise, "/", cruise, "_autodeck/")
# AUTOID FOLDER FOR MEASUREMENT DATA (aidmeas & aid files)
drive <- 'C:/'
auto_id_folder <- paste0(drive, "cruise_", cruise, "/", "autoid")
#!!NO BACKSLASH AT END OF STRING
# PATH TO AUTOID CATEGORY FOLDERS
auto_id_path <- list.files(paste0(auto_id_folder, "/"), full.names = T)
# CREATE STANDARD DIRECTORY FOR SAVED DATA FILES PER MISSION
savedir <- paste0(cruise, '_data_files')
dir.create(savedir, showWarnings = FALSE)
# CREATE STANDARD DIRECTORY FOR SAVED DATA PRODUCTS PER MISSION AND STATION
stdir <- paste('data_product/', cruise, sep = "")
dir.create(stdir, showWarnings = FALSE, recursive = TRUE)
# BIN SIZE FOR COMPUTING DEPTH AVERAGED PLANKTON CONCENTRATION ALONG PATH OF THE VPR
binSize <- 3
# CATEGORY OF INTEREST (PLANKTON CATEGORIES TO BE PROCESSED AND VISUALIZED)
category_of_interest <-
c(
'krill',
'Calanus',
'chaetognaths'
)
#### SAVE ####
# SAVE ALL FILE PATHS AND SETTINGS AS PROJECT ENVIRONMENT
save.image(file = paste0(cruise,'_env.RData'))
An example of the station names csv file looks like this:
```{r, eval = TRUE}
csv <- read.csv('station_names_COR2019002.csv')
head(csv)
```
Once this environment is set, it can be loaded into any processing
session by using
load('COR2019002_env.RData') # where COR2019002 is mission name
If sharing processing code with colleagues on version control, keeping
the environment variables separate (outside of the git project) will
allow collaboration while avoiding inconsistencies in file paths or
folder names.
## Section 2.2: Image Copying (optional):
ROIs are organized into folders corresponding to their assigned
classification categories from automated image classification. The
information in each aid file is used to create a folder of images that
have been classified to that category. This step is only required if
manual re-classification (see Section 2.3) is intended. Further details
on image copying are provided in Section 3.
## Section 2.3: Manual re-classification (optional):
Automated classifications are manually checked, which allows for
manual correction and addition of categories not previously used for
automated classification. ROIs that have been copied in Section 2.2 are
manually sorted to correct for misclassifications. Updated aid and aidmeas files are produced.
Further details on manual re-classification are provided in Section 4.
## Section 2.4: Processing:
Data outputs from Autodeck (ctd .dat files), automatic classifications (aid files)
and measurements (aidmeas files) are joined together. The aid and aidmeas files, which may have been updated
(see Section 2.3) are joined with CTD text files by the 8 digit time
stamp. The data are then averaged in user-defined vertical bins to
produce a time series of plankton concentrations and environmental
variables. Quality controlled data products (before and after binning)
are then exported in simple formats (csv, RData, oce) for plotting and
analysis. Further details on data processing are provided in Section 5.
# Section 3: Image Copying
In this step, ROIs are copied to folders that are organized based on the
day and hour of data collection and classification category assigned
from automatic classification (see Appendix 1: 'Image Folders'). The
images are organized by AutoDeck into day and hour; however,
reorganizing them based on classification allows easier human
interaction with the data and visual inspection of classifications.
Moreover, this directory structure is used by the next step of
processing (i.e., manual re-classification). To implement this step use
the function `vprr::vpr_autoid_copy()`. For more information on input
variables, please see the documentation for `vpr_autoid_copy()`.
# create variables
# ---------------------
basepath <- "C:\\data\\cruise_COR2019002\\autoid\\"
# note this is the same as the auto_id_folder environment variable except the file separator is different, because this script will run source code in command line which does not recognize '/' as a file separator
day <- "123"
hour <- "01" # note leading zero
classifier_type <- "svm"
classifier_name <- "myclassifier"
# run file organizer
# ---------------------
vpr_autoid_copy(basepath, day, hour, classifier_type, classifier_name)
# Section 4: Manual Re-classification
Manual re-classification of some categories after automated
classification may be required to achieve identification accuracy
standards. In this step, ROIs are displayed on the screen one at a time
for manual verification. If an image has been misclassified or if it
falls into a new user-defined category (described below), the image can
be re-classified. This is especially useful for classification of rare
categories that were not defined prior to automatic classification.
After completing manual re-classification for a day-hour set, new aid
and aidmeas files are created for new categories, which are identical in
format to original aid and aidmeas files.
## Section 4.1: Preparing the environment by setting some variables
- Load the processing environment, which includes the `auto_id_folder`
variable.
- Set day and hour of interest.
- Set category of interest. These categories are the existing automated
classification categories which require manual re-classification, as
well as any new categories. The `vprr::vpr_category_create()`
function sets up the folder structure for any new categories which
have been added to the list of interest.
- Run manual re-classification with
`vprr::vpr_manual_classification()`. This function has a few
optional arguments to customize the manual re-classification
experience, notably `gr` which is a logical value determining
whether or not manual re-classification options appear as pop ups or
in the console, as well as `img_bright`, a logical which determines
whether or not the original image is appended with an extra bright
version of the image. Having a bright version of the image allows
the user to see the outline of the organism better, any thin
appendages become more clear and gelatinous organisms like
chaetognaths or ctenophores are easier to distinguish.
```{=html}
<!-- -->
```
#### MANUAL RE-CLASSIFICATION
# -------------------------------------
# Once classified images are sorted by taxa
# verify classification accuracy by manually
# looking through classified images
#### USER INPUT REQUIRED ####
load('COR2019002_env.RData')
day <- '235'
hr <- '19' # keep leading zeros, must be two characters
category_of_interest <-
c(
'krill',
'Calanus',
'chaetognaths',
'ctenophores',
'Other',
'larval_fish',
'marine_snow',
'small_copepod',
'other_copepods',
'larval_crab',
'amphipod',
'Metridia',
'Paraeuchaeta',
'cnidarians',
'speciesA' # new category
)
# add new category (optional)
vpr_category_create(taxa = category_of_interest, auto_id_folder)
# ensures there is proper folder structure for all categories of interest
# reclassify images
vpr_manual_classification(day = day, hour = hr, basepath = auto_id_folder, gr = FALSE,
taxa_of_interest = category_of_interest, scale = 'x300',
opticalSetting = 'S3')
## Section 4.2: Generate new aid and aidmeas files
The function`vprr::vpr_manual_classification()` produces two files
('misclassified' and 're-classified' text files) as a record of manual
re-classification, which are found in the R project working directory in
folders named by the day and hour that the data were collected. The
function`vprr::vpr_autoid_create()`takes these files and outputs new aid
and aidmeas files in the R working directory in folders named by
classification category. This step should be run after each hour of data
is manually re-classified.
#### REORGANIZE ROI AND ROIMEAS DATA
# -----------------------------------------
day_hour_files <- paste0('d', day, '.h', hr)
misclassified <- list.files(day_hour_files, pattern = 'misclassified_', full.names = TRUE)
reclassify <- list.files(day_hour_files, pattern = 'reclassify_', full.names = TRUE)
# MOVE ROIS THAT WERE MISCLASSIFIED INTO CORRECT FILES & REMOVE MISCLASSIFIED ROIS
vpr_autoid_create(reclassify, misclassified, auto_id_folder)
The aid and aidmeas files are both text files which are
specifically formatted to record classification outputs for further
processing. The format and naming conventions of these files have been inherited from a VPR image classification and data processing tool called Visual Plankton (written in Matlab); however, the functionality of vprr is independent of that of Visual Plankton. The aid files are text records of image paths,
where each individual text file represents a classification category. Each line of
the aid file is the full path to an image which was classified into the
designated category. Note that the naming scheme of aid files does not
include the category name in the file title and the category is only
identifiable by the folder in which it is located. For example the 'krill'
classification aid file might be named 'oct10_1svmaid.d224.h01' but be located within the
'krill' autoid folder. The aidmeas files are also text files which represent a
variety of different measurements taken of the object(s) within a ROI image. The columns of the aidmeas files are
c('Perimeter', 'Area', 'width1', 'width2', 'width3', 'short_axis_length', 'long_axis_length'). Examples
of each of these files can be found below.
```{r, eval = TRUE}
aid <- read.table(file = system.file("extdata/COR2019002/autoid/bad_image_blurry/aid/sep20_2svmaid.d222.h04", package = 'vprr', mustWork = TRUE))
head(aid)
aidmeas <- readLines(system.file("extdata/COR2019002/autoid/bad_image_blurry/aidmea/sep20_2svmaid.mea.d222.h04", package = 'vprr', mustWork = TRUE))
head(aidmeas)
```
## Section 4.3: File check
The last step of manual re-classification includes some manual file
organization and final checks. These files should be manually reorganized
in a new directory which will become the new auto_id_folder (see Appendix 1:
Directory Structure). Any aid and aidmeas files from categories which were
not manually checked and re-classified should also be added to this new auto_id_folder
if they are to be included in further processing (e.g., computation of concentration
in user-specified depth bins). After the updated aid and aidmeas files have been
manually reorganized they can be quality controlled using vprr::vpr_autoid_check().
The user could also manually check the files. The automated check function removes
any empty aid files created. Empty files can cause errors in processing down the line.
This function also checks that (1) aid and aidmeas files are aligned within an hour of data;
(2) aid and aidmeas files include the same number of ROIs; and (3) the
VPR tow number for all files is the same.
#### FILE CHECK
# --------------------------------
# aid check step
# removes empty aid files, and checks for errors in writing
vpr_autoid_check(basepath, cruise) # OUTPUT: text log 'CRUISE_aid_file_check.txt' in working directory
# Section 5: Data Processing
This is the main chunk of coding required to generate data products.
This step does not require image copying (Section 3) or manual
re-classification (Section 4) steps; however, if these steps were taken
the aid and aidmeas files generated from manual re-classification and
integrated into the directory structure (as specified in Section 4) are
used as an input. The following is a walk-through of processing data
from a DFO field mission (i.e. mission COR2019002) in the southern Gulf
of St. Lawrence in 2019. First, all libraries should be loaded and the
processing environment, described in Section 2.4 should be loaded.
##### PROCESSING --------------------------------------------------------------------------------------------------------------------
library(vprr)
#### FILE PATHS & SETTINGS --------------------------------------------------------------------------------------------------------------------
# loads processing environment specific to user
load('COR2019002_env.RData')
This section allows a user to process all stations of a particular
mission in a loop. This can be modified or removed based on personal
preference
##### STATION LOOP ----------------------------------------------------------------------------------------------------------------------------
all_stations <- read.csv(station_names_file, stringsAsFactors = FALSE)
all_stations_of_interest <- unique(all_stations$station)
for (j in 1:length(all_stations_of_interest)){
station_of_interest <- all_stations_of_interest[j]
cat('Station', station_of_interest, 'processing... \n')
cat('\n')
Optical settings and image volume variables should be set. If they are
consistent throughout the mission, they could also be added to the
processing environment (Section 2.4).
#==========================================#
# Set optical settings & Image Volume #
# !Should be updated with each mission! #
#==========================================#
if(cruise == "COR2019002") {
#VPR OPTICAL SETTING (S0, S1, S2 OR S3)
opticalSetting <- "S2"
imageVolume <- 108155 #mm^3
}
CTD data are loaded in using `vprr::vpr_ctd_files` to find files and
`vprr::vpr_ctd_read` to read in files. During CTD data read in, a
seawater density variable `sigmaT` is derived using the function
`oce::swSigmaT`, and `depth` (in meters) is derived from pressure using
the function `oce::swDepth`. For more information on the oce package,
see dankelley/oce on GitHub.
#get day and hour info from station names list
dayhour <- vpr_dayhour(station_of_interest, file = station_names_file)
##### PULL CTD CASTS ----------------------------------------------------------------------------------------------------------------------------
# get file path for ctd data
# list ctd files for desired day.hours
ctd_files <- vpr_ctd_files(castdir, cruise, dayhour)
##### READ CTD DATA ----------------------------------------------------------------------------------------------------------------------------
ctd_dat_combine <- vpr_ctd_read(ctd_files, station_of_interest)
cat('CTD data read complete! \n')
cat('\n')
The aid and aidmea files, which reflect manual classification (if used,
see Section 4), are then found
##### FIND VPR DATA FILES ----------------------------------------------------------------------------------------------------------------------
# Path to aid for each taxa
aid_path <- paste0(auto_id_path, '/aid/')
# Path to mea for each taxa
aidmea_path <- paste0(auto_id_path, '/aidmea/')
# AUTO ID FILES
aid_file_list <- list()
aidmea_file_list <- list()
for (i in 1:length(dayhour)) {
aid_file_list[[i]] <-
list.files(aid_path, pattern = dayhour[[i]], full.names = TRUE)
# SIZE DATA FILES
aidmea_file_list[[i]] <-
list.files(aidmea_path, pattern = dayhour[[i]], full.names = TRUE)
}
aid_file_list_all <- unlist(aid_file_list)
aidmea_file_list_all <- unlist(aidmea_file_list)
remove(aid_file_list, aidmea_file_list, aid_path, aidmea_path)
aid and aidmeas files are read in using `vprr::vpr_autoid_read()`.
##### READ ROI AND MEASUREMENT DATA ------------------------------------------------------------------------------------------------------------
# ROIs
roi_dat_combine <-
vpr_autoid_read(
file_list_aid = aid_file_list_all,
file_list_aidmeas = aidmea_file_list_all,
export = 'aid',
station_of_interest = station_of_interest,
opticalSetting = opticalSetting
)
# MEASUREMENTS
roimeas_dat_combine <-
vpr_autoid_read(
file_list_aid = aid_file_list_all,
file_list_aidmeas = aidmea_file_list_all,
export = 'aidmeas',
station_of_interest = station_of_interest,
opticalSetting = opticalSetting
)
cat('ROI and measurement data read in complete! \n')
cat('\n')
Next, CTD and aid data are merged to create a data frame describing both
environmental variables (eg. temperature, salinity) and classified
images. The function used is `vprr::vpr_ctdroi_merge()`.
##### MERGE CTD AND ROI DATA ---------------------------------------------------------------------------------------------------------------------
ctd_roi_merge <- vpr_ctdroi_merge(ctd_dat_combine, roi_dat_combine)
cat('CTD and ROI data combined! \n')
cat('\n')
Before final export of data products, the following variables are added
to the data frame: time in hours (time_hr) is calculated, and a time
stamp (ymdhms) with POSIXct signature in Y-M-D h:m:s format is added
using the function `vpr_ctd_ymd`.
##### CALCULATED VARS ----------------------------------------------------------------------------------------------------------------------------
# add time_hr and sigma T data and depth
data <- ctd_roi_merge %>%
dplyr::mutate(., time_hr = time_ms / 3.6e+06)
data <- vpr_ctd_ymd(data, year)
# ensure that data is sorted by time to avoid processing errors
data <- data %>%
dplyr::arrange(., time_ms)
cat('Initial processing complete! \n')
cat('\n')
# clean environment
remove(ctd_roi_merge)
Average plankton concentration and environmental variables (e.g.,
temperature, salinity, density, etc.) are then computed within a user
defined depth bin. The computation of plankton concentration is dependent
on the assumption that the same animals are not re-sampled by the
instrument. The bin-averaging step standardizes plankton concentrations
when the VPR does not sample the water column evenly. This can occur
due to characteristics of the deployment or variability in the sampling
rate, which is not necessarily constant in older versions of the VPR.
Binning also reduces noise in the data. First, an oce CTD object is
created using `vprr::vpr_oce_create()`. Then, bin-averaging is done
using `vprr::bin_cast()`. Concentrations are calculated for each
category of interest.
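As a rough sketch of the concentration calculation (inferred here from the variables documented for the binned output; the exact implementation lives inside `bin_cast()` and `vpr_roi_concentration()`), the per-bin concentration is approximately the ROI count divided by the volume imaged in that bin:
# hypothetical illustration only, not the package source
# imageVolume is in cubic millimetres, so convert the bin volume to cubic metres
vol_sampled_bin_m3 <- n_frames * imageVolume * 1e-9
conc_m3 <- n_roi_bin / vol_sampled_bin_m3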
##### BIN DATA AND DERIVE CONCENTRATION ----------------------------------------------------------------------------------------------------------
ctd_roi_oce <- vpr_oce_create(data)
# bin and calculate concentration for all taxa (combined)
vpr_depth_bin <- bin_cast(ctd_roi_oce = ctd_roi_oce, binSize = binSize, imageVolume = imageVolume)
# get list of valid taxa
taxas_list <- unique(roimeas_dat_combine$taxa)
# bin and calculate concentrations for each category
taxa_conc_n <- vpr_roi_concentration(data, taxas_list, station_of_interest, binSize, imageVolume)
cat('Station', station_of_interest, 'processing complete! \n')
cat('\n')
# bin size data
size_df_f <- vpr_ctdroisize_merge(data, data_mea = roimeas_dat_combine, taxa_of_interest = category_of_interest)
size_df_b <- vpr_size_bin(size_df_f, bin_mea = 3)
Finally, data are saved as RData and csv files for export and plotting.
Data are also saved as an `oce` object in order to preserve both data
and metadata in an efficient format.
##### SAVE DATA ---------------------------------------------------------------------------------------------------------------------------------
# Save oce object
oce_dat <- vpr_save(taxa_conc_n)
save(file = paste0(savedir, '/oceData_', station_of_interest,'.RData'), oce_dat) # oce data and metadata object
# Save RData files
save(file = paste0(savedir, '/ctdData_', station_of_interest,'.RData'), ctd_dat_combine) #CTD data
save(file = paste0(savedir, '/stationData_', station_of_interest,'.RData'), data) # VPR and CTD data
save(file = paste0(savedir, '/meas_dat_', station_of_interest,'.RData'), roimeas_dat_combine) #measurement data
save(file = paste0(savedir, '/bin_dat_', station_of_interest,'.RData'), vpr_depth_bin) # binned data with cumulative concentrations
save(file = paste0(savedir, '/bin_size_dat_', station_of_interest,'.RData'), size_df_b) # binned data including measurements
cat('CTD, ROI-VPR merge, ROI measurement saved as RData! \n')
cat('\n')
# Write csv files
# write.csv(file = paste0(stdir, '/vpr_data_unbinned', station_of_interest, '.csv'), data, row.names = F) # VPR and CTD data (not binned)
# write.csv(file = paste0(stdir, '/vpr_meas', station_of_interest, '.csv'), roimeas_dat_combine) # measurement data
write.csv(file = paste0(stdir, '/vpr_data_binned', station_of_interest, '.csv'), taxa_conc_n) # VPR and CTD data with concentrations by taxa
cat('ROI measurements, ROI-CTD merge-unbinned, and ROI-CTD merge-binned written to csv! \n')
cat('\n')
} #end of station loop
# Section 6: Plotting
Although not primarily a plotting package, vprr can produce contour
plots, profile plots and temperature-salinity (TS) plots from VPR data
sets. A few example plots are provided in the following code. The first
step to plotting is properly loading in the processed VPR data objects
created in processing. The environment, described in Section 2.4 should
also be loaded. The individual data files are found by distinct names
(e.g., "stationData"). The directory structure may be different
depending on the `savedir` where data files were saved during
processing. Note that the following plotting examples are tailored for
tow-yo pattern VPR deployments.
##### FILE PATH & SETTINGS -----------------------------------------------------------------------------------------------------------------------
library(vprr)
# loads all file paths and environment vars specific to User
load('COR2019002_env.RData')
#find all data files
fn_all_st <- list.files(paste0(cruise, "_data_files/"), pattern = "stationData", full.names = T)
fn_all_meas <- list.files(paste0(cruise, "_data_files/"), pattern = "meas", full.names = T)
fn_all_conc <- list.files(paste0("data_product/", cruise, "/"), pattern = "data_binned", full.names = T)
fn_all_bin <- list.files(paste0(cruise,"_data_files/"), pattern = 'bin_dat', full.names = T)
Once files are loaded, plots for all stations in a mission can be
generated using a loop, in order to efficiently generate comparable
plots. The example below uses a loop to run through a list of stations
described by a csv file. This loop also isolates two specific
classification categories to plot (eg. "Calanus" and "krill").
####START STATION LOOP ---------------------------------------------------------------------------------------------------------------------------
setwd(wd)
all_stations <- read.csv(station_names_file, stringsAsFactors = FALSE)
all_stations_of_interest <- unique(all_stations$station)
taxa_to_plot <- c("Calanus", "krill")
for (j in 1:length(all_stations_of_interest)){
setwd(wd)
station <- all_stations_of_interest[j]
cat('station', station ,'starting to plot.... \n')
cat('\n')
Data files are loaded for the specific station of interest. This loads
in all relevant RData files as well as the concentration data saved as a
csv file.
#load station roi and ctd data
fn_st <- grep(fn_all_st, pattern = station, value = TRUE, ignore.case = TRUE)
fn_meas <- grep(fn_all_meas, pattern = station, value = TRUE, ignore.case = TRUE)
fn_conc <- grep(fn_all_conc, pattern = station, value = TRUE, ignore.case = TRUE)
fn_bin <- grep(fn_all_bin, pattern = station, value = TRUE, ignore.case = TRUE)
load(fn_st)
load(fn_meas)
load(fn_conc)
load(fn_bin)
# load concentration data
taxa_conc_n <- read.csv(fn_conc, stringsAsFactors = F)
station_name <- paste('Station ', station)
The final section of set up indicates the directory in which plots will
be saved and provides generic plot size arguments which will control how
large the saved .png files are.
# directory for plots
stdir <- paste0('figures/', cruise, '/station', station)
dir.create(stdir, showWarnings = FALSE, recursive = TRUE)
setwd(stdir)
width = 1200
height = 1000
The following example presents a plot of the concentrations of a taxon
as scaled bubbles along the tow path, overlain on contours of an
environmental variable from the CTD. The main function used is
`vprr::vpr_plot_contour()` which uses a standard VPR data frame
(`taxa_conc_n` - produced from processing (Section 5)) and plots the
background contours. Interpolation methods can be adjusted based on data
or preference. The VPR tow path can be added on top of contours. This
method can be repeated with various environmental variables (e.g.,
temperature, salinity etc.) used to calculate the contours, by changing
the `var` argument in `vprr::vpr_plot_contour()`.
# Density (sigmaT)
png('conPlot_taxa_dens.png', width = width, height = height)
p <- vpr_plot_contour(taxa_conc_n[taxa_conc_n$taxa %in% c(taxa_to_plot),], var = 'density', dup = 'strip', method = 'oce', bw = 0.5)
p <- p + geom_line(data = data, aes(x = time_hr - min(time_hr), y = pressure), col = 'snow4', inherit.aes = FALSE) +
geom_point(data = taxa_conc_n[taxa_conc_n$taxa %in% c(taxa_to_plot),], aes(x = time_hr, y = min_pressure, size = conc_m3), alpha = 0.5)+
ggtitle(station_name ) +
labs(size = expression("Concentration /m" ^3), fill = 'Density')+
scale_size_continuous(range = c(0, 10)) +
facet_wrap(~taxa, ncol = 1, scales = 'free') +
theme(legend.key.size = unit(0.8, 'cm'),
axis.title = element_text(size = 20),
strip.text = element_text(size = 20),
plot.title = element_text(size = 32),
axis.ticks = element_line(size = 1, lineend = 'square'),
axis.text = element_text(size = 30),
legend.text = element_text(size = 20),
legend.title = element_text(size = 25)
)
print(p)
dev.off()
Vertical profiles of plankton concentration and environmental variables
compressed over the sampling duration can be generated using
`vprr::vpr_plot_profile()`. This type of plot indicates the overall
pattern in vertical distribution over the VPR deployment.
png('profilePlots.png', width = 1000, height = 500)
p <- vpr_plot_profile(taxa_conc_n, taxa_to_plot)
print(p)
dev.off()
Temperature-salinity (TS) plots can be generated to visualize how
plankton concentration varies across different water masses. In the
example below, a TS plot is produced in ggplot (with labeled
isopycnals), and concentration bubbles for each selected classification
group are overlaid on the plot. The basic TS bubble plot can be easily
manipulated using ggplot2 grammar, for example the plots can be faceted
by classification group or axis labels and sizing can be adjusted (see
ggplot2 package for more information).
####TS BUBBLE PLOT ----------------------------------------------------------------------------------------------------------------------------
# plot by taxa
taxa_conc <- taxa_conc_n[taxa_conc_n$conc_m3 > 0,]
png('TS_conc_taxa.png', width = 1000, height = 500)
p <- vpr_plot_TS(taxa_conc[taxa_conc$taxa %in% c(taxa_to_plot),], var = 'conc_m3') +
facet_wrap(~taxa, nrow = 1) +
theme(strip.text = element_text(size = 18),
axis.title = element_text(size = 20),
panel.spacing = unit(2, 'lines'))
print(p)
dev.off()
cat('station', station, 'complete! \n')
cat('\n')
} # end station loop
# Section 7: Disclaimer
The functions in vprr were created for a specific project and have not
been tested on a broad range of field mission data. It is possible that
deviations in data format and directory structure from that described
herein may result in errors when using vprr. The vprr package was
developed for the purpose of processing data collected during tow-yo VPR
deployments and image classification using VP. The purpose of this
document is to provide a template for processing and visualizing VPR
data that can be adapted by other users for their own objectives.
# Appendix 1: Directory Structure
Visual Plankton (Matlab image classification software) requires a very
specific directory structure in order to function. Since this processing
is meant to directly follow this image classification, the VP directory
structure is used for consistency. This allows a smooth transition
between the Matlab classifications and the completion of processing in
R. The directory structure required is described below
- C:/
- data
- cruise_name
- autoid
- taxa
- aid
- aidmea
- image folders
- rois
- vprtow#
- day
- hour
- trrois
- vprtow#
- day
- hour
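A minimal sketch of building the taxa-level part of this tree in R (the path and category names here are assumptions for illustration; `vpr_category_create()` creates the same aid/aidmea folders for you):
# hypothetical example of building the autoid skeleton
auto_id_folder <- 'C:/data/cruise_COR2019002/autoid'
for (taxa in c('Calanus', 'krill')) {
  dir.create(file.path(auto_id_folder, taxa, 'aid'), recursive = TRUE, showWarnings = FALSE)
  dir.create(file.path(auto_id_folder, taxa, 'aidmea'), recursive = TRUE, showWarnings = FALSE)
}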
This is your project directory, where your R scripts and work products
will be stored:
- ...
- VPR_PROJECT
- R
- R scripts/ workflows
- new_autoid
- taxa
- aid
- aidmea
- manual_reclassification_record
- day/hour
- misclassified
- reclassified
- figures
- station names (csv)
# Appendix 2: Glossary
**Aid files** - Visual Plankton output text file, listing file path
information for ROIs of a specific classification group
**AidMeas files (AutoID measurements)** - Visual Plankton output text
file, listing measurement data for ROIs of a specific classification
group. Unit is pixels and columns are 'Perimeter', 'Area', 'width1',
'width2', 'width3', 'short_axis_length', 'long_axis_length'
**Auto Deck** - software which pulls plankton images from Video Plankton
Recorder frames based on specific settings
**Auto ID** - The automatic classification given to an image from Visual
Plankton machine learning algorithm
**AutoID files** - Includes both Aid and AidMeas files as part of Visual
Plankton's automatic classifications
**BIO** - Bedford Institute of Oceanography, a research institute in
Halifax NS, Canada
**Classification category (Taxa)** - A defined group under which VPR
images can be classified, often represents a taxonomic group (e.g.
Krill), but can also be defined by image type (e.g. 'bad_image_blurry'),
or other (e.g. 'marine_snow'), should be one continuous string (no
spaces)
**CPU** - Central processing unit (computer processor)
**CTD** - Conductivity, Temperature and depth sensor instrument
**Day** - Julian calendar day on which VPR data was collected (three
digits)
**Hour** - Two digit hour (24 hour clock) describing time at which VPR
data was collected
**Image volume** - The measured volume of water captured within a VPR
image. Calculated based on optical setting and VPR standards. This is
based on AutoDeck settings, it is calculated from the VPR calibration
file (unique to each instrument). It will change based on AutoDeck
settings and should be updated with each cruise/ processing batch. It is
measured in cubic mm
**Optical Setting** - A VPR setting controlling image magnification and
field of view, which can be S0, S1, S2 or S3, where S0 has the greatest
magnification and smallest image volume, and S3 has the least
magnification and largest image volume
**ROI** - Region of interest, images identified by autodeck within VPR
frames based on settings defined in autoDeck program
**SeaScan** - Oceanographic instrument manufacturing company
**station** - A named geographic location, where the VPR was deployed
**Tow-yo** - A VPR deployment method where the VPR is towed behind a
vessel while being raised and lowered through the water column in order
to sample over both depth and distance
**TRROIS** - Training set of images used to train machine learning
algorithm in Visual Plankton
**VP** - Visual Plankton program run in Matlab
**VPR** - Video Plankton Recorder, oceanographic instrument used to
image small volumes of water for the purpose of capturing images of
plankton
**vprtow#** - A numeric code which is unique to each VPR deployment
**Working Directory** - File path on your computer that defines the
default location of any files you read into R, or save out of R
|
/scratch/gouwar.j/cran-all/cranData/vprr/inst/doc/VPR_processing.Rmd
|
---
title: "VPR_processing"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{VPR_processing}
\usepackage[utf8]{inputenc}
%\VignetteEngine{knitr::knitr}
author: " Emily O'Grady, Kevin Sorochan, Catherine Johnson"
editor_options:
markdown:
wrap: 72
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = FALSE
)
```
```{r setup}
library(vprr)
```
# Section 1: Background
This document was produced at Bedford Institute of Oceanography (BIO) to
accompany the vprr package, a processing and visualization package for
data obtained from the Digital Auto Video Plankton Recorder (VPR)
produced by SeaScan Inc. The VPR consists of a CPU, CTD, and camera
system with different optical settings (i.e., magnifications). It
captures underwater images and records their corresponding salinity,
temperature, and depth. The vprr package functions to join environmental
and plankton data derived from the CTD and camera, respectively, and
calculate plankton concentration and averaged environmental variables
along the path of the VPR. The package does not include automated image
classification; however, there is an optional manual classification
module, which can be used to review and correct outputs from automated
image classification while providing a record of any
(re)classifications.
The VPR outputs two raw files (.dat and .idx) for a given time period in
a deployment. These files are processed together in a software provided
with the VPR (i.e., AutoDeck), which decompresses the images, extracts
"regions of interest" (ROIs), and outputs ROI image files and a
corresponding CTD data file (.dat). The ROI file names are numeric
consisting of 10 digits. The first 8 digits correspond to the number of
milliseconds elapsed in the day at the time the image was captured. The
last two digits correspond to the ROI identifier (01-99). The ROIs and
corresponding CTD data are linked by their 8 digit time stamp. After the
ROIs have been extracted from the raw files they may be sorted into
categories manually or by an automated classification procedure. In
vprr, file naming conventions and directory structures are inherited
from a VPR image classification and analysis software, Visual Plankton.
However, the functionality of vprr is not dependent on the use of VP.
The data inputs for processing in vprr consist of the following file
types: aid (.txt), aidmeas (.txt), and CTD (.dat). The aid and aidmeas
files are derived from separate image classification and measurement
steps outside of vprr. Each "aid" (i.e., autoid) file contains file
paths to individual ROIs that have been classified to the category of
interest. The corresponding "aidmeas" file contains morphological data
for the ROIs (e.g., long axis length, perimeter, etc.).

# Section 2: Summary of vprr data processing steps
The processing steps in R from the 'Post Processing and Visualization'
box in Figure 1 are detailed in Figure 2.

## Section 2.1: Processing Environment
Before beginning data processing with vprr, it is recommended that a
processing environment be created containing commonly used variables and
file paths. The simplest and most reproducible way to achieve this is to
write an R script where all the mission and system specific variables
are contained, then save the environment as a RData file to be loaded at
the start of any processing scripts. This processing environment
contains reference to a station identifier csv file which should be
created for each mission. This file links station names from deck sheets
to the day and hour values assigned by AutoDeck. Day and hour values
represent the Julian day (3 digit) and two digit hour (24 hour clock)
when sampling was done. Note that the day and hour values will be in the
time zone of the computer used to run AutoDeck. Ensure that this matches
the time zone of the VPR CPU at the time of data collection to avoid a
time offset between data sources.
Another important part of setting up the processing environment is
ensuring the proper directory structure is in place, see Appendix 1 for
details on the required directory structure.
# set VPR processing environment
# WORKING DIRECTORY
wd <- "C:/VPR_PROJECT/"
setwd(wd)
# MISSION
cruise <- 'COR2019002'
year <- 2019
# CSV FILE WITH STATION NAMES AND CORRESPONDING DAY/HOUR INFO
station_names_file <- paste0("station_names_", cruise, ".csv")
# note columns should be labeled : station, day, hour
# DIRECTORY FOR CTD DATA (output from AutoDeck)
castdir <- paste0('D:/', cruise, "/", cruise, "_autodeck/")
# AUTOID FOLDER FOR MEASUREMENT DATA (aidmeas & aid files)
drive <- 'C:/'
auto_id_folder <- paste0(drive, "cruise_", cruise, "/", "autoid")
#!!NO BACKSLASH AT END OF STRING
# PATH TO AUTOID CATEGORY FOLDERS
auto_id_path <- list.files(paste0(auto_id_folder, "/"), full.names = T)
# CREATE STANDARD DIRECTORY FOR SAVED DATA FILES PER MISSION
savedir <- paste0(cruise, '_data_files')
dir.create(savedir, showWarnings = FALSE)
# CREATE STANDARD DIRECTORY FOR SAVED DATA PRODUCTS PER MISSION AND STATION
stdir <- paste('data_product/', cruise, sep = "")
dir.create(stdir, showWarnings = FALSE, recursive = TRUE)
# BIN SIZE FOR COMPUTING DEPTH AVERAGED PLANKTON CONCENTRATION ALONG PATH OF THE VPR
binSize <- 3
# CATEGORY OF INTEREST (PLANKTON CATEGORIES TO BE PROCESSED AND VISUALIZED)
category_of_interest <-
c(
'krill',
'Calanus',
'chaetognaths'
)
#### SAVE ####
# SAVE ALL FILE PATHS AND SETTINGS AS PROJECT ENVIRONMENT
save.image(file = paste0(cruise,'_env.RData'))
An example of the station names csv file looks like this:
```{r, eval = TRUE}
csv <- read.csv('station_names_COR2019002.csv')
head(csv)
```
Once this environment is set, it can be loaded into any processing
session by using
load('COR2019002_env.RData') # where COR2019002 is mission name
If sharing processing code with colleagues on version control, keeping
the environment variables separate (outside of the git project) will
allow collaboration while avoiding inconsistencies in file paths or
folder names.
## Section 2.2: Image Copying (optional):
ROIs are organized into folders corresponding to their assigned
classification categories from automated image classification. The
information in each aid file is used to create a folder of images that
have been classified to that category. This step is only required if
manual re-classification (see Section 2.3) is intended. Further details
on image copying are provided in Section 3.
## Section 2.3: Manual re-classification (optional):
Automated classifications are manually checked, which allows for
manual correction and addition of categories not previously used for
automated classification. ROIs that have been copied in Section 2.2 are
manually sorted to correct for misclassifications. Updated aid and aidmeas files are produced.
Further details on manual re-classification are provided in Section 4.
## Section 2.4: Processing:
Data outputs from Autodeck (ctd .dat files), automatic classifications (aid files)
and measurements (aidmeas files) are joined together. The aid and aidmeas files, which may have been updated
(see Section 2.3) are joined with CTD text files by the 8 digit time
stamp. The data are then averaged in user-defined vertical bins to
produce a time series of plankton concentrations and environmental
variables. Quality controlled data products (before and after binning)
are then exported in simple formats (csv, RData, oce) for plotting and
analysis. Further details on data processing are provided in Section 5.
# Section 3: Image Copying
In this step, ROIs are copied to folders that are organized based on the
day and hour of data collection and classification category assigned
from automatic classification (see Appendix 1: 'Image Folders'). The
images are organized by AutoDeck into day and hour; however,
reorganizing them based on classification allows easier human
interaction with the data and visual inspection of classifications.
Moreover, this directory structure is used by the next step of
processing (i.e., manual re-classification). To implement this step use
the function `vprr::vpr_autoid_copy()`. For more information on input
variables, please see the documentation for `vpr_autoid_copy()`.
# create variables
# ---------------------
basepath <- "C:\\data\\cruise_COR2019002\\autoid\\"
# note this is the same as the auto_id_folder environment variable except the file separator is different, because this script will run source code in command line which does not recognize '/' as a file separator
day <- "123"
hour <- "01" # note leading zero
classifier_type <- "svm"
classifier_name <- "myclassifier"
# run file organizer
# ---------------------
vpr_autoid_copy(basepath, day, hour, classifier_type, classifier_name)
# Section 4: Manual Re-classification
Manual re-classification of some categories after automated
classification may be required to achieve identification accuracy
standards. In this step, ROIs are displayed on the screen one at a time
for manual verification. If an image has been misclassified or if it
falls into a new user-defined category (described below), the image can
be re-classified. This is especially useful for classification of rare
categories that were not defined prior to automatic classification.
After completing manual re-classification for a day-hour set, new aid
and aidmeas files are created for new categories, which are identical in
format to original aid and aidmeas files.
## Section 4.1: Preparing the environment by setting some variables
- Load the processing environment, which includes the `auto_id_folder`
variable.
- Set day and hour of interest.
- Set category of interest. These categories are the existing automated
classification categories which require manual re-classification, as
well as any new categories. The `vprr::vpr_category_create()`
function sets up the folder structure for any new categories which
have been added to the list of interest.
- Run manual re-classification with
`vprr::vpr_manual_classification()`. This function has a few
optional arguments to customize the manual re-classification
experience, notably `gr` which is a logical value determining
whether or not manual re-classification options appear as pop ups or
in the console, as well as `img_bright`, a logical which determines
whether or not the original image is appended with an extra bright
version of the image. Having a bright version of the image allows
the user to see the outline of the organism better, any thin
appendages become more clear and gelatinous organisms like
chaetognaths or ctenophores are easier to distinguish.
#### MANUAL RE-CLASSIFICATION
# -------------------------------------
# Once classified images are sorted by taxa
# verify classification accuracy by manually
# looking through classified images
#### USER INPUT REQUIRED ####
load('COR2019002_env.RData')
day <- '235'
hr <- '19' # keep leading zeros, must be two characters
category_of_interest <-
c(
'krill',
'Calanus',
'chaetognaths',
'ctenophores',
'Other',
'larval_fish',
'marine_snow',
'small_copepod',
'other_copepods',
'larval_crab',
'amphipod',
'Metridia',
'Paraeuchaeta',
'cnidarians',
'speciesA' # new category
)
# add new category (optional)
vpr_category_create(taxa = category_of_interest, auto_id_folder)
# ensures there is proper folder structure for all categories of interest
# reclassify images
vpr_manual_classification(day = day, hour= hr, basepath = auto_id_folder,gr = FALSE,
taxa_of_interest = category_of_interest, scale = 'x300',
opticalSetting = 'S3')
## Section 4.2: Generate new aid and aidmeas files
The function `vprr::vpr_manual_classification()` produces two files
('misclassified' and 're-classified' text files) as a record of manual
re-classification, which are found in the R project working directory in
folders named by the day and hour that the data were collected. The
function `vprr::vpr_autoid_create()` takes these files and outputs new aid
and aidmeas files in the R working directory in folders named by
classification category. This step should be run after each hour of data
is manually re-classified.
#### REORGANIZE ROI AND ROIMEAS DATA
# -----------------------------------------
day_hour_files <- paste0('d', day, '.h', hr)
misclassified <- list.files(day_hour_files, pattern = 'misclassified_', full.names = TRUE)
reclassify <- list.files(day_hour_files, pattern = 'reclassify_', full.names = TRUE)
# MOVE ROIS THAT WERE MISCLASSIFIED INTO CORRECT FILES & REMOVE MISCLASSIFIED ROIS
vpr_autoid_create(reclassify, misclassified, auto_id_folder)
The aid and aidmeas files are both text files which are
specifically formatted to record classification outputs for further
processing. The format and naming conventions of these files have been inherited from a VPR image classification and data processing tool called Visual Plankton (written in Matlab); however, the functionality of vprr is independent from that of Visual Plankton. The aid files are text records of image paths,
where each individual text file represents a classification category. Each line of
the aid file is the full path to an image which was classified into the
designated category. Note that the naming scheme of aid files does not
include the category name in the file title and the category is only
identifiable by the folder in which it is located. For example the 'krill'
classification aid file might be named 'oct10_1svmaid.d224.h01' but be located within the
'krill' autoid folder. The aidmeas files are also text files which represent a
variety of different measurements taken of the object(s) within a ROI image. The columns of the aidmeas files are
c('Perimeter','Area','width1','width2','width3','short_axis_length','long_axis_length'). Examples
of each of these files can be found below.
```{r, eval = TRUE}
aid <- read.table(file = system.file("extdata/COR2019002/autoid/bad_image_blurry/aid/sep20_2svmaid.d222.h04", package = 'vprr', mustWork = TRUE))
head(aid)
aidmeas <- readLines(system.file("extdata/COR2019002/autoid/bad_image_blurry/aidmea/sep20_2svmaid.mea.d222.h04", package = 'vprr', mustWork = TRUE))
head(aidmeas)
```
## Section 4.3: File check
The last step of manual re-classification includes some manual file
organization and final checks. These files should be manually reorganized
in a new directory which will become the new auto_id_folder (see Appendix 1:
Directory Structure). Any aid and aidmeas files from categories which were
not manually checked and re-classified should also be added to this new auto_id_folder
if they are to be included in further processing (e.g., computation of concentration
in user-specified depth bins). After the updated aid and aidmeas files have been
manually reorganized they can be quality controlled using vprr::vpr_autoid_check().
The user could also manually check the files. The automated check function removes
any empty aid files created. Empty files can cause errors in processing down the line.
This function also checks that (1) aid and aidmeas files are aligned within an hour of data;
(2) aid and aidmeas files include the same number of ROIs; and (3) the
VPR tow number for all files is the same.
#### FILE CHECK
# --------------------------------
# aid check step
# removes empty aid files, and checks for errors in writing
vpr_autoid_check(basepath, cruise) # OUTPUT: text log 'CRUISE_aid_file_check.txt' in working directory
# Section 5: Data Processing
This is the main chunk of coding required to generate data products.
This step does not require image copying (Section 3) or manual
re-classification (Section 4) steps; however, if these steps were taken
the aid and aidmeas files generated from manual re-classification and
integrated into the directory structure (as specified in Section 4) are
used as an input. The following is a walk-through of processing data
from a DFO field mission (i.e. mission COR2019002) in the southern Gulf
of St. Lawrence in 2019. First, all required libraries should be loaded,
along with the processing environment described in Section 2.1.
##### PROCESSING --------------------------------------------------------------------------------------------------------------------
library(vprr)
#### FILE PATHS & SETTINGS --------------------------------------------------------------------------------------------------------------------
# loads processing environment specific to user
load('COR2019002_env.RData')
This section allows a user to process all stations of a particular
mission in a loop. This can be modified or removed based on personal
preference.
##### STATION LOOP ----------------------------------------------------------------------------------------------------------------------------
all_stations <- read.csv(station_names_file, stringsAsFactors = FALSE)
all_stations_of_interest <- unique(all_stations$station)
for (j in 1:length(all_stations_of_interest)){
station_of_interest <- all_stations_of_interest[j]
cat('Station', station_of_interest, 'processing... \n')
cat('\n')
Optical settings and image volume variables should be set. If they are
consistent throughout the mission, they could also be added to the
processing environment (Section 2.1).
#==========================================#
# Set optical settings & Image Volume #
# !Should be updated with each mission! #
#==========================================#
if(cruise == "COR2019002") {
#VPR OPTICAL SETTING (S0, S1, S2 OR S3)
opticalSetting <- "S2"
imageVolume <- 108155 #mm^3
}
CTD data are loaded in using `vprr::vpr_ctd_files` to find files and
`vprr::vpr_ctd_read` to read in files. During CTD data read in, a
seawater density variable `sigmaT` is derived using the function
`oce::swSigmaT`, and `depth` (in meters) is derived from pressure using
the function `oce::swDepth`. For more information on the oce package,
see dankelley/oce on GitHub.
#get day and hour info from station names list
dayhour <- vpr_dayhour(station_of_interest, file = station_names_file)
##### PULL CTD CASTS ----------------------------------------------------------------------------------------------------------------------------
# get file path for ctd data
# list ctd files for desired day.hours
ctd_files <- vpr_ctd_files(castdir, cruise, dayhour)
##### READ CTD DATA ----------------------------------------------------------------------------------------------------------------------------
ctd_dat_combine <- vpr_ctd_read(ctd_files, station_of_interest)
cat('CTD data read complete! \n')
cat('\n')
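As a quick cross-check, the same derived variables can be reproduced directly
with the oce functions named above. The snippet below is an illustrative
sketch only; the column names are assumptions and may differ from the actual
output of `vpr_ctd_read()`.
# illustrative sketch: confirm column names with names(ctd_dat_combine) first
sigmaT_check <- oce::swSigmaT(ctd_dat_combine$salinity,
                              ctd_dat_combine$temperature,
                              ctd_dat_combine$pressure)
depth_check <- oce::swDepth(ctd_dat_combine$pressure, latitude = 45)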
The aid and aidmea files, which reflect manual classification (if used,
see Section 4), are then found.
##### FIND VPR DATA FILES ----------------------------------------------------------------------------------------------------------------------
# Path to aid for each taxa
aid_path <- paste0(auto_id_path, '/aid/')
# Path to mea for each taxa
aidmea_path <- paste0(auto_id_path, '/aidmea/')
# AUTO ID FILES
aid_file_list <- list()
aidmea_file_list <- list()
for (i in 1:length(dayhour)) {
aid_file_list[[i]] <-
list.files(aid_path, pattern = dayhour[[i]], full.names = TRUE)
# SIZE DATA FILES
aidmea_file_list[[i]] <-
list.files(aidmea_path, pattern = dayhour[[i]], full.names = TRUE)
}
aid_file_list_all <- unlist(aid_file_list)
aidmea_file_list_all <- unlist(aidmea_file_list)
remove(aid_file_list, aidmea_file_list, aid_path, aidmea_path)
aid and aidmeas files are read in using `vprr::vpr_autoid_read()`.
##### READ ROI AND MEASUREMENT DATA ------------------------------------------------------------------------------------------------------------
# ROIs
roi_dat_combine <-
vpr_autoid_read(
file_list_aid = aid_file_list_all,
file_list_aidmeas = aidmea_file_list_all,
export = 'aid',
station_of_interest = station_of_interest,
opticalSetting = opticalSetting
)
# MEASUREMENTS
roimeas_dat_combine <-
vpr_autoid_read(
file_list_aid = aid_file_list_all,
file_list_aidmeas = aidmea_file_list_all,
export = 'aidmeas',
station_of_interest = station_of_interest,
opticalSetting = opticalSetting
)
cat('ROI and measurement data read in complete! \n')
cat('\n')
Next, CTD and aid data are merged to create a data frame describing both
environmental variables (e.g., temperature, salinity) and classified
images. The function used is `vprr::vpr_ctdroi_merge()`.
##### MERGE CTD AND ROI DATA ---------------------------------------------------------------------------------------------------------------------
ctd_roi_merge <- vpr_ctdroi_merge(ctd_dat_combine, roi_dat_combine)
cat('CTD and ROI data combined! \n')
cat('\n')
Before final export of data products, the following variables are added
to the data frame: time in hours (time_hr) is calculated, and a time
stamp (ymdhms) with POSIXct signature in Y-M-D h:m:s format is added
using the function `vpr_ctd_ymd`.
##### CALCULATED VARS ----------------------------------------------------------------------------------------------------------------------------
# add time_hr and sigma T data and depth
data <- ctd_roi_merge %>%
dplyr::mutate(., time_hr = time_ms / 3.6e+06)
data <- vpr_ctd_ymd(data, year)
# ensure that data is sorted by time to avoid processing errors
data <- data %>%
dplyr::arrange(., time_ms)
cat('Initial processing complete! \n')
cat('\n')
# clean environment
remove(ctd_roi_merge)
Average plankton concentration and environmental variables (e.g.,
temperature, salinity, density, etc.) are then computed within a user
defined depth bin. The computation of plankton concentration is dependent
on the assumption that the same animals are not re-sampled by the
instrument. The bin-averaging step standardizes plankton concentrations
when the VPR does not sample the water column evenly. This can occur
due to characteristics of the deployment or variability in the sampling
rate, which is not necessarily constant in older versions of the VPR.
Binning also reduces noise in the data. First, an oce CTD object is
created using `vprr::vpr_oce_create()`. Then, bin-averaging is done
using `vprr::bin_cast()`. Concentrations are calculated for each
category of interest.
##### BIN DATA AND DERIVE CONCENTRATION ----------------------------------------------------------------------------------------------------------
ctd_roi_oce <- vpr_oce_create(data)
# bin and calculate concentration for all taxa (combined)
vpr_depth_bin <- bin_cast(ctd_roi_oce = ctd_roi_oce, binSize = binSize, imageVolume = imageVolume)
# get list of valid taxa
taxas_list <- unique(roimeas_dat_combine$taxa)
# bin and calculate concentrations for each category
taxa_conc_n <- vpr_roi_concentration(data, taxas_list, station_of_interest, binSize, imageVolume)
cat('Station', station_of_interest, 'processing complete! \n')
cat('\n')
# bin size data
size_df_f <- vpr_ctdroisize_merge(data, data_mea = roimeas_dat_combine, taxa_of_interest = category_of_interest)
size_df_b <- vpr_size_bin(size_df_f, bin_mea = 3)
Finally, data are saved as RData and csv files for export and plotting.
Data are also saved as an `oce` object in order to preserve both data
and metadata in an efficient format.
##### SAVE DATA ---------------------------------------------------------------------------------------------------------------------------------
# Save oce object
oce_dat <- vpr_save(taxa_conc_n)
save(file = paste0(savedir, '/oceData_', station_of_interest,'.RData'), oce_dat) # oce data and metadata object
# Save RData files
save(file = paste0(savedir, '/ctdData_', station_of_interest,'.RData'), ctd_dat_combine) #CTD data
save(file = paste0(savedir, '/stationData_', station_of_interest,'.RData'), data) # VPR and CTD data
save(file = paste0(savedir, '/meas_dat_', station_of_interest,'.RData'), roimeas_dat_combine) #measurement data
save(file = paste0(savedir, '/bin_dat_', station_of_interest,'.RData'), vpr_depth_bin) # binned data with cumulative concentrations
save(file = paste0(savedir, '/bin_size_dat_', station_of_interest,'.RData'), size_df_b) # binned data including measurements
cat('CTD, ROI-VPR merge, ROI measurement saved as RData! \n')
cat('\n')
# Write csv files
# write.csv(file = paste0(stdir, '/vpr_data_unbinned', station_of_interest, '.csv'), data, row.names = F) # VPR and CTD data (not binned)
# write.csv(file = paste0(stdir, '/vpr_meas', station_of_interest, '.csv'), roimeas_dat_combine) # measurement data
write.csv(file = paste0(stdir, '/vpr_data_binned', station_of_interest, '.csv'), taxa_conc_n) # VPR and CTD data with concentrations by taxa
cat('ROI measurements, ROI-CTD merge-unbinned, and ROI-CTD merge-binned written to csv! \n')
cat('\n')
} #end of station loop
# Section 6: Plotting
Although not primarily a plotting package, vprr can produce contour
plots, profile plots and temperature-salinity (TS) plots from VPR data
sets. A few example plots are provided in the following code. The first
step to plotting is properly loading in the processed VPR data objects
created in processing. The environment, described in Section 2.1, should
also be loaded. The individual data files are found by distinct names
(e.g., "stationData"). The directory structure may be different
depending on the `savedir` where data files were saved during
processing. Note that the following plotting examples are tailored for
tow-yo pattern VPR deployments.
##### FILE PATH & SETTINGS -----------------------------------------------------------------------------------------------------------------------
library(vprr)
# loads all file paths and environment vars specific to User
load('COR2019002_env.RData')
#find all data files
fn_all_st <- list.files(paste0(cruise, "_data_files/"), pattern = "stationData", full.names = T)
fn_all_meas <- list.files(paste0(cruise, "_data_files/"), pattern = "meas", full.names = T)
fn_all_conc <- list.files(paste0("data_product/", cruise, "/"), pattern = "data_binned", full.names = T)
fn_all_bin <- list.files(paste0(cruise,"_data_files/"), pattern = 'bin_dat', full.names = T)
Once files are loaded, plots for all stations in a mission can be
generated in a loop, which efficiently produces comparable plots across
stations. The example below loops through a list of stations described
by a csv file and isolates two specific classification categories to
plot (e.g., "Calanus" and "krill").
####START STATION LOOP ---------------------------------------------------------------------------------------------------------------------------
setwd(wd)
all_stations <- read.csv(station_names_file, stringsAsFactors = FALSE)
all_stations_of_interest <- unique(all_stations$station)
taxa_to_plot <- c("Calanus", "krill")
for (j in 1:length(all_stations_of_interest)){
setwd(wd)
station <- all_stations_of_interest[j]
cat('station', station ,'starting to plot.... \n')
cat('\n')
Data files are loaded for the specific station of interest. This loads
in all relevant RData files as well as the concentration data saved as a
csv file.
#load station roi and ctd data
fn_st <- grep(fn_all_st, pattern = station, value = TRUE, ignore.case = TRUE)
fn_meas <- grep(fn_all_meas, pattern = station, value = TRUE, ignore.case = TRUE)
fn_conc <- grep(fn_all_conc, pattern = station, value = TRUE, ignore.case = TRUE)
fn_bin <- grep(fn_all_bin, pattern = station, value = TRUE, ignore.case = TRUE)
load(fn_st)
load(fn_meas)
load(fn_conc)
load(fn_bin)
# load concentration data
taxa_conc_n <- read.csv(fn_conc, stringsAsFactors = F)
station_name <- paste('Station ', station)
The final section of set up indicates the directory in which plots will
be saved and provides generic plot size arguments which will control how
large the saved .png files are.
# directory for plots
stdir <- paste0('figures/', cruise, '/station', station)
dir.create(stdir, showWarnings = FALSE, recursive = TRUE)
setwd(stdir)
width = 1200
height = 1000
The following example presents a plot of the concentrations of a taxon
as scaled bubbles along the tow path, overlain on contours of an
environmental variable from the CTD. The main function used is
`vprr::vpr_plot_contour()` which uses a standard VPR data frame
(`taxa_conc_n` - produced from processing (Section 5)) and plots the
background contours. Interpolation methods can be adjusted based on data
or preference. The VPR tow path can be added on top of contours. This
method can be repeated with various environmental variables (e.g.,
temperature, salinity etc.) used to calculate the contours, by changing
the `var` argument in `vprr::vpr_plot_contour()`.
# Density (sigmaT)
png('conPlot_taxa_dens.png', width = width, height = height)
p <- vpr_plot_contour(taxa_conc_n[taxa_conc_n$taxa %in% c(taxa_to_plot),], var = 'density', dup = 'strip', method = 'oce', bw = 0.5)
p <- p + geom_line(data = data, aes(x = time_hr - min(time_hr), y = pressure), col = 'snow4', inherit.aes = FALSE) +
geom_point(data = taxa_conc_n[taxa_conc_n$taxa %in% c(taxa_to_plot),], aes(x = time_hr, y = min_pressure, size = conc_m3), alpha = 0.5)+
ggtitle(station_name ) +
labs(size = expression("Concentration /m" ^3), fill = 'Density')+
scale_size_continuous(range = c(0, 10)) +
facet_wrap(~taxa, ncol = 1, scales = 'free') +
theme(legend.key.size = unit(0.8, 'cm'),
axis.title = element_text(size = 20),
strip.text = element_text(size = 20),
plot.title = element_text(size = 32),
axis.ticks = element_line(size = 1, lineend = 'square'),
axis.text = element_text(size = 30),
legend.text = element_text(size = 20),
legend.title = element_text(size = 25)
)
print(p)
dev.off()
Vertical profiles of plankton concentration and environmental variables
compressed over the sampling duration can be generated using
`vprr::vpr_plot_profile()`. This type of plot indicates the overall
pattern in vertical distribution over the VPR deployment.
png('profilePlots.png', width = 1000, height = 500)
p <- vpr_plot_profile(taxa_conc_n, taxa_to_plot)
print(p)
dev.off()
Temperature-salinity (TS) plots can be generated to visualize how
plankton concentration varies across different water masses. In the
example below, a TS plot is produced in ggplot (with labeled
isopycnals), and concentration bubbles for each selected classification
group are overlaid on the plot. The basic TS bubble plot can be easily
manipulated using ggplot2 grammar, for example the plots can be faceted
by classification group or axis labels and sizing can be adjusted (see
ggplot2 package for more information).
####TS BUBBLE PLOT ----------------------------------------------------------------------------------------------------------------------------
# plot by taxa
taxa_conc <- taxa_conc_n[taxa_conc_n$conc_m3 > 0,]
png('TS_conc_taxa.png', width = 1000, height = 500)
p <- vpr_plot_TS(taxa_conc[taxa_conc$taxa %in% c(taxa_to_plot),], var = 'conc_m3') +
facet_wrap(~taxa, nrow = 1) +
theme(strip.text = element_text(size = 18),
axis.title = element_text(size = 20),
panel.spacing = unit(2, 'lines'))
print(p)
dev.off()
cat('station', station, 'complete! \n')
cat('\n')
} # end station loop
# Section 7: Disclaimer
The functions in vprr were created for a specific project and have not
been tested on a broad range of field mission data. It is possible that
deviations in data format and directory structure from that described
herein may result in errors when using vprr. The vprr package was
developed for the purpose of processing data collected during tow-yo VPR
deployments and image classification using VP. The purpose of this
document is to provide a template for processing and visualizing VPR
data that can be adapted by other users for their own objectives.
# Appendix 1: Directory Structure
Visual Plankton (Matlab image classification software) requires a very
specific directory structure in order to function. Since this processing
is meant to directly follow this image classification, the VP directory
structure is used for consistency. This allows a smooth transition
between the Matlab classifications and the completion of processing in
R. The required directory structure is described below; a short sketch for creating this skeleton in R follows the listings.
- C:/
- data
- cruise_name
- autoid
- taxa
- aid
- aidmea
- image folders
- rois
- vprtow#
- day
- hour
- trrois
- vprtow#
- day
- hour
This is your project directory, where your R scripts and work products
will be stored:
- ...
- VPR_PROJECT
- R
- R scripts/ workflows
- new_autoid
- taxa
- aid
- aidmea
- manual_reclassification_record
- day/hour
- misclassified
- reclassified
- figures
- station names (csv)
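As an illustrative sketch only (the base path and category names below are
placeholders, not requirements), part of the autoid skeleton above could be
created in R with:
# illustrative sketch: build aid/aidmea folders for a few categories
dir_base <- "C:/data/cruise_COR2019002/autoid"
for (ctg in c("krill", "Calanus")) {
  dir.create(file.path(dir_base, ctg, "aid"), recursive = TRUE, showWarnings = FALSE)
  dir.create(file.path(dir_base, ctg, "aidmea"), recursive = TRUE, showWarnings = FALSE)
}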
# Appendix 2: Glossary
**Aid files** - Visual Plankton output text file, listing file path
information for ROIs of a specific classification group
**AidMeas files (AutoID measurements)** - Visual Plankton output text
file, listing measurement data for ROIs of a specific classification
group. Units are pixels and columns are 'Perimeter', 'Area', 'width1',
'width2', 'width3', 'short_axis_length', 'long_axis_length'
**Auto Deck** - software which pulls plankton images from Video Plankton
Recorder frames based on specific settings
**Auto ID** - The automatic classification given to an image from Visual
Plankton machine learning algorithm
**AutoID files** - Includes both Aid and AidMeas files as part of Visual
Plankton's automatic classifications
**BIO** - Bedford Institute of Oceanography, a research institute in
Halifax NS, Canada
**Classification category (Taxa)** - A defined group under which VPR
images can be classified, often represents a taxonomic group (e.g.
Krill), but can also be defined by image type (e.g. 'bad_image_blurry'),
or other (e.g. 'marine_snow'), should be one continuous string (no
spaces)
**CPU** - Central processing unit (computer processor)
**CTD** - Conductivity, Temperature and depth sensor instrument
**Day** - Julian calendar day on which VPR data was collected (three
digits)
**Hour** - Two digit hour (24 hour clock) describing time at which VPR
data was collected
**Image volume** - The measured volume of water captured within a VPR
image. Calculated based on optical setting and VPR standards. This is
based on AutoDeck settings, it is calculated from the VPR calibration
file (unique to each instrument). It will change based on AutoDeck
settings and should be updated with each cruise/ processing batch. It is
measured in cubic mm
**Optical Setting** - A VPR setting controlling image magnification and
field of view, which can be S0, S1, S2 or S3, where S0 has the greatest
magnification and smallest image volume, and S3 has the least
magnification and largest image volume
**ROI** - Region of interest, images identified by autodeck within VPR
frames based on settings defined in autoDeck program
**SeaScan** - Oceanographic instrument manufacturing company
**station** - A named geographic location, where the VPR was deployed
**Tow-yo** - A VPR deployment method where the VPR is towed behind a
vessel while being raised and lowered through the water column in order
to sample over both depth and distance
**TRROIS** - Training set of images used to train machine learning
algorithm in Visual Plankton
**VP** - Visual Plankton program run in Matlab
**VPR** - Video Plankton Recorder, oceanographic instrument used to
image small volumes of water for the purpose of capturing images of
plankton
**vprtow#** - A numeric code which is unique to each VPR deployment
**Working Directory** - File path on your computer that defines the
default location of any files you read into R, or save out of R
|
/scratch/gouwar.j/cran-all/cranData/vprr/vignettes/VPR_processing.Rmd
|
#' @importFrom stats cor lm rexp rnorm runif sd
#' @importFrom graphics abline par points
NULL
#'
#' Non-negative tri-factorization of co-occurrence matrix using minimum volume approach.
#'
#' \code{AnchorFree} method tri-factorizes (co-occurrence) matrix in a product \eqn{P ~ C*E*t(C)} of non-negative matrices \eqn{C} and \eqn{E}
#' such that matrix \eqn{E} has minimum volume and columns of matrix \eqn{C} sum to 1.
#'
#' Implementation closely follows (Fu X \emph{et al.}, IEEE Trans Pattern Anal Mach Intell., 2019).
#'
#' @param vol An output object of vol_preprocess(). The method factorizes co-occurrence matrix \code{vol$P}.
#' @param n.comp An integer. Number of components to extract (by default 3). Defines number of columns in matrix \eqn{C}. (default=3)
#' @param init A numeric matrix. Initial matrix \code{M}. (default=NULL)
#' @param init.type A character. A strategy to randomly initialize matrix \code{M}. (default="diag") Options are to
#'
#' 1) generate diagonal unit matrix ("diag"),
#'
#' 2) use ICA solution as initialization ("ica", "ica.pos").
#'
#' or sample entries from:
#'
#' 3) uniform distribution \code{[0,1]} ("unif.pos"),
#'
#' 4) uniform distribution \code{[-1,1]} ("unif.both"),
#'
#' 5) uniform distribution \code{[0.9,1.1]} ("similar"),
#'
#' 6) normal distribution \code{N(0,1)} ("normal"),
#'
#' 7) or an initialization derived from a random positive semi-definite matrix \code{E} ("Epos").
#' @param n.iter An integer. Number of iterations. (default=30)
#' @param err.cut A numeric. Relative error in determinant between iterations to stop algorithm (now is not used). (default=1e-30)
#' @param verbose A boolean. Print per-iteration information (default=FALSE)
#' @return List of objects:
#'
#' \code{C}, \code{E} Factorization matrices.
#'
#' \code{Pest} Estimate of \code{vol$P} co-occurence matrix \eqn{Pest = C*E*t(C)}.
#'
#' \code{M}, \code{detM} auxiliary matrix \code{M} and its determinant.
#'
#' \code{init.type} type of initialization of matrix \code{M} that was used.
#' @examples
#' small_example <- sim_factors(5, 5, 5)
#' vol <- vol_preprocess(t(small_example$X))
#' vol.anchor <- AnchorFree(vol)
#'
#' @export
AnchorFree <- function(vol, n.comp = 3, init = NULL, init.type = "diag",
n.iter = 30, err.cut = 1e-30, verbose = FALSE){
B <- -vol$U[,1:n.comp] %*% sqrt(diag(vol$eigens)[1:n.comp,1:n.comp])
Pclean <- B %*% t(B)
M <- init
if (is.null(M)){
if (init.type == "diag"){
#M <- diag(runif(n.comp, -1, 1), n.comp)
M <- diag(1, n.comp)
}else if (init.type == "similar"){
M <- matrix(runif(n.comp*n.comp, 0.9, 1.1), nrow = n.comp, ncol = n.comp)
}else if (init.type == "unif.pos"){
M <- matrix(runif(n.comp*n.comp, 0, 1), nrow = n.comp, ncol = n.comp)
}else if (init.type == "unif.both"){
M <- matrix(runif(n.comp*n.comp, -1, 1), nrow = n.comp, ncol = n.comp)
}else if (init.type == "normal"){
M <- matrix(rnorm(n.comp*n.comp, 0, 1), nrow = n.comp, ncol = n.comp)
}else if (init.type == "ica" | init.type == "ica.pos"){
ic <- ica::icafast(B, nc = n.comp)
icM <- t(t(ic$S) + (t(solve(t(ic$M))) %*% apply(B, 2, mean))[,1])
sgn <- apply(icM,2,function(x) sign(sum(sign(x) * x^2)))
icM <- t(t(icM) * sgn)
if (init.type == "ica.pos"){ icM[icM < 0] <- 0 }
ft <- lm( icM ~ B - 1 )
M <- ft$coefficients
}else if (init.type == "Epos"){
preE0 <- matrix(runif(n.comp*n.comp, 0, 1), nrow = n.comp, ncol = n.comp)
E0 <- preE0 %*% t(preE0)
svdE <- svd(E0)
Mrev <- svdE$u %*% sqrt(diag(svdE$d))
M <- solve(Mrev)
}
}
M.prev <- M
detM.prev <- det(M)
#require(lpSolveAPI)
iter <- 1
err <- 1e+5
while ( iter < n.iter & abs(err) > err.cut ){
for (f in 1:n.comp){
avec <- unlist(lapply(1:n.comp, function(k){
if (n.comp == 2) {
detM <- M[-k, -f]
}else{
detM <- det(M[-k, -f])
}
Minor <- (-1)^(k+f) * detM
}))
Bconstr <- rbind(B, as.numeric(rep(1,nrow(B)) %*% B))
#x <- M[,f] # + 1e-10#/183.6
#x <- x*1.0
#range(Bconstr[-nrow(Bconstr),]%*%x)
#as.numeric(rep(1,nrow(B))%*%B)%*%x
#avec%*%x
#avec%*%get.variables(lps1)
lps1 <- lpSolveAPI::make.lp( nrow(Bconstr), n.comp)
lpSolveAPI::lp.control(lps1, sense = 'max')
for(i in 1:ncol(Bconstr)){
lpSolveAPI::set.column(lps1, i, Bconstr[, i])
}
lpSolveAPI::set.objfn(lps1, avec)
lpSolveAPI::set.constr.type(lps1, c(rep(">=", nrow(Bconstr) - 1), "="))
lpSolveAPI::set.rhs(lps1, c(rep(0, nrow(Bconstr) - 1), 1))
lpSolveAPI::set.bounds(lps1, lower = rep(-Inf, ncol(Bconstr)), columns = 1:ncol(Bconstr))
solve(lps1)
#get.variables(lps1)
#get.objective(lps1)
#avec%*%x
#avec%*%get.variables(lps1)
lps2 <- lpSolveAPI::make.lp(nrow(Bconstr), n.comp)
lpSolveAPI::lp.control(lps2, sense = 'min')
for(i in 1:ncol(Bconstr)){
lpSolveAPI::set.column(lps2, i, Bconstr[, i])
}
lpSolveAPI::set.objfn(lps2, avec)
lpSolveAPI::set.constr.type(lps2, c(rep(">=", nrow(Bconstr) - 1), "="))
lpSolveAPI::set.rhs(lps2, c(rep(0, nrow(Bconstr) - 1), 1))
lpSolveAPI::set.bounds(lps2, lower = rep(-Inf, ncol(Bconstr)), columns = 1:ncol(Bconstr))
solve(lps2)
#get.variables(lps2)
#get.objective(lps2)
if ( abs(lpSolveAPI::get.objective(lps1)) > abs(lpSolveAPI::get.objective(lps2)) ){
M[, f] <- lpSolveAPI::get.variables(lps1)
}else{
M[, f] <- lpSolveAPI::get.variables(lps2)
}
}
detM <- det(M)
err <- (detM - detM.prev) / detM
if (verbose == TRUE){
message( paste("iteration:", iter, "det:", detM, "prev. det:", detM.prev, "error:", err) )
message('\n')
}
M.prev <- M
detM.prev <- detM
iter <- iter + 1
}
if ( abs(detM) > 1e-30 ){
C <- B %*% M
Ccov <- solve(t(C) %*% C)
E <- Ccov %*% (t(C) %*% Pclean %*% C) %*% Ccov
Ppred <- C %*% E %*% t(C)
} else{
C <- NA; Ccov <- NA; E <- NA; Ppred <- NA
}
return( list(C = C, E = E, Pest = Ppred, M = M, detM = detM, init.type = init.type) )
}
|
/scratch/gouwar.j/cran-all/cranData/vrnmf/R/anchor.R
|
#' Infer a matrix of non-negative intensities in NMF
#'
#' \code{infer_intensities} estimates a non-negative matrix \code{D} that optimizes the objective function \eqn{F = ||X - C*D||^2}
#' using per-row quadratic programming.
#'
#' @param C Numeric matrices.
#' @param X Numeric matrices.
#' @param esign A character. Keep elements of matrix \code{D} non-negative ("pos") or not ("all). (default="pos")
#' @param n.cores An integer. Number of cores to use. (default=1)
#' @return Fitted matrix \code{D}.
#'
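#' @examples
#' # A minimal illustrative sketch on small simulated inputs (sizes are arbitrary):
#' sim <- sim_factors(10, 8, 3)
#' D.fit <- infer_intensities(sim$C, sim$X)
#'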
#' @export
infer_intensities <- function(C, X, esign = "pos", n.cores = 1){
X <- t(X)
D <- t(C) %*% C
dmat <- X %*% C
Amat <- diag(1, nrow(D))
bvec <- rep(0, nrow(D))
if (esign == "all"){
bvec <- rep(-1e+5, nrow(D))
}
nr <- nrow(X)
inten <- do.call(cbind, parallel::mclapply(1:nr, function(i){
ft <- quadprog::solve.QP(D, dmat[i, ], Amat, bvec)
ft$solution
},mc.cores = n.cores))
rownames(inten) <- paste("comp",1:ncol(C),sep="")
return(inten)
}
#' Infer a matrix of non-negative intensities in NMF with offset/nmf-offset.
#'
#' \code{factor_intensities} estimates a non-negative matrix \code{D} that optimizes the objective function \eqn{F = ||X - C*D - offset||^2},
#' where offset is either column-specific offset or a "1-rank nmf term": product of row vector and column vector
#' @param C Numeric matrices.
#' @param X Numeric matrices.
#' @param fit.nmf A boolean. Fit both intensities and spectrum of the offset residuals.
#' @param fit.factor A boolean. Fit only spectrum of the offset residuals (keep intensities constant across samples).
#' @param qp.exact A boolean. Estimate intensities using exact quadratic programming (qp.exact = TRUE) or inexact QP via gradient decent with extrapolation (qp.exact = FALSE).
#' @param n.iter An integer. Number of iterations.
#' @param qp.iter = 1e+1 An integer. Number of iterations of inexact QP.
#' @param rel.error.cutoff A numeric. Relative error cutoff between iterations to stop iterations.
#' @param extrapolate A boolean. Use Nesterov-like extrapolation at each iteration.
#' @param extrapolate.const A boolean. Use extrapolation scheme that adds a constant extrapolation q.factor (described below) at each iteration.
#' @param extrapolate.convex A boolean. Use Nesterov extrapolation scheme.
#' @param q.factor A numeric. Specification of a a constant extrapolation factor used in case of extrapolate.const = T.
#' @param verbose A boolean. Print per-iteration information (by default TRUE).
#' @param n.cores An integer. Number of cores to use.
#' @return Fitted matrix \code{D}.
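#' @examples
#' # A minimal illustrative sketch on small simulated inputs; iteration counts
#' # are kept deliberately low so the example runs quickly.
#' sim <- sim_factors(10, 8, 3)
#' fit <- factor_intensities(sim$C, sim$X, n.iter = 5, verbose = FALSE)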
#' @export
factor_intensities <- function (C, X, fit.nmf = TRUE, fit.factor = FALSE, qp.exact = FALSE,
n.iter = 2e+2, qp.iter = 1e+1, rel.error.cutoff = 1e-5,
extrapolate = TRUE, extrapolate.const = TRUE, extrapolate.convex = FALSE,
q.factor = 1,
verbose = TRUE, n.cores = 1)
{
X <- t(X)
####
#X <- as.matrix(rate.rec)
X <- apply(X,2,function(x)x/sd(x))
#extrapolate.const <- TRUE
#extrapolate.convex <- FALSE
#extrapolate.majorate <- FALSE
#verbose = TRUE
#qp.exact <- FALSE
#fit.nmf <- TRUE; fit.factor <- FALSE
#n.cores <- 1
#n.iter <- 1e+3
#qp.iter <- 1e+1
#rel.error.cutoff <- 1e-5
#q.factor <- 1
####
# prepare matrices for QP
D <- t(C) %*% C
dmat <- X %*% C
#if (qp.exact == TRUE){
sv <- svd(D)
R <- t(sv$u %*% diag(sqrt(sv$d)))
R1 <- t(solve(R))
#}
iter <- 1
inten.update <- inten <- t(matrix(1, nrow = nrow(X), ncol = ncol(C)))
spec.offset.update <- spec.offset.old <- spec.offset <- rep(1,nrow(C))
int.offset.update <- int.offset <- rep(1,nrow(X))
objs <- vector(); sbio <- vector(); soff <- vector()
## precalculate some matrices/variables
grad.main <- X%*%C
X.offset.const <- grad.main%*%t(R1)
C1 <- C%*%t(R1)
Lip.int <- max( eigen(t(C)%*%C)$values )
##
q <- c(1,(1+sqrt(5))/5)
while (iter < n.iter){
#print(paste("iteration.. ",iter))
inten.old <- inten
### estimate intensities using exact QP
if (qp.exact == TRUE){
if (iter > 1) dmat <- (X - X.offset) %*% C
#dmat1 <- dmat%*%t(R1)
fctr <- as.numeric(spec.offset.update%*%C1)
nr <- nrow(X)
inten <- do.call(cbind, parallel::mclapply(1:nr, function(i) {
ft1 <- nnls::nnls(R, X.offset.const[i,] - int.offset.update[i]*fctr)
ft1$x
}, mc.cores = 1, mc.preschedule = TRUE))
}
### estimate intensities using inexact QP via gradient decent with extrapolation
if (qp.exact == FALSE){
grad.offset <- as.matrix(int.offset.update)%*%(as.matrix(spec.offset.update%*%C))
inten.update <- inten.old
q1 <- c(1,(1+sqrt(5))/5)
j <- 1
rel.error <- 1e+6
while (j < qp.iter){
#for (j in 1:qp.iter){
q1[j+1] <- (1+sqrt(1+4*q1[j]^2))/2
inten.old1 <- inten
grad.bio <- t(inten.update)%*%D
grad.inten <- -grad.main + grad.offset + grad.bio
# update inten
inten <- inten.update - t(grad.inten) / Lip.int
inten[inten < 0] <- 0
# extrapolate inten
inten.update <- inten
extr1 <- (q1[j]-1)/q1[j+1]
if (extrapolate == TRUE) inten.update <- inten + extr1 * (inten - inten.old1)
rel.error <- sum( (inten - inten.old1)^2 )/sum( (inten)^2 )
#print(paste(j,rel.error))
j <- j + 1
}
}
inten.update <- inten
if (extrapolate == TRUE){
extr <- (q[iter] - 1) / q[iter+1]
inten.update <- inten + extr * (inten - inten.old)
}
# fit NMF or offset spectrum
X.proc <- X - t(inten.update)%*%t(C)
spec.offset.old <- spec.offset
int.offset.old <- int.offset
if (fit.nmf == TRUE){
iter1 <- 1
rel.error <- 1e+6
while(iter1 < 10 & rel.error > rel.error.cutoff ){
spec.offset.prev <- spec.offset
spec.offset <- (t(int.offset)%*%X.proc)/sum(int.offset^2)
spec.offset <- as.numeric(pmax(spec.offset, 0))
int.offset <- (X.proc%*%spec.offset)/sum(spec.offset^2)
int.offset <- as.numeric(pmax(int.offset, 0))
rel.error <- sqrt(sum((spec.offset-spec.offset.prev)^2))/sqrt(sum((spec.offset)^2))
iter1 <- iter1 + 1
}
}else if (fit.factor == TRUE){
spec.offset <- colMeans(X.proc)
spec.offset <- pmax(0,spec.offset)
}
spec.offset.update <- spec.offset
int.offset.update <- int.offset
if (extrapolate==TRUE){
extr <- (q[iter]-1)/q[iter+1]
spec.offset.update <- spec.offset + extr*(spec.offset - spec.offset.old)
int.offset.update <- int.offset + extr*(int.offset - int.offset.old)
}
op <- par(no.readonly = TRUE)
on.exit(par(op))
if (verbose == TRUE & iter %% 5 == 0){
X.offset <- as.matrix(int.offset)%*%t(as.matrix(spec.offset))
X.bio <- t(inten)%*%t(C)
X.resid <- X - X.offset - X.bio
objs <- c(objs, sqrt(sum((X.resid)^2)))
sbio <- c(sbio, sum(X.bio))
soff <- c(soff, sum(X.offset))
temppar <- par(mfrow=c(2,1),mar=c(2,2,0.5,0.5))
on.exit(par(temppar))
plot(1:length(objs), objs, cex = 0.5)
plot(1:length(sbio), sbio, cex = 0.5, col = "darkred", ylim=c(min(c(sbio,soff)),max(c(sbio,soff))))
points(1:length(soff), soff, cex = 0.5, col = "darkgreen")
print(paste("iteration:",iter))
print(paste("offset difference:",sqrt(sum((spec.offset-spec.offset.old)^2))/sqrt(sum((spec.offset)^2)) ))
print( sum(X.bio) )
print( sum(X.offset) )
print(paste("objective:", sqrt(sum((X.resid)^2))))
}
iter <- iter + 1
if (extrapolate.const == TRUE){
q[iter+1] <- 1 + q.factor
}else if (extrapolate.convex == TRUE){
q[iter+1] <- (1+sqrt(1+4*q[iter]^2))/2
} ##else if (extrapolate.majorate == TRUE){
## q[iter + 1] <- min(q.upper, q[iter + 1] * rate.q.up)
##}
}
rownames(inten) <- paste("comp", 1:ncol(C), sep = "")
return(list(intensities = inten, spec.offset = spec.offset, int.offset = int.offset))
}
|
/scratch/gouwar.j/cran-all/cranData/vrnmf/R/infer_intensities.R
|
#' Simulate matrices to explores \code{vrnmf}
#'
#' \code{sim_factors} simulates non-negative factorization matrices \code{C} and \code{D}
#' under a variaty of conditions to explore factorization \eqn{X = C*D + noise}.
#'
#' @param m Integers. Size of matrices. Matrix \code{C} has a size of \code{m*r}
#' and matrix \code{D} has a size of \code{r*n}.
#' @param n Integers. Size of matrices. Matrix \code{C} has a size of \code{m*r}
#' and matrix \code{D} has a size of \code{r*n}.
#' @param r Integers. Size of matrices. Matrix \code{C} has a size of \code{m*r}
#' and matrix \code{D} has a size of \code{r*n}.
#' @param simplex A character. Either columns ("col") or rows ("row") of matrix \code{C} are projected onto unit simplex. (default="col")
#' @param distr A character. Distribution to simulate matrix entries: "unif" for uniform and "exp" for exponential distributions. (default="unif")
#' @param frac.zeros A numeric. Fraction of zeros in matrix \code{C}. It promotes sufficient scattering of matrix column/row vectors. (default=0.4)
#' @param condition A boolean. Generate more well-conditioned matrix \code{R}. (default=FALSE)
#' @param noise A numeric. Standard deviation of gaussian noise to add. (default=0e-4)
#' @return List of simulated matrices:
#'
#' \code{X.noise}, \code{X} - noisy and original matrix \code{X} to decompose.
#'
#' \code{C}, \code{D} - factorization matrices.
#'
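#' @examples
#' # A minimal illustrative sketch: simulate a small noiseless factorization problem.
#' sim <- sim_factors(10, 8, 3)
#' dim(sim$X) # 10 x 8
#'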
#' @export
sim_factors <- function(m, n, r, simplex = "col", distr = "unif", frac.zeros = 0.4,
condition = FALSE, noise = 0e-4){
# sample matrices entries from a distribution
if (distr == "unif"){
C <- matrix(runif(m*r,0,1), nrow = m, ncol = r)
R <- matrix(runif(r*n,0,1), nrow = r, ncol = n)
}else if (distr == "exp"){
C <- matrix(rexp(m*r,1), nrow = m, ncol = r)
R <- matrix(rexp(r*n,1), nrow = r, ncol = n)
}
# add zeros but avoid zero rows and columns
if (!is.null(frac.zeros)){
C[sample(length(C), frac.zeros * length(C))] <- 0
C <- t(apply(C, 1, function(x){
if (max(x) < 1e-8) x[sample(length(x), 1)] <- runif(1,0,1)
return(x)
}))
C <- apply(C, 2, function(x){
if (max(x) < 1e-8) x[sample(length(x), 1)] <- runif(1,0,1)
return(x)
})
}
# project column/row vectors onto simplex
if (simplex=="col"){
C <- apply(C, 2, function(x) x/sum(x))
}else if (simplex=="row"){
C <- t(apply(C, 1, function(x) x/sum(x)))
}
# condition matrix if requested
if (condition == TRUE){
svR <- svd(R)
lams <- rep(1,length(svR$d))
R1 <- svR$u %*% diag(lams) %*% t(svR$v)
R <- R1
R[R < 0] <- 1e-3
}
X <- C %*% R
N <- matrix(rnorm(length(X), 0, noise), nrow = nrow(X), ncol = ncol(X))
X.noise <- X + N
return(list(X.noise = X.noise, X = X, C = C, D = R))
}
|
/scratch/gouwar.j/cran-all/cranData/vrnmf/R/simulate_factorization.R
|
#' Update of a matrix in NMF with equality constraints on columns.
#'
#' \code{volnmf_simplex_col} finds non-negative matrix \code{C} that minimizes the objective \code{||X-C*R||^2}
#' under constraints that columns of C equal to 1 using local approximation with extrapolation.
#'
#' @param X Numeric Matrices. Matrices involved in the objective function.
#' @param R Numeric Matrices. Matrices involved in the objective function.
#' @param C.prev Numeric Matrices. Matrices involved in the objective function. Matrix \code{C.prev} serves as initialization. (default=NULL)
#' @param bound A numeric. Equality constraint on columns of matrix \code{C}. (default=1)
#' @param extrapolate A boolean. Use extrapolation after local approximation. (default=TRUE)
#' @param err.cut A numeric. Stop iterations if relative error between iterations is less than \code{err.cut} (parameter is not active now). (default=1e-10)
#' @param n.iter An integer. Number of iterations. (default=1000)
#' @param qmax A numeric. Maximum asymptotic (1 - 1/qmax) of extrapolation step.
#' @return An updated matrix \code{C}.
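#' @examples
#' # A minimal illustrative sketch on simulated matrices (column-simplex constraint):
#' sim <- sim_factors(10, 8, 3)
#' C.upd <- volnmf_simplex_col(sim$X, sim$D, n.iter = 100)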
#' @export
volnmf_simplex_col <- function(X, R, C.prev = NULL, bound = 1, extrapolate = TRUE,
err.cut = 1e-10, n.iter = 1e+4, qmax = 1e+2){
if (is.null(C.prev)){
ft <- lm( t(X) ~ t(R) - 1) # estimate in a closed form!
C.prev <- t(ft$coefficients)
C.prev[C.prev < 0] <- 0
C.prev <- apply(C.prev, 2, function(x) x / sum(x) )
}
# precalculate matrices
S <- R %*% t(R)
K <- X %*% t(R)
Lip <- sqrt(sum(S^2))
err <- 1e+6
iter <- 1
C.update <- C <- C.prev
q <- c(1,(1+sqrt(5))/5)
obj <- vector()
while(err > err.cut & iter < n.iter){
G <- C.update %*% S - K
Chat <- C.update - G / Lip
C.prev <- C
C <- do.call(cbind, lapply(1:ncol(C), function(i){
projection_onto_simplex(Chat[, i], bound)
}))
if (extrapolate == TRUE){
extr <- (q[iter] - 1) / q[iter+1]
C.update <- C + extr * (C - C.prev)
}
obj <- c(obj, sqrt(sum((C-C.prev)^2))/sqrt(sum(C^2)) )
iter <- iter + 1
q[iter+1] <- min(qmax, (1 + sqrt(1 + 4 * q[iter]^2))/2 )
}
return(C)
}
#' Update of a matrix in NMF with equality constraints on rows.
#'
#' \code{volnmf_simplex_row} finds non-negative matrix \code{C} that minimizes the objective \code{||X-C*R||^2}
#' under constraints that rows of C equal to 1 using per-row quadratic programming.
#'
#' @param X Numeric Matrices. Matrices involved in the objective function.
#' @param R Numeric Matrices. Matrices involved in the objective function.
#' @param C.prev Numeric Matrices. Matrices involved in the objective function. Matrix \code{C.prev} serves as initialization. (default=NULL)
#' @param meq An integer 0 or 1. Require equality (\code{meq=1}) or inequality (\code{meq=0}) constraint on rows (by default 1).
#' @return An updated matrix \code{C}.
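#' @examples
#' # A minimal illustrative sketch on simulated matrices (row-simplex constraint):
#' sim <- sim_factors(10, 8, 3, simplex = "row")
#' C.upd <- volnmf_simplex_row(sim$X, sim$D)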
#' @export
volnmf_simplex_row <- function(X, R, C.prev = NULL, meq = 1){
Dmat <- R %*% t(R)
C.upd <- do.call(rbind, lapply(1:nrow(X), function(irow){
dvec <- R %*% X[irow, ]
Amat <- cbind(rep(-1, nrow(R)), diag(1, nrow(R)))
bvec <- c(-1, rep(0, nrow(R)))
ft <- quadprog::solve.QP(Dmat, dvec, Amat, bvec, meq = meq)
ft$solution
}))
return(C.upd)
}
#' Project vector onto a probabilistic simplex.
#'
#' \code{projection_onto_simplex} projects a vector \code{unproj} onto a probabilistic simplex of sum \code{bound}.
#'
#' @param unproj A numeric vector. An unprojected vector
#' @param bound A numeric. Sum of projected vector elements.
#' @return A projected vector.
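#' @examples
#' # A minimal illustrative sketch: the projected vector sums to the bound.
#' v <- projection_onto_simplex(c(0.3, 1.2, -0.5), 1)
#' sum(v) # equals 1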
#' @export
projection_onto_simplex <- function(unproj, bound){
q <- sort(unproj, decreasing = TRUE, method = "quick")
qcum <- cumsum(q)
mu <- (qcum - bound) / 1:length(qcum)
cond1 <- (mu[-length(mu)] - q[-1]) > 0
if (max(cond1) == 0) {
ind <- length(mu)
}else{
ind <- which.max(cond1)
}
return( pmax(0, unproj - mu[ind]) )
}
|
/scratch/gouwar.j/cran-all/cranData/vrnmf/R/update_simplex.R
|
#' Procrustes algorithm estimates orthonormal transformation between two matrices.
#'
#' \code{volnmf_procrustes} finds orthonormal matrix \code{Q} that minimizes objective
#' \code{||A-B*Q||^2}
#'
#' @param A Numeric matrix. The orthonormal transformation converts matrix \code{B} into matrix \code{A}.
#' @param B Numeric matrix. The orthonormal transformation converts matrix \code{B} into matrix \code{A}.
#' @return An optimal orthonormal transformation matrix \code{Q}.
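#' @examples
#' # A minimal illustrative sketch: recover a known rotation between two matrices.
#' set.seed(1)
#' A <- matrix(rnorm(12), 4, 3)
#' Q.true <- qr.Q(qr(matrix(rnorm(9), 3, 3)))
#' Q.est <- volnmf_procrustes(A, A %*% t(Q.true))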
#' @export
volnmf_procrustes <- function(A, B){
sv <- svd(t(A) %*% B)
return(sv$v %*% t(sv$u))
}
|
/scratch/gouwar.j/cran-all/cranData/vrnmf/R/update_unitary.R
|
#' Update volume-regularized matrix \code{R} using logdet volume approximation.
#'
#' \code{volnmf_logdet} finds matrix \code{R} that minimizes objective
#' \code{||X-C*R||^2 + w.vol*log(det(R)+delta)}.
#'
#' @param C Numeric matrix. Matrix involved in the objective function.
#' @param X Numeric matrix. Matrix involved in the objective function.
#' @param R Numeric matrix. Matrix involved in the objective function; serves as initialization.
#' @param R.constraint A character. Set up ('pos') or not ('no') non-negative constraints on matrix \code{R} (by default 'pos').
#' @param majorate A boolean. Majorate logdet each iteration (by default FALSE).
#' @param extrapolate A boolean. Use Nesterov-like acceleration (by default TRUE).
#' @param qmax A numeric. Maximum asymptotic (1 - 1/qmax) of extrapolation step.
#' @param w.vol A numeric. Volume (logdet) weight in objective function.
#' @param delta A numeric. Determinant pseudocount in objective function.
#' @param err.cut A numeric. Stop the algorithm if the relative error in R between iterations is less than \code{err.cut}.
#' @param n.iter An integer. Number of iterations.
#' @return An updated matrix \code{R}.
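#' @examples
#' # A minimal illustrative sketch with a small square R, as used inside volnmf:
#' set.seed(1)
#' C <- matrix(runif(30), 10, 3)
#' R <- diag(3) + 0.05 * matrix(runif(9), 3, 3)
#' X <- C %*% R
#' R.upd <- volnmf_logdet(C, X, R, w.vol = 1e-3, n.iter = 50)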
#' @export
volnmf_logdet <- function(C, X, R, R.constraint = "pos", majorate = FALSE, extrapolate = TRUE, qmax = 100,
w.vol = 1e-1, delta = 1, err.cut = 1e-3, n.iter = 1e+3){
W.update <- W <- t(R)
H <- t(C)
FM <- solve(t(W) %*% (W) + delta * diag(1,nrow(W)))
iter <- 1
err <- 1e+5
q <- c(1,(1+sqrt(5))/5)
#obj <- vector(); obj.violent <- 0
while (err > err.cut & iter < n.iter){
W.prev <- W
if (majorate == TRUE){
Y <- W.prev
FM <- solve(t(Y) %*% Y + delta * diag(1, nrow(Y)) )
}
if (R.constraint == "pos"){
Lip <- sqrt(sum((H %*% t(H) + w.vol * FM)^2))
gradF <- W.update %*% (H %*% t(H) + w.vol*FM) - t(X) %*% t(H)
W <- W.update - gradF/Lip
W[W < 0] <- 0
}else{
W <- t(X) %*% t(H) %*% solve(H %*% t(H) + w.vol * FM)
}
if (extrapolate == TRUE){
extr <- (q[iter] - 1) / q[iter+1]
W.update <- W + extr*(W-W.prev)
# dow warm restart?
#obj[iter] <- sum((t(X)-W%*%H)^2) + w.vol*sum(diag(FM%*%(t(W)%*%W)))
#if (iter>=2){
# if (obj[iter] < obj[iter-1]){
# W <- Wfast
# }else{
# obj.violent <- obj.violent + 1
# }
#}
}else{
W.update <- W
}
#func1 <- sum((t(X) - W%*%H)^2)
#func2 <- w.vol*log(det( t(W)%*%W + delta*diag(1,nrow(Y)) ))
#func <- func1 + func2
#print(eigen( t(Y) %*% Y + delta * diag(1, nrow(Y)) )$values)
#print(c(func,func1,func2))
err <- sum((W-W.prev)^2)/sum(W^2)
iter <- iter + 1
q[iter+1] <- min(qmax, (1 + sqrt(1 + 4 * q[iter]^2))/2 )
}
#cat(paste('done.. ',iter,' iterations ',err,' error\n'))
#cat(paste('violations.. ',obj.violent,'\n'))
return(t(W))
}
#' Update volume-regularized matrix \code{R} using det volume approximation
#'
#' \code{volnmf_det} finds matrix \code{R} that minimizes objective
#' \code{||X-C*R||^2 + w.vol*det(R)}
#'
#' @param C Numeric matrix. Matrix involved in the objective function.
#' @param X Numeric matrix. Matrix involved in the objective function.
#' @param R Numeric matrix. Matrix involved in the objective function; serves as initialization.
#' @param posit A boolean. Set up (TRUE) or not (FALSE) non-negative constraints on matrix \code{R}. (default=FALSE)
#' @param w.vol A numeric. Volume (det) weight in objective function. (default=0.1)
#' @param eigen.cut A numeric. Threshold on eigenvalue of SVD eigenvectors. (default=1e-16)
#' @param err.cut A numeric. Stop the algorithm if the relative error in R between iterations is less than \code{err.cut}. (default=1e-3)
#' @param n.iter An integer. Number of iterations. (default=1e+3)
#' @return An updated matrix \code{R}.
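#' @examples
#' # A minimal illustrative sketch with a small square R, as used inside volnmf:
#' set.seed(1)
#' C <- matrix(runif(30), 10, 3)
#' R <- diag(3) + 0.05 * matrix(runif(9), 3, 3)
#' X <- C %*% R
#' R.upd <- volnmf_det(C, X, R, w.vol = 1e-3, n.iter = 10)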
#' @export
volnmf_det <- function(C, X, R, posit=FALSE,
w.vol = 1e-1, eigen.cut = 1e-16, err.cut = 1e-3, n.iter = 1e+3){
iter <- 1
err <- 1e+5
while (err > err.cut & iter < n.iter){
R.prev <- R
for (i in 1:nrow(R)){
W <- t(R)
WH <- W %*% t(C)
# find quadratic approximation of determinant
Wi <- W[, -i]
nui <- det(t(Wi) %*% Wi)
Xi <- t(X) - WH + as.matrix(W[, i]) %*% t(as.matrix(C[, i]))
#B <- diag(1,nrow(W)) - Wi%*%solve(t(Wi)%*%Wi)%*%t(Wi)
# more efficient method to find B using null subspace analysis!
sv <- svd(t(Wi), nu = ncol(Wi), nv = nrow(Wi))
nn <- sum(sv$d < eigen.cut)
Ci <- sv$v[, (ncol(sv$v) - nn):ncol(sv$v)] # I take only last column - but how many to take??
B <- Ci %*% t(Ci)
#nui * (W[,i]) %*% B %*% W[,i]
#det(t(W) %*% W)
# form quadratic form
Qi <- sum(C[, i]^2) * diag(1, nrow(B)) + w.vol * nui * B
fi <- Xi %*% C[,i]
Amat <- diag(1, nrow(Qi));
if (posit == TRUE){
bvec <- rep(0, nrow(Qi))
}else{
bvec <- rep(-1e+6, nrow(Qi))
}
ft <- quadprog::solve.QP(Dmat = Qi, dvec = fi, Amat = Amat, bvec = bvec)
R[i,] <- ft$solution
}
err <- sum((R-R.prev)^2)/sum(R^2)
iter <- iter + 1
}
return(R)
}
|
/scratch/gouwar.j/cran-all/cranData/vrnmf/R/update_volume.R
|
#' Volume-regularized NMF
#'
#' \code{volnmf_main} enables volume-regularized factorization of a matrix \code{B} using the following objective function:
#' \eqn{F = ||B*Q - C*R||^2 + w.vol*volume(R)}. Matrix \code{C} is required to be non-negative and having either column or row vectors on the simplex.
#' Matrix \code{R} can optionally have non-negativity constraint. Matrix \code{Q} can optionally be identity matrix or any unitary.
#' The latter option is used to decompose the co-occurrence matrix \code{vol$P}.
#'
#' @param vol An output object of vol_preprocess().
#' @param B A numeric matrix. A matrix to factorize (by default NULL). If not given than matrix \code{B} is taken to be a square root decomposition of \eqn{P = B*t(B)}.
#' @param volnmf An output object of \code{volnmf.main}. An option is useful to re-estimate solution using different parameters (by default NULL).
#' @param n.comp An integer. Number of components to extract (by default 3). Defines number of columns in matrix \eqn{C}.
#' @param n.reduce An integer. Dimensional reduction of matrix B (number of columns) if taken as a square root decomposition of \code{volP} (by default equal to \code{n.comp}).
#' @param do.nmf A boolean. Estimate standard solution with \code{w.vol=0} as initialization before applying volume regularization (by default \code{TRUE}).
#' @param iter.nmf An integer. Number of iterations to get solution with \code{w.vol=0} if the former is requested (by default \code{100}).
#' @param seed An integer. Fix seed.
#' @param domain A character. Optimize unitary rotation matrix \code{Q} ("covariance") or keep it as identity matrix (as in standard NMF). By default "covariance".
#' @param volf A character. Function that approximate volume. Can have values of "logdet" or "det" (by default "logdet").
#' @param wvol A numeric. A weight of volume-regularized term \code{volume(R)}.
#' @param delta A numeric. Logdet regularization term \code{log(det(R) + delta)} (by default 1e-8).
#' @param n.iter An integer. Number of iterations (by default \code{500}).
#' @param err.cut A numeric. Relative error in determinant between iterations to stop the algorithm (by default \code{1e-16}).
#' @param vol.iter An integer. Number of iterations to update volume-regularized matrix \code{R} at each alternating step.
#' @param c.iter An integer. Number of iterations to update simplex matrix \code{C} at each alternating step.
#' @param extrapolate A numeric. Do Nesterov extrapolation inside blocks of R and C optimization (by default TRUE).
#' @param accelerate A numeric. Do acceleration each update after R and C blocks estimated via Nesterov-like extrapolation.
#' @param acc.C A numeric. Acceleration parameter of matrix C.
#' @param acc.R A numeric. Acceleration parameter of matrix R.
#' @param C.constraint A character. Constraint either sum of columns ("col") or sum of rows ("row) to be equal to \code{C.bound} (By default "col").
#' @param C.bound A numeric. A simplex constraint on matrix C vectors.
#' @param R.constraint A character. Set up non-negativity ("pos") constraint on elements of \code{R} (by default "pos", alternative "no").
#' @param R.majorate A boolean. Majorate logdet each iteration of \code{volnmf_logdet()} (by default FALSE).
#' @param C.init Numeric matrices. Initialization of matrices \code{C, R, Q} (by default \code{NULL}).
#' @param R.init Numeric matrices. Initialization of matrices \code{C, R, Q} (by default \code{NULL}).
#' @param Q.init Numeric matrices. Initialization of matrices \code{C, R, Q} (by default \code{NULL}).
#' @param anchor An output object of \code{AnchorFree()}. Object is used optionally to initialize matrices (by default \code{NULL}).
#' @param Ctrue A matrix. Correct matrix C if known. Useful for benchmark.
#' @param verbose A boolean. Print per-iteration information (by default TRUE).
#' @param record A numeric. Record parameters every 'record' iterations (by default \code{100}).
#' @param verbose.nmf A boolean. Print per-iteration information for standard NMF (by default FALSE).
#' @param record.nmf A numeric. Record parameters every 'record' iterations for standard NMF (by default \code{NULL}).
#' @param mutation.run A boolean. Assess goodness of solution using reflection test if mutation.run=TRUE (applicable only to analysis of mutation patterns).
#' @return List of objects:
#'
#' \code{C, R, Q} Factorization matrices.
#'
#' \code{C.init, R.init, Q.init} Initialization matrices for volume-regularized optimization.
#'
#' \code{C.rand, R.rand, Q.rand} Random initialization matrices for NMF optimization \code{(w.vol=0)}.
#'
#' \code{rec} a list of objects that record and store state of matrices each \code{record} iterations.
#'
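#' @examples
#' \dontrun{
#' # A minimal sketch of a full run (not the package's canonical workflow):
#' # `sim_factors()` and `vol_preprocess()` come from this package, while the
#' # `wvol` value and the iteration counts below are purely illustrative.
#' sim <- sim_factors(5, 5, 5)
#' vol <- vol_preprocess(t(sim$X))
#' res <- volnmf_main(vol, n.comp = 5, wvol = 2e-2, n.iter = 200,
#'                    vol.iter = 20, c.iter = 20, verbose = FALSE, record = NULL)
#' str(res$C)
#' }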
#' @export
volnmf_main <- function(vol, B = NULL, volnmf = NULL, n.comp = 3, n.reduce = n.comp,
do.nmf=TRUE, iter.nmf = 1e+2, seed = NULL,
domain = "covariance", volf = 'logdet',
wvol = NULL, delta = 1e-8, n.iter = 5e+2, err.cut = 1e-16,
vol.iter = 2e+1, c.iter = 2e+1,
extrapolate = TRUE, accelerate = FALSE, acc.C = 4/5, acc.R = 3/4,
C.constraint = "col", C.bound = 1, R.constraint = "pos", R.majorate = FALSE,
C.init = NULL, R.init = NULL, Q.init = NULL, anchor = NULL, Ctrue = NULL,
verbose = TRUE, record = 100, verbose.nmf = FALSE, record.nmf = NULL, mutation.run = FALSE){
#B <- NULL; n.comp <- 14; n.reduce <- n.comp; volnmf <- NULL;
#domain <- "covariance"; volf <- 'logdet';
#wvol <- NULL; delta <- 1e-8; n.iter <- 1e+4; err.cut <- 1e-8;
#vol.iter <- 1e+2; c.iter <- 1e+2;
#C.constraint <- "col"; C.bound <- 1; R.constraint <- "pos";
#C.init <- NULL; R.init <- NULL;
#anchor = NULL;
#frac.zero = 0.3; verbose = TRUE; record = 100
if (mutation.run==FALSE) {
rate.rec <- NULL
xcompl <- NULL
}
# matrix B
if (is.null(B)){
if (!is.null(volnmf)){
B <- volnmf$B
}else if(!is.null(vol)){
B <- -vol$U[,1:n.reduce]%*%sqrt(diag(vol$eigens)[1:n.reduce,1:n.reduce])
}
}
if (is.null(Q.init)) Q.init <- diag(1,nrow=n.reduce,ncol=n.comp)
if (!is.null(seed)) set.seed(seed)
### initialize matrices
if (!is.null(anchor)){ # initial matrices come from AnchorFree algorithm
C.init <- anchor$C
E <- anchor$E
svE <- svd(E)
Esq <- svE$u %*% sqrt(diag(svE$d))
Qe <- diag(1, nrow(E))
for (jj in 1:100){
R <- pmax(Esq %*% Qe,0)
Qe <- volnmf_procrustes(R, Esq)
}
R.init <- R
Q.init <- Qe
#Q.init <- diag(1, nrow = n.reduce,ncol = n.comp)
  }else{ # initial matrices are generated from a uniform distribution
if (is.null(C.init)){
C <- matrix(runif(nrow(B)*n.comp, 0, 1), nrow = nrow(B), ncol = n.comp)
if (C.constraint == "col"){
C <- apply(C, 2, function(x) x / sum(x))
    }else if (C.constraint == "row"){
      C <- t(apply(C, 1, function(x) x / sum(x))) # transpose: apply() over rows returns them as columns
}
C.init <- C * C.bound
}
if (is.null(R.init)){
R.init <- matrix(runif(n.comp*n.comp, 0, 1 / C.bound), n.comp, n.comp)
}
}
C.rand <- C.init; R.rand <- R.init; Q.rand <- Q.init
if (do.nmf == TRUE){
message('run standard nmf.. ')
nmf.solution <- volnmf_estimate(B, C = C.init, R = R.init, Q = Q.init,
domain = domain, volf = volf,
wvol = 0, delta = delta, n.iter = iter.nmf, err.cut = err.cut,
#vol.iter = vol.iter / 10, c.iter = c.iter / 10,
vol.iter = 5, c.iter = 5,
extrapolate = extrapolate, accelerate = accelerate, acc.C = acc.C, acc.R = acc.R,
C.constraint = C.constraint, C.bound = C.bound, R.constraint = R.constraint,
verbose = verbose.nmf, record = record.nmf, Ctrue = Ctrue)
message('done')
message('\n')
C.init <- nmf.solution$C; R.init <- nmf.solution$R; Q.init <- nmf.solution$Q
}
if (is.null(wvol)) {
wvol <- 0
}
# for logdet: wvol = 0.006, for det: wvol = 5e-11 or 1e-22?
message('run volume-regularized nmf.. ')
vol.solution <- volnmf_estimate(B, C = C.init, R = R.init, Q = Q.init,
domain = domain, volf = volf, R.majorate = R.majorate,
wvol = wvol, delta = delta, n.iter = n.iter, err.cut = err.cut,
vol.iter = vol.iter, c.iter = c.iter,
extrapolate = extrapolate, accelerate = accelerate, acc.C = acc.C, acc.R = acc.R,
C.constraint = C.constraint, C.bound = C.bound, R.constraint = R.constraint,
verbose = verbose, record = record, Ctrue = Ctrue, mutation.run = mutation.run )
message('done')
message('\n')
return( list( C = vol.solution$C, R = vol.solution$R, Q = vol.solution$Q,
C.init = C.init, R.init = R.init, Q.init = Q.init,
C.rand = C.rand, R.rand = R.rand, Q.rand = Q.rand,
rec = vol.solution$info.record ) )
}
#' Alternating optimization of volume-regularized NMF
#'
#' \code{volnmf_estimate} provides alternating optimization of volume-regularized factorization of a matrix \code{B} using the following objective function:
#' \eqn{F = ||B*Q - C*R||^2 + w.vol*volume(R)}. Matrix \code{C} is required to be non-negative and to have either column or row vectors on the simplex.
#' Matrix \code{R} can optionally have a non-negativity constraint. Matrix \code{Q} can either be the identity matrix (as in standard NMF) or any unitary matrix.
#'
#' @param B A numeric matrix. The matrix to factorize, typically a square root decomposition of the covariance matrix such that \eqn{P = B*t(B)}.
#' @param C A numeric matrix. Initial value of matrix \code{C} for optimization.
#' @param R A numeric matrix. Initial value of matrix \code{R} for optimization.
#' @param Q A numeric matrix. Initial value of matrix \code{Q} for optimization.
#' @param domain A character. Optimize unitary rotation matrix \code{Q} ("covariance") or keep it as identity matrix (as in standard NMF). By default "covariance".
#' @param volf A character. Function that approximates volume. Can have values of "logdet" or "det" (by default "logdet").
#' @param R.majorate A boolean. Majorate logdet each iteration of \code{volnmf_logdet()} (by default FALSE).
#' @param wvol A numeric. A weight of volume-regularized term \code{volume(R)}.
#' @param delta A numeric. Logdet regularization term \code{log(det(R) + delta)} (by default 1e-8).
#' @param n.iter An integer. Number of iterations (by default \code{10,000}).
#' @param err.cut A numeric. Relative error in determinant between iterations to stop algorithm (by default \code{1e-8}).
#' @param vol.iter An integer. Number of iterations to update volume-regularized matrix \code{R} at each alternating step.
#' @param c.iter An integer. Number of iterations to update simplex matrix \code{C} at each alternating step.
#' @param extrapolate A boolean. Do Nesterov extrapolation inside the R and C optimization blocks (by default TRUE).
#' @param accelerate A boolean. Accelerate each update after the R and C blocks are estimated, via a Nesterov-like extrapolation (by default TRUE).
#' @param acc.C A numeric. Acceleration parameter of matrix C.
#' @param acc.R A numeric. Acceleration parameter of matrix R.
#' @param C.constraint A character. Constrain either the sum of columns ("col") or the sum of rows ("row") to be equal to \code{C.bound} (by default "col").
#' @param C.bound A numeric. A simplex constraint on matrix C vectors.
#' @param R.constraint A character. Set up non-negativity ("pos") constraint on elements of \code{R} (by default "pos", alternative "no").
#' @param verbose A boolean. Print per-iteration information (by default TRUE).
#' @param record A numeric. Record parameters every 'record' iterations (by default \code{100}).
#' @param Canchor A matrix. A matrix of anchor components (unused currently). (default=NULL)
#' @param Ctrue A matrix. Correct matrix C if known. Useful for benchmark.
#' @param mutation.run A boolean. Assess goodness of solution using reflection test if mutation.run=TRUE (applicable only to analysis of mutation patterns). (default=FALSE)
#' @return List of objects:
#'
#' \code{C, R, Q} Factorization matrices.
#'
#' \code{iter, err} Number of iterations and relative per-iteration error \code{err} in matrix \code{C}.
#'
#' \code{info.record} a list of objects that record and store state of matrices each \code{record} iterations.
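#'
#' @examples
#' \dontrun{
#' # A sketch with random initialization; in practice volnmf_main() prepares
#' # B, C, R and Q. `sim_factors()`/`vol_preprocess()` come from this package,
#' # and the `wvol` value and iteration counts are illustrative.
#' sim <- sim_factors(5, 5, 5)
#' vol <- vol_preprocess(t(sim$X))
#' B <- -sweep(vol$U[, 1:5], 2, sqrt(vol$eigens[1:5]), "*")
#' C0 <- apply(matrix(runif(nrow(B) * 5), nrow(B), 5), 2, function(x) x / sum(x))
#' R0 <- matrix(runif(25), 5, 5)
#' sol <- volnmf_estimate(B, C = C0, R = R0, Q = diag(5),
#'                        wvol = 2e-2, n.iter = 200, vol.iter = 20, c.iter = 20,
#'                        verbose = FALSE, record = NULL)
#' }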
#' @export
volnmf_estimate <- function(B, C, R, Q,
domain = "covariance", volf = 'logdet', R.majorate = FALSE,
wvol = NULL, delta = 1e-8, n.iter = 1e+4, err.cut = 1e-8,
vol.iter = 1e+2, c.iter = 1e+2,
extrapolate = TRUE, accelerate = TRUE, acc.C = 4/5, acc.R = 3/4,
C.constraint = "col", C.bound = 1, R.constraint = "pos",
verbose = TRUE, record = 100, Canchor = NULL, Ctrue = NULL, mutation.run = FALSE){
iter <- 1
err <- 1e+5
rvol <- vector()
aff.mean <- vector()
info.record <- list()
eigens <- 1
R.update <- R; C.update <- C
tot.update.prev <- tot.update <- 0
if (mutation.run==FALSE) {
rate.rec <- NULL
xcompl <- NULL
vol <- NULL
}
while (iter < n.iter & err > err.cut #& (min(eigens) > 1e-16 | iter<3)
){
if (domain == "covariance"){
X <- B %*% Q
} else{
X <- B
}
### update R
err.prev <- sum((X - C.update %*% R)^2)
if (volf == "logdet"){
vol.prev <- log(det(R %*% t(R) + delta * diag(1, nrow(R))))
} else if (volf == "det"){
vol.prev <- det(R %*% t(R))
}
R.prev <- R
if (volf == "logdet"){
R <- volnmf_logdet(C.update, X, R.update, R.constraint = R.constraint, extrapolate = extrapolate, majorate = R.majorate,
w.vol = wvol, delta = delta, err.cut = 1e-100, n.iter = vol.iter)
} else if (volf == "det"){
R <- volnmf_det(C.update, X, R.update, posit=FALSE, w.vol=wvol, eigen.cut=1e-20, err.cut = 1e-100, n.iter = vol.iter)
}
err.post <- sum((X - C.update %*% R)^2)
if (volf == "logdet"){
vol.post <- log(det(R %*% t(R)+delta * diag(1, nrow(R))))
} else if (volf == "det"){
vol.post <- det(R %*% t(R))
}
rvol[iter] <- vol.post
### update C
C.prev <- C
if (C.constraint == "col"){
C <- volnmf_simplex_col(X, R, C.prev = C.update, bound = C.bound, extrapolate = extrapolate,
err.cut = 1e-100, n.iter = c.iter)
} else{
C <- volnmf_simplex_row(X, R, C.prev = C.update, meq = 1)
}
err.post.C <- sum((X - C %*% R.update)^2)
# accelerate C if possible
if (accelerate == TRUE){
C.update <- C + acc.C * (C - C.prev)
R.update <- R + acc.R * (R - R.prev)
C.update[C.update < 0] <- 0
R.update[R.update < 0] <- 0
err.update <- sum((X - C %*% R.update)^2)
vol.update <- log(det(R.update %*% t(R.update)+delta * diag(1, nrow(R.update))))
tot.update <- err.update + wvol*vol.update
if (tot.update > tot.update.prev){
C.update <- C
R.update <- R
}
} else{
C.update <- C
R.update <- R
}
tot.update.prev <- tot.update
#err.post.C.prev <- err.post.C
### optimize Q
if (domain == "covariance"){
Q <- volnmf_procrustes(C %*% R, B)
}
err <- sum((C - C.prev)^2) / sum(C^2)
eigens <- eigen(R %*% t(R))$values
aff <- 1
if (mutation.run == TRUE){
rownames(C) <- colnames(rate.rec)
aff <- apply(abs(cor(C, C[xcompl, ])), 1, max)
}else if (!is.null(Ctrue)){
if (is.null(vol)){
aff <- apply(cor(C, Ctrue), 1, max)
}else{
aff <- apply(cor(C*vol$col.factors, Ctrue), 1, max)
}
}
aff.mean[iter] <- mean(aff)
##op <- par(no.readonly = TRUE)
##on.exit(par(op))
if (verbose == TRUE & (iter %% 100 == 0)){
temppar <- par(mfrow=c(2,1),mar=c(4,4,1,1))
on.exit(par(temppar))
plot(1:iter, rvol, pch=19, cex=0.1, xlab="iteration", ylab="Vol")
#if (!is.null(vol.ref)) {abline(h=vol.ref,col="red",lwd=1)}
cmax <- aff.mean[length(aff.mean)]
if (!is.null(Canchor)) {cmax <- mean(apply(abs(cor(Canchor, Canchor[xcompl, ])), 1, max))}
plot(1:iter, aff.mean, pch=19, cex=0.1, xlab="iteration", ylab="Affinity",
ylim=c(min(c(aff.mean, cmax)), 1))
abline(h=cmax, col="red", lwd=1)
}
if (verbose==TRUE & !is.null(record)){
if (iter %% record == 0){
message(paste("iteration", iter, "\n"))
message(paste("Before R update.. ","fit err:",err.prev,'vol:',wvol*vol.prev,'total:',err.prev + wvol*vol.prev,"\n" ))
message(paste("After R update.. ","fit err:",err.post,'vol:',wvol*vol.post,'total:',err.post + wvol*vol.post,"\n" ))
#cat(paste("Fraction R>0: ", sum(R > -1e-10)/length(R),"\n"))
message(paste("After C update.. ","fit err:",err.post.C,'vol:',wvol*vol.post,'total:',err.post.C + wvol*vol.post,"\n" ))
message(paste("Mean affinity:",mean(aff),"\n"))
message("Affinities: ")
message('\n')
message(aff)
message('\n')
message("Eigenvalues of R%*%t(R):")
message('\n')
message(eigens)
message("\n")
}
}
if (!is.null(record)){
if (iter %% record == 0){
rec <- list(C = C, R = R, Q = Q, iter = iter)
info.record <- c(info.record, rec)
}
}
iter <- iter+1
}
return( list(C = C, R = R, Q = Q, iter = iter, err = err, info.record = info.record) )
}
|
/scratch/gouwar.j/cran-all/cranData/vrnmf/R/volnmf_estimate.R
|
#' @import Matrix
NULL
#' Preprocess the data for downstream volume analysis.
#'
#' \code{vol_preprocess} normalizes the data (as requested) and estimates the covariance matrix and its SVD decomposition.
#'
#' @param X A numeric matrix. Covariance is estimated for column vectors of \code{X}.
#' @param col.norm A character. Specifies the column normalization strategy (by default "sd"). NULL to avoid normalization.
#' @param row.norm A character. Specifies the row normalization strategy (by default NULL).
#' Row normalization follows column normalization. NULL to avoid normalization.
#' @param pfactor A numeric. A factor to normalize the co-occurrence matrix (by default NULL, in which case the maximum value of the covariance matrix is used).
#' @return A list of objects that include normalized matrix \code{X.process}, row and column normalization factors \code{row.factors} and \code{col.factors},
#' covariance matrix \code{P0}, covariance matrix \code{P} normalized to maximum value \code{pfactor},
#' orthonormal basis \code{U} and vector of eigenvalues \code{eigens}.
#' @examples
#' small_example <- sim_factors(5, 5, 5)
#' vol <- vol_preprocess(t(small_example$X))
#'
#' @export
vol_preprocess <- function(X, col.norm = "sd", row.norm = NULL, pfactor = NULL){
row.factors <- rep(1, nrow(X))
col.factors <- rep(1, ncol(X))
if (!is.null(col.norm)){
if (col.norm == "sd"){
col.factors <- apply(X,2,sd)
}
}
if (!is.null(row.norm)) {
if (row.norm == "sd") {
row.factors <- apply(X,1,sd)
}
}
X.process <- t(t(X) / col.factors) / row.factors
P0 <- t(X.process) %*% X.process
if (is.null(pfactor)) {
pfactor <- max(P0)
}
P <- P0 / pfactor # how to normalize P to avoid inf problems in AnchorFree?
dimr <- svd(P)
return( list(X.process = X.process, row.factor=row.factors, col.factors=col.factors,
P0=P0, P = P, pfactor = pfactor, U = dimr$u, eigens = dimr$d) )
}
|
/scratch/gouwar.j/cran-all/cranData/vrnmf/R/vpreprocess.R
|
#' Structure of objects
#'
#' Similar to `str()` but with more information for Altrep objects.
#'
#' @param x a vector
#' @examples
#' # when used on non-altrep objects altrep will always be false
#' vroom_str(mtcars)
#'
#' mt <- vroom(vroom_example("mtcars.csv"), ",", altrep = c("chr", "dbl"))
#' vroom_str(mt)
#' @export
vroom_str <- function(x) {
UseMethod("vroom_str")
}
#' @export
vroom_str.data.frame <- function(x) {
classes <- glue::glue_collapse(glue::single_quote(class(x)), ", ", last = ", and ")
rows <- nrow(x)
cols <- ncol(x)
cat(glue::glue("{classes}: {rows} obs., {cols} vars.:\n\n"), sep = "")
nms <- names(x)
for (i in seq_along(x)) {
cat("$", nms[[i]], ":\t", sep = "")
vroom_str(x[[i]])
}
}
#' @export
vroom_str.default <- function(x) {
cat(vroom_str_(x))
}
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/altrep.R
|
#' Create column specification
#'
#' `cols()` includes all columns in the input data, guessing the column types
#' as the default. `cols_only()` includes only the columns you explicitly
#' specify, skipping the rest.
#'
#' The available specifications are: (long names in quotes and string abbreviations in brackets)
#'
#' | function | long name | short name | description |
#' | ---------- | ----------- | ---------- | ------------- |
#' | `col_logical()` | "logical" | "l" | Logical values containing only `T`, `F`, `TRUE` or `FALSE`. |
#' | `col_integer()` | "integer" | "i" | Integer numbers. |
#' | `col_big_integer()` | "big_integer" | "I" | Big Integers (64bit), requires the `bit64` package. |
#' | `col_double()` | "double", "numeric" | "d" | 64-bit double floating point numbers. |
#' | `col_character()` | "character" | "c" | Character string data. |
#' | `col_factor(levels, ordered)` | "factor" | "f" | A fixed set of values. |
#' | `col_date(format = "")` | "date" | "D" | Calendar dates formatted with the locale's `date_format`. |
#' | `col_time(format = "")` | "time" | "t" | Times formatted with the locale's `time_format`. |
#' | `col_datetime(format = "")` | "datetime", "POSIXct" | "T" | ISO8601 date times. |
#' | `col_number()` | "number" | "n" | Human readable numbers containing the `grouping_mark` |
#' | `col_skip()` | "skip", "NULL" | "_", "-" | Skip and don't import this column. |
#' | `col_guess()` | "guess", "NA" | "?" | Parse using the "best" guessed type based on the input. |
#'
#' @param ... Either column objects created by `col_*()`, or their abbreviated
#' character names (as described in the `col_types` argument of
#' [vroom()]). If you're only overriding a few columns, it's
#' best to refer to columns by name. If not named, the column types must match
#' the column names exactly. In `col_*()` functions these are stored in the
#' object.
#' @param .default Any named columns not explicitly overridden in `...`
#' will be read with this column type.
#' @param .delim The delimiter to use when parsing. If the `delim` argument is
#'   used in the call to `vroom()`, it takes precedence over the one specified in
#'   `col_types`.
#' @export
#' @aliases col_types
#' @examples
#' cols(a = col_integer())
#' cols_only(a = col_integer())
#'
#' # You can also use the standard abbreviations
#' cols(a = "i")
#' cols(a = "i", b = "d", c = "_")
#'
#' # Or long names (like utils::read.csv)
#' cols(a = "integer", b = "double", c = "skip")
#'
#' # You can also use multiple sets of column definitions by combining
#' # them like so:
#'
#' t1 <- cols(
#' column_one = col_integer(),
#' column_two = col_number())
#'
#' t2 <- cols(
#' column_three = col_character())
#'
#' t3 <- t1
#' t3$cols <- c(t1$cols, t2$cols)
#' t3
cols <- function(..., .default = col_guess(), .delim = NULL) {
col_types <- list2(...)
is_character <- vapply(col_types, is.character, logical(1))
col_types[is_character] <- lapply(col_types[is_character], col_concise)
if (is.character(.default)) {
.default <- col_concise(.default)
}
col_spec(col_types, .default, .delim)
}
#' @export
#' @rdname cols
cols_only <- function(...) {
cols(..., .default = col_skip())
}
# col_spec ----------------------------------------------------------------
col_spec <- function(col_types, default = col_guess(), delim) {
stopifnot(is.list(col_types))
stopifnot(is.collector(default))
is_collector <- vapply(col_types, is.collector, logical(1))
if (any(!is_collector)) {
stop("Some `col_types` are not S3 collector objects: ",
paste(which(!is_collector), collapse = ", "), call. = FALSE)
}
structure(
list(
cols = col_types,
default = default,
delim = delim
),
class = "col_spec"
)
}
is.col_spec <- function(x) inherits(x, "col_spec")
#' Coerce to a column specification
#'
#' This is most useful for generating a specification using the short form or coercing from a list.
#'
#' @param x Input object
#' @keywords internal
#' @examples
#' as.col_spec("cccnnn")
#' @export
as.col_spec <- function(x) UseMethod("as.col_spec")
#' @export
as.col_spec.character <- function(x) {
if (is_named(x)) {
return(as.col_spec(as.list(x)))
}
letters <- strsplit(x, "")[[1]]
col_spec(lapply(letters, col_concise), col_guess(), delim = NULL)
}
#' @export
as.col_spec.NULL <- function(x) {
col_spec(list(), delim = NULL)
}
#' @export
as.col_spec.list <- function(x) {
do.call(cols, x)
}
#' @export
as.col_spec.col_spec <- function(x) {
if (!"delim" %in% names(x)) {
x["delim"] <- list(NULL)
}
x
}
#' @export
as.col_spec.default <- function(x) {
stop("`col_types` must be NULL, a list or a string", call. = FALSE)
}
# Conditionally exported in zzz.R
# @export
print.col_spec <- function(x, n = Inf, condense = NULL, colour = crayon::has_color(), ...) {
cat(format.col_spec(x, n = n, condense = condense, colour = colour, ...))
invisible(x)
}
#' @description
#' `cols_condense()` takes a spec object and condenses its definition by setting
#' the default column type to the most frequent type and only listing columns
#' with a different type.
#' @rdname spec
#' @export
cols_condense <- function(x) {
types <- vapply(x$cols, function(xx) class(xx)[[1]], character(1))
counts <- table(types)
most_common <- names(counts)[counts == max(counts)][[1]]
x$default <- x$cols[types == most_common][[1]]
x$cols <- x$cols[types != most_common]
x
}
# Conditionally exported in zzz.R
# @export
format.col_spec <- function(x, n = Inf, condense = NULL, colour = crayon::has_color(), ...) {
if (n == 0) {
return("")
}
# condense if cols >= n
condense <- condense %||% (length(x$cols) >= n)
if (isTRUE(condense)) {
x <- cols_condense(x)
}
  # truncate to minimum of n or length
cols <- x$cols[seq_len(min(length(x$cols), n))]
default <- NULL
if (inherits(x$default, "collector_guess")) {
fun_type <- "cols"
} else if (inherits(x$default, "collector_skip")) {
fun_type <- "cols_only"
} else {
fun_type <- "cols"
type <- sub("^collector_", "", class(x$default)[[1]])
default <- paste0(".default = col_", type, "()")
}
delim <- x$delim
if (!is.null(delim) && nzchar(delim)) {
delim <- paste0('.delim = ', glue::double_quote(delim), '')
}
cols_args <- c(
default,
vapply(seq_along(cols),
function(i) {
col_funs <- sub("^collector_", "col_", class(cols[[i]])[[1]])
args <- vapply(cols[[i]], deparse2, character(1), sep = "\n ")
args <- paste(names(args), args, sep = " = ", collapse = ", ")
col_funs <- paste0(col_funs, "(", args, ")")
col_funs <- colourise_cols(col_funs, colour)
col_names <- names(cols)[[i]] %||% ""
# Need to handle unnamed columns and columns with non-syntactic names
named <- col_names != ""
non_syntactic <- !is_syntactic(col_names) & named
col_names[non_syntactic] <- paste0("`", gsub("`", "\\\\`", col_names[non_syntactic]), "`")
out <- paste0(col_names, " = ", col_funs)
out[!named] <- col_funs[!named]
out
},
character(1)
),
delim
)
if (length(x$cols) == 0 && length(cols_args) == 0) {
return(paste0(fun_type, "()\n"))
}
out <- paste0(fun_type, "(\n ", paste(collapse = ",\n ", cols_args))
if (length(x$cols) > n) {
out <- paste0(out, "\n # ... with ", length(x$cols) - n, " more columns")
}
out <- paste0(out, "\n)\n")
out
}
colourise_cols <- function(cols, colourise = crayon::has_color()) {
if (!isTRUE(colourise)) {
return(cols)
}
fname <- sub("[(].*", "", cols)
for (i in seq_along(cols)) {
cols[[i]] <- switch(fname,
col_skip = ,
col_guess = cols[[i]],
col_character = ,
col_factor = crayon::red(cols[[i]]),
col_logical = crayon::yellow(cols[[i]]),
col_double = ,
col_integer = ,
col_big_integer = ,
col_number = green(cols[[i]]),
col_date = ,
col_datetime = ,
col_time = blue(cols[[i]])
)
}
cols
}
# This allows str() on a tibble object to print a little nicer.
# Conditionally exported in zzz.R
# @export
str.col_spec <- function(object, ..., indent.str = "") {
# Split the formatted column spec into strings
specs <- strsplit(format(object), "\n")[[1]]
cat(sep = "",
"\n",
# Append the current indentation string to the specs
paste(indent.str, specs, collapse = "\n"),
"\n")
}
#' Examine the column specifications for a data frame
#'
#' `spec()` extracts the full column specification from a tibble
#' created by readr.
#'
#' @family parsers
#' @param x The data frame object to extract from
#' @return A col_spec object.
#' @export
#' @examples
#' df <- vroom(vroom_example("mtcars.csv"))
#' s <- spec(df)
#' s
#'
#' cols_condense(s)
spec <- function(x) {
stopifnot(inherits(x, "tbl_df"))
attr(x, "spec")
}
col_concise <- function(x) {
switch(x,
"_" = ,
"skip" =,
"NULL" =,
"-" = col_skip(),
"NA" = ,
"?" = col_guess(),
character =,
c = col_character(),
factor =,
f = col_factor(),
double =,
numeric =,
d = col_double(),
integer =,
i = col_integer(),
big_integer =,
I = col_big_integer(),
logical = ,
l = col_logical(),
number = ,
n = col_number(),
date = ,
Date = ,
D = col_date(),
datetime = ,
POSIXct = ,
T = col_datetime(),
time =,
t = col_time(),
stop("Unknown shortcut: ", x, call. = FALSE)
)
}
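# Illustrative mapping (a sketch): short and long forms resolve to the same
# collector objects used by cols(), e.g.
#   col_concise("i")        # col_integer()
#   col_concise("integer")  # col_integer()
#   col_concise("-")        # col_skip()
#   col_concise("?")        # col_guess()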
vroom_enquo <- function(x) {
if (quo_is_call(x, "c") || quo_is_call(x, "list")) {
return(as_quosures(get_expr(x)[-1], get_env(x)))
}
x
}
vroom_select <- function(x, col_select, id) {
spec <- attr(x, "spec")
# Drop any NULL columns
is_null <- vapply(x, is.null, logical(1))
x[is_null] <- NULL
# reorder and rename columns
if (inherits(col_select, "quosures") || !quo_is_null(col_select)) {
if (inherits(col_select, "quosures")) {
vars <- tidyselect::vars_select(c(names(spec(x)$cols), id), !!!col_select)
} else {
vars <- tidyselect::vars_select(c(names(spec(x)$cols), id), !!col_select)
}
if (!is.null(id) && !id %in% vars) {
names(id) <- id
vars <- c(id, vars)
}
# This can't be just names(x) as we need to have skipped
# names as well to pass to vars_select()
x <- x[vars]
names(x) <- names(vars)
}
attr(x, "spec") <- spec
x
}
col_types_standardise <- function(spec, num_cols, col_names, col_select, name_repair) {
if (num_cols == 0) {
if (length(spec$cols) > 0) {
num_cols <- length(spec$cols)
} else if (length(col_names) > 0) {
num_cols <- length(col_names)
}
}
if (length(col_names) == 0) {
col_names <- make_names(NULL, num_cols)
}
col_names <- vctrs::vec_as_names(col_names, repair = name_repair)
type_names <- names(spec$cols)
if (length(spec$cols) == 0) {
# no types specified so use defaults
spec$cols <- rep(list(spec$default), num_cols)
names(spec$cols) <- col_names[seq_along(spec$cols)]
} else if (is.null(type_names)) {
# unnamed types & names guessed from header: match exactly
if (num_cols < length(spec$cols)) {
spec$cols <- spec$cols[seq_len(num_cols)]
} else {
spec$cols <- c(spec$cols, rep(list(spec$default), num_cols - length(spec$cols)))
}
names(spec$cols) <- col_names[seq_along(spec$cols)]
} else {
# named types
if (num_cols > length(col_names)) {
col_names <- make_names(col_names, num_cols)
}
bad_types <- !(type_names %in% col_names)
if (any(bad_types)) {
warn(paste0("The following named parsers don't match the column names: ",
paste0(type_names[bad_types], collapse = ", ")), class = "vroom_mismatched_column_name")
spec$cols <- spec$cols[!bad_types]
type_names <- type_names[!bad_types]
}
default_types <- !(col_names %in% type_names)
if (any(default_types)) {
defaults <- rep(list(spec$default), sum(default_types))
names(defaults) <- col_names[default_types]
spec$cols[names(defaults)] <- defaults
}
spec$cols <- spec$cols[col_names]
}
if (inherits(col_select, "quosures") || !quo_is_null(col_select)) {
if (inherits(col_select, "quosures")) {
to_keep <- names(spec$cols) %in% tidyselect::vars_select(names(spec$cols), !!!col_select, .strict = FALSE)
} else {
to_keep <- names(spec$cols) %in% tidyselect::vars_select(names(spec$cols), !!col_select, .strict = FALSE)
}
spec$cols[!to_keep] <- rep(list(col_skip()), sum(!to_keep))
}
# Set the names, ignoring skipped columns
kept <- !vapply(spec$cols, inherits, logical(1), "collector_skip")
# Fill the column names if they are shorter than what is kept.
if (length(col_names) == length(spec$cols)) {
names(spec$cols)[kept] <- col_names[kept]
} else if (length(col_names) == sum(kept)) {
names(spec$cols)[kept] <- col_names
} else {
col_names <- make_names(col_names, sum(kept))
names(spec$cols)[kept] <- col_names
}
spec
}
#' Guess the type of a vector
#'
#' @inheritParams readr::guess_parser
#' @examples
#' # Logical vectors
#' guess_type(c("FALSE", "TRUE", "F", "T"))
#' # Integers and doubles
#' guess_type(c("1","2","3"))
#' guess_type(c("1.6","2.6","3.4"))
#' # Numbers containing grouping mark
#' guess_type("1,234,566")
#' # ISO 8601 date times
#' guess_type(c("2010-10-10"))
#' guess_type(c("2010-10-10 01:02:03"))
#' guess_type(c("01:02:03 AM"))
#' @export
guess_type <- function(x, na = c("", "NA"), locale = default_locale(), guess_integer = FALSE) {
type <- guess_type_(x, na = na, locale = locale, guess_integer = guess_integer)
get(paste0("col_", type), asNamespace("vroom"))()
}
guess_parser <- function(x, na = c("", "NA"), locale = default_locale(), guess_integer = FALSE) {
guess_type_(x, na = na, locale = locale, guess_integer = guess_integer)
}
show_dims <- function(x) {
cli_block(class = "vroom_dim_message", {
cli::cli_text("
{.strong Rows: }{.val {NROW(x)}}
{.strong Columns: }{.val {NCOL(x)}}
")
})
}
collector_value <- function(x, ...) {
UseMethod("collector_value")
}
#' @export
collector_value.collector_character <- function(x, ...) { character() }
#' @export
collector_value.collector_double <- function(x, ...) { numeric() }
#' @export
collector_value.collector_integer <- function(x, ...) { integer() }
#' @export
collector_value.collector_numeric <- function(x, ...) { numeric() }
#' @export
collector_value.collector_logical <- function(x, ...) { logical() }
#' @export
collector_value.collector_factor <- function(x, ...) { factor() }
# the more obvious as.POSIXct(double()) doesn't work on R < 4.0
# https://github.com/tidyverse/vroom/issues/453
#' @export
collector_value.collector_datetime <- function(x, ...) { vctrs::vec_ptype(Sys.time()) }
# the more obvious as.Date(double()) doesn't work on R < 4.0
# and again: https://github.com/tidyverse/vroom/issues/453
#' @export
collector_value.collector_date <- function(x, ...) { vctrs::vec_ptype(Sys.Date()) }
#' @export
collector_value.collector_time <- function(x, ...) { hms::hms() }
#' @export
collector_value.collector_guess <- function(x, ...) { character() }
#' @export
summary.col_spec <- function(object, width = getOption("width"), locale = default_locale(), ...) {
if (length(object$cols) == 0) {
return(invisible(object))
}
type_map <- c("collector_character" = "chr", "collector_double" = "dbl",
"collector_integer" = "int", "collector_number" = "num", "collector_logical" = "lgl",
"collector_factor" = "fct", "collector_datetime" = "dttm", "collector_date" = "date",
"collector_time" = "time",
"collector_guess" = "???")
col_types <- vapply(object$cols, function(x) class(x)[[1]], character(1))
col_types <- droplevels(factor(type_map[col_types], levels = unname(type_map)))
type_counts <- table(col_types)
n <- length(type_counts)
types <- format(vapply(names(type_counts), color_type, character(1)))
counts <- format(glue::glue("({type_counts})"), justify = "right")
col_width <- min(width - (crayon::col_nchar(types) + nchar(counts) + 4))
columns <- vapply(split(names(object$cols), col_types), function(x) glue::glue_collapse(x, ", ", width = col_width), character(1))
fmt_num <- function(x) {
prettyNum(x, big.mark = locale$grouping_mark, decimal.mark = locale$decimal_mark)
}
delim <- object$delim %||% ""
txt <- glue::glue(
.transformer = collapse_transformer(sep = "\n"),
entries = glue::glue("{format(types)} {counts}: {columns}"),
'
{if (nzchar(delim)) paste(bold("Delimiter:"), glue::double_quote(delim)) else ""}
{entries*}
')
cli_block(class = "vroom_spec_message", {
cli::cli_h1("Column specification")
cli::cli_verbatim(txt)
})
invisible(object)
}
show_col_types <- function(x, locale) {
show_dims(x)
summary(spec(x), locale = locale)
cli_block(class = "vroom_spec_message", {
cli::cli_verbatim("\n\n")
cli::cli_alert_info("Use {.fn spec} to retrieve the full column specification for this data.")
cli::cli_alert_info("Specify the column types or set {.arg show_col_types = FALSE} to quiet this message.")
})
}
cli_block <- function(expr, class = NULL, type = rlang::inform) {
msg <- ""
withCallingHandlers(
expr,
message = function(x) {
msg <<- paste0(msg, x$message)
invokeRestart("muffleMessage")
}
)
msg <- sub("^\n", "", msg)
msg <- sub("\n+$", "", msg)
type(msg, class = class)
}
color_type <- function(type) {
switch(type,
chr = ,
fct = crayon::red(type),
lgl = crayon::yellow(type),
dbl = ,
int = ,
num = green(type),
date = ,
dttm = ,
time = blue(type),
"???" = type
)
}
#' @rdname cols
#' @export
col_logical <- function(...) {
collector("logical", ...)
}
#' @rdname cols
#' @export
col_integer <- function(...) {
collector("integer", ...)
}
#' @rdname cols
#' @export
col_big_integer <- function(...) {
collector("big_integer", ...)
}
#' @rdname cols
#' @export
col_double <- function(...) {
collector("double", ...)
}
#' @rdname cols
#' @export
col_character <- function(...) {
collector("character", ...)
}
#' @rdname cols
#' @export
col_skip <- function(...) {
collector("skip", ...)
}
#' @rdname cols
#' @export
col_number <- function(...) {
collector("number", ...)
}
#' @rdname cols
#' @export
col_guess <- function(...) {
collector("guess", ...)
}
#' @inheritParams readr::col_factor
#' @rdname cols
#' @export
col_factor <- function(levels = NULL, ordered = FALSE, include_na = FALSE, ...) {
collector("factor", levels = levels, ordered = ordered, include_na = include_na, ...)
}
#' @inheritParams readr::col_datetime
#' @rdname cols
#' @export
col_datetime <- function(format = "", ...) {
collector("datetime", format = format, ...)
}
#' @rdname cols
#' @export
col_date <- function(format = "", ...) {
collector("date", format = format, ...)
}
#' @rdname cols
#' @export
col_time <- function(format = "", ...) {
collector("time", format = format, ...)
}
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/col_types.R
|
collector <- function(type, ...) {
structure(list(...), class = c(paste0("collector_", type), "collector"))
}
is.collector <- function(x) inherits(x, "collector")
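# Illustrative sketch: the col_*() constructors in col_types.R are thin wrappers
# around collector(), e.g.
#   collector("double")              # object of class c("collector_double", "collector")
#   is.collector(collector("skip"))  # TRUE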
# Conditionally exported in zzz.R
# @export
print.collector <- function(x, ...) {
cat("<", class(x)[1], ">\n", sep = "")
}
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/collector.R
|
# Generated by cpp11: do not edit by hand
force_materialization <- function(x) {
invisible(.Call(`_vroom_force_materialization`, x))
}
vroom_materialize <- function(x, replace) {
.Call(`_vroom_vroom_materialize`, x, replace)
}
vroom_convert <- function(x) {
.Call(`_vroom_vroom_convert`, x)
}
vroom_str_ <- function(x) {
.Call(`_vroom_vroom_str_`, x)
}
gen_character_ <- function(n, min, max, values, seed, seed2) {
.Call(`_vroom_gen_character_`, n, min, max, values, seed, seed2)
}
guess_type_ <- function(input, na, locale, guess_integer) {
.Call(`_vroom_guess_type_`, input, na, locale, guess_integer)
}
convert_connection <- function(in_con, out_con, from, to) {
.Call(`_vroom_convert_connection`, in_con, out_con, from, to)
}
vroom_ <- function(inputs, delim, quote, trim_ws, escape_double, escape_backslash, comment, skip_empty_rows, skip, n_max, progress, col_names, col_types, col_select, name_repair, id, na, locale, guess_max, num_threads, altrep) {
.Call(`_vroom_vroom_`, inputs, delim, quote, trim_ws, escape_double, escape_backslash, comment, skip_empty_rows, skip, n_max, progress, col_names, col_types, col_select, name_repair, id, na, locale, guess_max, num_threads, altrep)
}
has_trailing_newline <- function(filename) {
.Call(`_vroom_has_trailing_newline`, filename)
}
vroom_rle <- function(input) {
.Call(`_vroom_vroom_rle`, input)
}
utctime_ <- function(year, month, day, hour, min, sec, psec) {
.Call(`_vroom_utctime_`, year, month, day, hour, min, sec, psec)
}
vroom_errors_ <- function(errors) {
.Call(`_vroom_vroom_errors_`, errors)
}
vroom_fwf_ <- function(inputs, col_starts, col_ends, trim_ws, col_names, col_types, col_select, name_repair, skip, comment, skip_empty_rows, n_max, id, na, locale, guess_max, num_threads, altrep, progress) {
.Call(`_vroom_vroom_fwf_`, inputs, col_starts, col_ends, trim_ws, col_names, col_types, col_select, name_repair, skip, comment, skip_empty_rows, n_max, id, na, locale, guess_max, num_threads, altrep, progress)
}
whitespace_columns_ <- function(filename, skip, n, comment) {
.Call(`_vroom_whitespace_columns_`, filename, skip, n, comment)
}
vroom_write_ <- function(input, filename, delim, eol, na_str, col_names, append, options, num_threads, progress, buf_lines) {
invisible(.Call(`_vroom_vroom_write_`, input, filename, delim, eol, na_str, col_names, append, options, num_threads, progress, buf_lines))
}
vroom_write_connection_ <- function(input, con, delim, eol, na_str, col_names, options, num_threads, progress, buf_lines, is_stdout, append) {
invisible(.Call(`_vroom_vroom_write_connection_`, input, con, delim, eol, na_str, col_names, options, num_threads, progress, buf_lines, is_stdout, append))
}
vroom_format_ <- function(input, delim, eol, na_str, col_names, append, options, num_threads, progress, buf_lines) {
.Call(`_vroom_vroom_format_`, input, delim, eol, na_str, col_names, append, options, num_threads, progress, buf_lines)
}
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/cpp11.R
|
#' Create or retrieve date names
#'
#' When parsing dates, you often need to know how days of the week and
#' months are represented as text. This pair of functions allows you to either
#' create your own, or retrieve from a standard list. The standard list is
#' derived from ICU (`https://site.icu-project.org`) via the _stringi_ package.
#'
#' @param mon,mon_ab Full and abbreviated month names.
#' @param day,day_ab Full and abbreviated week day names. Starts with Sunday.
#' @param am_pm Names used for AM and PM.
#' @export
#' @examples
#' date_names_lang("en")
#' date_names_lang("ko")
#' date_names_lang("fr")
date_names <- function(mon, mon_ab = mon, day, day_ab = day,
am_pm = c("AM", "PM")) {
stopifnot(is.character(mon), length(mon) == 12)
stopifnot(is.character(mon_ab), length(mon_ab) == 12)
stopifnot(is.character(day), length(day) == 7)
stopifnot(is.character(day_ab), length(day_ab) == 7)
structure(
list(
mon = enc2utf8(mon),
mon_ab = enc2utf8(mon_ab),
day = enc2utf8(day),
day_ab = enc2utf8(day_ab),
am_pm = enc2utf8(am_pm)
),
class = "date_names"
)
}
#' @export
#' @rdname date_names
#' @param language A BCP 47 locale, made up of a language and a region,
#' e.g. `"en_US"` for American English. See `date_names_langs()`
#' for a complete list of available locales.
date_names_lang <- function(language) {
stopifnot(is.character(language), length(language) == 1)
symbols <- date_symbols[[language]]
if (is.null(symbols)) {
stop("Unknown language '", language, "'", call. = FALSE)
}
symbols
}
#' @export
#' @rdname date_names
date_names_langs <- function() {
names(date_symbols)
}
# Conditionally exported in zzz.R
# @export
print.date_names <- function(x, ...) {
cat("<date_names>\n")
if (identical(x$day, x$day_ab)) {
day <- paste0(x$day, collapse = ", ")
} else {
day <- paste0(x$day, " (", x$day_ab, ")", collapse = ", ")
}
if (identical(x$mon, x$mon_ab)) {
mon <- paste0(x$mon, collapse = ", ")
} else {
mon <- paste0(x$mon, " (", x$mon_ab, ")", collapse = ", ")
}
am_pm <- paste0(x$am_pm, collapse = "/")
cat_wrap("Days: ", day)
cat_wrap("Months: ", mon)
cat_wrap("AM/PM: ", am_pm)
}
is.date_names <- function(x) inherits(x, "date_names")
cat_wrap <- function(header, body) {
body <- strwrap(body, exdent = nchar(header))
cat(header, paste(body, collapse = "\n"), "\n", sep = "")
}
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/date.R
|
#' Get path to vroom examples
#'
#' vroom comes bundled with a number of sample files in
#' its 'inst/extdata' directory. Use `vroom_examples()` to list all the
#' available examples and `vroom_example()` to retrieve the path to one
#' example.
#' @param path Name of file.
#' @param pattern A regular expression of filenames to match. If `NULL`, all
#' available files are returned.
#' @export
#' @examples
#' # List all available examples
#' vroom_examples()
#'
#' # Get path to one example
#' vroom_example("mtcars.csv")
vroom_example <- function (path) {
system.file("extdata", path, package = "vroom", mustWork = TRUE)
}
#' @rdname vroom_example
#' @export
vroom_examples <- function (pattern = NULL) {
list.files(system.file("extdata", package = "vroom"), pattern = pattern)
}
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/example.R
|
#' Generate individual vectors of the types supported by vroom
#'
#' @param n The size of the vector to generate
#' @param min The minimum range for the vector
#' @param max The maximum range for the vector
#' @param values The explicit values to use.
#' @param f The random function to use.
#' @inheritParams base::sample.int
#' @param ... Additional arguments passed to internal generation functions
#' @name generators
#' @examples
#' # characters
#' gen_character(4)
#'
#' # factors
#' gen_factor(4)
#'
#' # logical
#' gen_logical(4)
#'
#' # numbers
#' gen_double(4)
#' gen_integer(4)
#'
#' # temporal data
#' gen_time(4)
#' gen_date(4)
#' gen_datetime(4)
#' @export
gen_character <- function(n, min = 5, max = 25, values = c(letters, LETTERS, 0:9), ...) {
if (min > max) {
max <- min
}
# The seed for the C++ RNG used is an unsigned 32 bit integer, which is why I
# multiply int max by 2. Possibly an off by one error here though...
seeds <- sample.int(2 * .Machine$integer.max, 2)
gen_character_(n, min, max, paste(values, collapse = ""), seeds[[1]], seeds[[2]])
}
#' @rdname generators
#' @export
gen_double <- function(n, f = stats::rnorm, ...) {
f(n, ...)
}
#' @rdname generators
#' @export
gen_number <- gen_double
#' @rdname generators
#' @export
gen_integer <- function(n, min = 1L, max = .Machine$integer.max, prob = NULL, ...) {
max <- max - min + 1L
sample.int(max, size = n, replace = TRUE, prob = prob) + min - 1L
}
#' @rdname generators
#' @param num_levels The number of factor levels to generate
#' @param ordered Should the factors be ordered factors?
#' @param levels The explicit levels to use, if `NULL` random levels are generated using [gen_name()].
#' @export
gen_factor <- function(n, levels = NULL, ordered = FALSE, num_levels = gen_integer(1L, 1L, 25L), ...) {
if (is.null(levels)) {
levels <- gen_name(num_levels)
}
res <- gen_integer(n, max = length(levels), ...)
attr(res, "levels") <- levels
if (ordered) {
class(res) <- c("ordered", "factor")
} else {
class(res) <- "factor"
}
res
}
#' @rdname generators
#' @param fractional Whether to generate times with fractional seconds
#' @export
gen_time <- function(n, min = 0, max = hms::hms(days = 1), fractional = FALSE, ...) {
res <- hms::hms(seconds = stats::runif(n, min = min, max = max))
if (!fractional) {
res <- hms::as_hms(floor(res))
}
res
}
#' @rdname generators
#' @export
gen_date <- function(n, min = as.Date("2001-01-01"), max = as.Date("2021-01-01"), ...) {
structure(as.numeric(gen_integer(n, min = min, max = max)), class = "Date")
}
#' @rdname generators
#' @param tz The timezone to use for dates
#' @export
gen_datetime <- function(n, min = as.POSIXct("2001-01-01"), max = as.POSIXct("2021-01-01"), tz = "UTC", ...) {
structure(stats::runif(n, min = min, max = max), class = c("POSIXct", "POSIXt"), tzone = tz)
}
#' @rdname generators
#' @export
gen_logical <- function(n, ...) {
c(TRUE, FALSE)[sample.int(n = 2, n, replace = TRUE)]
}
all_col_types <- tibble::tribble(
~ type, ~ class,
"character", "character",
"factor", "character",
"double", "numeric",
"integer", "numeric",
"number", "numeric",
"date", "temporal",
"datetime", "temporal",
"time", "temporal",
)
#' Generate a random tibble
#'
#' This is useful for benchmarking, but also for bug reports when you cannot
#' share the real dataset.
#'
#' There is also a family of functions to generate individual vectors of each
#' type.
#'
#' @param rows Number of rows to generate
#' @param cols Number of columns to generate, if `NULL` this is derived from `col_types`.
#' @param missing The percentage (from 0 to 1) of missing data to use
#' @seealso [generators] to generate individual vectors.
#' @inheritParams vroom
#' @examples
#' # random 10 x 5 table with random column types
#' rand_tbl <- gen_tbl(10, 5)
#' rand_tbl
#'
#' # all double 25 x 4 table
#' dbl_tbl <- gen_tbl(25, 4, col_types = "dddd")
#' dbl_tbl
#'
#' # Use the dots in long form column types to change the random function and options
#' types <- rep(times = 4, list(col_double(f = stats::runif, min = -10, max = 25)))
#' types
#' dbl_tbl2 <- gen_tbl(25, 4, col_types = types)
#' dbl_tbl2
#' @export
gen_tbl <- function(rows, cols = NULL, col_types = NULL, locale = default_locale(), missing = 0) {
if (is.null(cols) && is.null(col_types)) {
stop("One of `cols` or `col_types` must be set", call. = FALSE)
}
spec <- as.col_spec(col_types)
if (is.null(cols)) {
cols <- length(spec$cols)
}
nms <- make_names(names(spec$cols), cols)
specs <- col_types_standardise(spec, length(nms), nms, vroom_enquo(quo(NULL)), "unique")
res <- vector("list", cols)
for (i in seq_len(cols)) {
type <- sub("collector_", "", class(specs$cols[[i]])[[1]])
if (type == "guess") {
type <- sample(all_col_types[["type"]], 1)
specs$cols[[i]] <- do.call(paste0("col_", type), list())
}
fun_nme <- paste0("gen_", type)
res[[i]] <- do.call(fun_nme, c(rows, specs$cols[[i]]))
}
if (missing > 0) {
res[] <- lapply(res, function(x) {
x[sample(c(TRUE, FALSE), size = rows, prob = c(missing, 1 - missing), replace = TRUE)] <- NA
x
})
}
names(res) <- nms
attr(res, "spec") <- specs
tibble::as_tibble(res)
}
# Name and adjective list from https://github.com/rstudio/cranwhales/blob/93349fe1bc790f115a3d56660b6b99ffe258d9a2/random-names.R
#' @rdname generators
#' @export
gen_name <- local({
# This will run during build / installation, but that is OK
adjectives <- readLines(system.file("words", "adjectives.txt", package = "vroom"))
animals <- readLines(system.file("words", "animals.txt", package = "vroom"))
function(n) {
paste0(sample(adjectives, n, replace = TRUE), "_", sample(animals, n, replace = TRUE))
}
})
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/generator.R
|
#' Create locales
#'
#' A locale object tries to capture all the defaults that can vary between
#' countries. You set the locale in once, and the details are automatically
#' passed on down to the columns parsers. The defaults have been chosen to
#' match R (i.e. US English) as closely as possible. See
#' `vignette("locales")` for more details.
#'
#' @param date_names Character representations of day and month names. Either
#' the language code as string (passed on to [date_names_lang()])
#' or an object created by [date_names()].
#' @param date_format,time_format Default date and time formats.
#' @param decimal_mark,grouping_mark Symbols used to indicate the decimal
#' place, and to chunk larger numbers. Decimal mark can only be `,` or
#' `.`.
#' @param tz Default tz. This is used both for input (if the time zone isn't
#' present in individual strings), and for output (to control the default
#' display). The default is to use "UTC", a time zone that does not use
#' daylight savings time (DST) and hence is typically most useful for data.
#' The absence of time zones makes it approximately 50x faster to generate
#' UTC times than any other time zone.
#'
#' Use `""` to use the system default time zone, but beware that this
#' will not be reproducible across systems.
#'
#' For a complete list of possible time zones, see [OlsonNames()].
#' Americans, note that "EST" is a Canadian time zone that does not have
#' DST. It is *not* Eastern Standard Time. It's better to use
#' "US/Eastern", "US/Central" etc.
#' @param encoding Default encoding.
#' @export
#' @examples
#' locale()
#' locale("fr")
#'
#' # South American locale
#' locale("es", decimal_mark = ",")
locale <- function(date_names = "en",
date_format = "%AD", time_format = "%AT",
decimal_mark = ".", grouping_mark = ",",
tz = "UTC", encoding = "UTF-8") {
if (is.character(date_names)) {
date_names <- date_names_lang(date_names)
}
stopifnot(is.date_names(date_names))
if (missing(grouping_mark) && !missing(decimal_mark)) {
grouping_mark <- if (decimal_mark == ".") "," else "."
} else if (missing(decimal_mark) && !missing(grouping_mark)) {
decimal_mark <- if (grouping_mark == ".") "," else "."
}
stopifnot(is.character(decimal_mark), length(decimal_mark) == 1)
stopifnot(is.character(grouping_mark), length(grouping_mark) == 1)
if (decimal_mark == grouping_mark) {
stop("`decimal_mark` and `grouping_mark` must be different", call. = FALSE)
}
tz <- check_tz(tz)
check_encoding(encoding)
structure(
list(
date_names = date_names,
date_format = date_format,
time_format = time_format,
decimal_mark = decimal_mark,
grouping_mark = grouping_mark,
tz = tz,
encoding = encoding
),
class = "locale"
)
}
is.locale <- function(x) inherits(x, "locale")
# Conditionally exported in zzz.R
# @export
print.locale <- function(x, ...) {
cat("<locale>\n")
cat("Numbers: ", prettyNum(123456.78, big.mark = x$grouping_mark,
decimal.mark = x$decimal_mark, digits = 8), "\n", sep = "")
cat("Formats: ", x$date_format, " / ", x$time_format, "\n", sep = "")
cat("Timezone: ", x$tz, "\n", sep = "")
cat("Encoding: ", x$encoding, "\n", sep = "")
print(x$date_names)
}
#' @export
#' @rdname locale
default_locale <- function() {
loc <- getOption("vroom.default_locale")
if (is.null(loc)) {
loc <- locale()
options("vroom.default_locale" = loc)
}
loc
}
check_tz <- function(x) {
stopifnot(is.character(x), length(x) == 1)
if (identical(x, "")) {
x <- Sys.timezone()
if (identical(x, "") || identical(x, NA_character_)) {
x <- "UTC"
}
}
if (x %in% tzdb::tzdb_names()) {
x
} else {
stop("Unknown TZ ", x, call. = FALSE)
}
}
check_encoding <- function(x) {
stopifnot(is.character(x), length(x) == 1)
if (tolower(x) %in% tolower(iconvlist()))
return(TRUE)
stop("Unknown encoding ", x, call. = FALSE)
}
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/locale.R
|
is_ascii_compatible <- function(encoding) {
identical(iconv(list(charToRaw("\n")), from = "ASCII", to = encoding, toRaw = TRUE)[[1]], charToRaw("\n"))
}
# this is about the encoding of the file (contents), not the filepath
reencode_file <- function(path, encoding) {
if (length(path) > 1) {
stop(sprintf("Reading files of encoding '%s' can only be done for single files at a time", encoding), call. = FALSE)
}
if (inherits(path[[1]], "connection")) {
in_con <- path[[1]]
} else {
in_con <- file(path[[1]])
}
out_file <- tempfile()
out_con <- file(out_file)
convert_connection(in_con, out_con, encoding, "UTF-8")
withr::defer(unlink(out_file), envir = parent.frame())
return(list(out_file))
}
# These functions adapted from https://github.com/tidyverse/readr/blob/192cb1ca5c445e359f153d2259391e6d324fd0a2/R/source.R
standardise_path <- function(path,
arg = caller_arg(path),
call = caller_env(),
user_env = caller_env(2)) {
if (is.raw(path)) {
return(list(rawConnection(path, "rb")))
}
if (inherits(path, "connection")) {
return(list(standardise_connection(path)))
}
if (is_list(path)) {
is_connection <- vapply(path, function(x) inherits(x, "connection"), logical(1))
if (all(is_connection)) {
return(lapply(path, standardise_connection))
}
if (any(is_connection)) {
cli::cli_abort(
"{.arg {arg}} cannot be a mix of connection and non-connection inputs",
call = call
)
}
}
if (!is.character(path)) {
cli::cli_abort(
c(
"{.arg {arg}} is not one of the supported inputs:",
"*" = "A filepath or character vector of filepaths",
"*" = "A connection or list of connections",
"*" = "Literal or raw input"
),
call = call
)
}
if (inherits(path, "AsIs")) {
if (length(path) > 1) {
path <- paste(path, collapse = "\n")
}
return(list(chr_to_file(path, envir = parent.frame())))
}
if (any(grepl("\n", path))) {
lifecycle::deprecate_soft(
"1.5.0",
"vroom(file = 'must use `I()` for literal data')",
details = c(
" " = "",
" " = "# Bad:",
" " = 'vroom("X,Y\\n1.5,2.3\\n")',
" " = "",
" " = "# Good:",
" " = 'vroom(I("X,Y\\n1.5,2.3\\n"))'
),
user_env = user_env
)
return(list(chr_to_file(path, envir = parent.frame())))
}
as.list(enc2utf8(path))
}
standardise_connection <- function(con) {
# If the connection is `stdin()`, change it to `file("stdin")`, as we don't
# support text mode connections.
if (con == stdin()) {
return(file("stdin"))
}
con
}
standardise_one_path <- function (path, write = FALSE) {
if (is.raw(path)) {
return(rawConnection(path, "rb"))
}
if (!is.character(path)) {
return(path)
}
if (is_url(path)) {
if (requireNamespace("curl", quietly = TRUE)) {
con <- curl::curl(path)
} else {
inform("`curl` package not installed, falling back to using `url()`")
con <- url(path)
}
ext <- tolower(tools::file_ext(path))
return(
switch(ext,
zip = ,
bz2 = ,
xz = {
close(con)
stop("Reading from remote `", ext, "` compressed files is not supported,\n",
" download the files locally first.", call. = FALSE)
},
gz = gzcon(con),
con
)
)
}
path <- enc2utf8(path)
p <- split_path_ext(basename_utf8(path))
if (write) {
path <- normalizePath_utf8(path, mustWork = FALSE)
} else {
path <- check_path(path)
}
if (is_installed("archive")) {
formats <- archive_formats(p$extension)
extension <- p$extension
while(is.null(formats) && nzchar(extension)) {
extension <- split_path_ext(extension)$extension
formats <- archive_formats(extension)
}
if (!is.null(formats)) {
p$extension <- extension
if (write) {
if (is.null(formats[[1]])) {
return(archive::file_write(path, filter = formats[[2]]))
}
return(archive::archive_write(path, p$path, format = formats[[1]], filter = formats[[2]]))
}
if (is.null(formats[[1]])) {
return(archive::file_read(path, filter = formats[[2]]))
}
return(archive::archive_read(path, format = formats[[1]], filter = formats[[2]]))
}
}
if (!write) {
compression <- detect_compression(path)
} else {
compression <- NA
}
if (is.na(compression)) {
compression <- tools::file_ext(path)
}
if (write && compression == "zip") {
stop("Can only read from, not write to, .zip", call. = FALSE)
}
switch(compression,
gz = gzfile(path, ""),
bz2 = bzfile(path, ""),
xz = xzfile(path, ""),
zip = zipfile(path, ""),
if (!has_trailing_newline(path)) {
file(path)
} else {
path
}
)
}
split_path_ext <- function(path) {
regex <- "^([^.]*)[.](.*)"
res <- regexpr(regex, path, perl = TRUE)
if (length(res) == 0 || res == -1) {
return(list(path = path, extension = ""))
}
starts <- attr(res, "capture.start")[1, ]
lengths <- attr(res, "capture.length")[, ]
list(
path = substr(path, starts[[1]], starts[[1]] + lengths[[1]] - 1),
extension = substr(path, starts[[2]], starts[[2]] + lengths[[2]] - 1)
)
}
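# Illustrative behaviour (a sketch): the extension captures everything after the
# first dot, so multi-part extensions survive for the archive format lookup below.
#   split_path_ext("mtcars.csv")     # list(path = "mtcars", extension = "csv")
#   split_path_ext("mtcars.tar.gz")  # list(path = "mtcars", extension = "tar.gz")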
# Adapted from archive:::format_and_filter_by_extension
# https://github.com/r-lib/archive/blob/125f9930798dc20fa12cda30319ca3e9a134a409/R/archive.R#L73
archive_formats <- function(ext) {
switch(ext,
"7z" = list("7zip", "none"),
"cpio" = list("cpio", "none"),
"iso" = list("iso9660", "none"),
"mtree" = list("mtree", "none"),
"tar" = list("tar", "none"),
"tgz" = list("tar", "gzip"),
"taz" = list("tar", "gzip"),
"tar.gz" = list("tar", "gzip"),
"tbz" = list("tar", "bzip2"),
"tbz2" = list("tar", "bzip2"),
"tz2" = list("tar", "bzip2"),
"tar.bz2" = list("tar", "bzip2"),
"tlz" = list("tar", "lzma"),
"tar.lzma" = list("tar", "lzma"),
"txz" = list("tar", "xz"),
"tar.xz" = list("tar", "xz"),
"tzo" = list("tar", "lzop"),
"taZ" = list("tar", "compress"),
"tZ" = list("tar", "compress"),
"tar.zst"= list("tar", "zstd"),
"warc" = list("warc", "none"),
"jar" = list("zip", "none"),
"zip" = list("zip", "none"),
"Z" = list(NULL, "compress"),
"zst" = list(NULL, "zst"),
NULL)
}
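# Illustrative sketch: archive_formats("tar.gz") returns list("tar", "gzip"),
# archive_formats("zst") returns list(NULL, "zst") (filter only), and unknown
# extensions return NULL, which makes standardise_one_path() peel the extension
# and retry.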
is_url <- function(path) {
grepl("^((http|ftp)s?|sftp)://", path)
}
check_path <- function(path) {
if (file.exists(path)) {
return(normalizePath_utf8(path, mustWork = FALSE))
}
stop("'", path, "' does not exist",
if (!is_absolute_path(path)) {
paste0(" in current working directory ('", getwd(), "')")
},
".",
call. = FALSE
)
}
is_absolute_path <- function(path) {
grepl("^(/|[A-Za-z]:|\\\\|~)", path)
}
zipfile <- function(path, open = "r") {
files <- utils::unzip(path, list = TRUE)
file <- files$Name[[1]]
if (nrow(files) > 1) {
inform(paste0("Multiple files in zip: reading '", file, "'"))
}
unz(path, file, open = open)
}
utils::globalVariables("con")
chr_to_file <- function(x, envir = parent.frame()) {
out <- tempfile()
con <- file(out, "wb")
writeLines(sub("\n$", "", x), con, useBytes = TRUE)
close(con)
withr::defer(unlink(out), envir = envir)
normalizePath_utf8(out)
}
detect_compression <- function(path) {
bytes <- readBin(path, "raw", n = 6)
if (length(bytes) >= 2 && bytes[[1]] == 0x1f && bytes[[2]] == 0x8b) {
return("gz")
}
if (length(bytes) >= 6 &&
bytes[[1]] == 0xFD &&
bytes[[2]] == 0x37 &&
bytes[[3]] == 0x7A &&
bytes[[4]] == 0x58 &&
bytes[[5]] == 0x5A &&
bytes[[6]] == 0x00) {
return("xz")
}
if (length(bytes) >= 3 &&
bytes[[1]] == 0x42 &&
bytes[[2]] == 0x5a &&
bytes[[3]] == 0x68) {
return("bz2")
}
# normal zip
if (length(bytes) >= 4 &&
bytes[[1]] == 0x50 &&
bytes[[2]] == 0x4B &&
bytes[[3]] == 0x03 &&
bytes[[4]] == 0x04) {
return("zip")
}
# empty zip
if (length(bytes) >= 4 &&
bytes[[1]] == 0x50 &&
bytes[[2]] == 0x4B &&
bytes[[3]] == 0x05 &&
bytes[[4]] == 0x06) {
return("zip")
}
# spanned zip
if (length(bytes) >= 4 &&
bytes[[1]] == 0x50 &&
bytes[[2]] == 0x4B &&
bytes[[3]] == 0x07 &&
bytes[[4]] == 0x08) {
return("zip")
}
NA_character_
}
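# Illustrative sketch of the magic numbers checked above:
#   gz : 1f 8b    xz : fd 37 7a 58 5a 00    bz2 : 42 5a 68 ("BZh")    zip : 50 4b ...
# e.g. detect_compression(vroom_example("mtcars.csv.gz")) should return "gz",
# assuming that compressed example file ships with the package.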
basename_utf8 <- function(path) {
enc2utf8(basename(path))
}
normalizePath_utf8 <- function(path, winslash = "/", mustWork = NA) {
enc2utf8(normalizePath(path, winslash = winslash, mustWork = mustWork))
}
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/path.R
|
#' Retrieve parsing problems
#'
#' vroom will only fail to parse a file if the file is invalid in a way that is
#' unrecoverable. However there are a number of non-fatal problems that you
#' might want to know about. You can retrieve a data frame of these problems
#' with this function.
#'
#' @param x A data frame from `vroom::vroom()`.
#' @param lazy If `TRUE`, just the problems found so far are returned. If
#' `FALSE` (the default) the lazy data is first read completely and all
#' problems are returned.
#' @return A data frame with one row for each problem and four columns:
#' - row,col - Row and column number that caused the problem, referencing the
#' original input
#' - expected - What vroom expected to find
#' - actual - What it actually found
#' - file - The file with the problem
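#'
#' @examples
#' \dontrun{
#' # A sketch (the literal data below is made up): a value that cannot be parsed
#' # as a double is reported as a problem.
#' df <- vroom(I("x,y\n1,2\n3,oops\n"), delim = ",", col_types = "dd")
#' problems(df)
#' }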
#' @export
problems <- function(x = .Last.value, lazy = FALSE) {
if(!inherits(x, "tbl_df")) {
cli::cli_abort(c(
"The {.arg x} argument of {.fun vroom::problems} must be a data frame created by vroom:",
x = "{.arg x} has class {.cls {class(x)}}"
))
}
if (!isTRUE(lazy)) {
vroom_materialize(x, replace = FALSE)
}
probs <- attr(x, "problems")
if (typeof(probs) != "externalptr") {
cli::cli_abort(c(
"The {.arg x} argument of {.fun vroom::problems} must be a data frame created by vroom:",
x = "{.arg x} seems to have been created with something else, maybe readr?"
))
}
probs <- vroom_errors_(probs)
probs <- probs[!duplicated(probs), ]
probs <- probs[order(probs$file, probs$row, probs$col), ]
tibble::as_tibble(probs)
}
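# Minimal sketch of the workflow above, guarded so it never runs at load time:
# literal data with a value that cannot be parsed as a double produces one
# retrievable problem row.
if (FALSE) {
  x <- vroom(I("a,b\n1,2\n3,oops\n"), delim = ",", col_types = "dd")
  problems(x)  # one row: a double was expected, "oops" was found
}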
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/problems.R
|
#' @aliases select_helpers
#' @importFrom tidyselect contains
#' @export
tidyselect::contains
#' @importFrom tidyselect ends_with
#' @export
tidyselect::ends_with
#' @importFrom tidyselect everything
#' @export
tidyselect::everything
#' @importFrom tidyselect matches
#' @export
tidyselect::matches
#' @importFrom tidyselect num_range
#' @export
tidyselect::num_range
#' @importFrom tidyselect one_of
#' @export
tidyselect::one_of
#' @importFrom tidyselect starts_with
#' @export
tidyselect::starts_with
#' @importFrom tidyselect last_col
#' @export
tidyselect::last_col
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/tidyselect.R
|
env_to_logical <- function (var, default = TRUE) {
res <- Sys.getenv(var, default)
if (res %in% c("1", "yes", "true")) {
TRUE
} else if (res %in% c("0", "no", "false")) {
FALSE
} else {
default
}
}
is_windows <- function() {
identical(tolower(Sys.info()[["sysname"]]), "windows")
}
is_loaded <- function(pkg) {
isTRUE(pkg[[1]] %in% loadedNamespaces())
}
`%||%` <- function(x, y) if (is.null(x)) y else x
collapse_transformer <- function(regex = "[*]$", ...) {
function(text, envir) {
if (grepl(regex, text)) {
text <- sub(regex, "", text)
res <- eval(parse(text = text, keep.source = FALSE), envir)
glue::glue_collapse(res, ...)
} else {
glue::identity_transformer(text, envir)
}
}
}
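# Minimal sketch of the transformer above, guarded so it never runs at load
# time: expressions ending in `*` are evaluated and then collapsed, so a
# character vector renders as one delimited string. `x` is illustrative only.
if (FALSE) {
  glue::glue(
    "columns: {x*}",
    x = c("a", "b", "c"),
    .transformer = collapse_transformer(sep = ", ")
  )
  #> columns: a, b, c
}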
is_named <- function (x) {
nms <- names(x)
if (is.null(nms)) {
return(FALSE)
}
all(nms != "" & !is.na(nms))
}
deparse2 <- function(expr, ..., sep = "\n") {
paste(deparse(expr, ...), collapse = sep)
}
is_syntactic <- function(x) make.names(x) == x
# Conditionally exported in zzz.R
# @export
compare.spec_tbl_df <- function (x, y, ...) {
attr(x, "spec") <- NULL
attr(x, "problems") <- NULL
attr(y, "spec") <- NULL
attr(y, "problems") <- NULL
class(x) <- setdiff(class(x), "spec_tbl_df")
class(y) <- setdiff(class(y), "spec_tbl_df")
NextMethod("compare")
}
# Conditionally exported in zzz.R
# @export
compare_proxy.spec_tbl_df <- function(x, path) {
attr(x, "spec") <- NULL
attr(x, "problems") <- NULL
class(x) <- setdiff(class(x), "spec_tbl_df")
if ("path" %in% names(formals(waldo::compare_proxy))) {
list(object = x, path = path)
} else {
x
}
}
# Conditionally exported in zzz.R
# @export
as_tibble.spec_tbl_df <- function(x, ...) {
attr(x, "spec") <- NULL
attr(x, "problems") <- NULL
class(x) <- setdiff(class(x), "spec_tbl_df")
NextMethod("as_tibble")
}
# Conditionally exported in zzz.R
# @export
all.equal.spec_tbl_df <- function(target, current, ...) {
attr(target, "spec") <- NULL
attr(target, "problems") <- NULL
attr(current, "spec") <- NULL
attr(current, "problems") <- NULL
class(target) <- setdiff(class(target), "spec_tbl_df")
class(current) <- setdiff(class(current), "spec_tbl_df")
NextMethod("all.equal")
}
# Conditionally exported in zzz.R
# @export
as.data.frame.spec_tbl_df <- function(x, ...) {
attr(x, "spec") <- NULL
attr(x, "problems") <- NULL
class(x) <- setdiff(class(x), "spec_tbl_df")
NextMethod("as.data.frame")
}
is_rstudio_console <- function() {
!(Sys.getenv("RSTUDIO", "") == "" || Sys.getenv("RSTUDIO_TERM", "") != "")
}
is_rstudio_version <- function(min, max = .Machine$integer.max) {
tryCatch(
expr = {
version <- rstudioapi::getVersion()
version >= min && version < max
},
error = function(e) FALSE
)
}
#' @importFrom methods setOldClass
setOldClass(c("spec_tbl_df", "tbl_df", "tbl", "data.frame"))
utctime <- function(year, month, day, hour, min, sec, psec) {
utctime_(as.integer(year), as.integer(month), as.integer(day),
as.integer(hour), as.integer(min), as.integer(sec), as.numeric(psec))
}
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/utils.R
|
#' @keywords internal
#' @aliases vroom-package
#' @useDynLib vroom, .registration = TRUE
"_PACKAGE"
## usethis namespace: start
#' @import rlang
#' @importFrom bit64 integer64
#' @importFrom crayon blue
#' @importFrom crayon bold
#' @importFrom crayon cyan
#' @importFrom crayon green
#' @importFrom crayon reset
#' @importFrom crayon silver
#' @importFrom glue glue
#' @importFrom lifecycle deprecate_warn
#' @importFrom lifecycle deprecated
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/vroom-package.R
|
#' Read a delimited file into a tibble
#'
#' @param file Either a path to a file, a connection, or literal data (either a
#' single string or a raw vector). `file` can also be a character vector
#' containing multiple filepaths or a list containing multiple connections.
#'
#' Files ending in `.gz`, `.bz2`, `.xz`, or `.zip` will be automatically
#' uncompressed. Files starting with `http://`, `https://`, `ftp://`, or
#' `ftps://` will be automatically downloaded. Remote gz files can also be
#' automatically downloaded and decompressed.
#'
#' Literal data is most useful for examples and tests. To be recognised as
#' literal data, wrap the input with `I()`.
#' @param delim One or more characters used to delimit fields within a
#' file. If `NULL` the delimiter is guessed from the set of `c(",", "\t", " ",
#' "|", ":", ";")`.
#' @param col_names Either `TRUE`, `FALSE` or a character vector
#' of column names.
#'
#' If `TRUE`, the first row of the input will be used as the column
#' names, and will not be included in the data frame. If `FALSE`, column
#' names will be generated automatically: X1, X2, X3 etc.
#'
#' If `col_names` is a character vector, the values will be used as the
#' names of the columns, and the first row of the input will be read into
#' the first row of the output data frame.
#'
#' Missing (`NA`) column names will generate a warning, and be filled
#' in with dummy names `...1`, `...2` etc. Duplicate column names
#' will generate a warning and be made unique, see `.name_repair` to control
#' how this is done.
#' @param col_types One of `NULL`, a [cols()] specification, or
#' a string.
#'
#' If `NULL`, all column types will be imputed from `guess_max` rows
#' on the input interspersed throughout the file. This is convenient (and
#' fast), but not robust. If the imputation fails, you'll need to increase
#' the `guess_max` or supply the correct types yourself.
#'
#' Column specifications created by [list()] or [cols()] must contain
#' one column specification for each column. If you only want to read a
#' subset of the columns, use [cols_only()].
#'
#' Alternatively, you can use a compact string representation where each
#' character represents one column:
#' - c = character
#' - i = integer
#' - n = number
#' - d = double
#' - l = logical
#' - f = factor
#' - D = date
#' - T = date time
#' - t = time
#' - ? = guess
#' - _ or - = skip
#'
#' By default, reading a file without a column specification will print a
#' message showing what `vroom` guessed they were. To remove this message,
#' set `show_col_types = FALSE` or set `options(readr.show_col_types = FALSE)`.
#' @param id Either a string or `NULL`. If a string, the output will contain a
#' variable with that name with the filename(s) as the value. If `NULL`, the
#' default, no variable will be created.
#' @param skip Number of lines to skip before reading data. If `comment` is
#' supplied any commented lines are ignored _after_ skipping.
#' @param n_max Maximum number of lines to read.
#' @param na Character vector of strings to interpret as missing values. Set this
#' option to `character()` to indicate no missing values.
#' @param quote Single character used to quote strings.
#' @param comment A string used to identify comments. Any text after the
#' comment characters will be silently ignored.
#' @param skip_empty_rows Should blank rows be ignored altogether? i.e. If this
#' option is `TRUE` then blank rows will not be represented at all. If it is
#' `FALSE` then they will be represented by `NA` values in all the columns.
#' @param trim_ws Should leading and trailing whitespace (ASCII spaces and tabs) be trimmed from
#' each field before parsing it?
#' @param escape_double Does the file escape quotes by doubling them?
#' i.e. If this option is `TRUE`, the value '""' represents
#' a single quote, '"'.
#' @param escape_backslash Does the file use backslashes to escape special
#' characters? This is more general than `escape_double` as backslashes
#' can be used to escape the delimiter character, the quote character, or
#' to add special characters like `\\n`.
#' @param locale The locale controls defaults that vary from place to place.
#' The default locale is US-centric (like R), but you can use
#' [locale()] to create your own locale that controls things like
#' the default time zone, encoding, decimal mark, big mark, and day/month
#' names.
#' @param guess_max Maximum number of lines to use for guessing column types.
#' See `vignette("column-types", package = "readr")` for more details.
#' @param altrep Control which column types use Altrep representations,
#' either a character vector of types, `TRUE` or `FALSE`. See
#' [vroom_altrep()] for full details.
#' @param altrep_opts \Sexpr[results=rd, stage=render]{lifecycle::badge("deprecated")}
#' @param col_select Columns to include in the results. You can use the same
#' mini-language as `dplyr::select()` to refer to the columns by name. Use
#' `c()` to use more than one selection expression. Although this
#' usage is less common, `col_select` also accepts a numeric column index. See
#' [`?tidyselect::language`][tidyselect::language] for full details on the
#' selection language.
#' @param num_threads Number of threads to use when reading and materializing
#' vectors. If your data contains newlines within fields the parser will
#' automatically be forced to use a single thread only.
#' @param progress Display a progress bar? By default it will only display
#' in an interactive session and not while knitting a document. The automatic
#' progress bar can be disabled by setting the environment variable
#' `VROOM_SHOW_PROGRESS` to `"false"`, see [vroom_progress()].
#' @param show_col_types Control showing the column specifications. If `TRUE`
#' column specifications are always shown, if `FALSE` they are never shown. If
#' `NULL` (the default) they are shown only if an explicit specification is not
#' given to `col_types`.
#' @param .name_repair Handling of column names. The default behaviour is to
#' ensure column names are `"unique"`. Various repair strategies are
#' supported:
#' * `"minimal"`: No name repair or checks, beyond basic existence of names.
#' * `"unique"` (default value): Make sure names are unique and not empty.
#' * `"check_unique"`: no name repair, but check they are `unique`.
#' * `"universal"`: Make the names `unique` and syntactic.
#' * A function: apply custom name repair (e.g., `name_repair = make.names`
#' for names in the style of base R).
#' * A purrr-style anonymous function, see [rlang::as_function()].
#'
#' This argument is passed on as `repair` to [vctrs::vec_as_names()].
#' See there for more details on these terms and the strategies used
#' to enforce them.
#' @export
#' @examples
#' # get path to example file
#' input_file <- vroom_example("mtcars.csv")
#' input_file
#'
#' # Read from a path
#'
#' # Input sources -------------------------------------------------------------
#' # Read from a path
#' vroom(input_file)
#' # You can also use paths directly
#' # vroom("mtcars.csv")
#'
#' \dontrun{
#' # Including remote paths
#' vroom("https://github.com/tidyverse/vroom/raw/main/inst/extdata/mtcars.csv")
#' }
#'
#' # Or directly from a string with `I()`
#' vroom(I("x,y\n1,2\n3,4\n"))
#'
#' # Column selection ----------------------------------------------------------
#' # Pass column names or indexes directly to select them
#' vroom(input_file, col_select = c(model, cyl, gear))
#' vroom(input_file, col_select = c(1, 3, 11))
#'
#' # Or use the selection helpers
#' vroom(input_file, col_select = starts_with("d"))
#'
#' # You can also rename specific columns
#' vroom(input_file, col_select = c(car = model, everything()))
#'
#' # Column types --------------------------------------------------------------
#' # By default, vroom guesses the columns types, looking at 1000 rows
#' # throughout the dataset.
#' # You can specify them explicitly with a compact specification:
#' vroom(I("x,y\n1,2\n3,4\n"), col_types = "dc")
#'
#' # Or with a list of column types:
#' vroom(I("x,y\n1,2\n3,4\n"), col_types = list(col_double(), col_character()))
#'
#' # File types ----------------------------------------------------------------
#' # csv
#' vroom(I("a,b\n1.0,2.0\n"), delim = ",")
#' # tsv
#' vroom(I("a\tb\n1.0\t2.0\n"))
#' # Other delimiters
#' vroom(I("a|b\n1.0|2.0\n"), delim = "|")
#'
#' # Read datasets across multiple files ---------------------------------------
#' mtcars_by_cyl <- vroom_example(vroom_examples("mtcars-"))
#' mtcars_by_cyl
#'
#' # Pass the filenames directly to vroom, they are efficiently combined
#' vroom(mtcars_by_cyl)
#'
#' # If you need to extract data from the filenames, use `id` to request a
#' # column that reveals the underlying file path
#' dat <- vroom(mtcars_by_cyl, id = "source")
#' dat$source <- basename(dat$source)
#' dat
vroom <- function(
file,
delim = NULL,
col_names = TRUE,
col_types = NULL,
col_select = NULL,
id = NULL,
skip = 0,
n_max = Inf,
na = c("", "NA"),
quote = '"',
comment = "",
skip_empty_rows = TRUE,
trim_ws = TRUE,
escape_double = TRUE,
escape_backslash = FALSE,
locale = default_locale(),
guess_max = 100,
altrep = TRUE,
altrep_opts = deprecated(),
num_threads = vroom_threads(),
progress = vroom_progress(),
show_col_types = NULL,
.name_repair = "unique"
) {
# vroom does not support newlines as the delimiter, only as the EOL, so
# assign a value that should never appear in CSV text as the delimiter:
# "\x01", the ASCII "start of heading" control character.
if (identical(delim, "\n")) {
delim <- "\x01"
}
if (!is_missing(altrep_opts)) {
deprecate_warn("1.1.0", "vroom(altrep_opts = )", "vroom(altrep = )")
altrep <- altrep_opts
}
file <- standardise_path(file)
if (!is_ascii_compatible(locale$encoding)) {
file <- reencode_file(file, locale$encoding)
locale$encoding <- "UTF-8"
}
if (length(file) == 0 || (n_max == 0 & identical(col_names, FALSE))) {
return(tibble::tibble())
}
if (n_max < 0 || is.infinite(n_max)) {
n_max <- -1
}
if (guess_max < 0 || is.infinite(guess_max)) {
guess_max <- -1
}
# Workaround weird RStudio / Progress bug: https://github.com/r-lib/progress/issues/56#issuecomment-384232184
if (
isTRUE(progress) &&
is_windows() &&
identical(Sys.getenv("RSTUDIO"), "1")) {
Sys.setenv("RSTUDIO" = "1")
}
col_select <- vroom_enquo(enquo(col_select))
has_col_types <- !is.null(col_types)
col_types <- as.col_spec(col_types)
na <- enc2utf8(na)
out <- vroom_(file, delim = delim %||% col_types$delim, col_names = col_names,
col_types = col_types, id = id, skip = skip, col_select = col_select,
name_repair = .name_repair,
na = na, quote = quote, trim_ws = trim_ws, escape_double = escape_double,
escape_backslash = escape_backslash, comment = comment,
skip_empty_rows = skip_empty_rows, locale = locale,
guess_max = guess_max, n_max = n_max, altrep = vroom_altrep(altrep),
num_threads = num_threads, progress = progress)
# Drop any NULL columns
is_null <- vapply(out, is.null, logical(1))
out[is_null] <- NULL
# If no rows expand columns to be the same length and names as the spec
if (NROW(out) == 0) {
cols <- attr(out, "spec")[["cols"]]
for (i in seq_along(cols)) {
out[[i]] <- collector_value(cols[[i]])
}
names(out) <- names(cols)
}
out <- tibble::as_tibble(out, .name_repair = identity)
class(out) <- c("spec_tbl_df", class(out))
out <- vroom_select(out, col_select, id)
if (should_show_col_types(has_col_types, show_col_types)) {
show_col_types(out, locale)
}
out
}
should_show_col_types <- function(has_col_types, show_col_types) {
if (is.null(show_col_types)) {
return(isTRUE(!has_col_types))
}
isTRUE(show_col_types)
}
make_names <- function(x, len) {
if (len == 0) {
return(character())
}
if (length(x) == len) {
return(x)
}
if (length(x) > len) {
return(x[seq_len(len)])
}
nms <- make.names(seq_len(len))
nms[seq_along(x)] <- x
nms
}
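# Minimal sketch of the padding/truncation above, guarded so it never runs at
# load time: missing names fall back to `make.names()`-style placeholders.
if (FALSE) {
  make_names(c("a", "b"), 4)       # "a" "b" "X3" "X4"
  make_names(c("a", "b", "c"), 2)  # "a" "b"
}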
#' Determine whether progress bars should be shown
#'
#' By default, vroom shows progress bars. However, progress reporting is
#' suppressed if any of the following conditions hold:
#' - The bar is explicitly disabled by setting the environment variable
#' `VROOM_SHOW_PROGRESS` to `"false"`.
#' - The code is run in a non-interactive session, as determined by
#' [rlang::is_interactive()].
#' - The code is run in an RStudio notebook chunk, as determined by
#' `getOption("rstudio.notebook.executing")`.
#' @export
#' @examples
#' vroom_progress()
vroom_progress <- function() {
env_to_logical("VROOM_SHOW_PROGRESS", TRUE) &&
is_interactive() &&
# some analysis re: rstudio.notebook.executing can be found in:
# https://github.com/r-lib/rlang/issues/1031
# TL;DR it's not consulted by is_interactive(), but probably should be
# consulted for progress reporting specifically
!isTRUE(getOption("rstudio.notebook.executing"))
}
pb_file_format <- function(filename) {
# Workaround RStudio bug https://github.com/rstudio/rstudio/issues/4777
withr::with_options(list(crayon.enabled = (!is_rstudio_console() || is_rstudio_version("1.2.1578")) && getOption("crayon.enabled", TRUE)),
glue::glue_col("{bold}indexing{reset} {blue}{basename(filename)}{reset} [:bar] {green}:rate{reset}, eta: {cyan}:eta{reset}")
)
}
pb_width <- function(format) {
ansii_chars <- nchar(format) - crayon::col_nchar(format)
getOption("width", 80L) + ansii_chars
}
pb_connection_format <- function(unused) {
withr::with_options(list(crayon.enabled = (!is_rstudio_console() || is_rstudio_version("1.2.1578")) && getOption("crayon.enabled", TRUE)),
glue::glue_col("{bold}indexed{reset} {green}:bytes{reset} in {cyan}:elapsed{reset}, {green}:rate{reset}")
)
}
pb_write_format <- function(unused) {
withr::with_options(list(crayon.enabled = (!is_rstudio_console() || is_rstudio_version("1.2.1578")) && getOption("crayon.enabled", TRUE)),
glue::glue_col("{bold}wrote{reset} {green}:bytes{reset} in {cyan}:elapsed{reset}, {green}:rate{reset}")
)
}
# Guess delimiter by splitting every line by each delimiter and choosing the
# delimiter which splits the lines into the highest number of consistent fields
guess_delim <- function(lines, delims = c(",", "\t", " ", "|", ":", ";")) {
if (length(lines) == 0) {
return("")
}
# blank text within quotes
lines <- gsub('"[^"]*"', "", lines)
splits <- lapply(delims, strsplit, x = lines, useBytes = TRUE, fixed = TRUE)
counts <- lapply(splits, function(x) table(lengths(x)))
num_fields <- vapply(counts, function(x) as.integer(names(x)[[1]]), integer(1))
num_lines <- vapply(counts, function(x) (x)[[1]], integer(1))
top_lines <- 0
top_idx <- 0
for (i in seq_along(delims)) {
if (num_fields[[i]] >= 2 && num_lines[[i]] > top_lines ||
(top_lines == num_lines[[i]] && (top_idx <= 0 || num_fields[[top_idx]] < num_fields[[i]]))) {
top_lines <- num_lines[[i]]
top_idx <- i
}
}
if (top_idx == 0) {
stop(glue::glue('
Could not guess the delimiter.\n
{silver("Use `vroom(delim =)` to specify one explicitly.")}
'), call. = FALSE)
}
delims[[top_idx]]
}
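# Minimal sketch of the guessing heuristic above, guarded so it never runs at
# load time; the lines are made up for illustration.
if (FALSE) {
  guess_delim(c("a|b|c", "1|2|3", "4|5|6"))  # "|"
  guess_delim(c("a\tb", "1\t2"))             # "\t"
}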
cached <- new.env(parent = emptyenv())
vroom_threads <- function() {
res <- as.integer(
Sys.getenv("VROOM_THREADS",
cached$num_threads <- cached$num_threads %||% parallel::detectCores()
)
)
if (is.na(res) || res <= 0) {
res <- 1
}
res
}
vroom_tempfile <- function() {
dir <- Sys.getenv("VROOM_TEMP_PATH")
if (!nzchar(dir)) {
dir <- tempdir()
}
tempfile(pattern = "vroom-", tmpdir = dir)
}
#' Show which column types are using Altrep
#'
#' `vroom_altrep()` can be used directly as input to the `altrep`
#' argument of [vroom()].
#'
#' Alternatively there is also a family of environment variables to control use of
#' the Altrep framework. These can then be set in your `.Renviron` file, e.g.
#' with `usethis::edit_r_environ()`. For versions of R where the Altrep
#' framework is unavailable (R < 3.5.0) they are automatically turned off and
#' the variables have no effect. The variables can take one of `true`, `false`,
#' `TRUE`, `FALSE`, `1`, or `0`.
#'
#' - `VROOM_USE_ALTREP_NUMERICS` - If set use Altrep for _all_ numeric types
#' (default `false`).
#'
#' There are also individual variables for each type. Currently only
#' `VROOM_USE_ALTREP_CHR` defaults to `true`.
#'
#' - `VROOM_USE_ALTREP_CHR`
#' - `VROOM_USE_ALTREP_FCT`
#' - `VROOM_USE_ALTREP_INT`
#' - `VROOM_USE_ALTREP_BIG_INT`
#' - `VROOM_USE_ALTREP_DBL`
#' - `VROOM_USE_ALTREP_NUM`
#' - `VROOM_USE_ALTREP_LGL`
#' - `VROOM_USE_ALTREP_DTTM`
#' - `VROOM_USE_ALTREP_DATE`
#' - `VROOM_USE_ALTREP_TIME`
#'
#' @param which A character vector of column types to use Altrep for. Can also
#' take `TRUE` or `FALSE` to use Altrep for all possible or none of the
#' types
#' @examples
#' vroom_altrep()
#' vroom_altrep(c("chr", "fct", "int"))
#' vroom_altrep(TRUE)
#' vroom_altrep(FALSE)
#' @export
vroom_altrep <- function(which = NULL) {
if (!is.null(which)) {
if (is.logical(which)) {
types <- names(altrep_vals())
if (isTRUE(which)) {
which <- as.list(stats::setNames(rep(TRUE, length(types)), types))
} else {
which <- as.list(stats::setNames(rep(FALSE, length(types)), types))
}
} else {
which <- match.arg(which, names(altrep_vals()), several.ok = TRUE)
which <- as.list(stats::setNames(rep(TRUE, length(which)), which))
}
}
args <- list(
getRversion() >= "3.5.0" && which$chr %||% vroom_use_altrep_chr(),
getRversion() >= "3.5.0" && which$fct %||% vroom_use_altrep_fct(),
getRversion() >= "3.5.0" && which$int %||% vroom_use_altrep_int(),
getRversion() >= "3.5.0" && which$dbl %||% vroom_use_altrep_dbl(),
getRversion() >= "3.5.0" && which$num %||% vroom_use_altrep_num(),
getRversion() >= "3.6.0" && which$lgl %||% vroom_use_altrep_lgl(), # logicals only supported in R 3.6.0+
getRversion() >= "3.5.0" && which$dttm %||% vroom_use_altrep_dttm(),
getRversion() >= "3.5.0" && which$date %||% vroom_use_altrep_date(),
getRversion() >= "3.5.0" && which$time %||% vroom_use_altrep_time(),
getRversion() >= "3.5.0" && which$big_int %||% vroom_use_altrep_big_int()
)
out <- 0L
for (i in seq_along(args)) {
out <- bitwOr(out, bitwShiftL(as.integer(args[[i]]), i - 1L))
}
structure(out, class = "vroom_altrep")
}
#' Show which column types are using Altrep
#'
#' @description
#' \Sexpr[results=rd, stage=render]{lifecycle::badge("deprecated")}
#' This function is deprecated in favor of `vroom_altrep()`.
#'
#' @inheritParams vroom_altrep
#' @export
vroom_altrep_opts <- function(which = NULL) {
deprecate_warn("1.1.0", "vroom_altrep_opts()", "vroom_altrep()")
vroom_altrep(which)
}
altrep_vals <- function() c(
"none" = 0L,
"chr" = 1L,
"fct" = 2L,
"int" = 4L,
"dbl" = 8L,
"num" = 16L,
"lgl" = 32L,
"dttm" = 64L,
"date" = 128L,
"time" = 256L,
"big_int" = 512L,
"skip" = 1024L
)
#' @export
print.vroom_altrep <- function(x, ...) {
vals <- altrep_vals()
reps <- names(vals)[bitwAnd(vals, x) > 0]
cat("Using Altrep representations for:\n",
glue::glue("
* {reps}
", reps = glue::glue_collapse(reps, "\n * ")), "\n", sep = "")
}
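# Minimal sketch of the bit arithmetic above, guarded so it never runs at load
# time and assuming R >= 3.5 with no VROOM_USE_ALTREP_* environment variables
# set: each type owns one bit, vroom_altrep() ORs the selected bits together,
# and the same mask can be decoded with altrep_vals().
if (FALSE) {
  mask <- vroom_altrep(c("chr", "int"))
  unclass(mask)                                           # 1 + 4 = 5
  names(altrep_vals())[bitwAnd(altrep_vals(), mask) > 0]  # "chr" "int"
}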
vroom_use_altrep_chr <- function() {
env_to_logical("VROOM_USE_ALTREP_CHR", TRUE)
}
vroom_use_altrep_fct <- function() {
# fct is a numeric internally
env_to_logical("VROOM_USE_ALTREP_NUMERICS", FALSE) || env_to_logical("VROOM_USE_ALTREP_FCT", FALSE)
}
vroom_use_altrep_int <- function() {
env_to_logical("VROOM_USE_ALTREP_NUMERICS", FALSE) || env_to_logical("VROOM_USE_ALTREP_INT", FALSE)
}
vroom_use_altrep_big_int <- function() {
env_to_logical("VROOM_USE_ALTREP_NUMERICS", FALSE) || env_to_logical("VROOM_USE_ALTREP_BIG_INT", FALSE)
}
vroom_use_altrep_dbl <- function() {
env_to_logical("VROOM_USE_ALTREP_NUMERICS", FALSE) || env_to_logical("VROOM_USE_ALTREP_DBL", FALSE)
}
vroom_use_altrep_num <- function() {
env_to_logical("VROOM_USE_ALTREP_NUMERICS", FALSE) || env_to_logical("VROOM_USE_ALTREP_NUM", FALSE)
}
vroom_use_altrep_lgl <- function() {
env_to_logical("VROOM_USE_ALTREP_NUMERICS", FALSE) || env_to_logical("VROOM_USE_ALTREP_LGL", FALSE)
}
vroom_use_altrep_dttm <- function() {
env_to_logical("VROOM_USE_ALTREP_NUMERICS", FALSE) || env_to_logical("VROOM_USE_ALTREP_DTTM", FALSE)
}
vroom_use_altrep_date <- function() {
env_to_logical("VROOM_USE_ALTREP_NUMERICS", FALSE) || env_to_logical("VROOM_USE_ALTREP_DATE", FALSE)
}
vroom_use_altrep_time <- function() {
env_to_logical("VROOM_USE_ALTREP_NUMERICS", FALSE) || env_to_logical("VROOM_USE_ALTREP_TIME", FALSE)
}
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/vroom.R
|
#' Read a fixed width file into a tibble
#'
#' @details
#' *Note*: `fwf_empty()` cannot take an R connection such as a URL as input, as
#' this would result in reading from the connection twice. In these cases it is
#' better to download the file first before reading.
#' @inheritParams readr::read_fwf
#' @inheritParams vroom
#' @export
#' @examples
#' fwf_sample <- vroom_example("fwf-sample.txt")
#' writeLines(vroom_lines(fwf_sample))
#'
#' # You can specify column positions in several ways:
#' # 1. Guess based on position of empty columns
#' vroom_fwf(fwf_sample, fwf_empty(fwf_sample, col_names = c("first", "last", "state", "ssn")))
#' # 2. A vector of field widths
#' vroom_fwf(fwf_sample, fwf_widths(c(20, 10, 12), c("name", "state", "ssn")))
#' # 3. Paired vectors of start and end positions
#' vroom_fwf(fwf_sample, fwf_positions(c(1, 30), c(20, 42), c("name", "ssn")))
#' # 4. Named arguments with start and end positions
#' vroom_fwf(fwf_sample, fwf_cols(name = c(1, 20), ssn = c(30, 42)))
#' # 5. Named arguments with column widths
#' vroom_fwf(fwf_sample, fwf_cols(name = 20, state = 10, ssn = 12))
vroom_fwf <- function(file,
col_positions = fwf_empty(file, skip, n = guess_max),
col_types = NULL,
col_select = NULL, id = NULL,
locale = default_locale(), na = c("", "NA"),
comment = "",
skip_empty_rows = TRUE,
trim_ws = TRUE, skip = 0, n_max = Inf,
guess_max = 100,
altrep = TRUE,
altrep_opts = deprecated(),
num_threads = vroom_threads(),
progress = vroom_progress(),
show_col_types = NULL,
.name_repair = "unique") {
verify_fwf_positions(col_positions)
if (!is_missing(altrep_opts)) {
deprecate_warn("1.1.0", "vroom_fwf(altrep_opts = )", "vroom_fwf(altrep = )")
altrep <- altrep_opts
}
file <- standardise_path(file)
if (!is_ascii_compatible(locale$encoding)) {
file <- reencode_file(file, locale$encoding)
locale$encoding <- "UTF-8"
}
if (length(file) == 0 || (n_max == 0 & identical(col_positions$col_names, FALSE))) {
out <- tibble::tibble()
class(out) <- c("spec_tbl_df", class(out))
return(out)
}
if (n_max < 0 || is.infinite(n_max)) {
n_max <- -1
}
if (guess_max < 0 || is.infinite(guess_max)) {
guess_max <- -1
}
col_select <- vroom_enquo(enquo(col_select))
has_col_types <- !is.null(col_types)
col_types <- as.col_spec(col_types)
out <- vroom_fwf_(file, as.integer(col_positions$begin), as.integer(col_positions$end),
trim_ws = trim_ws, col_names = col_positions$col_names,
col_types = col_types, col_select = col_select,
name_repair = .name_repair,
id = id, na = na, guess_max = guess_max, skip = skip, comment = comment,
skip_empty_rows = skip_empty_rows,
n_max = n_max, num_threads = num_threads,
altrep = vroom_altrep(altrep), locale = locale,
progress = progress)
out <- tibble::as_tibble(out, .name_repair = .name_repair)
out <- vroom_select(out, col_select, id)
class(out) <- c("spec_tbl_df", class(out))
if (should_show_col_types(has_col_types, show_col_types)) {
show_col_types(out, locale)
}
out
}
#' @rdname vroom_fwf
#' @inheritParams readr::read_fwf
#' @export
#' @param n Number of lines the tokenizer will read to determine file structure. By default
#' it is set to 100.
fwf_empty <- function(file, skip = 0, col_names = NULL, comment = "", n = 100L) {
file <- standardise_one_path(standardise_path(file)[[1]])
if (inherits(file, "connection")) {
stop("`file` must be a regular file, not a connection", call. = FALSE)
}
if (n < 0 || is.infinite(n)) {
n <- -1
}
out <- whitespace_columns_(file[[1]], skip, comment = comment, n = n)
out$end[length(out$end)] <- NA
col_names <- fwf_col_names(col_names, length(out$begin))
out$col_names <- col_names
out
}
#' @rdname vroom_fwf
#' @export
#' @param widths Width of each field. Use NA as width of last field when
#' reading a ragged fwf file.
#' @param col_names Either NULL, or a character vector of column names.
fwf_widths <- function(widths, col_names = NULL) {
pos <- cumsum(c(1L, abs(widths)))
fwf_positions(pos[-length(pos)], pos[-1] - 1L, col_names)
}
#' @rdname vroom_fwf
#' @export
#' @param start,end Starting and ending (inclusive) positions of each field.
#' Use NA as last end field when reading a ragged fwf file.
fwf_positions <- function(start, end = NULL, col_names = NULL) {
stopifnot(length(start) == length(end))
col_names <- fwf_col_names(col_names, length(start))
tibble::tibble(
begin = start - 1L,
end = end, # -1 for the 0-based offset and +1 to make it exclusive cancel out
col_names = as.character(col_names)
)
}
#' @rdname vroom_fwf
#' @export
#' @param ... If the first element is a data frame,
#' then it must have all numeric columns and either one or two rows.
#' The column names are the variable names. The column values are the
#' variable widths if a length one vector, and if length two, variable start and end
#' positions. The elements of `...` are used to construct a data frame
#' with one or two rows as above.
fwf_cols <- function(...) {
x <- lapply(list(...), as.integer)
names(x) <- fwf_col_names(names(x), length(x))
x <- tibble::as_tibble(x)
if (nrow(x) == 2) {
fwf_positions(as.integer(x[1, ]), as.integer(x[2, ]), names(x))
} else if (nrow(x) == 1) {
fwf_widths(as.integer(x[1, ]), names(x))
} else {
stop("All variables must have either one (width) two (start, end) values.",
call. = FALSE)
}
}
fwf_col_names <- function(nm, n) {
nm <- nm %||% rep("", n)
nm_empty <- (nm == "")
nm[nm_empty] <- paste0("X", seq_len(n))[nm_empty]
nm
}
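# Minimal sketch of how the helpers above relate, guarded so it never runs at
# load time: widths are cumulated into begin/end positions, so these three
# calls describe the same column layout.
if (FALSE) {
  fwf_widths(c(20, 10, 12), c("name", "state", "ssn"))
  fwf_positions(c(1, 21, 31), c(20, 30, 42), c("name", "state", "ssn"))
  fwf_cols(name = 20, state = 10, ssn = 12)
}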
verify_fwf_positions <- function(col_positions) {
is_greater <- stats::na.omit(col_positions$begin > col_positions$end)
if (any(is_greater)) {
bad <- which(is_greater)
stop("`col_positions` must have begin less than end.\n* Invalid values at position(s): ", paste0(collapse = ", ", bad), call. = FALSE)
}
}
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/vroom_fwf.R
|
#' Read lines from a file
#'
#' `vroom_lines()` is similar to `readLines()`, however it reads the lines
#' lazily like [vroom()], so operations like `length()`, `head()`, `tail()` and `sample()`
#' can be done much more efficiently without reading all the data into R.
#' @inheritParams vroom
#' @examples
#' lines <- vroom_lines(vroom_example("mtcars.csv"))
#'
#' length(lines)
#' head(lines, n = 2)
#' tail(lines, n = 2)
#' sample(lines, size = 2)
#' @export
vroom_lines <- function(file, n_max = Inf, skip = 0,
na = character(), skip_empty_rows = FALSE,
locale = default_locale(), altrep = TRUE,
altrep_opts = deprecated(), num_threads = vroom_threads(),
progress = vroom_progress()) {
if (!is_missing(altrep_opts)) {
deprecate_warn("1.1.0", "vroom_lines(altrep_opts = )", "vroom_lines(altrep = )")
altrep <- altrep_opts
}
file <- standardise_path(file)
if (!is_ascii_compatible(locale$encoding)) {
file <- reencode_file(file, locale$encoding)
locale$encoding <- "UTF-8"
}
if (n_max < 0 || is.infinite(n_max)) {
n_max <- -1
}
if (length(file) == 0 || n_max == 0) {
return(character())
}
col_select <- quo(NULL)
# delim = "\1" sets the delimiter to be start of header, which should never
# appear in modern text. This essentially means the only record breaks will
# be newlines. Ideally this would be "\0", but R doesn't let you have nulls
# in character vectors.
out <- vroom_(file, delim = "\1", col_names = "V1", col_types = cols(col_character()),
id = NULL, skip = skip, col_select = col_select, name_repair = "minimal",
na = na, quote = "", trim_ws = FALSE, escape_double = FALSE,
escape_backslash = FALSE, comment = "", skip_empty_rows = skip_empty_rows,
locale = locale, guess_max = 0, n_max = n_max,
altrep = vroom_altrep(altrep), num_threads = num_threads,
progress = progress
)
if (length(out) == 0) {
return(character())
}
out[[1]]
}
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/vroom_lines.R
|
#' Write a data frame to a delimited file
#'
#' @inheritParams readr::write_tsv
#' @inheritParams vroom
#' @param escape The type of escape to use when quotes are in the data.
#' - `double` - quotes are escaped by doubling them.
#' - `backslash` - quotes are escaped by a preceding backslash.
#' - `none` - quotes are not escaped.
#' @param quote How to handle fields which contain characters that need to be
#' quoted.
#' - `needed` - Values are only quoted if needed: if they contain a delimiter,
#' quote, or newline.
#' - `all` - Quote all fields.
#' - `none` - Never quote fields.
#' @param bom If `TRUE` add a UTF-8 BOM at the beginning of the file. This is
#' recommended when saving data for consumption by Excel, as it will force
#' Excel to read the data with the correct encoding (UTF-8).
#' @param delim Delimiter used to separate values. Defaults to `\t` to write
#' tab separated value (TSV) files.
#' @param na String used for missing values. Defaults to 'NA'.
#' @param path `r lifecycle::badge("deprecated")` is no longer supported, use
#' `file` instead.
#' @export
#' @examples
#' # If you only specify a file name, vroom_write() will write
#' # the file to your current working directory.
#' out_file <- tempfile(fileext = ".csv")
#' vroom_write(mtcars, out_file, ",")
#'
#' # You can also use a literal filename
#' # vroom_write(mtcars, "mtcars.tsv")
#'
#' # If you add an extension to the file name, write_()* will
#' # automatically compress the output.
#' # vroom_write(mtcars, "mtcars.tsv.gz")
#' # vroom_write(mtcars, "mtcars.tsv.bz2")
#' # vroom_write(mtcars, "mtcars.tsv.xz")
vroom_write <- function(x, file, delim = '\t', eol = "\n", na = "NA", col_names = !append,
append = FALSE, quote = c("needed", "all", "none"), escape =
c("double", "backslash", "none"), bom = FALSE, num_threads =
vroom_threads(), progress = vroom_progress(), path = deprecated()) {
if (lifecycle::is_present(path)) {
file <- path
lifecycle::deprecate_soft(
when = "1.5.0",
what = "vroom_write(file)"
)
}
input <- x
quote <- match.arg(quote)
escape <- match.arg(escape)
opts <- get_vroom_write_opts(quote, escape, bom)
# Standardise path returns a list, but we will only ever have 1 output file.
file <- standardise_one_path(file, write = TRUE)
if (NCOL(x) == 0) {
if (!append && !inherits(file, "connection")) {
# if file already exists, it is overwritten with an empty file!
file.create(file)
}
return(invisible(input))
}
# This seems to work ok in practice
buf_lines <- max(as.integer(Sys.getenv("VROOM_WRITE_BUFFER_LINES", nrow(x) / 100 / num_threads)), 1)
# Run `output_column()` before `vroom_convert()` to ensure that any ALTREP
# vectors created by `output_column()` will be fully materialized (#389)
x[] <- lapply(x, output_column)
# We need to convert any altrep vectors to normal vectors otherwise we can't fill the
# write buffers from other threads. This should be the last manipulation done
# to `x` before writing to ensure that no new altrep vectors are created.
x <- vroom_convert(x)
if (inherits(file, "connection")) {
vroom_write_connection_(x, file, delim, eol, na_str = na, col_names = col_names,
options = opts, num_threads = num_threads, progress = progress, buf_lines = buf_lines,
is_stdout = file == stdout(), append = append)
} else {
vroom_write_(x, file, delim, eol, na_str = na, col_names = col_names,
append = append, options = opts,
num_threads = num_threads, progress = progress, buf_lines = buf_lines)
}
invisible(input)
}
get_vroom_write_opts <- function(quote, escape, bom) {
v_opts <- vroom_write_opts()
bitwOr(
v_opts[paste0("quote_", quote)],
bitwOr(
v_opts[paste0("escape_", escape)],
if (bom) v_opts["bom"] else 0)
)
}
vroom_write_opts <- function() c(
"quote_none" = 0L,
"escape_none" = 0L,
"quote_needed" = 1L,
"quote_all" = 2L,
"escape_double" = 4L,
"escape_backslash" = 8L,
"bom" = 16L
)
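# Minimal sketch of the flag encoding above, guarded so it never runs at load
# time: the quote, escape, and bom choices are OR-ed into one integer that is
# handed to the native writer.
if (FALSE) {
  get_vroom_write_opts(quote = "all", escape = "backslash", bom = TRUE)
  # quote_all (2) + escape_backslash (8) + bom (16) = 26
}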
#' Convert a data frame to a delimited string
#'
#' This is equivalent to [vroom_write()], but instead of writing to
#' disk, it returns a string. It is primarily useful for examples and for
#' testing.
#'
#' @inheritParams vroom_write
#' @export
vroom_format <- function(x, delim = "\t", eol = "\n", na = "NA", col_names = TRUE,
escape = c("double", "backslash", "none"),
quote = c("needed", "all", "none"),
bom = FALSE,
num_threads = vroom_threads()) {
stopifnot(is.data.frame(x))
if (NCOL(x) == 0) {
return("")
}
quote <- match.arg(quote)
escape <- match.arg(escape)
opts <- get_vroom_write_opts(quote, escape, bom)
# This seems to work ok in practice
buf_lines <- max(as.integer(Sys.getenv("VROOM_WRITE_BUFFER_LINES", nrow(x) / 100 / num_threads)), 1)
x[] <- lapply(x, output_column)
vroom_format_(x, delim = delim, eol = eol, na_str = na, col_names = col_names,
append = FALSE, options = opts, num_threads = num_threads, progress = vroom_progress(), buf_lines = buf_lines)
}
#' Write lines to a file
#'
#' @param x A character vector.
#' @inheritParams vroom_write
#' @export
vroom_write_lines <- function(x, file, eol = "\n", na = "NA", append = FALSE, num_threads = vroom_threads()) {
stopifnot(is.character(x))
x <- list(X1 = x)
class(x) <- "data.frame"
attr(x, "row.names") <- c(NA_integer_, -length(x[[1]]))
vroom_write(x, file = file, delim = "", col_names = FALSE, eol = eol, na =
na, append = append, quote = "none", escape = "none", num_threads =
num_threads
)
}
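# Minimal sketch of the wrapper above, guarded so it never runs at load time:
# lines round-trip through a one-column frame with quoting and escaping
# disabled. The temporary path exists only for this example.
if (FALSE) {
  tmp <- tempfile(fileext = ".txt")
  vroom_write_lines(c("alpha", "beta"), tmp)
  vroom_lines(tmp)  # "alpha" "beta"
  unlink(tmp)
}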
#' Preprocess column for output
#'
#' This is a generic function that applied to each column before it is saved
#' to disk. It provides a hook for S3 classes that need special handling.
#'
#' @keywords internal
#' @param x A vector
#' @examples
#' # Most types are returned unchanged
#' output_column(1)
#' output_column("x")
#'
#' # datetimes are formatted in ISO 8601
#' output_column(Sys.Date())
#' output_column(Sys.time())
#' @export
output_column <- function(x) {
UseMethod("output_column")
}
#' @export
output_column.default <- function(x) {
if (!is.object(x)) return(x)
as.character(x)
}
#' @export
output_column.double <- function(x) {
x
}
#' @export
output_column.POSIXt <- function(x) {
format(x, "%Y-%m-%dT%H:%M:%OSZ", tz = "UTC", justify = "none")
}
#' @export
output_column.character <- function(x) {
x
}
#' @export
output_column.factor <- function(x) {
# TODO: look into doing writing directly in C++
as.character(x)
}
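# Minimal sketch of extending the hook above for a user-defined class, guarded
# so it never runs at load time; the `percent` class and its representation
# are made up for illustration.
if (FALSE) {
  output_column.percent <- function(x) {
    sprintf("%.1f%%", unclass(x) * 100)
  }
  p <- structure(c(0.125, 0.5), class = "percent")
  output_column(p)  # "12.5%" "50.0%"
}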
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/vroom_write.R
|
.onUnload <- function(libpath) {
library.dynam.unload("vroom", libpath)
}
.onLoad <- function(...) {
tzdb::tzdb_initialize()
# only register conflicting S3 methods if readr is not already loaded.
if (!"readr" %in% loadedNamespaces()) {
s3_register("base::format", "col_spec")
s3_register("base::print", "col_spec")
s3_register("base::print", "collector")
s3_register("base::print", "date_names")
s3_register("base::print", "locale")
s3_register("utils::str", "col_spec")
s3_register("base::all.equal", "spec_tbl_df")
s3_register("base::as.data.frame", "spec_tbl_df")
s3_register("tibble::as_tibble", "spec_tbl_df")
s3_register("testthat::compare", "spec_tbl_df")
s3_register("waldo::compare_proxy", "spec_tbl_df")
}
}
.conflicts.OK <- TRUE
s3_register <- function(generic, class, method = NULL) {
stopifnot(is.character(generic), length(generic) == 1)
stopifnot(is.character(class), length(class) == 1)
pieces <- strsplit(generic, "::")[[1]]
stopifnot(length(pieces) == 2)
package <- pieces[[1]]
generic <- pieces[[2]]
if (is.null(method)) {
method <- get(paste0(generic, ".", class), envir = parent.frame())
}
stopifnot(is.function(method))
if (package %in% loadedNamespaces()) {
registerS3method(generic, class, method, envir = asNamespace(package))
}
# Always register hook in case package is later unloaded & reloaded
setHook(
packageEvent(package, "onLoad"),
function(...) {
registerS3method(generic, class, method, envir = asNamespace(package))
}
)
}
|
/scratch/gouwar.j/cran-all/cranData/vroom/R/zzz.R
|
library(data.table)
x <- fread(file, sep = "\t", quote = "", strip.white = FALSE, na.strings = NULL)
print(x)
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[X1 == "helpless_sheep", ]
e <- x[ , .(mean(nchar(X2))), by = X1]
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_character-long/data.table-data.table.R
|
args <- commandArgs(trailingOnly = TRUE)
rows <- as.integer(args[[1]])
cols <- as.integer(args[[2]])
output <- args[[3]]
set.seed(42)
RNGversion("3.5.3")
library(vroom)
# We want ~ 1000 rows to filter
num_levels <- 5
levels <- c("helpless_sheep", gen_name(num_levels - 1))
filt_p <- 1000 / rows
# The prob for the rest should just be evenly spaced
rest_p <- rep((1 - filt_p) / (num_levels - 1), num_levels - 1)
col_types <- stats::setNames(
c(list(
col_factor(levels = levels, prob = c(filt_p, rest_p))),
rep(list(col_character()), cols - 1)
), make.names(seq_len(cols)))
data <- gen_tbl(rows, cols, col_types = col_types)
vroom_write(data, output, "\t")
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_character-long/input.R
|
({})
x <- read.delim(file, quote = "", na.strings = NULL, stringsAsFactors = FALSE)
print(head(x, 10))
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[x$X1 == "helpless_sheep", ]
e <- tapply(x$X2, x$X1, function(x) mean(nchar(x)))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_character-long/read.delim-base.R
|
({ library(readr); library(dplyr) })
x <- read_tsv(file, trim_ws = FALSE, quote = "", na = character())
print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, X1 == "helpless_sheep")
e <- group_by(x, X1) %>% summarise(avg_nchar = mean(nchar(X2)))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_character-long/readr-dplyr.R
|
library(vroom)
x <- vroom(file, trim_ws = FALSE, quote = "", escape_double = FALSE, na = character())
print(x)
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[x$X1 == "helpless_sheep", ]
e <- tapply(x$X2, x$X1, function(x) mean(nchar(x)))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_character-long/vroom-base.R
|
({ library(vroom); library(dplyr) })
x <- vroom(file, trim_ws = FALSE, quote = "", escape_double = FALSE, na = character())
print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, X1 == "helpless_sheep")
e <- group_by(x, X1) %>% summarise(avg_nchar = mean(nchar(X2)))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_character-long/vroom-dplyr.R
|
({library(vroom); library(dplyr)})
x <- vroom(file, trim_ws = FALSE, quote = "", escape_double = FALSE, na = character(), altrep = FALSE)
print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, X1 == "helpless_sheep")
e <- group_by(x, X1) %>% summarise(avg_nchar = mean(nchar(X2)))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_character-long/vroom_no_altrep-dplyr.R
|
library(data.table)
x <- fread(file, sep = "\t", quote = "", strip.white = FALSE, na.strings = NULL)
print(x)
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[X1 == "helpless_sheep", ]
e <- x[ , .(mean(nchar(X2))), by = X1]
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_character-wide/data.table-data.table.R
|
args <- commandArgs(trailingOnly = TRUE)
rows <- as.integer(args[[1]])
cols <- as.integer(args[[2]])
output <- args[[3]]
set.seed(42)
RNGversion("3.5.3")
library(vroom)
# We want ~ 1000 rows to filter
num_levels <- 5
levels <- c("helpless_sheep", gen_name(num_levels - 1))
filt_p <- 1000 / rows
# The prob for the rest should just be evenly spaced
rest_p <- rep((1 - filt_p) / (num_levels - 1), num_levels - 1)
col_types <- stats::setNames(
c(list(
col_factor(levels = levels, prob = c(filt_p, rest_p))),
rep(list(col_character()), cols - 1)
), make.names(seq_len(cols)))
data <- gen_tbl(rows, cols, col_types = col_types)
vroom_write(data, output, "\t")
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_character-wide/input.R
|
({})
x <- read.delim(file, quote = "", na.strings = NULL, stringsAsFactors = FALSE)
print(head(x, 10))
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[x$X1 == "helpless_sheep", ]
e <- tapply(x$X2, x$X1, function(x) mean(nchar(x)))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_character-wide/read.delim-base.R
|
({ library(readr); library(dplyr) })
x <- read_tsv(file, trim_ws = FALSE, quote = "", na = character())
print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, X1 == "helpless_sheep")
e <- group_by(x, X1) %>% summarise(avg_nchar = mean(nchar(X2)))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_character-wide/readr-dplyr.R
|
library(vroom)
x <- vroom(file, trim_ws = FALSE, quote = "", escape_double = FALSE, na = character())
print(x)
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[x$X1 == "helpless_sheep", ]
e <- tapply(x$X2, x$X1, function(x) mean(nchar(x)))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_character-wide/vroom-base.R
|
({ library(vroom); library(dplyr) })
x <- vroom(file, trim_ws = FALSE, quote = "", escape_double = FALSE, na = character())
print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, X1 == "helpless_sheep")
e <- group_by(x, X1) %>% summarise(avg_nchar = mean(nchar(X2)))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_character-wide/vroom-dplyr.R
|
({library(vroom); library(dplyr)})
x <- vroom(file, trim_ws = FALSE, quote = "", escape_double = FALSE, na = character(), altrep = FALSE)
print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, X1 == "helpless_sheep")
e <- group_by(x, X1) %>% summarise(avg_nchar = mean(nchar(X2)))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_character-wide/vroom_no_altrep-dplyr.R
|
library(data.table)
x <- fread(file, sep = "\t", quote = "", strip.white = FALSE, na.strings = NULL)
print(x)
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[X1 > 3, ]
e <- x[ , .(mean(X1)), by = as.integer(X2)]
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-long/data.table-data.table.R
|
args <- commandArgs(trailingOnly = TRUE)
rows <- as.integer(args[[1]])
cols <- as.integer(args[[2]])
output <- args[[3]]
set.seed(42)
RNGversion("3.5.3")
data <- vroom::gen_tbl(rows, cols, col_types = strrep("d", cols))
vroom::vroom_write(data, output, "\t")
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-long/input.R
|
({})
x <- read.delim(file, quote = "", na.strings = NULL, stringsAsFactors = FALSE)
print(head(x, 10))
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[x$X1 > 3, ]
e <- tapply(x$X1, as.integer(x$X2), mean)
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-long/read.delim-base.R
|
({ library(readr); library(dplyr) })
x <- read_tsv(file, trim_ws = FALSE, quote = "", na = character())
print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, X1 > 3)
e <- group_by(x, as.integer(X2)) %>% summarise(avg_X1 = mean(X1))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-long/readr-dplyr.R
|
library(vroom)
x <- vroom(file, trim_ws = FALSE, quote = "", escape_double = FALSE, na = character())
print(x)
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[x$X1 > 3, ]
e <- tapply(x$X1, as.integer(x$X2), mean)
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-long/vroom-base.R
|
({ library(vroom); library(dplyr) })
x <- vroom(file, trim_ws = FALSE, quote = "", escape_double = FALSE, na = character())
print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, X1 > 3)
e <- group_by(x, as.integer(X2)) %>% summarise(avg_X1 = mean(X1))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-long/vroom-dplyr.R
|
library(vroom)
x <- vroom(file, trim_ws = FALSE, quote = "", escape_double = FALSE, na = character(), altrep = FALSE)
print(x)
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[x$X1 > 3, ]
e <- tapply(x$X1, as.integer(x$X2), mean)
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-long/vroom_no_altrep-base.R
|
({ library(vroom); library(dplyr) })
x <- vroom(file, trim_ws = FALSE, quote = "", escape_double = FALSE, na = character(), altrep = FALSE)
print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, X1 > 3)
e <- group_by(x, as.integer(X2)) %>% summarise(avg_X1 = mean(X1))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-long/vroom_no_altrep-dplyr.R
|
library(data.table)
x <- fread(file, sep = "\t", quote = "", strip.white = FALSE, na.strings = NULL)
print(x)
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[X1 > 3, ]
e <- x[ , .(mean(X1)), by = as.integer(X2)]
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-wide/data.table-data.table.R
|
args <- commandArgs(trailingOnly = TRUE)
rows <- as.integer(args[[1]])
cols <- as.integer(args[[2]])
output <- args[[3]]
set.seed(42)
RNGversion("3.5.3")
data <- vroom::gen_tbl(rows, cols, col_types = strrep("d", cols))
vroom::vroom_write(data, output, "\t")
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-wide/input.R
|
({})
x <- read.delim(file, quote = "", na.strings = NULL, stringsAsFactors = FALSE)
print(head(x, 10))
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[x$X1 > 3, ]
e <- tapply(x$X1, as.integer(x$X2), mean)
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-wide/read.delim-base.R
|
({ library(readr); library(dplyr) })
x <- read_tsv(file, trim_ws = FALSE, quote = "", na = character())
print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, X1 > 3)
e <- group_by(x, as.integer(X2)) %>% summarise(avg_X1 = mean(X1))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-wide/readr-dplyr.R
|
library(vroom)
x <- vroom(file, trim_ws = FALSE, quote = "", escape_double = FALSE, na = character())
print(x)
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[x$X1 > 3, ]
e <- tapply(x$X1, as.integer(x$X2), mean)
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-wide/vroom-base.R
|
({ library(vroom); library(dplyr) })
x <- vroom(file, trim_ws = FALSE, quote = "", escape_double = FALSE, na = character())
print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, X1 > 3)
e <- group_by(x, as.integer(X2)) %>% summarise(avg_X1 = mean(X1))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-wide/vroom-dplyr.R
|
library(vroom)
x <- vroom(file, trim_ws = FALSE, quote = "", escape_double = FALSE, na = character(), altrep = FALSE)
print(x)
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[x$X1 > 3, ]
e <- tapply(x$X1, as.integer(x$X2), mean)
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-wide/vroom_no_altrep-base.R
|
({ library(vroom); library(dplyr) })
x <- vroom(file, trim_ws = FALSE, quote = "", escape_double = FALSE, na = character(), altrep = FALSE)
print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, X1 > 3)
e <- group_by(x, as.integer(X2)) %>% summarise(avg_X1 = mean(X1))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/all_numeric-wide/vroom_no_altrep-dplyr.R
|
({})
x <- read.fwf(file, widths = fields$width, col.names = fields$col_names)
print(head(x, 10))
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[x$PERSONS == "06", ]
e <- tapply(x$TOTPUMA5, x$STATE, mean)
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/fwf/read.delim-base.R
|
({ library(readr); library(dplyr) })
x <- read_fwf(file, fields, col_types = types)
print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, PERSONS == "06")
e <- group_by(x, STATE) %>% summarise(avg_TOTPUM5 = mean(TOTPUMA5))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/fwf/readr-dplyr.R
|
library(vroom)
x <- vroom_fwf(file, fields, col_types = types)
print(head(x, 10))
a <- head(x)
b <- tail(x)
c <- x[sample(NROW(x), 100), ]
d <- x[x$PERSONS == "06", ]
e <- tapply(x$TOTPUMA5, x$STATE, mean)
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/fwf/vroom-base.R
|
({ library(vroom); library(dplyr) })
x <- vroom_fwf(file, fields, col_types = types)
print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, PERSONS == "06")
e <- group_by(x, STATE) %>% summarise(avg_TOTPUM5 = mean(TOTPUMA5))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/fwf/vroom-dplyr.R
|
({library(vroom); library(dplyr)})
x <- vroom_fwf(file, fields, col_types = types, altrep_opts = FALSE)
print(x)
a <- head(x)
b <- tail(x)
c <- sample_n(x, 100)
d <- filter(x, PERSONS == "06")
e <- group_by(x, STATE) %>% summarise(avg_TOTPUM5 = mean(TOTPUMA5))
|
/scratch/gouwar.j/cran-all/cranData/vroom/inst/bench/fwf/vroom_no_altrep-dplyr.R
|