#' Track harmonic frequency contour
#'
#' \code{track_harmonic} tracks the frequency contour of the dominant harmonic.
#' @usage track_harmonic(wave, f, wl = 512, wn = "hanning", ovlp = 0, fftw = FALSE, at = NULL,
#' tlim = NULL, threshold = 10, bandpass = NULL, clip = NULL, plot = TRUE,
#' xlab = "Times (s)", ylab = "Frequency (kHz)", ylim = c(0, f/2000),
#' adjust.wl = FALSE, dfrq = FALSE, ...)
#' @param wave A 'wave' object produced by \code{\link[tuneR]{readWave}} or similar functions.
#' @param f Sampling frequency of the wave object (in Hz). Does not need to be specified if embedded in wave.
#' @param wl A numeric vector of length 1 specifying the window length for the FFT, default
#' is 512.
#' @param wn Character vector of length 1 specifying window name. Default is
#' "hanning". See function \code{\link[seewave]{ftwindow}} for more options. This is used for calculating the frequency spectrum (using \code{\link[seewave]{meanspec}}) and producing the spectrogram (using \code{\link[seewave]{spectro}}, if \code{plot = TRUE}).
#' @param ovlp Numeric vector of length 1 specifying \% of overlap between two
#' consecutive time windows, as in \code{\link[seewave]{spectro}}. Default is 0.
#' @param fftw if TRUE calls the function FFT of the library fftw. See Notes of the \code{\link[seewave]{spectro}} function.
#' Default is \code{FALSE}.
#' @param at Time position where the harmonic frequency contour has to be computed (in seconds). Default is \code{NULL}.
#' @param tlim time range in which to measure frequency contours. Default is \code{NULL} (which means it will measure
#' across the entire wave object).
#' @param threshold Amplitude threshold (\%) for dominant frequency detection. Default is 10.
#' @param bandpass A numeric vector of length 2 for the lower and upper limits of a frequency bandpass filter (in kHz).
#' @param clip A numeric value to select dominant frequency values according to their amplitude in reference to a maximal value of 1 for the whole signal (has to be >0 & < 1).
#' @param plot Logical, if TRUE plots the dominant frequency against time. Default is \code{TRUE}.
#' @param xlab Label of the time axis.
#' @param ylab Label of the frequency axis.
#' @param ylim A numeric vector of length 2 for the frequency limit of
#' the spectrogram (in kHz), as in \code{\link[seewave]{spectro}}. Default is c(0, f/2000).
#' @param adjust.wl Logical. If \code{TRUE} 'wl' (window length) is reset to the number of samples in the
#' selection when the selection has fewer samples than 'wl'. Default is \code{FALSE}.
#' @param dfrq Logical. If \code{TRUE} seewave's \code{\link[seewave]{dfreq}} is used instead. Default is \code{FALSE}.
#' @param ... Additional arguments to be passed to the plotting function.
#' @seealso \code{\link{track_freq_contour}} for tracking frequencies iteratively on selections tables.
#' @export
#' @name track_harmonic
#' @details This is a modified version of seewave's \code{\link[seewave]{dfreq}} function that allows tracking the frequency
#' contour of a dominant harmonic even when the highest amplitude jumps between harmonics. The arguments and default values of the
#' original \code{\link[seewave]{dfreq}} function have been kept unchanged to facilitate switching between the two functions.
#'
#' @references {
#' Araya-Salas, M., & Smith-Vidaurre, G. (2017). warbleR: An R package to streamline analysis of animal acoustic signals. Methods in Ecology and Evolution, 8(2), 184-191.
#' }
#' @author Jerome Sueur, modified by Marcelo Araya-Salas (\email{marcelo.araya@@ucr.ac.cr})
# last modification on feb-22-2018 (MAS)
track_harmonic <- function(wave, f, wl = 512, wn = "hanning", ovlp = 0, fftw = FALSE,
at = NULL, tlim = NULL, threshold = 10, bandpass = NULL,
clip = NULL, plot = TRUE, xlab = "Times (s)", ylab = "Frequency (kHz)",
ylim = c(0, f / 2000), adjust.wl = FALSE, dfrq = FALSE, ...) {
#### set arguments from options
# get function arguments
argms <- methods::formalArgs(track_harmonic)
# get warbleR options
opt.argms <- if (!is.null(getOption("warbleR"))) getOption("warbleR") else SILLYNAME <- 0
# remove options not as default in call and not in function arguments
opt.argms <- opt.argms[!sapply(opt.argms, is.null) & names(opt.argms) %in% argms]
# get arguments set in the call
call.argms <- as.list(base::match.call())[-1]
# remove arguments in options that are in call
opt.argms <- opt.argms[!names(opt.argms) %in% names(call.argms)]
# set options left
if (length(opt.argms) > 0) {
for (q in seq_len(length(opt.argms))) {
assign(names(opt.argms)[q], opt.argms[[q]])
}
}
if (length(inputw(wave = wave, f = f)$w) < wl) {
if (adjust.wl) {
wl <- length(wave)
} else {
stop("number of samples lower than 'wl' (i.e. no enough samples) \n check 'adjust.wl' argument")
}
}
if (!is.null(at) && ovlp != 0) {
stop("The 'ovlp' argument cannot bue used in conjunction with the arguement 'at'.")
}
if (!is.null(clip)) {
if (clip <= 0 | clip >= 1) {
stop("'clip' value has to be superior to 0 and inferior to 1")
}
}
input <- inputw(wave = wave, f = f)
wave <- input$w
f <- input$f
rm(input)
if (!is.null(tlim)) {
wave <- cutw(wave, f = f, from = tlim[1], to = tlim[2])
}
if (!is.null(threshold)) {
wave <- afilter(
wave = wave, f = f, threshold = threshold,
plot = FALSE
)
}
n <- nrow(wave)
if (!is.null(at)) {
step <- at * f
N <- length(step)
if (step[1] <= 0) {
step[1] <- 1
}
if (step[N] + (wl / 2) >= n) {
step[N] <- n - wl
}
x <- c(0, at, n / f)
} else {
step <- seq(1, n - wl, wl - (ovlp * wl / 100))
N <- length(step)
x <- seq(0, n / f, length.out = N)
}
step <- round(step)
y1 <- seewave::stdft(
wave = wave, f = f, wl = wl, zp = 0, step = step,
wn = wn
)
if (!is.null(bandpass)) {
if (length(bandpass) != 2) {
stop("'The argument 'bandpass' should be a numeric vector of length 2'")
}
if (bandpass[1] > bandpass[2]) {
stop("The first element of 'bandpass' has to be inferior to the second element, i.e. bandpass[1] < bandpass[2]")
}
if (bandpass[1] == bandpass[2]) {
stop("The limits of the bandpass have to be different")
}
}
# lowlimit <- round((wl * bandpass[1])/f)
# upperlimit <- round((wl * bandpass[2])/f)
# y1[-(lowlimit:upperlimit), ] <- 0
# freq values for each freq window (using mid point of each window)
freq.val <- ((1:nrow(y1) * f / wl) - (f / (wl * 2)))
y1[freq.val < bandpass[1] | freq.val > bandpass[2]] <- 0
if (dfrq) {
maxi <- apply(y1, MARGIN = 2, FUN = max)
y2 <- apply(y1, MARGIN = 2, FUN = which.max)
} else {
# find peaks close to first dom freq
maxi <- NULL
y2 <- NULL
for (i in seq_len(ncol(y1)))
{
# standardize z between 0-1
z <- y1[, i] / max(y1[, i])
z <- ifelse(z > threshold / 100, z, 0)
# choose the maximum amplitude for the first time window
if (i == 1) {
maxi[i] <- max(z)
y2[i] <- which.max(z)
} else {
pks <- pracma::findpeaks(z, npeaks = 5, sortstr = TRUE)[, 1:3]
if (is.vector(pks)) pks <- matrix(pks, ncol = 3)
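# overwrite the third column with the distance (in frequency bins) between each peak
# position (column 2) and the peak chosen in the previous time window; the closest peak is kept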
pks[, 3] <- abs(pks[, 2] - y2[i - 1])
maxi[i] <- pks[which.min(pks[, 3]), 1]
y2[i] <- pks[which.min(pks[, 3]), 2]
}
}
}
y2[which(maxi == 0)] <- NA
if (!is.null(clip)) {
maxi <- apply(y1, MARGIN = 2, FUN = max)
y2[which(maxi < clip)] <- NA
}
# y <- (f * y2)/(1000 * wl) - f/(1000 * wl)
y <- freq.val[y2]
if (!is.null(at)) {
y <- c(NA, y, NA)
}
y <- y / 1000
if (plot) {
plot(
x = x, y = y, xaxs = "i", xlab = xlab, yaxs = "i",
ylab = ylab, ylim = ylim, ...
)
invisible(cbind(x, y))
} else {
return(cbind(x, y))
}
}
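# Usage sketch (illustration only, not part of the package): assuming the example data
# shipped with warbleR ('Phae.long1' and 'lbh_selec_table'), the contour of the dominant
# harmonic of the first selection could be tracked along these lines:
# data(list = c("Phae.long1", "lbh_selec_table"))
# writeWave(Phae.long1, file.path(tempdir(), "Phae.long1.wav"))
# wv <- read_sound_file(X = lbh_selec_table, index = 1, path = tempdir())
# trck <- track_harmonic(wave = wv, f = wv@samp.rate, wl = 512, ovlp = 50,
#   threshold = 15, plot = TRUE)
# head(trck) # matrix with time (x, in s) and frequency (y, in kHz) columns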
##############################################################################################################
#' alternative name for \code{\link{track_harmonic}}
#'
#' @keywords internal
#' @details see \code{\link{track_harmonic}} for documentation. \code{\link{track_harm}} will be deprecated in future versions.
#' @export
track_harm <- track_harmonic
# end of file: /scratch/gouwar.j/cran-all/cranData/warbleR/R/track_harmonic.R
#' Wrapper for "try" function
#'
#' \code{try_na} is a silly wrapper for the \code{\link[base]{try}} function that returns \code{NA} if an error is found. TO BE DEPRECATED IN FUTURE VERSIONS.
#' @usage try_na(expr, silent = TRUE, outFile)
#' @param expr An R expression to try.
#' @param silent Logical to control whether the report of error messages is suppressed. Default is \code{TRUE}.
#' @param outFile A connection, or a character string naming the file to print to
#' (via message2(*, file = outFile)); used only if \code{silent} is \code{FALSE}.
#' @return Returns an `NA` if any error occurs during the evaluation of an expression.
#' If not, it will return the result of the evaluation.
#' @details This is a silly wrapper on \code{\link[base]{try}} that returns an `NA` if any error occurs during the evaluation of an expression. See \code{\link[base]{try}} for details.
#' @export
#' @name try_na
#' @examples{
#' # try a function that does not exists to produce an error
#' try_na(crazy78(12))
#'
#' # try a real function (no error)
#' try_na(mean(1:5))
#' }
#'
#' @references {
#' Araya-Salas, M., & Smith-Vidaurre, G. (2017). warbleR: An R package to streamline analysis of animal acoustic signals. Methods in Ecology and Evolution, 8(2), 184-191.
#' }
#' @author Marcelo Araya-Salas (\email{marcelo.araya@@ucr.ac.cr})
# last modification on apr-17-2018 (MAS)
try_na <- function(expr, silent = TRUE, outFile) {
out <- try(expr = expr, silent = silent, outFile = outFile)
if (is(out, "try-error")) {
return(NA)
} else {
return(out)
}
}
# end of file: /scratch/gouwar.j/cran-all/cranData/warbleR/R/try_na.R
#' Plot a mosaic of spectrograms with varying display parameters
#'
#' \code{tweak_spectro} plots a mosaic of spectrograms with varying display parameters to
#' facilitate selection of display parameters
#' @usage tweak_spectro(X, length.out = 5, ovlp = 90, wl = c(100, 1000), wn = "hanning",
#' collev.min = -40, pal = "reverse.gray.colors.2", path = NULL, rm.axes = TRUE, ...)
#' @param X object of class 'selection_table', 'extended_selection_table' or data frame with a single row and columns for sound file name (sound.files), selection number (selec),
#' and start and end time of signal (start and end). Default is \code{NULL}.
#' @param length.out Numeric vector of length 1 controlling the number of sublevels of
#' the numeric arguments for which a range has been provided. Ranges are allowed for
#' 'ovlp', 'wl', and 'collev.min' arguments.
#' @param ovlp Numeric vector of length 1 or 2 specifying \% of overlap (or
#' lower/upper values the desired range) between two consecutive windows, as in
#' \code{\link[seewave]{spectro}}. Default is 90.
#' @param wl A numeric vector of length 1 or 2 specifying the window length (length 1)
#' or the lower and upper range limits of the desired window length range (length 2) for creating spectrograms.
#' Default is c(100, 1000).
#' @param wn Character vector specifying the window function names to be used. Several
#' names can be provided. See \code{\link[seewave]{ftwindow}}
#' for name options. Default is "hanning". If "all", then all window functions available are used.
#' @param collev.min A (negative) numeric vector of length 1 or 2. Determines the first argument
#' to use in 'collevels' for the internal spectrogram creating function. This replaces the
#' first element in the 'collevels' as in \code{\link[seewave]{spectro}}. Note that
#' 'collevels' is not available in this function \code{\link[warbleR]{tweak_spectro}}.
#' @param pal Color palette function for spectrogram. Default is "reverse.gray.colors.2".
#' Several palettes can be provided in a character vector. Note that, contrary to
#' other \code{warbleR} and \code{seewave} functions, the
#' palette must be provided as a character string rather than as a function. See
#' \code{\link[seewave]{spectro}} for more palettes.
#' @param path Character string containing the directory path where the sound files are located.
#' @param rm.axes Logical. If \code{TRUE} frequency and time axes are excluded. Default is \code{TRUE}.
#' @param ... Additional arguments to be passed to \code{\link{catalog}} function for customizing
#' graphical output. Check out \code{\link{catalog}} for more details.
#' @return Image files with the mosaic of spectrograms in the working directory (or 'path'). Multiple
#' image files can be produced, depending on the number of display parameter combinations.
#' @export
#' @name tweak_spectro
#' @details This function aims to simplify the selection of spectrogram parameters.
#' The function plots, for a single selection, a mosaic of spectrograms with varying
#' display parameters. For numeric arguments the upper and lower limits of a range can
#' be provided. The following arguments can take varying values:
#' \itemize{
#' \item \code{wl}: Windows length (numeric range)
#' \item \code{ovlp}: Overlap (numeric range)
#' \item \code{collev.min}: Minimum value of the color levels (numeric range)
#' \item \code{wn}: window function names (character)
#' \item \code{pal}: palette (character)
#' }
#' Outputs are similar to those of \code{\link{catalog}}. The output image files can be put together in a single pdf file with \code{\link{catalog2pdf}}.
#' We recommend using low resolution (~60-100) and smaller dimensions (width & height < 10) if
#' aiming to generate pdfs (otherwise pdfs could be pretty big).
#' @seealso \code{\link{catalog2pdf}}
#' @examples
#' \dontrun{
#' # Save to temporary working directory
#'
#' # save sound file examples
#' data(list = c("Phae.long1", "lbh_selec_table"))
#' writeWave(Phae.long1, file.path(tempdir(), "Phae.long1.wav"))
#'
#' # variable collevels
#' tweak_spectro(X = lbh_selec_table, wl = 164, ovlp = c(90), wn = c("flattop"),
#' length.out = 16, nrow = 4, ncol = 4, width = 20, height = 11.3, rm.axes = TRUE,
#' cex = 1, box = F, collev.min = c(-20, -150), path = tempdir(), flim = c(0, 10))
#'
#' # variable overlap and wn
#' tweak_spectro(X = lbh_selec_table, wl = 164, ovlp = c(50, 90),
#' wn = c("hanning", "hamming", "rectangle", "bartlett", "blackman", "flattop"),
#' length.out = 7, nrow = 6, ncol = 7, width = 20, height = 11.3, rm.axes = TRUE,
#' cex = 1, box = F, path = tempdir(), flim = c(0, 10))
#'
#' # variable wl and wn
#' tweak_spectro(X = lbh_selec_table, wl = c(100, 1000), ovlp = c(50, 90), wn = "all",
#' length.out = 5, nrow = 10, ncol = 14, width = 20, height = 11.3, rm.axes = TRUE,
#' cex = 0.7, path = tempdir(), flim = c(0, 10))
#'
#' # variable wl, collev.min and wn
#' tweak_spectro(X = lbh_selec_table, wl = c(100, 1000), ovlp = 90,
#' wn = c("hanning", "hamming", "rectangle"), collev.min = c(-110, -25),
#' length.out = 3, nrow = 10, ncol = 14, width = 20, height = 11.3, rm.axes = TRUE,
#' cex = 0.7, path = tempdir(), flim = c(0, 10))
#'
#' # variable wl, wn and pal
#' tweak_spectro(X = lbh_selec_table, wl = c(100, 1000), ovlp = 90,
#' wn = c("hanning", "hamming", "rectangle"),
#' pal = c("reverse.gray.colors.2", "reverse.topo.colors",
#' "reverse.terrain.colors", "reverse.cm.colors"),
#' length.out = 4, nrow = 5, ncol = 10, width = 20, height = 11.3,
#' rm.axes = TRUE, cex = 0.7, lab.mar = 2, path = tempdir(), flim = c(0, 10))
#'
#' # wl, wn and pal
#' tweak_spectro(X = lbh_selec_table, wl = c(100, 1000), ovlp = 90,
#' wn = c("hanning", "hamming", "rectangle"),
#' pal = c("reverse.gray.colors.2", "reverse.topo.colors",
#' "reverse.terrain.colors", "reverse.cm.colors"),
#' length.out = 4, nrow = 5, ncol = 10, width = 20, height = 11.3, rm.axes = TRUE,
#' cex = 0.7, group.tag = "wn", spec.mar = 0.4, lab.mar = 0.8, box = FALSE,
#' tag.pal = list(reverse.cm.colors), path = tempdir(), flim = c(0, 10))
#'
#' # check this folder
#' tempdir()
#' }
#' @author Marcelo Araya-Salas (\email{marcelo.araya@@ucr.ac.cr})
#last modification on mar-08-2018 (MAS)
tweak_spectro <- function(X, length.out = 5, ovlp = 90, wl = c(100, 1000),
wn = "hanning", collev.min = -40,
pal = "reverse.gray.colors.2", path = NULL, rm.axes = TRUE, ...)
{
#### set arguments from options
# get function arguments
argms <- methods::formalArgs(tweak_spectro)
# get warbleR options
opt.argms <- if(!is.null(getOption("warbleR"))) getOption("warbleR") else SILLYNAME <- 0
opt.argms <- opt.argms[which(names(opt.argms) == "path")]
# remove options not as default in call and not in function arguments
opt.argms <- opt.argms[!sapply(opt.argms, is.null) & names(opt.argms) %in% argms]
# get arguments set in the call
call.argms <- as.list(base::match.call())[-1]
# remove arguments in options that are in call
opt.argms <- opt.argms[!names(opt.argms) %in% names(call.argms)]
# set options left
if (length(opt.argms) > 0)
for (q in seq_len(length(opt.argms)))
assign(names(opt.argms)[q], opt.argms[[q]])
# stop if pal is function
if (is.function(pal)) stop2("'pal' should be a character vector")
## reset parameters
# only seewave spectros
fast.spec <- FALSE
if (wn[1] == "all") wn <- c("bartlett", "blackman", "flattop", "hamming", "hanning", "rectangle")
#if X is not a data frame
if (!any(is.data.frame(X), is_selection_table(X), is_extended_selection_table(X))) stop2("X is not of a class 'data.frame', 'selection_table' or 'extended_selection_table'")
if (nrow(X) > 1){
X <- X[1, , drop = FALSE]
warning2(x = "Data frame provided has more than 1 selection (row), only the first one was used")
}
if (length.out < 2) stop2("'length.out' should be equal or higher than 2")
exp.cols <- c("ovlp", "wl", "wn", "collev.min", "pal")[which(c(length(ovlp), length(wl), length(wn), length(collev.min), length(pal)) > 1)]
# expand arguments
if (length(wl) > 1)
wl <- seq(wl[1], wl[2], length.out = length.out)
if (length(ovlp) > 1)
ovlp <- seq(ovlp[1], ovlp[2], length.out = length.out)
if (length(collev.min) > 1)
collev.min <- seq(collev.min[1], collev.min[2], length.out = length.out)
if (is_extended_selection_table(X)) X.orig <- X
# Expand data frame
X <- suppressWarnings(data.frame(X, expand.grid(ovlp = ovlp, wl = wl,
collev.min = collev.min, wn = wn, pal = pal), stringsAsFactors = FALSE))
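# each row of 'X' now corresponds to one combination of the varying display parameters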
X$ovlp <- round(X$ovlp, 0)
X$wl <- round(X$wl, 0)
X$collev.min <- round(X$collev.min, 0)
X$selec2 <- X$selec
X$selec <- 1:nrow(X)
X$lbs <- ""
if (length(exp.cols) > 0)
for(i in seq_len(length(exp.cols)))
X$lbs <- paste(X$lbs, exp.cols[i], "=", X[ , exp.cols[i]], " ")
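# wrap labels longer than 'co' characters: a line break is inserted at the first space
# after that position so that panel labels fit in the catalog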
co <- 32
if (max(nchar(X$lbs)) > co) {
empty_spc <- sapply(gregexpr(" ", substr(X$lbs, co, 1000)), "[[", 1) + co
for(i in 1:nrow(X)){
if (nchar(X$lbs[i]) > co)
substring(X$lbs[i], first = empty_spc[i], last = empty_spc[i] + 1) <- paste0("\n", substr(X$lbs[i], empty_spc[i], empty_spc[i] + 1))
}
}
X$lbs <- gsub(" \n$| $| $|^ |^ ", "", X$lbs)
if (exists("X.orig"))
{
attributes(X)$check.results <- do.call(rbind, lapply(1:nrow(X), function(x) attributes(X.orig)$check.results[1, ]))
attributes(X)$check.results$selec <- 1:nrow(X)
attributes(X)$wave.objects <- attributes(X.orig)$wave.objects[1]
attributes(X)$by.song <- attributes(X.orig)$by.song
class(X) <- class(X.orig)
}
catalog(X = X, ovlp = X$ovlp, wl = X$wl, collevels = "collev.min", title = paste(X$sound.files[1], X$selec2[1]), rm.axes = rm.axes, img.suffix = "tweak_spectro",
wn = X$wn, pal = "pal.list", path = path, labels = c("lbs"), ...)
return(NULL)
}
##############################################################################################################
#' alternative name for \code{\link{tweak_spectro}}
#'
#' @keywords internal
#' @details see \code{\link{tweak_spectro}} for documentation. \code{\link{spec_param}} will be deprecated in future versions.
#' @export
spec_param <- tweak_spectro
# end of file: /scratch/gouwar.j/cran-all/cranData/warbleR/R/tweak_spectro.R
#' warbleR: A package to streamline bioacoustic analysis
#'
#' warbleR is intended to facilitate the analysis of the structure of animal acoustic signals in R. Users can collect open-access avian recordings or enter their own data into a workflow that facilitates spectrographic visualization and measurement of acoustic parameters. warbleR makes use of the fundamental sound analysis tools of the seewave package, and offers new tools for acoustic structure analysis. These tools are available for batch analysis of acoustic signals.
#'
#' The main features of the package are:
#' \itemize{
#' \item The use of loops to apply tasks through acoustic signals referenced in a selection table
#' \item The production of images in the working folder with spectrograms that allow users to organize data and verify acoustic analyses
#' }
#'
#' The package offers functions to:
#' \itemize{
#' \item Explore and download Xeno Canto recordings
#' \item Explore, organize and manipulate multiple sound files
#' \item Detect signals automatically (in frequency and time)
#' \item Create spectrograms of complete recordings or individual signals
#' \item Run different measures of acoustic signal structure
#' \item Evaluate the performance of measurement methods
#' \item Catalog signals
#' \item Characterize different structural levels in acoustic signals
#' \item Statistical analysis of duet coordination
#' \item Consolidate databases and annotation tables
#' }
#'
#' Most of the functions allow the parallelization of tasks, which distributes the tasks among several processors to improve computational efficiency. Tools to evaluate the performance of the analysis at each step are also available. In addition, warbleR satisfies the need for rigorous open source bioacoustic analysis, which facilitates its use in research and the development of additional custom analyses.
#'
#' The warbleR package offers four overarching categories of
#' functions:
#'
#' @section Obtaining animal vocalization data:
#'
#' \code{\link{query_xc}}: Download recordings and/or metadata from 'Xeno-Canto'
#'
#' \code{\link{sim_songs}}: Simulate animal vocalizations
#'
#'
#' @section Managing sound files:
#'
#' \code{\link{read_wave}}: Read wave files into 'wave' objects
#'
#' \code{\link{read_sound_file}}: Read sound files into 'wave' objects
#'
#' \code{\link{selection_table}}: Create 'selection_table' class objects
#'
#' \code{\link{mp32wav}}: Convert several .mp3 files in working directory to .wav
#' format
#'
#' \code{\link{check_sels}}: Check whether selections can be read by subsequent functions
#'
#' \code{\link{check_wavs}}: Check whether .wav files can be read by subsequent
#' functions and the minimum window length ("wl" argument) that can be used
#'
#' \code{\link{fix_wavs}}: Fix .wav files so they can be read by other functions
#'
#' \code{\link{split_wavs}}: Split .wav fies in several sound files
#'
#' \code{\link{resample_est_waves}}: Resample wave objects in extended selection tables
#'
#' \code{\link{duration_wavs}}: Determine the duration of sound files
#'
#' \code{\link{cut_sels}}: Cut selections from a selection table into individual sound files
#'
#' \code{\link{remove_silence}}: Remove silence segments from wave files
#'
#' \code{\link{remove_channels}}: Remove channels in wave files
#'
#' \code{\link{consolidate}}: Consolidate sound files into a single folder
#'
#' \code{\link{selection_table}}: Create double-checked and self-contained selection tables
#'
#' \code{\link{fix_extended_selection_table}}: Fix attributes of extended selection tables
#'
#' @section Exploring/analyzing signal structure:
#'
#' \code{\link{auto_detec}}: Automatically detect start and
#' end of acoustic signals
#'
#' \code{\link{tailor_sels}}: Interactive view of spectrograms to tailor start and end of selections
#'
#' \code{\link{sig2noise}}: Measure signal-to-noise ratio across multiple files
#'
#' \code{\link{track_freq_contour}}: Create spectrograms to visualize frequency
#' measurements
#'
#' \code{\link{filter_sels}}: Filter selection data frames based on filtered image files
#'
#' \code{\link{freq_range}}: Detect frequency range iteratively from signals in a selection table
#'
#' \code{\link{freq_range_detec}}: Detect frequency range in a Wave object
#'
#' \code{\link{spectro_analysis}}: Measure acoustic parameters on selected acoustic
#' signals
#'
#' \code{\link{mfcc_stats}}: Calculate descriptive statistics on Mel-frequency cepstral coefficients
#'
#' \code{\link{song_analysis}}: Measure acoustic parameters at higher levels of organization
#'
#' \code{\link{cross_correlation}}: Pairwise cross-correlation of multiple signals
#'
#' \code{\link{gaps}}: Measure gap durations
#'
#' \code{\link{freq_ts}}: Extract the frequency contour of signals as a time series
#'
#' \code{\link{find_peaks}}: Find peaks in cross-correlation scores from \code{\link{cross_correlation}}
#'
#' \code{\link{freq_DTW}}: Calculate acoustic dissimilarity using dynamic time warping
#' on frequency contours
#'
#' \code{\link{wpd_features}}: Measure wavelet packet decomposition features
#'
#' \code{\link{compare_methods}}: Produce graphs to visually assess performance of acoustic
#' distance measurements
#'
#' \code{\link{test_coordination}}: Assess statistical significance of singing coordination
#'
#' \code{\link{overlapping_sels}}: Find selections that overlap in time within a given sound file
#'
#' \code{\link{track_harmonic}}: Track harmonic frequency contour
#'
#' @section Graphical outputs:
#'
#' \code{\link{map_xc}}: Create maps to visualize the geographic spread of 'Xeno-Canto' recordings
#'
#' \code{\link{catalog}}: Produce a vocalization catalog with spectrograms in an array with
#' several rows and columns
#'
#' \code{\link{catalog2pdf}}: Combine catalog images to single pdf files
#'
#' \code{\link{plot_coordination}}: Create graphs of coordinated singing
#'
#' \code{\link{color_spectro}}: Highlight spectrogram regions
#'
#' \code{\link{full_spectrograms}}: Produce spectrograms of whole recordings split into
#' multiple rows
#'
#' \code{\link{full_spectrogram2pdf}}: Combine \code{\link{full_spectrograms}} images into single pdf files
#'
#' \code{\link{spectrograms}}: Create spectrograms of selections
#'
#' \code{\link{snr_spectrograms}}: Create spectrograms to visualize margins over which
#' noise will be measured by sig2noise
#'
#' \code{\link{phylo_spectro}}: Add spectrograms onto phylogenetic trees
#'
#' \code{\link{tweak_spectro}}: Visually inspect effect of different settings for creating (and improving) spectrograms
#'
#' @import NatureSounds
#' @import rjson
#' @import RCurl
#' @import pbapply
#' @import tuneR
#' @import seewave
#' @import fftw
#' @import graphics
#' @import grDevices
#' @import utils
#' @import parallel
#' @importFrom cli style_bold style_italic make_ansi_style num_ansi_colors
#' @importFrom testthat capture_output_lines
#' @importFrom knitr kable
#' @importFrom methods formalArgs new is slotNames slot
#' @importFrom dtw dtwDist
#' @importFrom Rcpp evalCpp
#' @importFrom Rcpp sourceCpp
#' @importFrom stats cor dist aggregate approx ave prcomp time ts predict smooth.spline complete.cases spline weighted.mean na.omit var sd rlnorm fft
#' @importClassesFrom tuneR Wave
#' @author Marcelo Araya-Salas & Grace Smith Vidaurre
#'
#' Maintainer: Marcelo Araya-Salas (\email{marcelo.araya@@ucr.ac.cr})
#'
#' @docType package
#' @name warbleR
#' @details License: GPL (>= 2)
#' @keywords internal
"_PACKAGE"
NULL
# end of file: /scratch/gouwar.j/cran-all/cranData/warbleR/R/warbleR-package.R
#' Setting warbleR options
#'
#' \code{warbleR_options} sets global parameters for warbleR functions
#' @usage warbleR_options(reset = FALSE, ...)
#' @param reset Logical. If \code{TRUE} then all global parameters are removed. Default is \code{FALSE}.
#' @param ... Arguments in `parameter = value` form, or a list of tagged values. The tags (i.e. parameters)
#' must come from the list of parameters described below.
#' @return When parameters are set by warbleR_options, their former values are
#' returned in an invisible named list. Such a list can be passed as an argument to
#' \code{warbleR_options} to restore the parameter values. If the function is called with no arguments the current option values are printed.
#' @export
#' @name warbleR_options
#' @details The function aims to simplify the use of parameters that apply to many warbleR functions (i.e. global parameters)
#' by setting a default value that will be used by any function in downstream
#' analyses. Tags that are set with warbleR_options will be used by the functions
#' that share those arguments. However, if an argument is set within a function call
#' it will overwrite the values set by warbleR_options. Hence, the functions remain
#' 'flexible' as their parameters can also be modified 'on the fly'. The following tags are available:
#' \itemize{
#' \item \code{bp}: Numeric vector of length 2 giving the lower and upper limits of a
#' frequency bandpass filter (in kHz).
#' \item \code{collevels}: A numeric vector of length 3. Specifies levels to partition the
#' amplitude range of the spectrogram (in dB) as in \code{\link[seewave]{spectro}}. The more levels the higher the
#' resolution of the spectrogram. The lower the first value the darker the spectrograms.
#' \item \code{flim}: A numeric vector of length 2 for the frequency limit in kHz of
#' the spectrogram, as in \code{\link[seewave]{spectro}}.
#' \item \code{it}: A character vector of length 1 giving the image type to be used. Currently only
#' "tiff" and "jpeg" are admitted.
#' \item \code{osci}: Logical argument to add an oscillogram underneath spectrogram, as
#' in \code{\link[seewave]{spectro}}.
#' \item \code{pal}: A color palette function to be used to assign colors in the
#' plot, as in \code{\link[seewave]{spectro}}.
#' \item \code{parallel}: Numeric. Controls whether parallel computing is applied.
#' It specifies the number of cores to be used in iterative functions.
#' \item \code{pb}: Logical argument to control whether progress bar is used.
#' \item \code{res}: Numeric argument of length 1. Controls image resolution in all image creating functions.
#' \item \code{wav.path}: Character string containing the directory path where the
#' sound files are located. Used as 'path' in all functions in which sound files are read.
#' \item \code{wl}: A numeric vector of length 1 specifying the window length for creating spectrogram (either for plotting or for measuring spectrogram parameters).
#' \item \code{wn}: Character vector of length 1 specifying the window name for creating spectrogram (either for plotting or for measuring spectrogram parameters). See function \code{\link[seewave]{ftwindow}} for options.
#' }
#' @examples
#' {
#' # load data and save in temporary working directory
#' data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4", "lbh_selec_table"))
#' writeWave(Phae.long1, file.path(tempdir(), "Phae.long1.wav"))
#' writeWave(Phae.long2, file.path(tempdir(), "Phae.long2.wav"))
#' writeWave(Phae.long3, file.path(tempdir(), "Phae.long3.wav"))
#' writeWave(Phae.long4, file.path(tempdir(), "Phae.long4.wav"))
#'
#' # sig2noise with progress bar (by default is TRUE)
#' a <- sig2noise(X = lbh_selec_table, mar = 0.1, path = tempdir())
#'
#' # set progress bar to FALSE with warbleR_options
#' warbleR_options(pb = FALSE, path = tempdir())
#'
#' # sig2noise without progress bar
#' a <- sig2noise(X = lbh_selec_table, mar = 0.1)
#'
#' # sig2noise with progress bar by setting it within the function call (overwriting options)
#' a <- sig2noise(X = lbh_selec_table, pb = TRUE, mar = 0.1)
#'
#' # sig2noise without progress bar using warbleR_options setting again
#' a <- sig2noise(X = lbh_selec_table, mar = 0.1)
#' }
#' @author Marcelo Araya-Salas (\email{marcelo.araya@@ucr.ac.cr})
# last modification on apr-18-2018 (MAS)
warbleR_options <- function(reset = FALSE, ...) {
opar <- getOption("warbleR")
argms <- list(...)
if (length(argms) > 0) {
# rename wav.path as path
names(argms)[names(argms) == "wav.path"] <- "path"
if (!is.null(argms$path)) {
if (!dir.exists(argms$path)) {
stop2("provided 'wav.path' doesn't exist")
} else {
argms$path <- normalizePath(argms$path)
}
}
if (!is.null(argms$img.path)) {
if (!dir.exists(argms$img.path)) {
stop2("provided 'dest.path' doesn't exist")
} else {
argms$img.path <- normalizePath(argms$img.path)
}
}
if (!is.null(argms$dest.path)) {
if (!dir.exists(argms$dest.path)) {
stop2("provided 'dest.path' doesn't exist")
} else {
argms$dest.path <- normalizePath(argms$dest.path)
}
}
# check flac path
if (!is.null(argms$flac.path)) {
# on linux and macOS
if (.Platform$OS.type == "unix") {
run_flac <- if (argms$flac.path == "") {
"flac"
} else {
paste(argms$flac.path, "flac", sep = "/")
}
if (system(paste(run_flac, "-v --totally-silent"), ignore.stderr = TRUE) != 0) {
stop2("FLAC program was not found")
}
}
# on windows
if (.Platform$OS.type == "windows") {
run_flac <- paste("C:/Program Files/FLAC/", "flac",
sep = ""
)
if (!file.exists(run_flac)) {
run_flac <- paste("C:/Program Files (x86)/FLAC/",
"flac",
sep = ""
)
}
if (!file.exists(run_flac)) {
stop2("FLAC program was not found")
}
}
}
if (length(argms) > 0) {
if (length(argms) == 1 && is.list(argms[[1]])) {
npar <- argms[[1]]
} else {
npar <- argms
}
# add previous options not reset in new call
if (length(opar) > 0) {
opar <- opar[!names(opar) %in% names(npar)]
npar <- c(npar, opar)
}
# order arguments by name
npar <- npar[order(names(npar))]
options("warbleR" = npar)
}
invisible(opar)
} else if (reset) {
options("warbleR" = NULL)
} else {
print(opar)
}
}
# end of file: /scratch/gouwar.j/cran-all/cranData/warbleR/R/warbleR_options.R
#' Convert .wav files to .flac
#'
#' \code{wav_2_flac} converts several .wav files to .flac compressed lossless format
#' @usage wav_2_flac(files = NULL, path = NULL, overwrite = FALSE,
#' pb = TRUE, parallel = 1, reverse = FALSE, compression = 5, flac.path)
#' @param files character vector with the names of files to be converted. If \code{NULL} all files in the working directory (or 'path' if supplied) are converted.
#' @param path Character string containing the directory path where the .wav files are located.
#' If \code{NULL} (default) then the current working directory is used.
#' @param overwrite Logical. Control whether a .flac sound file that is already in the working directory should be
#' overwritten.
#' @param pb Logical argument to control if progress bar is shown. Default is \code{TRUE}. It can also be
#' set globally using the 'pb' option (see \code{\link{warbleR_options}}).
#' @param parallel Numeric. Controls whether parallel computing is applied.
#' It specifies the number of cores to be used. Default is 1 (i.e. no parallel computing). It can also be
#' set globally using the 'parallel' option (see \code{\link{warbleR_options}}).
#' @param reverse Logical argument to control if .wav files are converted into .flac files (default, \code{reverse = FALSE}) or .flac files are converted into .wav files (\code{reverse = TRUE}).
#' @param compression Numeric vector of length 1 indicating the level of compression for .flac files. Must be a number between 0 (lowest) and 8 (highest compression). Default is 5.
#' @param flac.path Path to the flac program, mostly needed for windows OS.
#' @return .flac files saved in the working directory with the same names as the original .wav files.
#' @export
#' @details The function will convert all .wav files in working directory or 'path' supplied to .flac format (or the opposite if \code{reverse = TRUE}). For reading 'flac' files on windows the path to the .exe is required. This can be set using the 'flac.path' argument (or globally using the same argument in \code{\link{warbleR_options}}). Note that reading 'flac' files requires creating a temporary copy in 'wav' format, which can be particularly slow for long files.
#' @name wav_2_flac
#' @examples
#' \dontrun{
#' # create some .wav files
#' data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4"))
#' writeWave(Phae.long1, file.path(tempdir(), "Phae.long1.wav"))
#' writeWave(Phae.long2, file.path(tempdir(), "Phae.long2.wav"))
#' writeWave(Phae.long3, file.path(tempdir(), "Phae.long3.wav"))
#' writeWave(Phae.long4, file.path(tempdir(), "Phae.long4.wav"))
#'
#' # Convert all files to .flac format
#' wav_2_flac(path = tempdir())
#'
#' # check this folder!!
#' open_wd(tempdir())
#' }
#'
#' @details Converts all .wav files in the working directory to the .flac compressed lossless format. It is just a silly wrapper over \code{\link[seewave]{wav2flac}} to simplify converting several files at once.
#' @author Marcelo Araya-Salas (\email{marcelo.araya@@ucr.ac.cr})
# last modification on abr-13-2021 (MAS)
wav_2_flac <-
function(files = NULL,
path = NULL,
overwrite = FALSE,
pb = TRUE,
parallel = 1,
reverse = FALSE,
compression = 5,
flac.path = "") {
#### set arguments from options
# get function arguments
argms <- methods::formalArgs(wav_2_flac)
# get warbleR options
opt.argms <-
if (!is.null(getOption("warbleR"))) {
getOption("warbleR")
} else {
SILLYNAME <- 0
}
# remove options not as default in call and not in function arguments
opt.argms <-
opt.argms[!sapply(opt.argms, is.null) &
names(opt.argms) %in% argms]
# get arguments set in the call
call.argms <- as.list(base::match.call())[-1]
# remove arguments in options that are in call
opt.argms <- opt.argms[!names(opt.argms) %in% names(call.argms)]
# set options left
if (length(opt.argms) > 0) {
for (q in seq_len(length(opt.argms))) {
assign(names(opt.argms)[q], opt.argms[[q]])
}
}
# check path to working directory
if (is.null(path)) {
path <- getwd()
} else if (!dir.exists(path)) {
stop2("'path' provided does not exist")
} else {
path <- normalizePath(path)
}
# set path to flac
if (is.null(getOption("warbleR")$flac.path)) {
# on linux
if (.Platform$OS.type == "unix") {
if (missing(flac.path)) {
run_flac <- "flac"
} else {
run_flac <- paste(flac.path, "flac", sep = "/")
}
if (system(paste(run_flac, "-v"), ignore.stderr = TRUE) !=
0) {
stop2("FLAC program was not found")
}
}
# on windows
if (.Platform$OS.type == "windows") {
if (missing(flac.path)) {
"flac" <- "flac.exe"
}
if (missing(flac.path)) {
run_flac <- paste("C:/Program Files/FLAC/", "flac",
sep = ""
)
if (!file.exists(run_flac)) {
run_flac <- paste("C:/Program Files (x86)/FLAC/",
"flac",
sep = ""
)
}
} else {
run_flac <- paste(flac.path, "flac", sep = "/")
}
if (!file.exists(run_flac)) {
stop2("FLAC program was not found")
}
}
warbleR_options(flac.path = if (missing("flac.path")) {
""
} else {
flac.path
})
} else {
run_flac <-
if (getOption("warbleR")$flac.path == "") {
"flac"
} else {
file.path(getOption("warbleR")$flac.path, "flac")
}
}
# get files in path supplied
files_in_path <-
list.files(
path = path,
pattern = if (reverse) {
".flac$"
} else {
".wav$"
},
ignore.case = TRUE
)
if (is.null(files)) {
files <- files_in_path
} else {
if (!all(files %in% files_in_path)) {
stop2("some (or all) sound files were not found")
}
}
# check path to flac programs
# set clusters for windows OS
if (Sys.info()[1] == "Windows" & parallel > 1) {
cl <- parallel::makePSOCKcluster(getOption("cl.cores", parallel))
} else {
cl <- parallel
}
# run loop apply function
out_l <-
pblapply_wrblr_int(
pbar = pb,
X = files,
cl = cl,
FUN = function(i) {
# warbleR::try_na(
flacwav(
file = file.path(path, i),
overwrite = overwrite,
reverse = reverse,
compression = compression,
run_flac = run_flac
# )
)
}
)
}
flacwav <-
function(file,
reverse = FALSE,
overwrite = FALSE,
exename = NULL,
path2exe = NULL,
compression = 5,
run_flac) {
# set compression
compression <- paste0("--compression-level-", compression)
# on linux and macOS
if (.Platform$OS.type == "unix") {
if (reverse) {
wav_file <- gsub("flac$", "wav", file, ignore.case = TRUE)
e <-
system(
paste(
run_flac,
"-d",
file,
"-o",
wav_file,
"--totally-silent",
if (overwrite) {
"--force"
} else {
""
}
),
ignore.stderr = TRUE
)
} else {
e <-
system(
paste(
run_flac,
file,
"--totally-silent",
if (overwrite) {
"--force"
} else {
""
},
compression
),
ignore.stderr = TRUE,
intern = FALSE
)
}
}
if (.Platform$OS.type == "windows") {
if (missing(exename)) {
exename <- "flac.exe"
}
if (missing(path2exe)) {
run_flac <- paste("C:/Program Files/FLAC/", exename,
sep = ""
)
if (!file.exists(run_flac)) {
run_flac <- paste("C:/Program Files (x86)/FLAC/",
exename,
sep = ""
)
}
} else {
run_flac <- paste(path2exe, exename, sep = "/")
}
if (!file.exists(run_flac)) {
stop2("FLAC program was not found.")
}
if (reverse) {
wav_file <- gsub("flac$", "wav", file, ignore.case = TRUE)
e <- system(
paste(
shQuote(run_flac),
"-d",
shQuote(file,
type = "cmd"
),
"-o",
shQuote(wav_file,
type = "cmd"
),
"--totally-silent",
if (overwrite) {
"--force"
} else {
""
},
sep = " "
),
ignore.stderr = TRUE
)
} else {
e <-
system(
paste(
shQuote(run_flac),
shQuote(file, type = "cmd"),
"--totally-silent",
if (overwrite) {
"--force"
} else {
""
},
compression,
sep = " "
),
ignore.stderr = TRUE
)
}
}
}
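# Illustrative sketch (comments only, not part of the package): with hypothetical values
# run_flac <- "flac", file <- "example.wav", compression level 5 and overwrite = FALSE,
# the unix branch above passes to system() the string built by
# paste(run_flac, file, "--totally-silent", "", paste0("--compression-level-", 5))
# i.e. "flac example.wav --totally-silent  --compression-level-5", and for
# reverse = TRUE (decompression) the string built by
# paste(run_flac, "-d", file, "-o", wav_file, "--totally-silent", "")
# i.e. "flac -d example.flac -o example.wav --totally-silent "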
# end of file: /scratch/gouwar.j/cran-all/cranData/warbleR/R/wav_2_flac.R
#' Measure wavelet packet decomposition features (EXPERIMENTAL)
#'
#' \code{wpd_features} Measure wavelet packet decomposition features.
#' @usage wpd_features(X, normalize = TRUE, threshold1 = 6,
#' threshold2 = 0.5, path = NULL, pb = TRUE, parallel = 1)
#' @param X object of class 'selection_table', 'extended_selection_table' or data frame with the following columns: 1) "sound.files": name of the sound
#' files, 2) "sel": number of the selections, 3) "start": start time of selections, 4) "end":
#' end time of selections. The output of \code{\link{auto_detec}} can
#' also be used as the input data frame.
#' @param normalize Logical to determine if features are normalized by signal duration.
#' @param threshold1 Threshold (\%) for wavelet coefficient detection. Equivalent to denominator of equation 6 in Selin et al (2007). Must be a value between 0 and 1.
#' @param threshold2 Threshold for width detection. Equivalent to threshold 2 (th2) in equation 7 in Selin et al (2007).
#' @param parallel Numeric. Controls whether parallel computing is applied.
#' It specifies the number of cores to be used. Default is 1 (i.e. no parallel computing).
#' @param pb Logical argument to control progress bar and messages. Default is \code{TRUE}.
#' @param path Character string containing the directory path where the sound files are located.
#' If \code{NULL} (default) then the current working directory is used.
#' @return A data frame with rows for each of the selections in 'X' in addition to four wavelet packet decomposition features: max.energy, position, spread and width.
#' @export
#' @name wpd_features
#' @details Measures wavelet packet decomposition features. STILL UNDER DEVELOPMENT. USE IT AT YOUR OWN RISK.
#' @seealso \code{\link{mfcc_stats}}
#' @examples
#' {
#' data(list = c("Phae.long1", "Phae.long2", "lbh_selec_table"))
#' writeWave(Phae.long1, file.path(tempdir(), "Phae.long1.wav"))
#' writeWave(Phae.long2, file.path(tempdir(), "Phae.long2.wav"))
#'
#' # without normalizing by signal duration
#' wpd_features(lbh_selec_table[1:5, ], threshold2 = 0.3, normalize = FALSE, path = tempdir())
#' }
#'
#' @references {
#' Araya-Salas, M., & Smith-Vidaurre, G. (2017). warbleR: An R package to streamline analysis of animal acoustic signals. Methods in Ecology and Evolution, 8(2), 184-191.
#'
#' Selin A., J. Turunen, and J. T. Tanttu, 2007. Wavelets in recognition of bird sounds. EURASIP Journal on Advances in Signal Processing.
#' }
#' @author Marcelo Araya-Salas (\email{marcelo.araya@@ucr.ac.cr})
# last modification on oct-7-2019 (MAS)
wpd_features <- function(X, normalize = TRUE, threshold1 = 6, threshold2 = 0.5, path = NULL, pb = TRUE, parallel = 1) {
# error message if wavethresh is not installed
if (!requireNamespace("wavethresh", quietly = TRUE)) {
stop2("must install 'wavethresh' to use this function")
}
#### set arguments from options
# get function arguments
argms <- methods::formalArgs(wpd_features)
# get warbleR options
opt.argms <- if (!is.null(getOption("warbleR"))) getOption("warbleR") else SILLYNAME <- 0
# remove options not as default in call and not in function arguments
opt.argms <- opt.argms[!sapply(opt.argms, is.null) & names(opt.argms) %in% argms]
# get arguments set in the call
call.argms <- as.list(base::match.call())[-1]
# remove arguments in options that are in call
opt.argms <- opt.argms[!names(opt.argms) %in% names(call.argms)]
# set options left
if (length(opt.argms) > 0) {
for (q in seq_len(length(opt.argms))) {
assign(names(opt.argms)[q], opt.argms[[q]])
}
}
# check path to working directory
if (is.null(path)) {
path <- getwd()
} else if (!dir.exists(path)) {
stop2("'path' provided does not exist")
} else {
path <- normalizePath(path)
}
# if X is not a data frame
if (!any(is.data.frame(X), is_selection_table(X), is_extended_selection_table(X))) stop2("X is not of a class 'data.frame', 'selection_table' or 'extended_selection_table'")
if (!all(c(
"sound.files", "selec",
"start", "end"
) %in% colnames(X))) {
stop2(paste(paste(c("sound.files", "selec", "start", "end")[!(c(
"sound.files", "selec",
"start", "end"
) %in% colnames(X))], collapse = ", "), "column(s) not found in data frame"))
}
# if there are NAs in start or end stop
if (any(is.na(c(X$end, X$start)))) stop2("NAs found in start and/or end")
# if end or start are not numeric stop
if (any(!is(X$end, "numeric"), !is(X$start, "numeric"))) stop2("'start' and 'end' must be numeric")
# if any start higher than end stop
if (any(X$end - X$start <= 0)) stop2(paste("Start is higher than or equal to end in", length(which(X$end - X$start <= 0)), "case(s)"))
# if any selections longer than 20 secs warning
if (any(X$end - X$start > 20)) warning2(paste(length(which(X$end - X$start > 20)), "selection(s) longer than 20 sec"))
# return warning if not all sound files were found
if (!is_extended_selection_table(X)) {
fs <- list.files(path = path, ignore.case = TRUE)
if (length(unique(X$sound.files[(X$sound.files %in% fs)])) != length(unique(X$sound.files))) {
message2(paste(
length(unique(X$sound.files)) - length(unique(X$sound.files[(X$sound.files %in% fs)])),
"sound file(s) not found"
))
}
# count number of sound files in working directory and if 0 stop
d <- which(X$sound.files %in% fs)
if (length(d) == 0) {
stop2("The sound files are not in the working directory")
} else {
X <- X[d, ]
}
}
# set clusters for windows OS
if (Sys.info()[1] == "Windows" & parallel > 1) {
cl <- parallel::makePSOCKcluster(getOption("cl.cores", parallel))
} else {
cl <- parallel
}
# run loop apply function
wdps <- pblapply_wrblr_int(pbar = pb, X = 1:nrow(X), cl = cl, FUN = function(i) {
# read rec segment
r <- warbleR::read_sound_file(X = X, path = path, index = i)
# run internal warbleR function to measure parameters
ftrs <- wpd_feature_wrblr_int(wave = r, normalize = normalize, thr1 = threshold1, thr2 = threshold2)
# return low and high freq
return(data.frame(sound.files = X$sound.files[i], selec = X$selec[i], t(ftrs)))
})
out <- do.call(rbind, wdps)
return(out)
}
# end of file: /scratch/gouwar.j/cran-all/cranData/warbleR/R/wpd_features.R
#' Maps of 'Xeno-Canto' recordings by species
#'
#' \code{map_xc} creates maps to visualize the geographic spread of 'Xeno-Canto'
#' recordings.
#' @usage map_xc(X, img = TRUE, it = "jpeg", res = 100, labels = FALSE,
#' path = NULL, leaflet.map = FALSE,
#' leaflet.cluster = FALSE)
#' @param X Data frame output from \code{\link{query_xc}}.
#' @param img A logical argument specifying whether an image file of each species
#' map should be returned, default is \code{TRUE}.
#' @param it A character vector of length 1 giving the image type to be used. Currently only
#' "tiff" and "jpeg" are admitted. Default is "jpeg".
#' @param res Numeric argument of length 1. Controls image resolution.
#' Default is 100 (faster) although 300 - 400 is recommended for publication/
#' presentation quality.
#' @param labels A logical argument defining whether dots depicting recording locations are labeled.
#' If \code{TRUE} then the Recording_ID is used as label.
#' @param path Character string with the directory path where the image files will be saved.
#' If \code{NULL} (default) then the current working directory is used.
#' Ignored if \code{img = FALSE}.
#' @param leaflet.map Logical to control whether the package 'leaflet' is used for displaying the maps. 'leaflet' maps are interactive and display information about recordings and links to the Xeno-Canto website. If \code{TRUE} a single map is displayed regardless of the number of species and all other image related arguments are ignored. Default is \code{FALSE}. The hovering label shows the species scientific name (or the subspecies if only 1 species is present in 'X'). Note that colors will be recycled if more than 18 species (or subspecies) are present.
#' @param leaflet.cluster Logical to control if icons are clustered by locality (as in Xeno-Canto maps). Default is \code{FALSE}.
#' @return A map of 'Xeno-Canto' recordings per species (image file), or a faceted
#' plot of species map(s) in the active graphic device.
#' @export
#' @name map_xc
#' @details This function creates maps for visualizing the geographic spread of recordings from the open-access
#' online repository \href{https://www.xeno-canto.org/}{Xeno-Canto}. The function takes the output of
#' \code{\link{query_xc}} as input. Maps can be displayed in the graphic device (or Viewer if 'leaflet.map = TRUE') or saved as images in the
#' working directory. Note that only recordings with geographic coordinates are displayed.
#' @examples
#' \dontrun{
#' # search in xeno-canto
#' X <- query_xc("Phaethornis anthophilus", download = FALSE)
#'
#' # create image in R graphic device
#' map_xc(X, img = FALSE)
#'
#' # create leaflet map
#' map_xc(X, leaflet.map = TRUE)
#' }
#'
#' @references {
#' Araya-Salas, M., & Smith-Vidaurre, G. (2017). warbleR: An R package to streamline analysis of animal acoustic signals. Methods in Ecology and Evolution, 8(2), 184-191.
#' }
#' @author Marcelo Araya-Salas (\email{marcelo.araya@@ucr.ac.cr}) and Grace Smith Vidaurre
map_xc <- function(X, img = TRUE, it = "jpeg", res = 100, labels = FALSE,
path = NULL, leaflet.map = FALSE,
leaflet.cluster = FALSE) {
# error message if maps is not installed
if (!requireNamespace("maps", quietly = TRUE)) {
stop2("must install 'maps' to use this function")
}
# error message if leaflet is not installed
if (!requireNamespace("leaflet", quietly = TRUE) & leaflet.map) {
stop2("must install 'leaflet' to use leaflet style maps (when 'leaflet.map = TRUE')")
}
#### set arguments from options
# get function arguments
argms <- methods::formalArgs(map_xc)
# get warbleR options
opt.argms <- if (!is.null(getOption("warbleR"))) getOption("warbleR") else SILLYNAME <- 0
# remove options not as default in call and not in function arguments
opt.argms <- opt.argms[!sapply(opt.argms, is.null) & names(opt.argms) %in% argms]
# get arguments set in the call
call.argms <- as.list(base::match.call())[-1]
# remove arguments in options that are in call
opt.argms <- opt.argms[!names(opt.argms) %in% names(call.argms)]
# set options left
if (length(opt.argms) > 0) {
for (q in seq_len(length(opt.argms))) {
assign(names(opt.argms)[q], opt.argms[[q]])
}
}
# check path if not provided set to working directory
if (is.null(path)) path <- getwd() else if (!dir.exists(path)) stop2("'path' provided does not exist")
# stop if X is not a data frame
if (!is.data.frame(X)) stop2("X is not a data frame")
# make species column
X$species <- paste(X$Genus, X$Specific_epithet)
# make lat lon numeric and remove rows with no coords
X$Latitude <- as.numeric(as.character(X$Latitude))
X$Longitude <- as.numeric(as.character(X$Longitude))
X <- X[!is.na(X$Latitude) & !is.na(X$Longitude), , drop = FALSE]
# stop if no rows left
if (nrow(X) == 0) stop2("not a single with observation has coordinates")
# if no leatfet map
if (!leaflet.map) {
# if it argument is not "jpeg" or "tiff"
if (!any(it == "jpeg", it == "tiff")) stop2(paste("Image type", it, "not allowed"))
# get species names (common name)
spn <- length(unique(X$English_name))
# reset graphic device
try(dev.off(), silent = TRUE)
# Set threshold for maximum number of panels per plot device
if (spn <= 16) mat <- par(mfrow = c(ceiling(sqrt(spn)), round(sqrt(spn), 0))) else par(mfrow = c(4, 4))
par(mar = rep(0, 4))
# Create a map per species, with the recordings plotted over each map
for (i in sort(unique(X$species))) {
y <- X[X$species == i, ]
if (all(length(y$Latitude) > 0, length(y$Longitude) > 0)) {
if (abs(max(y$Longitude) - min(y$Longitude)) < 38) buf <- 12 else buf <- 5
if (img) {
prop <- abs((min(y$Longitude) - buf) - (max(y$Longitude) + buf)) / abs((min(y$Latitude) - buf) - (max(y$Latitude) + buf)) * 1.15
img_wrlbr_int(
filename = paste("Map of ", i, " recordings", it, sep = ""),
width = 480 * prop, path = path
)
# change margins
# par(mar = rep(2.5,4))
# add empty map
maps::map("world",
xlim = c(min(y$Longitude) - buf, max(y$Longitude) + buf), interior = FALSE,
ylim = c(min(y$Latitude) - buf, max(y$Latitude) + buf), fill = FALSE
)
# change background color
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4],
col =
cm.colors(10)[3]
)
# plot lat lon lines in background
abline(h = seq(-90, 90, 5), col = "white", lwd = 0.9)
abline(h = seq(-90, 90, 10), col = "white", lwd = 1.1)
abline(v = seq(-180, 180, 5), col = "white", lwd = 0.9)
abline(v = seq(-180, 180, 10), col = "white", lwd = 1.1)
# add map
maps::map("world",
xlim = c(min(y$Longitude) - buf, max(y$Longitude) + buf), add = TRUE,
ylim = c(min(y$Latitude) - buf, max(y$Latitude) + buf), fill = TRUE,
col = terrain.colors(10)[5], myborder = 0, interior = F, lty = 2
)
mtext("Longitude (DD)", side = 1, line = 2)
mtext("Latitude (DD)", side = 2, line = 2)
mtext(i, side = 3, line = 1, cex = 1.4, font = 4)
# add axes
maps::map.axes()
# add contour lines
maps::map("world",
xlim = c(min(y$Longitude) - buf, max(y$Longitude) + buf), interior = FALSE,
ylim = c(min(y$Latitude) - buf, max(y$Latitude) + buf), fill = FALSE, add = TRUE
)
# add points
points(y$Longitude, y$Latitude, pch = 21, cex = 1.8, col = "#E37222", bg = "#E37222")
# add labels
if (labels) {
text(y$Longitude, y$Latitude, labels = y$Recording_ID, cex = 0.7, pos = 4)
}
# add scale
maps::map.scale(ratio = FALSE, relwidth = 0.4)
dev.off()
} else {
# change margins
if (par()$mfrow[1] > 2) par(mfrow = c(2, 2))
if (par()$mfrow[1] > 1) u <- 0 else u <- 2
par(mar = rep(u, 4), mai = rep(0.2, 4))
# add empty map
maps::map("world",
xlim = c(min(y$Longitude) - buf, max(y$Longitude) + buf), interior = FALSE,
ylim = c(min(y$Latitude) - buf, max(y$Latitude) + buf), fill = FALSE
)
# change background color
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4],
col =
adjustcolor("#07889B", alpha.f = 0.2)
)
# plot lat lon lines in background
abline(h = seq(-90, 90, 5), col = "white", lwd = 0.9)
abline(h = seq(-90, 90, 10), col = "white", lwd = 1.1)
abline(v = seq(-180, 180, 5), col = "white", lwd = 0.9)
abline(v = seq(-180, 180, 10), col = "white", lwd = 1.1)
# add map
maps::map("world",
xlim = c(min(y$Longitude) - buf, max(y$Longitude) + buf), add = TRUE,
ylim = c(min(y$Latitude) - buf, max(y$Latitude) + buf), fill = TRUE, col = "white"
)
maps::map("world",
xlim = c(min(y$Longitude) - buf, max(y$Longitude) + buf), add = TRUE,
ylim = c(min(y$Latitude) - buf, max(y$Latitude) + buf), fill = TRUE,
col = adjustcolor("darkolivegreen2", alpha.f = 0.7), myborder = 0, interior = F, lty = 2
)
mtext("Longitude (DD)", side = 1, line = 2)
mtext("Latitude (DD)", side = 2, line = 2)
mtext(i, side = 3, line = 1, cex = 1, font = 4)
# add axes
maps::map.axes()
# add contour lines
maps::map("world",
xlim = c(min(y$Longitude) - buf, max(y$Longitude) + buf), interior = FALSE,
ylim = c(min(y$Latitude) - buf, max(y$Latitude) + buf), fill = FALSE, add = TRUE
)
# add points
points(y$Longitude, y$Latitude, pch = 21, cex = 1.3, bg = "white")
points(y$Longitude, y$Latitude, pch = 21, cex = 1.3, col = "gray3", bg = adjustcolor("#E37222", alpha.f = 0.7), lwd = 0.7)
# add labels
if (labels) {
text(y$Longitude, y$Latitude, labels = y$Recording_ID, cex = 0.7, pos = 4)
}
# add scale
maps::map.scale(ratio = FALSE, relwidth = 0.4)
}
}
}
} else { # if leaflet map
cols <- c("red", "darkred", "lightred", "orange", "beige", "green", "darkgreen", "lightgreen", "blue", "darkblue", "lightblue", "purple", "darkpurple", "pink", "cadetblue", "gray", "lightgray", "black")[c(c(1, 6, 12) + rep(1:6, each = 3), 1)]
# repeat many times so colors are "recycled"
cols <- rep(cols, 100)
# change NAs in subspecies
X$Subspecies <- as.character(X$Subspecies)
X$Subspecies[is.na(X$Subspecies) | X$Subspecies == ""] <- "not provided"
# if only one species use subspecies for color marker
if (length(unique((X$species))) == 1) {
# label pop up markers
X$labels <- X$Subspecies
X$labels[X$labels == "not provided"] <- "Subsp. not provided"
} else {
# labels for hovering
X$labels <- X$species
}
# color for marker
mrkcol <- cols[1:(length(unique(X$labels)))][as.numeric(as.factor(X$labels))]
mrkcol[X$labels == "Subsp. not provided"] <- "white"
# use ios icons with marker colors
icons <- leaflet::awesomeIcons(
icon = "ios-close",
iconColor = "black",
library = "ion",
markerColor = mrkcol
)
# make content for popup
content <- paste0("<b><a href='https://www.xeno-canto.org/", X$Recording_ID, "'>", paste0("XC", X$Recording_ID), "</a></b>", "<br/><i>", paste(X$Genus, X$Specific_epithet, sep = " "), "</i><br/> Subspecies: ", X$Subspecies, "<br/> Country: ", X$Country, "<br/> Locality: ", X$Locality, "<br/> Voc.type: ", X$Vocalization_type, "<br/> Recordist: ", X$Recordist, paste0("<b><a href='https://www.xeno-canto.org/", X$Recording_ID, "/download'>", "<br/>", "listen</a>"))
# make base map
leaf.map <- leaflet::leaflet(X)
# add tiles
leaf.map <- leaflet::addTiles(leaf.map)
# add markers
if (leaflet.cluster) {
leaf.map <- leaflet::addAwesomeMarkers(
map = leaf.map, ~Longitude, ~Latitude, icon = icons, label = ~labels, popup = content, data = X, clusterOptions = leaflet::markerClusterOptions(),
clusterId = "rec.cluster"
)
} else {
leaf.map <- leaflet::addAwesomeMarkers(map = leaf.map, ~Longitude, ~Latitude, icon = icons, label = ~labels, popup = content, data = X)
}
# add minimap view at bottom right
leaf.map <- leaflet::addMiniMap(leaf.map)
# add zoom-out button
leaf.map <- leaflet::addEasyButton(leaf.map, leaflet::easyButton(
icon = "fa-globe", title = "Zoom to full view",
onClick = leaflet::JS("function(btn, map){ map.setZoom(1); }")
))
if (leaflet.cluster) {
leaf.map <- leaflet::addEasyButton(leaf.map, leaflet::easyButton(
states = list(
leaflet::easyButtonState(
stateName = "unfrozen-markers",
icon = "ion-toggle",
title = "Freeze Clusters",
onClick = leaflet::JS("
function(btn, map) {
var clusterManager =
map.layerManager.getLayer('cluster', 'rec.cluster');
clusterManager.freezeAtZoom();
btn.state('frozen-markers');
}")
),
leaflet::easyButtonState(
stateName = "frozen-markers",
icon = "ion-toggle-filled",
title = "UnFreeze Clusters",
onClick = leaflet::JS("
function(btn, map) {
var clusterManager =
map.layerManager.getLayer('cluster', 'rec.cluster');
clusterManager.unfreeze();
btn.state('unfrozen-markers');
}")
)
)
))
}
# plot map
leaf.map
}
}
##############################################################################################################
#' alternative name for \code{\link{map_xc}}
#'
#' @keywords internal
#' @details see \code{\link{map_xc}} for documentation. \code{\link{xcmaps}} will be deprecated in future versions.
#' @export
xcmaps <- map_xc
|
/scratch/gouwar.j/cran-all/cranData/warbleR/R/xc_maps.R
|
# start up message for ggplot2
# use .onAttach for start up messages
# use .onLoad for custom options
# see https://github.com/hadley/r-pkgs/blob/master/r.rmd
# .onAttach <- function(libname, pkgname) {
# packageStartupmessage2("\nNOTE: functions have been renamed (run 'print(new_function_names)' to see new names). Both old and new names are available in this version \n Please see citation('warbleR') for use in publication")
# }
# set warbleR options
.onLoad <- function(libname, pkgname) {
opts <- list(
bp = NULL,
collevels = NULL,
flim = NULL,
it = NULL,
res = NULL,
osci = NULL,
pal = NULL,
parallel = NULL,
pb = TRUE,
wav.path = NULL,
wl = NULL,
wn = NULL
)
optsx <- getOption("warbleR")
if (!is.null(optsx)) {
# keep values for default options that the user has already set (e.g. via warbleR_options())
for (i in intersect(names(opts), names(optsx))) {
opts[[i]] <- optsx[[i]]
}
# and retain any additional user-defined options not in the default list
for (i in setdiff(names(optsx), names(opts))) {
opts[[i]] <- optsx[[i]]
}
}
options("warbleR" = opts)
options("int_warbleR_steps" = c(current = 0, total = 0))
invisible(NULL)
}
.onUnload <- function(libpath) {
options("int_warbleR_steps" = NULL)
options("warbleR" = NULL)
# rm_new_names()
invisible(NULL)
}
|
/scratch/gouwar.j/cran-all/cranData/warbleR/R/zzz.R
|
## ----extn_sel_2, echo = FALSE, message = FALSE----------------------------------------------------
# load packages
library(warbleR)
library(knitr)
cf <- read.csv("function_descrip_table.csv", stringsAsFactors = FALSE)
data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4"))
writeWave(Phae.long1, file.path(tempdir(), "Phae.long1.wav"))
writeWave(Phae.long2, file.path(tempdir(), "Phae.long2.wav"))
writeWave(Phae.long3, file.path(tempdir(), "Phae.long3.wav"))
writeWave(Phae.long4, file.path(tempdir(), "Phae.long4.wav"))
warbleR_options(wav.path = tempdir())
options(knitr.table.format = "html")
opts_chunk$set(comment = "")
opts_knit$set(root.dir = tempdir())
options(width = 100, max.print = 100)
## ----extn_sel_4.1, eval=FALSE---------------------------------------------------------------------
#
# data("lbh_selec_table")
#
# lbh_selec_table
## ----extn_sel_4.2, echo=FALSE, eval = TRUE--------------------------------------------------------
library(kableExtra)
kbl <- kable(lbh_selec_table, align = "c", row.names = F, format = "html")
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl <- scroll_box(kbl,
width = "740px",
box_css = "border: 1px solid #ddd; padding: 1px; ", extra_css = NULL
)
kbl
## ----extn_sel_4.32, eval = FALSE------------------------------------------------------------------
#
# data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4"))
# writeWave(Phae.long1, file.path(tempdir(), "Phae.long1.wav"))
# writeWave(Phae.long2, file.path(tempdir(), "Phae.long2.wav"))
# writeWave(Phae.long3, file.path(tempdir(), "Phae.long3.wav"))
# writeWave(Phae.long4, file.path(tempdir(), "Phae.long4.wav"))
#
# # global parameters
# warbleR_options(wav.path = tempdir())
#
# st <- selection_table(X = lbh_selec_table, pb = FALSE)
#
# st
## ----eval = TRUE, echo = FALSE--------------------------------------------------------------------
st <- selection_table(X = lbh_selec_table, pb = FALSE)
## ----eval = TRUE, echo = FALSE--------------------------------------------------------------------
st
## -------------------------------------------------------------------------------------------------
class(st)
## ----extn_sel_4.3, eval = FALSE-------------------------------------------------------------------
#
# # global parameters
# warbleR_options(wav.path = tempdir())
#
#
# ext_st <- selection_table(
# X = lbh_selec_table, pb = FALSE,
# extended = TRUE, confirm.extended = FALSE
# )
## ----extn_sel_4.33, eval = TRUE, echo = FALSE-----------------------------------------------------
ext_st <- selection_table(
X = lbh_selec_table, pb = FALSE,
extended = TRUE, confirm.extended = FALSE
)
## ----extn_sel_5-----------------------------------------------------------------------------------
is_extended_selection_table(ext_st)
## ----extn_sel_6-----------------------------------------------------------------------------------
ext_st2 <- ext_st[1:2, ]
is_extended_selection_table(ext_st2)
## ----extn_sel_7-----------------------------------------------------------------------------------
## print
print(ext_st)
## ----extn_sel_7.1, eval=FALSE---------------------------------------------------------------------
#
# ext_st
## ----extn_sel_7/2, echo=FALSE---------------------------------------------------------------------
print(ext_st)
## ----extn_sel_8, eval = FALSE---------------------------------------------------------------------
#
# ext_st3 <- ext_st[1:5, ]
#
# ext_st4 <- ext_st[6:11, ]
#
# ext_st5 <- rbind(ext_st3, ext_st4)
#
# # print
# ext_st5
## ----extn_sel_8.1, echo=FALSE---------------------------------------------------------------------
ext_st3 <- ext_st[1:5, ]
ext_st4 <- ext_st[6:11, ]
ext_st5 <- rbind(ext_st3, ext_st4)
# print
print(ext_st5)
## ----extn_sel_8.2---------------------------------------------------------------------------------
# same as the original
all.equal(ext_st, ext_st5)
## ----extn_sel_8.21--------------------------------------------------------------------------------
wv1 <- read_wave(X = ext_st, index = 3, from = 0, to = 0.37)
## ----extn_sel_8.22, out.width= 750----------------------------------------------------------------
class(wv1)
wv1
spectro(wv1, wl = 150, grid = FALSE, scale = FALSE, ovlp = 90)
## ----extn_sel_8.23, out.width= 750----------------------------------------------------------------
par(mfrow = c(3, 2), mar = rep(0, 4))
for (i in 1:6) {
wv <- read_wave(X = ext_st, index = i, from = 0.05, to = 0.32)
spectro(wv,
wl = 150, grid = FALSE, scale = FALSE, axisX = FALSE,
axisY = FALSE, ovlp = 90
)
}
## ----extn_sel_8.24--------------------------------------------------------------------------------
# create new data frame
Y <- data.frame(sound.files = ext_st$sound.files, site = "La Selva", lek = c(rep("SUR", 5), rep("CCL", 6)))
# combine
mrg_ext_st <- merge(ext_st, Y, by = "sound.files")
# check class
is_extended_selection_table(mrg_ext_st)
## ----extn_sel_8.25--------------------------------------------------------------------------------
# fix est
mrg_ext_st <- fix_extended_selection_table(X = mrg_ext_st, Y = ext_st)
# check class
is_extended_selection_table(mrg_ext_st)
## ----extn_sel_12.1, eval=FALSE--------------------------------------------------------------------
#
# # spectral parameters
# sp <- spectro_analysis(ext_st)
#
# sp
## ----extn_sel_12.2, echo= FALSE, eval = FALSE-----------------------------------------------------
#
# sp <- spectro_analysis(ext_st)
#
# kbl <- kable(sp, align = "c", row.names = F, format = "html")
#
# kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
#
# kbl <- scroll_box(kbl,
# width = "740px",
# box_css = "border: 1px solid #ddd; padding: 1px; ", extra_css = NULL
# )
#
# kbl
## ----extn_sel_12.5, eval=FALSE--------------------------------------------------------------------
#
# snr <- sig2noise(ext_st, mar = 0.05)
#
# snr
## ----extn_sel_12.6, echo= FALSE, eval = FALSE-----------------------------------------------------
#
# snr <- sig2noise(ext_st, mar = 0.05)
#
# kbl <- kable(snr, align = "c", row.names = F, format = "html")
#
# kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
#
# kbl <- scroll_box(kbl,
# width = "740px",
# box_css = "border: 1px solid #ddd; padding: 1px; ", extra_css = NULL
# )
#
# kbl
## ----extn_sel_12.7, eval=FALSE--------------------------------------------------------------------
#
# dtw.dist <- freq_DTW(ext_st, img = FALSE)
#
# dtw.dist
## ----extn_sel_12.8, echo= FALSE, eval = FALSE-----------------------------------------------------
#
# dtw.dist <- freq_DTW(ext_st, img = FALSE)
#
# kbl <- kable(dtw.dist, align = "c", row.names = T, format = "html")
#
# kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
#
# kbl <- scroll_box(kbl,
# width = "740px",
# box_css = "border: 1px solid #ddd; padding: 1px; ", extra_css = NULL
# )
#
# kbl
## ----extn_sel_13, eval = FALSE--------------------------------------------------------------------
#
# # create long selection table
# lng.selec.table <- do.call(rbind, replicate(10, lbh_selec_table,
# simplify = FALSE
# ))
#
# # relabels selec
# lng.selec.table$selec <- 1:nrow(lng.selec.table)
#
# # create extended selection table
# lng_ext_st <- selection_table(
# X = lng.selec.table, pb = FALSE,
# extended = TRUE, confirm.extended = FALSE
# )
#
#
# # load packages
# library(microbenchmark)
# library(ggplot2)
#
# # check performance
# mbmrk.snr <- microbenchmark(extended = sig2noise(lng_ext_st,
# mar = 0.05
# ), regular = sig2noise(lng.selec.table,
# mar = 0.05
# ), times = 50)
#
# autoplot(mbmrk.snr) + ggtitle("sig2noise")
## -------------------------------------------------------------------------------------------------
data("Phae.long.est")
Phae.long.est
table(Phae.long.est$lek.song.type)
## ----eval = FALSE, echo = FALSE-------------------------------------------------------------------
#
# library(warbleR)
#
# wf <- ls("package:warbleR")
#
# wf <- wf[-c(2, 7, 8, 10, 12, 16, 17, 19, 20, 23, 24, 28, 31, 32, 33, 38, 42, 43, 44, 47, 50, 53, 59, 64, 66, 68, 68, 72, 74, 80, 81, 85, 90, 93, 94, 96)]
#
# df <- data.frame(funciones = wf, `Obtener-preparar grabaciones` = "", `Anotar` = "", `Medir` = "", `Revision` = "", `Inspeccion visual` = "", `Analisis estadistico` = "", `Otros` = "")
#
# df2 <- edit(df)
#
#
# df2$`organizar.anotaciones` <- ""
#
# names(df2) <- names(df2)[c(1:3, 9, 4:8)]
#
# df3 <- edit(df2)
#
# df4 <- df3
#
# df4[is.na(df4)] <- ""
#
# df4 <- df4[df4$Obtener.preparar.grabaciones != "borrar", ]
#
# names(df4) <- c("Funcion", "Obtener-preparar grabaciones", "Anotar", "Organizar anotaciones", "Medir estructura", "Verificar", "Inspeccion visual", "Analisis estadistico", "Otros")
#
# rownames(df4) <- 1:nrow(df4)
#
# df5 <- df4[order(df4$`Obtener-preparar grabaciones`, df4$Anotar, df4$`Organizar anotaciones`, df4$`Medir estructura`, df4$Verificar, df4$`Inspeccion visual`, df4$`Analisis estadistico`, df4$Otros, decreasing = TRUE), ]
#
# df4 <- df4[c(5, 8, 18, 29, 34, 35, 37, 38, 39, 55, 56, 26, 1, 19, 40, 46, 4, 11, 16, 17, 24, 25, 32, 41, 45, 7, 12, 13, 14, 15, 23, 27, 30, 42, 47, 48, 57, 2, 3, 28, 44, 50, 51, 52, 58, 9, 10, 21, 22, 59, 6, 20, 31, 33, 36, 43, 49, 53, 54), ]
#
# # write.csv(df4, "cuadro de funciones warbleR.csv", row.names = FALSE)
## ----echo = FALSE, eval = TRUE--------------------------------------------------------------------
library(kableExtra)
names(cf) <- gsub("\\.", " ", names(cf))
cf2 <- cf[cf$`Obtener preparar grabaciones` == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
## ----echo = FALSE, eval = TRUE--------------------------------------------------------------------
cf2 <- cf[cf$Anotar == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
## ----echo = FALSE, eval = TRUE--------------------------------------------------------------------
cf2 <- cf[cf$`Organizar anotaciones` == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
## ----echo = FALSE, eval = TRUE--------------------------------------------------------------------
cf2 <- cf[cf$`Medir estructura` == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
## ----echo = FALSE, eval = TRUE--------------------------------------------------------------------
cf2 <- cf[cf$Verificar == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
## ----echo = FALSE, eval = TRUE--------------------------------------------------------------------
cf2 <- cf[cf$`Inspeccion visual` == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
## ----echo = FALSE, eval = TRUE--------------------------------------------------------------------
cf2 <- cf[cf$`Analisis estadistico` == "x" | cf$Otros == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
## ----session info, echo=F-------------------------------------------------------------------------
sessionInfo()
|
/scratch/gouwar.j/cran-all/cranData/warbleR/inst/doc/Intro_to_warbleR.R
|
---
title: <font size="7"><b>Introduction to warbleR</b></font>
pagetitle: Introduction to warbleR
author:
- <a href="https://marce10.github.io">Marcelo Araya-Salas, PhD</a> & <a href="https://smith-vidaurre.com/">Grace Smith-Vidaurre</a>
date: "`r Sys.Date()`"
output:
rmarkdown::html_document:
self_contained: yes
toc: true
toc_depth: 3
toc_float:
collapsed: false
smooth_scroll: true
vignette: >
%\VignetteIndexEntry{1. Introduction to warbleR}
%\usepackage[utf8]{inputenc}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
<!-- <script> -->
<!-- $(document).ready(function() { -->
<!-- $head = $('#header'); -->
<!-- $head.prepend('<img src=\"logo.png\"/>') -->
<!-- }); -->
<!-- </script> -->
<!-- -->
```{css, echo = FALSE}
div#header h1.title, div#header h3.subtitle, div#header h4.author, div#header h4.date {
text-align: center
}
```
<img src="warbleR_sticker.png" alt="warbleR logo" align="right" width="25%" height="25%">
The [warbleR](https://cran.r-project.org/package=warbleR) package is intended to facilitate the analysis of the structure of animal acoustic signals in R. Users can enter their own data into a workflow that facilitates spectrographic visualization and measurement of acoustic parameters. **warbleR** makes use of the fundamental sound analysis tools of the **seewave** package, and offers new tools for acoustic structure analysis. These tools are available for batch analysis of acoustic signals.
<font size = "3">The main features of the package are:
- The use of loops to apply tasks across acoustic signals referenced in a selection table
- The production of image files with spectrograms that let users organize data and verify acoustic analyses </font>
<center><img src = "loop_warbleR_images_optim.gif" alt = "warbleR image loop" width = "500"></center>
The package offers functions for:
- Browse and download recordings from [Xeno-Canto](https://www.xeno-canto.org/)
- Explore, organize and manipulate multiple sound files
- Detect signals automatically (in frequency and time)
- Create spectrograms of complete recordings or individual signals
- Run different measures of acoustic signal structure
- Evaluate the performance of measurement methods
- Catalog signals
- Characterize different structural levels in acoustic signals
- Statistical analysis of duet coordination
- Consolidate databases and annotation tables
Most of the functions allow the parallelization of tasks, which distributes the tasks among several cores to improve computational efficiency. Tools to evaluate the performance of the analysis at each step are also available. All these tools are provided in a standardized workflow for the analysis of the signal structure, making them accessible to a wide range of users, including those without much knowledge of R. **warbleR** is a young package (officially published in 2017) currently in a maturation stage.
## Selection tables
These objects are created with the `selection_table()` function. The function takes data frames containing selection data (name of the sound file, selection, start, end ...), verifies if the information is consistent (see the function `checksels()` for details) and saves the 'diagnostic' metadata as an attribute. The selection tables are basically data frames in which the information contained has been corroborated so it can be read by other **warbleR** functions. The selection tables must contain (at least) the following columns:
1. sound files (sound.files)
1. selection (selec)
1. start
1. end
The sample data "lbh_selec_table" contains these columns:
```{r extn_sel_2, echo = FALSE, message = FALSE}
# load packages
library(warbleR)
library(knitr)
cf <- read.csv("function_descrip_table.csv", stringsAsFactors = FALSE)
data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4"))
writeWave(Phae.long1, file.path(tempdir(), "Phae.long1.wav"))
writeWave(Phae.long2, file.path(tempdir(), "Phae.long2.wav"))
writeWave(Phae.long3, file.path(tempdir(), "Phae.long3.wav"))
writeWave(Phae.long4, file.path(tempdir(), "Phae.long4.wav"))
warbleR_options(wav.path = tempdir())
options(knitr.table.format = "html")
opts_chunk$set(comment = "")
opts_knit$set(root.dir = tempdir())
options(width = 100, max.print = 100)
```
```{r extn_sel_4.1, eval=FALSE}
data("lbh_selec_table")
lbh_selec_table
```
```{r extn_sel_4.2, echo=FALSE, eval = TRUE}
library(kableExtra)
kbl <- kable(lbh_selec_table, align = "c", row.names = F, format = "html")
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl <- scroll_box(kbl,
width = "740px",
box_css = "border: 1px solid #ddd; padding: 1px; ", extra_css = NULL
)
kbl
```
... and can be converted to the *selection_table* format like this (after saving the corresponding sound files):
```{r extn_sel_4.32, eval = FALSE}
data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4"))
writeWave(Phae.long1, file.path(tempdir(), "Phae.long1.wav"))
writeWave(Phae.long2, file.path(tempdir(), "Phae.long2.wav"))
writeWave(Phae.long3, file.path(tempdir(), "Phae.long3.wav"))
writeWave(Phae.long4, file.path(tempdir(), "Phae.long4.wav"))
# global parameters
warbleR_options(wav.path = tempdir())
st <- selection_table(X = lbh_selec_table, pb = FALSE)
st
```
```{r, eval = TRUE, echo = FALSE}
st <- selection_table(X = lbh_selec_table, pb = FALSE)
```
```{r, eval = TRUE, echo = FALSE}
st
```
Note that the path to the sound files has been provided. This is necessary in order to verify that the data provided conforms to the characteristics of the audio files.
Selection tables have their own class in R:
```{r}
class(st)
```
### Extended selection tables
When the `extended = TRUE` argument is used, the function generates an object of the *extended_selection_table* class that also contains a list of 'wave' objects corresponding to each of the selections in the data. Therefore, the function **transforms the selection table into a self-contained object**, since the original sound files are no longer needed to perform most acoustic analyses in **warbleR**. This can greatly facilitate the storage and exchange of (bio)acoustic data. In addition, it also speeds up analysis, since it is not necessary to read the sound files every time the data are analyzed.
Now, as mentioned earlier, you need the `selection_table()` function to create an extended selection table. You must also set the argument `extended = TRUE` (otherwise, the class would be a selection table). The following code converts the sample data into an extended selection table:
```{r extn_sel_4.3, eval = FALSE}
# global parameters
warbleR_options(wav.path = tempdir())
ext_st <- selection_table(
X = lbh_selec_table, pb = FALSE,
extended = TRUE, confirm.extended = FALSE
)
```
```{r extn_sel_4.33, eval = TRUE, echo = FALSE}
ext_st <- selection_table(
X = lbh_selec_table, pb = FALSE,
extended = TRUE, confirm.extended = FALSE
)
```
And that is it. Now the acoustic data and the selection data (as well as the additional metadata) are all together in a single R object.
### Handling extended selection tables
Several functions can be used to deal with objects of this class. You can test if the object belongs to the *extended_selection_table*:
```{r extn_sel_5}
is_extended_selection_table(ext_st)
```
You can subset the selections in the same way as any other data frame and it will still keep its attributes:
```{r extn_sel_6}
ext_st2 <- ext_st[1:2, ]
is_extended_selection_table(ext_st2)
```
There is also a generic version of `print()` for this class of objects:
```{r extn_sel_7}
## print
print(ext_st)
```
... which is equivalent to:
```{r extn_sel_7.1, eval=FALSE}
ext_st
```
```{r extn_sel_7/2, echo=FALSE}
print(ext_st)
```
You can also bind them by rows. Here the original *extended_selection_table* is divided into 2 and bound together again using `rbind()`:
```{r extn_sel_8, eval = FALSE}
ext_st3 <- ext_st[1:5, ]
ext_st4 <- ext_st[6:11, ]
ext_st5 <- rbind(ext_st3, ext_st4)
# print
ext_st5
```
```{r extn_sel_8.1, echo=FALSE}
ext_st3 <- ext_st[1:5, ]
ext_st4 <- ext_st[6:11, ]
ext_st5 <- rbind(ext_st3, ext_st4)
# print
print(ext_st5)
```
```{r extn_sel_8.2}
# same as the original
all.equal(ext_st, ext_st5)
```
The 'wave' objects can be read individually using `read_wave()`, a wrapper for the `readWave()` function of **tuneR**, which can handle extended selection tables:
```{r extn_sel_8.21}
wv1 <- read_wave(X = ext_st, index = 3, from = 0, to = 0.37)
```
These are regular 'wave' objects:
```{r extn_sel_8.22, out.width= 750}
class(wv1)
wv1
spectro(wv1, wl = 150, grid = FALSE, scale = FALSE, ovlp = 90)
```
```{r extn_sel_8.23, out.width= 750}
par(mfrow = c(3, 2), mar = rep(0, 4))
for (i in 1:6) {
wv <- read_wave(X = ext_st, index = i, from = 0.05, to = 0.32)
spectro(wv,
wl = 150, grid = FALSE, scale = FALSE, axisX = FALSE,
axisY = FALSE, ovlp = 90
)
}
```
The `read_wave()` function requires the selection table, as well as the row index (i.e. the row number) to be able to read the 'wave' objects. It can also read a regular 'wave' file if the path is provided.
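For instance, a minimal sketch of reading a complete sound file (this assumes a file name can be passed directly as `X` along with its directory; see `?read_wave` for the exact interface):
```{r, eval = FALSE}
# assumption: a sound file name (rather than a selection table) can be given as 'X'
# here reading one of the example files written to tempdir() above
wv_full <- read_wave(X = "Phae.long1.wav", path = tempdir())
```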
Note that other functions that modify data frames are likely to delete the attributes in which the 'wave' objects and metadata are stored. For example, merging an extended selection table with a data frame will remove its attributes:
```{r extn_sel_8.24}
# create new data frame
Y <- data.frame(sound.files = ext_st$sound.files, site = "La Selva", lek = c(rep("SUR", 5), rep("CCL", 6)))
# combine
mrg_ext_st <- merge(ext_st, Y, by = "sound.files")
# check class
is_extended_selection_table(mrg_ext_st)
```
In this case, we can use the `fix_extended_selection_table()` function to transfer the attributes of the original extended selection table:
```{r extn_sel_8.25}
# fix est
mrg_ext_st <- fix_extended_selection_table(X = mrg_ext_st, Y = ext_st)
# check class
is_extended_selection_table(mrg_ext_st)
```
This works as long as some of the original sound files are retained and no other selections are added.
### Analysis using extended selection tables
These objects can be used as input for most **warbleR** functions. Here are some examples of **warbleR** functions using *extended_selection_table*:
#### Spectral parameters
```{r extn_sel_12.1, eval=FALSE}
# spectral parameters
sp <- spectro_analysis(ext_st)
sp
```
```{r, extn_sel_12.2, echo= FALSE, eval = FALSE}
sp <- spectro_analysis(ext_st)
kbl <- kable(sp, align = "c", row.names = F, format = "html")
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl <- scroll_box(kbl,
width = "740px",
box_css = "border: 1px solid #ddd; padding: 1px; ", extra_css = NULL
)
kbl
```
#### Signal-to-noise ratio
```{r extn_sel_12.5, eval=FALSE}
snr <- sig2noise(ext_st, mar = 0.05)
snr
```
```{r, extn_sel_12.6, echo= FALSE, eval = FALSE}
snr <- sig2noise(ext_st, mar = 0.05)
kbl <- kable(snr, align = "c", row.names = F, format = "html")
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl <- scroll_box(kbl,
width = "740px",
box_css = "border: 1px solid #ddd; padding: 1px; ", extra_css = NULL
)
kbl
```
#### Dynamic time warping (DTW)
```{r extn_sel_12.7, eval=FALSE}
dtw.dist <- freq_DTW(ext_st, img = FALSE)
dtw.dist
```
```{r, extn_sel_12.8, echo= FALSE, eval = FALSE}
dtw.dist <- freq_DTW(ext_st, img = FALSE)
kbl <- kable(dtw.dist, align = "c", row.names = T, format = "html")
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl <- scroll_box(kbl,
width = "740px",
box_css = "border: 1px solid #ddd; padding: 1px; ", extra_css = NULL
)
kbl
```
### Performance
The use of *extended_selection_table* objects can improve performance (in our case, measured as computing time). Here we use **microbenchmark** to compare the performance of `sig2noise()` on both formats, and **ggplot2** to plot the results. First, a longer selection table is created simply by repeating the sample data frame 10 times, and is then converted to an extended selection table:
```{r extn_sel_13, eval = FALSE}
# create long selection table
lng.selec.table <- do.call(rbind, replicate(10, lbh_selec_table,
simplify = FALSE
))
# relabels selec
lng.selec.table$selec <- 1:nrow(lng.selec.table)
# create extended selection table
lng_ext_st <- selection_table(
X = lng.selec.table, pb = FALSE,
extended = TRUE, confirm.extended = FALSE
)
# load packages
library(microbenchmark)
library(ggplot2)
# check performance
mbmrk.snr <- microbenchmark(extended = sig2noise(lng_ext_st,
mar = 0.05
), regular = sig2noise(lng.selec.table,
mar = 0.05
), times = 50)
autoplot(mbmrk.snr) + ggtitle("sig2noise")
```
The function runs much faster on extended selection tables. The performance gain is likely to be greater for longer recordings and larger data sets (that is, when it compensates for the computational overhead).
### Sharing acoustic data
This new object class makes it possible to share complete data sets, including the acoustic data. For example, the **NatureSounds** package contains an extended selection table with long-billed hermit hummingbird vocalizations of 10 different song types:
```{r}
data("Phae.long.est")
Phae.long.est
table(Phae.long.est$lek.song.type)
```
The ability to compress large data sets and the ease of performing analyses that require only a single R object can simplify the exchange of data and the reproducibility of bioacoustic analyses.
## **warbleR** functions and the workflow of analysis in bioacoustics
Bioacoustic analyses generally follow a specific sequence of processing and analysis steps. This sequence can be represented schematically like this:
```{r, eval = FALSE, echo = FALSE}
library(warbleR)
wf <- ls("package:warbleR")
wf <- wf[-c(2, 7, 8, 10, 12, 16, 17, 19, 20, 23, 24, 28, 31, 32, 33, 38, 42, 43, 44, 47, 50, 53, 59, 64, 66, 68, 68, 72, 74, 80, 81, 85, 90, 93, 94, 96)]
df <- data.frame(funciones = wf, `Obtener-preparar grabaciones` = "", `Anotar` = "", `Medir` = "", `Revision` = "", `Inspeccion visual` = "", `Analisis estadistico` = "", `Otros` = "")
df2 <- edit(df)
df2$`organizar.anotaciones` <- ""
names(df2) <- names(df2)[c(1:3, 9, 4:8)]
df3 <- edit(df2)
df4 <- df3
df4[is.na(df4)] <- ""
df4 <- df4[df4$Obtener.preparar.grabaciones != "borrar", ]
names(df4) <- c("Funcion", "Obtener-preparar grabaciones", "Anotar", "Organizar anotaciones", "Medir estructura", "Verificar", "Inspeccion visual", "Analisis estadistico", "Otros")
rownames(df4) <- 1:nrow(df4)
df5 <- df4[order(df4$`Obtener-preparar grabaciones`, df4$Anotar, df4$`Organizar anotaciones`, df4$`Medir estructura`, df4$Verificar, df4$`Inspeccion visual`, df4$`Analisis estadistico`, df4$Otros, decreasing = TRUE), ]
df4 <- df4[c(5, 8, 18, 29, 34, 35, 37, 38, 39, 55, 56, 26, 1, 19, 40, 46, 4, 11, 16, 17, 24, 25, 32, 41, 45, 7, 12, 13, 14, 15, 23, 27, 30, 42, 47, 48, 57, 2, 3, 28, 44, 50, 51, 52, 58, 9, 10, 21, 22, 59, 6, 20, 31, 33, 36, 43, 49, 53, 54), ]
# write.csv(df4, "cuadro de funciones warbleR.csv", row.names = FALSE)
```
<img src="analysis-workflow.png" alt="analysis workflow">
We can group **warbleR** functions according to the bioacoustic analysis stages.
### Get and prepare recordings
The `query_xc()` function allows you to search and download sounds from the free access database [Xeno-Canto](https://www.xeno-canto.org/). You can also convert .mp3 files to .wav, change the sampling rate of the files and correct corrupt files, among other functions.
```{r, echo = FALSE, eval = TRUE}
library(kableExtra)
names(cf) <- gsub("\\.", " ", names(cf))
cf2 <- cf[cf$`Obtener preparar grabaciones` == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
```
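For example, a minimal sketch (not run) of checking what is available for a species before downloading, and then converting the downloaded files; both calls are shown in full later in this vignette series:
```{r, eval = FALSE}
# check the metadata of available recordings without downloading them
Phae.lon <- query_xc(qword = "Phaethornis longirostris", download = FALSE)

# after downloading (e.g. query_xc(X = Phae.lon)), convert the .mp3 files to .wav
# and downsample to 22.05 kHz to speed up downstream analyses
mp32wav(samp.rate = 22.05)
```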
### Annotating sound
It is recommended to make annotations in other programs and then import them into R (for example, annotating in Raven and importing the selections with the **Rraven** package). However, **warbleR** offers some functions to facilitate manual or automatic annotation of sound files, as well as their subsequent manipulation:
```{r, echo = FALSE, eval = TRUE}
cf2 <- cf[cf$Anotar == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
```
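For instance, annotations made in _Raven_ can be imported in the **warbleR** format with a single call from the **Rraven** package (a minimal sketch, assuming the Raven selection table files are in the working directory; the same call is used in the next vignette of this series):
```{r, eval = FALSE}
# import Raven selection tables found in the working directory
sels <- Rraven::imp_raven(all.data = FALSE, freq.cols = FALSE, warbler.format = TRUE)
str(sels)
```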
### Organize annotations
The annotations (or selection tables) can be manipulated and refined with a variety of functions. Selection tables can also be converted into the compact format *extended selection tables*:
```{r, echo = FALSE, eval = TRUE}
cf2 <- cf[cf$`Organizar anotaciones` == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
```
### Measure acoustic signal structure
Most **warbleR** functions are dedicated to quantifying the structure of acoustic signals listed in selection tables using batch processing. For this, 4 main measurement methods are offered:
1. Spectrographic parameters
1. Cross correlation
1. Dynamic time warping (DTW)
1. Statistical descriptors of cepstral coefficients
Most functions gravitate around these methods, or variations of these methods:
```{r, echo = FALSE, eval = TRUE}
cf2 <- cf[cf$`Medir estructura` == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
```
### Verify annotations
Functions are provided to detect inconsistencies in selection tables or to modify them. The package also offers several functions to generate spectrograms showing the annotations, which can be organized by annotation categories. This allows you to verify whether the annotations match the previously defined categories, which is particularly useful if the annotations were generated automatically.
```{r, echo = FALSE, eval = TRUE}
cf2 <- cf[cf$Verificar == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
```
### Visual inspection of annotations and measurements
```{r, echo = FALSE, eval = TRUE}
cf2 <- cf[cf$`Inspeccion visual` == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
```
### Additional functions
Finally, **warbleR** offers functions to simplify the use of extended selection tables, organize large numbers of images with spectrograms and generate elaborate signal visualizations:
```{r, echo = FALSE, eval = TRUE}
cf2 <- cf[cf$`Analisis estadistico` == "x" | cf$Otros == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
```
---
## References
1. Araya-Salas M, G Smith-Vidaurre & M Webster. 2017. Assessing the effect of sound file compression and background noise on measures of acoustic signal structure. Bioacoustics 4622, 1-17
1. Araya-Salas M, Smith-Vidaurre G (2017) warbleR: An R package to streamline analysis of animal acoustic signals. Methods Ecol Evol 8:184-191.
---
<font size="4">Session information</font>
```{r session info, echo=F}
sessionInfo()
```
|
/scratch/gouwar.j/cran-all/cranData/warbleR/inst/doc/Intro_to_warbleR.Rmd
|
## ----echo = FALSE, message = FALSE------------------------------------------------------------------------------------------------------------------
# remove all objects
rm(list = ls())
# unload all non-based packages
out <- sapply(paste("package:", names(sessionInfo()$otherPkgs), sep = ""), function(x) try(detach(x, unload = FALSE, character.only = TRUE), silent = TRUE))
# load packages
X <- c("warbleR", "knitr")
invisible(lapply(X, library, character.only = TRUE))
# library(kableExtra)
options(knitr.table.format = "html")
# opts_chunk$set(comment = "")
opts_knit$set(root.dir = tempdir())
options(width = 150, max.print = 100)
# from https://stackoverflow.com/questions/28961431/computationally-heavy-r-vignettes, so that vignettes will be built upon installation, but not executed during R CMD check (which is contributing to the /doc being too large)
is_check <- ("CheckExEnv" %in% search()) || any(c(
"_R_CHECK_TIMINGS_",
"_R_CHECK_LICENSE_"
) %in% names(Sys.getenv()))
knitr::opts_chunk$set(eval = !is_check, comment = "")
# for vignette checking and image file output
# setwd("~/Desktop/R/warbleR_example2/")
# website to fix gifs
# https://ezgif.com/optimize
vgn.path <- getwd()
# read data example for Rraven code
sels <- read.csv("Raven_sels.csv", stringsAsFactors = FALSE)
## ----echo = TRUE, eval=FALSE------------------------------------------------------------------------------------------------------------------------
#
# ### Install packages from CRAN
# # Note that if you install from CRAN, then don't run the code to install from GitHub below, and vice versa
# install.packages("warbleR")
# install.packages("Rraven")
#
# ### Alternatively, install warbleR and Rraven from GitHub repositories, which contain the latest updates
# # Run this ONLY if devtools is not already installed
# install.packages("devtools")
#
# # Load devtools to access the install_github function
# library(devtools)
#
# # Install packages from GitHub
# # install_github("maRce10/warbleR")
# # install_github("maRce10/Rraven")
# # install_github("maRce10/NatureSounds")
#
# # Load warbleR and Rraven into your global environment
# X <- c("warbleR", "Rraven")
# invisible(lapply(X, library, character.only = TRUE))
## ----echo = TRUE, eval=FALSE------------------------------------------------------------------------------------------------------------------------
#
# # The package must be loaded in your working environment
# ls("package:warbleR")
## ----echo = TRUE, eval=FALSE------------------------------------------------------------------------------------------------------------------------
#
# # Create a new directory and set your working directory (assuming that you are in your /home/username directory)
# dir.create(file.path(getwd(), "warbleR_example"))
# setwd(file.path(getwd(), "warbleR_example"))
#
# # Check your location
# getwd()
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# # Load Raven example selection tables
# data("selection_files")
#
# # Write out Raven example selection tables as physical files
# out <- lapply(1:2, function(x) {
# writeLines(selection_files[[x]], con = names(selection_files)[x])
# })
#
# # Write example sound files out as physical .wav files
# data(list = c("Phae.long1", "Phae.long2"))
#
# writeWave(Phae.long1, "Phae.long1.wav")
# writeWave(Phae.long2, "Phae.long2.wav")
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# # Import selections
# sels <- imp_raven(all.data = FALSE, freq.cols = FALSE, warbler.format = TRUE)
# str(sels)
#
# # Write out the imported selections as a .csv for later use
# write.csv(sels, "Raven_sels.csv", row.names = FALSE)
## ----echo=TRUE, eval=FALSE--------------------------------------------------------------------------------------------------------------------------
#
# sels <- selection_table(X = sels)
# str(sels)
# class(sels)
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# # Query xeno-canto for all Phaethornis recordings (e.g., by genus)
# Phae <- query_xc(qword = "Phaethornis", download = FALSE)
#
# # Check out the structure of resulting the data frame
# str(Phae)
## ----eval = TRUE, echo = FALSE, message = FALSE-----------------------------------------------------------------------------------------------------
# Phae <- query_xc(qword = "Phaethornis", download = FALSE)
# write.csv(Phae, file = "~/Dropbox/warbleR/vignettes/Phae.XC.csv", row.names = FALSE)
Phae <- read.csv(file.path(vgn.path, "Phae.XC.csv"), stringsAsFactors = FALSE)
# Check out the structure of resulting the data frame
str(Phae)
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# # Query xeno-canto for all Phaethornis longirostris recordings
# Phae.lon <- query_xc(qword = "Phaethornis longirostris", download = FALSE)
#
# # Check out the structure of resulting the data frame
# str(Phae.lon)
## ----eval = TRUE, echo = FALSE, message = FALSE-----------------------------------------------------------------------------------------------------
# Phae.lon <- query_xc(qword = "Phaethornis longirostris", download = FALSE)
# write.csv(Phae.lon, file = "~/Dropbox/warbleR/vignettes/Phae.lon.XC.csv", row.names = FALSE)
Phae.lon <- read.csv(file.path(vgn.path, "Phae.lon.XC.csv"), stringsAsFactors = FALSE)
# Check out the structure of resulting the data frame
str(Phae.lon)
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# # Image type default is jpeg, but tiff files have better resolution
#
# # When the data frame contains multiple species, this will yield one map per species
# map_xc(X = Phae, img = TRUE, it = "tiff") # all species in the genus
# map_xc(X = Phae.lon, img = FALSE) # a single species
## ----eval=TRUE, echo=FALSE, message=FALSE-----------------------------------------------------------------------------------------------------------
map_xc(X = Phae.lon, img = FALSE)
## ----eval=TRUE, echo=TRUE---------------------------------------------------------------------------------------------------------------------------
# How many recordings are available for Phaethornis longirostris?
nrow(Phae.lon)
# How many signal types exist in the xeno-canto metadata?
unique(Phae.lon$Vocalization_type)
# How many recordings per signal type?
table(Phae.lon$Vocalization_type)
## ----eval=TRUE, echo=TRUE---------------------------------------------------------------------------------------------------------------------------
# Filter the metadata to select the signals we want to retain
# First by quality
Phae.lon <- Phae.lon[Phae.lon$Quality == "A", ]
nrow(Phae.lon)
# Then by signal type
Phae.lon.song <- Phae.lon[grep("song", Phae.lon$Vocalization_type, ignore.case = TRUE), ]
nrow(Phae.lon.song)
# Finally by locality
Phae.lon.LS <- Phae.lon.song[grep("La Selva Biological Station, Sarapiqui, Heredia", Phae.lon.song$Locality, ignore.case = FALSE), ]
# Check resulting data frame, 6 recordings remain
str(Phae.lon.LS)
## ----eval=TRUE, echo=TRUE---------------------------------------------------------------------------------------------------------------------------
# map in the RStudio graphics device (img = FALSE)
map_xc(Phae.lon.LS, img = FALSE)
## ----eval=FALSE, echo=FALSE-------------------------------------------------------------------------------------------------------------------------
#
# # Not working as of 01 Aug 2017
# # This copies the selected sound files to a dropbox folder so they can be shared
# # do not show this code
# fn <- with(Phae.lon.LS, paste(paste(Genus, Specific_epithet, Recording_ID, sep = "-"), ".wav", sep = " "))
# file.copy(from = file.path("/home/m/Documents/Biblioteca de cantos/Trochilidae/XC/wavs", fn), to = file.path("/home/m/Dropbox/Projects/warbleR package/vignette files", fn), overwrite = TRUE)
#
# wlist <- lapply(fn, function(x) downsample(readWave(file.path("/home/m/Dropbox/Projects/warbleR package/vignette files", x)), samp.rate = 22500))
#
# names(wlist) <- fn
#
# saveRDS(wlist, file = "/home/m/Dropbox/Sharing/warbleR/recs.RDS")
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# # Download sound files
# query_xc(X = Phae.lon.LS)
#
# # Save the metadata object as a .csv file
# write.csv(Phae.lon.LS, "Phae_lon.LS.csv", row.names = FALSE)
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# # Always check you're in the right directory beforehand
# # getwd()
#
# # here we are downsampling the original sampling rate of 44.1 kHz to speed up downstream analyses in the vignette series
# mp32wav(samp.rate = 22.05)
#
# # Use checkwavs to see if wav files can be read
# check_wavs()
## ----eval=FALSE, echo=FALSE-------------------------------------------------------------------------------------------------------------------------
#
# # Not working 01 Aug 2017
#
# ### If you were unable to convert _mp3_ to _wav_ format:
# # + download the file in [this link](https://www.dropbox.com/s/htpbxbdw8s4i23k/recs.RDS?dl=0) and put it in your working directory
# # + then run the following code:
#
#
# # recs <- readRDS(file = "recs.RDS")
# #
# # for(i in 1:length(recs))
# # writeWave(recs[[i]], filename = names(recs)[i])
## ----echo=TRUE, eval=FALSE, message=FALSE-----------------------------------------------------------------------------------------------------------
#
# # Make sure you are in the right working directory
# # Note that all the example sound files begin with the pattern "Phae.long"
# wavs <- list.files(pattern = "wav$")
# wavs
#
# rm <- wavs[grep("Phae.long", wavs)]
#
# file.remove(rm)
#
# # Check that the right wav files were removed
# # Only xeno-canto wav files should remain
# list.files(pattern = "wav$")
## ----echo=TRUE, eval=FALSE, message=FALSE-----------------------------------------------------------------------------------------------------------
#
# # For this example, set your working directory to an empty temporary directory
# setwd(tempdir())
#
# # Here we will simulate the problem of having files scattered in multiple directories
#
# # Load .wav file examples from the NatureSounds package
# data(list = c("Phae.long1", "Phae.long2", "Phae.long3"))
#
# # Create first folder inside the temporary directory and write new .wav files inside this new folder
# dir.create("folder1")
# writeWave(Phae.long1, file.path("folder1", "Phae_long1.wav"))
# writeWave(Phae.long2, file.path("folder1", "Phae_long2.wav"))
#
# # Create second folder inside the temporary directory and write new .wav files inside this second new folder
# dir.create("folder2")
# writeWave(Phae.long3, file.path("folder2", "Phae_long3.wav"))
#
# # Consolidate the scattered files into a single folder, and make a .csv file that contains metadata (location, old and new names in the case that files were renamed)
# invisible(consolidate(path = tempdir(), save.csv = TRUE))
#
# list.files(path = "./consolidated_folder")
#
# # set your working directory back to "/home/user/warbleR_example" for the rest of the vignette, or to whatever working directory you were using originally
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# # Create a vector of all the recordings in the directory
# wavs <- list.files(pattern = "wav$")
#
# # Print this object to see all sound files
# # 6 sound files from xeno-canto
# wavs
#
# # Select a subset of recordings to explore full_spectrograms() arguments
# # Based on the list of wav files we created above
# sub <- wavs[c(1, 5)]
#
# # How long are these files? this will determine number of pages returned by full_spectrograms
# duration_wavs(sub)
#
# # ovlp = 10 to speed up function
# # tiff image files are better quality and are faster to produce
# full_spectrograms(flist = sub, ovlp = 10, it = "tiff")
#
# # We can zoom in on the frequency axis by changing flim,
# # the number of seconds per row, and number of rows
# full_spectrograms(flist = sub, flim = c(2, 10), sxrow = 6, rows = 15, ovlp = 10, it = "tiff")
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# # Make long spectrograms for the xeno-canto sound files
# full_spectrograms(flim = c(2, 10), ovlp = 10, sxrow = 6, rows = 15, it = "jpeg", flist = fl)
#
# # Concatenate full_spectrograms image files into a single PDF per recording
# # full_spectrograms images must be jpegs to do this
# full_spectrograms2pdf(keep.img = FALSE, overwrite = TRUE)
## ----eval=FALSE, echo=FALSE-------------------------------------------------------------------------------------------------------------------------
#
# # make all page-size images 700 pxls width
## ----eval=FALSE, echo=FALSE-------------------------------------------------------------------------------------------------------------------------
#
# ### Remove silence in sound files
#
# # The function below removes silent segments of sound files. This can help reduce file size, which can speed up functions.
#
# # giving error: Error in file.copy(from = wv, to = file.path(getwd(), "removed_silence_files", :
# # more 'from' files than 'to' files
#
# # here we will produce spectrograms of the silent gaps that were removed
# # perform this on only the longer xeno-canto recordings
# remove_silence(flist = wavs, min.sil.dur = 0.2, img = TRUE, it = "jpeg", flim = c(0, 12))
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# # Select a subset of sound files
# # Reinitialize the wav object
# wavs <- list.files(pattern = ".wav$", ignore.case = TRUE)
#
# # Set a seed so we all have the same results
# set.seed(1)
# sub <- wavs[sample(1:length(wavs), 3)]
#
# # Run auto_detec() on subset of recordings
# # The data frame object output is printed to the console, we are not saving this in an object yet, since we are just playing around with argument settings
# # you can run this in parallel to speed up computation time
# auto_detec(flist = sub, bp = c(1, 10), threshold = 10, mindur = 0.05, maxdur = 0.5, envt = "abs", ssmooth = 300, ls = TRUE, res = 100, flim = c(1, 12), wl = 300, set = TRUE, sxrow = 6, rows = 15, redo = FALSE)
## ----eval=FALSE, echo = TRUE------------------------------------------------------------------------------------------------------------------------
#
# auto_detec(flist = sub, bp = c(2, 10), threshold = 20, mindur = 0.09, maxdur = 0.22, envt = "abs", ssmooth = 900, ls = TRUE, res = 100, flim = c(1, 12), wl = 300, set = TRUE, sxrow = 6, rows = 15, redo = TRUE, it = "tiff", img = TRUE, smadj = "end")
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# Phae.ad <- auto_detec(bp = c(2, 10), threshold = 20, mindur = 0.09, maxdur = 0.22, envt = "abs", ssmooth = 900, ls = TRUE, res = 100, flim = c(2, 10), wl = 300, set = TRUE, sxrow = 6, rows = 15, redo = TRUE, it = "tiff", img = TRUE, smadj = "end")
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# table(Phae.ad$sound.files)
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# # A margin that's too large causes other signals to be included in the noise measurement
# # Re-initialize X as needed, for either auto_detec output
#
# # Try this with 10% of the selections first
# # Set a seed first, so we all have the same results
# set.seed(5)
#
# X <- Phae.ad[sample(1:nrow(Phae.ad), (nrow(Phae.ad) * 0.05)), ]
# nrow(X)
#
# snr_spectrograms(X = X, flim = c(2, 10), snrmar = 0.5, mar = 0.7, it = "jpeg")
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# # This smaller margin is better
# snr_spectrograms(X = X, flim = c(2, 10), snrmar = 0.04, mar = 0.7, it = "jpeg")
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# Phae.snr <- sig2noise(X = Phae.ad[seq(1, nrow(Phae.ad), 2), ], mar = 0.04)
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# Phae.hisnr <- Phae.snr[ave(-Phae.snr$SNR, Phae.snr$sound.files, FUN = rank) <= 5, ]
#
# # save the selections as a physical file
# write.csv(Phae.hisnr, "Phae_hisnr.csv", row.names = FALSE)
#
# # Double check the number of selection per sound files
# # Only the xeno-canto sound files will have 5 selections, the other sound files started off with less than 5 selections
# table(Phae.hisnr$sound.files)
## ----eval=FALSE, echo=FALSE-------------------------------------------------------------------------------------------------------------------------
#
# Phae.hisnr <- read.csv("Phae_hisnr.csv", header = TRUE)
# table(Phae.hisnr$sound.files)
|
/scratch/gouwar.j/cran-all/cranData/warbleR/inst/doc/warbleR_workflow_01.R
|
---
title: <font size="7"><b>warbleR: Import sound files and select signals</b></font>
pagetitle: Import sound files and select signals
author:
- <a href="https://marce10.github.io">Marcelo Araya-Salas, PhD</a> & <a href="https://smith-vidaurre.com/">Grace Smith-Vidaurre</a>
date: "`r Sys.Date()`"
output:
rmarkdown::html_document:
self_contained: yes
toc: true
toc_depth: 3
toc_float:
collapsed: false
smooth_scroll: true
vignette: >
\usepackage[utf8]{inputenc}
%\VignetteIndexEntry{2. Import sound files and select signals}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
<!-- <script> -->
<!-- $(document).ready(function() { -->
<!-- $head = $('#header'); -->
<!-- $head.prepend('<img src=\"logo.png\"/>') -->
<!-- }); -->
<!-- </script> -->
<!-- -->
```{css, echo = FALSE}
div#header h1.title, div#header h3.subtitle, div#header h4.author, div#header h4.date {
text-align: center
}
```
## Bioacoustics in R with `warbleR`
<img src="warbleR_sticker.png" alt="warbleR logo" align="right" width="25%" height="25%">
Bioacoustics research encompasses a wide range of questions, study systems and methods, including the software used for analyses. The `warbleR` and `Rraven` packages leverage the flexibility of the `R` environment to offer a broad and accessible bioinformatics tool set. These packages fundamentally rely upon two types of data to perform bioacoustics analyses in R:
1. **Sound files:** Recordings in _wav_ or _mp3_ format, either from your own research or open-access databases like _xeno-canto_
2. **Selection tables:** Tables containing the temporal coordinates (start and end points) of selected acoustic signals within recordings (see the short example below)
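Both kinds of objects ship as example data with `warbleR` and its dependency `NatureSounds`, e.g. (a minimal sketch):
```{r, eval = FALSE}
# an example recording and an example selection table from the package example data
data(list = c("Phae.long1", "lbh_selec_table"))

Phae.long1 # a 'Wave' object (sound file)
head(lbh_selec_table) # temporal coordinates of selected signals
```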
### Package repositories
These packages are both available on _CRAN_: [`warbleR`](https://cran.r-project.org/package=warbleR), [`Rraven`](https://cran.r-project.org/package=Rraven), as well as on _GitHub_: [`warbleR`](https://github.com/maRce10/warbleR), [`Rraven`](https://github.com/maRce10/Rraven). The GitHub repository will always contain the latest functions and updates. You can also check out an article in _Methods in Ecology and Evolution_ documenting the `warbleR` package <a href='#References'><sup>[1]</sup></a>.
We welcome all users to provide feedback, contribute updates or new functions and report bugs to warbleR's GitHub repository.
Please note that `warbleR` and `Rraven` use functions from the [`seewave`](https://cran.r-project.org/package=seewave), [`monitoR`](https://cran.r-project.org/package=monitoR), [`tuneR`](https://cran.r-project.org/package=tuneR) and [`dtw`](https://cran.r-project.org/package=dtw) packages internally. `warbleR` and `Rraven` have been designed to make bioacoustics analyses more accessible to `R` users, and such analyses would not be possible without the tools provided by the packages above. These packages should be given credit when using `warbleR` and `Rraven` by including citations in publications as appropriate (e.g. `citation("seewave")`).
### Parallel processing in `warbleR`
Parallel processing, or using multiple cores on your machine, can greatly speed up analyses. All iterative `warbleR` functions now have parallel processing for Linux, Mac and Windows operating systems. These functions also contain progress bars to visualize progress during normal or parallel processing. See <a href='#References'><sup>[1]</sup></a> for more details about improved running time using parallel processing.
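For example, most iterative `warbleR` functions accept a `parallel` argument that sets the number of cores to use. The sketch below is illustrative only (it is not part of the case study, and it assumes a machine with at least 2 cores and a selection table object `X` already in your environment):
```{r, eval=FALSE}
# a minimal sketch: parallel = 2 distributes the iterations over 2 cores
# 'X' stands in for any selection table object
spectrograms(X = X, wl = 300, flim = c(2, 10), parallel = 2)
```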
## **Vignette introduction**
Below we present a case study of microgeographic vocal variation in long-billed hermit hummingbirds, _Phaethornis longirostris_. Variation at small geographic scales has been already described in this species <a href='#References'><sup>[2]</sup></a>. Our goal is to search for visible differences in song structure within a site, and then determine whether underlying differences in acoustic parameters are representative of spectrographic distinctiveness. In this vignette, we will demonstrate how to:
1. Prepare for bioacoustics analyses by downloading `warbleR` and `Rraven`
2. Use `Rraven` to import _Raven_ selection tables for your own recordings
3. Obtain recordings from the open-access database [_xeno-canto_](https://www.xeno-canto.org/)
4. Select signals using `warbleR` functions
This vignette can be run without an advanced understanding of `R`, as long as you know how to run code in your console. However, knowing more about basic `R` coding would be very helpful to modify the code for your research questions.
For more details about function arguments, input or output, read the documentation for the function in question (e.g. `?query_xc`).
## **Prepare for analyses**
### Install and load packages
First, we need to install and load `warbleR` and `Rraven`. You will need `R` version >= 3.2.1 and `seewave` version >= 2.0.1. Also, users on `UNIX` machines (Linux or Mac operating systems) may need to install `fftw3`, `pkg-config` and `libsndfile` prior to installing `warbleR`. These external libraries must be installed through a `UNIX` terminal. Installing them lies outside the scope of this vignette, but you can find more information online.
```{r, echo = FALSE, message = FALSE}
# remove all objects
rm(list = ls())
# unload all non-based packages
out <- sapply(paste("package:", names(sessionInfo()$otherPkgs), sep = ""), function(x) try(detach(x, unload = FALSE, character.only = TRUE), silent = TRUE))
# load packages
X <- c("warbleR", "knitr")
invisible(lapply(X, library, character.only = TRUE))
# library(kableExtra)
options(knitr.table.format = "html")
# opts_chunk$set(comment = "")
opts_knit$set(root.dir = tempdir())
options(width = 150, max.print = 100)
# from https://stackoverflow.com/questions/28961431/computationally-heavy-r-vignettes, so that vignettes will be built upon installation, but not executed during R CMD check (which is contributing to the /doc being too large)
is_check <- ("CheckExEnv" %in% search()) || any(c(
"_R_CHECK_TIMINGS_",
"_R_CHECK_LICENSE_"
) %in% names(Sys.getenv()))
knitr::opts_chunk$set(eval = !is_check, comment = "")
# for vignette checking and image file output
# setwd("~/Desktop/R/warbleR_example2/")
# website to fix gifs
# https://ezgif.com/optimize
vgn.path <- getwd()
# read data example for Rraven code
sels <- read.csv("Raven_sels.csv", stringsAsFactors = FALSE)
```
```{r, echo = TRUE, eval=FALSE}
### Install packages from CRAN
# Note that if you install from CRAN, then don't run the code to install from GitHub below, and vice versa
install.packages("warbleR")
install.packages("Rraven")
### Alternatively, install warbleR and Rraven from GitHub repositories, which contain the latest updates
# Run this ONLY if devtools is not already installed
install.packages("devtools")
# Load devtools to access the install_github function
library(devtools)
# Install packages from GitHub
# install_github("maRce10/warbleR")
# install_github("maRce10/Rraven")
# install_github("maRce10/NatureSounds")
# Load warbleR and Rraven into your global environment
X <- c("warbleR", "Rraven")
invisible(lapply(X, library, character.only = TRUE))
```
This vignette series will not always include all available `warbleR` functions, as existing functions are updated and new functions are added. To see all functions available in this package:
```{r, echo = TRUE, eval=FALSE}
# The package must be loaded in your working environment
ls("package:warbleR")
```
### Make a new directory and set your working directory
```{r, echo = TRUE, eval=FALSE}
# Create a new directory and set your working directory (assuming that you are in your /home/username directory)
dir.create(file.path(getwd(), "warbleR_example"))
setwd(file.path(getwd(), "warbleR_example"))
# Check your location
getwd()
```
## **Import selection tables**
`Rraven` is an interface between _Raven_ and `R` that allows you to import selection tables for your own recordings. This is very useful if you prefer to select signals in recordings outside of `R`. Once you have selection tables imported into `R` and the corresponding sound files in your working directory, you can move on to making spectrograms or performing analyses (see the next vignette in this series).
The sound files and selection tables loaded here correspond to male long-billed hermit hummingbird songs recorded at La Selva Biological Station in Costa Rica. Later, we will add to this data set by searching for more recordings on the _xeno-canto_ open-access database.
Check out the `Rraven` package documentation for more functions and information (although you will need _Raven_ or _Syrinx_ installed on your computer for some functions).
```{r, eval=FALSE, echo=TRUE}
# Load Raven example selection tables
data("selection_files")
# Write out Raven example selection tables as physical files
out <- lapply(1:2, function(x) {
writeLines(selection_files[[x]], con = names(selection_files)[x])
})
# Write example sound files out as physical .wav files
data(list = c("Phae.long1", "Phae.long2"))
writeWave(Phae.long1, "Phae.long1.wav")
writeWave(Phae.long2, "Phae.long2.wav")
```
```{r, eval=FALSE, echo=TRUE}
# Import selections
sels <- imp_raven(all.data = FALSE, freq.cols = FALSE, warbler.format = TRUE)
str(sels)
# Write out the imported selections as a .csv for later use
write.csv(sels, "Raven_sels.csv", row.names = FALSE)
```
### Make your data frame into an object of class `selection_table`
Downstream `warbleR` functions require selection tables in order to run correctly. Use the function `selection_table` to convert your data frame into an object of class `selection_table`. In future versions of `warbleR`, all functions will require selection table objects of class `selection_table`.
```{r, echo=TRUE, eval=FALSE}
sels <- selection_table(X = sels)
str(sels)
class(sels)
```
## **Obtain metadata and recordings from [xeno-canto](https://www.xeno-canto.org/)**
The open-access [xeno-canto](https://www.xeno-canto.org/) database is an excellent source of sound files across avian species. You can query this database by a species or genus of interest. The function `query_xc` has two types of output:
1. **Metadata of recordings:** geographic coordinates, recording quality, recordist, type of signal, etc.
2. **Sound files:** Sound files in _mp3_ format are returned if the argument `download` is set to `TRUE`.
We recommend downloading metadata first from _xeno-canto_, as this data can be filtered in R to more efficiently download recordings (e.g. only those relevant to your question).
Here, we will query the _xeno-canto_ database to download more _Phaethornis longirostris_ sound files for our question of how male songs vary at a microgeographic scale.
```{r, eval=FALSE}
# Query xeno-canto for all Phaethornis recordings (e.g., by genus)
Phae <- query_xc(qword = "Phaethornis", download = FALSE)
# Check out the structure of the resulting data frame
str(Phae)
```
```{r, eval = TRUE, echo = FALSE, message = FALSE}
# Phae <- query_xc(qword = "Phaethornis", download = FALSE)
# write.csv(Phae, file = "~/Dropbox/warbleR/vignettes/Phae.XC.csv", row.names = FALSE)
Phae <- read.csv(file.path(vgn.path, "Phae.XC.csv"), stringsAsFactors = FALSE)
# Check out the structure of the resulting data frame
str(Phae)
```
```{r, eval=FALSE}
# Query xeno-canto for all Phaethornis longirostris recordings
Phae.lon <- query_xc(qword = "Phaethornis longirostris", download = FALSE)
# Check out the structure of the resulting data frame
str(Phae.lon)
```
```{r, eval = TRUE, echo = FALSE, message = FALSE}
# Phae.lon <- query_xc(qword = "Phaethornis longirostris", download = FALSE)
# write.csv(Phae.lon, file = "~/Dropbox/warbleR/vignettes/Phae.lon.XC.csv", row.names = FALSE)
Phae.lon <- read.csv(file.path(vgn.path, "Phae.lon.XC.csv"), stringsAsFactors = FALSE)
# Check out the structure of the resulting data frame
str(Phae.lon)
```
You can then use the function `map_xc` to visualize the geographic spread of the queried recordings. `map_xc` will create an image file of a map per species in your current directory if `img = TRUE`. If `img = FALSE`, maps will be displayed in the graphics device.
```{r, eval=FALSE}
# Image type default is jpeg, but tiff files have better resolution
# When the data frame contains multiple species, this will yield one map per species
map_xc(X = Phae, img = TRUE, it = "tiff") # all species in the genus
map_xc(X = Phae.lon, img = FALSE) # a single species
```
```{r, eval=TRUE, echo=FALSE, message=FALSE}
map_xc(X = Phae.lon, img = FALSE)
```
### Filter [xeno-canto](https://www.xeno-canto.org/) recordings by quality, signal type and locality
In most cases, you will need to filter the _xeno-canto_ metadata by type of signal you want to analyze. When you subset the metadata, you can input the filtered metadata back into `query_xc` to download only the selected recordings. There are many ways to filter data in R, and the example below can be modified to fit your own data.
Here, before downloading the sound files themselves from _xeno-canto_, we want to ensure that we select high quality sound files that contain songs (rather than other acoustic signal types) and were also recorded at La Selva Biological Station in Costa Rica.
```{r, eval=TRUE, echo=TRUE}
# How many recordings are available for Phaethornis longirostris?
nrow(Phae.lon)
# How many signal types exist in the xeno-canto metadata?
unique(Phae.lon$Vocalization_type)
# How many recordings per signal type?
table(Phae.lon$Vocalization_type)
```
```{r, eval=TRUE, echo=TRUE}
# Filter the metadata to select the signals we want to retain
# First by quality
Phae.lon <- Phae.lon[Phae.lon$Quality == "A", ]
nrow(Phae.lon)
# Then by signal type
Phae.lon.song <- Phae.lon[grep("song", Phae.lon$Vocalization_type, ignore.case = TRUE), ]
nrow(Phae.lon.song)
# Finally by locality
Phae.lon.LS <- Phae.lon.song[grep("La Selva Biological Station, Sarapiqui, Heredia", Phae.lon.song$Locality, ignore.case = FALSE), ]
# Check resulting data frame, 6 recordings remain
str(Phae.lon.LS)
```
We can check if the location coordinates make sense (all recordings should be from a single place in Costa Rica) by making a map of these recordings using `map_xc`.
```{r, eval=TRUE, echo=TRUE}
# map in the RStudio graphics device (img = FALSE)
map_xc(Phae.lon.LS, img = FALSE)
```
```{r, eval=FALSE, echo=FALSE}
# Not working as of 01 Aug 2017
# This copies the selected sound files to a dropbox folder so they can be shared
# do not show this code
fn <- with(Phae.lon.LS, paste(paste(Genus, Specific_epithet, Recording_ID, sep = "-"), ".wav", sep = " "))
file.copy(from = file.path("/home/m/Documents/Biblioteca de cantos/Trochilidae/XC/wavs", fn), to = file.path("/home/m/Dropbox/Projects/warbleR package/vignette files", fn), overwrite = TRUE)
wlist <- lapply(fn, function(x) downsample(readWave(file.path("/home/m/Dropbox/Projects/warbleR package/vignette files", x)), samp.rate = 22500))
names(wlist) <- fn
saveRDS(wlist, file = "/home/m/Dropbox/Sharing/warbleR/recs.RDS")
```
Once you're sure you want the recordings, use `query_xc` to download the files. Also, save the metadata as a _.csv_ file.
```{r, eval=FALSE}
# Download sound files
query_xc(X = Phae.lon.LS)
# Save the metadata object as a .csv file
write.csv(Phae.lon.LS, "Phae_lon.LS.csv", row.names = FALSE)
```
### Convert [xeno-canto](https://www.xeno-canto.org/) _mp3_ recordings to _wav_ format
[xeno-canto](https://www.xeno-canto.org/) maintains recordings in _mp3_ format due to file size restrictions. However, we require _wav_ format for all downstream analyses. Compression from _wav_ to _mp3_ and back involves information losses, but recordings that have undergone this transformation have been successfully used in research <a href='#References'> <sup>[3]</sup></a>.
To convert _mp3_ to _wav_, we can use the warbleR function `mp32wav`, which relies on an underlying function from the [`tuneR`](https://cran.r-project.org/package=tuneR) package. This function does not always work (and it remains unclear as to why!). This bug should be fixed in future versions of `tuneR`. If RStudio aborts when running `mp32wav`, use an _mp3_ to _wav_ converter online, or download the open source software `Audacity` (available for Mac, Linux and Windows users).
After _mp3_ files have been converted, we need to check that the _wav_ files are not corrupted and can be read into RStudio (some _wav_ files can't be read due to format or permission issues).
```{r, eval=FALSE}
# Always check you're in the right directory beforehand
# getwd()
# here we are downsampling the original sampling rate of 44.1 kHz to speed up downstream analyses in the vignette series
mp32wav(samp.rate = 22.05)
# Use checkwavs to see if wav files can be read
check_wavs()
```
```{r, eval=FALSE, echo=FALSE}
# Not working 01 Aug 2017
### If you were unable to convert _mp3_ to _wav_ format:
# + download the file in [this link](https://www.dropbox.com/s/htpbxbdw8s4i23k/recs.RDS?dl=0) and put it in your working directory
# + then run the following code:
# recs <- readRDS(file = "recs.RDS")
#
# for(i in 1:length(recs))
# writeWave(recs[[i]], filename = names(recs)[i])
```
## **A note on combining data from different sources**
We now have _.wav_ files for existing recordings (_Phae.long1.wav_ through _Phae.long4.wav_, representing recordings made in the field) as well as 6 recordings downloaded from _xeno-canto_. The existing _Phae.long*.wav_ recordings have associated selection tables that were made in _Raven_, but the _xeno-canto_ recordings have no selection tables yet, as we have not parsed these sound files to select signals within them.
Depending on your question(s), you can combine your own sound files and those from `xeno-canto` into a single data set (after ground-truthing). This is made possible by the fact that `warbleR` functions will read in all sound files present in your working directory.
For the main case study in this vignette, we will move forwards with only the `xeno-canto` sound files. We will use the example sound files when demonstrating `warbleR` functions that are not mandatory for the case study but may be useful for your own workflow (e.g. `consolidate` below).
To continue the workflow, remove all example _wav_ files from your working directory:
```{r, echo=TRUE, eval=FALSE, message=FALSE}
# Make sure you are in the right working directory
# Note that all the example sound files begin with the pattern "Phae.long"
wavs <- list.files(pattern = "wav$")
wavs
rm <- wavs[grep("Phae.long", wavs)]
file.remove(rm)
# Check that the right wav files were removed
# Only xeno-canto wav files should remain
list.files(pattern = "wav$")
```
### Consolidate sound files across various directories
Since `warbleR` handles sound files in working directories, it's good practice to keep sound files associated with the same project in a single directory. If you're someone who likes to make a new directory for every batch of recordings or new analysis associated with the same project, you may find the `consolidate` function useful.
In case you have your own recordings in _wav_ format and have skipped previous sections, you must specify the location of the sound files you will use prior to running downstream functions by setting your working directory again.
```{r, echo=TRUE, eval=FALSE, message=FALSE}
# For this example, set your working directory to an empty temporary directory
setwd(tempdir())
# Here we will simulate the problem of having files scattered in multiple directories
# Load .wav file examples from the NatureSounds package
data(list = c("Phae.long1", "Phae.long2", "Phae.long3"))
# Create first folder inside the temporary directory and write new .wav files inside this new folder
dir.create("folder1")
writeWave(Phae.long1, file.path("folder1", "Phae_long1.wav"))
writeWave(Phae.long2, file.path("folder1", "Phae_long2.wav"))
# Create second folder inside the temporary directory and write new .wav files inside this second new folder
dir.create("folder2")
writeWave(Phae.long3, file.path("folder2", "Phae_long3.wav"))
# Consolidate the scattered files into a single folder, and make a .csv file that contains metadata (location, old and new names in the case that files were renamed)
invisible(consolidate(path = tempdir(), save.csv = TRUE))
list.files(path = "./consolidated_folder")
# set your working directory back to "/home/user/warbleR_example" for the rest of the vignette, or to whatever working directory you were using originally
```
## **Make long spectrograms of whole recordings**
`full_spectrograms` produces image files with spectrograms of whole sound files split into multiple rows. It is a useful tool for filtering by visual inspection.
`full_spectrograms` allows you to visually inspect the quality of the recording (e.g. amount of background noise) or the type, number, and completeness of the vocalizations of interest. You can discard the image files and recordings that you no longer want to analyze.
First, adjust the function arguments as needed. We can work on a subset of the recordings by specifying their names with the `flist` argument.
```{r, eval=FALSE}
# Create a vector of all the recordings in the directory
wavs <- list.files(pattern = "wav$")
# Print this object to see all sound files
# 6 sound files from xeno-canto
wavs
# Select a subset of recordings to explore full_spectrograms() arguments
# Based on the list of wav files we created above
sub <- wavs[c(1, 5)]
# How long are these files? this will determine number of pages returned by full_spectrograms
duration_wavs(sub)
# ovlp = 10 to speed up function
# tiff image files are better quality and are faster to produce
full_spectrograms(flist = sub, ovlp = 10, it = "tiff")
# We can zoom in on the frequency axis by changing flim,
# the number of seconds per row, and number of rows
full_spectrograms(flist = sub, flim = c(2, 10), sxrow = 6, rows = 15, ovlp = 10, it = "tiff")
```
Once satisfied with the argument settings we can make long spectrograms for all the sound files.
```{r, eval=FALSE}
# Make long spectrograms for the xeno-canto sound files
full_spectrograms(flim = c(2, 10), ovlp = 10, sxrow = 6, rows = 15, it = "jpeg")
# Concatenate full_spectrograms image files into a single PDF per recording
# full_spectrograms images must be jpegs to do this
full_spectrograms2pdf(keep.img = FALSE, overwrite = TRUE)
```
The pdf image files (in the working directory) for the _xeno-canto_ recordings should look like this:
```{r, eval=FALSE, echo=FALSE}
# make all page-size images 700 pxls width
```
<!-- <center> </center> -->
The sound file name and page number are placed in the top right corner. The dimensions of the image are made to letter paper size for printing and subsequent visual inspection.
Recording _154123_ has a lot of background noise. Delete the _wav_ file for this recording to remove it from subsequent analyses.
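That deletion can be done directly from `R`. Below is a minimal sketch, assuming the downloaded file follows the `Genus-species-ID.wav` naming used for the other _xeno-canto_ recordings in this vignette:
```{r, eval=FALSE}
# remove the noisy recording so downstream functions skip it
file.remove("Phaethornis-longirostris-154123.wav")
```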
```{r, eval=FALSE, echo=FALSE}
### Remove silence in sound files
# The function below removes silent segments of sound files. This can help reduce file size, which can speed up functions.
# giving error: Error in file.copy(from = wv, to = file.path(getwd(), "removed_silence_files", :
# more 'from' files than 'to' files
# here we will produce spectrograms of the silent gaps that were removed
# perform this on only the longer xeno-canto recordings
remove_silence(flist = wavs, min.sil.dur = 0.2, img = TRUE, it = "jpeg", flim = c(0, 12))
```
## **Select signals in _warbleR_**
_warbleR_ provides a function for selecting acoustic signals within recordings. `auto_detec` automatically detects the start and end of signals in sound files based on amplitude, duration, and frequency range attributes.
This function is fastest with shorter recordings, but there are ways to deal with longer recordings (an hour long or more). In this section we expand on some important function arguments, but check out the function documentation for more information.
### Automatically detect signals with `auto_detec`
`auto_detec` has two types of output:
+ a data frame with recording name, selection, start and end times. The last two are temporal coordinates that will be passed on to downstream functions to measure acoustic parameters
+ a spectrogram per recording, with red dotted lines marking the start and end of each detected signal, saved in your working directory
Check out the `auto_detec` documentation for more information. The argument `threshold` is one of the most important detection parameters, along with the arguments that specify signal frequency range and duration. _Phaethornis longirostris_ songs have frequencies between 2 and 10 kHz and durations between 0.05 and 0.5 s.
If you need to detect all or most of the signals within the recording, play around with different arguments to increase detection accuracy. Since you may need to do several rounds of optimization, we recommend using subsets of your recordings to speed up the process. If the species you study produces stereotyped signals, like _Phaethornis longirostris_, just a few signals are needed per recording, and a low-accuracy detection could yield enough selections.
`auto_detec` does not replace visual inspection of selected signals. Ensure that you set aside the time to inspect all selected signals for accuracy. You will often need to run detection functions several times, and in the process you will get to know your signals better (if you don't already).
```{r, eval=FALSE, echo=TRUE}
# Select a subset of sound files
# Reinitialize the wav object
wavs <- list.files(pattern = ".wav$", ignore.case = TRUE)
# Set a seed so we all have the same results
set.seed(1)
sub <- wavs[sample(1:length(wavs), 3)]
# Run auto_detec() on subset of recordings
# The data frame object output is printed to the console, we are not saving this in an object yet, since we are just playing around with argument settings
# you can run this in parallel to speed up computation time
auto_detec(flist = sub, bp = c(1, 10), threshold = 10, mindur = 0.05, maxdur = 0.5, envt = "abs", ssmooth = 300, ls = TRUE, res = 100, flim = c(1, 12), wl = 300, set = TRUE, sxrow = 6, rows = 15, redo = FALSE)
```
Check out the image files in your working directory. Note that some songs were correctly detected but other undesired sounds were also detected. In most cases, the undesired selections have a shorter duration than our target signals.
We won't save the `auto_detec` output in an object until we're satisfied with the detection. To improve the detection we should play around with argument values. Also note that the image files produced by `auto_detec` contain the values used for the different arguments, which can help you compare between runs. Below are some detection parameters that work well for these _Phaethornis longirostris_ recordings:
```{r, eval=FALSE, echo = TRUE}
auto_detec(flist = sub, bp = c(2, 10), threshold = 20, mindur = 0.09, maxdur = 0.22, envt = "abs", ssmooth = 900, ls = TRUE, res = 100, flim = c(1, 12), wl = 300, set = TRUE, sxrow = 6, rows = 15, redo = TRUE, it = "tiff", img = TRUE, smadj = "end")
```
This seems to provide a good detection for most recordings (recording ID 154161):
<center> </center>
Once we're satisfied with the detection, we can run the `auto_detec` on all the recordings, removing the argument `flist` (so `auto_detec` runs over all _wav_ files in the working directory). We will also save the temporal output in an object.
```{r, eval=FALSE, echo=TRUE}
Phae.ad <- auto_detec(bp = c(2, 10), threshold = 20, mindur = 0.09, maxdur = 0.22, envt = "abs", ssmooth = 900, ls = TRUE, res = 100, flim = c(2, 10), wl = 300, set = TRUE, sxrow = 6, rows = 15, redo = TRUE, it = "tiff", img = TRUE, smadj = "end")
```
Let's look at the number of selections per sound file:
```{r, eval=FALSE, echo=TRUE}
table(Phae.ad$sound.files)
```
### Use SNR to filter automatically selected signals
Signal-to-noise ratio (SNR) can be a useful filter for automated signal detection. When background noise is detected as a signal it will have a low SNR, and this characteristic can be used to remove background noise from the `auto_detec` selection table. SNR = 1 means the signal and background noise have the same amplitude, so signals with SNR <= 1 are poor quality. SNR calculations can also be used for different purposes throughout your analysis workflow.
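As a rough illustration of such a filter (not part of the case study workflow, which first optimizes the noise margin below), selections at or below SNR = 1 could be dropped once `sig2noise` has added an `SNR` column:
```{r, eval=FALSE}
# a minimal sketch: keep only selections whose signal amplitude exceeds the background noise
# assumes 'Phae.snr' is the output of sig2noise(), which adds an 'SNR' column (see below)
Phae.snr[Phae.snr$SNR > 1, ]
```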
#### Optimize SNR measurements
`snr_spectrograms` is a function in the family of spectrogram creators that allows you to pick a margin for measuring noise. These margins are very important for calculating SNR, especially when working with signals separated by short gaps (e.g. duets).
```{r, eval=FALSE}
# A margin that's too large causes other signals to be included in the noise measurement
# Re-initialize X as needed, for either auto_detec output
# Try this with a random ~5% subset of the selections first
# Set a seed first, so we all have the same results
set.seed(5)
X <- Phae.ad[sample(1:nrow(Phae.ad), (nrow(Phae.ad) * 0.05)), ]
nrow(X)
snr_spectrograms(X = X, flim = c(2, 10), snrmar = 0.5, mar = 0.7, it = "jpeg")
```
Check out the image files in your working directory. This margin overlaps neighboring signals, so a smaller margin would be better.
```{r, eval=FALSE}
# This smaller margin is better
snr_spectrograms(X = X, flim = c(2, 10), snrmar = 0.04, mar = 0.7, it = "jpeg")
```
<!-- <center>  </center> -->
#### Calculate SNR for automatically selected signals
Once we've picked an SNR margin we can move forward with the SNR calculation. We will measure SNR on every other selection to speed up the process.
```{r, eval=FALSE}
Phae.snr <- sig2noise(X = Phae.ad[seq(1, nrow(Phae.ad), 2), ], mar = 0.04)
```
As we just need a few songs to characterize individuals (here sound files are equivalent to different individuals), we can choose selections with the highest SNR per sound file. In this example, we will choose 5 selections per recording with the highest SNRs.
```{r, eval=FALSE}
Phae.hisnr <- Phae.snr[ave(-Phae.snr$SNR, Phae.snr$sound.files, FUN = rank) <= 5, ]
# save the selections as a physical file
write.csv(Phae.hisnr, "Phae_hisnr.csv", row.names = FALSE)
# Double check the number of selections per sound file
# Only the xeno-canto sound files will have 5 selections; the other sound files started off with fewer than 5 selections
table(Phae.hisnr$sound.files)
```
```{r, eval=FALSE, echo=FALSE}
Phae.hisnr <- read.csv("Phae_hisnr.csv", header = TRUE)
table(Phae.hisnr$sound.files)
```
## **Next vignette: Visual inspection and signal classification**
Here we have given examples of how to begin the `warbleR` workflow. Note that there are many different ways to begin the workflow, depending on your question and source of data. After running the code in this first vignette, you should now have an idea of:
* the type of data used in _warbleR_ (sound files and selections)
* how to import _Raven_ selection tables for your own sound files
* how to obtain open-access _xeno-canto_ sound files
* how to create long spectrograms of recordings for visual inspection
* how to select signals within sound files in `warbleR`
- automatic selection
- filtering automatically selected signals using SNR
- manual selection
The next vignette will cover the second phase of the _warbleR_ workflow, which includes methods to visualize signals for quality control and classification.
## **Citation**
Please cite `warbleR` when you use the package:
Araya-Salas, M. and Smith-Vidaurre, G. (2017), warbleR: an R package to streamline analysis of animal acoustic signals. Methods Ecol Evol. 8, 184-191.
## **Reporting bugs**
Please report any bugs [here](https://github.com/maRce10/warbleR/issues).
## <font size="5"><a name="References">References</a></font>
1. Araya-Salas, M. and G. Smith-Vidaurre. 2016. warbleR: an R package to streamline analysis of animal
acoustic signals. _Methods in Ecology and Evolution_. doi: 10.1111/2041-210X.12624
2. Araya-Salas, M. and T. Wright. 2013. Open-ended song learning in a hummingbird. _Biology Letters_. 9 (5). doi: 10.1098/rsbl.2013.0625
3. Medina-Garcia, Angela, M. Araya-Salas, and T. Wright. 2015. Does vocal learning accelerate acoustic diversification? Evolution of contact calls in Neotropical parrots. _Journal of Evolutionary Biology_. doi: 10.1111/jeb.12694
|
/scratch/gouwar.j/cran-all/cranData/warbleR/inst/doc/warbleR_workflow_01.Rmd
|
## ----echo = FALSE, message = FALSE------------------------------------------------------------------------------------------------------------------
# remove all objects
rm(list = ls())
# unload all non-based packages
out <- sapply(paste("package:", names(sessionInfo()$otherPkgs), sep = ""), function(x) try(detach(x, unload = FALSE, character.only = TRUE), silent = TRUE))
# load packages
X <- c("warbleR", "knitr")
invisible(lapply(X, library, character.only = TRUE))
# library(kableExtra)
options(knitr.table.format = "html")
# opts_chunk$set(comment = "")
# opts_knit$set(root.dir = tempdir())
options(width = 150, max.print = 100)
# from https://stackoverflow.com/questions/28961431/computationally-heavy-r-vignettes, so that vignettes will be built upon installation, but not executed during R CMD check (which is contributing to the /doc being too large)
is_check <- ("CheckExEnv" %in% search()) || any(c(
"_R_CHECK_TIMINGS_",
"_R_CHECK_LICENSE_"
) %in% names(Sys.getenv()))
knitr::opts_chunk$set(eval = !is_check, comment = "")
# for vignette checking and image file output
# setwd("~/Desktop/R/warbleR_example2/")
# website to fix gifs
# https://ezgif.com/optimize
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# library(warbleR)
#
# # set your working directory appropriately
# # setwd("/path/to/working directory")
#
# # run this if you have restarted RStudio between vignettes without saving your workspace (assuming that you are in your /home/username directory)
# setwd(file.path(getwd(), "warbleR_example"))
#
# # Check your location
# getwd()
## ----echo = TRUE, eval=FALSE------------------------------------------------------------------------------------------------------------------------
#
# # The package must be loaded in your working environment
# ls("package:warbleR")
## ----echo=TRUE, eval=FALSE--------------------------------------------------------------------------------------------------------------------------
#
# # To run this example:
# # Open Phae_hisnr.csv and modify the start coordinate of the first selection and the end coordinate of the second selection so that the signals overlap
#
# Phae.hisnr <- read.csv("Phae_hisnr.csv", header = TRUE)
# str(Phae.hisnr)
# head(Phae.hisnr, n = 15)
#
# # yields a data frame with an additional column (ovlp.sels) that indicates which selections overlap
# Phae.hisnr <- overlapping_sels(X = Phae.hisnr, max.ovlp = 0)
#
# # run the function again but this time retain only the signals that don't overlap
# Phae.hisnr <- overlapping_sels(X = Phae.hisnr, max.ovlp = 0, drop = TRUE)
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# spectrograms(Phae.hisnr, wl = 300, flim = c(2, 10), it = "jpeg", res = 150, osci = TRUE, ovlp = 90)
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# # remove selections after deleting corresponding image files
# Phae.hisnr2 <- filtersels(Phae.hisnr, it = "jpeg", incl.wav = TRUE)
# nrow(Phae.hisnr2)
## ----echo=TRUE, eval=FALSE--------------------------------------------------------------------------------------------------------------------------
#
# # if selections can be read, "OK" will be printed to check.res column
# checksels(Phae.hisnr2, check.header = FALSE)
## ----eval=FALSE, echo=FALSE-------------------------------------------------------------------------------------------------------------------------
#
# # ### Cut selections into individual sound files
# #
# # Listening to signals complements visual inspection and classification. The function `cut_sels` can be very useful for aural comparison of selected signals. Selected signals can be played as individual sounds rather than having to open up entire sound files. As a word of caution, generating cuts of sound files will also propagate any naming errors present in your original files.
# #
# # `cut_sels` can also be used to your advantage if your original recordings are long (over 10-15 minutes). Some _warbleR_ functions run slowly on long recordings, so it's helpful to use shorter duration sound files. You can make selections of shorter pieces of long original recordings, either in _Raven_ or _Syrinx_, and use `cut_sels` to generate shorter segments for smoother signal detection in `warbleR`.
#
# cut_sels(X = Phae.hisnr2, mar = 0.01, labels = c("sound.files", "selec"))
#
# # bug in the above cut_sels code
#
# # Error in apply(X[, sapply(X, is.factor)], 2, as.character) :
# # dim(X) must have a positive length
#
# # cut_sels(selec.table) # this works!
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# tailor_sels(Phae.hisnr2, wl = 300, flim = c(2, 10), wn = "hanning", mar = 0.1, osci = TRUE, title = c("sound.files", "selec"), auto.next = TRUE)
#
# # Read in tailor_sels output after renaming the csv file
# Phae.hisnrt <- read.csv("Phae_hisnrt.csv", header = TRUE)
# str(Phae.hisnrt)
## ----eval=TRUE, echo=FALSE--------------------------------------------------------------------------------------------------------------------------
Phae.hisnrt <- read.csv("Phae_hisnrt.csv", header = TRUE)
str(Phae.hisnrt)
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# # highlight selected signals
# full_spectrograms(Phae.hisnrt, wl = 300, flim = c(2, 10), ovlp = 10, sxrow = 6, rows = 15, it = "jpeg")
#
# # concatenate full_spectrograms image files into a single PDF per recording
# # full_spectrograms images must be jpegs
# full_spectrograms2pdf(keep.img = FALSE, overwrite = TRUE)
## ----eval=FALSE, echo=FALSE-------------------------------------------------------------------------------------------------------------------------
#
# # Note for later...full_spectrograms2pdf works on auto_detec files in the working directory too...maybe including a suffix argument would help
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# # we will use Phaethornis songs and selections from the warbleR package
# data(list = c("Phae.long1", "selec.table"))
# writeWave(Phae.long1, "Phae.long1.wav") # save sound files
#
# # subset selection table
# # already contains the frequency range for these signals
# st <- selec.table[selec.table$sound.files == "Phae.long1.wav", ]
#
# # read wave file as an R object
# sgnl <- tuneR::readWave(as.character(st$sound.files[1]))
#
# # create color column
# st$colors <- c("red2", "blue", "green")
#
# # highlight selections
# color_spectro(wave = sgnl, wl = 300, ovlp = 90, flim = c(1, 8.6), collevels = seq(-90, 0, 5), dB = "B", X = st, col.clm = "colors", base.col = "skyblue", t.mar = 0.07, f.mar = 0.1)
## ----eval = FALSE, echo = FALSE---------------------------------------------------------------------------------------------------------------------
#
# # was getting bugs using the xeno-canto recordings
# # but code sort of works for the following code:
# # problem is that code takes a while to run and then shows the whole long spectrogram
# # suggestion for color spectro - an argument to zoom in on section of x-axis?
#
# X <- Phae.hisnrt[Phae.hisnrt$sound.files == "Phaethornis-longirostris-154072.wav", ]
# X$colors <- c("red2", "blue", "green", "yellow", "orange")
#
# X2 <- frange(X)
# # View(X2)
#
# color_spectro(
# wave = readWave("Phaethornis-longirostris-154072.wav"), wl = 300, ovlp = 90, flim = c(1, 8.6), collevels = seq(-90, 0, 5),
# dB = "B", X = X2, col.clm = "colors", base.col = "skyblue", t.mar = 0.07, f.mar = 0.1
# )
## ----eval=FALSE, echo=FALSE-------------------------------------------------------------------------------------------------------------------------
#
# # spec_param takes a single selection from the selection table as input
# spec_param(Phae.hisnrt[1, ], length.out = 5, ovlp = 90, wl = c(150, 900), wn = c("hanning", "bartlett"), collev.min = c(-60, -30), pal = "reverse.gray.colors.2", path = NULL, rm.axes = TRUE, cex = 0.45, flim = c(2, 10))
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# # create a column of recording IDs for friendlier catalog labels
# rec_ID <- sapply(1:nrow(Phae.hisnrt), function(x) {
# gsub(x = strsplit(as.character(Phae.hisnrt$sound.files[x]), split = "-")[[1]][3], pattern = ".wav$", replacement = "")
# })
# rec_ID
#
# Phae.hisnrt$rec_ID <- rec_ID
# str(Phae.hisnrt)
#
# # set color palette
# # alpha controls transparency for softer colors
# cmc <- function(n) cm.colors(n, alpha = 0.8)
#
# catalog(X = Phae.hisnrt, flim = c(2, 10), nrow = 4, ncol = 3, height = 10, width = 10, tag.pal = list(cmc), cex = 0.8, same.time.scale = TRUE, mar = 0.01, wl = 300, gr = FALSE, labels = "rec_ID", tags = "rec_ID", hatching = 1, group.tag = "rec_ID", spec.mar = 0.4, lab.mar = 0.8, max.group.cols = 5)
#
# catalog2pdf(keep.img = FALSE, overwrite = TRUE)
#
# # assuming we are working from the warbleR_example directory
# # the ~/ format does not apply to Windows
# # make sure you have already moved or deleted all other pdf files
# move_imgs(from = ".", it = "pdf", create.folder = TRUE, folder.name = "Catalog_image_files")
## ----eval = FALSE, echo = FALSE---------------------------------------------------------------------------------------------------------------------
#
# # suggestion for move_imgs
# # add argument for regex so as not to delete/move all image files of a given type
# # and be able to move just "Cat*.pdf"...etc
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
# # now create a catalog without labels, tags, groups or axes
# Phae.hisnrt$no_label <- ""
#
# # catalog(X = Phae.hisnrt, flim = c(1, 10), nrow = 4, ncol = 3, height = 10, width = 10, cex = 0.8, same.time.scale = TRUE, mar = 0.01, wl = 300, spec.mar = 0.4, rm.axes = TRUE, labels = "no_label", lab.mar = 0.8, max.group.cols = 5, img.suffix = "nolabel")
#
# catalog(X = Phae.hisnrt, flim = c(1, 10), nrow = 4, ncol = 3, height = 10, width = 10, tag.pal = list(cmc), cex = 0.8, same.time.scale = TRUE, mar = 0.01, wl = 300, gr = FALSE, labels = "no_label", spec.mar = 0.4, lab.mar = 0.8, max.group.cols = 5, img.suffix = "nolabels")
#
# catalog2pdf(keep.img = FALSE, overwrite = TRUE)
|
/scratch/gouwar.j/cran-all/cranData/warbleR/inst/doc/warbleR_workflow_02.R
|
---
title: <font size="7"><b>warbleR: Visual inspection and signal classification</b></font>
pagetitle: Visual inspection and signal classification
author:
- <a href="https://marce10.github.io">Marcelo Araya-Salas, PhD</a> & <a href="https://smith-vidaurre.com/">Grace Smith-Vidaurre</a>
date: "`r Sys.Date()`"
output:
rmarkdown::html_document:
self_contained: yes
toc: true
toc_depth: 3
toc_float:
collapsed: false
smooth_scroll: true
vignette: >
\usepackage[utf8]{inputenc}
%\VignetteIndexEntry{3. Visual inspection and signal classification}
%\VignetteEngine{knitr::rmarkdown}
---
<!-- <center> <h1><b>Visual inspection and classification of signals</h1></b> -->
<!-- <center> <i><font size="4">Marcelo Araya-Salas and Grace Smith Vidaurre</font></i> </center> -->
<!-- <center> `r Sys.Date()` </center> -->
```{css, echo = FALSE}
div#header h1.title, div#header h3.subtitle, div#header h4.author, div#header h4.date {
text-align: center
}
```
## Bioacoustics in R with `warbleR`
<img src="warbleR_sticker.png" alt="warbleR logo" align="right" width="25%" height="25%">
Bioacoustics research encompasses a wide range of questions, study systems and methods, including the software used for analyses. The `warbleR` and `Rraven` packages leverage the flexibility of the `R` environment to offer a broad and accessible bioinformatics tool set. These packages fundamentally rely upon two types of data to begin bioacoustic analyses in R:
1. **Sound files:** Recordings in _wav_ or _mp3_ format, either from your own research or open-access databases like _xeno-canto_
2. **Selection tables:** Selection tables contain the temporal coordinates (start and end points) of selected acoustic signals within recordings
### Package repositories
These packages are both available on _CRAN_: [`warbleR`](https://cran.r-project.org/package=warbleR/), [`Rraven`](https://cran.r-project.org/package=Rraven), as well as on _GitHub_: [`warbleR`](https://github.com/maRce10/warbleR), [`Rraven`](https://github.com/maRce10/Rraven). The GitHub repository will always contain the latest functions and updates. You can also check out an article in _Methods in Ecology and Evolution_ documenting the `warbleR` package <a href='#References'><sup>[1]</sup></a>.
We welcome all users to provide feedback, contribute updates or new functions and report bugs to warbleR's GitHub repository.
Please note that `warbleR` and `Rraven` use functions from the [`seewave`](https://cran.r-project.org/package=seewave), [`monitoR`](https://cran.r-project.org/package=monitoR), [`tuneR`](https://cran.r-project.org/package=tuneR) and [`dtw`](https://cran.r-project.org/package=dtw) packages internally. `warbleR` and `Rraven` have been designed to make bioacoustics analyses more accessible to `R` users, and such analyses would not be possible without the tools provided by the packages above. These packages should be given credit when using `warbleR` and `Rraven` by including citations in publications as appropriate (e.g. `citation("seewave")`).
### Parallel processing in `warbleR`
Parallel processing, or using multiple cores on your machine, can greatly speed up analyses. All iterative `warbleR` functions now have parallel processing for Linux, Mac and Windows operating systems. These functions also contain progress bars to visualize progress during normal or parallel processing. See <a href='#References'><sup>[1]</sup></a> for more details about improved running time using parallel processing.
## **Vignette introduction**
In the previous vignette, we used the `Rraven` package to import _Raven_ selection tables for recordings in our working directory, added more recordings to the data set by downloading new sound files from the open-access `xeno-canto` database and reviewed methods of automated and manual signal selection in `warbleR`. Here we continue with the case study of microgeographic vocal variation in long-billed hermit hummingbirds, _Phaethornis longirostris_ <a href='#References'><sup>[2]</sup></a> by:
1. Performing quality control processing on selected signals, including visual inspection and tailoring temporal coordinates
2. Making lexicons for visual classification of signals
This vignette can be run without an advanced understanding of `R`, as long as you know how to run code in your console. However, knowing more about basic `R` coding would be very helpful to modify the code for your research questions.
For more details about function arguments, input or output, read the documentation for the function in question (e.g. `?catalog`).
```{r, echo = FALSE, message = FALSE}
# remove all objects
rm(list = ls())
# unload all non-based packages
out <- sapply(paste("package:", names(sessionInfo()$otherPkgs), sep = ""), function(x) try(detach(x, unload = FALSE, character.only = TRUE), silent = TRUE))
# load packages
X <- c("warbleR", "knitr")
invisible(lapply(X, library, character.only = TRUE))
# library(kableExtra)
options(knitr.table.format = "html")
# opts_chunk$set(comment = "")
# opts_knit$set(root.dir = tempdir())
options(width = 150, max.print = 100)
# from https://stackoverflow.com/questions/28961431/computationally-heavy-r-vignettes, so that vignettes will be built upon installation, but not executed during R CMD check (which is contributing to the /doc being too large)
is_check <- ("CheckExEnv" %in% search()) || any(c(
"_R_CHECK_TIMINGS_",
"_R_CHECK_LICENSE_"
) %in% names(Sys.getenv()))
knitr::opts_chunk$set(eval = !is_check, comment = "")
# for vignette checking and image file output
# setwd("~/Desktop/R/warbleR_example2/")
# website to fix gifs
# https://ezgif.com/optimize
```
## **Prepare for analyses**
```{r, eval=FALSE}
library(warbleR)
# set your working directory appropriately
# setwd("/path/to/working directory")
# run this if you have restarted RStudio between vignettes without saving your workspace (assuming that you are in your /home/username directory)
setwd(file.path(getwd(), "warbleR_example"))
# Check your location
getwd()
```
This vignette series will not always include all available `warbleR` functions, as existing functions are updated and new functions are added. To see all functions available in this package:
```{r, echo = TRUE, eval=FALSE}
# The package must be loaded in your working environment
ls("package:warbleR")
```
## **Quality control filtering of selections**
### Find overlapping selections
Overlapping selections can sometimes arise after selecting signals using other functions or software. The function below helps you detect overlapping signals in your selection table, and has arguments that you can play around with for overlap detection, renaming or deleting overlapping selections.
```{r, echo=TRUE, eval=FALSE}
# To run this example:
# Open Phae_hisnr.csv and modify the start coordinate of the first selection and the end coordinate of the second selection so that the signals overlap
Phae.hisnr <- read.csv("Phae_hisnr.csv", header = TRUE)
str(Phae.hisnr)
head(Phae.hisnr, n = 15)
# yields a data frame with an additional column (ovlp.sels) that indicates which selections overlap
Phae.hisnr <- overlapping_sels(X = Phae.hisnr, max.ovlp = 0)
# run the function again but this time retain only the signals that don't overlap
Phae.hisnr <- overlapping_sels(X = Phae.hisnr, max.ovlp = 0, drop = TRUE)
```
### Make spectrograms of selections
`spectrograms` generates spectrograms of individual selected signals. These image files can be used to filter out selections that were poorly made or represent signals that are not relevant to your analysis. This quality control step is important for visualizing your selected signals after any selection method, even if you imported your selections from _Raven_ or _Syrinx_.
```{r, eval=FALSE}
spectrograms(Phae.hisnr, wl = 300, flim = c(2, 10), it = "jpeg", res = 150, osci = TRUE, ovlp = 90)
```
Inspect spectrograms and throw away image files that are poor quality to prepare for later steps. Make sure you are working in a directory that only has image files associated with this vignette. Delete the image files corresponding to recording _154070_ selection _8_ and _154070_ selection _12_, as the start coordinates for these selections are not accurate.
### Remove selections with missing image files
```{r, eval=FALSE}
# remove selections after deleting corresponding image files
Phae.hisnr2 <- filtersels(Phae.hisnr, it = "jpeg", incl.wav = TRUE)
nrow(Phae.hisnr2)
```
After removing the poorest quality selections or signals, there are some other quality control steps that may be helpful.
### Check selections
Can selections be read by downstream functions? The function `checksels` also yields a data frame with columns for duration, minimum samples, sampling rate, channels and bits.
```{r, echo=TRUE, eval=FALSE}
# if selections can be read, "OK" will be printed to check.res column
checksels(Phae.hisnr2, check.header = FALSE)
```
If selections cannot be read, it is possible the sound files are corrupt. If so, use the `fixwavs` function to repair _wav_ files.
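Below is a minimal sketch of that repair step (not run here; it assumes the default arguments are appropriate and that the problem files are in the current working directory, see `?fixwavs` for the available options):
```{r, eval=FALSE}
# attempt to repair the wav files in the working directory using default settings
fixwavs()
```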
```{r, eval=FALSE, echo=FALSE}
# ### Cut selections into individual sound files
#
# Listening to signals complements visual inspection and classification. The function `cut_sels` can be very useful for aural comparison of selected signals. Selected signals can be played as individual sounds rather than having to open up entire sound files. As a word of caution, generating cuts of sound files will also propagate any naming errors present in your original files.
#
# `cut_sels` can also be used to your advantage if your original recordings are long (over 10-15 minutes). Some _warbleR_ functions run slowly on long recordings, so it's helpful to use shorter duration sound files. You can make selections of shorter pieces of long original recordings, either in _Raven_ or _Syrinx_, and use `cut_sels` to generate shorter segments for smoother signal detection in `warbleR`.
cut_sels(X = Phae.hisnr2, mar = 0.01, labels = c("sound.files", "selec"))
# bug in the above cut_sels code
# Error in apply(X[, sapply(X, is.factor)], 2, as.character) :
# dim(X) must have a positive length
# cut_sels(selec.table) # this works!
```
### Tailor temporal coordinates of selections
Sometimes the start and end times of selected signals need fine-tuned adjustments. This is particularly true when signals are found within bouts of closely delivered sounds that may be hard to pull apart, such as duets, or if multiple researchers use different rules-of-thumb to select signals. `tailor_sels` provides an interactive interface for tailoring the temporal coordinates of selections.
If you check out the image files generated by running `spectrograms` above, you'll see that some of the selections made during the automatic detection process with `auto_detec` do not have accurate start and/or end coordinates.
For instance:
The end of this signal is not well selected.
<!-- <center> </center> -->
The temporal coordinates for the tailored signals will be saved in a _.csv_ file called `seltailor_output.csv`. You can rename this file and read it back into `R` to continue downstream analyses.
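The renaming step itself can also be done from within `R`. A minimal sketch, assuming the default output name and the new file name used in the chunk below:
```{r, eval=FALSE}
# rename the tailor_sels output so it matches the file read back in below
file.rename(from = "seltailor_output.csv", to = "Phae_hisnrt.csv")
```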
```{r, eval=FALSE}
tailor_sels(Phae.hisnr2, wl = 300, flim = c(2, 10), wn = "hanning", mar = 0.1, osci = TRUE, title = c("sound.files", "selec"), auto.next = TRUE)
# Read in tailor_sels output after renaming the csv file
Phae.hisnrt <- read.csv("Phae_hisnrt.csv", header = TRUE)
str(Phae.hisnrt)
```
```{r, eval=TRUE, echo=FALSE}
Phae.hisnrt <- read.csv("Phae_hisnrt.csv", header = TRUE)
str(Phae.hisnrt)
```
## **Visual classification of selected signals**
Visual classification of signals is fundamental to vocal repertoire analysis, and can also be useful for other questions. If your research focuses on assessing variation between individuals or groups, several `warbleR` functions can provide you with important information about how to steer your analysis. If there is discrete variation in vocalization structure across groups (e.g. treatments or geographic regions), visual classification of vocalizations will be useful.
### Print long spectrograms with `full_spectrograms`
The function `full_spectrograms` that we used in the last vignette can also be a tool for visually classifying signals. Long spectrograms can be printed to classify signals by hand, or comments accompanying the selections can be printed over selected signals.
Here, we print the start and end of selections with a red dotted line, and the selection number printed over the signal. If a selection data frame contains a comments column, these will be printed with the selection number.
```{r, eval=FALSE, echo=TRUE}
# highlight selected signals
full_spectrograms(Phae.hisnrt, wl = 300, flim = c(2, 10), ovlp = 10, sxrow = 6, rows = 15, it = "jpeg")
# concatenate full_spectrograms image files into a single PDF per recording
# full_spectrograms images must be jpegs
full_spectrograms2pdf(keep.img = FALSE, overwrite = TRUE)
```
```{r, eval=FALSE, echo=FALSE}
# Note for later...full_spectrograms2pdf works on auto_detec files in the working directory too...maybe including a suffix argument would help
```
Check out the image file in your working directory. These will look very similar to the `full_spectrograms` images produced in vignette 1, but with red dotted lines indicating where the selected signals start and end.
### Highlight spectrogram regions with `color_spectro`
`color_spectro` allows you to highlight selections you've made within a short region of a spectrogram. In the example below we will use `color_spectro` to highlight neighboring songs. This function has a wide variety of uses, and could be especially useful for analysis of duets or coordinated singing bouts. This example is taken directly from the `color_spectro` documentation. If working with your own data frame of selections, make sure to calculate the frequency range for your selections beforehand using the function `frange`, which will come up in the next vignette.
```{r, eval=FALSE}
# we will use Phaethornis songs and selections from the warbleR package
data(list = c("Phae.long1", "selec.table"))
writeWave(Phae.long1, "Phae.long1.wav") # save sound files
# subset selection table
# already contains the frequency range for these signals
st <- selec.table[selec.table$sound.files == "Phae.long1.wav", ]
# read wave file as an R object
sgnl <- tuneR::readWave(as.character(st$sound.files[1]))
# create color column
st$colors <- c("red2", "blue", "green")
# highlight selections
color_spectro(wave = sgnl, wl = 300, ovlp = 90, flim = c(1, 8.6), collevels = seq(-90, 0, 5), dB = "B", X = st, col.clm = "colors", base.col = "skyblue", t.mar = 0.07, f.mar = 0.1)
```
```{r, eval = FALSE, echo = FALSE}
# was getting bugs using the xeno-canto recordings
# but code sort of works for the following code:
# problem is that code takes a while to run and then shows the whole long spectrogram
# suggestion for color spectro - an argument to zoom in on section of x-axis?
X <- Phae.hisnrt[Phae.hisnrt$sound.files == "Phaethornis-longirostris-154072.wav", ]
X$colors <- c("red2", "blue", "green", "yellow", "orange")
X2 <- frange(X)
# View(X2)
color_spectro(
wave = readWave("Phaethornis-longirostris-154072.wav"), wl = 300, ovlp = 90, flim = c(1, 8.6), collevels = seq(-90, 0, 5),
dB = "B", X = X2, col.clm = "colors", base.col = "skyblue", t.mar = 0.07, f.mar = 0.1
)
```
### Optimize spectrogram display parameters
`spec_param` makes a catalog or mosaic of the same signal plotted with different combinations of spectrogram display arguments. The purpose of this function is to help you choose parameters that yield the best spectrograms (e.g. optimal visualization) for your signals (although low signal-to-noise ratio selections may be an exception).
```{r, eval=FALSE, echo=FALSE}
# spec_param takes a single selection from the selection table as input
spec_param(Phae.hisnrt[1, ], length.out = 5, ovlp = 90, wl = c(150, 900), wn = c("hanning", "bartlett"), collev.min = c(-60, -30), pal = "reverse.gray.colors.2", path = NULL, rm.axes = TRUE, cex = 0.45, flim = c(2, 10))
```
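A sketch of how `spec_param` might be called on a single selection (not evaluated here); the argument values are only illustrative and assume the `Phae.hisnrt` selection table created earlier in this vignette series:
```{r, eval=FALSE}
# spec_param plots one selection under every combination of the spectrogram
# arguments supplied as vectors (window length, window name, minimum amplitude level)
spec_param(Phae.hisnrt[1, ], length.out = 5, ovlp = 90, wl = c(150, 900), wn = c("hanning", "bartlett"), collev.min = c(-60, -30), pal = "reverse.gray.colors.2", rm.axes = TRUE, cex = 0.45, flim = c(2, 10))
```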
<!-- <center> </center> -->
### Make lexicons of signals
When we are interested in geographic variation of acoustic signals, we usually want to compare spectrograms from different individuals and sites. This can be challenging when working with large numbers of signals, individuals and/or sites. `catalog` aims to simplify this task.
This is how it works:
* `catalog` plots a matrix of spectrograms from signals listed in a selection table
* the catalog files are saved as image files in the working directory (or path provided)
* Several image files are generated if the signals do not fit in a single file
* Spectrograms can be labeled or color-tagged to facilitate exploring variation related to the parameter of interest (e.g. site or song type if already classified)
* A legend can be added to help match colors with tag levels
* different color palettes can be used for each tag
* The duration of the signals can be "fixed" such that all the spectrograms have the same duration
* facilitates comparisons
* You can control the number of rows and columns as well as the width and height of the output image
`catalog` allows you to group signals into biologically relevant groups by coloring the background of selected spectrograms accordingly. There is also an option to add hatching to tag labels, as well as filling the catalog with spectrograms by rows or columns of the selection table data frame, among other additional arguments.
The `move_imgs` function can come in handy when creating multiple catalogs to avoid overwriting previous image files, or when working through rounds of other image files. In this case, the first catalog we create has signals labeled, tagged and grouped with respective color and hatching levels. The second catalog we create will not have any grouping of signals whatsoever, and could be used for a test of inter-observer reliability. `move_imgs` helps us move the first catalog into another directory to save it from being overwritten when creating the second catalog.
```{r, eval=FALSE}
# create a column of recording IDs for friendlier catalog labels
rec_ID <- sapply(1:nrow(Phae.hisnrt), function(x) {
gsub(x = strsplit(as.character(Phae.hisnrt$sound.files[x]), split = "-")[[1]][3], pattern = ".wav$", replacement = "")
})
rec_ID
Phae.hisnrt$rec_ID <- rec_ID
str(Phae.hisnrt)
# set color palette
# alpha controls transparency for softer colors
cmc <- function(n) cm.colors(n, alpha = 0.8)
catalog(X = Phae.hisnrt, flim = c(2, 10), nrow = 4, ncol = 3, height = 10, width = 10, tag.pal = list(cmc), cex = 0.8, same.time.scale = TRUE, mar = 0.01, wl = 300, gr = FALSE, labels = "rec_ID", tags = "rec_ID", hatching = 1, group.tag = "rec_ID", spec.mar = 0.4, lab.mar = 0.8, max.group.cols = 5)
catalog2pdf(keep.img = FALSE, overwrite = TRUE)
# assuming we are working from the warbleR_example directory
# the ~/ format does not apply to Windows
# make sure you have already moved or deleted all other pdf files
move_imgs(from = ".", it = "pdf", create.folder = TRUE, folder.name = "Catalog_image_files")
```
```{r, eval = FALSE, echo = FALSE}
# suggestion for move_imgs
# add argument for regex so as not to delete/move all image files of a given type
# and be able to move just "Cat*.pdf"...etc
```
<center> </center>
You can also make lexicons for blind scoring, which could be useful for determining interobserver reliability.
```{r, eval=FALSE}
# now create a catalog without labels, tags, groups or axes
Phae.hisnrt$no_label <- ""
# catalog(X = Phae.hisnrt, flim = c(1, 10), nrow = 4, ncol = 3, height = 10, width = 10, cex = 0.8, same.time.scale = TRUE, mar = 0.01, wl = 300, spec.mar = 0.4, rm.axes = TRUE, labels = "no_label", lab.mar = 0.8, max.group.cols = 5, img.suffix = "nolabel")
catalog(X = Phae.hisnrt, flim = c(1, 10), nrow = 4, ncol = 3, height = 10, width = 10, tag.pal = list(cmc), cex = 0.8, same.time.scale = TRUE, mar = 0.01, wl = 300, gr = FALSE, labels = "no_label", spec.mar = 0.4, lab.mar = 0.8, max.group.cols = 5, img.suffix = "nolabels")
catalog2pdf(keep.img = FALSE, overwrite = TRUE)
```
<!-- <center> </center> -->
<!-- -->
### **Next vignette: Acoustic (dis)similarity, coordinated singing and simulating songs**
Here we finished the second phase of the `warbleR` workflow, which includes various options for quality control filtering or visual classification of signals that you can leverage during acoustic analysis. After running the code in this vignette, you should now have an idea of how to:
* perform quality control filtering of your selected signals, including visual inspection and tailoring the temporal coordinates of spectrograms
* use different methods for visual classification of signals, including:
* long spectrograms
* highlighted regions within spectrograms
* catalogs or lexicons of individual signals
The next vignette will cover the third phase of the _warbleR_ workflow, which includes methods to perform acoustic measurements as a batch process, an example of how to use these measurements for an analysis of geographic variation, coordinated singing analysis and a new function to simulate songs.
## **Citation**
Please cite `warbleR` when you use the package:
Araya-Salas, M. and Smith-Vidaurre, G. (2017), warbleR: an R package to streamline analysis of animal acoustic signals. Methods Ecol Evol. 8, 184-191.
## **Reporting bugs**
Please report any bugs [here](https://github.com/maRce10/warbleR/issues).
<font size="5"><a name="References">References</a></font>
1. Araya-Salas, M. and G. Smith-Vidaurre. 2016. warbleR: an R package to streamline analysis of animal
acoustic signals. _Methods in Ecology and Evolution_. doi: 10.1111/2041-210X.12624
|
/scratch/gouwar.j/cran-all/cranData/warbleR/inst/doc/warbleR_workflow_02.Rmd
|
## ----echo = FALSE, message = FALSE------------------------------------------------------------------------------------------------------------------
# remove all objects
rm(list = ls())
# unload all non-based packages
out <- sapply(paste("package:", names(sessionInfo()$otherPkgs), sep = ""), function(x) try(detach(x, unload = FALSE, character.only = TRUE), silent = TRUE))
# load packages
X <- c("warbleR", "knitr")
invisible(lapply(X, library, character.only = TRUE))
# library(kableExtra)
options(knitr.table.format = "html")
# opts_chunk$set(comment = "")
# opts_knit$set(root.dir = tempdir())
options(width = 150, max.print = 100)
# from https://stackoverflow.com/questions/28961431/computationally-heavy-r-vignettes, so that vignettes will be built upon installation, but not executed during R CMD check (which is contributing to the /doc being too large)
is_check <- ("CheckExEnv" %in% search()) || any(c(
"_R_CHECK_TIMINGS_",
"_R_CHECK_LICENSE_"
) %in% names(Sys.getenv()))
knitr::opts_chunk$set(eval = !is_check, comment = "")
# for vignette checking and image file output
# setwd("~/Desktop/R/warbleR_example2/")
# website to fix gifs
# https://ezgif.com/optimize
## ----eval=FALSE-------------------------------------------------------------------------------------------------------------------------------------
#
# library(warbleR)
#
# # set your working directory appropriately
# # setwd("/path/to/working directory")
#
# # run this if you have restarted RStudio between vignettes without saving your workspace
# # assumes that you are in your /home/username directory
# setwd(file.path(getwd(), "warbleR_example"))
#
# # Check your location
# getwd()
## ----echo=TRUE, eval=FALSE--------------------------------------------------------------------------------------------------------------------------
#
# # The package must be loaded in your working environment
# ls("package:warbleR")
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# tin <- query_xc(qword = "Tinamus", download = FALSE)
#
# # select a single recording
# tin <- tin[tin$Recordist == "Marcelo Araya-Salas", ]
#
# # download this recording
# query_xc(X = tin, download = TRUE)
#
# mp32wav()
## ----eval=FALSE, echo=FALSE-------------------------------------------------------------------------------------------------------------------------
#
# # Hiding the text that goes with the chunk below
#
# # If you have _Raven_ installed on your local machine, you can use _Rraven_ to call this software and make selections. Make sure to include arguments from imp_raven to ensure that the selection table is imported with the correct columns for downstream functions. We will use the _Tinamus major_ signals for detecting frequency range below, so if you do not have _Raven_ installed on your machine, you can use the code below as a reference for your own signals.
## ----eval=FALSE, echo=FALSE-------------------------------------------------------------------------------------------------------------------------
#
# # commenting this out because this fails on my machine, although it worked when I first wrote this code...
#
# # here you will replace the raven.path argument with the path specifying where Raven is located on your own machine
# Tin.sels <- run_raven(raven.path = "/home/gsvidaurre/opt/Raven-1.5.0.0035/", sound.files = "Tinamus-major-154191.wav", import = TRUE, all.data = FALSE, name.from.file = TRUE, ext.case = "lower", freq.cols = FALSE)
# str(Tin.sels)
#
# # write the selection table as a physical file so you can read it back in at any time
# # good way to save all your work
# write.csv(Tin.sels, "Tinamus-major-154191_sels.csv", row.names = FALSE)
#
# # generate individual cuts for frequency range measurements below
# cut_sels(Tin.sels, mar = 0.05, labels = c("sound.files", "selec"))
## ----eval=FALSE, echo=FALSE-------------------------------------------------------------------------------------------------------------------------
#
# # Tin.sels <- read.csv("Tinamus-major-154191_sels.csv", header = TRUE)
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# # here we will use a data set with sound files that have been already annotated
# # read the selections back into the global environment
# Tin.sels <- read.csv("manualoc_output.csv")
# str(Tin.sels)
#
# # cut the original wave file by selections for freq_range_detec below
# writeWave(seewave::cutw(readWave("Tinamus-major-154191.wav"), from = Tin.sels$start[1], to = Tin.sels$end[1], f = 44100, plot = FALSE, output = "Wave"), filename = "Tinamus-major-154191-1.wav")
#
# writeWave(seewave::cutw(readWave("Tinamus-major-154191.wav"), from = Tin.sels$start[2], to = Tin.sels$end[2], f = 44100, plot = FALSE, output = "Wave"), filename = "Tinamus-major-154191-2.wav")
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# # note that changing the threshold argument in combination with the bandpass argument can improve the detection
# freq_range_detec(readWave("Tinamus-major-154191-1.wav"), flim = c(0, 2.5), bp = c(0, 3), threshold = 15, plot = TRUE)
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# # here, giving a strict bandpass with very low threshold improves freq_range detection
# # since the curving end of the tinamou signal is lower amplitude than the rest of the signal
# freq_range_detec(readWave("Tinamus-major-154191-1.wav"), flim = c(0, 2.5), bp = c(0, 3), threshold = 1, plot = TRUE)
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# # use arguments from freq_range_detec above
# fr <- freq_range(Tin.sels, threshold = 1, res = 100, flim = c(0, 2.5), bp = c(0.5, 2.5))
# str(fr)
## ----eval = FALSE, echo=FALSE-----------------------------------------------------------------------------------------------------------------------
#
# Phae.hisnrt <- read.csv("Phae_hisnrt.csv", header = TRUE)
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# Phae.hisnrt <- read.csv("Phae_hisnrt.csv", header = TRUE)
# str(Phae.hisnrt)
#
# se <- entropy_ts(Phae.hisnrt, wl = 300, length.out = 10, threshold = 10, img = TRUE, img.suffix = "entropy_ts", type = "b", ovlp = 90, sp.en.range = c(-25, 10), flim = c(2, 10), picsize = 0.75, title = FALSE)
#
# str(se)
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# # Note that the dominant frequency measurements are almost always more accurate
# track_freq_contour(Phae.hisnrt, wl = 300, flim = c(2, 10), bp = c(1, 12), it = "jpeg")
#
# # We can change the lower end of bandpass to make the frequency measurements more precise
# track_freq_contour(Phae.hisnrt, wl = 300, flim = c(2, 10), bp = c(2, 12), col = c("purple", "orange"), pch = c(17, 3), res = 100, it = "jpeg", picsize = 0.8)
## ----echo=FALSE, eval=FALSE-------------------------------------------------------------------------------------------------------------------------
#
# # decided to remove track_harmonics, not working well for either Phaethornis or Tinamou signals
#
# # the text for above this chunk
# # `track_harmonics` is a modified function from `seewave` that allows you to track the dominant frequency for harmonic calls, even when the amplitude fluctuates among harmonics.
#
# # with a Phaethornis harmonic signal
# nm <- paste(paste(as.character(Phae.hisnrt$sound.files[1]), as.character(Phae.hisnrt$selec[1]), sep = "-"), ".wav", sep = "")
#
# writeWave(seewave::cutw(readWave(as.character(Phae.hisnrt$sound.files[1])), from = Phae.hisnrt$start[1], to = Phae.hisnrt$end[1], f = 44100, plot = FALSE, output = "Wave"), filename = nm)
#
# trck_hrm <- track_harmonic(readWave(nm), f = 44100, ovlp = 70, fftw = FALSE, threshold = 15, bandpass = NULL, clip = 0.1, plot = TRUE, xlab = "Time (s)", ylab = "Frequency (kHz)", adjust.wl = FALSE, dfrq = FALSE)
#
# # plot spectrogram
# spectro(readWave(nm), grid = FALSE, scale = FALSE, f = 22050, ovlp = 90, palette = reverse.gray.colors.2, collevels = seq(-40, 0, 1), wl = 300, osc = FALSE, flim = c(2, 10), main = "warbleR's 'track_harmonic'")
#
# # plot detected frequency contour
# points(x = trck_hrm[, 1] + 0.1, y = trck_hrm[, 2], cex = 1, col = "red", pch = 20)
## ----echo=FALSE, eval=FALSE-------------------------------------------------------------------------------------------------------------------------
#
# # with a Tinamou tonal signal
# trck_hrm <- track_harmonic(readWave("Tinamus-major-154191-1.wav"), f = 44100, ovlp = 70, fftw = FALSE, threshold = 15, bandpass = NULL, plot = TRUE, xlab = "Time (s)", ylab = "Frequency (kHz)", adjust.wl = FALSE, dfrq = FALSE)
#
# # plot spectrogram
# spectro(readWave("Tinamus-major-154191-2.wav"), grid = FALSE, scale = FALSE, f = 44100, ovlp = 90, palette = reverse.gray.colors.2, collevels = seq(-40, 0, 1), wl = 300, osc = FALSE, flim = c(0, 4), main = "warbleR's 'track_harmonic'")
#
# # plot detected frequency contour
# points(x = trck_hrm[, 1] + 0.1, y = trck_hrm[, 2], cex = 1, col = "red", pch = 20)
## ----echo=TRUE, eval=FALSE--------------------------------------------------------------------------------------------------------------------------
#
# # Fundamental frequency contour
# ff_df <- freq_ts(Phae.hisnrt, wl = 300, length.out = 20, threshold = 15, img = TRUE, img.suffix = "ff", type = "p", ovlp = 70, clip.edges = FALSE, leglab = "freq_ts", ff.method = "tuneR")
#
# str(ff_df)
## ----echo=TRUE, eval=FALSE--------------------------------------------------------------------------------------------------------------------------
#
# # Dominant frequency contour
#
# # Uses seewave function dfreq by default
# df_df <- freq_ts(Phae.hisnrt, wl = 300, length.out = 20, threshold = 15, img = TRUE, img.suffix = "ff", type = "p", ovlp = 70, clip.edges = FALSE, leglab = "freq_ts", fsmooth = 0.2)
#
# str(df_df)
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# # Use the original data frame of songs for the main tailor_sels dataset
# # the data frame with the fundamental frequency contours is provided for manual tracing
# tailor_sels(Phae.hisnrt,
# wl = 300, flim = c(2, 10), wn = "hanning", mar = 0.1,
# osci = TRUE, title = c("sound.files", "selec"), auto.contour = TRUE, ts.df = ff_df, col = "red", alpha = 0.6
# )
#
# # rename your tailor_sels output csv as desired, then read it back into R
# mff <- read.csv("seltailor_output_mff.csv")
# str(mff)
#
# track_freq_contour(Phae.hisnrt, wl = 300, flim = c(2, 10), bp = c(1, 12), it = "jpeg", custom.contour = mff)
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# df_inf <- inflections(X = df_df, pb = TRUE)
# str(df_inf)
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# Phae.hisnrt <- read.csv("Phae_hisnrt.csv", header = TRUE)
#
# compare_methods(
# X = Phae.hisnrt, flim = c(0, 10), bp = c(0, 10),
# wl = 300, n = 10, methods = c("XCORR", "dfDTW")
# )
## ----eval=TRUE, echo=FALSE--------------------------------------------------------------------------------------------------------------------------
params <- read.csv("acoustic_parameters.csv")
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# params <- spectro_analysis(Phae.hisnrt, bp = c(2, 10), threshold = 15)
# write.csv(params, "acoustic_parameters.csv", row.names = FALSE)
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# params <- params[, grep("fun|peakf", colnames(params), invert = TRUE)]
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4", "selec.table"))
# writeWave(Phae.long1, "Phae.long1.wav")
# writeWave(Phae.long2, "Phae.long2.wav")
# writeWave(Phae.long3, "Phae.long3.wav")
# writeWave(Phae.long4, "Phae.long4.wav")
#
# # Add a 'song' column
# selec.table$song <- rep(1:4, each = 3)[1:11]
#
# # Measure acoustic parameters
# sp <- spectro_analysis(selec.table, bp = c(1, 11), 300, fast = TRUE)
#
# # Add song data
# sp <- merge(sp, selec.table, by = c("sound.files", "selec"))
#
# # Calculate song-level parameters for all numeric parameters
# sng <- song_analysis(X = sp, song_colm = "song", parallel = 1, pb = TRUE)
# str(sng)
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# # Harmonic Phaethornis signals
# dm <- freq_DTW(Phae.hisnrt, length.out = 30, flim = c(2, 10), bp = c(2, 9), wl = 300, img = TRUE)
#
# str(dm)
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# # Tonal Tinamou signals
# Tin.sels <- read.csv("Tinamus-major-154191_sels.csv", header = TRUE)
#
# dm <- freq_DTW(Tin.sels, length.out = 30, flim = c(0, 2.5), bp = c(0.5, 2.5), wl = 512, img = TRUE)
# str(dm)
## ----eval=FALSE, echo=TRUE--------------------------------------------------------------------------------------------------------------------------
#
# xc <- cross_correlation(Phae.hisnrt, wl = 300, na.rm = FALSE)
# str(xc)
## ----eval=TRUE, dpi=220-----------------------------------------------------------------------------------------------------------------------------
# Run the PCA with only numeric variables of params
pca <- prcomp(x = params[, sapply(params, is.numeric)], scale. = TRUE)
# Check variance explained by each PC
summary(pca)
# Extract PCA scores
pcascor <- as.data.frame(pca[[5]])
# Plot the 2 first PCs
plot(pcascor[, 1], pcascor[, 2],
col = as.numeric(as.factor(params$sound.files)), pch = 20,
cex = 1, xlab = "PC1", ylab = "PC2"
)
# Add recordings/individuals labels
x <- tapply(pcascor[, 1], params$sound.files, mean)
y <- tapply(pcascor[, 2], params$sound.files, mean)
labs <- gsub(".wav", "", unique(sapply(as.character(params$sound.files), function(x) {
strsplit(x, split = "-", fixed = TRUE)[[1]][3]
}, USE.NAMES = FALSE)))
text(x, y, labs, cex = 0.75)
## ----eval=TRUE, dpi=220-----------------------------------------------------------------------------------------------------------------------------
# Create a song type variable
# First, extract recording ID
songtype <- gsub(".wav", "", sapply(as.character(params$sound.files), function(x) {
strsplit(x, split = "-", fixed = TRUE)[[1]][3]
}, USE.NAMES = FALSE))
# Now change IDs for letters representing song types
songtype <- gsub("154070|154072", "A", songtype)
songtype <- gsub("154129|154161", "B", songtype)
songtype <- gsub("154138", "C", songtype)
# Add song type as a variable representing symbol type
plot(pcascor[, 1], pcascor[, 2],
col = as.numeric(as.factor(params$sound.files)),
pch = as.numeric(as.factor(songtype)),
cex = 1, xlab = "PC1", ylab = "PC2"
)
# Add song type labels
x <- tapply(pcascor[, 1], songtype, mean)
y <- tapply(pcascor[, 2], songtype, mean)
text(x, y, unique(songtype), cex = 1)
## ----eval = FALSE, echo = TRUE----------------------------------------------------------------------------------------------------------------------
#
# data(sim_coor_sing)
# str(sim_coor_sing)
## ----eval = FALSE, echo = TRUE----------------------------------------------------------------------------------------------------------------------
#
# # save plots in a list
# g <- plot_coordination(sim_coor_sing, it = "jpeg", img = FALSE, res = 300)
#
# # print list of plots to graphics device
# g
## ----eval = FALSE, echo = TRUE----------------------------------------------------------------------------------------------------------------------
#
# cs <- test_coordination(sim_coor_sing, iterations = 1000, less.than.chance = TRUE, cutoff = 10)
# str(cs)
## ----eval = FALSE, echo = TRUE----------------------------------------------------------------------------------------------------------------------
#
# # simulate a song with 3 tonal elements
# ss <- sim_songs(n = 3, harms = 1)
#
# # plot the simulated song
# # seewave::spectro(ss)
#
# # simulate a song with 3 harmonic elements of differing amplitude
# ss <- sim_songs(n = 3, harms = 3)
#
# # plot the simulated song
# seewave::spectro(ss)
|
/scratch/gouwar.j/cran-all/cranData/warbleR/inst/doc/warbleR_workflow_03.R
|
---
title: <font size="7"><b>warbleR: Acoustic (dis)similarity, coordinated singing and simulating songs</b></font>
pagetitle: Acoustic (dis)similarity, coordinated singing and simulating songs
author:
- <a href="https://marce10.github.io">Marcelo Araya-Salas, PhD</a>
- <a href="https://smith-vidaurre.com/">Grace Smith-Vidaurre</a>
date: "`r Sys.Date()`"
output:
rmarkdown::html_document:
self_contained: yes
toc: true
toc_depth: 3
toc_float:
collapsed: false
smooth_scroll: true
vignette: >
\usepackage[utf8]{inputenc}
%\VignetteIndexEntry{4. Acoustic (dis)similarity, coordinated singing and simulating songs}
%\VignetteEngine{knitr::rmarkdown}
---
<!-- <center> <h1><b>Acoustic (dis)similarity and coordinated singing</h1></b> </center> -->
<!-- <center> <i><font size="4">Marcelo Araya-Salas and Grace Smith Vidaurre</font></i> </center> -->
<!-- <center> `r Sys.Date()` </center> -->
```{css, echo = FALSE}
div#header h1.title, div#header h3.subtitle, div#header h4.author, div#header h4.date {
text-align: center
}
```
## Bioacoustics in R with `warbleR`
<img src="warbleR_sticker.png" alt="warbleR logo" align="right" width="25%" height="25%">
Bioacoustics research encompasses a wide range of questions, study systems and methods, including the software used for analyses. The `warbleR` and `Rraven` packages leverage the flexibility of the `R` environment to offer a broad and accessible bioinformatics tool set. These packages fundamentally rely upon two types of data to begin bioacoustic analyses in R:
1. **Sound files:** Recordings in _wav_ or _mp3_ format, either from your own research or open-access databases like _xeno-canto_
2. **Selection tables:** Selection tables contain the temporal coordinates (start and end points) of selected acoustic signals within recordings
### Package repositories
These packages are both available on _CRAN_: [`warbleR`](https://cran.r-project.org/package=warbleR), [`Rraven`](https://cran.r-project.org/package=Rraven), as well as on _GitHub_: [`warbleR`](https://github.com/maRce10/warbleR), [`Rraven`](https://github.com/maRce10/Rraven). The GitHub repository will always contain the latest functions and updates. You can also check out an article in _Methods in Ecology and Evolution_ documenting the `warbleR` package <a href='#References'><sup>[1]</sup></a>.
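If you want the latest development version, one typical way to install it from GitHub is sketched below (not evaluated here; it assumes the `remotes` package is already installed):
```{r, eval=FALSE}
# install the development version of warbleR from GitHub
remotes::install_github("maRce10/warbleR")
```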
We welcome all users to provide feedback, contribute updates or new functions and report bugs to warbleR's GitHub repository.
Please note that `warbleR` and `Rraven` use functions from the [`seewave`](https://cran.r-project.org/package=seewave), [`monitoR`](https://cran.r-project.org/package=monitoR), [`tuneR`](https://cran.r-project.org/package=tuneR) and [`dtw`](https://cran.r-project.org/package=dtw) packages internally. `warbleR` and `Rraven` have been designed to make bioacoustics analyses more accessible to `R` users, and such analyses would not be possible without the tools provided by the packages above. These packages should be given credit when using `warbleR` and `Rraven` by including citations in publications as appropriate (e.g. `citation("seewave")`).
### Parallel processing in `warbleR`
Parallel processing, or using multiple cores on your machine, can greatly speed up analyses. All iterative `warbleR` functions now have parallel processing for Linux, Mac and Windows operating systems. These functions also contain progress bars to visualize progress during normal or parallel processing. See <a href='#References'><sup>[1]</sup></a> for more details about improved running time using parallel processing.
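As a rough sketch (not evaluated here), parallel processing is usually requested through the `parallel` argument, with `pb` controlling the progress bar; `your_selection_table` below is just a placeholder for your own data:
```{r, eval=FALSE}
# hypothetical call: most iterative warbleR functions accept 'parallel' (number of cores)
# and 'pb' (progress bar); replace 'your_selection_table' with your own selection table
spectro_analysis(your_selection_table, bp = c(2, 10), threshold = 15, parallel = 2, pb = TRUE)
```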
## **Vignette introduction**
In the previous vignette, we performed quality control processing of selected signals and made lexicons for visual classification. Here we continue with the case study of microgeographic vocal variation in long-billed hermit hummingbirds, _Phaethornis longirostris_ <a href='#References'><sup>[2]</sup></a> (and a short sidenote using _Tinamus major_ for an example of a tonal signal) by:
1. Detecting signal frequency range
2. Extracting spectral entropy and frequency contours as time series
3. Comparing methods for quantitative analysis of signal structure
- data set of 29 acoustic parameters
- spectrographic cross-correlation
- dynamic time warping on frequency contours
4. Visually inspecting frequency contours
5. Measuring acoustic parameters as a batch-process across signals
6. Calculating pairwise acoustic (dis)similarity between signals
7. Analysis of geographic variation in _Phaethornis longirostris_ songs
We also include some examples at the end of the vignette of how to perform coordinated singing analysis and simulate songs.
This vignette can be run without an advanced understanding of `R`, as long as you know how to run code in your console. However, knowing more about basic `R` coding would be very helpful to modify the code for your research questions.
For more details about function arguments, input or output, read the documentation for the function in question (e.g. `?cross_correlation`).
```{r, echo = FALSE, message = FALSE}
# remove all objects
rm(list = ls())
# unload all non-based packages
out <- sapply(paste("package:", names(sessionInfo()$otherPkgs), sep = ""), function(x) try(detach(x, unload = FALSE, character.only = TRUE), silent = TRUE))
# load packages
X <- c("warbleR", "knitr")
invisible(lapply(X, library, character.only = TRUE))
# library(kableExtra)
options(knitr.table.format = "html")
# opts_chunk$set(comment = "")
# opts_knit$set(root.dir = tempdir())
options(width = 150, max.print = 100)
# from https://stackoverflow.com/questions/28961431/computationally-heavy-r-vignettes, so that vignettes will be built upon installation, but not executed during R CMD check (which is contributing to the /doc being too large)
is_check <- ("CheckExEnv" %in% search()) || any(c(
"_R_CHECK_TIMINGS_",
"_R_CHECK_LICENSE_"
) %in% names(Sys.getenv()))
knitr::opts_chunk$set(eval = !is_check, comment = "")
# for vignette checking and image file output
# setwd("~/Desktop/R/warbleR_example2/")
# website to fix gifs
# https://ezgif.com/optimize
```
## **Prepare for analyses**
```{r, eval=FALSE}
library(warbleR)
# set your working directory appropriately
# setwd("/path/to/working directory")
# run this if you have restarted RStudio between vignettes without saving your workspace
# assumes that you are in your /home/username directory
setwd(file.path(getwd(), "warbleR_example"))
# Check your location
getwd()
```
This vignette series will not always include all available `warbleR` functions, as existing functions are updated and new functions are added. To see all functions available in this package:
```{r, echo=TRUE, eval=FALSE}
# The package must be loaded in your working environment
ls("package:warbleR")
```
## **Extract acoustic parameters as time series**
### Detect frequency range
_Raven_ selection tables can return low and high frequencies in your selections (e.g. if `all.data` or `freq.cols` in `run_raven` or `imp_raven` is TRUE), but the accuracy of these frequency selections depends on how the signals themselves were selected. Below we demonstrate how to visualize and detect the frequency range in your selected signals using the functions `freq_range_detec` and `freq_range`, which have options for setting bandpass filters to exclude background noise or other non-target acoustic signals.
`freq_range_detec` creates a plot that will print to your graphics device, and also outputs a data frame per recording with the frequency range. This data frame can be used in subsequent analyses if saved as an object. `freq_range_detec` works best with single signals. If used for a whole recording, `freq_range_detec` will pick up _all_ sounds in the recording.
Finally, although we have been using _Phaethornis longirostris_ vocalizations throughout these vignettes, these signals are harmonically structured. The functions for detecting frequency ranges, `freq_range` and `freq_range_detec`, work best on tonal signals, so for this example we will use Great Tinamou (_Tinamus major_) songs.
Download a tinamou recording from _xeno-canto_, make selections and visualize/detect frequency ranges.
```{r, eval=FALSE, echo=TRUE}
tin <- query_xc(qword = "Tinamus", download = FALSE)
# select a single recording
tin <- tin[tin$Recordist == "Marcelo Araya-Salas", ]
# download this recording
query_xc(X = tin, download = TRUE)
mp32wav()
```
```{r, eval=FALSE, echo=FALSE}
# Hiding the text that goes with the chunk below
# If you have _Raven_ installed on your local machine, you can use _Rraven_ to call this software and make selections. Make sure to include arguments from imp_raven to ensure that the selection table is imported with the correct columns for downstream functions. We will use the _Tinamus major_ signals for detecting frequency range below, so if you do not have _Raven_ installed on your machine, you can use the code below as a reference for your own signals.
```
```{r, eval=FALSE, echo=FALSE}
# commenting this out because this fails on my machine, although it worked when I first wrote this code...
# here you will replace the raven.path argument with the path specifying where Raven is located on your own machine
Tin.sels <- run_raven(raven.path = "/home/gsvidaurre/opt/Raven-1.5.0.0035/", sound.files = "Tinamus-major-154191.wav", import = TRUE, all.data = FALSE, name.from.file = TRUE, ext.case = "lower", freq.cols = FALSE)
str(Tin.sels)
# write the selection table as a physical file so you can read it back in at any time
# good way to save all your work
write.csv(Tin.sels, "Tinamus-major-154191_sels.csv", row.names = FALSE)
# generate individual cuts for frequency range measurements below
cut_sels(Tin.sels, mar = 0.05, labels = c("sound.files", "selec"))
```
```{r, eval=FALSE, echo=FALSE}
# Tin.sels <- read.csv("Tinamus-major-154191_sels.csv", header = TRUE)
```
```{r, eval=FALSE, echo=TRUE}
# here we will use a data set with sound files that have been already annotated
# read the selections back into the global environment
Tin.sels <- read.csv("manualoc_output.csv")
str(Tin.sels)
# cut the original wave file by selections for freq_range_detec below
writeWave(seewave::cutw(readWave("Tinamus-major-154191.wav"), from = Tin.sels$start[1], to = Tin.sels$end[1], f = 44100, plot = FALSE, output = "Wave"), filename = "Tinamus-major-154191-1.wav")
writeWave(seewave::cutw(readWave("Tinamus-major-154191.wav"), from = Tin.sels$start[2], to = Tin.sels$end[2], f = 44100, plot = FALSE, output = "Wave"), filename = "Tinamus-major-154191-2.wav")
```
```{r, eval=FALSE, echo=TRUE}
# note that changing the threshold argument in combination with the bandpass argument can improve the detection
freq_range_detec(readWave("Tinamus-major-154191-1.wav"), flim = c(0, 2.5), bp = c(0, 3), threshold = 15, plot = TRUE)
```
```{r, eval=FALSE, echo=TRUE}
# here, giving a strict bandpass with very low threshold improves freq_range detection
# since the curving end of the tinamou signal is lower amplitude than the rest of the signal
freq_range_detec(readWave("Tinamus-major-154191-1.wav"), flim = c(0, 2.5), bp = c(0, 3), threshold = 1, plot = TRUE)
```
The function `freq_range` allows you to simultaneously return the frequency ranges for all signals in a selection table, including the same graphical output as `freq_range_detec`. Check out the resulting image files in your working directory. In addition to image files, this function returns the original selection table, as a data frame with the newly calculated low and high frequency measurements.
```{r, eval=FALSE, echo=TRUE}
# use arguments from freq_range_detec above
fr <- freq_range(Tin.sels, threshold = 1, res = 100, flim = c(0, 2.5), bp = c(0.5, 2.5))
str(fr)
```
### Extract spectral entropy as a time series
Spectral entropy can be calculated as time series in selected signals and plotted onto image files. Previously, spectral entropy was only available as a sole measurement across a selection, as measured by `spectro_analysis`. Check out the resulting image files in your working directory.
```{r, eval = FALSE, echo=FALSE}
Phae.hisnrt <- read.csv("Phae_hisnrt.csv", header = TRUE)
```
```{r, eval=FALSE, echo=TRUE}
Phae.hisnrt <- read.csv("Phae_hisnrt.csv", header = TRUE)
str(Phae.hisnrt)
se <- entropy_ts(Phae.hisnrt, wl = 300, length.out = 10, threshold = 10, img = TRUE, img.suffix = "entropy_ts", type = "b", ovlp = 90, sp.en.range = c(-25, 10), flim = c(2, 10), picsize = 0.75, title = FALSE)
str(se)
```
#### Visualizing frequency contours with `track_freq_contour`
The function `track_freq_contour` allows you to create spectrograms and visualize the accuracy of dominant frequency and fundamental frequency measurements.
Use `track_freq_contour` on all the recordings for which you want to extract frequency contours as a time series, or later, calculate other frequency measurements. Scroll through all the spectrograms to get a feeling for how well the frequency measurements will be performed across your recordings.
Running `track_freq_contour` can allow you to decide which frequency parameters to use in subsequent analyses, namely `spectro_analysis` and dynamic time warping methods. Also, if the frequency measurements look acceptable with the bandpass setting used in `track_freq_contour`, use that same bandpass while running `spectro_analysis`.
```{r, eval=FALSE, echo=TRUE}
# Note that the dominant frequency measurements are almost always more accurate
track_freq_contour(Phae.hisnrt, wl = 300, flim = c(2, 10), bp = c(1, 12), it = "jpeg")
# We can change the lower end of bandpass to make the frequency measurements more precise
track_freq_contour(Phae.hisnrt, wl = 300, flim = c(2, 10), bp = c(2, 12), col = c("purple", "orange"), pch = c(17, 3), res = 100, it = "jpeg", picsize = 0.8)
```
<!-- <center> </center> -->
Note that the fundamental frequency measurements are not always very accurate, so we will remove fundamental frequency measurements later on.
### Extract fundamental or dominant frequency contours as a time series
These functions return a data frame that contains estimated frequency contours as a time series across each signal in the input data frame. You can also specify if you want to create image files with the estimated frequency contours plotted over spectrograms. You can change argument settings to better visualize the signals or change the estimation of the frequency contour. For instance, the argument `threshold` (as in `auto_detec`) controls the amplitude threshold for estimating frequency values at each time point. Note that the fundamental frequency contour estimation can often have errors, and tends to perform best with more tonal signals. The frequency contours are those that can be visualized using `track_freq_contour`.
```{r, echo=FALSE, eval=FALSE}
# decided to remove track_harmonics, not working well for either Phaethornis or Tinamou signals
# the text for above this chunk
# `track_harmonics` is a modified function from `seewave` that allows you to track the dominant frequency for harmonic calls, even when the amplitude fluctuates among harmonics.
# with a Phaethornis harmonic signal
nm <- paste(paste(as.character(Phae.hisnrt$sound.files[1]), as.character(Phae.hisnrt$selec[1]), sep = "-"), ".wav", sep = "")
writeWave(seewave::cutw(readWave(as.character(Phae.hisnrt$sound.files[1])), from = Phae.hisnrt$start[1], to = Phae.hisnrt$end[1], f = 44100, plot = FALSE, output = "Wave"), filename = nm)
trck_hrm <- track_harmonic(readWave(nm), f = 44100, ovlp = 70, fftw = FALSE, threshold = 15, bandpass = NULL, clip = 0.1, plot = TRUE, xlab = "Time (s)", ylab = "Frequency (kHz)", adjust.wl = FALSE, dfrq = FALSE)
# plot spectrogram
spectro(readWave(nm), grid = FALSE, scale = FALSE, f = 22050, ovlp = 90, palette = reverse.gray.colors.2, collevels = seq(-40, 0, 1), wl = 300, osc = FALSE, flim = c(2, 10), main = "warbleR's 'track_harmonic'")
# plot detected frequency contour
points(x = trck_hrm[, 1] + 0.1, y = trck_hrm[, 2], cex = 1, col = "red", pch = 20)
```
```{r, echo=FALSE, eval=FALSE}
# with a Tinamou tonal signal
trck_hrm <- track_harmonic(readWave("Tinamus-major-154191-1.wav"), f = 44100, ovlp = 70, fftw = FALSE, threshold = 15, bandpass = NULL, plot = TRUE, xlab = "Time (s)", ylab = "Frequency (kHz)", adjust.wl = FALSE, dfrq = FALSE)
# plot spectrogram
spectro(readWave("Tinamus-major-154191-2.wav"), grid = FALSE, scale = FALSE, f = 44100, ovlp = 90, palette = reverse.gray.colors.2, collevels = seq(-40, 0, 1), wl = 300, osc = FALSE, flim = c(0, 4), main = "warbleR's 'track_harmonic'")
# plot detected frequency contour
points(x = trck_hrm[, 1] + 0.1, y = trck_hrm[, 2], cex = 1, col = "red", pch = 20)
```
```{r, echo=TRUE, eval=FALSE}
# Fundamental frequency contour
ff_df <- freq_ts(Phae.hisnrt, wl = 300, length.out = 20, threshold = 15, img = TRUE, img.suffix = "ff", type = "p", ovlp = 70, clip.edges = FALSE, leglab = "freq_ts", ff.method = "tuneR")
str(ff_df)
```
```{r, echo=TRUE, eval=FALSE}
# Dominant frequency contour
# Uses seewave function dfreq by default
df_df <- freq_ts(Phae.hisnrt, wl = 300, length.out = 20, threshold = 15, img = TRUE, img.suffix = "ff", type = "p", ovlp = 70, clip.edges = FALSE, leglab = "freq_ts", fsmooth = 0.2)
str(df_df)
```
### Manually tailor frequency contours with tailor_sels
The functions above to track/extract dominant and fundamental frequency contours perform best with more tonal signals. The frequency measurements for signals with harmonic structure tend to jump around, and might not always match your own visual tracking of frequency contours. If this is the case, you can use the function `tailor_sels` to fix frequency contours where individual frequency measurements are clearly far off from the frequency contour detected by the human eye. The `tailor_sels` function is the same used in vignette 2 in this series, but by changing a few arguments, you can use `tailor_sels` to fix frequency contours.
Note that manually fixing frequency contours might not make sense, depending on your question and/or the contour in question. For instance, a dominant frequency contour for a harmonic signal that jumps around and does not form a smooth contour may in fact be the truth, rather than mis-estimation of the contour. On the other hand, fundamental frequencies can be more easily traced by the human eye across a signal, so using `tailor_sels` to fix a frequency contour that jumps around the signal makes more sense.
When the new graphics window for `tailor_sels` appears, it will show spectrograms as we saw in vignette 2, but with frequency contours plotted as points over each spectrogram. To fix the frequency contour, click near the misaligned points to place them over the frequency contour that you detect by eye. `tailor_sels` makes a new `.csv` file in your working directory that merges your original data frame (below, `Phae.hisnrt`) with the modified frequency time series (below, `ff_df` with any modified frequency values). To check that your manual tracing improved frequency contours, you can use `track_freq_contour` to make spectrograms with your new frequency contours plotted as custom contours.
```{r, eval=FALSE, echo=TRUE}
# Use the original data frame of songs for the main tailor_sels dataset
# the data frame with the fundamental frequency contours is provided for manual tracing
tailor_sels(Phae.hisnrt,
wl = 300, flim = c(2, 10), wn = "hanning", mar = 0.1,
osci = TRUE, title = c("sound.files", "selec"), auto.contour = TRUE, ts.df = ff_df, col = "red", alpha = 0.6
)
# rename your tailor_sels output csv as desired, then read it back into R
mff <- read.csv("seltailor_output_mff.csv")
str(mff)
track_freq_contour(Phae.hisnrt, wl = 300, flim = c(2, 10), bp = c(1, 12), it = "jpeg", custom.contour = mff)
```
### Count inflections across frequency contours
This function calculates the modulation index for any frequency contour or time series. The function `spectro_analysis` (see below) also calculates a modulation index for signals, but as a single value across the length of the signal.
```{r, eval=FALSE, echo=TRUE}
df_inf <- inflections(X = df_df, pb = TRUE)
str(df_inf)
```
## **Quantitative measurements of acoustic (dis)similarity**
### Compare methods for quantitative analysis of signal structure
Bioacoustic research relies on quantifying the structure of acoustic signals and comparing that structure across behavioral/ecological contexts, groups or species. However, measuring signal structure in a way that fully accounts for the variation in the signals could be a tricky task. Some of the differences that are apparent by visual inspection of spectrograms might not be picked up by some analyses. Hence, choosing the most appropriate analytical approach is a critical step.
The `warbleR` function `compare_methods` attempts to facilitate method selection. This function produces graphs (as image files in the working directory) with spectrograms from 4 signals that allow visual inspection of the performance of acoustic analysis methods at comparing those signals. The signals are randomly picked from the provided data frame (`X` argument), and the function compares 2 `warbleR` methods at a time. The methods available are:
* cross-correlation by warbleR function `cross_correlation`
* dynamic time warping on dominant or fundamental frequency contours with `freq_DTW`
* spectral parameters with `spectro_analysis`
In the last vignette, we tailored selections of _Phaethornis longirostris_ songs that were originally downloaded from _xeno-canto_, detected by `auto_detec` and filtered by signal-to-noise ratio (SNR). Here we will pick up the workflow with these filtered and tailored selections, using the data frame `Phae.hisnrt`.
```{r, eval=FALSE, echo=TRUE}
Phae.hisnrt <- read.csv("Phae_hisnrt.csv", header = TRUE)
compare_methods(
X = Phae.hisnrt, flim = c(0, 10), bp = c(0, 10),
wl = 300, n = 10, methods = c("XCORR", "dfDTW")
)
```
`compare_methods` will produce 10 image files in the working directory (since we specified `n = 10`) that look like this:
<!-- <center> </center> -->
In this graphic, the acoustic pairwise distance between signals is shown next to the arrows linking them. The font color of a distance value corresponds to the font color of the method that generated it, as shown in the scatterplots (in this case black font represents XCORR distances). Distances are standardized, with 0 being the distance of a signal to itself and 1 the farthest pairwise distance in the pool of signals. Principal Component Analysis (`princomp` function) is applied to calculate distances when using spectral parameters (SP). In this case, the first 2 PC's are used. Classical Multidimensional Scaling (also known as Principal Coordinates Analysis, `cmdscale` function) is used for all other methods. The image file name contains the methods being compared and the row number of the selections. This function internally uses a modified version of the `spectro` function from the `seewave` package to create spectrograms. Note that the spectrograms are all plotted with the same frequency and time scales.
Also note that the graphs contain 2 scatterplots (1 per method) of the acoustic space of all signals in the input data frame `X`. The position of the 4 signals in the spectrograms is highlighted in the acoustic space scatterplot. These graphics allow you to directly assess if the distances between signals in the acoustic space accurately represent the spectrographic similarity (e.g. how similar their acoustic structure looks in the spectrograms).
You can run `compare_methods` for any combination of the quantitative methods for assessing acoustic (dis)similarity mentioned above. Importantly, to include the SP method (spectral parameters measured by the function `spectro_analysis`), you need a large enough dataset, as the PCA that summarizes the spectral parameters needs more units (rows) than variables (columns).
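As a sketch (not evaluated here), including the SP method only requires adding it to the `methods` argument; this assumes `Phae.hisnrt` contains enough selections for the internal PCA:
```{r, eval=FALSE}
# compare spectral parameters (SP) against dynamic time warping on dominant frequency contours (dfDTW)
compare_methods(X = Phae.hisnrt, flim = c(0, 10), bp = c(0, 10), wl = 300, n = 10, methods = c("SP", "dfDTW"))
```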
### Measure acoustic parameters with spectro_analysis
We can now perform acoustic measurements with the function `spectro_analysis`. This function relies on the temporal coordinates in selection tables to measure 29 parameters across selections. `spectro_analysis` is a batch process that is faster than calculating measurements manually, e.g. one selection and recording at a time. `spectro_analysis` uses and customizes several functions available in the [`seewave package`](https://cran.r-project.org/package=seewave).
Use the bandpass filter to your advantage here, to filter out low or high background noise before performing measurements. Also note that changing the amplitude threshold will change the amplitude at which noises (including non-target signals) are detected for measurements.
```{r, eval=TRUE, echo=FALSE}
params <- read.csv("acoustic_parameters.csv")
```
```{r, eval=FALSE, echo=TRUE}
params <- spectro_analysis(Phae.hisnrt, bp = c(2, 10), threshold = 15)
write.csv(params, "acoustic_parameters.csv", row.names = FALSE)
```
Remove parameters derived from fundamental frequency (based on `track_freq_contour` results).
```{r, eval=FALSE, echo=TRUE}
params <- params[, grep("fun|peakf", colnames(params), invert = TRUE)]
```
### Calculate acoustic parameters by song type
In addition to calculating acoustic parameters per individual signals using `spectro_analysis`, you can also calculate these acoustic parameters by song type (average, minimum and maximum values per song type group).
```{r, eval=FALSE, echo=TRUE}
data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4", "selec.table"))
writeWave(Phae.long1, "Phae.long1.wav")
writeWave(Phae.long2, "Phae.long2.wav")
writeWave(Phae.long3, "Phae.long3.wav")
writeWave(Phae.long4, "Phae.long4.wav")
# Add a 'song' column
selec.table$song <- rep(1:4, each = 3)[1:11]
# Measure acoustic parameters
sp <- spectro_analysis(selec.table, bp = c(1, 11), 300, fast = TRUE)
# Add song data
sp <- merge(sp, selec.table, by = c("sound.files", "selec"))
# Calculate song-level parameters for all numeric parameters
sng <- song_analysis(X = sp, song_colm = "song", parallel = 1, pb = TRUE)
str(sng)
```
### Dynamic time warping of frequency contours
The dynamic time warping methods in `warbleR` all rely on functions from the `dtw` package, and are available for both dominant and fundamental frequencies. `freq_DTW` calculates the dominant or fundamental frequency contour of each signal and compares the contours using dynamic time warping. You can interpolate measurements across the frequency time series using the `length.out` argument.
This function returns a matrix of pairwise acoustic dissimilarity (e.g. acoustic "distance") measurements that can be used in analyses of acoustic similarity, as well as image files with the frequency contours plotted over the spectrograms. If you require only the time series without the dynamic time warping analysis for either the dominant or fundamental frequency, check out the function `freq_ts`.
Note that, as with the `freq_range` and `freq_range_detec` functions, the dynamic time warping functions tend to work best on more tonal signals. Check out the resulting image files in your working directory.
```{r, eval=FALSE, echo=TRUE}
# Harmonic Phaethornis signals
dm <- freq_DTW(Phae.hisnrt, length.out = 30, flim = c(2, 10), bp = c(2, 9), wl = 300, img = TRUE)
str(dm)
```
```{r, eval=FALSE, echo=TRUE}
# Tonal Tinamou signals
Tin.sels <- read.csv("Tinamus-major-154191_sels.csv", header = TRUE)
dm <- freq_DTW(Tin.sels, length.out = 30, flim = c(0, 2.5), bp = c(0.5, 2.5), wl = 512, img = TRUE)
str(dm)
```
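The pairwise distance matrices returned above can be passed to standard ordination methods. A minimal sketch using classical multidimensional scaling from base R, assuming `dm` is the distance matrix produced in the previous chunk:
```{r, eval=FALSE}
# represent the pairwise DTW distances in 2 dimensions
mds <- stats::cmdscale(as.dist(dm), k = 2)
plot(mds[, 1], mds[, 2], pch = 20, xlab = "Dimension 1", ylab = "Dimension 2")
```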
### Spectrographic cross-correlation
Cross-correlation calculates the pairwise similarity of multiple signals by spectrogram cross-correlation. `cross_correlation` calculates a correlation of the amplitude values at each step by sliding one spectrogram over the other. This function is a modified version of the `corMatch` and `makeTemplate` functions from the package `monitoR`. `cross_correlation` runs over multiple signals and returns a list of output, including:
* a correlation statistic for each "sliding" step
* the maximum (peak) correlation for each pairwise comparison
`cross_correlation` requires margins half the duration of the signal on either side of the signal (e.g. before and after the start and end coordinates). Signals that are very close to either the start or end of a recording may throw errors. Additionally, when sound files have been modified by bandpass filters, some pairwise cross-correlations may not work and the correlation matrix may contain `NA` values. `cross_correlation` now contains an argument to remove columns of the matrix that contain `NA` values, which will reduce the data in the matrix but also allow the matrix to be used in subsequent analyses.
Here our output is a matrix of peak correlation per pairwise comparison, with the recording-selection names as the matrix dimension names.
```{r, eval=FALSE, echo=TRUE}
xc <- cross_correlation(Phae.hisnrt, wl = 300, na.rm = FALSE)
str(xc)
```
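Because higher peak correlations mean greater similarity, a common next step is to convert the correlation matrix into a dissimilarity measure before ordination or clustering. A rough sketch, assuming `xc` is the peak correlation matrix described above:
```{r, eval=FALSE}
# convert peak correlations to dissimilarities (1 - correlation) for downstream analyses
xc.dist <- as.dist(1 - xc)
str(xc.dist)
```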
## Analysis of geographic variation using `spectro_analysis` measurements
We can evaluate whether or not the observed variation in song structure is reflected by the acoustic parameters we just measured. For this we will conduct a Principal Component Analysis on scaled (z-transformed) parameters and look at the grouping of songs (data points) in the scatter plot.
```{r, eval=TRUE, dpi=220}
# Run the PCA with only numeric variables of params
pca <- prcomp(x = params[, sapply(params, is.numeric)], scale. = TRUE)
# Check variance explained by each PC
summary(pca)
# Extract PCA scores
pcascor <- as.data.frame(pca[[5]])
# Plot the 2 first PCs
plot(pcascor[, 1], pcascor[, 2],
col = as.numeric(as.factor(params$sound.files)), pch = 20,
cex = 1, xlab = "PC1", ylab = "PC2"
)
# Add recordings/individuals labels
x <- tapply(pcascor[, 1], params$sound.files, mean)
y <- tapply(pcascor[, 2], params$sound.files, mean)
labs <- gsub(".wav", "", unique(sapply(as.character(params$sound.files), function(x) {
strsplit(x, split = "-", fixed = TRUE)[[1]][3]
}, USE.NAMES = FALSE)))
text(x, y, labs, cex = 0.75)
```
Songs are grouped by sound file. As each sound file represents a single individual, this suggests that songs have individual signatures. Let's look at the song type level. First, we need to classify the songs by song type. We can check the spectrograms we previously created to do this.
<!-- <center> </center> -->
Songs from sound files 154070 and 154072 seem to belong to the same song type. Sound files 154129 and 154161 represent a different song type. Finally, the songs from each of the other 2 sound files have a unique structure, so each one represents a different song type. Sound file 154123 is represented here too, but in vignette 1 we decided against using signals from this recording. We can add this information to the plot by using symbols to represent song types.
```{r, eval=TRUE, dpi=220}
# Create a song type variable
# First, extract recording ID
songtype <- gsub(".wav", "", sapply(as.character(params$sound.files), function(x) {
strsplit(x, split = "-", fixed = TRUE)[[1]][3]
}, USE.NAMES = FALSE))
# Now change IDs for letters representing song types
songtype <- gsub("154070|154072", "A", songtype)
songtype <- gsub("154129|154161", "B", songtype)
songtype <- gsub("154138", "C", songtype)
# Add song type as a variable representing symbol type
plot(pcascor[, 1], pcascor[, 2],
col = as.numeric(as.factor(params$sound.files)),
pch = as.numeric(as.factor(songtype)),
cex = 1, xlab = "PC1", ylab = "PC2"
)
# Add song type labels
x <- tapply(pcascor[, 1], songtype, mean)
y <- tapply(pcascor[, 2], songtype, mean)
text(x, y, unique(songtype), cex = 1)
```
Songs of the same song type are more similar (they cluster together). This PCA confirms that the visually obvious differences in the song structures are well described by the acoustic parameters measured in [`warbleR`](https://cran.r-project.org/package=warbleR). Likewise, it also confirms that we can detect biologically relevant differences from sound files that have undergone _mp3_ compression and conversion back to _wav_ format (see also <a href='#References'> <sup>[2]</sup></a>). Importantly, if this analysis were for publication we would have to run other filtering steps on the acoustic parameters generated by `spectro_analysis`, including removing any collinear variables.
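One quick way to screen for collinearity is to inspect pairwise correlations among the numeric parameters. A rough sketch (not evaluated here; the 0.9 cutoff is arbitrary and only illustrative):
```{r, eval=FALSE}
# flag pairs of acoustic parameters with very high absolute correlation
num.params <- params[, sapply(params, is.numeric)]
cormat <- cor(num.params, use = "pairwise.complete.obs")
high.cor <- which(abs(cormat) > 0.9 & upper.tri(cormat), arr.ind = TRUE)
data.frame(param1 = rownames(cormat)[high.cor[, 1]], param2 = colnames(cormat)[high.cor[, 2]])
```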
## Coordinated singing analysis
`warbleR` contains functions to visualize and test whether or not singing bouts between individuals can be considered coordinated singing. The function `plot_coordination` plots signals as polygons to better visualize overlap between these signals.
The function `test_coordination` uses a Monte Carlo randomization test to determine whether or not the probability of finding overlapping songs in a singing event is less than or more than what you would expect by chance. `test_coordination` can be run simultaneously for many singing events, if all are contained in a single data frame (see `test_coordination` documentation for more information).
```{r, eval = FALSE, echo = TRUE}
data(sim_coor_sing)
str(sim_coor_sing)
```
The `sim_coor_sing` dataset contains three types of singing bouts:
```{r, eval = FALSE, echo = TRUE}
# save plots in a list
g <- plot_coordination(sim_coor_sing, it = "jpeg", img = FALSE, res = 300)
# print list of plots to graphics device
g
```
We can test for overlapping or alternating coordinated singing using the `less.than.chance` argument in `test_coordination`. Here we will test for alternating coordinated singing, since `less.than.chance` is set to `TRUE`.
```{r, eval = FALSE, echo = TRUE}
cs <- test_coordination(sim_coor_sing, iterations = 1000, less.than.chance = TRUE, cutoff = 10)
str(cs)
```
## Simulating songs
You can simulate songs using `warbleR`. Depending on your question, you can simulate songs with different numbers of subunits, subunit durations, numbers of harmonics, amplitudes, gaps between subunits, amplitude fading of subunits, among other options. Songs are simulated under Brownian motion frequency drift.
```{r, eval = FALSE, echo = TRUE}
# simulate a song with 3 tonal elements
ss <- sim_songs(n = 3, harms = 1)
# plot the simulated song
# seewave::spectro(ss)
# simulate a song with 3 harmonic elements of differing amplitude
ss <- sim_songs(n = 3, harms = 3)
# plot the simulated song
seewave::spectro(ss)
```
This vignette concludes the `warbleR` vignette series. After running the code in this third vignette, you should now have an idea of how to:
* measure frequency range of signals
* extract spectral entropy and frequency contours as time series
* manually fix frequency contours (as appropriate)
* compare and choose methods for acoustic (dis)similarity analysis
* use different methods for acoustic (dis)similarity analysis
* `spectro_analysis` - 29 parameters (across signals or song types)
* `freq_DTW` - dynamic time warping on frequency contours
* `cross_correlation` - spectrographic cross-correlation
* carry out steps for an analysis of geographic variation in vocalizations
* perform coordinated singing analysis
* simulate songs for other research questions
In these vignettes we presented examples of how the `warbleR` functions can be used in a streamlined workflow for flexible and rigorous bioacoustics analyses. The output of `warbleR` functions can be used in other packages for statistical analyses, such as machine learning.
## **Citation**
Please cite `warbleR` when you use the package:
Araya-Salas, M. and Smith-Vidaurre, G. (2017), warbleR: an R package to streamline analysis of animal acoustic signals. Methods Ecol Evol. 8, 184-191.
## **Reporting bugs**
Please report any bugs [here](https://github.com/maRce10/warbleR/issues).
<font size="5"><a name="References">References</a></font>
1. Araya-Salas, M. and G. Smith-Vidaurre. 2016. warbleR: an R package to streamline analysis of animal acoustic signals. _Methods in Ecology and Evolution_. doi: 10.1111/2041-210X.12624
|
/scratch/gouwar.j/cran-all/cranData/warbleR/inst/doc/warbleR_workflow_03.Rmd
|
# if(getRversion() < "3.3.0")
# {
# stop("Your version of R is too old. This package requires R-3.3.0 or newer on Windows.")
# }
#
# if (Sys.getenv("LIB_FFTW") == "")
# {
# if (!file.exists("../windows/include/fftw3.h") ||
# !file.exists(paste0("../windows/lib/", .Platform$r_arch, "/fftw3.a")))
# {
# download.file("https://github.com/wavx/rpkg-libs/raw/master/fftw3/fftw3.zip", "lib.zip", quiet = TRUE)
#
# dir.create("../windows", showWarnings = FALSE)
# unzip("lib.zip", exdir = "../windows", files = c("include/fftw3.h", paste0("lib/", .Platform$r_arch, "/libfftw3.a")))
# unlink("lib.zip")
# }
# }
#
# if (!file.exists("../windows/include/soxr-lsr.h") ||
# !file.exists(paste0("../windows/lib/", .Platform$r_arch, "/libsoxr-lsr.a")))
# {
# download.file("https://github.com/wavx/rpkg-libs/raw/master/libsoxr-lsr/libsoxr-lsr.zip", "lib.zip", quiet = TRUE)
#
# dir.create("../windows", showWarnings = FALSE)
# unzip(
# "lib.zip",
# exdir = "../windows",
# files = c("include/soxr.h", "include/soxr-lsr.h",
# paste0("lib/", .Platform$r_arch, c("/libsoxr.a", "/libsoxr-lsr.a"))
# )
# )
# unlink("lib.zip")
# }
#
|
/scratch/gouwar.j/cran-all/cranData/warbleR/tools/winlibs.R
|
---
title: <font size="7"><b>Introduction to warbleR</b></font>
pagetitle: Introduction to warbleR
author:
- <a href="https://marce10.github.io">Marcelo Araya-Salas, PhD</a> & <a href="https://smith-vidaurre.com/">Grace Smith-Vidaurre</a>
date: "`r Sys.Date()`"
output:
rmarkdown::html_document:
self_contained: yes
toc: true
toc_depth: 3
toc_float:
collapsed: false
smooth_scroll: true
vignette: >
%\VignetteIndexEntry{1. Introduction to warbleR}
%\usepackage[utf8]{inputenc}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
<!-- <script> -->
<!-- $(document).ready(function() { -->
<!-- $head = $('#header'); -->
<!-- $head.prepend('<img src=\"logo.png\"/>') -->
<!-- }); -->
<!-- </script> -->
<!-- -->
```{css, echo = FALSE}
div#header h1.title, div#header h3.subtitle, div#header h4.author, div#header h4.date {
text-align: center
}
```
<img src="warbleR_sticker.png" alt="warbleR logo" align="right" width="25%" height="25%">
The [warbleR](https://cran.r-project.org/package=warbleR) package is intended to facilitate the analysis of the structure of animal acoustic signals in R. Users can enter their own data into a workflow that facilitates spectrographic visualization and measurement of acoustic parameters. **warbleR** makes use of the fundamental sound analysis tools of the **seewave** package, and offers new tools for acoustic structure analysis. These tools are available for batch analysis of acoustic signals.
<font size = "3">The main features of the package are:
- The use of loops to apply tasks through acoustic signals referenced in a selection table
- The production of image files with spectrograms that let users organize data and verify acoustic analyses </font>
<center><img src = "loop_warbleR_images_optim.gif" alt = "warbleR image loop" width = "500"></center>
The package offers functions for:
- Browse and download recordings from [Xeno-Canto](https://www.xeno-canto.org/)
- Explore, organize and manipulate multiple sound files
- Detect signals automatically (in frequency and time)
- Create spectrograms of complete recordings or individual signals
- Run different measures of acoustic signal structure
- Evaluate the performance of measurement methods
- Catalog signals
- Characterize different structural levels in acoustic signals
- Statistical analysis of duet coordination
- Consolidate databases and annotation tables
Most of the functions allow the parallelization of tasks, which distributes the tasks among several cores to improve computational efficiency. Tools to evaluate the performance of the analysis at each step are also available. All these tools are provided in a standardized workflow for the analysis of the signal structure, making them accessible to a wide range of users, including those without much knowledge of R. **warbleR** is a young package (officially published in 2017) currently in a maturation stage.
## Selection tables
These objects are created with the `selection_table()` function. The function takes data frames containing selection data (name of the sound file, selection, start, end ...), verifies if the information is consistent (see the function `checksels()` for details) and saves the 'diagnostic' metadata as an attribute. The selection tables are basically data frames in which the information contained has been corroborated so it can be read by other **warbleR** functions. The selection tables must contain (at least) the following columns:
1. sound files (sound.files)
1. selection (selec)
1. start
1. end
The sample data "lbh_selec_table" contains these columns:
```{r extn_sel_2, echo = FALSE, message = FALSE}
# load packages
library(warbleR)
library(knitr)
cf <- read.csv("function_descrip_table.csv", stringsAsFactors = FALSE)
data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4"))
writeWave(Phae.long1, file.path(tempdir(), "Phae.long1.wav"))
writeWave(Phae.long2, file.path(tempdir(), "Phae.long2.wav"))
writeWave(Phae.long3, file.path(tempdir(), "Phae.long3.wav"))
writeWave(Phae.long4, file.path(tempdir(), "Phae.long4.wav"))
warbleR_options(wav.path = tempdir())
options(knitr.table.format = "html")
opts_chunk$set(comment = "")
opts_knit$set(root.dir = tempdir())
options(width = 100, max.print = 100)
```
```{r extn_sel_4.1, eval=FALSE}
data("lbh_selec_table")
lbh_selec_table
```
```{r extn_sel_4.2, echo=FALSE, eval = TRUE}
library(kableExtra)
kbl <- kable(lbh_selec_table, align = "c", row.names = F, format = "html")
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl <- scroll_box(kbl,
width = "740px",
box_css = "border: 1px solid #ddd; padding: 1px; ", extra_css = NULL
)
kbl
```
... and can be converted to the *selection_table* format like this (after saving the corresponding sound files):
```{r extn_sel_4.32, eval = FALSE}
data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4"))
writeWave(Phae.long1, file.path(tempdir(), "Phae.long1.wav"))
writeWave(Phae.long2, file.path(tempdir(), "Phae.long2.wav"))
writeWave(Phae.long3, file.path(tempdir(), "Phae.long3.wav"))
writeWave(Phae.long4, file.path(tempdir(), "Phae.long4.wav"))
# global parameters
warbleR_options(wav.path = tempdir())
st <- selection_table(X = lbh_selec_table, pb = FALSE)
st
```
```{r, eval = TRUE, echo = FALSE}
st <- selection_table(X = lbh_selec_table, pb = FALSE)
```
```{r, eval = TRUE, echo = FALSE}
st
```
Note that the path to the sound files has been provided. This is necessary in order to verify that the data provided conforms to the characteristics of the audio files.
Selection tables have their own class in R:
```{r}
class(st)
```
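As a minimal sketch (not run), a selection table can also be built from a data frame created by hand; the sound file name below is hypothetical and would need to exist in the directory set as `wav.path`:
```{r, eval = FALSE}
# hypothetical annotations for a single sound file
my_sels <- data.frame(
  sound.files = "my_recording.wav", # must be present in the wav.path directory
  selec = 1:2,
  start = c(0.5, 2.1),
  end = c(0.9, 2.6)
)
my_st <- selection_table(X = my_sels)
```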
### Extended selection tables
When the `extended = TRUE` argument is used, the function generates an object of the *extended_selection_table* class that also contains a list of 'wave' objects corresponding to each of the selections in the data. Therefore, the function **transforms the selection table into a self-contained object**, since the original sound files are no longer needed to perform most of the acoustic analyses in **warbleR**. This can greatly facilitate the storage and exchange of (bio)acoustic data. In addition, it also speeds up analysis, since it is not necessary to read the sound files every time the data is analyzed.
Now, as mentioned earlier, you need the `selection_table()` function to create an extended selection table. You must also set the argument `extended = TRUE` (otherwise, the class would be a selection table). The following code converts the sample data into an extended selection table:
```{r extn_sel_4.3, eval = FALSE}
# global parameters
warbleR_options(wav.path = tempdir())
ext_st <- selection_table(
X = lbh_selec_table, pb = FALSE,
extended = TRUE, confirm.extended = FALSE
)
```
```{r extn_sel_4.33, eval = TRUE, echo = FALSE}
ext_st <- selection_table(
X = lbh_selec_table, pb = FALSE,
extended = TRUE, confirm.extended = FALSE
)
```
And that's it. Now the acoustic data and the selection data (as well as the additional metadata) are all together in a single R object.
### Handling extended selection tables
Several functions can be used to deal with objects of this class. You can test whether an object belongs to the *extended_selection_table* class:
```{r extn_sel_5}
is_extended_selection_table(ext_st)
```
You can subset the selections in the same way as any other data frame, and the result will still keep its attributes:
```{r extn_sel_6}
ext_st2 <- ext_st[1:2, ]
is_extended_selection_table(ext_st2)
```
There is also a generic version of `print()` for this class of objects:
```{r extn_sel_7}
## print
print(ext_st)
```
... which is equivalent to:
```{r extn_sel_7.1, eval=FALSE}
ext_st
```
```{r extn_sel_7.2, echo=FALSE}
print(ext_st)
```
You can also join them in rows. Here the original *extended_selection_table* is divided into 2 and bound again using `rbind()`:
```{r extn_sel_8, eval = FALSE}
ext_st3 <- ext_st[1:5, ]
ext_st4 <- ext_st[6:11, ]
ext_st5 <- rbind(ext_st3, ext_st4)
# print
ext_st5
```
```{r extn_sel_8.1, echo=FALSE}
ext_st3 <- ext_st[1:5, ]
ext_st4 <- ext_st[6:11, ]
ext_st5 <- rbind(ext_st3, ext_st4)
# print
print(ext_st5)
```
```{r extn_sel_8.2}
# same as the original
all.equal(ext_st, ext_st5)
```
The 'wave' objects can be read individually using `read_wave()`, a wrapper for the `readWave()` function of **tuneR**, which can handle extended selection tables:
```{r extn_sel_8.21}
wv1 <- read_wave(X = ext_st, index = 3, from = 0, to = 0.37)
```
These are regular 'wave' objects:
```{r extn_sel_8.22, out.width= 750}
class(wv1)
wv1
spectro(wv1, wl = 150, grid = FALSE, scale = FALSE, ovlp = 90)
```
```{r extn_sel_8.23, out.width= 750}
par(mfrow = c(3, 2), mar = rep(0, 4))
for (i in 1:6) {
wv <- read_wave(X = ext_st, index = i, from = 0.05, to = 0.32)
spectro(wv,
wl = 150, grid = FALSE, scale = FALSE, axisX = FALSE,
axisY = FALSE, ovlp = 90
)
}
```
The `read_wave()` function requires the selection table, as well as the row index (i.e. the row number) to be able to read the 'wave' objects. It can also read a regular 'wave' file if the path is provided.
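As a minimal sketch (not run), one of the example files written to the temporary directory earlier can be read in full by providing its name; the exact call is an assumption based on the `read_wave()` documentation:
```{r, eval = FALSE}
# read a complete sound file (not a selection) by name and path
wv_full <- read_wave(X = "Phae.long1.wav", path = tempdir())
wv_full
```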
Note that other functions that modify data frames are likely to delete the attributes in which the 'wave' objects and metadata are stored. For example, merging an extended selection table with a regular data frame will remove its attributes:
```{r extn_sel_8.24}
# create new data frame
Y <- data.frame(sound.files = ext_st$sound.files, site = "La Selva", lek = c(rep("SUR", 5), rep("CCL", 6)))
# combine
mrg_ext_st <- merge(ext_st, Y, by = "sound.files")
# check class
is_extended_selection_table(mrg_ext_st)
```
In this case, we can use the `fix_extended_selection_table()` function to transfer the attributes of the original extended selection table:
```{r extn_sel_8.25}
# fix est
mrg_ext_st <- fix_extended_selection_table(X = mrg_ext_st, Y = ext_st)
# check class
is_extended_selection_table(mrg_ext_st)
```
This works as long as some of the original sound files are retained and no other selections are added.
### Analysis using extended selection tables
These objects can be used as input for most **warbleR** functions. Here are some examples of **warbleR** functions using *extended_selection_table*:
#### Spectral parameters
```{r extn_sel_12.1, eval=FALSE}
# spectral parameters
sp <- spectro_analysis(ext_st)
sp
```
```{r, extn_sel_12.2, echo= FALSE, eval = FALSE}
sp <- spectro_analysis(ext_st)
kbl <- kable(sp, align = "c", row.names = F, format = "html")
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl <- scroll_box(kbl,
width = "740px",
box_css = "border: 1px solid #ddd; padding: 1px; ", extra_css = NULL
)
kbl
```
#### Signal-to-noise ratio
```{r extn_sel_12.5, eval=FALSE}
snr <- sig2noise(ext_st, mar = 0.05)
snr
```
```{r, extn_sel_12.6, echo= FALSE, eval = FALSE}
snr <- sig2noise(ext_st, mar = 0.05)
kbl <- kable(snr, align = "c", row.names = F, format = "html")
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl <- scroll_box(kbl,
width = "740px",
box_css = "border: 1px solid #ddd; padding: 1px; ", extra_css = NULL
)
kbl
```
#### Dynamic time warping (DTW)
```{r extn_sel_12.7, eval=FALSE}
dtw.dist <- freq_DTW(ext_st, img = FALSE)
dtw.dist
```
```{r, extn_sel_12.8, echo= FALSE, eval = FALSE}
dtw.dist <- freq_DTW(ext_st, img = FALSE)
kbl <- kable(dtw.dist, align = "c", row.names = T, format = "html")
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl <- scroll_box(kbl,
width = "740px",
box_css = "border: 1px solid #ddd; padding: 1px; ", extra_css = NULL
)
kbl
```
### Performance
The use of *extended_selection_table* objects can improve performance (in our case, measured as running time). Here we use **microbenchmark** to compare the performance of `sig2noise()` and **ggplot2** to plot the results. First, a larger selection table is created simply by repeating the sample data frame several times, and is then converted to an extended selection table:
```{r extn_sel_13, eval = FALSE}
# create long selection table
lng.selec.table <- do.call(rbind, replicate(10, lbh_selec_table,
simplify = FALSE
))
# relabels selec
lng.selec.table$selec <- 1:nrow(lng.selec.table)
# create extended selection table
lng_ext_st <- selection_table(
X = lng.selec.table, pb = FALSE,
extended = TRUE, confirm.extended = FALSE
)
# load packages
library(microbenchmark)
library(ggplot2)
# check performance
mbmrk.snr <- microbenchmark(extended = sig2noise(lng_ext_st,
mar = 0.05
), regular = sig2noise(lng.selec.table,
mar = 0.05
), times = 50)
autoplot(mbmrk.snr) + ggtitle("sig2noise")
```
The function runs much faster on extended selection tables. The performance gain is likely to be larger when longer recordings and bigger data sets are used (as this compensates for the computing overhead).
### Sharing acoustic data
This new object class makes it possible to share complete data sets, including the acoustic data. For example, the **NatureSounds** package contains an extended selection table with long-billed hermit hummingbird vocalizations from 10 different song types:
```{r}
data("Phae.long.est")
Phae.long.est
table(Phae.long.est$lek.song.type)
```
The ability to compress large data sets and the ease of performing analyses that require only a single R object can simplify the exchange of data and the reproducibility of bioacoustic analyses.
## **warbleR** functions and the workflow of analysis in bioacoustics
Bioacoustic analyses generally follow a specific sequence of processing and analysis steps. This sequence can be represented schematically as follows:
```{r, eval = FALSE, echo = FALSE}
library(warbleR)
wf <- ls("package:warbleR")
wf <- wf[-c(2, 7, 8, 10, 12, 16, 17, 19, 20, 23, 24, 28, 31, 32, 33, 38, 42, 43, 44, 47, 50, 53, 59, 64, 66, 68, 68, 72, 74, 80, 81, 85, 90, 93, 94, 96)]
df <- data.frame(funciones = wf, `Obtener-preparar grabaciones` = "", `Anotar` = "", `Medir` = "", `Revision` = "", `Inspeccion visual` = "", `Analisis estadistico` = "", `Otros` = "")
df2 <- edit(df)
df2$`organizar.anotaciones` <- ""
names(df2) <- names(df2)[c(1:3, 9, 4:8)]
df3 <- edit(df2)
df4 <- df3
df4[is.na(df4)] <- ""
df4 <- df4[df4$Obtener.preparar.grabaciones != "borrar", ]
names(df4) <- c("Funcion", "Obtener-preparar grabaciones", "Anotar", "Organizar anotaciones", "Medir estructura", "Verificar", "Inspeccion visual", "Analisis estadistico", "Otros")
rownames(df4) <- 1:nrow(df4)
df5 <- df4[order(df4$`Obtener-preparar grabaciones`, df4$Anotar, df4$`Organizar anotaciones`, df4$`Medir estructura`, df4$Verificar, df4$`Inspeccion visual`, df4$`Analisis estadistico`, df4$Otros, decreasing = TRUE), ]
df4 <- df4[c(5, 8, 18, 29, 34, 35, 37, 38, 39, 55, 56, 26, 1, 19, 40, 46, 4, 11, 16, 17, 24, 25, 32, 41, 45, 7, 12, 13, 14, 15, 23, 27, 30, 42, 47, 48, 57, 2, 3, 28, 44, 50, 51, 52, 58, 9, 10, 21, 22, 59, 6, 20, 31, 33, 36, 43, 49, 53, 54), ]
# write.csv(df4, "cuadro de funciones warbleR.csv", row.names = FALSE)
```
<img src="analysis-workflow.png" alt="analysis workflow">
We can group **warbleR** functions according to the bioacoustic analysis stages.
### Get and prepare recordings
The `query_xc()` function allows you to search and download sounds from the free-access database [Xeno-Canto](https://www.xeno-canto.org/). You can also convert .mp3 files to .wav, change the sampling rate of the files and fix corrupted files, among other tasks.
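A minimal sketch of a metadata query (not run; it assumes an internet connection):
```{r, eval = FALSE}
# get Xeno-Canto metadata for a species without downloading any sound files
phae_meta <- query_xc(qword = "Phaethornis longirostris", download = FALSE)
# after downloading recordings, mp3 files in the working directory could be converted to wav
# mp32wav()
```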
```{r, echo = FALSE, eval = TRUE}
library(kableExtra)
names(cf) <- gsub("\\.", " ", names(cf))
cf2 <- cf[cf$`Obtener preparar grabaciones` == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
```
### Annotating sound
We recommend making annotations in other programs and then importing them into R (for example, making selections in Raven and importing them with the **Rraven** package). However, **warbleR** offers some functions to facilitate manual or automatic annotation of sound files, as well as their subsequent manipulation (see the table below and the short sketch after it):
```{r, echo = FALSE, eval = TRUE}
cf2 <- cf[cf$Anotar == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
```
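As a minimal sketch (not run), automatic detection can be run over the sound files in the working directory; the argument values below are only illustrative and should be tuned to the signals of interest:
```{r, eval = FALSE}
ad <- auto_detec(bp = c(2, 10), threshold = 10, mindur = 0.05, maxdur = 0.5, ssmooth = 300)
head(ad)
```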
### Organize annotations
The annotations (or selection tables) can be manipulated and refined with a variety of functions. Selection tables can also be converted into the compact format *extended selection tables*:
```{r, echo = FALSE, eval = TRUE}
cf2 <- cf[cf$`Organizar anotaciones` == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
```
### Measure acoustic signal structure
Most **warbleR** functions are dedicated to quantifying the structure of acoustic signals listed in selection tables using batch processing. For this, 4 main measurement methods are offered:
1. Spectrographic parameters
1. Cross correlation
1. Dynamic time warping (DTW)
1. Statistical descriptors of cepstral coefficients
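As a minimal sketch (not run), each approach maps onto a core function; this assumes the sample data and its sound files are available in the working directory, and uses `mfcc_stats()` for the cepstral coefficient descriptors:
```{r, eval = FALSE}
sp <- spectro_analysis(lbh_selec_table)        # spectrographic parameters
xc <- cross_correlation(lbh_selec_table)       # spectrogram cross-correlation
dtw <- freq_DTW(lbh_selec_table, img = FALSE)  # dynamic time warping on frequency contours
cc <- mfcc_stats(lbh_selec_table)              # statistical descriptors of cepstral coefficients
```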
Most functions gravitate around these methods, or variations of these methods:
```{r, echo = FALSE, eval = TRUE}
cf2 <- cf[cf$`Medir estructura` == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
```
### Verify annotations
Functions are provided to detect inconsistencies in selection tables or to modify them. The package also offers several functions to generate spectrograms showing the annotations, which can be organized by annotation categories. This allows you to verify whether the annotations match the previously defined categories, which is particularly useful if the annotations were generated automatically.
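As a minimal sketch (not run), a catalog of spectrograms of all annotations can be generated for visual checking; this assumes the sample data and its sound files are available:
```{r, eval = FALSE}
# a multi-panel catalog of spectrograms, saved as image files in the working directory
catalog(lbh_selec_table, nrow = 4, ncol = 3, same.time.scale = TRUE, flim = c(1, 11))
# see ?catalog for arguments to tag and group panels by annotation categories
```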
```{r, echo = FALSE, eval = TRUE}
cf2 <- cf[cf$Verificar == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
```
### Visual inspection of annotations and measurements
```{r, echo = FALSE, eval = TRUE}
cf2 <- cf[cf$`Inspeccion visual` == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
```
### Additional functions
Finally, **warbleR** offers functions to simplify the use of extended selection tables, organize large numbers of spectrogram image files and generate elaborate signal visualizations:
```{r, echo = FALSE, eval = TRUE}
cf2 <- cf[cf$`Analisis estadistico` == "x" | cf$Otros == "x", c("Function", "Description", "Works on", "Output")]
cf2$Function <- cell_spec(x = cf2$Function, link = paste0("https://marce10.github.io/warbleR/reference/", cf2$Function, ".html"))
kbl <- kable(cf2, align = "c", row.names = F, format = "html", escape = F)
kbl <- column_spec(kbl, 1, bold = TRUE)
kbl <- column_spec(kbl, 2:4, italic = TRUE)
kbl <- kable_styling(kbl, bootstrap_options = "striped", font_size = 14)
kbl
```
---
## References
1. Araya-Salas M, G Smith-Vidaurre & M Webster. 2017. Assessing the effect of sound file compression and background noise on measures of acoustic signal structure. Bioacoustics 4622, 1-17
1. Araya-Salas M, Smith-Vidaurre G (2017) warbleR: An R package to streamline analysis of animal acoustic signals. Methods Ecol Evol 8:184-191.
---
<font size="4">Session information</font>
```{r session info, echo=F}
sessionInfo()
```
|
/scratch/gouwar.j/cran-all/cranData/warbleR/vignettes/Intro_to_warbleR.Rmd
|
---
title: <font size="7"><b>warbleR: Import sound files and select signals</b></font>
pagetitle: Import sound files and select signals
author:
- <a href="https://marce10.github.io">Marcelo Araya-Salas, PhD</a> & <a href="https://smith-vidaurre.com/">Grace Smith-Vidaurre</a>
date: "`r Sys.Date()`"
output:
rmarkdown::html_document:
self_contained: yes
toc: true
toc_depth: 3
toc_float:
collapsed: false
smooth_scroll: true
vignette: >
\usepackage[utf8]{inputenc}
%\VignetteIndexEntry{2. Import sound files and select signals}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
<!-- <script> -->
<!-- $(document).ready(function() { -->
<!-- $head = $('#header'); -->
<!-- $head.prepend('<img src=\"logo.png\"/>') -->
<!-- }); -->
<!-- </script> -->
<!-- -->
```{css, echo = FALSE}
div#header h1.title, div#header h3.subtitle, div#header h4.author, div#header h4.date {
text-align: center
}
```
## Bioacoustics in R with `warbleR`
<img src="warbleR_sticker.png" alt="warbleR logo" align="right" width="25%" height="25%">
Bioacoustics research encompasses a wide range of questions, study systems and methods, including the software used for analyses. The `warbleR` and `Rraven` packages leverage the flexibility of the `R` environment to offer a broad and accessible bioinformatics tool set. These packages fundamentally rely upon two types of data to perform bioacoustics analyses in R:
1. **Sound files:** Recordings in _wav_ or _mp3_ format, either from your own research or open-access databases like _xeno-canto_
2. **Selection tables:** Selection tables contain the temporal coordinates (start and end points) of selected acoustic signals within recordings
### Package repositories
These packages are both available on _CRAN_: [`warbleR`](https://cran.r-project.org/package=warbleR), [`Rraven`](https://cran.r-project.org/package=Rraven), as well as on _GitHub_: [`warbleR`](https://github.com/maRce10/warbleR), [`Rraven`](https://github.com/maRce10/Rraven). The GitHub repository will always contain the latest functions and updates. You can also check out an article in _Methods in Ecology and Evolution_ documenting the `warbleR` package <a href='#References'><sup>[1]</sup></a>.
We welcome all users to provide feedback, contribute updates or new functions and report bugs to warbleR's GitHub repository.
Please note that `warbleR` and `Rraven` use functions from the [`seewave`](https://cran.r-project.org/package=seewave), [`monitoR`](https://cran.r-project.org/package=monitoR), [`tuneR`](https://cran.r-project.org/package=tuneR) and [`dtw`](https://cran.r-project.org/package=dtw) packages internally. `warbleR` and `Rraven` have been designed to make bioacoustics analyses more accessible to `R` users, and such analyses would not be possible without the tools provided by the packages above. These packages should be given credit when using `warbleR` and `Rraven` by including citations in publications as appropriate (e.g. `citation("seewave")`).
### Parallel processing in `warbleR`
Parallel processing, or using multiple cores on your machine, can greatly speed up analyses. All iterative `warbleR` functions now have parallel processing for Linux, Mac and Windows operating systems. These functions also contain progress bars to visualize progress during normal or parallel processing. See <a href='#References'><sup>[1]</sup></a> for more details about improved running time using parallel processing.
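As a minimal sketch (not run), once the packages are loaded and sound files are available, the number of cores is set with the `parallel` argument of iterative functions; the value of 2 below is only an example:
```{r, eval = FALSE}
# run an iterative warbleR function using 2 cores
sp <- spectro_analysis(lbh_selec_table, parallel = 2)
```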
## **Vignette introduction**
Below we present a case study of microgeographic vocal variation in long-billed hermit hummingbirds, _Phaethornis longirostris_. Variation at small geographic scales has already been described in this species <a href='#References'><sup>[2]</sup></a>. Our goal is to search for visible differences in song structure within a site, and then determine whether underlying differences in acoustic parameters are representative of spectrographic distinctiveness. In this vignette, we will demonstrate how to:
1. Prepare for bioacoustics analyses by downloading `warbleR` and `Rraven`
2. Use `Rraven` to import _Raven_ selection tables for your own recordings
3. Obtain recordings from the open-access database [_xeno-canto_](https://www.xeno-canto.org/)
4. Select signals using `warbleR` functions
This vignette can be run without an advanced understanding of `R`, as long as you know how to run code in your console. However, knowing more about basic `R` coding would be very helpful to modify the code for your research questions.
For more details about function arguments, input or output, read the documentation for the function in question (e.g. `?query_xc`).
## **Prepare for analyses**
### Install and load packages
First, we need to install and load `warbleR` and `Rraven`. You will need `R` version >= 3.2.1 and `seewave` version >= 2.0.1. Also, users on `UNIX` machines (Linux or Mac operating systems) may need to install `fftw3`, `pkg-config` and `libsndfile` prior to installing `warbleR`. These external libraries need to be installed through a `UNIX` terminal. Installing them lies outside the scope of this vignette, but you can find more information on _Google_.
```{r, echo = FALSE, message = FALSE}
# remove all objects
rm(list = ls())
# unload all non-based packages
out <- sapply(paste("package:", names(sessionInfo()$otherPkgs), sep = ""), function(x) try(detach(x, unload = FALSE, character.only = TRUE), silent = TRUE))
# load packages
X <- c("warbleR", "knitr")
invisible(lapply(X, library, character.only = TRUE))
# library(kableExtra)
options(knitr.table.format = "html")
# opts_chunk$set(comment = "")
opts_knit$set(root.dir = tempdir())
options(width = 150, max.print = 100)
# from https://stackoverflow.com/questions/28961431/computationally-heavy-r-vignettes, so that vignettes will be built upon installation, but not executed during R CMD check (which is contributing to the /doc being too large)
is_check <- ("CheckExEnv" %in% search()) || any(c(
"_R_CHECK_TIMINGS_",
"_R_CHECK_LICENSE_"
) %in% names(Sys.getenv()))
knitr::opts_chunk$set(eval = !is_check, comment = "")
# for vignette checking and image file output
# setwd("~/Desktop/R/warbleR_example2/")
# website to fix gifs
# https://ezgif.com/optimize
vgn.path <- getwd()
# read data example for Rraven code
sels <- read.csv("Raven_sels.csv", stringsAsFactors = FALSE)
```
```{r, echo = TRUE, eval=FALSE}
### Install packages from CRAN
# Note that if you install from CRAN, then don't run the code to install from GitHub below, and vice versa
install.packages("warbleR")
install.packages("Rraven")
### Alternatively, install warbleR and Rraven from GitHub repositories, which contain the latest updates
# Run this ONLY if devtools is not already installed
install.packages("devtools")
# Load devtools to access the install_github function
library(devtools)
# Install packages from GitHub
# install_github("maRce10/warbleR")
# install_github("maRce10/Rraven")
# install_github("maRce10/NatureSounds")
# Load warbleR and Rraven into your global environment
X <- c("warbleR", "Rraven")
invisible(lapply(X, library, character.only = TRUE))
```
This vignette series will not always include all available `warbleR` functions, as existing functions are updated and new functions are added. To see all functions available in this package:
```{r, echo = TRUE, eval=FALSE}
# The package must be loaded in your working environment
ls("package:warbleR")
```
### Make a new directory and set your working directory
```{r, echo = TRUE, eval=FALSE}
# Create a new directory and set your working directory (assuming that you are in your /home/username directory)
dir.create(file.path(getwd(), "warbleR_example"))
setwd(file.path(getwd(), "warbleR_example"))
# Check your location
getwd()
```
## **Import selection tables**
`Rraven` is an interface between _Raven_ and `R` that allows you to import selection tables for your own recordings. This is very useful if you prefer to select signals in recordings outside of `R`. Once you have selection tables imported into `R` and the corresponding sound files in your working directory, you can move on to making spectrograms or performing analyses (see the next vignette in this series).
The sound files and selection tables loaded here correspond to male long-billed hermit hummingbird songs recorded at La Selva Biological Station in Costa Rica. Later, we will add to this data set by searching for more recordings on the _xeno-canto_ open-access database.
Check out the `Rraven` package documentation for more functions and information (although you will need _Raven_ or _Syrinx_ installed on your computer for some functions).
```{r, eval=FALSE, echo=TRUE}
# Load Raven example selection tables
data("selection_files")
# Write out Raven example selection tables as physical files
out <- lapply(1:2, function(x) {
writeLines(selection_files[[x]], con = names(selection_files)[x])
})
# Write example sound files out as physical .wav files
data(list = c("Phae.long1", "Phae.long2"))
writeWave(Phae.long1, "Phae.long1.wav")
writeWave(Phae.long2, "Phae.long2.wav")
```
```{r, eval=FALSE, echo=TRUE}
# Import selections
sels <- imp_raven(all.data = FALSE, freq.cols = FALSE, warbler.format = TRUE)
str(sels)
# Write out the imported selections as a .csv for later use
write.csv(sels, "Raven_sels.csv", row.names = FALSE)
```
### Make your data frame into an object of class `selection table`
Downstream `warbleR` functions require selection tables in order to run correctly. Use the function `selection_table` to convert your data frame into an object of class `selection_table`. In future versions of `warbleR`, all functions will require selection table objects of class `selection_table`.
```{r, echo=TRUE, eval=FALSE}
sels <- selection_table(X = sels)
str(sels)
class(sels)
```
## **Obtain metadata and recordings from [xeno-canto](https://www.xeno-canto.org/)**
The open-access [xeno-canto](https://www.xeno-canto.org/) database is an excellent source of sound files across avian species. You can query this database by a species or genus of interest. The function `query_xc` has two types of output:
1. **Metadata of recordings:** geographic coordinates, recording quality, recordist, type of signal, etc.
2. **Sound files:** Sound files in _mp3_ format are returned if the argument `download` is set to `TRUE`.
We recommend downloading metadata first from _xeno-canto_, as this data can be filtered in R to more efficiently download recordings (e.g. only those relevant to your question).
Here, we will query the _xeno-canto_ database to download more _Phaethornis longirostris_ sound files for our question of how male songs vary at a microgeographic scale.
```{r, eval=FALSE}
# Query xeno-canto for all Phaethornis recordings (e.g., by genus)
Phae <- query_xc(qword = "Phaethornis", download = FALSE)
# Check out the structure of the resulting data frame
str(Phae)
```
```{r, eval = TRUE, echo = FALSE, message = FALSE}
# Phae <- query_xc(qword = "Phaethornis", download = FALSE)
# write.csv(Phae, file = "~/Dropbox/warbleR/vignettes/Phae.XC.csv", row.names = FALSE)
Phae <- read.csv(file.path(vgn.path, "Phae.XC.csv"), stringsAsFactors = FALSE)
# Check out the structure of the resulting data frame
str(Phae)
```
```{r, eval=FALSE}
# Query xeno-canto for all Phaethornis longirostris recordings
Phae.lon <- query_xc(qword = "Phaethornis longirostris", download = FALSE)
# Check out the structure of the resulting data frame
str(Phae.lon)
```
```{r, eval = TRUE, echo = FALSE, message = FALSE}
# Phae.lon <- query_xc(qword = "Phaethornis longirostris", download = FALSE)
# write.csv(Phae.lon, file = "~/Dropbox/warbleR/vignettes/Phae.lon.XC.csv", row.names = FALSE)
Phae.lon <- read.csv(file.path(vgn.path, "Phae.lon.XC.csv"), stringsAsFactors = FALSE)
# Check out the structure of the resulting data frame
str(Phae.lon)
```
You can then use the function `map_xc` to visualize the geographic spread of the queried recordings. `map_xc` will create an image file of a map per species in your current directory if `img = TRUE`. If `img = FALSE`, maps will be displayed in the graphics device.
```{r, eval=FALSE}
# Image type default is jpeg, but tiff files have better resolution
# When the data frame contains multiple species, this will yield one map per species
map_xc(X = Phae, img = TRUE, it = "tiff") # all species in the genus
map_xc(X = Phae.lon, img = FALSE) # a single species
```
```{r, eval=TRUE, echo=FALSE, message=FALSE}
map_xc(X = Phae.lon, img = FALSE)
```
### Filter [xeno-canto](https://www.xeno-canto.org/) recordings by quality, signal type and locality
In most cases, you will need to filter the _xeno-canto_ metadata by type of signal you want to analyze. When you subset the metadata, you can input the filtered metadata back into `query_xc` to download only the selected recordings. There are many ways to filter data in R, and the example below can be modified to fit your own data.
Here, before downloading the sound files themselves from _xeno-canto_, we want to ensure that we select high quality sound files that contain songs (rather than other acoustic signal types) and were also recorded at La Selva Biological Station in Costa Rica.
```{r, eval=TRUE, echo=TRUE}
# How many recordings are available for Phaethornis longirostris?
nrow(Phae.lon)
# How many signal types exist in the xeno-canto metadata?
unique(Phae.lon$Vocalization_type)
# How many recordings per signal type?
table(Phae.lon$Vocalization_type)
```
```{r, eval=TRUE, echo=TRUE}
# Filter the metadata to select the signals we want to retain
# First by quality
Phae.lon <- Phae.lon[Phae.lon$Quality == "A", ]
nrow(Phae.lon)
# Then by signal type
Phae.lon.song <- Phae.lon[grep("song", Phae.lon$Vocalization_type, ignore.case = TRUE), ]
nrow(Phae.lon.song)
# Finally by locality
Phae.lon.LS <- Phae.lon.song[grep("La Selva Biological Station, Sarapiqui, Heredia", Phae.lon.song$Locality, ignore.case = FALSE), ]
# Check resulting data frame, 6 recordings remain
str(Phae.lon.LS)
```
We can check if the location coordinates make sense (all recordings should be from a single place in Costa Rica) by making a map of these recordings using `map_xc`.
```{r, eval=TRUE, echo=TRUE}
# map in the RStudio graphics device (img = FALSE)
map_xc(Phae.lon.LS, img = FALSE)
```
```{r, eval=FALSE, echo=FALSE}
# Not working as of 01 Aug 2017
# This copies the selected sound files to a dropbox folder so they can be shared
# do not show this code
fn <- with(Phae.lon.LS, paste(paste(Genus, Specific_epithet, Recording_ID, sep = "-"), ".wav", sep = " "))
file.copy(from = file.path("/home/m/Documents/Biblioteca de cantos/Trochilidae/XC/wavs", fn), to = file.path("/home/m/Dropbox/Projects/warbleR package/vignette files", fn), overwrite = TRUE)
wlist <- lapply(fn, function(x) downsample(readWave(file.path("/home/m/Dropbox/Projects/warbleR package/vignette files", x)), samp.rate = 22500))
names(wlist) <- fn
saveRDS(wlist, file = "/home/m/Dropbox/Sharing/warbleR/recs.RDS")
```
Once you're sure you want the recordings, use `query_xc` to download the files. Also, save the metadata as a _.csv_ file.
```{r, eval=FALSE}
# Download sound files
query_xc(X = Phae.lon.LS)
# Save the metadata object as a .csv file
write.csv(Phae.lon.LS, "Phae_lon.LS.csv", row.names = FALSE)
```
### Convert [xeno-canto](https://www.xeno-canto.org/) _mp3_ recordings to _wav_ format
[xeno-canto](https://www.xeno-canto.org/) maintains recordings in _mp3_ format due to file size restrictions. However, we require _wav_ format for all downstream analyses. Compression from _wav_ to _mp3_ and back involves information losses, but recordings that have undergone this transformation have been successfully used in research <a href='#References'> <sup>[3]</sup></a>.
To convert _mp3_ to _wav_, we can use the warbleR function `mp32wav`, which relies on an underlying function from the [`tuneR`](https://cran.r-project.org/package=tuneR) package. This function does not always work (and it remains unclear why!). This bug should be fixed in future versions of `tuneR`. If RStudio aborts when running `mp32wav`, use an online _mp3_ to _wav_ converter, or download the open-source software `Audacity` (available for Mac, Linux and Windows users).
After _mp3_ files have been converted, we need to check that the _wav_ files are not corrupted and can be read into RStudio (some _wav_ files can't be read due to format or permission issues).
```{r, eval=FALSE}
# Always check you're in the right directory beforehand
# getwd()
# here we are downsampling the original sampling rate of 44.1 kHz to speed up downstream analyses in the vignette series
mp32wav(samp.rate = 22.05)
# Use checkwavs to see if wav files can be read
check_wavs()
```
```{r, eval=FALSE, echo=FALSE}
# Not working 01 Aug 2017
### If you were unable to convert _mp3_ to _wav_ format:
# + download the file in [this link](https://www.dropbox.com/s/htpbxbdw8s4i23k/recs.RDS?dl=0) and put it in your working directory
# + then run the following code:
# recs <- readRDS(file = "recs.RDS")
#
# for(i in 1:length(recs))
# writeWave(recs[[i]], filename = names(recs)[i])
```
## **A note on combining data from different sources**
We now have _.wav_ files for the existing recordings (_Phae.long1.wav_ through _Phae.long4.wav_, representing recordings made in the field) as well as 6 recordings downloaded from _xeno-canto_. The existing Phae.long*.wav recordings have associated selection tables that were made in _Raven_, but the _xeno-canto_ recordings have no selection tables, as we have not yet parsed these sound files to select signals within them.
Depending on your question(s), you can combine your own sound files and those from `xeno-canto` into a single data set (after ground-truthing). This is made possible by the fact that `warbleR` functions will read in all sound files present in your working directory.
For the main case study in this vignette, we will move forwards with only the `xeno-canto` sound files. We will use the example sound files when demonstrating `warbleR` functions that are not mandatory for the case study but may be useful for your own workflow (e.g. `consolidate` below).
To continue the workflow, remove all example _wav_ files from your working directory:
```{r, echo=TRUE, eval=FALSE, message=FALSE}
# Make sure you are in the right working directory
# Note that all the example sound files begin with the pattern "Phae.long"
wavs <- list.files(pattern = "wav$")
wavs
rm <- wavs[grep("Phae.long", wavs)]
file.remove(rm)
# Check that the right wav files were removed
# Only xeno-canto wav files should remain
list.files(pattern = "wav$")
```
### Consolidate sound files across various directories
Since `warbleR` handles sound files in working directories, it's good practice to keep sound files associated with the same project in a single directory. If you're someone who likes to make a new directory for every batch of recordings or new analysis associated with the same project, you may find the `consolidate` function useful.
In case you have your own recordings in _wav_ format and have skipped previous sections, you must specify the location of the sound files you will use prior to running downstream functions by setting your working directory again.
```{r, echo=TRUE, eval=FALSE, message=FALSE}
# For this example, set your working directory to an empty temporary directory
setwd(tempdir())
# Here we will simulate the problem of having files scattered in multiple directories
# Load .wav file examples from the NatureSounds package
data(list = c("Phae.long1", "Phae.long2", "Phae.long3"))
# Create first folder inside the temporary directory and write new .wav files inside this new folder
dir.create("folder1")
writeWave(Phae.long1, file.path("folder1", "Phae_long1.wav"))
writeWave(Phae.long2, file.path("folder1", "Phae_long2.wav"))
# Create second folder inside the temporary directory and write new .wav files inside this second new folder
dir.create("folder2")
writeWave(Phae.long3, file.path("folder2", "Phae_long3.wav"))
# Consolidate the scattered files into a single folder, and make a .csv file that contains metadata (location, old and new names in the case that files were renamed)
invisible(consolidate(path = tempdir(), save.csv = TRUE))
list.files(path = "./consolidated_folder")
# set your working directory back to "/home/user/warbleR_example" for the rest of the vignette, or to whatever working directory you were using originally
```
## **Make long spectrograms of whole recordings**
`full_spectrograms` produces image files with spectrograms of whole sound files split into multiple rows. It is a useful tool for filtering by visual inspection.
`full_spectrograms` allows you to visually inspect the quality of the recording (e.g. amount of background noise) or the type, number, and completeness of the vocalizations of interest. You can discard the image files and recordings that you no longer want to analyze.
First, adjust the function arguments as needed. We can work on a subset of the recordings by specifying their names with the `flist` argument.
```{r, eval=FALSE}
# Create a vector of all the recordings in the directory
wavs <- list.files(pattern = "wav$")
# Print this object to see all sound files
# 6 sound files from xeno-canto
wavs
# Select a subset of recordings to explore full_spectrograms() arguments
# Based on the list of wav files we created above
sub <- wavs[c(1, 5)]
# How long are these files? this will determine number of pages returned by full_spectrograms
duration_wavs(sub)
# ovlp = 10 to speed up function
# tiff image files are better quality and are faster to produce
full_spectrograms(flist = sub, ovlp = 10, it = "tiff")
# We can zoom in on the frequency axis by changing flim,
# the number of seconds per row, and number of rows
full_spectrograms(flist = sub, flim = c(2, 10), sxrow = 6, rows = 15, ovlp = 10, it = "tiff")
```
Once satisfied with the argument settings we can make long spectrograms for all the sound files.
```{r, eval=FALSE}
# Make long spectrograms for the xeno-canto sound files
full_spectrograms(flim = c(2, 10), ovlp = 10, sxrow = 6, rows = 15, it = "jpeg", flist = wavs)
# Concatenate full_spectrograms image files into a single PDF per recording
# full_spectrograms images must be jpegs to do this
full_spectrograms2pdf(keep.img = FALSE, overwrite = TRUE)
```
The pdf image files (in the working directory) for the _xeno-canto_ recordings should look like this:
```{r, eval=FALSE, echo=FALSE}
# make all page-size images 700 pxls width
```
<!-- <center> </center> -->
The sound file name and page number are placed in the top right corner. The dimensions of the image are made to letter paper size for printing and subsequent visual inspection.
Recording _154123_ has a lot of background noise. Delete the _wav_ file for this recording to remove it from subsequent analyses.
```{r, eval=FALSE, echo=FALSE}
### Remove silence in sound files
# The function below removes silent segments of sound files. This can help reduce file size, which can speed up functions.
# giving error: Error in file.copy(from = wv, to = file.path(getwd(), "removed_silence_files", :
# more 'from' files than 'to' files
# here we will produce spectrograms of the silent gaps that were removed
# perform this on only the longer xeno-canto recordings
remove_silence(flist = wavs, min.sil.dur = 0.2, img = TRUE, it = "jpeg", flim = c(0, 12))
```
## **Select signals in _warbleR_**
_warbleR_ provides a function for selecting acoustic signals within recordings. `auto_detec` automatically detects the start and end of signals in sound files based on amplitude, duration, and frequency range attributes.
Detection is fastest with shorter recordings, but there are ways to deal with longer recordings (an hour long or more). In this section we have expanded on some important function arguments, but check out the function documentation for more information.
### Automatically detect signals with `auto_detec`
`auto_detec` has 2 types of output:
+ a data frame with the recording name, selection, start and end times. The last two are temporal coordinates that will be passed on to downstream functions to measure acoustic parameters
+ a spectrogram per recording, with red dotted lines marking the start and end of each detected signal, saved in your working directory
Check out the `auto_detec` documentation for more information. The argument `threshold` is one of the most important detection parameters, as well as information about signal frequency range and duration. _Phaethornis longirostris_ songs have frequencies between 2 and 10 kHz and durations between 0.05 and 0.5 s.
If you need to detect all or most of the signals within the recording, play around with different arguments to increase detection accuracy. Since you may need to do several rounds of optimization, we recommend using subsets of your recordings to speed up the process. If the species you study produces stereotyped signals, like _Phaethornis longirostris_, just a few signals are needed per recording, and a low-accuracy detection could yield enough selections.
`auto_detec` does not replace visual inspection of selected signals. Ensure that you set aside the time to inspect all selected signals for accuracy. You will often need to run detection functions several times, and in the process you will get to know your signals better (if you don't already).
```{r, eval=FALSE, echo=TRUE}
# Select a subset of sound files
# Reinitialize the wav object
wavs <- list.files(pattern = ".wav$", ignore.case = TRUE)
# Set a seed so we all have the same results
set.seed(1)
sub <- wavs[sample(1:length(wavs), 3)]
# Run auto_detec() on subset of recordings
# The data frame object output is printed to the console, we are not saving this in an object yet, since we are just playing around with argument settings
# you can run this in parallel to speed up computation time
auto_detec(flist = sub, bp = c(1, 10), threshold = 10, mindur = 0.05, maxdur = 0.5, envt = "abs", ssmooth = 300, ls = TRUE, res = 100, flim = c(1, 12), wl = 300, set = TRUE, sxrow = 6, rows = 15, redo = FALSE)
```
Check out the image files in your working directory. Note that some songs were correctly detected but other undesired sounds were also detected. In most cases, the undesired selections have a shorter duration than our target signals.
We won't save the `auto_detec` output in an object until we're satisfied with the detection. To improve our detection we should play around with argument values. Also note that the image files produced by `auto_detec` contain the values used for the different arguments, which can help you compare between runs. Below are some detection parameters that work well for these _Phaethornis longirostris_ recordings:
```{r, eval=FALSE, echo = TRUE}
auto_detec(flist = sub, bp = c(2, 10), threshold = 20, mindur = 0.09, maxdur = 0.22, envt = "abs", ssmooth = 900, ls = TRUE, res = 100, flim = c(1, 12), wl = 300, set = TRUE, sxrow = 6, rows = 15, redo = TRUE, it = "tiff", img = TRUE, smadj = "end")
```
This combination of arguments seems to provide a good detection for most recordings (e.g. recording ID 154161).
Once we're satisfied with the detection, we can run `auto_detec` on all the recordings, removing the argument `flist` (so that `auto_detec` runs over all _wav_ files in the working directory). We will also save the output (the temporal coordinates) in an object.
```{r, eval=FALSE, echo=TRUE}
Phae.ad <- auto_detec(bp = c(2, 10), threshold = 20, mindur = 0.09, maxdur = 0.22, envt = "abs", ssmooth = 900, ls = TRUE, res = 100, flim = c(2, 10), wl = 300, set = TRUE, sxrow = 6, rows = 15, redo = TRUE, it = "tiff", img = TRUE, smadj = "end")
```
Let's look at the number of selections per sound file:
```{r, eval=FALSE, echo=TRUE}
table(Phae.ad$sound.files)
```
### Use SNR to filter automatically selected signals
Signal-to-noise ratio (SNR) can be a useful filter for automated signal detection. When background noise is detected as a signal it will have a low SNR, and this characteristic can be used to remove background noise from the `auto_detec` selection table. SNR = 1 means the signal and background noise have the same amplitude, so signals with SNR <= 1 are poor quality. SNR calculations can also be used for different purposes throughout your analysis workflow.
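As a minimal sketch of such a filter (not run; it assumes the `auto_detec` output `Phae.ad` created above, and the noise margin `mar` is discussed in the next subsection):
```{r, eval = FALSE}
# measure SNR and drop detections that are no louder than the background noise
snr_check <- sig2noise(X = Phae.ad, mar = 0.04)
snr_check <- snr_check[snr_check$SNR > 1, ]
```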
#### Optimize SNR measurements
`snr_spectrograms` is a function in the family of spectrogram creators that allows you to pick a margin for measuring noise. These margins are very important for calculating SNR, especially when working with signals separated by short gaps (e.g. duets).
```{r, eval=FALSE}
# A margin that's too large causes other signals to be included in the noise measurement
# Re-initialize X as needed, for either auto_detec output
# Try this with a small subset (~5%) of the selections first
# Set a seed first, so we all have the same results
set.seed(5)
X <- Phae.ad[sample(1:nrow(Phae.ad), (nrow(Phae.ad) * 0.05)), ]
nrow(X)
snr_spectrograms(X = X, flim = c(2, 10), snrmar = 0.5, mar = 0.7, it = "jpeg")
```
Check out the image files in your working directory. This margin overlaps neighboring signals, so a smaller margin would be better.
```{r, eval=FALSE}
# This smaller margin is better
snr_spectrograms(X = X, flim = c(2, 10), snrmar = 0.04, mar = 0.7, it = "jpeg")
```
<!-- <center>  </center> -->
#### Calculate SNR for automatically selected signals
Once we've picked an SNR margin we can move forward with the SNR calculation. We will measure SNR on every other selection to speed up the process.
```{r, eval=FALSE}
Phae.snr <- sig2noise(X = Phae.ad[seq(1, nrow(Phae.ad), 2), ], mar = 0.04)
```
As we just need a few songs to characterize individuals (here sound files are equivalent to different individuals), we can choose selections with the highest SNR per sound file. In this example, we will choose 5 selections per recording with the highest SNRs.
```{r, eval=FALSE}
Phae.hisnr <- Phae.snr[ave(-Phae.snr$SNR, Phae.snr$sound.files, FUN = rank) <= 5, ]
# save the selections as a physical file
write.csv(Phae.hisnr, "Phae_hisnr.csv", row.names = FALSE)
# Double check the number of selection per sound files
# Only the xeno-canto sound files will have 5 selections, the other sound files started off with less than 5 selections
table(Phae.hisnr$sound.files)
```
```{r, eval=FALSE, echo=FALSE}
Phae.hisnr <- read.csv("Phae_hisnr.csv", header = TRUE)
table(Phae.hisnr$sound.files)
```
## **Next vignette: Visual inspection and signal classification**
Here we have given examples of how to begin the `warbleR` workflow. Note that there are many different ways to begin the workflow, depending on your question and source of data. After running the code in this first vignette, you should now have an idea of:
* the type of data used in _warbleR_ (sound files and selections)
* how to import _Raven_ selection tables for your own sound files
* how to obtain open-access _xeno-canto_ sound files
* how to create long spectrograms of recordings for visual inspection
* how to select signals within sound files in `warbleR`
- automatic selection
- filtering automatically selected signals using SNR
- manual selection
The next vignette will cover the second phase of the _warbleR_ workflow, which includes methods to visualize signals for quality control and classification.
## **Citation**
Please cite `warbleR` when you use the package:
Araya-Salas, M. and Smith-Vidaurre, G. (2017), warbleR: an R package to streamline analysis of animal acoustic signals. Methods Ecol Evol. 8, 184-191.
## **Reporting bugs**
Please report any bugs [here](https://github.com/maRce10/warbleR/issues).
## <font size="5"><a name="References">References</a></font>
1. Araya-Salas, M. and G. Smith-Vidaurre. 2016. warbleR: an R package to streamline analysis of animal acoustic signals. _Methods in Ecology and Evolution_. doi: 10.1111/2041-210X.12624
2. Araya-Salas, M. and T. Wright. 2013. Open-ended song learning in a hummingbird. _Biology Letters_. 9 (5). doi: 10.1098/rsbl.2013.0625
3. Medina-Garcia, Angela, M. Araya-Salas, and T. Wright. 2015. Does vocal learning accelerate acoustic diversification? Evolution of contact calls in Neotropical parrots. _Journal of Evolutionary Biology_. doi: 10.1111/jeb.12694
|
/scratch/gouwar.j/cran-all/cranData/warbleR/vignettes/warbleR_workflow_01.Rmd
|
---
title: <font size="7"><b>"warbleR: Visual inspection and signal classification</b></font>
pagetitle: Visual inspection and signal classification
author:
- <a href="https://marce10.github.io">Marcelo Araya-Salas, PhD</a> & <a href="https://smith-vidaurre.com/">Grace Smith-Vidaurre</a>
date: "`r Sys.Date()`"
output:
  rmarkdown::html_document:
    self_contained: yes
    toc: true
    toc_depth: 3
    toc_float:
      collapsed: false
      smooth_scroll: true
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{3. Visual inspection and signal classification}
  %\VignetteEngine{knitr::rmarkdown}
---
<!-- <center> <h1><b>Visual inspection and classification of signals</h1></b> -->
<!-- <center> <i><font size="4">Marcelo Araya-Salas and Grace Smith Vidaurre</font></i> </center> -->
<!-- <center> `r Sys.Date()` </center> -->
```{css, echo = FALSE}
div#header h1.title, div#header h3.subtitle, div#header h4.author, div#header h4.date {
text-align: center
}
```
## Bioacoustics in R with `warbleR`
<img src="warbleR_sticker.png" alt="warbleR logo" align="right" width="25%" height="25%">
Bioacoustics research encompasses a wide range of questions, study systems and methods, including the software used for analyses. The `warbleR` and `Rraven` packages leverage the flexibility of the `R` environment to offer a broad and accessible bioinformatics tool set. These packages fundamentally rely upon two types of data to begin bioacoustic analyses in R:
1. **Sound files:** Recordings in _wav_ or _mp3_ format, either from your own research or open-access databases like _xeno-canto_
2. **Selection tables:** Selection tables contain the temporal coordinates (start and end points) of selected acoustic signals within recordings
### Package repositories
These packages are both available on _CRAN_: [`warbleR`](https://cran.r-project.org/package=warbleR/), [`Rraven`](https://cran.r-project.org/package=Rraven), as well as on _GitHub_: [`warbleR`](https://github.com/maRce10/warbleR), [`Rraven`](https://github.com/maRce10/Rraven). The GitHub repository will always contain the latest functions and updates. You can also check out an article in _Methods in Ecology and Evolution_ documenting the `warbleR` package <a href='#References'><sup>[1]</sup></a>.
We welcome all users to provide feedback, contribute updates or new functions and report bugs to warbleR's GitHub repository.
Please note that `warbleR` and `Rraven` use functions from the [`seewave`](https://cran.r-project.org/package=seewave), [`monitoR`](https://cran.r-project.org/package=monitoR), [`tuneR`](https://cran.r-project.org/package=tuneR) and [`dtw`](https://cran.r-project.org/package=dtw) packages internally. `warbleR` and `Rraven` have been designed to make bioacoustics analyses more accessible to `R` users, and such analyses would not be possible without the tools provided by the packages above. These packages should be given credit when using `warbleR` and `Rraven` by including citations in publications as appropriate (e.g. `citation("seewave")`).
### Parallel processing in `warbleR`
Parallel processing, or using multiple cores on your machine, can greatly speed up analyses. All iterative `warbleR` functions now have parallel processing for Linux, Mac and Windows operating systems. These functions also contain progress bars to visualize progress during normal or parallel processing. See <a href='#References'><sup>[1]</sup></a> for more details about improved running time using parallel processing.
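As a quick sketch of what this looks like in practice (using the example data bundled with `warbleR`; the `parallel` and `pb` argument names are those used in recent versions of the package, and the spectrogram settings are arbitrary):
```{r, eval=FALSE}
# example data bundled with warbleR
data(list = c("Phae.long1", "selec.table"))
writeWave(Phae.long1, "Phae.long1.wav")
# run an iterative function over the selections using 2 cores, with a progress bar
spectrograms(selec.table[selec.table$sound.files == "Phae.long1.wav", ],
  wl = 300, flim = c(2, 10), it = "jpeg", parallel = 2, pb = TRUE
)
```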
## **Vignette introduction**
In the previous vignette, we used the `Rraven` package to import _Raven_ selection tables for recordings in our working directory, added more recordings to the data set by downloading new sound files from the open-access `xeno-canto` database and reviewed methods of automated and manual signal selection in `warbleR`. Here we continue with the case study of microgeographic vocal variation in long-billed hermit hummingbirds, _Phaethornis longirostris_ <a href='#References'><sup>[2]</sup></a> by:
1. Performing quality control processing on selected signals, including visual inspection and tailoring temporal coordinates
2. Making lexicons for visual classification of signals
This vignette can be run without an advanced understanding of `R`, as long as you know how to run code in your console. However, knowing more about basic `R` coding would be very helpful to modify the code for your research questions.
For more details about function arguments, input or output, read the documentation for the function in question (e.g. `?catalog`).
```{r, echo = FALSE, message = FALSE}
# remove all objects
rm(list = ls())
# unload all non-based packages
out <- sapply(paste("package:", names(sessionInfo()$otherPkgs), sep = ""), function(x) try(detach(x, unload = FALSE, character.only = TRUE), silent = TRUE))
# load packages
X <- c("warbleR", "knitr")
invisible(lapply(X, library, character.only = TRUE))
# library(kableExtra)
options(knitr.table.format = "html")
# opts_chunk$set(comment = "")
# opts_knit$set(root.dir = tempdir())
options(width = 150, max.print = 100)
# from https://stackoverflow.com/questions/28961431/computationally-heavy-r-vignettes, so that vignettes will be built upon installation, but not executed during R CMD check (which is contributing to the /doc being too large)
is_check <- ("CheckExEnv" %in% search()) || any(c(
"_R_CHECK_TIMINGS_",
"_R_CHECK_LICENSE_"
) %in% names(Sys.getenv()))
knitr::opts_chunk$set(eval = !is_check, comment = "")
# for vignette checking and image file output
# setwd("~/Desktop/R/warbleR_example2/")
# website to fix gifs
# https://ezgif.com/optimize
```
## **Prepare for analyses**
```{r, eval=FALSE}
library(warbleR)
# set your working directory appropriately
# setwd("/path/to/working directory")
# run this if you have restarted RStudio between vignettes without saving your workspace (assuming that you are in your /home/username directory)
setwd(file.path(getwd(), "warbleR_example"))
# Check your location
getwd()
```
This vignette series will not always include all available `warbleR` functions, as existing functions are updated and new functions are added. To see all functions available in this package:
```{r, echo = TRUE, eval=FALSE}
# The package must be loaded in your working environment
ls("package:warbleR")
```
## **Quality control filtering of selections**
### Find overlapping selections
Overlapping selections can sometimes arise after selecting signals using other functions or software. The function below helps you detect overlapping signals in your selection table, and has arguments that you can play around with for overlap detection, renaming or deleting overlapping selections.
```{r, echo=TRUE, eval=FALSE}
# To run this example:
# Open Phae_hisnr.csv and modify the start coordinate of the first selection and the end coordinate of the second selection so that the signals overlap
Phae.hisnr <- read.csv("Phae_hisnr.csv", header = TRUE)
str(Phae.hisnr)
head(Phae.hisnr, n = 15)
# yields a data frame with an additional column (ovlp.sels) that indicates which selections overlap
Phae.hisnr <- overlapping_sels(X = Phae.hisnr, max.ovlp = 0)
# run the function again but this time retain only the signals that don't overlap
Phae.hisnr <- overlapping_sels(X = Phae.hisnr, max.ovlp = 0, drop = TRUE)
```
### Make spectrograms of selections
`spectrograms` generates spectrograms of individual selected signals. These image files can be used to filter out selections that were poorly made or represent signals that are not relevant to your analysis. This quality control step is important for visualizing your selected signals after any selection method, even if you imported your selections from _Raven_ or _Syrinx_.
```{r, eval=FALSE}
spectrograms(Phae.hisnr, wl = 300, flim = c(2, 10), it = "jpeg", res = 150, osci = TRUE, ovlp = 90)
```
Inspect spectrograms and throw away image files that are poor quality to prepare for later steps. Make sure you are working in a directory that only has image files associated with this vignette. Delete the image files corresponding to recording _154070_ selection _8_ and _154070_ selection _12_, as the start coordinates for these selections are not accurate.
### Remove selections with missing image files
```{r, eval=FALSE}
# remove selections after deleting corresponding image files
Phae.hisnr2 <- filtersels(Phae.hisnr, it = "jpeg", incl.wav = TRUE)
nrow(Phae.hisnr2)
```
After removing the poorest quality selections or signals, there are some other quality control steps that may be helpful.
### Check selections
Can selections be read by downstream functions? The function `checksels` also yields a data frame with columns for duration, minimum samples, sampling rate, channels and bits.
```{r, echo=TRUE, eval=FALSE}
# if selections can be read, "OK" will be printed to check.res column
checksels(Phae.hisnr2, check.header = FALSE)
```
If selections cannot be read, it is possible the sound files are corrupt. If so, use the `fixwavs` function to repair _wav_ files.
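As a rough sketch of how that might look (the `checksels` argument of `fixwavs()` is an assumption based on recent versions of the function):
```{r, eval=FALSE}
# run the check and keep the output
cs <- checksels(Phae.hisnr2, check.header = FALSE)
# rewrite only the files flagged as problematic
# (passing checksels output to fixwavs is an assumption based on recent warbleR versions)
fixwavs(checksels = cs[cs$check.res != "OK", ])
```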
```{r, eval=FALSE, echo=FALSE}
# ### Cut selections into individual sound files
#
# Listening to signals complements visual inspection and classification. The function `cut_sels` can be very useful for aural comparison of selected signals. Selected signals can be played as individual sounds rather than having to open up entire sound files. As a word of caution, generating cuts of sound files will also propagate any naming errors present in your original files.
#
# `cut_sels` can also be used to your advantage if your original recordings are long (over 10-15 minutes). Some _warbleR_ functions run slowly on long recordings, so it's helpful to use shorter duration sound files. You can make selections of shorter pieces of long original recordings, either in _Raven_ or _Syrinx_, and use `cut_sels` to generate shorter segments for smoother signal detection in `warbleR`.
cut_sels(X = Phae.hisnr2, mar = 0.01, labels = c("sound.files", "selec"))
# bug in the above cut_sels code
# Error in apply(X[, sapply(X, is.factor)], 2, as.character) :
# dim(X) must have a positive length
# cut_sels(selec.table) # this works!
```
### Tailor temporal coordinates of selections
Sometimes the start and end times of selected signals need fine-tuned adjustments. This is particularly true when signals are found within bouts of closely delivered sounds that may be hard to pull apart, such as duets, or if multiple researchers use different rules-of-thumb to select signals. `tailor_sels` provides an interactive interface for tailoring the temporal coordinates of selections.
If you check out the image files generated by running `spectrograms` above, you'll see that some of the selections made during the automatic detection process with `auto_detec` do not have accurate start and/or end coordinates.
For instance:
The end of this signal is not well selected.
<!-- <center> </center> -->
The temporal coordinates for the tailored signals will be saved in a _.csv_ file called `seltailor_output.csv`. You can rename this file and read it back into `R` to continue downstream analyses.
```{r, eval=FALSE}
tailor_sels(Phae.hisnr2, wl = 300, flim = c(2, 10), wn = "hanning", mar = 0.1, osci = TRUE, title = c("sound.files", "selec"), auto.next = TRUE)
# Read in tailor_sels output after renaming the csv file
Phae.hisnrt <- read.csv("Phae_hisnrt.csv", header = TRUE)
str(Phae.hisnrt)
```
```{r, eval=TRUE, echo=FALSE}
Phae.hisnrt <- read.csv("Phae_hisnrt.csv", header = TRUE)
str(Phae.hisnrt)
```
## **Visual classification of selected signals**
Visual classification of signals is fundamental to vocal repertoire analysis, and can also be useful for other questions. If your research focuses on assessing variation between individuals or groups, several `warbleR` functions can provide you with important information about how to steer your analysis. If there is discrete variation in vocalization structure across groups (e.g. treatments or geographic regions), visual classification of vocalizations will be useful.
### Print long spectrograms with `full_spectrograms`
The function `full_spectrograms` that we used in the last vignette can also be a tool for visually classifying signals. Long spectrograms can be printed to classify signals by hand, or comments accompanying the selections can be printed over selected signals.
Here, we print the start and end of selections with a red dotted line, and the selection number printed over the signal. If a selection data frame contains a comments column, these will be printed with the selection number.
```{r, eval=FALSE, echo=TRUE}
# highlight selected signals
full_spectrograms(Phae.hisnrt, wl = 300, flim = c(2, 10), ovlp = 10, sxrow = 6, rows = 15, it = "jpeg")
# concatenate full_spectrograms image files into a single PDF per recording
# full_spectrograms images must be jpegs
full_spectrograms2pdf(keep.img = FALSE, overwrite = TRUE)
```
```{r, eval=FALSE, echo=FALSE}
# Note for later...full_spectrograms2pdf works on auto_detec files in the working directory too...maybe including a suffix argument would help
```
Check out the image file in your working directory. These will look very similar to the `full_spectrograms` images produced in vignette 1, but with red dotted lines indicating where the selected signals start and end.
### Highlight spectrogram regions with `color_spectro`
`color_spectro` allows you to highlight selections you've made within a short region of a spectrogram. In the example below we will use `color_spectro` to highlight neighboring songs. This function has a wide variety of uses, and could be especially useful for analysis of duets or coordinated singing bouts. This example is taken directly from the `color_spectro` documentation. If working with your own data frame of selections, make sure to calculate the frequency range for your selections beforehand using the function `frange`, which will come up in the next vignette.
```{r, eval=FALSE}
# we will use Phaethornis songs and selections from the warbleR package
data(list = c("Phae.long1", "selec.table"))
writeWave(Phae.long1, "Phae.long1.wav") # save sound files
# subset selection table
# already contains the frequency range for these signals
st <- selec.table[selec.table$sound.files == "Phae.long1.wav", ]
# read wave file as an R object
sgnl <- tuneR::readWave(as.character(st$sound.files[1]))
# create color column
st$colors <- c("red2", "blue", "green")
# highlight selections
color_spectro(wave = sgnl, wl = 300, ovlp = 90, flim = c(1, 8.6), collevels = seq(-90, 0, 5), dB = "B", X = st, col.clm = "colors", base.col = "skyblue", t.mar = 0.07, f.mar = 0.1)
```
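If your own selection table does not yet have frequency range columns, a quick sketch of adding them beforehand (the argument values here are arbitrary starting points):
```{r, eval=FALSE}
# frange() adds bottom.freq and top.freq columns (used by color_spectro)
# and also writes detection images to the working directory
Phae.hisnrt.fr <- frange(Phae.hisnrt, wl = 300, flim = c(1, 12), bp = c(1, 12), threshold = 10)
str(Phae.hisnrt.fr)
```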
```{r, eval = FALSE, echo = FALSE}
# was getting bugs using the xeno-canto recordings
# but code sort of works for the following code:
# problem is that code takes a while to run and then shows the whole long spectrogram
# suggestion for color spectro - an argument to zoom in on section of x-axis?
X <- Phae.hisnrt[Phae.hisnrt$sound.files == "Phaethornis-longirostris-154072.wav", ]
X$colors <- c("red2", "blue", "green", "yellow", "orange")
X2 <- frange(X)
# View(X2)
color_spectro(
wave = readWave("Phaethornis-longirostris-154072.wav"), wl = 300, ovlp = 90, flim = c(1, 8.6), collevels = seq(-90, 0, 5),
dB = "B", X = X2, col.clm = "colors", base.col = "skyblue", t.mar = 0.07, f.mar = 0.1
)
```
### Optimize spectrogram display parameters
`spec_param` makes a catalog or mosaic of the same signal plotted with different combinations of spectrogram display arguments. The purpose of this function is to help you choose parameters that yield the best spectrograms (e.g. optimal visualization) for your signals (although low signal-to-noise ratio selections may be an exception).
```{r, eval=FALSE, echo=FALSE}
# spec_param takes a single selection from the selection table as input
spec_param(Phae.hisnrt[1, ], length.out = 5, ovlp = 90, wl = c(150, 900), wn = c("hanning", "bartlett"), collev.min = c(-60, -30), pal = "reverse.gray.colors.2", path = NULL, rm.axes = TRUE, cex = 0.45, flim = c(2, 10))
```
<!-- <center> </center> -->
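As a sketch of one way to call it (the parameter ranges below are just starting points to explore):
```{r, eval=FALSE}
# spec_param takes a single selection and plots a mosaic of spectrograms
# over combinations of window length, window name and minimum amplitude level
spec_param(Phae.hisnrt[1, ], length.out = 5, ovlp = 90, wl = c(150, 900), wn = c("hanning", "bartlett"), collev.min = c(-60, -30), pal = "reverse.gray.colors.2", rm.axes = TRUE, cex = 0.45, flim = c(2, 10))
```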
### Make lexicons of signals
When we are interested in geographic variation of acoustic signals, we usually want to compare spectrograms from different individuals and sites. This can be challenging when working with large numbers of signals, individuals and/or sites. `catalog` aims to simplify this task.
This is how it works:
* `catalog` plots a matrix of spectrograms from signals listed in a selection table
* the catalog files are saved as image files in the working directory (or path provided)
* Several image files are generated if the signals do not fit in a single file
* Spectrograms can be labeled or color-tagged to facilitate exploring variation related to the parameter of interest (e.g. site or song type if already classified)
* A legend can be added to help match colors with tag levels
* different color palettes can be used for each tag
* The duration of the signals can be "fixed" such that all the spectrograms have the same duration
    * facilitates comparisons
* You can control the number of rows and columns as well as the width and height of the output image
`catalog` allows you to group signals into biologically relevant groups by coloring the background of selected spectrograms accordingly. There is also an option to add hatching to tag labels, as well as filling the catalog with spectrograms by rows or columns of the selection table data frame, among other additional arguments.
The `move_imgs` function can come in handy when creating multiple catalogs to avoid overwriting previous image files, or when working through rounds of other image files. In this case, the first catalog we create has signals labeled, tagged and grouped with respective color and hatching levels. The second catalog we create will not have any grouping of signals whatsoever, and could be used for a test of inter-observer reliability. `move_imgs` helps us move the first catalog into another directory to save it from being overwritten when creating the second catalog.
```{r, eval=FALSE}
# create a column of recording IDs for friendlier catalog labels
rec_ID <- sapply(1:nrow(Phae.hisnrt), function(x) {
gsub(x = strsplit(as.character(Phae.hisnrt$sound.files[x]), split = "-")[[1]][3], pattern = ".wav$", replacement = "")
})
rec_ID
Phae.hisnrt$rec_ID <- rec_ID
str(Phae.hisnrt)
# set color palette
# alpha controls transparency for softer colors
cmc <- function(n) cm.colors(n, alpha = 0.8)
catalog(X = Phae.hisnrt, flim = c(2, 10), nrow = 4, ncol = 3, height = 10, width = 10, tag.pal = list(cmc), cex = 0.8, same.time.scale = TRUE, mar = 0.01, wl = 300, gr = FALSE, labels = "rec_ID", tags = "rec_ID", hatching = 1, group.tag = "rec_ID", spec.mar = 0.4, lab.mar = 0.8, max.group.cols = 5)
catalog2pdf(keep.img = FALSE, overwrite = TRUE)
# assuming we are working from the warbleR_example directory
# the ~/ format does not apply to Windows
# make sure you have already moved or deleted all other pdf files
move_imgs(from = ".", it = "pdf", create.folder = TRUE, folder.name = "Catalog_image_files")
```
```{r, eval = FALSE, echo = FALSE}
# suggestion for move_imgs
# add argument for regex so as not to delete/move all image files of a given type
# and be able to move just "Cat*.pdf"...etc
```
You can also make lexicons for blind scoring, which could be useful for determining interobserver reliability.
```{r, eval=FALSE}
# now create a catalog without labels, tags, groups or axes
Phae.hisnrt$no_label <- ""
# catalog(X = Phae.hisnrt, flim = c(1, 10), nrow = 4, ncol = 3, height = 10, width = 10, cex = 0.8, same.time.scale = TRUE, mar = 0.01, wl = 300, spec.mar = 0.4, rm.axes = TRUE, labels = "no_label", lab.mar = 0.8, max.group.cols = 5, img.suffix = "nolabel")
catalog(X = Phae.hisnrt, flim = c(1, 10), nrow = 4, ncol = 3, height = 10, width = 10, tag.pal = list(cmc), cex = 0.8, same.time.scale = TRUE, mar = 0.01, wl = 300, gr = FALSE, labels = "no_label", spec.mar = 0.4, lab.mar = 0.8, max.group.cols = 5, img.suffix = "nolabels")
catalog2pdf(keep.img = FALSE, overwrite = TRUE)
```
<!-- <center> </center> -->
<!-- -->
### **Next vignette: Acoustic (dis)similarity, coordinated singing and simulating songs**
Here we finished the second phase of the `warbleR` workflow, which includes various options for quality control filtering or visual classification of signals that you can leverage during acoustic analysis. After running the code in this vignette, you should now have an idea of how to:
* perform quality control filtering of your selected signals, including visual inspection and tailoring the temporal coordinates of selections
* use different methods for visual classification of signals, including:
    * long spectrograms
    * highlighted regions within spectrograms
    * catalogs or lexicons of individual signals
The next vignette will cover the third phase of the _warbleR_ workflow, which includes methods to perform acoustic measurements as a batch process, an example of how to use these measurements for an analysis of geographic variation, coordinated singing analysis and a new function to simulate songs.
## **Citation**
Please cite `warbleR` when you use the package:
Araya-Salas, M. and Smith-Vidaurre, G. (2017), warbleR: an R package to streamline analysis of animal acoustic signals. Methods Ecol Evol. 8, 184-191.
## **Reporting bugs**
Please report any bugs [here](https://github.com/maRce10/warbleR/issues).
<font size="5"><a name="References">References</a></font>
1. Araya-Salas, M. and G. Smith-Vidaurre. 2016. warbleR: an R package to streamline analysis of animal
acoustic signals. _Methods in Ecology and Evolution_. doi: 10.1111/2041-210X.12624
2. Araya-Salas, M. and T. Wright. 2013. Open-ended song learning in a hummingbird. _Biology Letters_. 9 (5). doi: 10.1098/rsbl.2013.0625
|
/scratch/gouwar.j/cran-all/cranData/warbleR/vignettes/warbleR_workflow_02.Rmd
|
---
title: <font size="7"><b>warbleR: Acoustic (dis)similarity, coordinated singing and simulating songs</b></font>
pagetitle: Acoustic (dis)similarity, coordinated singing and simulating songs
author:
- <a href="https://marce10.github.io">Marcelo Araya-Salas, PhD</a>
- <a href="https://smith-vidaurre.com/">Grace Smith-Vidaurre</a>
date: "`r Sys.Date()`"
output:
  rmarkdown::html_document:
    self_contained: yes
    toc: true
    toc_depth: 3
    toc_float:
      collapsed: false
      smooth_scroll: true
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{4. Acoustic (dis)similarity, coordinated singing and simulating songs}
  %\VignetteEngine{knitr::rmarkdown}
---
<!-- <center> <h1><b>Acoustic (dis)similarity and coordinated singing</h1></b> </center> -->
<!-- <center> <i><font size="4">Marcelo Araya-Salas and Grace Smith Vidaurre</font></i> </center> -->
<!-- <center> `r Sys.Date()` </center> -->
```{css, echo = FALSE}
div#header h1.title, div#header h3.subtitle, div#header h4.author, div#header h4.date {
text-align: center
}
```
## Bioacoustics in R with `warbleR`
<img src="warbleR_sticker.png" alt="warbleR logo" align="right" width="25%" height="25%">
Bioacoustics research encompasses a wide range of questions, study systems and methods, including the software used for analyses. The `warbleR` and `Rraven` packages leverage the flexibility of the `R` environment to offer a broad and accessible bioinformatics tool set. These packages fundamentally rely upon two types of data to begin bioacoustic analyses in R:
1. **Sound files:** Recordings in _wav_ or _mp3_ format, either from your own research or open-access databases like _xeno-canto_
2. **Selection tables:** Selection tables contain the temporal coordinates (start and end points) of selected acoustic signals within recordings
### Package repositories
These packages are both available on _CRAN_: [`warbleR`](https://cran.r-project.org/package=warbleR), [`Rraven`](https://cran.r-project.org/package=Rraven), as well as on _GitHub_: [`warbleR`](https://github.com/maRce10/warbleR), [`Rraven`](https://github.com/maRce10/Rraven). The GitHub repository will always contain the latest functions and updates. You can also check out an article in _Methods in Ecology and Evolution_ documenting the `warbleR` package <a href='#References'><sup>[1]</sup></a>.
We welcome all users to provide feedback, contribute updates or new functions and report bugs to warbleR's GitHub repository.
Please note that `warbleR` and `Rraven` use functions from the [`seewave`](https://cran.r-project.org/package=seewave), [`monitoR`](https://cran.r-project.org/package=monitoR), [`tuneR`](https://cran.r-project.org/package=tuneR) and [`dtw`](https://cran.r-project.org/package=dtw) packages internally. `warbleR` and `Rraven` have been designed to make bioacoustics analyses more accessible to `R` users, and such analyses would not be possible without the tools provided by the packages above. These packages should be given credit when using `warbleR` and `Rraven` by including citations in publications as appropriate (e.g. `citation("seewave")`).
### Parallel processing in `warbleR`
Parallel processing, or using multiple cores on your machine, can greatly speed up analyses. All iterative `warbleR` functions now have parallel processing for Linux, Mac and Windows operating systems. These functions also contain progress bars to visualize progress during normal or parallel processing. See <a href='#References'><sup>[1]</sup></a> for more details about improved running time using parallel processing.
## **Vignette introduction**
In the previous vignette, we performed quality control processing of selected signals and made lexicons for visual classification. Here we continue with the case study of microgeographic vocal variation in long-billed hermit hummingbirds, _Phaethornis longirostris_ <a href='#References'><sup>[2]</sup></a> (and a short sidenote using _Tinamus major_ for an example of a tonal signal) by:
1. Detecting signal frequency range
2. Extracting spectral entropy and frequency contours as time series
3. Comparing methods for quantitative analysis of signal structure
    - data set of 29 acoustic parameters
    - spectrographic cross-correlation
    - dynamic time warping on frequency contours
4. Visually inspecting frequency contours
5. Measuring acoustic parameters as a batch-process across signals
6. Calculating pairwise acoustic (dis)similarity between signals
7. Analysis of geographic variation in _Phaethornis longirostris_ songs
We also include some examples at the end of the vignette of how to perform coordinated singing analysis and simulate songs.
This vignette can be run without an advanced understanding of `R`, as long as you know how to run code in your console. However, knowing more about basic `R` coding would be very helpful to modify the code for your research questions.
For more details about function arguments, input or output, read the documentation for the function in question (e.g. `?cross_correlation`).
```{r, echo = FALSE, message = FALSE}
# remove all objects
rm(list = ls())
# unload all non-based packages
out <- sapply(paste("package:", names(sessionInfo()$otherPkgs), sep = ""), function(x) try(detach(x, unload = FALSE, character.only = TRUE), silent = TRUE))
# load packages
X <- c("warbleR", "knitr")
invisible(lapply(X, library, character.only = TRUE))
# library(kableExtra)
options(knitr.table.format = "html")
# opts_chunk$set(comment = "")
# opts_knit$set(root.dir = tempdir())
options(width = 150, max.print = 100)
# from https://stackoverflow.com/questions/28961431/computationally-heavy-r-vignettes, so that vignettes will be built upon installation, but not executed during R CMD check (which is contributing to the /doc being too large)
is_check <- ("CheckExEnv" %in% search()) || any(c(
"_R_CHECK_TIMINGS_",
"_R_CHECK_LICENSE_"
) %in% names(Sys.getenv()))
knitr::opts_chunk$set(eval = !is_check, comment = "")
# for vignette checking and image file output
# setwd("~/Desktop/R/warbleR_example2/")
# website to fix gifs
# https://ezgif.com/optimize
```
## **Prepare for analyses**
```{r, eval=FALSE}
library(warbleR)
# set your working directory appropriately
# setwd("/path/to/working directory")
# run this if you have restarted RStudio between vignettes without saving your workspace
# assumes that you are in your /home/username directory
setwd(file.path(getwd(), "warbleR_example"))
# Check your location
getwd()
```
This vignette series will not always include all available `warbleR` functions, as existing functions are updated and new functions are added. To see all functions available in this package:
```{r, echo=TRUE, eval=FALSE}
# The package must be loaded in your working environment
ls("package:warbleR")
```
## **Extract acoustic parameters as time series**
### Detect frequency range
_Raven_ selection tables can return low and high frequencies in your selections (e.g. if `all.data` or `freq.cols` in `run_raven` or `imp.raven` is TRUE), but the accuracy of these frequency selections depends on how the signals themselves were selected. Below we demonstrate how to visualize and detect the frequency range in your selected signals using the functions `freq_range_detec` and `freq_range`, which have options for setting bandpass filters to exclude background noise or other non-target acoustic signals.
`freq_range_detec` creates a plot that will print to your graphics device, and also outputs a data frame per recording with the frequency range. This data frame can be used in subsequent analyses if saved as an object. `freq_range_detec` works best with single signals. If used for a whole recording, `freq_range_detec` will pick up _all_ sounds in the recording.
Finally, although we have been using _Phaethornis longirostris_ vocalizations throughout these vignettes, these signals are harmonically structured. The functions for detecting frequency ranges, `freq_range` and `freq_range_detec` work best on tonal signals, so for this example we will use Great Tinamou ( _Tinamus major_) songs.
Download a tinamou recording from _xeno-canto_, make selections and visualize/detect frequency ranges.
```{r, eval=FALSE, echo=TRUE}
tin <- query_xc(qword = "Tinamus", download = FALSE)
# select a single recording
tin <- tin[tin$Recordist == "Marcelo Araya-Salas", ]
# download this recording
query_xc(X = tin, download = TRUE)
mp32wav()
```
```{r, eval=FALSE, echo=FALSE}
# Hiding the text that goes with the chunk below
# If you have _Raven_ installed on your local machine, you can use _Rraven_ to call this software and make selections. Make sure to include arguments from imp_raven to ensure that the selection table is imported with the correct columns for downstream functions. We will use the _Tinamus major_ signals for detecting frequency range below, so if you do not have _Raven_ installed on your machine, you can use the code below as a reference for your own signals.
```
```{r, eval=FALSE, echo=FALSE}
# commenting this out because this fails on my machine, although it worked when I first wrote this code...
# here you will replace the raven.path argument with the path specifying where Raven is located on your own machine
Tin.sels <- run_raven(raven.path = "/home/gsvidaurre/opt/Raven-1.5.0.0035/", sound.files = "Tinamus-major-154191.wav", import = TRUE, all.data = FALSE, name.from.file = TRUE, ext.case = "lower", freq.cols = FALSE)
str(Tin.sels)
# write the selection table as a physical file so you can read it back in at any time
# good way to save all your work
write.csv(Tin.sels, "Tinamus-major-154191_sels.csv", row.names = FALSE)
# generate individual cuts for frequency range measurements below
cut_sels(Tin.sels, mar = 0.05, labels = c("sound.files", "selec"))
```
```{r, eval=FALSE, echo=FALSE}
# Tin.sels <- read.csv("Tinamus-major-154191_sels.csv", header = TRUE)
```
```{r, eval=FALSE, echo=TRUE}
# here we will use a data set with sound files that have been already annotated
# read the selections back into the global environment
Tin.sels <- read.csv("manualoc_output.csv")
str(Tin.sels)
# cut the original wave file by selections for freq_range_detec below
writeWave(seewave::cutw(readWave("Tinamus-major-154191.wav"), from = Tin.sels$start[1], to = Tin.sels$end[1], f = 44100, plot = FALSE, output = "Wave"), filename = "Tinamus-major-154191-1.wav")
writeWave(seewave::cutw(readWave("Tinamus-major-154191.wav"), from = Tin.sels$start[2], to = Tin.sels$end[2], f = 44100, plot = FALSE, output = "Wave"), filename = "Tinamus-major-154191-2.wav")
```
```{r, eval=FALSE, echo=TRUE}
# note that changing the threshold argument in combination with the bandpass argument can improve the detection
freq_range_detec(readWave("Tinamus-major-154191-1.wav"), flim = c(0, 2.5), bp = c(0, 3), threshold = 15, plot = TRUE)
```
```{r, eval=FALSE, echo=TRUE}
# here, giving a strict bandpass with very low threshold improves freq_range detection
# since the curving end of the tinamou signal is lower amplitude than the rest of the signal
freq_range_detec(readWave("Tinamus-major-154191-1.wav"), flim = c(0, 2.5), bp = c(0, 3), threshold = 1, plot = TRUE)
```
The function `freq_range` allows you to simultaneously return the frequency ranges for all signals in a selection table, including the graphical output as `freq_range_detec`. Check out the resulting image file in your graphics device. In addition to image files, this function returns the original selection table, as a data frame with the newly calculated low and high frequency measurements.
```{r, eval=FALSE, echo=TRUE}
# use arguments from freq_range_detec above
fr <- freq_range(Tin.sels, threshold = 1, res = 100, flim = c(0, 2.5), bp = c(0.5, 2.5))
str(fr)
```
### Extract spectral entropy as a time series
Spectral entropy can be calculated as time series in selected signals and plotted onto image files. Previously, spectral entropy was only available as a sole measurement across a selection, as measured by `spectro_analysis`. Check out the resulting image files in your working directory.
```{r, eval = FALSE, echo=FALSE}
Phae.hisnrt <- read.csv("Phae_hisnrt.csv", header = TRUE)
```
```{r, eval=FALSE, echo=TRUE}
Phae.hisnrt <- read.csv("Phae_hisnrt.csv", header = TRUE)
str(Phae.hisnrt)
se <- entropy_ts(Phae.hisnrt, wl = 300, length.out = 10, threshold = 10, img = TRUE, img.suffix = "entropy_ts", type = "b", ovlp = 90, sp.en.range = c(-25, 10), flim = c(2, 10), picsize = 0.75, title = FALSE)
str(se)
```
#### Visualizing frequency contours with `track_freq_contour`
The function `track_freq_contour` allows you to create spectrograms and visualize the accuracy of dominant frequency and fundamental frequency measurements.
Use `track_freq_contour` on all the recordings for which you want to extract frequency contours as a time series, or later, calculate other frequency measurements. Scroll through all the spectrograms to get a feeling for how well the frequency measurements will be performed across your recordings.
Running `track_freq_contour` can help you decide which frequency parameters to use in subsequent analyses, namely `spectro_analysis` and dynamic time warping methods. Also, if the frequency measurements look acceptable with the bandpass setting used in `track_freq_contour`, use that same bandpass while running `spectro_analysis`.
```{r, eval=FALSE, echo=TRUE}
# Note that the dominant frequency measurements are almost always more accurate
track_freq_contour(Phae.hisnrt, wl = 300, flim = c(2, 10), bp = c(1, 12), it = "jpeg")
# We can change the lower end of bandpass to make the frequency measurements more precise
track_freq_contour(Phae.hisnrt, wl = 300, flim = c(2, 10), bp = c(2, 12), col = c("purple", "orange"), pch = c(17, 3), res = 100, it = "jpeg", picsize = 0.8)
```
<!-- <center> </center> -->
Note that the fundamental frequency measurements are not always very accurate, so we will remove fundamental frequency measurements later on.
### Extract fundamental or dominant frequency contours as a time series
These functions return a data frame that contains estimated frequency contours as a time series across each signal in the input data frame. You can also specify if you want to create image files with the estimated frequency contours plotted over spectrograms. You can change argument settings to better visualize the signals or change the estimation of the frequency contour. For instance, the argument `threshold` (as in `auto_detec`) controls the amplitude threshold for estimating frequency values at each time point. Note that the fundamental frequency contour estimation can often have errors, and tends to perform best with more tonal signals. The frequency contours are those that can be visualized using `track_freq_contour`.
```{r, echo=FALSE, eval=FALSE}
# decided to remove track_harmonics, not working well for either Phaethornis or Tinamou signals
# the text for above this chunk
# `track_harmonics` is a modified function from `seewave` that allows you to track the dominant frequency for harmonic calls, even when the amplitude fluctuates among harmonics.
# with a Phaethornis harmonic signal
nm <- paste(paste(as.character(Phae.hisnrt$sound.files[1]), as.character(Phae.hisnrt$selec[1]), sep = "-"), ".wav", sep = "")
writeWave(seewave::cutw(readWave(as.character(Phae.hisnrt$sound.files[1])), from = Phae.hisnrt$start[1], to = Phae.hisnrt$end[1], f = 44100, plot = FALSE, output = "Wave"), filename = nm)
trck_hrm <- track_harmonic(readWave(nm), f = 44100, ovlp = 70, fftw = FALSE, threshold = 15, bandpass = NULL, clip = 0.1, plot = TRUE, xlab = "Time (s)", ylab = "Frequency (kHz)", adjust.wl = FALSE, dfrq = FALSE)
# plot spectrogram
spectro(readWave(nm), grid = FALSE, scale = FALSE, f = 22050, ovlp = 90, palette = reverse.gray.colors.2, collevels = seq(-40, 0, 1), wl = 300, osc = FALSE, flim = c(2, 10), main = "warbleR's 'track_harmonic'")
# plot detected frequency contour
points(x = trck_hrm[, 1] + 0.1, y = trck_hrm[, 2], cex = 1, col = "red", pch = 20)
```
```{r, echo=FALSE, eval=FALSE}
# with a Tinamou tonal signal
trck_hrm <- track_harmonic(readWave("Tinamus-major-154191-1.wav"), f = 44100, ovlp = 70, fftw = FALSE, threshold = 15, bandpass = NULL, plot = TRUE, xlab = "Time (s)", ylab = "Frequency (kHz)", adjust.wl = FALSE, dfrq = FALSE)
# plot spectrogram
spectro(readWave("Tinamus-major-154191-2.wav"), grid = FALSE, scale = FALSE, f = 44100, ovlp = 90, palette = reverse.gray.colors.2, collevels = seq(-40, 0, 1), wl = 300, osc = FALSE, flim = c(0, 4), main = "warbleR's 'track_harmonic'")
# plot detected frequency contour
points(x = trck_hrm[, 1] + 0.1, y = trck_hrm[, 2], cex = 1, col = "red", pch = 20)
```
```{r, echo=TRUE, eval=FALSE}
# Fundamental frequency contour
ff_df <- freq_ts(Phae.hisnrt, wl = 300, length.out = 20, threshold = 15, img = TRUE, img.suffix = "ff", type = "p", ovlp = 70, clip.edges = FALSE, leglab = "freq_ts", ff.method = "tuneR")
str(ff_df)
```
```{r, echo=TRUE, eval=FALSE}
# Dominant frequency contour
# Uses seewave function dfreq by default
df_df <- freq_ts(Phae.hisnrt, wl = 300, length.out = 20, threshold = 15, img = TRUE, img.suffix = "df", type = "p", ovlp = 70, clip.edges = FALSE, leglab = "freq_ts", fsmooth = 0.2)
str(df_df)
```
### Manually tailor frequency contours with tailor_sels
The functions above to track/extract dominant and fundamental frequency contours perform best with more tonal signals. The frequency measurements for signals with harmonic structure tend to jump around, and might not always match your own visual tracking of frequency contours. If this is the case, you can use the function `tailor_sels` to fix frequency contours where individual frequency measurements are clearly far off from the frequency contour detected by the human eye. The `tailor_sels` function is the same used in vignette 2 in this series, but by changing a few arguments, you can use `tailor_sels` to fix frequency contours.
Note that manually fixing frequency contours might not make sense, depending on your question and/or the contour in question. For instance, a dominant frequency contour for a harmonic signal that jumps around and does not form a smooth contour may in fact be the truth, rather than mis-estimation of the contour. On the other hand, fundamental frequencies can be more easily traced by the human eye across a signal, so using `tailor_sels` to fix a frequency contour that jumps around the signal makes more sense.
When the new graphics window for `tailor_sels` appears, it will show spectrograms as we saw in vignette 2, but with frequency contours plotted as points over each spectrogram. To fix the frequency contour, click near the misaligned points to place them over the frequency contour that you detect by eye. `tailor_sels` makes a new `.csv` file in your working directory that merges your original data frame (below, `Phae.hisnrt`) with the modified frequency time series (below, `ff_df` with any modified frequency values). To check that your manual tracing improved frequency contours, you can use `track_freq_contour` to make spectrograms with your new frequency contours plotted as custom contours.
```{r, eval=FALSE, echo=TRUE}
# Use the original data frame of songs for the main tailor_sels dataset
# the data frame with the fundamental frequency contours is provided for manual tracing
tailor_sels(Phae.hisnrt,
wl = 300, flim = c(2, 10), wn = "hanning", mar = 0.1,
osci = TRUE, title = c("sound.files", "selec"), auto.contour = TRUE, ts.df = ff_df, col = "red", alpha = 0.6
)
# rename your tailor_sels output csv as desired, then read it back into R
mff <- read.csv("seltailor_output_mff.csv")
str(mff)
track_freq_contour(Phae.hisnrt, wl = 300, flim = c(2, 10), bp = c(1, 12), it = "jpeg", custom.contour = mff)
```
### Count inflections across frequency contours
This function calculates the modulation index for any frequency contour or time series. The function `spectro_analysis` (see below) also calculates a modulation index for signals, but as a single value across the length of the signal.
```{r, eval=FALSE, echo=TRUE}
df_inf <- inflections(X = df_df, pb = TRUE)
str(df_inf)
```
## **Quantitative measurements of acoustic (dis)similarity**
### Compare methods for quantitative analysis of signal structure
Bioacoustic research relies on quantifying the structure of acoustic signals and comparing that structure across behavioral/ecological contexts, groups or species. However, measuring signal structure in a way that fully accounts for the variation in the signals could be a tricky task. Some of the differences that are apparent by visual inspection of spectrograms might not be picked up by some analyses. Hence, choosing the most appropriate analytical approach is a critical step.
The `warbleR` function `compare_methods` attempts to facilitate method selection. This function produces graphs (as image files in the working directory) with spectrograms from 4 signals that allow visual inspection of the performance of acoustic analysis methods at comparing those signals. The signals are randomly picked from the provided data frame (`X` argument), and the function compares 2 `warbleR` methods at a time. The methods available are:
* cross-correlation by warbleR function `cross_correlation`
* dynamic time warping on dominant or fundamental frequency contours with `freq_DTW`
* spectral parameters with `spectro_analysis`
In the last vignette, we tailored selections of _Phaethornis longirostris_ songs that were originally downloaded from _xeno-canto_, detected by `auto_detec` and filtered by signal-to-noise ratio (SNR). Here we will pick up the workflow with these filtered and tailored selections, using the data frame `Phae.hisnrt`.
```{r, eval=FALSE, echo=TRUE}
Phae.hisnrt <- read.csv("Phae_hisnrt.csv", header = TRUE)
compare_methods(
X = Phae.hisnrt, flim = c(0, 10), bp = c(0, 10),
wl = 300, n = 10, methods = c("XCORR", "dfDTW")
)
```
`compare_methods` will produce 10 image files in the working directory (since we specified `n = 10`) that look like this:
<!-- <center> </center> -->
In this graphic, the acoustic pairwise distance between signals is shown next to the arrows linking them. The font color of a distance value corresponds to the font color of the method that generated it, as shown in the scatterplots (in this case black font represents XCORR distances). Distances are standardized, with 0 being the distance of a signal to itself and 1 the farthest pairwise distance in the pool of signals. Principal Component Analysis (`princomp` function) is applied to calculate distances when using spectral parameters (SP). In this case, the first 2 PC's are used. Classical Multidimensional Scaling (also known as Principal Coordinates Analysis, `cmdscale` function) is used for all other methods. The image file name contains the methods being compared and the row number of the selections. This function internally uses a modified version of the `spectro` function from the `seewave` package to create spectrograms. Note that the spectrograms are all plotted with the same frequency and time scales.
Also note that the graphs contain 2 scatterplots (1 per method) of the acoustic space of all signals in the input data frame `X`. The position of the 4 signals in the spectrograms is highlighted in the acoustic space scatterplot. These graphics allow you to directly assess if the distances between signals in the acoustic space accurately represent the spectrographic similarity (e.g. how similar their acoustic structure looks in the spectrograms).
You can run `compare_methods` for any combination of the quantitative methods for assessing acoustic (dis)similarity mentioned above. Importantly, to include the SP method (spectral parameters measured by the function `spectro_analysis`), you need a large enough dataset, as the PCA that summarizes the spectral parameters needs more units (rows) than variables (columns).
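For instance, a sketch of a run that includes the SP method (assuming the data set is large enough for the PCA step):
```{r, eval=FALSE}
# compare spectral parameters against dynamic time warping on dominant frequency contours
compare_methods(
  X = Phae.hisnrt, flim = c(0, 10), bp = c(0, 10),
  wl = 300, n = 10, methods = c("SP", "dfDTW")
)
```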
### Measure acoustic parameters with spectro_analysis
We can now perform acoustic measurements with the function `spectro_analysis`. This function relies on the temporal coordinates in selection tables to measure 29 parameters across selections. `spectro_analysis` is a batch process that is faster than calculating measurements manually, e.g. one selection and recording at a time. `spectro_analysis` uses and customizes several functions available in the [`seewave package`](https://cran.r-project.org/package=seewave).
Use the bandpass filter to your advantage here, to filter out low or high background noise before performing measurements. Also note that changing the amplitude threshold will change the amplitude at which noises (including non-target signals) are detected for measurements.
```{r, eval=TRUE, echo=FALSE}
params <- read.csv("acoustic_parameters.csv")
```
```{r, eval=FALSE, echo=TRUE}
params <- spectro_analysis(Phae.hisnrt, bp = c(2, 10), threshold = 15)
write.csv(params, "acoustic_parameters.csv", row.names = FALSE)
```
Remove parameters derived from fundamental frequency (based on `track_freq_contour` results).
```{r, eval=FALSE, echo=TRUE}
params <- params[, grep("fun|peakf", colnames(params), invert = TRUE)]
```
### Calculate acoustic parameters by song type
In addition to calculating acoustic parameters per individual signals using `spectro_analysis`, you can also calculate these acoustic parameters by song type (average, minimum and maximum values per song type group).
```{r, eval=FALSE, echo=TRUE}
data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4", "selec.table"))
writeWave(Phae.long1, "Phae.long1.wav")
writeWave(Phae.long2, "Phae.long2.wav")
writeWave(Phae.long3, "Phae.long3.wav")
writeWave(Phae.long4, "Phae.long4.wav")
# Add a 'song' column
selec.table$song <- rep(1:4, each = 3)[1:11]
# Measure acoustic parameters
sp <- spectro_analysis(selec.table, bp = c(1, 11), 300, fast = TRUE)
# Add song data
sp <- merge(sp, selec.table, by = c("sound.files", "selec"))
# Calculate song-level parameters for all numeric parameters
sng <- song_analysis(X = sp, song_colm = "song", parallel = 1, pb = TRUE)
str(sng)
```
### Dynamic time warping of frequency contours
The dynamic time warping methods in `warbleR` all rely on functions from the `dtw` package, and are available for both dominant and fundamental frequencies. `df_DTW` and `ff_DTW` calculate the dominant and fundamental frequency contours, respectively, of each signal and compare them using dynamic time warping. You can interpolate measurements across the frequency time series using the `length.out` argument.
These functions return a matrix of pairwise acoustic dissimilarity (e.g. acoustic "distance") measurements that can be used in analyses of acoustic similarity, as well as image files with the frequency contours plotted over the spectrograms. If you require only the time series without the dynamic time warping analysis for either the dominant or fundamental frequency, check out the function `freq_ts`.
Note that, like the `freq_range` and `freq_range_detec` functions, the dynamic time warping functions tend to work best on more tonal signals. Check out the resulting image files in your working directory.
```{r, eval=FALSE, echo=TRUE}
# Harmonic Phaethornis signals
dm <- freq_DTW(Phae.hisnrt, length.out = 30, flim = c(2, 10), bp = c(2, 9), wl = 300, img = TRUE)
str(dm)
```
```{r, eval=FALSE, echo=TRUE}
# Tonal Tinamou signals
Tin.sels <- read.csv("Tinamus-major-154191_sels.csv", header = TRUE)
dm <- freq_DTW(Tin.sels, length.out = 30, flim = c(0, 2.5), bp = c(0.5, 2.5), wl = 512, img = TRUE)
str(dm)
```
### Spectrographic cross-correlation
Cross-correlation calculates the pairwise similarity of multiple signals by spectrogram cross-correlation. `cross_correlation` calculates a correlation of the amplitude values at each step by sliding one spectrogram over the other. This function is a modified version of the `corMatch` and `makeTemplate` functions from the package `monitoR`. `cross_correlation` runs over multiple signals and returns a list of output, including:
* a correlation statistic for each "sliding" step
* the maximum (peak) correlation for each pairwise comparison
`cross_correlation` requires margins half the duration of the signal on either side of the signal (e.g. before and after the start and end coordinates). Signals that are very close to either the start or end of a recording may throw errors. Additionally, when sound files have been modified by bandpass filters, some pairwise cross-correlations may not work and the correlation matrix may contain `NA` values. `cross_correlation` now contains an argument to remove columns of the matrix that contain `NA` values, which will reduce the data in the matrix but also allow the matrix to be used in subsequent analyses.
Here our output is a matrix of peak correlation per pairwise comparison, with the recording-selection names as the matrix dimension names.
```{r, eval=FALSE, echo=TRUE}
xc <- cross_correlation(Phae.hisnrt, wl = 300, na.rm = FALSE)
str(xc)
```
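If some pairwise comparisons fail (e.g. after bandpass filtering), a sketch of re-running with the `NA`-removal behavior described above:
```{r, eval=FALSE}
# drop matrix columns containing NA values so the output can still be used downstream
xc2 <- cross_correlation(Phae.hisnrt, wl = 300, na.rm = TRUE)
str(xc2)
```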
## Analysis of geographic variation using `spectro_analysis` measurements
We can evaluate whether or not the observed variation in song structure is reflected by the parameters we just measured. For this we will conduct a Principal Component Analysis on scaled (z-transformed) parameters and look at the grouping of songs (data points) in the scatter plot.
```{r, eval=TRUE, dpi=220}
# Run the PCA with only numeric variables of params
pca <- prcomp(x = params[, sapply(params, is.numeric)], scale. = TRUE)
# Check the variance explained by each PC
summary(pca)
# Extract PCA scores (the 'x' element of a prcomp object)
pcascor <- as.data.frame(pca$x)
# Plot the 2 first PCs
plot(pcascor[, 1], pcascor[, 2],
col = as.numeric(as.factor(params$sound.files)), pch = 20,
cex = 1, xlab = "PC1", ylab = "PC2"
)
# Add recordings/individuals labels
x <- tapply(pcascor[, 1], params$sound.files, mean)
y <- tapply(pcascor[, 2], params$sound.files, mean)
labs <- gsub(".wav", "", unique(sapply(as.character(params$sound.files), function(x) {
strsplit(x, split = "-", fixed = TRUE)[[1]][3]
}, USE.NAMES = FALSE)))
text(x, y, labs, cex = 0.75)
```
Songs are grouped by sound file. As each sound file represents a single individual, this suggests that songs have individual signatures. Let's look at the song type level. First, we need to classify the songs by song type. We can check the spectrograms we previously created to do this.
<!-- <center> </center> -->
Songs from sound files 154070 and 154072 seem to belong to the same song type. Sound files 154129 and 154161 represent a different song type. Finally, the songs from each of the other 2 sound files have a unique structure, so each one represents a different song type. Sound file 154123 is represented here too, but in vignette 1 we decided against using signals from this recording. We can add this information to the plot by using symbols to represent song types.
```{r, eval=TRUE, dpi=220}
# Create a song type variable
# First, extract recording ID
songtype <- gsub(".wav", "", sapply(as.character(params$sound.files), function(x) {
strsplit(x, split = "-", fixed = TRUE)[[1]][3]
}, USE.NAMES = FALSE))
# Now change IDs for letters representing song types
songtype <- gsub("154070|154072", "A", songtype)
songtype <- gsub("154129|154161", "B", songtype)
songtype <- gsub("154138", "C", songtype)
# Add song type as a variable representing symbol type
plot(pcascor[, 1], pcascor[, 2],
col = as.numeric(as.factor(params$sound.files)),
pch = as.numeric(as.factor(songtype)),
cex = 1, xlab = "PC1", ylab = "PC2"
)
# Add song type labels
x <- tapply(pcascor[, 1], songtype, mean)
y <- tapply(pcascor[, 2], songtype, mean)
text(x, y, unique(songtype), cex = 1)
```
Songs of the same song type are more similar (they cluster together). This PCA confirms that the visually obvious differences in the song structures are well described by the parameters measured in [`warbleR`](https://cran.r-project.org/package=warbleR). Likewise, it also confirms that we can detect biologically relevant differences from sound files that have undergone _mp3_ compression and conversion back to _wav_ format (see also <a href='#References'> <sup>[2]</sup></a>). Importantly, if this analysis were for publication we would have to run other filtering steps on the acoustic parameters generated by `spectro_analysis`, including removing any collinear variables.
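As a rough sketch of one such step (the 0.9 correlation cutoff is arbitrary), highly collinear parameters could be dropped before further analysis:
```{r, eval=FALSE}
# keep only numeric acoustic parameters
num_params <- params[, sapply(params, is.numeric)]
# pairwise correlations among parameters
cor_mat <- cor(num_params, use = "pairwise.complete.obs")
# flag parameters strongly correlated (|r| > 0.9) with an earlier parameter and drop them
drop <- apply(upper.tri(cor_mat) & abs(cor_mat) > 0.9, 2, any)
params_filtered <- num_params[, !drop]
ncol(num_params)
ncol(params_filtered)
```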
## Coordinated singing analysis
`warbleR` contains functions to visualize and test whether or not singing bouts between individuals can be considered coordinated singing. The function `plot_coordination` plots signals as polygons to better visualize overlap between these signals.
The function `test_coordination` uses a Monte Carlo randomization test to determine whether or not the probability of finding overlapping songs in a singing event is less than or more than what you would expect by chance. `test_coordination` can be run simultaneously for many singing events, if all are contained in a single data frame (see `test_coordination` documentation for more information).
```{r, eval = FALSE, echo = TRUE}
data(sim_coor_sing)
str(sim_coor_sing)
```
The `sim_coor_sing` dataset contains three types of singing bouts:
```{r, eval = FALSE, echo = TRUE}
# save plots in a list
g <- plot_coordination(sim_coor_sing, it = "jpeg", img = FALSE, res = 300)
# print list of plots to graphics device
g
```
We can test for overlapping or alternating coordinated singing using the `less.than.chance` argument in `test_coordination`. Here we will test for alternating coordinated singing, since `less.than.chance` is set to `TRUE`.
```{r, eval = FALSE, echo = TRUE}
cs <- test_coordination(sim_coor_sing, iterations = 1000, less.than.chance = TRUE, cutoff = 10)
str(cs)
```
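Conversely, a sketch of testing for overlapping (rather than alternating) coordinated singing simply flips that argument:
```{r, eval = FALSE, echo = TRUE}
# test whether songs overlap more than expected by chance
cs_ovlp <- test_coordination(sim_coor_sing, iterations = 1000, less.than.chance = FALSE, cutoff = 10)
str(cs_ovlp)
```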
## Simulating songs
You can simulate songs using `warbleR`. Depending on your question, you can simulate songs with different numbers of subunits, subunit durations, numbers of harmonics, amplitudes, gaps between subunits, amplitude fading of subunits, among other options. Songs are simulated under Brownian motion frequency drift.
```{r, eval = FALSE, echo = TRUE}
# simulate a song with 3 tonal elements
ss <- sim_songs(n = 3, harms = 1)
# plot the simulated song
# seewave::spectro(ss)
# simulate a song with 3 harmonic elements of differing amplitude
ss <- sim_songs(n = 3, harms = 3)
# plot the simulated song
seewave::spectro(ss)
```
This vignette concludes the `warbleR` vignette series. After running the code in this third vignette, you should now have an idea of how to:
* measure frequency range of signals
* extract spectral entropy and frequency contours as time series
* manually fix frequency contours (as appropriate)
* compare and choose methods for acoustic (dis)similarity analysis
* use different methods for acoustic (dis)similarity analysis
    * `spectro_analysis` - 29 parameters (across signals or song types)
    * `freq_DTW` - dynamic time warping on frequency contours
    * `cross_correlation` - spectrographic cross-correlation
* carry out steps for an analysis of geographic variation in vocalizations
* perform coordinated singing analysis
* simulate songs for other research questions
In these vignettes we presented examples of how the `warbleR` functions can be used in a streamlined workflow for flexible and rigorous bioacoustics analyses. The output of `warbleR` functions can be used in other packages for statistical analyses, such as machine learning.
## **Citation**
Please cite `warbleR` when you use the package:
Araya-Salas, M. and Smith-Vidaurre, G. (2017), warbleR: an R package to streamline analysis of animal acoustic signals. Methods Ecol Evol. 8, 184-191.
## **Reporting bugs**
Please report any bugs [here](https://github.com/maRce10/warbleR/issues).
<font size="5"><a name="References">References</a></font>
1. Araya-Salas, M. and G. Smith-Vidaurre. 2016. warbleR: an R package to streamline analysis of animal
acoustic signals. _Methods in Ecology and Evolution_. doi: 10.1111/2041-210X.12624
|
/scratch/gouwar.j/cran-all/cranData/warbleR/vignettes/warbleR_workflow_03.Rmd
|
#' Locate period boundaries for a date vector
#'
#' @description
#' `warp_boundary()` detects a change in time period along `x`, for example,
#' rolling from one month to the next. It returns the start and stop positions
#' for each contiguous period chunk in `x`.
#'
#' @details
#' The stop positions are just the [warp_change()] values, and the start
#' positions are computed from these.
#'
#' @inheritParams warp_distance
#'
#' @return
#' A two column data frame with the columns `start` and `stop`. Both are
#' double vectors representing boundaries of the date time groups.
#'
#' @export
#' @examples
#' x <- as.Date("1970-01-01") + -4:5
#' x
#'
#' # Boundaries by month
#' warp_boundary(x, "month")
#'
#' # Bound by every 5 days, relative to "1970-01-01"
#' # Creates boundaries of:
#' # [1969-12-27, 1970-01-01)
#' # [1970-01-01, 1970-01-06)
#' # [1970-01-06, 1970-01-11)
#' warp_boundary(x, "day", every = 5)
#'
#' # Bound by every 5 days, relative to the smallest value in our vector
#' origin <- min(x)
#' origin
#'
#' # Creates boundaries of:
#' # [1969-12-28, 1970-01-02)
#' # [1970-01-02, 1970-01-07)
#' warp_boundary(x, "day", every = 5, origin = origin)
warp_boundary <- function(x,
period,
...,
every = 1L,
origin = NULL) {
check_dots_empty("warp_boundary", ...)
.Call(warp_warp_boundary, x, period, every, origin)
}
|
/scratch/gouwar.j/cran-all/cranData/warp/R/boundary.R
|
#' Detect changes in a date time vector
#'
#' @description
#' `warp_change()` detects changes at the `period` level.
#'
#' If `last = TRUE`, it returns locations of the last value before a change,
#' and the last location in `x` is always included. Additionally, if
#' `endpoint = TRUE`, the first location in `x` will be included.
#'
#' If `last = FALSE`, it returns locations of the first value after a change,
#' and the first location in `x` is always included. Additionally, if
#' `endpoint = TRUE`, the last location in `x` will be included.
#'
#' @inheritParams warp_distance
#'
#' @param last `[logical(1)]`
#'
#' If `TRUE`, the _last_ location _before_ a change is returned.
#' The last location of the input is always returned.
#'
#' If `FALSE`, the _first_ location _after_ a change is returned.
#' The first location of the input is always returned.
#'
#' @param endpoint `[logical(1)]`
#'
#' If `TRUE` and `last = TRUE`, will additionally return the first location
#' of the input.
#'
#' If `TRUE` and `last = FALSE`, will additionally return the last location
#' of the input.
#'
#' If `FALSE`, does nothing.
#'
#' @return
#' A double vector of locations.
#'
#' @export
#' @examples
#' x <- as.Date("2019-01-01") + 0:5
#' x
#'
#' # Last location before a change, last location of `x` is always included
#' warp_change(x, period = "yday", every = 2, last = TRUE)
#'
#' # Also include first location
#' warp_change(x, period = "yday", every = 2, last = TRUE, endpoint = TRUE)
#'
#' # First location after a change, first location of `x` is always included
#' warp_change(x, period = "yday", every = 2, last = FALSE)
#'
#' # Also include last location
#' warp_change(x, period = "yday", every = 2, last = FALSE, endpoint = TRUE)
warp_change <- function(x,
period,
...,
every = 1L,
origin = NULL,
last = TRUE,
endpoint = FALSE) {
check_dots_empty("warp_change", ...)
.Call(warp_warp_change, x, period, every, origin, last, endpoint)
}
|
/scratch/gouwar.j/cran-all/cranData/warp/R/change.R
|
# Exported for testing
date_get_year_offset <- function(x) {
.Call(warp_date_get_year_offset, x)
}
date_get_month_offset <- function(x) {
.Call(warp_date_get_month_offset, x)
}
|
/scratch/gouwar.j/cran-all/cranData/warp/R/date.R
|
#' Compute distances from a date time origin
#'
#' @description
#' `warp_distance()` is a low level engine for computing date time distances.
#'
#' It returns the distance from `x` to the `origin` in units
#' defined by the `period`.
#'
#' For example, `period = "year"` would return the number of years from
#' the `origin`. Setting `every = 2` would return the number of 2 year groups
#' from the `origin`.
#'
#' @details
#' The return value of `warp_distance()` has a variety of uses. It can be used
#' for:
#'
#' - A grouping column in a `dplyr::group_by()`. This is especially useful for
#' grouping by a multitude of a particular period, such as "every 5 months".
#'
#' - Computing distances between values in `x`, in units of the `period`.
#' By returning the distances from the `origin`, `warp_distance()` has also
#' implicitly computed the distances between values of `x`. This is used
#' by `slide::block()` to break the input into time blocks.
#'
#' When the time zone of `x` differs from the time zone of `origin`, a warning
#' is issued, and `x` is coerced to the time zone of `origin` without changing
#' the number of seconds of `x` from the epoch. In other words, the time zone
#' of `x` is directly changed to the time zone of `origin` without changing the
#' underlying numeric representation. __It is highly advised to specify an
#' `origin` value with the same time zone as `x`.__ If a `Date` is used for
#' `x`, its time zone is assumed to be `"UTC"`.
#'
#' @section Period:
#'
#' For `period` values of `"year"`, `"month"`, and `"day"`, the information
#' provided in `origin` is truncated. Practically this means that if you
#' specify:
#'
#' ```
#' warp_distance(period = "month", origin = as.Date("1970-01-15"))
#' ```
#'
#' then only `1970-01` will be used, and not the fact that the origin starts
#' on the 15th of the month.
#'
#' The `period` value of `"quarter"` is internally
#' `period = "month", every = every * 3`. This means that for `"quarter"`
#' the month specified for the `origin` will be used as the month to start
#' counting from to generate the 3 month quarter.
#'
#' To mimic the behavior of `lubridate::floor_date()`, use `period = "week"`.
#' Internally this is just `period = "day", every = every * 7`. To mimic the
#' `week_start` argument of `floor_date()`, set `origin` to a date
#' with a week day identical to the one you want the week to start from. For
#' example, the default origin of `1970-01-01` is a Thursday, so this would
#' generate groups identical to `floor_date(week_start = 4)`.
#'
#' The `period` value of `"yday"` is computed as complete `every`-day periods
#' from the `origin`, with a forced reset of the `every`-day counter every
#' time you hit the month-day value of the `origin`. `"yweek"` is built on top
#' of this internally as `period = "yday", every = every * 7`. This ends up
#' using an algorithm very similar to `lubridate::week()`, with the added
#' benefit of being able to control the `origin` date.
#'
#' The `period` value of `"mday"` is computed as `every`-day periods within
#' each month, with a forced reset of the `every`-day counter
#' on the first day of each month. The most useful application of this is
#' `"mweek"`, which is implemented as `period = "mday", every = every * 7`. This
#' allows you to group by the "week of the month". For `"mday"` and `"mweek"`,
#' only the year and month parts of the `origin` value are used. Because of
#' this, the `origin` argument is not that interesting for these periods.
#'
#' The `"hour"` period (and more granular frequencies) can produce results
#' that might be surprising, even if they are technically correct. See the
#' vignette at `vignette("hour", package = "warp")` for more information.
#'
#' @section Precision:
#'
#' With `POSIXct`, the limit of precision is approximately the microsecond
#' level. Only dates that are very close to the unix origin of 1970-01-01 can
#' possibly represent microsecond resolution correctly (close being within
#' about 40 years on either side). Otherwise, the values past the microsecond
#' resolution are essentially random, and can cause problems for the distance
#' calculations. Because of this, decimal digits past the microsecond range are
#' zeroed out, so please do not attempt to rely on them. It should still be safe
#' to work with microseconds, by, say, bucketing them by millisecond distances.
#'
#' @param x `[Date / POSIXct / POSIXlt]`
#'
#' A date time vector.
#'
#' @param period `[character(1)]`
#'
#' A string defining the period to group by. Valid inputs can be roughly
#' broken into:
#'
#' - `"year"`, `"quarter"`, `"month"`, `"week"`, `"day"`
#' - `"hour"`, `"minute"`, `"second"`, `"millisecond"`
#' - `"yweek"`, `"mweek"`
#' - `"yday"`, `"mday"`
#'
#' @param every `[positive integer(1)]`
#'
#' The number of periods to group together.
#'
#' For example, if the period was set to `"year"` with an every value of `2`,
#' then the years 1970 and 1971 would be placed in the same group.
#'
#' @param origin `[Date(1) / POSIXct(1) / POSIXlt(1) / NULL]`
#'
#' The reference date time value. The default when left as `NULL` is the
#' epoch time of `1970-01-01 00:00:00`, _in the time zone of the index_.
#'
#' This is generally used to define the anchor time to count from, which is
#' relevant when the every value is `> 1`.
#'
#' @param ... `[dots]`
#'
#' These dots are for future extensions and must be empty.
#'
#' @return
#' A double vector containing the distances.
#'
#' @export
#' @examples
#' x <- as.Date("1970-01-01") + -4:4
#' x
#'
#' # Compute monthly distances (really, year + month)
#' warp_distance(x, "month")
#'
#' # Compute distances every 2 days, relative to "1970-01-01"
#' warp_distance(x, "day", every = 2)
#'
#' # Compute distances every 2 days, this time relative to "1970-01-02"
#' warp_distance(x, "day", every = 2, origin = as.Date("1970-01-02"))
#'
#' y <- as.POSIXct("1970-01-01 00:00:01", "UTC") + c(0, 2, 3, 4, 5, 6, 10)
#'
#' # Compute distances every 5 seconds, starting from the unix epoch of
#' # 1970-01-01 00:00:00
#' # So this buckets:
#' # [1970-01-01 00:00:00, 1970-01-01 00:00:05) = 0
#' # [1970-01-01 00:00:05, 1970-01-01 00:00:10) = 1
#' # [1970-01-01 00:00:10, 1970-01-01 00:00:15) = 2
#' warp_distance(y, "second", every = 5)
#'
#' # Compute distances every 5 seconds, starting from the minimum of `x`
#' # 1970-01-01 00:00:01
#' # So this buckets:
#' # [1970-01-01 00:00:01, 1970-01-01 00:00:06) = 0
#' # [1970-01-01 00:00:06, 1970-01-01 00:00:11) = 1
#' # [1970-01-01 00:00:11, 1970-01-01 00:00:16) = 2
#' origin <- as.POSIXct("1970-01-01 00:00:01", "UTC")
#' warp_distance(y, "second", every = 5, origin = origin)
#'
#' # ---------------------------------------------------------------------------
#' # Time zones
#'
#' # When `x` is not UTC and `origin` is left as `NULL`, the origin is set as
#' # 1970-01-01 00:00:00 in the time zone of `x`. This seems to be the most
#' # practically useful default.
#' z <- as.POSIXct("1969-12-31 23:00:00", "UTC")
#' z_in_nyc <- as.POSIXct("1969-12-31 23:00:00", "America/New_York")
#'
#' # Practically this means that these give the same result, because their
#' # `origin` values are defined in their respective time zones.
#' warp_distance(z, "year")
#' warp_distance(z_in_nyc, "year")
#'
#' # Compare that to what would happen if we used a static `origin` of
#' # 1970-01-01 00:00:00 UTC.
#' # America/New_York is 5 hours behind UTC, so when `z_in_nyc` is converted to
#' # UTC the value becomes `1970-01-01 04:00:00 UTC`, a different year. Because
#' # this is generally surprising, a warning is thrown.
#' origin <- as.POSIXct("1970-01-01 00:00:00", tz = "UTC")
#' warp_distance(z, "year", origin = origin)
#' warp_distance(z_in_nyc, "year", origin = origin)
#'
#' # ---------------------------------------------------------------------------
#' # `period = "yweek"`
#'
#' x <- as.Date("2019-12-23") + 0:16
#' origin <- as.Date("1970-01-01")
#'
#' # `"week"` counts the number of 7 day periods from the `origin`
#' # `"yweek"` restarts the 7 day counter every time you hit the month-day
#' # value of the `origin`. Notice how, for the `yweek` column, only 1 day was
#' # in the week starting with `2019-12-31`. This is because the next day is
#' # `2020-01-01`, which aligns with the month-day value of the `origin`.
#' data.frame(
#' x = x,
#' week = warp_distance(x, "week", origin = origin),
#' yweek = warp_distance(x, "yweek", origin = origin)
#' )
#'
#' # ---------------------------------------------------------------------------
#' # `period = "mweek"`
#'
#' x <- as.Date("2019-12-23") + 0:16
#'
#' # `"mweek"` breaks `x` up into weeks of the month. Notice how days 1-7
#' # of 2020-01 all have the same distance value. A forced reset of the 7 day
#' # counter is done at the 1st of every month. This results in the 3 day
#' # week of the month at the end of 2019-12, from 29-31.
#' data.frame(
#' x = x,
#' mweek = warp_distance(x, "mweek")
#' )
#'
warp_distance <- function(x,
period,
...,
every = 1L,
origin = NULL) {
check_dots_empty("warp_distance", ...)
.Call(warp_warp_distance, x, period, every, origin)
}
|
/scratch/gouwar.j/cran-all/cranData/warp/R/distance.R
|
# Exported for testing
divmod <- function(x, y) {
.Call(warp_divmod, x, y)
}
div <- function(x, y) {
.Call(warp_div, x, y)
}
|
/scratch/gouwar.j/cran-all/cranData/warp/R/divmod.R
|
dots_n <- function(...) {
nargs()
}
# Like `ellipsis::check_dots_empty()` but without the import
check_dots_empty <- function(fn, ...) {
n <- dots_n(...)
if (n == 0L) {
return(invisible())
}
msg <- paste0(
"`...` is not empty in `", fn, "()`.\n",
"These dots only exist to allow for future extensions and should be empty.\n",
"Did you misspecify an argument?"
)
stop(msg, call. = FALSE)
}
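# Illustration only (not run by the package): any argument passed through the
# dots triggers the error above.
#   f <- function(x, ...) { check_dots_empty("f", ...); x }
#   f(1)         # returns 1
#   f(1, y = 2)  # error: `...` is not empty in `f()`.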
|
/scratch/gouwar.j/cran-all/cranData/warp/R/dots.R
|
time_class_type <- function(x) {
.Call(warp_class_type, x)
}
# Callable from C
as_posixct_from_posixlt <- function(x) {
as.POSIXct.POSIXlt(x)
}
# Callable from C
as_posixlt_from_posixct <- function(x) {
as.POSIXlt.POSIXct(x)
}
# Callable from C, ensures that the resulting Date
# is a double and has no fractional parts
as_date <- function(x) {
type <- time_class_type(x)
if (type == "date") {
if (typeof(x) == "integer") {
return(structure(as.double(x), class = "Date"))
} else {
# Always truncate towards 0 to get rid of fractional date components
return(structure(trunc(unclass(x)), class = "Date"))
}
}
if (type == "posixct") {
return(as.Date.POSIXct(x, tz = tz(x)))
}
if (type == "posixlt") {
return(as.Date.POSIXlt(x))
}
stop("Internal error: Unknown date time class", call. = FALSE)
}
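# Illustration only (not run by the package): `as_date()` keeps the
# year/month/day of the POSIXct's own time zone, rather than reinterpreting
# the value in UTC.
#   x <- as.POSIXct("2019-01-01 23:00:00", tz = "America/New_York")
#   as_date(x)              # "2019-01-01"
#   as.Date(x, tz = "UTC")  # "2019-01-02"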
# Used in `as_date()`. The main thing to ensure is that
# `as_date()` on a POSIXct retains the year/month/day of that time zone
tz <- function(x) {
tzone <- attr(x, "tzone")[[1]]
if (is.null(tzone) && !is_POSIXt(x)) {
return("UTC")
}
if (is.character(tzone) && nzchar(tzone)) {
return(tzone)
}
tzone <- attr(as.POSIXlt(x[0]), "tzone")[[1]]
if (is.null(tzone)) {
return("UTC")
}
tzone
}
is_POSIXt <- function(x) {
inherits(x, "POSIXt")
}
|
/scratch/gouwar.j/cran-all/cranData/warp/R/utils.R
|
#' @keywords internal
"_PACKAGE"
# The following block is used by usethis to automatically manage
# roxygen namespace tags. Modify with care!
## usethis namespace: start
#' @useDynLib warp, .registration = TRUE
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/warp/R/warp-package.R
|
# nocov start
.onLoad <- function(libname, pkgname) {
.Call(warp_init_library, asNamespace("warp"))
}
# nocov end
|
/scratch/gouwar.j/cran-all/cranData/warp/R/zzz.R
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(warp)
## -----------------------------------------------------------------------------
before_dst <- as.POSIXct("1970-04-26 01:59:59", tz = "America/New_York")
before_dst
before_dst + 1
## -----------------------------------------------------------------------------
x <- as.POSIXct("1970-04-26 00:00:00", tz = "America/New_York") + 3600 * 0:7
data.frame(
x = x,
hour = warp_distance(x, "hour", every = 2)
)
## -----------------------------------------------------------------------------
y <- as.POSIXct("1970-04-26 22:00:00", tz = "America/New_York") + 3600 * 0:5
data.frame(
y = y,
hour = warp_distance(y, "hour", every = 2)
)
## -----------------------------------------------------------------------------
# Or call `lubridate::force_tz(x, "UTC")`
force_utc <- function(x) {
x_lt <- as.POSIXlt(x)
x_lt <- unclass(x_lt)
attributes(x) <- NULL
out <- x + x_lt$gmtoff
as.POSIXct(out, tz = "UTC", origin = "1970-01-01")
}
x_utc <- force_utc(x)
y_utc <- force_utc(y)
x_utc
## -----------------------------------------------------------------------------
data.frame(
x_utc = x_utc,
hour = warp_distance(x_utc, "hour", every = 2)
)
data.frame(
y_utc = y_utc,
hour = warp_distance(y_utc, "hour", every = 2)
)
## -----------------------------------------------------------------------------
before_fallback <- as.POSIXct("1970-10-25 01:00:00", tz = "America/New_York")
before_fallback
# add 1 hour of seconds
before_fallback + 3600
## -----------------------------------------------------------------------------
x <- as.POSIXct("1970-10-25 00:00:00", tz = "America/New_York") + 3600 * 0:7
x
data.frame(
x = x,
hour = warp_distance(x, "hour", every = 2)
)
## -----------------------------------------------------------------------------
y <- as.POSIXct("1970-10-25 22:00:00", tz = "America/New_York") + 3600 * 0:5
y
data.frame(
y = y,
hour = warp_distance(y, "hour", every = 2)
)
## -----------------------------------------------------------------------------
x_utc <- force_utc(x)
x_utc
## -----------------------------------------------------------------------------
data.frame(
x_utc = x_utc,
hour = warp_distance(x_utc, "hour", every = 2)
)
|
/scratch/gouwar.j/cran-all/cranData/warp/inst/doc/hour.R
|
---
title: "Hour Distances and Daylight Savings"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Hour Distances and Daylight Savings}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(warp)
```
`period = "hour"` should work as expected at all times when using a time zone that doesn't have daylight savings, like UTC or EST. With a time zone that has DST, like America/New_York, some additional explanation is required, especially when `every > 1`.
## Spring Forward Gap
In America/New_York's time zone, as time was about to reach `1970-04-26 02:00:00`, daylight savings kicked in and time shifted forward 1 hour, so the next time was actually `1970-04-26 03:00:00`.
```{r}
before_dst <- as.POSIXct("1970-04-26 01:59:59", tz = "America/New_York")
before_dst
before_dst + 1
```
`warp_distance()` treats hours 1 and 3 as being side by side, since no hour 2 ever existed. This means that hours (0, 1) and (3, 4) get grouped together in the below example.
```{r}
x <- as.POSIXct("1970-04-26 00:00:00", tz = "America/New_York") + 3600 * 0:7
data.frame(
x = x,
hour = warp_distance(x, "hour", every = 2)
)
```
Because `period = "hour"` just computes the running number of 2 hour periods from the `origin`, this pattern carries forward into the next day to have a contiguous stream of values. This can be somewhat confusing, since hours 0 and 1 don't get grouped together on the 27th.
```{r}
y <- as.POSIXct("1970-04-26 22:00:00", tz = "America/New_York") + 3600 * 0:5
data.frame(
y = y,
hour = warp_distance(y, "hour", every = 2)
)
```
One way that you can sort of get around this is by using lubridate's `force_tz()` function to force a UTC time zone with the same clock time as your original date. I've mocked up a poor man's version of that function below.
```{r}
# Or call `lubridate::force_tz(x, "UTC")`
force_utc <- function(x) {
x_lt <- as.POSIXlt(x)
x_lt <- unclass(x_lt)
attributes(x) <- NULL
out <- x + x_lt$gmtoff
as.POSIXct(out, tz = "UTC", origin = "1970-01-01")
}
x_utc <- force_utc(x)
y_utc <- force_utc(y)
x_utc
```
In UTC, hour 2 exists, so groups are created as (0, 1), (2, 3), and so on, even though hour 2 doesn't actually exist in America/New_York because of the DST gap. This has the effect of limiting the (2, 3) group to a maximum size of 1, since only hour 3 is possible in the data.
```{r}
data.frame(
x_utc = x_utc,
hour = warp_distance(x_utc, "hour", every = 2)
)
data.frame(
y_utc = y_utc,
hour = warp_distance(y_utc, "hour", every = 2)
)
```
## Fall Backwards Overlap
In America/New_York's time zone, as time was about to reach `1970-10-25 02:00:00`, daylight savings kicked in and time shifted backwards 1 hour, so the next time was actually `1970-10-25 01:00:00`. This means there are 2 full hours with an hour value of 1 on that day.
```{r}
before_fallback <- as.POSIXct("1970-10-25 01:00:00", tz = "America/New_York")
before_fallback
# add 1 hour of seconds
before_fallback + 3600
```
Because these are two distinct hours, `warp_distance()` treats them as such, so in the example below a group of (1 EDT, 1 EST) gets created. Since daylight savings is currently active, we also have the situation described above where hour 0 and hour 1 are not grouped together.
```{r}
x <- as.POSIXct("1970-10-25 00:00:00", tz = "America/New_York") + 3600 * 0:7
x
data.frame(
x = x,
hour = warp_distance(x, "hour", every = 2)
)
```
This fallback adjustment actually realigns hours 0 and 1 in the next day, since the 25th has 25 hours.
```{r}
y <- as.POSIXct("1970-10-25 22:00:00", tz = "America/New_York") + 3600 * 0:5
y
data.frame(
y = y,
hour = warp_distance(y, "hour", every = 2)
)
```
As before, one way to sort of avoid this is to force a UTC time zone.
```{r}
x_utc <- force_utc(x)
x_utc
```
The consequences of this are that you have two dates with an hour value of 1. When forced to UTC, these look identical. The groups are as you probably expect with buckets of hours (0, 1), (2, 3), and so on, but now the two dates with hour values of 1 are identical so they fall in the same hour group.
```{r}
data.frame(
x_utc = x_utc,
hour = warp_distance(x_utc, "hour", every = 2)
)
```
## Conclusion
While the implementation of `period = "hour"` is _technically_ correct, I recognize that it isn't the most intuitive operation. More intuitive would be a period value of `"dhour"`, which would correspond to the "hour of the day". This would count the number of hour groups from the origin, like `"hour"` does, but it would reset the `every`-hour counter every time you enter a new day. This has proved challenging to code up, but I hope to incorporate it eventually.
|
/scratch/gouwar.j/cran-all/cranData/warp/inst/doc/hour.Rmd
|
---
title: "Hour Distances and Daylight Savings"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Hour Distances and Daylight Savings}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(warp)
```
`period = "hour"` should work as expected at all times when using a time zone that doesn't have daylight savings, like UTC or EST. With a time zone that has DST, like America/New_York, some additional explanation is required, especially when `every > 1`.
## Spring Forward Gap
In America/New_York's time zone, as time was about to reach `1970-04-26 02:00:00`, daylight savings kicked in and time shifted forward 1 hour, so the next time was actually `1970-04-26 03:00:00`.
```{r}
before_dst <- as.POSIXct("1970-04-26 01:59:59", tz = "America/New_York")
before_dst
before_dst + 1
```
`warp_distance()` treats hours 1 and 3 as being side by side, since no hour 2 ever existed. This means that hours (0, 1) and (3, 4) get grouped together in the below example.
```{r}
x <- as.POSIXct("1970-04-26 00:00:00", tz = "America/New_York") + 3600 * 0:7
data.frame(
x = x,
hour = warp_distance(x, "hour", every = 2)
)
```
Because `period = "hour"` just computes the running number of 2 hour periods from the `origin`, this pattern carries forward into the next day to have a contiguous stream of values. This can be somewhat confusing, since hours 0 and 1 don't get grouped together on the 27th.
```{r}
y <- as.POSIXct("1970-04-26 22:00:00", tz = "America/New_York") + 3600 * 0:5
data.frame(
y = y,
hour = warp_distance(y, "hour", every = 2)
)
```
One way that you can sort of get around this is by using lubridate's `force_tz()` function to force a UTC time zone with the same clock time as your original date. I've mocked up a poor man's version of that function below.
```{r}
# Or call `lubridate::force_tz(x, "UTC")`
force_utc <- function(x) {
x_lt <- as.POSIXlt(x)
x_lt <- unclass(x_lt)
attributes(x) <- NULL
out <- x + x_lt$gmtoff
as.POSIXct(out, tz = "UTC", origin = "1970-01-01")
}
x_utc <- force_utc(x)
y_utc <- force_utc(y)
x_utc
```
In UTC, hour 2 exists, so groups are created as (0, 1), (2, 3), and so on, even though hour 2 doesn't actually exist in America/New_York because of the DST gap. This has the effect of limiting the (2, 3) group to a maximum size of 1, since only hour 3 is possible in the data.
```{r}
data.frame(
x_utc = x_utc,
hour = warp_distance(x_utc, "hour", every = 2)
)
data.frame(
y_utc = y_utc,
hour = warp_distance(y_utc, "hour", every = 2)
)
```
## Fall Backwards Overlap
In America/New_York's time zone, as time was about to reach `1970-10-25 02:00:00`, daylight savings kicked in and time shifted backwards 1 hour, so the next time was actually `1970-10-25 01:00:00`. This means there are 2 full hours with an hour value of 1 on that day.
```{r}
before_fallback <- as.POSIXct("1970-10-25 01:00:00", tz = "America/New_York")
before_fallback
# add 1 hour of seconds
before_fallback + 3600
```
Because these are two distinct hours, `warp_distance()` treats them as such, so in the example below a group of (1 EDT, 1 EST) gets created. Since daylight savings is currently active, we also have the situation described above where hour 0 and hour 1 are not grouped together.
```{r}
x <- as.POSIXct("1970-10-25 00:00:00", tz = "America/New_York") + 3600 * 0:7
x
data.frame(
x = x,
hour = warp_distance(x, "hour", every = 2)
)
```
This fallback adjustment actually realigns hours 0 and 1 in the next day, since the 25th has 25 hours.
```{r}
y <- as.POSIXct("1970-10-25 22:00:00", tz = "America/New_York") + 3600 * 0:5
y
data.frame(
y = y,
hour = warp_distance(y, "hour", every = 2)
)
```
As before, one way to sort of avoid this is to force a UTC time zone.
```{r}
x_utc <- force_utc(x)
x_utc
```
The consequences of this are that you have two dates with an hour value of 1. When forced to UTC, these look identical. The groups are as you probably expect with buckets of hours (0, 1), (2, 3), and so on, but now the two dates with hour values of 1 are identical so they fall in the same hour group.
```{r}
data.frame(
x_utc = x_utc,
hour = warp_distance(x_utc, "hour", every = 2)
)
```
## Conclusion
While the implementation of `period = "hour"` is _technically_ correct, I recognize that it isn't the most intuitive operation. More intuitive would be a period value of `"dhour"`, which would correspond to the "hour of the day". This would count the number of hour groups from the origin, like `"hour"` does, but it would reset the `every`-hour counter every time you enter a new day. This has proved challenging to code up, but I hope to incorporate it eventually. In the meantime, a rough approximation is sketched below.
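One rough way to approximate that kind of grouping is to combine the `"day"` distance with the hour of the day taken from `POSIXlt`. The helper below is only a sketch (`dhour_groups()` is not part of warp) and it makes no attempt to handle the DST edge cases discussed above.
```{r}
# Not part of warp: a rough stand-in for a hypothetical "dhour" period.
# Groups by day first, then by the every-hour bucket within that day.
dhour_groups <- function(x, every = 1L) {
  day <- warp_distance(x, "day")
  hour_of_day <- as.POSIXlt(x)$hour %/% every
  paste(day, hour_of_day, sep = "-")
}

dhour_groups(x_utc, every = 2)
```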
|
/scratch/gouwar.j/cran-all/cranData/warp/vignettes/hour.Rmd
|
#' Compute the empirical L_2 distance related to the warping function.
#'
#' This function returns the empirical L_2 distance between two functions, the
#' first one being warped.
#'
#' @param t A vector of numbers, corresponding to time points.
#' @param f A vector of numbers, corresponding to the evaluated function.
#' @param g A vector of numbers, corresponding to the evaluated function.
#' @param theta A vector of warping parameters.
#' @param splineBasisW A matrix, corresponding to the spline basis for
#' the warping functions, evaluated at the time points.
#'
#' @return A list, with crit the distance.
#'
#'
#'
criterion = function (t,f,g,theta,splineBasisW){
## initialization
T = length(t)
diff = rep(0,T-1)
## warpTime
A = warpTimeFunction(splineBasisW,theta,t)
approxF = approxfun(A$warpTime,f)
## Computation of the criterion
for (j in c(1:(T-1))) {
diff[j] = (approxF(t[j]) - g[j])^2 * (t[j+1]-t[j])
}
crit = sqrt(sum(diff))
result = list(crit = crit)
return(result)
}
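# Illustration only (not run by the package): with a zero warping parameter the
# warp is the identity, so the criterion reduces to a discretized L_2 distance
# between f and g. Assumes the fda package for the spline basis.
#   library(fda)
#   t <- seq(0, 1, length.out = 50)
#   basisW <- eval.basis(t, create.bspline.basis(c(0, 1), norder = 2))
#   f <- sin(2 * pi * t); g <- sin(2 * pi * t + 0.1)
#   criterion(t, f, g, theta = rep(0, ncol(basisW)), basisW)$crit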
|
/scratch/gouwar.j/cran-all/cranData/warpMix/R/criterion.R
|
#' Estimate the warping parameters.
#'
#' This function estimates the warping parameters, knowing the observations and
#' the individual aligned curves.
#'
#' @param t A vector of numbers, corresponding to time points.
#' @param y A matrix of numbers, corresponding to observations (size: T * n).
#' @param splineBasisW A matrix, corresponding to the spline basis for
#' the warping functions, evaluated at the time points.
#' @param indSignal A matrix, corresponding to the individual aligned curves.
#' @param thetaObs A matrix, corresponding to initial values for the warping parameters.
#'
#' @return A list, with theta, a matrix of estimated warping parameters,
#' and wT, the corresponding warping functions.
#'
#'
estimationTheta = function(t,y, splineBasisW,indSignal, thetaObs) {
## Initialization
n = dim(y)[2]
mW = dim(splineBasisW)[2]
thetaInit = thetaObs
thetaOpt = matrix(rep(0,n*mW),ncol=n)
warpTime = matrix(rep(0, n* length(t)),ncol = n)
## Computing the warping function -> computing the warping parameters theta
for (i in c(1:n)) {
MedCurv = indSignal[,i]
crit = function (theta) criterion(t,y[,i],MedCurv,theta,splineBasisW)
a = optim(thetaInit[,i],crit)
thetaOpt[,i] = a$par
warpTime[,i] = warpTimeFunction(splineBasisW,thetaOpt[,i] ,t)$warpTime
}
result = list(theta = thetaOpt, wT = warpTime)
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/warpMix/R/estimationTheta.R
|
#' Initialize the functional parameters (associated with the aligned curves).
#'
#' This function initializes the mean curve and the individual effects U_i related to
#' the aligned curves.
#'
#' @param t A vector of numbers, corresponding to time points.
#' @param y A matrix of numbers, corresponding to observations (size: T * n).
#' @param splineBasisMu A matrix, corresponding to the spline basis for
#' the global mean function, evaluated at the time points.
#' @param splineBasisU A matrix, corresponding to the spline basis for
#' the individual curves, evaluated at the time points.
#' @param warpTime A matrix, corresponding to warping time points.
#'
#' @return A list, with x, aligned curves, alphaMu the coefficients of the mean curve,
#' sigmaEpsilon the variance of the noise, sigmaU the variance of the random effects,
#' and indSignal the individual curves.
#'
#' @import lme4
initialisationPara = function(t,y,splineBasisMu,splineBasisU,warpTime){
## Initialization
n = dim(y)[2]
x = y
## warped process X
for (i in c(1:n)){
approxX = approxfun(warpTime[,i],y[,i])
x[,i] = approxX(t)
}
## melting the data to compute the initialisation of parameters in the mixed model
splineBasisMuMelt=matrix(rep(0,dim(splineBasisMu)[1]*n * dim(splineBasisMu)[2]),
ncol = dim(splineBasisMu)[2])
for (j in c(1:(dim(splineBasisMu)[2]))){
splineBasisMuMelt[,j] = rep(splineBasisMu[,j],n)}
splineBasisUMelt=matrix(rep(0,dim(splineBasisU)[1]*n * dim(splineBasisU)[2]),
ncol = dim(splineBasisU)[2])
for (j in c(1: (dim(splineBasisU)[2]))){
splineBasisUMelt[,j] = rep(splineBasisU[,j],n)}
xMelt <- melt(x)
XM = xMelt[,3]
g = xMelt[,2]
## Initialization for the mixed model
p = dim(splineBasisU)[2]
f = as.formula(XM ~ -1 + splineBasisMuMelt)
for (l in 1:p){
f = update.formula(f,as.formula(paste( "~ . + (splineBasisUMelt[,",eval(l),"] | g)")))
}
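## The resulting formula is
## XM ~ -1 + splineBasisMuMelt + (splineBasisUMelt[, 1] | g) + ... + (splineBasisUMelt[, p] | g),
## i.e. fixed effects for the mean-curve coefficients plus one independent
## random-effect block per column of the U basis, grouped by the curve index g.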
## Inference for the mixed effect model
Res = lmer(f, REML = FALSE)
ResS = summary(Res)
alphaMu = ResS$coefficients[1:(dim(ResS$coefficients)[1])]
sigmaU = as.data.frame(VarCorr(Res))[1:dim(splineBasisUMelt)[2],4]
sigmaEpsilon = as.data.frame(VarCorr(Res))[dim(splineBasisUMelt)[2]+1,4]
## Fitted individual signal mu+U
Ufitted = fitted(Res)
uFittedMatrix = matrix(0, nrow = length(t),ncol = n)
for (i in c(1:n)){
uFittedMatrix[,i] = Ufitted[((i-1)*length(t)+1):(i*length(t))]
}
result = list(x = x, alphaMu = alphaMu, sigmaEpsilon = sigmaEpsilon, sigmaU = sigmaU, indSignal = uFittedMatrix)
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/warpMix/R/initialisationPara.R
|
#' Initialize the warping parameters.
#'
#' This function initializes the warping parameters
#'
#' @param t A vector of numbers, corresponding to time points.
#' @param y A matrix of numbers, corresponding to observations (size: T * n).
#' @param splineBasisW A matrix, corresponding to the spline basis for
#' the warping functions, evaluated at the time points.
#'
#' @return A list, with theta, a matrix of estimated warping parameters,
#' and wT, the corresponding warping functions.
#'
#'
#' @import fda
initialisationTheta = function(t,y,splineBasisW) {
## Initialization
n = dim(y)[2]
mW = dim(splineBasisW)[2]
thetaInit = rep(0,mW)
thetaOpt = matrix(rep(0,n*mW),ncol=n)
## Initialization of the central curve
functBoxPlot = fbplot(y,t,plot=FALSE)
MedCurv = y[,functBoxPlot$medcurv]
warpTime = matrix(rep(0, n* length(t)),ncol = n)
## Computing the warping function -> computing the warping parameters theta
for (i in c(1:n)) {
crit = function (theta) criterion(t,y[,i],MedCurv,theta,splineBasisW)
a = optim(thetaInit,crit)
thetaOpt[,i] = a$par
warpTime[,i] = warpTimeFunction(splineBasisW,thetaOpt[,i] ,t)$warpTime
}
result = list(theta0 = thetaOpt, wT = warpTime)
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/warpMix/R/initialisationTheta.R
|
#' Update the functional parameters (associated with the aligned curves).
#'
#' This function updates the estimates of the mean curve and
#' the individual effects U_i related to the aligned curves.
#'
#' @param t A vector of numbers, corresponding to time points.
#' @param y A matrix of numbers, corresponding to observations (size: T * n).
#' @param splineBasisMu A matrix, corresponding to the spline basis for
#' the global mean function, evaluated at the time points.
#' @param splineBasisU A matrix, corresponding to the spline basis for
#' the individual curves, evaluated at the time points.
#' @param warpTime A matrix, corresponding to warping time points.
#'
#' @return A list, with x, aligned curves, alphaMu the coefficients of the mean curve,
#' sigmaEpsilon the variance of the noise, sigmaU the variance of the random effects,
#' and indSignal the individual curves.
#'
#' @import lme4
majPara = function(t,y,splineBasisMu,splineBasisU,warpTime){
## Initialization
n = dim(y)[2]
x = y
## warped process X
for (i in c(1:n)){
approxY = approxfun(warpTime[,i],y[,i])
x[,i] = approxY(t)
}
## melting the data to compute the initialisation of parameters in the mixed model
splineBasisMuMelt=matrix(rep(0,dim(splineBasisMu)[1]*n * (dim(splineBasisMu)[2])),
ncol = dim(splineBasisMu)[2])
for (j in c(1:(dim(splineBasisMu)[2]))){
splineBasisMuMelt[,j] = rep(splineBasisMu[,j],n)}
splineBasisUMelt=matrix(rep(0,dim(splineBasisU)[1]*n * (dim(splineBasisU)[2])),
ncol = dim(splineBasisU)[2])
for (j in c(1: (dim(splineBasisU)[2]))){
splineBasisUMelt[,j] = rep(splineBasisU[,j],n)}
xMelt <- melt(x)
XM = xMelt[,3]
g = xMelt[,2]
## Initialization for the mixed model
p = dim(splineBasisU)[2]
f = as.formula(XM ~ -1 + splineBasisMuMelt)
for (l in 1:p){
f = update.formula(f,as.formula(paste( "~ . + (splineBasisUMelt[,",eval(l),"] | g)")))
}
## Inference for the mixed effect model
Res = lmer(f, REML = FALSE)
ResS = summary(Res)
alphaMu = ResS$coefficients[1:(dim(ResS$coefficients)[1])]
sigmaU = as.data.frame(VarCorr(Res))[1:dim(splineBasisUMelt)[2],4]
sigmaEpsilon = as.data.frame(VarCorr(Res))[dim(splineBasisUMelt)[2]+1,4]
## Fitted individual signal mu+U
Ufitted = fitted(Res)
uFittedMatrix = matrix(0, nrow = length(t),ncol = n)
for (i in c(1:n)){
uFittedMatrix[,i] = Ufitted[((i-1)*length(t)+1):(i*length(t))]
}
result = list(x = x, alphaMu = alphaMu, sigmaEpsilon = sigmaEpsilon,
sigmaU = sigmaU, indSignal = uFittedMatrix)
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/warpMix/R/majPara.R
|
#' Predict the warping parameters.
#'
#' This function predicts the warping parameters by fitting a linear mixed-effect
#' model to their previously estimated values.
#'
#' @param thetaObs A matrix (size: n * T) corresponding to the estimates of the warping parameters.
#' @param sigmaEpsilon A number, defining the variance of the noise in the linear mixed-
#' effect model fitted on the warping parameters.
#'
#' @return A list, with theta, a matrix of predicted warping parameters,
#' sigmaE the covariance of the random effects, and theta0 the mean.
#'
#'
predictionTheta = function(thetaObs,sigmaEpsilon){
## Initialization
thetaObs = t(thetaObs)
A = dim(thetaObs)
n = A[1]
T = A[2]
## Compute the prediction
theta0hat = apply(thetaObs,2, mean)
sigmaEhat = cov(thetaObs) - sigmaEpsilon * diag(1,T)
effetAlea = sigmaEhat %*% solve(cov(thetaObs)) %*% t(thetaObs)
result = list(theta = effetAlea, sigmaE = sigmaEhat, theta0 = theta0hat)
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/warpMix/R/predictionTheta.R
|
#' Estimate the nonlinear mixed-effect functional model.
#'
#' This function returns estimates of the parameters in the
#' nonlinear functional mixed-effect model used to warp the
#' data and estimate the underlying mixed-effect model.
#'
#' Notice that the warping parameters are considered as random effects.
#'
#' @param t A vector of numbers, corresponding to time points.
#' @param y A matrix of numbers, corresponding to observations (size: T * n).
#' @param baseMu A B-spline basis used to decompose the global mean.
#' @param baseU A B-spline basis used to decompose the individual effects.
#' @param baseW A B-spline basis used to decompose the warping functions.
#' @param sigmaEpsilonTilde A number, defining the variance of the noise in the linear mixed-
#' effect model fitted on the warping parameters.
#' @param threshold A number, defining the threshold of convergence.
#' @param nIte Maximum number of iterations
#'
#' @return A list, with fonct, functional quantities (indCurvAlign
#' the individual aligned curves, warping the warping functions and theta,
#' the warping parameters), para the estimates of parameters (alphaMu,
#' sigmaU,theta0, sigmaTheta, sigmaEpsHat), dist the criterion computed to
#' reach the convergence, and others other values (successAlphaMu,
#' initTheta, initPara, CPUtime).
#' @examples
#' T = seq(0.5,0.841,length.out = 9)
#' n = 10
#' t = c(qnorm(T),1)
#' mu = cos(2*pi*t+pi/2)
#' library(fda)
#' baseMu = create.bspline.basis(c(0,max(t)), norder = 2, breaks = seq(0,1,0.5))
#' splineBasisMu = eval.basis(t,baseMu)
#' alphaMu = Data2fd(mu,argvals = t, baseMu)$coef
#' muApprox = (splineBasisMu) %*% alphaMu
#' baseU = create.bspline.basis(c(0,max(t)),norder = 2, breaks = seq(0,1,0.5))
#' mU = baseU$nbasis
#' sigmaU = diag(0.1,mU)
#' library(MASS)
#' alphaU = t(mvrnorm(n,rep(0,mU),sigmaU))
#' splineBasisU = eval.basis(t,baseU)
#' U = splineBasisU %*% alphaU
#' epsilon = t(mvrnorm(n,rep(0,length(t)),0.01 * diag(1,length(t))))
#' X = as.vector(muApprox) + U + epsilon
#' baseW = create.bspline.basis(c(0,max(t)), norder = 2, breaks = c(0,0.6,1))
#' mW = baseW$nbasis
#' splineBasisW = eval.basis(t,baseW)
#' theta = t(mvrnorm(n,rep(0,mW),diag(0.1,mW) + 1e-3 * diag(1,mW)))
#' wtheta = matrix(rep(0,n*length(t)),ncol = n)
#' for (i in c(1:n)){
#' wtheta[,i] = warpTimeFunction(splineBasisW,theta[,i],t)$warpTime
#' }
#' Y = matrix(0, nrow = length(t), ncol = n)
#' for (i in c(1:n)){
#' y = approxfun(wtheta[,i],X[,i])
#' Y[,i] = y(t)
#' }
#' warpMix(t,Y,baseMu, baseU, baseW, nIte = 2)
#'
#'
#'
#' @import fda
#' @import fields
#' @import MASS
#' @import reshape2
#' @import lme4
#' @importFrom stats approxfun as.formula cov fitted integrate optim update.formula
#' @export
#'
warpMix = function (t,y,baseMu,baseU,baseW,sigmaEpsilonTilde = 10^-3, threshold = 10^-3, nIte = 100){
t0 = Sys.time()
##basis
splineBasisMu = eval.basis(t,baseMu)
splineBasisU = eval.basis(t,baseU)
splineBasisW = eval.basis(t,baseW)
## dimension
n = dim(y)[2]
mW = dim(splineBasisW)[2]
dimT = dim(y)[1]
theta0 = rep(0,dim(splineBasisW)[2])
ite=1;
diffT = rep(0,length(t)-1)
for (j in c(1:(length(t)-1))){
diffT[j] = t[j+1]-t[j]
}
## initialization of theta (warping parameter)
initTheta = initialisationTheta(t,y,splineBasisW)
thetaIt = initTheta$theta0
warptimeInit = initTheta$wT
## computation of theta0 and sigmaTheta
theta0 = apply(thetaIt,1,mean)
sigmaTheta = cov(t(thetaIt))
## initialization of alphaMu,sigmaEpsilon,alphaU,sigmaU
initPara = initialisationPara(t,y,splineBasisMu,splineBasisU,warptimeInit)
alphaMu = initPara$alphaMu
sigmaU = initPara$sigmaU
sigmaEpsilon = initPara$sigmaEpsilon
indSignalChap = initPara$indSignal
muChap = splineBasisMu %*% alphaMu
d1=1
d2 =1
it = 1
distGlob = 1
diffCrit = 1
Crit = 0
crit = 1
it = 0
vectAlphaMu = matrix(0,ncol = 150,nrow = dim(splineBasisMu)[2])
a = theta0
warpTime = warptimeInit
diff = rep(0,length(t)-1)
somme = rep(0,n)
cpt = 0
while (((cpt <5)&&(it<nIte))){
it = it+1
## observations for theta (warping parameter)
est = estimationTheta(t,y, splineBasisW,indSignalChap, thetaIt)
thetaObs = est$theta
wThetaObs = est$wT
## global criterion
for (i in c(1:n)){
approxY = approxfun(wThetaObs[,i],y[,i])
x = approxY(t)
for (j in 1:(length(t)-1)){
diff[j] = (x[j] - indSignalChap[j,i])^2 * (t[j+1]-t[j])
}
somme[i] = sqrt(sum(diff))
}
## computation of theta by mixed effect modelling
pred = predictionTheta(thetaObs,sigmaEpsilonTilde)
thetaPred = pred$theta
sigmaTheta = pred$sigmaE + sigmaEpsilonTilde * diag(1,dim(thetaPred)[1])
for (i in c(1:n)){
warpTime[,i] = warpTimeFunction(splineBasisW,thetaPred[,i],t)$warpTime
}
thetaIt = thetaPred
theta0 = apply(thetaIt,1,mean)
## global criterion
for (i in c(1:n)){
approxY = approxfun(warpTime[,i],y[,i])
x = approxY(t)
for (j in 1:(length(t)-1)){
diff[j] = (x[j] - indSignalChap[j,i])^2 * (t[j+1]-t[j])
}
somme[i] = sqrt(sum(diff))
}
##for alphaMu,sigmaEpsilon,alphaU,sigmaU
warpTime = wThetaObs
para = majPara(t,y,splineBasisMu,splineBasisU,warpTime)
alphaMu = para$alphaMu
indSignalChap = para$indSignal
vectAlphaMu[,it] = alphaMu
## global criterion
for (i in c(1:n)){
approxY = approxfun(warpTime[,i],y[,i])
x = approxY(t)
for (j in 1:(length(t)-1)){
diff[j] = (x[j] - indSignalChap[j,i])^2 * (t[j+1]-t[j])
}
somme[i] = sqrt(sum(diff))
}
diffCrit = abs(crit - sum(somme))
crit = sum(somme)
if (diffCrit < threshold){
cpt = cpt+1
}
else cpt = 0
Crit = c(Crit,crit)
## Update parameters
muChap = splineBasisMu %*% alphaMu
sigmaU = para$sigmaU
sigmaEpsHat = para$sigmaEpsilon
}
t1 = Sys.time()
dT = t1-t0
fonct = list(indCurvAlign = indSignalChap, warping = warpTime,theta = thetaIt)
para = list(alphaMu = alphaMu, sigmaU = sigmaU, theta0 = theta0,
sigmaTheta = sigmaTheta, sigmaEpsHat=sigmaEpsHat)
dist = list(GlobalCrit = Crit)
others = list(successAlphaMu = vectAlphaMu[, 1:it], initTheta = initTheta, initPara = initPara, CPUtime = dT)
result = list(fonction = fonct,
parameters = para,
dist = dist,
others = others)
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/warpMix/R/warpMix.R
|
#' Compute the warped time points
#'
#' This function returns warped time points for a known warping parameter theta.
#'
#' @param splineBasisW A matrix, corresponding to the spline basis for
#' the warping functions, evaluated at the time points.
#' @param theta A vector of warping parameters.
#' @param t A vector of numbers, corresponding to time points.
#'
#' @export
#' @return A vector, corresponding to the warped time points.
#'
warpTimeFunction = function(splineBasisW,theta,t){
## Tests
if (is.vector(t) == FALSE || is.matrix(splineBasisW) == FALSE || is.vector(theta) == FALSE)
stop(paste(sQuote("t"), "and", sQuote("theta"), "must be vectors, and", sQuote("splineBasisW"), "must be a matrix"))
if ((length(t) == dim(splineBasisW)[1]) == FALSE)
stop(paste(sQuote("splineBasisW"), "must have as much rows as the length of ", sQuote("t")))
## Initialization
T = length(t)
mW = dim(splineBasisW)[2]
## Functions h tilde and h
hTilde = splineBasisW %*% theta
hTildeFun = approxfun(t,hTilde)
h = hTilde - integrate(hTildeFun,0,1,stop.on.error = FALSE)$value
## Function w
hFun = approxfun(t,exp(h))
num1 = rep(0,length(t))
num = num1
for (j in c(1:(length(t)-1))){
num1[j+1] = integrate (hFun, t[j], t[j+1])$value
}
num = cumsum(num1)
w = num/num[length(t)]
result = list(warpTime = w)
return(result)
}
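# Illustration only (not run by the package): a toy two-column basis stands in
# for a real spline basis; theta = c(0, 0) gives (numerically) the identity
# warp, and any theta yields an increasing warp ending at 1.
#   t <- seq(0, 1, length.out = 20)
#   B <- cbind(t, t^2)
#   warpTimeFunction(B, c(0, 0), t)$warpTime  # ~ t
#   warpTimeFunction(B, c(2, 0), t)$warpTime  # monotone, last value 1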
|
/scratch/gouwar.j/cran-all/cranData/warpMix/R/warpTimeFunction.R
|
################################################################################
#
#' Urban Water and Sanitation Survey Dataset
#'
#' See the README at
#' \href{https://github.com/katilingban/washdata#readme}{GitHub}
#'
#' @docType data
#' @name washdata
#' @keywords internal
#'
#
"_PACKAGE"
################################################################################
#
#' Population Data for Dhaka, Bangladesh
#'
#' Grid-based population of Dhaka, Bangladesh
#'
#' @format A data frame with four variables:
#' \describe{
#' \item{\code{psu}}{primary sampling unit (PSU) ID}
#' \item{\code{zone}}{survey enumeration area}
#' \item{\code{type}}{slum (1) or non-slum (2)}
#' \item{\code{pop}}{population}
#' }
#'
#' @examples
#' popBGD
#
"popBGD"
################################################################################
#
#' WASH Survey Raw Data for Dhaka, Bangladesh
#'
#' WASH survey raw data collected by WSUP in Dhaka, Bangladesh
#'
#' @format A data frame with 217 variables and 1282 entries:
#' \describe{
#' \item{\code{country}}{Country}
#' \item{\code{ccode}}{Country Code}
#' \item{\code{zone}}{Enumeration Area}
#' \item{\code{type}}{Sampling Point Type}
#' \item{\code{quadrat}}{Quadrat Number}
#' \item{\code{hhid}}{Household ID}
#' \item{\code{uniqueID}}{Unique ID}
#' \item{\code{month}}{Month of Survey}
#' \item{\code{year}}{Year of Survey}
#' \item{\code{deployment}}{Deployment}
#' \item{\code{enumerator}}{Enumerator}
#' \item{\code{status}}{Status}
#' \item{\code{responseCode}}{Response Code}
#' \item{\code{draftDate}}{Drafted On}
#' \item{\code{submissionDate}}{Submitted On}
#' \item{\code{ipAddress}}{IP Address}
#' \item{\code{surveyType}}{Survey type}
#' \item{\code{psu}}{primary sampling unit (PSU) ID}
#' \item{\code{psuTime}}{PSU ID Time Answered}
#' \item{\code{latitude}}{PSU ID Location Answered latitude}
#' \item{\code{longitude}}{PSU ID Location Answered longitude}
#' \item{\code{accuracy}}{PSU ID Location Answered accuracy}
#' \item{\code{altitude}}{PSU ID Location Answered altitude}
#' \item{\code{area1}}{Thana}
#' \item{\code{area1Other}}{Thana Other please specify specify}
#' \item{\code{area1Time}}{Thana Time Answered}
#' \item{\code{area2}}{Ward}
#' \item{\code{slum}}{Slum}
#' \item{\code{gender}}{Respondent gender}
#' \item{\code{landOwnStatus}}{What is the respondent land ownership status}
#' \item{\code{nWomen}}{Number of adult women aged 16 to 60}
#' \item{\code{nMen}}{Number of adult men aged 16 to 60}
#' \item{\code{nOldWomen}}{Number of older women aged _60}
#' \item{\code{nOldMen}}{Number of older men aged _60}
#' \item{\code{nGirls}}{Number of girls aged 4 15}
#' \item{\code{nBoys}}{Number of boys aged 4 15}
#' \item{\code{nInfants}}{Number of infants _3}
#' \item{\code{nMobility}}{How many people with reduced mobility are there in this household}
#' \item{\code{water1}}{Main source of drinking water}
#' \item{\code{water2}}{Main source of drinking water Other please specify specify}
#' \item{\code{water3}}{Is the service provision formal or informal}
#' \item{\code{water4}}{Is the service provision formal or informal Don t Know}
#' \item{\code{water5}}{Is main source of drinking water a WSUP supported facility}
#' \item{\code{water6}}{Is main source of drinking water a WSUP supported facility Don t Know}
#' \item{\code{water7}}{Typical number of hours per day that water is available}
#' \item{\code{water8}}{Typical number of hours per day that water is available Don t Know}
#' \item{\code{water9}}{Typical days per week that water is available}
#' \item{\code{water10}}{Typical days per week that water is available Don t Know}
#' \item{\code{water11}}{Is water typically available and sufficient throughout the whole year}
#' \item{\code{water12}}{Is water typically available and sufficient throughout the whole year Comments}
#' \item{\code{water13}}{Is water typically available and sufficient throughout the whole year Don t Know}
#' \item{\code{water14}}{How long does it take to go there get water and come back}
#' \item{\code{water15}}{How long does it take to go there get water and come back Don t Know}
#' \item{\code{water16}}{Are you satisfied that the distance between your main water source and your home is acceptable}
#' \item{\code{water17}}{Are you satisfied that the distance between your main water source and your home is acceptable Don t Know}
#' \item{\code{water18}}{Do you consider that the queuing time at your main water source is acceptable}
#' \item{\code{water19}}{Do you consider that the queuing time at your main water source is acceptable Not Applicable}
#' \item{\code{water20}}{Do you consider that the queuing time at your main water source is acceptable Don t Know}
#' \item{\code{water21}}{Which household member s usually collect the water}
#' \item{\code{water22}}{Which household member s usually collect the water Other please specify specify}
#' \item{\code{water23}}{Typical amount of water used by your household per person per day}
#' \item{\code{water24}}{Typical amount of water used by your household per person per day Don t Know}
#' \item{\code{water25}}{Do you consider the amount of water supplied from your primary water source to be sufficient to meet your household_s requirements each day}
#' \item{\code{water26}}{Do you consider the amount of water supplied from your primary water source to be sufficient to meet your household_s requirements each day Don t Know}
#' \item{\code{water27}}{Do you also use water from other water sources}
#' \item{\code{water28}}{Do you also use water from other water sources Don t Know}
#' \item{\code{water29}}{Which is your usual secondary water source}
#' \item{\code{water30}}{Which is your usual secondary water source Other please specify specify}
#' \item{\code{water31}}{Which is your usual secondary water source Don t Know}
#' \item{\code{water32}}{Typical amount of money spent on water per week magnitude}
#' \item{\code{water33}}{Typical amount of money spent on water per week units}
#' \item{\code{water34}}{Typical amount of money spent on water per week Don t Know}
#' \item{\code{water35}}{Are you satisfied with the price of your main water source}
#' \item{\code{water36}}{Are you satisfied with the price of your main water source Don t Know}
#' \item{\code{water37}}{Would you be willing to invest in improving your drinking water source}
#' \item{\code{water38}}{Would you be willing to invest in improving your drinking water source Comments}
#' \item{\code{water39}}{Would you be willing to invest in improving your drinking water source Don t Know}
#' \item{\code{water40}}{Are you satisfied with the quality of your main water source}
#' \item{\code{water41}}{Are you satisfied with the quality of your main water source Don t Know}
#' \item{\code{water42}}{What would increase your satisfaction}
#' \item{\code{water43}}{What would increase your satisfaction Other improvement please specify specify}
#' \item{\code{water44}}{What would increase your satisfaction Don t Know}
#' \item{\code{water45}}{Are you satisfied with the water pressure of your main water source}
#' \item{\code{water46}}{Are you satisfied with the water pressure of your main water source Don t Know}
#' \item{\code{water47}}{Who do you talk to if there is a problem with the water supply}
#' \item{\code{water48}}{Who do you talk to if there is a problem with the water supply Other please specify specify}
#' \item{\code{water49}}{Who do you talk to if there is a problem with the water supply Not Applicable}
#' \item{\code{water50}}{Who do you talk to if there is a problem with the water supply Don t Know}
#' \item{\code{water51}}{Previous source of drinking water}
#' \item{\code{water52}}{Previous source of drinking water Other please specify specify}
#' \item{\code{water53}}{Previous source of drinking water Don t Know}
#' \item{\code{water54}}{Typical amount of money spent per week on the previous water source magnitude}
#' \item{\code{water55}}{Typical amount of money spent per week on the previous water source units}
#' \item{\code{water56}}{Typical amount of money spent per week on the previous water source Don t Know}
#' \item{\code{water57}}{Why do you use the WSUP supported facility instead of the facility you used before}
#' \item{\code{water58}}{Why do you use the WSUP supported facility instead of the facility you used before Other please specify specify}
#' \item{\code{water59}}{Why do you use the WSUP supported facility instead of the facility you used before Don't Know}
#' \item{\code{san1}}{Usage arrangement for household's sanitation facility}
#' \item{\code{san2}}{Usage arrangement for household's sanitation facility Other, please specify}
#' \item{\code{san3}}{Usage arrangement for household's sanitation facility Don't Know}
#' \item{\code{san4}}{How many people share use of the communal shared toilet}
#' \item{\code{san5}}{How many people share use of the communal shared toilet Don't Know}
#' \item{\code{san6}}{What kind of toilet facility do members in your household usually use}
#' \item{\code{san7}}{What kind of toilet facility do members in your household usually use Other, please specify}
#' \item{\code{san8}}{What kind of toilet facility do members in your household usually use Don't Know}
#' \item{\code{san9}}{Where does your toilet flush to}
#' \item{\code{san10}}{Where does your toilet flush to Other, please specify}
#' \item{\code{san11}}{Where does your toilet flush to Don't Know}
#' \item{\code{san12}}{Is main sanitation facility a WSUP supported facility}
#' \item{\code{san13}}{Is main sanitation facility a WSUP supported facility Not Applicable}
#' \item{\code{san14}}{Is main sanitation facility a WSUP supported facility Don't Know}
#' \item{\code{san15}}{Is water available at the sanitation facility}
#' \item{\code{san16}}{Is water available at the sanitation facility Don't Know}
#' \item{\code{san17}}{Are you satisfied with the status of the hand washing facilities at the sanitation facility}
#' \item{\code{san18}}{Are you satisfied with the status of the hand washing facilities at the sanitation facility Don't Know}
#' \item{\code{san19}}{What would increase your satisfaction 1}
#' \item{\code{san20}}{What would increase your satisfaction Other, please specify}
#' \item{\code{san21}}{What would increase your satisfaction Don't Know 1}
#' \item{\code{san22}}{Is there an electric light in the sanitation facility}
#' \item{\code{san23}}{Is there an electric light in the sanitation facility Don't Know}
#' \item{\code{san24}}{Does the sanitation facility have a lockable door}
#' \item{\code{san25}}{Does the sanitation facility have a lockable door Don't Know}
#' \item{\code{san26}}{Does the sanitation facility have a container for menstrual hygiene management}
#' \item{\code{san27}}{Does the sanitation facility have a container for menstrual hygiene management Don't Know}
#' \item{\code{san28}}{The last time your youngest child passed stools what was done to dispose of the stools}
#' \item{\code{san29}}{The last time your youngest child passed stools what was done to dispose of the stools Not Applicable}
#' \item{\code{san30}}{The last time your youngest child passed stools what was done to dispose of the stools Don't Know}
#' \item{\code{san31}}{How long does it take to go there, queue for the toilet if applicable, and come back}
#' \item{\code{san32}}{How long does it take to go there, queue for the toilet if applicable, and come back Don't Know}
#' \item{\code{san33}}{Typical amount of money spent on the main sanitation facility per week magnitude}
#' \item{\code{san34}}{Typical amount of money spent on the main sanitation facility per week units}
#' \item{\code{san35}}{Typical amount of money spent on the main sanitation facility per week Don't Know}
#' \item{\code{san36}}{Are you satisfied with the price of the sanitation facility}
#' \item{\code{san37}}{Are you satisfied with the price of the sanitation facility Don't Know}
#' \item{\code{san38}}{Did you make an initial investment in the sanitation facility}
#' \item{\code{san39}}{Did you make an initial investment in the sanitation facility Don't Know}
#' \item{\code{san40}}{How much was the initial investment magnitude}
#' \item{\code{san41}}{How much was the initial investment units}
#' \item{\code{san42}}{How much was the initial investment Don't Know}
#' \item{\code{san43}}{Would you be willing to invest in improved sanitation facilities}
#' \item{\code{san44}}{Would you be willing to invest in improved sanitation facilities Comments}
#' \item{\code{san45}}{Would you be willing to invest in improved sanitation facilities Don't Know}
#' \item{\code{san46}}{Are you satisfied with the cleanliness of the sanitation facilities}
#' \item{\code{san47}}{Are you satisfied with the cleanliness of the sanitation facilities Don't Know}
#' \item{\code{san48}}{What would increase your level of satisfaction}
#' \item{\code{san49}}{What would increase your level of satisfaction Other, please specify}
#' \item{\code{san50}}{What would increase your level of satisfaction Don't Know}
#' \item{\code{san51}}{Is everyone in the household able to get to the sanitation facility and use it}
#' \item{\code{san52}}{Is everyone in the household able to get to the sanitation facility and use it Don't Know}
#' \item{\code{san53}}{Who is not using them}
#' \item{\code{san54}}{Who is not using them Other, please specify}
#' \item{\code{san55}}{Who is not using them Don't Know}
#' \item{\code{san56}}{Why not}
#' \item{\code{san57}}{Why not Other, please specify}
#' \item{\code{san58}}{Why not Don't Know}
#' \item{\code{san59}}{Do all household members usually use the available sanitation facilities}
#' \item{\code{san60}}{Do all household members usually use the available sanitation facilities Don't Know}
#' \item{\code{san61}}{Who is not using them 1}
#' \item{\code{san62}}{Who is not using them Other, please specify 1}
#' \item{\code{san63}}{Who is not using them Don't Know 1}
#' \item{\code{san64}}{Why are they not using the facilities}
#' \item{\code{san65}}{Why are they not using the facilities Don't Know}
#' \item{\code{san66}}{Who do you talk to if there is a problem with the sanitation facility}
#' \item{\code{san67}}{Who do you talk to if there is a problem with the sanitation facility Other, please specify}
#' \item{\code{san68}}{Who do you talk to if there is a problem with the sanitation facility Not Applicable}
#' \item{\code{san69}}{Who do you talk to if there is a problem with the sanitation facility Don't Know}
#' \item{\code{san70}}{Do the women and girls in your household feel safe using the sanitation facilities during the day and night}
#' \item{\code{san71}}{Do the women and girls in your household feel safe using the sanitation facilities during the day and night Don't Know}
#' \item{\code{san72}}{Why not 1}
#' \item{\code{san73}}{Why not Don't Know 1}
#' \item{\code{san74}}{Do the women and girls in the household feel comfortable using the sanitation facilities during menstruation}
#' \item{\code{san75}}{Do the women and girls in the household feel comfortable using the sanitation facilities during menstruation Not Applicable}
#' \item{\code{san76}}{Why not 2}
#' \item{\code{san77}}{Why not Other, please specify 1}
#' \item{\code{san78}}{Why not Don't Know 2}
#' \item{\code{san79}}{What materials are used by the women/girls of your house during their menstruation cycle}
#' \item{\code{san80}}{What materials are used by the women/girls of your house during their menstruation cycle Other, please specify}
#' \item{\code{san81}}{How is the cloth washed}
#' \item{\code{san82}}{How is the cloth washed Other, please specify}
#' \item{\code{san83}}{Where do you dispose of the used napkin pad}
#' \item{\code{san84}}{Where do you dispose of the used napkin pad Other, please specify}
#' \item{\code{san85}}{When should you wash your hands}
#' \item{\code{san86}}{When should you wash your hands Other, please specify}
#' \item{\code{san87}}{When should you wash your hands Don't Know}
#' \item{\code{san88}}{Can you show me where you wash your hands}
#' \item{\code{san89}}{Can you show me where you wash your hands Not Applicable}
#' \item{\code{wash60}}{Can you show me where you store your drinking water}
#' \item{\code{wash61}}{Can you show me where you store your drinking water Not Applicable}
#' \item{\code{wash62}}{Can you show me where you store your drinking water Don't Know}
#' \item{\code{san90}}{How often do you empty your pit/septic tank}
#' \item{\code{san91}}{How often do you empty your pit/septic tank Other, please specify}
#' \item{\code{san92}}{How often do you empty your pit/septic tank Not Applicable}
#' \item{\code{san93}}{How often do you empty your pit/septic tank Don't Know}
#' \item{\code{san94}}{How much does it cost to empty magnitude}
#' \item{\code{san95}}{How much does it cost to empty units}
#' \item{\code{san96}}{How much does it cost to empty Not Applicable}
#' \item{\code{san97}}{How much does it cost to empty Don't Know}
#' \item{\code{san98}}{Who empties the pit}
#' \item{\code{san99}}{Who empties the pit Other, please specify}
#' \item{\code{san100}}{Who empties the pit Not Applicable}
#' \item{\code{san101}}{Who empties the pit Don't Know}
#' \item{\code{san102}}{Who pays for the pit to be emptied}
#' \item{\code{san103}}{Who pays for the pit to be emptied Other, please specify}
#' \item{\code{san104}}{Who pays for the pit to be emptied Not Applicable}
#' \item{\code{san105}}{Who pays for the pit to be emptied Don't Know}
#' \item{\code{san106}}{Are you satisfied with the pit emptying service}
#' \item{\code{ppi1}}{How many household members are 12 years old or younger}
#' \item{\code{ppi2}}{Do all household members ages 6 to 12 currently attend a school/educational institution}
#' \item{\code{ppi3}}{In the past year did any household member ever do work for which he/she was paid on a daily basis}
#' \item{\code{ppi4}}{How many rooms does your household occupy excluding rooms used for business}
#' \item{\code{ppi5}}{What is the main construction material of the walls of the main room}
#' \item{\code{ppi6}}{Does the household own any televisions}
#' \item{\code{ppi7}}{How many fans does the household own}
#' \item{\code{ppi8}}{How many mobile phones does the household own}
#' \item{\code{ppi9}}{Does the household own any bicycles, motorcycles/scooters, or motor cars etc}
#' \item{\code{ppi10}}{Does the household own or rent/sharecrop/mortgage in or out 51 or more decimals of cultivable agricultural land excluding uncultivable land and dwelling house/homestead land}
#' \item{\code{ppi11}}{Does the household own or rent/sharecrop/mortgage in or out 51 or more decimals of cultivable agricultural land excluding uncultivable land and dwelling house/homestead land Time Answered}
#' }
#'
#' @examples
#' surveyDataBGD
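#'
#' # As a quick illustration, tabulate one of the documented variables, e.g.
#' # san6 (type of toilet facility usually used by the household)
#' table(surveyDataBGD$san6)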
#'
#
"surveyDataBGD"
################################################################################
#
#' WASH Survey Indicators Data for Dhaka, Bangladesh
#'
#' WASH survey indicators data calculated from survey raw data
#'
#' @format A data frame with 162 variables and 1282 entries
#'
#' @examples
#' indicatorsDataBGD
#'
#
"indicatorsDataBGD"
################################################################################
#
#' PPI Look-up Table for Bangladesh
#'
#' PPI look-up table for Bangladesh to calculate PPI score
#'
#' @format A data frame with 10 variables and 101 entries
#'
#' @examples
#' ppiMatrixBGD
#'
"ppiMatrixBGD"
|
/scratch/gouwar.j/cran-all/cranData/washdata/R/data.R
|
# Generate the washdata package hex sticker from the WASH logo image and
# save it to man/figures/ for use in the package documentation
library(hexSticker)
sticker("inst/figures/wash.png",
        s_x = 1, s_y = 1, s_width = 0.4,
        package = "washdata", p_color = "#FFFFFF", p_y = 1,
        h_size = 1, h_color = "#797842", h_fill = "#ACBB85",
        filename = "man/figures/washdata.png")
|
/scratch/gouwar.j/cran-all/cranData/washdata/inst/createHexSticker.R
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
|
/scratch/gouwar.j/cran-all/cranData/washdata/inst/doc/washdata.R
|
---
title: "Urban Water and Sanitation Survey Dataset"
author: "Ernest Guevarra"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Urban Water and Sanitation Survey Dataset}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Background: the global problem of inadequate access to water and sanitation for the urban poor
Over the past two decades most developing countries have experienced rapid urbanisation as part of a global trend that has brought millions out of poverty, and helped to bridge the gap between the developed and the developing world. However, many of the people migrating to urban environments are concentrated in low-income informal settlements (commonly referred to as ‘slums’), either within the central city or in peri-urban districts at the city’s ever-growing periphery. Since 1990, an estimated 171 million more people are living in such settlements. This number will only increase over time, with Africa’s urban population predicted to triple to 1.23 billion people by 2050.
Faced with this trend of mass urban migration, governments and utilities are proving unable to expand their water and sanitation systems at sufficient pace to respond to the increasing consumer need. This is due to a complex web of constraints that characterise the urban water, sanitation and hygiene (WASH) sector in many developing countries. These constraints include: limited institutional and government capacity at the national and city level; lack of technical expertise to serve low-income areas effectively; lack of the necessary fiscal and tariff-setting autonomy for mandated service providers to fulfil their role; unsupportive and/or poorly defined institutional frameworks; insufficient levels of government investment in water and sanitation; poorly targeted international financial institutions (IFI) investments; and weak implementation of existing national policies and strategies.
Without functional WASH sectors that can deliver new service delivery models at scale, huge numbers of people living in slums will continue to remain without access to the most basic services. Diseases related to inadequate WASH remain among the world’s most serious public health problems, and the associated impacts on economic productivity and children’s cognitive development are likely to have profoundly negative impacts on national development.
## [Water and Sanitation for the Urban Poor (WSUP)](https://www.wsup.com)
[WSUP](https://www.wsup.com) is a not-for-profit company that helps transform cities to benefit the millions who lack access to water and sanitation. [WSUP](https://www.wsup.com) believes that access to safe and affordable water, improved sanitation and improved hygiene practices underpin poverty reduction through impacts on health, education and livelihoods. This is coupled with a conviction in two core principles. Firstly, [WSUP](https://www.wsup.com) believes in the transformative power of markets and innovation: enormous strides can be made by bringing the local and international private sector into the urban WASH space, by driving business thinking in low-income service provision, and by viewing low-income people as consumers who make choices. Secondly, [WSUP](https://www.wsup.com) understands that the dynamising power of markets can only partially solve the problem: deep political commitment, institutional change, and a functioning WASH sector are equally critical. Sustainable at-scale progress in urban WASH will only be achieved if political leaders prioritise government investment and policy change, and if WASH service providers are assisted to radically enhance their capacity to deliver effectively on that investment.
## [Valid International](http://www.validinternational.org)
[Valid International (Valid)](http://www.validinternational.org) is a limited company registered in the UK. The company was founded by Drs Steve Collins and Alistair Hallam in 1999 to improve the quality, impact and accountability of global health and nutrition interventions. [Valid](http://www.validinternational.org) pioneered the Community-based Management of Acute Malnutrition (CMAM) model for addressing Severe Acute Malnutrition (SAM) at scale as well as the local production of Ready to Use Therapeutic Food (RUTF), and was the central force behind the advocacy leading to the endorsement of the model by the UN and multiple national governments in 2007. To date, the approach has been adopted in over 60 countries worldwide.
Over the past ten years, [Valid International](http://www.validinternational.org) has been the primary provider of technical services for the set-up, training and monitoring and evaluation of CMAM programmes all over the world. As part of this, [Valid International](http://www.validinternational.org) has developed various tools and methods that facilitate the provision of these services. For CMAM setup and training, [Valid](http://www.validinternational.org) has published a field manual for CMAM[^1] that gives detailed guidance for the set up and implementation of CMAM. In addition, [Valid](http://www.validinternational.org) has supported the development of CMAM training modules with support and funding from FANTA[^2].
[Valid International](http://www.validinternational.org)’s assessment arm **Valid Measures** started work 15 years ago to address the urgent need for a survey method that could accurately measure the coverage of selective feeding programmes such as CMAM. We have now developed several innovative approaches for needs assessment and monitoring and evaluation of the coverage and effectiveness of nutrition interventions, including the `Simple Spatial Survey Method (S3M)`, `Rapid Assessment Method (RAM)`, `Semi-quantitative Evaluation of Access and Coverage (SQUEAC)`, and `Simplified Lot Quality Assurance Sampling Evaluation of Access and Coverage (SLEAC)`.[^3] Taken together, these approaches have improved assessment outputs over the standard cluster sample approach used in health and nutrition surveys, including improved precision of results, speed of data collection, understanding of barriers and boosters to service access, spatially even distribution of data collection, and detailed spatial mapping of indicator results.
Whilst this work was initially focussed on the measurement of coverage of feeding interventions, it has now expanded to assess multiple indicator sets, making the methodologies suitable for evaluation of a broad range of multi-sectoral interventions and practices including the assessment of WASH indicators.
## Background to the citywide surveys
In order to measure sustained universal coverage in urban areas of [WSUP](https://www.wsup.com) programme countries[^4], [WSUP](https://www.wsup.com) will be conducting citywide surveys in one selected city for each of our six programme countries in early 2017 and early 2020. These citywide surveys will collect data allowing service levels across the entire city to be characterised, while also allowing more detailed data to be collected in areas of the city of particular interest to [WSUP](https://www.wsup.com). These surveys are intended to generate useful information for others working in the sector including the municipality, regulator and local utilities.
In support of these citywide surveys, [Valid International](http://www.validinternational.org) has been contracted to provide technical support with the design of the citywide surveys, with particular focus on the technical design of variable density sampling approaches that enable a general characterisation of the entire city, while allowing higher-resolution data to be obtained in areas of specific [WSUP](https://www.wsup.com) interest. This should be achieved while minimising the sample size as far as possible provided that it retains the ability to generate statistically significant conclusions.
## [WSUP](https://www.wsup.com) information requirements and other survey design considerations
1. [WSUP](https://www.wsup.com) would like to be able to report findings that are representative for three specific populations of interest:
* The general city-wide population;
* Low income/poor population within the city; and,
* WSUP-identified population of interest[^5].
As such, the design will ideally have 3 sampling frames (one for each population of interest), each with its own sample size requirement, so as to be able to report estimates of the various water and sanitation indicators. The sample size requirement (see section on sample size) in each sampling frame would be the same, so at a minimum this approach will require triple the sample size of a survey of only one population of interest.
The specified populations of interest also provide some challenges that need to be considered when designing the survey. These challenges pertain to identification of these populations and determining where they can be located or found within the city. For low-income/poor populations, the first issue to address is who to consider as having low income or as being poor, given that there are a multitude of concepts and metrics used to define this categorisation (e.g., income-based, asset-based, multi-dimensional poverty, etc.) and varying levels of available data that can provide information to determine who is low-income/poor. In addition to knowing who is to be considered low income or poor, it would be important to know which areas of the selected city are considered low-income/poor areas. Ideally, this should be presented in a map of the city indicating generally (or specifically) where areas of low income/poverty can be located. This can usually be done using map outputs of work done by other researchers on poverty mapping. However, most of this work on poverty mapping has results of low resolution (i.e., poverty is mapped down to district level only) and rarely presents poverty mapping at smaller units of a city.
As for [WSUP](https://www.wsup.com)-identified populations of interest, specifically populations who live in areas within the city where [WSUP](https://www.wsup.com) provides interventions, the challenges are similar in terms of identifying who these populations are and determining the areas in the city where they can be located. Whilst locating the general areas where beneficiaries of [WSUP](https://www.wsup.com) interventions live can be relatively simple, knowing the full and specific extent of where they are located in the city can be difficult given that they may not necessarily fall neatly into known administrative units/boundaries.
An additional issue with having a focus on [WSUP](https://www.wsup.com)-supported areas is that it carries additional sample size requirements, especially if the purpose of drilling down on these focused areas is to report specifically on the experience and satisfaction of users of [WSUP](https://www.wsup.com) services and interventions (which is what is indicated in our preliminary review of the questionnaire). We think that obtaining such information requires a different approach to data collection and cannot easily be nested within the broader cross-sectional survey without complicating the whole process.
2. The type of indicators that [WSUP](https://www.wsup.com) would like to assess through the survey, and the questionnaire that is to be used to collect the information needed to calculate these indicators, will also influence to some extent the design of the survey. Hence, they are factors to consider at the design phase.
From the current questionnaire that we have seen, the target respondents of the survey are households, with the head of the household present at survey time as the key person of interest to provide answers to the questions. So, this is primarily a household survey.
Within the questionnaire, some form of household roster is documented such that the head of the household is asked about the other members of the household, specifically their ages and/or age groupings, their gender, and other specific categorisations/groupings of interest such as persons with disability, pregnant women[^6], and in some cases persons living with HIV/AIDS (PLWHA). This is not a standard household roster per se, as the data collected on the members of the household are still attributed to the household itself (the data on the members are individual variables in the household data) rather than forming a separate data branch from the household data (which is what is typical of a household roster). From our review, it seems that the data from household members are used to report indicator results on access and coverage of water and sanitation services specific to the different categories of interest. This kind of reporting has sample size implications, but it is not yet clear whether this is indeed the purpose of the household roster. So, for the purpose of this design document, we focus on the idea that this survey is targeting households and reporting results on households only, rather than on the different categories of interest to which members of the household belong.
The questionnaire also indicates that the majority of the indicators that [WSUP](https://www.wsup.com) needs reported are proportion-type indicators, mostly based on respondents' self-report, with most questions requiring a yes/no answer whilst some require responses on a scale[^7] used for eliciting satisfaction with services accessed and/or provided. The questionnaire doesn't require measurements to obtain data (numerical data), but a module on poverty assessment[^8] uses a numeric scoring scale to produce/report an index score for a household that is then used to assess the level of wealth/poverty of the household. Given that the majority of the indicators are proportion-type, sample size calculations will take this into account.
Finally, it should be noted that, by nature, indicators on access to water and sanitation services are highly clustered. That is, the source of drinking water or type of sanitation facility tends to be delivered or provided in a similar way among groups or sets of households, specifically those living within a block or a neighbourhood. Hence, responses taken from households that live near each other or on the same street or neighbourhood will most likely be similar if not totally identical. This is the nature of water and sanitation questionnaires and their associated indicators in a cross-sectional survey and should be factored in when estimating sample size.
3. [WSUP](https://www.wsup.com) would like to have as much spatial disaggregation of the results across the city as possible as this will provide more nuanced information for the organisation with regard to variances in service provision and access throughout the city which can in turn help guide programme development, beneficiary targeting and programme implementation. At the minimum, [WSUP](https://www.wsup.com) would like to be able to report results specific to the three main populations of interest mentioned above and ideally have an even finer breakdown of results across the low-income/poor groupings and the [WSUP](https://www.wsup.com)-supported communities.
This requirement has implications on the sample size (both overall and at each level of disaggregation) hence should be taken into account. The general rule is, the finer the resolution of results needed, the higher the sample size required.
## Survey design
Based on the considerations discussed above, we propose the following overall design that can be applicable to all of the six countries that [WSUP](https://www.wsup.com) plans to survey.
### Sample Universe
The survey is to take place at household level. There are three populations to be surveyed:
* A representative sample from across all of the city, to estimate WASH services coverage (e.g. indicators like percentage of the population using an improved, non-shared sanitation facility)
* A representative sample from identified low-income/poor areas of the city, assessing the same indicators
* A representative sample from one or more [WSUP](https://www.wsup.com)-identified areas of interest, either for previous work locations or proposed future locations, again assessing the same indicators
### Sample frame
We propose a sampling frame with at least three levels of spatial stratification across the city and a two-stage spatial sampling design within each of the spatial strata.
The three levels of spatial stratification that we propose are:
1. A first level sub-division of the city – this would usually be a formally recognised and/or official sub-division of the city along known or set administrative boundaries. However, it should be noted that formally recognised and/or official sub-divisions of a city may or may not always include urban sprawl or peri-urban areas or conurbations that extend the city.
These areas are usually a result of an influx of people from rural areas and other areas of the country to the city, who form communities around the borders of the city and/or in specific areas within the city. The people who form these communities tend to be low-income/poor. Hence, it would be important that further investigation be made as to whether the formally recognised borders of the city include these conurbations and whether they are part of the first level sub-divisions of the city that will be used for the sampling. If they are not, then a decision has to be made whether the areas that have not been included fall under WSUP's other populations of interest such as the low-income/poor (category b above) and/or those that receive or benefit from WSUP interventions (category c above). If these areas are deemed important to include in the sample universe, then these areas are added to the sample universe either as a separate sub-division within the city[^9] or included within the existing sub-divisions of the city.
The rationale for having this first level of stratification is to serve as a unit of disaggregation at which the survey will be designed to report indicator estimates. This, therefore, has an impact on sample size, as the more first-level sub-divisions there are, the higher the overall sample size needed (see sample size section for detail). This would mean that in some cases, for cities with numerous first-level sub-divisions, some kind of grouping of first-level sub-divisions would be necessary so as to balance the need for spatial disaggregation of results with the level of resources available for the surveys.
2. A second grouping based on low-income/poor stratification – this would address WSUP's requirement of reporting results specific to the low-income/poor population of the city. In this section, we refer to low-income/poor stratification as a geographical grouping based on areas that are known to be low-income/poor. This information should be based on known or established concepts of low-income/poor which have been mapped and whose areas and boundaries are well-defined. These may be available for some cities (as described in the section on design considerations).
It is possible that the first level sub-division stratification described in point 1 above overlaps with the second stratification on low-income/poor. As mentioned, there may be specific first-level sub-divisions that include certain first-level units classified or categorised as low-income/poor[^10]. If so, then the first-level and second-level stratification proposed here will be the same, as it already includes a grouping based on low-income/poor status. On the other hand, these areas can be smaller units interspersed across the first-level sub-divisions (i.e., slums). In this case, we will need to define these areas separately as a second-level sub-division within the first-level units.
It is also important to clarify that this stratification on low-income/poor does not necessarily imply that all respondents within areas that are considered low-income/poor are indeed low-income/poor (based on actual metrics of poverty such as a wealth index, etc.), and the same holds for areas that are classified as not low-income/poor. Wealth status can still vary between households within these areas, and this variation will be captured through the survey itself. What this geographical stratification on the basis of low-income/poor does is allow us to provide a specific sampling frame, and hence a specific dataset for this area, to be able to provide more robust disaggregated spatial results. This is based on the recognition that these low-income/poor areas most likely have very different characteristics compared to the more formal and organised areas that are not considered low-income/poor. Such characteristics (e.g., housing structure, available services, etc.) would make these areas function in a different way than others, and the households within them live in a different way than other households in other areas. Capturing this variation specific to this area would therefore be important, particularly in an urban-setting survey where clustering is very common.
3. The third grouping is [WSUP](https://www.wsup.com)-identified areas of interest, based either on previous work locations or proposed future locations. These are even more localised areas and are often not clearly delineated geographically. This will require work to delineate and map these areas as clearly as possible so that they can serve as third-level sub-divisions with their own sampling frame.
It should be noted that this third level of sub-division will require additional sample size, which should be considered by weighing the desirability of having results specific to [WSUP](https://www.wsup.com)-identified areas of interest against the resources available.
Each of these levels of spatial stratification will have its own sampling frame and as such will require its own sample size.
### Sample Size
The following sample size calculations are for each spatial stratum described above, which will address WSUP's requirements for spatially disaggregated results based on the defined sets of populations to report on.
**Sample size for estimating proportion-type indicators**
Based on the nature of the indicators that WSUP requires the survey to report on, the sample size calculations should be powered to report proportion-type estimates. In addition, the sample size required for the survey depends on:
1. `Precision` of the estimate required (as determined by the width of the confidence interval around the estimates); and,
2. `Variance inflating factor (VIF)` or `design effect (DEFF)` for the chosen design (e.g., simple random sample, cluster sample, etc.), reflecting the increase in sample required to use a cluster design.
The general formula to calculate sample sizes to estimate a proportion (sometimes called prevalence) indicator is:
<br/>
$$ n = z ^ 2 \times{\frac{p(1-p)}{c ^ 2}} $$
<br/>
where
<br/>
\(n = \text{sample size}\)
\(z = \text{z-value for} ~ 95\% ~ \text{confidence interval} \)
\(p = \text{expected proportion/prevalence} \)
\(c = \text{level of precision} \)
<br/>
To be able to use this formula, we set out the following parameters specific to the survey requirements and the sample frame described above:
<br/>
\(z = 1.96 ~ \text{(for} ~ 95\% ~ \text{CI)} \)
\(p = 50\% ~ \text{(assume proportion requiring highest sample size)} \)
\(c = 0.10 ~ \text{(} 10\% ~ \text{precision)} \)
<br/>
This gives us the following sample size:
<br/>
$$ n = 1.96 ^ 2 \times{\frac{0.5(1 - 0.5)}{0.1 ^ 2}} \approx 96 $$
<br/>
We therefore need a sample size of \(n = 96\) assuming a simple random sample survey design.
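As a quick illustration, and purely to mirror the arithmetic above, this base sample size can be computed in R as follows:
```{r}
# Base sample size for estimating a proportion from a simple random sample
z <- 1.96      # z-value for a 95% confidence interval
p <- 0.50      # expected proportion (worst case, giving the largest n)
prec <- 0.10   # desired precision (c in the formula above)

n_base <- z ^ 2 * (p * (1 - p)) / prec ^ 2
n_base   # 96.04, i.e. approximately 96
```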
However, given that we will be using a clustered sample design, we need to take into consideration the design effect or `DEFF`. `DEFF` is related to the indicators being studied as well as the planned sample size per cluster, and is best calculated empirically from previous similar surveys in the area being studied. The best reference survey from which to estimate `DEFF` is the **Demographic and Health Survey (DHS)**, which is conducted every 5 years in most countries. In general, DHS includes an urban sample from the capital city and from other main cities of the country which can be used to estimate the `intra-cluster correlation coefficient (ICC)`. The `intra-cluster correlation coefficient` gives an estimate of how correlated the responses are for specific questions/indicators within a cluster, thereby providing a metric for the loss of variance as a function of the cluster survey sample design. The purpose of `DEFF` is to inflate the base sample size calculated for a simple random sample design so as to compensate for the loss of information caused by clustering in the sample design and by the clustered nature of the indicators being surveyed (such as water and sanitation indicators). `DEFF` can be calculated using the following formula:
<br/>
$$ DEFF = 1 + (c - 1) \times \rho $$
<br/>
where
<br/>
\(c = \text{cluster size} \)
\(\rho = \text{intra-cluster correlation coefficient (ICC)} \)
<br/>
This equation shows that `DEFF` increases with increases in cluster size and/or in `ICC`. Since there is very little we can do to change `ICC`, the general idea will be to reduce cluster size as much as possible as this will bring down `DEFF`. The smaller the cluster size, the more that the survey design approximates a simple random sample.
Studies that have estimated `DEFF` using survey datasets from various countries have shown that `DEFF` for water and sanitation indicators can range from as low as 1.5 to as high as 7. `DEFF` should therefore be estimated per country whenever possible so that a more precise sample size estimate can be calculated.
Once `DEFF` has been estimated, the base sample size of \( n = 96 \) can be adjusted as follows:
<br/>
$$ n_{adjusted} = n_{base} \times DEFF = 96 \times DEFF $$
<br/>
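As a sketch only, using an illustrative cluster size and ICC (not values estimated from any DHS dataset), the adjustment would look like this:
```{r}
# Adjust the base sample size for a clustered design
n_base <- 96
cluster_size <- 10   # illustrative planned number of households per cluster
icc <- 0.3           # illustrative intra-cluster correlation coefficient

deff <- 1 + (cluster_size - 1) * icc   # design effect
n_adjusted <- ceiling(n_base * deff)   # adjusted sample size
deff
n_adjusted
```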
**Sample size for classifying proportion indicators**
Another way of reporting proportion indicators is by classification, that is, determining whether a certain threshold or thresholds is/are reached by the results of the indicator, enabling its categorisation into specific classes of achievement (i.e., high or low, success or failure, adequate or inadequate). The approach of classification is called `lot quality assurance sampling (LQAS)`, an analytical technique developed and used widely in industry (pharmaceuticals and manufacturing, to name a few) as an efficient and cost-effective way of checking and controlling the quality of large volumes of manufactured products[^11]. `LQAS` requires a much smaller sample size than the classical estimation approach, mainly because of the type of information it provides (classification rather than estimation). From a quality control perspective, particularly at an industrial level, a classification result determining whether a specific lot or set of goods produced is of good quality or not is just the information needed to decide whether or not to release the lot of goods to the market and whether an inspection of a specific production line needs to be performed to check its manufacturing process. Such decision-making does not require results of high precision. For example, if a drug company sets its quality control standard at \( 80\% \) of medicines in a batch meeting the quality requirements, then a survey with a sample size aimed at reporting an estimate with acceptable precision (as described above) that gives an estimate of 60% has the same decision-making value as a survey using a much smaller sample size (usually \( n = 40 \)) that classifies the batch of medicines as a failure (below \( 80\% \)). Both sets of results will guide plant managers in determining that the specific batch of medicines is of poor quality and hence should not be brought to market. However, the `LQAS` approach provides this information with about half of the sample size.
The use of this quality control technique has been adapted for use in health monitoring and health surveys, which include water and sanitation indicators, to a great degree because of its small sample size requirements[^12], which lend themselves well to routine and repeated applications over time and location.
`LQAS` generally requires a sample size of \(n = 40\). Adjustments to this sample size are made based on the survey design and the cut-offs used for classification, with a cut-off of \( 50\% \) requiring the highest sample size (\( n = 40 \)). Factoring in a cluster design, this minimum sample size can go up to \( n = 60 \), which is the typical sample size used for `LQAS` applied to water and sanitation indicators[^13].
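To make the classification idea concrete, the sketch below applies a simplified two-class decision rule of the kind used in SLEAC-type surveys; the threshold and the number of households meeting the indicator are hypothetical values for illustration only:
```{r}
# Simplified LQAS-style classification of a proportion indicator
n <- 40                    # LQAS sample size
threshold <- 0.5           # classification cut-off (50%)
d <- floor(n * threshold)  # decision rule

cases <- 23                # hypothetical number of sampled households meeting the indicator
if (cases > d) "at or above the 50% threshold" else "below the 50% threshold"
```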
### Stage 1 sampling
Stage 1 sampling is the selection of clusters or `primary sampling units (PSU)` from which the sample of households will be collected. We propose to select survey clusters using a variable grid spatial sampling approach. Cluster locations will be chosen for each sampling frame (non-slum, slum, and operational area) using `centric systematic area sampling (CSAS)`. This will be performed through the following general steps[^14]:
1. Get appropriate maps for the city to be surveyed. The types of maps that will be appropriate for this urban sampling approach are:
a) Map data files[^15] that delineate the boundaries of the whole city and each of the three levels of the sampling frame[^16];
b) High resolution gridded population maps[^17]; and,
c) Map data on roads, buildings, residential areas, landuse and places of interest for the city[^18].
2. Using maps with boundaries of the different administrative levels with an overlay of the high resolution gridded population maps, draw a square grid around each spatial stratum that has been identified for the city. The size of the grid will be dependent on the number of sampling clusters that was decided when calculating the sample size. The grid should cover only areas with populations as indicated by the population maps.
3. Get the centroid of each quadrat (square) of the grid. This identifies the primary sampling unit within the specified area where the stage 2 sampling will be conducted. The geographical coordinates of the centroid will be captured and recorded and put on the stage 1 sampling list (and drawn on appropriate maps) for enumerators to use during data collection. A minimal sketch of steps 2 and 3 in R is shown after this list.
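The following is a minimal sketch, assuming the boundary of a spatial stratum is available as a file readable by the `sf` package; the file name, grid cell size, and coordinate reference system are placeholders, and the filtering of unpopulated grid cells against the gridded population raster is omitted:
```{r, eval = FALSE}
library(sf)

# Boundary of one spatial stratum (placeholder file name)
stratum <- st_read("city_stratum_boundary.gpkg")

# Step 2: draw a square grid over the stratum; the cell size (in the units of
# the projected CRS) is chosen so that the number of cells matches the number
# of clusters required by the sample size calculation
grid <- st_make_grid(stratum, cellsize = 1000, square = TRUE)
grid <- grid[lengths(st_intersects(grid, stratum)) > 0]

# Step 3: the centroid of each quadrat is the primary sampling unit (PSU)
psu <- st_centroid(grid)

# Coordinates for the stage 1 sampling list given to enumerators
st_coordinates(psu)
```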
It should be noted that the stage 1 sample is not selected `proportional to population size (PPS)`, which is a stage 1 sample selection approach commonly used in most population surveys. Instead, the stage 1 sample is drawn spatially through an even square grid across the city. This approach addresses the limitation of a `PPS` sample, which gives a cluster with a higher population a higher probability of being included in the sample compared to clusters with smaller populations. Also, a population-weighted stage 1 sample is drawn from a specific sampling frame and hence can only be representative of that frame; any form of disaggregation of that dataset to provide results at a finer resolution will not be possible.
For the spatial sampling approach, the dataset can be disaggregated and aggregated accordingly by applying a cluster weighting approach (using the population at each cluster as the weights), as used in the analysis of stratified data, when estimating the indicators of interest. This is what is called a posterior weighting approach. Population estimates obtained from WorldPop remote sensing data can be used for the purpose of weighting the cluster data.
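A minimal sketch of this posterior weighting step, using made-up cluster estimates and populations (in practice the populations would come from WorldPop):
```{r}
# Hypothetical cluster-level results: proportion of households with an
# improved sanitation facility, and the population of each cluster
cluster_estimate <- c(0.62, 0.45, 0.80, 0.55)
cluster_pop      <- c(1200, 3400, 900, 2100)

# Population-weighted (posterior weighted) overall estimate
weighted.mean(cluster_estimate, w = cluster_pop)
```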
### Stage 2 sampling
At each cluster, a total of two households will be selected. The cluster will be identified using GPS coordinates on enumerators' devices, and the start household will be identified as the building closest to the GPS coordinates. In the case of a multi-household or apartment building, the household will be chosen randomly. To select the second household, we recommend a general approach of choosing the building or housing structure five doors to the right when facing the door of the start household.
It is very likely that the spatial organisation of the various cities to be sampled will differ from one city to another. Also, within each city, there will be differences in the setting and organisation of buildings and houses in different parts of the city, which will require adjustments or variations to the general approach for stage 2 sampling described above, specifically for selecting the second household. This is something that will require investigation and testing prior to the start of the survey in every city, with experiences documented in the process document produced during and after the survey process. This will help in producing more nuanced guidelines on other approaches to stage 2 sampling.
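As a sketch only, assuming building footprints for the cluster area are available (e.g., extracted from OpenStreetMap) and noting that the file name, coordinates, and household count below are placeholders, the start household could be identified like this:
```{r, eval = FALSE}
library(sf)

# Cluster centroid recorded during stage 1 (placeholder coordinates)
centroid <- st_sfc(st_point(c(90.40, 23.75)), crs = 4326)

# Building footprints around the cluster centroid (placeholder file name)
buildings <- st_read("cluster_buildings.gpkg")

# Start building: the building closest to the cluster centroid
start_building <- buildings[st_nearest_feature(centroid, buildings), ]

# If the start building houses several households, choose one at random
n_households <- 4   # hypothetical number of households in the building
start_household <- sample(n_households, 1)
```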
## Endnotes
[^1]: Valid International, 2006. Community-based Therapeutic Care (CTC): A Field Manual, Oxford: Valid International. Available at <https://www.fantaproject.org/sites/default/files/resources/CTC-Field-Manual-Oct2006-508.pdf>
[^2]: see <https://www.fantaproject.org/focus-areas/nutrition-emergencies-mam/cmam-training>
[^3]: **RAM** is a quick, simple and low-cost survey method to assess and monitor individual datasets for multiple health and nutrition indicators for under-five children, older people and other vulnerable groups in different humanitarian crises and development contexts. Compared to other needs assessment and monitoring methodologies, RAM uses a smaller sample size without compromising relative precision and spatial distribution, and is therefore ideal for capturing reliable data for different vulnerable groups, including socio-cultural sub-populations, in a timely and cost-efficient manner. **S3M** is a development of the Centric Systematic Area Sampling (CSAS) method providing better spatial resolution with lower sampling costs. S3M is appropriate for mapping coverage (or prevalence of indicators) over very wide-areas. **SQUEAC** is a semi-quantitative method that provides in-depth analysis of barriers and boosters to coverage. It is designed as a routine program monitoring tool through the intelligent use of routine monitoring data complemented by other relevant data that are collected on a “little and often” basis. **SLEAC** is a rapid low-resource survey method that classifies coverage at the service delivery unit (SDU) level such as the district. A SLEAC survey identifies the category of coverage (e.g. “low coverage”, “moderate coverage” or “high coverage”) that describes the coverage of the service delivery unit being assessed. The advantage of this approach is that relatively small sample sizes (e.g. \(n ≤ 40\)) are required in order to make an accurate and reliable classification.
[^4]: Bangladesh, Ghana, Kenya, Madagascar, Mozambique and Zambia
[^5]: Main consideration for selecting population of interest is living in areas within the city where WSUP is currently providing interventions or supporting projects
[^6]: It is interesting to note that based on the WSUP questionnaires, pregnant women are identified separately but are classified within the grouping of those with disability
[^7]: The questionnaire uses a 4-category scale, i.e., satisfied, somewhat satisfied, somewhat dissatisfied, dissatisfied
[^8]: WSUP uses the Progress out of Poverty Index (PPI)
[^9]: In some cities, these settlements are categorised or named as slums or squatters areas and can be formally or informally recognised by authorities and/or city planners
[^10]: Especially true if the mapping of the low-income/poor areas are fixed specifically on administrative boundaries and not at more localised areas
[^11]: See Lanata, C.F. & Black, R.E., 1991. Lot quality assurance sampling techniques in health surveys in developing countries: advantages and current constraints. World Health Statistics Quarterly, 44(3), pp.133–139
[^12]: See Lanata, C.F. et al., 1988. Lot quality assurance sampling in health monitoring. The Lancet, 1(8577), pp.122–123 and Robertson, S.E. & Valadez, J.J., 2006. Global review of health care surveys using lot quality assurance sampling (LQAS), 1984–2004. Social Science & Medicine, 63(6), pp.1648–1660.
[^13]: Based on previous work done by Valid International on application of LQAS on reporting water and sanitation indicators recommended by the Joint Monitoring Programme (JMP)
[^14]: Some modifications to these steps may need to be made on a country by country basis, particularly with regard to how the groupings of the spatial units are organised and sub-divided. Otherwise, the general approach is the same in any setting.
[^15]: There are many formats for map files. In general, the most common and most accessible formats are keyhole markup language (KML) files which are the standard output format produced by Google Maps/Google Earth and ESRI Shapefiles.
[^16]: These days, there are various online repositories of map data files available for most countries including cities. We most commonly use the repository maintained by Robert Hijmans at <https://gadm.org> and the one maintained by UN OCHA called the Humanitarian Data Exchange (<https://data.humdata.org>). Organisations such as the World Food Programme also maintain their own repositories but most of their maps are also made available through the Humanitarian Data Exchange.
[^17]: The best openly available sources for this map data are from the WorldPop project (<https://www.worldpop.org>) and the Global Rural-Urban Mapping Project or GRUMP (<https://sedac.ciesin.columbia.edu/data/collection/grump-v1>).
[^18]: For most cities, this can be obtained from OpenStreetMap or OSM (<https://www.openstreetmap.org>). Google Earth can also be used to view satellite imagery of the city.
|
/scratch/gouwar.j/cran-all/cranData/washdata/inst/doc/washdata.Rmd
|
---
title: "Urban Water and Sanitation Survey Dataset"
author: "Ernest Guevarra"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Urban Water and Sanitation Survey Dataset}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Background: the global problem of inadequate access to water and sanitation for the urban poor
Over the past two decades most developing countries have experienced rapid urbanisation as part of a global trend that has brought millions out of poverty, and helped to bridge the gap between the developed and the developing world. However, many of the people migrating to urban environments are concentrated in low-income informal settlements (commonly referred to as ‘slums’), either within the central city or in peri-urban districts at the city’s ever-growing periphery. Since 1990, an estimated 171 million more people are living in such settlements. This number will only increase over time, with Africa’s urban population predicted to triple to 1.23 billion people by 2050.
Faced with this trend of mass urban migration, governments and utilities are proving unable to expand their water and sanitation systems at sufficient pace to respond to the increasing consumer need. This is due to a complex web of constraints that characterise the urban water, sanitation and hygiene (WASH) sector in many developing countries. These constraints include: limited institutional and government capacity at the national and city level; lack of technical expertise to serve low-income areas effectively; lack of the necessary fiscal and tariff-setting autonomy for mandated service providers to fulfil their role; unsupportive and/or poorly defined institutional frameworks; insufficient levels of government investment in water and sanitation; poorly targeted international financial institutions (IFI) investments; and weak implementation of existing national policies and strategies.
Without functional WASH sectors that can deliver new service delivery models at scale, huge numbers of people living in slums will continue to remain without access to the most basic services. Diseases related to inadequate WASH remain among the world’s most serious public health problems, and the associated impacts on economic productivity and children’s cognitive development are likely to have profoundly negative impacts on national development.
## [Water and Sanitation for the Urban Poor (WSUP)](https://www.wsup.com)
[WSUP](https://www.wsup.com) is a not-for-profit company that helps transform cities to benefit the millions who lack access to water and sanitation. [WSUP](https://www.wsup.com) believes that access to safe and affordable water, improved sanitation and improved hygiene practices underpin poverty reduction through impacts on health, education and livelihoods. This is coupled with a conviction in two core principles. Firstly, [WSUP](https://www.wsup.com) believes in the transformative power of markets and innovation: enormous strides can be made by bringing the local and international private sector into the urban WASH space, by driving business thinking in low-income service provision, and by viewing low-income people as consumers who make choices. Secondly, [WSUP](https://www.wsup.com) understands that the dynamising power of markets can only partially solve the problem: deep political commitment, institutional change, and a functioning WASH sector are equally critical. Sustainable at-scale progress in urban WASH will only be achieved if political leaders prioritise government investment and policy change, and if WASH service providers are assisted to radically enhance their capacity to deliver effectively on that investment.
## [Valid International](http://www.validinternational.org)
[Valid International (Valid)](http://www.validinternational.org) is a limited company registered in the UK. The company was founded by Drs Steve Collins and Alistair Hallam in 1999 to improve the quality, impact and accountability of global health and nutrition interventions. [Valid](http://www.validinternational.org) pioneered the Community-based Management of Acute Malnutrition (CMAM) model for addressing Severe Acute Malnutrition (SAM) at scale as well as the local production of Ready to Use Therapeutic Food (RUTF), and was the central force behind the advocacy leading to the endorsement of the model by the UN and multiple national governments in 2007. To date, the approach has been adopted in over 60 countries worldwide.
Over the past ten years, [Valid International](http://www.validinternational.org) has been the primary provider of technical services for the set-up, training and monitoring and evaluation of CMAM programmes all over the world. As part of this, [Valid International](http://www.validinternational.org) has developed various tools and methods that facilitate the provision of these services. For CMAM setup and training, [Valid](http://www.validinternational.org) has published a field manual for CMAM[^1] that gives detailed guidance for the set up and implementation of CMAM. In addition, [Valid](http://www.validinternational.org) has supported the development of CMAM training modules with support and funding from FANTA[^2].
[Valid International](http://www.validinternational.org)’s assessment arm **Valid Measures** started work 15 years ago to address the urgent need for a survey method that could accurately measure the coverage of selective feeding programmes such as CMAM. We have now developed several innovative approaches for needs assessment and monitoring and evaluation of the coverage and effectiveness of nutrition interventions, including the `Simple Spatial Survey Method (S3M)`, `Rapid Assessment Method (RAM)`, `Semi-quantitative Evaluation of Access and Coverage (SQUEAC)`, and `Simplified Lot Quality Assurance Sampling Evaluation of Access and Coverage (SLEAC)`.[^3] Taken together, these approaches have improved assessment outputs over the standard cluster sample approach used in health and nutrition surveys, including improved precision of results, speed of data collection, understanding of barriers and boosters to service access, spatially even distribution of data collection, and detailed spatial mapping of indicator results.
Whilst this work was initially focussed on the measurement of coverage of feeding interventions, it has now expanded to assess multiple indicator sets, making the methodologies suitable for evaluation of a broad range of multi-sectoral interventions and practices including the assessment of WASH indicators.
## Background to the citywide surveys
In order to measure sustained universal coverage in urban areas of [WSUP](https://www.wsup.com) programme countries[^4], [WSUP](https://www.wsup.com) will be conducting citywide surveys in one selected city for each of our six programme countries in early 2017 and early 2020. These citywide surveys will collect data allowing service levels across the entire city to be characterised, while also allowing more detailed data to be collected in areas of the city of particular interest to [WSUP](https://www.wsup.com). These surveys are intended to generate useful information for others working in the sector including the municipality, regulator and local utilities.
In support of these citywide surveys, [Valid International](http://www.validinternational.org) has been contracted to provide technical support with the design of the citywide surveys, with particular focus on the technical design of variable density sampling approaches that enable a general characterisation of the entire city, while allowing higher-resolution data to be obtained in areas of specific [WSUP](https://www.wsup.com) interest. This should be achieved while minimising the sample size as far as possible provided that it retains the ability to generate statistically significant conclusions.
## [WSUP](https://www.wsup.com) information requirements and other survey design considerations
1. [WSUP](https://www.wsup.com) would like to be able to report findings that are representative for three specific populations of interest:
* The general city-wide population;
* Low income/poor population within the city; and,
* WSUP-identified population of interest[^5].
As such, the design will ideally have 3 sampling frames (one for each population of interest), each with its own sample size requirement, so as to be able to report estimates of the various water and sanitation indicators. The sample size requirement (see section on sample size) in each sampling frame would be the same, so at a minimum this approach will require triple the sample size of a survey of only one population of interest.
The specified populations of interest also provide some challenges that need to be considered when designing the survey. These challenges pertain to identification of these populations and determining where they can be located or found within the city. For low-income/poor populations, the first issue to address is who to consider as having low income or as being poor, given that there are a multitude of concepts and metrics used to define this categorisation (e.g., income-based, asset-based, multi-dimensional poverty, etc.) and varying levels of available data that can provide information to determine who is low-income/poor. In addition to knowing who is to be considered low income or poor, it would be important to know which areas of the selected city are considered low-income/poor areas. Ideally, this should be presented in a map of the city indicating generally (or specifically) where areas of low income/poverty can be located. This can usually be done using map outputs of work done by other researchers on poverty mapping. However, most of this work on poverty mapping has results of low resolution (i.e., poverty is mapped down to district level only) and rarely presents poverty mapping at smaller units of a city.
As for [WSUP](https://www.wsup.com)-identified populations of interest, specifically populations who live in areas within the city where [WSUP](https://www.wsup.com) provides interventions, the challenges are similar in terms of identifying who these populations are and determining the areas in the city where they can be located. Whilst locating the general areas where beneficiaries of [WSUP](https://www.wsup.com) interventions live can be relatively simple, knowing the full and specific extent of where they are located in the city can be difficult given that they may not necessarily fall neatly into known administrative units/boundaries.
An additional issue with having a focus on [WSUP](https://www.wsup.com)-supported areas is that it carries additional sample size requirements, especially if the purpose of drilling down on these focused areas is to report specifically on the experience and satisfaction of users of [WSUP](https://www.wsup.com) services and interventions (which is what is indicated in our preliminary review of the questionnaire). We think that obtaining such information requires a different approach to data collection and cannot easily be nested within the broader cross-sectional survey without complicating the whole process.
2. The type of indicators that [WSUP](https://www.wsup.com) would like to assess through the survey, and the questionnaire that will be used to collect the information to calculate these indicators, will also influence the design of the survey to some extent. Hence, they are factors to consider at the design phase.
From the current questionnaire that we have seen, the target respondents of the survey are households, with the head of the household present at survey time as the key person of interest to answer the questions. So, this is primarily a household survey.
Within the questionnaire, some form of household roster is documented such that the head of the household is asked about the other members of the household, specifically their ages and/or age groupings, their gender and other specific categorisations/groupings of interest such as persons with disability, pregnant women[^6], and in some cases persons living with HIV/AIDS (PLWHA). This is not a standard household roster per se, as the data collected on the members of the household are still attributed to the household itself (the data on the members are individual variables on the household record) rather than held in a separate data branch from the household data (which is what is typical of a household roster). From our review, it seems that the data from household members are used to report indicator results on access and coverage of water and sanitation services specific to the different categories of interest. Such reporting has sample size implications, but it is not yet clear whether this is indeed the purpose of the household roster. So, for the purpose of this design document, we focus on the idea that this survey targets households and reports results on households only, rather than on the different categories of interest to which members of the household belong.
The questionnaire also indicates that the majority of the indicators that [WSUP](https://www.wsup.com) needs reported are most likely proportion-type indicators, mostly based on respondents' self-report, with most questions requiring a yes/no answer whilst some require responses on a scale[^7] used for eliciting satisfaction with services accessed and/or provided. The questionnaire does not require measurements to obtain numerical data, but a module on poverty assessment[^8] uses a numeric scoring scale to produce/report an index score for a household, which is then used to assess the household's level of wealth/poverty. Given that the majority of the indicators are proportion-type, the sample size calculations take this into account.
Finally, it should be noted that, by nature, indicators on access to water and sanitation services are highly clustered. That is, the source of drinking water or the type of sanitation facility tends to be delivered or provided in a similar way among groups or sets of households, particularly those living within a block or a neighbourhood. Hence, responses taken from households that live near each other or on the same street or in the same neighbourhood will most likely be similar if not identical. This is the nature of water and sanitation questionnaires and their associated indicators in a cross-sectional survey and should be factored in when estimating sample size.
3. [WSUP](https://www.wsup.com) would like to have as much spatial disaggregation of the results across the city as possible, as this will provide more nuanced information for the organisation with regard to variations in service provision and access throughout the city, which can in turn help guide programme development, beneficiary targeting and programme implementation. At the minimum, [WSUP](https://www.wsup.com) would like to be able to report results specific to the three main populations of interest mentioned above and ideally have an even finer breakdown of results across the low-income/poor groupings and the [WSUP](https://www.wsup.com)-supported communities.
This requirement has implications on the sample size (both overall and at each level of disaggregation) hence should be taken into account. The general rule is, the finer the resolution of results needed, the higher the sample size required.
## Survey design
Based on the considerations discussed above, we propose the following overall design that can be applicable to all of the six countries that [WSUP](https://www.wsup.com) plans to survey.
### Sample Universe
The survey is to take place at household level. There are three populations to be surveyed:
* A representative sample from across all of the city, to estimate WASH services coverage (e.g. indicators like percentage of the population using an improved, non-shared sanitation facility)
* A representative sample from identified low-income/poor areas of the city, assessing the same indicators
* A representative sample from one or more [WSUP](https://www.wsup.com)-identified areas of interest, either for previous work locations or proposed future locations, again assessing the same indicators
### Sample frame
We propose a sampling frame with at least three levels of spatial stratification across the city and a two-stage spatial sampling design within each of the spatial strata.
The three levels of spatial stratification that we propose are:
1. A first level sub-division of the city – this would usually be a formally recognised and/or official sub-division of the city along known or set administrative boundaries. However, it should be noted that formally recognised and/or official sub-divisions of a city may or may not always include urban sprawl or peri-urban areas or conurbations that extend the city.
These areas are usually the result of an influx of people from rural areas and other parts of the country to the city, who form communities around the borders of the city and/or in specific areas within the city. The people who form these communities tend to be low-income/poor. Hence, it is important to investigate further whether the formally recognised borders of the city include these conurbations and whether they are part of the first-level sub-divisions of the city that will be used for the sampling. If they are not, then a decision has to be made whether the areas that have not been included fall under WSUP's other populations of interest such as the low-income/poor (category b above) and/or those that receive or benefit from WSUP interventions (category c above). If these areas are deemed important to include in the sample universe, then they are added to the sample universe either as a separate sub-division within the city[^9] or within the existing sub-divisions of the city.
The rationale for having this first level of stratification is to serve as a unit of disaggregation for which the survey will be designed to report indicator estimates. This, therefore, has an impact on sample size: the more first-level sub-divisions there are, the higher the overall sample size needed (see the sample size section for detail). This means that in some cases, for cities with numerous first-level sub-divisions, some grouping of first-level sub-divisions will be necessary so as to balance the need for spatial disaggregation of results with the level of resources available for the surveys.
2. A second grouping based on low-income/poor stratification – this would address WSUP's requirement of reporting results specific to the low-income/poor population of the city. In this section, we refer to low-income/poor stratification as a geographical grouping based on areas that are known to be low-income/poor. This information should be based on known or established concepts of low-income/poor status which have been mapped and whose areas and boundaries are well defined. These may be available for some cities (as described in the section on design considerations).
It is possible that the first level sub-division stratification described in point 1 above overlaps with the second stratification on low-income/poor. As mentioned, there may be specific first-level sub-divisions in which certain first-level units are classified or categorised as low-income/poor[^10]. If so, then the first-level and second-level stratifications proposed here will be the same, as the first already includes a grouping based on low-income/poor status. On the other hand, these areas can be smaller units interspersed across the first-level sub-divisions (i.e., slums). In this case, we will need to define these areas separately as second-level sub-divisions within the first-level units.
It is also important to clarify that this stratification on low-income/poor does not necessarily imply that all respondents within areas considered low-income/poor are indeed low-income/poor (based on actual metrics of poverty such as a wealth index, etc.), and the same applies for areas classified as not low-income/poor. Wealth status at the household level can still vary within these areas, and this variation will be captured through the survey itself. What this geographical stratification on the basis of low-income/poor status does is provide a specific sampling frame, and hence a specific dataset for this area, so as to provide more robust disaggregated spatial results. This is based on the recognition that these low-income/poor areas most likely have very different characteristics compared to the more formal and organised areas that are not considered low-income/poor. Such characteristics (e.g., housing structure, available services, etc.) would make these areas function differently from others, and the households within them live differently from households in other areas. Capturing the variation specific to these areas is therefore important, particularly in an urban-setting survey where clustering is very common.
3. The third grouping is the [WSUP](https://www.wsup.com)-identified areas of interest, based either on previous work locations or on proposed future locations. These are even more localised areas and are often not clearly delineated geographically. This will require work to delineate and map these areas as clearly as possible so that they can serve as third-level sub-divisions with their own sampling frame.
It should be noted that this third level of sub-division will require additional sample size, which should be considered by weighing the desirability of having results specific to [WSUP](https://www.wsup.com)-identified areas of interest against the resources available.
Each of these levels of spatial stratification will have their own sampling frames and as such will require their own sample sizes.
### Sample Size
The following sample size calculations apply to each of the spatial strata described above; this addresses WSUP's requirements for spatially disaggregated results based on defined sets of populations to report on.
**Sample size for estimating proportion-type indicators**
Based on the nature of the indicators that WSUP requires the survey to report on, the sample size calculations should be powered to report proportion-type estimates. In addition, the sample size required for the survey depends on:
1. `Precision` of the estimate required (as determined by the width of the confidence interval around the estimates); and,
2. `Variance inflating factor (VIF)` or `design effect (DEFF)` for the chosen design (e.g., simple random sample, cluster sample, etc.), reflecting the increase in sample required to use a cluster design.
The general formula to calculate sample sizes to estimate a proportion (sometimes called prevalence) indicator is:
<br/>
$$ n = z ^ 2 \times{\frac{p(1-p)}{c ^ 2}} $$
<br/>
where
<br/>
\(n = \text{sample size}\)
\(z = \text{z-value for} ~ 95\% ~ \text{confidence interval} \)
\(p = \text{expected proportion/prevalence} \)
\(c = \text{level of precision} \)
<br/>
To be able to use this formula, we set out the following parameters specific to the survey requirements and the sample frame described above:
<br/>
\(z = 1.96 ~ \text{(for} ~ 95\% ~ \text{CI)} \)
\(p = 50\% ~ \text{(assume proportion requiring highest sample size)} \)
\(c = 0.10 ~ \text{(} 10\% ~ \text{precision)} \)
<br/>
This gives us the following sample size:
<br/>
$$ n = 1.96 ^ 2 \times{\frac{0.5(1 - 0.5)}{0.1 ^ 2}} \approx 96 $$
<br/>
We therefore need a sample size of \(n = 96\) assuming a simple random sample survey design.
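For reference, the base calculation above can be reproduced with a few lines of R; this is a minimal sketch using the parameter values assumed in this section.

```r
# Base sample size for estimating a proportion under simple random sampling,
# using the parameters assumed above: z = 1.96 (95% CI), p = 0.5, c = 0.10 (+/- 10% precision)
sample_size_srs <- function(p = 0.5, c = 0.10, z = 1.96) {
  z ^ 2 * p * (1 - p) / c ^ 2
}

sample_size_srs()   # 96.04, i.e. approximately 96
```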
However, given that we will be using a clustered sample design, we need to take into consideration the design effect or `DEFF`. `DEFF` is related to the indicators being studied as well as the planned sample size per cluster, and is best calculated empirically from previous similar surveys in the area being studied. The best reference survey from which to estimate `DEFF` is the **Demographic and Health Survey (DHS)**, which is conducted every 5 years in most countries. In general, the DHS includes an urban sample from the capital city and from other main cities of the country, which can be used to estimate the `intra-cluster correlation coefficient (ICC)`. The `ICC` gives an estimate of how correlated the responses are for specific questions/indicators within a cluster, thereby providing a metric for the loss of variance caused by the cluster survey sample design. The purpose of `DEFF` is to inflate the base sample size calculated for a simple random sample design so as to compensate for this loss of variance. This counteracts the clustering that arises from a cluster sample design and from the type of indicators being surveyed (such as water and sanitation indicators). `DEFF` can be calculated using the following formula:
<br/>
$$ DEFF = 1 + (c - 1) \times \rho $$
<br/>
where
<br/>
\(c = \text{cluster size} \)
\(\rho = \text{intra-cluster correlation coefficient (ICC)} \)
<br/>
This equation shows that `DEFF` increases with increases in cluster size and/or in `ICC`. Since there is very little we can do to change `ICC`, the general idea will be to reduce cluster size as much as possible as this will bring down `DEFF`. The smaller the cluster size, the more that the survey design approximates a simple random sample.
Studies that have estimated `DEFF` using survey datasets from various countries have shown that `DEFF` for water and sanitation indicators can range from as low as 1.5 to as high as 7. `DEFF` should therefore be estimated per country whenever possible so that a more precise sample size estimate can be calculated.
Once `DEFF` has been estimated, the base sample size of \( n = 96 \) can be adjusted as follows:
<br/>
$$ n_{adjusted} = n_{base} \times DEFF = 96 \times DEFF $$
<br/>
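The `DEFF` adjustment can be sketched in the same way. The cluster size and `ICC` values below are purely illustrative assumptions, not estimates for any particular city; in practice they would be replaced by values estimated from a reference survey such as the DHS.

```r
# Design effect for a cluster design: DEFF = 1 + (c - 1) * rho
deff <- function(cluster_size, icc) {
  1 + (cluster_size - 1) * icc
}

n_base     <- 96                                 # base sample size from the calculation above
d          <- deff(cluster_size = 10, icc = 0.3) # illustrative values only
n_adjusted <- ceiling(n_base * d)                # DEFF-adjusted sample size

d            # 3.7
n_adjusted   # 356
```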
**Sample size for classifying proportion indicators**
Another way of reporting proportion indicators is by classification, that is, determining whether a certain threshold or thresholds is/are reached by the results of the indicator, enabling its categorisation into specific classes of achievement (i.e., high or low, success or failure, adequate or inadequate). The approach of classification is called `lot quality assurance sampling (LQAS)`, an analytical technique developed and used widely in industrial fields (pharmaceuticals and manufacturing, to name a few) as an efficient and cost-effective way of checking and controlling the quality of voluminous manufactured products[^11]. `LQAS` requires a much smaller sample size than the classical estimation approach, mainly because of the type of information it provides (classification rather than estimation). From a quality control perspective, particularly at an industrial level, a classification result determining whether a specific lot or set of goods produced is of good quality or not is just the information needed to decide whether or not to release the lot of goods to the market and whether an inspection of a specific production line needs to be performed to check its manufacturing process. Such decision-making does not require results of high precision. For example, if a drug company sets its quality control standard at \( 80\% \) of medicines in a batch meeting the quality requirements, then a survey with a sample size aimed at reporting an estimate with acceptable precision (as described above) that gives an estimate of 60% has the same decision-making value as a survey using a much smaller sample size (usually \( n = 40 \)) that classifies the batch of medicines as a failure (below \( 80\% \)). Both sets of results will guide plant managers in determining that the specific batch of medicines is of poor quality and hence should not be brought to market. However, the `LQAS` approach provides this information with less than half of the sample size.
This quality control technique has been adapted for use in health monitoring and health surveys, including those covering water and sanitation indicators, largely because of its small sample size requirement[^12], which lends itself well to routine and repeated applications over time and across locations.
`LQAS` generally requires a sample size of \(n = 40\). Adjustments to this sample size are made based on the survey design and the cut-offs used for classification, with a cut-off of \( 50\% \) requiring the largest sample size (\( n = 40 \)). Factoring in the cluster design, this minimum sample size can go up to \( n = 60 \), which is the typical sample size used for `LQAS` applied to water and sanitation indicators[^13].
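To illustrate the mechanics of classification (as opposed to estimation), the sketch below applies a simplified LQAS-style decision rule in which the threshold is the largest whole number not exceeding the sample size multiplied by the standard. This is shown only as an illustration of how a classification result is produced, not as the specific decision rule to be adopted for these surveys.

```r
# Simplified LQAS-style classification: with n sampled households and a
# programme standard p (e.g. 80%), the decision threshold is d = floor(n * p);
# the indicator is classified as reaching the standard when more than d
# sampled households meet it.
lqas_classify <- function(n_meeting, n_sampled, standard) {
  d <- floor(n_sampled * standard)
  if (n_meeting > d) "standard reached" else "standard not reached"
}

lqas_classify(n_meeting = 26, n_sampled = 40, standard = 0.80)
# "standard not reached" (26 of 40 households is below the 80% standard)
```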
### Stage 1 sampling
Stage 1 sampling is the selection of clusters or `primary sampling units (PSU)` from which the sample of households will be collected. We propose to select survey clusters using a variable grid spatial sampling approach. Cluster locations will be chosen for each sampling frame (non-slum, slum, and operational area) using `centric systematic area sampling (CSAS)`. This will be performed through the following general steps[^14]:
1. Get appropriate maps for the city to be surveyed. The types of maps that will be appropriate for this urban sampling approach are:
a) Map data files[^15] that delineate the boundaries of the whole city and each of the three levels of the sampling frame[^16];
b) High resolution gridded population maps[^17]; and,
c) Map data on roads, buildings, residential areas, landuse and places of interest for the city[^18].
2. Using maps with the boundaries of the different administrative levels overlaid with the high-resolution gridded population maps, draw a square grid over each spatial stratum that has been identified for the city. The size of the grid will depend on the number of sampling clusters decided when calculating the sample size. The grid should cover only areas with populations, as indicated by the population maps.
3. Get the centroid of each quadrat (square) of the grid. This identifies the primary sampling unit within the specified area where the stage 2 sampling will be conducted. The geographical coordinates of the centroid will be recorded and put on the stage 1 sampling list (and drawn on appropriate maps) for enumerators to use during data collection. Steps 2 and 3 are illustrated in the sketch after this list.
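Steps 2 and 3 can be sketched in R with the `sf` package. The objects `city_boundary` and `populated_areas` below are hypothetical polygon layers (for example, read from the boundary and gridded population sources listed above), and the grid dimensions are illustrative only.

```r
# Minimal sketch of the CSAS grid and centroid steps using the sf package.
# 'city_boundary' and 'populated_areas' are hypothetical sf polygon objects.
library(sf)

grid <- st_make_grid(city_boundary, n = c(6, 5), square = TRUE)  # 6 x 5 quadrats over the stratum
grid <- grid[lengths(st_intersects(grid, populated_areas)) > 0]  # keep only quadrats with population
psu  <- st_centroid(grid)                                        # quadrat centroids = stage 1 PSUs
stage1_list <- st_coordinates(psu)                               # coordinates for the stage 1 sampling list
```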
It should be noted that the stage 1 sample is not selected `proportional to population size (PPS)`, which is the stage 1 selection approach commonly used in most population surveys. Instead, the stage 1 sample is drawn spatially through an even square grid across the city. This approach addresses the limitation of a `PPS` sample, which gives a cluster with a higher population a higher probability of being included in the sample compared to clusters with smaller populations. Also, a population-weighted stage 1 sample is drawn from a specific sampling frame and hence can only be representative of that frame; any disaggregation of that dataset to provide results at a finer resolution will not be possible.
For the spatial sampling approach, the dataset can be disaggregated and aggregated accordingly by applying a cluster weighting approach (using the population at each cluster as the weights), as used in the analysis of stratified data, when estimating the indicators of interest. This is what is called a posterior weighting approach. Population estimates obtained from WorldPop remote sensing data can be used as the weights for the cluster data.
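As a simple illustration of posterior weighting, the sketch below combines hypothetical cluster-level results using cluster populations (for example, from WorldPop) as weights.

```r
# Posterior (population) weighting of cluster-level results using invented data.
cluster_data <- data.frame(
  cluster    = 1:3,
  p_improved = c(0.62, 0.48, 0.71),  # cluster-level indicator estimates
  pop        = c(1200, 3400, 800)    # cluster populations (e.g. from WorldPop) used as weights
)

# Population-weighted overall estimate of the indicator
with(cluster_data, sum(p_improved * pop) / sum(pop))   # approximately 0.55
```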
### Stage 2 sampling
At each cluster, a total of two households will be selected. The cluster will be identified using GPS coordinates on enumerators' devices, and the start household will be identified as the building closest to the GPS coordinates. In the case of a multi-household or apartment building, the household will be chosen randomly. To select the second household, we recommend a general approach of choosing the building or housing structure five doors to the right when facing the door of the start household.
It is very likely that the spatial organisation of the various cities to be sampled will differ from one another. Also, within each city, there will be differences in the setting and organisation of buildings and houses in different parts of the city, which will require adjustments or variations to the general approach for stage 2 sampling described above, specifically for selecting the second household. This is something that will require investigation and testing prior to the start of the survey in every city, with experiences documented in the process document produced during and after the survey. This will help in producing more nuanced guidelines on other approaches to stage 2 sampling.
## Endnotes
[^1]: Valid International, 2006. Community-based Therapeutic Care (CTC): A Field Manual, Oxford: Valid International. Available at <https://www.fantaproject.org/sites/default/files/resources/CTC-Field-Manual-Oct2006-508.pdf>
[^2]: see <https://www.fantaproject.org/focus-areas/nutrition-emergencies-mam/cmam-training>
[^3]: **RAM** is a quick, simple and low-cost survey method to assess and monitor individual datasets for multiple health and nutrition indicators for under-five children, older people and other vulnerable groups in different humanitarian crises and development contexts. Compared to other needs assessment and monitoring methodologies, RAM uses a smaller sample size without compromising relative precision and spatial distribution, and is therefore ideal for capturing reliable data for different vulnerable groups, including socio-cultural sub-populations, in a timely and cost-efficient manner. **S3M** is a development of the Centric Systematic Area Sampling (CSAS) method providing better spatial resolution with lower sampling costs. S3M is appropriate for mapping coverage (or prevalence of indicators) over very wide-areas. **SQUEAC** is a semi-quantitative method that provides in-depth analysis of barriers and boosters to coverage. It is designed as a routine program monitoring tool through the intelligent use of routine monitoring data complemented by other relevant data that are collected on a “little and often” basis. **SLEAC** is a rapid low-resource survey method that classifies coverage at the service delivery unit (SDU) level such as the district. A SLEAC survey identifies the category of coverage (e.g. “low coverage”, “moderate coverage” or “high coverage”) that describes the coverage of the service delivery unit being assessed. The advantage of this approach is that relatively small sample sizes (e.g. \(n ≤ 40\)) are required in order to make an accurate and reliable classification.
[^4]: Bangladesh, Ghana, Kenya, Madagascar, Mozambique and Zambia
[^5]: Main consideration for selecting population of interest is living in areas within the city where WSUP is currently providing interventions or supporting projects
[^6]: It is interesting to note that based on the WSUP questionnaires, pregnant women are identified separately but are classified within the grouping of those with disability
[^7]: The questionnaire uses a 4-category scale, i.e., satisfied, somewhat satisfied, somewhat dissatisfied, dissatisfied
[^8]: WSUP uses the Progress out of Poverty Index (PPI)
[^9]: In some cities, these settlements are categorised or named as slums or squatters areas and can be formally or informally recognised by authorities and/or city planners
[^10]: Especially true if the mapping of the low-income/poor areas are fixed specifically on administrative boundaries and not at more localised areas
[^11]: See Lanata, C.F. & Black, R.E., 1991. Lot quality assurance sampling techniques in health surveys in developing countries: advantages and current constraints. World Health Statistics Quarterly, 44(3), pp.133–139
[^12]: See Lanata, C.F. et al., 1988. Lot quality assurance sampling in health monitoring. The Lancet, 1(8577), pp.122–123 and Robertson, S.E. & Valadez, J.J., 2006. Global review of health care surveys using lot quality assurance sampling (LQAS), 1984– 2004. Social Science & Medicine, 63(6), pp.1648–1660.
[^13]: Based on previous work done by Valid International on application of LQAS on reporting water and sanitation indicators recommended by the Joint Monitoring Programme (JMP)
[^14]: Some modifications to these steps may need to be made on a country-by-country basis, particularly with regard to how the groupings of the spatial units are organised and sub-divided. Otherwise, the general approach is the same in any setting.
[^15]: There are many formats for map files. In general, the most common and most accessible formats are keyhole markup language (KML) files which are the standard output format produced by Google Maps/Google Earth and ESRI Shapefiles.
[^16]: These days, there are various online repositories of map data files for most countries, including cities. We most commonly use the repository maintained by Robert Hijmans at https://gadm.org and the one maintained by UN OCHA called the Humanitarian Data Exchange (https://data.humdata.org). Organisations such as the World Food Programme also maintain their own repositories, but most of their maps are also made available through the Humanitarian Data Exchange.
[^17]: The best openly available sources for this map data are from the WorldPop project (<https://www.worldpop.org>) and the Global Rural-Urban Mapping Project or GRUMP (<https://sedac.ciesin.columbia.edu/data/collection/grump-v1>).
[^18]: For most cities, this is available from OpenStreetMap or OSM (<https://www.openstreetmap.org>). Google Earth can also be used to view satellite imagery of the city.
|
/scratch/gouwar.j/cran-all/cranData/washdata/vignettes/washdata.Rmd
|
#' @title Data frame of meteorological data
#'
#' @description This sample dataset contains invented meteorological measurements recorded by weather stations.
#'
#'
#' @format A data frame with 800 rows and 4 variables:
#' \describe{
#' \item{phen}{phenomenon measured (Temperature, Rain)}
#' \item{time}{ordered numbers for time (a number in the format YYYYMMDD [Year Month Day] is possible too)}
#' \item{zone}{label classifying the group, for example the identification code of a weather station}
#' \item{value}{numeric values of the measurements}
#'
#' }
"dati"
|
/scratch/gouwar.j/cran-all/cranData/washeR/R/dati.R
|
#' @title Time series
#'
#' @description This is an example of a single time series with an increasing trend and some variability.
#'
#' @format A data frame with 35 rows and 1 variable:
#' \describe{
#' \item{dati}{pseudo-random numbers}
#'
#' }
"ts"
|
/scratch/gouwar.j/cran-all/cranData/washeR/R/ts.R
|
#' @title Outlier detection for single or grouped time series
#'
#' @description This function provides anomaly signals (with an optional graphical visualization) when there is a 'jump' in a single time series, or when the 'jump' differs too much from those of similar grouped time series.
#'
#' @param dati data frame (grouped time series: phenomenon+date+group+values) or vector (single time series)
#' @param graph logical value for graphical analysis (default=FALSE)
#' @param linear_analysis logical value for linear analysis (default=FALSE)
#' @param val_test_limit value for outlier detection sensitivity (default=5 ; max=10)
#' @param save_out logical value for saving detected outliers (default=FALSE)
#' @param out_out a character file name for saving outliers in csv form, delimited with ";" and using ',' as decimal separator (default out.csv)
#' @param pdf_out a character file name for saving graphic analysis in pdf file (default=out.pdf)
#' @param r_out number of rows of graphs per page (default=3)
#' @param c_out number of columns of graphs per page (default=2)
#' @param first_line value for first dotted line in graphic analysis (default=1)
#' @param pace_line value for pace in dotted line in graphic analysis (default=6)
#' @return Data frame of possible outliers in a triad. Output record: rows / time.2 / series / y1 / y2 / y3 / test(AV) / AV / n / median(AV) / mad(AV) / madindex(AV), where time.2 is the centre of the triad y1, y2, y3; test(AV) is the number to compare with 5 to detect an outlier; n is the number of observations in the group ....
#' @export
#' @examples
#' ## we can start with data without outliers but structured with co-movement between groups
#'data("dati")
#'## first column for phenomenon
#'## 2nd col for time written in ordered numbers or strings
#'## 3rd col for the group classification variable
#'## 4th col for values
#'str(dati)
#'#######################################
#'## a data frame without any outlier
#'#######################################
#'out=wash.out(dati)
#'out ## empty data frame
#'length(out[,1]) ## no row
#'## we can add two outliers
#'#### time=3 temperature value=0
#'dati[99,4]= 0
#'## ... and then for "rain" phenomenon!
#'#### time=3 rain value=37
#'dati[118,4]= 37
#'#######################################
#'## data.frame with 2 fresh outliers
#'#######################################
#'out=wash.out(dati)
#'## all "three terms" time series
#'## let's take a look at anomalous time series
#'out
#'## ... the same but we save results in a file....
#'## If we don't specify a name, out.csv is the default
#'out=wash.out(dati,save_out=TRUE,out_out="tabel_out.csv")
#'out
#'## we raise the parameter from 5 to 10, using this upper value to capture
#'## only particularly anomalous outliers
#'out=wash.out(dati, val_test_limit = 10)
#'out
#'## save plots and outliers in a pdf file "out.pdf" as a default
#'out=wash.out(dati, val_test_limit = 10, graph=TRUE)
#'out
#'## we can run the usual analysis for groups but we can also use the one
#'## reserved for every single time series
#'## (linear_analysis): two files for saved outliers (out.csv and linout.csv)
#'## and for graph display in two pdf files (out.pdf and linout.pdf)
#'out=wash.out(dati,val_test_limit=5,save_out=TRUE,linear_analysis=TRUE,graph=TRUE)
#'out
#'## out returns only the linear analysis...
#'## ... in this case we lose the co-movement information and we run the risk
#'## of finding too much variance in a single time series
#'## and detecting unlikely outliers
#'##########################################################
#'## single time series analysis
#'##########################################################
#'data(ts)
#'str(ts)
#'sts= ts$dati
#'plot(sts,type="b",pch=20,col="red")
#'## a time series with a variability and an increasing trend
#'## sts is a vector and linear analysis is the default one
#'out=wash.out(sts)
#'out
#'## we find no outlier
#'out=wash.out(sts,val_test_limit=5,linear_analysis=TRUE,graph=TRUE)
#'out
#'## no outlier
#'## We can add an outlier with limited amount
#'sts[5]=sts[5]*2
#'plot(sts,type="b",pch=20,col="red")
#'out=wash.out(sts,val_test_limit=5)
#'out
#'## the test is just over 5
#'out=wash.out(sts,val_test_limit=5,save_out=TRUE,graph=TRUE)
#'out
#'data(ts)
#'sts= ts$dati
#'sts[5]=sts[5]*3
#'## we can try a greater value to put an outlier of a certain importance
#'plot(sts,type="b",pch=20,col="blue")
#'out=wash.out(sts,val_test_limit=5,save_out=TRUE,graph=TRUE)
#'out
#'## the washer procedure identifies three triads of outlier values
#'system("rm *.csv *.pdf")
#'
wash.out = function( dati ,
# p t i y
# dati structure: phenom./date/series/values/... other
graph=FALSE ,
linear_analysis=FALSE ,
val_test_limit = 5 ,
save_out=FALSE ,
out_out="out.csv" ,
pdf_out="out.pdf" ,
r_out =3 ,
c_out=2 ,
first_line =1 ,
pace_line = 6
)
{
## start function code
## sub function recall
washer2.AV = function( dati ) # p t i y
{ # dati structure: phenom./date/series/values/... other
# example: Phenomenon Time Zone Value ...
# ----------- -------- -- ----- --------
# Temperature 20091231 A1 20.1 ...
# Temperature 20091231 A2 21.0 ...
# ...
# Rain 20081231 B1 123.0 ...
# ...
###############################################################################################
AV = function(y) { # y matrix 3 columns (y1 y2 y3) and n rows
AV=array(0,length(y[,1]))
100*(2*y[,2]-y[,1]-y[,3])/(stats::median(y[,1]+y[,2]+y[,3])+ y[,1]+y[,2]+y[,3]) }
# output array AV
###############################################################################################
test.AV = function(AV) { # AV array n rows
t(rbind(test.AV=abs(AV-stats::median(AV))/stats::mad(AV),AV=AV,n=length(AV),median.AV=stats::median(AV),mad.AV=stats::mad(AV) ,
madindex.AV=stats::mad(AV)*1000/150 )) }
# col 1 2 3 5 6 7
# output: test / AV / n / median(AV) / mad(AV) / madindex
################################################################################################
if (min(dati[,4])> 0) {
dati=dati[which(!is.na(dati[,4])),]
dati=dati[order(dati[,1],dati[,3],dati[,2]),]
fen=rownames( table(dati[,1]) )
nfen=length(fen)
out= NA
for ( fi in 1:nfen)
{ print(c("phenomenon:",fi) ,quote=FALSE)
time=rownames( table(dati[which(fen[fi]==dati[,1]),2]) )
n=length(time)
for ( i in 2:(n-1) )
{ datiy= dati[dati[,1] == fen[fi] & dati[,2] %in% c(time[i-1],time[i],time[i+1]),c(2,3,4)]
y1=stats::reshape(datiy,timevar=colnames(datiy)[1],idvar=colnames(datiy)[2],direction="wide" )
y1=y1[!is.na(apply(y1[,c(2:4)],1,sum)),]
y=y1[,c(2,3,4)]
colnames(y)=c("t.1","t.2","t.3")
out=rbind(out,data.frame(fen=fen[fi],t.2=time[i],
series=y1[,1],y=y,test.AV(AV(y))))
}
}
rownames(out)=(1:length(out[,1])-1)
washer2.AV=out[2:length(out[,1]),]
# col 1 2 3 4 5 6 7 8 9 10 11 12
# output: rows /time.2/series/y1/y2/y3/test(AV)/AV/ n /median(AV)/mad(AV)/madindex(AV)
# end function washer2.AV
} else print(" . . . zero or negative y: t r a n s l a t i o n r e q u i r e d !!!")
}
####################################################################################################
td_w= function(dati, phen="T")
{ ## time series arranged in groups of three (triads)
## ----------------------------------
time=rep(0,(length(dati)-2)*3)
zone=rep(0,(length(dati)-2)*3)
value=rep(0,(length(dati)-2)*3)
out = data.frame(phen,time,zone,value)
k=1
for (i in 1:(length(dati)-2))
{for( j in 1:3)
{
out$zone[k+j-1] = i
out$time[k+j-1] = j
out$value[k+j-1] = dati[i+j-1]
}
k=k+3
}
return(out)
}
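## translate(): shift the value column (col 4) so that all values are strictly positive,
## as required by washer2.AV(); data that are already positive are left essentially unchanged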
translate = function( dati )
{
min_var=min(dati[,4],na.rm = TRUE)
if (min_var>0) min_var = 0.0001
dati[,4]= dati[,4] - min_var+0.0001
return(dati)
}
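## inv_translate(): undo the shift applied by translate() on the y.t.1/y.t.2/y.t.3 columns
## of the output, restoring the original scale of the data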
inv_translate = function (out,dati)
{
min_var=min(dati[,4],na.rm = TRUE)
if (min_var>0) min_var = 0.0001
out$y.t.1 = out$y.t.1 +min_var -0.0001
out$y.t.2 = out$y.t.2 +min_var -0.0001
out$y.t.3 = out$y.t.3 +min_var -0.0001
return(out)
}
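## graph_f(): write a PDF showing, for each series containing outliers, a table of the
## flagged triads (max 25 rows) and a line plot of the series with the flagged positions
## marked by red vertical lines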
graph_f = function(dati,out,pdf_out,r_out,c_out,first_line,pace_line,val_test_limit)
{
#requireNamespace(gplots)
grDevices::pdf(pdf_out)
graphics::par(mfrow=c(r_out,c_out),cex.main=.6)
a= out[out[,"test.AV"]> val_test_limit,]
a= a[order(paste(a$fen,a$series)),]
series_old = 0
fen_old = 0
for (i in 1:length(a[,1]))
{
#######################
series=a[i,"series"]
fen=a[i,"fen"]
#######################
if( !(series == series_old & fen == fen_old))
{
elenco=a[a$series==a[i,"series"] & a$fen==a[i,"fen"],c(1,3,2,4,5,6,7)]
elenco[4:7]=round(elenco[4:7],2)
# names(elenco)[1]= "settore"
# names(elenco)[2] = "area"
if (length(elenco[,1]) > 25) elenco = elenco[1:25,]
gplots::textplot(elenco,cex = .50 , show.rownames = FALSE)
graphics::title("Outlier(s) list \n (max 25)")
##
cond = ( dati[,3]== series & dati[,1]== fen )
ss <- dati[ cond , c(2,4)]
ss=ss[order(ss[,1]),]
#ss[,1]=as.integer(ss[,1])
#ss
x=1:length(ss[,1])
time=ss[,1]
y=ss[,2]
graphics::plot(x,y,xaxt="n",type="l",pch=20, cex=0.5,col="blue",
main=paste('Series:',a[i,"series"],' phen. ',fen),
lwd=0.4)
graphics::points(x,y,col="darkblue",pch=20, cex=0.5)
graphics::axis(1, at=seq(first_line, length(ss[,1]), by = pace_line),
labels=time[seq(first_line, length(ss[,1]), by = pace_line)],
las=2,cex.axis=0.8)
graphics::abline(h=0, col = "blue",lty = 3)
graphics::abline(v=seq(first_line-0.1, length(ss[,1]), by = pace_line), col = "blue",lty = 3)
series_old = series
fen_old = fen
}
out_t2=which(ss[,1]==a[i,"t.2"])
graphics::abline(v=out_t2-1.1,col="red", lwd=0.1)
graphics::abline(v=out_t2+1.1,col="red", lwd =0.1)
}
grDevices::dev.off() #
}
if(is.vector(dati))
{ linear_analysis=TRUE
if(length(dati)<10)
{
print("No linear analisys: too few data!")
return(0)
}
if (min(dati)>=0 ) min_var = 0.0001
else min_var = min(dati)
dati= dati - min_var+0.0001
out= washer2.AV(td_w(dati))
out=out[out[,7]>val_test_limit,]
if( length(out[,1])==0) print("NO outlier!!!")
dum=out[,2]
out[,2]=out[,3]+1
out[,3]=dum
out$y.t.1 = out$y.t.1 +min_var -0.0001
out$y.t.2 = out$y.t.2 +min_var -0.0001
out$y.t.3 = out$y.t.3 +min_var -0.0001
dati= dati +min_var-0.0001
if(save_out & length(out[,1])>0){
utils::write.csv2(out[out[,7]>val_test_limit,],file=out_out)
print(paste("File '",out_out,"' saved!",sep=""))
print(paste("Dir -> ",getwd()))
}
if(graph & length(out[,1])>0)
{
dati=data.frame(phen=rep("T",length(dati)),data=1:length(dati),series=rep(2,length(dati)),dati=dati)
#print(dati)
#print(out)
graph_f(dati,out,paste("lin",pdf_out,sep=""),r_out=2,c_out=1,first_line,pace_line,val_test_limit)
print(paste("File'",paste("lin",pdf_out,sep=""),"' saved!",sep=""))
print(paste("Dir -> ",getwd()))
}
return(out[out[,7]>val_test_limit,])
}
else if(is.data.frame(dati))
{
out= washer2.AV(translate(dati))
out=out[out[,7]>val_test_limit,]
if( length(out[,1])==0) print("NO outlier!!!")
else out=inv_translate(out,dati)
if(save_out & length(out[,1])>0){
utils::write.csv2(out[out[,7]>val_test_limit,],file=out_out)
print(paste("File '",out_out,"' saved!",sep=""))
print(paste("Dir -> ",getwd()))
}
if(graph & length(out[,1])>0)
{
graph_f(dati,out,pdf_out,r_out,c_out,first_line,pace_line,val_test_limit)
print(paste("File '",pdf_out,"' saved!",sep=""))
print(paste("Dir -> ",getwd()))
}
if(linear_analysis)
{
fen_series=dati[,c(1,3)]
datiw=dati[,c(1,2,3,4)]
datiw[,1] = paste(datiw[,1],datiw[,3], sep="")
fen_series[,3]=paste("p_",datiw[,1],sep="")
datiw[,3] <- NULL
names(datiw)[1]="col"
names(datiw)[2]="DATA"
names(datiw)[3]="p"
datiw=stats::reshape(datiw,v.names="p",idvar="DATA",timevar="col",direction="wide",sep="_")
n_col = length(datiw[1,])
print(paste ("cols:",n_col))
n_row = length(datiw[,1])
if(n_row<10)
{
print("No linear analisys: too few data!")
return(out[out[,7]>val_test_limit,])
}
print(paste ("rows:",n_row))
mesi = datiw$DATA[c(-1,-n_row)]
###############################################################
## first elaboration out of cycle
###############################################################
min_var= min(datiw[,2],na.rm = TRUE)
datiw[,2]= datiw[,2] - min_var +0.0001
out=washer2.AV(td_w(datiw[,2]))
out$fen= colnames(datiw)[2]
out$t.2=mesi
out$y.t.1 = out$y.t.1 +min_var -0.0001
out$y.t.2 = out$y.t.2 +min_var -0.0001
out$y.t.3 = out$y.t.3 +min_var -0.0001
datiw[,2] = datiw[,2] +min_var -0.0001
###############################################################
for (i in 3:n_col)
###############################################################
{
min_var= min(datiw[,i],na.rm = TRUE)
datiw[,i]= datiw[,i] - min_var +0.0001
out0=washer2.AV(td_w(datiw[,i]))
out0$fen= colnames(datiw)[i]
out0$t.2=mesi
out0$y.t.1 = out0$y.t.1 +min_var -0.0001
out0$y.t.2 = out0$y.t.2 +min_var -0.0001
out0$y.t.3 = out0$y.t.3 +min_var -0.0001
datiw[,i] = datiw[,i] +min_var -0.0001
out=rbind(out,out0)
print(paste("Time series: ",i))
}
###############################################################
for (i in 1:length(out[,1]))
{pos=match(out[i,1],fen_series[,3])
as.character(fen_series[pos,1]) -> out[i,1]
as.character(fen_series[pos,2]) -> out[i,3]
}
}
out=out[out[,7]>val_test_limit,]
if(linear_analysis & save_out & length(out[,1])>0){
utils::write.csv2(out[out[,7]>val_test_limit,],file=paste("lin",out_out,sep=""))
print(paste("File 'lin",out_out,"' saved!",sep=""))
print(paste("Dir -> ",getwd()))
}
if(linear_analysis & graph & length(out[,1])>0)
{
graph_f(dati,out,paste("lin",pdf_out,sep=""),r_out,c_out,first_line,pace_line,val_test_limit)
print(paste("File '",paste("lin",pdf_out,sep=""),"' saved!",sep=""))
print(paste("Dir -> ",getwd()))
}
}
else print ("not compliant data ")
#dev.off() #
return(out)
## end function
}
|
/scratch/gouwar.j/cran-all/cranData/washeR/R/wash.out.R
|
#' Link bills to Revised Code of Washington (RCW)
#'
#' Get a listing of all RCW citations affected by a given bill
#'
#' @inheritParams getBillSponsors
#' @inheritParams getLegislation
#'
#' @return \code{getAffectedRCW} returns an object of type equal to the
#' \code{type} argument (defaults to dataframe)
#'
#' @export
#' @examples
#' ## usage for a single bill case, XML form
#' getAffectedRCW("2005-06", "HB 1427", type = "xml")
#'
#' ## generates a dataframe of affected codes from all bills in 2007
#' \dontrun{
#' bills <- getLegislationByYear("2007")
#' codesAffected <- getAffectedRCW("2007-08", bills$BillId)}
#'
#' @section Note: for more information on RCW codes, see
#' \url{https://apps.leg.wa.gov/rcw/}
getAffectedRCW <- function(biennium, billId, paired = TRUE, type = c("df", "list", "xml")) {
type <- rlang::arg_match(type)
if(!all(grepl(biennium_pattern, biennium))) {
stop("Biennium formatted incorrectly. Use ?getAffectedRCW for more information")
} else if(!all(as.numeric(substr(biennium,1,4)) >= 1991)) {
stop("Biennium out of range. Information is available going back to 1991-92")
} else if(!all(grepl(billId_pattern, billId))) {
stop("Bill ID formatted incorrectly. Use ?getAffectedRCW for more information")
}
if(length(biennium) == length(billId) & paired) {
request <- data.frame(biennium = biennium, billId = billId)
} else {
request <- expand.grid(biennium, billId, KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)
}
if(type == "df") {
out <- data.frame()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetRcwCitesAffected?biennium=",
request[bill,1], "&billId=", gsub(" ", "%20", request[bill,2]), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToDataFrame(tbl,
stringsAsFactors = FALSE)
if(nrow(tbl) > 0) {
tbl$Biennium <- request[bill,1]
tbl$BillId <- request[bill,2]
tbl <- tbl[c("Biennium", "BillId",
setdiff(names(tbl),c("Biennium","BillId")))]
out <- dplyr::bind_rows(out, tbl)
out <- out[!duplicated(out),]
}
}
} else if(type == "list") {
out <- list()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetRcwCitesAffected?biennium=",
request[bill,1], "&billId=", gsub(" ", "%20", request[bill,2]), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToList(tbl)
list <- list(tbl)
names(list) <- request[bill,2]
if(length(tbl) > 0) {
out <- c(out, list)
}
}
} else if(type == "xml") {
out <- c()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetRcwCitesAffected?biennium=",
request[bill,1], "&billId=", gsub(" ", "%20", request[bill,2]), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
out <- c(out,tbl)
}
names(out) <- paste(request[,1],request[,2],sep="//")
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/washex/R/getAffectedRCW.R
|
#' Get amendments to a bill
#'
#' Get a list of all proposed amendments (accepted and rejected) on the bill,
#' including the URL to the amendment text
#'
#' @inheritParams getLegislation
#'
#' @return \code{getAmendments} returns an object of type equal to the
#' \code{type} argument (defaults to dataframe)
#' @export
#'
#' @examples
#' ## get amendments for a single bill
#' getAmendments("2007-08", "1001")
#'
#' ## get amendments for a specific set of bills
#' years <- c("2005-06","2007-08","2007-08")
#' bills <- c(1447,1219,1001)
#'
#' getAmendments(years, bills, paired = TRUE, type = "df")
getAmendments <- function(biennium, billNumber, paired = TRUE, type = c("df", "list", "xml")) {
type <- rlang::arg_match(type)
billNumber <- as.character(billNumber)
if(!all(grepl(biennium_pattern, biennium))) {
stop("Biennium formatted incorrectly. Use ?getAmendments for more information")
} else if(!all(as.numeric(substr(biennium,1,4)) >= 1991)) {
stop("Biennium out of range. Information is available going back to 1991-92")
} else if(!all(grepl(billNum_pattern, billNumber))) {
stop("Bill Number formatted incorrectly. Use ?getAmendments for more information")
}
if(length(biennium) == length(billNumber) & paired) {
request <- data.frame(biennium = biennium, billNumber = billNumber)
} else {
request <- expand.grid(biennium, billNumber, KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)
}
if(type == "df") {
out <- data.frame()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetAmendmentsForBiennium?biennium=",
request[bill,1], "&billNumber=", request[bill,2], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToDataFrame(tbl,
stringsAsFactors = FALSE)
if(nrow(tbl) > 0) {
tbl$Biennium <- request[bill,1]
tbl$BillNumber <- request[bill,2]
tbl <- tbl[c("Biennium", "BillNumber",
setdiff(names(tbl),c("Biennium", "BillNumber")))]
out <- dplyr::bind_rows(out, tbl)
out <- out[!duplicated(out),]
}
}
} else if(type == "list") {
out <- list()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetAmendmentsForBiennium?biennium=",
request[bill,1], "&billNumber=", request[bill,2], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToList(tbl)
list <- list(tbl)
names(list) <- request[bill,2]
if(length(tbl) > 0) {
out <- c(out, list)
}
}
} else if(type == "xml") {
out <- c()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetAmendmentsForBiennium?biennium=",
request[bill,1], "&billNumber=", request[bill,2], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
out <- c(out,tbl)
}
names(out) <- paste(request[,1],request[,2],sep="//")
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/washex/R/getAmendments.R
|
#' Get sponsor information for a bill
#'
#' @inheritParams getLegislation
#' @param billId Character vector containing the bill(s) to be retrieved.
#' Each argument should take the form "XX YYYY", where XX
#' is the prefix (HB, SB, etc.) and YYYY is the bill number.
#'
#' @return \code{getBillSponsors} returns an object of type equal to the
#' \code{type} argument (defaults to dataframe)
#' @export
#'
#' @examples
#' ## get the list of all sponsors on a set of bills, filtered for primary sponsorship
#'
#' spons <- getBillSponsors("2007-08", c("HB 1001", "HB 1002", "HB 1003"))
#' if(!is.null(spons)) sponsP <- subset(spons, Type == "Primary")
getBillSponsors <- function(biennium, billId, paired = TRUE, type = c("df", "list", "xml")) {
type <- rlang::arg_match(type)
if(!all(grepl(biennium_pattern, biennium))) {
stop("Biennium formatted incorrectly. Use ?getBillSponsors for more information")
} else if(!all(as.numeric(substr(biennium,1,4)) >= 1991)) {
stop("Biennium out of range. Information is available going back to 1991-92")
} else if(!all(grepl(billId_pattern, billId))) {
stop("Bill ID formatted incorrectly. Use ?getBillSponsors for more information")
}
if(length(biennium) == length(billId) & paired) {
request <- data.frame(biennium = biennium, billId = billId)
} else {
request <- expand.grid(biennium, billId, KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)
}
if(type == "df") {
out <- data.frame()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetSponsors?biennium=",
request[bill,1], "&billId=", gsub(" ", "%20", request[bill,2]), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToDataFrame(tbl,
stringsAsFactors = FALSE)
if(nrow(tbl) > 0) {
tbl$Biennium <- request[bill,1]
tbl$BillId <- request[bill,2]
tbl <- tbl[c("Biennium", "BillId",
setdiff(names(tbl),c("Biennium","BillId")))]
out <- dplyr::bind_rows(out, tbl)
out <- out[!duplicated(out),]
}
}
} else if(type == "list") {
out <- list()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetSponsors?biennium=",
request[bill,1], "&billId=", gsub(" ", "%20", request[bill,2]), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToList(tbl)
list <- list(tbl)
names(list) <- request[bill,2]
if(length(tbl) > 0) {
out <- c(out, list)
}
}
} else if(type == "xml") {
out <- c()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetSponsors?biennium=",
request[bill,1], "&billId=", gsub(" ", "%20", request[bill,2]), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
out <- c(out,tbl)
}
names(out) <- paste(request[,1],request[,2],sep = "//")
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/washex/R/getBillSponsors.R
|
#' Get committee members
#'
#' @inheritParams getLegislation
#' @param agency One of "House" or "Senate", or a vector with these as its
#' elements.
#' @param name Character vector of committee names. To get the committee names
#' for a particular session, see \code{\link{getCommittees}}.
#'
#' @return \code{getCommitteeMembers} returns an object of type equal to the
#' \code{type} argument (defaults to dataframe)
#' @export
#'
#' @examples
#' ## get all committee members for a select number of committees and years
#' years <- c("2011-12","2013-14")
#' comms <- c("Education","Judiciary")
#'
#' getCommitteeMembers(years, agency = "House", comms, paired = TRUE)
getCommitteeMembers <- function(biennium, agency = c("House", "Senate"), name, paired = FALSE, type = c("df", "list", "xml")) {
type <- rlang::arg_match(type)
if(!all(grepl(biennium_pattern, biennium))) {
stop("Biennium formatted incorrectly. Use ?getCommitteeMembers for more information")
} else if(!all(as.numeric(substr(biennium,1,4)) >= 1991)) {
stop("Biennium out of range. Information is available going back to 1991-92")
}
agency <- paste(toupper(substr(agency,1,1)),
substr(agency,2,nchar(agency)), sep = "")
if(!all(agency %in% c("House", "Senate"))) {
stop("Agency name invalid. Make sure to use one of 'House' or 'Senate'")
}
if(length(biennium) == length(agency) &
length(biennium) == length(name) & paired) {
request <- data.frame(biennium = biennium, agency = agency, name = name)
} else {
request <- expand.grid(biennium, agency, name, KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)
}
if(type == "df") {
out <- data.frame()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"CommitteeService.asmx/GetCommitteeMembers?biennium=",
request[bill,1], "&agency=", request[bill,2], "&committeeName=",
gsub("&", "%26", gsub(" ", "%20", request[bill,3])), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToDataFrame(tbl,
stringsAsFactors = FALSE)
if(nrow(tbl) > 0) {
tbl$Biennium <- request[bill,1]
tbl$Agency <- request[bill,2]
tbl$CommitteeName <- request[bill,3]
tbl <- tbl[c("Biennium", "Agency", "CommitteeName",
setdiff(names(tbl), c("Biennium", "Agency", "CommitteeName")))]
out <- dplyr::bind_rows(out, tbl)
out <- out[!duplicated(out),]
}
}
} else if(type == "list") {
out <- list()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"CommitteeService.asmx/GetCommitteeMembers?biennium=",
request[bill,1], "&agency=", request[bill,2], "&committeeName=",
gsub("&", "%26", gsub(" ", "%20", request[bill,3])), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToList(tbl)
list <- list(tbl)
names(list) <- request[bill,3]
if(length(tbl) > 0) {
out <- c(out, list)
}
}
} else if(type == "xml") {
out <- c()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"CommitteeService.asmx/GetCommitteeMembers?biennium=",
request[bill,1], "&agency=", request[bill,2], "&committeeName=",
gsub("&", "%26", gsub(" ", "%20", request[bill,3])), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
out <- c(out, tbl)
}
names(out) <- paste(request[,1], request[,2], request[,3], sep = "//")
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/washex/R/getCommitteeMembers.R
|
#' Get legislative committees
#'
#' Get a list of all committees that were active during the biennium,
#' along with their respective committee code
#'
#' @inheritParams getSponsors
#'
#' @return \code{getCommittees} returns an object of type equal to the
#' \code{type} argument (defaults to dataframe)
#' @export
#'
#' @examples
#' getCommittees("2007-08")
getCommittees <- function(biennium, type = c("df", "list", "xml")) {
type <- rlang::arg_match(type)
if(!all(grepl(biennium_pattern, biennium))) {
stop("Biennium formatted incorrectly. Use ?getCommittees for more information")
} else if(!all(as.numeric(substr(biennium,1,4)) >= 1991)) {
stop("Biennium out of range. Information is available going back to 1991-92")
}
if(type == "df") {
out <- data.frame()
for(i in 1:length(biennium)) {
path <- paste(prefix,
"CommitteeService.asmx/GetCommittees?biennium=",
biennium[i], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToDataFrame(tbl,
stringsAsFactors = FALSE)
if(nrow(tbl) > 0) {
tbl$Biennium <- biennium[i]
tbl <- tbl[c("Biennium",
setdiff(names(tbl),"Biennium"))]
out <- dplyr::bind_rows(out, tbl)
out <- out[!duplicated(out),]
}
}
} else if(type == "list") {
out <- list()
for(i in 1:length(biennium)) {
path <- paste(prefix,
"CommitteeService.asmx/GetCommittees?biennium=",
biennium[i], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToList(tbl)
list <- list(tbl)
names(list) <- biennium[i]
if(length(tbl) > 0) {
out <- c(out, list)
}
}
} else if(type == "xml") {
out <- c()
for(i in 1:length(biennium)) {
path <- paste(prefix,
"CommitteeService.asmx/GetCommittees?biennium=",
biennium[i], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
out <- c(out, tbl)
}
names(out) <- biennium
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/washex/R/getCommittees.R
|
#' Get bill status
#'
#' Get the current status of a given bill
#'
#' @inheritParams getLegislation
#'
#' @return \code{getCurrentStatus} returns an object of type equal to the
#' \code{type} argument (defaults to dataframe)
#' @export
#'
#' @examples
#' getCurrentStatus("2007-08", "1001")
#'
#' ## get final status for all bills written in 2011
#' \dontrun{
#' billNums <- getLegislationByYear("2011")
#' status <- getCurrentStatus("2011-12", billNums$billNumber)}
#'
#' @section Note:
#' This function returns the bill's status as of today. If a bill
#' was never passed, it lists the most recent status. To
#' get a bill's complete history, use \code{\link{getStatusChanges}}
getCurrentStatus <- function(biennium, billNumber, paired = TRUE, type = c("df", "list", "xml")) {
type <- rlang::arg_match(type)
billNumber <- as.character(billNumber)
if(!all(grepl(biennium_pattern, biennium))) {
stop("Biennium formatted incorrectly. Use ?getCurrentStatus for more information")
} else if(!all(as.numeric(substr(biennium,1,4)) >= 1991)) {
stop("Biennium out of range. Information is available going back to 1991-92")
} else if(!all(grepl(billNum_pattern, billNumber))) {
stop("Bill Number formatted incorrectly. Use ?getCurrentStatus for more information")
}
if(length(biennium) == length(billNumber) & paired) {
request <- data.frame(biennium = biennium, billNumber = billNumber)
} else {
request <- expand.grid(biennium, billNumber, KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)
}
if(type == "df") {
out <- data.frame()
for(bill in 1:nrow(request)) {
path <- paste(prefix, "legislationservice.asmx/GetCurrentStatus?biennium=",
gsub(" ", "%20", request[bill,1]), "&billNumber=",
gsub(" ", "%20", request[bill,2]), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToList(tbl)
df <- data.frame(t(matrix(unlist(tbl))),
stringsAsFactors = FALSE)
colnames(df) <- names(tbl)
rownames(df) <- ""
if(nrow(df) > 0) {
df$Biennium <- request[bill,1]
df$BillNumber <- request[bill,2]
df <- df[c("Biennium", "BillNumber",
setdiff(names(df), c("Biennium", "BillNumber")))]
out <- dplyr::bind_rows(out, df)
out <- out[!duplicated(out),]
}
}
} else if(type == "list") {
out <- list()
for(bill in 1:nrow(request)) {
path <- paste(prefix, "legislationservice.asmx/GetCurrentStatus?biennium=",
gsub(" ", "%20", request[bill,1]), "&billNumber=",
gsub(" ", "%20", request[bill,2]), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToList(tbl)
list <- list(tbl)
names(list) <- request[bill,2]
if(length(tbl) > 0) {
out <- c(out, list)
}
}
} else if(type == "xml") {
out <- c()
for(bill in 1:nrow(request)) {
path <- paste(prefix, "legislationservice.asmx/GetCurrentStatus?biennium=",
gsub(" ", "%20", request[bill,1]), "&billNumber=",
gsub(" ", "%20", request[bill,2]), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
out <- c(out, tbl)
}
names(out) <- paste(request[,1], request[,2], sep = "//")
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/washex/R/getCurrentStatus.R
|
#' Get hearings regarding a bill
#'
#' Get a list of dates, locations, and descriptions of all
#' committee hearings on a particular bill
#'
#' @inheritParams getLegislation
#'
#' @return \code{getHearings} returns an object of type equal to the
#' \code{type} argument (defaults to dataframe)
#' @export
#'
#' @examples
#' ## get hearings for all senate bills in 2011
#' bills <- getLegislationByYear("2011")
#' if(!is.null(bills)) billsSenate <- subset(bills, OriginalAgency == "Senate")
#'
#' \dontrun{getHearings(billsSenate$Biennium, billsSenate$BillNumber, paired = TRUE, type = "df")}
#'
#' @section Note: Due to the nature of the resulting XML document,
#' the function trims data from excessively nested lists when
#' \code{type = "df"}. In order to access the full information, use
#' \code{type = "list"} instead.
getHearings <- function(biennium, billNumber, paired = TRUE, type = c("df", "list", "xml")) {
type <- rlang::arg_match(type)
billNumber <- as.character(billNumber)
if(!all(grepl(biennium_pattern, biennium))) {
stop("Biennium formatted incorrectly. Use ?getHearings for more information")
} else if(!all(as.numeric(substr(biennium,1,4)) >= 1991)) {
stop("Biennium out of range. Information is available going back to 1991-92")
} else if(!all(grepl(billNum_pattern, billNumber))) {
stop("Bill Number formatted incorrectly. Use ?getHearings for more information")
}
if(length(biennium) == length(billNumber) & paired) {
request <- data.frame(biennium = biennium, billNumber = billNumber)
} else {
request <- expand.grid(biennium, billNumber, KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)
}
if(type == "df") {
out <- data.frame()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetHearings?biennium=",
request[bill,1], "&billNumber=", request[bill,2], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- purrr::map(XML::xmlToList(tbl), purrr::flatten)
tbl <- purrr::map(tbl, ~ purrr::discard(.x, is.null))
tbl <- purrr::map(tbl, ~ toss(.x, "Committees"))
if(length(tbl) > 0) {
tbl <- purrr::map(tbl, ~ data.frame(t(matrix(unlist(.x), nrow = length(.x), dimnames = list(names(.x)))),
stringsAsFactors = FALSE))
df <- dplyr::bind_rows(tbl)
out <- dplyr::bind_rows(out, df)
out <- out[!duplicated(out),]
}
}
} else if(type == "list") {
out <- list()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetHearings?biennium=",
request[bill,1], "&billNumber=", request[bill,2], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToList(tbl)
list <- list(tbl)
names(list) <- request[bill,2]
if(length(tbl) > 0) {
out <- c(out, list)
}
}
} else if(type == "xml") {
out <- c()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetHearings?biennium=",
request[bill,1], "&billNumber=", request[bill,2], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
out <- c(out, tbl)
}
names(out) <- paste(request[,1], request[,2], sep = "//")
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/washex/R/getHearings.R
|
#' Get summary information on a particular bill
#'
#' Get legislative summary information for a particular bill,
#' including bill ID, introduction date, bill title(s), and
#' description
#'
#' @param biennium Character vector representing the biennium(s) to be
#' searched. Each argument should take the form "XXXX-YY"
#' @param billNumber Character or numeric vector containing the bill number(s)
#' to be retrieved.
#' @param paired If TRUE, will assume that equal length vectors represent
#' paired data. Set to FALSE to generate every combination of the input
#' arguments. Applies to equal length vector inputs only.
#' @param type One of "df", "list", or "xml". Specifies the format for
#' the output.
#'
#' @return \code{getLegislation} returns an object of type equal to the
#' \code{type} argument (defaults to dataframe)
#' @export
#'
#' @examples
#' getLegislation("2007-08", "1001")
#'
#' ## get XML data for the first 100 bills of the 2007-08 session
#' \dontrun{getLegislation("2007-08", 1001:1100, type = "xml")}
getLegislation <- function(biennium, billNumber, paired = TRUE, type = c("df", "list", "xml")) {
type <- rlang::arg_match(type)
billNumber <- as.character(billNumber)
if(!all(grepl(biennium_pattern, biennium))) {
stop("Biennium formatted incorrectly. Use ?getLegislation for more information")
} else if(!all(as.numeric(substr(biennium,1,4)) >= 1991)) {
stop("Biennium out of range. Information is available going back to 1991-92")
} else if(!all(grepl(billNum_pattern, billNumber))) {
stop("Bill Number formatted incorrectly. Use ?getLegislation for more information")
}
if(length(biennium) == length(billNumber) & paired) {
request <- data.frame(biennium = biennium, billId = billNumber)
} else {
request <- expand.grid(biennium, billNumber, KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)
}
if(type == "df") {
out <- data.frame()
for(bill in 1:nrow(request)) {
path <- paste(prefix, "legislationservice.asmx/GetLegislation?biennium=",request[bill,1],
"&billNumber=", gsub(" ", "%20", request[bill,2]), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToDataFrame(tbl,
stringsAsFactors = FALSE)
if(nrow(tbl) > 0) {
tbl$Biennium = request[bill,1]
tbl$BillNumber = request[bill,2]
tbl <- tbl[c("Biennium", "BillNumber",
setdiff(names(tbl), c("Biennium", "BillNumber")))]
out <- dplyr::bind_rows(out, tbl)
out <- out[!duplicated(out),]
}
}
} else if(type == "list") {
out <- list()
for(bill in 1:nrow(request)) {
path <- paste(prefix, "legislationservice.asmx/GetLegislation?biennium=",request[bill,1],
"&billNumber=", gsub(" ", "%20", request[bill,2]), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToList(tbl)
list <- list(tbl)
names(list) <- request[bill,2]
if(length(tbl) > 0) {
out <- c(out, list)
}
}
} else if(type == "xml") {
out <- c()
for(bill in 1:nrow(request)) {
path <- paste(prefix, "legislationservice.asmx/GetLegislation?biennium=",request[bill,1],
"&billNumber=", gsub(" ", "%20", request[bill,2]), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
out <- c(out, tbl)
}
names(out) <- paste(request[,1], request[,2], sep = "//")
}
return(out)
}
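# Illustrative sketch (not run): how the `paired` argument shapes the request
# grid built above. With paired = TRUE, equal-length vectors are matched
# element-wise; with paired = FALSE, expand.grid() crosses every biennium with
# every bill number.
if (FALSE) {
biennium <- c("2007-08", "2009-10")
billNumber <- c("1001", "1005")
# paired = TRUE -> 2 requests: ("2007-08", "1001") and ("2009-10", "1005")
data.frame(biennium = biennium, billId = billNumber)
# paired = FALSE -> 4 requests, one per combination
expand.grid(biennium, billNumber, KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)
}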
|
/scratch/gouwar.j/cran-all/cranData/washex/R/getLegislation.R
|
#' Get legislation by year
#'
#' Get a list of all bills introduced during the year
#'
#' @inheritParams getLegislation
#' @param year Character or numeric vector representing the year(s) to be
#' searched.
#'
#' @return \code{getLegislationByYear} returns an object of type equal to the
#' \code{type} argument (defaults to dataframe)
#' @export
#'
#' @examples
#' \dontrun{getLegislationByYear("2007")}
getLegislationByYear <- function(year, type = c("df", "list", "xml")) {
type <- rlang::arg_match(type)
year <- as.character(year)
if(!all(grepl(year_pattern, year))) {
stop("Year formatted incorrectly. Use ?getLegislationByYear for more information")
} else if(!all(as.numeric(year) >= 1991)) {
stop("Year out of range. Information is available going back to 1991")
}
if(type == "df") {
out <- data.frame()
for(i in 1:length(year)) {
path <- paste(prefix, "legislationservice.asmx/GetLegislationByYear?year=",
gsub(" ", "%20", year[i]), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToDataFrame(tbl,
stringsAsFactors = FALSE)
if(nrow(tbl) > 0) {
tbl$Year <- year[i]
tbl <- tbl[c("Year",
setdiff(names(tbl), "Year"))]
out <- dplyr::bind_rows(out, tbl)
out <- out[!duplicated(out),]
}
}
} else if(type == "list") {
out <- list()
for(i in 1:length(year)) {
path <- paste(prefix, "legislationservice.asmx/GetLegislationByYear?year=",
gsub(" ", "%20", year[i]), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToList(tbl)
list <- list(tbl)
names(list) <- year[i]
if(length(tbl) > 0) {
out <- c(out, list)
}
}
} else if(type == "xml") {
out <- c()
for(i in 1:length(year)) {
path <- paste(prefix, "legislationservice.asmx/GetLegislationByYear?year=",
gsub(" ", "%20", year[i]), sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
out <- c(out, tbl)
}
names(out) <- year
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/washex/R/getLegislationByYear.R
|
#' Get all bills signed into law
#'
#' Get a dataframe containing all of the bills that originated in a
#' given chamber and were eventually signed into law
#'
#' @inheritParams getCommitteeMembers
#'
#' @return \code{getLegislationSigned} returns an object of type equal to the
#' \code{type} argument (defaults to dataframe)
#' @export
#'
#' @examples
#' ## get all bills signed into law from the Senate between 2007-2010
#' bienniums <- c("2007-08", "2009-10")
#' getLegislationSigned(bienniums, "Senate")
getLegislationSigned <- function(biennium, agency = c("House", "Senate"), paired = FALSE, type = c("df", "list", "xml")) {
type <- rlang::arg_match(type)
if(!all(grepl(biennium_pattern, biennium))) {
stop("Biennium formatted incorrectly. Use ?getLegislationSigned for more information")
} else if(!all(as.numeric(substr(biennium,1,4)) >= 1991)) {
stop("Biennium out of range. Information is available going back to 1991-92")
}
agency <- paste(toupper(substr(agency,1,1)),
substr(agency,2,nchar(agency)), sep = "")
if(!all(agency %in% c("House", "Senate"))) {
stop("Agency name invalid. Make sure to use one of 'House' or 'Senate'")
}
if(length(biennium) == length(agency) & paired) {
request <- data.frame(biennium = biennium, agency = agency)
} else {
request <- expand.grid(biennium, agency, KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)
}
if(type == "df") {
out <- data.frame()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetLegislationGovernorSigned?biennium=",
request[bill,1], "&agency=", request[bill,2], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToDataFrame(tbl,
stringsAsFactors = FALSE)
if(nrow(tbl) > 0) {
tbl$Biennium <- request[bill,1]
tbl$Agency <- request[bill,2]
tbl <- tbl[c("Biennium", "Agency",
setdiff(names(tbl), c("Biennium", "Agency")))]
out <- dplyr::bind_rows(out, tbl)
out <- out[!duplicated(out),]
}
}
} else if(type == "list") {
out <- list()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetLegislationGovernorSigned?biennium=",
request[bill,1], "&agency=", request[bill,2], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToList(tbl)
list <- list(tbl)
names(list) <- paste(request[bill,1], request[bill,2], sep = "_")
if(length(tbl) > 0) {
out <- c(out, list)
}
}
} else if(type == "xml") {
out <- c()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetLegislationGovernorSigned?biennium=",
request[bill,1], "&agency=", request[bill,2], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
out <- c(out, tbl)
}
names(out) <- paste(request[,1], request[,2], sep = "//")
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/washex/R/getLegislationSigned.R
|
#' Search for bills based on Revised Code (RCW) citations
#'
#' Get a list of all bills which reference or amend a particular
#' portion of the Revised Code of Washington (RCW)
#'
#' @inheritParams getLegislation
#' @param rcwCite Character vector for the citation in the RCW to pull
#' legislation from. Optional extensions for title, chapter, and section
#' are allowed. For more information, see
#' \url{https://apps.leg.wa.gov/rcw/}
#'
#' @return \code{getRCWBills} returns an object of type equal to the
#' \code{type} argument (defaults to dataframe)
#' @export
#'
#' @examples
#' getRCWBills("2007-08", "13.40.0357")
getRCWBills <- function(biennium, rcwCite, paired = FALSE, type = c("df", "list", "xml")) {
type <- rlang::arg_match(type)
rcwCite <- as.character(rcwCite)
if(!all(grepl(biennium_pattern, biennium))) {
stop("Biennium formatted incorrectly. Use ?getLegislation for more information")
} else if(!all(as.numeric(substr(biennium,1,4)) >= 1991)) {
stop("Biennium out of range. Information is available going back to 1991-92")
} else if(!all(grepl(rcw_pattern, rcwCite))) {
stop("RCW reference formatted incorrectly. Use ?getRCWBills for more information")
}
if(length(biennium) == length(rcwCite) & paired) {
request <- data.frame(biennium = biennium, rcwCite = rcwCite)
} else {
request <- expand.grid(biennium, rcwCite, KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)
}
if(type == "df") {
out <- data.frame()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"RcwCiteAffectedService.asmx/GetLegislationAffectingRcw?biennium=",
request[bill,1], "&rcwCite=", request[bill,2], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToDataFrame(tbl,
stringsAsFactors = FALSE)
if(nrow(tbl) > 0) {
tbl$Biennium <- request[bill,1]
tbl$rcwCite <- request[bill,2]
tbl <- tbl[c("Biennium", "rcwCite",
setdiff(names(tbl), c("Biennium", "rcwCite")))]
out <- dplyr::bind_rows(out, tbl)
out <- out[!duplicated(out),]
}
}
} else if(type == "list") {
out <- list()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"RcwCiteAffectedService.asmx/GetLegislationAffectingRcw?biennium=",
request[bill,1], "&rcwCite=", request[bill,2], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToList(tbl)
list <- list(tbl)
names(list) <- request[bill,2]
if(length(tbl) > 0) {
out <- c(out, list)
}
}
} else if(type == "xml") {
out <- c()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"RcwCiteAffectedService.asmx/GetLegislationAffectingRcw?biennium=",
request[bill,1], "&rcwCite=", request[bill,2], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
out <- c(out, tbl)
}
names(out) <- paste(request[,1], request[,2], sep = "//")
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/washex/R/getRCWBills.R
|
#' Get roll call votes
#'
#' Get an XML containing roll call information for all recorded votes
#' on a bill
#'
#' @inheritParams getLegislation
#'
#' @return \code{getRollCalls.xml} returns a list of XML objects for each bill.
#' \code{getRollCalls.summary} and \code{getRollCalls.votes}
#' return objects of type equal to the
#' \code{type} argument (defaults to dataframe)
#'
#' @examples
#' votes <- getRollCalls.summary("2007-08", "1001") # get roll call votes
#' if(!is.null(votes)) {
#' length(votes) # total number of roll call votes recorded
#' votes$CountYeas[3] # number of yea votes on roll call vote #3
#' }
#'
#' ## example: get member id's for all representatives voting against the bill
#' ## on final passage
#' votes <- getRollCalls.votes("2007-08", "1001")
#' if(!is.null(votes)) {
#' nay_votesFP <- subset(votes, (Motion == "Final Passage" & Vote == "Nay"))
#' print(nay_votesFP$MemberId)
#' }
#'
#' @section Note: Due to the nested nature of the resulting document,
#' we provide various functions to present simplified views of the data
#' that are compatible with more parsimonious data structures. To see the
#' full, original data, use \code{getRollCalls.xml} instead.
#'
#' @name getRollCalls
NULL
#' @export
#' @rdname getRollCalls
getRollCalls.xml <- function(biennium, billNumber, paired = TRUE) {
billNumber <- as.character(billNumber)
if(!all(grepl(biennium_pattern, biennium))) {
stop("Biennium formatted incorrectly. Use ?getRollCalls for more information")
} else if(!all(as.numeric(substr(biennium,1,4)) >= 1991)) {
stop("Biennium out of range. Information is available going back to 1991-92")
} else if(!all(grepl(billNum_pattern, billNumber))) {
stop("Bill Number formatted incorrectly. Use ?getRollCalls for more information")
}
if(length(biennium) == length(billNumber) & paired) {
request <- data.frame(biennium = biennium, billNumber = billNumber)
} else {
request <- expand.grid(biennium, billNumber, KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)
}
out <- c()
for(bill in 1:nrow(request)) {
path <- paste(prefix,
"legislationservice.asmx/GetRollCalls?biennium=",
request[bill,1], "&billNumber=", request[bill,2], sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
out <- c(out, tbl)
}
names(out) <- paste(request[,1], request[,2], sep = "//")
return(out)
}
#' @export
#' @rdname getRollCalls
getRollCalls.summary <- function(biennium, billNumber, paired = TRUE, type = c("df", "list")) {
type <- rlang::arg_match(type)
billNumber <- as.character(billNumber)
if(!all(grepl(biennium_pattern, biennium))) {
stop("Biennium formatted incorrectly. Use ?getRollCalls for more information")
} else if(!all(as.numeric(substr(biennium,1,4)) >= 1991)) {
stop("Biennium out of range. Information is available going back to 1991-92")
} else if(!all(grepl(billNum_pattern, billNumber))) {
stop("Bill Number formatted incorrectly. Use ?getRollCalls for more information")
}
if(length(biennium) == length(billNumber) & paired) {
request <- data.frame(biennium = biennium, billNumber = billNumber)
} else {
request <- expand.grid(biennium, billNumber, KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)
}
if(type == "df") {
out <- data.frame()
for(bill in 1:nrow(request)) {
xml <- unname(getRollCalls.xml(request[bill,1], request[bill,2]))
if(is.null(xml)) {
return(NULL)
}
tbl <- XML::xmlToList(xml[[1]])
if(length(tbl) > 0) {
tbl <- purrr::map(tbl, ~ data.frame(Agency = .x[["Agency"]],
BillId = .x[["BillId"]],
Biennium = .x[["Biennium"]],
Motion = .x[["Motion"]],
SequenceNumber = .x[["SequenceNumber"]],
VoteDate = .x[["VoteDate"]],
CountYeas = .x[["YeaVotes"]]$Count,
CountNays = .x[["NayVotes"]]$Count,
CountAbsent = .x[["AbsentVotes"]]$Count,
CountExcused = .x[["ExcusedVotes"]]$Count))
df <- dplyr::bind_rows(tbl)
out <- dplyr::bind_rows(out, df)
out <- out[!duplicated(out),]
}
}
} else if(type == "list") {
out <- list()
for(bill in 1:nrow(request)) {
xml <- unname(getRollCalls.xml(request[bill,1], request[bill,2]))
if(is.null(xml)) {
return(NULL)
}
tbl <- purrr::map(XML::xmlToList(xml[[1]]), purrr::flatten)
tbl <- purrr::map(tbl, ~ toss(.x, c("MembersVoting", "Vote")))
#tbl <- purrr::map(tbl, ~ toss(.x, "Vote"))
if(length(tbl) > 0) {
for(i in 1:length(tbl)) {
names(tbl[[i]]) <- c("Agency",
"BillId",
"Biennium",
"Motion",
"SequenceNumber",
"VoteDate",
"CountYeas",
"CountNays",
"CountAbsent",
"CountExcused")
}
list <- list(tbl)
names(list) <- request[bill,2]
out <- c(out, list)
}
}
}
return(out)
}
#' @export
#' @rdname getRollCalls
getRollCalls.votes <- function(biennium, billNumber, paired = TRUE, type = c("df", "list")) {
type <- rlang::arg_match(type)
billNumber <- as.character(billNumber)
if(!all(grepl(biennium_pattern, biennium))) {
stop("Biennium formatted incorrectly. Use ?getRollCalls for more information")
} else if(!all(as.numeric(substr(biennium,1,4)) >= 1991)) {
stop("Biennium out of range. Information is available going back to 1991-92")
} else if(!all(grepl(billNum_pattern, billNumber))) {
stop("Bill Number formatted incorrectly. Use ?getRollCalls for more information")
}
if(length(biennium) == length(billNumber) & paired) {
request <- data.frame(biennium = biennium, billNumber = billNumber)
} else {
request <- expand.grid(biennium, billNumber, KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)
}
if(type == "df") {
out <- data.frame()
for(bill in 1:nrow(request)) {
xml <- unname(getRollCalls.xml(request[bill,1], request[bill,2]))
if(is.null(xml)) {
return(NULL)
}
tbl <- XML::xmlToList(xml[[1]])
tbl <- purrr::map(tbl, purrr::flatten)
tbl <- purrr::map(tbl, ~ toss(.x, c("SequenceNumber", "Count", "MembersVoting")))
if(length(tbl) > 0) {
tbl <- purrr::map(tbl, ~ data.frame(Agency = .x[["Agency"]],
BillId = .x[["BillId"]],
Biennium = .x[["Biennium"]],
Motion = .x[["Motion"]],
VoteDate = .x[["VoteDate"]],
MemberId = unlist(purrr::map(.x, "MemberId")),
Name = unlist(purrr::map(.x, "Name")),
Vote = unlist(purrr::map(.x, "VOte"))))
# yes there is supposed to be a typo on "VOte"
df <- dplyr::bind_rows(tbl)
out <- dplyr::bind_rows(out, df)
out <- out[!duplicated(out),]
}
}
} else if(type == "list") {
out <- list()
for(bill in 1:nrow(request)) {
xml <- unname(getRollCalls.xml(request[bill,1], request[bill,2]))
if(is.null(xml)) {
return(NULL)
}
tbl <- XML::xmlToList(xml[[1]])
tbl <- purrr::map(tbl, purrr::flatten)
tbl <- purrr::map(tbl, ~ toss(.x, c("SequenceNumber", "Count", "MembersVoting")))
if(length(tbl) > 0) {
list <- list(tbl)
names(list) <- request[bill,2]
out <- c(out, list)
}
}
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/washex/R/getRollCalls.R
|
#' Get legislators
#'
#' Get a list of all sponsors (all congressmembers) for a given biennium
#'
#' @inheritParams getLegislation
#'
#' @return \code{getSponsors} returns an object of type equal to the
#' \code{type} argument (defaults to dataframe)
#' @export
#'
#' @examples
#' getSponsors("2007-08")
getSponsors <- function(biennium, type = c("df", "list", "xml")) {
type <- rlang::arg_match(type)
if(!all(grepl(biennium_pattern, biennium))) {
stop("Biennium formatted incorrectly. Use ?getSponsors for more information")
} else if(!all(as.numeric(substr(biennium,1,4)) >= 1991)) {
stop("Biennium out of range. Information is available going back to 1991-92")
}
if(type == "df") {
out <- data.frame()
for(year in biennium) {
path <- paste(prefix,
"sponsorservice.asmx/GetSponsors?biennium=",
year, sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToDataFrame(tbl,
stringsAsFactors = FALSE)
if(nrow(tbl) > 0) {
tbl$Biennium <- year
tbl <- tbl[c("Biennium",
setdiff(names(tbl), "Biennium"))]
out <- dplyr::bind_rows(out, tbl)
out <- out[!duplicated(out),]
}
}
} else if(type == "list") {
out <- list()
for(year in biennium) {
path <- paste(prefix,
"sponsorservice.asmx/GetSponsors?biennium=",
year, sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToList(tbl)
list <- list(tbl)
names(list) <- year
if(length(tbl) > 0) {
out <- c(out, list)
}
}
} else if(type == "xml") {
out <- c()
for(year in biennium) {
path <- paste(prefix,
"sponsorservice.asmx/GetSponsors?biennium=",
year, sep = "")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlParse(tbl)
out <- c(out, tbl)
}
names(out) <- biennium
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/washex/R/getSponsors.R
|
#' Track historical progress on a bill
#'
#' Get a complete history of all status changes that occurred on a particular
#' bill
#'
#' @inheritParams getLegislation
#'
#' @return \code{getStatusChanges} returns an object of type equal to the
#' \code{type} argument (defaults to dataframe)
#' @export
#'
#' @examples
#' getStatusChanges("2007-08", "1001", type = "list")
getStatusChanges <- function(biennium, billNumber, paired = TRUE, type = c("df", "list", "xml")) {
type <- rlang::arg_match(type)
billNumber <- as.character(billNumber)
if(!all(grepl(biennium_pattern, biennium))) {
stop("Biennium formatted incorrectly. Use ?getStatusChanges for more information")
} else if(!all(as.numeric(substr(biennium,1,4)) >= 1991)) {
stop("Biennium out of range. Information is available going back to 1991-92")
} else if(!all(grepl(billNum_pattern, billNumber))) {
stop("Bill Number formatted incorrectly. Use ?getStatusChanges for more information")
}
if(length(biennium) == length(billNumber) & paired) {
request <- data.frame(biennium = biennium, billNum = billNumber)
} else {
request <- expand.grid(biennium, billNumber, KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)
}
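# The service is queried over the full span of the biennium: for example,
# biennium "2007-08" yields beginDate "2007-01-01" and endDate "2008-12-31"
# in the date arithmetic below.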
beginDate <- paste(substr(request[1,1],1,4),"01","01",sep = "-")
endDate <- paste(substr(request[1,1],1,2),substr(request[1,1],6,7),"-12","-31",sep = "")
if(type == "df") {
out <- data.frame()
for(bill in 1:nrow(request)) {
beginDate <- paste(substr(request[bill,1],1,4),"01","01",sep = "-")
endDate <- paste(substr(request[bill,1],1,2),substr(request[bill,1],6,7),"-12","-31",sep = "")
path <- paste(prefix,
"legislationservice.asmx/GetLegislativeStatusChangesByBillNumber?biennium=",
gsub(" ", "%20", request[bill,1]), "&billNumber=", gsub(" ", "%20", request[bill,2]),
"&beginDate=", beginDate, "&endDate=", endDate, sep="")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToDataFrame(tbl,
stringsAsFactors = FALSE)
if(nrow(tbl) > 0) {
tbl$Biennium <- request[bill,1]
tbl$BillNumber <- request[bill,2]
tbl <- tbl[c("Biennium", "BillNumber",
setdiff(names(tbl), c("Biennium", "BillNumber")))]
out <- dplyr::bind_rows(out, tbl)
out <- out[!duplicated(out),]
}
}
} else if(type == "list") {
out <- list()
for(bill in 1:nrow(request)) {
beginDate <- paste(substr(request[bill,1],1,4),"01","01",sep = "-")
endDate <- paste(substr(request[bill,1],1,2),substr(request[bill,1],6,7),"-12","-31",sep = "")
path <- paste(prefix,
"legislationservice.asmx/GetLegislativeStatusChangesByBillNumber?biennium=",
gsub(" ", "%20", request[bill,1]), "&billNumber=", gsub(" ", "%20", request[bill,2]),
"&beginDate=", beginDate, "&endDate=", endDate, sep="")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlToList(tbl)
list <- list(tbl)
names(list) <- request[bill,2]
if(length(tbl) > 0) {
out <- c(out, list)
}
}
} else if(type == "xml") {
out <- c()
for(bill in 1:nrow(request)) {
beginDate <- paste(substr(request[bill,1],1,4),"01","01",sep = "-")
endDate <- paste(substr(request[bill,1],1,2),substr(request[bill,1],6,7),"-12","-31",sep = "")
path <- paste(prefix,
"legislationservice.asmx/GetLegislativeStatusChangesByBillNumber?biennium=",
gsub(" ", "%20", request[bill,1]), "&billNumber=", gsub(" ", "%20", request[bill,2]),
"&beginDate=", beginDate, "&endDate=", endDate, sep="")
tbl <- fetch(path)
if(is.null(tbl)) {
return(NULL)
}
tbl <- XML::xmlParse(tbl)
out <- c(out, tbl)
}
names(out) <- paste(request[,1], request[,2], sep = "//")
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/washex/R/getStatusChanges.R
|
## negative of purrr::keep(): drops the elements of .x whose names match .p
toss <- function(.x, .p) {
pat <- paste(paste0("^", .p, "$"), collapse = "|")
return(.x[!grepl(pat, names(.x))])
}
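# Illustrative sketch (not run): toss() is the complement of purrr::keep() by
# name, dropping every element whose name exactly matches a pattern in .p.
if (FALSE) {
toss(list(Date = "2011-01-19", Committees = list(), Room = "A"), "Committees")
# returns list(Date = "2011-01-19", Room = "A")
}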
fetch <- function(path) {
if(!curl::has_internet()) {
message("Internet connection is down")
return(NULL)
}
r <- httr::GET(path)
if(httr::http_error(r)) {
message("Internal error or data source has moved. Please try again.")
return(NULL)
} else {
return(XML::xmlParse(httr::content(r)))
}
}
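# Illustrative sketch (not run): fetch() returns a parsed XML document on
# success and NULL (with a message) when offline or when the request errors,
# which is why every caller checks `if (is.null(tbl)) return(NULL)`.
if (FALSE) {
doc <- fetch(paste(prefix, "sponsorservice.asmx/GetSponsors?biennium=2007-08", sep = ""))
if (!is.null(doc)) XML::xmlToDataFrame(doc, stringsAsFactors = FALSE)
}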
|
/scratch/gouwar.j/cran-all/cranData/washex/R/utils.R
|
#' @keywords internal
"_PACKAGE"
# Suppress R CMD check note
#' @importFrom dplyr select
#' @importFrom purrr map
#' @importFrom rlang abort
#' @importFrom curl curl
#' @importFrom httr GET
#' @importFrom xml2 xml_url
#' @importFrom XML xmlParse
NULL
|
/scratch/gouwar.j/cran-all/cranData/washex/R/washex.r
|
#' Example data in long (tidy) format
#'
#' A subset WaSHI State of the Soils Assessment dataset that has been
#' anonymized. This dataset is tidied, so each measurement is in its
#' own row.
#'
#' @format ## `example_data_long` A data frame with 1,800 rows and 14 columns:
#' \describe{
#' \item{year}{Year of sample}
#' \item{sampleId, producerId, fieldId}{Anonymized IDs}
#' \item{farmName, producerName, fieldName}{Anonymized names}
#' \item{longitude, latitude}{Truncated coordinates}
#' \item{texture}{Measured soil texture}
#' \item{measurement}{Measurement name with units}
#' \item{value}{Measurement result}
#' ...
#' }
#' @source
#' <https://agr.wa.gov/departments/land-and-water/natural-resources/soil-health/state-of-the-soils>
"example_data_long"
#' Example data in wide format
#'
#' A subset WaSHI State of the Soils Assessment dataset that has been
#' anonymized. This dataset presents each sample in its own row, with
#' columns for each measurement.
#'
#' @format ## `example_data_wide` A data frame with 30 rows and 72
#' columns:
#' \describe{
#' \item{year}{Year of sample}
#' \item{sampleId, producerId, fieldId}{Anonymized IDs}
#' \item{farmName, producerName, fieldName}{Anonymized names}
#' \item{longitude, latitude}{Truncated coordinates}
#' \item{texture}{Measured soil texture}
#' \item{other columns}{Column name includes measurement and units; value is the measurement results}
#' ...
#' }
#' @source
#' <https://agr.wa.gov/departments/land-and-water/natural-resources/soil-health/state-of-the-soils>
"example_data_wide"
|
/scratch/gouwar.j/cran-all/cranData/washi/R/data.R
|
#' WaSHI flextable style
#'
#' Creates a flextable in WaSHI's style.
#'
#' @param data Dataframe for the table.
#' @param cols_bold Numeric indices of columns to bold. Defaults to
#' `NULL`.
#' @param header_font Font family for header text. Defaults to
#' "Lato".
#' @param body_font Font family for table body text. Defaults to
#' "Poppins".
#' @param header_font_color Hexcode color for header font. Defaults to
#' white.
#' @param header_bg_color Hexcode color for header background.
#' Defaults to WaSHI green.
#' @param border_color Hexcode color for horizontal borders. Defaults
#' to WaSHI tan.
#'
#' @returns A flextable formatted in WaSHI's style.
#' @family table functions
#'
#' @examples
#' subset(
#' example_data_wide,
#' select = c(
#' "sampleId",
#' "county",
#' "crop",
#' "totalN_%",
#' "totalC_%"
#' )
#' ) |>
#' washi_flextable(cols_bold = 1)
#' @export
washi_flextable <- function(
data,
cols_bold = NULL,
header_font = "Lato",
body_font = "Poppins",
header_font_color = "white",
header_bg_color = washi_pal[["standard"]][["green"]],
border_color = washi_pal[["standard"]][["tan"]]
) {
# Change font to "sans" if given font isn't found
check_fonts(
header_font = header_font,
body_font = body_font
)
if (is.character(cols_bold)) {
cli::cli_abort(c(
"`cols_bold` must be numeric column indices."
))
}
if (!is.character(header_font_color)) {
cli::cli_abort("`header_font_color` must be character color name or code.")
}
if (!is.character(header_bg_color)) {
cli::cli_abort("`header_bg_color` must be character color name or code.")
}
# Set default font
flextable::set_flextable_defaults(font.family = body_font)
# Set header background
header_cell <- officer::fp_cell(
background.color = header_bg_color
)
# Set header text
header_text <- officer::fp_text(
font.family = header_font,
font.size = 12,
bold = TRUE,
color = header_font_color
)
ft <- flextable::flextable(data) |>
flextable::style(
pr_t = header_text,
pr_c = header_cell,
part = "header"
) |>
flextable::style(
pr_c = officer::fp_cell(background.color = "white"),
part = "body"
) |>
# Horizontal borders
flextable::hline(
border = officer::fp_border(color = border_color),
part = "body"
) |>
# Merge header cells when consecutive cells have identical values
flextable::merge_h(part = "header") |>
# Center header text
flextable::align(align = "center", part = "header") |>
# Autofit with a little added padding
flextable::autofit(add_w = 0.1, add_h = 0.1)
# Bold columns
if (!is.null(cols_bold)) {
ft <- ft |>
flextable::bold(j = cols_bold, bold = TRUE, part = "body")
}
return(ft)
}
|
/scratch/gouwar.j/cran-all/cranData/washi/R/flextable.R
|
#' Install Lato and Poppins fonts on your system
#'
#' Opens the font files to install on your computer.
#'
#' @returns Opens a directory containing font files.
#' @family font functions
#' @export
washi_install_fonts <- function() {
# Get directory of font files
font_dir <- system.file("fonts", package = "washi")
# Open font directory
if (interactive()) {
cli::cli_alert_info("Opening {.path {font_dir}} with fonts.")
utils::browseURL(font_dir)
}
# Instructions for installing fonts on windows
if (.Platform$OS.type == "windows") {
cli::cli_bullets(c(
"i" = "To install the fonts on Windows:",
"*" = "Select all {.pkg .ttf} files in the newly opened directory.",
"*" = "Right-click + {.pkg Install}.",
"*" = "Restart RStudio."
))
} else {
cli::cli_bullets(c(
"i" = "To install fonts on non-Windows platforms:",
"*" = "Double click on the font files to open a dialog box.",
"*" = "Click {.pkg Install}.",
"!" = "Or, drag & drop them into your system's font folder or app.",
"*" = "MacOS has {.pkg FontBook}.",
"*" = "Restart RStudio."
))
}
}
#' Check fonts
#' Change font to "sans" if given font isn't found
#' @noRd
check_fonts <- function(header_font, body_font) {
fonts <- c(systemfonts::system_fonts()$family, "sans", "serif", "mono")
if (!header_font %in% fonts) {
cli::cli_inform("Couldn't find {.pkg '{header_font}'}.
Defaulting to {.pkg 'sans'}.")
header_font <- "sans"
}
if (!body_font %in% fonts) {
cli::cli_inform("Couldn't find {.pkg '{body_font}'}.
Defaulting to {.pkg 'sans'}.")
body_font <- "sans"
}
}
|
/scratch/gouwar.j/cran-all/cranData/washi/R/fonts.R
|
#' WaSHI color palettes
#'
#' Color palettes are stored in a named list.
#'
#' @returns List of available `washi` color palettes
#' @family color palette functions
#' @examples
#' # List names of available palettes
#' names(washi_pal)
#'
#' # Get hex codes from a palette using dollar `$name` or
#' # double bracket
#' # `[["name"]]` operators for extracting list elements
#' washi_pal$standard
#'
#' washi_pal[["green_gradient"]]
#'
#' # Extract a color from the standard WaSHI palette
#' washi_pal[["standard"]][["green"]]
#' @export
washi_pal <- list(
# WaSHI standard brand colors
standard = c(
green = "#023b2c",
blue = "#335c67",
red = "#a60f2d",
gold = "#fcb040",
gray = "#3e3d3d",
ltgray = "#7C7979",
tan = "#ccc29c",
cream = "#F2F0E6"
),
# WaSHI 6 color blind safe palette
# checked accessibility with Adobe color wheel and Viz Palette
color_blind = c(
green = "#03634A",
gold = "#FCB040",
blue = "#7AB7C6",
red = "#700004",
ltgray = "#7C7979",
tan = "#D3CF9D"
),
# WaSHI accent colors
accent = c(
gray = "#3e3d3d",
ltgray = "#7C7979",
tan = "#ccc29c",
cream = "#F2F0E6"
),
# green highlight color with subdued gray and tan colors
green_highlight = c(
green = "#023b2c",
ltgray = "#7C7979",
tan = "#ccc29c"
),
# blue highlight color with subdued gray and tan colors
blue_highlight = c(
blue = "#335c67",
ltgray = "#7C7979",
tan = "#ccc29c"
),
# red highlight color with subdued gray and tan colors
red_highlight = c(
red = "#a60f2d",
ltgray = "#7C7979",
tan = "#ccc29c"
),
# gold highlight color with subdued gray and tan colors
gold_highlight = c(
gold = "#fcb040",
ltgray = "#7C7979",
tan = "#ccc29c"
),
# green gradient
green_gradient = c(
"#023B2C",
"#1C4F40",
"#376355",
"#51776A",
"#6C8B7E",
"#879F93",
"#A2B3A8",
"#BCC7BC"
),
# blue gradient
blue_gradient = c(
"#335C67",
"#486C75",
"#5D7C83",
"#728D91",
"#879D9F",
"#9DAEAD",
"#B2BEBB",
"#C7CFC9"
),
# red gradient
red_gradient = c(
"#A60F2D",
"#AE2841",
"#B64156",
"#BF596A",
"#C7727F",
"#D08C93",
"#D8A4A8",
"#E1BDBC"
),
# gold gradient
gold_gradient = c(
"#FCB040",
"#FBB54F",
"#FABB5E",
"#F9C16D",
"#F8C77C",
"#F7CD8B",
"#F6D29A",
"#F5D8A9"
)
)
#' Setup a color palette
#'
#' Choose desired number of colors and whether the colors are
#' reversed.
#'
#' @param palette Character name of palette in washi_pal. See
#' `names(washi_pal)` for a list of available palettes.
#' @param n Number of colors in palette.
#' @param reverse Boolean indicating whether the palette should be
#' reversed. Default is FALSE.
#'
#' @returns A vector of color hex codes.
#' @family color palette functions
#' @examples
#' washi_pal_setup("color_blind")
#'
#' washi_pal_setup("green_gradient", 12)
#' @export
washi_pal_setup <- function(
palette = "standard",
n,
reverse = FALSE
) {
if (!palette %in% names(washi_pal)) {
cli::cli_abort(c(
"There is no palette called `{palette}`.",
"i" = "List available palettes with `names(washi_pal)`."
))
}
if (!is.logical(reverse)) {
cli::cli_abort(c(
"`reverse` must be `TRUE` or `FALSE`."
))
}
if (missing(n)) {
n <- length(washi_pal[[palette]])
}
if (!is.numeric(n) | is.null(n)) {
cli::cli_abort(c(
"`n` must be numeric."
))
}
pal <- washi_pal[[palette]]
if (reverse) pal <- rev(pal)
grDevices::colorRampPalette(pal)(n)
}
#' View a WaSHI palette
#'
#' Show the colors within a palette in a plot.
#'
#' @inheritParams washi_pal_setup
#' @returns A plot with each color displayed.
#' @family color palette functions
#'
#' @examples
#' washi_pal_view("standard")
#'
#' washi_pal_view("color_blind")
#'
#' washi_pal_view("blue_gradient", 4, reverse = TRUE)
#' @export
washi_pal_view <- function(
palette = "color_blind",
n,
reverse = FALSE
) {
if (missing(n)) {
n <- length(washi_pal[[palette]])
}
pal <- washi_pal_setup(palette, n, reverse)
graphics::image(
1:n,
1,
as.matrix(1:n),
col = pal,
xlab = paste(palette),
ylab = "",
xaxt = "n",
yaxt = "n",
bty = "n"
)
graphics::box()
}
|
/scratch/gouwar.j/cran-all/cranData/washi/R/palettes.R
|
#' Scales for plotting with WaSHI palettes
#'
#' Provides compatibility with `ggplot2`.
#'
#' @inheritParams washi_pal_setup
#' @param aesthetics Character string or vector of character strings
#' listing the name(s) of the aesthetic(s) that this scale works
#' with. Defaults to c("color", "fill"), which applies the palette
#' to both the color and fill aesthetics at the same time.
#' @param alpha Numeric transparency level of the color from 0 to 1.
#' Default is 1 (not transparent).
#' @param discrete Boolean indicating whether color aesthetic is
#' discrete or not. Default is TRUE.
#' @param ... Additional arguments passed to discrete_scale() or
#' scale_color_gradientn(), used respectively when discrete is TRUE
#' or FALSE
#' @returns A `ScaleContinuous` or `ScaleDiscrete` object that can be
#' added to a `ggplot` object.
#' @family ggplot2 functions
#' @examples
#' library(ggplot2)
#'
#' # Discrete scale
#' example_data_wide |>
#' subset(crop %in% c("Apple", "Cherry", "Potato")) |>
#' ggplot(aes(x = pH, y = Mn_mg.kg, color = crop)) +
#' geom_point(size = 2.5) +
#' theme_minimal() +
#' washi_scale()
#'
#' # Continuous scale
#' example_data_wide |>
#' ggplot(aes(x = `totalC_%`, y = poxC_mg.kg, color = poxC_mg.kg)) +
#' geom_point(size = 2.5) +
#' theme_minimal() +
#' washi_scale("green_gradient", reverse = TRUE, discrete = FALSE)
#' @export
washi_scale <- function(
palette = "color_blind",
aesthetics = c("color", "fill"),
alpha = 1,
reverse = FALSE,
discrete = TRUE,
...
) {
pal <- washi_pal_setup(palette = palette, reverse = reverse)
if (alpha < 0 | alpha > 1) {
cli::cli_abort("Alpha must be between 0 and 1.")
}
if (!is.logical(discrete)) {
cli::cli_abort(c(
"`discrete` must be `TRUE` or `FALSE`."
))
}
pal <- scales::alpha(pal, rep_len(alpha, length(pal)))
aesthetics <- match.arg(aesthetics, several.ok = TRUE)
if (discrete == TRUE) {
ggplot2::discrete_scale(
aesthetics = aesthetics,
scale_name = paste0("washi_", palette),
palette = scales::manual_pal(pal),
...
)
} else {
ggplot2::scale_color_gradientn(colors = pal, ...)
}
}
|
/scratch/gouwar.j/cran-all/cranData/washi/R/scales.R
|
#' Create standard WaSHI plots
#'
#' All changed defaults from this function can be overridden by another
#' call to [ggplot2::theme()] with the desired changes.
#'
#' @source Adapted from `glitr::si_style()`.
#'
#' @param header_font Font family for title and subtitle. Defaults to
#' "Lato Black".
#' @param header_color Font color for title and subtitle. Defaults to
#' almost black.
#' @param body_font Font family for all other text. Defaults to
#' "Poppins".
#' @param body_color Font color for all other text. Defaults to almost
#' black.
#' @param text_scale Scalar that will grow/shrink all text defined
#' within.
#' @param legend_position Position of legend ("none", "left", "right",
#' "bottom", "top", or two-element numeric vector). Defaults to
#' "top".
#' @param facet_space Controls how far apart facets are from each
#' other.
#' @param color_gridline Gridline color. Defaults to WaSHI tan.
#' @param gridline_x Boolean indicating whether major gridlines are
#' displayed for the x axis. Default is TRUE.
#' @param gridline_y Boolean indicating whether major gridlines are
#' displayed for the y axis. Default is TRUE.
#' @param ... Pass any parameters from theme that are not already
#' defined within.
#'
#' @importFrom ggplot2 %+replace%
#' @returns `ggplot2` object
#' @family ggplot2 functions
#'
#' @examples
#' # NOTE: These examples do not use Poppins or Lato in order to pass
#' # automated checks on computers without these fonts installed.
#'
#' library(ggplot2)
#'
#' # Single geom_point plot
#' example_data_wide |>
#' subset(crop %in% c("Apple", "Cherry", "Potato")) |>
#' ggplot(aes(x = pH, y = Mn_mg.kg, color = crop)) +
#' labs(
#' title = "Scatter plot of pH and Mn (mg/kg)",
#' subtitle = "Example with geom_point().",
#' caption = "This is a caption."
#' ) +
#' geom_point(size = 2.5) +
#' washi_theme(
#' header_font = "sans",
#' body_font = "sans"
#' ) +
#' washi_scale()
#'
#' # Bar plot
#' if (requireNamespace("forcats")) {
#' example_data_wide |>
#' ggplot(aes(x = forcats::fct_rev(forcats::fct_infreq(crop)))) +
#' geom_bar(fill = washi_pal[["standard"]][["blue"]]) +
#' geom_text(
#' aes(
#' y = after_stat(count),
#' label = after_stat(count)
#' ),
#' stat = "count",
#' hjust = 2.5,
#' color = "white"
#' ) +
#' # Flip coordinates to accommodate long crop names
#' coord_flip() +
#' labs(
#' title = "Number of samples in each crop",
#' subtitle = "Example plot with geom_bar() without gridlines.",
#' y = NULL,
#' x = NULL
#' ) +
#' # Turn gridlines off
#' washi_theme(
#' gridline_y = FALSE,
#' gridline_x = FALSE,
#' header_font = "sans",
#' body_font = "sans"
#' ) +
#' # Remove x-axis
#' theme(axis.text.x = element_blank())
#' }
#'
#' # Facetted geom_density plots
#' example_data_long |>
#' subset(measurement %in% c("totalC_%", "poxC_mg.kg") &
#' !texture == "Loamy Sand") |>
#' ggplot(aes(x = value, fill = texture, color = texture)) +
#' labs(
#' title = "Distribution of POXC (mg/kg) and Total C (%)",
#' subtitle = "Example with geom_density() and facet_wrap()."
#' ) +
#' geom_density(alpha = 0.4) +
#' facet_wrap(. ~ measurement, scales = "free") +
#' washi_theme(
#' legend_position = "bottom",
#' header_font = "sans",
#' body_font = "sans"
#' ) +
#' washi_scale() +
#' xlab(NULL) +
#' guides(col = guide_legend(nrow = 2, byrow = TRUE))
#' @export
washi_theme <- function(
header_font = "Lato Black",
header_color = "#151414",
body_font = "Poppins",
body_color = "#151414",
text_scale = 1,
legend_position = "top",
facet_space = 2,
color_gridline = washi_pal[["standard"]][["tan"]],
gridline_y = TRUE,
gridline_x = TRUE,
...
) {
# Change font to "sans" if given font isn't found
check_fonts(
header_font = header_font,
body_font = body_font
)
# Errors for invalid legend_position argument
if (length(legend_position) > 1) {
cli::cli_abort("`legend_position` must have length 1.")
}
legend_choices <- c("top", "bottom", "left", "right", "none")
if (!legend_position %in% legend_choices) {
cli::cli_abort("`legend_position` must be one of {dQuote(legend_choices)}.")
}
# Errors if invalid gridline argument
if (!is.logical(gridline_y)) {
cli::cli_abort("`gridline_y` must be TRUE or FALSE.")
}
if (!is.logical(gridline_x)) {
cli::cli_abort("`gridline_x` must be TRUE or FALSE.")
}
# Set up gridline variables
gridline_y <- if (isTRUE(gridline_y)) {
ggplot2::element_line(
color = color_gridline,
linewidth = 0.25
)
} else {
ggplot2::element_blank()
}
gridline_x <- if (isTRUE(gridline_x)) {
ggplot2::element_line(
color = color_gridline,
linewidth = 0.25
)
} else {
ggplot2::element_blank()
}
# Set up theme based on theme_minimal
ggplot2::theme_minimal() %+replace%
ggplot2::theme(
plot.title = ggplot2::element_text(
family = header_font,
size = 14 * text_scale,
face = "bold",
color = header_color,
margin = ggplot2::margin(b = 6),
hjust = 0
),
# This sets the font, size, type and color of text for the
# chart's subtitle, as well as setting a margin between the
# title and the subtitle
plot.subtitle = ggplot2::element_text(
family = header_font,
size = 12 * text_scale,
face = "bold",
color = header_color,
margin = ggplot2::margin(b = 15),
hjust = 0
),
plot.caption = ggplot2::element_text(
family = body_font,
size = 9 * text_scale,
color = body_color,
margin = ggplot2::margin(t = 6),
hjust = 1,
vjust = 1
),
plot.margin = ggplot2::margin(t = 15, r = 15, b = 10, l = 15),
plot.title.position = "plot", # Move plot.title to the left
plot.caption.position = "plot",
# Legend format
# Set the legend to be at the top left of the graphic, below title
legend.position = legend_position,
legend.text.align = 0,
legend.background = ggplot2::element_blank(),
legend.margin = ggplot2::margin(t = 5, r = 5, b = 5, l = 5),
legend.spacing = ggplot2::unit(2, "cm"),
legend.key = ggplot2::element_blank(),
legend.title = ggplot2::element_text(
family = body_font,
size = 11 * text_scale,
face = "bold",
color = body_color
),
legend.text = ggplot2::element_text(
family = body_font,
size = 11 * text_scale,
color = body_color
),
# Axis format
axis.text = ggplot2::element_text(
family = body_font,
size = 10 * text_scale,
color = body_color
),
axis.ticks = ggplot2::element_blank(),
axis.line = ggplot2::element_blank(),
axis.title = ggplot2::element_text(
family = body_font,
size = 10 * text_scale,
color = body_color
),
axis.title.y = ggplot2::element_text(
angle = 90,
margin = ggplot2::margin(t = 0, r = 15, b = 0, l = 0)
),
axis.title.x = ggplot2::element_text(
margin = ggplot2::margin(t = 0, r = 0, b = 0, l = 0)
),
# Grid lines
panel.grid.minor = ggplot2::element_blank(),
panel.grid.major.y = gridline_y,
panel.grid.major.x = gridline_x,
# Blank background
panel.background = ggplot2::element_blank(),
panel.border = ggplot2::element_blank(),
panel.spacing = ggplot2::unit(facet_space, "lines"),
# Plot fill and margins
plot.background = ggplot2::element_rect(
fill = "white",
color = NA
),
# Facet strip background and text
strip.background = ggplot2::element_blank(),
strip.text = ggplot2::element_text(
family = body_font,
face = "bold",
size = 11 * text_scale,
hjust = 0.5,
margin = ggplot2::margin(t = 4, r = 4, b = 4, l = 4)
),
...
)
}
|
/scratch/gouwar.j/cran-all/cranData/washi/R/theme.R
|
#' @title applyLambda
#'
#' @description Apply the lambda to assign a relative importance to each of the
#' previously used methods (WSM and WPM). Lambda values range from zero to one.
#' @param matrix_wsm The data set object obtained from the application of the
#' calcWSM function
#' @param matrix_wpm The data set object obtained from the application of the
#' calcWPM function
#' @param lambda The lambda value (between 0 and 1)
#'
#' @return A data frame object that contains the alternatives set, scored and
#' classified in descending order (from best to worst) according to the
#' weighting proposed by the WASPAS method using the input lambda.
#'
#' @export
# Determines relative values according to the WASPAS method
applyLambda <- function(matrix_wsm, matrix_wpm, lambda) {
lambda <- lambda
tryCatch({
# Test value of lambda
if (! (as.numeric(lambda) >= 0 & as.numeric(lambda) <= 1)) {
return("Error: The lambda's value must be between 0 and 1")
}
# Test matrix_wsm X matrix_wpm (size and contents)
if (nrow(matrix_wsm) != nrow(matrix_wpm)) {
return("Error: WSM & WPM matrices entered must have same number of rows")
}
# WASPAS Ranking
waspas_matrix <- cbind(matrix_wsm, matrix_wpm[, "WPM_Rank"], WASPAS = 0.0)
colnames(waspas_matrix) <- c("Alternative"
, "WSM_Rank", "WPM_Rank", "WASPAS_Rank")
waspas_matrix[, "WASPAS_Rank"] <-
as.numeric(waspas_matrix[, "WSM_Rank"]) * lambda +
as.numeric(waspas_matrix[, "WPM_Rank"]) * (1 - lambda)
return(as.data.frame(waspas_matrix))
},
error = function(cond) {
stop(paste("E[AL]", cond))
},
warning = function(cond) {
if (grepl("NAs intro", cond)) {
return("W[AL] Error: Some non numeric-alike value was found")
}
})
}
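# Worked example (illustrative, not run): with lambda = 0.5 the WASPAS score is
# the plain average of the two method scores, e.g. WSM_Rank = 0.84 and
# WPM_Rank = 0.80 give WASPAS_Rank = 0.5 * 0.84 + 0.5 * 0.80 = 0.82.
if (FALSE) {
wsm <- data.frame(Alternative = 1:2, WSM_Rank = c(0.84, 0.70))
wpm <- data.frame(Alternative = 1:2, WPM_Rank = c(0.80, 0.75))
applyLambda(wsm, wpm, lambda = 0.5)
}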
|
/scratch/gouwar.j/cran-all/cranData/waspasR/R/applyLambda.R
|
#' @title calcWPM
#'
#' @description Calculates the ranking for the alternative's set according to
#' WPM method
#' @param normal_db A data set object with normalized values of
#' Alternatives X Criteria
#' @param vec_weights Contains a set of user assigned values to weight
#' the criteria.
#' The sum of these weights must add up to 1.
#' The format of this input is an array of values.
#'
#' @return A data frame object that contains 2 columns and the same number
#' of rows as the input matrix. The column "WPM_Rank" has the calculated
#' relative value of each alternative whose id is in the "Alternative" column
#'
#' @export
# Ranking for WPM Method: normal_db Matrix into wpm Matrix
calcWPM <- function(normal_db, vec_weights) {
tryCatch({
# Test vector of Weights X matrix of values dimensions
if (length(vec_weights) != ncol(normal_db)) {
return(paste("Error: The weight vector must be the same size as"
, "the number of criteria"))
}
# Test Vector of Weights contents, it must sum to 1
if (sum(sapply(vec_weights, as.numeric)) != 1) {
return("Error: Values in Vector of Weights must sum to 1")
}
# WPM Calculation loop
WPM_Rank <- rep(0, nrow(normal_db))
Alternative <- seq_len(nrow(normal_db))
wpm <- cbind(Alternative, WPM_Rank)
for (iCol in seq_len(ncol(normal_db))) {
for (iRow in seq_len(nrow(normal_db))) {
normal_db[iRow, iCol] <- toString(as.numeric(normal_db[iRow, iCol])
^ as.numeric(vec_weights[iCol]))
}
}
# calculate ranking
for (iRow in seq_len(nrow(normal_db))) {
wpm[iRow, "WPM_Rank"] <- prod(sapply(normal_db[iRow, ], as.numeric))
}
wpm_db <- wpm[, c("Alternative", "WPM_Rank")]
return(wpm_db)
},
error = function(cond) {
stop(paste("E[P]", cond))
},
warning = function(cond) {
if (grepl("NAs intro", cond)) {
return("W[P] Error: Some non numeric - alike value was found")
}
})
}
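# Worked example (illustrative, not run): WPM scores each alternative as the
# product of its normalized values raised to the criteria weights. With weights
# c(0.6, 0.4), the first row below scores 1.0^0.6 * 0.5^0.4 (about 0.758).
if (FALSE) {
normal_db <- data.frame(c1 = c("1.0", "0.8"), c2 = c("0.5", "1.0"),
stringsAsFactors = FALSE)
calcWPM(normal_db, vec_weights = c(0.6, 0.4))
}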
|
/scratch/gouwar.j/cran-all/cranData/waspasR/R/calcWPM.R
|
#' @title calcWSM
#'
#' @description Calculates the ranking for the alternative's set according to
#' WSM method.
#' @param normal_db A data set object with normalized values of
#' Alternatives X Criteria
#' @param vec_weights Contains a set of user assigned values to weight
#' the criteria.
#' The sum of these weights must add up to 1.
#' The format of this input is an array of values.
#'
#' @return A data frame object that contains 2 columns and the same number
#' of rows as the input matrix. The column "WSM_Rank" has the calculated
#' relative value of each alternative whose id is in the "Alternative" column
#'
#' @export
# Ranking for WSM Method: normal_db Matrix into wsm Matrix
calcWSM <- function(normal_db, vec_weights) {
tryCatch({
# Test vector of Weights X matrix of values dimensions
if (length(vec_weights) != ncol(normal_db)) {
return(paste("Error: The weight vector must be the same size as the"
, "number of criteria"))
}
# Test Vector of Weights contents, it must sum to 1
if (sum(sapply(vec_weights, as.numeric)) != 1) {
return("Error: Values in Vector of Weights must sum to 1")
}
# WSM Calculation loop
WSM_Rank <- rep(0, nrow(normal_db))
Alternative <- seq_len(nrow(normal_db))
wsm <- cbind(Alternative, WSM_Rank)
for (iCol in seq_len(ncol(normal_db))) {
for (iRow in seq_len(nrow(normal_db))) {
normal_db[iRow, iCol] <- toString(as.numeric(normal_db[iRow, iCol])
* as.numeric(vec_weights[iCol]))
}
}
# calculate ranking
for (iRow in seq_len(nrow(normal_db))) {
wsm[iRow, "WSM_Rank"] <- sum(sapply(normal_db[iRow, ], as.numeric))
}
wsm_db <- wsm[, c("Alternative", "WSM_Rank")]
return(wsm_db)
},
error = function(cond) {
stop(paste("E[S]", cond))
},
warning = function(cond) {
if (grepl("NAs intro", cond)) {
return("W[S] Error: Some non numeric - alike value was found")
}
})
}
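# Worked example (illustrative, not run): WSM scores each alternative as the
# weighted sum of its normalized values. With weights c(0.6, 0.4), the first
# row below scores 0.6 * 1.0 + 0.4 * 0.5 = 0.8.
if (FALSE) {
normal_db <- data.frame(c1 = c("1.0", "0.8"), c2 = c("0.5", "1.0"),
stringsAsFactors = FALSE)
calcWSM(normal_db, vec_weights = c(0.6, 0.4))
}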
|
/scratch/gouwar.j/cran-all/cranData/waspasR/R/calcWSM.R
|
#' @title checkInputFormat
#'
#' @description Verify if the database to be submitted to WASPAS is correctly
#' formatted
#'
#' @param waspas_db The original database to be validated in its format
#'
#' @return True if everything is OK, an error message in case of bad format
#'
#' @export
# Verify if a data.frame has the proper format to be the waspasR input database
checkInputFormat <- function(waspas_db) {
if (missing(waspas_db)) return("Parameter waspas_db is missing")
if (!is.data.frame(waspas_db))
return("Parameter waspas_db must be a data.frame")
tryCatch({
# test indicators: Flags, Weights and Criteria
proc_step <- "Indicators"
indicators <- c("C","F","W")
i1 <- toupper(substr(toString(waspas_db[1,1]), 1, 1))
i2 <- toupper(substr(toString(waspas_db[2,1]), 1, 1))
i3 <- toupper(substr(toString(waspas_db[3,1]), 1, 1))
indicators_uploaded <- sort(c(i1, i2, i3))
if (!identical(indicators, indicators_uploaded)) stop()
# Test flags contents, just strings initiated with B (Benefit) ou
# C (Cost) are permitted
flags <- sliceData(waspas_db, "F")
just_bc <- sort(unique(toupper(substr(flags, 1, 1))))
proc_step <- "Flags"
if (!identical(just_bc, c("B", "C"))) stop()
# Test Vector of Weights contents, it must sum to 1
proc_step <- "Weights-1"
weights <- sliceData(waspas_db, "W")
weights <- sapply(weights, as.numeric)
proc_step <- "Weights-2"
if (sum(weights) != 1) stop()
# Test the values (if waspas_db has just numeric - alike variables)
proc_step <- "Values"
values <- sliceData(waspas_db, "V")
values <- sapply(values, as.numeric)
if (sum(is.na(values)) > 0) stop()
proc_step <- "End"
# No return here due to use of "finally" # return(TRUE)
},
error = function(cond) {
stop(paste("E[CI]", cond))
},
warning = function(cond) {
if (grepl("NAs intro", cond)) {
# Some non numeric - alike variable
# will be dealt with in the "finally" clause
}
},
finally = {
if (proc_step == "Indicators") {
return(paste("Error: Check the indicators in cells [1:3, 1], the strings"
, " must be initiated with 'C', 'F' or 'W'"))
} else if (proc_step == "Flags") {
return(paste("Error: Vector of flags must contain just strings initiated"
, "with B or C (i.e. b, c, B, C, Cost, Benefit, Ben etc.)"))
} else if (proc_step == "Weights-1") {
return("Error: Check Weights values, all must be numeric")
} else if (proc_step == "Weights-2") {
return("Error: Values in Vector of Weights must sum to 1")
} else if (proc_step == "Values") {
return("Error: Check Aternatives x Criteria values, all must be numeric")
} else {
return(TRUE)
}
})
}
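# Illustrative sketch (not run): a minimal, hypothetical "all-in-one" input.
# Rows 1-3 of column 1 carry the indicators (flags, weights, criteria), the
# cells to their right carry the corresponding vectors, and the alternatives
# with their values start at row 4.
if (FALSE) {
waspas_db <- data.frame(
X1 = c("F", "W", "C", "Alt 1", "Alt 2"),
X2 = c("Benefit", "0.5", "Speed", "250", "300"),
X3 = c("Cost", "0.5", "Price", "9.5", "12.0"),
stringsAsFactors = FALSE)
checkInputFormat(waspas_db) # TRUE when the layout and values are consistent
}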
|
/scratch/gouwar.j/cran-all/cranData/waspasR/R/checkInputFormat.R
|
#' Data about helicopters
#'
#' A small database with real helicopter names and criteria that make sense.
#' The values and weights are generated at random.
#' The only requirement for the methods is that the weights sum to 1.
#' The set of Cost - Benefit flags make no sense in terms of the criteria they
#' refer to and are just a set of strings starting with the letter "C" (cost) or
#' "B" (benefit). That is, a criterion that must be monotonic of benefit (we
#' want to maximize it) can have a "c" flag, while a criterion that the user
#' would rightly judge to be cost (we want to minimize it) can have a "b" flag.
#'
#' @docType data
#'
#' @usage data(choppers)
#'
#' @format An object of class \code{"data.frame"}
#' \describe{
#' \item{alternatives}{A set of helicopter names that can be bought in the
#' marketplace. These data are in the first column, from row 4 to the last.}
#' \item{criteria}{A set of criteria that a helicopter buyer may deem relevant
#' for making a purchase decision. These data are in the 3rd row, from col 2
#' to the last.}
#' \item{weights}{Arbitrated by the decision maker, they attribute different
#' relative importance to the values of the criteria in percentage terms,
#' thus making a weighting of these criteria.}
#' \item{flags}{They determine whether the specific criterion is cost, that is,
#' the smaller, the better, or benefit, the greater the better.
#' These data are in the 1st row, from col 2 to the last.}
#' \item{values}{Randomly generated value, within a range that makes sense
#' These data are in the fourth row, from col 2 to the last.}
#' }
#' @references This data set was created with the help of Gustavo, Marcos &
#' Marcio, in the work cited below:
#' Soares de Assis, Gustavo & Santos, Marcos & Basilio, Marcio. (2023). Use of
#' the WASPAS Method to Select Suitable Helicopters for Aerial Activity Carried
#' Out by the Military Police of the State of Rio de Janeiro.
#' Axioms. 12. 77. 10.3390.
#'
#' @keywords datasets
#'
#' @examples
#' data(choppers)
#' head(choppers)
#'
"choppers"
|
/scratch/gouwar.j/cran-all/cranData/waspasR/R/choppers.R
|
#' @title normalize()
#'
#' @description Normalize the values of Alternatives X Criteria matrix
#' according to a given Cost - Benefit vector of flags.
#' @param normalized_matrix A data set object with Alternatives X Criteria
#' values to be normalized
#' @param vec_cost_benefit A vector of flags that determines if the criterion
#' is a Cost or Benefit. Must be the same size as Criteria and must contain
#' just strings initiated with B, b, C or c
#'
#' @return A data frame object that contains the input matrix values normalized
#'
#' @export
# Normalization: normalized_matrix Matrix into normalDb Matrix
normalize <- function(normalized_matrix, vec_cost_benefit) {
tryCatch({
# Test if normalized_matrix has just numeric-alike variables
sapply(normalized_matrix, as.numeric)
# Test vector of flags X matrix of values dimentions
if (length(vec_cost_benefit) != ncol(normalized_matrix)) {
return(paste("Error: The cost - benefit flags array must be the same size"
, "as the number of criteria"))
}
# Test flags contents, just strings initiated with 'B' (Benefit)
# or 'C' (Cost) are permitted
just_bc <- sort(unique(toupper(substr(vec_cost_benefit, 1, 1))))
if (!identical(just_bc, c("B", "C"))) {
return(paste("Error: Vector of flags must contains just strings initiated"
, "with B or C (i.e. b, c, B, C, Cost, Benefit, Ben etc.)"))
}
# Normalization loop
total_rows <- nrow(normalized_matrix)
flags <- toupper(substr(vec_cost_benefit, 1, 1))
for (iCol in seq_len(ncol(normalized_matrix))) {
alternative_vals <- normalized_matrix[1:total_rows, iCol]
alternative_vals <- sapply(alternative_vals, as.numeric)
maxv <- max(alternative_vals)
minv <- min(alternative_vals)
for (iRow in seq_len(nrow(normalized_matrix))) {
if (flags[iCol] == "C") { # Cost
normalized_matrix[iRow, iCol] <-
toString(minv / as.numeric(normalized_matrix[iRow, iCol]))
} else { # Benefit
normalized_matrix[iRow, iCol] <-
toString(as.numeric(normalized_matrix[iRow, iCol]) / maxv)
}
}
}
return(as.data.frame(normalized_matrix))
},
error = function(cond) {
stop(paste("E[N]", cond))
},
warning = function(cond) {
if (grepl("NAs intro", cond)) {
return("W[N] Error: Some non numeric-alike value was found")
}
})
}
|
/scratch/gouwar.j/cran-all/cranData/waspasR/R/normalize.R
|
#' @title sliceData
#'
#' @description Slice a matrix or data.frame in "all-in-one" format into
#' dedicated vectors/matrices as data.frame objects
#'
#' @param waspas_db A matrix or data.frame in "all-in-one" format
#' @param output_obj A flag that determines which vector or matrix (data.frame)
#' to extract from the input matrix; must be 'A' (Alternatives), 'C' (Criteria),
#' 'F' (Flags), 'I' (Indicators), 'V' (Values) or 'W' (Weights)
#'
#' @return A one-dimensional (vector) or two-dimensional (matrix) data.frame
#' with one of the following objects:
#' - if output_obj == "A": A vector of Alternatives
#' - if output_obj == "C": A vector of Criteria
#' - if output_obj == "F": A vector of Cost - Benefit Flags
#' - if output_obj == "I": A vector containing the indicators in cells [1:3, 1]
#' - if output_obj == "V": A matrix of values per Alternative x Criterion
#' - if output_obj == "W": A vector of Weights
#'
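#' @examples
#' # Minimal illustrative sketch: pull the individual pieces out of the bundled
#' # 'choppers' dataset
#' data(choppers)
#' sliceData(choppers, "A")  # alternatives
#' sliceData(choppers, "C")  # criteria
#' sliceData(choppers, "W")  # weights
#' head(sliceData(choppers, "V"))  # values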
#' @export
# Extract data from main data.frame
sliceData <- function(waspas_db, output_obj) {
tryCatch({
# extract vectors of flags, weights and criteria
output_obj <- toupper(substr(output_obj, 1, 1))
if (output_obj %in% c("C", "F", "W")) {
for (iRow in 1:3) {
if (toupper(substr(waspas_db[iRow, 1], 1, 1)) == output_obj) {
return(waspas_db[iRow, 2:ncol(waspas_db)])
}
}
} else if (output_obj == "A") {
alternatives <- waspas_db[4:nrow(waspas_db), 1]
# Transpose the result to output in the same standard of the other slices
vec_alts <- as.data.frame(t(alternatives))
return(vec_alts)
} else if (output_obj == "V") {
return(waspas_db[4:nrow(waspas_db), 2:ncol(waspas_db)])
} else if (output_obj == "I") {
indicators <- waspas_db[1:3, 1]
# Transpose the result to output in the same standard of the other slices
vec_inds <- as.data.frame(t(indicators))
return(vec_inds)
} else {
return(paste("Error: The value of parameter [output_obj] must be"
, "'A', 'C', 'F', 'I', 'V' or 'W', please reffer to help"))
}
},
error = function(cond) {
stop(paste("E[SD]", cond))
})
}
|
/scratch/gouwar.j/cran-all/cranData/waspasR/R/sliceData.R
|
#' @title waspasR()
#'
#' @description Runs the complete process, from slicing the original database
#' and performing all the computational steps (computing the WSM and WPM
#' formulas and applying the lambda as proposed by the WASPAS method) to
#' building the complete output in a new data.frame with the criteria as
#' column names, all the original data, and 3 appended columns with the
#' WSM, WPM and WASPAS rankings ("WSM_Rank", "WPM_Rank", "WASPAS_Rank").
#'
#' @param waspas_df The original data set in a proper format. The format can be
#' checked by checkInputFormat() function.
#'
#' @param lambda The lambda value (between 0 and 1)
#'
#' @return A data.frame object that contains the original data plus the WSM,
#' WPM and WASPAS rankings, or an error message if invalid data are entered.
#'
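#' @examples
#' # Minimal illustrative sketch: rank the alternatives of the bundled
#' # 'choppers' dataset with an arbitrary lambda of 0.5
#' data(choppers)
#' rankings <- waspasR(choppers, lambda = 0.5)
#' head(rankings)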
#' @export
# Putting everything together
waspasR <- function(waspas_df, lambda) {
# Test the entered data
if (missing(waspas_df)) return("Parameter waspas_df is missing")
if (missing(lambda)) return("Parameter lambda is missing")
format_ok <- checkInputFormat(waspas_df)
if (is.character(format_ok))
return(format_ok)
# Slice the raw data into specific objects
Alternative <- sliceData(waspas_df, "A")
criteria <- sliceData(waspas_df, "C")
weights <- sliceData(waspas_df, "W")
flags <- sliceData(waspas_df, "F")
values <- sliceData(waspas_df, "V")
# Normalize values
normalized <- normalize(values, flags)
# Run the methods calculations
wsm <- calcWSM(normalized, weights)
wpm <- calcWPM(normalized, weights)
# Apply lambda to get WASPAS
waspas <- applyLambda(wsm, wpm, lambda)
# Bind all the stuff
waspas_matrix <- data.frame(matrix(nrow = nrow(waspas_df) - 1
, ncol = ncol(waspas_df) + 3))
colnames(waspas_matrix) <- cbind("Alternative", criteria
, "WSM_Rank", "WPM_Rank", "WASPAS_Rank")
qtd_rows <- nrow(waspas_matrix)
qtd_cols_weights <- ncol(weights) + 1
qtd_cols_flags <- ncol(flags) + 1
qtd_cols_values <- ncol(values) + 1
waspas_matrix[1, 1] <- "W"
waspas_matrix[1, 2:qtd_cols_weights] <- weights
waspas_matrix[2, 1] <- "F"
waspas_matrix[2, 2:qtd_cols_flags] <- flags
waspas_matrix[3:qtd_rows, 1] <- t(Alternative)
waspas_matrix[3:qtd_rows, 2:qtd_cols_values] <- values
waspas_matrix[3:qtd_rows, "WSM_Rank"] <- waspas[, "WSM_Rank"]
waspas_matrix[3:qtd_rows, "WPM_Rank"] <- waspas[, "WPM_Rank"]
waspas_matrix[3:qtd_rows, "WASPAS_Rank"] <- waspas[, "WASPAS_Rank"]
return(as.data.frame(waspas_matrix))
}
|
/scratch/gouwar.j/cran-all/cranData/waspasR/R/waspasR.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(waspasR)
|
/scratch/gouwar.j/cran-all/cranData/waspasR/inst/doc/waspas-in-a-nutshell.R
|
---
title: "WASPAS in a nutshell"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{WASPAS in a nutshell}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(waspasR)
```
## waspasR motivation
Reliable data and formal mathematical processes are valuable resources for decision-making. There are many scientific methods, proposed by studies carried out in the field of Operational Research [OR], to support decision makers. The complexity of the sets of variables that must be considered in each decision increases uncertainty. Managers looking for assertiveness in communicating their decisions find these methods an important tool for justifying the decisions taken. However, the main obstacle to the use of these scientific resources is their intrinsic complexity. Many managers have an understandable difficulty dealing with the mathematical formalism of the long and detailed formulas that document the implementation of decision-making methods based on multiple criteria. The **waspasR** package offers a set of functions to easily create R applications that implement a suitable MCDM (Multiple Criteria Decision Making) method to determine the optimal alternative in an MCDM process: the "Weighted Aggregated Sum Product Assessment [WASPAS]" method, proposed by Zavadskas et al. (2012) \<<doi:10.5755/j01.eee.122.6.1810>\>.
Since WASPAS is composed of two other underlying methods, WSM (Weighted Sum Model) and WPM (Weighted Product Model), these are also covered by the **waspasR** package.
For complex decision-making processes, where large sets of alternatives and criteria must be considered, MCDM systems can be successfully applied. It has been shown that the accuracy of two methods used together is greater than the accuracy of each method applied separately. WASPAS implements this principle by aggregating the Weighted Sum Model [WSM] and Weighted Product Model [WPM] methods. WSM is one of the best known and most widely used MCDM methods for problems of this category. WPM is a variation that multiplies criterion values raised to the power of their weights instead of summing weighted values.
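The short sketch below is illustrative only: it checks the layout of the bundled `choppers` example dataset with `checkInputFormat()` and then ranks its alternatives with `waspasR()`, using an arbitrary `lambda` of 0.5.
```{r}
data(choppers)
checkInputFormat(choppers)  # TRUE when the layout is valid
rankings <- waspasR(choppers, lambda = 0.5)
head(rankings)
```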
|
/scratch/gouwar.j/cran-all/cranData/waspasR/inst/doc/waspas-in-a-nutshell.Rmd
|
---
title: "WASPAS in a nutshell"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{WASPAS in a nutshell}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(waspasR)
```
## waspasR motivation
Reliable data and formal mathematical processes are valuable resources for decision-making. There are many scientific methods, proposed by studies carried out in the field of Operational Research [OR], to support decision makers. The complexity of the sets of variables that must be considered in each decision increases uncertainty. Managers looking for assertiveness in communicating their decisions find these methods an important tool for justifying the decisions taken. However, the main obstacle to the use of these scientific resources is their intrinsic complexity. Many managers have an understandable difficulty dealing with the mathematical formalism of the long and detailed formulas that document the implementation of decision-making methods based on multiple criteria. The **waspasR** package offers a set of functions to easily create R applications that implement a suitable MCDM (Multiple Criteria Decision Making) method to determine the optimal alternative in an MCDM process: the "Weighted Aggregated Sum Product Assessment [WASPAS]" method, proposed by Zavadskas et al. (2012) \<<doi:10.5755/j01.eee.122.6.1810>\>.
Since WASPAS is composed of two other underlying methods, WSM (Weighted Sum Model) and WPM (Weighted Product Model), these are also covered by the **waspasR** package.
For complex decision-making processes, where large sets of alternatives and criteria must be considered, MCDM systems can be successfully applied. It has been shown that the accuracy of two methods used together is greater than the accuracy of each method applied separately. WASPAS implements this principle by aggregating the Weighted Sum Model [WSM] and Weighted Product Model [WPM] methods. WSM is one of the best known and most widely used MCDM methods for problems of this category. WPM is a variation that multiplies criterion values raised to the power of their weights instead of summing weighted values.
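The short sketch below is illustrative only: it checks the layout of the bundled `choppers` example dataset with `checkInputFormat()` and then ranks its alternatives with `waspasR()`, using an arbitrary `lambda` of 0.5.
```{r}
data(choppers)
checkInputFormat(choppers)  # TRUE when the layout is valid
rankings <- waspasR(choppers, lambda = 0.5)
head(rankings)
```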
|
/scratch/gouwar.j/cran-all/cranData/waspasR/vignettes/waspas-in-a-nutshell.Rmd
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' The swapping algorithm for computing Wasserstein barycenters
#'
#' @name swap_rcpp
#'
#' @param samples A cube containing samples for all subset posteriors (rows =
#' subsets, columns = par, slices = samples)
#' @param acc accuracy
#' @param iter maximum number of iterations of the algorithm
#' @param out boolean indicating whether output for each iteration should be displayed (default = false)
#'
#' @return a three dimensional array (rows = subsets, columns = par, slices =
#' samples) containing output from the swapping algorithm.
#'
swap_rcpp <- function(samples, acc = 0.001, iter = 10L, out = FALSE) {
.Call(`_waspr_swap_rcpp`, samples, acc, iter, out)
}
hmode <- function(x, cip) {
.Call(`_waspr_hmode`, x, cip)
}
hmodeci <- function(x, cip) {
.Call(`_waspr_hmodeci`, x, cip)
}
|
/scratch/gouwar.j/cran-all/cranData/waspr/R/RcppExports.R
|
#' Posterior summaries for the Wasserstein barycenter of subset posteriors
#'
#' \code{summary} gives a posterior summary (mean, mode, sd, HPD)
#'
#' @param x a \code{wasp} object.
#'
#' @details the method \link[waspr]{summary.wasp} has its own help page.
#'
#' @examples
#' library(waspr)
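#' # Illustrative only; mirrors the example on the summary.wasp help page
#' out <- wasp(pois_logistic,
#'             par.names = c("beta_s", "alpha_l", "beta_l",
#'                           "baseline_sigma", "baseline_mu",
#'                           "correlation", "sigma_s", "sigma_l"))
#' summary(out)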
#'
#' @export
#'
summary <- function(x){
UseMethod("summary", x)
}
#' Posterior summaries for the Wasserstein barycenter of subset posteriors
#'
#' Outputs and prints posterior summary statistics (mean, mode, sd, 95% Highest
#' Posterior Density interval)
#'
#' @param x a \code{wasp} object obtained from the function \code{wasp()}.
#'
#' @return Posterior summary statistics (mean, mode, sd, 95% HPD interval) for
#' all the Wasserstein barycenter of subset posteriors of all parameters in
#' the model.
#'
#' @method summary wasp
#'
#' @examples
#' library(waspr)
#' out <- wasp(pois_logistic,
#' par.names = c("beta_s", "alpha_l", "beta_l",
#' "baseline_sigma", "baseline_mu",
#' "correlation", "sigma_s", "sigma_l"))
#' summary(out)
#'
#' @export
#'
summary.wasp <- function(x){
mean = colMeans(x$barycenter)
sd = apply(x$barycenter, 2, sd)
mode = apply(x$barycenter, 2, mode_est)
hpd = apply(x$barycenter, 2, hpd_est)
out = cbind(mean, mode, sd, t(hpd))
colnames(out) = c("mean", "mode", "sd", "LB HPD", "UB HPD")
rownames(out) = colnames(x$barycenter)
return(out)
}
#' Print posterior summaries for the Wasserstein barycenter of subset posteriors
#'
#' Prints selected output for the Wasserstein barycenter of subset posteriors.
#'
#' @param x a \code{wasp} object obtained from the function \code{wasp()}.
#' @param ... further arguments passed to or from other methods.
#'
#' @return A print of posterior summaries for the Wasserstein barycenter of subset posteriors
#'
#' @method print wasp
#'
#' @examples
#' library(waspr)
#' out <- wasp(pois_logistic,
#' par.names = c("beta_s", "alpha_l", "beta_l",
#' "baseline_sigma", "baseline_mu",
#' "correlation", "sigma_s", "sigma_l"))
#' print(out)
#'
#' @export
#'
print.wasp <- function(x, ...){
cat("\n\n")
cat("WASP \n\n")
cat("Call: \n",
paste(deparse(x$call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
cat("Swapping algorithm: \n",
paste("iter = ", x$iter, "\n",
"acc = ", x$acc,
sep = ""),
"\n\n", sep = "")
cat("MCMC: \n",
paste("subsets = ", x$subsets, "\n",
"parameters = ", x$parameters, "\n",
"samples = ", x$samples,
sep = ""),
"\n\n", sep = "")
cat("Posterior summary of the Wasserstein Barycenter: \n")
print(summary(x))
cat("\n\n")
}
|
/scratch/gouwar.j/cran-all/cranData/waspr/R/classfunctions.R
|
#'pois_logistic
#'
#'A set of MCMC samples from 8 subset posteriors from the analysis of a joint
#'model with a logistic and a Poisson outcome variable.
#'
#'
#'@format An array with 3 dimensions, of which the first represents the
#' subset posteriors (size = 8), the second represents the parameters (size = 8)
#' and the third represents the number of MCMC samples (size = 450).
#'
"pois_logistic"
|
/scratch/gouwar.j/cran-all/cranData/waspr/R/data.R
|
#' Combine output of the swapping algorithm
#'
#' This (non-exported) function combines the output from the swapping algorithm (Puccetti,
#' Rüschendorf and Vanduffel, 2020).
#'
#' @param x a three dimensional array (rows = subsets, columns = par, slices
#' = samples) containing posterior samples for all subsets
#'
#' @return A \code{wasp} object, which can be further analyzed using the
#' associated function \code{\link{summary.wasp}}.
#'
#' @source Puccetti, G., Rüschendorf, L. & Vanduffel, S. (2020). On the
#' computation of Wasserstein barycenters, Journal of Multivariate Analysis,
#' 176.
#'
combine <- function(x){
out = apply(x, 2, colMeans)
return(out)
}
#' Compute the mode
#'
#' @param x a numeric vector
#'
#' @return The mode of a numeric vector as computed by the methods from Venter
#' (1967).
#'
#' @source Venter, J.H. (1967). On estimation of the mode, Annals of
#' Mathematical Statistics, 38(5), 1446-1455.
#'
#' @examples
#' library(waspr)
#' mode_est(pois_logistic[1,1,])
#'
#' @export
mode_est <- function(x){
if(!is.numeric(x)){stop("x is not numeric")}
hmode(x, 0.1)
}
#' Compute the 95 percent Highest Posterior Density interval
#'
#' @inheritParams mode_est
#'
#' @return A vector containing the lower and upper bound of the 95% Highest
#' Posterior Density interval of a numeric vector as computed by the methods
#' from Venter (1967).
#'
#' @source Venter, J.H. (1967). On estimation of the mode, Annals of
#' Mathematical Statistics, 38(5), 1446-1455.
#'
#' @examples
#' library(waspr)
#' hpd_est(pois_logistic[1,1,])
#'
#' @export
hpd_est <- function(x){
if(!is.numeric(x)){stop("x is not numeric")}
hmodeci(x, 0.95)
}
|
/scratch/gouwar.j/cran-all/cranData/waspr/R/utils.R
|
#' Compute Wasserstein barycenters of subset posteriors
#'
#' This function computes Wasserstein Barycenters of subset posteriors and
#' gives posterior summaries for the full posterior.
#'
#' @param mcmc a three dimensional array (rows = number of subset posteriors,
#' columns = number of parameters of the posterior distribution, slices =
#' number of samples for each subset posterior) containing posterior
#' samples for all subsets
#' @param par.names optional character vector with parameter names
#' @param acc accuracy of the swapping algorithm (default = 0.001)
#' @param iter maximum number of iterations of the swapping algorithm (default = 10)
#' @param out boolean indicating whether output for each iteration of the swapping algorithm should be displayed (default = false)
#'
#' @details The swapping algorithm developed by Puccetti, Rüschendorf and
#' Vanduffel (2020) is used to compute Wasserstein barycenters of subset
#' posteriors.
#'
#' @return A \code{wasp} object, which can be further analyzed using the
#' associated function \code{\link{summary.wasp}}.
#'
#' A \code{wasp} object contains the following elements (some elements are not
#' returned if not applicable)
#'
#' \describe{
#' \item{\code{barycenter}}{A matrix of posterior samples (rows) for
#' all parameters (columns) of the full posterior obtained by the swapping algorithm.}
#' \item{\code{raw}}{An array (\code{dim = c(subsets, parameters, samples)})
#' containing the raw output from the swapping algorithm.}
#' \item{\code{call}}{The call to the \code{wasp()} function.}
#' \item{\code{subsets}}{The number of subset posteriors in mcmc.}
#' \item{\code{parameters}}{The number of parameters in mcmc.}
#' \item{\code{samples}}{The number of posterior samples for each subset posterior in mcmc.}
#' \item{\code{acc}}{Accuracy of the swapping algorithm, default = 0.001.}
#' \item{\code{iter}}{Maximum number of iterations for the swapping algorithm, default = 10.}
#' }
#'
#' @source Puccetti, G., Rüschendorf, L. & Vanduffel, S. (2020). On the
#' computation of Wasserstein barycenters, Journal of Multivariate Analysis,
#' 176.
#'
#' @examples
#'
#' library(waspr)
#' out <- wasp(pois_logistic,
#' par.names = c("beta_s", "alpha_l", "beta_l",
#' "baseline_sigma", "baseline_mu",
#' "correlation", "sigma_s", "sigma_l"))
#' summary(out)
#'
#' @export
#'
wasp <- function(mcmc, par.names = NULL,
acc = 0.001, iter = 10, out = FALSE){
if(!is.null(par.names) & !is.character(par.names)){
stop("par.names is not a character vector")
}
if(!is.numeric(mcmc)){
stop("mcmc is not numeric")
}
if(!is.array(mcmc)){
stop("mcmc is not a three dimensional array")
}
if(length(dim(mcmc)) != 3){
stop("mcmc is not a three dimensional array")
}
subsets = dim(mcmc)[1]
par = dim(mcmc)[2]
samples = dim(mcmc)[3]
if(!is.null(par.names) & par != length(par.names)){
stop("Names are not provided for each parameter, length(par.names) != dim(mcmc)[2].")
}
if(out){
res_wasp = swap_rcpp(samples = mcmc, acc = acc, iter = iter, out = TRUE)
}else{
res_wasp = swap_rcpp(samples = mcmc, acc = acc, iter = iter)
}
barycenter = combine(res_wasp)
if(!is.null(par.names)){
colnames(barycenter) = par.names
dimnames(res_wasp) = list(NULL, par.names, NULL)
}
call <- match.call()
output = list('barycenter' = barycenter,
'raw' = res_wasp,
'call' = call,
'subsets' = subsets,
'parameters' = par,
'samples' = samples,
'iter' = iter,
'acc' = acc)
class(output) = c("wasp", class(output))
output
}
|
/scratch/gouwar.j/cran-all/cranData/waspr/R/wasp.R
|
#' waspr: an R package for computing Wasserstein barycenters of subset
#' posteriors
#'
#' This package contains functions to compute Wasserstein barycenters of subset
#' posteriors using the swapping algorithm developed by Puccetti, Rüschendorf
#' and Vanduffel (2020). The Wasserstein barycenter is a geometric approach for
#' combining subset posteriors. It allows for parallel and distributed
#' computation of the posterior in case of complex models and/or big datasets,
#' thereby increasing computational speed tremendously.
#'
#' @section Functions:
#'
#' The main function of the package is:
#'
#' \code{\link{wasp}}, which runs the swapping algorithm developed by
#' Puccetti, Rüschendorf and Vanduffel (2020), combines the output from the
#' swapping algorithm and computes the Wasserstein barycenter. It returns an
#' S3 object of type \code{wasp}.
#'
#' @source Puccetti, G., Rüschendorf, L. & Vanduffel, S. (2020). On the
#' computation of Wasserstein barycenters, Journal of Multivariate Analysis,
#' 176.
#'
#' @useDynLib waspr, .registration = TRUE
#'
#' @importFrom Rcpp evalCpp
#' @keywords internal
"_PACKAGE"
## usethis namespace: start
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/waspr/R/waspr-package.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----message=FALSE------------------------------------------------------------
library(waspr)
## -----------------------------------------------------------------------------
out <- wasp(pois_logistic,
iter = 10,
acc = 0.001,
par.names = c("beta_s", "alpha_l", "beta_l",
"baseline_sigma", "baseline_mu",
"correlation", "sigma_s", "sigma_l"))
## -----------------------------------------------------------------------------
summary(out)
|
/scratch/gouwar.j/cran-all/cranData/waspr/inst/doc/Tutorial.R
|
---
title: "Tutorial"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Tutorial}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
To use `waspr` the user first needs to load the package as follows:
```{r message=FALSE}
library(waspr)
```
The user must provide a 3-dimensional array with posterior samples for all
parameters for each subset posterior (rows = subset posteriors, columns =
parameters, slices = samples). The number of parameters and samples must be
equal for each subset posterior; these posterior samples may be obtained from
any type of MCMC algorithm. `waspr` provides an example array with posterior
samples for 8 parameters for 8 subset posteriors, `pois_logistic`, that will be
used to illustrate the functionality of the package.
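As an illustrative check (not required for the analysis), the dimensions of this example array can be inspected with `dim()`:
```{r}
# 8 subset posteriors x 8 parameters x 450 MCMC samples
dim(pois_logistic)
```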
The main function `wasp()` runs the swapping algorithm, combines its output and
computes the Wasserstein barycenter. Its first argument, `mcmc`, specifies the
3-dimensional array with samples for each subset posterior. The optional
arguments are `par.names`, which can be used to specify parameter names,
`iter`, which specifies the maximum number of iterations of the swapping
algorithm, `acc`, which specifies the accuracy of the swapping algorithm, and
`out`, which indicates whether the results per iteration of the swapping
algorithm should be printed.
```{r}
out <- wasp(pois_logistic,
iter = 10,
acc = 0.001,
par.names = c("beta_s", "alpha_l", "beta_l",
"baseline_sigma", "baseline_mu",
"correlation", "sigma_s", "sigma_l"))
```
`wasp()` prints the iteration number and cost function value of the swapping
algorithm. The `out` object is of class `wasp` and contains several elements. To
obtain the Wasserstein barycenter of the subset posteriors a user can specify
`out$barycenter`. This returns a matrix of posterior samples (rows) for all
parameters (columns) of the full data posterior. A summary of the approximation
of the full data posterior is available through `summary(out)`.
```{r}
summary(out)
```
|
/scratch/gouwar.j/cran-all/cranData/waspr/inst/doc/Tutorial.Rmd
|
---
title: "Tutorial"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Tutorial}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
To use `waspr` the user first needs to load the package as follows:
```{r message=FALSE}
library(waspr)
```
The user must provide a 3-dimensional array with posterior samples for all
parameters for each subset posterior (rows = subset posteriors, columns =
parameters, slices = samples). The number of parameters and samples must be
equal for each subset posterior; these posterior samples may be obtained from
any type of MCMC algorithm. `waspr` provides an example array with posterior
samples for 8 parameters for 8 subset posteriors, `pois_logistic`, that will be
used to illustrate the functionality of the package.
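As an illustrative check (not required for the analysis), the dimensions of this example array can be inspected with `dim()`:
```{r}
# 8 subset posteriors x 8 parameters x 450 MCMC samples
dim(pois_logistic)
```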
The main function `wasp()` runs the swapping algorithm, combines its output and
computes the Wasserstein barycenter. Its first argument, `mcmc`, specifies the
3-dimensional array with samples for each subset posterior. The optional
arguments are `par.names`, which can be used to specify parameter names,
`iter`, which specifies the maximum number of iterations of the swapping
algorithm, `acc`, which specifies the accuracy of the swapping algorithm, and
`out`, which indicates whether the results per iteration of the swapping
algorithm should be printed.
```{r}
out <- wasp(pois_logistic,
iter = 10,
acc = 0.001,
par.names = c("beta_s", "alpha_l", "beta_l",
"baseline_sigma", "baseline_mu",
"correlation", "sigma_s", "sigma_l"))
```
`wasp()` prints the iteration number and cost function value of the swapping
algorithm. The `out` object is of class `wasp` and contains several elements. To
obtain the Wasserstein barycenter of the subset posteriors a user can specify
`out$barycenter`. This returns a matrix of posterior samples (rows) for all
parameters (columns) of the full data posterior. A summary of the approximation
of the full data posterior is available through `summary(out)`.
```{r}
summary(out)
```
|
/scratch/gouwar.j/cran-all/cranData/waspr/vignettes/Tutorial.Rmd
|
#' Function to calculate short-, medium-, and long-term hydrologic anomalies
#'
#' This function was written with streamflow data in mind because streamflow
#' is the most commonly used exogenous variable for trend models for water
#' quality; however, the function is generic so that users may experiment with
#' anomalies from other daily hydrologic data. Examples of the inclusion of
#' streamflow anomalies in trend analysis of nutrients, pesticides and
#' surface water can be found in Alexander and Smith (2006), Ryberg and Vecchia
#' (2006), Ryberg and others (2010), Sullivan and others (2009), Vecchia
#' (2003), Vecchia (2005), and Vecchia and others (2008).
#'
#' @name compAnom
#' @title Calculates anomalies
#' @param dataset is the daily hydrologic data returned from \link{importDVs}
#' or data otherwise obtained and in the same format as that produced by
#' \link{importDVs}.
#' @param which indicates which set of anomalies; 1 calculates the 1-year,
#' 30-day, and 1-day anomalies; 2 calculates the 100-day, 10-day, and 1-day
#' anomalies; 3 calculates the 30-day and 1-day anomalies; and 4 calculates
#' the 10-year, 5-year, 1-year, one-quarter-year (seasonal), and 1-day
#' anomalies.
#' @keywords ts multivariate
#' @return A list. In the cases of "which" equal to 1 or 2, the
#' first element of the list is a data frame containing the station
#' identification number, dates, streamflow, and long-term, mid-term, and
#' short-term anomalies. The next three elements of the list are the length
#' in days of the long-term, mid-term, and short-term streamflow anomalies.
#' In the case of "which" equal to 3, the first element of the list is a data
#' frame containing the station identification number, dates, streamflow, and
#' mid-term and short-term anomalies. The next two elements of the list are
#' the length in days of the mid-term and short-term streamflow anomalies.
#' In the case of "which" equal to 4, the first element of the list is a data
#' frame containing the station identification number, dates, streamflow, and
#' 10-year, 5-year, annual, seasonal, and daily streamflow anomalies. The
#' next five elements of the list are the length in days of the 10-year,
#' 5-year, annual, seasonal, and daily streamflow anomalies.
#' @export
#' @examples
#' q05054000.85 <- importDVs("05054000", sdate="1985-10-01", edate="2010-09-30")
#' anoms05054000.1 <- compAnom(q05054000.85, which=1)
#' anoms05054000.2 <- compAnom(q05054000.85, which=2)
#' anoms05054000.3 <- compAnom(q05054000.85, which=3)
#' anoms05054000.4 <- compAnom(q05054000.85, which=4)
#' @references
#' Alexander, R.B. and Smith, R.A., 2006, Trends in the nutrient enrichment of
#' U.S. rivers during the late 20th century and their relation to changes in
#' probable stream trophic conditions: Limnology and Oceanography, v. 51, no.
#' 1, Part 2: Eutrophication of Freshwater and Marine Ecosystems, p.
#' 639--654, accessed August 1, 2012, at
#' \url{http://www.jstor.org/stable/4499617}.
#'
#' Ryberg, K.R. and Vecchia, A.V., 2006, Water-quality trend analysis and
#' sampling design for the Devils Lake Basin, North Dakota, January 1965
#' through September 2003: U.S. Geological Survey Scientific Investigations
#' Report 2006--5238, 64 p., accessed August 1, 2012, at
#' \url{http://pubs.usgs.gov/sir/2006/5238/}.
#'
#' Ryberg, K.R. and Vecchia, A.V., 2012, waterData---An R package for retrieval,
#' analysis, and anomaly calculation of daily hydrologic time series data,
#' version 1.0: U.S. Geological Survey Open-File Report 2012--1168, 8 p.
#' (Also available at \url{http://pubs.usgs.gov/of/2012/1168/}.)
#'
#' Ryberg, K.R., Vecchia, A.V., Martin, J.D., Gilliom, R.J., 2010, Trends in
#' pesticide concentrations in urban streams in the United States, 1992-2008:
#' U.S. Geological Survey Scientific Investigations Report 2010--5139, 101 p.
#' (Also available at \url{http://pubs.usgs.gov/sir/2010/5139/}.)
#'
#' Sullivan, D.J., Vecchia, A.V., Lorenz, D.L., Gilliom, R.J., Martin, J.D.,
#' 2009, Trends in pesticide concentrations in corn-belt streams, 1996--2006:
#' U.S. Geological Survey Scientific Investigations Report 2009--5132, 75 p.,
#' accessed August 2012, at \url{http://pubs.usgs.gov/sir/2009/5132/}.
#'
#' Vecchia, A.V., 2003, Relation between climate variability and stream water
#' quality in the continental United States, Hydrological Science and
#' Technology, v. 19, no. 1, 77--98.
#'
#' Vecchia, A.V., 2003, Water-quality trend analysis and sampling design for
#' streams in North Dakota, 1971--2000: U.S. Geological Survey Scientific
#' Investigations Report 2003--4094, 73 p., accessed August 1, 2012, at
#' \url{http://nd.water.usgs.gov/pubs/wri/wri034094/index.html}.
#'
#' Vecchia, A.V., 2005, Water-quality trend analysis and sampling design for
#' streams in the Red River of the North Basin, Minnesota, North Dakota, and
#' South Dakota, 1970--2001: U.S. Geological Survey Scientific Investigations
#' Report 2005--5224, 54 p. accessed August 1, 2012, at
#' \url{http://pubs.usgs.gov/sir/2005/5224/}.
compAnom <- function(dataset,which=1) {
if (which==1) {
lt<-365
mt<-30
st<-1
n1 <- length(dataset[,1])
if ( n1 >= lt ) {
qdf <- dataset[,c("staid","dates","val")]
qlog10 <- log10(dataset$val)
xmean <- mean(qlog10,na.rm=TRUE)
xlt <- rep(NA,n1)
for (i in (lt:n1)) {
xlt[i] <- mean(qlog10[(i - (lt - 1)):i],na.rm=FALSE)
}
ltfa <- xlt-xmean
xmt <- rep(NA,n1)
for (i in (mt:n1)) {
xmt[i] <- mean(qlog10[(i - (mt - 1)):i],na.rm=FALSE)
}
mtfa <- xmt-xlt
# short term anomaly is based on 1 day
stfa <- qlog10 - xmt
qdf <- cbind(qdf,ltfa,mtfa,stfa)
list(qdf, lt,mt,st)
} else {
stop("Dataset not long enough to calculate 1-year anomaly,
try which=2 or which=3")
}
}
else if (which==2) {
lt<-100
mt<-10
st<-1
n1 <- length(dataset[,1])
qdf <- dataset[,c("staid","dates","val")]
qlog10 <- log10(dataset$val)
xmean <- mean(qlog10,na.rm=TRUE)
xlt <- rep(NA,n1)
for (i in (lt:n1)) {
xlt[i] <- mean(qlog10[(i - (lt - 1)):i],na.rm=FALSE)
}
ltfa <- xlt-xmean
xmt <- rep(NA,n1)
for (i in (mt:n1)) {
xmt[i] <- mean(qlog10[(i - (mt - 1)):i],na.rm=FALSE)
}
mtfa <- xmt-xlt
# short term anomaly is based on 1 day
stfa <- qlog10 - xmt
qdf <- cbind(qdf,ltfa,mtfa,stfa)
list(qdf,lt,mt,st)
}
else if (which==3) {
st <- 1
mt <- 30
n1 <- length(dataset[,1])
qdf <- dataset[,c("staid","dates","val")]
qlog10 <- log10(dataset$val)
xmean <- mean(qlog10,na.rm=TRUE)
xmt <- rep(NA,n1)
for (i in (mt:n1)) {
xmt[i] <- mean(qlog10[(i - (mt - 1)):i],na.rm=FALSE)
}
mtfa <- xmt-xmean
# short term anomaly is based on 1 day
stfa <- qlog10 - xmt
qdf <- cbind(qdf,mtfa,stfa)
list(qdf, mt, st)
}
else if (which==4) {
ta<-10*365
fa<-5*365
lt<-365
mt<-90
st<-1
n1 <- length(dataset[,1])
if ( n1 >= ta ) {
qdf <- dataset[,c("staid","dates","val")]
qlog10 <- log10(dataset$val)
xmean <- mean(qlog10,na.rm=TRUE)
xta <- rep(NA,n1)
for (i in (ta:n1)) {
xta[i] <- mean(qlog10[(i - (ta - 1)):i],na.rm=FALSE)
}
tfa <- xta-xmean
xfa <- rep(NA,n1)
for (i in (fa:n1)) {
xfa[i] <- mean(qlog10[(i - (fa - 1)):i],na.rm=FALSE)
}
ffa <- xfa-xta
xlt <- rep(NA,n1)
for (i in (lt:n1)) {
xlt[i] <- mean(qlog10[(i - (lt - 1)):i], na.rm=FALSE)
}
ltfa <- xlt-xfa
xmt <- rep(NA,n1)
for (i in (mt:n1)) {
xmt[i] <- mean(qlog10[(i - (mt - 1)):i],na.rm=FALSE)
}
mtfa <- xmt-xlt
# short term anomaly is based on 1 day
stfa <- qlog10 - xmt
qdf <- cbind(qdf,tfa,ffa,ltfa,mtfa,stfa)
dimnames(qdf)[[2]] <- c("staid", "dates", "val", "tenyranom", "fiveyranom",
"annualanom","seasanom","dailyanom")
list(qdf, ta, fa, lt, mt, st)
} else {
stop("Dataset not long enough to calculate 10-year anomaly, try which=1")
}
}
else {
stop("which must be a numeric value 1, 2, 3, or 4")
}
}
|
/scratch/gouwar.j/cran-all/cranData/waterData/R/compAnom.R
|
#' Function to fill in missing time series data.
#'
#' This function will check the percent of missing values and the size
#' of the largest missing block of data. By default, if less than 40 percent
#' of the data are missing and the largest block is less than 30 days, the
#' data will be filled-in by using a structural time series, \link{StructTS}
#' from the base stats package in R (R Development Core Team, 2012). The
#' fitted structural time series is then smoothed via a state-space model,
#' \link{tsSmooth} from the base stats package in R.
#'
#' @name fillMiss
#' @title Fill-in missing hydrological values
#' @param dataset is a data frame in the format of the data frame returned by
#' \link{importDVs}, with missing values indicated by NA.
#' @param block is the size of the largest block of missing data that the
#' function will fill-in.
#' @param pmiss is the maximum percentage of the data that can be missing
#' in the dataset for the fill-in procedure to be performed.
#' @param model is the type of structural time series model, see
#' \link{StructTS}. The default value is trend. If level is used, the results
#' of \link{fillMiss}, which by default applies a fixed-interval smoothing to
#' the time series, \link{tsSmooth}, will be very close to linear interpolation.
#' @param smooth a logical that indicates whether or not to apply
#' \link{tsSmooth} to the structured time series.
#' @param ... further arguments to be passed to plotting method (see \link{par}).
#' @return a data frame with NAs in the "val" column replaced by
#' estimated values and a plot showing observed and estimated data. If there
#' are too many missing values, based on default or user defined limits, the
#' unaltered dataset is returned along with a message indicating that there is
#' too much missing data to fill in the missing values.
#' @note Many methods have been suggested for estimating missing hydrological
#' data. However, experiments showed that the functions in the base stats
#' package worked very well if the blocks of missing data were not long.
#' Users with larger blocks of missing data may want to explore other methods
#' including using nearby gages to estimate missing values at a streamgage.
#' Additional methods for filling in missing hydrological data are summarized
#' in Beauchamp and others (1989) and Elshorbagy and others (2000).
#'
#' To indicate which values have been replaced, the qualcode field is
#' populated with 'fM' for those values that were estimated using the
#' fillMiss function.
#' @export
#' @seealso \link{StructTS}, \link{tsSmooth}, \link{cleanUp}
#' @format The returned data frame has the following columns: \cr
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' staid \tab factor \tab USGS station identification number \cr
#' val \tab numeric \tab The value of the hydrologic variable \cr
#' dates \tab Date \tab Date of daily value \cr
#' qualcode \tab factor \tab Qualification code
#' }
#' @examples
#' data(exampleWaterData)
#' my.newdata <- fillMiss(misQ05054000, block=30, pmiss=50, log="y")
#' summary(misQ05054000)
#' summary(my.newdata)
#' # ph example
#' pH05082500<-importDVs("05082500", code="00400", stat="00008",
#' sdate="2000-01-01", edate="2011-12-31")
#' plotParam(pH05082500)
#' pHfilled<-fillMiss(pH05082500, block=45, ylim=c(7.5,9), yaxs="i")
#' @keywords NA hplot datagen ts smooth
#' @references
#' Beauchamp, J.J., 1989, Comparison of regression and time-series methods for
#' synthesizing missing streamflow records: Water Resources Bulletin, v. 25,
#' no. 5, p. 961--975.
#'
#' Elshorbagy, A.A., Panu, U.S., Simonovic, S.P., 2000, Group-based
#' estimation of missing hydrological data---I. Approach and general
#' methodology: Hydrological Sciences Journal, v. 45, no. 6, p. 849--866.
#'
#' R Development Core Team, 2012, R---A language and environment for statistical
#' computing: Vienna, Austria, R Foundation for Statistical Computing, [ISBN
#' 3-900051-07-0]. (Also available at \url{https://www.r-project.org}.)
fillMiss <- function(dataset, block=30, pmiss=40, model="trend",
smooth=TRUE, ... ) {
pck<-is.na(dataset$val)
percent<-0
max.mis<-0
if (sum(pck) > 0) {
percent<-round((sum(pck)/length(dataset$val))*100, digits=2)
my.message<-paste("There are ", sum(pck),
"missing values for", dataset$staid[1], "or", percent,
"percent of the data.", sep=" ")
message(my.message)
rles<-rle(is.na(dataset$val))
max.mis<-max(rles$lengths[rles$values])
my.message2<-paste("The maximum block of missing", dataset$staid[1],
"data is", max.mis, "days long.", sep=" ")
message(my.message2)
} else { noMis<-paste("No missing values for", dataset$staid[1], sep=" ")
message(noMis)
}
if ( percent >= pmiss | max.mis >= block ) {
tooMuch <- paste("Too much missing data for", dataset$staid[1],
"Cannot fill in missing values.", sep=" ")
message (tooMuch)
} else {
my.series<-window(dataset$val)
my.struct<-StructTS(my.series, type=model)
if ( smooth ) fit<-tsSmooth(my.struct) else fit<-fitted(my.struct)
#par(las=1, tck=0.01)
plot(my.series, typ="l", lwd=4, xlab="Observation",
ylab="Observed and estimated times series", ...)
lines(fit[,1],col="green")
leg.txt<-c("Observed values", "New time series")
legend("topleft", leg.txt, col=c("black","green"), lwd=c(4,1), bty="n",
ncol=2, cex=0.8)
dataset$val[pck] <- fit[pck,1]
lng <- length(grep("fM", levels(dataset$qualcode)))
if ( lng < 1 ) {
dataset$qualcode <- factor(dataset$qualcode, levels=c(levels(dataset$qualcode), 'fM'))
}
dataset$qualcode[pck] <- "fM"
if (!is.null(attributes(dataset)$stat) &
!is.null(attributes(dataset)$code)) {
subtext <- paste("Parameter code", attributes(dataset)$code,
" Statistics code", attributes(dataset)$stat,
sep=" ")
title(sub=subtext)
}
fillMess <- paste("Filled in", sum(pck), "values for", dataset$staid[1],
sep=" ")
message (fillMess)
}
dataset
}
|
/scratch/gouwar.j/cran-all/cranData/waterData/R/fillMiss.R
|
#' Function to import daily hydrologic time series
#' data given a USGS streamgage identification number.
#'
#' This function will import data from a WaterML2 service (current USGS
#' hydrological data standard). It will retrieve daily streamflow and
#' continuous water-quality data from the USGS Daily Values Site Web
#' Service \url{https://waterservices.usgs.gov/rest/DV-Service.html}
#' (U.S. Geological Survey, 2017d).
#'
#' @name importDVs
#' @title Imports daily USGS hydrologic times series data
#' @param staid is the USGS site identification number,
#' usually eight digits long, but can be longer. Users may search for
#' surface-water sites and obtain station identification numbers using the
#' USGS Site Web Service,
#' \url{https://waterservices.usgs.gov/rest/Site-Service.html} (USGS, 2017e);
#' using the National Water Information System: Mapper,
#' \url{https://maps.waterdata.usgs.gov/mapper/index.html} (U.S. Geological Survey, 2017a);
#' or using the National Water Information System: Web Interface to daily
#' surface-water data,
#' \url{https://waterdata.usgs.gov/nwis/dv/?referred_module=sw}
#' (U.S. Geological Survey, 2017f). The site identification number needs to
#' be entered as a character, that is in quotes, because many USGS
#' streamgage numbers begin with zero and the leading zero is necessary.
#' @param code is the USGS parameter code, a 5-digit number
#' used in the USGS computerized data system, National Water
#' Information System (NWIS), to uniquely identify a specific hydrologic
#' property or constituent. A list of parameter codes is available at
#' \url{https://nwis.waterdata.usgs.gov/usa/nwis/pmcodes} (U.S. Geological Survey, 2017b).
#' @param stat is the USGS statistics code, a 5-digit number
#' used in the USGS computerized data system, National Water
#' Information System (NWIS), to uniquely identify specific statistics, such
#' as daily mean, daily maximum, and daily minimum. The default,
#' 00003, is the mean daily value. A list of statistics codes is available at
#' \url{https://nwis.waterdata.usgs.gov/nwis/help/?read_file=stat&format=table}
#' (U.S. Geological Survey, 2017c).
#' Not all statistics are available at every gage.
#' @param sdate is the start date of the time series, in the format
#' yyyy-mm-dd, optional.
#' @param edate is the end date of the time series, in the format yyyy-mm-dd,
#' optional.
#' @return a data frame containing daily streamflow or other hydrologic data
#' for the site specified during the dates specified (inclusive). The USGS
#' parameter code, code, and the statistics code, stat, are attributes of the
#' data frame.
#' @references
#' U.S. Geological Survey, 2017a, National Water Information System: Mapper,
#' accessed January 3, 2017, at
#' \url{https://maps.waterdata.usgs.gov/mapper/index.html}.
#'
#' U.S. Geological Survey, 2017b, Parameter code definition,
#' National Water Information System: Web Interface, accessed January 3,
#' 2017, at \url{https://nwis.waterdata.usgs.gov/usa/nwis/pmcodes}.
#'
#' U.S. Geological Survey, 2017c, Stat codes (stat_cd),
#' National Water Information System: Web Interface, accessed January 3,
#' 2017, at
#' \url{https://nwis.waterdata.usgs.gov/nwis/help/?read_file=stat&format=table}.
#'
#' U.S. Geological Survey, 2017d, USGS daily values site web service:
#' REST Web Services, accessed January 3, 2017, at
#' \url{https://waterservices.usgs.gov/rest/DV-Service.html}.
#'
#' U.S. Geological Survey, 2017e, USGS site web service:
#' REST Web Services, accessed January 3, 2017, at
#' \url{https://waterservices.usgs.gov/rest/Site-Service.html}.
#'
#' U.S. Geological Survey, 2017f, USGS surface-water daily data for the Nation:
#' National Water Information System: Web Interface, accessed January 3,
#' 2017, at \url{http://waterdata.usgs.gov/nwis/dv/?referred_module=sw}.
#' @keywords ts IO
#' @importFrom xml2 read_xml
#' @importFrom xml2 xml_find_all
#' @importFrom xml2 xml_find_first
#' @importFrom xml2 xml_text
#' @importFrom xml2 xml_attr
#' @importFrom lubridate parse_date_time
#' @importFrom dataRetrieval getWebServiceData
#' @export
#' @format The returned data frame has the following columns \cr
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' staid \tab factor \tab USGS station identification number \cr
#' val \tab numeric \tab The value of the hydrologic variable \cr
#' dates \tab Date \tab Date of daily value \cr
#' qualcode \tab factor \tab Qualification code
#' }
#' @examples
#' # import mean daily streamflow for Red River of the North at Fargo, ND
#' q05054000 <- importDVs("05054000", sdate="2000-01-01", edate="2010-12-31")
#' head(q05054000)
#' # additional examples of how to use this function follow
#' # import mean daily gage height for Red River of the North at Grand Forks, ND
#' gh05082500 <- importDVs("05082500", code="00065", sdate="2000-01-01", edate="2010-12-31")
#' # import mean daily specific conductance for Red River of the North at Grand Forks, ND
#' sc05082500<- importDVs("05082500", code="00095", sdate="2000-01-01", edate="2010-12-31")
#' # import mean daily water temperature for Red River of the North at Fargo, ND
#' temp05054000<- importDVs("05054000", code="00010", sdate="2000-01-01", edate="2010-12-31")
#' # import median daily pH for Red River of the North at Fargo, ND
#' pH05054000<- importDVs("05054000", code="00400", stat="00008",
#' sdate="2000-01-01", edate="2010-12-31")
#' # examine the attributes of the data frame to show that the parameter code
#' # and statistics code are saved with the data frame
#' attributes(pH05054000)[c("code","stat")]
#' # import mean daily oxygen for Red River of the North at Fargo, ND
#' do05054000 <- importDVs("05054000", code="00300", sdate="2000-01-01", edate="2010-12-31")
#' # import mean daily turbidity for Red River of the North at Fargo, ND
#' turb05054000 <- importDVs("05054000", code="63680", sdate="2000-01-01", edate="2010-12-31")
importDVs <- function(staid, code="00060", stat="00003", sdate="1851-01-01",
edate=as.Date(Sys.Date(), format="%Y-%m-%d")) {
if (is.character(staid) == FALSE ) stop("staid needs to have quotes around it")
if (nchar(staid) < 8) stop ("staid must be at least 8 characters")
base_url <- "https://waterservices.usgs.gov/nwis/dv/?format=waterml,2.0"
url <- paste(base_url, "&site=", staid, "¶meterCd=", code, "&statCd=",
stat, sep = "")
url <- paste(url, "&startDt=", sdate, "&endDt=", edate, sep="")
# modified from importWaterML2 function of package dataRetrieval version 2.6.3
asDateTime <- TRUE
raw <- FALSE
if (class(url) == "character" && file.exists(url)) {
returnedDoc <- read_xml(url)
} else if(class(url) == 'raw') {
returnedDoc <- read_xml(url)
raw <- TRUE
} else {
returnedDoc <- xml_root(getWebServiceData(url, encoding = 'gzip'))
}
timeSeries <- xml_find_all(returnedDoc, "//wml2:Collection") # each parameter/site combo
if (0 == length(timeSeries)) {
df <- data.frame()
if (!raw) {
attr(df, "url") <- url
}
return(df)
}
TVP <- xml_find_all(timeSeries, ".//wml2:MeasurementTVP") #time-value pairs
time <- xml_text(xml_find_all(TVP, ".//wml2:time"))
if (asDateTime) {
time <- parse_date_time(time, c("%Y","%Y-%m-%d","%Y-%m-%dT%H:%M","%Y-%m-%dT%H:%M:%S",
"%Y-%m-%dT%H:%M:%OS","%Y-%m-%dT%H:%M:%OS%z"),
exact = TRUE)
}
values <- as.numeric(xml_text(xml_find_all(TVP, ".//wml2:value")))
idents <- xml_text(xml_find_all(timeSeries, ".//gml:identifier"))
idents <- strsplit(idents, "[.]")[[1]][2]
useIdents <- rep(idents, length(values))
tvpQuals <- xml_text(xml_find_first(TVP, ".//swe:value"))
df <- cbind.data.frame(staid = useIdents, val = values, dates = time,
qualcode = tvpQuals, stringsAsFactors = FALSE)
# retrieval does not create a series with the days for which there are missing
# values
# this code will create the days with missing values to have a complete
# time series
df$dates <- as.Date(df$dates)
beginDate <- df$dates[1]
endDate <- df$dates[dim(df)[[1]]]
myDates <- as.data.frame(seq.Date(beginDate, endDate, by=1))
dimnames(myDates)[[2]][1] <- "dates"
ndays <- dim(myDates)[1]
nobs <- dim(df)[1]
if ( nobs < ndays ) {
sitedat <- df
fixedData <- merge(myDates, sitedat, all.x=TRUE)
fixedData$staid <- sitedat$staid[1]
fixedData <- fixedData[,c("staid", "val", "dates", "qualcode")]
df <- fixedData
}
attributes(df)$code<-code
attributes(df)$stat<-stat
return(df)
}
#' Function to plot hydrologic times series.
#' Will plot more than one site at a time.
#'
#' @name plotParam
#' @title Plot Streamflow and Continous Water-Quality Data
#' @param data is the data frame in the format of that returned by
#' \link{importDVs}.
#' @param metric USGS streamflow data are usually in cubic feet per second;
#' however it may be converted to cubic meters per second for publication.
#' Likewise, gage height is usually in feet, but could be converted to
#' meters. The metric argument only has an effect on streamflow and gage
#' height.
#' @param logscale is a logical indicating whether or not the y-scale should be
#' log 10. Streamflow generally is plotted with a log scale and this only has
#' an effect on the plotting of streamflow data.
#' @param ylabel optionally allows user to pass a y-axis label.
#' @param ... further arguments to be passed to the plotting method
#' (see \link{par} and \link{xyplot}).
#' @return a lattice plot
#' @importFrom lattice xyplot
#' @importFrom latticeExtra yscale.components.log10ticks
#' @export
#' @examples
#' data(exampleWaterData)
#' plotParam(misQ05054000, code="00060", stat="00003", logscale=TRUE)
#' plotParam(misQ05054000, code=attributes(misQ05054000)$code,
#' stat=attributes(misQ05054000)$stat, logscale=TRUE)
#' @keywords hplot ts univar
plotParam<-function(data, logscale=FALSE, metric=FALSE, ylabel=NULL, ...) {
if (missing(ylabel) ) {
if ( is.null(attributes(data)$stat) | is.null(attributes(data)$code) ) {
stop("The data frame needs to have either stat and code attributes or
the ylabel argument needs to be used.")
} else {
stat<-attributes(data)$stat
code<-attributes(data)$code
if (stat=="00003") { stat.txt <- "Daily mean" } else
if (stat=="00001") { stat.txt <- "Daily maximum" } else
if (stat=="00002") { stat.txt <- "Daily minimum" } else
if (stat=="00008") { stat.txt <- "Daily median" } else {
message("Unknown stat code, a label should be passed using the
ylabel argument.")
}
if (code=="00060") {
if ( logscale== TRUE ) {
if (metric==TRUE) {
my.ylab <- paste(stat.txt,"streamflow, cubic meters per second",
sep=" ")
my.plot<-xyplot((val*0.0283)~dates | staid, data=data, typ="l",
scales=list(x = list(tck = -1), y=list(log=TRUE,
tck = -1)),
ylab=my.ylab, xlab="",
yscale.components = yscale.components.log10ticks,
...)
}
else if (metric=="FALSE") {
my.ylab<-paste(stat.txt,"streamflow, cubic feet per second",
sep=" ")
my.plot<-xyplot(val~dates|staid, data=data, typ="l",
scales=list(x = list(tck = -1), y=list(log=logscale,
tck = -1)),
ylab=my.ylab, xlab="",
yscale.components = yscale.components.log10ticks,
...)
}
else {
stop("metric must be TRUE for cubic meters per second or FALSE
for cubic feet per second")
}
}
if ( logscale==FALSE ) {
if (metric==TRUE) {
my.ylab <- paste(stat.txt,"streamflow, cubic meters per second",
sep=" ")
          my.plot<-xyplot((val*0.0283)~dates | staid, data=data,
typ="l", scales=list(x = list(tck = -1),
y=list(tck = -1)),
ylab=my.ylab, xlab="", ...)
}
else if (metric=="FALSE") {
my.ylab<-paste(stat.txt,"streamflow, cubic feet per second",
sep=" ")
my.plot<-xyplot(val~dates|staid, data=data, typ="l",
scales=list(x = list(tck = -1), y=list(tck = -1)),
ylab=my.ylab, xlab="", ...)
}
else {
stop("metric must be TRUE for cubic meters per second or FALSE
for cubic feet per second")
}
}
}
if (code=="00065") {
if (metric==FALSE) {
my.ylab <- paste(stat.txt, "gage height, feet", sep=" ")
my.plot<-xyplot(val~dates|staid, data=data, typ="l",
scales=list(x = list(tck = -1), y=list(tck = -1)),
ylab=my.ylab, xlab="", ...)
}
else if (metric==TRUE) {
my.ylab <- paste(stat.txt, "gage height, meters", sep=" ")
my.plot<-xyplot((val*0.3048)~dates|staid, data=data, typ="l",
scales=list(x = list(tck = -1), y=list(tck = -1)),
ylab=my.ylab, xlab="", ...)
}
else {
stop("metric must be TRUE for meters or FALSE for feet.")
}
}
if (code=="00095") {
my.ylab<-paste(stat.txt,
"specific conductance, water,\nunfiltered, microsiemens per centimeter\nat 25 degrees Celsius",
sep=" ")
my.plot<-xyplot(val~dates|staid, data=data, typ="l",
scales=list(x = list(tck = -1), y=list(tck = -1)),
ylab=my.ylab, xlab="", ...)
}
if (code=="00010") {
my.ylab<-paste(stat.txt, "temperature, water, degrees Celsius", sep=" ")
my.plot<-xyplot(val~dates|staid, data=data, typ="l",
scales=list(x = list(tck = -1), y=list(tck = -1)),
ylab=my.ylab, xlab="", ...)
}
if (code=="00300") {
my.ylab <- paste(stat.txt,
"dissolved oxygen, water, unfiltered, milligrams per liter",
sep=" ")
my.plot<-xyplot(val~dates|staid, data=data, typ="l",
scales=list(x = list(tck = -1), y=list(tck = -1)),
ylab=my.ylab, xlab="", ...)
}
if (code=="00400") {
my.ylab <- paste(stat.txt,
"pH, water, unfiltered, field, standard units",
sep=" ")
my.plot<-xyplot(val~dates|staid, data=data, typ="l",
scales=list(x = list(tck = -1), y=list(tck = -1)),
ylab=my.ylab, xlab="", ...)
}
if (code=="63680") {
my.ylab <- paste(stat.txt,
"turbidity, water, unfiltered,\nmonochrome near infra-red LED light,\n780-900 nm, detection angle 90 +/ -2.5 degrees,\nformazin nephelometric units (FNU)",
sep=" ")
my.plot<-xyplot(val~dates|staid, data=data, typ="l",
scales=list(x = list(tck = -1), y=list(tck = -1)),
ylab=my.ylab, xlab="", ...)
}
}
} else {
my.ylab <- ylabel
if ( logscale==TRUE ) {
my.plot<-xyplot(val~dates | staid, data=data, typ="l",
scales=list(x = list(tck = -1), y=list(log=TRUE,
tck = -1)),
ylab=my.ylab, xlab="",
yscale.components = yscale.components.log10ticks, ...)
} else {
my.plot<-xyplot(val~dates | staid, data=data, typ="l",
scales=list(x = list(tck = -1), y=list(log=FALSE,
tck = -1)),
ylab=my.ylab, xlab="")
}
}
my.plot
}
#' Function that returns USGS Daily Values Site Service URL for troubleshooting or
#' building a URL for other purposes.
#'
#' @name tellMeURL
#' @title USGS Daily Values Site Service URL
#' @param staid is the USGS site identification number, which
#' is usually eight digits long, but can be longer. Users may search for
#' surface-water sites and obtain station identification numbers using the
#' USGS Site Web Service,
#' \url{https://waterservices.usgs.gov/rest/Site-Service.html} (U.S. Geological
#' Survey, 2017d); using the National Water Information System: Mapper,
#' \url{https://maps.waterdata.usgs.gov/mapper/index.html} (U.S. Geological Survey, 2017a);
#' or using the National Water Information System: Web Interface to daily
#' surface-water data,
#' \url{https://waterdata.usgs.gov/nwis/dv/?referred_module=sw} (U.S.
#' Geological Survey, 2017e). The site identification number needs to be
#' entered as a character, that is in quotes, because many USGS streamgage
#' numbers begin with zero and the leading zero is necessary.
#' @param code is the USGS parameter code, a 5-digit number
#' used in the USGS computerized data system, National Water
#' Information System (NWIS), to uniquely identify a specific hydrologic
#' property or constituent. A list of parameter codes is available at
#' \url{https://nwis.waterdata.usgs.gov/usa/nwis/pmcodes} (U.S. Geological
#' Survey, 2017b).
#' @param stat is the USGS statistics code, a 5-digit number
#' used in the USGS computerized data system, NWIS, to uniquely identify
#' specific statistics, such as daily mean, daily maximum, and daily minimum.
#' The default, 00003, is the mean daily value. A list of statistics codes
#' is available at
#' \url{https://nwis.waterdata.usgs.gov/nwis/help/?read_file=stat&format=table}
#' (U.S. Geological Survey, 2017c).
#' Not all statistics are available at every gage.
#' @param sdate is the start date of the time series, in the format yyyy-mm-dd.
#' @param edate is the end date of the time series, in the format yyyy-mm-dd.
#' @keywords utilities
#' @export
#' @references
#' U.S. Geological Survey, 2017a, National Water Information System: Mapper,
#' accessed January 3, 2017, at
#' \url{https://maps.waterdata.usgs.gov/mapper/index.html}.
#'
#' U.S. Geological Survey, 2017b, Parameter code definition,
#' National Water Information System: Web Interface, accessed January 3,
#' 2017, at \url{https://nwis.waterdata.usgs.gov/usa/nwis/pmcodes}.
#'
#' U.S. Geological Survey, 2017c, Stat codes (stat_cd),
#' National Water Information System: Web Interface, accessed January 3,
#' 2017, at
#' \url{https://nwis.waterdata.usgs.gov/nwis/help/?read_file=stat&format=table}.
#'
#' U.S. Geological Survey, 2017d, USGS site web service:
#' REST Web Services, accessed January 3, 2017, at
#' \url{https://waterservices.usgs.gov/rest/Site-Service.html}.
#'
#' U.S. Geological Survey, 2017e, USGS surface-water daily data for the Nation:
#' National Water Information System: Web Interface, accessed January 3,
#' 2017, at \url{http://waterdata.usgs.gov/nwis/dv/?referred_module=sw}.
#' @return URL for USGS data
#' @examples
#' tellMeURL("05054000", code="00060", stat="00003", sdate="2000-01-01",
#' edate=as.Date(Sys.Date(), format="%Y-%m-%d"))
tellMeURL <- function(staid, code="00060", stat="00003", sdate="1851-01-01",
edate=as.Date(Sys.Date(), format="%Y-%m-%d")) {
if (is.character(staid) == FALSE ) stop("staid needs to have quotes around it")
if (nchar(staid) < 8) stop ("staid must be at least 8 characters")
base_url <- "https://waterservices.usgs.gov/nwis/dv/?format=waterml,2.0"
url <- paste(base_url, "&site=", staid, "&parameterCd=", code, "&statCd=",
stat, sep = "")
url <- paste(url, "&startDt=", sdate, sep="")
url <- paste(url, "&endDt=", edate, sep="")
url
}
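# Illustrative sketch, not part of the package source: the string returned by
# tellMeURL() points at a WaterML 2.0 document, so it can be fetched and parsed
# directly, for example with xml2::read_xml() (already imported elsewhere in
# this package). The site, parameter, and dates follow the example above; note
# that this performs a live web request.
dvURL <- tellMeURL("05054000", code = "00060", stat = "00003",
                   sdate = "2000-01-01", edate = "2010-12-31")
dvDoc <- xml2::read_xml(dvURL)  # parse the WaterML 2.0 response
dvDoc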
#' Function to retrieve information about a USGS streamgage site
#'
#' This provides some limited metadata about the USGS streamgage site.
#' @name siteInfo
#' @title Retrieve site information
#' @param staid is a character vector containing USGS site
#' identification number(s). USGS site numbers are usually eight digits long,
#' but can be longer. Users may search for surface-water sites and obtain
#' station identification numbers using the USGS Site Web Service,
#' \url{https://waterservices.usgs.gov/rest/Site-Service.html} (U.S. Geological
#' Survey, 2017b); using the National Water Information System: Mapper,
#' \url{https://maps.waterdata.usgs.gov/mapper/index.html} (U.S. Geological Survey, 2017a);
#' or using the National Water Information System: Web Interface to daily
#' surface-water data,
#' \url{https://waterdata.usgs.gov/nwis/dv/?referred_module=sw} (U.S.
#' Geological Survey, 2017c). The site identification number needs to be
#' entered as a character, that is in quotes, because many USGS streamgage
#' numbers begin with zero and the leading zero is necessary.
#' @keywords datagen
#' @references
#' U.S. Geological Survey, 2017a, National Water Information System: Mapper,
#' accessed January 3, 2017, at
#' \url{https://maps.waterdata.usgs.gov/mapper/index.html}.
#'
#' U.S. Geological Survey, 2017b, USGS site web service:
#' REST Web Services, accessed January 3, 2017, at
#' \url{https://waterservices.usgs.gov/rest/Site-Service.html}.
#'
#' U.S. Geological Survey, 2017c, USGS surface-water daily data for the Nation:
#' National Water Information System: Web Interface, accessed January 3, 2017,
#' at \url{https://waterdata.usgs.gov/nwis/dv/?referred_module=sw}.
#' @importFrom xml2 read_xml
#' @importFrom xml2 xml_find_all
#' @importFrom xml2 xml_root
#' @importFrom xml2 xml_children
#' @importFrom xml2 xml_attr
#' @importFrom dataRetrieval getWebServiceData
#' @export
#' @return a data frame containing the station identification number(s), the
#' USGS streamgage name(s), the decimal latitude(s), and decimal longitude(s).
#' @format a data frame with the following columns:
#' \tabular{llll}{
#' Name \tab Type \tab Description \cr
#' staid \tab character \tab USGS station identification number \cr
#' staname \tab character \tab USGS station name \cr
#' lat \tab numeric \tab Decimal latitude \cr
#' lng \tab numeric \tab Decimal longitude
#' }
#' @note Information retrieved using this function can be used to create a map of
#' multiple streamgage sites---see package vignette.
#' @examples
#' staInfo <- siteInfo("05054000")
#' staInfo
#' staInfo <- siteInfo(c("05054000", "05082500", "06342500"))
#' staInfo
#' # a list with an invalid station identification number
#' staInfo <- siteInfo(c("05054000", "05082500", "0642501"))
siteInfo<-function(staid) {
# modified from whatNWISsites function of package dataRetrieval version 2.6.3
retVal <- NULL
for (i in 1:length(staid)) {
if (is.character(staid[i]) == FALSE) stop("staid needs to have quotes around it")
if (nchar(staid[i]) < 8) stop ("staid must be at least 8 characters")
base_url <-"https://waterservices.usgs.gov/nwis/site?format=mapper&sites="
url <- paste(base_url, staid[i],
"&siteOutput=expanded&seriesCatalogOutput=true&outputDataTypeCd=all",
sep = "")
if (class(url) == "character" && file.exists(url)) {
returnedDoc <- read_xml(url)
} else if(class(url) == 'raw') {
returnedDoc <- read_xml(url)
raw <- TRUE
} else {
returnedDoc <- xml_root(getWebServiceData(url, encoding = 'gzip'))
}
siteDat <- xml_find_all(returnedDoc, "//site") # each parameter/site combo
doc <- xml_root(siteDat)
siteCategories <- xml_children(doc)
for (sc in siteCategories) {
sites <- xml_children(sc)
singlestaid <- xml_attr(sites, "sno")
staname <- xml_attr(sites, "sna")
lat <- as.numeric(xml_attr(sites, "lat"))
lng <- as.numeric(xml_attr(sites, "lng"))
df <- data.frame(staid=singlestaid, staname, lat, lng, stringsAsFactors = FALSE)
if (is.null(retVal)) {
retVal <- df
} else {
retVal <- rbind.data.frame(retVal, df)
}
}
}
retVal
}
#' Function that returns USGS Site Information Service URL for troubleshooting or
#' building a URL for other purposes.
#'
#' @name tellMeSiteURL
#' @title USGS Site Information Service URL
#' @param staid is the USGS site identification number, which
#' is usually eight digits long, but can be longer. Users may search for
#' surface-water sites and obtain station identification numbers using the
#' USGS Site Web Service,
#' \url{https://waterservices.usgs.gov/rest/Site-Service.html} (U.S. Geological
#' Survey, 2017b); using the National Water Information System: Mapper,
#' \url{https://maps.waterdata.usgs.gov/mapper/index.html} (U.S. Geological Survey, 2017a);
#' or using the National Water Information System: Web Interface to daily
#' surface-water data,
#' \url{https://waterdata.usgs.gov/nwis/dv/?referred_module=sw} (U.S.
#' Geological Survey, 2017c). The site identification number needs to be
#' entered as a character, that is in quotes, because many USGS streamgage
#' numbers begin with zero and the leading zero is necessary.
#' @keywords utilities
#' @return URL for USGS site information
#' @export
#' @references
#' U.S. Geological Survey, 2017a, National Water Information System: Mapper,
#' accessed January 3, 2017, at
#' \url{https://maps.waterdata.usgs.gov/mapper/index.html}.
#'
#' U.S. Geological Survey, 2017b, USGS site web service:
#' REST Web Services, accessed January 3, 2017, at
#' \url{https://waterservices.usgs.gov/rest/Site-Service.html}.
#'
#' U.S. Geological Survey, 2017c, USGS surface-water daily data for the Nation:
#' National Water Information System: Web Interface, accessed January 3, 2017,
#' at \url{https://waterdata.usgs.gov/nwis/dv/?referred_module=sw}.
#' @examples
#' tellMeSiteURL("05054000")
tellMeSiteURL <- function(staid) {
if (is.character(staid) == FALSE ) stop("Station number needs to have quotes around it")
if (nchar(staid) < 8) stop ("Station number must be at least 8 characters")
base_url <-"https://waterservices.usgs.gov/nwis/site?format=mapper&sites="
url <- paste(base_url, staid,
"&siteOutput=expanded&seriesCatalogOutput=true&outputDataTypeCd=all",
sep = "")
url
}
|
/scratch/gouwar.j/cran-all/cranData/waterData/R/importDVs.R
|
#' Function to plot hydrologic time series and anomalies
#'
#' @name plotAnoms
#' @title Plots streamflow anomalies
#' @param data is the anomaly list from the function
#' \link{compAnom}.
#' @param ... further arguments to be passed to plotting method (see \link{par}).
#' @return a plot.
#' @keywords hplot ts multivariate
#' @export
#' @examples
#' q05054000.85 <- importDVs("05054000", sdate="1985-01-01", edate="2010-09-30")
#' anoms05054000 <- compAnom(q05054000.85, which=1)
#' plotAnoms(anoms05054000)
plotAnoms<- function(data, ...) {
if ( ! is.data.frame(data) ) {
if ( length(data) == 4 ) {
par(mfrow=c(2,2), cex.lab=.9, las=1, tcl=0.5, xaxs="r", yaxs="r",
mar=c(4, 5, 0, 1) + 0.1,mgp=c(4, 1, 0),oma=c(0,0,1,0)+.1)
plot(data[[1]]$dates, data[[1]]$val, log="y", type="l", xlab="",
ylab="Streamflow, cubic feet per second", cex.lab=0.6, ...)
plot(data[[1]]$dates, data[[1]]$ltfa, type="l", xlab="",
ylab="Long-term flow anomaly", cex.lab=0.6, ...)
ltfa.sd <- round(sd(data[[1]]$ltfa, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", ltfa.sd, sep=""),side=3, line=-1,
cex=0.5)
plot(data[[1]]$dates, data[[1]]$mtfa, type="l", xlab="",
ylab="Medium-term flow anomaly", cex.lab=0.6, ...)
mtfa.sd<-round(sd(data[[1]]$mtfa, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", mtfa.sd, sep=""), side=3, line=-1,
cex=0.5)
plot(data[[1]]$dates, data[[1]]$stfa, type="l", xlab="",
ylab="Short-term flow anomaly", cex.lab=0.6, ...)
stfa.sd<-round(sd(data[[1]]$stfa, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", stfa.sd, sep=""), side=3, line=-1,
cex=0.5)
plttxt<-paste("Site", as.character(data[[1]]$staid[1]), "LTFA is", data[[2]],
"days", "MTFA is", data[[3]], "days", "STFA is", data[[4]],
sep=" ")
mtext(plttxt, outer=TRUE, side=3, cex=.8)
}
else if ( length(data) == 3 ) {
par(mfrow=c(2,2), cex.lab=.9, las=1, tcl=0.5, xaxs="r", yaxs="r",
mar=c(4, 5, 0, 1) + 0.1,mgp=c(4, 1, 0),oma=c(0,0,1,0)+.1)
plot(data[[1]]$dates, data[[1]]$val, log="y", type="l", xlab="",
ylab="Streamflow, cubic feet per second", cex.lab=0.6, ...)
plot(data[[1]]$dates, data[[1]]$mtfa, type="l", xlab="",
ylab="Medium-term flow anomaly", cex.lab=0.6, ...)
mtfa.sd<-round(sd(data[[1]]$mtfa, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", mtfa.sd, sep=""), side=3, line=-1,
cex=0.5)
plot(data[[1]]$dates, data[[1]]$stfa, type="l", xlab="",
ylab="Short-term flow anomaly", cex.lab=0.6, ...)
stfa.sd<-round(sd(data[[1]]$stfa, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", stfa.sd, sep=""), side=3, line=-1,
cex=0.5)
plttxt<-paste("Site", as.character(data[[1]]$staid[1]),
"MTFA is", data[[2]], "days", "STFA is", data[[3]],
sep=" ")
mtext(plttxt, outer=TRUE, side=3, cex=.8)
}
else if ( length(data) == 6 ) {
par(mfrow=c(3,2), cex.lab=.9, las=1, tcl=0.5, xaxs="r", yaxs="r",
mar=c(4, 5, 0, 1) + 0.1,mgp=c(4, 1, 0),oma=c(0,0,1,0)+.1)
plot(data[[1]]$dates, data[[1]]$val, log="y", type="l", xlab="",
ylab="Streamflow, cubic feet per second", cex.lab=0.6, ...)
plot(data[[1]]$dates, data[[1]]$tenyranom, type="l", xlab="",
ylab="Ten-year flow anomaly", cex.lab=0.6, ...)
tfa.sd <- round(sd(data[[1]]$tenyranom, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", tfa.sd, sep=""),side=3, line=-1,
cex=0.5)
plot(data[[1]]$dates, data[[1]]$fiveyranom, type="l", xlab="",
ylab="Five-year flow anomaly", cex.lab=0.6, ...)
ffa.sd <- round(sd(data[[1]]$fiveyranom, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", ffa.sd, sep=""),side=3, line=-1,
cex=0.5)
plot(data[[1]]$dates, data[[1]]$annualanom, type="l", xlab="",
ylab="One-year flow anomaly", cex.lab=0.6, ...)
ltfa.sd <- round(sd(data[[1]]$annualanom, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", ltfa.sd, sep=""),side=3, line=-1,
cex=0.5)
plot(data[[1]]$dates, data[[1]]$seasanom, type="l", xlab="",
ylab="Seasonal flow anomaly", cex.lab=0.6, ...)
mtfa.sd<-round(sd(data[[1]]$seasanom, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", mtfa.sd, sep=""), side=3, line=-1,
cex=0.5)
plot(data[[1]]$dates, data[[1]]$dailyanom, type="l", xlab="",
ylab="Daily flow anomaly", cex.lab=0.6, ...)
stfa.sd<-round(sd(data[[1]]$dailyanom, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", stfa.sd, sep=""), side=3, line=-1,
cex=0.5)
plttxt<-paste("Site", as.character(data[[1]]$staid[1]),
"Ten-year, five-year, one-year, seasonal, and daily anomalies",
sep=" ")
mtext(plttxt, outer=TRUE, side=3, cex=.8)
}
else {
stop("Data supplied must be a list of length 3, 4, or 6 generated by the
compQAnom function.")
}
}
else if (is.data.frame(data)) {
if ( length(data) == 6 ) {
par(mfrow=c(2,2), cex.lab=.9, las=1, tcl=0.5, xaxs="r", yaxs="r",
mar=c(4, 5, 0, 1) + 0.1,mgp=c(4, 1, 0),oma=c(0,0,1,0)+.1)
plot(data$dates, data$val, log="y", type="l", xlab="",
ylab="Streamflow, cubic feet per second", cex.lab=0.6, ...)
plot(data$dates, data$ltfa, type="l", xlab="",
ylab="Long-term flow anomaly", cex.lab=0.6, ...)
ltfa.sd <- round(sd(data$ltfa, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", ltfa.sd, sep=""),side=3, line=-1,
cex=0.5)
plot(data$dates, data$mtfa, type="l", xlab="",
ylab="Medium-term flow anomaly", cex.lab=0.6, ...)
mtfa.sd<-round(sd(data$mtfa, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", mtfa.sd, sep=""), side=3, line=-1,
cex=0.5)
plot(data$dates, data$stfa, type="l", xlab="",
ylab="Short-term flow anomaly", cex.lab=0.6, ...)
stfa.sd<-round(sd(data$stfa, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", stfa.sd, sep=""), side=3, line=-1,
cex=0.5)
plttxt<-paste("Site", as.character(data$staid[1]), sep=" ")
mtext(plttxt, outer=TRUE, side=3, cex=.8)
}
else if ( length(data) == 5 ) {
par(mfrow=c(2,2), cex.lab=.9, las=1, tcl=0.5, xaxs="r", yaxs="r",
mar=c(4, 5, 0, 1) + 0.1,mgp=c(4, 1, 0),oma=c(0,0,1,0)+.1)
plot(data$dates, data$val, log="y", type="l", xlab="",
ylab="Streamflow, cubic feet per second", cex.lab=0.6, ...)
plot(data$dates, data$mtfa, type="l", xlab="",
ylab="Medium-term flow anomaly", cex.lab=0.6, ...)
mtfa.sd<-round(sd(data$mtfa, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", mtfa.sd, sep=""), side=3, line=-1,
cex=0.5)
plot(data$dates, data$stfa, type="l", xlab="",
ylab="Short-term flow anomaly", cex.lab=0.6, ...)
stfa.sd<-round(sd(data$stfa, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", stfa.sd, sep=""), side=3, line=-1,
cex=0.5)
plttxt<-paste("Site", as.character(data$staid[1]), sep=" ")
mtext(plttxt, outer=TRUE, side=3, cex=.8)
}
else if ( length(data) == 8 ) {
par(mfrow=c(3,2), cex.lab=.9, las=1, tcl=0.5, xaxs="r", yaxs="r",
mar=c(4, 5, 0, 1) + 0.1,mgp=c(4, 1, 0),oma=c(0,0,1,0)+.1)
plot(data$dates, data$val, log="y", type="l", xlab="",
ylab="Streamflow, cubic feet per second", cex.lab=0.6, ...)
plot(data$dates, data$tenyranom, type="l", xlab="",
ylab="Ten-year flow anomaly", cex.lab=0.6, ...)
tfa.sd <- round(sd(data$tenyranom, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", tfa.sd, sep=""),side=3, line=-1,
cex=0.5)
plot(data$dates, data$fiveyranom, type="l", xlab="",
ylab="Five-year flow anomaly", cex.lab=0.6, ...)
ffa.sd <- round(sd(data$fiveyranom, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", ffa.sd, sep=""),side=3, line=-1,
cex=0.5)
plot(data$dates, data$annualanom, type="l", xlab="",
ylab="One-year flow anomaly", cex.lab=0.6, ...)
ltfa.sd <- round(sd(data$annualanom, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", ltfa.sd, sep=""),side=3, line=-1,
cex=0.5)
plot(data$dates, data$seasanom, type="l", xlab="",
ylab="Seasonal flow anomaly", cex.lab=0.6, ...)
mtfa.sd<-round(sd(data$seasanom, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", mtfa.sd, sep=""), side=3, line=-1,
cex=0.5)
plot(data$dates, data$dailyanom, type="l", xlab="",
ylab="Daily flow anomaly", cex.lab=0.6, ...)
stfa.sd<-round(sd(data$dailyanom, na.rm=TRUE), digits=3)
mtext(paste("Standard deviation is ", stfa.sd, sep=""), side=3, line=-1,
cex=0.5)
plttxt<-paste("Site", as.character(data$staid[1]),
"Ten-year, five-year, one-year, seasonal, and daily anomalies",
sep=" ")
mtext(plttxt, outer=TRUE, side=3, cex=.8)
}
else {
stop("Data supplied must be a data frame with 5, 6, or 8 columns.")
}
}
else {
stop("Data must be a list or dataframe.")
}
}
|
/scratch/gouwar.j/cran-all/cranData/waterData/R/plotAnoms.R
|
#' Function to identify and fix common problems with hydrologic data
#'
#' @name cleanUp
#' @title Cleans up hydrologic time series data
#' @param dataset is a data frame in format of the data frame returned by
#' \link{importDVs}.
#' @param task is either "view" or "fix." View will return a list containing
#' rows with negative values and rows with missing values. Fix will
#' replace negative values with NA and replace zeroes with the value
#' specified by the replace argument.
#' @param replace is the value used to replace 0 values. The default
#' is 0.1. For streamflow in small streams, one might want to use 0.01.
#' For daily data other than streamflow, such as turbidity, users may not want
#' to replace 0 values with a nonzero value. In those cases, replace can be
#' set to 0.
#' @note If calculating anomalies (see \link{compAnom}), the user may need to
#' replace isolated missing values with a value; however, if there are
#' larger periods with missing values, streamflow anomalies may not be an
#' appropriate use of the data. The substitution of some missing data with
#' values may be done using the function \link{fillMiss} that is part of this
#' package. However, care needs to be taken when filling in missing data.
#' @keywords NA ts utilities manip
#' @return A list showing days with negative values and days with
#' values of 0 when task is "view." When task is "fix" the fixed dataset
#' is returned.
#' When a negative value is replaced with NA, an "N" is added to the qualcode
#' field to indicate that there had been a negative number.
#' When a zero value is replaced, an "R" is added to the qualcode field to
#' indicate that a zero value was replaced.
#' @seealso \link{fillMiss}
#' @export
#' @examples
#' data(exampleWaterData)
#' head(badDataSet)
#' cleanUp(badDataSet, task="view")
#' q05054000Fix <- cleanUp(badDataSet, task="fix")
#' # replace 0s with NA, then one could use the fillMiss function
#' # to estimate values
#' q05054000Fix2 <- cleanUp(badDataSet, task="fix", replace=NA)
#' summary(badDataSet)
#' summary(q05054000Fix)
#' summary(q05054000Fix2)
cleanUp <- function(dataset, task="view", replace=0.1) {
if (replace < 0 & !is.na(replace) ) {
stop("The value of replace must be greater than or equal to 0.")
}
if (replace > 10 & !is.na(replace) ) {
stop("The value of replace must be less than 10.")
}
pck <- dataset$val < 0 & !is.na(dataset$val)
# dataset[pck,]
pck2 <- dataset$val == 0 & !is.na(dataset$val)
if (task == "view") {
list(dataset[pck,], dataset[pck2,])
}
else if (task == "fix") {
dataset$qualcode<-as.character(dataset$qualcode)
dataset$val[pck] <- NA
# add N to qualcode to indicate that there had been a negative number
dataset$qualcode[pck] <- paste(dataset$qualcode[pck], "N",sep=" ")
dataset$val[pck2] <- replace
# add R to qualcode to indicate 0s were replaced
dataset$qualcode[pck2] <- paste(dataset$qualcode[pck2], "R",sep=" ")
dataset$qualcode<-factor(dataset$qualcode)
dataset
}
else {
stop("Task must be view or fix.")
}
}
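# Illustrative sketch, not part of the package source: a typical clean-up
# workflow around cleanUp(). Negative values and zeros are reviewed first, then
# replaced with NA, and isolated gaps are then estimated with fillMiss(); the
# fillMiss() arguments mirror those used in the package vignette and should be
# adjusted for the record at hand.
data(exampleWaterData)
cleanUp(badDataSet, task = "view")                         # list problem rows first
flowNA <- cleanUp(badDataSet, task = "fix", replace = NA)  # negatives and zeros -> NA
flowFilled <- fillMiss(flowNA, block = 30, pmiss = 50, model = "trend",
                       smooth = TRUE, log = "y")           # estimate isolated gaps
summary(flowFilled)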
#' Function to calculate summary statistics for daily hydrologic time series.
#'
#' The summary statistics returned are useful for exploratory data analysis
#' and for describing the data set.
#' @note Hydrologic data are often skewed (Helsel and Hirsch, 2002). Summary
#' statistics help describe the degree of skewness and help to determine
#' the degree of applicability of hypothesis tests. Some data, in particular
#' streamflow, may need to be transformed to produce approximately normal
#' data.
#' @name summaryStats
#' @title Calculate summary statistics
#' @param dataset is the data frame containing hydrologic data
#' @param staid is used to label the output
#' @keywords arith
#' @return a data frame containing a number of summary statistics of the daily
#' hydrologic data series
#' @export
#' @format The returned data frame has the following columns, which are formatted
#' for putting in a report or table. \cr
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' Begin \tab character \tab The beginning date of the time series \cr
#' End \tab character \tab The ending date of the time series \cr
#' n \tab character \tab Number of rows \cr
#' NA \tab character \tab Number of missing values \cr
#' Neg \tab character \tab Number of negative values \cr
#' Min \tab character \tab The minimum value \cr
#' Q1 \tab character \tab The first quartile, 25th percentile \cr
#' Med \tab character \tab The median \cr
#' Mean \tab character \tab The mean \cr
#' Q3 \tab character \tab The third quartile, 75th percentile \cr
#' Max \tab character \tab The maximum value \cr
#' StdDev \tab character \tab The standard deviation \cr
#' IQR \tab character \tab The interquartile range \cr
#'}
#' @examples
#' data(exampleWaterData)
#' summaryStats(pH05082500, staid="05082500")
#' @references
#' Helsel, D.R. and Hirsch, R. M., 2002, Statistical methods in water resources:
#' U.S. Geological Survey Techniques of Water-Resources Investigations, book 4,
#' chap. A3, 522 p. (Also available at \url{http://pubs.usgs.gov/twri/twri4a3/}).
summaryStats<-function(dataset,staid=1) {
sdate<-dataset$dates[1]
edate<-dataset$dates[length(dataset$dates)]
n<-length(dataset$val)
pck<-is.na(dataset$val)
missing<-sum(pck)
pck<-dataset$val<0&!is.na(dataset$val)
negative<-sum(pck)
my.sum<-fivenum(dataset$val,na.rm=TRUE)
my.min<-my.sum[1]
my.25<-my.sum[2]
my.med<-my.sum[3]
qmean<-mean(dataset$val,na.rm=TRUE)
my.75<-my.sum[4]
my.max<-my.sum[5]
my.sd<-sd(dataset$val,na.rm=TRUE)
my.iqr<-IQR(dataset$val,na.rm=TRUE)
my.dfnums<-cbind(missing, negative, my.min, my.25, my.med, qmean, my.75,
my.max, my.sd, my.iqr)
n<-format(n, digits=1, big.mark=",", scientific=FALSE)
my.dfnums<-format(my.dfnums, digits=1, big.mark=",", scientific=FALSE)
my.df<-as.data.frame(cbind(as.character(sdate),as.character(edate),n,my.dfnums),
stringsAsFactors=FALSE)
dimnames(my.df)[[2]]<-c("Begin", "End", "n", "NA", "Neg", "Min","Q1", "Med",
"Mean", "Q3", "Max", "StdDev", "IQR")
row.names(my.df)<-staid
my.df
}
|
/scratch/gouwar.j/cran-all/cranData/waterData/R/summaryStats.R
|
#' Retrieval, analysis, and anomaly calculation of daily
#' hydrologic time series data.
#'
#' This package imports U.S. Geological Survey (USGS) daily hydrologic data
#' from USGS web services, plots the data, addresses some common data problems,
#' and calculates and plots anomalies. For a description of anomalies see
#' Vecchia (2003), and for examples of the application of streamflow anomalies
#' in trend analysis of nutrients, pesticides and surface water, see Alexander
#' and Smith (2006), Ryberg and Vecchia (2006), Ryberg and others (2010),
#' Sullivan and others (2009), Vecchia (2005), and Vecchia and others (2008).
#'
#' \tabular{ll}{
#' Package: \tab waterData\cr
#' Type: \tab Package\cr
#' Version: \tab 1.0.8\cr
#' Date: \tab 2017-04-28\cr
#' License: \tab Unlimited | file LICENSE \cr
#' LazyLoad: \tab yes\cr
#' }
#'
#' @name waterData-package
#' @aliases waterData
#' @docType package
#' @title Hydrologic Data Retrieval and Analysis and Anomaly Calculation
#' @author Karen R. Ryberg \email{kryberg@@usgs.gov} and
#' Aldo V. Vecchia \email{avecchia@@usgs.gov}
#' @keywords package
#' @references
#' Alexander, R.B. and Smith, R.A., 2006, Trends in the nutrient enrichment of
#' U.S. rivers during the late 20th century and their relation to changes in
#' probable stream trophic conditions: Limnology and Oceanography, v. 51, no.
#' 1, Part 2: Eutrophication of Freshwater and Marine Ecosystems, p.
#' 639--654, accessed August 1, 2012, at \url{http://www.jstor.org/stable/4499617}.
#'
#' Ryberg, K.R. and Vecchia, A.V., 2006, Water-quality trend analysis and
#' sampling design for the Devils Lake Basin, North Dakota, January 1965
#' through September 2003: U.S. Geological Survey Scientific Investigations
#' Report 2006--5238, 64 p., accessed August 1, 2012, at
#' \url{http://pubs.usgs.gov/sir/2006/5238/}.
#'
#' Ryberg, K.R. and Vecchia, A.V., 2012, waterData---An R package for retrieval,
#' analysis, and anomaly calculation of daily hydrologic time series data,
#' version 1.0: U.S. Geological Survey Open-File Report 2012--1168, 8 p.
#' (Also available at \url{http://pubs.usgs.gov/of/2012/1168/}.)
#'
#' Ryberg, K.R., Vecchia, A.V., Martin, J.D., Gilliom, R.J., 2010, Trends in
#' pesticide concentrations in urban streams in the United States, 1992--2008:
#' U.S. Geological Survey Scientific Investigations Report 2010-5139, 101 p.,
#' accessed August 1, 2012, at \url{http://pubs.usgs.gov/sir/2010/5139/}.
#'
#' Sullivan, D.J., Vecchia, A.V., Lorenz, D.L., Gilliom, R.J., Martin, J.D.,
#' 2009, Trends in pesticide concentrations in corn-belt streams, 1996--2006:
#' U.S. Geological Survey Scientific Investigations Report 2009-5132, 75 p.,
#' accessed August 1, 2012, at \url{http://pubs.usgs.gov/sir/2009/5132/}.
#'
#' Vecchia, A.V., 2003, Relation between climate variability and stream water
#' quality in the continental United States, Hydrological Science and
#' Technology, v. 19 no. 1, 77--98.
#'
#' Vecchia, A.V., 2003, Water-quality trend analysis and sampling design for
#' streams in North Dakota, 1971--2000: U.S. Geological Survey Scientific
#' Investigations Report 2003--4094, 73 p., accessed August 1, 2012, at
#' \url{http://nd.water.usgs.gov/pubs/wri/wri034094/index.html}.
#'
#' Vecchia, A.V., 2005, Water-quality trend analysis and sampling design for
#' streams in the Red River of the North Basin, Minnesota, North Dakota, and
#' South Dakota, 1970--2001: U.S. Geological Survey Scientific Investigations
#' Report 2005--5224, 54 p., accessed August 1, 2012, at
#' \url{http://pubs.usgs.gov/sir/2005/5224/}.
NULL
|
/scratch/gouwar.j/cran-all/cranData/waterData/R/waterData-package.R
|
### R code from vignette source 'vignette.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: vignette.Rnw:41-47
###################################################
# load waterData package, assuming it has already been installed on the system
library(waterData)
q05054000 <- importDVs("05054000", code="00060", stat = "00003",
sdate = "2000-01-01", edate = "2010-12-31")
# return first 6 rows for new data set to view a subset of the data
head(q05054000)
###################################################
### code chunk number 2: vignette.Rnw:53-55
###################################################
my.URL <- tellMeURL("05054000", code = "00060", stat = "00003",
sdate = "2000-01-01", edate = "2010-12-31")
###################################################
### code chunk number 3: vignette.Rnw:58-59
###################################################
cat("\\url{", my.URL, "}")
###################################################
### code chunk number 4: vignette.Rnw:68-71
###################################################
data(exampleWaterData, package = "waterData")
my.plot <- plotParam(badDataSet)
print(my.plot)
###################################################
### code chunk number 5: vignette.Rnw:80-81
###################################################
cleanUp(badDataSet, task = "view")
###################################################
### code chunk number 6: vignette.Rnw:89-90
###################################################
q05054000Fix <- cleanUp(badDataSet, task = "fix", replace = 0.1)
###################################################
### code chunk number 7: vignette.Rnw:96-98
###################################################
my.plot <- plotParam(q05054000Fix, code = "00060", stat = "00003", logscale = TRUE)
print(my.plot)
###################################################
### code chunk number 8: vignette.Rnw:104-105
###################################################
q05054000Fix <- cleanUp(badDataSet, task = "fix", replace = 10)
###################################################
### code chunk number 9: vignette.Rnw:111-113
###################################################
my.plot <- plotParam(q05054000Fix, code = "00060", stat = "00003", logscale = TRUE)
print(my.plot)
###################################################
### code chunk number 10: vignette.Rnw:122-123
###################################################
summary(misQ05054000)
###################################################
### code chunk number 11: vignette.Rnw:131-133
###################################################
my.newdata <- fillMiss(misQ05054000, block = 30, pmiss = 50, model = "trend",
smooth = TRUE, log = "y")
###################################################
### code chunk number 12: vignette.Rnw:142-153
###################################################
library(xtable)
my.xtable <- xtable(summaryStats(q05054000Fix, staid = "05054000"),
cap = "Summary statistics for daily streamflow series.
Begin, the beginning date for the series; End, the ending
date for the series; n, the number of observations; NA, the
number of missing values; Neg, the number of negative values;
Min, the minimum value; Q1, the first quartile or 25th percentile;
Med, the median value; Mean, the mean value; Q3, the third
quartile or 75th percentile; Max, the maximum value; StdDev,
the standard deviation; IQR, the interquartile range.")
print.xtable(my.xtable, size = c("scriptsize"))
###################################################
### code chunk number 13: vignette.Rnw:163-166
###################################################
par(cex.lab = 0.9, las = 1, tcl = 0.5, xaxs = "r", yaxs = "r", cex.axis = 0.8)
qqnorm(q05054000Fix$val)
qqline(q05054000Fix$val, qtype = 9)
###################################################
### code chunk number 14: vignette.Rnw:174-177
###################################################
par(cex.lab = 0.9, las = 1, tcl = 0.5, xaxs = "r", yaxs = "r", cex.axis = 0.8)
qqnorm(log10(q05054000Fix$val))
qqline(log10(q05054000Fix$val), qtype = 9)
###################################################
### code chunk number 15: vignette.Rnw:186-191
###################################################
my.sites <- c("05054000", "05082500", "05061000", "05050000", "05058700",
"05267000", "06342500", "06478000", "06414000")
my.siteInfo <- siteInfo(my.sites)
xtable(my.siteInfo[order(my.siteInfo$staid),], cap = "Information for select
U.S. Geological Survey streamgage sites, sorted in downstream order.")
###################################################
### code chunk number 16: vignette.Rnw:199-223
###################################################
library(maps)
library(mapdata)
par(las = 1, tck = 0.02, mar = c(0, 0, 0, 0))
map("state", region = c("minnesota", ".*dakota"))
map("rivers", add = TRUE, col = 4)
# label centered over gage site, jitter added to differentiate sites close
# together
mindif <- 0
maxiterations <- 30
iteration <- 1
while (mindif < 0.085) {
y.offset <- as.numeric(my.siteInfo$lat) + runif(length(my.siteInfo$lat),
0.12, 0.45)
mindif <- min(diff(unique(sort.int(round(as.numeric(y.offset), digits = 3)))))
iteration <- iteration + 1
if ( iteration >= maxiterations ) {
mindif <- 0.09
message("No ideal jitter found. Some labels may conflict")
}
}
points(my.siteInfo$lng, my.siteInfo$lat, pch = 19, col = "green")
text(xy.coords(my.siteInfo$lng, y.offset), my.siteInfo$staid, cex = 0.55)
box()
map.axes()
###################################################
### code chunk number 17: vignette.Rnw:230-231
###################################################
siteInfoURL <- tellMeSiteURL(c("05054000"))
###################################################
### code chunk number 18: vignette.Rnw:234-235
###################################################
cat("\\url{", siteInfoURL, "}")
###################################################
### code chunk number 19: vignette.Rnw:277-281
###################################################
anoms365.30.1 <- compAnom(q05054000, which = 1)
anoms100.10.1 <- compAnom(q05054000, which = 2)
anoms30.1 <- compAnom(q05054000, which = 3)
anomsLT <- compAnom(q05054000, which = 4)
###################################################
### code chunk number 20: vignette.Rnw:289-290
###################################################
plotAnoms(anoms365.30.1)
###################################################
### code chunk number 21: vignette.Rnw:298-299
###################################################
plotAnoms(anoms100.10.1)
###################################################
### code chunk number 22: vignette.Rnw:307-308
###################################################
plotAnoms(anoms30.1)
###################################################
### code chunk number 23: vignette.Rnw:316-325
###################################################
# Import not run during vignette building because it may time out
# over a slow connection, but this is the code to get the original data
# q05054000LT<-importDVs("05054000", code="00060", stat="00003", sdate="1949-10-01",
# edate="2010-9-30")
# data are available in example data set
data(exampleWaterData)
my.xtable <- xtable(summaryStats(q05054000LT, staid = "05054000"),
cap = "Summary statistics for daily streamflow series.")
print.xtable(my.xtable, size = c("footnotesize"))
###################################################
### code chunk number 24: vignette.Rnw:330-332
###################################################
q05054000LT <- cleanUp(q05054000LT, task = "fix")
anomsLT <- compAnom(q05054000LT, which = 4)
###################################################
### code chunk number 25: vignette.Rnw:338-339
###################################################
plotAnoms(anomsLT)
|
/scratch/gouwar.j/cran-all/cranData/waterData/inst/doc/vignette.R
|
#' Water Year Index and Type 1901-2017
#'
#' @description
#' \strong{Department of Water Resources}
#'
#' \url{http://cdec.water.ca.gov/cgi-progs/iodir/WSIHIST}
#'
#' California Cooperative Snow Surveys / Chronological Reconstructed Sacramento and San Joaquin Valley
#'
#' Water Year Hydrologic Classification Indices based on measured unimpaired runoff (in million acre-feet), subject to revision.
#'
#' \strong{Abbreviations:}
#' \itemize{
#' \item WY - Water year (Oct 1 - Sep 30)
#' \item W - Wet year type
#' \item AN - Above normal year type
#' \item BN - Below normal year type
#' \item D - Dry year type
#' \item C - Critical year type
#' }
#' Notes:
#'
#' Unimpaired runoff represents the natural water production of a river basin,
#' unaltered by upstream diversions, storage, export of water to or import of
#' water from other basins.
#'
#' Sacramento River Runoff is the sum (in maf) of Sacramento River at Bend Bridge,
#' Feather River inflow to Lake Oroville, Yuba River at Smartville, and
#' American River inflow to Folsom Lake. The WY sum is also known as the
#' Sacramento River Index, and was previously referred to as the "4 River Index" or
#' "4 Basin Index". It was previously used to determine year type classifications
#' under State Water Resources Control Board (SWRCB) Decision 1485.
#'
#' Sacramento Valley Water Year Index = 0.4 * Current Apr-Jul Runoff Forecast (in maf)
#' + 0.3 * Current Oct-Mar Runoff (in maf) + 0.3 * Previous Water Year's Index
#' (if the Previous Water Year's Index exceeds 10.0, then 10.0 is used).
#' This index, originally specified in the 1995 SWRCB Water Quality Control Plan,
#' is used to determine the Sacramento Valley water year type as implemented in
#' SWRCB D-1641. Year types are set by first of month forecasts beginning in
#' February. Final determination is based on the May 1 50% exceedance forecast.
#'
#' \strong{Sacramento Valley Water Year Hydrologic Classification:}
#' \itemize{
#' \item Wet - Equal to or greater than 9.2
#' \item Above Normal - Greater than 7.8, and less than 9.2
#' \item Below Normal - Greater than 6.5, and equal to or less than 7.8
#' \item Dry - Greater than 5.4, and equal to or less than 6.5
#' \item Critical - Equal to or less than 5.4
#' }
#'
#' San Joaquin River Runoff is the sum of Stanislaus River inflow to New Melones
#' Lake, Tuolumne River inflow to New Don Pedro Reservoir, Merced River inflow
#' to Lake McClure, and San Joaquin River inflow to Millerton Lake (in maf).
#'
#' San Joaquin Valley Water Year Index = 0.6 * Current Apr-Jul Runoff Forecast (in maf)
#' + 0.2 * Current Oct-Mar Runoff (in maf) + 0.2 * Previous Water Year's Index
#' (if the Previous Water Year's Index exceeds 4.5, then 4.5 is used).
#'
#' This index, originally specified in the 1995 SWRCB Water Quality Control Plan,
#' is used to determine the San Joaquin Valley water year type as implemented in
#' SWRCB D-1641. Year types are set by first of month forecasts beginning in
#' February. Final determination for San Joaquin River flow objectives is based
#' on the May 1 75% exceedance forecast.
#'
#' \strong{San Joaquin Valley Water Year Hydrologic Classification:}
#' \itemize{
#' \item Wet - Equal to or greater than 3.8
#' \item Above Normal - Greater than 3.1, and less than 3.8
#' \item Below Normal - Greater than 2.5, and equal to or less than 3.1
#' \item Dry - Greater than 2.1, and equal to or less than 2.5
#' \item Critical - Equal to or less than 2.1
#' }
#'
#' Eight River Index = Sacramento River Runoff + San Joaquin River Runoff.
#' This index is used from December through May to set flow objectives
#' as implemented in SWRCB Decision 1641.
#'
#'
#' @format A data frame with 227 rows and 7 variables:
#' \describe{
#' \item{WY}{October-September}
#' \item{Oct_Mar}{Runoff (maf)}
#' \item{Apr_Jul}{Runoff (maf)}
#' \item{WYsum}{Year Total Runoff (maf)}
#' \item{Index}{Water Year Type Index Score}
#' \item{Yr_type}{Water Year Type}
#' \item{location}{Sacramento Valley or San Joaquin Valley}
#' }
#'
#' @examples
#' head(water_year_indices)
#'
#' @source California Department of Water Resources, California Data Exchange Center (CDEC)
'water_year_indices'
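# Illustrative sketch, not part of the package source: classifying a Sacramento
# Valley index value with the thresholds documented above. The helper name is
# hypothetical, and the filter below assumes the location column is coded as
# "Sacramento Valley"; check the packaged data before relying on it.
classify_sac_wy <- function(index) {
  ifelse(index >= 9.2, "W",          # Wet: equal to or greater than 9.2
  ifelse(index >  7.8, "AN",         # Above normal: greater than 7.8, less than 9.2
  ifelse(index >  6.5, "BN",         # Below normal: greater than 6.5, at most 7.8
  ifelse(index >  5.4, "D", "C"))))  # Dry: greater than 5.4, at most 6.5; Critical otherwise
}
# e.g., compare against the packaged year types:
# sac <- water_year_indices[water_year_indices$location == "Sacramento Valley", ]
# table(classify_sac_wy(sac$Index), sac$Yr_type)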
|
/scratch/gouwar.j/cran-all/cranData/waterYearType/R/data.R
|