print.alacoxIC <- function(x, ...) {
xnames <- x$xnames
n <- x$n
ind.normalize <- x$normalize.X
ind.convergence <- x$convergence
ind.trun <- x$ind.trun
user.theta <- x$user.theta
pcon <- ifelse(ind.convergence, "converged", "failed to converge")
iteration <- x$iteration
coef <- x$b
theta <- x$theta
bic <- x$bic
ind_zero <- coef == 0
nb <- length(coef)
se <- x$se
z <- coef/se
  pvalue <- 2 * pnorm(abs(z), lower.tail = FALSE) # two-sided p-value, consistent with the 95% CI
ci <- cbind(coef - qnorm(0.975) * se, coef + qnorm(0.975) * se)
digit <- paste("%.", max(3, getOption("digits") - 3), "f", sep = "")
pcoef <- sprintf(digit, coef)
pz <- sprintf(digit, z)
ppvalue <- fun_less(sprintf(digit, pvalue))
pci <- t(apply(ci, 1, function(x, digit) sprintf(digit, x), digit = digit))
pse <- sprintf(digit, se)
pse[ind_zero] <- "-"
pz[ind_zero] <- "-"
ppvalue[ind_zero] <- "-"
pci[ind_zero, ] <- "-"
if(is.null(xnames)) {
pxnames <- paste(" X", 1:nb, sep = "")
} else {
pxnames <- paste(" ", xnames, sep = "")
}
results <- cbind(pcoef, pci, pse, pz, ppvalue)
colnames(results) <- c("coef", "lower.CI", "upper.CI", "se", "z", "p")
rownames(results) <- pxnames
presults <- as.data.frame(results)
cat("\n=======================================================\n")
cat(" Variable Selection - adpative lasso estimation\n")
if(ind.trun) {
cat(" (Input: interval censored and left truncated data)")
} else {
cat(" (Input: interval censored data)")
}
cat("\n-------------------------------------------------------\n")
print(presults)
cat("-------------------------------------------------------\n")
cat(paste(" * n = ", n, "\n", sep = ""))
if(user.theta) {
cat(paste(" * The user input tuning parameter = ", round(theta, 2), "\n", sep = ""))
} else {
cat(paste(" * Tuning parameter selected by BIC = ", round(theta, 2),"\n", sep = ""))
}
cat(paste(" * EM algorithm ", pcon, " after ", iteration, " iterations\n", sep = ""))
if(isTRUE(x$normalize.X)) cat(paste(" * X is normalized", sep = ""))
cat("\n=======================================================\n")
}
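# Illustrative sketch (not part of the package): how the Wald quantities
# printed above are derived from a coefficient vector and its standard errors.
# All values below are hypothetical.
demo.coef <- c(0.8, -1.2)
demo.se <- c(0.25, 0.40)
demo.z <- demo.coef / demo.se                         # Wald z-statistics
demo.p <- 2 * pnorm(abs(demo.z), lower.tail = FALSE)  # two-sided p-values
demo.ci <- cbind(demo.coef - qnorm(0.975) * demo.se,  # 95% Wald CI, lower
                 demo.coef + qnorm(0.975) * demo.se)  # 95% Wald CI, upper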
| /scratch/gouwar.j/cran-all/cranData/ALassoSurvIC/R/print.alacoxIC.R |
print.baseline <- function(x, ...) {
lower.set <- x$lower.set
upper.set <- x$upper.set
lambda <- x$lambda
clambda <- x$clambda
digit2 <- paste("%.", max(2, getOption("digits") - 5), "f", sep = "")
digit4 <- paste("%.", max(3, getOption("digits") - 3), "f", sep = "")
plower.set <- sprintf(digit2, lower.set)
pupper.set <- sprintf(digit2, upper.set)
plambda <- sprintf(digit4, lambda)
pclambda <- sprintf(digit4, clambda)
pset <- paste("(", plower.set, ", ", pupper.set, "]", sep = "")
presults <- data.frame(support = pset, lambda = plambda, cum.lambda = pclambda, stringsAsFactors=FALSE)
rownames(presults) <- paste(1:nrow(presults), ":", sep = "")
cat("\n========================================\n")
cat(" Baseline Hazard Estimation (NPMLE)")
cat("\n----------------------------------------\n")
print(presults)
cat("========================================\n")
}
| /scratch/gouwar.j/cran-all/cranData/ALassoSurvIC/R/print.baseline.R |
print.unpencoxIC <- function(x, ...) {
xnames <- x$xnames
n <- x$n
ind.normalize <- x$normalize.X
ind.convergence <- x$convergence
  ind.trun <- x$ind.trunc # exact field name set by unpencoxIC.default (avoids relying on partial matching)
pcon <- ifelse(ind.convergence, "converged", "failed to converge")
iteration <- x$iteration
coef <- x$b
nb <- length(coef)
se <- x$se
z <- coef/se
  pvalue <- 2 * pnorm(abs(z), lower.tail = FALSE) # two-sided p-value, consistent with the 95% CI
ci <- cbind(coef - qnorm(0.975) * se, coef + qnorm(0.975) * se)
digit <- paste("%.", max(3, getOption("digits") - 3), "f", sep = "")
pcoef <- sprintf(digit, coef)
pz <- sprintf(digit, z)
if(all(!is.na(x$cov))) {
ppvalue <- fun_less(sprintf(digit, pvalue))
} else {
ppvalue <- rep(NA, length(se))
}
pci <- t(apply(ci, 1, function(x, digit) sprintf(digit, x), digit = digit))
pse <- sprintf(digit, se)
if(is.null(xnames)) {
pxnames <- paste(" X", 1:nb, sep = "")
} else {
pxnames <- paste(" ", xnames, sep = "")
}
results <- cbind(pcoef, pci, pse, pz, ppvalue)
colnames(results) <- c("coef", "lower.CI", "upper.CI", "se", "z", "p")
rownames(results) <- pxnames
presults <- as.data.frame(results)
cat("\n===========================================================\n")
cat(" Unpenalized Nonparametric Maximum Likelihood Estimation\n")
if(ind.trun) {
cat(" (Input: interval censored and left truncated data)")
} else {
cat(" (Input: interval censored data)")
}
cat("\n-----------------------------------------------------------\n")
print(presults)
cat("-----------------------------------------------------------\n")
cat(paste(" * n = ", n, "\n", sep = ""))
cat(paste(" * EM algorithm ", pcon, " after ", iteration, " iterations\n", sep = ""))
if(isTRUE(x$normalize.X)) cat(paste(" * X is normalized", sep = ""))
cat("\n===========================================================\n")
}
| /scratch/gouwar.j/cran-all/cranData/ALassoSurvIC/R/print.unpencoxIC.R |
unpencoxIC <- function(...) UseMethod("unpencoxIC")
| /scratch/gouwar.j/cran-all/cranData/ALassoSurvIC/R/unpencoxIC.R |
unpencoxIC.default <- function(lowerIC, upperIC, X, trunc = NULL, normalize.X = TRUE, covmat = TRUE, cl = NULL, tol = 1e-3, niter = 1e5, string.cen = Inf, string.missing = NA, ...) {
match.call()
  if(is.null(trunc)) { # covers both a missing trunc and trunc passed explicitly as NULL
trunc <- NULL
ind.trunc <- FALSE
smallest.trunc <- 0
} else {
ind.trunc <- TRUE
smallest.trunc <- min(trunc)
}
if (!is.null(cl)) {
if (.Platform$OS.type == "windows") {
if (!inherits(cl, "cluster"))
cl <- NULL
} else {
if (inherits(cl, "cluster")) {
if (length(cl) < 2L)
cl <- NULL
} else {
if (cl < 2)
cl <- NULL
}
}
}
xnames <- colnames(X)
arglist <- fun_arglist(lowerIC, upperIC, X, trunc, normalize.X, tol, niter)
arglist$initial_lambda <- rep(1/nrow(arglist$set), nrow(arglist$set))
message(" Now: Obtaining the unpenalized nonparametric MLE")
unpen <- fun_unpenSurvIC(rep(0, ncol(arglist$z)), arglist)
final.b0 <- unpen$b
final.lambda <- unpen$lambda
log_pen <- log_penlikelihood(final.b0, arglist)
arglist$initial_lambda <- final.lambda
if (covmat == TRUE) {
message(" Now: calculating the covariance matrix")
cov <- fun_cov_parallel(b = final.b0, theta = 0, var.h = 5, arglist, cl)
} else {
    cov <- matrix(NA_real_, ncol(arglist$z), ncol(arglist$z)) # NA matrix so that diag() and the scaling below stay valid
}
message(" Done.")
if (!is.null(cl)) stopCluster(cl)
if (normalize.X == TRUE) {
    atrue_mu <- arglist$true_mu
    atrue_sd <- arglist$true_sd
    final.b <- final.b0 / atrue_sd
    final.cov <- cov / (atrue_sd %*% t(atrue_sd))
    final.lambda <- final.lambda / exp(sum(final.b * atrue_mu))
} else {
final.b <- final.b0
final.cov <- cov
}
results <- list()
results$xnames <- xnames
results$n <- nrow(X)
results$b <- final.b
results$se <- sqrt(diag(final.cov))
results$cov <- final.cov
results$lambda <- final.lambda
results$lambda.set <- arglist$set
results$convergence <- unpen$convergence
results$iteration <- unpen$iteration
results$ind.trunc <- ind.trunc
  results$smallest.trunc <- smallest.trunc # computed above; avoids evaluating min(NULL)
results$normalize.X <- normalize.X
results$log_likelihood <- log_pen
class(results) <- "unpencoxIC"
return(results)
}
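# Illustrative usage sketch (assumptions: the ALassoSurvIC package is installed
# and the toy interval-censored data below are purely hypothetical).
# library(ALassoSurvIC)
# set.seed(1)
# n <- 50
# lowerIC <- rexp(n)                       # lower bounds of censoring intervals
# upperIC <- lowerIC + rexp(n)             # upper bounds (upperIC > lowerIC)
# X <- matrix(rnorm(n * 3), ncol = 3)      # covariate matrix
# fit <- unpencoxIC(lowerIC, upperIC, X)   # unpenalized NPMLE
# print(fit)                               # dispatches to print.unpencoxIC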
| /scratch/gouwar.j/cran-all/cranData/ALassoSurvIC/R/unpencoxIC.default.R |
#' AMAPVox package
#'
#' The package provides a set of R functions for working with voxel spaces
#' (read, write, plot, etc.).
#' Voxel spaces are read from text-based output files of the
#' \href{https://amapvox.org}{AMAPVox software}.
#'
#' @section References: \itemize{ \item{ Research paper first describing
#' AMAPVox:\cr Vincent, G., Antin, C., Laurans, M., Heurtebize, J., Durrieu,
#' S., Lavalley, C., & Dauzat, J. (2017). Mapping plant area index of tropical
#' evergreen forest by airborne laser scanning. A cross-validation study using
#' LAI2200 optical sensor. Remote Sensing of Environment, 198, 254-266.
#' \doi{10.1016/j.rse.2017.05.034} } \item{ Up-to-date description of PAD/LAD
#' estimators implemented in AMAPVox:\cr VINCENT, Gregoire; PIMONT, François;
#' VERLEY, Philippe, 2021, "A note on PAD/LAD estimators implemented in
#' AMAPVox 1.7", \doi{10.23708/1AJNMP}, DataSuds, V1 } }
#'
#' @section Contact: \email{[email protected]}
#'
#' @docType package
#'
#' @author Philippe VERLEY \email{[email protected]}
#'
#' @name AMAPVox
#'
#' @importFrom methods callGeneric is new show
#' @importFrom utils write.table download.file unzip
#' @importFrom data.table data.table fread merge.data.table := .SD rbindlist
#' @importFrom stringr str_match str_trim str_squish str_starts str_split
#' str_remove_all str_extract
#' @importFrom dplyr %>%
#' @importFrom rappdirs user_data_dir
#' @importFrom curl nslookup curl_fetch_memory
#' @importFrom stats dbeta weighted.mean
#' @importFrom jsonlite fromJSON prettify
# commented since these packages are suggested instead of requested
# @importFrom rgl par3d plot3d bgplot3d
# @importFrom ggplot2 ggplot aes ggtitle geom_line xlab ylab
# @importFrom RANN nn2
"_PACKAGE"
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/AMAPVox.R |
#' Identify butterflies from a VoxelSpace object.
#'
#' @docType methods
#' @rdname butterfly
#' @description Identify butterflies from a [`VoxelSpace-class`] object.
#'
#' A butterfly refers to a non-empty isolated voxel. Non-empty means that there
#' is one or more hits recorded in the voxel. Isolated means that voxels in the
#' [Moore neighborhood](https://en.wikipedia.org/wiki/Moore_neighborhood)
#' of rank 1 are empty (no hit).
#'
#' @return a list of voxel index (i, j, k) identified as butterfly.
#' @param vxsp a [`VoxelSpace-class`] object
#' @examples
#' # load a voxel file
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' # identify butterflies
#' btf <- butterfly(vxsp)
#' # clear butterflies
#' clear(vxsp, butterfly(vxsp))
#' @seealso [clear()]
#' @export
butterfly <- function(vxsp) {
i <- j <- k <- nbSampling <- nbEchos <- NULL # due to NSE notes in R CMD check
if (!requireNamespace("RANN", quietly = TRUE)) {
stop(
"Package \"RANN\" must be installed to remove butterfly",
"\n",
"> install.packages(\"RANN\")",
call. = FALSE)
}
# must be a voxel space
stopifnot(is.VoxelSpace(vxsp))
# cat("Looking for butterflies in", basename(vxsp@file), "...", "\n")
# pointer to voxels
vx <- vxsp@data
# subset of voxels with nbEchos > 0
vx.hit.index <- vx[nbEchos > 0, which = TRUE]
vx.hit <- vx[vx.hit.index, list(i, j, k, nbEchos)]
# moore neighborhood of rank 1
neighbors <- RANN::nn2(data = vx.hit[, list(i, j, k)],
k = 27, searchtype = "radius", radius = 1.8)
# remove current voxel from neighbors
neighbors <- neighbors$nn.idx[, -1]
# identify butterflies
# butterflies = voxels without neighbors in subset of voxels with nbEchos > 0
butterflies <- which(
apply(neighbors, 1, function(neighbor) all(neighbor == 0)))
return( vx.hit[butterflies, list(i, j, k)] )
}
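# Illustrative check (not part of the package): on the unit (i, j, k) grid,
# the farthest rank-1 Moore neighbor lies at distance sqrt(3) ~ 1.732 and the
# nearest rank-2 voxel at distance 2, so the search radius of 1.8 used above
# captures exactly the 26 neighbors plus the voxel itself (k = 27).
sqrt(3) < 1.8 && 1.8 < 2  # TRUE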
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/Butterfly.R |
#' Extract canopy from voxel space.
#'
#' @docType methods
#' @rdname canopy
#'
#' @description Extract canopy from [`VoxelSpace-class`] object.
#' The canopy layer is the set of highest voxels with number of hits greater
#' than a user-defined threshold.
#'
#' ## Minimum number of hits/echos
#'
#' Minimum number of hits is set by default to one, meaning that a single echo
#' in a voxel is enough to consider that there is some vegetation. Increasing
#' this threshold will tend to lower the canopy level or introduce some gaps (
#' i-j-cells with no vegetation). This `hit.min` filter is stronger than
#' [butterfly()] since it does not discriminate isolated voxels.
#' A reasonable value for `hit.min` cannot be suggested ad-hoc since it
#' strongly depends on sampling intensity. Removing butterflies prior to
#' extracting canopy is advisable.
#'
#' ## Gaps
#'
#' For a VoxelSpace with fully defined ground level (see [ground()]),
#' missing canopy cells can be interpreted as gaps. Conversely if both ground
#' and canopy are missing for a i-j-cell, then it is inconclusive.
#'
#' ## Above/below canopy
#'
#' Function `aboveCanopy` returns voxel index above canopy level (excluded).
#' Function `belowCanopy` returns voxel index below canopy level (included).
#'
#' ## Canopy Height Model
#'
#' Function `canopyHeight` returns ground distance at canopy level, including
#' gaps.
#'
#' @return [`data.table::data.table-class`] object with voxel index either
#' below canopy, canopy level or above canopy
#'
#' @param vxsp a [`VoxelSpace-class`] object.
#' @param hit.min a positive integer, minimum number of hit/echo in a voxel
#' to consider it contains vegetation.
#' @param ... additional parameters which will be passed to `canopy` function.
#' So far only `hit.min` parameter.
#'
#' @seealso [butterfly()], [ground()]
#'
#' @examples
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' cnp <- canopy(vxsp)
#' acnp <- aboveCanopy(vxsp)
#' bcnp <- belowCanopy(vxsp)
#' # canopy layer included in below canopy subset
#' all(bcnp[cnp, on=list(i, j, k)] == cnp) # TRUE expected
#' # extract ground distance from canopy voxels
#' vxsp@data[cnp, list(i, j, ground_distance), on=list(i, j, k)]
#'
#' @export
canopy <- function(vxsp, hit.min = 1) {
# must be a voxel space
stopifnot(is.VoxelSpace(vxsp))
# nbSampling and nbEchos variables required
stopifnot("nbEchos" %in% names(vxsp))
# extract highest voxel with minimum hits count
nbEchos <- i <- j <- k <- NULL # trick to avoid notes in R CMD check
vxsp@data[nbEchos >= hit.min, list(i, j, k)][, list(k=max(k)), by=list(i, j)]
}
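# Minimal sketch (not part of the package) of the canopy extraction logic
# above: keep, for each (i, j) column, the highest k among voxels with enough
# hits. The toy voxels below are hypothetical.
# library(data.table)
# vx.demo <- data.table(i = c(0, 0, 0, 1), j = 0, k = c(0, 1, 2, 0),
#                       nbEchos = c(3, 1, 0, 2))
# vx.demo[nbEchos >= 1, list(i, j, k)][, list(k = max(k)), by = list(i, j)]
# # expected rows: (i=0, j=0, k=1) and (i=1, j=0, k=0)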
#' @rdname canopy
#' @export
belowCanopy <- function(vxsp, ...) {
k.cnp <- i <- j <- k <- NULL # trick to avoid notes in R CMD check
# create a i, j, k, k.cnp data.table
vx <- merge(vxsp@data[, list(i, j, k)], canopy(vxsp, ...),
by = c("i", "j"), suffixes = c("", ".cnp"))
# extract i, j, k voxels for k <= k.cnp
vx[k <= k.cnp, list(i, j, k)]
}
#' @rdname canopy
#' @export
aboveCanopy <- function(vxsp, ...) {
k.cnp <- i <- j <- k <- NULL # trick to avoid notes in R CMD check
# create a i, j, k, k.cnp data.table
vx <- merge(vxsp@data[, list(i, j, k)], canopy(vxsp, ...),
by = c("i", "j"), suffixes = c("", ".cnp"))
# extract i, j, k voxels for k > k.cnp
vx[k > k.cnp, list(i, j, k)]
}
#' @rdname canopy
#' @export
canopyHeight <- function(vxsp, ...) {
# get ground
ground <- ground(vxsp)
# get canopy
canopy <- canopy(vxsp, ...)
# canopy height model
i <- j <- k <- ground_distance <- NULL # trick to avoid "no visible binding" note
chm <- merge(vxsp@data[canopy,
list(i, j, ground_distance),
on=list(i, j, k)],
vxsp@data[ground[!canopy, on=list(i, j, k)],
list(i, j, ground_distance),
on=list(i, j, k)],
all = TRUE)
data.table::setnames(chm, "ground_distance", "canopy_height")
return ( chm )
}
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/Canopy.R |
#' @title VoxelSpace
#' @description Class that holds the state variables of every voxel of the voxel
#' space in a [`data.table::data.table-class`] object, plus metadata from the
#' voxel space header.
#' @docType class
#' @slot file the path of the voxel file (.vox).
#' @slot data the voxels hold in a data.table.
#' @slot header a list of parameters associated to this voxel file.
#' @return An object of class VoxelSpace.
#' @seealso [readVoxelSpace()]
#' @name VoxelSpace-class
#' @rdname VoxelSpace-class
#' @export
setClass(
Class="VoxelSpace",
slots=c(
file="character",
data="data.table",
header="list"
)
)
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/Classes.R |
# AMAPVox namespace variable
leafAngleDistribution = c("planophile",
"erectophile",
"extremophile",
"plagiophile",
"spherical",
"uniform",
"ellipsoidal",
"twoParamBeta")
#' Foliage projection ratio G(theta).
#'
#' @description Compute the mean projection of unit leaf area on the plane
#' perpendicular to beam direction, namely, G(theta) parameter. Assumption of
#' symmetric distribution of leaf azimuth angle.
#' When estimating G for large amount of theta values, it is advised to enable
#' the lookup table for speeding up the calculation.
#' @details Leaf Angle Distribution functions
#' * de Wit’s leaf angle distribution functions:
#' * \strong{uniform}, proportion of leaf angle is the same at any angle
#' * \strong{spherical}, relative frequency of leaf angle is the same as for
#' surface elements of a sphere
#' * \strong{planophile}, horizontal leaves most frequent
#' * \strong{erectophile}, vertical leaves most frequent
#' * \strong{plagiophile}, oblique leaves most frequent
#' * \strong{extremophile}, oblique leaves least frequent
#' * \strong{ellipsoidal} distribution function, generalization of the spherical
#' distribution over an ellipsoid. Relative frequency of leaf angle is the same
#' as for surface elements of an ellipsoid. Takes one parameter `chi` the ratio
#' horizontal axis over vertical axis. For `chi = 1` the distribution becomes
#' spherical. For `chi < 1`, the ellipsoid is a prolate spheroid (like a
#' rugby ball). For `chi > 1` the ellipsoid is an oblate spheroid (a sphere that
#' bulges at the equator and is somewhat squashed at the poles).
#' * \strong{two parameters Beta} distribution. Most generic approach from Goel
#' and Strebel (1984) to represent large variety of leaf angle distribution. Takes
#' two parameters `mu` and `nu` that control the shape of the Beta
#' distribution.
#' @param theta a numeric vector, theta, the incident beam inclination, in radian,
#' ranging `[0, pi/2]`.
#' @param pdf the name of the probability density function of the leaf angle
#' distribution. One of "uniform", "spherical", "planophile", "erectophile",
#' "plagiophile", "extremophile", "ellipsoidal", "twoParamBeta". Refer to
#' section "Leaf Angle Distribution functions" for details.
#' @param chi a float, parameter of the ellipsoidal leaf angle distribution.
#' The ratio of the horizontal axis over the vertical axis. See section "Leaf
#' Angle Distribution functions" for details.
#' @param mu a float, parameter controlling the Beta distribution. See section
#' "Leaf Angle Distribution functions" for details.
#' @param nu a float, parameter controlling the Beta distribution. See section
#' "Leaf Angle Distribution functions" for details.
#' @param with.lut a Boolean, whether to estimate G with a lookup table (LUT).
#' By default the lookup table is automatically generated when length of theta
#' vector is greater than 100.
#' @param lut.precision a float, the increment of the theta sequence ranging
#' from 0 to pi/2 for computing the lookup table.
#' @references Wang, W. M., Li, Z. L., & Su, H. B. (2007).
#' Comparison of leaf angle distribution functions: effects on extinction
#' coefficient and fraction of sunlit foliage. Agricultural and Forest
#' Meteorology, 143(1), 106-122.
#' @examples
#' # G(theta) == 0.5 for spherical distribution
#' all(computeG(theta = runif(10, 0, pi/2)) == 0.5) # returns TRUE
#' # ellipsoidal distribution
#' computeG(theta = runif(10, 0, pi/2), pdf = "ellipsoidal", chi = 0.6)
#' @seealso [plotG()] for plotting G(theta) profiles
#' @export
computeG <- function(theta, pdf = "spherical", chi, mu, nu,
with.lut = length(theta) > 100, lut.precision = 0.001) {
stopifnot(all(is.numeric(theta)))
# normalize theta in [0, pi / 2]
theta <- theta %% pi
theta <- ifelse(theta > (pi / 2), pi - theta, theta)
if (with.lut & lut.precision > 0) {
theta.lut <- seq(0, pi / 2, by = lut.precision)
lut <- computeG(theta.lut, pdf, chi, mu, nu, with.lut = FALSE)
ind <- round(theta / lut.precision) + 1
ind[ind > length(lut)] <- length(lut)
return ( lut[ind] )
} else {
return ( sapply(theta, computeGtheta, pdf, chi, mu, nu) )
}
}
# planophile probability density of leaf angle distribution
# planophile == horizontal leaves most frequent
# thetaL parameter, the leaf inclination angle in radian [0, pi/2]
dplanophile <- function(thetaL) {
stopifnot(dplyr::between(thetaL, 0, pi / 2))
  # probability density of leaf angle distribution
(2 / pi) * (1 + cos(2 * thetaL))
}
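# Quick sanity sketch (not part of the package): a proper leaf angle density
# must integrate to 1 over [0, pi/2]; shown here for the planophile case, and
# the same check applies to the other densities below.
integrate(dplanophile, 0, pi / 2)$value  # ~ 1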
# erectophile probability density of leaf angle distribution
# erectophile == vertical leaves most frequent
# thetaL parameter, the leaf inclination angle in radian [0, pi/2]
derectophile <- function(thetaL) {
stopifnot(dplyr::between(thetaL, 0, pi / 2))
(2 / pi) * (1 - cos(2 * thetaL))
}
# plagiophile probability density of leaf angle distribution
# plagiophile == oblique leaves most frequent
# thetaL parameter, the leaf inclination angle in radian [0, pi/2]
dplagiophile <- function(thetaL) {
stopifnot(dplyr::between(thetaL, 0, pi / 2))
(2 / pi) * (1 - cos(4 * thetaL))
}
# extremophile probability density of leaf angle distribution
# extremophile == oblique leaves are least frequent
# thetaL parameter, the leaf inclination angle in radian [0, pi/2]
dextremophile <- function(thetaL) {
stopifnot(dplyr::between(thetaL, 0, pi / 2))
(2 / pi) * (1 + cos(4 * thetaL))
}
# spherical probability density of leaf angle distribution
# spherical == relative frequency of leaf angle is the same as for surface
# elements of a sphere
# thetaL parameter, the leaf inclination angle in radian [0, pi/2]
dspherical <- function(thetaL) {
stopifnot(dplyr::between(thetaL, 0, pi / 2))
sin(thetaL)
}
# uniform probability density of leaf angle distribution
# uniform == proportion of leaf angle is the same at any angle
# thetaL parameter, the leaf inclination angle in radian [0, pi/2]
duniform <- function(thetaL) {
stopifnot(dplyr::between(thetaL, 0, pi / 2))
2 / pi
}
# ellipsoidal probability density of leaf angle distribution
# ellipsoidal == relative frequency of leaf angle is the same as for surface
# elements of an ellipsoid.
# thetaL parameter, the leaf inclination angle in radian [0, pi/2]
# chi parameter, the ratio horizontal axis over vertical axis. For chi = 1
# the distribution becomes spherical. For chi in [0, 1[, the ellipsoid is
# a prolate spheroid (like a rugby ball). For chi > 1 the ellipsoid is an
# oblate spheroid (a sphere that bulges at the equator and somewhat squashed
# at the poles).
dellipsoidal <- function(thetaL, chi) {
stopifnot(dplyr::between(thetaL, 0, pi / 2))
stopifnot(chi >= 0)
# chi == 1
if (chi == 1)
return(sin(thetaL))
# chi < 1
if (chi < 1) {
epsilon <- sqrt(1 - chi ^ 2)
lambda <- chi + asin(epsilon) / epsilon
}
# chi > 1
else {
epsilon <- sqrt(1 - 1 / (chi ^ 2))
lambda <- chi + log((1 + epsilon) / (1 - epsilon)) / (2 * epsilon * chi)
}
2 * (chi ^ 3) * sin(thetaL) / (lambda * (cos(thetaL) ^ 2 + (chi * sin(thetaL)) ^ 2 ) ^ 2)
}
# two parameters Beta probability density of leaf angle distribution
# Most generic approach from Goel and Strebel (1984) to represent a large
# variety of leaf angle distributions.
# thetaL parameter, the leaf inclination angle in radian [0, pi/2]
# mu and nu parameters, the Beta distribution parameters.
dtwoParamBeta <- function(thetaL, mu, nu) {
stopifnot(dplyr::between(thetaL, 0, pi / 2))
stopifnot(all(mu >= 0, nu >= 0))
t <- 2 * thetaL / pi
stats::dbeta(t, mu, nu) * (2 / pi)
}
# probability density function of leaf angle distribution
# thetaL parameter, the leaf inclination angle in radian [0, pi/2]
# pdf parameter, the name of the probability density function
# chi the parameter of the ellipsoidal probability density function
# mu & nu the parameters of the two-parameters Beta probability density function
dleaf <- function(thetaL, pdf = "spherical", chi, mu, nu) {
stopifnot(pdf %in% leafAngleDistribution)
if (pdf == "ellipsoidal") stopifnot(!missing(chi))
if (pdf == "twoParamBeta") stopifnot(all(!missing(mu), !missing(nu)))
switch(
pdf,
"planophile" = dplanophile(thetaL),
"erectophile" = derectophile(thetaL),
"extremophile" = dextremophile(thetaL),
"plagiophile" = dplagiophile(thetaL),
"spherical" = dspherical(thetaL),
"uniform" = duniform(thetaL),
"ellipsoidal" = dellipsoidal(thetaL, chi),
"twoParamBeta" = dtwoParamBeta(thetaL, mu, nu)
)
}
# internal function to compute G for a single theta value
computeGtheta <- function(theta, pdf, chi, mu, nu) {
stopifnot(length(theta) == 1)
stopifnot(is.character(pdf))
# special case G(theta) for spherical leaf angle distribution is always 0.5
# save computation time
if (pdf == "spherical") return(0.5)
thetaL.bin <- seq(0, pi / 2, length.out = 181 * 7)
nbin <- length(thetaL.bin) - 1
dthetaL <- thetaL.bin[-1] - thetaL.bin[-(nbin + 1)]
thetaL.x <- 0.5 * (thetaL.bin[-1] + thetaL.bin[-(nbin + 1)])
# large number of bins allows to skip proper integration of dleaf function
# fj <- sapply(seq(1, nbin), function(j) integrate(dleaf, thetaL.bin[j], thetaL.bin[j + 1], pdf)$value)
fj <- dleaf(thetaL.x, pdf, chi, mu, nu) * dthetaL
# avoid tan(pi/2)
theta.corr <- ifelse(theta == pi / 2, pi / 2 - 0.00000001, theta)
# internal auxiliary function used in computation of Gtheta
A <- function(thetaL) {
cotcot <- 1 / (tan(theta.corr) * tan(thetaL))
suppressWarnings(
return(
ifelse(
abs(cotcot) > 1 | is.infinite(cotcot),
cos(theta.corr) * cos(thetaL),
cos(theta.corr) * cos(thetaL) * (1 + (2 / pi) * (tan(acos(cotcot)) - acos(cotcot))))
)
)
}
# large number of bins allows to skip proper integration of A function
# hj <- sapply(seq(1, nbin), function(j) integrate(A, thetaL.bin[j], thetaL.bin[j + 1])$value)
hj <- A(thetaL.x) * dthetaL
# return G(theta)
sum(fj * hj / dthetaL)
}
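# Illustrative sketch (not part of the package): the lookup-table shortcut in
# computeG maps each theta to the nearest precomputed grid value, so direct
# and LUT estimates agree up to the grid precision. Hypothetical session:
# theta.demo <- runif(500, 0, pi / 2)
# g.direct <- computeG(theta.demo, "planophile", with.lut = FALSE)
# g.lut <- computeG(theta.demo, "planophile", lut.precision = 1e-3)
# max(abs(g.direct - g.lut))  # small, vanishing as lut.precision decreases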
#' Plot G(theta) profiles for one or several leaf angle distribution functions
#'
#' @description Plot G(theta) profiles for one or several leaf angle distribution
#' functions with `theta in [0, pi/2]`. Requires ggplot2 package.
#' @param pdf the name of the leaf angle distribution functions. One of
#' "uniform", "spherical", "planophile", "erectophile", "plagiophile",
#' "extremophile", "ellipsoidal", "twoParamBeta".
#' @param chi a float, parameter of the ellipsoidal leaf angle distribution.
#' The ratio of the horizontal axis over the vertical axis. See section "Leaf
#' Angle Distribution functions" for details.
#' @param mu a float, parameter controlling the Beta distribution. See section
#' "Leaf Angle Distribution functions" for details.
#' @param nu a float, parameter controlling the Beta distribution. See section
#' "Leaf Angle Distribution functions" for details.
#' @examples
#' \dontrun{
#' # plot G(theta) for planophile leaf angle distribution function
#' AMAPVox::plotG(pdf = "planophile")
#' # plot G(theta) for every distributions
#' AMAPVox::plotG()
#' }
#' @export
plotG <- function(pdf = leafAngleDistribution,
chi = 0.6,
mu = 1.1, nu = 1.3) {
# check for ggplot2 package
if (!requireNamespace("ggplot2", quietly = TRUE)) {
stop(
"Package \"ggplot2\" must be installed to plot G(\u03B8) profiles.",
"\n",
"> install.packages(\"ggplot2\")",
call. = FALSE)
}
theta <- seq(0, pi / 2, length.out = 91)
df.lad <- data.frame()
for (lad in pdf) {
LAD <- lad
LAD <- switch(
lad,
"ellipsoidal" = paste0("ellipsoidal (\u03C7=", chi, ")"),
"twoParamBeta" = paste0("twoParamBeta (\u03BC=", mu, ", \u03BD=", nu, ")"),
lad
)
df <- data.frame(theta = theta * (180 / pi),
Gtheta = AMAPVox::computeG(theta, pdf = lad, chi, mu, nu),
LAD)
df.lad <- rbind(df.lad, df)
}
Gtheta <- NULL # due to NSE notes in R CMD check
ggplot2::ggplot(data = df.lad, ggplot2::aes(x=theta, y=Gtheta)) +
ggplot2::geom_line(ggplot2::aes(colour=LAD)) +
ggplot2::ggtitle(sprintf("Foliage projection ratio G(\u03B8) for given Leaf Angle Distribution (LAD)")) +
ggplot2::xlab(sprintf("Beam angle \u03B8 [0:90\u00B0]")) +
ggplot2::ylab(sprintf("G(\u03B8)"))
}
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/ComputeG.R |
#' Crop voxel space
#'
#' @docType methods
#' @rdname crop
#'
#' @description Crop [`VoxelSpace-class`] object based on voxel i, j,
#' k, index. If cropping index are missing, the function will automatically crop
#' the voxel space by discarding outermost unsampled slices of voxels. A *slice*
#' designates a layer with constant i (i-slice), j (j-slice) or k (k-slice).
#' *unsampled* means that no pulse went through.
#'
#' One may want to crop the voxel space on coordinates rather than grid index.
#' To do so the voxel space must be first converted to an [`sf::sf`]
#' object and use the [sf::st_crop()] function.
#' ```
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' vxsp@data[, c("x", "y"):=getPosition(vxsp)[, .(x, y)]]
#' library(sf)
#' vx.sf <- sf::st_as_sf(vxsp@data, coords=c("x", "y"))
#' vx.sf <- sf::st_crop(vx.sf, c(xmin = 4, ymin = 1, xmax = 5, ymax = 4))
#' sf::st_bbox(vx.sf)
#' vxsp@data <- sf::st_drop_geometry(vx.sf)
#' ```
#'
#' @param vxsp a [`VoxelSpace-class`] object.
#' @param imin minimum i index of cropped area (inclusive)
#' @param imax maximum i index of cropped area (inclusive)
#' @param jmin minimum j index of cropped area (inclusive)
#' @param jmax maximum j index of cropped area (inclusive)
#' @param kmin minimum k index of cropped area (inclusive)
#' @param kmax maximum k index of cropped area (inclusive)
#'
#' @return Cropped voxel space with updated i, j, k grid coordinates and
#' updated header (min and max corner).
#'
#' @examples
#' \dontrun{
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' plot(crop(vxsp, imin = 1, imax = 5))
#' # introduce unsampled areas in voxel space
#' vxsp@data[i < 3, nbSampling:= 0]
#' # automatic cropping
#' plot(crop(vxsp))
#' }
#'
#' @export
crop <- function(vxsp,
imin = 0, imax = Inf,
jmin = 0, jmax = Inf,
kmin = 0, kmax = Inf) {
# must be a voxel space
stopifnot(is.VoxelSpace(vxsp))
i <- j <- k <- nbSampling <- NULL # due to NSE notes in R CMD check
# get current max index
vx.imax <- vxsp@data[, max(i)]
vx.jmax <- vxsp@data[, max(j)]
vx.kmax <- vxsp@data[, max(k)]
# automatic crop
if (imin == 0 & imax == Inf
& jmin == 0 & jmax == Inf
& kmin == 0 & kmax == Inf) {
irange <- range(which(vxsp@data[, sum(nbSampling, na.rm = T), by = i][[2]] > 0)) - 1
imin <- irange[1]
imax <- irange[2]
jrange <- range(which(vxsp@data[, sum(nbSampling, na.rm = T), by = j][[2]] > 0)) - 1
jmin <- jrange[1]
jmax <- jrange[2]
krange <- range(which(vxsp@data[, sum(nbSampling, na.rm = T), by = k][[2]] > 0)) - 1
kmin <- krange[1]
kmax <- krange[2]
}
# replace negative min index by zero, replace Inf index by max index
if (imin < 0) imin <- 0
if (imax == Inf) imax <- vx.imax
if (jmin < 0) jmin <- 0
if (jmax == Inf) jmax <- vx.jmax
if (kmin < 0) kmin <- 0
if (kmax == Inf) kmax <- vx.kmax
# crop index must be positive numeric
stopifnot(is.numeric(imin), is.numeric(imax),
is.numeric(jmin), is.numeric(jmax),
is.numeric(kmin), is.numeric(kmax))
stopifnot(all(c(imin, imax, jmin, jmax, kmin, kmax) >= 0))
# min <= max
stopifnot(imin <= imax, jmin <= jmax, kmin <= kmax)
# check whether there is anything to crop
if (imin == 0 & imax == vx.imax
& jmin == 0 & jmax == vx.jmax
& kmin == 0 & kmax == vx.kmax) {
# nothing to do
message("Nothing to crop")
return(vxsp)
}
# crop
vx.cropped <- vxsp@data[i >= imin & i <= imax
& j >= jmin & j <= jmax
& k >= kmin & k <= kmax, ]
# update i, j, k index
vx.cropped[, i:=(i-imin)]
vx.cropped[, j:=(j-jmin)]
vx.cropped[, k:=(k-kmin)]
# update header parameters
mincorner <- unlist(getPosition(vxsp, c(imin, jmin, kmin)))
maxcorner <- unlist(getPosition(vxsp, c(imax, jmax, kmax)))
dim <- c(imax - imin + 1, jmax - jmin + 1, kmax - kmin + 1)
# cropped voxel space
vxsp.cropped <- new(Class=("VoxelSpace"))
vxsp.cropped@file <- vxsp@file
vxsp.cropped@header <- vxsp@header
vxsp.cropped@header$mincorner <- mincorner
vxsp.cropped@header$maxcorner <- maxcorner
vxsp.cropped@header$dim <- dim
# overwrite voxels
vxsp.cropped@data <- vx.cropped
# return cropped voxel space
return(vxsp.cropped)
}
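# Toy sketch (not part of the package) of the automatic cropping rule above:
# keep the index range of slices holding at least one sampled voxel.
# Hypothetical one-dimensional example:
# library(data.table)
# vx.demo <- data.table(i = 0:4, nbSampling = c(0, 0, 5, 3, 0))
# range(which(vx.demo[, sum(nbSampling, na.rm = TRUE), by = i][[2]] > 0)) - 1
# # returns c(2, 3): unsampled outermost slices i = 0, 1 and 4 are discarded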
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/Crop.R |
#' Fill missing values (NA) with averaged neighboring data
#'
#' @docType methods
#' @rdname fillNA
#'
#' @description Fill missing values of a given variable in a VoxelSpace object
#' with averaged neighboring values.
#'
#' Neighboring values are selected among voxels within a user-defined radius
#' in meter and whose sampling rate (number of pulses that went through the
#' voxel) is above a user-defined threshold. Distance between voxels is the
#' Euclidean distance between voxel centers. Fill-value may be capped by
#' user-defined minimal and maximal values.
#'
#' Default radius (if not defined by user) is set to largest dimension of voxel
#' size `max(getVoxelSize(vxsp))`. It guarantees that default neighborhood is
#' isotropic.
#'
#' In some cases, for instance poorly sampled area, neighboring values may all
#' be missing or discarded. A fallback value can be provided to "force fill"
#' such voxels. Another option is to run the function again with a larger
#' radius or a lower sampling threshold.
#'
#' @param vxsp a [`VoxelSpace-class`] object.
#' @param variable.name a character, the name of a variable in the VoxelSpace
#' @param variable.min a numeric, minimal value for the fill values
#' @param variable.max a numeric, maximal value for the fill values
#' @param variable.fallback a numeric, optional fallback value in case no fill
#' value can be estimated from neighboring voxels.
#' @param radius a numeric, the radius in meter that defines the neighborhood of
#' a voxel. The function looks for the voxels whose center is inside a sphere
#' of radius `radius` centered at current voxel center. Default is set to
#' `max(getVoxelSize(vxsp))`
#' @param pulse.min a numeric, minimal sampling intensity (i.e. number of pulses
#' that went through a voxel) to include neighboring voxel in the estimation of
#' the averaged fill value.
#'
#' @examples
#' # read voxel space
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' # Randomly add some NA in PAD variable
#' vx <- vxsp@data
#' ind <- sample(vx[PadBVTotal > 0, which = TRUE], 3)
#' # print initial values
#' vx[ind, .(i, j, k, PadBVTotal)]
#' vx[ind, PadBVTotal := NA]
#' # fill NA in PAD variable
#' fillNA(vxsp, "PadBVTotal", variable.max = 5)
#' # print filled values
#' vx[ind, .(i, j, k, PadBVTotal)]
#'
#' @export
fillNA <- function(vxsp,
variable.name,
variable.min = -Inf, variable.max = Inf, variable.fallback,
radius, pulse.min = 10) {
# must be a voxel space
stopifnot(is.VoxelSpace(vxsp))
# variable must exist, only one variable at a time
stopifnot(variable.name %in% colnames(vxsp@data),
length(variable.name) == 1)
# check variable min & max
stopifnot(is.numeric(variable.min), length(variable.min) == 1,
is.numeric(variable.max), length(variable.max) == 1)
if (!missing(variable.fallback)) stopifnot(is.numeric(variable.fallback),
length(variable.fallback) == 1)
# radius missing
# default value set to largest voxel size to make sure there will be
# neighbors in every direction
if (missing(radius)) radius <- max(getVoxelSize(vxsp))
# radius must be a positive float
stopifnot(is.numeric(radius), length(radius) == 1)
# minimal number of pulses in voxel
stopifnot(is.numeric(pulse.min), pulse.min >= 0)
# pointer to voxels
vx <- vxsp@data
# extract NA voxels
vx.na <- vx[is.na(get(variable.name))]
# extract voxels whose number of pulse >= pulse.min
nbSampling <- NULL # due to NSE notes in R CMD check
vx.pool <- vx[!is.na(get(variable.name)) & nbSampling >= pulse.min]
# computes max number of neighboring voxels inside sphere(r=radius)
neighbors.k <- neighbors.max(getVoxelSize(vxsp), radius)
# identify valid neighbors
neighbors <- RANN::nn2(data = getPosition(vxsp, vx.pool),
query = getPosition(vxsp, vx.na),
k = neighbors.k,
searchtype = "radius", radius = radius)
neighbors <- neighbors$nn.idx
# mean value from neighborhood
fill.value <- apply(neighbors, 1, function(nghb) vx.pool[nghb, mean(get(variable.name), na.rm = T)])
# replace NaN by NA (mean function may return NaN if every neighbor is NA)
fill.value[which(is.nan(fill.value))] <- NA
# replace NA with fallback value, if provided
if (!missing(variable.fallback))
fill.value[which(is.na(fill.value))] <- variable.fallback
# warn if NA remain
na.count <- length(which(is.na(fill.value)))
  if (na.count > 0) warning(paste0(na.count, " NA left.", "\nSet a fallback value or run again with a larger radius or a lower minimal pulse threshold."))
# cap fill.value
fill.value[which(fill.value > variable.max)] <- variable.max
fill.value[which(fill.value < variable.min)] <- variable.min
# write fill values in data.table
vxsp@data[is.na(get(variable.name)), (variable.name):=fill.value]
}
# Computes number of voxels, given voxel size (in meter), whose centers are
# within sphere of given radius (in meter)
# internal function, not exported
neighbors.max <- function(voxel.size, radius) {
  dr <- round(radius / voxel.size)
  nr <- 2 * dr + 1
  x <- voxel.size[1] * rep(seq(-dr[1], dr[1]), each = nr[2] * nr[3])
  y <- voxel.size[2] * rep(rep(seq(-dr[2], dr[2]), each = nr[3]), times = nr[1])
  z <- voxel.size[3] * rep(seq(-dr[3], dr[3]), times = nr[1] * nr[2])
length(which(sqrt(x^2 + y^2 + z^2) <= radius))
}
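# Illustrative check (not part of the package): with cubic 1 m voxels and a
# 1 m radius, the neighborhood holds the 6 face-adjacent voxels plus the voxel
# itself.
neighbors.max(voxel.size = c(1, 1, 1), radius = 1)  # 7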
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/FillNA.R |
#' Tools inherited from base R for VoxelSpace object.
#'
#' @description Tools inherited from base R for [`VoxelSpace-class`]
#' objects.
#'
#' @section Note on `length.VoxelSpace`:
#' AMAPVox allows to discard empty voxels in the voxel file. In such case
#' `length.VoxelSpace` will return the expected number of voxels as if
#' none were missing. As a consequence the number of voxels stored in the
#' [`VoxelSpace-class`] object may be less than the returned
#' value, namely `nrow(x) <= length(x)`
#'
#' @param x a [`VoxelSpace-class`] object.
#' @param object a [`VoxelSpace-class`] object.
#' @param \dots further arguments passed to `print` function.
#'
#' @name tools
#' @rdname tools
NULL
#' @rdname tools
#' @export
setMethod ("show",
signature(object = "VoxelSpace"),
function(object) showVoxelSpace(object))
#' @rdname tools
#' @export
print.VoxelSpace <- function(x, ...) showVoxelSpace(x, ...)
#' @rdname tools
#' @export
length.VoxelSpace <- function(x) return (prod(x@header$dim))
#' @rdname tools
#' @export
dim.VoxelSpace <- function(x) return (x@header$dim)
#' @rdname tools
#' @export
is.VoxelSpace <- function (x) is(x, "VoxelSpace")
#' @export
#' @rdname tools
setMethod ("ncol",
signature(x = "VoxelSpace"),
function(x) ncol(x@data))
#' @export
#' @rdname tools
setMethod ("nrow",
signature(x = "VoxelSpace"),
function(x) nrow(x@data))
#' @export
#' @rdname tools
names.VoxelSpace <- function(x) return(names(x@data))
#' Gets a parameter from the VoxelSpace header.
#'
#' @docType methods
#' @rdname getParameter
#' @description Gets a parameter from the VoxelSpace header.
#' @param vxsp the [`VoxelSpace-class`] object
#' @param what the name of the parameter. If missing returns all parameters.
#' @return the parameter as a `character`
#' @include Classes.R
#' @seealso [`VoxelSpace-class`]
#' @examples
#' # load a voxel file
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' # show parameters name
#' names(getParameter(vxsp))
#' # retrieve 'mincorner' parameter
#' getParameter(vxsp, "mincorner")
#' # all parameters
#' getParameter(vxsp)
#' @export
setGeneric("getParameter",
function(vxsp, what){standardGeneric ("getParameter")})
#' Gets the x, y, z coordinates of the voxel space bottom left corner.
#'
#' @docType methods
#' @rdname getMinCorner
#' @description Gets the x, y, z coordinates of the voxel space bottom left
#' corner.
#' @param vxsp the [`VoxelSpace-class`] object.
#' @return the x, y, z coordinates of the voxel space bottom left corner, as a
#' numerical vector.
#' @examples
#' # load a voxel file
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' # retrieve 'min_corner' parameter
#' getMinCorner(vxsp)
#' @export
setGeneric("getMinCorner",
function(vxsp){standardGeneric ("getMinCorner")})
#' Gets the x, y, z coordinates of the voxel space top right corner.
#'
#' @docType methods
#' @rdname getMaxCorner
#' @description Gets the x, y, z coordinates of the voxel space top right
#' corner.
#' @param vxsp the [`VoxelSpace-class`] object.
#' @return the x, y, z coordinates of the voxel space top right corner, as a
#' numerical vector.
#' @examples
#' # load a voxel file
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' # retrieve 'max_corner' parameter
#' getMaxCorner(vxsp)
#' @export
setGeneric("getMaxCorner",
function(vxsp){standardGeneric ("getMaxCorner")})
#' Gets the elemental size of a voxel (dx, dy, dz) in meter.
#'
#' @docType methods
#' @rdname getVoxelSize
#' @description Gets the elemental size of a voxel (dx, dy, dz) in meter.
#' @param vxsp the [`VoxelSpace-class`] object.
#' @return the size of the voxel in meter, as a numerical vector.
#' @examples
#' # load a voxel file
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' # retrieve voxel size
#' getVoxelSize(vxsp)
#' @export
setGeneric("getVoxelSize",
function(vxsp){standardGeneric ("getVoxelSize")})
#' Gets the x, y, z coordinates of a given voxel.
#'
#' @docType methods
#' @rdname getPosition
#' @description Gets the x, y, z coordinates of the voxel center. If the voxel
#' parameter is missing, it returns the positions of all the voxels in the
#' voxel space.
#' @param vxsp a [`VoxelSpace-class`] object.
#' @param vx (i, j, k) voxel coordinates as a [`data.table::data.table-class`]
#' with i, j, k columns, a vector (i, j, k) or a matrix with i, j, k columns.
#' @return the x, y, z coordinates of the voxel center.
#' @examples
#' # load a voxel file
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#'
#' # get position of voxel(i=0, j=0, k=0)
#' getPosition(vxsp, c(0, 0, 0))
#'
#' # get position of voxels 1 to 10 in the data.table
#' getPosition(vxsp, vxsp@data[1:10,])
#'
#' # get positions of every voxel
#' getPosition(vxsp)
#' @export
setGeneric("getPosition",
function(vxsp, vx){standardGeneric ("getPosition")})
#' Clear voxel
#'
#' @docType methods
#' @rdname clear
#'
#' @description Clear a set of voxels. Clearing means that the state variables
#' of the selected voxels are altered as if they were *clear* of any vegetation.
#' Namely:
#' * number of echo set to zero
#' * intercepted beam surface set to zero (if variable is outputted)
#' * plant area density set to zero (if variable is outputted)
#' * transmittance set to one (if variable is outputted)
#' * any attenuation variable set to zero
#'
#' Other state variables such as sampling intensity, mean angle, entering beam
#' surface, etc. are unaltered. A cleared voxel is not the same as an unsampled
#' voxel (not "crossed" by any beam).
#'
#' @param vxsp a [`VoxelSpace-class`] object.
#' @param vx (i, j, k) voxel coordinates as a [`data.table::data.table-class`]
#' with i, j, k columns, a vector (i, j, k) or a matrix with i, j, k columns.
#'
#' @examples
#' # load a voxel file
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' # clear 1st voxel
#' clear(vxsp, c(0, 0, 0)) # clear 1st voxel
#' # clear butterflies
#' clear(vxsp, butterfly(vxsp))
#' # clear voxels with less than two hits
#' clear(vxsp, vxsp@data[nbEchos < 2])
#'
#' @export
setGeneric("clear",
function(vxsp, vx){standardGeneric ("clear")})
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/Generics.R |
#' @rdname getParameter
setMethod("getParameter", signature(vxsp="VoxelSpace", what="character"),
function(vxsp, what) {
stopifnot(
sum(!is.na(str_match(
names(vxsp@header),
paste0("^", what, "$")))) == 1)
return ( vxsp@header[[what]] )
})
#' @rdname getParameter
setMethod("getParameter", signature(vxsp="VoxelSpace", what="missing"),
function(vxsp, what) {
return ( vxsp@header )
})
#' @rdname getMinCorner
setMethod("getMinCorner", "VoxelSpace",
function(vxsp) {
return ( vxsp@header$mincorner )
})
#' @rdname getMaxCorner
setMethod("getMaxCorner", "VoxelSpace",
function(vxsp) {
return ( vxsp@header$maxcorner )
})
#' @rdname getVoxelSize
setMethod("getVoxelSize", "VoxelSpace",
function(vxsp) {
return ( vxsp@header$voxel.size )
})
#' @rdname getPosition
setMethod("getPosition", signature(vxsp="VoxelSpace", vx="vector"),
function(vxsp, vx) {
# 3 coordinates i, j, k
stopifnot(length(vx) == 3)
# i, j, k must be positive integers
stopifnot(as.integer(vx) == vx)
stopifnot(all(vx >=0))
# check i, j, k ranges
stopifnot(all((vx >= 0) & (vx < dim(vxsp))))
return (
callGeneric(vxsp,
data.table::data.table(i=vx[1], j=vx[2], k=vx[3])))
})
#' @rdname getPosition
setMethod("getPosition", signature(vxsp="VoxelSpace", vx="matrix"),
function(vxsp, vx) {
# 3 columns i, j, k
stopifnot(ncol(vx) == 3)
# i, j, k must be integers
stopifnot(as.integer(vx) == vx)
# check i, j, k ranges
stopifnot(
all(apply(vx, 1, function(vx) (vx >= 0) & (vx < dim(vxsp)))))
return (
callGeneric(vxsp,
data.table::data.table(i=vx[,1], j=vx[,2], k=vx[,3])))
})
#' @rdname getPosition
setMethod("getPosition", signature(vxsp="VoxelSpace", vx="data.table"),
function(vxsp, vx) {
# ensure existence of i, j, k
stopifnot(all(c("i", "j", "k") %in% colnames(vx)))
# extract i, j, k
pos <- vx[, c("i", "j", "k")]
# min corner and voxel size as local variables
minc <- vxsp@header$mincorner
res <- vxsp@header$voxel.size
# function for calculating the position
calcPos <- function(index, coord) minc[coord] + index * res[coord]
# compute x, y, z
i <- j <- k <- x <- y <- z <- NULL # due to NSE notes in R CMD check
pos <- pos[, x:=calcPos(i, "x")][, y:=calcPos(j, "y")][, z:=calcPos(k, "z")][, c("x", "y", "z")]
# return positions as data.table
return ( pos )
})
#' @rdname getPosition
setMethod("getPosition", signature(vxsp="VoxelSpace", vx="missing"),
function(vxsp, vx) {
return (
callGeneric(vxsp, vxsp@data[ , c("i", "j", "k")]))
})
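# Illustrative check (not part of the package): per calcPos above, voxel
# positions are an affine function of the grid index,
# pos = mincorner + index * voxel.size. Hypothetical session:
# vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
# unlist(getPosition(vxsp, c(1, 2, 3)))
# getMinCorner(vxsp) + c(1, 2, 3) * getVoxelSize(vxsp)  # same x, y, z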
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/Getters.R |
#' Extract ground from voxel space.
#'
#' @docType methods
#' @rdname ground
#'
#' @description Extract ground layer from [`VoxelSpace-class`] object.
#'
#' ## Ground layer
#'
#' The ground layer is the set of voxels that are just above ground level. The
#' bottom facet of the voxel must be above ground
#' `ground_distance(voxel_center) >= dz/2` with dz the voxel size on z axis.
#' Ground layer may be missing (the function returns an empty data.table) or
#' incomplete (the function returns a data.table with
#' `nrow(ground(vxsp)) < prod(dim(vxsp)[1:2])`) for some voxel space.
#'
#' ## Above/below ground
#'
#' Function `aboveGround` returns voxel index above ground layer (included).
#' Function `belowGround` returns voxel index below ground layer (excluded).
#'
#' ## Ground energy
#'
#' Function `groundEnergy` estimates fraction of light reaching the ground. It
#' is computed as the ratio of entering beam section on potential beam section
#' (beams that would have crossed a voxel if there were no vegetation in the
#' scene). It requires variables *bsEntering* and *bsPotential*.
#'
#' ## Ground elevation
#'
#' Function `groundElevation` returns the elevation of the ground layer. It is
#' provided as a check function, to make sure that AMAPVox
#' *digital elevation model* is consistent with the one provided in input.
#'
#' @return [`data.table::data.table-class`] object with voxel index either
#' below ground, ground level or above ground.
#'
#' @param vxsp a [`VoxelSpace-class`] object.
#'
#' @examples
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' gr <- ground(vxsp)
#' ag <- aboveGround(vxsp)
#' bg <- belowGround(vxsp) # empty in test case
#' # ground layer included in above ground subset
#' all(ag[gr, on=list(i, j, k)] == gr) # TRUE expected
#' # extract above ground voxels
#' vxsp@data[ag, on=list(i, j, k)]
#'
#' @export
ground <- function(vxsp) {
# must be a voxel space
stopifnot(is.VoxelSpace(vxsp))
# ground_distance variable required
stopifnot("ground_distance" %in% names(vxsp))
# z voxel size
dz <- getVoxelSize(vxsp)["z"]
# extract ground layer
i<- j <- k <- ground_distance <- NULL # trick to avoid "no visible binding" note
return ( vxsp@data[ground_distance >= (0.5 * dz)
& ground_distance < (1.5 * dz), list(i, j, k)] )
}
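# Toy sketch (not part of the package): with dz = 1 m, only a voxel center at
# 0.75 m from the ground satisfies the ground-layer condition above; 0.25 m is
# below ground and 1.75 m is above the ground layer.
dz.demo <- 1
gd.demo <- c(0.25, 0.75, 1.75)
gd.demo >= 0.5 * dz.demo & gd.demo < 1.5 * dz.demo  # FALSE TRUE FALSE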
#' @rdname ground
#' @export
belowGround <- function(vxsp) {
# must be a voxel space
stopifnot(is.VoxelSpace(vxsp))
# ground_distance variable required
stopifnot("ground_distance" %in% names(vxsp))
# z voxel size
dz <- getVoxelSize(vxsp)["z"]
# return i, j, k index of below ground voxels
i<- j <- k <- ground_distance <- NULL # trick to avoid "no visible binding" note
return ( vxsp@data[ground_distance < (0.5 * dz), list(i, j, k)] )
}
#' @rdname ground
#' @export
aboveGround <- function(vxsp) {
# must be a voxel space
stopifnot(is.VoxelSpace(vxsp))
# ground_distance variable required
stopifnot("ground_distance" %in% names(vxsp))
# z voxel size
dz <- getVoxelSize(vxsp)["z"]
# return i, j, k index of below ground voxels
i<- j <- k <- ground_distance <- NULL # trick to avoid "no visible binding" note
return ( vxsp@data[ground_distance >= (0.5 * dz), list(i, j, k)] )
}
#' @rdname ground
#' @export
groundEnergy <- function(vxsp) {
# get ground
ground <- ground(vxsp)
# bsEntering and bsPotential variables required
stopifnot(c("bsEntering", "bsPotential") %in% names(vxsp))
# ground energy
i <- j <- k <- bsEntering <- bsPotential <- NULL # trick to avoid "no visible binding" note
vxsp@data[ground,
list(i, j, ground_energy=bsEntering / bsPotential),
on=list(i, j, k)]
}
#' @rdname ground
#' @export
groundElevation <- function(vxsp) {
# get ground
ground <- ground(vxsp)
# ground elevation
i <- j <- k <- NULL # trick to avoid "no visible binding" note
dem <- vxsp@data[ground, list(i, j), on = list(i, j, k)]
dem[["ground_elevation"]] <- getPosition(vxsp,
vxsp@data[ground, on=list(i, j, k)])[["z"]]
return (dem)
}
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/Ground.R |
#' Extract or Replace Parts of a VoxelSpace Object
#'
#' @description Operators acting on [`VoxelSpace-class`] object. If the user
#' attempts to modify the grid index (i, j, k), an error is thrown (use
#' [crop()] instead).
#'
#' @param x a [`VoxelSpace-class`] object
#' @param name A literal character string or a name (possibly backtick quoted).
#' @param value typically an array-like R object of a similar class as x.
#' @param i string, name of elements to extract.
#' @param j Unused.
#' @param \dots Unused.
#'
#' @name Extract
#' @examples
#' # load a voxel file
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#'
#' # extract columns or header parameters
#' vxsp$nbSampling
#' vxsp[["i"]]
#' vxsp[["mincorner"]]
#'
#' \dontrun{
#' # add new column
#' vxsp[["pad_capped"]] <- ifelse(vxsp$PadBVTotal > 0.5, 0.5, vxsp$PadBVTotal)
#' # update header parameter
#' vxsp[["max_pad"]] <- 0.5
#' }
#'
NULL
#' @export
#' @rdname Extract
setMethod("$", "VoxelSpace", function(x, name) { x[[name]] })
#' @export
#' @rdname Extract
setMethod("[[", c("VoxelSpace", "ANY", "missing"), function(x, i, j, ...) {
if (is.character(i) && !i %in% names(x@data))
return(x@header[[i]])
return(x@data[[i]])
})
#' @export
#' @rdname Extract
setMethod("$<-", "VoxelSpace", function(x, name, value)
{
x[[name]] <- value
return(x)
})
#' @export
#' @aliases [[<-,VoxelSpace,ANY,missing-method
#' @rdname Extract
setMethod("[[<-", c("VoxelSpace", "ANY", "missing", "ANY"), function(x, i, j, value)
{
# replace header parameter
if (i %in% names(x@header)) {
x@header[[i]] <- value
return(x)
}
# forbid operation on grid coordinates
if (i %in% c("i", "j", "k")) {
stop("Direct modification of grid index is not allowed",
"Please use AMAPVox::crop function instead.")
}
# replace or add new column in data.table
x@data[[i]] <- value
return(x)
})
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/Operators.R |
#' Plant Area Density (PAD)
#'
#' @docType methods
#' @rdname plantAreaDensity
#'
#' @description Computes Plant Area Density either from transmittance or
#' attenuation coefficient estimates.
#' Details of calculation and underlying assumptions can be found online at
#' \doi{10.23708/1AJNMP}.
#' PAD is defined as the plant area per unit volume
#' (PAD = plant area / voxel volume = m^2 / m^3).
#'
#' @param vxsp a [`VoxelSpace-class`] object.
#' @param vx a subset of voxel index. A data.table with `i, j, k` columns.
#' Missing parameter means whole voxel space.
#' @param lad the name of the probability density function of the leaf angle
#' distribution. One of `AMAPVox:::leafAngleDistribution`.
#' @param angle.name the name of the mean angle variable in the VoxelSpace
#' object.
#' @param variable.name the name of the transmittance/attenuation variables in
#' the VoxelSpace object. Transmittance variables are expected to start with
#' "tra" and attenuation variables with "att".
#' @param pad.max a float, the maximal PAD value
#' @param pulse.min an integer, the minimal number of pulses in a voxel for
#' computing the PAD. PAD set to NA otherwise.
#' @param ... additional parameters which will be passed to the leaf angle
#' distribution functions. Details in [computeG()].
#'
#' @return A voxel space object with the requested PAD variables.
#'
#' @seealso [computeG()]
#'
#' @references VINCENT, Gregoire; PIMONT, François; VERLEY, Philippe, 2021,
#' "A note on PAD/LAD estimators implemented in AMAPVox 1.7",
#' \doi{10.23708/1AJNMP}, DataSuds, V1
#'
#' @examples
#' # load a voxel file
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' # compute PAD
#' pad <- plantAreaDensity(vxsp, variable.name = "attenuation_PPL_MLE")
#' # merge pad variables into voxel space
#' vxsp@data <- merge(vxsp@data, pad, by = c("i", "j", "k"))
#' grep("^pad", names(vxsp), value = TRUE) # print PAD variables in vxsp
#' # PAD on a subset
#' pad.i2j3 <- plantAreaDensity(vxsp, vxsp@data[i ==2 & j==3, .(i, j, k)])
#' pad.i2j3[["ground_distance"]] <- vxsp@data[i ==2 & j==3]$ground_distance
#' \dontrun{
#' # plot vertical profile
#' library(ggplot2)
#' # meld data.table (wide-to-long reshaping)
#' pad <- data.table::melt(pad.i2j3,
#' id.vars = "ground_distance",
#' measure.vars = c("pad_transmittance", "pad_attenuation_FPL_unbiasedMLE",
#' "pad_attenuation_PPL_MLE"))
#' ggplot(data = pad, aes(x=value, y=ground_distance, color=variable)) +
#' geom_path() + geom_point()
#' }
#' @export
plantAreaDensity <- function(vxsp, vx,
lad = "spherical",
angle.name = "angleMean",
variable.name = c("transmittance",
"attenuation_FPL_unbiasedMLE",
"attenuation_PPL_MLE"),
pad.max = 5, pulse.min = 5,
...) {
# must be a voxel space
stopifnot(is.VoxelSpace(vxsp))
# vx subset missing == every voxel from vxsp
if (missing(vx)) {
i <- j <- k <- NULL # trick to get rid of R CMD check warning with data.table
vx <- vxsp@data[, list(i, j, k)]
}
# vx must be data.table with i, j, k columns
stopifnot(any(class(vx) == "data.table"))
stopifnot(c("i", "j", "k") %in% names(vx))
# check leaf angle distribution
stopifnot(lad %in% leafAngleDistribution)
# angle variable must exist
stopifnot(angle.name %in% colnames(vxsp@data))
# only keep variable name that exists in voxel space
variables <- variable.name[which(variable.name %in% names(vxsp))]
if (length(variables) == 0) {
stop(paste("Variables", paste(variable.name, collapse = ", "), "cannot be found in voxelspace."))
}
# transmittance / attenuation variable must start, by convention, either
# by "tra" of "att"
stopifnot(all(grepl("(^tra*)|(^att*)", variables)))
# subset of voxels
vx.subset <- vxsp@data[vx, on = list(i, j, k)]
# pad data.table
pad.dt <- data.table::data.table(vx.subset[, list(i, j, k)])
# loop over requested variables
for (variable in variables) {
nbSampling <- padtmp <- NULL # due to NSE notes in R CMD check
# initialize PAD vector with NA
pad <- rep(NA, nrow(vx.subset))
# index of voxels such as number of pulses greater than pulse.min
index <- vx.subset[nbSampling >= pulse.min, which = TRUE]
# compute G(θ)
gtheta <- computeG(vx.subset[[angle.name]][index], lad, ...)
# compute PAD
if (grepl("^tra", variable)) {
# from transmittance
pad[index] <- log(vx.subset[index, get(variable)]) / (-gtheta)
} else {
# from attenuation
pad[index] <- vx.subset[index, get(variable)] / gtheta
}
# cap PAD
pad[which(pad > pad.max)] <- pad.max
# add PAD variable into PAD data.table
pad.name <- paste0("pad_", variable)
pad.dt[[pad.name]] <- pad
}
return ( pad.dt )
}
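# Illustrative sketch (not part of the package) of the two PAD estimators used
# above, with hypothetical values; for a spherical LAD, G(theta) = 0.5.
gtheta.demo <- 0.5
pad.from.transmittance <- log(0.7) / (-gtheta.demo)  # -ln(T) / G(theta)
pad.from.attenuation <- 0.18 / gtheta.demo           # attenuation / G(theta)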
#' Plant Area Index (PAI)
#'
#' @docType methods
#' @rdname plantAreaIndex
#'
#' @description Computes Plant Area Index (PAI) from Plant Area Density (PAD).
#' PAI is defined as the plant area per unit ground surface area (PAI = plant
#' area / ground area = m^2 / m^2).
#'
#' The function can estimate PAI on the whole voxel space or any region of
#' interest (parameter vx subset of voxels). It can compute PAI from several
#' perspectives : either an averaged PAI value, a two-dimensions (i, j) PAI
#' array or vertical profiles either above ground or below canopy.
#'
#' @param vxsp a [`VoxelSpace-class`] object.
#' @param vx a subset of voxel index. A data.table with `i, j, k` columns.
#' Missing parameter means whole voxel space.
#' @param type a character vector, the type of PAI profile.
#' * `"av"` Averaged value on every voxel
#' * `"ag"` Above ground vertical profile
#' * `"bc"` Below canopy vertical profile
#' * `"xy"` Spatial profile
#' @param pattern.pad character string containing a
#' [regular expression][base::regex] to be matched in the voxel space
#' variable names, for selecting PAD variables. Typing the name of a specific
#' PAD variable works just fine.
#'
#' @return Returns a list of PAI profiles for requested PAD variables and PAI
#' types.
#'
#' ## `av` Averaged PAI
#'
#' Returns a single value. Calculated as the sum of PAD values multiplied by
#' voxel volume and divided by the ground surface area with vegetation.
#'
#' ## `ag & bc` Above ground and below canopy PAI vertical profile
#'
#' Returns a vertical profile of PAI values either from ground distance or
#' canopy depth. Calculated as the averaged PAD values per layer (a layer
#' being defined by either the distance to ground or canopy level) multiplied
#' by voxel size along z (equivalent to multiplying PAD by voxel volume and
#' dividing by voxel ground surface).
#'
#' ## `xy` Spatial PAI profile
#'
#' Returns a list of PAI values by i, j index. Calculated as the sum of PAD on
#' (i, j) column multiplied by voxel size along z (equivalent to multiplying
#' PAD by voxel volume and dividing by voxel ground surface).
#'
#' @seealso [plantAreaDensity()]
#'
#' @examples
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' vxsp@data <- merge(vxsp@data, plantAreaDensity(vxsp), by = c("i", "j", "k"))
#' \dontrun{
#' lai <- plantAreaIndex(vxsp)
#' names(lai)
#' library(ggplot2)
#' ggplot(data = lai[["pad_transmittance.pai.ag"]], aes(x=pai, y=ground_distance)) +
#' geom_path() + geom_point()
#' }
#' # PAI on a subset
#' ni <- round(dim(vxsp)[1]/2)
#' vx <- vxsp@data[i < ni, .(i, j, k)]
#' lai <- plantAreaIndex(vxsp, vx)
#'
#' @export
plantAreaIndex <- function(vxsp, vx,
type = c("av", "ag", "bc", "xy"),
pattern.pad = "^pad_*") {
# must be a voxel space
stopifnot(is.VoxelSpace(vxsp))
# vx subset missing == every voxel from vxsp
if (missing(vx)) {
i <- j <- k <- NULL # trick to get rid of R CMD check warning with data.table
vx <- vxsp@data[, list(i, j, k)]
}
# vx must be data.table with i, j, k columns
stopifnot(any(class(vx) == "data.table"))
stopifnot(c("i", "j", "k") %in% names(vx))
# type one of av, ag, bc, xy
stopifnot(is.character(type), type %in% c("av", "ag", "bc", "xy"))
# pad variables
pad.variables <- grep(pattern.pad, names(vxsp), value = TRUE)
if (length(pad.variables) == 0)
stop(paste("There is not any PAD variables matching pattern", dQuote(pattern.pad, q = FALSE), "in vxsp"))
# empty pai list
pai.all <- list()
dz <- unname(getVoxelSize(vxsp)["z"])
# loop on PAD variable
for (pad.variable in pad.variables) {
# data.table of voxels with required variables for PAI calculation
i <- j <- k <- ground_distance <- NULL # trick to get rid of R CMD check warning with data.table
dt <- vxsp@data[vx,
list(i, j, k, ground_distance, pad=get(pad.variable)),
on = list(i, j, k)]
# loop on PAI type
for (pai.type in type) {
      # handle different PAI type
if ("av" == pai.type) {
#
# Averaged PAI
#
# number of i, j cells with some vegetation
pad <- NULL # trick to get rid of R CMD check warning with data.table
        n.cell <- dt[, list(n.cell=sum(pad, na.rm = TRUE) > 0), by=c("i", "j")][, sum(n.cell)]
        # pai = sum(pad) * dxdydz / (n.cell * dxdy) = sum(pad) * dz / n.cell
        pai <- dt[, sum(pad, na.rm = TRUE) * dz] / n.cell
#
} else if ("ag" == pai.type) {
#
# Above ground PAI
#
grd <- ground(vxsp)
pai <- merge(dt, grd,
by = c("i", "j"),
suffixes = c("", ".grd")
)
k <- k.grd <- dk <- ground_distance <- NULL # trick to get rid of R CMD check warning with data.table
pai <- pai[, list(pai=mean(pad, na.rm = TRUE)),
by=list(dk = k - k.grd)][dk >= 0 & !is.na(pai) ][order(dk)]
pai[, ground_distance := dk * dz][, dk := NULL]
#
} else if ("bc" == pai.type) {
#
# Below canopy PAI
#
cnp <- canopy(vxsp)
pai <- merge(dt, cnp,
by = c("i", "j"),
suffixes = c("", ".cnp"))
k <- k.cnp <- dk <- canopy_depth <- NULL # trick to get rid of R CMD check warning with data.table
pai <- pai[, list(pai=mean(pad, na.rm = TRUE)),
by=list(dk = k.cnp - k)][dk >= 0 & !is.na(pai) ][order(dk)]
pai[, canopy_depth := dk * dz][, dk := NULL]
#
} else if ("xy" == pai.type) {
#
# X, Y PAI
#
pad <- NULL # trick to get rid of R CMD check warning with data.table
        pai <- dt[, list(pai=sum(pad, na.rm = TRUE) * dz), by=c("i", "j")]
}
# append pai to list
pai.all[[paste(pad.variable, "pai", pai.type, sep = ".")]] <- pai
} # end loop pai.type
} # end loop pad.variable
if (length(pai.all) == 1)
return ( pai.all[[1]])
else
return ( pai.all )
}
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/PlantArea.R |
#' Plot an object of class VoxelSpace
#'
#' Plot an object of class VoxelSpace in a 3d device. By default it plots the
#' sampling intensity but the user can choose any variable available in the
#' voxel file.
#'
#' @description plot a [`VoxelSpace-class`] object.
#' @param x the object of class VoxelSpace to plot
#' @param y a subset of voxel index. A data.table with `i, j, k` columns.
#' Missing parameter means whole voxel space.
#' @param variable.name character, the name of the variable to plot
#' @param palette character, a valid palette name (one of hcl.pals())
#' @param bg.color character, a valid background color name (one of colors())
#' @param width numeric, the width of the window
#' @param voxel.size numeric, the size of voxel in pixels
#' @param unsampled.discard logical, whether to discard unsampled voxel
#' @param empty.discard logical, whether to discard empty voxel (no hit)
#' @param ... additional parameters which will be passed to [rgl::plot3d()].
#' @include Classes.R
#' @seealso [rgl::plot3d()]
#' @examples
#' \dontrun{
#' # load a voxel file
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' # plot sampling intensity by default
#' plot(vxsp)
#' # plot PAD
#' plot(vxsp, variable.name = "PadBVTotal", palette = "YlOrRd")
#' # plot a subset
#' plot(vxsp, vxsp@data[k > 4, .(i, j, k)])
#' }
#' @export
#' @method plot VoxelSpace
setGeneric("plot", function(x, y, ...)
standardGeneric("plot"))
#' @rdname plot
setMethod("plot",
signature(x = "VoxelSpace", y = "missing"),
function(x, y, variable.name = "nbSampling",
palette = "viridis", bg.color = "lightgrey",
width = 640, voxel.size = 5,
unsampled.discard = TRUE, empty.discard = TRUE,
...) {
i <- j <- k <- NULL
return (
callGeneric(x, x@data[, list(i, j, k)],
variable.name = variable.name,
palette = palette, bg.color = bg.color,
width = width, voxel.size = voxel.size,
unsampled.discard = unsampled.discard,
empty.discard = empty.discard,
...))
})
#' @rdname plot
setMethod("plot",
signature(x = "VoxelSpace", y = "data.table"),
function(x, y, variable.name = "nbSampling",
palette = "viridis", bg.color = "lightgrey",
width = 640, voxel.size = 5,
unsampled.discard = TRUE, empty.discard = TRUE,
...) {
# check if rgl package is installed
if (!(requireNamespace("rgl", quietly = TRUE) &
requireNamespace("fields", quietly = TRUE))) {
stop(
"Package \"rgl\" & \"fields\" must be installed to plot VoxelSpace object",
"\n",
">> install.packages(c(\"rgl\", \"fields\"))",
call. = FALSE)
}
# must be a voxel space
stopifnot(is.VoxelSpace(x))
# y must be data.table with i, j, k columns
stopifnot(any(class(y) == "data.table"))
stopifnot(c("i", "j", "k") %in% names(y))
# make sure variable exists
stopifnot(variable.name %in% colnames(x@data))
# make sure variable nbSampling exists if discard unsampled voxel is TRUE
          stopifnot(!unsampled.discard | ('nbSampling' %in% colnames(x@data)))
          # make sure variable nbEchos exists if discard empty voxel is TRUE
          stopifnot(!empty.discard | ('nbEchos' %in% colnames(x@data)))
# discard empty voxels
i <- j <- k <- NULL # trick to get rid of R CMD check warning with data.table
vx <- x@data[y, on=list(i, j, k)]
nbSampling <- nbEchos <- NULL # due to NSE notes in R CMD check
if (unsampled.discard) vx <- vx[nbSampling > 0]
if (empty.discard) vx <- vx[nbEchos > 0]
# compute x, y, z positions
pos <- getPosition(x, vx)
# extract variable to plot
variable <- unlist(vx[, variable.name, with = FALSE])
# variable range
varLim <- range(variable, finite = TRUE)
varLen <- varLim[2] - varLim[1]
# color look-up table
colorlut <- grDevices::hcl.colors(1024, palette = palette)
col <- colorlut[ceiling(1023 * (variable - varLim[1]) / varLen) + 1]
# palette
palette(colorlut)
# 3d plot
rgl::par3d(windowRect = 200 + c( 0, 0, width, 0.8 * width ) )
rgl::plot3d(pos$x, pos$y, pos$z,
col=col, size=voxel.size, aspect="iso",
xlab="x", ylab="y", zlab="z")
rgl::bgplot3d({
# background color
graphics::par(bg= bg.color)
graphics::plot.new()
# colorbar
fields::image.plot(legend.only = TRUE, add = TRUE, zlim = varLim,
col = colorlut)
# main title
graphics::title(main = paste('Voxel space - Variable', variable.name))
})
})
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/PlotVoxelSpace.R |
## given a string "numeric separator numeric separator numeric ..." this
## function will test several separators for splitting and returns the one
## that yields the shortest leading token (the most likely separator).
.guessSeparator <- function(str){
SEPARATORS <- c(space = " " , equal = "=", semicolon = ";",
coma = ",", colon = ":", tab = "\t")
guess <- which.min(
nchar(lapply(stringr::str_split(str,SEPARATORS), "[", i = 1)))
separator <- SEPARATORS[guess]
return(separator)
}
## 3d point coordinates in voxel file header may be written in several ways.
## this function parses the string and returns a (x, y, z) vector.
## Accepted formats: {[( numeric separator numeric separator numeric ... )]}
## leading and trailing brackets are removed
## guess separator between numeric values and split
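## e.g. "(1.0, 2.0, 3.0)" returns c(x = 1, y = 2, z = 3); a single value such
## as "0.5" is recycled to c(x = 0.5, y = 0.5, z = 0.5)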
.parseNumericVector <- function(str) {
vec <- as.numeric(unlist(
stringr::str_split(
stringr::str_squish(stringr::str_remove_all(str, "[\\(\\)\\[\\]\\{\\}]")),
.guessSeparator(str))))
if (length(vec) == 1) vec <- rep(vec, 3)
names(vec) <- c("x", "y", "z")
return(vec)
}
#' Read a voxel file
#'
#' @docType methods
#' @rdname readVoxelSpace
#' @description read a voxel file and cast it into a [`VoxelSpace-class`]
#' object.
#'
#' Zipped voxel file is accepted. AMAPVox uses user cache directory to unzip
#' the file ([rappdirs::user_cache_dir()]).
#' @param f The path of the voxel file.
#' @include Classes.R
#' @seealso [writeVoxelSpace()]
#' @examples
#' # load a voxel file
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' @export
readVoxelSpace <- function(f){
if (grepl("*.zip$", f)) {
# unzip first
cache.dir <- rappdirs::user_cache_dir("AMAPVox", "IRD")
cat("Unzip voxel file", basename(f), "in cache directory ", cache.dir, "\n")
f.unzip <- utils::unzip(f,
junkpaths = TRUE,
exdir = cache.dir)
vx.file <- normalizePath(f.unzip[1])
} else {
vx.file <- normalizePath(f)
}
  # read header
conn <- file(vx.file, open="r")
  # check that 1st line is VOXEL SPACE
firstLine <- readLines(conn, n=1)
stopifnot(
!is.na(stringr::str_match(stringr::str_trim(firstLine), "^VOXEL SPACE$")))
# loop over header
rawParameters <- list()
nLineHeader <- 0
while ( TRUE ) {
# read next line
line <- stringr::str_squish(readLines(conn, n = 1))
nLineHeader <- nLineHeader + 1
# check if line starts with hash
if ( stringr::str_starts(line, "#") ) {
# parse header line
# may be several parameters on same line, separated by hash
lineSplit <- stringr::str_squish(stringr::str_split(line, "#")[[1]][-1])
# split key:value
lineParam <- vapply(lineSplit,
function(p) unlist(stringr::str_split(p, ":")),
character(2))
colnames(lineParam) <- as.character(lineParam[1,])
rawParameters <- c(rawParameters, lineParam[-1,])
} else {
break
}
}
# closes vox file
close(conn)
# new VoxelSpace object
vxsp <- new(Class=("VoxelSpace"))
# set file slot
vxsp@file <- f
## Predefined parameters
parameters <- list(
# number of lines
nline = as.integer(nLineHeader),
# column names
columnNames = unlist(stringr::str_split(line, " ")),
# min corner
mincorner = .parseNumericVector(rawParameters["min_corner"]),
# max corner
maxcorner = .parseNumericVector(rawParameters["max_corner"]),
# split
dim = .parseNumericVector(rawParameters["split"]),
# voxel size
voxel.size = .parseNumericVector(rawParameters["res"])
)
# Other parameters
parameters <- c(parameters,
rawParameters[!(names(rawParameters) %in%
c("min_corner", "max_corner",
"split", "res")) ])
vxsp@header <- parameters
# read voxels
vxsp@data <- data.table::fread(vx.file, header = TRUE, skip = nLineHeader)
return (vxsp)
}
## specific implementation of the "show" function for a VoxelSpace object
showVoxelSpace <- function(vxsp) {
cat(class(vxsp)[1],'\n')
cat(" file", vxsp@file, sep='\t', '\n')
writeLines(paste0(" ",
paste(names(vxsp@header),
vxsp@header,
sep='\t')))
cat(" output variables",
paste(vxsp@header$columnNames, collapse=", "),
'\n',
sep='\t')
show(vxsp@data)
}
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/ReadVoxelSpace.R |
#' Run AMAPVox
#'
#' @docType methods
#' @rdname run
#' @description Run AMAPVox either in batch mode or with Graphical User
#' Interface (GUI). The function embeds a version manager for installing
#' locally any version available remotely.
#'
#' AMAPVox versions equal to or prior to 1.10 require Java 8 on your Operating
#' System. Refer to section *Java 8 64-Bit* for details.
#'
#' The `gui` function has been kept for backward compatibility. It is an alias
#' of the `run` function.
#' @section Java 8 64-Bit: AMAPVox versions equal to or prior to 1.10 rely on
#' Java/JavaFX 64-Bit. It must be installed on the Operating System before
#' running AMAPVox. In practice it requires either [Java 8 64-Bit
#' Oracle](https://java.com/download/) or [Java 8 64-Bit
#' Corretto](https://aws.amazon.com/fr/corretto/). Mind that OpenJDK 8 will
#' not work for AMAPVox GUI since JavaFX is not included in this distribution.
#' Nonetheless for AMAPVox in batch mode, any version of Java 64-bit >= 8
#' should work.
#'
#' You may check beforehand if java is installed on your system and which
#' version.
#' ```
#' system2("java", args = "-version")
#' ```
#' If AMAPVox::run keeps throwing errors after you have installed a suitable
#' Java 8 64-Bit, it means that Java 8 may not be properly detected by
#' your system. In such case you may have to check and set the `JAVA_HOME`
#' environment variable.
#' ```
#' Sys.getenv("JAVA_HOME")
#' Sys.setenv(JAVA_HOME="path/to/java/8/bin")
#' system2("java", args = "-version")
#' ```
#' As a last resort you may change the `java` parameter of this function and
#' set the full path to Java 8 binary.
#' ```
#' AMAPVox::run("1.10.4", java = "/path/to/java/8/bin/java")
#' ```
#' @param version, either "latest" or a valid version number major.minor(.build).
#' If `version="latest"` the function looks for the latest remote version. If
#' there is no internet connection it runs latest local version.
#' @param xml path(s) to AMAPVox XML configuration files. If missing or `NULL`
#' AMAPVox launches the GUI.
#' @param java path to the java executable. Ignored for AMAPVox version >= 2.0
#' since Java is embedded within AMAPVox binary. Default 'java' value assumes
#' that java is correctly defined on the $PATH variable.
#' @param jvm.options JVM (Java Virtual Machine) options. By default it
#' allocates 2Go of heap memory to AMAPVox.
#' @param nt maximum number of threads for running tasks. `nt=1` means
#' sequential execution. `nt=0` means as many threads as available.
#' @param ntt maximum number of threads per task. `ntt=0` means as many threads
#' as available.
#' @param stdout where output from both stdout/stderr should be sent. Same as
#' stdout & stderr options from function [system2()].
#' @seealso [getLocalVersions()], [getRemoteVersions()], [installVersion()] and
#' [removeVersion()]
#' @examples
#' \dontrun{
#' # (install and) run latest AMAPVox version with GUI
#' AMAPVox::run()
#' # (install and) run version 2.0.0 with GUI
#' AMAPVox::run(version="2.0.0")
#' # run latest AMAPVox version with XML configuration
#' AMAPVox::run(xml="/path/to/cfg.xml")
#' # run multiple configurations
#' AMAPVox::run(xml=c("cfg1.xml", "cfg2.xml"), nt=2)
#' }
#' @include VersionManager.R
#' @export
run <- function(version="latest",
xml,
java = "java", jvm.options = "-Xms2048m",
nt = 1, ntt = 1,
stdout = "") {
# handle versions
version <- versionManager(version)
# no JVM options
if(is.null(jvm.options)) jvm.options = ""
# local AMAPVox
localVersions <- getLocalVersions()
amapvox <- localVersions[which(localVersions == version), ]
# look for java and check version
if (is_v1(version))
check.java(java)
else
java <- ifelse(get_os() == "windows",
file.path(amapvox$path, "runtime", "bin", "java.exe"),
file.path(amapvox$path, "lib", "runtime", "bin", "java"))
# Generate the execution expression
if (missing(xml) || is.null(xml)) {
# AMAPVox GUI
if (compVersion(version, "1.10.1") < 0) {
jar.path <- file.path(amapvox$path,
paste0("AMAPVox-", version, ".jar"))
} else if (compVersion(version, "2.0.0") < 0) {
jar.path <- file.path(amapvox$path,
paste0("AMAPVoxGUI-", version, ".jar"))
} else {
if (get_os() == "windows") {
jar.path <- file.path(amapvox$path,
"app",
paste0("AMAPVox-", version, ".jar"))
} else {
jar.path <- file.path(amapvox$path,
"lib", "app",
paste0("AMAPVox-", version, ".jar"))
}
}
# normalize path
jar.path <- normalizePath(jar.path, mustWork = TRUE)
# JVM options
args = ifelse(is_v1(version),
paste(jvm.options, "-jar", jar.path),
paste(jvm.options,
"--add-opens javafx.graphics/javafx.scene=ALL-UNNAMED",
"-jar", jar.path))
} else {
# AMAPVox batch mode
# configuration file must exist
stopifnot(all(file.exists(xml)))
if (is_v1(version)) {
jar.path <- file.path(amapvox$path,
paste0("AMAPVox-", version, ".jar"))
} else {
if (get_os() == "windows") {
jar.path <- file.path(amapvox$path,
"app",
paste0("AMAPVox-", version, ".jar"))
} else {
jar.path <- file.path(amapvox$path,
"lib", "app",
paste0("AMAPVox-", version, ".jar"))
}
}
jar.path <- normalizePath(jar.path, mustWork = TRUE)
# validate number of threads and number of threads per task
stopifnot(all(is.wholenumber(nt), nt >= 0, is.wholenumber(ntt), ntt >= 0))
# jar options
    # use if/else rather than ifelse so that multiple configuration files
    # are passed along in a single argument string
    jar.options = if (compVersion(version, "1.10.1") >= 0) {
      paste(paste0("--T=", nt), paste0("--TT=", ntt),
            paste(xml, collapse = " "))
    } else {
      paste(paste0("--execute-cfg=\"", paste(xml, collapse = " "), "\""),
            paste0("--T=", nt), paste0("--T-TLS_VOX=", ntt))
    }
args = paste(jvm.options, "-jar", jar.path, jar.options)
}
# concatenate command
command = paste(c(shQuote(java), args), collapse = " ")
# run AMAPVox
message(paste("Running AMAPVox", version))
message(command)
if (get_os() == "windows")
system2(java, args = args, stdout = stdout, stderr = stdout, wait = TRUE,
invisible = FALSE)
else
system2(java, args = args, stdout = stdout, stderr = stdout, wait = TRUE)
return(invisible(command))
}
#' @export
#' @rdname run
gui <- function(version="latest",
java = "java", jvm.options = "-Xms2048m",
stdout = "") {
# call run() function
  AMAPVox::run(version, xml = NULL, java = java,
               jvm.options = jvm.options, stdout = stdout)
}
# internal util function
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) {
abs(x - round(x)) < tol
}
# check java version for AMAPVox v1
# must be Java 8 64 bit Oracle or Corretto (sic!)
check.java <- function(java = "java") {
res <- suppressWarnings(
system2(java, args = "-version", stdout = NULL, stderr = NULL))
if (res != 0) {
stop(paste("R did not find 'java' command.",
" Make sure Java 8 64-Bit is properly installed"))
} else {
# java is installed, make sure it is Java 8 64-Bit
jvrs <- system2(java, args = "-version", stdout = TRUE, stderr = TRUE)
# java 8 64-Bit Oracle or Corretto for JavaFX support
if (!(grepl("1\\.8\\.0", jvrs[1])
& (grepl("Java\\(TM\\)", jvrs[2]) | grepl("Corretto", jvrs[2]))
& grepl("64\\-[bB]it", jvrs[3]))) {
stop("unsupported java version\n", paste(" ", jvrs, "\n"),
"Must be Java 8 64-Bit, Oracle or Corretto.\n",
"Read help function for details.")
}
}
}
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/Run.R |
#' @rdname clear
setMethod("clear", signature(vxsp="VoxelSpace", vx="data.table"),
function(vxsp, vx) {
# ensure existence of i, j, k
stopifnot(all(c("i", "j", "k") %in% colnames(vx)))
# clear voxels
var.cleared <- c("nbEchos", "bsIntercepted", "PadBVTotal",
"attenuation_FPL_biasedMLE",
"attenuation_FPL_biasCorrection",
"attenuation_PPL_MLE")
i <- j <- k <- NULL # trick to avoid "no visible binding" note
for (var in var.cleared) {
if (var %in% names(vxsp)) {
vxsp@data[vx, (var):=0, on=list(i, j, k)]
}
}
# special case for transmittance, set to 1
if ("transmittance" %in% names(vxsp)) {
vxsp@data[vx, ("transmittance"):= 1, on=list(i, j, k)]
}
})
#' @rdname clear
setMethod("clear", signature(vxsp="VoxelSpace", vx="vector"),
function(vxsp, vx) {
# 3 coordinates i, j, k
stopifnot(length(vx) == 3)
# i, j, k must be positive integers
stopifnot(as.integer(vx) == vx)
stopifnot(all(vx >=0))
# check i, j, k ranges
stopifnot(all((vx >= 0) & (vx < dim(vxsp))))
return (
callGeneric(vxsp,
data.table::data.table(i=vx[1], j=vx[2], k=vx[3])))
})
#' @rdname clear
setMethod("clear", signature(vxsp="VoxelSpace", vx="matrix"),
function(vxsp, vx) {
# 3 columns i, j, k
stopifnot(ncol(vx) == 3)
# i, j, k must be integers
stopifnot(as.integer(vx) == vx)
# check i, j, k ranges
stopifnot(
all(apply(vx, 1, function(vx) (vx >= 0) & (vx < dim(vxsp)))))
return (
callGeneric(vxsp,
data.table::data.table(i=vx[,1], j=vx[,2], k=vx[,3])))
})
#' Voxel layer to raster
#'
#' @docType methods
#' @rdname toRaster
#'
#' @description Converts a voxel space (i, j) layer into a
#' [`terra::SpatRaster-class`] object.
#'
#' @param vxsp a [`VoxelSpace-class`] object.
#' @param vx a voxel space horizontal slice. A data.table with `i, j` columns
#' and at least one additional variable, the value of the raster layer. Every
#' column beside i and j will be converted into a raster layer.
#'
#' @return a [`terra::SpatRaster-class`] object.
#'
#' @examples
#' \dontrun{
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' library(terra)
#'
#' # CHM, DEM and PAI as raster
#' plot(toRaster(vxsp, merge(canopyHeight(vxsp), groundElevation(vxsp), all = TRUE)))
#'
#' # PAI
#' vxsp <- plantAreaDensity(vxsp)
#' pai <- plantAreaIndex(vxsp, type = "xy", pattern.pad = "pad_transmittance")
#' plot(toRaster(vxsp, pai))
#'
#' # sampling intensity at 2 meters
#' plot(toRaster(vxsp, vxsp@data[ground_distance == 2.25, .(i, j, nbSampling)]))
#' }
#'
#' @export
toRaster <- function(vxsp, vx) {
# check for terra package
if (!requireNamespace("terra", quietly = TRUE)) {
stop(
"Package \"terra\" must be installed to convert voxel layer into raster.",
"\n",
"> install.packages(\"terra\")",
call. = FALSE)
}
# must be a voxel space
stopifnot(is.VoxelSpace(vxsp))
# vx must be data.table with i, j columns
stopifnot(any(class(vx) == "data.table"))
stopifnot(c("i", "j") %in% names(vx))
# there must be a third column beside i, j
stopifnot(ncol(vx) >= 3)
# i, j coordinates must be unique (no vertical dimension)
i <- j <- N <- .N <- NULL # trick to get rid of R CMD check warning with data.table
  if (vx[, .N, by=list(i, j)][, any(N > 1)])
stop("The `vx` layer must have unique (i, j) coordinates.")
# layers name
layers.name <- colnames(vx)[! colnames(vx) %in% c("i", "j")]
nx <- dim(vxsp)[1]
ny <- dim(vxsp)[2]
xmin <- AMAPVox::getMinCorner(vxsp)[1]
ymin <- AMAPVox::getMinCorner(vxsp)[2]
xmax <- AMAPVox::getMaxCorner(vxsp)[1]
ymax <- AMAPVox::getMaxCorner(vxsp)[2]
  # terra::raster and AMAPVox voxel space do not have the same convention for
  # the plot origin, so reorder the cell index
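  # e.g. with nx = ny = 2, voxel (i = 0, j = 1) maps to raster cell 1 (top-left)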
ind <- 1 + vx[["i"]] + nx * (ny - vx[["j"]] - 1)
# raster layers list
r <- list()
# loop on layers
for (layer.name in layers.name) {
layer <- rep(NA, length.out = nx * ny)
layer[ind] <- vx[[layer.name]]
r[[layer.name]] <- terra::rast(nrows = ny, ncols = nx,
xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax,
vals = layer)
}
# stack layers into a single raster
return ( terra::rast(r))
}
#' Merge two voxel spaces
#'
#' @docType methods
#' @rdname merge
#'
#' @description Merge two [`VoxelSpace-class`] objects.
#' Voxel spaces must have the same spatial extension and resolution, and some
#' shared column names.
#'
#' ## Merging modes
#'
#' Variables `i, j, k & ground_distance` are merged.
#'
#' Variables `nbEchos, nbSampling, lgTotal, bsEntering, bsIntercepted,
#' bsPotential, weightedEffectiveFreepathLength & weightedFreepathLength`
#' are summed-up.
#'
#' Variables `sdLength, angleMean and distLaser` are weighted means with
#' `nbSampling` (the number of pulses) as weights.
#'
#' Attenuation FPL variables (`attenuation_FPL_biasedMLE`,
#' `attenuation_FPL_biasCorrection`, `attenuation_FPL_unbiasedMLE`) and
#' `lMeanTotal` are calculated analytically.
#'
#' Transmittance and attenuation variables (except the FPL attenuation
#' variables listed above) are weighted means with bsEntering as weights.
#'
#' Any other variables will not be merged. In particular PAD variables
#' are not merged and should be recalculated with
#' [plantAreaDensity()] on the merged voxel space.
#' ```
#' vxsp <- plantAreaDensity(merge(vxsp1, vxsp2))
#' ```
#' ## Merging multiple voxel spaces
#'
#' Merging several voxel spaces works as follows: vxsp1 and vxsp2 are merged
#' into vxsp12, then vxsp12 & vxsp3 into vxsp123, etc. The process can be
#' synthesized with the [Reduce()] function.
#' ```
#' vxsp <- Reduce(merge, list(vxsp1, vxsp2, vxsp3))
#' ```
#'
#' @param x,y [`VoxelSpace-class`] objects to be merged.
#' @param ... Not used
#' @return A merged [`VoxelSpace-class`] object.
#'
#' @examples
#' # merge same voxel space to confirm merging behavior
#' vxsp1 <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' vxsp2 <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' vxsp <- merge(vxsp1, vxsp2)
#' all(vxsp$nbSampling == vxsp1$nbSampling + vxsp2$nbSampling)
#'
#' # with PAD
#' vxsp <- plantAreaDensity(merge(vxsp1, vxsp2), pulse.min = 1)
#' all((vxsp$pad_transmittance - vxsp1$PadBVTotal) < 1e-7) # equal at float precision
#'
#' @export
merge.VoxelSpace <- function(x, y, ...) {
# must be a voxel space
stopifnot(is.VoxelSpace(x), is.VoxelSpace(y))
# same spatial extension
stopifnot(getMinCorner(x) == getMinCorner(y))
stopifnot(getMaxCorner(x) == getMaxCorner(y))
stopifnot(getVoxelSize(x) == getVoxelSize(y))
# shared variables
variables.merged <- intersect(names(x), names(y))
if (!all(c("i", "j", "k", "nbSampling") %in% variables.merged)) {
stop("i, j, k & nbSampling variables are mandatory for merging voxel spaces.")
}
# Discarded variables
x.discarded <- names(x)[which(!names(x) %in% variables.merged)]
y.discarded <- names(y)[which(!names(y) %in% variables.merged)]
if (length(x.discarded) == 1)
warning(paste("Variable", x.discarded,
"from x is not in y. Discarded from merging."))
else if (length(x.discarded) > 1)
warning(paste("Variables", x.discarded,
"from x are not in y. Discarded from merging."))
if (length(y.discarded) == 1)
warning(paste("Variable", y.discarded,
"from y is not in x. Discarded from merging."))
else if (length(y.discarded) > 1)
warning(paste("Variables", y.discarded,
"from y are not in x. Discarded from merging."))
# raw merge
.SD <- .SDcols <- NULL # trick to get rid of `no visible binding` note
vx.raw <- data.table::merge.data.table(
x@data[, .SD, .SDcols=variables.merged],
y@data[, .SD, .SDcols=variables.merged],
all = TRUE,
by = c("i", "j", "k"),
suffixes = c(".x", ".y"))
i <- j <- k <- NULL # trick to get rid of `no visible binding` note
vx.merged = vx.raw[, list(i, j, k)]
  # list of predefined custom merges
variable.custom <- c("i", "j", "k", "ground_distance", "lMeanTotal",
"attenuation_FPL_biasedMLE",
"attenuation_FPL_biasCorrection",
"attenuation_FPL_unbiasedMLE")
# ground distance
ground_distance.y <- ground_distance <- NULL # trick to get rid of `no visible binding` note
vx.merged[["ground_distance"]] <- vx.raw[["ground_distance.x"]]
ind.grd.y <- vx.raw[!is.na(ground_distance.y), which = TRUE]
vx.merged[ind.grd.y,
ground_distance:=vx.raw[["ground_distance.y"]][ind.grd.y]]
# weighted mean function for merging
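  # argument x is packed as c(value.x, value.y, weight.x, weight.y)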
wmean <- function(x) {
return ( stats::weighted.mean(x[1:2], x[3:4], na.rm = TRUE))
}
# merge variables one at a time
for (variable in
variables.merged[which(!variables.merged %in% variable.custom)]) {
if (variable %in% c("nbEchos", "nbSampling", "lgTotal",
"bsPotential", "bsEntering", "bsIntercepted",
"weightedEffectiveFreepathLength",
"weightedFreepathLength")) {
# sum
vx.merged[[variable]] <- vx.raw[, apply(.SD, 1, sum, na.rm=TRUE),
.SDcols=paste0(variable, c(".x", ".y"))]
} else if (variable %in% c("sdLength", "angleMean", "distLaser")) {
# weighted mean on number of sampling
xy <- cbind(vx.raw[[paste0(variable, ".x")]],
vx.raw[[paste0(variable, ".y")]],
vx.raw[["nbSampling.x"]],
vx.raw[["nbSampling.y"]])
vx.merged[[variable]] <- apply(xy, 1, wmean)
} else if (grepl("^(att|tra)", variable)) {
# weighted mean on entering beam surface for attenuation/transmittance
if ("bsEntering" %in% variables.merged) {
xy <- cbind(vx.raw[[paste0(variable, ".x")]],
vx.raw[[paste0(variable, ".y")]],
vx.raw[["bsEntering.x"]],
vx.raw[["bsEntering.y"]])
vx.merged[[variable]] <- apply(xy, 1, wmean)
} else {
warning("`", variable,"` cannot be merged without `bsEntering` variable.")
}
}
else {
# unknown or user-defined variable, not merged
warning("Variable `", variable, "` does not have predefined merging mode. Discarded from merging.")
}
}
# lMeanTotal = lgTotal / nbSampling
if (all(c("lMeanTotal", "lgTotal") %in% variables.merged)) {
lgTotal <- nbSampling <- lMeanTotal <- NULL # trick to get rid of `no visible binding` note
vx.merged[["lMeanTotal"]] <- vx.merged[, lgTotal / nbSampling]
vx.merged[is.infinite(lMeanTotal), lMeanTotal:=NA]
} else if ("lMeanTotal" %in% variables.merged) {
warning("`lMeanTotal` cannot be merged without `lgTotal` variable.")
}
# biased attenuation FPL (free path length)
if ("attenuation_FPL_biasedMLE" %in% variables.merged) {
if (all(c("bsIntercepted", "weightedEffectiveFreepathLength")
%in% variables.merged)) {
attenuation_FPL_biasedMLE <- bsIntercepted <- weightedEffectiveFreepathLength <- NULL # trick to get rid of `no visible binding` note
vx.merged[["attenuation_FPL_biasedMLE"]] <- vx.merged[, bsIntercepted / weightedEffectiveFreepathLength]
vx.merged[is.infinite(attenuation_FPL_biasedMLE), attenuation_FPL_biasedMLE:=NA]
} else {
warning("`attenuation_FPL_biasedMLE` cannot be merged without
`bsIntercepted` and `weightedEffectiveFreepathLength` variables.")
}
}
# attenuation FPL correction factor
if ("attenuation_FPL_biasCorrection" %in% variables.merged) {
if ("weightedEffectiveFreepathLength" %in% variables.merged) {
xx <- vx.raw[["attenuation_FPL_biasCorrection.x"]] *
vx.raw[["weightedEffectiveFreepathLength.x"]]^2 *
vx.raw[["nbSampling.x"]]
yy <- vx.raw[["attenuation_FPL_biasCorrection.y"]] *
vx.raw[["weightedEffectiveFreepathLength.y"]]^2 *
vx.raw[["nbSampling.y"]]
attenuation_FPL_biasCorrection <- weightedEffectiveFreepathLength <- nbSampling <- NULL # trick to get rid of `no visible binding` note
vx.merged[["attenuation_FPL_biasCorrection"]] <-
apply(cbind(xx, yy), 1, sum, na.rm = TRUE) / vx.merged[, weightedEffectiveFreepathLength^2 * nbSampling]
vx.merged[is.infinite(attenuation_FPL_biasCorrection), attenuation_FPL_biasCorrection:=NA]
} else {
warning("`attenuation_FPL_biasedMLE` cannot be merged without
`weightedEffectiveFreepathLength` variable")
}
}
# unbiased attenuation FPL
if ("attenuation_FPL_unbiasedMLE" %in% variables.merged) {
if (all(c("attenuation_FPL_biasedMLE", "attenuation_FPL_biasCorrection")
%in% variables.merged)) {
attenuation_FPL_biasedMLE <- attenuation_FPL_biasCorrection <- NULL # trick to get rid of `no visible binding` note
vx.merged[["attenuation_FPL_unbiasedMLE"]] <- vx.merged[, attenuation_FPL_biasedMLE - attenuation_FPL_biasCorrection]
} else {
warning("`attenuation_FPL_unbiasedMLE` cannot be merged without
`attenuation_FPL_biasedMLE` & `attenuation_FPL_biasCorrection` variables.")
}
}
# new VoxelSpace object
vxsp.merged <- new(Class=("VoxelSpace"))
vxsp.merged@header <- x@header
vxsp.merged@data <- vx.merged
# return merged voxel space
return ( vxsp.merged )
}
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/Utils-voxel.R |
## Version manager
## Important function, not exported though, because there is no reason for a
## direct call by end user. At this stage end user shall call AMAPVox::gui().
## What does it do? Given a requested version (either "latest" or major.minor or
## major.minor.build) the function either returns the requested version or the
## best match.
## If requested version is available locally, end of story. If not, check
## remotely and install if available. If not returns best match, remote if
## online, local otherwise.
## Throws an error if no approaching version can be found.
versionManager <- function(version="latest") {
# check internet connection
is.offline <- inherits(
try(curl::nslookup("forge.ird.fr"), silent = TRUE),
"try-error")
# list local versions
localVersions <- getLocalVersions()
# no local version and offline
if (is.null(localVersions) & is.offline) {
stop(paste("There are not any local version installed.",
"Computer is offline, cannot look for remote version. "))
}
# check updates
check.update = (version == "latest")
# no local version, set arbitrary version 0.0
if (is.null(localVersions)) {
version <- "0.0"
} else if (version == "latest") {
# latest local version
version <- utils::tail(localVersions$version, 1)
}
# valid version number l.m(.n)
stopifnot(is.validVersion(version, expanded = FALSE))
if (is.offline) {
## OFFLINE
# resolve local version
if (!inherits(
try(resolveLocalVersion(version, silent = TRUE), silent = TRUE),
"try-error")) {
localVersion <- resolveLocalVersion(version)
cmp <- compVersion(version, localVersion)
# requested version older than local version
if (cmp < 0)
warning(paste("Computer is offline, cannot check if version", version,
"is available online."),
call. = FALSE, immediate. = TRUE)
# requested version newer than local version
if (cmp > 0)
warning(paste("Computer is offline, cannot check if a newer version",
version, "is available online."),
call. = FALSE, immediate. = TRUE)
# cannot check update offline
if (check.update)
warning("Computer is offline, cannot check for update.",
call. = FALSE, immediate. = TRUE)
version <- localVersion
}
else
stop(paste("Version", version, "does not match any local versions",
"(", paste(localVersions$version, collapse = ", "), ").\n",
"Computer is offline, cannot check if version", version,
"is available online."),
call. = FALSE)
} else {
## ONLINE
# list remote versions
remoteVersions <- getRemoteVersions()
latestVersion <- utils::tail(remoteVersions, 1)$version
# update requested
if (check.update && (compVersion(version, latestVersion) < 0)) {
version <- latestVersion
message(paste("Check for updates. Latest version available",
latestVersion))
}
# check local version availability
if (!(version %in% localVersions$version)) {
# resolve remote version
version <- resolveRemoteVersion(version)
# install remote version
installVersion(version)
}
}
return(version)
}
#' List remote AMAPVox versions.
#'
#' @docType methods
#' @rdname getRemoteVersions
#' @description List AMAPVox versions available for download from AMAPVox Gitlab
#' package registry \url{https://forge.ird.fr/amap/amapvox/-/packages}
#' @return a `data.frame` with 2 variables: `$version` that stores
#' the version number and `$url` the URL of the associated ZIP file.
#' @seealso [getLocalVersions()]
#' @export
getRemoteVersions <- function() {
# get list of packages
url <- "https://forge.ird.fr/api/v4/groups/1604/packages?package_type=generic"
req <- curl::curl_fetch_memory(url)
pkgs <- jsonlite::fromJSON(jsonlite::prettify(rawToChar(req$content)))
# keep only AMAPVox packages
pkgs <- pkgs[pkgs$name == "amapvox", ]
# add url to list package files
url <- "https://forge.ird.fr/api/v4/projects"
pkgs <- cbind(pkgs,
files_path=paste(url, pkgs$project_id, "packages", pkgs$id, "package_files", sep="/"))
# list package files
zips <- data.table::rbindlist(apply(pkgs, 1, function(pkg) {
req <- curl::curl_fetch_memory(pkg$files_path)
pkg.files <- jsonlite::fromJSON(jsonlite::prettify(rawToChar(req$content)))
pkg.url <- paste("https://forge.ird.fr/api/v4/projects",
pkg$project_id,
"packages", "generic", "amapvox",
pkg$version,
pkg.files$file_name,
sep="/")
return( data.table::data.table(version=pkg$version, url=pkg.url))
}))
# sort versions
zips <- zips[orderVersions(zips$version),]
# return dataframe
return(zips)
}
#' List local AMAPVox versions.
#'
#' @docType methods
#' @rdname getLocalVersions
#' @description List AMAPVox versions already installed on your computer by
#' the package. AMAPVox versions are installed in the user-specific data
#' directory, as specified by [rappdirs::user_data_dir()].
#' @return a `data.frame` with 2 variables: `$version` that stores
#' the version number and `$path` the local path of the AMAPVox
#' directory.
#' @seealso [getRemoteVersions()], [rappdirs::user_data_dir()]
#' @export
getLocalVersions <- function() {
# local directory for AMAPVox binaries
binPath <- normalizePath(
file.path(rappdirs::user_data_dir("AMAPVox"), "bin"),
mustWork = FALSE)
# local directory does not exist, no local version yet
if (!dir.exists(binPath))
return(NULL)
# list existing folders
version <- list.dirs(binPath, full.names = FALSE, recursive = FALSE)
if (identical(version, character(0)))
return(NULL)
version <- stringr::str_extract(version, "\\d+\\.\\d+\\.\\d+")
path <- vapply(
list.dirs(binPath, full.names = TRUE, recursive = FALSE),
normalizePath,
character(1),
USE.NAMES = F)
# create dataframe and sort it along version
binaries <- data.frame(version, path)
binaries <- binaries[orderVersions(binaries$version),]
rownames(binaries) <- NULL
# return dataframe
return(binaries)
}
## check if valid version number major.minor(.build) with major, minor & build
## positive integers.
## expanded option controls whether the (.build) number is mandatory or not.
is.validVersion <- function(version, expanded = TRUE) {
# regex pattern for version number major.minor(.build)
pattern <- "^(\\d+)(\\.\\d+)?(\\.\\d+)$"
if (!grepl(pattern, version)) {
message(paste(version,
"is not a valid version number.",
"Must be \"l.m(.n)\" with l, m & n integers"))
return(FALSE)
}
return(ifelse(expanded,
length(strsplit(version, "\\.")[[1L]]) == 3,
TRUE))
}
## order version numbers respectively along major, minor and build numbers.
orderVersions <- function(versions) {
# valid version numbers only
stopifnot(all(vapply(versions, is.validVersion, logical(1))))
#
split <- t(vapply(strsplit(versions, "\\."), as.integer, integer(3)))
vrs <- data.frame(major = split[, 1], minor = split[, 2], build = split[, 3])
return(order(vrs$major, vrs$minor, vrs$build))
}
## compare two version numbers using the base function compareVersion
## transform "major.minor.build" into "major.minor-build" for compatibility with
## R version numbers.
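## e.g. compVersion("1.9.0", "1.10.4") returns -1, since minor version 9 < 10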
compVersion <- function(v1, v2) {
# expand version number l.m.n
v1.exp <- expandVersion(v1)
v2.exp <- expandVersion(v2)
# valid version numbers only
stopifnot(all(is.validVersion(v1.exp), is.validVersion(v2.exp)))
# reformat as R package version number
v1.exp <- gsub("\\.(\\d+)$", "-\\1", v1.exp)
v2.exp <- gsub("\\.(\\d+)$", "-\\1", v2.exp)
# compare with utils::compareVersion
return(utils::compareVersion(v1.exp, v2.exp))
}
## expand version number major.minor to major.minor.999 so that it resolves
## to the latest matching build
expandVersion <- function(version) {
stopifnot(is.validVersion(version, expanded = FALSE))
return(ifelse(
length(strsplit(version, "\\.")[[1L]]) == 3,
version,
paste0(version, ".999")
))
}
## given a version number and a list of version numbers (versions), the function
## resolves the version against the vector of version numbers.
## if version is contained in versions, it returns version
## if not it returns the latest version number from versions with matching
## major and minor numbers.
## at last throws error if none matches.
resolveVersion <- function(version, versions, silent) {
# expand version number l.m.n
version.exp <- expandVersion(version)
# valid version numbers only
stopifnot(all(vapply(rbind(version.exp, versions), is.validVersion, logical(1))))
# version matches remote version, check successful
if (version.exp %in% versions) return(version.exp)
# version does not match, try with short version major.minor without build
shortVersions <- sub("\\.\\d+$", "", versions)
shortVersion <- stringr::str_extract(version.exp, "^\\d+\\.\\d+")
# short version matches remote short version
if (shortVersion %in% shortVersions) {
# return latest build corresponding to short version
ind <- utils::tail(which(shortVersions == shortVersion), 1)
suggestedVersion <- versions[ind]
if (!silent)
message(paste0("Requested version ", version,
". Matching version ", suggestedVersion))
return(suggestedVersion)
}
# short version does not match any available version
stop(paste("Version", version,
"does not match any available versions",
"(", paste(versions, collapse = ", "), ")"))
}
## check if remote version is available, or suggest approaching version
## otherwise.
resolveRemoteVersion <- function(version, silent = FALSE) {
versions <- getRemoteVersions()
resolveVersion(version, versions$version, silent)
}
## check if local version is available, or suggest approaching version
## otherwise.
resolveLocalVersion <- function(version, silent = FALSE) {
versions <- getLocalVersions()
resolveVersion(version, versions$version, silent)
}
#' Install specific AMAPVox version on local computer.
#'
#' @docType methods
#' @rdname installVersion
#' @description Install specific AMAPVox version on your computer.
#' AMAPVox versions are installed in the user-specific data
#' directory, as specified by [rappdirs::user_data_dir()].
#' You should not worry to call directly this function since
#' local installations are automatically handled by the version manager
#' when you launch AMAPVox GUI with [gui()] function.
#' @param version, a valid and existing AMAPVox remote version number
#' (major.minor.build)
#' @param overwrite, whether existing local installation should be re-installed.
#' @param timeout maximum time in seconds before interrupting download.
#' @return the path of the AMAPVox installation directory.
#' @seealso [getLocalVersions()], [getRemoteVersions()], [removeVersion()]
#' @seealso [rappdirs::user_data_dir()]
#' @seealso [utils::download.file()], [utils::unzip()]
#' @examples
#' \dontrun{
#' # install latest version
#' installVersion(tail(getRemoteVersions()$version, 1))
#' }
#' @export
installVersion <- function(version, overwrite = FALSE,
timeout = 300) {
# make sure version number is valid and available
stopifnot(is.validVersion(version))
remoteVersions <- getRemoteVersions()
stopifnot(version %in% remoteVersions$version)
# local directory for AMAPVox binaries
binPath <- normalizePath(
file.path(rappdirs::user_data_dir("AMAPVox"), "bin"),
mustWork = FALSE)
versionPath <- normalizePath(
file.path(binPath, ifelse(is_v1(version),
paste0("AMAPVox-", version),
paste0("AMAPVox-", version, "-", get_os()))),
mustWork = FALSE)
# check whether requested version already installed
jar.dir <- ifelse(is_v1(version),
versionPath,
ifelse(get_os() == "windows",
file.path(versionPath, "app"),
file.path(versionPath, "lib", "app")))
jarPath <- normalizePath(
file.path(jar.dir, paste0("AMAPVox-", version, ".jar")),
mustWork = FALSE)
if (file.exists(jarPath) & !overwrite) {
message(paste("AMAPVox", version, "already installed in", versionPath))
return(versionPath)
}
# url to download
url <- remoteVersions$url[which(remoteVersions == version)]
# from AMAPVox v2 OS specific binaries
if (!is_v1(version)) {
# specific URL depending on OS
url <- url[which(grepl(get_os(), url))]
# unsupported OS
    if (length(url) == 0)
stop("Unsupported OS (", get_os(),
") Sorry! Email [email protected] to request specific binaries.")
}
# local destination
zipfile <- normalizePath(
file.path(binPath, basename(url)),
mustWork = FALSE)
# create local bin folder if does not exist
if (!dir.exists(binPath)) dir.create(binPath, recursive = TRUE,
showWarnings = FALSE)
# increase timeout and download zip
timeout.user = getOption("timeout")
options(timeout = timeout)
utils::download.file(url, zipfile, method = "auto", mode="wb")
# revert timeout to user default
options(timeout = timeout.user)
# unzip
utils::unzip(zipfile,
               # on Linux-like systems use the system unzip, which should preserve file permissions
unzip = getOption("unzip"),
exdir = ifelse(is_v1(version), versionPath, binPath))
# delete zip file
file.remove(zipfile)
message(paste("AMAPVox", version, "successfully installed in", versionPath))
return(versionPath)
}
#' Remove specific AMAPVox version from local computer.
#'
#' @docType methods
#' @rdname removeVersion
#' @description Uninstall specific AMAPVox version from your computer.
#' @param version, a valid and existing AMAPVox local version number
#' (major.minor.build)
#' @seealso [getLocalVersions()], [installVersion()]
#' @examples
#' \dontrun{
#' # uninstall oldest version from your computer
#' removeVersion(head(getLocalVersions()$version, 1))
#' }
#' @export
removeVersion <- function(version) {
# make sure version number is valid
stopifnot(is.validVersion(version))
localVersions <- getLocalVersions()
# version not installed
if (!(version %in% localVersions$version)) {
message(paste("Version", version, "not installed locallly. Nothing to do."),
call. = FALSE, immediate. = TRUE)
return(invisible(NULL))
}
# local version exists, uninstall it
path <- localVersions$path[which(localVersions == version)]
if (unlink(path, recursive = TRUE) == 0)
{
message(paste("Version", version, "successfully removed",
"(", path, ")."))
} else {
message(paste("Failed to delete folder", path,
", you may have to do so manually..."))
}
}
# get operating system 'windows', 'linux', 'osx'
get_os <- function() {
sysinf <- Sys.info()
if (!is.null(sysinf)){
os <- sysinf['sysname']
if (os == 'Darwin')
os <- "osx"
} else { ## mystery machine
os <- .Platform$OS.type
if (grepl("^darwin", R.version$os))
os <- "osx"
if (grepl("linux-gnu", R.version$os))
os <- "linux"
}
tolower(os)
}
# check whether given version is v1
# does not even check whether it is a valid version, assuming it has been
# tested already
is_v1 <- function(version) {
return(compVersion(version, "2.0.0") < 0)
}
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/VersionManager.R |
#' Write a voxel file
#'
#' @docType methods
#' @rdname writeVoxelSpace
#' @description write a voxel file out of a [`VoxelSpace-class`]
#' object.
#' @param vxsp the object of class VoxelSpace to write
#' @param f a character string naming a file.
#' @include Classes.R
#' @seealso [readVoxelSpace()]
#' @examples
#' \dontrun{
#' # load a voxel file
#' vxsp <- readVoxelSpace(system.file("extdata", "tls_sample.vox", package = "AMAPVox"))
#' # set max PAD to 5
#' vxsp@data[, PadBVTotal:=sapply(PadBVTotal, min, 5)]
#' # write updated voxel file in temporary file
#' writeVoxelSpace(vxsp, tempfile("pattern"="amapvox_", fileext=".vox"))
#' }
#' @export
writeVoxelSpace <- function(vxsp, f){
stopifnot(is.VoxelSpace(vxsp))
# write header
conn <- file(f, open="w")
writeLines("VOXEL SPACE", conn)
writeLines(printHeader(vxsp), conn)
close(conn)
# write voxels
suppressWarnings(
data.table::fwrite(vxsp@data,
f,
row.names=FALSE,
col.names=TRUE,
na="NaN",
sep=" ",
append=TRUE,
quote=FALSE,
scipen = 999)
)
cat("Saved voxel file ", f, "[OK]")
}
# format voxel file header
# returns a vector of formatted parameters "#key:value"
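# e.g. a voxel size of c(0.5, 0.5, 0.5) is written as "#res:(0.5, 0.5, 0.5)"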
printHeader <- function(vxsp) {
# list parameters, discard nline & columnNames that are internal to package
parameters <- vxsp@header[!(names(vxsp@header)
%in% c("nline", "columnNames"))]
# index of numeric vector parameters
pVec <- which(sapply(parameters,
function(p) is.numeric(p) && length(p) > 1) > 0)
# format numeric vectors and concatenate with remaining parameters
parameters <- c(parameters[-pVec],
sapply(parameters[pVec], .formatNumericVector))
# renamed some parameters
names(parameters) <- sapply(
names(parameters),
function(p) switch(p,
mincorner = "min_corner",
maxcorner = "max_corner",
voxel.size = "res",
dim = "split",
p))
# return a vector of parameters formatted as "#key:value"
return ( sort(paste0("#", paste(names(parameters), parameters, sep=":"))) )
}
# format numeric vector in readable format for AMAPVox
# returns x formatted as "(x1, x2, ..., xn)"
.formatNumericVector <- function(x) {
return(paste0("(", paste(x, collapse = ", "), ")"))
}
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/R/WriteVoxelSpace.R |
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(AMAPVox)
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/inst/doc/AMAPVox.R |
---
title: "AMAPVox"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{AMAPVox}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
<!-- AMAPVox logo -->
<center><img src="rsc/amapvox-logo.png" alt="AMAPVox logo" width="250"/></center>
```{r setup}
library(AMAPVox)
```
**The AMAPVox package provides several vignettes as a guided tour:**
## [Installation](Installation.html)
## [Graphical User Interface](amapvox-gui.html)
## [Voxelization](Voxelization.html)
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/inst/doc/AMAPVox.Rmd |
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----eval=FALSE---------------------------------------------------------------
# # install "remotes" package if not already installed
# if (!any(grepl("remotes", rownames(installed.packages())))) install.packages("remotes")
# # install latest stable version from source
# remotes::install_github('umr-amap/AMAPVox')
## ----setup--------------------------------------------------------------------
# load AMAPVox package
library(AMAPVox)
## ----eval=FALSE---------------------------------------------------------------
# AMAPVox::run()
## ----eval=FALSE---------------------------------------------------------------
# ? AMAPVox::run # section "Java 1.8 64-Bit with JavaFX"
## ----eval=FALSE---------------------------------------------------------------
# AMAPVox::getRemoteVersions()
## ----eval=FALSE---------------------------------------------------------------
# AMAPVox::installVersion("1.7.6")
# # install and run specific version
# AMAPVox::run("1.6.4", check.update = FALSE)
## ----eval=FALSE---------------------------------------------------------------
# AMAPVox::getLocalVersions()
## ----eval=FALSE---------------------------------------------------------------
# AMAPVox::removeVersion("1.7.6")
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/inst/doc/Installation.R |
---
title: "Installation"
output: rmarkdown::html_vignette
#output: rmarkdown::pdf_document
vignette: >
%\VignetteIndexEntry{Installation}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
<!-- AMAPVox logo -->
{width=256px}
AMAPVox is an R package that embeds a Java Desktop Application. The Java part
of AMAPVox is distributed as a set of JAR files (Java Archives) and is not
included by default in the R package to keep it light. The R package handles
the download of the JARs and manages multiple versions for the end user.
## System requirements
**Operating system**: Windows (7 or above), Linux-based, Mac OS X. 64-Bit.
**Java**: For AMAPVox v2 or above, Java is embedded with AMAPVox binaries. For
AMAPVox v1 the system requires Java 1.8 64-Bit with JavaFX. In practice either
[Java 1.8 64-Bit Oracle](https://java.com/download/) or
[Java 1.8 64-Bit Corretto](https://aws.amazon.com/fr/corretto/).
**CPU**: one required but multi-core CPU recommended for multithreading execution.
**RAM**: 1GB is enough for running the sample case. It will greatly depends on
the dimension of the voxel space and the size of the LiDAR scans.
**OpenGL**: Version 3 or higher, for visualization only.
## AMAPVox R package
Install AMAPVox package latest stable version from source:
```{r eval=FALSE}
# install "remotes" package if not already installed
if (!any(grepl("remotes", rownames(installed.packages())))) install.packages("remotes")
# install latest stable version from source
remotes::install_github('umr-amap/AMAPVox')
```
```{r setup}
# load AMAPVox package
library(AMAPVox)
```
## AMAPVox GUI
Install and run latest AMAPVox GUI (with active internet connection)
```{r eval=FALSE}
AMAPVox::run()
```
After downloading latest version, AMAPVox GUI should start automatically:
<center>{width=640px}</center>
For AMAPVox <= 1.10, if you get error(s) about java version, please read help page
```{r eval=FALSE}
? AMAPVox::run # section "Java 1.8 64-Bit with JavaFX"
```
List available remote versions:
```{r eval=FALSE}
AMAPVox::getRemoteVersions()
```
Install specific version:
```{r eval=FALSE}
AMAPVox::installVersion("1.7.6")
# install and run specific version
AMAPVox::run("1.6.4", check.update = FALSE)
```
List local versions:
```{r eval=FALSE}
AMAPVox::getLocalVersions()
```
Uninstall local version:
```{r eval=FALSE}
AMAPVox::removeVersion("1.7.6")
```
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/inst/doc/Installation.Rmd |
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup, echo=FALSE--------------------------------------------------------
# libraries RefManageR
require("RefManageR", quietly = TRUE)
## ----echo=FALSE, eval=TRUE, results='asis'------------------------------------
bib <- RefManageR::ReadBib("rsc/amapvox-bibtex.bib")
RefManageR::NoCite(bib, "*")
RefManageR::PrintBibliography(bib, .opts = list(style = "text", sorting = "ydnt"))
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/inst/doc/Publications.R |
---
# https://bookdown.org/yihui/rmarkdown-cookbook/bibliography.html
title: "Publications"
output: rmarkdown::html_vignette
#output: rmarkdown::pdf_document
vignette: >
%\VignetteIndexEntry{Publications}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
*Updated April 2023*
Your article is not listed? Please let us know: *contact at amapvox dot org*
```{r, setup, echo=FALSE}
# libraries RefManageR
require("RefManageR", quietly = TRUE)
```
```{r, echo=FALSE, eval=TRUE, results='asis'}
bib <- RefManageR::ReadBib("rsc/amapvox-bibtex.bib")
RefManageR::NoCite(bib, "*")
RefManageR::PrintBibliography(bib, .opts = list(style = "text", sorting = "ydnt"))
```
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/inst/doc/Publications.Rmd |
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(AMAPVox)
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/inst/doc/Voxelization.R |
---
title: "Voxelization"
output: rmarkdown::html_vignette
#output: rmarkdown::pdf_document
vignette: >
%\VignetteIndexEntry{Voxelization}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
<!-- AMAPVox logo -->
{width=256px}
```{r setup}
library(AMAPVox)
```
In the *voxelization* process, AMAPVox tracks every laser pulse through a 3D grid
(voxelized space) to the last recorded hit. Several estimators are implemented
in order to calculate local transmittance, local attenuation and several other
variables of interest. For details about the voxelization theoretical framework,
please refer to
[A note on PAD/LAD estimators implemented in AMAPVox](https://doi.org/10.23708/1AJNMP).
This vignette goes through all the parameters of the voxelization configuration.
## Semantics
Every field has its own jargon, and LiDAR is no exception. Throughout this
document we will use the following terms:
- a shot or a pulse refers to a single light pulse going out from the laser. A
shot is defined by an origin, a direction, a time and zero, one or more hits.
- a hit or an echo occurs when the light beam hits an obstacle and returns
some light that is recorded by the laser. It is associated with a pulse, and there
may be no hit, a single hit or multiple hits. Be aware that the absence of a hit
does not mean that there was no obstacle on the way: the light might have been
diffracted or the laser failed to record the signal. We will refer to that
situation as a "false empty shot". Conversely a hit may not always indicate that
the light beam hit some vegetation (see section Butterfly remover for instance).
- free path is the path from the shot origin to the hit. In a voxel, the free
path refers to the path from the voxel entering point to the hit.
- free path length is the length in meters of the free path.
- path length refers to the length of the path from the voxel entering point to
the voxel exiting point, whether or not the pulse has been intercepted.
## LiDAR scans
### LAS/LAZ point cloud
- [LAS](https://en.wikipedia.org/wiki/LAS_file_format) file format;
- LAZ file format (compressed LAS);
*LAS/LAZ* files can be manipulated by the [LAStools](http://lastools.org/)
software or the [lidR](https://CRAN.R-project.org/package=lidR) R package.
AMAPVox rebuilds shot geometry from LAS/LAZ point clouds.
*Consistency checks*: AMAPVox can perform preliminary checks on the LAS/LAZ
data, prior to the voxelization.
First, it suggests discarding shots with an inconsistent number of echoes or ranks.
It checks whether a subset of LAS points with the same GPS time is consistent in
terms of echo rank and number of echoes. Every LAS point of the subset should
have a unique echo rank and the same number of echoes.
Secondly, it may discard shots whose echoes are not collinear. The maximal
deviation, user defined, is a tolerance in degrees to strict collinearity.
These checks are not mandatory, but inconsistent shots will most likely lead to
errors in the voxelization process. It is advised to enable them both in a first
run, just to make sure the point cloud is "clean", and then disable them, since it
is time-consuming and pointless to perform the checks every time.
### Text SHOT format
*SHT or SHOT format* is a text-based format, one shot per line, a shot being
defined by an origin, a direction, the number of echoes and the echo ranges.
The first row is a header; columns are separated by a space character.
```
xOrigin yOrigin zOrigin xDirection yDirection zDirection nbEchoes r1 r2 r3 r4 r5 r6 r7 c1 c2 c3 c4 c5 c6 c7
x0 y0 z0 xd yd zd 1 10
etc.
```
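As a minimal sketch (the file name is hypothetical), such a file could be read
into R with `read.table`, using `fill = TRUE` since the number of echo columns
varies from one shot to the next:
```{r eval=FALSE}
# Read a SHOT text file (hypothetical file name).
# The first row is the header and columns are space-separated;
# fill = TRUE pads shots with fewer echoes with NA.
shots <- read.table("scan.sht", header = TRUE, sep = "", fill = TRUE)
str(shots)
```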
### RIEGL scans
- RiSCAN single scan, RXP file format. A proprietary binary file format owned
by RIEGL. Can be edited with RiSCAN Pro software.
- RiSCAN project, RSP file format. An aggregation of RXP scans in the same
folder with an XML file project.xml that lists the file paths of the single
scans, and the SOP and POP matrices (see section Transformation for details on
the transformation matrices).
### LEICA formats
- PTX / PTG file formats from LEICA Geosystems.
### FARO formats
- XYB from FARO
## Trajectory file (LAS/LAZ)
For LAS and LAZ files, you need to provide either a fixed scanner position or
a *trajectory file*.
A trajectory file is a text based format that contains GPS positions of the
scanner at a given time.
Four columns are expected:
- easting (x coordinate),
- northing (y coordinate),
- elevation (z-coordinate)
- and time.
The time in the trajectory file must be consistent with the time in the LAS/LAZ file.
Scanner positions must be expressed in the same coordinate system as the point
cloud prior to any transformation (refer to section Transformation).
AMAPVox will isolate points from the LAS/LAZ point cloud with the same GPS time
(the hits from the same laser pulse), and calculate the scanner position with
a linear interpolation of the trajectory points. Then AMAPVox can reconstruct
the geometry of the pulse.
The text-based format is flexible: the AMAPVox GUI provides a user interface to
identify the columns, the separator, the number of lines to skip, etc.
Example:
```
"X" "Y" "Z" "T"
289109.129 586268.068 504.85 308500.003528
289108.973 586267.861 504.846 308500.008528
289108.816 586267.654 504.842 308500.013527
289108.659 586267.447 504.838 308500.018526
etc.
```
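As an illustration only (the file name and GPS time below are hypothetical),
the linear interpolation of the scanner position can be mimicked in R with
`approx`:
```{r eval=FALSE}
# Read a trajectory file and interpolate the scanner position at the
# GPS time of a pulse, mirroring what AMAPVox does internally.
traj <- read.table("trajectory.txt", header = TRUE)
t0 <- 308500.010  # hypothetical GPS time taken from the LAS/LAZ file
scanner.position <- c(
  x = approx(traj$T, traj$X, xout = t0)$y,
  y = approx(traj$T, traj$Y, xout = t0)$y,
  z = approx(traj$T, traj$Z, xout = t0)$y)
scanner.position
```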
## Digital Terrain Model input
The Digital Terrain Model is optional. If provided, it will be used to compute
the distance to the ground. It is required if the DTM filter (section Filter > DTM
filter) or the Ground energy output (section Output > Post-processing) is enabled.
Expected format: [AAIGrid – Arc/Info ASCII
Grid](https://gdal.org/drivers/raster/aaigrid.html#raster-aaigrid). No-data
values must be numeric.
## Output parameters
Set output folder, output format and output variables to be recorded.
## Transformation matrix
A transformation matrix in AMAPVox is a 4x4 matrix that combines translation and
rotation movements applied to 3D points (x, y, z).
### SOP matrix
System Orientation and Position, TLS only. Each scan from a Riscan project has its
own SOP matrix, which is included in the Riscan Pro project file. If a single scan
(*.rxp) is selected, by clicking on the « Open file » button next to the POP matrix
you can choose the Riscan Pro project file and it will automatically configure
the POP matrix and the SOP matrix of that scan.
### POP matrix
Project Orientation and Position, TLS only. Projection matrix of a Riscan Pro
project, defined in the project file (*.rsp). That matrix is
automatically filled when a Riscan Pro project file is opened, being read from
the file. For single scan (*.rxp) voxelization, it is possible to define the POP
matrix, either by opening a matrix file (see file formats in the annex), or by
choosing a Riscan Pro project file.
### VOP matrix
Voxel Orientation and Position.
Optional transformation matrix.
The final transformation matrix is the product of the three matrices.
`transformation matrix = (VOP.POP).SOP`
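As a minimal sketch (identity matrices stand in for the actual values read
from the scan files), the product can be computed in R as follows:
```{r eval=FALSE}
# Combine the three 4x4 transformation matrices.
sop <- diag(4)
pop <- diag(4)
vop <- diag(4)
transformation <- vop %*% pop %*% sop
# Apply it to a 3D point expressed in homogeneous coordinates (x, y, z, 1).
transformation %*% c(10, 20, 5, 1)
```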
## Voxel space
Defines geographical extent and spatial resolution of the voxelized space.
The geographical extent is a 3-dimensional rectangular cuboid, defined by (x, y,
z) min and max coordinates. Spatial resolution is an (x, y, z) vector
giving the dimensions of a single voxel. A voxel is a rectangular cuboid and
most commonly a cube. Voxel size is a critical parameter for estimating plant
area density. It must be small enough so that the hypothesis of the vegetation
being homogeneously distributed within the voxel holds true. But it must be
large enough to include enough points to ensure reliable attenuation/PAD
estimations.
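As a minimal sketch (the extent and resolution values are arbitrary), the size
of the resulting voxel space can be checked in R before launching a
voxelization:
```{r eval=FALSE}
# Number of voxels for a given extent and resolution (arbitrary values).
min.corner <- c(x = 0, y = 0, z = 0)
max.corner <- c(x = 40, y = 40, z = 30)
resolution <- c(x = 0.5, y = 0.5, z = 0.5)
n.voxel <- ceiling((max.corner - min.corner) / resolution)
n.voxel        # number of voxels along x, y and z
prod(n.voxel)  # total number of voxels
```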
## Filtering
### Digital Terrain Model filter
Digital Terrain Model filter discards every point that is below a given
distance to the ground (the Offset parameter, in meters). Enabling this filter
implies that a Digital Terrain Model has been provided in the Input section.
### Shot decimation
Randomly downsamples the scan by a float factor M ranging in [0, 1[, the
decimation factor. M = 0 means no shots discarded, M = 0.1 means 10% of the shots
discarded, etc.
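As an illustration of the principle (not AMAPVox's actual implementation),
a random decimation with factor M can be sketched in R:
```{r eval=FALSE}
# Randomly discard a proportion M of the shots.
M <- 0.1                 # decimation factor: 10% of the shots discarded
n.shot <- 1000           # arbitrary number of shots
keep <- runif(n.shot) >= M
sum(keep) / n.shot       # proportion of shots kept, close to 1 - M
```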
### Shot angle filter
Filter shots based on the shot zenith angle (also called polar angle) in degrees,
i.e. the angle between the zenith (origin at the ground) and the shot direction.
### False empty shot
Riegl TLS scanners may include artificial empty shots for objects too close to
the sensor (around 1 m). This option allows removing those "false" empty shots.
### Point cloud filter
Discard or retain a subset of points from the input point cloud for the
voxelization. For instance, it allows extracting a tree from a forest patch or
removing the wood of a tree to estimate leaf area density instead of plant area
density. The point cloud is provided as a text file with 3 columns (x, y, z),
the coordinates of the points.
### Echo attribute filter
For RIEGL scans only, filter echoes based on reflectance, amplitude or deviation
values.
### Classification filter
For LAS/LAZ only, filter echoes based on LAS classification. By default discard
class "2 - Ground".
## Weighting
For multi-echo scans, the weighting table defines energy attenuation for
every hit of a pulse. AMAPVox calculates the amount of light entering,
intercepted and exiting the voxels to derive attenuation and plant area density
estimators. A partial hit implies that some light is intercepted and some light
carries on. This information is sometimes provided in the scans as the returned
intensity, but we have not yet found an approach that works in every situation
(work in progress). Providing a statistical model of energy attenuation as a
function of the return rank is another option, though not straightforward.
[Vincent et al., 2017](https://doi.org/10.1016/j.rse.2017.05.034) and [Vincent
et al., 2022](https://doi.org/10.1016/j.rse.2022.113442) discuss pros and cons
of both approaches.
As of now (2022), we consider that the best default option is to assume that
the energy is evenly distributed among every hit of a pulse. Hence the suggested
default weighting table with equal weights summing up to one for every pulse.
The table remains editable for advanced users who would rather apply a custom
statistical model that better fits their data.
No weighting table means that AMAPVox will only take into account the first
return of a pulse. An exploratory "most intense return" approach has been
implemented but not released. Feel free to request binaries for testing purposes.
## Scanner
Either use predefined laser specifications or define custom specifications.
Please contact us to permanently include your scanner specifications.
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/inst/doc/Voxelization.Rmd |
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
options(rmarkdown.html_vignette.check_title = FALSE)
## ----setup--------------------------------------------------------------------
library(AMAPVox)
## ----eval=FALSE---------------------------------------------------------------
# grep("OpenGL", system2("glxinfo", stdout = TRUE), value = TRUE)
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/inst/doc/amapvox-gui.R |
---
title: "Graphical User Interface"
output: rmarkdown::html_vignette
#output: rmarkdown::pdf_document
vignette: >
%\VignetteIndexEntry{amapvox-gui}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
options(rmarkdown.html_vignette.check_title = FALSE)
```
<!-- AMAPVox logo -->
```{r setup}
library(AMAPVox)
```
## Main frame
AMAPVox GUI fulfills three objectives:
1. Editing configurations conveniently;
2. Running simulations (sequentially or concurrently);
3. Quickly visualizing output files (3D views and vertical profiles).
The main toolbar and the File menu provide the usual functions: New / Open /
Edit / Close / Save / Save as, etc.
## Tabs Configuration files and Output files
The top left pane contains the configuration pane and the output pane.
The configuration pane lists the configuration files that have
been opened or created. Double click, or right click and select Edit, to edit
the configuration file.
The output pane lists the output files grouped by configuration. You may right
click on the files to display a context menu.
## Log pane
The bottom left pane contains the logs from the main frame and the processes
when they are running. You may clear the log pane, export it as a text file
and cap the size of the log in heap memory.
## Vox tools tab pane
The vox tools frame is dedicated to editing the configuration files.
### New configuration
Menu > File > New, or click on the New button in the toolbar.
A configuration file is an XML file with the following header:
```
<?xml version="1.0" encoding="UTF-8"?>
<configuration creation-date="Thu Sep 23 16:14:41 CEST 2021" build-version="1.8.0" update-date="Mon Sep 27 09:59:18 CEST 2021">
<process mode="process-name" >
```
### Open configuration
Menu > File > Open, or click on the Open button in the toolbar.
The configuration file is added to the configuration pane but is not edited
automatically. The Start button is disabled if the file has never been edited
yet.
### Edit configuration
Select one or more files in the configuration pane, then
Menu > File > Edit, or click on the Edit button in the toolbar.
You may also:
- double click on a file in the configuration pane;
- right-click on a file in the configuration pane to display the context
menu and click on Edit;
Then you may modify the parameters. Use the Help
buttons throughout the configuration editor
as much as you can. Many parameters provide on-the-fly validation; for instance,
some text fields only accept numerical values.
### Save configuration
Select a modified file in the configuration pane, then
Menu > File > Save, or click on the Save button in the toolbar.
As soon as a parameter is modified in the configuration editor, the name of the
configuration will appear in bold with a leading *. This reminds you to save the
configuration file. If you were to run a modified configuration file, it would
be saved beforehand automatically.
Before saving, configuration editors may launch a validity check and
provide some feedback. The file will not be saved until you address all the issues
detected by the validity check. You may notice that in the meantime
the Save button is disabled.
### Save as configuration
Select a file in the configuration pane, then Menu > File > Save as, or click on
the Save as button in the toolbar.
### Close configuration
Select one or more files in the configuration pane, then
Menu > File > Close, or click on the Close button in the toolbar.
A dialog box will show up if you attempt to close a modified file, to ask for
confirmation. The same happens when you exit the program.
Closing the configuration tab in the Vox tools tab pane only hides the editor.
Unsaved modifications are kept. A configuration file is only closed when it is
removed from the configuration pane.
### Run configuration
You may run a single configuration by clicking on the
Start button in the configuration pane. A disabled
Start button means that the configuration file is either empty (newly created)
or that the file did not pass the validity check when saving and needs
more editing.
When the simulation starts, the Start button becomes a Stop button
that you may click to cancel the job.
You may run a bunch of configurations by selecting them in the configuration
pane and clicking on the Run button in the toolbar.
You will be asked whether to run the simulations sequentially
(one after another) or concurrently. The maximum number of CPUs dedicated to
AMAPVox may be set in Menu > Edit > Preferences.
When the simulations start, the Run button becomes a Stop button
that you may click to cancel all the jobs.
## Visualization pane
AMAPVox provides two visualization tools:
1. A 3D voxel space viewer
2. A vertical profile viewer
### 3D viewer
The 3D Viewer relies on OpenGL >= 3.
For Linux users, you may check OpenGL version with:
```{r eval=FALSE}
grep("OpenGL", system2("glxinfo", stdout = TRUE), value = TRUE)
```
Usage is straightforward: choose a voxel file, select the variable to
plot (it can be changed later on, directly in the viewer) and click
on the Draw button.
The viewer can optionally draw the digital terrain model, the same one that is
used in the voxelization process.
### Vertical profile
@TODO...
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/inst/doc/amapvox-gui.Rmd |
---
title: "AMAPVox"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{AMAPVox}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
<!-- AMAPVox logo -->
<center><img src="rsc/amapvox-logo.png" alt="AMAPVox logo" width="250"/></center>
```{r setup}
library(AMAPVox)
```
**The AMAPVox package provides several vignettes as a guided tour:**
## [Installation](Installation.html)
## [Graphical User Interface](amapvox-gui.html)
## [Voxelization](Voxelization.html)
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/vignettes/AMAPVox.Rmd |
---
title: "Installation"
output: rmarkdown::html_vignette
#output: rmarkdown::pdf_document
vignette: >
%\VignetteIndexEntry{Installation}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
<!-- AMAPVox logo -->
AMAPVox is an R package that embeds a Java Desktop Application. The Java part
of AMAPVox is distributed as a set of JAR files (Java Archives) and is not
included by default in the R package to keep it light. The R package handles
the download of the JARs and manages multiple versions for the end user.
## System requirements
**Operating system**: Windows (7 or above), Linux-based, Mac OS X. 64-Bit.
**Java**: For AMAPVox v2 or above, Java is embedded with AMAPVox binaries. For
AMAPVox v1 the system requires Java 1.8 64-Bit with JavaFX. In practice either
[Java 1.8 64-Bit Oracle](https://java.com/download/) or
[Java 1.8 64-Bit Corretto](https://aws.amazon.com/fr/corretto/).
**CPU**: one required but multi-core CPU recommended for multithreading execution.
**RAM**: 1GB is enough for running the sample case. Requirements will greatly
depend on the dimensions of the voxel space and the size of the LiDAR scans.
**OpenGL**: Version 3 or higher, for visualization only.
## AMAPVox R package
Install AMAPVox package latest stable version from source:
```{r eval=FALSE}
# install "remotes" package if not already installed
if (!any(grepl("remotes", rownames(installed.packages())))) install.packages("remotes")
# install latest stable version from source
remotes::install_github('umr-amap/AMAPVox')
```
```{r setup}
# load AMAPVox package
library(AMAPVox)
```
## AMAPVox GUI
Install and run latest AMAPVox GUI (with active internet connection)
```{r eval=FALSE}
AMAPVox::run()
```
After downloading the latest version, the AMAPVox GUI should start automatically.
For AMAPVox <= 1.10, if you get error(s) about the Java version, please read the help page
```{r eval=FALSE}
? AMAPVox::run # section "Java 1.8 64-Bit with JavaFX"
```
List available remote versions:
```{r eval=FALSE}
AMAPVox::getRemoteVersions()
```
Install specific version:
```{r eval=FALSE}
AMAPVox::installVersion("1.7.6")
# install and run specific version
AMAPVox::run("1.6.4", check.update = FALSE)
```
List local versions:
```{r eval=FALSE}
AMAPVox::getLocalVersions()
```
Uninstall local version:
```{r eval=FALSE}
AMAPVox::removeVersion("1.7.6")
```
| /scratch/gouwar.j/cran-all/cranData/AMAPVox/vignettes/Installation.Rmd |
#' A Model Comparison Perspective (AMCP)
#'
#' AMCP contains all of the data sets used in Maxwell, Delaney, & Kelley's (2018) \emph{Designing experiments and analyzing data: A model comparison perspective} (3rd edition). Information about the book is available at <http://www.DesigningExperiments.com>.
#'
#' The general strategy is to have chapter data (e.g., from numeric examples) denoted by the chapter and table number,
#' such as \code{chapter_1_table_1} (for Table 1 from Chapter 1). Alternatively, a chapter and table can be accessed by capital "C"
#' followed by the chapter number and capital "T" followed by the table number, as in \code{C1T1} (for Table 1 from Chapter 1).
#'
#' For the exercises at the end of the chapter, the general strategy is to denote the data sets as \code{chapter_1_exercise_18} (for Exercise 18 from Chapter 1).
#' Also, an uppercase "C" and "E" can be used, as in \code{Chapter_1_Exercise_18}. Alternatively, the data from an exercise can be accessed by capital "C"
#' followed by the chapter number and capital "E" followed by the exercise number, as in \code{C1E18} (for Exercise 18 from Chapter 1).
#'
#' For a data set of interest to be loaded into the workspace, it must be loaded using the \code{data()} function as: \code{data(chapter_1_table_1)}.
#'
#' There are a few "one-off" naming conventions for non-standard data, such as raw data to produce the output shown in the book (e.g., \code{data(chapter_3_table_7_raw)}),
#' for the data from the Chapter 9 extension used in Exercise 1 (e.g., \code{data("chapter_9_extension_exercise_1")} or \code{data("C9ExtE1")}), data for the tutorial (e.g., \code{data(tutorial_1_table_1)} or \code{data(T1T1)}),
#' or the Chapter 15 Exercise 18 data in the "univariate" format (long, not wide; e.g., \code{data("chapter_15_exercise_18_univariate")} or \code{data(C15E18U)}).
#'
#' A list of the available data sets from AMCP can be obtained with the following code: \code{data(package="AMCP")}
#'
#' Note that, for many data sets the coding for factors is numeric. Correspondingly, those variables may need to be identified as factors (e.g., \code{C16E9$Room <- as.factor(C16E9$Room)}). Further,
#' the data sets are not always in the most convenient form for analysis, as they are generally entered to match the style in the book. Thus, for some analyses the data may benefit from being parsed, wrangled, or tidied.
#'
#' Note that \url{https://designingexperiments.com/computing/} shows R code (via R Markdown), SPSS syntax and graphical user interface approaches, along with SAS code for implementing many of the analyses in the book, by chapter.
#'
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY. Routledge.
#'
#' See the web page that accompanies the book here: \url{https://designingexperiments.com/}.
#'
#' For suggested updates, please email Ken Kelley \email{[email protected]}.
#' @author Ken Kelley \email{[email protected]}
#'
"_PACKAGE"
| /scratch/gouwar.j/cran-all/cranData/AMCP/R/AMCP.R |
# Read me
# This file contains the help files, in Roxygen [ library(roxygen2) ] for the AMCP package (beginning in version .0.0.4, February 2017).
# Tables from the chapters
#######################################################################################################################################
#' The data used in Chapter 1, Table 1
#'
#' Assume that a developmental psychologist is interested in whether brief training can improve performance of 2-year-old children on a test of mental abilities. The test selected is the Mental Scale of the Bayley Scales of Infant Development, which yields a mental age in months. To increase the sensitivity of the experiment, the psychologist decides to recruit sets of twins and randomly assigns one member of each pair to the treatment condition. The treatment consists of simply watching a videotape of another child attempting to perform tasks similar to those making up the Bayley Mental Scale. The other member of each pair plays in a waiting area as a time-filling activity while the first is viewing the videotape. Then both children are individually given the Bayley by a tester who is blind to their assigned conditions. A different set of twins takes part in the experiment each day, Monday through Friday, and the experiment extends over a 2-week period. Table 1.1 shows the data for the study in the middle columns.
#'
#'@section Variables:
#'\describe{
#' \item{treat}{scores for the treatment group}
#' \item{control}{scores for the control group}
#' \item{week}{identifies the week}}
#'
#' @docType data
#' @name chapter_1_table_1
#' @aliases C1T1 chapter_1_table_1 Chapter_1_Table_1 c1t1
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_1_table_1)
#' @section Synonym:
#' C1T1
#' @examples
#' # Load the data
#' data(chapter_1_table_1)
#'
#' # Or, alternatively load the data as
#' data(C1T1)
#'
#' # View the structure
#' str(chapter_1_table_1)
#'
#' chapter_1_table_1$Difference <- chapter_1_table_1$treat - chapter_1_table_1$control
#'
#' # Summaries by week.
#' summary(chapter_1_table_1[chapter_1_table_1$week==1,])
#' summary(chapter_1_table_1[chapter_1_table_1$week==2,])
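#'
#' # A paired t-test on the twin pairs, matching the matched-pairs design
#' # described above (a minimal sketch)
#' t.test(chapter_1_table_1$treat, chapter_1_table_1$control, paired = TRUE)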
"chapter_1_table_1"
#' The data used in Chapter 3, Table 1
#'
#' Hyperactive children's IQ scores from the WISC-R
#'
#' Assume that you work in the research office of a large school system. For the last several years, the mean score on the WISC-R, which is administered to all elementary school children in your district, has been holding fairly steady at about 98. A parent of a hyperactive child in one of your special education programs maintains that the hyperactive children in the district are actually brighter than this average. To investigate this assertion, you randomly select the files of six hyperactive children and examine their WISC-R scores. The data set analyzed to replicate Chapter 3 Table 1 consists of IQ (WISC-R) measurements on six hyperactive children. The question of interest is: "are hyperactive children in the school district brighter than the average student?" The mean IQ among the students is known to be 98. Thus, the null hypothesis in this situation is that the population mean for the hyperactive students is also 98. To answer such a question we perform a one-sample \emph{t}-test specifying the value of the null hypothesis as 98. Note that a \emph{t}-value squared with \emph{df} degrees of freedom is equivalent to an \emph{F}-value with one numerator and \emph{df} denominator degrees of freedom. Recall that the observed \emph{F}-value (with 1 and 5 degrees of freedom) in the book is 9, whereas our \emph{t}-value (with 5 degrees of freedom) is 3.
#'
#' @section Variables:
#'\describe{
#'\item{iq}{IQ score from the WISC-R}}
#'
#' @docType data
#' @name chapter_3_table_1
#' @aliases C3T1 chapter_3_table_1 Chapter_3_Table_1 c3t1
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_3_table_1)
#' @section Synonym:
#' C3T1
#' @examples
#' # Load the data
#' data(chapter_3_table_1)
#'
#' # Or, alternatively load the data as
#' data(C3T1)
#'
#' # View the structure
#' str(chapter_3_table_1)
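#'
#' # A one-sample t-test against the null value of 98, as described above
#' # (a minimal sketch; t^2 = F with 1 numerator df, so this reproduces
#' # the F-test reported in the book)
#' t.test(chapter_3_table_1$iq, mu = 98)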
#'
"chapter_3_table_1"
#' The data used in Chapter 3, Table 3
#'
#' The data used in Chapter 3, Table 3.
#'
#' Although different mood states have, of course, always been of interest to clinicians, recent years have seen a profusion of studies attempting to manipulate mood states in controlled laboratory studies. In such induced-mood research, participants typically are randomly assigned to one of three groups: a depressed-mood induction, a neutral-mood induction, or an elated-mood induction. One study (Pruitt, 1988) used selected video clips from several movies and public television programs as the mood-induction treatments. After viewing the video for her assigned condition, each participant was asked to indicate her mood on various scales. In addition, each subject was herself videotaped, and her facial expressions of emotion were rated on a scale of 1 to 7 (1 indicating sad; 4, neutral; and 7, happy) by an assistant who viewed the videotapes but was kept "blind" regarding the subjects' assigned conditions.
#'
#' @section Variables:
#'\describe{
#' \item{cond}{assigned Condition: a numeric vector (1=Pleasant/elated, 2=Neutral, 3=Unpleasant/depressed)}
#' \item{mood}{a numeric vector between 1 and 7}}
#'
#' @docType data
#' @name chapter_3_table_3
#' @aliases C3T3 chapter_3_table_3 Chapter_3_Table_3 c3t3
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_3_table_3)
#' @section Synonym:
#' C3T3
#' @examples
#' # Load the data
#' data(chapter_3_table_3)
#'
#' # Or, alternatively load the data as
#' data(C3T3)
#'
#' # View the structure
#' str(chapter_3_table_3)
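#'
#' # A one-way ANOVA comparing mood across the three induction conditions
#' # (a minimal sketch; cond is coded numerically, so treat it as a factor)
#' summary(aov(mood ~ factor(cond), data = chapter_3_table_3))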
#'
"chapter_3_table_3"
#' The data used for Chapter 3, Table 7 (raw data to produce the summary measures)
#'
#' Raw data on the number of drinks per day (and log of the number of drinks)
#'
#' Average number of standard drinks per week at intake for a sample of homeless alcoholics at nine-month follow-up (Smith, Meyers, & Delaney, 1998). Note that the groups, 1-5, are, respectively, "T1 CRA-D", "T1 CRA+D", "T1Std", "T2 CRA-D", and "T2 Std," where CRA is "Community Reinforcement Approach" (with or without Disulfiram) and where "Std" is standard therapy. Note that this is the same data as \code{data(chapter_3_table_9_raw)}.
#'
#' @section Variables:
#'\describe{
#'\item{Group}{randomly assigned group membership (see details)}
#'\item{Drinks}{number of standard drinks, on average, per week}
#'\item{LgDrinks}{log of the number of standard drinks, on average, per week}}
#'
#' @docType data
#' @name chapter_3_table_7_raw
#' @aliases C3T7R chapter_3_table_7_raw Chapter_3_Table_7_Raw c3t7r
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @references Smith, J. E., Meyers, R. J. \& Delaney, H. D. (1998). The community reinforcement approach with homeless alcohol-dependent individuals. \emph{Journal of Consulting and Clinical Psychology, 66}, 541--548.
#' @keywords datasets
#' @usage data(chapter_3_table_7_raw)
#' @section Synonym:
#' C3T7R
#' @examples
#' # Load the data
#' data(chapter_3_table_7_raw)
#'
#' # Or, alternatively load the data as
#' data(C3T7R)
#'
#' # View the structure
#' str(chapter_3_table_7_raw)
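#'
#' # A one-way ANOVA on the log-transformed drink counts
#' # (a minimal sketch; Group is coded numerically, so treat it as a factor)
#' summary(aov(LgDrinks ~ factor(Group), data = chapter_3_table_7_raw))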
#'
"chapter_3_table_7_raw"
#' The data used for Chapter 3, Table 9 (raw data to produce the summary measures)
#'
#' Raw data on the number of drinks per day (and log of the number of drinks)
#'
#' Average number of standard drinks per week at intake for a sample of homeless alcoholics at nine-month follow-up (Smith, Meyers, & Delaney, 1998). Note that the groups, 1-5, are, respectively, "T1 CRA-D", "T1 CRA+D", "T1Std", "T2 CRA-D", and "T2 Std," where CRA is "Community Reinforcement Approach" (with or without Disulfiram) and where "Std" is standard therapy. Note that this is the same data as \code{data(chapter_3_table_7_raw)}.
#'
#' @section Variables:
#'\describe{
#'\item{Group}{randomly assigned group membership (see details)}
#'\item{Drinks}{number of standard drinks, on average, per week}
#'\item{LgDrinks}{log of the number of standard drinks, on average, per week}}
#'
#' @docType data
#' @name chapter_3_table_9_raw
#' @aliases C3T9R chapter_3_table_9_raw Chapter_3_Table_9_Raw c3t9r
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @references Smith, J. E., Meyers, R. J. \& Delaney, H. D. (1998). The community reinforcement approach with homeless alcohol-dependent individuals. \emph{Journal of Consulting and Clinical Psychology, 66}, 541--548.
#' @keywords datasets
#' @usage data(chapter_3_table_9_raw)
#' @section Synonym:
#' C3T9R
#' @examples
#' # Load the data
#' data(chapter_3_table_9_raw)
#'
#' # Or, alternatively load the data as
#' data(C3T9R)
#'
#' # View the structure
#' str(chapter_3_table_9_raw)
#'
"chapter_3_table_9_raw"
#' The data used in Chapter 4, Table 1
#'
#' The data used in Chapter 4, Table 1
#'
#' This is hypothetical data for four groups of participants, corresponding to treatments for hypertension. The context is 24 mild hypertensives that have been independently and randomly assigned to one of the four treatments. The scores are the systolic blood pressure values two weeks after the termination of treatment.
#'
#' @section Variables:
#'\describe{
#'\item{\code{bloodpr}}{systolic blood pressure (hypothetical data)}
#'\item{\code{cond}}{identifies group membership (1=drug therapy; 2=biofeedback; 3=diet; 4=combination)}}
#'
#' @docType data
#' @name chapter_4_table_1
#' @aliases C4T1 chapter_4_table_1 Chapter_4_Table_1 c4t1
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_4_table_1)
#' @section Synonym:
#' C4T1
#' @examples
#' # Load the data
#' data(chapter_4_table_1)
#'
#' # Or, alternatively load the data as
#' data(C4T1)
#'
#' # View the structure
#' str(chapter_4_table_1)
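#'
#' # A one-way ANOVA comparing blood pressure across the four treatments
#' # (a minimal sketch; cond is coded numerically, so treat it as a factor)
#' summary(aov(bloodpr ~ factor(cond), data = chapter_4_table_1))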
#'
"chapter_4_table_1"
#' The data used in Chapter 4, Table 7
#'
#' The data used in Chapter 4, Table 7
#'
#' Data used to demonstrate another property of orthogonal contrasts. The demonstrated principle is that the sums of squares of nonorthogonal contrasts are not additive, yet the sums of squares of orthogonal contrasts can be added to determine the magnitude of the sum of squares they jointly account for.
#'
#' @section Variables:
#'\describe{
#'\item{dv}{dependent variable}
#'\item{group}{Group number: 1, 2, or 3}}
#'
#' @docType data
#' @name chapter_4_table_7
#' @aliases C4T7 chapter_4_table_7 Chapter_4_Table_7 c4t7
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_4_table_7)
#' @section Synonym:
#' C4T7
#' @examples
#' # Load the data
#' data(chapter_4_table_7)
#'
#' # Or, alternatively load the data as
#' data(C4T7)
#'
#' # View the structure
#' str(chapter_4_table_7)
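#'
#' # A minimal sketch of partitioning the between-group sum of squares into
#' # two orthogonal contrasts (the contrast codes here are illustrative)
#' g <- factor(chapter_4_table_7$group)
#' contrasts(g) <- cbind(c(1, -1, 0), c(1, 1, -2))
#' fit <- aov(chapter_4_table_7$dv ~ g)
#' summary(fit, split = list(g = list("1 vs 2" = 1, "(1,2) vs 3" = 2)))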
#'
"chapter_4_table_7"
#' The data used in Chapter 5, Table 4
#'
#' The data used in Chapter 5, Table 4
#'
#' The following data consists of blood pressure measurements for six individuals randomly assigned to one of four groups. Our purpose here is to perform four planned contrasts in order to discern if group differences exist for the selected contrasts of interest.
#'
#' @section Variables:
#'\describe{
#'\item{group}{a numeric vector between 1 and 4; group number}
#'\item{sbp}{systolic blood pressure of a patient within one of the four groups}}
#'
#' @docType data
#' @name chapter_5_table_4
#' @aliases C5T4 chapter_5_table_4 Chapter_5_Table_4 c5t4
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_5_table_4)
#' @section Synonym:
#' C5T4
#' @examples
#' # Load the data
#' data(chapter_5_table_4)
#'
#' # Or, alternatively load the data as
#' data(C5T4)
#'
#' # View the structure
#' str(chapter_5_table_4)
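#'
#' # A minimal sketch of testing a single planned contrast with illustrative
#' # weights (group 1 versus the average of groups 2-4)
#' m <- tapply(chapter_5_table_4$sbp, chapter_5_table_4$group, mean)
#' n <- tapply(chapter_5_table_4$sbp, chapter_5_table_4$group, length)
#' w <- c(1, -1/3, -1/3, -1/3)
#' psi <- sum(w * m)
#' mse <- deviance(aov(sbp ~ factor(group), data = chapter_5_table_4)) /
#'   (nrow(chapter_5_table_4) - 4)
#' psi / sqrt(mse * sum(w^2 / n))  # observed t statistic for the contrast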
#'
"chapter_5_table_4"
#' The data used in Chapter 6, Table 1
#'
#' The data used in Chapter 6, Table 1
#'
#' Recall scores for 24 children who have been randomly assigned to one of four experimental conditions where there are 6 children in each condition. The experimental conditions of interest are 1, 2, 3, and 4 minutes where the number of minutes is the amount of time the child is allotted to study a list of words before attempting to recall the words. The dependent variable (i.e., the recall scores) are the number of words the child is able to recall after a brief interference task. The first hypothesis of interest is whether the number of words recalled is linearly related to the number of minutes spent studying.
#'
#' @section Variables:
#'\describe{
#'\item{recall}{the number of words recalled by the child after the study time expires}
#'\item{minutes}{the amount of time, in minutes, the child was permitted to study}}
#'
#' @docType data
#' @name chapter_6_table_1
#' @aliases C6T1 chapter_6_table_1 Chapter_6_Table_1 c6t1
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_6_table_1)
#' @section Synonym:
#' C6T1
#' @examples
#' # Load the data
#' data(chapter_6_table_1)
#'
#' # Or, alternatively load the data as
#' data(C6T1)
#'
#' # View the structure
#' str(chapter_6_table_1)
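#'
#' # A minimal sketch of the linear trend question described above,
#' # treating minutes as a quantitative predictor
#' summary(lm(recall ~ minutes, data = chapter_6_table_1))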
#'
"chapter_6_table_1"
#' The data used in Chapter 7, Table 1
#'
#' The data used in Chapter 7, Table 1
#'
#' This data is the hypothetical data from a psychologist's evaluation of the effectiveness of biofeedback and drug therapy for treating hypertension (lowering blood pressure). There are four groups: both biofeedback training and drug therapy, biofeedback but not drug therapy, drug therapy but no biofeedback, and neither biofeedback nor drug therapy... As usual, in this data set, the number of subjects is kept small to minimize the computational burden. We assume that the scores in the table represent systolic blood pressure readings taken at the end of the treatment period.
#'
#' The following data consists specifically of blood pressure measurements taken after the end of treatment for five individuals that were randomly assigned to one of four groups. The initial question of interest is whether there is a significant difference between any of the group means, that is, are all of the population group means equal or is there a difference somewhere.
#'
#' As before, we can perform a one-way ANOVA via the One-Way ANOVA procedure to replicate the results given in Table 7.2.
#'
#' @section Variables:
#'\describe{
#'\item{group}{a numeric vector between 1 and 4 identifying the treatment group}
#'\item{score}{the blood pressure of one of the individuals in the study}}
#'
#' @docType data
#' @name chapter_7_table_1
#' @aliases C7T1 chapter_7_table_1 Chapter_7_Table_1 c7t1
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_7_table_1)
#' @section Synonym:
#' C7T1
#' @examples
#' # Load the data
#' data(chapter_7_table_1)
#'
#' # Or, alternatively load the data as
#' data(C7T1)
#'
#' # View the structure
#' str(chapter_7_table_1)
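#'
#' # A minimal sketch of the one-way ANOVA described above (a base-R
#' # illustration, not the book's own syntax):
#' summary(aov(score ~ factor(group), data = chapter_7_table_1))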
#'
"chapter_7_table_1"
#' The data used in Chapter 7, Table 5
#'
#' The data used in Chapter 7, Table 5
#'
#' This table represents hypothetical data from a study investigating the effects of biofeedback and drug therapy on hypertension. We (arbitrarily) refer to the presence or absence of biofeedback as factor A and to the type of drug as factor B.
#'
#' The following data is a generalization of the blood pressure data given in Table 7.1 (as there are now three, rather than two, levels of the drug factor). In addition to assessing the likelihood of there being a biofeedback or a drug main effect, the interaction is explicitly taken into consideration.
#'
#' @section Variables:
#'\describe{
#'\item{score}{blood pressure}
#'\item{feedback}{presence or absence of biofeedback training}
#'\item{drug}{the level of the drug factor}}
#'
#' @docType data
#' @name chapter_7_table_5
#' @aliases C7T5 chapter_7_table_5 Chapter_7_Table_5 c7t5
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_7_table_5)
#' @section Synonym:
#' C7T5
#' @examples
#' # Load the data
#' data(chapter_7_table_5)
#'
#' # Or, alternatively load the data as
#' data(C7T5)
#'
#' # View the structure
#' str(chapter_7_table_5)
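#'
#' # A minimal sketch of the two-way ANOVA with interaction described above:
#' summary(aov(score ~ factor(feedback) * factor(drug),
#'             data = chapter_7_table_5))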
#'
"chapter_7_table_5"
#' The data used in Chapter 7, Table 9
#'
#' The data used in Chapter 7, Table 9
#'
#' The following data is a generalization of the blood pressure data given in Table 7.5 (which itself was a generalization of the data given in Table 7.1). After the interaction is found to be significant, a common recommendation is to examine simple main effects. Recall that a simple main effect is the main effect of one factor given a fixed level of another factor. In this case interest is in determining if there are any differences in drugs (a) given biofeedback and (b) given no biofeedback.
#'
#' @section Variables:
#'\describe{
#'\item{score}{blood pressure}
#'\item{feedback}{presence or absence of biofeedback training}
#'\item{drug}{the level of the drug factor}}
#'
#' @docType data
#' @name chapter_7_table_9
#' @aliases C7T9 chapter_7_table_9 Chapter_7_Table_9 c7t9
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_7_table_9)
#' @section Synonym:
#' C7T9
#' @examples
#' # Load the data
#' data(chapter_7_table_9)
#'
#' # Or, alternatively load the data as
#' data(C7T9)
#'
#' # View the structure
#' str(chapter_7_table_9)
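#'
#' # One rough way to examine the simple main effect of drug at each level of
#' # biofeedback is to fit a one-way ANOVA within each subset. (The book tests
#' # simple effects against the pooled error term; this is an approximation.)
#' by(chapter_7_table_9, chapter_7_table_9$feedback,
#'    function(d) summary(aov(score ~ factor(drug), data = d)))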
#'
"chapter_7_table_9"
#' The data used in Chapter 7, Table 11
#'
#' The data used in Chapter 7, Table 11
#'
#' Table 7.11 presents this hypothetical data for 15 amnesiacs, 15 Huntington individuals, and 15 controls. The data represent a two-way factorial design where diagnosis and task are fully crossed, each with three levels. Of interest for the results displayed in Table 7.12 is whether the interaction contrast specified in Figures 7.3 and 7.4 is statistically significant. Namely, the question is whether the relationship of the mean of grammar and classification versus recognition differs between the amnesic and Huntington's groups. The SPSS syntax procedure MANOVA is very general and can handle many types of analyses; interaction contrasts are easily performed in this procedure.
#'
#' Consider an example of a cognitive neuroscience study of patient groups. Specifically, suppose that a certain theory implies that amnesic patients will have a deficit in explicit memory but not in implicit memory. According to this theory, Huntington patients, on the other hand, will be just the opposite: They will have no deficit in explicit memory, but will have a deficit in implicit memory. Further suppose that a study is designed yielding a 3x3 factorial design to test this theory. The rows of this study will represent three types of individuals: amnesic patients, Huntington patients, and a control group of individuals with no known neurological disorder. Each research participant will be randomly assigned to one of three tasks: (1) artificial grammar task, which consists of classifying letter sequences as either following or not following grammatical rules; (2) classification learning task, which consists of classifying hypothetical patients as either having or not having a certain disease based on symptoms probabilistically related to the disease; and (3) recognition memory task, which consists of recognizing particular stimuli as stimuli that have previously been presented during the task.
#'
#' @section Variables:
#'\describe{
#'\item{group}{the diagnosis group: amnesic, Huntington's, or control}
#'\item{task}{the task: grammar, classification, or recognition}
#'\item{score}{the participant's score on the task}}
#'
#' @docType data
#' @name chapter_7_table_11
#' @aliases C7T11 chapter_7_table_11 Chapter_7_Table_11 c7t11
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_7_table_11)
#' @section Synonym:
#' C7T11
#' @examples
#' # Load the data
#' data(chapter_7_table_11)
#'
#' # Or, alternatively load the data as
#' data(C7T11)
#'
#' # View the structure
#' str(chapter_7_table_11)
#'
"chapter_7_table_11"
#' The data used in Chapter 7, Table 15
#'
#' The data used in Chapter 7, Table 15
#'
#' The following hypothetical salary data represents a nonorthogonal two-by-two factorial design. The first factor (sex) is crossed with college (degree or no degree). The primary question of interest is whether or not there is sex discrimination in terms of salary.
#'
#' The data in Table 7.15 presents hypothetical data (in thousands) for 12 females and 10 males who have just been hired by the organization. The mean salary for the 12 females is \$22,333, whereas the mean for the 10 males is \$22,100. The data in Table 7.15 also contains information about an additional characteristic of employees, namely whether they received a college degree. It is clear from the data that a majority of the new female employees are college graduates, whereas a majority of the males are not.
#'
#'
#' @section Variables:
#'\describe{
#'\item{sex}{gender (male vs female)}
#'\item{educ}{education level (degree vs no degree)}
#'\item{salary}{salary (in thousands)}
#'}
#'
#' @docType data
#' @name chapter_7_table_15
#' @aliases C7T15 chapter_7_table_15 Chapter_7_Table_15 c7t15
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_7_table_15)
#' @section Synonym:
#' C7T15
#' @examples
#' # Load the data
#' data(chapter_7_table_15)
#'
#' # Or, alternatively load the data as
#' data(C7T15)
#'
#' # View the structure
#' str(chapter_7_table_15)
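#'
#' # A sketch of Type III tests for this nonorthogonal design via sum-to-zero
#' # contrasts and drop1(); the book arrives at these tests through model
#' # comparisons.
#' d <- chapter_7_table_15
#' d$sex <- factor(d$sex); d$educ <- factor(d$educ)
#' fit <- lm(salary ~ sex * educ, data = d,
#'           contrasts = list(sex = "contr.sum", educ = "contr.sum"))
#' drop1(fit, . ~ ., test = "F")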
#'
"chapter_7_table_15"
#' The data used in Chapter 7, Table 23
#'
#' The data used in Chapter 7, Table 23
#'
#' Suppose that a clinical psychologist is interested in comparing the relative effectiveness of three forms of psychotherapy for alleviating depression. Fifteen individuals are randomly assigned to each of three treatment groups: cognitive-behavioral, Rogerian, and assertiveness training. The Depression Scale of the MMPI serves as the dependent variable. After the fact, these individuals were placed into one of three categories based on the severity of their depression. Thus, this data set represents a 3 by 3 nonorthogonal factorial design with post hoc blocking. Table 7.23 shows hypothetical MMPI scores for 45 participants, each of whom is placed in one cell of a 3x3 design. One factor (A, the row factor) is type of therapy. The other factor (B, the column factor) is degree of severity.
#'
#' The data represent the relative effectiveness of three forms of psychotherapy for alleviating depression. Fifteen individuals were randomly assigned to each of three groups. After the fact, these individuals were placed into one of three categories based on the severity of their depression. Thus, this data set represents a 3 by 3 nonorthogonal factorial design with post hoc blocking.
#'
#' @section Variables:
#'\describe{
#'\item{therapy}{the type of therapy}
#'\item{severity}{the severity of the individual's depression}
#'\item{score}{the score of the individual}
#'}
#'
#' @docType data
#' @name chapter_7_table_23
#' @aliases C7T23 chapter_7_table_23 Chapter_7_Table_23 c7t23
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_7_table_23)
#' @section Synonym:
#' C7T23
#' @examples
#' # Load the data
#' data(chapter_7_table_23)
#'
#' # Or, alternatively load the data as
#' data(C7T23)
#'
#' # View the structure
#' str(chapter_7_table_23)
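#'
#' # The same Type III strategy sketched for Table 7.15 applies to this 3 x 3
#' # nonorthogonal design (an illustration, not the book's own syntax):
#' d <- chapter_7_table_23
#' d$therapy <- factor(d$therapy); d$severity <- factor(d$severity)
#' fit <- lm(score ~ therapy * severity, data = d,
#'           contrasts = list(therapy = "contr.sum", severity = "contr.sum"))
#' drop1(fit, . ~ ., test = "F")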
#'
"chapter_7_table_23"
#' The data used in Chapter 8, Table 12
#'
#' The data used in Chapter 8, Table 12
#'
#' This example builds from the hypertension example used in Chapter 7 for the two-way design. The data in Table 8.12 consist of blood pressure scores for 72 participants. Three categorical independent variables: the presence or absence of biofeedback (biofeed), drug X, Y, or Z (drug), and diet absent or present (diet), have been factorially combined to form a 2 x 3 x 2 design where each person contributes one blood pressure score to one of the 12 different experimental conditions. For this example, there are 6 participants in each group.
#'
#' @section Variables:
#'\describe{
#'\item{bp}{patient blood pressure}
#'\item{drug}{drug given (X,Y,or Z)}
#'\item{biofeed}{presence or absence of biofeedback}
#'\item{diet}{presence of absence of a diet}
#'}
#'
#' @docType data
#' @name chapter_8_table_12
#' @aliases C8T12 chapter_8_table_12 Chapter_8_Table_12 c8t12
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_8_table_12)
#' @section Synonym:
#' C8T12
#' @examples
#' # Load the data
#' data(chapter_8_table_12)
#'
#' # Or, alternatively load the data as
#' data(C8T12)
#'
#' # View the structure
#' str(chapter_8_table_12)
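#'
#' # A minimal sketch of the 2 x 3 x 2 between-subjects ANOVA described above:
#' summary(aov(bp ~ factor(drug) * factor(biofeed) * factor(diet),
#'             data = chapter_8_table_12))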
#'
"chapter_8_table_12"
#' The data used in Chapter 9, Table 1
#'
#' The data used in Chapter 9, Table 1
#'
#' The data in Table 9.1 are the numerical values for the data presented in Figure 9.1, which compares errors in the ANOVA and ANCOVA restricted models.
#'
#' The data represents a pre-post design, where a training program designed to assist people in losing weight is evaluated. An initial measure of weight is collected to use as a baseline measure (specifically as a covariate in the present analysis) and then participants are randomly assigned to one of two groups. At the end of the training program another measure of weight is obtained. The question of interest is: "did the participants who received the treatment lose more weight than those that were assigned to the wait-list control group?"
#'
#' @section Variables:
#'\describe{
#'\item{group}{the group (treatment group vs wait-list control group)}
#'\item{x}{the baseline weight, collected before random assignment and used as a covariate}
#'\item{y}{the weight measured at the end of the training program}}
#'
#' @docType data
#' @name chapter_9_table_1
#' @aliases C9T1 chapter_9_table_1 Chapter_9_Table_1 c9t1
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_9_table_1)
#' @section Synonym:
#' C9T1
#' @examples
#' # Load the data
#' data(chapter_9_table_1)
#'
#' # Or, alternatively load the data as
#' data(C9T1)
#'
#' # View the structure
#' str(chapter_9_table_1)
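#'
#' # A minimal ANCOVA sketch: the baseline weight (x) serves as the covariate
#' # and the posttest weight (y) as the outcome.
#' summary(aov(y ~ x + factor(group), data = chapter_9_table_1))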
#'
"chapter_9_table_1"
#' The data used in Chapter 9, Table 7
#'
#' The data used in Chapter 9, Table 7
#'
#' The data shown in Table 9.7 represent a hypothetical three-group study assessing different interventions for depression. 30 depressive individuals have been randomly assigned to one of three conditions: (1) selective serotonin reuptake inhibitor (SSRI) antidepressant medication, (2) placebo, or (3) wait list control. The Beck Depression Inventory (BDI) has been administered to each individual prior to the study and is administered a second time at the end of the study. The data represent a three-group pre-post design, where the 30 depressives were randomly assigned to one of three conditions. The primary question of interest is: "do individuals in some groups change more on their measures of depression than do individuals in other groups?"
#'
#' @section Variables:
#'\describe{
#'\item{cond}{the treatment condition (SSRI, Placebo, Wait List Control)}
#'\item{pre}{the measure of depression before the experiment}
#'\item{post}{the measure of depression after the experiment}
#'}
#'
#' @docType data
#' @name chapter_9_table_7
#' @aliases C9T7 chapter_9_table_7 Chapter_9_Table_7 c9t7
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_9_table_7)
#' @section Synonym:
#' C9T7
#' @examples
#' # Load the data
#' data(chapter_9_table_7)
#'
#' # Or, alternatively load the data as
#' data(C9T7)
#'
#' # View the structure
#' str(chapter_9_table_7)
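#'
#' # A minimal ANCOVA sketch for the pre-post design described above:
#' summary(aov(post ~ pre + factor(cond), data = chapter_9_table_7))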
#'
"chapter_9_table_7"
#' The data used in Chapter 9, Table 11
#'
#' The data used in Chapter 9, Table 11
#'
#' The question of interest in the present situation assumes that there are three blocks of elderly participants, six per block, sorted into blocks as a function of their age. The purpose of the study was to assess the effect of age on motor control, measured by the number of errors on a certain task (there were three tasks). The goal here is to replicate the results from the top portion of Table 9.12. (At this point, performing the ANOVA and the ANCOVA also included in Table 9.12 should be straightforward.)
#'
#' @section Variables:
#'\describe{
#'\item{block}{a numeric vector between 1 and 3, equal to the block of elderly participants (6 per block)}
#'\item{task}{the task given}
#'\item{x}{age}
#'\item{y}{error scores}
#'}
#'
#' @docType data
#' @name chapter_9_table_11
#' @aliases C9T11 chapter_9_table_11 Chapter_9_Table_11 c9t11
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_9_table_11)
#' @section Synonym:
#' C9T11
#' @examples
#' # Load the data
#' data(chapter_9_table_11)
#'
#' # Or, alternatively load the data as
#' data(C9T11)
#'
#' # View the structure
#' str(chapter_9_table_11)
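#'
#' # A hedged sketch of the two analyses mentioned above: a blocking ANOVA and
#' # an ANCOVA using age (x) as the covariate for the error scores (y).
#' summary(aov(y ~ factor(block) + factor(task), data = chapter_9_table_11))
#' summary(aov(y ~ x + factor(task), data = chapter_9_table_11))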
#'
"chapter_9_table_11"
#' The data used in Chapter 9, Extension Table 1
#'
#' The data used in Chapter 9, Extension Table 1
#'
#' Table 9E.1 shows the data from Table 9.1 after some minor modifications to reflect heterogeneity of regression. The data were altered in such a way that the means in both groups are the same as in the original example, as is the pooled within-group slope.
#'
#' @section Variables:
#'\describe{
#'\item{group}{the group (treatment group vs wait-list control group)}
#'\item{x}{the baseline weight, used as a covariate}
#'\item{y}{the weight measured at the end of the training program}
#'}
#'
#' @docType data
#' @name chapter_9_extension_table_1
#' @aliases C9ExtT1 chapter_9_extension_table_1 Chapter_9_Extension_Table_1 c9extt1
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_9_extension_table_1)
#' @section Synonym:
#' C9ExtT1
#' @examples
#' # Load the data
#' data(chapter_9_extension_table_1)
#'
#' # Or, alternatively load the data as
#' data(C9ExtT1)
#'
#' # View the structure
#' str(chapter_9_extension_table_1)
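#'
#' # Heterogeneity of regression can be examined by letting the slope differ
#' # across groups (a sketch, not the book's own syntax):
#' summary(lm(y ~ x * factor(group), data = chapter_9_extension_table_1))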
#'
"chapter_9_extension_table_1"
#' The data used in Chapter 10, Table 5
#'
#' The data used in Chapter 10, Table 5
#'
#' Assume that an educational products firm markets study programs to help high school students prepare for college entrance exams such as the ACT, and wants to compare a new computer-based training program with their standard packet of printed materials. The firm would like to be able to generalize to all American high schools but only has the resources to conduct a study in a few schools. Thus, assume four high schools are selected at random from a listing of all public schools in the country. Volunteers from the junior class at these schools are solicited to take part in an eight-session after-school study program. Ten students from each school are permitted to take part, and equal numbers from each school are assigned randomly to the two study programs. Designating the type of study program as factor A (a1 designates the computer-based program and a2 designates the standard paper-and-pencil program) and the particular school as factor B, assume the data in Table 10.5 are obtained.
#'
#' The data consist of simulated ACT scores from 40 participants, 10 from each of four schools. The schools are assumed to be randomly selected from the population of American public schools so that the results generalize. Within each school, the 10 participants are randomly assigned in equal numbers to the computer-based ACT training program or to the standard paper-and-pencil program in order to assess the relative effectiveness of the two programs.
#'
#' The primary hypothesis of interest is whether the standard paper-and-pencil and computer-based ACT training programs differ in effectiveness.
#'
#' @section Variables:
#'\describe{
#'\item{a}{type of study program}
#'\item{b}{the particular school}
#'\item{act}{the individual's ACT score}
#'}
#'
#' @docType data
#' @name chapter_10_table_5
#' @aliases C10T5 chapter_10_table_5 Chapter_10_Table_5 c10t5
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_10_table_5)
#' @section Synonym:
#' C10T5
#' @examples
#' # Load the data
#' data(chapter_10_table_5)
#'
#' # Or, alternatively load the data as
#' data(C10T5)
#'
#' # View the structure
#' str(chapter_10_table_5)
#'
"chapter_10_table_5"
#' The data used in Chapter 10, Table 9
#'
#' The data used in Chapter 10, Table 9
#'
#' The data in Table 10.9 are based upon the information from the student-therapist example of the random-effects section. Assume that the director of the clinic decides to test for a difference across genders in the general severity ratings that graduate students assign to clients. If three male and three female clinical students are randomly selected to participate, and each is randomly assigned four clients with whom to do an intake interview, then we might obtain data like that shown in Table 10.9. The trainees are nested within their gender.
#'
#' @section Variables:
#'\describe{
#'\item{a}{the trainee's gender}
#'\item{b}{the particular trainee, nested within gender}
#'\item{act}{the general severity rating the trainee assigned to the client}
#'}
#'
#' @docType data
#' @name chapter_10_table_9
#' @aliases C10T9 chapter_10_table_9 Chapter_10_Table_9 c10t9
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_10_table_9)
#' @section Synonym:
#' C10T9
#' @examples
#' # Load the data
#' data(chapter_10_table_9)
#'
#' # Or, alternatively load the data as
#' data(C10T9)
#'
#' # View the structure
#' str(chapter_10_table_9)
#'
"chapter_10_table_9"
#' The data used in Chapter 11, Table 1
#'
#' The data used in Chapter 11, Table 1
#'
#' Table 11.1 displays the data from the observation of six subjects under two treatment conditions, yielding 12 scores in all on the dependent variable.
#'
#' For the data set, six individuals were observed under two different conditions. The question of interest is: "does the mean of the scores in Condition 1 differ from the mean of the scores in Condition 2?"
#'
#' @section Variables:
#'\describe{
#'\item{ycond1}{data from the six subjects under treatment condition 1}
#'\item{ycond2}{data from the six subjects under treatment condition 2}
#'}
#'
#' @docType data
#' @name chapter_11_table_1
#' @aliases C11T1 chapter_11_table_1 Chapter_11_Table_1 c11t1
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_11_table_1)
#' @section Synonym:
#' C11T1
#' @examples
#' # Load the data
#' data(chapter_11_table_1)
#'
#' # Or, alternatively load the data as
#' data(C11T1)
#'
#' # View the structure
#' str(chapter_11_table_1)
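#'
#' # The question described above reduces to a paired t test:
#' with(chapter_11_table_1, t.test(ycond1, ycond2, paired = TRUE))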
#'
"chapter_11_table_1"
#' The data used in Chapter 11, Table 4
#'
#' The data used in Chapter 11, Table 4
#'
#' No analyses are conducted for this data set. The traditional view of a repeated-measures design is to regard it as a two-factor design. Specifically, one factor represents the repeated condition (e.g., time, drug, subtest), whereas the second factor represents subjects. The rationale for this conceptualization can be understood by considering the data in Table 11.4. When the data are displayed this way, the design looks very much like other factorial designs we've already encountered.
#'
#' @section Variables:
#'\describe{
#'\item{ycond1}{a numeric vector}
#'\item{ycond2}{a numeric vector}
#'\item{ycond3}{a numeric vector}
#'\item{ycond4}{a numeric vector}
#'}
#'
#' @docType data
#' @name chapter_11_table_4
#' @aliases C11T4 chapter_11_table_4 Chapter_11_Table_4 c11t4
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_11_table_4)
#' @section Synonym:
#' C11T4
#' @examples
#' # Load the data
#' data(chapter_11_table_4)
#'
#' # Or, alternatively load the data as
#' data(C11T4)
#'
#' # View the structure
#' str(chapter_11_table_4)
#'
"chapter_11_table_4"
#' The data used in Chapter 11, Table 5
#'
#' The data used in Chapter 11, Table 5
#'
#' The data show that 12 participants have been observed in each of 4 conditions. To make the example easier to discuss, let's suppose that the 12 subjects are children who have been observed at 30, 36, 42, and 48 months of age. Essentially, for the present data set, 12 children were each observed four times over an 18-month period. The dependent variable is the age-normed general cognitive score on the McCarthy Scales of Children's Abilities. The interest is in determining whether the children were sampled from a population where growth in cognitive ability is more rapid or less rapid than average.
#'
#' @section Variables:
#'\describe{
#'\item{months30}{age-normed general cognitive score for 30-month-old}
#'\item{months36}{age-normed general cognitive score for 36-month-old}
#'\item{months42}{age-normed general cognitive score for 42-month-old}
#'\item{months48}{age-normed general cognitive score for 48-month-old}
#'}
#'
#' @docType data
#' @name chapter_11_table_5
#' @aliases C11T5 chapter_11_table_5 Chapter_11_Table_5 c11t5
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_11_table_5)
#' @section Synonym:
#' C11T5
#' @examples
#' # Load the data
#' data(chapter_11_table_5)
#'
#' # Or, alternatively load the data as
#' data(C11T5)
#'
#' # View the structure
#' str(chapter_11_table_5)
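#'
#' # A sketch of the univariate mixed-model analysis: stack the four columns
#' # (documented order: months30, months36, months42, months48) into long
#' # format and fit a within-subjects ANOVA.
#' d <- chapter_11_table_5
#' long <- data.frame(id = factor(rep(seq_len(nrow(d)), 4)),
#'                    months = factor(rep(c(30, 36, 42, 48), each = nrow(d))),
#'                    iq = unlist(d, use.names = FALSE))
#' summary(aov(iq ~ months + Error(id), data = long))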
#'
"chapter_11_table_5"
#' The data used in Chapter 11, Table 19
#'
#' The data used in Chapter 11, Table 19
#'
#' Table 11.19 duplicates a table from Shrout and Fleiss showing hypothetical data obtained from four judges, each of whom has rated six targets (i.e., subjects). The present (hypothetical) data set, taken from Shrout and Fleiss (1979), thus consists of six participants who are each rated by four judges.
#'
#' As is pointed out in the book, the structure of Table 11.19 is analogous to that of Table 11.5 (repeated measures). However, notice that the data are entered differently in the data file. For the repeated measures design (e.g., Table 11.5), each row corresponded to a different participant and each column to a different measurement occasion. The difference in how the data are entered here is mainly due to the procedures used to analyze them: SPSS and SAS allow repeated measures data to be entered in a "participants by occasions" fashion, whereas obtaining the appropriate mean squares to carry out the intraclass correlations for the data given in Table 11.19 requires a mixed-effects (one fixed factor and one random factor) ANOVA.
#'
#' @section Variables:
#'\describe{
#'\item{subject}{a numeric vector}
#'\item{judge}{judge number (1 to 4)}
#'\item{rating}{a numeric vector}
#'}
#'
#' @docType data
#' @name chapter_11_table_19
#' @aliases C11T19 chapter_11_table_19 Chapter_11_Table_19 c11t19
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_11_table_19)
#' @section Synonym:
#' C11T19
#' @examples
#' # Load the data
#' data(chapter_11_table_19)
#'
#' # Or, alternatively load the data as
#' data(C11T19)
#'
#' # View the structure
#' str(chapter_11_table_19)
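#'
#' # The mean squares feeding the intraclass correlation formulas come from a
#' # two-way ANOVA with judges fixed and subjects random (a base-R sketch):
#' summary(aov(rating ~ factor(judge) + factor(subject),
#'             data = chapter_11_table_19))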
#'
"chapter_11_table_19"
#' The data used in Chapter 11, Table 20
#'
#' The data used in Chapter 11, Table 20
#'
#' Table 11.20 shows hypothetical data obtained from three judges, each of whom has rated five targets (i.e., subjects). This data will be important in determining if our reliability measure should reflect consistency or agreement. Notice that the rank order of targets is identical for each of the three judges (in fact, not only are the ranks identical, but the scores are also perfectly linearly related to one another in this example). However, in an absolute sense, the ratings provided by Judge 2 are clearly very different from the ratings of the other two judges... Agreement is relatively low in these data, because the columns of scores do not closely resemble one another. However, consistency is high in these data because the relative position of any target in the distribution of scores is identical for each and every judge.
#'
#' The analysis of the data contained in Table 11.20 is carried out in exactly the same manner as was the data contained in Table 11.19. Thus, a mixed-effects ANOVA model is performed in order to obtain the mean squares, which are then used in the formulas given toward the end of Chapter 11.
#'
#' @section Variables:
#'\describe{
#'\item{subject}{the target (subject) being rated}
#'\item{judge}{judge number (1 to 3)}
#'\item{rating}{the rating given to the target}
#'}
#'
#' @docType data
#' @name chapter_11_table_20
#' @aliases C11T20 chapter_11_table_20 Chapter_11_Table_20 c11t20
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_11_table_20)
#' @section Synonym:
#' C11T20
#' @examples
#' # Load the data
#' data(chapter_11_table_20)
#'
#' # Or, alternatively load the data as
#' data(C11T20)
#'
#' # View the structure
#' str(chapter_11_table_20)
#'
"chapter_11_table_20"
#' The data used in Chapter 12, Table 1
#'
#' The data used in Chapter 12, Table 1
#'
#' Suppose that a psychologist studying the visual system was interested in determining the extent to which interfering visual stimuli slow the ability to recognize letters. Subjects are brought into a laboratory and seated in front of a tachistoscope. Subjects are told that they will see either the letter T or the letter I displayed on the screen. In some trials, the letter appears by itself, but in other trials, the target letter is embedded in a group of other letters. This variation in the display constitutes the first factor, which is referred to as noise. The noise factor has two levels -- absent and present. The other factor varied by the experimenter is where in the display the target letter appears. This factor, which is called angle, has three levels. The target letter is either shown at the center of the screen (0 degrees off-center, where the subject has been instructed to fixate), 4 degrees off-center, or 8 degrees off-center (in each case, the deviation from the center varies randomly between left and right).
#' The data in Table 12.1 consist of reaction time scores for 10 participants where each participant contributes 6 scores to the analysis. In particular, each participant is exposed to each of 6 experimental conditions, which are obtained by factorially combining angle (0, 4, and 8) with noise (absent and present). The tests of interest are the omnibus tests within the two-factor within-subjects ANOVA. The dependent measure is reaction time (latency), measured in milliseconds (ms), required by the subject to identify the correct target letter. Each subject has six scores.
#'
#' @section Variables:
#'\describe{
#' \item{abs0}{reaction time w/ noise absent, angle 0}
#' \item{abs4}{reaction time w/ noise absent, angle 4}
#' \item{abs8}{reaction time w/ noise absent, angle 8}
#' \item{pres0}{reaction time w/ noise present, angle 0}
#' \item{pres4}{reaction time w/ noise present, angle 4}
#' \item{pres8}{reaction time w/ noise present, angle 8}
#'}
#'
#' @docType data
#' @name chapter_12_table_1
#' @aliases C12T1 chapter_12_table_1 Chapter_12_Table_1 c12t1
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_12_table_1)
#' @section Synonym:
#' C12T1
#' @examples
#' # Load the data
#' data(chapter_12_table_1)
#'
#' # Or, alternatively load the data as
#' data(C12T1)
#'
#' # View the structure
#' str(chapter_12_table_1)
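#'
#' # A sketch of the two-way within-subjects ANOVA: reshape the six columns
#' # (documented order: abs0, abs4, abs8, pres0, pres4, pres8) to long format.
#' d <- chapter_12_table_1
#' long <- data.frame(
#'   id = factor(rep(seq_len(nrow(d)), 6)),
#'   noise = factor(rep(c("absent", "present"), each = 3 * nrow(d))),
#'   angle = factor(rep(rep(c(0, 4, 8), each = nrow(d)), 2)),
#'   rt = unlist(d, use.names = FALSE))
#' summary(aov(rt ~ noise * angle + Error(id / (noise * angle)), data = long))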
#'
"chapter_12_table_1"
#' The data used in Chapter 12, Table 7
#'
#' The data used in Chapter 12, Table 7
#'
#' Table 12.7 presents scores for the individual subjects for the A effect for the data in Table 12.1. Notice that each score for a given subject is simply that subject's mean response time for that angle, where the mean is the average of the noise-absent and the noise-present scores.
#'
#' @section Variables:
#'\describe{
#'\item{angle0}{mean reaction time for subject at angle factor 0, averaging over noise}
#'\item{angle4}{mean reaction time for subject at angle factor 4, averaging over noise}
#'\item{angle8}{mean reaction time for subject at angle factor 8, averaging over noise}
#'}
#'
#' @docType data
#' @name chapter_12_table_7
#' @aliases C12T7 chapter_12_table_7 Chapter_12_Table_7 c12t7
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_12_table_7)
#' @section Synonym:
#' C12T7
#' @examples
#' # Load the data
#' data(chapter_12_table_7)
#'
#' # Or, alternatively load the data as
#' data(C12T7)
#'
#' # View the structure
#' str(chapter_12_table_7)
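#'
#' # These marginal scores can be reproduced from Table 12.1 by averaging the
#' # noise-absent and noise-present columns at each angle:
#' data(chapter_12_table_1)
#' with(chapter_12_table_1,
#'      data.frame(angle0 = (abs0 + pres0) / 2,
#'                 angle4 = (abs4 + pres4) / 2,
#'                 angle8 = (abs8 + pres8) / 2))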
#'
"chapter_12_table_7"
#' The data used in Chapter 12, Table 9
#'
#' The data used in Chapter 12, Table 9
#'
#' A different covariance matrix is relevant for the B main effect because the B effect averages over levels of A, whereas the A effect averages over levels of B. Table 12.9 presents each subject's mean score for noise absent and noise present, where the mean is the average of the three angle scores at that particular level of noise.
#'
#' @section Variables:
#'\describe{
#' \item{noiseabs}{mean reaction time for subject without noise, averaging over angle}
#' \item{noiseprs}{mean reaction time for subject with noise, averaging over angle}
#'}
#'
#' @docType data
#' @name chapter_12_table_9
#' @aliases C12T9 chapter_12_table_9 Chapter_12_Table_9 c12t9
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_12_table_9)
#' @section Synonym:
#' C12T9
#' @examples
#' # Load the data
#' data(chapter_12_table_9)
#'
#' # Or, alternatively load the data as
#' data(C12T9)
#'
#' # View the structure
#' str(chapter_12_table_9)
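#'
#' # These marginal scores can be reproduced from Table 12.1 by averaging the
#' # three angle columns within each level of noise:
#' data(chapter_12_table_1)
#' with(chapter_12_table_1,
#'      data.frame(noiseabs = (abs0 + abs4 + abs8) / 3,
#'                 noiseprs = (pres0 + pres4 + pres8) / 3))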
#'
"chapter_12_table_9"
#' The data used in Chapter 12, Table 11
#'
#' The data used in Chapter 12, Table 11
#'
#' A third covariance matrix is relevant for the AB interaction because this effect does not average over either A or B. Instead, the interaction assesses whether the B difference is the same at each level of A. Table 12.11 presents scores that address this question. For each subject, a given score represents the noise effect (i.e., reaction time when noise is present minus reaction time when noise is absent) at a particular level of the angle factor.
#'
#' @section Variables:
#'\describe{
#'\item{angle0}{reaction time when noise is present minus reaction time when noise is absent at angle 0}
#'\item{angle4}{reaction time when noise is present minus reaction time when noise is absent at angle 4}
#'\item{angle8}{reaction time when noise is present minus reaction time when noise is absent at angle 8}
#'}
#'
#' @docType data
#' @name chapter_12_table_11
#' @aliases C12T11 chapter_12_table_11 Chapter_12_Table_11 c12t11
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_12_table_11)
#' @section Synonym:
#' C12T11
#' @examples
#' # Load the data
#' data(chapter_12_table_11)
#'
#' # Or, alternatively load the data as
#' data(C12T11)
#'
#' # View the structure
#' str(chapter_12_table_11)
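#'
#' # These noise-effect scores can be reproduced from Table 12.1 by taking the
#' # present-minus-absent difference at each angle:
#' data(chapter_12_table_1)
#' with(chapter_12_table_1,
#'      data.frame(angle0 = pres0 - abs0,
#'                 angle4 = pres4 - abs4,
#'                 angle8 = pres8 - abs8))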
#'
"chapter_12_table_11"
#' The data used in Chapter 12, Table 15
#'
#' The data used in Chapter 12, Table 15
#'
#' The data in Table 12.15 consist of reaction time scores for 10 young participants where each participant contributes 3 scores to the analysis. In particular, each participant is exposed to each of 3 experimental conditions, angle (0, 4, and 8). For the current analyses Table 12.15 is appended to Table 12.7, which contains reaction time scores for 10 old participants for angles of 0, 4, and 8. Thus, it is necessary to perform some data management before analyzing the data.
#'
#' @section Variables:
#'\describe{
#'\item{angle0}{reaction time for the young participant at angle 0}
#'\item{angle4}{reaction time for the young participant at angle 4}
#'\item{angle8}{reaction time for the young participant at angle 8}
#'}
#'
#' @docType data
#' @name chapter_12_table_15
#' @aliases C12T15 chapter_12_table_15 Chapter_12_Table_15 c12t15
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_12_table_15)
#' @section Synonym:
#' C12T15
#' @examples
#' # Load the data
#' data(chapter_12_table_15)
#'
#' # Or, alternatively load the data as
#' data(C12T15)
#'
#' # View the structure
#' str(chapter_12_table_15)
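#'
#' # A sketch of the data management described above: stack the young (Table
#' # 12.15) and old (Table 12.7) scores, then fit a split-plot ANOVA.
#' data(chapter_12_table_7)
#' d <- rbind(data.frame(age = "young", chapter_12_table_15),
#'            data.frame(age = "old", chapter_12_table_7))
#' long <- data.frame(id = factor(rep(seq_len(nrow(d)), 3)),
#'                    age = factor(rep(d$age, 3)),
#'                    angle = factor(rep(c(0, 4, 8), each = nrow(d))),
#'                    rt = c(d$angle0, d$angle4, d$angle8))
#' summary(aov(rt ~ age * angle + Error(id), data = long))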
#'
"chapter_12_table_15"
#' The data used in Chapter 12, Table 21
#'
#' The data used in Chapter 12, Table 21
#'
#' Suppose that we are interested in comparing the effects of three drugs (A, B, and C) on aggressiveness in monkeys. To control for possible order effects, we use a Latin square design. Specifically, we suppose that six subjects are available (as we discussed in Chapter 11, a subject is actually a pair of monkeys in this design). Following the design principles outlined at the end of Chapter 11, we use a replicated Latin square design with two randomly constituted squares. Subjects are then randomly assigned to rows of the squares. The dependent measure can be thought of as the number of aggressive behaviors engaged in during a fixed time period. Notice that each score is a function of three possible influences: subject, time period, and treatment condition (here, drug, with three levels: A, B, or C).
#'
#' To summarize, the data in Table 12.21 consists of hypothetical aggressiveness scores for 6 monkeys who have been exposed to three types of drugs (A, B, and C). In order to control for potential order effects, a Latin square design is utilized. In particular, two randomly constituted squares are formed with three monkeys randomly assigned to each square and also randomly assigned to the particular row of the square that assigns the order that they are exposed to treatment.
#'
#' @section Variables:
#'\describe{
#'\item{dv}{dependent variable; the number of aggressive behaviors in a time period}
#'\item{subject}{one pair of monkeys (6 total)}
#'\item{time}{time period}
#'\item{cond}{treatment condition}
#'\item{square}{a numeric vector}
#'}
#'
#' @docType data
#' @name chapter_12_table_21
#' @aliases C12T21 chapter_12_table_21 Chapter_12_Table_21 c12t21
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_12_table_21)
#' @section Synonym:
#' C12T21
#' @examples
#' # Load the data
#' data(chapter_12_table_21)
#'
#' # Or, alternatively load the data as
#' data(C12T21)
#'
#' # View the structure
#' str(chapter_12_table_21)
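#'
#' # A minimal sketch of the Latin square analysis: each score is modeled as a
#' # function of subject, time period, and treatment condition.
#' summary(aov(dv ~ factor(cond) + factor(time) + factor(subject),
#'             data = chapter_12_table_21))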
#'
"chapter_12_table_21"
#' The data used in Chapter 13, Table 1
#'
#' The data used in Chapter 13, Table 1
#'
#' For the hypothetical data contained in Table 13.1, five participants were measured at two occasions. The question of interest is: "is there a difference between Time 1 and Time 2 scores?"
#'
#' Table 13.1 presents hypothetical data. The null hypothesis to be tested is that population means of Time 1 and Time 2 are equal to one another. This will be tested by forming a difference score. The right-most column of Table 13.1 shows such a difference score, Time 2 score minus Time 1 score, for each subject.
#'
#' @section Variables:
#'\describe{
#'\item{time1}{participant score at time 1}
#'\item{time2}{participant score at time 2}
#'}
#'
#' @docType data
#' @name chapter_13_table_1
#' @aliases C13T1 chapter_13_table_1 Chapter_13_Table_1 c13t1
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_13_table_1)
#' @section Synonym:
#' C13T1
#' @examples
#' # Load the data
#' data(chapter_13_table_1)
#'
#' # Or, alternatively load the data as
#' data(C13T1)
#'
#' # View the structure
#' str(chapter_13_table_1)
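#'
#' # The difference-score test described above is a one-sample t test on D:
#' with(chapter_13_table_1, t.test(time2 - time1))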
#'
"chapter_13_table_1"
#' The data used in Chapter 13, Table 2
#'
#' The data used in Chapter 13, Table 2
#'
#' For the hypothetical data contained in Table 13.2, eight participants were measured at three occasions. The question of interest is: "is there a population mean difference across the Time 1, Time 2, and Time 3 measurement occasions?" Table 13.2 presents hypothetical data for a three-level design. The null hypothesis to be tested is that the population means of scores at all three time points are equal to one another.
#'
#' @section Variables:
#'\describe{
#'\item{time1}{participant score at time 1}
#'\item{time2}{participant score at time 2}
#'\item{time3}{participant score at time 3}
#'}
#'
#' @docType data
#' @name chapter_13_table_2
#' @aliases C13T2 chapter_13_table_2 Chapter_13_Table_2 c13t2
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_13_table_2)
#' @section Synonym:
#' C13T2
#' @examples
#' # Load the data
#' data(chapter_13_table_2)
#'
#' # Or, alternatively load the data as
#' data(C13T2)
#'
#' # View the structure
#' str(chapter_13_table_2)
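#'
#' # A hand-computed sketch of the multivariate test that all three means are
#' # equal: Hotelling's T-squared on two difference variables.
#' d <- chapter_13_table_2
#' D <- cbind(d$time2 - d$time1, d$time3 - d$time2)
#' n <- nrow(D); p <- ncol(D)
#' dbar <- colMeans(D)
#' T2 <- n * sum(dbar * solve(cov(D), dbar))
#' Fstat <- (n - p) / (p * (n - 1)) * T2
#' c(F = Fstat, p.value = pf(Fstat, p, n - p, lower.tail = FALSE))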
#'
"chapter_13_table_2"
#' The data used in Chapter 13, Table 5
#'
#' The data used in Chapter 13, Table 5
#'
#' Table 13.5 presents the hypothetical McCarthy IQ scores for 12 subjects, each measured at ages 30, 36, 42, and 48 months. The McCarthy data contained in Table 13.5, previously analyzed in Table 11.5, are now analyzed using the multivariate approach to repeated measures.
#'
#' @section Variables:
#'\describe{
#'\item{months30}{hypothetical McCarthy IQ for 30-month-old individuals}
#'\item{months36}{hypothetical McCarthy IQ for 36-month-old individuals}
#'\item{months42}{hypothetical McCarthy IQ for 42-month-old individuals}
#'\item{months48}{hypothetical McCarthy IQ for 48-month-old individuals}
#'}
#'
#' @docType data
#' @name chapter_13_table_5
#' @aliases C13T5 chapter_13_table_5 Chapter_13_Table_5 c13t5
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_13_table_5)
#' @section Synonym:
#' C13T5
#' @examples
#' # Load the data
#' data(chapter_13_table_5)
#'
#' # Or, alternatively load the data as
#' data(C13T5)
#'
#' # View the structure
#' str(chapter_13_table_5)
#'
"chapter_13_table_5"
#' The data used in Chapter 13, Table 12
#'
#' The data used in Chapter 13, Table 12
#'
#' For the hypothetical data contained in Table 13.2, the linear and quadratic D variables were formed by making use of the appropriate coefficients from Appendix Table A.10. Because the eight participants were measured at three occasions, both a linear and a quadratic effect can be tested. The question of interest in this instance is: "is there a linear and/or quadratic trend exhibited by the group over time?" Recall that in the book (pages 646-647) it was shown that the D variables for linear and quadratic effects led to an omnibus F test of 19.148, the value previously obtained for the omnibus effect. Because the particular values chosen for the D variables do not matter (unless one column is a linear combination of the others), we focus only on the tests of the individual contrasts when analyzing the data given in Table 13.12. Because columns one and two already represent the linear and quadratic effects, respectively, all that needs to be done is to test the mean of each column to determine whether it differs from zero.
#'
#' @section Variables:
#'\describe{
#'\item{linear}{the score on the linear D variable, formed from the Table 13.2 data}
#'\item{quadratic}{the score on the quadratic D variable, formed from the Table 13.2 data}
#'}
#'
#' @docType data
#' @name chapter_13_table_12
#' @aliases C13T12 chapter_13_table_12 Chapter_13_Table_12 c13t12
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_13_table_12)
#' @section Synonym:
#' C13T12
#' @examples
#' # Load the data
#' data(chapter_13_table_12)
#'
#' # Or, alternatively load the data as
#' data(C13T12)
#'
#' # View the structure
#' str(chapter_13_table_12)
#'
"chapter_13_table_12"
#' The data used in Chapter 13, Table 14
#'
#' The data used in Chapter 13, Table 14
#'
#' Table 13.14 shows the slope of the least-squares regression line for each of the eight subjects, as well as the score on the linear D variable, reproduced from Table 13.12. There is a striking relationship between the numbers in the two columns of Table 13.14. Every subject's score on D is 24 times his or her slope.
#'
#' @section Variables:
#'\describe{
#'\item{slope}{slope of the least-squares regression line for data in Table 13.2}
#'\item{linear}{Linear D variable for data in Table 13.2}
#'}
#'
#' @docType data
#' @name chapter_13_table_14
#' @aliases C13T14 chapter_13_table_14 Chapter_13_Table_14 c13t14
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_13_table_14)
#' @section Synonym:
#' C13T14
#' @examples
#' # Load the data
#' data(chapter_13_table_14)
#'
#' # Or, alternatively load the data as
#' data(C13T14)
#'
#' # View the structure
#' str(chapter_13_table_14)
#'
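#' # A quick numerical check of the relationship described above: every
#' # subject's score on the linear D variable is 24 times his or her slope.
#' with(chapter_13_table_14, all.equal(linear, 24 * slope))
#'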
"chapter_13_table_14"
#' The data used in Chapter 14, Table 1
#'
#' The data used in Chapter 14, Table 1
#'
#' Suppose that a perceptual psychologist studying the visual system was interested in determining the extent to which interfering visual stimuli slow the ability to recognize letters. Participants are brought into a laboratory and seated in front of a tachistoscope. They are told that they will see either the letter T or the letter I displayed on the screen. In some trials, the letter appears by itself, but in other trials the target letter is embedded in a group of other letters. This variation in the display constitutes the first factor, which is referred to as noise. The noise factor has two levels - absent and present. The other factor varied by the experimenter is where in the display the target letter appears. This factor, which is called angle, also has two levels. The target letter is either shown at the center of the screen (where the participant has been told to fixate), or 8 degrees off center (with the deviation from the center randomly varying between left and right). Table 14.1 presents hypothetical data for 10 participants. As usual, the sample size is kept small to minimize the computational burden. The dependent measure is reaction time (or latency) measured in milliseconds. Each participant has four scores, one for each cell of the 2x2 design. In an actual perceptual experiment, each of these four scores would itself be the mean score for that individual across a number of trials in the particular condition.
#'
#' @section Variables:
#'\describe{
#'\item{abs0}{reaction time for participant without noise and at angle 0}
#'\item{abs8}{reaction time for participant without noise and at angle 8}
#'\item{pres0}{reaction time for participant with noise and at angle 0}
#'\item{pres8}{reaction time for participant with noise and at angle 8}
#'}
#'
#' @docType data
#' @name chapter_14_table_1
#' @aliases C14T1 chapter_14_table_1 Chapter_14_Table_1 c14t1
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_14_table_1)
#' @section Synonym:
#' C14T1
#' @examples
#' # Load the data
#' data(chapter_14_table_1)
#'
#' # Or, alternatively load the data as
#' data(C14T1)
#'
#' # View the structure
#' str(chapter_14_table_1)
#'
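#' # A minimal sketch of forming a difference (D) variable for the angle
#' # main effect: average over the noise factor for each participant, then
#' # test the 8-degree minus 0-degree difference against zero.
#' d_angle <- with(chapter_14_table_1, (abs8 + pres8)/2 - (abs0 + pres0)/2)
#' t.test(d_angle)
#'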
"chapter_14_table_1"
#' The data used in Chapter 14, Table 3
#'
#' The data used in Chapter 14, Table 3
#'
#' Using the data in Table 14.1, we could average scores for each participant individually because the noise factor we need to average over is a within-subjects factor. For example, participant 1's average 0 degree score is 450, whereas his or her average 8 degree score is 630. This participant's reaction time averages 180 msec longer (630 vs 450) in the 8 degree condition than in the 0 degree condition. If the other 9 participants' data show a similar pattern, we would infer that there is indeed a main effect due to angle.
#'
#' For the hypothetical data contained in Table 14.1, Table 14.3 gives the set of D variables. The D variables are subsequently used to analyze the data given in Table 14.1. Recall that we analyzed the data contained in Table 14.1 directly using SPSS without (explicitly) forming D variables. Although obtaining the results of the main effects is easily accomplished using the data directly, forming and then analyzing D variables also has its benefits (which are delineated in the chapter). Below we analyze the D variables contained in Table 14.3. As expected, the results match those previously obtained when we analyzed the raw data (i.e., skipping the step of explicitly forming D variables); the method outlined here simply provides a different way to accomplish the same goal. The first column of Table 14.3 (D1) shows these scores for all 10 participants. Indeed, all 10 participants have an average 8 degree reaction time that is slower than their average 0 degree reaction time. Such consistency strongly supports the existence of an angle main effect.
#'
#' @section Variables:
#'\describe{
#'\item{d1}{participant D1 difference score averaged over noise}
#'\item{d2}{participant D2 difference score averaged over noise}
#'\item{d3}{participant D3 difference score averaged over noise}
#'}
#'
#' @docType data
#' @name chapter_14_table_3
#' @aliases C14T3 chapter_14_table_3 Chapter_14_Table_3 c14t3
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_14_table_3)
#' @section Synonym:
#' C14T3
#' @examples
#' # Load the data
#' data(chapter_14_table_3)
#'
#' # Or, alternatively load the data as
#' data(C14T3)
#'
#' # View the structure
#' str(chapter_14_table_3)
#'
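#' # A minimal sketch of testing the angle main effect via the first D
#' # variable, as described above: if the population mean of d1 is zero,
#' # there is no angle main effect.
#' t.test(chapter_14_table_3$d1)
#'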
"chapter_14_table_3"
#' The data used in Chapter 14, Table 4
#'
#' The data used in Chapter 14, Table 4
#'
#' Suppose a perceptual psychologist studying the visual system was interested in determining the extent to which interfering visual stimuli slow the ability to recognize letters. Participants are brought into a laboratory where they are seated in front of a tachistoscope. Variations in the presentation of letters are examined, with interest centering on reaction times for target letters presented either in the center of the screen or off center, with and without "noise" accompanying the target letters.
#'
#' @section Variables:
#'\describe{
#' \item{abs0}{participant reaction time without noise and with angle 0}
#' \item{abs4}{participant reaction time without noise and with angle 4}
#' \item{abs8}{participant reaction time without noise and with angle 8}
#' \item{pres0}{participant reaction time with noise and with angle 0}
#' \item{pres4}{participant reaction time with noise and with angle 4}
#' \item{pres8}{participant reaction time with noise and with angle 8}
#'}
#'
#' @docType data
#' @name chapter_14_table_4
#' @aliases C14T4 chapter_14_table_4 Chapter_14_Table_4 c14t4
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_14_table_4)
#' @section Synonym:
#' C14T4
#' @examples
#' # Load the data
#' data(chapter_14_table_4)
#'
#' # Or, alternatively load the data as
#' data(C14T4)
#'
#' # View the structure
#' str(chapter_14_table_4)
#'
"chapter_14_table_4"
#' The data used in Chapter 14, Table 5
#'
#' The data used in Chapter 14, Table 5
#'
#' In terms of symbols, let D(1i) represent the linear trend for a given angle. For the hypothetical data contained in Table 14.4, Table 14.5 gives an appropriate and substantively interesting set of D variables. The D variables (rather than the raw data themselves) are used because of the benefits and flexibility gained from analyzing the D variables directly (rather than indirectly, as we did with the Table 14.4 data).
#'
#' @section Variables:
#'\describe{
#'\item{d1}{participant D1 variable}
#'\item{d2}{participant D2 variable}
#'\item{d3}{participant D3 variable}
#'\item{d4}{participant D4 variable}
#'\item{d5}{participant D5 variable}
#'}
#'
#' @docType data
#' @name chapter_14_table_5
#' @aliases C14T5 chapter_14_table_5 Chapter_14_Table_5 c14t5
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_14_table_5)
#' @section Synonym:
#' C14T5
#' @examples
#' # Load the data
#' data(chapter_14_table_5)
#'
#' # Or, alternatively load the data as
#' data(C14T5)
#'
#' # View the structure
#' str(chapter_14_table_5)
#'
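#' # A minimal sketch of testing each contrast against zero (the chapter
#' # gives the substantive interpretation of each D variable):
#' t.test(chapter_14_table_5$d1)
#'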
"chapter_14_table_5"
#' The data used in Chapter 14, Table 8
#'
#' The data used in Chapter 14, Table 8
#'
#' For the hypothetical data contained in Table 14.8, a perceptual psychologist is interested in age differences ("young" and "old") in reaction time on a perceptual task. In addition, the psychologist is also interested in the effect of angle (zero degrees off center and eight degrees off center). The questions of interest are whether there is a main effect of age, a main effect of angle, and an interaction between the two. Table 14.8 presents the same data that we analyzed in chapter 12 for 10 young participants and 10 old participants, except that for the moment we are only analyzing data from the 0 degree and 8 degree conditions of the angle factor.
#'
#' In any two factor design, the effects to be tested are typically the two main effects and the two-way interaction. In our example, then, we test the main effect of age (designated A), the main effect of angle (designated B), and the interaction of age and angle.
#'
#' @section Variables:
#'\describe{
#'\item{angle0}{participant reaction time at angle 0}
#'\item{angle8}{participant reaction time at angle 8}
#'\item{group}{participant age (young or old)}
#'}
#'
#' @docType data
#' @name chapter_14_table_8
#' @aliases C14T8 chapter_14_table_8 Chapter_14_Table_8 c14t8
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_14_table_8)
#' @section Synonym:
#' C14T8
#' @examples
#' # Load the data
#' data(chapter_14_table_8)
#'
#' # Or, alternatively load the data as
#' data(C14T8)
#'
#' # View the structure
#' str(chapter_14_table_8)
#'
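#' # A minimal sketch of the two-factor analysis described above (age as a
#' # between-subjects factor, angle as a within-subjects factor), using base
#' # R after reshaping to long format:
#' long <- reshape(chapter_14_table_8, direction = "long",
#'                 varying = c("angle0", "angle8"), v.names = "rt",
#'                 timevar = "angle", times = c(0, 8), idvar = "id")
#' summary(aov(rt ~ group * factor(angle) + Error(factor(id)), data = long))
#'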
"chapter_14_table_8"
#' The data used in Chapter 14, Table 10
#'
#' The data used in Chapter 14, Table 10
#'
#' These data are identical to those analyzed in chapter 12 (see Tables 12.7 and 12.15) to facilitate comparisons of the multivariate approach and the mixed-model approach. The hypothetical data contained in Table 14.10 contain an additional level of angle (four degrees) that was not considered in Table 14.8.
#'
#' @section Variables:
#'\describe{
#'\item{angle0}{participant reaction time at angle 0}
#'\item{angle4}{participant reaction time at angle 4}
#'\item{angle8}{participant reaction time at angle 8}
#'\item{group}{participant group number}
#'}
#'
#' @docType data
#' @name chapter_14_table_10
#' @aliases C14T10 chapter_14_table_10 Chapter_14_Table_10 c14t10
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_14_table_10)
#' @section Synonym:
#' C14T10
#' @examples
#' # Load the data
#' data(chapter_14_table_10)
#'
#' # Or, alternatively load the data as
#' data(C14T10)
#'
#' # View the structure
#' str(chapter_14_table_10)
#'
"chapter_14_table_10"
#' The data used in Chapter 15, Table 1
#'
#' The data used in Chapter 15, Table 1
#'
#' The first table in Chapter 15 presents the hypothetical McCarthy data again (originally shown in Table 11.5). This data set is used throughout the chapter to illustrate the discussion of the mixed model. At the time of this writing, the mixed-model procedure in SPSS is not as fully developed as its SAS analog, PROC MIXED. For this reason, no SPSS analyses are provided to replicate the analyses given in Chapter 15.
#'
#'@section Variables:
#' \describe{
#' \item{months30}{McCarthy IQ score for 30-month-old}
#' \item{months36}{McCarthy IQ score for 36-month-old}
#' \item{months42}{McCarthy IQ score for 42-month-old}
#' \item{months48}{McCarthy IQ score for 48-month-old}}
#'
#'
#' @docType data
#' @name chapter_15_table_1
#' @aliases C15T1 chapter_15_table_1 Chapter_15_Table_1 c15t1
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_15_table_1)
#' @section Synonym:
#' C15T1
#' @examples
#' # Load the data
#' data(chapter_15_table_1)
#'
#' # Or, alternatively load the data as
#' data(C15T1)
#'
#' # View the structure
#' str(chapter_15_table_1)
#'
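#' # A minimal sketch of a univariate repeated-measures analysis in base R
#' # (a simple stand-in for the mixed-model analyses discussed in the
#' # chapter):
#' long <- reshape(chapter_15_table_1, direction = "long",
#'                 varying = c("months30", "months36", "months42", "months48"),
#'                 v.names = "iq", timevar = "age",
#'                 times = c(30, 36, 42, 48), idvar = "id")
#' summary(aov(iq ~ factor(age) + Error(factor(id)), data = long))
#'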
"chapter_15_table_1"
#' The data used in Chapter 16, Table 1
#'
#' The data used in Chapter 16, Table 1
#'
#' The first table in Chapter 16 presents the Severity Ratings by Clinical Trainees, which was originally given in Table 10.9. The data set is analyzed again using the multilevel model approach and the results are compared with those obtained in Chapter 10. As a brief background, the goal of the study is to examine the extent to which female and male clinical psychology graduate student trainees may assign different severity ratings to clients at initial intake. Three female and three male graduate students are randomly selected to participate and each is randomly assigned four clients with whom to do an intake interview, after which each clinical trainee assigns a severity rating to each client, producing the data shown in Table 16.1.
#'
#'@section Variables:
#' \describe{
#' \item{severity}{severity rating assigned to client by trainee}
#' \item{trainee}{trainee}
#' \item{gender}{gender of trainee}}
#'
#' @docType data
#' @name chapter_16_table_1
#' @aliases C16T1 chapter_16_table_1 Chapter_16_Table_1 c16t1
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_16_table_1)
#' @section Synonym:
#' C16T1
#' @examples
#' # Load the data
#' data(chapter_16_table_1)
#'
#' # Or, alternatively load the data as
#' data(C16T1)
#'
#' # View the structure
#' str(chapter_16_table_1)
#'
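#' # A minimal sketch of the nested analysis described above: trainees are
#' # nested within gender, so the gender effect is tested against
#' # trainee-to-trainee variation.
#' summary(aov(severity ~ gender + Error(factor(trainee)),
#'             data = chapter_16_table_1))
#'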
"chapter_16_table_1"
#' The data used in Chapter 16, Table 4
#'
#' The data used in Chapter 16, Table 4
#'
#' The hypothetical data contained in Table 16.4 represent data from 29 children who participated in a study to evaluate the effectiveness of an intervention designed to increase inductive reasoning skills. The children are nested within one of six classrooms, and each classroom was assigned to either the control or the experimental condition. The question of interest is whether the children who participated in the experimental group actually improved their inductive reasoning ability.
#'
#' The 15 children with condition values of 0 received the control, whereas the 14 children with condition values of 1 received the treatment. Four of the children in the control condition were students in control Classroom 1, 6 were students in control Classroom 2, and 5 were students in control Classroom 3. Similarly, 3 of the students in the treatment condition were students in treatment Classroom 1, 5 were students in treatment Classroom 2, and 6 were students in treatment Classroom 3. Scores on the dependent variable appear in the rightmost column under the variable label "induct". The variable labeled "cog" in Table 16.4 represents cognitive ability scores obtained for each student sometime prior to assigning classrooms to treatment conditions. The variable labeled "skill" represents a global measure of each teacher's teaching skill, once again obtained prior to assigning classrooms to treatment conditions.
#'
#'@section Variables:
#' \describe{
#' \item{obs}{observation/participant number}
#' \item{room}{participant classroom placement}
#' \item{cond}{participant condition (0=control, 1=treatment)}
#' \item{cog}{participant cognitive ability score}
#' \item{skill}{participant's teacher's teaching skill}
#' \item{induct}{induction; scores on the dependent variable}}
#'
#' @docType data
#' @name chapter_16_table_4
#' @aliases C16T4 chapter_16_table_4 Chapter_16_Table_4 c16t4
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_16_table_4)
#' @section Synonym:
#' C16T4
#' @examples
#' # Load the data
#' data(chapter_16_table_4)
#'
#' # Or, alternatively load the data as
#' data(C16T4)
#'
#' # View the structure
#' str(chapter_16_table_4)
#'
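#' # A minimal sketch of a two-level comparison that respects the nesting of
#' # children within classrooms (assuming `room` identifies classrooms within
#' # each condition): use classroom means as the unit of analysis.
#' room_means <- aggregate(induct ~ cond + room, data = chapter_16_table_4,
#'                         FUN = mean)
#' t.test(induct ~ cond, data = room_means)
#'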
"chapter_16_table_4"
# Exercises
#######################################################################################################################################
#' The data used in Chapter 1, Exercise 18
#'
#' Data from Chapter 1 Exercise 18 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Promoted.
#' \item Minority.
#' \item Freq. Frequency}
#'
#' @docType data
#' @name chapter_1_exercise_18
#' @aliases C1E18 chapter_1_exercise_18 Chapter_1_Exercise_18 c1e18
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @keywords datasets
#' @usage data(chapter_1_exercise_18)
#' @section Synonym:
#' C1E18
#' @examples
#' # Load the data
#' data(chapter_1_exercise_18)
#'
#' # Or, alternatively load the data as
#' data(C1E18)
#'
#' # View the structure
#' str(chapter_1_exercise_18)
#'
#' # Brief summary of the data.
#' summary(chapter_1_exercise_18)
#'
"chapter_1_exercise_18"
#' The data used in Chapter 1, Exercise 19
#'
#' Data from Chapter 1 Exercise 19 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Convicted.
#' \item Monozygotic.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_1_exercise_19
#' @aliases chapter_1_exercise_19 Chapter_1_Exercise_19 C1E19 c1e19
#' @keywords datasets
#' @usage data(chapter_1_exercise_19)
#' @section Synonym:
#' C1E19
#' @examples
#' # Load the data
#' data(chapter_1_exercise_19)
#'
#' # Or, alternatively load the data as
#' data(C1E19)
#'
#' # View the structure
#' str(chapter_1_exercise_19)
#'
#' # Brief summary of the data.
#' summary(chapter_1_exercise_19)
#'
"chapter_1_exercise_19"
#' The data used in Chapter 1, Exercise 21
#'
#' Data from Chapter 1 Exercise 21 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Experimental.
#' \item Control.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_1_exercise_21
#' @aliases chapter_1_exercise_21 Chapter_1_Exercise_21 C1E21 c1e21
#' @keywords datasets
#' @usage data(chapter_1_exercise_21)
#' @section Synonym:
#' C1E21
#' @examples
#' # Load the data
#' data(chapter_1_exercise_21)
#'
#' # Or, alternatively load the data as
#' data(C1E21)
#'
#' # View the structure
#' str(chapter_1_exercise_21)
#'
#' # Brief summary of the data.
#' summary(chapter_1_exercise_21)
#'
"chapter_1_exercise_21"
#' The data used in Chapter 1, Exercise 22
#'
#' Data from Chapter 1 Exercise 22 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Pot.
#' \item Crossed.
#' \item SelfFertilized.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_1_exercise_22
#' @aliases chapter_1_exercise_22 Chapter_1_Exercise_22 C1E22 c1e22
#' @keywords datasets
#' @usage data(chapter_1_exercise_22)
#' @section Synonym:
#' C1E22
#' @examples
#' # Load the data
#' data(chapter_1_exercise_22)
#'
#' # Or, alternatively load the data as
#' data(C1E22)
#'
#' # View the structure
#' str(chapter_1_exercise_22)
#'
#' # Brief summary of the data.
#' summary(chapter_1_exercise_22)
#'
"chapter_1_exercise_22"
#' The data used in Chapter 1, Exercise 23
#'
#' Data from Chapter 1 Exercise 23 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Group.
#' \item Cholesterol.
#' \item Cholesterol_Category_MedianSplit.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_1_exercise_23
#' @aliases chapter_1_exercise_23 Chapter_1_Exercise_23 C1E23 c1e23
#' @keywords datasets
#' @usage data(chapter_1_exercise_23)
#' @section Synonym:
#' C1E23
#' @examples
#' # Load the data
#' data(chapter_1_exercise_23)
#'
#' # Or, alternatively load the data as
#' data(C1E23)
#'
#' # View the structure
#' str(chapter_1_exercise_23)
#'
#' # Brief summary of the data.
#' summary(chapter_1_exercise_23)
#'
"chapter_1_exercise_23"
#' The data used in Chapter 3, Exercise 9
#'
#' Data from Chapter 3 Exercise 9 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Group.
#' \item Scores.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_3_exercise_9
#' @aliases chapter_3_exercise_9 Chapter_3_Exercise_9 C3E9 c3e9
#' @keywords datasets
#' @usage data(chapter_3_exercise_9)
#' @section Synonym:
#' C3E9
#' @examples
#' # Load the data
#' data(chapter_3_exercise_9)
#'
#' # Or, alternatively load the data as
#' data(C3E9)
#'
#' # View the structure
#' str(chapter_3_exercise_9)
#'
#' # Brief summary of the data.
#' summary(chapter_3_exercise_9)
#'
"chapter_3_exercise_9"
#' The data used in Chapter 3, Exercise 10
#'
#' Data from Chapter 3 Exercise 10 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Experiment.
#' \item Experimental.
#' \item Control.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_3_exercise_10
#' @aliases chapter_3_exercise_10 Chapter_3_Exercise_10 C3E10 c3e10
#' @keywords datasets
#' @usage data(chapter_3_exercise_10)
#' @section Synonym:
#' C3E10
#' @examples
#' # Load the data
#' data(chapter_3_exercise_10)
#'
#' # Or, alternatively load the data as
#' data(C3E10)
#'
#' # View the structure
#' str(chapter_3_exercise_10)
#'
#' # Brief summary of the data.
#' summary(chapter_3_exercise_10)
#'
"chapter_3_exercise_10"
#' The data used in Chapter 3, Exercise 11
#'
#' Data from Chapter 3 Exercise 11 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Condition.
#' \item Scores.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references Maxwell, S. E., Delaney, H. D., \& Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_3_exercise_11
#' @aliases chapter_3_exercise_11 Chapter_3_Exercise_11 C3E11 c3e11
#' @keywords datasets
#' @usage data(chapter_3_exercise_11)
#' @section Synonym:
#' C3E11
#' @examples
#' # Load the data
#' data(chapter_3_exercise_11)
#'
#' # Or, alternatively load the data as
#' data(C3E11)
#'
#' # View the structure
#' str(chapter_3_exercise_11)
#'
#' # Brief summary of the data.
#' summary(chapter_3_exercise_11)
#'
"chapter_3_exercise_11"
#' The data used in Chapter 3, Exercise 19
#'
#' Data from Chapter 3 Exercise 19 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item ID.
#' \item Condition.
#' \item Anger.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_3_exercise_19
#' @aliases chapter_3_exercise_19 Chapter_3_Exercise_19 C3E19 c3e19
#' @keywords datasets
#' @usage data(chapter_3_exercise_19)
#' @section Synonym:
#' C3E19
#' @examples
#' # Load the data
#' data(chapter_3_exercise_19)
#'
#' # Or, alternatively load the data as
#' data(C3E19)
#'
#' # View the structure
#' str(chapter_3_exercise_19)
#'
#' # Brief summary of the data.
#' summary(chapter_3_exercise_19)
#'
"chapter_3_exercise_19"
#' The data used in Chapter 3, Exercise 20
#'
#' Data from Chapter 3 Exercise 20 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item ID.
#' \item Condition.
#' \item Anger.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_3_exercise_20
#' @aliases chapter_3_exercise_20 Chapter_3_Exercise_20 C3E20 c3e20
#' @keywords datasets
#' @usage data(chapter_3_exercise_20)
#' @section Synonym:
#' C3E20
#' @examples
#' # Load the data
#' data(chapter_3_exercise_20)
#'
#' # Or, alternatively load the data as
#' data(C3E20)
#'
#' # View the structure
#' str(chapter_3_exercise_20)
#'
#' # Brief summary of the data.
#' summary(chapter_3_exercise_20)
#'
"chapter_3_exercise_20"
#' The data used in Chapter 3, Exercise 21
#'
#' Data from Chapter 3 Exercise 21 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Condition.
#' \item Exercise.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_3_exercise_21
#' @aliases chapter_3_exercise_21 Chapter_3_Exercise_21 C3E21 c3e21
#' @keywords datasets
#' @usage data(chapter_3_exercise_21)
#' @section Synonym:
#' C3E21
#' @examples
#' # Load the data
#' data(chapter_3_exercise_21)
#'
#' # Or, alternatively load the data as
#' data(C3E21)
#'
#' # View the structure
#' str(chapter_3_exercise_21)
#'
#' # Brief summary of the data.
#' summary(chapter_3_exercise_21)
#'
"chapter_3_exercise_21"
#' The data used in Chapter 3, Exercise 22
#'
#' Data from Chapter 3 Exercise 22 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Grade.
#' \item Treatment.
#' \item IQPre.
#' \item IQ4.
#' \item IQ8.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_3_exercise_22
#' @aliases chapter_3_exercise_22 Chapter_3_Exercise_22 C3E22 c3e22
#' @keywords datasets
#' @usage data(chapter_3_exercise_22)
#' @section Synonym:
#' C3E22
#' @examples
#' # Load the data
#' data(chapter_3_exercise_22)
#'
#' # Or, alternatively load the data as
#' data(C3E22)
#'
#' # View the structure
#' str(chapter_3_exercise_22)
#'
#' # Brief summary of the data.
#' summary(chapter_3_exercise_22)
#'
"chapter_3_exercise_22"
#' The data used in Chapter 4, Exercise 11
#'
#' Data from Chapter 4 Exercise 11 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item dv.
#' \item cond.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_4_exercise_11
#' @aliases chapter_4_exercise_11 Chapter_4_Exercise_11 C4E11 c4e11
#' @keywords datasets
#' @usage data(chapter_4_exercise_11)
#' @section Synonym:
#' C4E11
#' @examples
#' # Load the data
#' data(chapter_4_exercise_11)
#'
#' # Or, alternatively load the data as
#' data(C4E11)
#'
#' # View the structure
#' str(chapter_4_exercise_11)
#'
#' # Brief summary of the data.
#' summary(chapter_4_exercise_11)
#'
"chapter_4_exercise_11"
#' The data used in Chapter 4, Exercise 12
#'
#' Data from Chapter 4 Exercise 12 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item group.
#' \item y.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_4_exercise_12
#' @aliases chapter_4_exercise_12 Chapter_4_Exercise_12 C4E12 c4e12
#' @keywords datasets
#' @usage data(chapter_4_exercise_12)
#' @section Synonym:
#' C4E12
#' @examples
#' # Load the data
#' data(chapter_4_exercise_12)
#'
#' # Or, alternatively load the data as
#' data(C4E12)
#'
#' # View the structure
#' str(chapter_4_exercise_12)
#'
#' # Brief summary of the data.
#' summary(chapter_4_exercise_12)
#'
"chapter_4_exercise_12"
#' The data used in Chapter 4, Exercise 13
#'
#' Data from Chapter 4 Exercise 13 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item dv.
#' \item cond.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_4_exercise_13
#' @aliases chapter_4_exercise_13 Chapter_4_Exercise_13 C4E13 c4e13
#' @keywords datasets
#' @usage data(chapter_4_exercise_13)
#' @section Synonym:
#' C4E13
#' @examples
#' # Load the data
#' data(chapter_4_exercise_13)
#'
#' # Or, alternatively load the data as
#' data(C4E13)
#'
#' # View the structure
#' str(chapter_4_exercise_13)
#'
#' # Brief summary of the data.
#' summary(chapter_4_exercise_13)
#'
"chapter_4_exercise_13"
#' The data used in Chapter 5, Exercise 5
#'
#' Data from Chapter 5 Exercise 5 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item dv.
#' \item cond.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_5_exercise_5
#' @aliases chapter_5_exercise_5 Chapter_5_Exercise_5 C5E5 c5e5
#' @keywords datasets
#' @usage data(chapter_5_exercise_5)
#' @section Synonym:
#' C5E5
#' @examples
#' # Load the data
#' data(chapter_5_exercise_5)
#'
#' # Or, alternatively load the data as
#' data(C5E5)
#'
#' # View the structure
#' str(chapter_5_exercise_5)
#'
#' # Brief summary of the data.
#' summary(chapter_5_exercise_5)
#'
"chapter_5_exercise_5"
#' The data used in Chapter 5, Exercise 10
#'
#' Data from Chapter 5 Exercise 10 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item cond.
#' \item score.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_5_exercise_10
#' @aliases chapter_5_exercise_10 Chapter_5_Exercise_10 C5E10 c5e10
#' @keywords datasets
#' @usage data(chapter_5_exercise_10)
#' @section Synonym:
#' C5E10
#' @examples
#' # Load the data
#' data(chapter_5_exercise_10)
#'
#' # Or, alternatively load the data as
#' data(C5E10)
#'
#' # View the structure
#' str(chapter_5_exercise_10)
#'
#' # Brief summary of the data.
#' summary(chapter_5_exercise_10)
#'
"chapter_5_exercise_10"
#' The data used in Chapter 5, Exercise 16
#'
#' Data from Chapter 5 Exercise 16 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item cond.
#' \item scores.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_5_exercise_16
#' @aliases chapter_5_exercise_16 Chapter_5_Exercise_16 C5E16 c5e16
#' @keywords datasets
#' @usage data(chapter_5_exercise_16)
#' @section Synonym:
#' C5E16
#' @examples
#' # Load the data
#' data(chapter_5_exercise_16)
#'
#' # Or, alternatively load the data as
#' data(C5E16)
#'
#' # View the structure
#' str(chapter_5_exercise_16)
#'
#' # Brief summary of the data.
#' summary(chapter_5_exercise_16)
#'
"chapter_5_exercise_16"
#' The data used in Chapter 6, Exercise 10
#'
#' Data from Chapter 6 Exercise 10 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item cond.
#' \item score.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_6_exercise_10
#' @aliases chapter_6_exercise_10 Chapter_6_Exercise_10 C6E10 c6e10
#' @keywords datasets
#' @usage data(chapter_6_exercise_10)
#' @section Synonym:
#' C6E10
#' @examples
#' # Load the data
#' data(chapter_6_exercise_10)
#'
#' # Or, alternatively load the data as
#' data(C6E10)
#'
#' # View the structure
#' str(chapter_6_exercise_10)
#'
#' # Brief summary of the data.
#' summary(chapter_6_exercise_10)
#'
"chapter_6_exercise_10"
#' The data used in Chapter 6, Exercise 14
#'
#' Data from Chapter 6 Exercise 14 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Proportion.
#' \item Months.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_6_exercise_14
#' @aliases chapter_6_exercise_14 Chapter_6_Exercise_14 C6E14 c6e14
#' @keywords datasets
#' @usage data(chapter_6_exercise_14)
#' @section Synonym:
#' C6E14
#' @examples
#' # Load the data
#' data(chapter_6_exercise_14)
#'
#' # Or, alternatively load the data as
#' data(C6E14)
#'
#' # View the structure
#' str(chapter_6_exercise_14)
#'
#' # Brief summary of the data.
#' summary(chapter_6_exercise_14)
#'
"chapter_6_exercise_14"
#' The data used in Chapter 6, Exercise 16
#'
#' Data from Chapter 6 Exercise 16 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item id.
#' \item group.
#' \item y.
#' \item latency.
#' \item latency_2.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_6_exercise_16
#' @aliases chapter_6_exercise_16 Chapter_6_Exercise_16 C6E16 c6e16
#' @keywords datasets
#' @usage data(chapter_6_exercise_16)
#' @section Synonym:
#' C6E16
#' @examples
#' # Load the data
#' data(chapter_6_exercise_16)
#'
#' # Or, alternatively load the data as
#' data(C6E16)
#'
#' # View the structure
#' str(chapter_6_exercise_16)
#'
#' # Brief summary of the data.
#' summary(chapter_6_exercise_16)
#'
"chapter_6_exercise_16"
#' The data used in Chapter 7, Exercise 6
#'
#' Data from Chapter 7 Exercise 6 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Treatment.
#' \item Level.
#' \item Score.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_7_exercise_6
#' @aliases chapter_7_exercise_6 Chapter_7_Exercise_6 C7E6 c7e6
#' @keywords datasets
#' @usage data(chapter_7_exercise_6)
#' @section Synonym:
#' C7E6
#' @examples
#' # Load the data
#' data(chapter_7_exercise_6)
#'
#' # Or, alternatively load the data as
#' data(C7E6)
#'
#' # View the structure
#' str(chapter_7_exercise_6)
#'
#' # Brief summary of the data.
#' summary(chapter_7_exercise_6)
#'
"chapter_7_exercise_6"
#' The data used in Chapter 7, Exercise 9
#'
#' Data from Chapter 7 Exercise 9 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Treatment.
#' \item Level.
#' \item Score.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_7_exercise_9
#' @aliases chapter_7_exercise_9 Chapter_7_Exercise_9 C7E9 c7e9
#' @keywords datasets
#' @usage data(chapter_7_exercise_9)
#' @section Synonym:
#' C7E9
#' @examples
#' # Load the data
#' data(chapter_7_exercise_9)
#'
#' # Or, alternatively load the data as
#' data(C7E9)
#'
#' # View the structure
#' str(chapter_7_exercise_9)
#'
#' # Brief summary of the data.
#' summary(chapter_7_exercise_9)
#'
"chapter_7_exercise_9"
#' The data used in Chapter 7, Exercise 12
#'
#' Data from Chapter 7 Exercise 12 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item ALevel.
#' \item BLevel.
#' \item Score.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_7_exercise_12
#' @aliases chapter_7_exercise_12 Chapter_7_Exercise_12 C7E12 c7e12
#' @keywords datasets
#' @usage data(chapter_7_exercise_12)
#' @section Synonym:
#' C7E12
#' @examples
#' # Load the data
#' data(chapter_7_exercise_12)
#'
#' # Or, alternatively load the data as
#' data(C7E12)
#'
#' # View the structure
#' str(chapter_7_exercise_12)
#'
#' # Brief summary of the data.
#' summary(chapter_7_exercise_12)
#'
"chapter_7_exercise_12"
#' The data used in Chapter 7, Exercise 13
#'
#' Data from Chapter 7 Exercise 13 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Age.
#' \item Gender.
#' \item Score.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_7_exercise_13
#' @aliases chapter_7_exercise_13 Chapter_7_Exercise_13 C7E13 c7e13
#' @keywords datasets
#' @usage data(chapter_7_exercise_13)
#' @section Synonym:
#' C7E13
#' @examples
#' # Load the data
#' data(chapter_7_exercise_13)
#'
#' # Or, alternatively load the data as
#' data(C7E13)
#'
#' # View the structure
#' str(chapter_7_exercise_13)
#'
#' # Brief summary of the data.
#' summary(chapter_7_exercise_13)
#'
"chapter_7_exercise_13"
#' The data used in Chapter 7, Exercise 14
#'
#' Data from Chapter 7 Exercise 14 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item cond.
#' \item status.
#' \item score.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_7_exercise_14
#' @aliases chapter_7_exercise_14 Chapter_7_Exercise_14 C7E14 c7e14
#' @keywords datasets
#' @usage data(chapter_7_exercise_14)
#' @section Synonym:
#' C7E14
#' @examples
#' # Load the data
#' data(chapter_7_exercise_14)
#'
#' # Or, alternatively load the data as
#' data(C7E14)
#'
#' # View the structure
#' str(chapter_7_exercise_14)
#'
#' # Brief summary of the data.
#' summary(chapter_7_exercise_14)
#'
"chapter_7_exercise_14"
#' The data used in Chapter 7, Exercise 15
#'
#' Data from Chapter 7 Exercise 15 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Gender.
#' \item Cond.
#' \item Score.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_7_exercise_15
#' @aliases chapter_7_exercise_15 Chapter_7_Exercise_15 C7E15 c7e15
#' @keywords datasets
#' @usage data(chapter_7_exercise_15)
#' @section Synonym:
#' C7E15
#' @examples
#' # Load the data
#' data(chapter_7_exercise_15)
#'
#' # Or, alternatively load the data as
#' data(C7E15)
#'
#' # View the structure
#' str(chapter_7_exercise_15)
#'
#' # Brief summary of the data.
#' summary(chapter_7_exercise_15)
#'
"chapter_7_exercise_15"
#' The data used in Chapter 7, Exercise 18
#'
#' Data from Chapter 7 Exercise 18 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item level.
#' \item gender.
#' \item salary.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_7_exercise_18
#' @aliases chapter_7_exercise_18 Chapter_7_Exercise_18 C7E18 c7e18
#' @keywords datasets
#' @usage data(chapter_7_exercise_18)
#' @section Synonym:
#' C7E18
#' @examples
#' # Load the data
#' data(chapter_7_exercise_18)
#'
#' # Or, alternatively load the data as
#' data(C7E18)
#'
#' # View the structure
#' str(chapter_7_exercise_18)
#'
#' # Brief summary of the data.
#' summary(chapter_7_exercise_18)
#'
"chapter_7_exercise_18"
#' The data used in Chapter 7, Exercise 19
#'
#' Data from Chapter 7 Exercise 19 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item race.
#' \item courses.
#' \item scores.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_7_exercise_19
#' @aliases chapter_7_exercise_19 Chapter_7_Exercise_19 C7E19 c7e19
#' @keywords datasets
#' @usage data(chapter_7_exercise_19)
#' @section Synonym:
#' C7E19
#' @examples
#' # Load the data
#' data(chapter_7_exercise_19)
#'
#' # Or, alternatively load the data as
#' data(C7E19)
#'
#' # View the structure
#' str(chapter_7_exercise_19)
#'
#' # Brief summary of the data.
#' summary(chapter_7_exercise_19)
#'
"chapter_7_exercise_19"
#' The data used in Chapter 7, Exercise 22
#'
#' Data from Chapter 7 Exercise 22 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item id.
#' \item bpd.
#' \item drug.
#' \item trust.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_7_exercise_22
#' @aliases chapter_7_exercise_22 Chapter_7_Exercise_22 C7E22 c7e22
#' @keywords datasets
#' @usage data(chapter_7_exercise_22)
#' @section Synonym:
#' C7E22
#' @examples
#' # Load the data
#' data(chapter_7_exercise_22)
#'
#' # Or, alternatively load the data as
#' data(C7E22)
#'
#' # View the structure
#' str(chapter_7_exercise_22)
#'
#' # Brief summary of the data.
#' summary(chapter_7_exercise_22)
#'
"chapter_7_exercise_22"
#' The data used in Chapter 7, Exercise 23
#'
#' Data from Chapter 7 Exercise 23 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item id.
#' \item esteem.
#' \item cond.
#' \item mood.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_7_exercise_23
#' @aliases chapter_7_exercise_23 Chapter_7_Exercise_23 C7E23 c7e23
#' @keywords datasets
#' @usage data(chapter_7_exercise_23)
#' @section Synonym:
#' C7E23
#' @examples
#' # Load the data
#' data(chapter_7_exercise_23)
#'
#' # Or, alternatively load the data as
#' data(C7E23)
#'
#' # View the structure
#' str(chapter_7_exercise_23)
#'
#' # Brief summary of the data.
#' summary(chapter_7_exercise_23)
#'
"chapter_7_exercise_23"
#' The data used in Chapter 7, Exercise 24
#'
#' Data from Chapter 7 Exercise 24 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item id.
#' \item thought.
#' \item complexity.
#' \item attitude.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_7_exercise_24
#' @aliases chapter_7_exercise_24 Chapter_7_Exercise_24 C7E24 c7e24
#' @keywords datasets
#' @usage data(chapter_7_exercise_24)
#' @section Synonym:
#' C7E24
#' @examples
#' # Load the data
#' data(chapter_7_exercise_24)
#'
#' # Or, alternatively load the data as
#' data(C7E24)
#'
#' # View the structure
#' str(chapter_7_exercise_24)
#'
#' # Brief summary of the data.
#' summary(chapter_7_exercise_24)
#'
"chapter_7_exercise_24"
#' The data used in Chapter 7, Exercise 25
#'
#' Data from Chapter 7 Exercise 25 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item id.
#' \item switch.
#' \item cond.
#' \item change.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_7_exercise_25
#' @aliases chapter_7_exercise_25 Chapter_7_Exercise_25 C7E25 c7e25
#' @keywords datasets
#' @usage data(chapter_7_exercise_25)
#' @section Synonym:
#' C7E25
#' @examples
#' # Load the data
#' data(chapter_7_exercise_25)
#'
#' # Or, alternatively load the data as
#' data(C7E25)
#'
#' # View the structure
#' str(chapter_7_exercise_25)
#'
#' # Brief summary of the data.
#' summary(chapter_7_exercise_25)
#'
"chapter_7_exercise_25"
#' The data used in Chapter 8, Exercise 15
#'
#' Data from Chapter 8 Exercise 15 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item ProportionTime.
#' \item Parent.
#' \item Child.
#' \item Months.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_8_exercise_15
#' @aliases chapter_8_exercise_15 Chapter_8_Exercise_15 C8E15 c8e15
#' @keywords datasets
#' @usage data(chapter_8_exercise_15)
#' @section Synonym:
#' C8E15
#' @examples
#' # Load the data
#' data(chapter_8_exercise_15)
#'
#' # Or, alternatively load the data as
#' data(C8E15)
#'
#' # View the structure
#' str(chapter_8_exercise_15)
#'
#' # Brief summary of the data.
#' summary(chapter_8_exercise_15)
#'
"chapter_8_exercise_15"
#' The data used in Chapter 8, Exercise 16
#'
#' Data from Chapter 8 Exercise 16 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Value.
#' \item Monitors.
#' \item Argument.
#' \item Source.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_8_exercise_16
#' @aliases chapter_8_exercise_16 Chapter_8_Exercise_16 C8E16 c8e16
#' @keywords datasets
#' @usage data(chapter_8_exercise_16)
#' @section Synonym:
#' C8E16
#' @examples
#' # Load the data
#' data(chapter_8_exercise_16)
#'
#' # Or, alternatively load the data as
#' data(C8E16)
#'
#' # View the structure
#' str(chapter_8_exercise_16)
#'
#' # Brief summary of the data.
#' summary(chapter_8_exercise_16)
#'
"chapter_8_exercise_16"
#' The data used in Chapter 8, Exercise 17
#'
#' Data from Chapter 8 Exercise 17 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item BehavioralAvoidance.
#' \item Condition.
#' \item Phobia.
#' \item Gender.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_8_exercise_17
#' @aliases chapter_8_exercise_17 Chapter_8_Exercise_17 C8E17 c8e17
#' @keywords datasets
#' @usage data(chapter_8_exercise_17)
#' @section Synonym:
#' C8E17
#' @examples
#' # Load the data
#' data(chapter_8_exercise_17)
#'
#' # Or, alternatively load the data as
#' data(C8E17)
#'
#' # View the structure
#' str(chapter_8_exercise_17)
#'
#' # Brief summary of the data.
#' summary(chapter_8_exercise_17)
#'
"chapter_8_exercise_17"
#' The data used in Chapter 8, Exercise 18
#'
#' Data from Chapter 8 Exercise 18 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item ID.
#' \item Partner.
#' \item Report.
#' \item Focus.
#' \item Negativity.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_8_exercise_18
#' @aliases chapter_8_exercise_18 Chapter_8_Exercise_18 C8E18 c8e18
#' @keywords datasets
#' @usage data(chapter_8_exercise_18)
#' @section Synonym:
#' C8E18
#' @examples
#' # Load the data
#' data(chapter_8_exercise_18)
#'
#' # Or, alternatively load the data as
#' data(C8E18)
#'
#' # View the structure
#' str(chapter_8_exercise_18)
#'
#' # Brief summary of the data.
#' summary(chapter_8_exercise_18)
#'
"chapter_8_exercise_18"
#' The data used in Chapter 8, Exercise 19
#'
#' Data from Chapter 8 Exercise 19 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item ID.
#' \item Gender.
#' \item Audience.
#' \item Presentation.
#' \item Persistence.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_8_exercise_19
#' @aliases chapter_8_exercise_19 Chapter_8_Exercise_19 C8E19 c8e19
#' @keywords datasets
#' @usage data(chapter_8_exercise_19)
#' @section Synonym:
#' C8E19
#' @examples
#' # Load the data
#' data(chapter_8_exercise_19)
#'
#' # Or, alternatively load the data as
#' data(C8E19)
#'
#' # View the structure
#' str(chapter_8_exercise_19)
#'
#' # Brief summary of the data.
#' summary(chapter_8_exercise_19)
#'
"chapter_8_exercise_19"
#' The data used in Chapter 9, Exercise 4
#'
#' Data from Chapter 9 Exercise 4 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Group.
#' \item Pre.
#' \item Post.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_9_exercise_4
#' @aliases chapter_9_exercise_4 Chapter_9_Exercise_4 C9E4 c9e4
#' @keywords datasets
#' @usage data(chapter_9_exercise_4)
#' @section Synonym:
#' C9E4
#' @examples
#' # Load the data
#' data(chapter_9_exercise_4)
#'
#' # Or, alternatively load the data as
#' data(C9E4)
#'
#' # View the structure
#' str(chapter_9_exercise_4)
#'
#' # Brief summary of the data.
#' summary(chapter_9_exercise_4)
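#'
#' # A minimal ANCOVA sketch (an assumption, not from the text): treating
#' # Group as a between-subjects factor, Pre as the covariate, and Post as
#' # the outcome.
#' fit <- aov(Post ~ Pre + factor(Group), data = chapter_9_exercise_4)
#' summary(fit)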
#'
"chapter_9_exercise_4"
#' The data used in Chapter 9, Exercise 14
#'
#' Data from Chapter 9 Exercise 14 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item ID.
#' \item Condition.
#' \item EmotClose.
#' \item Anger.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_9_exercise_14
#' @aliases chapter_9_exercise_14 Chapter_9_Exercise_14 C9E14 c9e14
#' @keywords datasets
#' @usage data(chapter_9_exercise_14)
#' @section Synonym:
#' C9E14
#' @examples
#' # Load the data
#' data(chapter_9_exercise_14)
#'
#' # Or, alternatively load the data as
#' data(C9E14)
#'
#' # View the structure
#' str(chapter_9_exercise_14)
#'
#' # Brief summary of the data.
#' summary(chapter_9_exercise_14)
#'
"chapter_9_exercise_14"
#' The data used in Chapter 9, Exercise 15
#'
#' Data from Chapter 9 Exercise 15 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item ID.
#' \item Condition.
#' \item EmotClose.
#' \item Anger.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_9_exercise_15
#' @aliases chapter_9_exercise_15 Chapter_9_Exercise_15 C9E15 c9e15
#' @keywords datasets
#' @usage data(chapter_9_exercise_15)
#' @section Synonym:
#' C9E15
#' @examples
#' # Load the data
#' data(chapter_9_exercise_15)
#'
#' # Or, alternatively load the data as
#' data(C9E15)
#'
#' # View the structure
#' str(chapter_9_exercise_15)
#'
#' # Brief summary of the data.
#' summary(chapter_9_exercise_15)
#'
"chapter_9_exercise_15"
#' The data used in Chapter 9, Exercise 16
#'
#' Data from Chapter 9 Exercise 16 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item ID.
#' \item Condition.
#' \item EmotClose.
#' \item Anger.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_9_exercise_16
#' @aliases chapter_9_exercise_16 Chapter_9_Exercise_16 C9E16 c9e16
#' @keywords datasets
#' @usage data(chapter_9_exercise_16)
#' @section Synonym:
#' C9E16
#' @examples
#' # Load the data
#' data(chapter_9_exercise_16)
#'
#' # Or, alternatively load the data as
#' data(C9E16)
#'
#' # View the structure
#' str(chapter_9_exercise_16)
#'
#' # Brief summary of the data.
#' summary(chapter_9_exercise_16)
#'
"chapter_9_exercise_16"
#' The data used in Chapter 9 Extension, Exercise 1
#'
#' Data from Chapter 9 Extension Exercise 1 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item ID.
#' \item RSA.
#' \item Delay.
#' \item SES_group.
#' \item MaxDelay.
#' \item RSAdev.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @source Sturge-Apple, M. L., Suor, J. H., Davies, P. T., Cicchetti, D., Skibo, M. A., & Rogosch, F. A. (2016). Vagal tone and children's delay of gratification: Differential sensitivity in resource-poor and resource-rich environments. \emph{Psychological Science}, \emph{27}, 885--893.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data: {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_9_extension_exercise_1
#' @aliases chapter_9_extension_exercise_1 Chapter_9_Extension_Exercise_1 C9ExtE1 c9exte1
#' @keywords datasets
#' @usage data(chapter_9_extension_exercise_1)
#' @section Synonym:
#' C9ExtE1
#' @examples
#' # Load the data
#' data(chapter_9_extension_exercise_1)
#'
#' # Or, alternatively load the data as
#' data(C9ExtE1)
#'
#' # View the structure
#' str(chapter_9_extension_exercise_1)
#'
#' # Brief summary of the data.
#' summary(chapter_9_extension_exercise_1)
#'
"chapter_9_extension_exercise_1"
#' The data used in Chapter 9 Extension, Exercise 2
#'
#' Data from Chapter 9 Extension Exercise 2 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item ParticipantNumber.
#' \item Group.
#' \item BaseHrsDrkTypWk.
#' \item FolHrsDrkTypWk.
#' \item DiffBaseFolHrsDrk.
#' \item BaseHrsCtrd.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_9_extension_exercise_2
#' @aliases chapter_9_extension_exercise_2 Chapter_9_Extension_Exercise_2 C9ExtE2 c9exte2
#' @keywords datasets
#' @usage data(chapter_9_extension_exercise_2)
#' @section Synonym:
#' C9ExtE2
#' @examples
#' # Load the data
#' data(chapter_9_extension_exercise_2)
#'
#' # Or, alternatively load the data as
#' data(C9ExtE2)
#'
#' # View the structure
#' str(chapter_9_extension_exercise_2)
#'
#' # Brief summary of the data.
#' summary(chapter_9_extension_exercise_2)
#'
"chapter_9_extension_exercise_2"
#' The data used in Chapter 9 Extension, Exercise 3
#'
#' Data from Chapter 9 Extension Exercise 3 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Grade.
#' \item Treatment.
#' \item IQPre.
#' \item IQ4.
#' \item IQ8.
#' \item IQGain.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_9_extension_exercise_3
#' @aliases chapter_9_extension_exercise_3 Chapter_9_Extension_Exercise_3 C9ExtE3 c9exte3
#' @keywords datasets
#' @usage data(chapter_9_extension_exercise_3)
#' @section Synonym:
#' C9ExtE3
#' @examples
#' # Load the data
#' data(chapter_9_extension_exercise_3)
#'
#' # Or, alternatively load the data as
#' data(C9ExtE3)
#'
#' # View the structure
#' str(chapter_9_extension_exercise_3)
#'
#' # Brief summary of the data.
#' summary(chapter_9_extension_exercise_3)
#'
"chapter_9_extension_exercise_3"
# Chapter 9 Extension Figures (data used only in figures)
#' The data used in Chapter 9 Extension, Figures 4 and 5
#'
#' Data used in the Chapter 9 Extension, Figures 4 and 5, of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Grade.
#' \item Treatment.
#' \item IQPre.
#' \item IQ4.
#' \item IQ8.
#' \item AvPost.
#' \item IQPre_Mean.
#' \item IQPre_Centered.
#' \item TxX.
#' \item Constant1.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data: {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_9_extension_figures_4_and_5
#' @aliases chapter_9_extension_figures_4_and_5 Chapter_9_Extension_Figures_4_and_5 C9ExtFigs4and5 c9extfigs4and5
#' @keywords datasets
#' @usage data(chapter_9_extension_figures_4_and_5)
#' @section Synonym:
#' C9ExtFigs4and5
#' @examples
#' # Load the data
#' data(chapter_9_extension_figures_4_and_5)
#'
#' # Or, alternatively load the data as
#' data(C9ExtFigs4and5)
#'
#' # View the structure
#' str(chapter_9_extension_figures_4_and_5)
#'
#' # Brief summary of the data.
#' summary(chapter_9_extension_figures_4_and_5)
#'
"chapter_9_extension_figures_4_and_5"
#' The data used in Chapter 10, Exercise 7
#'
#' Data from Chapter 10 Exercise 7 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Ratings.
#' \item Therapist.
#' \item Method.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_10_exercise_7
#' @aliases chapter_10_exercise_7 Chapter_10_Exercise_7 C10E7 c10e7
#' @keywords datasets
#' @usage data(chapter_10_exercise_7)
#' @section Synonym:
#' C10E7
#' @examples
#' # Load the data
#' data(chapter_10_exercise_7)
#'
#' # Or, alternatively load the data as
#' data(C10E7)
#'
#' # View the structure
#' str(chapter_10_exercise_7)
#'
#' # Brief summary of the data.
#' summary(chapter_10_exercise_7)
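#'
#' # A minimal nested-design sketch (an assumption, not from the text):
#' # treating therapists as nested within methods. With Therapist random,
#' # Method would ordinarily be tested against the nested Therapist mean
#' # square rather than the residual.
#' fit <- aov(Ratings ~ factor(Method) / factor(Therapist),
#'            data = chapter_10_exercise_7)
#' summary(fit)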
#'
"chapter_10_exercise_7"
#' The data used in Chapter 10, Exercise 9
#'
#' Data from Chapter 10 Exercise 9 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item BP.
#' \item ResearchAssistant.
#' \item Biofeedback.
#' \item Diet.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_10_exercise_9
#' @aliases chapter_10_exercise_9 Chapter_10_Exercise_9 C10E9 c10e9
#' @keywords datasets
#' @usage data(chapter_10_exercise_9)
#' @section Synonym:
#' C10E9
#' @examples
#' # Load the data
#' data(chapter_10_exercise_9)
#'
#' # Or, alternatively load the data as
#' data(C10E9)
#'
#' # View the structure
#' str(chapter_10_exercise_9)
#'
#' # Brief summary of the data.
#' summary(chapter_10_exercise_9)
#'
"chapter_10_exercise_9"
#' The data used in Chapter 10, Exercise 14
#'
#' Data from Chapter 10 Exercise 14 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Composite.
#' \item Therapist.
#' \item Modality.
#' \item Therapist_w_Modality.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_10_exercise_14
#' @aliases chapter_10_exercise_14 Chapter_10_Exercise_14 C10E14 c10e14
#' @keywords datasets
#' @usage data(chapter_10_exercise_14)
#' @section Synonym:
#' C10E14
#' @examples
#' # Load the data
#' data(chapter_10_exercise_14)
#'
#' # Or, alternatively load the data as
#' data(C10E14)
#'
#' # View the structure
#' str(chapter_10_exercise_14)
#'
#' # Brief summary of the data.
#' summary(chapter_10_exercise_14)
#'
"chapter_10_exercise_14"
#' The data used in Chapter 11, Exercise 3
#'
#' Data from Chapter 11 Exercise 3 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Location1.
#' \item Location2.
#' \item Location3.
#' \item Location4.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_11_exercise_3
#' @aliases chapter_11_exercise_3 Chapter_11_Exercise_3 C11E3 c11e3
#' @keywords datasets
#' @usage data(chapter_11_exercise_3)
#' @section Synonym:
#' C11E3
#' @examples
#' # Load the data
#' data(chapter_11_exercise_3)
#'
#' # Or, alternatively load the data as
#' data(C11E3)
#'
#' # View the structure
#' str(chapter_11_exercise_3)
#'
#' # Brief summary of the data.
#' summary(chapter_11_exercise_3)
#'
"chapter_11_exercise_3"
#' The data used in Chapter 11, Exercise 5
#'
#' Data from Chapter 11 Exercise 5 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item cond1.
#' \item cond2.
#' \item cond3.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_11_exercise_5
#' @aliases chapter_11_exercise_5 Chapter_11_Exercise_5 C11E5 c11e5
#' @keywords datasets
#' @usage data(chapter_11_exercise_5)
#' @section Synonym:
#' C11E5
#' @examples
#' # Load the data
#' data(chapter_11_exercise_5)
#'
#' # Or, alternatively load the data as
#' data(C11E5)
#'
#' # View the structure
#' str(chapter_11_exercise_5)
#'
#' # Brief summary of the data.
#' summary(chapter_11_exercise_5)
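#'
#' # A minimal sketch (an assumption, not from the text): if each row is one
#' # participant measured under cond1-cond3, the data can be reshaped to
#' # long format for a within-subjects analysis.
#' long <- reshape(chapter_11_exercise_5, direction = "long",
#'                 varying = c("cond1", "cond2", "cond3"), v.names = "score",
#'                 timevar = "condition", idvar = "subject")
#' head(long)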
#'
"chapter_11_exercise_5"
#' The data used in Chapter 11, Exercise 17
#'
#' Data from Chapter 11 Exercise 17 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Day1.
#' \item Day2.
#' \item Day3.
#' \item Day4.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_11_exercise_17
#' @aliases chapter_11_exercise_17 Chapter_11_Exercise_17 C11E17 c11e17
#' @keywords datasets
#' @usage data(chapter_11_exercise_17)
#' @section Synonym:
#' C11E17
#' @examples
#' # Load the data
#' data(chapter_11_exercise_17)
#'
#' # Or, alternatively load the data as
#' data(C11E17)
#'
#' # View the structure
#' str(chapter_11_exercise_17)
#'
#' # Brief summary of the data.
#' summary(chapter_11_exercise_17)
#'
"chapter_11_exercise_17"
#' The data used in Chapter 11, Exercise 18
#'
#' Data from Chapter 11 Exercise 18 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Strong.
#' \item Medium.
#' \item Weak.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_11_exercise_18
#' @aliases chapter_11_exercise_18 Chapter_11_Exercise_18 C11E18 c11e18
#' @keywords datasets
#' @usage data(chapter_11_exercise_18)
#' @section Synonym:
#' C11E18
#' @examples
#' # Load the data
#' data(chapter_11_exercise_18)
#'
#' # Or, alternatively load the data as
#' data(C11E18)
#'
#' # View the structure
#' str(chapter_11_exercise_18)
#'
#' # Brief summary of the data.
#' summary(chapter_11_exercise_18)
#'
"chapter_11_exercise_18"
#' The data used in Chapter 11, Exercise 19
#'
#' Data from Chapter 11 Exercise 19 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Face.
#' \item Circle.
#' \item Paper.
#' \item White.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_11_exercise_19
#' @aliases chapter_11_exercise_19 Chapter_11_Exercise_19 C11E19 c11e19
#' @keywords datasets
#' @usage data(chapter_11_exercise_19)
#' @section Synonym:
#' C11E19
#' @examples
#' # Load the data
#' data(chapter_11_exercise_19)
#'
#' # Or, alternatively load the data as
#' data(C11E19)
#'
#' # View the structure
#' str(chapter_11_exercise_19)
#'
#' # Brief summary of the data.
#' summary(chapter_11_exercise_19)
#'
"chapter_11_exercise_19"
#' The data used in Chapter 11, Exercise 21
#'
#' Data from Chapter 11 Exercise 21 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Mother.
#' \item Rater.
#' \item Warmth.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_11_exercise_21
#' @aliases chapter_11_exercise_21 Chapter_11_Exercise_21 C11E21 c11e21
#' @keywords datasets
#' @usage data(chapter_11_exercise_21)
#' @section Synonym:
#' C11E21
#' @examples
#' # Load the data
#' data(chapter_11_exercise_21)
#'
#' # Or, alternatively load the data as
#' data(C11E21)
#'
#' # View the structure
#' str(chapter_11_exercise_21)
#'
#' # Brief summary of the data.
#' summary(chapter_11_exercise_21)
#'
"chapter_11_exercise_21"
#' The data used in Chapter 11, Exercise 22
#'
#' Data from Chapter 11 Exercise 22 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item AgeNumeric.
#' \item GenderNum.
#' \item AVGMeetMonkey.
#' \item AVGChildRecTreats.
#' \item AVGExpGivesCommon.
#' \item AVGChildGivesCommon.
#' \item AVGChildGivesOwn.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_11_exercise_22
#' @aliases chapter_11_exercise_22 Chapter_11_Exercise_22 C11E22 c11e22
#' @keywords datasets
#' @usage data(chapter_11_exercise_22)
#' @section Synonym:
#' C11E22
#' @examples
#' # Load the data
#' data(chapter_11_exercise_22)
#'
#' # Or, alternatively load the data as
#' data(C11E22)
#'
#' # View the structure
#' str(chapter_11_exercise_22)
#'
#' # Brief summary of the data.
#' summary(chapter_11_exercise_22)
#'
"chapter_11_exercise_22"
#' The data used in Chapter 11, Exercise 23
#'
#' Data from Chapter 11 Exercise 23 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item id.
#' \item position.
#' \item meanz.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_11_exercise_23
#' @aliases chapter_11_exercise_23 Chapter_11_Exercise_23 C11E23 c11e23
#' @keywords datasets
#' @usage data(chapter_11_exercise_23)
#' @section Synonym:
#' C11E23
#' @examples
#' # Load the data
#' data(chapter_11_exercise_23)
#'
#' # Or, alternatively load the data as
#' data(C11E23)
#'
#' # View the structure
#' str(chapter_11_exercise_23)
#'
#' # Brief summary of the data.
#' summary(chapter_11_exercise_23)
#'
"chapter_11_exercise_23"
#' The data used in Chapter 11, Exercise 24
#'
#' Data from Chapter 11 Exercise 24 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item id.
#' \item judgement.
#' \item activity.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_11_exercise_24
#' @aliases chapter_11_exercise_24 Chapter_11_Exercise_24 C11E24 c11e24
#' @keywords datasets
#' @usage data(chapter_11_exercise_24)
#' @section Synonym:
#' C11E24
#' @examples
#' # Load the data
#' data(chapter_11_exercise_24)
#'
#' # Or, alternatively load the data as
#' data(C11E24)
#'
#' # View the structure
#' str(chapter_11_exercise_24)
#'
#' # Brief summary of the data.
#' summary(chapter_11_exercise_24)
#'
"chapter_11_exercise_24"
#' The data used in Chapter 12, Exercise 9
#'
#' Data from Chapter 12 Exercise 9 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item GridLeft.
#' \item GridRight.
#' \item BraceLeft.
#' \item BraceRight.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_12_exercise_9
#' @aliases chapter_12_exercise_9 Chapter_12_Exercise_9 C12E9 c12e9
#' @keywords datasets
#' @usage data(chapter_12_exercise_9)
#' @section Synonym:
#' C12E9
#' @examples
#' # Load the data
#' data(chapter_12_exercise_9)
#'
#' # Or, alternatively load the data as
#' data(C12E9)
#'
#' # View the structure
#' str(chapter_12_exercise_9)
#'
#' # Brief summary of the data.
#' summary(chapter_12_exercise_9)
#'
"chapter_12_exercise_9"
#' The data used in Chapter 12, Exercise 17
#'
#' Data from Chapter 12 Exercise 17 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Day1.
#' \item Day2.
#' \item Day3.
#' \item Day4.
#' \item Group.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_12_exercise_17
#' @aliases chapter_12_exercise_17 Chapter_12_Exercise_17 C12E17 c12e17
#' @keywords datasets
#' @usage data(chapter_12_exercise_17)
#' @section Synonym:
#' C12E17
#' @examples
#' # Load the data
#' data(chapter_12_exercise_17)
#'
#' # Or, alternatively load the data as
#' data(C12E17)
#'
#' # View the structure
#' str(chapter_12_exercise_17)
#'
#' # Brief summary of the data.
#' summary(chapter_12_exercise_17)
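#'
#' # A minimal split-plot sketch (an assumption, not from the text): treating
#' # Day1-Day4 as repeated measures on each participant and Group as a
#' # between-subjects factor.
#' long <- reshape(chapter_12_exercise_17, direction = "long",
#'                 varying = c("Day1", "Day2", "Day3", "Day4"),
#'                 v.names = "score", timevar = "day", idvar = "subject")
#' fit <- aov(score ~ factor(Group) * factor(day) +
#'              Error(factor(subject)), data = long)
#' summary(fit)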
#'
"chapter_12_exercise_17"
#' The data used in Chapter 12, Exercise 18
#'
#' Data from Chapter 12 Exercise 18 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Baseline.
#' \item Feedback.
#' \item Group.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_12_exercise_18
#' @aliases chapter_12_exercise_18 Chapter_12_Exercise_18 C12E18 c12e18
#' @keywords datasets
#' @usage data(chapter_12_exercise_18)
#' @section Synonym:
#' C12E18
#' @examples
#' # Load the data
#' data(chapter_12_exercise_18)
#'
#' # Or, alternatively load the data as
#' data(C12E18)
#'
#' # View the structure
#' str(chapter_12_exercise_18)
#'
#' # Brief summary of the data.
#' summary(chapter_12_exercise_18)
#'
"chapter_12_exercise_18"
#' The data used in Chapter 12, Exercise 19
#'
#' Data from Chapter 12 Exercise 19 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item September.
#' \item November.
#' \item April.
#' \item June.
#' \item July.
#' \item Group.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_12_exercise_19
#' @aliases chapter_12_exercise_19 Chapter_12_Exercise_19 C12E19 c12e19
#' @keywords datasets
#' @usage data(chapter_12_exercise_19)
#' @section Synonym:
#' C12E19
#' @examples
#' # Load the data
#' data(chapter_12_exercise_19)
#'
#' # Or, alternatively load the data as
#' data(C12E19)
#'
#' # View the structure
#' str(chapter_12_exercise_19)
#'
#' # Brief summary of the data.
#' summary(chapter_12_exercise_19)
#'
"chapter_12_exercise_19"
#' The data used in Chapter 12, Exercise 21
#'
#' Data from Chapter 12 Exercise 21 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Recall.
#' \item Subject.
#' \item Passage.
#' \item DifficultyCondition.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_12_exercise_21
#' @aliases chapter_12_exercise_21 Chapter_12_Exercise_21 C12E21 c12e21
#' @keywords datasets
#' @usage data(chapter_12_exercise_21)
#' @section Synonym:
#' C12E21
#' @examples
#' # Load the data
#' data(chapter_12_exercise_21)
#'
#' # Or, alternatively load the data as
#' data(C12E21)
#'
#' # View the structure
#' str(chapter_12_exercise_21)
#'
#' # Brief summary of the data.
#' summary(chapter_12_exercise_21)
#'
"chapter_12_exercise_21"
#' The data used in Chapter 13, Exercise 7
#'
#' Data from Chapter 13 Exercise 7 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Location1.
#' \item Location2.
#' \item Location3.
#' \item Location4.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_13_exercise_7
#' @aliases chapter_13_exercise_7 Chapter_13_Exercise_7 C13E7 c13e7
#' @keywords datasets
#' @usage data(chapter_13_exercise_7)
#' @section Synonym:
#' C13E7
#' @examples
#' # Load the data
#' data(chapter_13_exercise_7)
#'
#' # Or, alternatively load the data as
#' data(C13E7)
#'
#' # View the structure
#' str(chapter_13_exercise_7)
#'
#' # Brief summary of the data.
#' summary(chapter_13_exercise_7)
#'
"chapter_13_exercise_7"
#' The data used in Chapter 13, Exercise 10
#'
#' Data from Chapter 13 Exercise 10 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Face.
#' \item Circle.
#' \item Paper.
#' \item White.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_13_exercise_10
#' @aliases chapter_13_exercise_10 Chapter_13_Exercise_10 C13E10 c13e10
#' @keywords datasets
#' @usage data(chapter_13_exercise_10)
#' @section Synonym:
#' C13E10
#' @examples
#' # Load the data
#' data(chapter_13_exercise_10)
#'
#' # Or, alternatively load the data as
#' data(C13E10)
#'
#' # View the structure
#' str(chapter_13_exercise_10)
#'
#' # Brief summary of the data.
#' summary(chapter_13_exercise_10)
#'
"chapter_13_exercise_10"
#' The data used in Chapter 13, Exercise 13
#'
#' Data from Chapter 13 Exercise 13 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Day1.
#' \item Day2.
#' \item Day3.
#' \item Day4.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_13_exercise_13
#' @aliases chapter_13_exercise_13 Chapter_13_Exercise_13 C13E13 c13e13
#' @keywords datasets
#' @usage data(chapter_13_exercise_13)
#' @section Synonym:
#' C13E13
#' @examples
#' # Load the data
#' data(chapter_13_exercise_13)
#'
#' # Or, alternatively load the data as
#' data(C13E13)
#'
#' # View the structure
#' str(chapter_13_exercise_13)
#'
#' # Brief summary of the data.
#' summary(chapter_13_exercise_13)
#'
"chapter_13_exercise_13"
#' The data used in Chapter 13, Exercise 14
#'
#' Data from Chapter 13 Exercise 14 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Time1.
#' \item Time2.
#' \item Time3.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_13_exercise_14
#' @aliases chapter_13_exercise_14 Chapter_13_Exercise_14 C13E14 c13e14
#' @keywords datasets
#' @usage data(chapter_13_exercise_14)
#' @section Synonym:
#' C13E14
#' @examples
#' # Load the data
#' data(chapter_13_exercise_14)
#'
#' # Or, alternatively load the data as
#' data(C13E14)
#'
#' # View the structure
#' str(chapter_13_exercise_14)
#'
#' # Brief summary of the data.
#' summary(chapter_13_exercise_14)
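#'
#' # A minimal sketch of the multivariate approach (an assumption, not from
#' # the text): if Time1-Time3 are repeated measures on the same
#' # participants, Hotelling's T^2 tests whether the mean within-subject
#' # differences are zero.
#' D <- with(chapter_13_exercise_14, cbind(Time2 - Time1, Time3 - Time2))
#' n <- nrow(D); p <- ncol(D)
#' dbar <- colMeans(D); S <- cov(D)
#' T2 <- n * drop(t(dbar) %*% solve(S) %*% dbar)
#' Fstat <- (n - p) / ((n - 1) * p) * T2
#' pf(Fstat, p, n - p, lower.tail = FALSE)  # p value for the omnibus test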
#'
"chapter_13_exercise_14"
#' The data used in Chapter 13, Exercise 22
#'
#' Data from Chapter 13 Exercise 22 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Condition1.
#' \item Condition2.
#' \item Condition3.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_13_exercise_22
#' @aliases chapter_13_exercise_22 Chapter_13_Exercise_22 C13E22 c13e22
#' @keywords datasets
#' @usage data(chapter_13_exercise_22)
#' @section Synonym:
#' C13E22
#' @examples
#' # Load the data
#' data(chapter_13_exercise_22)
#'
#' # Or, alternatively load the data as
#' data(C13E22)
#'
#' # View the structure
#' str(chapter_13_exercise_22)
#'
#' # Brief summary of the data.
#' summary(chapter_13_exercise_22)
#'
"chapter_13_exercise_22"
#' The data used in Chapter 13, Exercise 23
#'
#' Data from Chapter 13 Exercise 23 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item AgeNumeric.
#' \item GenderNum.
#' \item AVGMeetMonkey.
#' \item AVGChildRecTreats.
#' \item AVGExpGivesCommon.
#' \item AVGChildGivesCommon.
#' \item AVGChildGivesOwn.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_13_exercise_23
#' @aliases chapter_13_exercise_23 Chapter_13_Exercise_23 C13E23 c13e23
#' @keywords datasets
#' @usage data(chapter_13_exercise_23)
#' @section Synonym:
#' C13E23
#' @examples
#' # Load the data
#' data(chapter_13_exercise_23)
#'
#' # Or, alternatively load the data as
#' data(C13E23)
#'
#' # View the structure
#' str(chapter_13_exercise_23)
#'
#' # Brief summary of the data.
#' summary(chapter_13_exercise_23)
#'
"chapter_13_exercise_23"
#' The data used in Chapter 13, Exercise 24
#'
#' Data from Chapter 13 Exercise 24 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item id.
#' \item position.
#' \item meanz.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_13_exercise_24
#' @aliases chapter_13_exercise_24 Chapter_13_Exercise_24 C13E24 c13e24
#' @keywords datasets
#' @usage data(chapter_13_exercise_24)
#' @section Synonym:
#' C13E24
#' @examples
#' # Load the data
#' data(chapter_13_exercise_24)
#'
#' # Or, alternatively load the data as
#' data(C13E24)
#'
#' # View the structure
#' str(chapter_13_exercise_24)
#'
#' # Brief summary of the data.
#' summary(chapter_13_exercise_24)
#'
"chapter_13_exercise_24"
#' The data used in Chapter 13, Exercise 25
#'
#' Data from Chapter 13 Exercise 25 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item self.
#' \item friend.
#' \item case.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_13_exercise_25
#' @aliases chapter_13_exercise_25 Chapter_13_Exercise_25 C13E25 c13e25
#' @keywords datasets
#' @usage data(chapter_13_exercise_25)
#' @section Synonym:
#' C13E25
#' @examples
#' # Load the data
#' data(chapter_13_exercise_25)
#'
#' # Or, alternatively load the data as
#' data(C13E25)
#'
#' # View the structure
#' str(chapter_13_exercise_25)
#'
#' # Brief summary of the data.
#' summary(chapter_13_exercise_25)
#'
"chapter_13_exercise_25"
#' The data used in Chapter 14, Exercise 10
#'
#' Data from Chapter 14 Exercise 10 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Angle0.
#' \item Angle4.
#' \item Angle8.
#' \item Group.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_14_exercise_10
#' @aliases chapter_14_exercise_10 Chapter_14_Exercise_10 C14E10 c14e10
#' @keywords datasets
#' @usage data(chapter_14_exercise_10)
#' @section Synonym:
#' C14E10
#' @examples
#' # Load the data
#' data(chapter_14_exercise_10)
#'
#' # Or, alternatively load the data as
#' data(C14E10)
#'
#' # View the structure
#' str(chapter_14_exercise_10)
#'
#' # Brief summary of the data.
#' summary(chapter_14_exercise_10)
#'
"chapter_14_exercise_10"
#' The data used in Chapter 14, Exercise 14
#'
#' Data from Chapter 14 Exercise 14 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Gender.
#' \item MaleFriend.
#' \item FemaleFriend.
#' \item Same.
#' \item Opposite.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_14_exercise_14
#' @aliases chapter_14_exercise_14 Chapter_14_Exercise_14 C14E14 c14e14
#' @keywords datasets
#' @usage data(chapter_14_exercise_14)
#' @section Synonym:
#' C14E14
#' @examples
#' # Load the data
#' data(chapter_14_exercise_14)
#'
#' # Or, alternatively load the data as
#' data(C14E14)
#'
#' # View the structure
#' str(chapter_14_exercise_14)
#'
#' # Brief summary of the data.
#' summary(chapter_14_exercise_14)
#'
"chapter_14_exercise_14"
#' The data used in Chapter 14, Exercise 15
#'
#' Data from Chapter 14 Exercise 15 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Baseline.
#' \item Feedback.
#' \item Group.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_14_exercise_15
#' @aliases chapter_14_exercise_15 Chapter_14_Exercise_15 C14E15 c14e15
#' @keywords datasets
#' @usage data(chapter_14_exercise_15)
#' @section Synonym:
#' C14E15
#' @examples
#' # Load the data
#' data(chapter_14_exercise_15)
#'
#' # Or, alternatively load the data as
#' data(C14E15)
#'
#' # View the structure
#' str(chapter_14_exercise_15)
#'
#' # Brief summary of the data.
#' summary(chapter_14_exercise_15)
#'
"chapter_14_exercise_15"
#' The data used in Chapter 14, Exercise 21
#'
#' Data from Chapter 14 Exercise 21 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Day1.
#' \item Day2.
#' \item Day3.
#' \item Day4.
#' \item Group.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_14_exercise_21
#' @aliases chapter_14_exercise_21 Chapter_14_Exercise_21 C14E21 c14e21
#' @keywords datasets
#' @usage data(chapter_14_exercise_21)
#' @section Synonym:
#' C14E21
#' @examples
#' # Load the data
#' data(chapter_14_exercise_21)
#'
#' # Or, alternatively load the data as
#' data(C14E21)
#'
#' # View the structure
#' str(chapter_14_exercise_21)
#'
#' # Brief summary of the data.
#' summary(chapter_14_exercise_21)
#'
"chapter_14_exercise_21"
#' The data used in Chapter 14, Exercise 22
#'
#' Data from Chapter 14 Exercise 22 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item September.
#' \item November.
#' \item April.
#' \item June.
#' \item July.
#' \item Group.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_14_exercise_22
#' @aliases chapter_14_exercise_22 Chapter_14_Exercise_22 C14E22 c14e22
#' @keywords datasets
#' @usage data(chapter_14_exercise_22)
#' @section Synonym:
#' C14E22
#' @examples
#' # Load the data
#' data(chapter_14_exercise_22)
#'
#' # Or, alternatively load the data as
#' data(C14E22)
#'
#' # View the structure
#' str(chapter_14_exercise_22)
#'
#' # Brief summary of the data.
#' summary(chapter_14_exercise_22)
#'
"chapter_14_exercise_22"
#' The data used in Chapter 15, Exercise 16
#'
#' Data from Chapter 15 Exercise 16 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item ID.
#' \item Time.
#' \item Y.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_15_exercise_16
#' @aliases chapter_15_exercise_16 Chapter_15_Exercise_16 C15E16 c15e16
#' @keywords datasets
#' @usage data(chapter_15_exercise_16)
#' @section Synonym:
#' C15E16
#' @examples
#' # Load the data
#' data(chapter_15_exercise_16)
#'
#' # Or, alternatively load the data as
#' data(C15E16)
#'
#' # View the structure
#' str(chapter_15_exercise_16)
#'
#' # Brief summary of the data.
#' summary(chapter_15_exercise_16)
#'
"chapter_15_exercise_16"
#' The data used in Chapter 15, Exercise 17
#'
#' Data from Chapter 15 Exercise 17 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Day1.
#' \item Day2.
#' \item Day3.
#' \item Day4.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_15_exercise_17
#' @aliases chapter_15_exercise_17 Chapter_15_Exercise_17 C15E17 c15e17
#' @keywords datasets
#' @usage data(chapter_15_exercise_17)
#' @section Synonym:
#' C15E17
#' @examples
#' # Load the data
#' data(chapter_15_exercise_17)
#'
#' # Or, alternatively load the data as
#' data(C15E17)
#'
#' # View the structure
#' str(chapter_15_exercise_17)
#'
#' # Brief summary of the data.
#' summary(chapter_15_exercise_17)
#'
"chapter_15_exercise_17"
#' The data used in Chapter 15, Exercise 18
#'
#' Data from Chapter 15 Exercise 18 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Subject.
#' \item September.
#' \item October.
#' \item November.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_15_exercise_18
#' @aliases chapter_15_exercise_18 Chapter_15_Exercise_18 C15E18 c15e18
#' @keywords datasets
#' @usage data(chapter_15_exercise_18)
#' @section Synonym:
#' C15E18
#' @examples
#' # Load the data
#' data(chapter_15_exercise_18)
#'
#' # Or, alternatively load the data as
#' data(C15E18)
#'
#' # View the structure
#' str(chapter_15_exercise_18)
#'
#' # Brief summary of the data.
#' summary(chapter_15_exercise_18)
#'
"chapter_15_exercise_18"
#' The data used in Chapter 15, Exercise 19
#'
#' Data from Chapter 15 Exercise 19 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Subject.
#' \item Cognitive70.
#' \item Cognitive73.
#' \item Cognitive74.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_15_exercise_19
#' @aliases chapter_15_exercise_19 Chapter_15_Exercise_19 C15E19 c15e19
#' @keywords datasets
#' @usage data(chapter_15_exercise_19)
#' @section Synonym:
#' C15E19
#' @examples
#' # Load the data
#' data(chapter_15_exercise_19)
#'
#' # Or, alternatively load the data as
#' data(C15E19)
#'
#' # View the structure
#' str(chapter_15_exercise_19)
#'
#' # Brief summary of the data.
#' summary(chapter_15_exercise_19)
#'
"chapter_15_exercise_19"
#' The data used in Chapter 15, Exercise 18 (Univariate)
#'
#' Data from Chapter 15 Exercise 18 (Univariate) of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item ID.
#' \item Time.
#' \item Y.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_15_exercise_18_univariate
#' @aliases chapter_15_exercise_18_univariate Chapter_15_Exercise_18_Univariate C15E18U c15e18u
#' @keywords datasets
#' @usage data(chapter_15_exercise_18_univariate)
#' @section Synonym:
#' C15E18U
#' @examples
#' # Load the data
#' data(chapter_15_exercise_18_univariate)
#'
#' # Or, alternatively load the data as
#' data(C15E18U)
#'
#' # View the structure
#' str(chapter_15_exercise_18_univariate)
#'
#' # Brief summary of the data.
#' summary(chapter_15_exercise_18_univariate)
#'
"chapter_15_exercise_18_univariate"
#' The data used in Chapter 15, Exercise 19 (Univariate)
#'
#' Data from Chapter 15 Exercise 19 (Univariate) of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Subject.
#' \item Age.
#' \item Ability.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_15_exercise_19_univariate
#' @aliases chapter_15_exercise_19_univariate Chapter_15_Exercise_19_Univariate C15E19U c15e19u
#' @keywords datasets
#' @usage data(chapter_15_exercise_19_univariate)
#' @section Synonym:
#' C15E19U
#' @examples
#' # Load the data
#' data(chapter_15_exercise_19_univariate)
#'
#' # Or, alternatively load the data as
#' data(C15E19U)
#'
#' # View the structure
#' str(chapter_15_exercise_19_univariate)
#'
#' # Brief summary of the data.
#' summary(chapter_15_exercise_19_univariate)
#'
"chapter_15_exercise_19_univariate"
#' The data used in Chapter 16, Exercise 5
#'
#' Data from Chapter 16 Exercise 5 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Severity.
#' \item Trainee.
#' \item Gender.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_16_exercise_5
#' @aliases chapter_16_exercise_5 Chapter_16_Exercise_5 C16E5 c16e5
#' @keywords datasets
#' @usage data(chapter_16_exercise_5)
#' @section Synonym:
#' C16E5
#' @examples
#' # Load the data
#' data(chapter_16_exercise_5)
#'
#' # Or, alternatively load the data as
#' data(C16E5)
#'
#' # View the structure
#' str(chapter_16_exercise_5)
#'
#' # Brief summary of the data.
#' summary(chapter_16_exercise_5)
#'
"chapter_16_exercise_5"
#' The data used in Chapter 16, Exercise 7
#'
#' Data from Chapter 16 Exercise 7 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Observation.
#' \item Room.
#' \item Condition.
#' \item Cognition.
#' \item Skill.
#' \item Inductive.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_16_exercise_7
#' @aliases chapter_16_exercise_7 Chapter_16_Exercise_7 C16E7 c16e7
#' @keywords datasets
#' @usage data(chapter_16_exercise_7)
#' @section Synonym:
#' C16E7
#' @examples
#' # Load the data
#' data(chapter_16_exercise_7)
#'
#' # Or, alternatively load the data as
#' data(C16E7)
#'
#' # View the structure
#' str(chapter_16_exercise_7)
#'
#' # Brief summary of the data.
#' summary(chapter_16_exercise_7)
#'
"chapter_16_exercise_7"
#' The data used in Chapter 16, Exercise 9
#'
#' Data from Chapter 16 Exercise 9 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Observation.
#' \item Room.
#' \item Condition.
#' \item Cognition.
#' \item Skill.
#' \item Inductive.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name chapter_16_exercise_9
#' @aliases chapter_16_exercise_9 Chapter_16_Exercise_9 C16E9 c16e9
#' @keywords datasets
#' @usage data(chapter_16_exercise_9)
#' @section Synonym:
#' C16E9
#' @examples
#' # Load the data
#' data(chapter_16_exercise_9)
#'
#' # Or, alternatively load the data as
#' data(C16E9)
#'
#' # View the structure
#' str(chapter_16_exercise_9)
#'
#' # Brief summary of the data.
#' summary(chapter_16_exercise_9)
#'
"chapter_16_exercise_9"
#######################################################################################################################################
# Tutorials
#######################################################################################################################################
#' The data used in Tutorial 1, Table 1
#'
#' Data from Tutorial 1 Table 1 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Group.
#' \item Score.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name tutorial_1_table_1
#' @aliases tutorial_1_table_1 Tutorial_1_Table_1 T1T1 t1t1
#' @keywords datasets
#' @usage data(tutorial_1_table_1)
#' @section Synonym:
#' T1T1
#' @examples
#' # Load the data
#' data(tutorial_1_table_1)
#'
#' # Or, alternatively load the data as
#' data(T1T1)
#'
#' # View the structure
#' str(tutorial_1_table_1)
#'
#' # Brief summary of the data.
#' summary(tutorial_1_table_1)
#'
"tutorial_1_table_1"
#' The data used in Tutorial 2, Table 1
#'
#' Data from Tutorial 2 Table 1 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Group.
#' \item Score.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name tutorial_2_table_1
#' @aliases tutorial_2_table_1 Tutorial_2_Table_1 T2T1 t2t1
#' @keywords datasets
#' @usage data(tutorial_2_table_1)
#' @section Synonym:
#' T2T1
#' @examples
#' # Load the data
#' data(tutorial_2_table_1)
#'
#' # Or, alternatively load the data as
#' data(T2T1)
#'
#' # View the structure
#' str(tutorial_2_table_1)
#'
#' # Brief summary of the data.
#' summary(tutorial_2_table_1)
#'
"tutorial_2_table_1"
#' The data used in Tutorial 2, Table 2
#'
#' Data from Tutorial 2 Table 2 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Y.
#' \item X1.
#' \item X2.
#' \item X3.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name tutorial_2_table_2
#' @aliases tutorial_2_table_2 Tutorial_2_Table_2 T2T2 t2t2
#' @keywords datasets
#' @usage data(tutorial_2_table_2)
#' @section Synonym:
#' T2T2
#' @examples
#' # Load the data
#' data(tutorial_2_table_2)
#'
#' # Or, alternatively load the data as
#' data(T2T2)
#'
#' # View the structure
#' str(tutorial_2_table_2)
#'
#' # Brief summary of the data.
#' summary(tutorial_2_table_2)
#'
"tutorial_2_table_2"
#' The data used in Tutorial 3A, Table 1
#'
#' Data from Tutorial 3A Table 1 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Group.
#' \item Score.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name tutorial_3a_table_1
#' @aliases tutorial_3a_table_1 Tutorial_3A_Table_1 T3AT1 t3at1
#' @keywords datasets
#' @usage data(tutorial_3a_table_1)
#' @section Synonym:
#' T3AT1
#' @examples
#' # Load the data
#' data(tutorial_3a_table_1)
#'
#' # Or, alternatively load the data as
#' data(T3AT1)
#'
#' # View the structure
#' str(tutorial_3a_table_1)
#'
#' # Brief summary of the data.
#' summary(tutorial_3a_table_1)
#'
"tutorial_3a_table_1"
#' The data used in Tutorial 3A, Table 2
#'
#' Data from Tutorial 3A Table 2 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item Y.
#' \item X1.
#' \item X2.
#' \item X3.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name tutorial_3a_table_2
#' @aliases tutorial_3a_table_2 Tutorial_3A_Table_2 T3AT2 t3at2
#' @keywords datasets
#' @usage data(tutorial_3a_table_2)
#' @section Synonym:
#' T3AT2
#' @examples
#' # Load the data
#' data(tutorial_3a_table_2)
#'
#' # Or, alternatively load the data as
#' data(T3AT2)
#'
#' # View the structure
#' str(tutorial_3a_table_2)
#'
#' # Brief summary of the data.
#' summary(tutorial_3a_table_2)
#'
"tutorial_3a_table_2"
#' The data used in Tutorial 3A, Table 4
#'
#' Data from Tutorial 3A Table 4 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item group.
#' \item score.
#' \item X0.
#' \item X1.
#' \item X2.
#' \item X3.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name tutorial_3a_table_4
#' @aliases tutorial_3a_table_4 Tutorial_3A_Table_4 T3AT4 t3at4
#' @keywords datasets
#' @usage data(tutorial_3a_table_4)
#' @section Synonym:
#' T3AT4
#' @examples
#' # Load the data
#' data(tutorial_3a_table_4)
#'
#' # Or, alternatively load the data as
#' data(T3AT4)
#'
#' # View the structure
#' str(tutorial_3a_table_4)
#'
#' # Brief summary of the data.
#' summary(tutorial_3a_table_4)
#'
"tutorial_3a_table_4"
#' The data used in Tutorial 3A, Table 5
#'
#' Data from Tutorial 3A Table 5 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
#'
#' \itemize{
#' \item group.
#' \item score.
#' \item X0.
#' \item X1.
#' \item X2.
#' \item X3.}
#'
#' @author Ken Kelley \email{[email protected]}
#' @source \url{https://designingexperiments.com/data/}
#' @source Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
#' analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
#' @references
#' Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
#' {A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
#' @docType data
#' @name tutorial_3a_table_5
#' @aliases tutorial_3a_table_5 Tutorial_3A_Table_5 T3AT5 t3at5
#' @keywords datasets
#' @usage data(tutorial_3a_table_5)
#' @section Synonym:
#' T3AT5
#' @examples
#' # Load the data
#' data(tutorial_3a_table_5)
#'
#' # Or, alternatively load the data as
#' data(T3AT5)
#'
#' # View the structure
#' str(tutorial_3a_table_5)
#'
#' # Brief summary of the data.
#' summary(tutorial_3a_table_5)
#'
"tutorial_3a_table_5"
| /scratch/gouwar.j/cran-all/cranData/AMCP/R/documentation.R |
#' Generate AMC LaTeX question group ("element") codes
#'
#' @param element A character value or vector of question groups ("elements") to generate input code for. Default is "general".
#' @param shufflequestions A logical value or vector to indicate whether to shuffle questions inside a question group. Defaults to TRUE.
#' @param sections A logical value or vector to indicate whether to create a new LaTeX section for each element (defaults to TRUE).
#' @param output A character value to indicate how to output the LaTeX commands. Use "message" (default) to get a console message that can be directly copied and pasted into the main LaTeX file. Use "list" to get a list object. Use "file" to output to a .tex file (the path can be changed with the "filepath" argument).
#' @param filepath A character value with the file path for the .tex file to be created (defaults to "elements.tex").
#' @param append A logical value indicating if the code should be appended (append=TRUE) to an existing .tex file. Defaults to FALSE, thus overwriting the file.
#' @param messages A logical to indicate whether instructions should be output (defaults to TRUE).
#'
#' @return Commands to add the question groups in AMC-LaTeX code.
#' @export
#'
#' @examples #To output a message (not visible in documentation)
#' AMCcreateelements(c(1:4))
#'
#' #To output a list
#' AMCcreateelements(c(1:4), output = "list")
#'
#' #Duplicates are automatically removed
#' AMCcreateelements(rep(1:3, 5), output = "list")
#'
#' #To cancel shuffling
#' AMCcreateelements(c(1:4), output = "list", shufflequestions = FALSE)
#'
#' #To remove sections at each element
#' AMCcreateelements(c(1:4), sections = FALSE, output = "list")
#'
#' #To add sections for only last element
#' AMCcreateelements(c(1:4),
#' sections = c(FALSE,FALSE,FALSE,TRUE),
#' output = "list")
AMCcreateelements <- function(element = "general", shufflequestions = TRUE, sections = TRUE, output = "message", filepath = "elements.tex", append = FALSE, messages = TRUE) {
#Remove duplicate elements
element <- unique(element)
# Create function that wraps "element" and initial code in LaTeX-AMC code
codeelement <- function(x, shufflequestions, sections) {
if (shufflequestions == T) {
if (sections == T) {
paste("\\section*{", x, "}\n","\\shufflegroup{", x, "}\n", "\\insertgroup{", x, "}\n", sep = "")
} else {
paste("\\shufflegroup{", x, "}\n", "\\insertgroup{", x, "}\n", sep = "")
}
} else {
if (sections == T) {
ifelse(x==""| is.na(x), "Default", paste("\\section*{",x,"}\n","\\insertgroup{", x, "}\n", sep = ""))
} else {
ifelse(x==""| is.na(x), "Default", paste("\\insertgroup{", x, "}\n", sep = ""))
}
}
}
# Apply that function to the list of elements
listofelement <- mapply(x = element, shufflequestions = shufflequestions, sections = sections, FUN = codeelement, SIMPLIFY = T)
#Return to vector
vectorofelement <- unlist(listofelement)
#return(listofelement)
uniqueelements <- vectorofelement
#
if (output == "message") {
message("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n",
"%%%%%%%%%| List of elements |%%%%%%%%%\n",
"%%% (copy & paste after questions) %%%\n",
"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n",
paste(uniqueelements)
)
} else if(output == "list"){
return(uniqueelements)
} else if (output == "file"){
if (messages == TRUE){
message("File written to \"", paste(basename(filepath)), "\".\n",
"%%%%%%%%%%%%%%%%%%%%%%\n",
"%%%| Instructions |%%%\n",
"%%%%%%%%%%%%%%%%%%%%%%\n",
"%-Make sure that the created file is the main AMC project folder.",
"\n-Point to the written file in the main .tex file (usually \"source.tex\"), using \"\\input{",
paste(basename(filepath)), "}\". \n",
"-%Note : Ultimately, the questions should be defined first before compiling (use the AMCcreatequestions() function for this).")}
write(uniqueelements, filepath, append = append)
}
}
| /scratch/gouwar.j/cran-all/cranData/AMCTestmakeR/R/AMCcreateelements.R |
#' Generate AMC LaTeX question codes in the console, in a LaTeX file, or as a vector.
#'
#' @param question A character value or vector containing the questions.
#' @param correctanswers A character (value, vector) containing the correct answer. A vector (or list) of character vectors can be passed, in the case of multiple correct answers.
#' @param incorrectanswers A character (value, vector) containing the wrong answer. A vector (or list) of character vectors can be passed, in the case of multiple wrong answers.
#' @param element A character value or vector to define the category of the entire set of questions (character value) or of each question (character vector). Defaults to "general".
#' @param code A character value or vector to identify each question (note that AMC requires each code to be unique in a questionnaire). Defaults to "Q1", "Q2", "Q3", etc. (the prefix "Q" can be changed with the "codeprefix" argument).
#' @param codeprefix A character value to be used to generate automatically question codes, when not provided with the "code" argument.
#' @param append A logical value indicating if the code should be appended (append=TRUE) to an existing .tex file. Defaults to FALSE, thus overwriting the file.
#' @param filepath A character value with the file path for the .tex file to be created (defaults to "questions.tex").
#' @param multicols A numeric (or numeric vector) indicating the desired number of columns for the presentation of the correct and incorrect answers (note that the LaTeX environment multicols must be called in the main ".tex" document for more than 1 column). Defaults to 2; a value of 1 does not require the LaTeX multicols environment.
#' @param messages A logical to indicate whether instructions should be output (defaults to TRUE).
#' @param listelements A logical value (or "file") indicating whether to output the list of question groups ("elements"): use TRUE to print it as a message, or "file" to write it to an "elements.tex" file (use the AMCcreateelements() function for more options).
#' @param output A character value to indicate what type of output is desired. "message" (default) outputs the questions as a message (suitable for direct copy-and-paste from console into LaTeX file). "file" outputs a questions .tex file (which can then be pointed to in the main LaTeX document). "list" and "vector" output a character vector of questions (suitable for further manipulation in R).
#' @param questiontype A character value or vector to indicate the type of all questions (character value) or of each (character vector) question. Use "single" for single-choice, and "multiple" for multiple-answer. So far open questions are not supported.
#' @param scoringcorrect A numeric value or vector to indicate the scoring for the correct answer(s). Defaults to 1.
#' @param scoringincorrect A numeric value or vector to indicate the scoring for an incorrect answer(s). Defaults to 0.
#' @param scoringnoresponse A numeric value or vector to indicate the scoring for non-responding. Defaults to 0.
#' @param scoringincoherent A numeric value or vector to indicate the scoring for incoherent answer(s) (e.g. two boxes checked for a single-answer questionnaire). Defaults to 0.
#' @param scoringbottom A numeric value or vector to indicate the minimum score for the question(s). Especially useful when attributing negative points to incorrect answers in a multiple-answer questionnaire, to ensure students do not lose too many points on one question. Defaults to 0.
#' @param shuffleanswersonce A logical value to indicate whether to shuffle answers for each question directly in the LaTeX code (useful if the answers are not randomized by examinee by AMC). Defaults to TRUE.
#'
#' @return A character value or vector (output = "list" or "vector"), a copy-and-pastable message (output = "message") or a LaTeX .tex file (output = "file") containing AMC LaTeX code for questions and answers.
#' @export
#'
#' @examples
#' #Creating a single question
#'
#' AMCcreatequestions("How much is $1+1$?",2,list("3", "11"))
#'
#' #Presenting answers in multiple columns (LaTeX environment 'multicols' is used)
#'
#' AMCcreatequestions("How much is $1+1$?",2,list("3","11"),multicols = 2)
#'
#' #Creating an entire questionnaire from a dataset of questions
#' ## Defining the questions (don't forget to escape R special characters)
#' question <- c("How much is $1+1$ ?", "How much is $1 \\times 1$ ?",
#' "How much is $\\frac{1}{2}$ ?")
#' correct <- c(2,1,0.5)
#' incorrect1 <- c(3,4,10)
#' incorrect2 <- c(1,3,100)
#' incorrect3 <- c(4,8,NA) #Empty values (NA and "") are skipped
#'
#' ## Generating the AMC LaTeX code
#' AMCcreatequestions(
#' question = question,
#' correctanswers = correct,
#' incorrectanswers = list(incorrect1,incorrect2,incorrect3))
#'
#' #Changing the code prefix from "Q" to "MATH"
#'
#' AMCcreatequestions(
#' question = question,
#' correctanswers = correct,
#' incorrectanswers = list(incorrect1,incorrect2,incorrect3),
#' codeprefix = "MATH")
#'
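#' #To write the questions directly to a "questions.tex" file (a sketch;
#' #wrapped in \dontrun because it writes a file to the working directory)
#' \dontrun{
#' AMCcreatequestions(
#'   question = question,
#'   correctanswers = correct,
#'   incorrectanswers = list(incorrect1,incorrect2,incorrect3),
#'   output = "file",
#'   filepath = "questions.tex")
#' }
#'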
AMCcreatequestions <- function(question,
correctanswers,
incorrectanswers,
element = "general",
code = paste(codeprefix,c(1:length(question)), sep=""),
codeprefix = "Q",
output = "message",
filepath = "questions.tex",
questiontype = "single",
append = F,
multicols=2,
messages = T,
listelements = T,
scoringcorrect = 1,
scoringincorrect = 0,
scoringnoresponse = 0,
scoringincoherent = scoringincorrect,
scoringbottom = scoringincorrect,
shuffleanswersonce = T
) {
#ELEMENT
# Create function that wraps "element" and initial code in LaTeX-AMC code
codeelement <- function(x) {
ifelse(x==""| is.na(x), "Default", paste("\\element{", x, "}\n", sep = ""))
}
# Apply that function to the list of elements
listofelement <- lapply(X = element, FUN = codeelement)
#Return to vector
vectorofelement <- unlist(listofelement)
#Get list of unique elements (used later)
#uniqueelements <- unique(vectorofelement)
#CODE
# Create function that wraps "code" in LaTeX-AMC code
codecode <- function(x, questiontype, scoringcorrect, scoringincorrect, scoringnoresponse, scoringincoherent, scoringbottom) {
if (questiontype == "single") {
questiontypetext <- "question"
}
if (questiontype == "multiple") {
questiontypetext <- "questionmult"
}
ifelse(x==""| is.na(x), "Default", paste("{\\begin{",questiontypetext,"}{", x,
"}\\scoring{b=",scoringcorrect,",",
"m=",scoringincorrect, ",",
"v=",scoringnoresponse, ",",
"e=",scoringincoherent, ",",
"b=", scoringbottom,
"}\n", sep = ""))
}
# Apply that function to the list of codes
listofcode <- mapply(x = code, questiontype= questiontype, scoringcorrect = scoringcorrect, scoringincorrect=scoringincorrect, scoringnoresponse=scoringnoresponse, scoringincoherent=scoringincoherent, scoringbottom=scoringbottom, FUN = codecode)
#Return to vector
vectorofcode <- unlist(listofcode)
#QUESTION
# Create function that wraps "question" in LaTeX-AMC code
codequestion <- function(x, multicols) {
if(multicols > 1.1){
paste(x, "\n \\begin{multicols}{",multicols,"}\\AMCBoxedAnswers\\begin{choices}\n", sep = "")
} else{
paste(x, "\n \\AMCBoxedAnswers\\begin{choices}\n", sep = "") }
}
# Apply that function to the list of questions
listofquestion <- mapply(x = question, multicols = multicols, FUN = codequestion)
#Return to vector
vectorofquestion <- unlist(listofquestion)
# Create function that wraps good answers (if non empty) in LaTeX-AMC code
codegoodanswer <- function(x) {
ifelse(x==""| is.na(x), "", paste("\\correctchoice{", x, "}\n", sep = ""))
}
# Apply that function to the list of good answers
arrayofcodedcorrectanswers <- sapply(FUN = codegoodanswer, X = correctanswers, simplify = "array")
# Create function that wraps wrong answers (if non empty) in LaTeX-AMC code
codewronganswer <- function(x) {
ifelse(x==""| is.na(x), "", paste("\\wrongchoice{", x, "}\n", sep = ""))
}
# Apply that function to the list of wrong answers
arrayofcodedincorrectanswers <- sapply(FUN = codewronganswer, X = incorrectanswers, simplify = "array")
# #Create a vector to adds the closing code (different if multicols)
# if(multicols < 2 ){
# vectorofclosingcode <- rep("\\end{choices}\\end{question}\n}\n \n", length(question))
# }
# else{
# vectorofclosingcode <- rep("\\end{choices}\\end{multicols}\\end{question}\n}\n \n", length(question))}
#
#
# Create function that creates the closing code
closequestion <- function(x, questiontype) {
if (questiontype == "single") {
questiontypetext <- "question"
}
if (questiontype == "multiple") {
questiontypetext <- "questionmult"
}
if (x < 2) {
paste("\\end{choices}\\end{",questiontypetext,"}\n}\n \n", sep="")
} else{
paste("\\end{choices}\\end{multicols}\\end{",questiontypetext,"}\n}\n \n", sep="")}
}
# Apply that function to generate the closing code for each question
vectorofclosingcode <- mapply(x = multicols, questiontype = questiontype, FUN = closequestion)
# Bind the code into a dataset
if(length(question)==1){
# For one question, create vector of all answers
vectorofallanswersforonequestion <- c(arrayofcodedcorrectanswers, arrayofcodedincorrectanswers)
# Shuffle them if shuffleanswersonce = TRUE
if (shuffleanswersonce == T) {
vectorofallanswersforonequestion <- sample(x = vectorofallanswersforonequestion,
size = length(vectorofallanswersforonequestion),
replace = F)
}
#Bind code
bindedcode <- cbind(vectorofelement, vectorofcode, vectorofquestion,paste(vectorofallanswersforonequestion, collapse = " "), vectorofclosingcode)
} else {
# Bind together answers in array to randomize them in R
arrayofallanswersformultiplequestions <- cbind(arrayofcodedcorrectanswers,arrayofcodedincorrectanswers)
# Shuffle them if shuffleanswersonce = TRUE
if (shuffleanswersonce == T) {
shuffled <- datamatrix <- arrayofallanswersformultiplequestions
for (i in 1:nrow(datamatrix)) { shuffled[i, ] <- sample(datamatrix[i, ]) }
arrayofallanswersformultiplequestions <- shuffled
}
bindedcode <- cbind(vectorofelement, vectorofcode, vectorofquestion,arrayofallanswersformultiplequestions, vectorofclosingcode) }
# Create list of questions
texfile <- apply(bindedcode, 1, paste, collapse=" ")
if (output == "message") {
message("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n",
"%%%%%%%%%| List of questions |%%%%%%%%%%\n",
"%%% (copy & paste in main .tex file) %%%\n",
"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n",
paste(texfile))
}
#If writefile is TRUE, write to latex document with name "filepath"
if(output=="file"){
write(texfile, filepath, append = append)
if (messages == T) {
message("File successfully written to \"", paste(basename(filepath)), "\".\n",
length(texfile), " questions created.\n\n",
"%%%%%%%%%%%%%%%%%%%%%%\n",
"%%%| Instructions |%%%\n",
"%%%%%%%%%%%%%%%%%%%%%%\n",
"%-Place the created .tex file in the AMC project folder. \n-In the main .tex file (usually, \"source.tex\") point to the created file using \"\\input{", paste(basename(filepath)), "}\".")
}
}
#Write list of elements to a separate elements.tex file
if (listelements == "file") {
#create path for list of elements
elementfilepath <- paste(dirname(filepath),"/elements.tex", sep ="")
AMCTestmakeR::AMCcreateelements(element = element, output = "file", filepath = elementfilepath, append = F, messages = messages)
if (messages == T) {
message("%%%%%%%%%%%%%%%%%%%%%%")
message("\n%-Note: The list of elements was written at ", elementfilepath)
message("\n%-Make sure the question elements are inserted with \"\\insertgroup{element}\" after the questions. Use AMCcreateelement() function for more options).")
}
}
#Show list of elements in message
if (listelements == T) {
AMCTestmakeR::AMCcreateelements(element = element)
if (messages == T) {
#message("\n%-Note: Use the function AMCcreateelements() for more options.\n")
}
}
if (output == "list") {
#return(unname(texfile))
return(vectorofquestion)
}
if (output == "vector") {
#return(unname(texfile))
return(vectorofquestion)
}
#FOR TESTS
if (output == "test") {
#return(unname(texfile))
return(arrayofallanswersformultiplequestions)
}
}
| /scratch/gouwar.j/cran-all/cranData/AMCTestmakeR/R/AMCcreatequestions.R |
#' Create a full Auto-Multiple-Choice test with a main .tex file (\code{source.tex}), a file for questions (\code{questions.tex}), and a file for elements (\code{elements.tex}).
#'
#' @param question A character value or vector containing the questions.
#' @param correctanswers A character (value, vector) containing the correct answer. A vector (or list) of character vectors can also be passed, in the case of multiple correct answers.
#' @param incorrectanswers A character (value, vector) containing the wrong answer. A vector (or list) of character vectors can also be passed, in the case of multiple wrong answers.
#' @param element A character value or vector to define the category of the entire set of questions (character value) or of each question (character vector). Defaults to "general".
#' @param code A character value or vector to identify each question (note that AMC requires each code to be unique in a questionnaire). Defaults to "Q1", "Q2", "Q3", etc. (the prefix "Q" can be changed with the "codeprefix" argument).
#' @param codeprefix A character value to be used to generate automatically question codes, when not provided with the "code" argument.
#' @param multicols A numeric (or numeric vector) indicating the desired number of columns for the presentation of the correct and incorrect answers (note that the LaTeX environment multicols must be called in the main ".tex" document for more than 1 column). Defaults to 2; a value of 1 does not require the LaTeX multicols environment.
#' @param questiontype A character value or vector to indicate the type of all questions (character value) or of each (character vector) question. Use "single" for single-choice, and "multiple" for multiple-answer. So far open questions are not supported.
#' @param scoringcorrect A numeric value or vector to indicate the scoring for the correct answer(s). Defaults to 1.
#' @param scoringincorrect A numeric value or vector to indicate the scoring for an incorrect answer(s). Defaults to 0.
#' @param scoringnoresponse A numeric value or vector to indicate the scoring for non-responding. Defaults to 0.
#' @param scoringincoherent A numeric value or vector to indicate the scoring for incoherent answer(s) (e.g. two boxes checked for a single-answer questionnaire). Defaults to 0.
#' @param scoringbottom A numeric value or vector to indicate the minimum score for the question(s). Especially useful when attributing negative points to incorrect answers in a multiple-answer questionnaire, to ensure students do not lose too many points on one question. Defaults to 0.
#' @param shufflequestions A logical value or vector to indicate whether to shuffle questions inside a question group. Defaults to TRUE.
#' @param shuffleanswers A logical value or vector to indicate whether to shuffle answers per examinee. Defaults to TRUE. If set to FALSE, it is recommended to shuffle the answers once for all examinees with 'shuffleanswersonce = TRUE'.
#' @param shuffleanswersonce A logical value to indicate whether to shuffle answers for each question directly in the LaTeX code (useful if the answers are not randomized by examinee by AMC). Defaults to TRUE.
#' @param sections A logical value or vector to indicate whether to create a new LaTeX section for each element (defaults to TRUE).
#' @param filepath A character value indicating the path for the main .tex file output. Most often, in AMC, it is \code{source.tex} (default), but in some examples it is named \code{groups.tex}. Note that the other created files (\code{questions.tex} and \code{elements.tex}) will be written in the folder of this file.
#' @param messages A logical value to indicate whether to output messages and reports (default is TRUE).
#' @param title A character value indicating a title for the test (default is "Test").
#' @param fontsize A numeric value to indicate the font size of the output document. Default is 10. Note: Above 12 pt, the LaTeX package "extarticle" is automatically used in lieu of "article".
#' @param instructions A logical or character value to add a block of preliminary instructions to the students (for example, how to fill the questionnaire). Use TRUE (default) for a short default block, "all" for an extended block, FALSE for no block, or a character value for custom instructions.
#' @param paper A character value indicating what type of paper to use. Default is "letter", but "a4" can also be used.
#' @param identifier A character value indicating what to ask for to pair the exam sheets. The default is "Name", but other values like "Student ID Number" may be more appropriate.
#' @param separateanswersheet A logical value to indicate whether to use a separate answer sheet. Defaults to FALSE.
#' @param answersheettitle A character value to indicate the title of the separate answer sheet. Defaults to "Answer sheet".
#' @param answersheetinstructions A logical or character value to add default (TRUE), remove (FALSE) or customize (character value) instructions given on the separate answer sheet. Default is TRUE, which indicates that the students shall answer on the answer sheet.
#' @param twosided A logical value to indicate whether the exam will be printed two sided. This is notably important when printing on a separate answer sheet, to have the answer sheet printed on a separate page. Defaults to TRUE.
#' @param lettersinsidebox A logical value to indicate whether to put letters inside boxes. Defaults to FALSE.
#' @param box A logical value to indicate whether to box the questions and answers, to ensure that they are always presented on the same page. Defaults to TRUE.
#' @param facilitatemanualadd A logical indicating whether to add LaTeX code to facilitate adding questions and elements manually. If TRUE, creates .tex files where questions and elements can be input manually without changing the main files. Defaults to TRUE.
#'
#' @return Writes 3 .tex documents (\code{source.tex}, \code{questions.tex} and \code{elements.tex}) for direct use in Auto-Multiple-Choice.
#' @export
#'
#' @examples
#' # Create all LaTeX files
#'
#' \dontrun{
#' AMCcreatetest(
#' # Arguments passed to AMCcreatequestions()
#' question = "How much is $1+1$?",
#' correctanswers = 2,
#' incorrectanswer = list("3", "11", "4"),
#' # Arguments passed to AMCcreateelements()
#' shufflequestions = T,
#' sections = F,
#' # Part used for test options
#' title = "Exam", #Custom title
#' fontsize = 11, #change fontsize
#' identifier = "ID Number", #change identifier
#' twosided = F, #print in one sided
#' instructions = T, #show an instructions block to students
#' separateanswersheet = F, #use a separate answer sheet
#' answersheettitle = "Respond Here", #Change answer sheet title
#' answersheetinstructions = "Fill the boxes" #Answer sheet instructions
#' )}
#'
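#' # A second sketch: building a test from several questions at once
#' # (the question and answer content below is purely illustrative)
#' \dontrun{
#' question <- c("How much is $1+1$?", "How much is $2 \\times 2$?")
#' AMCcreatetest(question = question,
#'   correctanswers = c(2, 4),
#'   incorrectanswers = list(c(3, 5), c(1, 2)))
#' }
#'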
AMCcreatetest <- function(question,
correctanswers,
incorrectanswers,
element = "general",
code = paste(codeprefix,c(1:length(question)), sep=""),
codeprefix = "Q",
questiontype = "single",
multicols=2,
scoringcorrect = 1,
scoringincorrect = 0,
scoringnoresponse = 0,
scoringincoherent = scoringincorrect,
scoringbottom = scoringincorrect,
shufflequestions = T,
shuffleanswers = T,
shuffleanswersonce = T,
sections = T,
title = "Test",
filepath = "source.tex",
messages = T,
fontsize = 10,
instructions = T,
paper = "letter",
identifier = "Name",
separateanswersheet = F,
answersheettitle = "Answer sheet",
answersheetinstructions = T,
twosided = T,
lettersinsidebox = F,
box = T,
facilitatemanualadd = T) {
#Name file path
filepathname <- paste(dirname(filepath), sep="")
if(filepathname == "."){
filepathname <- getwd()
}
# shorten argument for letter paper
if(paper == "letter") {
paper <- "letterpaper"
}
# Shorten argument for a4
if(paper == "a4") {
paper <- "a4paper"
}
# Use extarticle as the document class for font sizes above 12pt (the standard article class supports only 10-12pt)
if(fontsize > 12) {
articlelibrary <- "extarticle"
} else {articlelibrary <- "article"}
# Create file paths
filepathgroups <- filepath
filepathquestions <- paste(dirname(filepath), "/questions.tex", sep="")
filepathelements <- paste(dirname(filepath), "/elements.tex", sep="")
#Create header block
headerblocknonseparate <- c("\\namefield{\\fbox{\n",
" \\begin{minipage}{.5\\linewidth}\n",
" %Identifier:\n",
paste(identifier," :\n\n", sep =""),
" \\vspace*{.5cm} \n",
" %\\dotfill \n",
" \\vspace*{1mm} \n",
" \\end{minipage}\n}} \n")
#Create header block
headerblockseparate <- c("\\namefield{\\fbox{\n",
" \\begin{minipage}{.5\\linewidth}\n",
" %Identifier:\n",
paste(identifier," :\n\n", sep =""),
" \\vspace*{.5cm} \n",
" %\\dotfill \n",
" \\vspace*{1mm} \n",
" \\end{minipage}\n}} \n")
# If instructions == T, add a priliminary block
if(instructions == T){
instructionblock <- c("\n",
"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n",
"%% INSTRUCTIONS TO STUDENTS %%\n",
"%% UNCOMMENT AS NEEDED %%\n",
"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n",
"\\section*{Instructions} \n",
"\n",
"\\begin{itemize} \n",
"%\\item Points are \\underline{not} deduced for incorrect answers.%, and most questions are independent from one another, so try to answer all the questions, even if you hesitate.\n",
"%\\item The total exam is graded over XX points.\n",
"%\\item There is \\underline{always} one and \\underline{only} one correct answer.\n",
"%\\item All the questions are presented in randomized order and are independent from each other.\n",
"\\item \\underline{Fill} -- don't cross -- with a dark color pencil the box corresponding to what you think is the correct answer, leaving the others blank. Use an eraser to correct any mistake.\n",
"%\\item If you think you made a mistake, circle your \\emph{entire} final answer (make your final answer as clear as you can): The exam will be both graded by computer and checked by your instructors to ensure accuracy.\n",
"\\item Do not write or draw around or in the black circles and bar codes on the corners and top of each page.\n",
"%\\item For short answer questions, write your answers in the answer box provided. Leave the grey part blank.\n",
"\\end{itemize}\n\n",
"\\hrule \\vspace{3mm}",
"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n")
} else {
if (instructions == "all") {
instructionblock <- c("\n",
"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n",
"%% INSTRUCTIONS TO STUDENTS %%\n",
"%% UNCOMMENT AS NEEDED %%\n",
"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n",
"\\section*{Instructions} \n",
"\n",
"\\begin{itemize} \n",
"\\item Points are \\underline{not} deduced for incorrect answers.%, and most questions are independent from one another, so try to answer all the questions, even if you hesitate.\n",
"%\\item The total exam is graded over XX points.\n",
"\\item There is \\underline{always} one and \\underline{only} one correct answer.\n",
"\\item All the questions are presented in randomized order and are independent from each other.\n",
"\\item \\underline{Fill} -- don't cross -- with a dark color pencil the box corresponding to what you think is the correct answer, leaving the others blank. Use an eraser to correct any mistake.\n",
"\\item If you think you made a mistake, circle your \\emph{entire} final answer (make your final answer as clear as you can): The exam will be both graded by computer and checked by your instructors to ensure accuracy.\n",
"\\item Do not write or draw around or in the black circles and bar codes on the corners and top of each page.\n",
"\\item For short answer questions, write your answers in the answer box provided. Leave the grey part blank.\n",
"\\end{itemize}\n\n",
"\\hrule \\vspace{3mm}",
"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n")
} else {
if (instructions == F) {
instructionblock <- ""
} else {
instructionblock <- instructions
}
}
}
if(separateanswersheet == F) {
#Separate answers
separateanswer1 <- ""
clearpagetext <- "\\clearpage"
answersheettext <- ""
}
if(separateanswersheet == T) {
#Separate answers
separateanswer1 <- ",separateanswersheet"
if (twosided == T) {
clearpagetext <- "\\AMCcleardoublepage" ## Good if recto-verso
} else {
clearpagetext <- "\\clearpage" ## Good if recto-verso
}
#Auto add answer sheet instructions
if (answersheetinstructions == TRUE) {
answersheetinstructionstext <- c("\\bf\\em Answers must be given exclusively on this sheet:\n",
"Answers given on the other sheets will be ignored.\n")
} else {
if (answersheetinstructions == FALSE) {
answersheetinstructionstext <- ""
} else {
answersheetinstructionstext <- answersheetinstructions
}
} #Remove answer sheet instructions
#Define Answersheet header
answersheettext <- c("\n \\AMCformBegin \n",
"%%% Answer sheet header %%%\n",
"{\\large\\bf ", answersheettitle,":}\n",
"\\hfill ", headerblockseparate,
"\\begin{center}\n",
answersheetinstructionstext,
"\\end{center}\n",
"%%% End of answer sheet header %%%\n",
"\\AMCform"
)
}
# OPTION BOX
if (box == T) {
useboxpackage <- "box,"
} else {
useboxpackage <- ""
}
# OPTION letters inside box
if (lettersinsidebox == T) {
lettersinsideboxcode <- "insidebox,"
} else {
lettersinsideboxcode <- ""
}
# OPTION no shuffle: pass "noshuffle" to the automultiplechoice package when answers should NOT be shuffled
if (shuffleanswers == FALSE) {
shuffleanswerscode <- "noshuffle,"
} else {
shuffleanswerscode <- ""
}
# OPTION facilitate manual add
if (facilitatemanualadd == T) {
manualaddquestionscode <- "\n %Manually add questions in the separate file \n \\input{manuallyaddedquestions.tex}\n"
manualaddelementscode <- "\n %Manually add question groups (elements) in the separate file \n \\input{manuallyaddedelements.tex}\n"
facilitatemanualaddmessage <- c("\n- manuallyaddedquestions.tex (manually add questions here)","\n- manuallyaddedelements.tex (manually add question groups here)")
# Write the additional files for questions
filemanuallyaddedquestions <- paste("%Place your additional questions below (or leave blank but keep the file)\n",
"%Note : AMCTestmakeR is set to NOT erase your additional questions if you rerun it.\n",
sep ="")
filepathmanuallyaddedquestions <- paste(dirname(filepath), "/manuallyaddedquestions.tex", sep="")
write(filemanuallyaddedquestions, filepathmanuallyaddedquestions, append = T)
# Write the additional files for elements
filemanuallyaddedelements <- paste("%Place your additional question groups (elements) below (or leave blank but keep the file)\n",
"%Note : AMCTestmakeR is set to NOT erase your additional questions if you rerun it.\n",
sep ="")
filepathmanuallyaddedelements <- paste(dirname(filepath), "/manuallyaddedelements.tex", sep="")
write(filemanuallyaddedelements, filepathmanuallyaddedelements, append = T)
} else {
manualaddquestionscode <- ""
manualaddelementscode <- ""
facilitatemanualaddmessage <- ""
}
listoriginaltex <- c(paste("\\documentclass[",paper,",",fontsize,"pt]{",articlelibrary,"} \n", sep =""),
"\n",
"\\usepackage{multicol} \n",
"\\usepackage[utf8x]{inputenc} \n",
"\\usepackage[T1]{fontenc} \n",
"\\usepackage{amsmath} \n",
"\\usepackage[",useboxpackage,lettersinsideboxcode, shuffleanswerscode,
"completemulti",separateanswer1,"]{automultiplechoice} \n",
"\n",
"\\renewcommand{\\rmdefault}{\\sfdefault} \n",
"\n",
"%Tweak margins here if desired",
"%\\geometry{hmargin=3cm,headheight=2cm,headsep=.3cm,footskip=1cm,top=3.5cm,bottom=2.5cm}\n",
"\n",
"\\usepackage{titlesec} \n",
"\n",
"%Format section titles with horizontal lines \n",
"\\titleformat{\\section} \n",
"{\\hrule\\center\\normalfont\\normalsize\\bfseries}{\\thesection.}{1em}{}[{\\vspace{1mm}\\hrule}] \n",
"\n",
"\\renewcommand{\\thesection}{\\Alph{section}} \n",
"\\renewcommand{\\thesubsection}{\\thesection.\\Roman{subsection}} \n",
"\n",
"\\AMCrandomseed{1527384} \n",
"\n",
"%Takes the questions from the questions.tex file\n",
"\\input{questions.tex}",manualaddquestionscode,
"\n",
"\\begin{document} \n",
"\n",
"%Vertical space between answers \n",
"%\\AMCinterBrep=.2ex \n",
"\n",
"\\baremeDefautS{mz=1} \n",
"\n",
"\\onecopy{10}{ \n",
"\n",
"\\vspace*{.5cm} \n",
"\\begin{minipage}{.4\\linewidth} \n",
"\\centering \n",
"%Uncomment to insert logo image \n",
"%\\includegraphics[height=2cm,width=4cm,keepaspectratio]{logo.png} \n",
"\n",
"%Title \n",
"\\large\\bf ", title ," \\vspace*{1mm} \n",
"\\end{minipage}\n",
headerblocknonseparate,
"\n",
instructionblock,
"\n",
"\n",
"%Takes the elements list from the elements.tex file\n",
"\\input{elements.tex}",
manualaddelementscode,
"\n",
clearpagetext," \n",
"\n",
answersheettext,
"\n",
"}\n",
"% This test was created with AMCTestmakeR for R.\n\n\n",
"\\end{document} \n")
collapsedlist <- paste(listoriginaltex, sep = "", collapse = "")
# Write the groups.tex
write(collapsedlist, filepathgroups, append = F)
# Create list of questions through AMCcreatequestions
AMCcreatequestions(question = question,
correctanswers = correctanswers,
incorrectanswers = incorrectanswers,
element = element,
code = code,
codeprefix = codeprefix,
output = "file",
filepath = filepathquestions,
questiontype = questiontype,
append = F,
multicols = multicols,
messages = F,
listelements = F,
scoringcorrect = scoringcorrect,
scoringincorrect = scoringincorrect,
scoringnoresponse = scoringnoresponse,
scoringincoherent = scoringincoherent,
scoringbottom = scoringbottom,
shuffleanswersonce = shuffleanswersonce
)
# Create list of elements through AMCcreatelements
#(verbose but more options)
AMCcreateelements(element = element,
shufflequestions = shufflequestions,
sections = sections,
output = "file",
append = F,
messages = F,
filepath = filepathelements)
# Report message
if (isTRUE(messages)) {
message("The following files were successfully written to ",
filepathname,
":\n- ",
basename(filepath),
"\n- questions.tex",
"\n- elements.tex",
#Adds additional messages
facilitatemanualaddmessage,
"\n\nPut all these files in your AMC project folder and use AMC to compile them."
)
}
}
| /scratch/gouwar.j/cran-all/cranData/AMCTestmakeR/R/AMCcreatetest.R |
## ----comment=NA----------------------------------------------------------
library(AMCTestmakeR)
## ----comment=NA----------------------------------------------------------
AMCcreatequestions(
question = "How much is $1+1$?",
correctanswers = 2,
incorrectanswers = list(3, 11))
## ------------------------------------------------------------------------
question <- c("How much is $1+1$ ?",
"How much is $1 \\times 1$ ?",
"How much is $\\frac{1}{2}$ ?")
correct <- c(2,1,0.5)
incorrect1 <- c(3,4,10)
incorrect2 <- c(1,3,100)
incorrect3 <- c(4,8,NA)
## ------------------------------------------------------------------------
AMCcreatequestions(question = question,
correctanswers = correct,
incorrectanswers = list(incorrect1,incorrect2,incorrect3))
## ------------------------------------------------------------------------
AMCcreatequestions(element = c("ADD", "MULT", "DIV"),
question = question,
correctanswers = correct,
incorrectanswers = list(incorrect1,incorrect2,incorrect3))
## ------------------------------------------------------------------------
AMCcreatequestions(code = c("ADD1", "MULT1", "DIV1"),
question = question,
correctanswers = correct,
incorrectanswers = list(incorrect1,incorrect2,incorrect3))
## ------------------------------------------------------------------------
AMCcreatequestions(codeprefix = "MATH",
question = question,
correctanswers = correct,
incorrectanswers = list(incorrect1,incorrect2,incorrect3))
## ----eval=F--------------------------------------------------------------
# AMCcreatetest("How much is $1+2$?",2,list("3", "11"))
## ----eval=F--------------------------------------------------------------
# AMCcreatetest(
# #This part is passed to the AMCcreatequestions() function:
# question = "How much is $1+1$?",
# 2,
# list("3", "11"),
# #The next part is passed to AMCcreateelements():
# shuffle = T,
# sections = T,
# #The last part is for general test options:
# title = "Exam", #Custom title
# paper = "a4", #change the paper for a4
# fontsize = 11, #change fontsize
# identifier = "ID Number", #change identifier
# twosided = F, #print in one sided
# instructions = "Don't respond here.", #show an instructions block
# separateanswersheet = T, #use a separate answer sheet
# answersheettitle = "Respond Here", #Change answer sheet title
# answersheetinstructions = "Fill the boxes."#Answer sheet instructions
# )
## ------------------------------------------------------------------------
AMCcreateelements(element = c("ADD", "MULT", "DIV"), shuffle = T, sections = T)
## ------------------------------------------------------------------------
AMCcreateelements(element = c("MATH", "MATH", "MATH", "STAT"), shuffle = F, sections = F)
| /scratch/gouwar.j/cran-all/cranData/AMCTestmakeR/inst/doc/AMCTestmaker.R |
---
title: "Using `AMCTestmakeR`"
author: "Nils Myszkowski"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Using `AMCTestmakeR`}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
`AMCTestmakeR` provides functions to be used with the free Optical Mark Recognition (OMR) software Auto Multiple Choice.
Its main purpose is to facilitate working with R and AMC in parallel, but it can also be used to easily transform a spreadsheet into an AMC questionnaire.
So far, the features are limited to generating AMC-LaTeX code questions for Multiple Choice Questionnaires (single and multiple answer). Hopefully, that covers how most people use Auto Multiple Choice.
# Install and load the library
Install the library with `install.packages("AMCTestmakeR")`, and load it with:
```{r comment=NA}
library(AMCTestmakeR)
```
# Basic Use : Generating questions
## Generating code for one question
Let's say that we have a simple question to add:
- How much is $1+1$?
- The correct answer is $2$ (if you didn't get this one, you're probably at the wrong place)
- The incorrect ones are $3$ and $11$
```{r comment=NA}
AMCcreatequestions(
question = "How much is $1+1$?",
correctanswers = 2,
incorrectanswers = list(3, 11))
```
Or, more simply `AMCcreatequestions("How much is $1+1$?",2,list(3, 11))`.
## Writing questions to a .tex file
R escapes different characters than LaTeX, so doing a copy-and-paste of the console output will require that you tweak things a bit.
Instead of doing that, I recommend using the optional argument `writefile = TRUE` to write the generated code into a file. The default creates a `questions.tex` file in the working directory, but you can indicate another path with `filepath` and append to an existing file -- rather than overwriting it -- with `append = TRUE`.
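For example, a minimal call might look like the following sketch (the arguments are those described above; the file path is just an illustration):

```{r eval=F}
# Write the generated LaTeX code to questions.tex instead of the console
AMCcreatequestions("How much is $1+1$?", 2, list(3, 11),
                   writefile = TRUE,
                   filepath = "questions.tex")
```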
## Generating code for multiple questions
If you have an entire questionnaire to generate, `AMCcreatequestions()` can take vectors for many of its arguments.
Let's first create 3 questions, putting the questions and answers in vectors.
```{r}
question <- c("How much is $1+1$ ?",
"How much is $1 \\times 1$ ?",
"How much is $\\frac{1}{2}$ ?")
correct <- c(2,1,0.5)
incorrect1 <- c(3,4,10)
incorrect2 <- c(1,3,100)
incorrect3 <- c(4,8,NA)
```
Note that the third question has only 2 incorrect answers: `AMCTestmakeR` will simply skip missing values (`NA` and `""`).
```{r}
AMCcreatequestions(question = question,
correctanswers = correct,
incorrectanswers = list(incorrect1,incorrect2,incorrect3))
```
As before, copy-paste is not optimal, as R escapes different characters than LaTeX. Using `writefile = TRUE` is more convenient, since it takes care of this and translates R text into LaTeX (see above for details). Also, consider using the function `AMCcreatetest()` to handle the full test creation (described later as *Workflow 1*).
## Additional options
### Changing the element
The element in AMC corresponds to a group of questions. They can for example correspond to different learning outcomes or chapters of a book. AMC is able to randomize questions within elements.
Provide a character value or vector to the argument `element` to define it. If you provide a value, all questions will have this value as element. If you provide a vector, each question will have its corresponding element.
```{r}
AMCcreatequestions(element = c("ADD", "MULT", "DIV"),
question = question,
correctanswers = correct,
incorrectanswers = list(incorrect1,incorrect2,incorrect3))
```
The default element is `general`.
### Changing the question codes
In AMC, each question should have a unique code.
The code can be provided in `AMCTestmakeR` through the argument `code` (like for the `element` argument, a character value or vector can be used).
```{r}
AMCcreatequestions(code = c("ADD1", "MULT1", "DIV1"),
question = question,
correctanswers = correct,
incorrectanswers = list(incorrect1,incorrect2,incorrect3))
```
A lazier alternative is to supply a code prefix with the `codeprefix` argument instead of full codes. Unique codes will then be generated by the function by incrementing numbers after the prefix.
```{r}
AMCcreatequestions(codeprefix = "MATH",
question = question,
correctanswers = correct,
incorrectanswers = list(incorrect1,incorrect2,incorrect3))
```
# Once the questions are ready...
When your questions are ready and `AMCcreatequestions()` gives a satisfactory result, I suggest using `AMCcreatetest()` directly to create the other .tex files and obtain a fully working test (*Workflow 1*).
## Workflow 1: Creating the test files from scratch with `AMCcreatetest()`
`AMCTestmakeR` can create a test from scratch with the function `AMCcreatetest()`. It creates 3 .tex files (groups.tex, questions.tex, elements.tex) that can be directly used in the AMC project folder.
The first arguments of this function are passed to the `AMCcreatequestions()` function (see above for how to use it). The rest of the arguments are used to set test options (like `fontsize`, `separateanswersheet`, `title`, `identifier`, etc.). See the function documentation for a full list of options. If you don't pass any option (except for the questions of course), you should have a usable -- albeit not customized -- test.
```{r eval=F}
AMCcreatetest("How much is $1+2$?",2,list("3", "11"))
```
### More options
Separate answer sheets, font size, title, instructions, etc.
```{r eval=F}
AMCcreatetest(
#This part is passed to the AMCcreatequestions() function:
question = "How much is $1+1$?",
2,
list("3", "11"),
#The next part is passed to AMCcreateelements():
shuffle = T,
sections = T,
#The last part is for general test options:
title = "Exam", #Custom title
paper = "a4", #change the paper for a4
fontsize = 11, #change fontsize
identifier = "ID Number", #change identifier
twosided = F, #print in one sided
instructions = "Don't respond here.", #show an instructions block
separateanswersheet = T, #use a separate answer sheet
answersheettitle = "Respond Here", #Change answer sheet title
answersheetinstructions = "Fill the boxes."#Answer sheet instructions
)
```
When working on the questions, I suggest using `AMCcreatequestions()` with the default console output (to check the result without opening a separate .tex file). Once your questions are ready, switch to `AMCcreatetest()`, keeping the same initial arguments and adjusting the rest.
## Workflow 2: Doing things manually with your own template and `AMCcreateelements()`
If you want to customize more, you can do things step by step. If doing that, I highly recommend starting by reading the AMC documentation.
When using `AMCcreatequestions()` to create a questionnaire in AMC, I suggest writing the questions, with `writefile = TRUE`, to a separate questions file (e.g. `questions.tex`) in your AMC project folder.
From there, in your main .tex document (usually, that's named `groups.tex` by AMC), add `\input{questions.tex}` at the beginning (but still after your `\begin{document}`).
Then, where you want to place the different `elements`, in your main .tex, add `\insertgroup{element}` for each of them. Before the `\insertgroup{}` command, you can use `\shufflegroup{element}` to shuffle the questions within the element.
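For instance, the relevant portion of a manually edited main .tex file could look like this sketch (the element name `ADD` is an assumption; use your own element names):

```latex
\input{questions.tex}   % load the question definitions
% ...
\shufflegroup{ADD}      % optional: shuffle questions within the element
\insertgroup{ADD}       % insert the element's questions here
```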
### The `AMCcreateelements()` function
If you have many elements in your document, and therefore many `\insertgroup{}` (and `\shufflegroup{}`) commands to insert, you may want to use the `AMCcreateelements()` function. It prints to the console (in a form that you can, this time, easily copy-and-paste into your main .tex document) the commands to insert the elements and, if desired through the `shuffle` argument, shuffle them:
```{r}
AMCcreateelements(element = c("ADD", "MULT", "DIV"), shuffle = T, sections = T)
```
Note that, if the same element is input multiple times (which often happens if you pass to this function the same vector of elements as the one used in `AMCcreatequestions()`), it is not a problem, since only unique values are output:
```{r}
AMCcreateelements(element = c("MATH", "MATH", "MATH", "STAT"), shuffle = F, sections = F)
```
# Future features
Auto Multiple Choice is a great free program that can do a lot more than what `AMCTestmakeR` currently helps with, so I will try to add the most helpful features here soon. In any case, I strongly encourage reading the documentation on how to use LaTeX in Auto Multiple Choice to get a sense of its many possibilities.
| /scratch/gouwar.j/cran-all/cranData/AMCTestmakeR/inst/doc/AMCTestmaker.Rmd |
#' @import data.table
#' @importFrom stats ar na.omit sd
#' @importFrom utils setTxtProgressBar txtProgressBar
#' @title MIM roll
#' @description This function computes the rolling window MIM for a given data.table
#' @param data.table data.table with the data
#' @param identity.col column name of the identity intrument for example the stock ticker
#' @param Date.col column name of the date column with format "%Y-%m-%d" (for example "2019-12-01")
#' @param rollWindow number of days to compute the MIM
#' @param return.col column name of the return column
#' @param min.obs minimum number of observations to compute the MIM
#' @param max.lag maximum number of lags to compute the MIM. The algorithm selects the number of lags
#' that minimizes the AIC, up to this maximum. If the AIC selects zero lags, the algorithm
#' estimates an AR(1) model instead. This avoids zeros in the MIM.
#' @param a parameter to scale the MIM. In the paper Tran & Leirvik (2019) we use a = 1.0
#' @return data.table with the MIM and the number of lags used to compute the MIM
#' @noRd
#' @examples
#' library(AMIM)
#' library(data.table)
#' data <- AMIM::exampledata # load the example data
#' MIM <- MIM.roll(
#' data.table = data, identity.col = "ticker", rollWindow = 60,
#' Date.col = "Date", return.col = "RET", min.obs = 30, max.lag = 10
#' )
MIM.roll <- function(data.table, identity.col, Date.col, rollWindow, return.col, min.obs, max.lag, a) {
data <- data.table::copy(data.table)
data.table::setDT(data)
data.table::setnames(data,
old = c(return.col),
new = c("RET")
) ## change column name
  data.table::setorderv(data, c(identity.col, Date.col)) # order rows by identity and date
pb <- txtProgressBar(
min = 0, style = 3,
max = nrow(data) - rollWindow * data.table::uniqueN(data[, c(identity.col), with = F])
) ### SET progress Bar
  dt.dates <- data[, list(date.join = seq(base::as.Date(get(Date.col), format = "%Y-%m-%d"),
by = "-1 day", len = rollWindow
)),
by = c(identity.col, Date.col)
] ## create an extra table for join
MIM <- data.table::merge.data.table(dt.dates, data,
by.x = c(identity.col, "date.join"),
by.y = c(identity.col, Date.col), all.x = T
)
remove(dt.dates)
MIM <- na.omit(MIM)
  MIM <- MIM[,
    {
      if (length(RET) < min.obs || sd(RET, na.rm = TRUE) == 0) {
        MIM <- as.numeric(NA)
        N <- as.numeric(NA)
      } else {
        f <- ar(RET, aic = TRUE, order.max = max.lag)
        if (length(f$ar) == 0) {
          # If the AIC selects zero lags, fall back to an AR(1); its coefficient
          # is not significant anyway, but this avoids zeros in MIM and AMIM
          f <- ar(RET, aic = FALSE, order.max = 1)
        }
        g <- solve(t(chol(f$asy.var.coef))) %*% f$ar ### L^(-1) %*% beta; chol() returns upper-triangular U, so t(U) = L
        g <- abs(g) * a ## absolute value, then scale
        MIM <- sum(g, na.rm = TRUE) / (1 + sum(g, na.rm = TRUE))
        N <- as.numeric(f$order)
      }
      setTxtProgressBar(pb, .GRP) ### update the progress bar
      list(MIM = MIM, N = N)
    },
    by = c(identity.col, Date.col)
  ]
  close(pb)
  return(MIM)
}
#' @title AMIM roll
#' @import data.table
#' @description This function computes the rolling window AMIM for a given data.table
#' @param data.table data.table with the data
#' @param identity.col column name of the identity intrument for example the stock ticker
#' @param Date.col column name of the date column with format "YYYY-mm-dd" (for example "2019-12-01")
#' @param rollWindow number of days to compute the AMIM
#' @param return.col column name of the return column
#' @param min.obs minimum number of observations to compute the AMIM
#' @param max.lag maximum number of lags to compute the MIM and then the AMIM. The algorithm selects
#' the number of lags that minimizes the AIC, up to this maximum. If the AIC selects zero lags, the
#' algorithm estimates an AR(1) model instead. This avoids zeros in the MIM and AMIM.
#' @return data.table with the MIM, the AMIM, the confidence interval (CI), and the number of lags (N) used to compute them.
#' @export
#' @examples
#' library(AMIM)
#' library(data.table)
#' data <- AMIM::exampledata # load the example data
#' AMIM <- AMIM.roll(
#' data.table = data, identity.col = "ticker", rollWindow = 60,
#' Date.col = "Date", return.col = "RET", min.obs = 30, max.lag = 10
#' )
#'
#' AMIM[, .SD[(.N - 5):(.N), ], by = ticker] # Last 5 rows of each instrument
AMIM.roll <- function(data.table, identity.col, Date.col, rollWindow, return.col, min.obs, max.lag) {
MIM. <- MIM.roll(
data.table = data.table, identity.col = identity.col, Date.col = Date.col,
rollWindow = rollWindow, return.col = return.col, min.obs = min.obs,
max.lag = max.lag, a = 1
  ) # compute MIM and N. Force a = 1 as in the Tran & Leirvik (2019) paper
MIM. <- data.table::setDT(MIM.)
CI <- data.table::copy(AMIM::CI) ## copy the CI data from AMIM package
data.table::setDT(CI)
MIM. <- data.table::merge.data.table(x = MIM., y = CI[a == 1, .(N, CI)], all.x = T, by.x = "N", by.y = "N", sort = F)
MIM.$AMIM <- (MIM.$MIM - MIM.$CI) / (1 - MIM.$CI)
return(MIM.)
}
| /scratch/gouwar.j/cran-all/cranData/AMIM/R/AMIM.r |
#' Example Data to compute AMIM
#' @name exampledata
#' @format ## `exampledata`
#' A data.table with the following columns:
#' \describe{
#' \item{Date}{Date format YYYY-MM-DD}
#' \item{ticker}{Imaginary ticker}
#' \item{RET}{Imaginary return}
#' ...
#' }
#' @source Vu Le Tran
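#' @examples
#' # Quick look at the bundled example data
#' head(AMIM::exampledata)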
"exampledata"
#' Confidence Interval Data to compute AMIM
#' @name CI
#' @format ## `CI`
#' A data.table with the following columns:
#' \describe{
#' \item{N}{Number of lags}
#' \item{a}{Scale parameter equal to 1 as in Tran & Leivrik (2019)}
#' \item{CI}{Confidence interval corresponding to
#' each number of lags and scale parameter}
#' ...
#' }
#' @source Tran & Leirvik (2019)
#'
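#' @examples
#' # Confidence-interval table used internally by AMIM.roll
#' head(AMIM::CI)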
"CI"
| /scratch/gouwar.j/cran-all/cranData/AMIM/R/data.R |
utils::globalVariables(c("a", ".", "N", "RET"))
| /scratch/gouwar.j/cran-all/cranData/AMIM/R/zzz.r |
#' @name AMModels
#' @aliases AMModels
#' @title Adaptive Management Model Manager
#' @description Enables adaptive management by codifying knowledge in the form of models
#' generated from numerous analyses and datasets. AMModels facilitates this process
#' by storing all models and datasets in a single object that can be seamlessly
#' updated, tracking changes in knowledge through time. A shiny application called
#' AM Model Manager enables the use of these functions via a GUI. To launch the
#' Model Manager, use \code{modelMgr()}.
#' @family amModelLib
#' @keywords package
#' @importFrom methods is new
#' @importFrom stats rbeta rbinom rmultinom rnorm rpois runif
#' @importFrom unmarked unmarkedFrameOccu
#' @section Copyright:
#' This software is in the public domain because it contains materials
#' that originally came from the United States Geological Survey, an agency of
#' the United States Department of Interior. For more information, see the
#' official USGS copyright policy at http://www.usgs.gov/visual-id/credit_usgs.html#copyright
#' @section Disclaimer:
#' This software is in the public domain because it contains materials
#' that originally came from the U.S. Geological Survey, an agency of the United
#' States Department of Interior. For more information, see the official USGS
#' copyright policy at http://www.usgs.gov/visual-id/credit_usgs.html#copyright.
#' Although this software program has been used by the U.S. Geological
#' Survey (USGS), no warranty, expressed or implied, is made by the USGS or the
#' U.S. Government as to the accuracy and functioning of the program and related
#' program material nor shall the fact of distribution constitute any such
#' warranty, and no responsibility is assumed by the USGS in connection
#' therewith. This software is provided "AS IS."
#' @examples
#'
#' # create data and linear models from from lm helpfile
#' ## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
#' ## Page 9: Plant Weight Data.
#' ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
#' trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
#' group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
#' weight <- c(ctl, trt)
#' lm.D9 <- lm(weight ~ group)
#' lm.D90 <- lm(weight ~ group - 1) # omitting intercept
#'
#' # create an amData object that includes metadata
#' plant.data <- data.frame(group = group, weight = weight)
#' plant.data <- amData(
#' data = plant.data,
#' comment = 'Dataset from lm helpfile.'
#' )
#'
#' # create a second amData object that includes metadata
#' log.plant.data <- data.frame(group, log.weight = log(weight))
#' log.plant.data <- amData(
#' data = log.plant.data,
#' comment = 'data with log weight',
#' source = 'lm helpfile (R).'
#' )
#'
#' # create two amModel objects with metadata and the metadata keyword 'data' to soft
#' # link the data used to fit the models
#' full.model <- amModel(
#' model = lm.D9,
#' comment = 'full model',
#' source = 'lm helpfile (R).',
#' taxa = 'plants',
#' data = 'plant.data'
#' )
#'
#' no.int.model <- amModel(
#' model = lm.D90,
#' comment = 'model without intercept',
#' source = 'lm helpfile (R).',
#' taxa = 'plants',
#' data = 'plant.data'
#' )
#'
#' # create an amModelLib that contains the two amModel objects and two amData objects
#' # the models and data must be supplied as named lists
#' mymodels <- amModelLib(
#' models = list(
#' full.model = full.model,
#' no.int.model = no.int.model
#' ),
#' data = list(
#' plant.data = plant.data,
#' log.plant.data = log.plant.data
#' )
#' )
#'
#' # show the amModelLib
#' mymodels
#'
#' # search the entire amModelLib for the word 'intercept'
#' # the dataset associated with the model will be returned
#' grepAMModelLib(pattern = "intercept", amml = mymodels)
#'
#' # search for data containing the word 'log'
#' grepAMModelLib(pattern = "log", amml = mymodels, search = "data")
#'
#' # search for models containing the word 'full';
#' # because 'full.model' is soft-linked to a dataset,
#' # the dataset information will be returned.
#' grepAMModelLib(pattern = "full", amml = mymodels, search = "model")
#'
#'
#' # list names of models in an amModelLib
#' lsModels(mymodels)
#'
#' # list names of data in an amModelLib
#' lsData(mymodels)
#'
#' # extract the dataset by name
#' getAMData(amml = mymodels, 'plant.data', as.list = FALSE)
#'
#' # notice the data are returned in their original class
#' class(getAMData(amml = mymodels, 'plant.data', as.list = FALSE))
#'
#' # you can also extract by index
#' getAMData(amml = mymodels, 1, as.list = FALSE)
#'
#' # extract the model by name
#' getAMModel(amml = mymodels, 'full.model', as.list = FALSE)
#'
#' # notice the models are returned in their original class
#' class(getAMModel(amml = mymodels, 'full.model', as.list = FALSE))
#'
#' # you can also extract by index
#' getAMModel(amml = mymodels, 1, as.list = FALSE)
#'
#' # remove just the second model
#' rmModel(mymodels, 'no.int.model')
#'
#' # remove the first plant data
#' # notice a warning is produced because some amModels are softly linked
#' # to the dataset via the metadata keyword, 'data'
#' rmData(mymodels, 'plant.data')
#'
#' \dontrun{
#' # The shiny app
#' modelMgr()
#' }
NULL
#' Sample \code{amModelLib} Object Containing Models and Data.
#'
#' Four models and four data objects.
#'
#' @name mymodels
#' @docType data
#' @format An \code{amModelLib} object with four \code{amModel} objects and four \code{amData} objects.
#' @keywords datasets
#' @examples
#'
#' data(mymodels)
#' mymodels
#' summary(mymodels)
#' getAMModel(mymodels, 'plant.model')
#' mymodels[[1]]
#' str(mymodels)
#'
#'
#'
NULL
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/AMModels-package.R |
#' @name amData
#' @aliases amData
#' @title Create An \code{amData} Object That Pairs Datasets With Associated Metadata
#' @description Creates an object of class \code{\link{amData}}, which is typically a data frame of covariate data or model fitting data, with mandatory metadata. It is worth noting that some models include the data as part of the fitted model object, e.g. \code{lm} and \code{glm}, which access embedded data with \code{model.frame}. For large datasets that are embedded in the model, it may be worth pairing the metadata with a placeholder object such as the character string \code{"embedded data"} rather than storing a redundant copy of the data.
#' @param data A dataset, typically data frame but may be in any structure.
#' @param \dots Named metadata elements, either supplied individually and coerced to list or supplied as a named list.
#' @return An object of class \code{amData} suitable for inclusion in an \code{\link{amModelLib}} object.
#' @family amModelLib
#' @keywords manip
#' @export
#' @examples
#'
#'
#' # create dataset from lm helpfile
#' ## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
#' ## Page 9: Plant Weight Data.
#' ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
#' trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
#' group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
#' weight <- c(ctl, trt)
#'
#' # create a dataset that is of class data.frame
#' plant.data <- data.frame(weight, group)
#'
#' # create an amData data object
#' dat1 <- amData(data = plant.data, comment='Dataset from lm helpfile.', taxa = 'plants')
#'
#' # the class of dat1 is amData
#' class(dat1)
#'
#' # the summary function will invoke the summary method for the dataset's original class
#' summary(dat1)
#'
#' # use the amModelLib function to create a new amModelLib called mymodels that
#' # includes dat1; data must be supplied in a named list
#' mymodels <- amModelLib(
#' data=list(dat1 = dat1),
#' description = "An example amModelLib called mymodels."
#' )
#'
#' # use the lsData function to list the amData objects in an amModelLib
#' lsData(mymodels)
#'
#' # the dataMeta function can be used to retrieve an amData object's metadata
#' dataMeta(amml = mymodels, 'dat1')
#'
#' # the dataMeta function can also be used to set metadata
#' dataMeta(mymodels, 'dat1') <- list(
#' url = "https://stat.ethz.ch/R-manual/R-devel/library/stats/html/lm.html"
#' )
#' dataMeta(amml = mymodels, 'dat1')
#'
#' # use the getAMData function to extract the dataset back to its original form
#' getAMData(amml = mymodels, 'dat1', as.list = FALSE)
#'
#' # the retrieved dataset is in its original class
#' class(getAMData(amml = mymodels, 'dat1', as.list = FALSE))
#'
#' # use the amModelLib function to create an empty amModelLib
#' mymodels2 <- amModelLib(description = "An example amModelLib called mymodels2.")
#'
#' # use the insertAMModelLib function to insert the amData object into an
#' # amModelLib (a new amModelLib is created when none is supplied)
#' mymodels2 <- insertAMModelLib(data = list(dat1 = dat1))
#'
#' # use rmData to remove an amData object from an amModelLib
#' rmData('dat1', amml = mymodels2)
amData <- function(data, ...) {
metadata <- list(...)
if(length(metadata)) {
if(is.list(metadata[[1]])) metadata <- metadata[[1]]
}
metadata$date <- format(Sys.time(), format="%Y-%m-%d %H:%M:%S")
ammd <- methods::new('amData', data = data, metadata = metadata)
ammd
}
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/amData.R |
#' @name amModel
#' @aliases amModel
#' @title Create An \code{amModel} Object That Pairs Models With Their Metadata
#' @description Creates an object of class \code{\link{amModel}}, which is a fitted model object (or similar) with mandatory metadata.
#' @param model A model fit object or similar.
#' @param \dots Named metadata elements, either supplied individually and coerced to list or supplied as a named list.
#' @return An object of class \code{amModel} suitable for inclusion in an \code{\link{amModelLib}}.
#' @family amModelLib
#' @keywords manip
#' @export
#' @examples
#'
#'
#' # run models from from lm helpfile
#' ## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
#' ## Page 9: Plant Weight Data.
#' ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
#' trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
#' group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
#' weight <- c(ctl, trt)
#' lm.D9 <- lm(weight ~ group)
#' lm.D90 <- lm(weight ~ group - 1) # omitting intercept
#'
#' # note the original class of models is 'lm'
#' class(lm.D9)
#'
#' # wrap lm.D9 in an amModel object
#' mod1 <- amModel(model = lm.D9, comment='example from lm')
#'
#' # summarize mod1; summary for the original object class, plus the metadata
#' summary(mod1)
#'
#' # convert lm.D90 to an amModel object
#' mod2 <- amModel(model = lm.D90, comment='second example from lm')
#'
#' # use the function, amModelLib, to create a new amModelLib that includes both models
#' # the models must be supplied as a named list
#' mymodels <- amModelLib(
#' models = list(
#' mod1 = mod1,
#' mod2 = mod2
#' ),
#' description = "This amModelLib stores linear models from the lm helpfile."
#' )
#'
#' # list the models of the amModelLib
#' lsModels(mymodels)
#'
#' # the modelMeta function can be used to retrieve model metadata
#' modelMeta(mymodels, 'mod1')
#'
#' # the modelMeta function can also be used to set model metadata
#' modelMeta(mymodels, 'mod1') <- list(
#' url = "https://stat.ethz.ch/R-manual/R-devel/library/stats/html/lm.html"
#' )
#' modelMeta(amml = mymodels, 'mod1')
#'
#' # use getAMModel to extract the model by name
#' getAMModel(amml = mymodels, 'mod2', as.list = FALSE)
#'
#' # notice the retrieved model is returned in its original class
#' class(getAMModel(amml = mymodels, 'mod2', as.list = FALSE))
#'
#' # create a new empty amModelLib called mymodels2
#' mymodels2 <- amModelLib(description = "A second amModelLib called mymodels2.")
#'
#' # use insertAMModelLib to insert amModel objects into an amModelLib (a new one is created when none is supplied)
#' mymodels2 <- insertAMModelLib(models = list(mod1 = mod1, mod2 = mod2))
#'
#' # use rmModel to remove an amModel object from an amModelLib
#' rmModel('mod2', amml = mymodels2)
amModel <- function(model, ...) {
metadata <- list(...)
if(length(metadata)) {
if(is.list(metadata[[1]])) metadata <- metadata[[1]]
}
metadata$date <- format(Sys.time(), format="%Y-%m-%d %H:%M:%S")
amm <- methods::new('amModel', model = model, metadata = metadata)
amm
}
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/amModel.R |
#' @name amModelLib
#' @aliases amModelLib
#' @title Create An \code{AMModelLib} Object That Stores Lists Of \code{amModel} And \code{amData} Objects
#' @description Creates an object of class \code{amModelLib}, either empty, or containing \code{\link{amModel}} and/or \code{\link{amData}} objects.
#' @details \code{amModelLib} objects are useful for storing multiple models and datasets of a related theme, along with relevant metadata. Most extraction and manipulation functions will attempt to keep any relevant data with any model for which it is used. Multiple models may refer to a single dataset.
#' @param models A list of objects of class amModel.
#' @param data A list of objects of class amData.
#' @param info Named list with descriptive metadata about the \code{amModelLib} object.
#' @param description Text field describing the \code{amModelLib} object.
#' @return An object of class amModelLib.
#' @family amModelLib
#' @keywords manip
#' @export
#' @examples
#'
#'
#' # code from the lm helpfile
#' ## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
#' ## Page 9: Plant Weight Data.
#' ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
#' trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
#' group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
#' weight <- c(ctl, trt)
#' lm.D9 <- lm(weight ~ group)
#' lm.D90 <- lm(weight ~ group - 1) # omitting intercept
#'
#' # create a data.frame of the plant data
#' plant.data <- data.frame(group = group, weight = weight)
#'
#' # create an amData object that includes the data.frame and metadata
#' plant.data <- amData(
#' data = plant.data,
#' comment = 'Dataset from lm helpfile.'
#' )
#'
#' # create an amModel model object that includes model lm.D9 and metadata
#' # use the metadata keyword 'data' to link the model with the amData object
#' # that produced it
#' plant.model1 <- amModel(
#' model = lm.D9,
#' comment = 'Example model produced from from lm helpfile.',
#' data = 'plant.data'
#' )
#'
#' # create a second amModel model object that includes model lm.D90 and metadata
#' # use the metadata keyword 'data' to soft link the model with its data
#' plant.model2 <- amModel(
#' model = lm.D90,
#' comment = 'Second model produced from from lm helpfile.',
#' data = 'plant.data'
#' )
#'
#'
#' # use the amModelLib function to create a new amModelLib containing the two
#' # amModel objects and one amData object
#' mymodels <- amModelLib(
#' models = list(
#' plant.model1 = plant.model1,
#' plant.model2 = plant.model2
#' ),
#' data = list(
#' plant.data = plant.data
#' ),
#' description = "This amModelLib stores models and data from the lm helpfile.",
#' info = list(
#' owner = "Me"
#' )
#' )
#'
#' # use the amModelLib function amModelLib to create an empty amModelLib called mymodels2
#' mymodels2 <- amModelLib(
#' description = "A second amModelLib called mymodels2.",
#' info = list(
#' owner = "Me2"
#' )
#' )
#'
#' # use the insertAMModelLib function to insert the two amModel objects and one
#' # amData object into an amModelLib (a new one is created when none is supplied)
#' mymodels2 <- insertAMModelLib(
#' models = list(
#' plant.model1 = plant.model1,
#' plant.model2 = plant.model2),
#' data = list(plant.data = plant.data)
#' )
#'
#'
#'
#'
amModelLib <- function(models = list(), data = list(), info = list(), description = '') {
if (length(info)) {
if (!length(names(info))) stop("'info' list must be named.")
if (!is.list(info)) tryCatch({info <- as.list(info)}, error = function(e) stop("'info' is not a list and cannot be coerced to list."), warning = function(w) stop("'info' is not a list and cannot be coerced to list."))
info[['date.created']] <- format(Sys.time(), format="%Y-%m-%d %H:%M:%S")
}
if (!is.character(description) || length(description) > 1) stop("'description' must be a length 1 character vector.")
methods::new('amModelLib', models = models, data = data, info = info, description = description)
}
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/amModelLib.R |
# For printing pretty metadata to the console.
# Lightly adapted from base::strwrap() to remove some flexibility and
# automatically cat() rather than just returning y.
catwrap <- function (x, width = 0.9 * getOption("width")) {
if (!is.character(x))
x <- as.character(x)
UB <- TRUE
if (all(Encoding(x) == "UTF-8"))
UB <- FALSE
else {
enc <- Encoding(x) %in% c("latin1", "UTF-8")
if (length(enc))
x[enc] <- enc2native(x[enc])
}
z <- lapply(strsplit(x, "\n[ \t\n]*\n", perl = TRUE, useBytes = UB),
strsplit, "[ \t\n]", perl = TRUE, useBytes = UB)
for (i in seq_along(z)) {
for (j in seq_along(z[[i]])) {
words <- z[[i]][[j]]
nc <- nchar(words, type = "w")
if (anyNA(nc)) {
nc0 <- nchar(words, type = "b")
nc[is.na(nc)] <- nc0[is.na(nc)]
}
if (any(nc == 0L)) {
zLenInd <- which(nc == 0L)
zLenInd <- zLenInd[!(zLenInd %in% (grep("[.?!][)\"']{0,1}$",
words, perl = TRUE, useBytes = TRUE) + 1L))]
if (length(zLenInd)) {
words <- words[-zLenInd]
nc <- nc[-zLenInd]
}
}
currentIndex <- 0L
lowerBlockIndex <- 1L
upperBlockIndex <- integer()
lens <- cumsum(nc + 1L)
first <- TRUE
maxLength <- width
while (length(lens)) {
k <- max(sum(lens <= maxLength), 1L)
if (first) {
first <- FALSE
maxLength <- width - 3
}
currentIndex <- currentIndex + k
if (nc[currentIndex] == 0L)
upperBlockIndex <- c(upperBlockIndex, currentIndex -
1L)
else upperBlockIndex <- c(upperBlockIndex, currentIndex)
if (length(lens) > k) {
if (nc[currentIndex + 1L] == 0L) {
currentIndex <- currentIndex + 1L
k <- k + 1L
}
lowerBlockIndex <- c(lowerBlockIndex, currentIndex +
1L)
}
if (length(lens) > k)
lens <- lens[-seq_len(k)] - lens[k]
else lens <- NULL
}
nBlocks <- length(upperBlockIndex)
y <- lapply(seq_len(nBlocks), function(k)
paste(words[lowerBlockIndex[k]:upperBlockIndex[k]], collapse = " "))
}
}
y <- as.character(unlist(y))
print(noquote(y[1]))
if(length(y) > 1)
for(i in y[2:length(y)]) cat(' ',i,'\n')
}
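# Example (not run): wrap a long metadata value for console printing, e.g.
# catwrap(paste(rep("word", 40), collapse = " "), width = 40)
# Note that, unlike strwrap(), only the last element/paragraph of 'x' is
# retained, so this helper is intended for single-paragraph metadata strings.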
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/catwrap.R |
#' An S4 class to store models with descriptive metadata.
#' @rdname amModel
#' @slot model A model fit object, e.g. from \code{lm} or any single object containing the means to predict values from data.
#' @slot metadata A named list of length 1 character vectors that form name:value pairs, e.g. the analyst, the project, the data used, etc. The metadata name \code{data} can be used to link the model with the name of a data object, and when manipulated in the \code{\link{amModelLib}} an effort will be made to keep the model and data together. When embedded in an \code{\link{amModelLib}}, model metadata may be retrieved or set with \code{\link{modelMeta}}.
#' @keywords classes
setClass('amModel', slots = c(
model = 'ANY',
metadata = 'list'
),
validity = function(object) {
meta <- object@metadata
err <- NULL
if (!length(meta)) {
err <- c(err, 'Must provide metadata about models. We recommend source, species, and comments.')
}
if (is.null(names(meta)) || any(names(meta) == "")) {
err <- c(err, 'All metadata elements must be named.')
}
if (length(unique(names(meta))) != length(names(meta))) {
err <- c(err, 'Metadata names must be unique.')
}
if (is.null(err)) err <- TRUE
err
}
)
#' An S4 class to store data with descriptive metadata.
#' @rdname amData
#' @slot data A single object containing data relevant to a model. There are no restrictions on how the data are used in the model; for example, they may be covariate data (for use on the right side of the equation) or observed data (for use on the left side of the equation).
#' @slot metadata A named list of length 1 character vectors that form name:value pairs, e.g. the source, the collection method, etc. When embedded in an \code{\link{amModelLib}}, data metadata may be retrieved or set with \code{\link{dataMeta}}.
#' @keywords classes
setClass('amData', slots = c(
data = 'ANY',
metadata = 'list'
),
validity = function(object) {
dat <- object@data
meta <- object@metadata
err <- NULL
if (!length(meta)) {
err <- c(err, 'Must provide metadata about dataset. We recommend source, covariate data name (data), and comments.')
}
if (is.null(names(meta)) || any(names(meta) == "")) {
err <- c(err, 'All metadata elements must be named.')
}
if (length(unique(names(meta))) != length(names(meta))) {
err <- c(err, 'Metadata names must be unique.')
}
if (is.null(err)) err <- TRUE
err
}
)
#' An S4 class to hold model and data, each with descriptive metadata.
#' @rdname amModelLib
#' @slot models A named list of \code{\link{amModel}} objects. Models are added with \code{\link{insertAMModelLib}} and removed with \code{\link{rmModel}}.
#' @slot data A named list of \code{\link{amData}} objects. Data are added with \code{\link{insertAMModelLib}} and removed with \code{\link{rmData}}.
#' @slot info A named list of length 1 character strings that form name:value metadata pairs describing the \code{amModelLib}, e.g. the project, the owner, etc. Name:value pairs may be retrieved or set using \code{\link{ammlInfo}}.
#' @slot description Length 1 character vector describing the \code{amModelLib}, intended to offer a summary of the \code{amModelLib} and quickly remind the user of its contents and purpose without poring over the detailed metadata. The description may be retrieved or set with \code{\link{ammlDesc}}.
#' @keywords classes
setClass('amModelLib',
slots = c(
models = 'list',
data = 'list',
info = 'list',
description = 'character'
),
validity = function(object) {
models <- object@models
dat <- object@data
mod.err <- dat.err <- NULL
if (length(models)) {
out.mod <- all(sapply(models, is, class2 = 'amModel'))
if (!out.mod) mod.err <- '"models" must be a named list of objects of class "amModel".'
} else {
out.mod <- TRUE
}
if (length(dat)) {
out.dat <- all(sapply(dat, is, class2 = 'amData'))
if (!out.dat) dat.err <- '"data" must be a named list of objects of class "amData".'
} else {
out.dat <- TRUE
}
# if(length(info)) {
# out.info1 <- !(is.null(names(meta)) || any(names(meta) == ""))
# if(!out.info1) info.err <- c(info.err, 'All info elements must be named.')
# out.info2 <- length(unique(names(meta))) == length(names(meta))
# if(!out.info2) info.err <- c(info.err, 'Info names must be unique.')
# out.info <- c(out.info1, out.info2)
# } else {
# out.info <- TRUE
# }
if (all(out.mod, out.dat)) TRUE
else c(mod.err, dat.err)
}
)
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/classes.R |
#' @name getters
#' @aliases getAMModel getAMData ammlDesc ammlInfo
#' @rdname getters
#' @title Extract A Single Model or Data Object, Get Or Set Info, Description, Or Metadata.
#' @description The function \code{getAMData} will extract an \code{\link{amData}} object from an \code{\link{amModelLib}}; the function \code{getAMModel} will extract an \code{\link{amModel}} object from an \code{amModelLib}. The function \code{ammlDesc} can be used to retrieve or set a description of an \code{amModelLib} object. The function \code{ammlInfo} can be used to retrieve or set information about an \code{amModelLib} object. \code{modelMeta} and \code{dataMeta} retrieve and set metadata within \code{amModelLib} objects.
#' @details The objects created by \code{getAMData} and \code{getAMModel} are returned as their original class unless the argument \code{as.list} is set to \code{TRUE}. If \code{as.list}, a list is returned with the original object in the first element and metadata in the second.\cr The setters for \code{ammlInfo}, \code{modelMeta}, and \code{dataMeta} replace individual elements in their respective lists with each call. To remove elements set their value to \code{NULL} in the named replacement list.
#' @param amml An \code{amModelLib} object.
#' @param x A name or length 1 integer index to extract from or set within the \code{amModelLib} object.
#' @param as.list Logical; \code{FALSE} to return just the object, \code{TRUE} to return a list with both object and metadata.
#' @param value A named list of metadata to set within \code{x}.
#' @param \dots Additional arguments (not used).
#' @return The model or data object in its original form, or if \code{as.list} a list with \item{models}{The original model} (or \item{data}{The original data} ) and \item{metadata}{metadata}.
#' @family amModelLib
#' @keywords utilities
#' @export getAMModel
#' @examples
#'
#' # create dataset from lm helpfile
#' ## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
#' ## Page 9: Plant Weight Data.
#' # notice the models lm.D9 and lm.D90 are of class 'lm'
#' ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
#' trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
#' group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
#' weight <- c(ctl, trt)
#' lm.D9 <- lm(weight ~ group)
#' lm.D90 <- lm(weight ~ group - 1) # omitting intercept
#'
#'
#' # create two amModel objects with metadata and a soft link to the data
#' full.model <- amModel(
#' model = lm.D9,
#' comment = 'full model',
#' source = 'lm helpfile (R).',
#' taxa = 'plants',
#' data = 'plant.data'
#' )
#'
#' no.int.model <- amModel(
#' model = lm.D90,
#' comment = 'model without intercept',
#' source = 'lm helpfile (R).',
#' taxa = 'plants',
#' data = 'plant.data'
#' )
#'
#' # create an amData object that includes metadata
#' # the plant.data is of class data.frame
#' plant.data <- data.frame(group = group, weight = weight)
#' plant.data <- amData(
#' data = plant.data,
#' comment = 'Dataset from lm helpfile.'
#' )
#'
#' # create a second amData object that includes metadata
#' log.plant.data <- data.frame(group, log.weight=log(weight))
#' log.plant.data <- amData(
#' data = log.plant.data,
#' comment = 'data to fit log model',
#' source = 'lm helpfile (R).'
#' )
#'
#' # create an amModelLib that contains the two amModel objects and two amData objects
#' # the models and data must be supplied as named lists
#' mymodels <- amModelLib(
#' models = list(
#' full.model = full.model,
#' no.int.model = no.int.model
#' ),
#' data = list(
#' plant.data = plant.data,
#' log.plant.data = log.plant.data
#' )
#' )
#'
#' # extract the dataset
#' getAMData(amml = mymodels, 'plant.data', as.list = FALSE)
#'
#' # you can also extract by index
#' getAMData(amml = mymodels, 1, as.list = FALSE)
#'
#' # extract the model
#' getAMModel(amml = mymodels, 'full.model', as.list = FALSE)
#'
#' # you can also extract by index
#' getAMModel(amml = mymodels, 1, as.list = FALSE)
#'
#' # extraction with '[' and '[[', which are identical here, focus on models
#' mymodels[c(1,2)]
#' mymodels[[1]]
#'
#' # Add a description to the amModelLib
#' ammlDesc(mymodels) <- "This library demonstrates how to store models
#' and data in a format that allows for descriptive metadata and easy
#' retrieval for future reference."
#'
#' # Extract the description
#' ammlDesc(mymodels)
#'
#' # Add some metadata 'info' to the amModelLib
#' ammlInfo(mymodels) <- list(owner = 'me', organization = 'My Organization')
#'
#' # Extract all info for an amModelLib
#' ammlInfo(mymodels)
#'
#' # Extract targeted info
#' ammlInfo(amml = mymodels, 'owner')
#'
#' # Delete metadata by setting to NULL
#' ammlInfo(mymodels) <- list(organization = NULL)
#'
#' # Extract all model metadata
#' modelMeta(mymodels)
#'
#' # Extract metadata from specific model
#' modelMeta(amml = mymodels, 'full.model')
#'
#' # Add metadata to 'full.model'
#' modelMeta(amml = mymodels, 'full.model') <- list(
#' url = "https://stat.ethz.ch/R-manual/R-devel/library/stats/html/lm.html"
#' )
#'
#' # remove metadata by setting value to NULL
#' modelMeta(amml = mymodels, 'full.model') <- list(url = NULL)
#'
#' # Extract all data metadata
#' dataMeta(mymodels)
#'
#' # Extract metadata from specific data
#' dataMeta(amml = mymodels, 'plant.data')
#'
#' # Add metadata to 'plant.data'
#' dataMeta(mymodels, 'plant.data') <- list(
#' url = "https://stat.ethz.ch/R-manual/R-devel/library/stats/html/lm.html"
#' )
#'
#' # remove metadata by setting value to NULL
#' dataMeta(mymodels, 'plant.data') <- list(url = NULL)
#'
getAMModel <- function(amml, x, as.list=FALSE, ...) {
if (!methods::is(amml, 'amModelLib') || missing(amml)) stop('Must provide an amModelLib object.')
if(missing(x)) stop('Must provide a model name or integer index.')
if(any(is.character(x), is.numeric(x))) {
model <- amml@models[[x]]@model
if (!as.list) {
model
} else {
list(model = model, metadata = amml@models[[x]]@metadata)
}
} else {
stop('Model selector must be provided as a character or integer vector.')
}
}
#' @rdname getters
#' @export
getAMData <- function(amml, x, as.list=FALSE, ...) {
if (!methods::is(amml, 'amModelLib') || missing(amml)) stop('Must provide an amModelLib object.')
if(missing(x)) stop('Must provide a model name or integer index.')
if(any(is.character(x), is.numeric(x))) {
dat <- amml@data[[x]]@data
if (!as.list) {
dat
} else {
list(data = dat, metadata = amml@data[[x]]@metadata)
}
} else {
stop('Data selector must be provided as a character or integer vector.')
}
}
#' @rdname getters
#' @export
ammlDesc <- function(amml) {
if (!methods::is(amml, 'amModelLib') || missing(amml)) stop('Must provide an amModelLib object.')
amml@description
}
#' @rdname getters
#' @export
`ammlDesc<-` <- function(amml, value) {
if (!methods::is(amml, 'amModelLib') || missing(amml)) stop('Must provide an amModelLib object.')
if (!missing(value)) {
if (is.character(value)) {
amml@description <- value
amml
} else {
stop("'value' must be a character string.")
}
}
}
#' @rdname getters
#' @export
ammlInfo <- function(amml, x = NULL) {
if (!methods::is(amml, 'amModelLib') || missing(amml)) stop('Must provide an amModelLib object.')
if (is.null(x)) {
amml@info
} else {
amml@info[x]
}
}
#' @rdname getters
#' @export
`ammlInfo<-` <- function(amml, value) {
if (!methods::is(amml, 'amModelLib') || missing(amml)) stop('Must provide an amModelLib object.')
if (!missing(value)) {
if (is.list(value) && length(names(value))) {
for (i in names(value)) amml@info[[i]] <- value[[i]]
amml@info <- amml@info[!sapply(amml@info, is.null)]
amml
} else {
stop("'value' must be a named list.")
}
}
}
#' @rdname getters
#' @export
modelMeta <- function(amml, x=NULL) {
if (!methods::is(amml, 'amModelLib') || missing(amml)) stop('Must provide an amModelLib object.')
if (is.null(x)) {
lapply(amml@models, function(y) y@metadata)
} else if (length(x) == 1) {
amml@models[[x]]@metadata
} else if (length(x) > 1) {
ammods <- amml@models[x]
lapply(ammods, function(y) y@metadata)
}
}
#' @rdname getters
#' @export
`modelMeta<-` <- function(amml, x, value) {
    # guard clauses: in the original nesting these error messages were unreachable
    if (missing(amml)) stop("'amml' must be an amModelLib object.")
    if (missing(x)) stop("'x' must identify model as an integer index or character string.")
    if (missing(value)) stop("'value' cannot be missing.")
    if (!is.list(value) || !length(names(value))) stop("'value' must be a named list.")
    for (i in names(value)) {
        if (is.null(value[[i]])) {
            # assigning NULL drops the named element (a no-op if it is absent)
            amml@models[[x]]@metadata[[i]] <- NULL
        } else {
            amml@models[[x]]@metadata[[i]] <- value[[i]]
        }
    }
    amml
}
#' @rdname getters
#' @export
dataMeta <- function(amml, x=NULL) {
    if (missing(amml) || !methods::is(amml, 'amModelLib')) stop('Must provide an amModelLib object.')
if (is.null(x)) {
lapply(amml@data, function(y) y@metadata)
} else if (length(x) == 1) {
amml@data[[x]]@metadata
} else if (length(x) > 1) {
amdat <- amml@data[x]
lapply(amdat, function(y) y@metadata)
}
}
#' @rdname getters
#' @export
`dataMeta<-` <- function(amml, x, value) {
    # guard clauses: in the original nesting these error messages were unreachable
    if (missing(amml)) stop("'amml' must be an amModelLib object.")
    if (missing(x)) stop("'x' must identify data as an integer index or character string.")
    if (missing(value)) stop("'value' cannot be missing.")
    if (!is.list(value) || !length(names(value))) stop("'value' must be a named list.")
    for (i in names(value)) {
        if (is.null(value[[i]])) {
            # assigning NULL drops the named element (a no-op if it is absent)
            amml@data[[x]]@metadata[[i]] <- NULL
        } else {
            amml@data[[x]]@metadata[[i]] <- value[[i]]
        }
    }
    amml
}
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/getters.R |
#' @name grepAMModelLib
#' @title Search For A Model In A Model List Using \code{grep}
#' @description Returns an abbreviated amModelLib object that contains models and data that meet search terms.
#' @param pattern Search string or value, typically a model or data name
#' @param amml An \code{\link{amModelLib}} object
#' @param search Length 1 \code{character} vector indicating whether to search and return models or data that meet the search criteria.
#' @param \dots Additional arguments to \code{grep}.
#' @details \code{grep} is used to search names, values (models/data), and metadata. An attempt is made to keep data with models if searching for models, to keep models with data if searching for data, and to keep prior/posterior models together. The relational link between models and their data relies on a case-agnostic 'data' element in the model metadata that names the linked data; the same is true for models that use 'prior' to link to other models.
#' @return An object of class \code{amModelLib}.
#' @family amModelLib
#' @keywords manip
#' @export
#' @examples
#'
#' # create dataset from lm helpfile
#' ## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
#' ## Page 9: Plant Weight Data.
#' ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
#' trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
#' group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
#' weight <- c(ctl, trt)
#' lm.D9 <- lm(weight ~ group)
#' lm.D90 <- lm(weight ~ group - 1) # omitting intercept
#'
#'
#' # create two amModel objects with metadata and a soft link to the data
#' full.model <- amModel(
#' lm.D9,
#' comment = 'full model',
#' source = 'lm helpfile (R).',
#' taxa = 'plants',
#' data = 'plant.data'
#' )
#'
#' no.int.model <- amModel(
#' lm.D90,
#' comment = 'model without intercept',
#' source = 'lm helpfile (R).',
#' taxa = 'plants',
#' data = 'plant.data'
#' )
#'
#'
#' # create an amData object that includes metadata
#' plant.data <- data.frame(group = group, weight = weight)
#' plant.data <- amData(
#' plant.data,
#' comment = 'Dataset from lm helpfile.'
#' )
#'
#' log.plant.data <- data.frame(group, log.weight=log(weight))
#' log.plant.data <- amData(
#' log.plant.data,
#' comment = 'data to fit log model',
#' source = 'lm helpfile (R).'
#' )
#'
#' # create an amModelLib that contains the two amModel objects and two amData objects
#' # the models and data must be supplied as named lists
#' mymodels <- amModelLib(
#' models = list(
#' full.model = full.model,
#' no.int.model = no.int.model
#' ),
#' data=list(
#' plant.data = plant.data,
#' log.plant.data = log.plant.data
#' )
#' )
#'
#'
#' # search the entire amModelLib for the word 'intercept'
#' # the dataset associated with the model will be returned
#' grepAMModelLib("intercept", amml = mymodels)
#'
#' # the class of returned search is an amModelLib object
#' class(grepAMModelLib("intercept", amml = mymodels))
#'
#' # search for data containing the word 'log'
#' grepAMModelLib("log", amml = mymodels, search = "data")
#'
#' # search for models containing the word 'full'
#' # Because 'full.model' is soft-linked to a dataset,
#' # the dataset information will be returned.
#' grepAMModelLib("full", amml = mymodels, search = "model")
#'
#'
grepAMModelLib <- function(
pattern, # Either a model name, a data name, or a search string or value
amml, # an amModelLib object
search = c('all', 'model', 'data'), # search model or data names and contents
... # additional arguments to grep such as ignore.case
) {
    if (missing(amml) || !methods::is(amml, 'amModelLib')) stop('Must provide an amModelLib object.')
search <- match.arg(search)
if (search == 'model') {
models <- amml@models
mod.l <- lapply(models, function(x) x@model)
mmet.l <- lapply(models, function(x) x@metadata)
mn <- names(models)
ind.name <- grep(pattern, mn, ...)
ind.mod <- grep(pattern, mod.l, ...)
ind.mmet <- grep(pattern, mmet.l, ...)
models <- models[unique(c(ind.name, ind.mod, ind.mmet))]
# find priors pointed to by each model
modprior <- unlist(
lapply(models, function(x) grep('prior', names(x@metadata), ignore.case = TRUE))
)
if (length(modprior)) {
# pull out the pointers
modpointers <- unique(sapply(1:length(modprior), function(x) amml@models[[names(modprior)[x]]]@metadata[[modprior[x]]]))
# identify which pointers match the search results and pull those datasets
getmodels <- amml@models[modpointers]
# ignore dead-end pointers
getmodels <- getmodels[unlist(lapply(getmodels, function(x) as.logical(length(x))))]
if(length(getmodels)) models <- c(models, getmodels)
}
# return an amModelLib, complete with associated data
if (length(models)) {
amml@models <- models
} else {
amml@models <- list()
}
        # find the data pointed to by each model
moddata <- unlist(
lapply(models, function(x) grep('data', names(x@metadata), ignore.case = TRUE))
)
if (length(moddata)) {
# pull out the pointers
modpointers <- unique(sapply(1:length(moddata), function(x) amml@models[[names(moddata)[x]]]@metadata[[moddata[x]]]))
# identify which pointers match the search results and pull those datasets
getdata <- amml@data[modpointers]
# Ignore dead-end pointers
getdata <- getdata[unlist(lapply(getdata, function(x) as.logical(length(x))))]
# if(!length(moddata)) getdata <- unname(getdata)
amml@data <- getdata
} else {
amml@data <- list()
}
amml
} else if (search == 'data') {
dat <- amml@data
dat.l <- lapply(dat, function(x) x@data)
dmet.l <- lapply(dat, function(x) x@metadata)
dn <- names(dat)
ind.name <- grep(pattern, dn, ...)
ind.dat <- grep(pattern, dat.l, ...)
ind.dmet <- grep(pattern, dmet.l, ...)
dat <- dat[unique(c(ind.name, ind.dat, ind.dmet))]
# return an amModelLib, complete with associated models
if (length(dat)) {
amml@data <- dat
} else {
amml@data <- list()
}
        # find the models that point to the matched data; as in the model branch,
        # grep returns metadata positions, so models without a 'data' element drop out
        datmod <- unlist(
            lapply(amml@models, function(x) grep('data', names(x@metadata), ignore.case = TRUE))
        )
if (length(datmod)) {
# pull out those pointers
datpointers <- sapply(1:length(datmod), function(x) amml@models[[names(datmod)[x]]]@metadata[[datmod[x]]])
# identify which pointers match the search results and pull those models
getmodel <- amml@models[names(datmod)][datpointers %in% names(dat)]
amml@models <- getmodel
} else {
amml@models <- list()
}
amml
} else if (search == 'all') {
# search models
foundmodels <- grepAMModelLib(pattern, amml, search = 'model', ...)
founddata <- grepAMModelLib(pattern, amml, search = 'data', ...)
# filter for unique names
        foundmodels_modnames <- lsModels(foundmodels)
        foundmodels_datnames <- lsData(foundmodels)
        founddata_modnames <- lsModels(founddata)
        founddata_datnames <- lsData(founddata)
doublemodels <- founddata_modnames %in% foundmodels_modnames
doubledata <- founddata_datnames %in% foundmodels_datnames
mods <- c(foundmodels@models, founddata@models[!doublemodels])
dat <- c(foundmodels@data, founddata@data[!doubledata])
# return an amModelLib, complete with associated data
if (length(mods)) {
amml@models <- mods
} else {
amml@models <- list()
}
if (length(dat)) {
amml@data <- dat
} else {
amml@data <- list()
}
amml
}
}
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/grepAMModelLib.R |
#' @name insertAMModelLib
#' @aliases insertAMModelLib
#' @title Insert Model Of Class \code{amModel} Or Dataset Of Class \code{amData} Into An \code{amModelLib} Object
#' @description Inserts a model into the model slot of an \code{\link{amModelLib}} object, or inserts a dataset into the data slot of an \code{amModelLib} object. If the \code{amModelLib} is not specified, the function will create an info-less and description-less library; alternatively, specify these components via the \dots argument.
#' @details If the argument \code{amml} is missing, the function will call \code{amModelLib} to create the object. The argument \code{info} can be passed to this function via the \dots argument.
#' @param models A named list of \code{\link{amModel}} objects, each composed of a fitted model object and metadata.
#' @param data A named list of \code{\link{amData}} objects, each composed of a dataset and metadata.
#' @param amml An object of class \code{amModelLib} to which the models and/or data will be added.
#' @param ... Additional arguments to be passed to the function \code{amModelLib} should a new \code{amModelLib} need to be created.
#' @return An object of class \code{amModelLib}
#' @family amModelLib
#' @keywords manip
#' @export
#' @examples
#'
#' # create dataset from lm helpfile
#' ## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
#' ## Page 9: Plant Weight Data.
#' ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
#' trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
#' group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
#' weight <- c(ctl, trt)
#' lm.D9 <- lm(weight ~ group)
#' lm.D90 <- lm(weight ~ group - 1) # omitting intercept
#'
#' # create an amData object that includes metadata
#' plant.data <- data.frame(group = group, weight = weight)
#' plant.data <- amData(data = plant.data, comment = 'Dataset from lm helpfile.')
#'
#' # create an amModel object with metadata and a soft link to the data
#' full.model <- amModel(
#' model = lm.D9,
#' comment = 'full model',
#' source = 'lm helpfile (R).',
#' taxa = 'plants',
#' data = 'plant.data'
#' )
#'
#'
#' # create an amModelLib that contains the amModel object and the amData object
#' # the model and data must be supplied as named lists
#' mymodels <- amModelLib(
#' description = "An example amModelLib.",
#' models = list(full.model = full.model),
#' data = list(plant.data = plant.data)
#' )
#'
#' # create second amModel object with metadata and a soft link to the same data
#' no.int.model <- amModel(
#' model = lm.D90,
#' comment = 'model without intercept',
#' source = 'lm helpfile (R).',
#' taxa = 'plants',
#' data = 'plant.data'
#' )
#'
#' # create a second amData object
#' log.plant.data <- data.frame(group, log.weight=log(weight))
#' log.plant.data <- amData(
#' data = log.plant.data,
#' comment = 'data to fit log model',
#' source = 'lm helpfile (R).'
#' )
#'
#' # insert the second model and second dataset to the amModelLib
#' mymodels <- insertAMModelLib(
#' mymodels,
#' models = list(no.int.model = no.int.model),
#' data = list(log.plant.data = log.plant.data)
#' )
#'
insertAMModelLib <- function(
models, # a named list of amModels, each containing a fitted model object and metadata
data, # an amData object
amml, # an existing amModelLib object to merge
... # additional arguments to createAMModelList, such as info list
) {
if (!missing(models)) {
if (length(models)) {
mn <- names(models)
if (any(is.null(mn))) stop('Model names are required.')
names(models) <- mn
# names(models) <- tolower(mn)
datnames <- unlist(lapply(models, function(y) grep('data', names(y@metadata), ignore.case = TRUE)))
datnames2 <- unlist(lapply(names(datnames), function(y) models[[y]]@metadata[[datnames[y]]]))
} else models <- datnames2 <- NULL
} else models <- NULL
if (!missing(data)) {
if (length(data)) {
dn <- names(data)
if (any(is.null(dn))) stop('Data names are required.')
names(data) <- dn
# names(data) <- tolower(dn)
} else data <- NULL
} else data <- NULL
if (missing(amml)) {
amml <- amModelLib(...)
ammlInfo(amml) <- list(date.created = format(Sys.time(), format="%Y-%m-%d %H:%M:%S"))
}
amml@data <- c(amml@data, data)
dn <- names(amml@data)
names(amml@data) <- make.names(names(amml@data), unique = TRUE)
if (!all(dn == names(amml@data))) {
warning('Data names were altered to be made unique.')
if (!is.null(datnames2)) {
# If data names are adjusted check the data name reference in the model metadata
models <- lapply(models, function(y) {
mmet <- y@metadata
mmdat <- grepl('data', names(mmet), ignore.case = TRUE)
if (any(mmdat)) {
# Now assuming only one match
tochange <- which(mmet %in% dn)
newname <- names(amml@data)[which(dn == mmet[[tochange]])]
mmet[[tochange]] <- newname[length(newname)]
}
y@metadata <- mmet
y
})
}
}
amml@models <- c(amml@models, models)
mn <- names(amml@models)
names(amml@models) <- make.names(names(amml@models), unique = TRUE)
if (!all(mn == names(amml@models))) warning('Model names were altered to be made unique.')
amml
}
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/insertAMModelLib.R |
#' @name lsModels
#' @aliases lsModels lsData
#' @title List Names Of \code{amModel} Or \code{amData} Objects In An \code{amModelLib} Object.
#' @description List names of all objects of class \code{\link{amModel}} (a fitted model with mandatory metadata) or \code{\link{amData}} (a dataset with mandatory metadata) in an \code{\link{amModelLib}} object.
#' @param x An object of class \code{amModelLib}.
#' @return A vector of names.
#' @family amModelLib
#' @keywords manip
#' @examples
#'
#' # create dataset from lm helpfile
#' ## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
#' ## Page 9: Plant Weight Data.
#' ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
#' trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
#' group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
#' weight <- c(ctl, trt)
#' lm.D9 <- lm(weight ~ group)
#' lm.D90 <- lm(weight ~ group - 1) # omitting intercept
#'
#' # create an amData object that includes metadata
#' plant.data <- data.frame(group = group, weight = weight)
#' plant.data <- amData(
#' data = plant.data,
#' comment = 'Dataset from lm helpfile.'
#' )
#'
#' # create a second amData object with log data
#' log.plant.data <- data.frame(group, log.weight=log(weight))
#' log.plant.data <- amData(
#' data = log.plant.data,
#' comment = 'Dataset that includes the log of plant weight',
#' source = 'lm helpfile (R).'
#' )
#'
#'
#'
#' # create two amModel objects with metadata and a soft link to the data
#' full.model <- amModel(
#' model = lm.D9,
#' comment = 'full model',
#' source = 'lm helpfile (R).',
#' taxa = 'plants',
#' data = 'plant.data'
#' )
#'
#' no.int.model <- amModel(
#' model = lm.D90,
#' comment = 'model without intercept',
#' source = 'lm helpfile (R).',
#' taxa = 'plants',
#' data = 'plant.data'
#' )
#'
#'
#'
#' # create an amModelLib that contains the two amModel objects and two amData objects
#' # the models and data must be supplied as named lists
#' mymodels <- amModelLib(
#' models = list(
#' full.model = full.model,
#' no.int.model = no.int.model
#' ),
#' data = list(
#' plant.data = plant.data,
#' log.plant.data = log.plant.data
#' )
#' )
#'
#'
#' # list names of models
#' lsModels(mymodels)
#'
#' # list names of data
#' lsData(mymodels)
#'
NULL
#' @rdname lsModels
#' @export
lsModels <- function(x) {
if (methods::is(x, 'amModelLib')) {
names(x@models)
} else {
stop('This function only works for "amModelLib" objects.')
}
}
#' @rdname lsModels
#' @export
lsData <- function(x) {
if (methods::is(x, 'amModelLib')) {
names(x@data)
} else {
stop('This function only works for "amModelLib" objects.')
}
}
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/ls.R |
# summarize metadata list and truncate text to window width
metaSummary <- function(x) {
max.nm.w <- max(nchar(names(x))) + 5
vals <- unlist(x)
maxw <- getOption('width')
vals <- sapply(vals, function(z) {
if((nchar(z) + max.nm.w) > maxw) paste0(substr(z, 1, maxw - max.nm.w - 3), '...')
else z
})
y <- data.frame(name=names(x), value=vals)
rownames(y) <- 1:nrow(y)
y
}
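# Usage sketch (hypothetical metadata list):
# metaSummary(list(comment = 'full model', source = 'lm helpfile (R).'))
# returns a two-column data frame of metadata names and values, with values
# truncated to fit the current console width.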
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/metaSummary.R |
#' @name methods-amModel
#' @rdname methods-amModel
#' @docType methods
#' @include classes.R
#' @aliases methods-amModel
#' @title Methods For Displaying, Summarizing, And Manipulating \code{amModel} And \code{amData} Objects
#' @param object,x An \code{\link{amModel}} or \code{\link{amData}} object.
#' @param i,j indices specifying elements to extract or replace. Indices are numeric or character vectors or empty (missing) or NULL.
#' @param drop Not used.
#' @param value Replacement value.
#' @param \dots Additional arguments passed to other functions or methods.
#' @description Getters and setters for models and data.
#' @details Summary assumes some meaningful summary method exists for each object in its home package.
#' @keywords methods
#' @examples
#'
#' # create dataset from lm helpfile
#' ## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
#' ## Page 9: Plant Weight Data.
#' ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
#' trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
#' group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
#' weight <- c(ctl, trt)
#' lm.D9 <- lm(weight ~ group)
#' lm.D90 <- lm(weight ~ group - 1) # omitting intercept
#'
#'
#' # create am amModel object
#' full.model <- amModel(
#' model = lm.D9,
#' comment = 'full model',
#' source = 'lm helpfile (R).',
#' taxa = 'plants',
#' data = 'plant.data'
#' )
#'
#'
#' # create an amData object
#' plant.data <- data.frame(group = group, weight = weight)
#' plant.data <- amData(
#' data = plant.data,
#' source = 'lm helpfile (R).',
#' comment = 'Dataset from lm helpfile.'
#' )
#'
#' summary(full.model)
#'
#' # [ and [[ index from metadata
#' full.model[c(2,1)]
#' full.model[[1]]
#' full.model[['taxa']]
#'
#' plant.data[c(2,1)]
#' plant.data[[1]]
#' plant.data[['comment']]
#'
NULL
#' @aliases summary,amModel-method
#' @rdname methods-amModel
#' @export
setMethod('summary', signature(object = 'amModel'), function(object, ...) {
print(summary(object@model, ...))
cat('\n--- Metadata ---\n')
print(metaSummary(object@metadata), right = FALSE)
})
#' @aliases [,amModel,ANY,ANY,ANY-method [,amModel-method
#' @rdname methods-amModel
setMethod('[', signature(x='amModel', i = 'ANY', j = 'ANY'), function(x, i, j, ..., drop = TRUE) {
    get <- unlist(c(as.list(i, j), list(...)))
    x@metadata[get]
})
#' @aliases [[,amModel,ANY,ANY-method [[,amModel-method
#' @rdname methods-amModel
setMethod('[[', signature(x = 'amModel'), function(x, i) {
x@metadata[[i]]
})
#' @aliases [<-,amModel,ANY,ANY,ANY-method [<-,amModel-method
#' @rdname methods-amModel
setMethod('[<-', signature(x = 'amModel'), function(x, i, j, ..., value) {
    get <- unlist(c(as.list(i, j), list(...)))
    x@metadata[get] <- value
    x
})
#' @aliases [[<-,amModel,ANY,ANY-method [[<-,amModel-method
#' @rdname methods-amModel
setMethod('[[<-', signature(x = 'amModel'), function(x, i, value) {
    x@metadata[[i]] <- value
    x
})
##########################
#' @aliases summary,amData-method
#' @rdname methods-amModel
#' @export
setMethod('summary', 'amData', function(object, ...) {
print(summary(object@data, ...))
cat('\n--- Metadata ---\n')
print(metaSummary(object@metadata), right = FALSE)
})
#' @aliases [,amData,ANY,ANY,ANY-method [,amData-method
#' @rdname methods-amModel
setMethod('[', 'amData', function(x, i, j, ...) {
get <- unlist(c(as.list(i, j), list(...)))
x@metadata[get]
})
#' @aliases [[,amData,ANY,ANY-method [[,amData-method
#' @rdname methods-amModel
setMethod('[[', 'amData', function(x, i) {
x@metadata[[i]]
})
#' @aliases [<-,amData,ANY,ANY,ANY-method [<-,amData-method
#' @rdname methods-amModel
setMethod('[<-', 'amData', function(x, i, j, ..., value) {
    get <- unlist(c(as.list(i, j), list(...)))
    x@metadata[get] <- value
    x
})
#' @aliases [[<-,amData,ANY,ANY-method [[<-,amData-method
#' @rdname methods-amModel
setMethod('[[<-', 'amData', function(x, i, value) {
    x@metadata[[i]] <- value
    x
})
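# Usage sketch for the replacement forms, which modify the metadata and
# return the updated object (objects as in the examples above):
# full.model[['comment']] <- 'revised full model'
# plant.data[['comment']] <- 'plant weights from Dobson (1990)'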
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/methods-amModel.R |
#' @name methods-amModelLib
#' @docType methods
#' @include classes.R methods-amModel.R
#' @aliases methods-amModelLib
#' @title Methods For Displaying, Summarizing, And Manipulating \code{amModelLib} Objects
#' @param object An \code{\link{amModelLib}} object.
#' @param x An \code{amModelLib} object.
#' @param recursive Iterate recursively through lists (ignored).
#' @param i,j indices specifying elements to extract or replace. Indices are numeric or character vectors or empty (missing) or NULL.
#' @param drop Not used.
#' @param value Replacement value.
#' @param name For \code{$}, a literal character string or a name (possibly backtick quoted); for \code{summary}, an \code{\link{amModel}} or \code{\link{amData}} name as a character string.
#' @param \dots Additional arguments passed to other functions or methods.
#' @description Getters and setters for \code{amModelLib} objects.
#' @details Summary adds the metadata to the default show method. If \code{name} is supplied, the call is passed on to the \code{amModel} or \code{amData} object with the specified name.
#' @return \code{summary} returns a list with the same elements displayed during the call. Others return an \code{amModelLib} object.
#' @keywords methods
#' @examples
#'
#' # create dataset from lm helpfile
#' ## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
#' ## Page 9: Plant Weight Data.
#' ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
#' trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
#' group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
#' weight <- c(ctl, trt)
#' lm.D9 <- lm(weight ~ group)
#' lm.D90 <- lm(weight ~ group - 1) # omitting intercept
#'
#' # create an amData object that includes metadata
#' plant.data <- data.frame(group = group, weight = weight)
#' plant.data <- amData(
#' data = plant.data,
#' comment = 'Dataset from lm helpfile.'
#' )
#'
#' log.plant.data <- data.frame(group, log.weight=log(weight))
#' log.plant.data <- amData(
#' data = log.plant.data,
#' comment = 'data to fit log model',
#' source = 'lm helpfile (R).'
#' )
#'
#' # create two amModel objects with metadata and a soft link to the data
#' full.model <- amModel(
#' model = lm.D9,
#' comment = 'full model',
#' source = 'lm helpfile (R).',
#' taxa = 'plants',
#' data = 'plant.data'
#' )
#'
#' no.int.model <- amModel(
#' model = lm.D90,
#' comment = 'model without intercept',
#' source = 'lm helpfile (R).',
#' taxa = 'plants',
#' data = 'plant.data'
#' )
#'
#'
#'
#' # create an amModelLib that contains the two amModel objects and two amData objects
#' # the models and data must be supplied as named lists
#' mymodels <- amModelLib(
#' models = list(
#' full.model = full.model,
#' no.int.model = no.int.model
#' ),
#' data=list(
#' plant.data = plant.data,
#' log.plant.data = log.plant.data
#' )
#' )
#'
#' summary(mymodels)
#' mymodels <- c(mymodels, mymodels)
#' mymodels[c(2,1)]
#' mymodels[[1]]
#' mymodels[['full.model']]
#' mymodels$full.model
NULL
#' @aliases summary,amModelLib-method
#' @rdname methods-amModelLib
#' @export
setMethod('summary', 'amModelLib', function(object, name, ...) {
if (missing(name)) {
mshow <- modmet <- dshow <- datmet <- NULL
# Print description
if (object@description != '') {
cat('\nDescription:\n')
catwrap(object@description)
}
# Print info
cat('\nInfo:\n')
info <- object@info
if (length(info)) {
for (i in 1:length(info)) {
cat(' ', names(info)[i], '\n ')
catwrap(info[[i]])
}
} else {
cat('\n** no informative metadata **\n')
}
# Models
if (!length(object@models)) {
cat('\n --- There are no models --- \n')
} else {
cat('\n\n--- Model Names and Indices ---\n')
mshow <- data.frame(name = names(object@models), class=sapply(1:length(object@models), function(x) paste0(class(object@models[[x]]@model), collapse = ', ')))
rownames(mshow) <- 1:nrow(mshow)
pkg <- lapply(1:length(object@models), function(x) attr(object@models[[x]]@model, 'class'))
pkg <- sapply(pkg, function(x) {
at <- attr(x, 'package')
if (is.null(at)) at <- NA
at
})
mshow$package <- pkg
print(mshow)
# Model metadata
cat('\n\n--- Model metadata ---\n')
for (i in 1:length(object@models)) {
cat(paste0('[[', i, ']] ', names(object@models)[i], '\n'))
print(metaSummary(object@models[[i]]@metadata), right = FALSE)
cat('\n')
}
}
# Data
if (!length(object@data)) {
cat('\n --- There are no datasets --- \n')
} else {
cat('\n\n--- Data Names and Indices ---\n ')
dclass <- sapply(1:length(object@data), function(x) paste0(class(object@data[[x]]@data), collapse = ', '))
ddim <- lapply(object@data, function(x) {
if (is.data.frame(x@data)) {
data.frame(rows = nrow(x@data), cols = ncol(x@data))
} else {
data.frame(rows = NA, cols = NA)
}
})
ddim <- do.call(rbind.data.frame, ddim)
dshow <- data.frame(name = names(object@data), class = dclass, ddim)
rownames(dshow) <- 1:nrow(dshow)
pkg <- lapply(1:length(object@data), function(x) attr(object@data[[x]]@data, 'class'))
pkg <- sapply(pkg, function(x) {
at <- attr(x, 'package')
if (is.null(at)) at <- NA
at
})
dshow$package <- pkg
print(dshow)
# Data metadata
cat('\n\n--- Data metadata ---\n')
for (i in 1:length(object@data)) {
cat(paste0('[[', i, ']] ', names(object@data)[i], '\n'))
print(metaSummary(object@data[[i]]@metadata), right = FALSE)
cat('\n')
}
}
invisible(list(models=list(models = mshow, metadata=modmet), data = list(data = dshow, metadata = datmet)))
} else {
tem <- c(object@models, object@data)[[name]]
if (!length(tem)) stop(paste0('Element "', name, '" not found.'))
summary(tem)
}
})
#' @aliases show,amModelLib-method
#' @rdname methods-amModelLib
setMethod('show', 'amModelLib', function(object) {
# Print description
if (object@description != '') {
cat('\nDescription:\n')
catwrap(object@description)
}
# Print info
cat('\nInfo:\n')
info <- object@info
if (length(info)) {
for (i in 1:length(info)) {
cat(' ', names(info)[i], '\n ')
catwrap(info[[i]])
}
} else {
cat('** no informative metadata **\n\n')
}
# Models
cat('\nModels:\n')
if (!length(object@models)) {
cat('\n --- There are no models --- \n')
} else {
mshow <- data.frame(name = names(object@models), class = sapply(1:length(object@models), function(x) paste0(class(object@models[[x]]@model), collapse = ', ')))
rownames(mshow) <- 1:nrow(mshow)
pkg <- lapply(1:length(object@models), function(x) attr(object@models[[x]]@model, 'class'))
pkg <- sapply(pkg, function(x) {
at <- attr(x, 'package')
if (is.null(at)) at <- NA
at
})
mshow$package <- pkg
print(mshow)
}
# Data
cat('\nData:\n')
if (!length(object@data)) {
cat('\n --- There are no datasets --- \n')
} else {
dclass <- sapply(1:length(object@data), function(x) paste0(class(object@data[[x]]@data), collapse = ', '))
ddim <- lapply(object@data, function(x) {
if (is.data.frame(x@data)) {
data.frame(rows = nrow(x@data), cols = ncol(x@data))
} else {
data.frame(rows = NA, cols = NA)
}
})
ddim <- do.call(rbind.data.frame, ddim)
dshow <- data.frame(name = names(object@data), class = dclass, ddim)
rownames(dshow) <- 1:nrow(dshow)
pkg <- lapply(1:length(object@data), function(x) attr(object@data[[x]]@data, 'class'))
pkg <- sapply(pkg, function(x) {
at <- attr(x, 'package')
if (is.null(at)) at <- NA
at
})
dshow$package <- pkg
print(dshow)
}
invisible(NULL)
})
#' @aliases c,amModelLib-method
#' @rdname methods-amModelLib
setMethod('c', 'amModelLib', function(x, ..., recursive = FALSE) {
y <- list(...)
# Add a routine to keep names unique with the decimal system
ymods <- unlist(lapply(y, function(z) z@models), recursive = FALSE)
ymnames <- unlist(lapply(y, function(z) names(z@models)))
xmnames <- names(x@models)
modvals <- c(x@models, ymods)
names(modvals) <- make.names(c(xmnames, ymnames), unique = TRUE)
hasmodel <- sapply(modvals, function(z) as.logical(length(z)))
x@models <- modvals[hasmodel]
ydata <- lapply(y, function(z) z@data)
ydnames <- unlist(lapply(y, function(z) names(z@data)))
xdnames <- names(x@data)
datavals <- c(x@data, unlist(ydata, recursive = FALSE))
names(datavals) <- make.names(c(xdnames, ydnames), unique = TRUE)
hasdata <- sapply(datavals, function(z) as.logical(length(z)))
x@data <- datavals[hasdata]
x
})
#' @aliases [,amModelLib,ANY,ANY,ANY-method [,amModelLib-method
#' @rdname methods-amModelLib
setMethod('[', 'amModelLib', function(x, i, j, ..., drop = TRUE) {
mods <- x@models
if (!missing(i)) {
if (missing(j)) j <- NULL
get <- c(i, j, ...)
getmods <- mods[get]
} else {
getmods <- mods[]
}
moddata <- unique(unlist(c(
sapply(getmods, function(x) x@metadata[['data']]),
sapply(getmods, function(x) x@metadata[['Data']]),
sapply(getmods, function(x) x@metadata[['DATA']])
# sapply(getmods, function(x) grep('data', names(x@metadata), ignore.case=TRUE))
)))
getdata <- x@data[moddata]
x@models <- getmods
if (!length(moddata)) getdata <- unname(getdata)
x@data <- getdata
x
})
#' @aliases [[,amModelLib,ANY,ANY-method [[,amModelLib-method
#' @rdname methods-amModelLib
setMethod('[[', 'amModelLib', function(x, i) {
x[i]
})
#' @aliases $,amModelLib-method
#' @rdname methods-amModelLib
setMethod('$', 'amModelLib', function(x, name) {
x[name]
})
#' @aliases [[<-,amModelLib,ANY,ANY-method [[<-,amModelLib-method
#' @rdname methods-amModelLib
setMethod('[[<-', 'amModelLib', function(x, i, j, ..., value) {
get <- c(i, j, ...)
if (length(get) > 1) stop('Attempt to select multiple elements.')
mods <- x@models
moddata <- x@data
if (methods::is(value, 'amModel')) {
mods[get] <- list(value)
x@models <- mods
x
} else if (methods::is(value, 'amData')) {
moddata[get] <- list(value)
x@data <- moddata
x
} else {
stop('No method implemented for objects of class ', class(value))
}
})
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/methods-amModelLib.R |
#' @name modelMgr
#' @title Graphical UI For The AMModels Package
#' @description The model manager is a graphical user interface that provides access to nearly all of the functionality within the AMModels package. Models and data can be tagged with metadata and organized within model libraries. Model libraries can be imported, searched, subset, edited, and exported through the GUI. The modelMgr GUI allows users to import models and data either from a .RData or .rda file or from the user's \code{.GlobalEnv}.
#' @param \dots Additional arguments to \code{shiny::runApp}.
#' @keywords misc
#' @export
#' @author Jon Katz
#' @examples
#' \dontrun{
#' # The shiny app
#' modelMgr()
#'
#' # Accepts args to shiny::runApp
#' modelMgr(quiet = TRUE)
#' }
modelMgr <- function(...) {
appDir <- system.file("modelMgr", package = "AMModels")
if (appDir == "") {
stop("Could not find directory. Try re-installing `AMModels`.", call. = FALSE)
}
shiny::runApp(appDir, ...)
}
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/modelMgr.R |
#' @name rmModel
#' @aliases rmModel rmData
#' @title Remove An \code{amModel} Or \code{amData} Object From An \code{amModelLib} Object
#' @description Remove an object of class \code{\link{amModel}} or \code{\link{amData}} (a fitted model object or data to fit a model or use as covariate data, with mandatory metadata) from an \code{\link{amModelLib}} object.
#' @param amml An \code{amModelLib} object.
#' @param x A character vector, numeric vector, or logical vector identifying model(s) or data to remove.
#' @return An object of class \code{amModelLib}.
#' @family amModelLib
#' @keywords utilities
#' @export rmModel rmData
#' @examples
#'
#' # create dataset from lm helpfile
#' ## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
#' ## Page 9: Plant Weight Data.
#' ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
#' trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
#' group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
#' weight <- c(ctl, trt)
#' lm.D9 <- lm(weight ~ group)
#' lm.D90 <- lm(weight ~ group - 1) # omitting intercept
#'
#' # create an amData object that includes metadata
#' plant.data <- data.frame(group = group, weight = weight)
#' plant.data <- amData(
#' data = plant.data,
#' comment = 'Dataset from lm helpfile.'
#' )
#'
#' log.plant.data <- data.frame(group, log.weight=log(weight))
#' log.plant.data <- amData(
#' data = log.plant.data,
#' comment = 'data to fit log model',
#' source = 'lm helpfile (R).'
#' )
#'
#' # create two amModel objects with metadata and a soft link to the data
#' full.model <- amModel(
#' model = lm.D9,
#' comment = 'full model',
#' source = 'lm helpfile (R).',
#' taxa = 'plants',
#' data = 'plant.data'
#' )
#'
#' no.int.model <- amModel(
#' model = lm.D90,
#' comment = 'model without intercept',
#' source = 'lm helpfile (R).',
#' taxa = 'plants',
#' data = 'plant.data'
#' )
#'
#' # create an amModelLib that contains the two amModel objects and two amData objects
#' # the models and data must be supplied as named lists
#' mymodels <- amModelLib(
#' models = list(
#' full.model = full.model,
#' no.int.model = no.int.model
#' ),
#' data=list(
#' plant.data = plant.data,
#' log.plant.data = log.plant.data
#' )
#' )
#'
#' # show the library
#' mymodels
#'
#'
#' # remove just the second model
#' rmModel(mymodels, 'no.int.model')
#'
#' # remove the first plant data, has a soft-link from a model, throws warning.
#' rmData(mymodels, 'plant.data')
#'
#' # show the library
#' mymodels
rmModel <- function(amml, x) {
    if (missing(amml) || !methods::is(amml, 'amModelLib')) stop('Must provide an amModelLib object.')
if(missing(x)) stop("Must specify value 'x' to remove.")
# identify orphan posterior models to report
modnames <- unlist(lapply(amml@models, function(y) grep('prior', names(y@metadata), ignore.case = TRUE)))
modnames2 <- unlist(lapply(names(modnames), function(y) amml@models[[y]]@metadata[[modnames[y]]]))
orphanmods <- NULL
names(modnames2) <- names(modnames)
if (any(all(is.numeric(x)), all(is.logical(x)))) {
rmmod <- names(amml@models)[x]
orphanmods <- names(modnames2)[modnames2 %in% rmmod]
amml@models <- amml@models[-x]
} else if (all(is.character(x))) {
mn <- names(amml@models)
names(mn) <- mn
rmmod <- mn[x]
orphanmods <- names(modnames2)[modnames2 %in% rmmod]
amml@models <- amml@models[-which(names(amml@models) %in% x)]
} else stop("Invalid class for 'x'.")
if (length(orphanmods)) warning('The following models have dangling prior references: ', paste0(orphanmods, collapse = ', '))
amml
}
#' @rdname rmModel
rmData <- function(amml, x) {
    if (missing(amml) || !methods::is(amml, 'amModelLib')) stop('Must provide an amModelLib object.')
if(missing(x)) stop("Must specify value 'x' to remove.")
# identify orphan models to report
datnames <- unlist(lapply(amml@models, function(y) grep('data', names(y@metadata), ignore.case = TRUE)))
datnames2 <- unlist(lapply(names(datnames), function(y) amml@models[[y]]@metadata[[datnames[y]]]))
orphanmods <- NULL
names(datnames2) <- names(datnames)
if (any(all(is.numeric(x)), all(is.logical(x)))) {
rmdat <- names(amml@data)[x]
orphanmods <- names(datnames2)[datnames2 %in% rmdat]
amml@data <- amml@data[-x]
} else if (all(is.character(x))) {
dn <- names(amml@data)
names(dn) <- dn
rmdat <- dn[x]
orphanmods <- names(datnames2)[datnames2 %in% rmdat]
amml@data <- amml@data[-which(names(amml@data) %in% x)]
} else stop("Invalid class for 'x'.")
if (length(orphanmods)) warning('The following models have dangling data references: ', paste0(orphanmods, collapse = ', '))
amml
}
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/rmModelsData.R |
#' @name simCovariate
#' @aliases simCovariate
#' @title Simulate A Dataframe Of Uncorrelated Covariate(s)
#' @description Quickly create a dataframe of uncorrelated random variables which can be used as covariates. Values are drawn from the normal, uniform, beta, binomial, poisson or bernoulli distributions.
#' @details \code{simCovariate} will create one or more vectors of random variables from a specified R probability distribution. The distribution can be specified by entering its name or the name of the corresponding R function; partial matching is performed. For example, specifying a distribution as runif, 'runif', uniform, or u can be used to generate random samples from a uniform distribution, in which case R's \code{\link[stats]{runif}} function is called. Additional arguments to the \code{\link[stats]{runif}} function are separated by commas. The function can be parameterized so that multiple covariates can be simulated from either the same distribution or from different distributions. \cr
#' @param cov.list A named list of covariates to be simulated and their required arguments.
#' @param ... Covariate specifications, used when \code{cov.list} is not supplied: one named list per covariate, in which the simulating distribution and its corresponding arguments are specified. Each minimally
#' contains \code{dist}, the distribution to sample from, plus any required distribution arguments, e.g. \code{shape1, shape2} for \code{\link[stats]{rbeta}}
#' and \code{size, prob} for \code{\link[stats]{rbinom}}. May also contain optional arguments such as
#' \code{min, max} for \code{\link[stats]{runif}}, \code{mean, sd} for \code{\link[stats]{rnorm}}, and \code{ncp} for \code{\link[stats]{rbeta}}. Accepts single values, or vectors that will be applied to multiple columns. Vectors should be used with care as lengths are not checked.
#' @param n The number of samples to generate from each covariate.
#' @param add.yr Logical, if \code{TRUE} a field named \code{yr} is added with indices from \code{1:n}.
#' @return A data frame of random numbers from the specified distribution(s),
#' with one column per simulated covariate (plus a \code{yr} column if \code{add.yr} is \code{TRUE}).
#' @seealso \code{\link{amData}}
#' @family popmod
#' @keywords datagen distribution
#' @export
#' @examples
#' # We can specify the distribution using a function, function name,
#' # or distribution name. Partial matching is performed. The examples
#' # below generate data for a single covariate; random seeds are not
#' # provided.
#'
#' # All four examples provide same results and generate 10 random numbers
#' # from a uniform distribution. In some examples the results are rounded;
#' # in other examples add.yr is set to TRUE to add a covariate called yr (year);
#' # in other examples a random seed is provided to ensure reproducibility.
#'
#' simCovariate(u1 =list(dist= runif), n=10, add.yr=FALSE)
#' simCovariate(u2=list(dist = 'runif', round=2), n = 10, add.yr=TRUE)
#' simCovariate(u3=list(dist ='uniform', seed=302), n=10, add.yr=TRUE)
#' simCovariate(u4 = list(dist ='u', seed=302, round=3, min=0, max=10), n=10, add.yr=TRUE)
#'
#' # If multiple covariates are to be simulated, create a list of covariates
#' # and then pass this covariate list as the argument, cov.list. Here, create
#' # a dataframe with seven covariates from five distributions, and
#' # add a covariate called yr.
#' cov.list <- list(
#' unif1 = list(dist = 'runif', min=0, max=10, seed=334, round=0),
#' unif2 = list(dist = 'runif', min=0, max=10, seed=668, round=0),
#' norm1=list(dist = 'normal', mean = 10,sd = 2, seed=10, round=1),
#' norm2=list(dist = 'normal', mean = 50, sd = 10, seed=15, round=2),
#' beta1=list(dist = rbeta, shape1=2, shape2=1, seed=1002),
#' binom1=list(dist = 'bin', size=20, prob=0.5, seed=561),
#' bern1=list(dist='bernoulli', size = 1, prob = 0.5, seed = 6)
#' )
#'
#' simCovariate(cov.list, n = 10, add.yr = TRUE)
simCovariate <- function(
cov.list=NULL,
...,
n,
add.yr = TRUE
) {
if (is.null(cov.list)) {
cov.list <- list(...)
if (length(cov.list) == 1) {
if (is.null(names(cov.list))) {
cov.list <- cov.list[[1]]
}
}
}
# set up list to hold output
datalist <- list()
distribution <- c('uniform','normal','beta','binomial', 'poisson', 'bernoulli')
seeds.used <- vector()
for (i in 1:length(cov.list)) {
cov <- cov.list[[i]]
cov.name <- names(cov.list[i])
# set the generating function
distr <- cov[['dist']]
if (!is.function(distr)) {
if (distr == "bernoulli") {dist <- 'binomial'}
distr <- tolower(distr)
if (substr(distr,1,1) == 'r') distr <- substr(distr,2,nchar(distr))
distr <- match.arg(distr,distribution)
distr <- switch(distr, uniform=stats::runif, normal = stats::rnorm, beta = stats::rbeta, binomial = stats::rbinom, poisson = stats::rpois, bernoulli = stats::rbinom, sample = sample, multinomial = stats::rmultinom)
}
# keep only necessary arguments
arg.names <- names(formals(fun = distr))
keepers <- names(cov) %in% arg.names
rand.args <- cov[keepers == TRUE]
rand.args$n <- n
# set the seed
        if (is.null(cov[['seed']])) {
            # draw a seed so the simulation remains reproducible via the 'seeds.used'
            # attribute; sample.int avoids referencing .Random.seed before it exists
            seed <- sample.int(.Machine$integer.max, 1)
            warning(paste("You did not provide a random seed for the simulation of covariate ", cov.name, ". Data have been simulated using ", seed, " as the random seed.", sep = ""))
        } else {
            seed <- cov[['seed']]
        }
seed <- as.integer(seed)
set.seed(seed)
seeds.used <- append(seeds.used, values = seed)
# call the function
dataset <- do.call(what = distr, args = rand.args)
# round if requested
if (!is.null(cov[['round']])) dataset <- round(dataset, cov[['round']])
# add to output list
datalist[[cov.name]] <- dataset
} # end of covariate type i
# convert datalist to dataframe
dataset <- as.data.frame(datalist)
# add yr if requested
if (add.yr == TRUE) {dataset$yr <- 1:nrow(dataset)}
# add attributes
attr(dataset, which = 'seeds.used') <- seeds.used
attr(dataset, which = 'cov.list') <- cov.list
attr(dataset, which = 'n') <- n
return(dataset)
} # end of function
| /scratch/gouwar.j/cran-all/cranData/AMModels/R/simCovariate.R |
createTimedAlert <- function(session, anchorId, alertId = NULL, title = NULL,
    content = NULL, timeout = 2000, style = NULL, dismiss = FALSE, append = FALSE)
{
    data <- shiny:::dropNulls(list(id = anchorId, alertId = alertId,
        title = title, content = content, timeout = timeout, style = style,
        dismiss = dismiss, append = append))
    session$sendCustomMessage(type = "timedAlertCreate", message = data)
}
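# Usage sketch (assumes the app's JavaScript registers a matching
# 'timedAlertCreate' custom message handler; the ids and content below are
# hypothetical):
# createTimedAlert(session, anchorId = 'saveAlert', alertId = 'savedAlert',
#     content = 'Library saved.', timeout = 1500, style = 'success')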
| /scratch/gouwar.j/cran-all/cranData/AMModels/inst/modelMgr/createTimedAlert.R |
library(shiny)
library(shinyBS)
library(AMModels)
source('createTimedAlert.R')
source('textareaInput.R')
# Contents of the values list:
# modelSource: character, either 'Global Env' or 'File'
# selectedModelLib: character, the name of the selected amModelLib
# editedAMModelLib: amModelLib, the result of a 'new from selected'/search/save action
# editedModelsData: named list with models and data elements--both are named vectors of integer save button values
# currentAMModelLib: the amModelLib with edits made in the left panel (add/delete model, add/delete data, add/delete/edit metadata). Replaces getSelectedModel().
# amModelLibForModals: the amModelLib for add/delete/edit metadata modal creation. Updated only by add/delete model, add/delete data.
# newAMModelLibInfo: named list of info that will be inserted into a new amml object on creation.
shinyServer(
function(input,output,session){
# # Debugger
# observe(label="console",{
# if(input$console != 0) {
# options(browserNLdisabled=TRUE)
# saved_console<-".RDuetConsole"
# if (file.exists(saved_console)) load(saved_console)
# isolate(browser())
# save(file=saved_console,list=ls(environment()))
# }
# })
values <- reactiveValues()
# A list with models and data elements--all are named vectors of checkbox values
editedModelsData <- list(models=0, data=0)
# List and store contents of uploaded .RData/.rda or .RDS file
getUpload <- reactive({
userdata <- input$userDataUpload
if(!is.null(userdata)) {
ext <- gsub('.+\\.', '', userdata$name)
userDataEnv <- new.env()
if(tolower(ext) %in% c('rdata', 'rda')) {
load(userdata$datapath, envir = .GlobalEnv)
userdata <- load(userdata$datapath, envir = userDataEnv)
} else if(tolower(ext) == 'rds') {
# Don't need to readRDS to a custom env, only do
# it for consistency in output from this function
name <- make.names(gsub('\\..{1,4}$', '', userdata$name))
obj <- readRDS(userdata$datapath)
userdata <- name
assign(x=name, value=obj, pos=userDataEnv)
assign(x=name, value=obj, pos=.GlobalEnv)
}
list(objnames=userdata, userdata=userDataEnv)
}
})
# List contents of .GlobalEnv
getFromGlobalEnv <- function(){
obj <- objects(".GlobalEnv")
if(!length(obj)) {
NULL
} else {
obj
}
}
# selection can be from one of two sources:
# --can upload a model as .RData/.rda or RDS
observe({
userdata <- getUpload()
if(!is.null(userdata)) {
choices <- userdata$objnames
if(input$filterForAMModelLibs) {
isamml <- sapply(choices, function(x) is(userdata$userdata[[x]], 'amModelLib'))
choices <- choices[isamml]
}
if(!is.null(choices)) {
if(length(choices)) {
names(choices) <- choices
output$selectModelUI <- renderUI({
radioButtons('selectAMModelLib', 'Select AMModelLib', choices=choices)
})
values$modelSource <- 'File'
values$currentAMModelLib <- NULL
editedmodels <- list()
} else {
output$selectModelUI <- renderUI({
div(
p(style='font-weight:bold;', 'Select AMModelLib'),
p('No objects in file to list.')
)
})
values$modelSource <- NULL
}
}
}
})
# --or can select one from the .GlobalEnv
observe({
if(input$listGlobalEnv > 0) {
choices <- getFromGlobalEnv()
if(input$filterForAMModelLibs) {
isamml <- sapply(choices, function(x) is(.GlobalEnv[[x]], 'amModelLib'))
choices <- choices[isamml]
}
if(!is.null(choices)) {
if(length(choices)) {
names(choices) <- choices
output$selectModelUI <- renderUI({
radioButtons('selectAMModelLib', 'Select AMModelLib', choices=choices)
})
values$modelSource <- 'Global Env'
} else {
output$selectModelUI <- renderUI({
div(
p(style='font-weight:bold;', 'Select AMModelLib'),
p('No objects in global environment to list.')
)
})
values$modelSource <- NULL
}
}
}
})
# --set the .GlobalEnv selector to start
observe({
if(is.null(input$selectAMModelLib)) {
choices <- getFromGlobalEnv()
isamml <- sapply(choices, function(x) is(.GlobalEnv[[x]], 'amModelLib'))
choices <- choices[isamml]
if(!is.null(choices)) {
if(length(choices)) {
names(choices) <- choices
output$selectModelUI <- renderUI({
radioButtons('selectAMModelLib', 'Select AMModelLib', choices=choices)
})
values$modelSource <- 'Global Env'
} else {
output$selectModelUI <- renderUI({
div(
p(style='font-weight:bold;', 'Select AMModelLib'),
p('No objects in global environment to list.')
)
})
values$modelSource <- NULL
}
}
}
})
# Allow the 'create amModelList' action to update the hidden ui
observe({
if(!is.null(input$selectAMModelLib)) outputOptions(output, 'selectModelUI', suspendWhenHidden = FALSE)
})
# --or can create a new one (in a different modal)
observe({
input$newAMModelLibGo
isolate({
if(!is.null(input[['addinfo-name']])) {
amml <- make.names(input$newAMModelLibName)
description <- input$newAMModelLibDescription
info <- values$newAMModelLibInfo
if(input[['addinfo-name']] != '' && input[['addinfo-value']] != '') {
newinfo <- list(input[['addinfo-value']])
names(newinfo) <- input[['addinfo-name']]
info <- c(info, newinfo)
}
if(is.null(info)) info <- list()
# Can only create an amModelLib if it is assigned a name
if(input$newAMModelLibName != '') {
amml.obj <- amModelLib(info=info, description=description)
assign(amml, amml.obj, pos='.GlobalEnv')
# Update: no box, always edit on close if possible
# make new amModelLib the current one, if box is checked
# if(input$newAMModelLibRemoteActive) {
values$modelSource <- 'Global Env'
values$selectedModelLib <- amml
choices <- getFromGlobalEnv()
# Terri reports that the new one is appearing twice?
# My tests show otherwise?
# choices <- c(choices, amml)
if(is.null(input$filterForAMModelLibs)) {
isamml <- sapply(choices, function(x) is(.GlobalEnv[[x]], 'amModelLib'))
choices <- choices[isamml]
} else {
if(input$filterForAMModelLibs) {
isamml <- sapply(choices, function(x) is(.GlobalEnv[[x]], 'amModelLib'))
choices <- choices[isamml]
}
}
names(choices) <- choices
updateRadioButtons(session, 'selectAMModelLib', choices=choices, selected=amml)
# }
values$newAMModelLibInfo <- NULL
updateTextInput(session, 'addinfo-name', value='')
updateTextInput(session, 'addinfo-value', value='')
updateTextInput(session, 'newAMModelLibName', value='')
updateTextInput(session, 'newAMModelLibDescription', value='')
}
}
})
})
# Either way, report which one is currently selected
output$reportSelectedModel <- renderUI({
HTML({paste0('
<p style="font-weight:bold;">amModelLib: <span style="font-weight:normal;">', values$selectedModelLib, '</span></p>'
)})
})
# And report the source
output$reportSelectedModelSource <- renderUI({
HTML({paste0('
<p style="font-weight:bold;">Model source: <span style="font-weight:normal;">', values$modelSource, '</span></p>'
)})
})
# Display the current amModelLib info
output$amModelLibInfoUI <- renderUI({
input$okaytoaddinfotocurrent
# Make table headings
th <- '
<tr>
<th style="text-align:right;padding-right:1em;">Remove</th>
<th style="text-align:center;padding-right:1em;">Name</th>
<th style="text-align:center;">Value</th>
</tr>
'
# Make place for new info
newinfo <- paste0('
<tr>
<td style="width:8em;text-align:right;padding-right:1.5em;">',
actionButton('okaytoaddinfotocurrent', '', icon=icon('thumbs-o-up')),'
</td>
<td style="width:12em;text-align:center;padding-right:1em;">
<span class="form-group shiny-input-container">
<input id="addinfotocurrent-name" type="text" class="form-control" value=""/>
</span>
</td>
<td style="width:40em;text-align:left;">
<span class="form-group shiny-input-container">
<textarea id="addinfotocurrent-value" rows=1, style="width:100%;"></textarea>
</span>
</td>
</tr>'
)
# Parse existing info
exinfo <- values$currentAMModelLib
if(!is.null(exinfo)) {
exinfo <- ammlInfo(exinfo)
if(length(exinfo)) {
exinfonm <- names(exinfo)
exinfo <- paste0(unlist(lapply(1:length(exinfo), function(x) paste0('
<tr>
<td style="width:8em;text-align:right;padding-right:2em;">
<div class="checkbox">
<label>
<input id="', paste0('selectCurrentInfo-', x),'" type="checkbox"/>
</label>
</div>
</td>
<td style="width:12em;text-align:center;padding-right:1em;">
<span class="form-group shiny-input-container">
<input id="editCurrentInfo-name" type="text" class="form-control" value="', exinfonm[x], '"/>
</span>
</td>
<td style="width:40em;text-align:left;">
<span class="form-group shiny-input-container">
<textarea id="editCurrentInfo-value" rows=1, style="width:100%;">', exinfo[[x]], '</textarea>
</span>
</td>
</tr>'
))), collapse='')
}
}
list(
HTML(paste0('<table id="addInfoToCurrentAMMLTable">', th, exinfo, newinfo, '</table>')),
bsTooltip(id='addInfoToCurrentAMMLTable th:nth-child(1)', title='Checked lines are removed and the new line is added when the thumbs-up is pressed', placement = "top", trigger = "hover", options = list(container='body')),
bsTooltip(id='addInfoToCurrentAMMLTable th:nth-child(2)', title='Coerced to a valid R name: must start with a character and contain no spaces', placement = "top", trigger = "hover", options = list(container='body')),
bsTooltip(id='addInfoToCurrentAMMLTable th:nth-child(3)', title='Can contain any value', placement = "top", trigger = "hover", options = list(container='body'))
)
})
# Keep the current amModelLib up to date with new amModelLib info
observe({
go <- input$okaytoaddinfotocurrent
isolate({
if(!is.null(go)) {
amml <- values$currentAMModelLib
if(!is.null(amml)) {
exinfo <- amml@info
if(go) {
newinfo <- NULL
if(length(exinfo)) {
rminfo <- sapply(1:length(exinfo), function(x) input[[paste0('selectCurrentInfo-', x)]])
exinfo <- exinfo[!rminfo]
}
if(input[['addinfotocurrent-value']] != '' && input[['addinfotocurrent-name']] != '') {
newinfo <- list(input[['addinfotocurrent-value']])
names(newinfo) <- make.names(input[['addinfotocurrent-name']])
}
amml@info <- c(exinfo, newinfo)
values$currentAMModelLib <- amml
}
}
}
})
})
# Update the current amml description field
# observe({
# input$selectAMModelLib
# input$newAMModelLibGo
# input$listGlobalEnv
# values$currentAMModelLib
# isolate({
# browser()
# amml <- values$currentAMModelLib
# if(!is.null(amml)) {
# descr <- ammlDesc(amml)
# updateTextInput(session, 'amModelLibDescription', value=descr)
# }
# })
# })
# Store the selected amModelLib in a dedicated transporter--now a reactiveValues list
# getSelectedModel <- reactive({
observe({
input$revertchanges
if(!is.null(values$modelSource) && !is.null(input$selectAMModelLib)) {
if(values$modelSource == 'File' && input$selectAMModelLib != 'NA') {
userdata <- getUpload()
selectedobject <- userdata$userdata[[input$selectAMModelLib]]
if(is(selectedobject, 'amModelLib')) {
values$selectedModelLib <- input$selectAMModelLib
values$checkedModelsData <- NULL
values$currentAMModelLib <- selectedobject
values$amModelLibForModals <- selectedobject
editedModelsData <<- list(models=0, data=0)
} else {
values$selectedModelLib <- NULL
}
} else if(values$modelSource == 'Global Env' && input$selectAMModelLib != 'NA') {
selectedobject <- .GlobalEnv[[input$selectAMModelLib]]
if(is(selectedobject, 'amModelLib')) {
values$selectedModelLib <- input$selectAMModelLib
values$checkedModelsData <- NULL
values$currentAMModelLib <- selectedobject
values$amModelLibForModals <- selectedobject
editedModelsData <<- list(models=0, data=0)
} else {
values$selectedModelLib <- NULL
}
}
} else {
values$currentAMModelLib <- NULL
values$selectedModelLib <- NULL
values$amModelLibForModals <- NULL
}
})
# report the summary of the selected amModelLib (not the current/possibly edited version)
output$selectedModelSummary <- renderPrint({
selectedModel <- NULL
if(!is.null(values$modelSource) && !is.null(input$selectAMModelLib)) {
if(values$modelSource == 'File') {
userdata <- getUpload()
selectedModel <- userdata$userdata[[input$selectAMModelLib]]
} else if(values$modelSource == 'Global Env') {
selectedModel <- .GlobalEnv[[input$selectAMModelLib]]
}
}
# selectedModel <- getSelectedModel()
if(!is.null(selectedModel)) {
tryCatch(summary(selectedModel), error=function(e) print(selectedModel))
}
})
# Keep the values list up to date with new amModelLib info
observe({
go <- input$okaytoaddinfo
isolate({
if(!is.null(go)) {
exinfo <- values$newAMModelLibInfo
if(go) {
newinfo <- NULL
if(!is.null(exinfo)) {
rminfo <- unlist(lapply(1:length(exinfo), function(x) input[[paste0('selectNewInfo-', x)]]))
exinfo <- exinfo[!rminfo]
}
if(input[['addinfo-value']] != '' && input[['addinfo-name']] != '') {
newinfo <- list(input[['addinfo-value']])
names(newinfo) <- input[['addinfo-name']]
}
values$newAMModelLibInfo <- c(exinfo, newinfo)
}
}
})
})
# Enter new amModelLib info
output$newAMModelLibInfoUI <- renderUI({
input$okaytoaddinfo
# Make table headings
th <- '
<tr>
<th style="text-align:right;padding-right:1em;">Remove</th>
<th style="text-align:center;padding-right:1em;">Name</th>
<th style="text-align:left;">Value</th>
</tr>
'
# Make place for new info
newinfo <- paste0('
<tr>
<td style="width:8em;text-align:right;padding-right:1.5em;">',
actionButton('okaytoaddinfo', '', icon=icon('thumbs-o-up')),'
</td>
<td style="width:12em;text-align:center;padding-right:1em;">
<span class="form-group shiny-input-container">
<input id="addinfo-name" type="text" class="form-control" value=""/>
</span>
</td>
<td style="width:40em;text-align:left;">
<span class="form-group shiny-input-container">
<textarea id="addinfo-value" rows=1, style="width:100%;"></textarea>
</span>
</td>
</tr>'
)
# Parse existing info
exinfo <- values$newAMModelLibInfo
if(!is.null(exinfo)) {
if(length(exinfo)) {
exinfonm <- names(exinfo)
exinfo <- paste0(unlist(lapply(1:length(exinfo), function(x) paste0('
<tr>
<td style="width:8em;text-align:right;padding-right:2em;">
<div class="checkbox">
<label>
<input id="', paste0('selectNewInfo-', x),'" type="checkbox"/>
</label>
</div>
</td>
<td style="width:12em;text-align:center;padding-right:1em;">
<span class="form-group shiny-input-container">
<input id="editNewInfo-name" type="text" class="form-control" value="', exinfonm[x], '"/>
</span>
</td>
<td style="width:40em;text-align:left;">
<span class="form-group shiny-input-container">
<textarea id="editNewInfo-value" rows=1, style="width:100%;">', exinfo[[x]], '</textarea>
</span>
</td>
</tr>'
))), collapse='')
}
}
HTML(paste0('<table id="addInfoToNewAMMLTable">', th, exinfo, newinfo, '</table>'))
})
# Parse the contents of selected amModelLib into a table
output$selectedModelContents <- renderUI({
values$selectedModelLib
selected.model <- values$currentAMModelLib
# selected.model <- getSelectedModel()
if(!is.null(selected.model)) {
if(is(selected.model, 'amModelLib')) {
isolate({
# Make table headings
th <- '
<th style="text-align:right;padding-right:1em;">Select</th>
<th style="text-align:center;padding-right:1em;">Name</th>
<th style="text-align:center;">Summary</th>
<th style="text-align:center;">Edit</th>
'
# Get model and data names/presence
modnames <- names(selected.model@models)
datnames <- names(selected.model@data)
nmodels <- length(modnames)
ndata <- length(datnames)
if(nmodels) {
# Remove periods from names for js compatibility
modnames <- paste0(gsub('\\.', '', modnames), 1:length(modnames))
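# e.g. a model named "fit.1" becomes "fit1" and then, with its position in the
# list appended for uniqueness, "fit11" ("fit.1" is a hypothetical name, used
# only to illustrate the sanitization)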
# Make table data
modrows <- sapply(1:nmodels, function(x) {
paste0('
<tr>
<td style="width:8em;text-align:right;padding-right:1em;">
<div class="checkbox">
<label>
<input id="', paste0('select', modnames[x]),'" type="checkbox"/>
</label>
</div>
</td>
<td style="width:20em;text-align:center;padding-right:1em;">', names(selected.model@models)[x], '</td>
<td style="width:6em;text-align:center;">
<button id="', paste0('summary', modnames[x]), '" class="btn btn-default action-button shiny-bound-input" data-target="', paste0('#summary', modnames[x], 'modal'), '" data-toggle="modal" type="button">
<i class="fa fa-file-text-o"></i>
</button>
</td>
<td style="width:6em;text-align:center;">
<button id="', paste0('edit', modnames[x]), '" class="btn btn-default action-button shiny-bound-input" data-target="', paste0('#edit', modnames[x], 'modal'), '" data-toggle="modal" type="button">
<i class="fa fa-pencil"></i>
</button>
</td>
</tr>'
)
})
} else {
modrows <- '
<tr>
<td style="width:8em;text-align:right;padding-right:1em;"></td>
<td style="width:20em;text-align:center;padding-right:1em;">--- There are no models ---</td>
<td style="width:6em;text-align:center;"></td>
<td style="width:6em;text-align:center;"></td>
</tr>'
}
if(ndata) {
# Remove periods from names for js compatibility
datnames <- paste0(gsub('\\.', '', datnames), 1:length(datnames))
# Make table data
datrows <- sapply(1:ndata, function(x) {
paste0('
<tr>
<td style="width:8em;text-align:right;padding-right:1em;">
<div class="checkbox">
<label>
<input id="', paste0('select', datnames[x]),'" type="checkbox"/>
</label>
</div>
</td>
<td style="width:20em;text-align:center;padding-right:1em;">', names(selected.model@data)[x], '</td>
<td style="width:6em;text-align:center;">
<button id="', paste0('summary', datnames[x]), '" class="btn btn-default action-button shiny-bound-input" data-target="', paste0('#summary', datnames[x], 'modal'), '" data-toggle="modal" type="button">
<i class="fa fa-file-text-o"></i>
</button>
</td>
<td style="width:6em;text-align:center;">
<button id="', paste0('edit', datnames[x]), '" class="btn btn-default action-button shiny-bound-input" data-target="', paste0('#edit', datnames[x], 'modal'), '" data-toggle="modal" type="button">
<i class="fa fa-pencil"></i>
</button>
</td>
</tr>'
)
})
} else {
datrows <- '
<tr>
<td style="width:8em;text-align:right;padding-right:1em;"></td>
<td style="width:20em;text-align:center;padding-right:1em;">--- There are no datasets ---</td>
<td style="width:6em;text-align:center;"></td>
<td style="width:6em;text-align:center;"></td>
</tr>'
}
# Assemble model table
modtab <- HTML(paste0(
span(style='padding-top:0.1em;padding-left:2em;',
HTML('
<button id="selectAllModels" style="width: 7.5em;" type="button" class="btn btn-default action-button">
<i class="fa fa-check-square-o"></i>
<i class="fa fa-square-o"></i>
</button>'
)
# actionButton("selectAllModels", '', icon=list(icon('check-square-o'), icon('square-o')), width='7.5em')
)
,
bsTooltip(id='selectAllModels', title='Select/deselect all', placement = "top", trigger = "hover", options = list(container='body')), '
<table id="model_table" style="width:100%"><tr>', th, '</tr>', paste0(modrows, collapse=''), '
</table>
<span style="padding-left:2em;">
<button id="addModelToAMML" class="btn btn-default action-button shiny-bound-input" data-target="#addModelToAMMLmodal" data-toggle="modal" type="button">
<i class="fa fa-plus"></i>
Add model
</button>
</span>'
))
# Assemble data table
dattab <- HTML(paste0(
span(style='padding-top:0.1em;padding-left:2em;',
HTML('
<button id="selectAllData" style="width: 7.5em;" type="button" class="btn btn-default action-button">
<i class="fa fa-check-square-o"></i>
<i class="fa fa-square-o"></i>
</button>'
)
# actionButton("selectAllData", '', icon=list(icon('check-square-o'), icon('square-o')), width='7.5em')
)
,
bsTooltip(id='selectAllData', title='Select/deselect all', placement = "top", trigger = "hover", options = list(container='body')), '
<table id="data_table" style="width:100%"><tr>', th, '</tr>', paste0(datrows, collapse=''), '</table>
<span style="padding-left:2em;">
<button id="addDataToAMML" class="btn btn-default action-button shiny-bound-input" data-target="#addDataToAMMLmodal" data-toggle="modal" type="button">
<i class="fa fa-plus"></i>
Add data
</button>
</span>'
))
# Display modelLib metadata in a tab rather than above the tabs
infotab <- div(style='clear:both;padding-bottom:1em;',
textareaInput('amModelLibDescription', 'Description', value=ammlDesc(selected.model), rows=3, width = '100%'),
p(style='font-weight:bold;', 'Info/Metadata'),
uiOutput('amModelLibInfoUI')
)
# Create tabset
tabsetPanel(
id='amModelLibSelectedElement',
selected=input$amModelLibSelectedElement,
tabPanel('Info/Metadata', infotab),
tabPanel('Models', modtab),
tabPanel('Data', dattab)
)
})
}
} else {
tabsetPanel(
tabPanel('Info/Metadata'),
tabPanel('Models'),
tabPanel('Data')
)
}
})
outputOptions(output, 'selectedModelContents', suspendWhenHidden = FALSE)
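# suspendWhenHidden = FALSE keeps the output above rendering even while its tab
# is hidden, so the table and its checkbox inputs exist before the user switches
# to them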
# Monitor checked models/data 'remove' boxes and add model boxes
observe({
selected.model <- values$currentAMModelLib
# selected.model <- getSelectedModel()
if(!is.null(selected.model)) {
if(is(selected.model, 'amModelLib')) {
# Get model and data names/presence
modnames <- names(selected.model@models)
datnames <- names(selected.model@data)
nmodels <- length(modnames)
ndata <- length(datnames)
selectedmodels <- selecteddata <- NULL
prevselected <- values$checkedModelsData
if(nmodels) {
# sanitize names for js compatibility
modnames <- paste0('select', gsub('\\.', '', modnames), 1:length(modnames))
# Check for selections
selectedmodels <- unlist(lapply(modnames, function(x) {
if(is.null(input[[x]])) {
FALSE
} else {
input[[x]]
}
}))
if(!is.null(selectedmodels)) names(selectedmodels) <- modnames
}
if(ndata) {
# sanitize names for js compatibility
datnames <- paste0('select', gsub('\\.', '', datnames), 1:length(datnames))
# Check for selections
selecteddata <- unlist(lapply(datnames, function(x) {
if(is.null(input[[x]])) {
FALSE
} else {
input[[x]]
}
}))
if(!is.null(selecteddata)) names(selecteddata) <- datnames
}
values$checkedModelsData <- list(models=selectedmodels, data=selecteddata)
}
}
})
# Select/deselect all models: if any boxes are currently checked, clear them; otherwise check them all
observe({
go <- input$selectAllModels
if(!is.null(go)) {
isolate({
if(go) {
selectedmodels <- values$checkedModelsData$models
if(!is.null(selectedmodels)) {
if(any(selectedmodels)) {
selectedmodels <- selectedmodels[selectedmodels]
z <- lapply(1:length(selectedmodels), function(x) updateCheckboxInput(session, names(selectedmodels)[x], value=FALSE))
} else {
z <- lapply(names(selectedmodels), function(x) updateCheckboxInput(session, x, value=TRUE))
}
}
}
})
}
})
# Select/deselect all data: if any boxes are currently checked, clear them; otherwise check them all
observe({
go <- input$selectAllData
if(!is.null(go)) {
isolate({
if(go) {
selecteddata <- values$checkedModelsData$data
if(!is.null(selecteddata)) {
if(any(selecteddata)) {
selecteddata <- selecteddata[selecteddata]
z <- lapply(1:length(selecteddata), function(x) updateCheckboxInput(session, names(selecteddata)[x], value=FALSE))
} else {
z <- lapply(names(selecteddata), function(x) updateCheckboxInput(session, x, value=TRUE))
}
}
}
})
}
})
# Delete selected
observe({
go <- input$deleteSelected
isolate({
selectedammodellib <- values$currentAMModelLib
# selectedammodellib <- getSelectedModel()
selectedmodels <- values$checkedModelsData$models
if(!is.null(selectedmodels)) {
if(any(selectedmodels)) {
selectedammodellib@models <- selectedammodellib@models[!selectedmodels]
}
}
selecteddata <- values$checkedModelsData$data
if(!is.null(selecteddata)) {
if(any(selecteddata)) {
selectedammodellib@data <- selectedammodellib@data[!selecteddata]
}
}
values$currentAMModelLib <- selectedammodellib
values$amModelLibForModals <- selectedammodellib
# values$editedAMModelLib <- selectedammodellib
})
})
# New amModelLib from selected
observe({
go <- input$newAMMLFromSelected
isolate({
selectedammodellib <- values$currentAMModelLib
selectedmodels <- values$checkedModelsData$models
if(!is.null(selectedmodels)) {
if(any(selectedmodels)) {
selectedammodellib@models <- selectedammodellib@models[selectedmodels]
} else {
selectedammodellib@models <- list()
}
}
selecteddata <- values$checkedModelsData$data
if(!is.null(selecteddata)) {
if(any(selecteddata)) {
selectedammodellib@data <- selectedammodellib@data[selecteddata]
} else {
selectedammodellib@data <- list()
}
}
values$editedAMModelLib <- selectedammodellib
})
})
# Get selected
observe({
go <- input$getSelected
isolate({
selectedammodellib <- values$currentAMModelLib
selectedmodels <- values$checkedModelsData$models
mods <- dat <- NULL
if(!is.null(selectedmodels)) {
if(any(selectedmodels)) {
mods <- lapply(which(selectedmodels), function(x) {
y <- getAMModel(x, selectedammodellib)
assign(names(selectedammodellib@models)[x], y, pos='.GlobalEnv')
y
})
names(mods) <- names(selectedammodellib@models)[selectedmodels]
}
}
selecteddata <- values$checkedModelsData$data
if(!is.null(selecteddata)) {
if(any(selecteddata)) {
dat <- lapply(which(selecteddata), function(x) {
y <- getAMData(x, selectedammodellib)
assign(names(selectedammodellib@data)[x], y, pos='.GlobalEnv')
y
})
names(dat) <- names(selectedammodellib@data)[selecteddata]
}
}
z <- c(mods, dat)
if(!is.null(z)) z <- z[!unlist(lapply(z, is.null))]
if(length(z)) {
output$summaryOfEditedModel <- renderPrint({
cat('### Objects already saved to .GlobalEnv \n\n')
for(x in 1:length(z)) {
cat('#=================================\n# Object name:', names(z)[x], '\n')
print(summary(z[[x]]))
}
})
}
})
})
# Get selected as list
observe({
go <- input$getAsListSelected
isolate({
selectedammodellib <- values$currentAMModelLib
selectedmodels <- values$checkedModelsData$models
mods <- dat <- NULL
if(!is.null(selectedmodels)) {
if(any(selectedmodels)) {
mods <- lapply(which(selectedmodels), function(x) {
y <- getAMModel(x, selectedammodellib, as.list=TRUE)
assign(names(selectedammodellib@models)[x], y, pos='.GlobalEnv')
y
})
names(mods) <- names(selectedammodellib@models)[selectedmodels]
}
}
selecteddata <- values$checkedModelsData$data
if(!is.null(selecteddata)) {
if(any(selecteddata)) {
dat <- lapply(which(selecteddata), function(x) {
y <- getAMData(x, selectedammodellib, as.list=TRUE)
assign(names(selectedammodellib@data)[x], y, pos='.GlobalEnv')
y
})
names(dat) <- names(selectedammodellib@data)[selecteddata]
}
}
z <- c(mods, dat)
if(length(z)) {
output$summaryOfEditedModel <- renderPrint({
cat('### Objects already saved to .GlobalEnv \n\n')
for(x in 1:length(z)) {
cat('#=================================\n# Object name:', names(z)[x], '\n')
print(summary(z[[x]]))
}
})
}
})
})
# Render modals to edit metadata and display model/data summaries
# Done from a different reactive source than that which edits the embedded
# table to avoid deleting a modal from the ui while it is open!
output$renderedModals <- renderUI({
# The modals need to be created initially and re-created each time the tabs are clicked
input$amModelLibSelectedElement
selected.model <- values$amModelLibForModals
# selected.model <- getSelectedModel()
if(!is.null(selected.model)) {
if(is(selected.model, 'amModelLib')) {
# this is the original human-readable name
modnames <- names(selected.model@models)
datnames <- names(selected.model@data)
# these names are sanitized for js compatibility, with an integer appended for uniqueness
modnames.s <- paste0(gsub('\\.', '', modnames), 1:length(modnames))
datnames.s <- paste0(gsub('\\.', '', datnames), 1:length(datnames))
nmodels <- length(modnames)
ndata <- length(datnames)
if(nmodels) {
# Assemble modals
modeditui <- lapply(1:length(modnames), function(x) {
bsModal(
id=paste0('edit', modnames.s[x],'_Modal'),
title=p(style='text-align:center;font-weight:bold;', paste0('Editing ', modnames[x], ' Metadata')),
trigger=paste0('edit', modnames.s[x]),
div(style='min-height:400px;',
uiOutput(paste0('modelmetadatatablesUI-', x)),
div(style='float:right;padding:2em;', actionButton(paste0('saveeditsto', modnames.s[x]), 'Save', icon=icon('save')))
),
size='large'
)
})
modsummaryui <- lapply(1:length(modnames), function(x) {
bsModal(
id=paste0('summary', modnames.s[x],'_Modal'),
title=p(style='text-align:center;font-weight:bold;', paste0('Summary of ', modnames[x])),
trigger=paste0('summary', modnames.s[x]),
div(style='min-height:400px;overflow-x:scroll;',
textOutput(paste0('modelsummaryUI-', x), container=tags$pre)
# uiOutput(paste0('modelsummaryUI-', x))
),
size='large'
)
})
} else {
modeditui <- modsummaryui <- NULL
}
if(ndata) {
dateditui <- lapply(1:length(datnames), function(x) {
bsModal(
id=paste0('edit', datnames.s[x],'_Modal'),
title=p(style='text-align:center;font-weight:bold;', paste0('Editing ', datnames[x], ' Metadata')),
trigger=paste0('edit', datnames.s[x]),
div(style='min-height:400px;',
uiOutput(paste0('datametadatatablesUI-', x)),
div(style='float:right;padding:2em;', actionButton(paste0('saveeditsto', datnames.s[x]), 'Save', icon=icon('save')))
),
size='large'
)
})
datsummaryui <- lapply(1:length(datnames), function(x) {
bsModal(
id=paste0('summary', datnames.s[x],'_Modal'),
title=p(style='text-align:center;font-weight:bold;', paste0('Summary of ', datnames[x])),
trigger=paste0('summary', datnames.s[x]),
div(style='min-height:400px;overflow-x:scroll;',
textOutput(paste0('datasummaryUI-', x), container=tags$pre)
# uiOutput(paste0('datasummaryUI-', x))
),
size='large'
)
})
} else {
dateditui <- datsummaryui <- NULL
}
c(modeditui, modsummaryui, dateditui, datsummaryui)
}
}
})
outputOptions(output, 'renderedModals', suspendWhenHidden = FALSE)
# Create tables to edit metadata and summaries of models/data
observe({
# The tables need to be created initially and re-created each time the tabs are clicked
input$amModelLibSelectedElement
selected.model <- values$currentAMModelLib
if(!is.null(selected.model)) {
if(is(selected.model, 'amModelLib')) {
# this is the original human-readable name
modnames <- names(selected.model@models)
datnames <- names(selected.model@data)
allnames <- c(modnames, datnames)
# these names are sanitized for js compatibility, with an integer appended for uniqueness
modnames.s <- paste0(gsub('\\.', '', modnames), 1:length(modnames))
datnames.s <- paste0(gsub('\\.', '', datnames), 1:length(datnames))
# Make table headings
th <- '
<tr>
<th style="text-align:right;padding-right:1em;">Delete</th>
<th style="text-align:center;padding-right:1em;">Name</th>
<th style="text-align:center;">Value</th>
</tr>
'
nmodels <- length(modnames)
ndata <- length(datnames)
if(nmodels) {
# Make model td
modrows <- sapply(1:nmodels, function(x) {
newmetarow <- paste0('
<tr>
<td style="width:8em;text-align:right;padding-right:1em;">
<i class="fa fa-asterisk"></i>
</td>
<td style="width:12em;text-align:center;padding-right:1em;">
<span class="form-group shiny-input-container">
<input id="', paste0('add', modnames.s[x], '-name'), '" type="text" class="form-control" value=""/>
</span>
</td>
<td style="width:40em;text-align:left;">
<span class="form-group shiny-input-container">
<textarea id="', paste0('add', modnames.s[x], '-value'), '" rows=1, style="width:100%;"></textarea>
</span>
</td>
</tr>'
)
metadat <- modelMeta(selected.model, x)
oldmetadat <- sapply(1:length(metadat), function(y) {
paste0('
<tr>
<td style="width:8em;text-align:right;padding-right:1em;">
<div class="checkbox">
<label>
<input id="', paste0('select', modnames.s[x], '-', y),'" type="checkbox"/>
</label>
</div>
</td>
<td style="width:12em;text-align:center;padding-right:1em;">
<span class="form-group shiny-input-container">
<input id="', paste0('edit', modnames.s[x], '-name-', y), '" type="text" class="form-control" value="', names(metadat)[y],'"/>
</span>
</td>
<td style="width:40em;text-align:left;">
<span class="form-group shiny-input-container">
<textarea id="', paste0('edit', modnames.s[x], '-value-', y), '" rows=1, style="width:100%;">' , metadat[[y]], '</textarea>
</span>
</td>
</tr>'
)
})
paste0(paste0(oldmetadat, collapse=''), newmetarow, collapse='')
})
} else {
modrows <- NULL
}
if(ndata) {
# Make data td
datrows <- sapply(1:ndata, function(x) {
newmetarow <- paste0('
<tr>
<td style="width:8em;text-align:right;padding-right:1em;">
<i class="fa fa-asterisk"></i>
</td>
<td style="width:12em;text-align:center;padding-right:1em;">
<span class="form-group shiny-input-container">
<input id="', paste0('add', datnames.s[x], '-name'), '" type="text" class="form-control" value=""/>
</span>
</td>
<td style="width:40em;text-align:left;">
<span class="form-group shiny-input-container">
<textarea id="', paste0('add', datnames.s[x], '-value'), '" rows=1, style="width:100%;"></textarea>
</span>
</td>
</tr>'
)
metadat <- dataMeta(selected.model, x)
oldmetadat <- sapply(1:length(metadat), function(y) {
paste0('
<tr>
<td style="width:8em;text-align:right;padding-right:1em;">
<div class="checkbox">
<label>
<input id="', paste0('select', datnames.s[x], '-', y),'" type="checkbox"/>
</label>
</div>
</td>
<td style="width:12em;text-align:center;padding-right:1em;">
<span class="form-group shiny-input-container">
<input id="', paste0('edit', datnames.s[x], '-name-', y), '" type="text" class="form-control" value="', names(metadat)[y],'"/>
</span>
</td>
<td style="width:40em;text-align:left;">
<span class="form-group shiny-input-container">
<textarea id="', paste0('edit', datnames.s[x], '-value-', y), '" rows=1, style="width:100%;">' , metadat[[y]], '</textarea>
</span>
</td>
</tr>'
)
})
paste0(paste0(oldmetadat, collapse=''), newmetarow, collapse='')
})
} else {
datrows <- NULL
}
# Assemble tables
nx <- lapply(1:length(modnames.s), function(x) {
output[[paste0('modelmetadatatablesUI-', x)]] <- renderUI({
HTML({paste0('<table id="', paste0('modelmetadatatablefor', modnames.s[x]), '">',th, paste0(modrows[x], collapse=''), '</table>', collapse='')})
})
})
ny <- lapply(1:length(datnames.s), function(x) {
output[[paste0('datametadatatablesUI-', x)]] <- renderUI({
HTML({paste0('<table id="', paste0('datametadatatablefor', datnames.s[x]), '">',th, paste0(datrows[x], collapse=''), '</table>', collapse='')})
})
})
# Make summaries too
sx <- lapply(1:length(modnames.s), function(x) {
output[[paste0('modelsummaryUI-', x)]] <- renderPrint({
summary(selected.model@models[[x]]@model)
})
})
sy <- lapply(1:length(datnames.s), function(x) {
output[[paste0('datasummaryUI-', x)]] <- renderPrint({
summary(selected.model@data[[x]]@data)
})
})
}
}
})
# Customize the add model/data modal titles
output$addmodeltitle <- renderUI({
currentammlname <- values$selectedModelLib
if(!is.null(currentammlname)) {
p(style='text-align:center;font-weight:bold;', paste0('Add Model to ', currentammlname))
}
})
output$adddatatitle <- renderUI({
currentammlname <- values$selectedModelLib
if(!is.null(currentammlname)) {
p(style='text-align:center;font-weight:bold;', paste0('Add Data to ', currentammlname))
}
})
# Generate the modal titles in advance to avoid 0.2 second delay on load
outputOptions(output, 'addmodeltitle', suspendWhenHidden = FALSE)
outputOptions(output, 'adddatatitle', suspendWhenHidden = FALSE)
# Make changes to models/data metadata
observe({
# See which button was clicked
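# editedModelsData is a non-reactive global (assigned with <<-) holding the last
# seen value of each save button; comparing the current button value against it
# detects a fresh click without adding a reactive dependency (this reading of
# the intent is inferred from the code, not documented by the author)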
selected.model <- values$currentAMModelLib
if(!is.null(selected.model)) {
if(is(selected.model, 'amModelLib')) {
# Get model and data names/presence
modnames <- names(selected.model@models)
datnames <- names(selected.model@data)
nmodels <- length(modnames)
ndata <- length(datnames)
editedmodels <- editeddata <- NULL
if(!is.null(input$amModelLibSelectedElement)) {
if(nmodels) {
# sanitize names for js compatibility
modbuttonnames <- paste0('saveeditsto', gsub('\\.', '', modnames), 1:length(modnames))
modmetanames <- paste0('edit', gsub('\\.', '', modnames), 1:length(modnames), '-')
modcheckednames <- paste0('select', gsub('\\.', '', modnames), 1:length(modnames), '-')
addmodnames <- paste0('add', gsub('\\.', '', modnames), 1:length(modnames), '-')
# Compare add model save button values
editedmodels <- list()
for(x in 1:length(modbuttonnames)) {
if(!is.null(input[[modbuttonnames[x]]]) && !is.null(editedModelsData[['models']][x]) && !is.na(editedModelsData[['models']][x])) {
newmeta <- NULL
if(input[[modbuttonnames[x]]] != editedModelsData$models[x]) {
oldmeta <- modelMeta(selected.model, x)
newmeta <- lapply(1:length(oldmeta), function(y) {
input[[paste0(modmetanames[x], 'value-', y)]]
})
# set checked to null for removal
newmeta <- lapply(1:length(oldmeta), function(y) {
if(input[[paste0(modcheckednames[x], y)]]) {
NULL
} else {
newmeta[[y]]
}
})
names(newmeta) <- sapply(1:length(oldmeta), function(y) input[[paste0(modmetanames[x], 'name-', y)]])
# look for new metadata
if(input[[paste0(addmodnames[x], 'value')]] != '' && input[[paste0(addmodnames[x], 'name')]] != '') {
addedmeta <- list(input[[paste0(addmodnames[x], 'value')]])
names(addedmeta) <- input[[paste0(addmodnames[x], 'name')]]
} else {
addedmeta <- NULL
}
newmeta <- c(newmeta, addedmeta)
modelMeta(selected.model, x) <- newmeta
}
editedmodels[[x]] <- input[[modbuttonnames[x]]]
} else {
editedmodels[[x]] <- 0
}
}
if(!is.null(editedmodels)) names(editedmodels) <- modbuttonnames
}
if(ndata) {
# sanitize names for js compatibility
datbuttonnames <- paste0('saveeditsto', gsub('\\.', '', datnames), 1:length(datnames))
datmetanames <- paste0('edit', gsub('\\.', '', datnames), 1:length(datnames), '-')
datcheckednames <- paste0('select', gsub('\\.', '', datnames), 1:length(datnames), '-')
adddatnames <- paste0('add', gsub('\\.', '', datnames), 1:length(datnames), '-')
# Compare add data save button values
editeddata <- list()
for(x in 1:length(datbuttonnames)) {
if(!is.null(input[[datbuttonnames[x]]]) && !is.null(editedModelsData[['data']][x]) && !is.na(editedModelsData[['data']][x])) {
newmeta <- NULL
if(input[[datbuttonnames[x]]] != editedModelsData$data[x]) {
oldmeta <- dataMeta(selected.model, x)
newmeta <- lapply(1:length(oldmeta), function(y) {
input[[paste0(datmetanames[x], 'value-', y)]]
})
# set checked to null for removal
newmeta <- lapply(1:length(oldmeta), function(y) {
if(input[[paste0(datcheckednames[x], y)]]) {
NULL
} else {
newmeta[[y]]
}
})
names(newmeta) <- sapply(1:length(oldmeta), function(y) input[[paste0(datmetanames[x], 'name-', y)]])
# look for new metadata
if(input[[paste0(adddatnames[x], 'value')]] != '' && input[[paste0(adddatnames[x], 'name')]] != '') {
addedmeta <- list(input[[paste0(adddatnames[x], 'value')]])
names(addedmeta) <- input[[paste0(adddatnames[x], 'name')]]
} else {
addedmeta <- NULL
}
newmeta <- c(newmeta, addedmeta)
dataMeta(selected.model, x) <- newmeta
}
editeddata[[x]] <- input[[datbuttonnames[x]]]
} else {
editeddata[[x]] <- 0
}
}
if(!is.null(editeddata)) names(editeddata) <- datbuttonnames
}
}
}
editedModelsData <<- list(models=editedmodels, data=editeddata)
values$currentAMModelLib <- selected.model
}
})
# Select a model object to add to the ammodellib
output$addModelToAMMLUI <- renderUI({
choices <- getFromGlobalEnv()
if(!is.null(choices)) {
if(length(choices)) {
names(choices) <- choices
radioButtons('selectnewmodelobject', 'Select model', choices=choices)
} else {
div(
p(style='font-weight:bold;', 'Select model'),
p('No objects to list.')
)
}
}
})
# Add selected model to the amml
observe({
input$saveNewModel
isolate({
newmodname <- make.names(input$newModelName)
newmod <- input$selectnewmodelobject
if(!is.null(newmod) && input$newModelName != '') {
if(!is.na(newmod) && newmod != '') {
newmod <- .GlobalEnv[[newmod]]
newmod <- list(amModel(newmod))
names(newmod) <- newmodname
values$currentAMModelLib@models <- c(values$currentAMModelLib@models, newmod)
values$amModelLibForModals@models <- values$currentAMModelLib@models # currentAMModelLib already includes newmod
updateTextInput(session, 'newModelName', value='')
}
}
})
})
# Select a data object to add to the ammodellib
output$addDataToAMMLUI <- renderUI({
choices <- getFromGlobalEnv()
if(!is.null(choices)) {
if(length(choices)) {
names(choices) <- choices
radioButtons('selectnewdataobject', 'Select data object', choices=choices)
} else {
div(
p(style='font-weight:bold;', 'Select data object'),
p('No objects to list.')
)
}
}
})
# Add selected data to the amml
observe({
input$saveNewData
isolate({
newdatname <- make.names(input$newDataName)
newdat <- input$selectnewdataobject
if(!is.null(newdat) && input$newDataName != '') {
newdat <- .GlobalEnv[[newdat]]
newdat <- list(amData(newdat))
names(newdat) <- newdatname
values$currentAMModelLib@data <- c(values$currentAMModelLib@data, newdat)
values$amModelLibForModals@data <- values$currentAMModelLib@data # currentAMModelLib already includes newdat
updateTextInput(session, 'newDataName', value='')
}
})
})
# Search models or data
observe({
input$searchModel
isolate({
if(input$searchModelString != '') {
selectedModel <- values$currentAMModelLib
# selectedModel <- getSelectedModel()
if(!is.null(selectedModel)) {
if(input$searchModelComponent == '') {
search <- 'all'
updateSelectInput(session, 'searchModelComponent', selected='all')
} else {
search <- input$searchModelComponent
}
values$editedAMModelLib <- grepAMModelLib(input$searchModelString, selectedModel, search=search)
}
}
})
})
# Send a summary of the edited amModelLib to the screen
output$summaryOfEditedModel <- renderPrint({
editedModel <- values$editedAMModelLib
if(!is.null(editedModel)) {
summary(editedModel)
}
})
# Save the edited amModelLib to the global environment
observe({
input$sendAMMLToGlobalEnv
isolate({
editedModel <- values$editedAMModelLib
amml <- make.names(input$newObjectName)
if(amml != '' && !is.null(editedModel)) {
assign(amml, editedModel, pos='.GlobalEnv')
updateRadioButtons(session, 'selectAMModelLib', selected=input$selectAMModelLib)
# create an alert to notify
createTimedAlert(session, anchorId='saveAlert', alertId='saveSuccessAlert', title='', content=paste0('<p style="text-align:center;">"', amml, '" is now in your Global Environment.</p>'), timeout=2000, style='info', append=TRUE)
}
})
})
# Set the current amModelLib to the edited amModelLib
observe({
input$saveallchanges
isolate({
amml <- values$currentAMModelLib
if(!is.null(amml)) {
exinfo <- amml@info
newinfo <- NULL
if(length(exinfo)) {
rminfo <- sapply(1:length(exinfo), function(x) input[[paste0('selectCurrentInfo-', x)]])
exinfo <- exinfo[!rminfo]
}
if(!is.null(input[['addinfotocurrent-name']])) {
if(input[['addinfotocurrent-name']] != '' && input[['addinfotocurrent-value']] != '') {
newinfo <- list(input[['addinfotocurrent-value']])
names(newinfo) <- make.names(input[['addinfotocurrent-name']])
}
}
amml@info <- c(exinfo, newinfo)
if(input$amModelLibDescription != '') ammlDesc(amml) <- input$amModelLibDescription
values$editedAMModelLib <- amml
}
})
})
session$onSessionEnded(function() {
# Remove the saved console
saved_console <- ".RDuetConsole"
# remove the aux functions
rm(list=c('createTimedAlert', 'textareaInput'), pos='.GlobalEnv')
if (file.exists(saved_console)) unlink(saved_console)
stopApp()
})
}
)
| /scratch/gouwar.j/cran-all/cranData/AMModels/inst/modelMgr/server.R |
textareaInput <- function (
inputId,
label,
value = "",
rows=1,
width = NULL,
placeholder = NULL
) {
shiny::tags$div(
class = "form-group shiny-input-container",
style = if (!is.null(width)) paste0("width: ", shiny::validateCssUnit(width), ";"),
shiny::tags$label(label, `for`= inputId),
shiny::HTML(paste0('
<div class="form-group shiny-input-container" style="width: 100%;">
<textarea id="', inputId, '" value="', value, '" rows="', rows, '" cols="1" style="width:100%;">', value, '</textarea>
</div>'
))
# shiny::tags$textarea(id = inputId,
# value = value,
# placeholder = placeholder,
# rows=rows, cols=1, style="width:100%;"
# )
)
}
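# Example usage in a Shiny UI (a minimal sketch; the input id and label are
# hypothetical). Note that the 'placeholder' argument is accepted but not used
# by the implementation above:
# textareaInput('notes', 'Notes', value = '', rows = 3, width = '100%')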
| /scratch/gouwar.j/cran-all/cranData/AMModels/inst/modelMgr/textareaInput.R |
library(shiny)
library(shinyBS)
source('textareaInput.R')
shinyUI(
fluidPage(
includeCSS("www/css/custom.css"),
# headerPanel('AMModels Model Manager: organize your models and data'),
singleton(tags$head(
tags$title('Model Manager'),
tags$script(src="js/timedAlertCreate.js")
)),
# Modal for general information
bsModal(
id='generalInfoModal',
title=p(style='text-align:center;font-weight:bold;', 'AMModels Model Manager General Information'),
trigger='generalInfo',
div(style='min-height:400px;',
h3('Editing an amModelLib Object:'),
p('All additions to, deletions from, and edits to metadata made on the left side of the screen are temporary. To make them permanent:'),
div(style='padding:0.5em 1.5em 0.5em 0em;background-color:#f5f5f5;',
HTML({'
<ol>
<li>Click the create binding button below the search box.</li>
<li>Type a name for the new object below the summary that appeared in the gray box on the right side of the screen. <div style="margin-left:34px;background-color:#ffffcc;border-left:6px solid #ffeb3b;padding:0.5em 2em;"><p style="padding-top:0.5em;">To overwrite the current object, set the name identical to the object being edited.</p></div></li>
<li>Click the create binding button next to the name box. <div style="margin-left:34px;background-color:#ffffcc;border-left:6px solid #ffeb3b;padding:0.5em 2em;"><p style="padding-top:0.5em;">If you overwrote the object being edited, you may need to re-select the object from your Global Environment to verify the changes. You may also do this to revert changes before a binding has been created.</p></div></li>
</ol>'
})
)
),
size='large'
),
# The modals for editing models and data
uiOutput('renderedModals'),
# Modal to import data
bsModal(
id='uploadDataModal',
title=p(style='text-align:center;font-weight:bold;', 'Select Source and amModelLib'),
trigger='getUserModelLib',
div(style='min-height:400px;width:100%;',
# div(style='width:100%;',
column(4,
div(style='padding-right:1em;padding-top:1.75em;',
actionButton('createNewModel', 'New', icon=icon('file'), width='100%'),
bsTooltip(id='createNewModel', title='Create a new amModelLib object and then populate it with models and data from your Global Environment', placement = "top", trigger = "hover", options = list(container='body'))
)
),
column(4,
div(style='padding:1.75em 1em 0em 1em;',
actionButton('listGlobalEnv', 'List environment', icon=icon('list'), width='100%'),
bsTooltip(id='listGlobalEnv', title='List amModelLib objects in your Global Environment', placement = "top", trigger = "hover", options = list(container='body'))
)
),
column(4,
div(id='datauploadcontainer', style='padding-left:1em;',
fileInput('userDataUpload', 'Upload file', accept=c('.RData', '.rdata', '.Rdata', '.rda', '.Rda', '.RDA', '.RDS', '.rds', 'application/octet-stream'), width='100%')
),
bsTooltip(id='datauploadcontainer', title='Upload a .RData, .rda, or .RDS file containing amModelLib objects', placement = "top", trigger = "hover", options = list(container='body'))
),
div(style='clear:both;padding-top:1em',
# column(4, offset=4,
bsAlert('emptyDataSource'),
checkboxInput('filterForAMModelLibs', 'Show only amModelLib objects', value=TRUE),
uiOutput('selectModelUI'),
textOutput('selectedModelSummary', container=tags$pre)
# )
)
# )
),
size='large'
),
# Modal to add models
bsModal(
id='addModelToAMMLmodal',
title=uiOutput('addmodeltitle'),
trigger='addModelToAMML',
div(style='min-height:400px;',
column(6,
uiOutput('addModelToAMMLUI')
),
column(6,
HTML(paste0('
<span class="form-group shiny-input-container" style="margin-bottom:0px;width:30em;">
<label for="newModelName">Provide name</label>
<input style="margin-top:2px;
width:25em;
border-radius:4px;
height: 34px;
padding: 0px 12px 5px 12px;
font-size: 14px;
line-height: 1.42857143;
color: #555;
background-color: #fff;
background-image: none;
border: 1px solid #ccc;
box-shadow: inset 0 1px 1px rgba(0,0,0,.075);
transition: border-color ease-in-out .15s,box-shadow ease-in-out .15s;" id="newModelName" type="text" value="" placeholder="Model name"/>
<script>
$(document).keyup(function(event) {
if ($("#newModelName").is(":focus") && (event.keyCode == 13)) {
$("#saveNewModel").click();
}
});
</script>',
actionButton('saveNewModel', '', icon=icon('save')),
'</span>'
)),
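# The inline <script> above is a small jQuery pattern: pressing Enter while the
# name box has focus triggers a click on the adjacent save button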
p(style='padding-top:1em;', 'You can add metadata by clicking "Edit" in the main display.'),
HTML('<p>Names are coerced to valid R names. A valid name "consists of letters, numbers, and the dot and underscore characters, and starts with either a letter or a dot not followed by a number." Valid names may not be reserved words. For more information, see <a href="https://cran.r-project.org/doc/FAQ/R-FAQ.html#What-are-valid-names_003f" target="_blank">the R FAQ about names.</a></p>')
)
),
size='large'
),
# Modal to add data
bsModal(
id='addDataToAMMLmodal',
title=uiOutput('adddatatitle'),
trigger='addDataToAMML',
div(style='min-height:400px;overflow:hidden;',
column(6,
uiOutput('addDataToAMMLUI')
),
column(6,
HTML(paste0('
<span class="form-group shiny-input-container" style="margin-bottom:0px;width:30em;">
<label for="newDataName">Provide name</label>
<input style="margin-top:2px;
width:25em;
border-radius:4px;
height: 34px;
padding: 0px 12px 5px 12px;
font-size: 14px;
line-height: 1.42857143;
color: #555;
background-color: #fff;
background-image: none;
border: 1px solid #ccc;
box-shadow: inset 0 1px 1px rgba(0,0,0,.075);
transition: border-color ease-in-out .15s,box-shadow ease-in-out .15s;" id="newDataName" type="text" value="" placeholder="Data object name"/>
<script>
$(document).keyup(function(event) {
if ($("#newDataName").is(":focus") && (event.keyCode == 13)) {
$("#saveNewData").click();
}
});
</script>',
actionButton('saveNewData', '', icon=icon('save')),
'</span>'
)),
p(style='padding-top:1em;', 'You can add metadata by clicking "Edit" in the main display.'),
HTML('<p>Names are coerced to valid R names. Valid names "consists of letters, numbers, and the dot and underscore characters, and starts with either a letter or a dot not followed by a number." Valid names may not be reserved words. For more information, see <a href="https://cran.r-project.org/doc/FAQ/R-FAQ.html#What-are-valid-names_003f" target="_blank">the R FAQ about names.</a>')
)
),
size='large'
),
# Modal to create new amModelLib
bsModal(
id='createNewModelmodal',
title=p(style='text-align:center;font-weight:bold;', 'Create New amModelLib'),
trigger='createNewModel',
div(style='min-height:400px;padding:0em 1em;',
column(6,
textInput('newAMModelLibName', 'amModelLib Name', width='100%')
),
column(6,
p(style='padding:2.25em 0em 0em 1em;','Coerced to a valid R name if necessary')
),
div(style='clear:both;',
textareaInput('newAMModelLibDescription', 'Description', rows=3, width = '100%'),
p(style='font-weight:bold;', 'Add Info/Metadata'),
uiOutput('newAMModelLibInfoUI')
),
column(6,
# checkboxInput('newAMModelLibRemoteActive', 'Edit on close', value=TRUE),
#actionButton('newAMModelLibGo', 'Create amModelLib')
div(style='margin-top:17px;',
HTML('
<button id="newAMModelLibGo" type="button" class="btn btn-default action-button" data-dismiss="modal">Create amModelLib</button>'
),
tags$script('document.getElementById("listGlobalEnv").focus();')
)
)
),
size='large'
),
# Main page header
HTML(paste0('
<a id="generalInfo" href="#" class="action-button" style="color: #333;text-decoration:none;">',
h1(style='text-align:center;', 'AMModels Model Manager: organize your models and data'), '
</a>'
)),
bsTooltip(id='generalInfo', title='See general information about using the Model Manager', placement = "bottom", trigger = "hover", options = list(container='body')),
# h1(style='text-align:center;', 'AMModels Model Manager: organize your models and data'),
# Main page
div(style='width:100%;clear:both;',
div(class='boxed', style='padding:2em;margin-top:16px;',
column(6,
div(style='padding:0em 3em 1em;',
actionButton('getUserModelLib', 'Select amModelLib', icon=icon('check-square-o'), width='100%', class='btn-primary'),
bsTooltip(id='getUserModelLib', title='Create an amModelLib, upload an .RData or .rda, or select one from your Global Environment', placement = "top", trigger = "hover", options = list(container='body'))
),
# Report which modelLib is selected and where it came from
div(style='width:100%;overflow:hidden;',
span(style='float:left;',
uiOutput('reportSelectedModel'),
uiOutput('reportSelectedModelSource')
),
span(style='float:right;',
actionButton('revertchanges', '', icon=icon('undo'), style='margin-right:32px;border:transparent;'),
bsTooltip(id='revertchanges', title='Revert all changes', placement = "top", trigger = "hover", options = list(container='body')),
actionButton('saveallchanges', '', icon=icon('arrow-circle-right', class='fa-2x'), style='border:transparent;'),
bsTooltip(id='saveallchanges', title='Move all changes to the output preview pane', placement = "top", trigger = "hover", options = list(container='body'))
)
),
## These moved to a tab in the 'selectedModelContents' panel below
# div(style='clear:both;padding-bottom:1em;',
# textareaInput('amModelLibDescription', 'Description', rows=3, width = '100%'),
# p(style='font-weight:bold;', 'Info/Metadata'),
# uiOutput('amModelLibInfoUI')
# ),
# The gray search bar
div(style='background-color:#E0E0E0;width:100%;overflow:hidden;margin-bottom:8px;',
# span(style='float:left;padding-top:0.25em;',
# actionButton('generalInfo', '', icon=icon('info-circle'), style="background-color:#E0E0E0;border:transparent;color:#337ab7;"),
# bsTooltip(id='generalInfo', title='General information about using the Model Manager', placement = "top", trigger = "hover", options = list(container='body'))
# ),
span(style='float:right;',
# Search box and select input are coded manually
HTML(paste0('
<span class="form-group shiny-input-container" style="margin:2px 0px;width:12em;padding-right:0.3em;">
<input style="
margin:3px 0px 2px;
width:18em;
border-radius:4px;
height: 34px;
padding: 0px 12px 5px 12px;
font-size: 14px;
line-height: 1.42857143;
color: #555;
background-color: #fff;
background-image: none;
border: 1px solid #ccc;
box-shadow: inset 0 1px 1px rgba(0,0,0,.075);
transition: border-color ease-in-out .15s,box-shadow ease-in-out .15s;" id="searchModelString" type="text" value="" placeholder="Search expression"/>
<script>
$(document).keyup(function(event) {
if ($("#searchModelString").is(":focus") && (event.keyCode == 13)) {
$("#searchModel").click();
}
});
</script>
<span class="form-group shiny-input-container" style="float:right;padding-top:2px;width:10em;margin:1px 0px 2px;">
<select id="searchModelComponent" style="padding: 0px 12px 5px 12px;width:9em;height:34px;background-color:#fff;border-radius:4px;border:1px solid #ccc;">
<!--<option value="" selected>Search in:</option>-->
<option value="all" selected>in: all</option>
<option value="model">in: models</option>
<option value="data">in: data</option>
</select>
<script type="application/json" data-for="searchModelComponent" data-nonempty="">{}</script>
</span>',
actionButton('searchModel', '', icon=icon('search')),
'</span>'
)),
bsTooltip(id='searchModelString', title='Regular expressions are allowed', placement = "top", trigger = "hover", options = list(container='body')),
bsTooltip(id='searchModelComponent', title='Search can be done across all models and data, or confined to models or data.', placement = "top", trigger = "hover", options = list(container='body'))
)
),
# The tabs for editing modelLib
wellPanel(style='clear:both;width:100%;overflow:hidden;',
div(style='width:100%;',
span(style='float:right;padding-right:1em;',
p(style='font-weight:bold;display:inline-block;', 'With selected:'),
actionButton('deleteSelected', '', icon=icon('times')),
actionButton('newAMMLFromSelected', '', icon=icon('external-link')),
actionButton('getSelected', '', icon=icon('arrows-alt')),
actionButton('getAsListSelected', '', icon=icon('list'))
),
bsTooltip(id='deleteSelected', title='Delete', placement = "top", trigger = "hover", options = list(container='body')),
bsTooltip(id='newAMMLFromSelected', title='Add to new amModelLib', placement = "top", trigger = "hover", options = list(container='body')),
bsTooltip(id='getSelected', title='Extract to .GlobalEnv', placement = "top", trigger = "hover", options = list(container='body')),
bsTooltip(id='getAsListSelected', title='Extract as list to .GlobalEnv', placement = "top", trigger = "hover", options = list(container='body'))
),
uiOutput('selectedModelContents')
)
),
column(6,
div(style='padding:1.25em 0em 0em 1em;',
uiOutput('reportLastActionTarget'),
div(style='width:100%;min-height:50px;',
# Name & save box are coded manually
HTML(paste0('
<span id="saveAlert" class="sbs-alert" style="float:left;width:25%;"></span>
<span style="float:right;">
<span class="form-group shiny-input-container" style="margin-bottom:0px;width:12em;padding-left:8em;">
<input style="margin-top:2px;
width:18em;
border-radius:4px;
height: 34px;
padding: 0px 12px 5px 12px;
font-size: 14px;
line-height: 1.42857143;
color: #555;
background-color: #fff;
background-image: none;
border: 1px solid #ccc;
box-shadow: inset 0 1px 1px rgba(0,0,0,.075);
transition: border-color ease-in-out .15s,box-shadow ease-in-out .15s;" id="newObjectName" type="text" value="" placeholder="New library name"/>
<script>
$(document).keyup(function(event) {
if ($("#newObjectName").is(":focus") && (event.keyCode == 13)) {
$("#sendAMMLToGlobalEnv").click();
}
});
</script>',
actionButton('sendAMMLToGlobalEnv', '', icon=icon('save')),
'</span></span>'
)),
bsTooltip(id='newObjectName', title='Name must be provided for object summarized below to be moved to your Global Environment. Coerced to valid R name. Objects in your Global Environment must still be saved to disk using save() or saveRDS().', placement = "top", trigger = "hover", options = list(container='body'))
),
div(style='clear:both;',
p(style='font-weight:bold;', 'Output Preview:'),
textOutput('summaryOfEditedModel', container=tags$pre)
)
)
)
)
)
######## OBSERVER CONSOLE #########
# div(style="width:100%;",
# div(style="clear:both;text-align:center;",
# actionLink("console","server console")
# )
# )
)
)
| /scratch/gouwar.j/cran-all/cranData/AMModels/inst/modelMgr/ui.R |
# Man page for AMPLE
# ample.R
# Author: Finlay Scott (SPC) <[email protected]>
# Distributed under the terms of the GNU General Public License GPL (>= 3)
#' AMPLE: A package of Shiny apps that introduce Harvest Control Rules (HCR) for fisheries management.
#'
#' AMPLE provides three Shiny apps that introduce Harvest Control Rules (HCR) for fisheries management.
#' 'Introduction to HCRs' provides a simple overview to how HCRs work. Users are able to select their own HCR and
#' step through its performance, year by year. Biological variability and estimation uncertainty are introduced.
#' 'Introduction to indicators' builds on the previous app and introduces the idea of using performance indicators
#' to evaluate HCR performance.
#' 'Comparing performance' allows multiple HCRs to be created and tested, and their performance compared so that the
#' preferred HCR can be selected.
#'
#' Harvest Control Rules are used as part of a fishery harvest strategy.
#' This package was put together as part of capacity building efforts by the Pacific Community (SPC) to develop fishery harvest strategies for tuna stocks in the western and central Pacific Ocean (WCPO), working with the Western and Central Pacific Fisheries Commission (WCPFC).
#' For more information on tuna harvest strategies in the WCPO please see LINKs.
#'
#' @section AMPLE functions:
#' To launch the apps, use the functions: \code{intro_hcr()}, \code{measuring_performance()} and \code{comparing_performance()}.
#'
#' @section Acknowledgement:
#' With thanks to Andre Punt.
#' Also thanks to Winston Chang for help with the R6 class / Shiny reactivity.
#'
#' @docType package
#' @rdname ample
#' @name AMPLE
#' @keywords internal
"_PACKAGE"
| /scratch/gouwar.j/cran-all/cranData/AMPLE/R/ample.R |
# The Comparing Performance app
# comparing_performance.R
# Author: Finlay Scott (SPC) <[email protected]>
# Soundtrack: Fire by The Bug
# Distributed under the terms of the GNU General Public License GPL (>= 3)
#' 'Comparing HCR Performance' app launcher
#'
#' Launches the Comparing Performance Shiny app.
#' See the 'Information' tab in the app for more information.
#' Also see the package vignette (\code{vignette("comparing_performance", package="AMPLE")}) for a tutorial.
#' @param ... Not used
#' @examples
#' \dontrun{comparing_performance()}
#' @export
comparing_performance <- function(...){
# User interface ----
ui <- navbarPage(
title="Comparing HCR performance",
tabPanel(title = "HCR selection",
shinyjs::useShinyjs(), # So we can enable / disable buttons
sidebarLayout(
sidebar_setup(
# HCR options
mpParamsSetterUI("mpparams", mp_visible=c("Threshold catch", "Constant catch")),# "Threshold effort", "Constant effort")),
br(), # Could add br() automatically to side bar set up to separate each component?
actionButton("project", "Project", icon=icon("fish")),
br(),
br(),
textInput(inputId="user_hcr_name", label="HCR Display Name (optional)", value=as.character(NA), placeholder="Name of HCR", width='50%'),
actionButton("add_basket", "Add HCR to basket", icon=icon("shopping-basket")),
br(),
# How many HCRs do we have in the store
textOutput("print_number_hcrs"),
br(),
# This should reset everything - empty the stores
actionButton("empty_basket", "Empty basket"),
br(),
br(),
shinyscreenshot::screenshotButton(label="Take a screenshot", filename="comp_perf", scale=2)
), # End sidebar set up
mainPanel(
column(6,
fluidRow(
plotOutput("plot_catch", height="300px"),
),
fluidRow(
plotOutput("plot_biomass", height="300px"),
),
fluidRow(
plotOutput("plot_cpue", height="300px")
)
),
column(6,
fluidRow(plotOutput("plot_hcr")),
fluidRow(tableOutput("pi_table"))
)
) # End of mainPanel
) # End of sidebarLayout
), # End of front page tabPanel
tabPanel(title = "Compare results",
# Set up sub tabs
sidebarLayout(
sidebar_setup(
# PI choice
checkboxGroupInput(inputId = "pi_choice", label="PI selection", inline=TRUE,
# character(0) means no choice is available - updated in server function
choices = character(0)),
br(),
# Dynamic HCR choice
checkboxGroupInput(inputId = "hcr_choice", label="HCR selection",
# character(0) means no choice is available - updated in server function
choiceNames = character(0), choiceValues = character(0)),
br(),
shinyscreenshot::screenshotButton(label="Take a screenshot", filename="comp_perf", scale=2)
),# End of sidebar_setup()
mainPanel(
tabsetPanel(id="comparisontabs",
tabPanel(title="Performance indicators - bar charts", value="PImeds",
column(12, fluidRow(
tags$span(title="Bar plot of the median values of the performance indicators over the three time periods. Note that the lower the PI for relative effort is, the better the HCR is thought to be performing. Also, a high value for SB/SBF=0 may not indicate that the HCR is performing well - it depends on your objectives.",
plotOutput("plot_bar_comparison"))
))
), # End of median bar chart tab panel
tabPanel(title="Performance indicators - box plots", value="PIbox",
column(12, fluidRow(
tags$span(title="Box plot of the values of the performance indicators over the three time periods. Note that the lower the PI for relative effort is, the better the HCR is thought to be performing. Also, a high value for SB/SBF=0 may not indicate that the HCR is performing well - it depends on your objectives. The box contains the 20-80 percentiles, the tails the 5-95 percentiles.",
plotOutput("plot_box_comparison"))
))
), # End of box plot panel
tabPanel(title="Performance indicators - tables", value="PItable",
column(12, fluidRow(
br(),
"Performance indicators in the short-, medium- and long-term. The value is the median, the values in the brackets are the 5 and 95 percentiles (i.e. cover 90% of the range of values).",
br(),
tags$span(title="Peformance indicators in the short-term.", tableOutput("pi_table_all_hcrs_short")),
tags$span(title="Peformance indicators in the medium-term.", tableOutput("pi_table_all_hcrs_medium")),
tags$span(title="Peformance indicators in the long-term.", tableOutput("pi_table_all_hcrs_long"))
))
) # End of PI table panel
) # End of tabsetPanel
) # End of mainPanel
) # End of sidebarLayout
), # End of Compare results tabPanel
tabPanel(title = "Information",
sidebarLayout(
sidebar_setup(
br()
),
mainPanel(
# Should work with devtools and after installation
shiny::includeMarkdown(system.file("introtext", "comparing_performance.md", package="AMPLE")),
# For deployment premake the vignette and drop it into the inst/www folder
h1("Tutorial"),
p("A more detailed tutorial can be found at this link:"),
a("Tutorial (html version)", target="_blank", href="img/comparing_performance.html"),
br(),
br()
) # End of mainPanel
) # End of sidebarLayout
), # End of Settings tabPanel
tabPanel(title = "Settings",
sidebarLayout(
sidebar_setup(
),
mainPanel(
fluidRow(
br(),
# Life history parameters and projection options
stockParamsSetterUI("stock"),
br(),
# Number of iterations
numericInput("niters", "Number of iterations", value = 250, min=10, max=1000, step=10),
# Stochasticity module
stochParamsSetterUI("stoch", init_biol_sigma=0.2, init_est_sigma=0.0, init_est_bias=0.0, show_var=TRUE),
)
) # End of mainPanel
) # End of sidebarLayout
), # End of Settings tabPanel
tabPanel(title = "About",
sidebarLayout(
sidebar_setup(
ample_maintainer_and_licence()
),
mainPanel(
spc_about()
) # End of mainPanel
) # End of sidebarLayout
) # End of About tabPanel
) # End of navbarPage
#--------------------------------------------------------------------------
# Start of server function
server <- function(input, output,session) {
pi_quantiles <- c(0.05, 0.10, 0.90, 0.95)
# Evaluating reactiveExpr can only be done inside a reactive consumer (like an observer or reactive)
get_stoch_params <- stochParamsSetterServer("stoch")
get_mp_params <- mpParamsSetterServer("mpparams", get_stoch_params)
get_stock_params <- stockParamsSetterServer("stock", get_stoch_params)
hcr_no <- reactiveVal(0)
all_pis <- reactiveVal(data.frame())
# Make a normal stock that can be used outside of shiny purposes
stock_noreactive <- Stock$new(stock_params = isolate(get_stock_params()), mp_params = isolate(get_mp_params()), niters = isolate(input$niters))
# Make a reactive version by calling the reactive() method (or do it all at once)
stock <- stock_noreactive$reactive()
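# A note on this pattern (inferred from the code and the R6/reactivity
# acknowledgement in ample.R): stock() is read like a reactive expression, and
# mutating methods on the underlying R6 object (e.g. reset(), project()) are
# expected to invalidate any reactive consumers that read stock()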
# Update the available PI checkboxes. The choices don't really change dynamically;
# populating them here just saves maintaining the same list in both the UI at the
# top AND in the PI calculation function.
# Get the available PI names generated by the performance_indicators() method
# (some, e.g. F/FMSY, could be dropped from the list below if wanted)
all_pi_names <- unique(stock_noreactive$performance_indicators()$pi)
pi_choices <- all_pi_names # Could drop some here if you want
updateCheckboxGroupInput(session, "pi_choice",
choices = pi_choices,
selected = pi_choices
)
# Reset observer
# What can trigger the reset:
# - changing stock and MP params
# - emptying the basket
observe({
#message("In stock reset observer")
input$confirm_empty
stock_params <- get_stock_params()
mp_params <- get_mp_params()
niters <- input$niters
# Use isolate else these are triggered when they become invalid (i.e. after project)
isolate(stock()$reset(stock_params = stock_params, mp_params = mp_params, niters=niters))
# Turn off add_basket button after stock has been cleared
# Activates again when project has been called
shinyjs::disable(id = "add_basket")
shinyjs::disable(id = "user_hcr_name")
}, label="stock_resetter")
# Are you sure you want to empty the basket?
observeEvent(input$empty_basket, {
showModal(modalDialog(
title="Are you sure you want to empty the basket?",
footer = tagList(actionButton("confirm_empty", "I am sure - empty it!"), modalButton("Ooops, no. Keep my beautiful HCRs")),
fade=FALSE, easyClose=FALSE
))
})
# Remove the modal dialogue after confirming to empty the basket
observeEvent(input$confirm_empty,
{
#message("Confirming empty basket")
removeModal()
})
# Reset the basket if user confirms or if stock_params are updated.
observe({
#message("In PIs reset observer")
input$confirm_empty # After confirming that to empty the basket
stock_params <- get_stock_params()
isolate(hcr_no(0))
isolate(all_pis(data.frame()))
}, label="pis_resetter")
# After projecting, allow users to add the result to the basket with a name
observeEvent(input$project, {
timesteps <- c(stock()$last_historical_timestep+1,dim(stock()$biomass)[2])
stock()$project(timesteps=timesteps, mp_params=get_mp_params())
shinyjs::enable("add_basket")
shinyjs::enable("user_hcr_name")
})
# If you like the results, add them to the basket
observeEvent(input$add_basket, {
# Update the numbers of HCRs in basket
hcr_no(hcr_no() + 1)
# Extract and save the performance indicators for each HCR. The full time
# series are not stored: plotting the time series of many HCRs together
# quickly becomes unreadable.
pis <- stock()$performance_indicators(quantiles=pi_quantiles)
# Reshape here to make wide - spread out the quantiles
pis <- reshape(data = pis, direction = "wide", timevar = "quantiles", idvar = c("pi", "time_period"), v.names = "value", sep="_")
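# e.g. long rows of (pi, time_period, quantiles, value) become one row per
# (pi, time_period) with columns value_0.05, value_0.1, value_0.9 and
# value_0.95 (the column names follow from pi_quantiles above and sep="_")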
# Add in column of HCR number
pis$hcr_no <- hcr_no()
# Also HCR name and details
hcr_ref <- input$user_hcr_name
# If no name given by user make one
if(hcr_ref== "" || is.na(hcr_ref)){
hcr_ref <- paste("HCR ", hcr_no(), sep="") # Used for legends
} else {
hcr_ref <- paste(hcr_no(), hcr_ref, sep=" ")
# Clear out any user defined name
updateTextInput(session,"user_hcr_name",value="")
}
pis$hcr_ref <- hcr_ref
pis$hcr_details <- paste(hcr_ref, ".<br>",get_mp_params()$name,sep="") # Use <br> for html linebreak
all_pis(rbind(all_pis(), pis))
# You can't store again until you project again
shinyjs::disable("add_basket")
shinyjs::disable("user_hcr_name")
})
# Update the HCR selector when all_pis() gets updated (after adding to basket, or emptying basket)
observeEvent(all_pis(),{
# If no HCRs in list
if(nrow(all_pis()) < 1){
hcr_details <- character(0)
hcr_nos <- character(0)
selected <- NULL
} else {
# Three columns of HCR info:
# hcr_no (numeric 1 - X),
# hcr_ref (user name - use for labelling plots), hcr_details (used for HCR selector)
# hcr_details (Used for labelling the selector)
hcr_details <- unique(as.character(all_pis()$hcr_details))
# Need to turn details into HTML so we can use the <br> as a line break
hcr_details <- lapply(hcr_details, HTML) # To use <br> line break
hcr_nos <- unique(all_pis()$hcr_no)
selected <- c(input$hcr_choice, max(hcr_nos))
}
# Figure out which HCRs are selected and update the HCR choices in the selector
updateCheckboxGroupInput(session, "hcr_choice",
selected = selected,
choiceNames = hcr_details,
choiceValues = hcr_nos
)
})
#---------------------------------------------------------------
# Output stuff
max_spaghetti_iters <- 50
quantiles <- c(0.05, 0.95)
lhs_mar <- c(5.1,4.1,1,2.1) # Margins for the LHS plots - reduced top
cex_axis <- 1.1
cex_lab <- 1.3
output$print_number_hcrs <- renderText({
return(paste("Number of HCRs in basket: ", hcr_no(), sep=""))
})
output$pi_table <- renderTable({
# Is there future data in the stock? Ugly check
if(is.na(stock()$catch[1,stock()$last_historical_timestep+1])){
return(NULL)
}
stock()$pi_table(quantiles=quantiles)
},
bordered = TRUE,
rownames = TRUE,
caption= "Performance indicators",
auto=TRUE)
output$plot_catch <- renderPlot({
# Reset par on exit (par() returns the previous settings when they are changed)
opar <- par(mar=lhs_mar)
on.exit(par(opar))
plot_catch_iters(stock=stock(), mp_params=get_mp_params(), max_spaghetti_iters = max_spaghetti_iters, quantiles=quantiles, show_time_periods = TRUE, cex.axis=cex_axis, cex.lab=cex_lab)
})
output$plot_biomass <- renderPlot({
# Reset par on exit (par() returns the previous settings when they are changed)
opar <- par(mar=lhs_mar)
on.exit(par(opar))
plot_biomass(stock=stock(), mp_params=get_mp_params(), ylab="True SB/SBF=0", max_spaghetti_iters=max_spaghetti_iters, quantiles=quantiles, show_time_periods = TRUE, cex.axis=cex_axis, cex.lab=cex_lab)
})
output$plot_cpue <- renderPlot({
# Par reset
opar <- par(mar=lhs_mar)
on.exit(par(opar))
plot_cpue(stock=stock(), mp_params=get_mp_params(), max_spaghetti_iters=max_spaghetti_iters, quantiles=quantiles, show_time_periods = TRUE, cex.axis=cex_axis, cex.lab=cex_lab)
})
output$plot_hcr <- renderPlot({
# Par reset
opar <- par(mar=lhs_mar)
on.exit(par(opar))
plot_model_based_hcr(stock=stock(), mp_params=get_mp_params(), iter=1:input$niters, cex.axis=1.1, cex.lab=1.3)
})
# Plotting the comparison bar and box plots
# Comparison plots
no_cols <- 2
height_per_pi <- 200 # Could be adjustable depending on screen size?
# Alternative: fix the total height and divide it between the rows
total_height <- 800
max_height_per_row <- total_height / 2
plot_barbox_comparison <- function(plot_type, quantiles=NULL, no_cols=2){
out <- renderPlot({
req(input$hcr_choice)
req(input$pi_choice)
dat <- all_pis()
# Subset out the PIs
dat <- subset(dat, pi %in% input$pi_choice)
# Pass all HCRs in, as we need to keep the colours consistent
hcr_nos <- input$hcr_choice
barboxplot(dat, hcr_nos, plot_type=plot_type, quantiles=quantiles, no_cols=no_cols)
},
height=function(){
# Each row the same height as PIs drop out
#return(max(height_per_pi*1.5, (height_per_pi * ceiling(length(input$pi_choice) / no_cols))))
# Fill the available vertical space
npis <- max(length(input$pi_choice), 1)
nrows <- ceiling(npis / no_cols)
height_per_row <- min(total_height / nrows, max_height_per_row)
return(height_per_row * nrows)
})
return(out)
}
output$plot_bar_comparison <- plot_barbox_comparison(plot_type="median_bar", no_cols=no_cols)
output$plot_box_comparison <- plot_barbox_comparison(plot_type="box", quantiles = pi_quantiles, no_cols=no_cols)
# Fix labeling for table captions - use the non-reactive version of the stock
time_periods <- stock_noreactive$time_periods()
time_period_text <- lapply(strsplit(names(time_periods), " "), function(x) {
paste(x[1], "-term ", x[2], sep="")
})
# period is 1, 2, or 3 - for ST, MT, LT
render_pi_table_all_hcrs <- function(period){
out <- renderTable({
req(input$hcr_choice)
req(input$pi_choice)
dat <- all_pis()
# Which time period
tp <- sort(unique(dat$time_period))[period] # Short term is 1, MT = 2 etc
# Subset out the PIs
#dat <- subset(dat, pi %in% input$pi_choice & hcr_no %in% input$hcr_choice & time_period == tp)
# Remove use of subset to avoid global variable warning
dat <- dat[dat$pi %in% input$pi_choice & dat$hcr_no %in% input$hcr_choice & dat$time_period == tp,]
tab <- pi_table_all_hcrs(pis = dat, quantiles = quantiles)
return(tab)
}, caption = time_period_text[[period]])
return(out)
}
output$pi_table_all_hcrs_short <- render_pi_table_all_hcrs(period = 1)
output$pi_table_all_hcrs_medium <- render_pi_table_all_hcrs(period = 2)
output$pi_table_all_hcrs_long <- render_pi_table_all_hcrs(period = 3)
} # End of server function
# Run the app
shinyApp(ui, server)
}
# Plotting functions that use the ggplot2 package.
# Used in the Comparing Performance app.
# Note that none of these are exported and no man pages are written.
# They are documented for internal purposes only.
# ggplot_funcs.R
# Author: Finlay Scott (SPC) <[email protected]>
# Soundtrack: Hail Be You Sovereigns, Lief and Dear: Dark Britannica III by Various
# Distributed under the terms of the GNU General Public License GPL (>= 3)
#' Default palette for HCRs
#'
#' Get the default palette for the HCR colours
#'
#' @param hcr_names The names of all of the HCRs
#' @param chosen_hcr_names The names of only the chosen HCRs
#' @noRd
#' @keywords internal
get_hcr_colours <- function(hcr_names, chosen_hcr_names){
allcols <- grDevices::colorRampPalette(RColorBrewer::brewer.pal(12,"Paired"))(length(hcr_names))
names(allcols) <- hcr_names
hcrcols <- allcols[chosen_hcr_names]
return(hcrcols)
}
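# Illustrative example (hypothetical HCR names; kept as a comment since this is
# package source). Colours are anchored to the full set of names, so a chosen
# subset keeps the same colours it had in earlier plots:
# get_hcr_colours(hcr_names = paste("HCR", 1:4),
#                 chosen_hcr_names = c("HCR 1", "HCR 3"))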
#' barboxplot
#'
#' barboxplot() plots either bar charts of the median values or box plots of the quantiles of the performance indicators for each HCR.
#'
#' @param dat The data frame of performance indicators.
#' @param hcr_nos The numbers of the HCRs to plot.
#' @param plot_type Either median_bar or box.
#' @param no_cols Number of column in each row. Default is 2.
#'
#' @return A ggplot2 plot object.
#' @importFrom ggplot2 "ggplot" "aes_string" "geom_bar" "theme" "element_blank" "xlab" "facet_wrap" "theme_bw" "geom_boxplot" "scale_fill_manual" "ylab" "element_text"
#' @noRd
#' @keywords internal
barboxplot <- function(dat, hcr_nos, plot_type="median_bar", quantiles=c(0.05, 0.10, 0.90, 0.95), no_cols=2){
all_hcr_names <- unique(dat$hcr_ref)
#dat <- subset(dat, hcr_no %in% hcr_nos)
dat <- dat[dat$hcr_no %in% hcr_nos,] # Trying to remove weird warning about hcr_no being a global variable
hcr_cols <- get_hcr_colours(hcr_names=all_hcr_names, chosen_hcr_names=unique(dat$hcr_ref))
if (plot_type=="median_bar"){
p <- ggplot(dat, aes_string(x="time_period", y="value_0.5", fill="hcr_ref"))
p <- p + geom_bar(stat="identity", position="dodge", colour="black", width=0.7)
p <- p + ylab("Average value")
}
if (plot_type=="box"){
quantiles <- sort(quantiles)
quantiles_text <- paste("value_", quantiles, sep="")
ymin <- quantiles_text[1]
ymax <- quantiles_text[4]
lower <- quantiles_text[2]
upper <- quantiles_text[3]
p <- ggplot(dat, aes_string(x="time_period"))
p <- p + geom_boxplot(aes_string(ymin=ymin, ymax=ymax, lower=lower, upper=upper, middle="value_0.5", fill="hcr_ref"), stat="identity")
}
p <- p + xlab("Time period")
p <- p + scale_fill_manual(values=hcr_cols)
p <- p + facet_wrap(~pi, scales="free", ncol=no_cols)
p <- p + theme_bw()
p <- p + theme(legend.position="bottom", legend.title=element_blank())
# Size of labels etc
text_size <- 14
p <- p + theme(axis.text=element_text(size=text_size), axis.title=element_text(size=text_size), strip.text=element_text(size=text_size), legend.text=element_text(size=text_size))
return(p)
}
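# Illustrative sketch (hypothetical data, not run). barboxplot() expects the
# wide-format quantile columns built when an HCR is added to the basket in the
# Comparing Performance app:
# dat <- data.frame(hcr_no = 1, hcr_ref = "1 My HCR", pi = "Catch",
#                   time_period = c("Short", "Medium", "Long"),
#                   value_0.05 = 80, value_0.1 = 90, value_0.5 = 100,
#                   value_0.9 = 110, value_0.95 = 120)
# barboxplot(dat, hcr_nos = 1, plot_type = "box")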
# Functions for handling the HCR
# hcr_funcs.R
# yr is the timestep that is used to generate the HCR IP in the current year
# e.g. look at biomass / k in year yr
# DO WE NEED STOCK PARAMS HERE?
# What do the mp_analysis functions look like?
# "empirical_cpue_slope_ip" or "assessment"
# Only "assessment" implemented here. Empirical still to come.
# What parameters do these need?
# MP analysis functions
# For empirical HCR
# Analysis functions:
# Called by get_hcr_ip()
#' Get the input to the HCR
#'
#' Run the MP analyses function to generate the input to the HCR i.e. observed stock status.
#' For example, estimated biomass from an assessment.
#' @param stock The stock object
#' @param mp_params The HCR / management procedure parameters used to evaluate the HCR (as a list).
#' @param yr The time step of the true stock status used to generate the HCR IP.
#' @param ... Other arguments, including iters
#' @import shiny
#' @export
get_hcr_ip <- function(stock, mp_params, yr, ...){
# Check for NA in mp_analysis, if so return NA
if (is.na(mp_params$mp_analysis)){
hcr_ip <- NA
}
# If not NA call the analysis function
else {
# Call HCR with correct inputs:
# What are stock_params here?
hcr_ip <- do.call(mp_params$mp_analysis, args=list(stock=stock, mp_params=mp_params, yr=yr, ...))
}
return(hcr_ip)
}
# This needs testing!
# Analysis function - called by get_hcr_ip() to fill the hcr_ip slot
# This is then used by the get_hcr_op() function to fill the hcr_op slot
# That is then used by project() to update catch and effort in the next time step
# assessment: a stock assessment that estimates biomass / k
# The analysis assessment function
# Called by get_hcr_ip()
#' assessment
#'
#' Function used by \code{get_hcr_ip()} to generate input data for an assessment based HCR.
#' The input to the HCR is depletion (i.e. Biomass / K).
#' @param stock The stock object
#' @param mp_params A named list of MP parameters (with est_sigma and est_bias elements)
#' @param yr The timestep that the biomass is taken from.
#' @param iters Numeric vector of iters. Default is all of them.
#' @export
assessment <- function(stock, mp_params, yr, iters = 1:dim(stock$biomass)[1]){
# Return observed depletion
true_ip <- stock$biomass[iters,yr] / stock$k
est_ip <- estimation_error(input = true_ip, sigma = mp_params$est_sigma, bias = mp_params$est_bias)
# Max depletion is 1.0
est_ip <- pmin(est_ip, 1.0)
return(est_ip)
}
#' estimation_error
#'
#' Estimation error applied to the 'true' stock status to generate an 'observed' stock status used in the HCR.
#' The error is a combination of bias and lognormally distributed noise.
#' @param input A vector of the 'true' stock status
#' @param sigma Observation error standard deviation
#' @param bias Observation error bias
#' @export
estimation_error <- function(input, sigma, bias){
# Transform sigma - it means we can set the parameter on a similar scale to the biol var sigma
sigma <- sigma / 5
est_variability <- rlnorm(length(input),meanlog=0,sdlog=sigma)
output <- input * est_variability * (1 + bias)
return(output)
}
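# Illustrative example (hypothetical values, not run): a 'true' depletion of
# 0.5 observed with noise (sigma = 0.3, which is scaled down internally) and a
# +10% bias:
# estimation_error(input = rep(0.5, 5), sigma = 0.3, bias = 0.1)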
#' Evaluates the harvest control rule.
#'
#' Evaluates the harvest control rule in a single year (timestep).
#' @param stock The stock object
#' @param mp_params The HCR / management procedure parameters used to evaluate the HCR (as a list).
#' @param yr The timestep.
#' @param iters A numeric vector of iters.
#' @return A vector of outputs from the HCR.
#' @export
get_hcr_op <- function(stock, mp_params, yr, iters=1:dim(stock$biomass)[1]){
# Shape is not NA
# Call HCR with the lagged input
#hcr_op <- do.call(mp_params$hcr_shape, args=list(stock=stock, mp_params=mp_params, stock_params=stock_params, yr=yr))
hcr_op <- do.call(mp_params$hcr_shape, args=list(input=stock$hcr_ip[iters,yr,drop=FALSE], mp_params=mp_params, yr=yr))
return(hcr_op)
}
# HCR shape functions
# Called by get_hcr_op()
#' Evaluates a threshold harvest control rule
#'
#' Evaluates a threshold (i.e. hockey stick) harvest control rule.
#' Used by the \code{hcr_op} function.
#' @param input A vector of the 'true' stock status
#' @param mp_params The HCR / management procedure parameters used to evaluate the HCR (as a list).
#' @param ... Unused
#' @return A vector of the same dimension as the input.
#' @export
threshold <- function(input, mp_params, ...){
output <- array(NA, dim=dim(input))
# Below lim
output[input <= mp_params$params["lim"]] <- mp_params$params["min"]
# On the slope
grad <- (mp_params$params["max"] - mp_params$params["min"]) / (mp_params$params["elbow"] - mp_params$params["lim"])
on_slope <- (input > mp_params$params["lim"]) & (input <= mp_params$params["elbow"])
output[on_slope] <- ((input - mp_params$params["lim"]) * grad + mp_params$params["min"])[on_slope]
# Past the elbow
output[input > mp_params$params["elbow"]] <- mp_params$params["max"]
return(output)
}
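# Illustrative sketch (hypothetical parameter values, not run): evaluating a
# hockey-stick HCR over a range of estimated depletions. params must be a named
# vector with lim, elbow, min and max elements:
# mp <- list(params = c(lim = 0.2, elbow = 0.5, min = 10, max = 140))
# threshold(input = array(seq(0, 1, by = 0.1), dim = c(1, 11)), mp_params = mp)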
#' Evaluates a constant harvest control rule
#'
#' Evaluates a constant harvest control rule, i.e. one that ignores the stock status and just returns the constant level (catch or effort).
#' Used by the \code{hcr_op} function.
#' @param mp_params The HCR / management procedure parameters used to evaluate the HCR (as a list).
#' @param ... Unused
#' @export
constant <- function(mp_params, ...){
return(mp_params$params["constant_level"])
}
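# Illustrative example (hypothetical level, not run): the stock status input is
# ignored and the constant level is always returned:
# constant(mp_params = list(params = c(constant_level = 50)))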
# The Introduction to HCRs app
# intro_hcr.R
# Author: Finlay Scott (SPC) <[email protected]>
# Soundtrack: Disintegration Dubs by G36 vs JK Flesh
# Distributed under the terms of the GNU General Public License GPL (>= 3)
# Note use of spurious importFrom to remove spurious note in R CMD check --as-cran
#' Introduction to HCRs app launcher
#'
#' Launches the introduction to HCRs Shiny app.
#' See the 'Information' tab in the app for more information.
#' Also see the package vignette (\code{vignette("intro_hcr", package="AMPLE")}) for a tutorial.
#' @param ... Not used
#' @importFrom markdown "renderMarkdown"
#' @examples
#' \dontrun{intro_hcr()}
#' @export
intro_hcr <- function(...){
# User interface ----
ui <- navbarPage(
title="Introduction to HCRs",
tabPanel(title = "What is a Harvest Control Rule?",
# Initiate sidebarLayout - fixed sidebar for all tabs (but we can turn the interaction options on and off)
sidebarLayout(
sidebar_setup(
# HCR options
mpParamsSetterUI("mpparams", mp_visible=c("Threshold catch", "Constant catch")),# "Threshold effort", "Constant effort")),
br(),
# Buttons
tags$span(title="Go forward one year",
actionButton("advance", "Advance")),
tags$span(title="Reset current projection",
actionButton("reset", "Reset")),
br(),
# Stochasticity module
stochParamsSetterUI("stoch", init_biol_sigma=0.0, init_est_sigma=0.0, init_est_bias=0.0, show_var=FALSE),
br(),
shinyscreenshot::screenshotButton(label="Take a screenshot", filename="intro_hcr", scale=2)
),
mainPanel(
# Set up the main panel
# Main plot for the Intro to HCR app
# 2 x 2 panel
# Catch | HCR
# ----------------
# B/K | connecting arrow
fluidRow(
column(6,
tags$span(title="Plot of the total catch. The blue, dashed horizontal line is next year's catch limit that has been set by the HCR. The grey, dashed horizontal lines are the catch limits that were set by the HCR in the past.",
plotOutput("plot_catch", width="auto"))
),
column(6,
tags$span(title="The HCR. The blue, dashed vertical line shows the current estimated biomass that is used as the input. The blue, dashed horizontal line shows the resulting catch limit that will be set for the following year",
plotOutput("plot_hcr", width="auto"))
)
), # End of fluid Row
fluidRow(
column(6,
tags$span(title="The biomass of the stock (scaled by the unfished biomass). When the estimation variability options are switched on, the black line is the 'true' biomass and the blue line is the 'estimated' biomass. The HCR uses the estimated biomass for the input.",
plotOutput("plot_biomass", width="auto"))
),
column(6,
tags$span(title="The current estimated biomass is used as the input to the HCR.",
plotOutput("plot_arrow", width="auto"))
)
)
) # End of mainPanel
) # End of sidebarLayout
), # End of main Panel
tabPanel(title = "Information",
sidebarLayout(
sidebar_setup(
br()
),
mainPanel(
# Should work with devtools and after installation
shiny::includeMarkdown(system.file("introtext", "intro_hcr.md", package="AMPLE")),
h1("Tutorial"),
p("A more detailed tutorial can be found at this link:"),
# For deployment premake the vignette and drop it into the inst/www folder
a("Tutorial (html version)", target="_blank", href="img/intro_hcr.html"),
# For CRAN submission
br(),
br()
) # End of mainPanel
) # End of sidebarLayout
), # End of Settings tabPanel
tabPanel(title = "Settings",
sidebarLayout(
sidebar_setup(
br()
),
mainPanel(
# Life history parameters projection options
stockParamsSetterUI("stock")
) # End of mainPanel
) # End of sidebarLayout
), # End of Settings tabPanel
tabPanel(title = "About",
sidebarLayout(
sidebar_setup(
ample_maintainer_and_licence()
),
mainPanel(
spc_about()
) # End of mainPanel
) # End of sidebarLayout
) # End of About tabPanel
) # End of navbarPage
#--------------------------------------------------------------------------
# Start of server function
server <- function(input, output,session) {
# Notes for myself:
# get_mp_params is a "reactiveExpr"
# Use: get_mp_params() to evaluate the reactiveExpr (evaluates switcheroo function and returns the parameters)
# Evaluating reactiveExpr can only be done inside a reactive consumer (like an observer or reactive)
get_stoch_params <- stochParamsSetterServer("stoch")
get_mp_params <- mpParamsSetterServer("mpparams", get_stoch_params) # Pass in stoch params so they're together
get_stock_params <- stockParamsSetterServer("stock", get_stoch_params)
niters <- 1 # Always 1 for this app
# Make instance of the stock
# This is just a normal stock that can be used outside of shiny purposes
# Need isolate() as not a reactive context
stock_noreactive <- Stock$new(stock_params = isolate(get_stock_params()), mp_params = isolate(get_mp_params()), niters = niters)
# Make a reactive version by calling the reactive() method
# Note that they are linked! If you change a value in stock_noreactive - you also change it in stock
# stock is a reactive expression. Evaluating stock() returns the Stock object and registers a reactive dependency on it
stock <- stock_noreactive$reactive()
# Alternative - do it all at once, but having a non-reactive version is quite useful for getting dims etc
#stock <- Stock$new(stock_params = isolate(get_stock_params()), mp_params = isolate(get_mp_params()), niters = niters)$reactive()
# Set up the timestep - initial value is last historical timestep
timestep <- reactiveVal(isolate(get_stock_params()$last_historical_timestep))
# Reset observer - clears out the stock information
# Any invalid reactive objects in this observer will trigger this, including an invalid stock.
# For example, when stock() becomes invalid because project() method has been called.
# What can trigger the reset:
# - changing stock and MP params
# - pressing the reset button
observe({
#message("In reset observer")
input$reset
stock_params <- get_stock_params()
mp_params <- get_mp_params()
# Reset the timestep
timestep(get_stock_params()$last_historical_timestep)
# Use isolate else this is triggered when stock becomes invalid (i.e. after project)
isolate(stock()$reset(stock_params = stock_params, mp_params = mp_params, niters=niters))
}, label="resetter")
observeEvent(input$advance, {
# Advance the timestep if able
if(timestep() < get_stock_params()$nyears){
timestep(timestep()+1)
}
# Call the project() method. This invalidates the stock() object
# (by internally changing the reactiveDep field).
# The invalidated stock can then trigger other stuff
stock()$project(timesteps=timestep(), mp_params=get_mp_params())
})
#---------------------------------------------------------------
# Output stuff
output$plot_catch <- renderPlot({
plot_catch_hcr(stock=stock(), mp_params=get_mp_params(), timestep=timestep(), cex.axis=1.1, cex.lab=1.3, main="Catch")
})
output$plot_biomass <- renderPlot({
plot_biomass(stock=stock(), mp_params=get_mp_params(), cex.axis=1.1, cex.lab=1.3, main="Biomass", ylab="Biomass")
})
output$plot_hcr <- renderPlot({
plot_model_based_hcr(stock=stock(), mp_params=get_mp_params(), timestep=timestep()+1, cex.axis=1.1, cex.lab=1.3, main="The HCR")
})
output$plot_arrow <- renderPlot({
plot_hcr_intro_arrow(stock=stock(), timestep=timestep()+1-get_mp_params()$timelag) # Watch the timelag here
})
} # End of server function
# Run the app
shinyApp(ui, server)
}
# The Measuring Performance app
# measuring_performance.R
# Author: Finlay Scott (SPC) <[email protected]>
# Soundtrack: Bad Magic by Motorhead
# Distributed under the terms of the GNU General Public License GPL (>= 3)
#' Measuring performance app launcher
#'
#' Launches the 'Measuring Performance' Shiny app.
#' See the 'Information' tab in the app for more information.
#' Also see the package vignette (\code{vignette("measuring_performance", package="AMPLE")}) for a tutorial.
#' @param ... Not used
#' @examples
#' \dontrun{measuring_performance()}
#' @export
measuring_performance <- function(...){
# User interface ----
ui <- navbarPage(
title="Measuring HCR performance",
tabPanel(title = "How to measure performance",
# Initiate sidebarLayout
sidebarLayout(
sidebar_setup(
# MP selector
mpParamsSetterUI("mpparams", mp_visible=c("Threshold catch", "Constant catch")),# "Threshold effort", "Constant effort")),
br(), # Could add br() automatically to side bar set up to separate each component?
# Buttons
tags$span(title="Run the full projection",
actionButton("project", "Run projection")),
tags$span(title="Reset all projections",
actionButton("reset", "Reset")),
br(),
# Stochasticity module
stochParamsSetterUI("stoch", init_biol_sigma=0.0, init_est_sigma=0.0, init_est_bias=0.0, show_var=FALSE),
br(),
shinyscreenshot::screenshotButton(label="Take a screenshot", filename="meas_perf", scale=2)
),
mainPanel(
column(6,
fluidRow(
plotOutput("plot_catch",height="300px"),
),
fluidRow(
plotOutput("plot_biomass",height="300px"),
),
fluidRow(
plotOutput("plot_cpue",height="300px")
)
),
column(6,
fluidRow(plotOutput("plot_hcr")),
fluidRow(
textOutput("print_iter"),
radioButtons(inputId="table_choice", label="Table selection", inline=TRUE, choiceNames=c("Each replicate", "Performance indicators"), choiceValues=c("reps", "pis"), selected="reps"),
conditionalPanel(condition="input.table_choice == 'reps'",
tags$span(title="The final values of SB/SBF=0, catch and relative CPUE of each replicate. The final row shows the median of the final values and the values in the brackets are the 5th and 95th percentiles.",
tableOutput("rep_table"))),#, style = "font-size:100%")),
conditionalPanel(condition="input.table_choice == 'pis'",
tags$span(title="A table of various performance indicators calculated over the short-, medium- and long-term. The value is the median. The values in the brackets are the 5th and 95th percentiles. See the information tab for more details",
tableOutput("pi_table")))#, style = "font-size:100%"))
)
)
) # End of mainPanel
) # End of sidebarLayout
), # End of tabPanel 1
tabPanel(title = "Information",
sidebarLayout(
sidebar_setup(
br()
),
mainPanel(
# Should work with devtools and after installation
shiny::includeMarkdown(system.file("introtext", "measuring_performance.md", package="AMPLE")),
# For deployment premake the vignette and drop it into the inst/www folder
h1("Tutorial"),
p("A more detailed tutorial can be found at this link:"),
a("Tutorial (html version)", target="_blank", href="img/measuring_performance.html"),
br(),
br()
) # End of mainPanel
) # End of sidebarLayout
), # End of Settings tabPanel
tabPanel(title = "Settings",
sidebarLayout(
sidebar_setup(
),
mainPanel(
br(),
# Life history parameters projection options
stockParamsSetterUI("stock")
) # End of mainPanel
) # End of sidebarLayout
), # End of Settings tabPanel
tabPanel(title = "About",
sidebarLayout(
sidebar_setup(
ample_maintainer_and_licence()
),
mainPanel(
spc_about()
) # End of mainPanel
) # End of sidebarLayout
) # End of About tabPanel
) # End of navbarPage
#--------------------------------------------------------------------------
# Start of server function
server <- function(input, output,session) {
# Get the modules
get_stoch_params <- stochParamsSetterServer("stoch")
get_mp_params <- mpParamsSetterServer("mpparams", get_stoch_params)
get_stock_params <- stockParamsSetterServer("stock", get_stoch_params)
max_iters <- 100 # If you click more than this I'd be surprised... This could be an app option?
# Make instance of the stock
stock_noreactive <- Stock$new(stock_params = isolate(get_stock_params()), mp_params = isolate(get_mp_params()), niters = max_iters)
# Make a reactive version by calling the reactive() method (or do it all at once)
stock <- stock_noreactive$reactive()
# Set up counter for iters
iter <- reactiveVal(0)
# Reset observer - clears out the stock information
observe({
#message("In reset observer")
input$reset
stock_params <- get_stock_params()
mp_params <- get_mp_params()
# Reset the iter counter
iter(0)
# Use isolate else this is triggered when stock becomes invalid (i.e. after project)
isolate(stock()$reset(stock_params = stock_params, mp_params = mp_params, niters=max_iters))
}, label="resetter")
observeEvent(input$project, {
# If space advance the iter and project
if(iter() < max_iters){
iter(iter()+1)
# Without the iter() < max_iters check, the stock for the last iter would keep changing as it kept being projected
timesteps <- c(stock()$last_historical_timestep+1,dim(stock()$biomass)[2])
# Call the project() method. This invalidates the stock() object
# (by internally changing the reactiveDep field).
# The invalidated stock can then trigger other stuff
stock()$project(timesteps=timesteps, mp_params=get_mp_params(), iters=iter())
}
})
#---------------------------------------------------------------
# Output stuff
quantiles <- c(0.05, 0.95)
max_spaghetti_iters <- 50
lhs_mar <- c(5.1,4.1,1,2.1) # Margins for the LHS plots - reduced top
cex_axis <- 1.1
cex_lab <- 1.3
output$print_stock <- renderTable({
# This output is triggered if stock is invalidated, i.e. through the project() method
stock_temp <- stock()
stock_temp$as_data_frame()
})
output$print_iter <- renderText({
return(paste("Replicate: ", iter(), sep=""))
})
output$plot_catch <- renderPlot({
# Par reset
opar <- par(mar=lhs_mar)
on.exit(par(opar))
iter_range <- 1:max(iter(),1) # When we start iter() = 0 - and we just want to show the historical period
plot_catch_iters(stock=stock(), mp_params=get_mp_params(), iters=iter_range, max_spaghetti_iters = max_spaghetti_iters, quantiles=quantiles, show_time_periods = TRUE, cex.axis=1.1, cex.lab=1.3)
})
output$plot_biomass <- renderPlot({
# Par reset
opar <- par(mar=lhs_mar)
on.exit(par(opar))
iter_range <- 1:max(iter(),1) # When we start iter() = 0 - and we just want to show the historical period
plot_biomass(stock=stock(), mp_params=get_mp_params(), ylab="True biomass", iters=iter_range, max_spaghetti_iters=max_spaghetti_iters, quantiles=quantiles, show_time_periods = TRUE, cex.axis=1.1, cex.lab=1.3) # Other args sent to plot function
})
output$plot_cpue <- renderPlot({
# Par reset
opar <- par(mar=lhs_mar)
on.exit(par(opar))
iter_range <- 1:max(iter(),1) # When we start iter() = 0 - and we just want to show the historical period
plot_cpue(stock=stock(), mp_params=get_mp_params(), iters=iter_range, max_spaghetti_iters=max_spaghetti_iters, quantiles=quantiles, show_time_periods = TRUE, cex.axis=1.1, cex.lab=1.3)
})
output$plot_hcr <- renderPlot({
# Par reset
opar <- par(mar=lhs_mar)
on.exit(par(opar))
plot_model_based_hcr(stock=stock(), mp_params=get_mp_params(), iter=iter(), cex.axis=1.1, cex.lab=1.3)
})
# Tables
output$rep_table<- renderTable({
if(iter() < 1){
return(NULL)
}
iter_range <- 1:max(iter(),1) # When we start iter() = 0 - and we just want to show the historical period
stock()$replicate_table(iters=iter_range, quantiles=quantiles)
},
bordered = TRUE,
rownames = FALSE,
caption= "Performance of each replicate",
auto=TRUE)
output$pi_table<- renderTable({
if(iter() < 1){
return(NULL)
}
iter_range <- 1:max(iter(),1) # When we start iter() = 0 - and we just want to show the historical period
stock()$pi_table(iters=iter_range, quantiles=quantiles)
},
bordered = TRUE,
rownames = TRUE,
caption= "Performance indicators",
auto=TRUE)
} # End of server function
# Run the app
shinyApp(ui, server)
}
# Module for setting the MP parameters
# mp_module.R
# This is used to create a list of MP parameters.
# The MP parameters list elements are: hcr_shape, mp_analysis, mp_type, output_type,
# name and params, plus the stochasticity elements est_sigma and est_bias, and timelag.
# The UI bit sorts out what input controls should be on the screen depending on the selected HCR.
# The server bit scrapes all the inputs into a list of MP params.
#' mpParamsSetterUI
#'
#' The interface for the HCR options.
#' The parameter selection inputs shown in the app are conditional on the selected type of HCR.
#' Some of the inputs have initial values that can be set using the function arguments.
#'
#' @param id Shiny magic id
#' @param mp_visible Which HCR types to show.
#' @param title The title.
#' @param init_thresh_max_catch Initial value of the maximum catch for the catch threshold HCR.
#' @param init_thresh_belbow Initial value of the belbow for the catch threshold HCR.
#' @param init_constant_catch Initial value of constant catch for the constant catch HCR.
#' @param init_constant_effort Initial value of constant effort for the constant effort HCR.
#'
#' @return A taglist
#' @rdname MP_modules
#' @name MP modules
#' @export
mpParamsSetterUI <- function(id, mp_visible=NULL, title="Select the type of HCR you want to test.", init_thresh_max_catch=140, init_thresh_belbow=0.5, init_constant_catch=50, init_constant_effort=1.0){
ns <- NS(id)
# Named vector of currently implemented HCRs
all_hcrs <- c("Constant catch" = "constant_catch",
"Constant effort" = "constant_effort",
"Threshold catch" = "threshold_catch",
"Threshold effort" = "threshold_effort")
#"Empirical: CPUE slope only" = "empirical_cpue_slope") # To come later
# Subset which HCRs you want to be available in the current app
hcr_choices <- all_hcrs[mp_visible]
# Drop down menu of HCR types
hcr_type <- tags$span(title=title, selectInput(ns("hcr_type"), label="HCR Type", choices = hcr_choices))
# The parameter option inputs shown in the app are conditional on the selected HCR type
# This is handled by a bunch of conditional Panels
# Could use a switch statement instead? Not sure it would help write clearer code
# Constant catch
ccpars <- conditionalPanel("input.hcr_type == 'constant_catch'", ns=ns,
tags$span(title="Ignores stock status (e.g. estimated biomass) and sets a constant catch limit.",
sliderInput(ns("constant_catch_level"), "Constant catch level:", min = 0, max = 150, value = init_constant_catch, step = 1))
)
# Constant effort
cepars <- conditionalPanel("input.hcr_type == 'constant_effort'", ns=ns,
tags$span(title="Ignores stock status (e.g. estimated biomass) and sets a constant effort limit.",
sliderInput(ns("constant_effort_level"), "Constant relative effort level:", min = 0, max = 3, value = init_constant_effort, step = 0.01))
)
# Blim and Belbow (used by threshold catch and threshold effort)
# Note || JavaScript OR
tctepars <- conditionalPanel("input.hcr_type == 'threshold_catch' || input.hcr_type == 'threshold_effort' ", ns=ns,
tags$span(title="The biomass levels that determine the shape of the HCR.",
sliderInput(ns("blim_belbow"), "Blim and Belbow:", min = 0, max = 1, value = c(0.2, init_thresh_belbow), step = 0.01))
)
# Cmin and Cmax (used by threshold catch)
tcpars <- conditionalPanel("input.hcr_type == 'threshold_catch'", ns=ns,
tags$span(title="The minimum and maximum catch limit levels output from the HCR.",
sliderInput(ns("cmin_cmax"), "Cmin and Cmax:", min = 0, max = 250, value = c(10, init_thresh_max_catch), step = 1))
)
# Emin and Emax (used by threshold effort)
tepars <- conditionalPanel("input.hcr_type == 'threshold_effort'", ns=ns,
tags$span(title="The minimum and maximum effort limit levels output from the HCR.",
sliderInput(ns("emin_emax"), "Emin and Emax:", min = 0, max = 3, value = c(0.1,1.0), step = 0.01))
)
# Empirical still to come in another app
## Gain and slope years (used by empirical CPUE slope)
#empcpueslopepars <- conditionalPanel("input.hcr_type == 'empirical_cpue_slope'", ns=ns,
# tags$span(title="Empirical HCR based on slope of recent CPUE (catch rate)",
# sliderInput(ns("gain"), "Gain:", min = 0, max = 5, value = 2, step = 0.1),
# sliderInput(ns("slope_years"), "Slope years", min=2, max=10, value=3, step = 1)
#))
# Put together list to return to the UI function.
# If the conditionalPanel is not selected, that element is NULL and ignored
out <- tagList(hcr_type, ccpars, cepars, tctepars, tcpars, tepars)#, empcpueslopepars)
return(out)
}
#' mpParamsSetterServer
#'
#' Does the setting part of the MP params module.
#' Returns a list of MP params based on the MP inputs.
#'
#' @param id The id (shiny magic)
#' @param get_stoch_params Reactive expression that gets the parameters from the stochasticity setter. Otherwise est_sigma and est_bias are set to 0.
#' @return A list of HCR options.
#' @rdname MP_modules
#' @name MP modules
mpParamsSetterServer <- function(id, get_stoch_params=NULL){
moduleServer(id, function(input, output, session){
reactive({
# Setting the stochasticity parameters in the mp_params using the values in the stochasticity reactive expr.
stoch_params <- list(est_sigma = 0, est_bias=0)
if(!is.null(get_stoch_params)){
stoch_params <- get_stoch_params()
}
return(mp_params_switcheroo(input, est_sigma = stoch_params$est_sigma, est_bias = stoch_params$est_bias))
})
})
}
#' mp_params_switcheroo
#'
#' Creates the MP params list based on the MP selection from the Shiny UI.
#' Defined outside of a reactive environment above so we can use it non-reactively (helpful for testing).
#' @param input List of information taken from the Shiny UI (mpParamsSetterUI)
#' @param est_sigma Standard deviation of the estimation variability (default = 0).
#' @param est_bias Estimation bias as a proportion. Can be negative (default = 0).
#'
#' @rdname MP_modules
#' @name MP modules
#' @export
mp_params_switcheroo <- function(input, est_sigma = 0, est_bias = 0){
out <- switch(input$hcr_type,
threshold_catch = list(hcr_shape = "threshold", mp_analysis = "assessment",
mp_type="model", output_type="catch",
name=paste("Thresh. catch: Blim=",input$blim_belbow[1],",Belbow=",input$blim_belbow[2],",Cmin=",input$cmin_cmax[1],",Cmax=",input$cmin_cmax[2], sep=""),
params = c(lim = input$blim_belbow[1], elbow = input$blim_belbow[2], min = input$cmin_cmax[1], max = input$cmin_cmax[2])),
constant_catch = list(hcr_shape = "constant", mp_analysis = "assessment",
mp_type="model", output_type="catch",
name=paste("Const. catch: level=",input$constant_catch_level,sep=""),
params = c(constant_level = input$constant_catch_level)),
threshold_effort = list(hcr_shape = "threshold", mp_analysis = "assessment",
mp_type="model", output_type="relative effort",
name=paste("Thresh. effort: Blim=",input$blim_belbow[1],",Belbow=",input$blim_belbow[2],",Emin=",input$emin_emax[1],",Emax=",input$emin_emax[2], sep=""),
params = c(lim = input$blim_belbow[1], elbow = input$blim_belbow[2], min = input$emin_emax[1], max = input$emin_emax[2])),
constant_effort = list(hcr_shape = "constant", mp_analysis = "assessment",
mp_type="model", output_type="relative effort",
name=paste("Const. effort: level=",input$constant_effort_level,sep=""),
params = c(constant_level = input$constant_effort_level)),
# Could add base year as an extra parameter for the relative effort ones
# This would need an input
# If we do this, last_historical_timestep is not needed in the stock?
# Empirical not yet implemented - to come but needs separate app
# mp_analysis - name of function to generate HCR ip, mp_type - not sure it does anything
#empirical_cpue_slope = list(hcr_shape = "empirical_cpue_slope_op", mp_analysis = "empirical_cpue_slope_ip",
# mp_type="empirical", output_type="catch multiplier",
# name=paste("Empirical CPUE slope: Gain=",input$gain,",Slope years=",input$slope_years, sep=""),
# params = c(gain = input$gain, slope_years = input$slope_years)),
stop("In mp_params_setter. Unrecognised hcr_type.")
) # End of switch
# Add in the stochasticity options and timelag
out$est_bias <- est_bias
out$est_sigma <- est_sigma
out$timelag <- 0 # 2
return(out)
}
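# Illustrative sketch of non-reactive use (hypothetical slider values, not run).
# The 'input' list mimics the Shiny inputs collected by mpParamsSetterUI():
# input <- list(hcr_type = "threshold_catch",
#               blim_belbow = c(0.2, 0.5),
#               cmin_cmax = c(10, 140))
# mp_params <- mp_params_switcheroo(input, est_sigma = 0, est_bias = 0)
# mp_params$name # "Thresh. catch: Blim=0.2,Belbow=0.5,Cmin=10,Cmax=140"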
# Performance indicator table for multiple HCRs
# pi_table.R
# Author: Finlay Scott (SPC) <[email protected]>
# Soundtrack: Spice Doubt by Ozric Tentacles
# Distributed under the terms of the GNU General Public License GPL (>= 3)
#' Performance indicator table for multiple HCRs
#'
#' Returns a neatly formatted table of the performance indicators for multiple HCRs.
#' The \code{pis} argument should already be subsetted for desired PIs, HCRs, and time periods.
#' Used in the Comparing Performance app.
#' @param pis A data.frame of quantiles of performance indicators for each HCR.
#' @param quantiles The quantile values to form the uncertainty part of the table (need to already exist in the \code{pis} data.frame).
#' @param signif The number of significant digits to use in the table.
#' @noRd
#' @keywords internal
pi_table_all_hcrs <- function(pis, quantiles = c(0.05, 0.95), signif=2){
quantile_cols <- paste0("value_", sort(quantiles))
pis$value <- paste(signif(pis$value_0.5, signif), " (", signif(pis[,quantile_cols[1]], signif), ",", signif(pis[,quantile_cols[2]], signif), ")", sep="")
# Fix Prob. PIs - no uncertainty
prob_pis <- grepl("Prob", pis$pi)
pis$value[prob_pis] <- as.character(signif(pis$value_0.5, signif))[prob_pis]
dat <- pis[,c("hcr_ref", "pi", "value")]
dat <- reshape(data = dat, direction = "wide", timevar = "pi", idvar = c("hcr_ref"), v.names = "value", sep="_")
# Col names
new_colnames <- substring(colnames(dat)[-1], first = nchar("value_") + 1)
colnames(dat)[-1] <- new_colnames
colnames(dat)[1] <- "HCR"
return(dat)
}
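# Illustrative sketch (hypothetical data, not run). The input data.frame needs
# 'pi', 'hcr_ref' and 'value_<quantile>' columns that match the quantiles argument:
# pis <- data.frame(hcr_ref = c("1 HCR", "2 HCR"), pi = "Catch",
#                   value_0.05 = c(80, 85), value_0.5 = c(100, 105),
#                   value_0.95 = c(120, 125))
# pi_table_all_hcrs(pis, quantiles = c(0.05, 0.95))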
# Plotting functions for the Shiny apps.
# Note that none of these are exported and no man pages are written.
# They are documented for internal purposes only.
# plot_funcs.R
# Author: Finlay Scott (SPC) <[email protected]>
# Soundtrack: Waterfall Cities by Ozric Tentacles
# Distributed under the terms of the GNU General Public License GPL (>= 3)
#' Plot biomass to HCR arrow
#'
#' Plots the connecting arrow between the biomass plot and the HCR plot in the 'Introduction to HCRs' app.
#' @param stock An R6 class Stock object.
#' @param timestep The time step of the HCR input.
#' @importFrom graphics "arrows" "grid" "legend" "lines" "par" "points" "polygon"
#' @noRd
#' @keywords internal
plot_hcr_intro_arrow <- function(stock, timestep){
# Arrow from B/K to HCR
btimestep <- min(timestep, dim(stock$hcr_ip)[2])
last_hcr_ip <- stock$hcr_ip[1, btimestep]
npoints <- 100
theta <- seq(from=0, to=pi/2, length=npoints)
x <- sin(theta) * last_hcr_ip
y <- -1 * cos(theta) * (1-last_hcr_ip)
# Set up plot
plot(x=x, y=y, xlim=c(0,1), ylim=c(-1,0), type="n", xaxt="n", yaxt="n",xlab="", ylab="", axes=FALSE,xaxs="i", yaxs="i")
lines(x=x, y=y, col="blue", lwd=3)
# Add an arrow
arrows(x0=x[length(x)-1], y0=y[length(y)-1], x1=x[length(x)], y1=y[length(y)],col="blue", lwd=3)
}
#' Plot time periods
#'
#' Adds the time periods to the current stock as vertical lines.
#' @param stock An R6 class Stock object.
#' @noRd
#' @keywords internal
plot_time_periods <- function(stock){
time_periods <- stock$time_periods()
tp1 <- as.numeric(time_periods[[1]][1])# - 1 # So it looks better with catch
tp2 <- as.numeric(time_periods[[2]][1])# - 1
tp3 <- as.numeric(time_periods[[3]][1])# - 1
lines(x=c(tp1, tp1), y=c(-1e9, 1e9), lty=2, lwd=1, col="black")
lines(x=c(tp2, tp2), y=c(-1e9, 1e9), lty=2, lwd=1, col="black")
lines(x=c(tp3, tp3), y=c(-1e9, 1e9), lty=2, lwd=1, col="black")
return(NULL)
}
#' plot_biomass
#'
#' plot_biomass() plots time series of 'true' and observed depletion (SB/SBF=0).
#' Used in all of the Shiny apps in this package.
#'
#' @param stock An R6 class Stock object.
#' @param mp_params The management procedure parameters (a list including mp_type).
#' @param ylab The y-axis label.
#' @param iters The iters to plot. Default is all of them.
#' @param max_spaghetti_iters The number of iterations to show as spaghetti before ribbons are shown. Default is 50.
#' @param quantiles Quantiles of the ribbons.
#' @param show_time_periods Boolean. Show the time period lines on the plot.
#' @param cex_leg Expansion of legend text (not handled by ... to plot)
#' @param ... Other arguments to pass to the plot() function.
#' @return A plot
#' @noRd
#' @keywords internal
plot_biomass <- function(stock, mp_params, ylab = "SB/SBF=0", iters = 1:dim(stock$biomass)[1], max_spaghetti_iters=50, quantiles=c(0.05, 0.95), cex_leg = 1, show_time_periods = FALSE, ...){
years <- as.numeric(dimnames(stock$biomass)$year)
# True bk
bk_true <- stock$biomass / stock$k
# Set up empty plot
ylim <- c(0,1) #max(1, max(bk_true, na.rm=TRUE), na.rm=TRUE)
plot(x=years, y= bk_true[1,], type="n", ylab=ylab, xlab="Year", ylim=ylim, xaxs="i", yaxs="i", ...)
# Add LRP and TRP
lines(x=c(years[1], years[length(years)]),y=rep(stock$lrp, 2), lty=3, lwd=2, col="black")
lines(x=c(years[1], years[length(years)]),y=rep(stock$trp, 2), lty=3, lwd=2, col="black")
# Show time periods
if(show_time_periods){
plot_time_periods(stock)
}
# Add a grid
grid()
# Draw the iterations in grey
true_col <- "black"
est_col <- "blue"
# Show the last iter - including true and observed if there is estimation variability
last_iter <- max(iters)
# Draw a ribbon if more than X iters
if (length(iters) >= max_spaghetti_iters){
draw_ribbon(x=years, y=bk_true[iters,], quantiles=quantiles)
legend(x="bottomleft", legend=c("Average true biomass","Last replicate"), lty=c(2,1), lwd=2, col=true_col, cex=cex_leg)
} else {
for(i in iters){
lines(x=years, y=bk_true[i,], col=scales::alpha("black", 0.25), lwd=2, lty=1)
}
# This will only work for model based for now
# If we have a model based MP, show the HCR IP as it is the estimated biomass too
#if (mp_params$mp_type == "model"){
# With no estimation error, the true biomass is the same as the observed biomass
if ((mp_params$est_sigma != 0) | (mp_params$est_bias != 0)){
# Plot the estimated B/K - plotted first so that the Intro to HCR shows it
lines(x=years, y=stock$hcr_ip[last_iter,], col=est_col, lty=1, lwd=2)
# Only show legend if not already showing a legend
legend(x="bottomleft", legend=c("True","Estimated"), lwd=2, col=c(true_col, est_col), cex=cex_leg)
}
}
# Plot the last true iteration in black
lines(x=years, y=bk_true[last_iter,], col=true_col, lwd=2, lty=1)
# Hack to show the estimated biomass on the Introduction to HCR app
if(dim(stock$biomass)[1]==1){
lines(x=years, y=stock$hcr_ip[last_iter,], col=est_col, lty=1, lwd=2)
}
}
# Called by a couple of plotting functions
# Better than maintaining same code in multiple places
# So the HCR and catch plot in the Intro to HCR app have the same scale
#' Get ymax for catch plots
#'
#' Useful function to calculate the ymax for catch plots
#' @param catch Catch data.
#' @param mp_params The MP parameters.
#' @noRd
#' @keywords internal
get_catch_ymax <- function(catch, mp_params){
# Sort out dimensions and labels
# Bit annoying that we have a switch
ymax <- switch(mp_params$hcr_shape,
constant = mp_params$params["constant_level"],
threshold = mp_params$params["max"])
ymax <- max(c(ymax, c(catch)), na.rm=TRUE) * 1.1
return(ymax)
}
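# Illustrative example (hypothetical values, not run): the y-axis maximum covers
# both the HCR's maximum output and the realised catches, with 10% headroom:
# get_catch_ymax(catch = matrix(100, nrow = 1, ncol = 10),
#                mp_params = list(hcr_shape = "threshold", params = c(max = 140)))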
# Plot a single iteration (the first one) with options for historical HCR OPs (used in Intro to HCR)
# This is only used in the 'Introduction to HCRs' app.
#' Catch plot for Intro to HCR app
#'
#' Plots a time series of the catches with the addition of the HCR output.
#' Only deals with a single iteration and is only used in the Introduction to HCRs app.
#' @param stock An R6 Stock class object.
#' @param mp_params The MP parameters.
#' @param timestep Current time timestep.
#' @noRd
#' @keywords internal
plot_catch_hcr <- function(stock, mp_params, timestep, ...){
years <- as.numeric(dimnames(stock$biomass)$year)
# Set ylim - use same as HCR plot
ymax <- get_catch_ymax(stock$catch, mp_params)
yrange <- c(0, ymax)
# Empty axis
plot(x=years, y=years, type="n", ylim=c(0, ymax), ylab="Catch", xlab="Year", xaxs="i", yaxs="i", ...)
grid()
# Plot the historical catches as horizontal dashed lines
# (current timestep is considered as historical as that has already been fished)
if (timestep > stock$last_historical_timestep){
for (yr in (stock$last_historical_timestep+1):timestep){
lines(x=c(years[1], years[length(years)]), y=rep(stock$catch[1, yr],2), lty=2, col="grey", lwd=2)
}
}
# If HCR OP is total catch, plot the proposed catch from the HCR - i.e. the catch in the next time step
# Could add something similar for relative effort based HCRs but I don't think they should be included in the
# 'Introduction to HCRs' app.
# It would need an additional step of converting that relative effort to actual catch (done in the project() method).
# This could be combined with the above bit of plotting ghosts - but we would need to convert the HCR OP to catch.
if(mp_params$output_type == "catch"){
if (timestep < dim(stock$hcr_op)[2]){
next_catch <- stock$hcr_op[1, timestep+1]
# If HCR OP is catch multiplier - not implemented yet - used for empirical
# if (mp_params$output_type == "catch multiplier"){
# next_catch <- stock$catch[1,timestep] * stock$hcr_op[,timestep+1]
# next_catch[next_catch < 10] <- 10 # A minimum catch
# }
lines(x=c(years[1], years[length(years)]), y=rep(next_catch,2), lty=2, col="blue", lwd=2)
}
}
# Plot the catch in the first (only) iteration
lines(x=years, y=stock$catch[1,], col="blue", lwd=2, lty=1)
}
#' Plot the catch with iterations
#'
#' Plot the catch time series when there are potentially multiple iterations.
#' Used in the Measuring Performance and Comparing Performance apps.
#' @param stock An R6 class Stock object.
#' @param mp_params The management procedure parameters (a list including mp_type).
#' @param iters The iters to plot. Default is all of them.
#' @param max_spaghetti_iters The number of iterations to show as spaghetti before ribbons are shown. Default is 50.
#' @param quantiles Quantiles of the ribbons.
#' @param show_time_periods Boolean. Show the time period lines on the plot.
#' @param cex_leg Expansion of legend text (not handled by ... to plot)
#' @param ... Other arguments to pass to the plot() function.
#' @noRd
#' @keywords internal
plot_catch_iters <- function(stock, mp_params, iters = 1:dim(stock$biomass)[1], max_spaghetti_iters=50, quantiles=c(0.05, 0.95), cex_leg = 1, show_time_periods=FALSE, ...){
years <- as.numeric(dimnames(stock$biomass)$year)
# Set ylim - use same as HCR plot
ymax <- get_catch_ymax(stock$catch, mp_params)
yrange <- c(0, ymax)
# Empty axis
plot(x=years, y=years, type="n", ylim=c(0, ymax), ylab="Catch", xlab="Year", xaxs="i", yaxs="i", ...)
if(show_time_periods){
plot_time_periods(stock)
}
grid()
# Draw a ribbon if more than X iters
if (length(iters) >= max_spaghetti_iters){
draw_ribbon(x=years, y=stock$catch[iters,], quantiles=quantiles)
legend(x="bottomleft", legend=c("Average catch","Last replicate"), lty=c(2,1), lwd=2, col="black", cex=cex_leg)
} else {
# Otherwise plot all iters in grey
for(i in iters){
lines(x=years, y=stock$catch[i,], lty=1, lwd=2, col=scales::alpha("black", 0.25))
}
}
# Plot the most recent catch in black
lines(x=years, y=stock$catch[max(iters),], lty=1, lwd=2, col="black")
}
#' Plot relative CPUE
#'
#' Plot time series of the CPUE relative to the CPUE in the last historical time step.
#' Used in the Measuring Performance and Comparing Performance apps.
#' @param stock An R6 class Stock object.
#' @param mp_params The management procedure parameters (a list including mp_type).
#' @param iters The iters to plot. Default is all of them.
#' @param max_spaghetti_iters The number of iterations to show as spaghetti before ribbons are shown. Default is 50.
#' @param quantiles Quantiles of the ribbons.
#' @param show_time_periods Boolean. Show the time period lines on the plot.
#' @param cex_leg Expansion of legend text (not handled by ... to plot)
#' @param ... Other arguments to pass to the plot() function.
#' @noRd
#' @keywords internal
plot_cpue <- function(stock, mp_params, iters = 1:dim(stock$biomass)[1], max_spaghetti_iters=50, quantiles=c(0.05, 0.95), cex_leg=1, show_time_periods = FALSE, ...){
cpue <- stock$relative_cpue()
years <- as.numeric(dimnames(cpue)$year)
# Set ylim - use same as HCR plot
ymax <- max(1, cpue, na.rm=TRUE)
yrange <- c(0, ymax)
# Empty axis
plot(x=years, y=years, type="n", ylim=c(0, ymax), ylab="Relative CPUE (catch rate)", xlab="Year", xaxs="i", yaxs="i", ...)
if(show_time_periods){
plot_time_periods(stock)
}
grid()
# Draw a ribbon if more than X iters
if (length(iters) >= max_spaghetti_iters){
draw_ribbon(x=years, y=cpue[iters,], quantiles=quantiles)
legend(x="bottomleft", legend=c("Average CPUE","Last replicate"), lty=c(2,1), lwd=2, col="black", cex=cex_leg)
} else {
# Otherwise plot all iters in grey
for(i in iters){
lines(x=years, y=cpue[i,], lty=1, lwd=2, col=scales::alpha("black", 0.25))
}
}
# Plot the most recent catch in black
lines(x=years, y=cpue[max(iters),], lty=1, lwd=2, col="black")
}
#' Draw uncertainty ribbon.
#'
#' Add ribbon based on quantiles and add median to the current plot.
#' @param x A vector of years (the x-axis values).
#' @param y A matrix of the data to summarise (iterations x years).
#' @param quantiles A vector of two quantiles for the edges of the ribbon.
#' @noRd
#' @keywords internal
draw_ribbon <- function(x, y, quantiles){
# Get quantiles
qs <- apply(y, 2, function(x){quantile(x, probs=c(quantiles[1], 0.5, quantiles[2]), na.rm=TRUE)})
# Draw envelope using polygon - horrible
polyx <- c(x, rev(x))
polyy <- c(qs[1,],rev(qs[3,]))
# Drop NAs
polyx <- polyx[!is.na(polyy)]
polyy <- polyy[!is.na(polyy)]
polygon(x=polyx, y=polyy, col="grey", border=NA)
# Add qlines
#lines(x=x, y=qs[1,], lty=ribbon_lty, col=ribbon_border_col, lwd=ribbon_lwd)
#lines(x=x, y=qs[3,], lty=ribbon_lty, col=ribbon_border_col, lwd=ribbon_lwd)
# Add median
lines(x=x, y=qs[2,], lty=2, col="black", lwd=2)
}
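# Illustrative sketch (simulated data, not run). draw_ribbon() adds to an
# existing plot:
# y <- matrix(rnorm(100 * 20, mean = 100, sd = 10), nrow = 100, ncol = 20)
# plot(x = 1:20, y = y[1, ], type = "n", ylim = c(50, 150), xlab = "Year", ylab = "Value")
# draw_ribbon(x = 1:20, y = y, quantiles = c(0.05, 0.95))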
#' Plot a model based HCR
#'
#' Plot a model based HCR, including current stock status and current HCR OP.
#' Can also show the current and historical input and output values depending on the \code{timestep} and \code{iter} arguments.
#' @param stock An R6 class Stock object.
#' @param mp_params The management procedure parameters (a list including mp_type).
#' @param timestep The current timestep (optional)
#' @param iter The current iter (optional).
#' @param show_ref_pts Boolean. Plot the LRP and TRP lines. Default is false.
#' @noRd
#' @keywords internal
plot_model_based_hcr <- function(stock, mp_params, timestep=NULL, iter=NULL, show_ref_pts=FALSE, ...){
# Stop timestep going beyond bounds
if (!is.null(timestep)){
if (timestep > dim(stock$hcr_ip)[2]){
timestep <- dim(stock$hcr_ip)[2]
}
}
if (mp_params$output_type=="catch"){
ymax <- get_catch_ymax(stock$catch, mp_params)
}
if (mp_params$output_type=="relative effort"){
rel_effort <- sweep(stock$effort, 1, stock$effort[,stock$last_historical_timestep], "/")
ymax <- max(c(rel_effort, mp_params$params["max"], mp_params$params["constant_level"]), na.rm=TRUE) * 1.1
}
ylab <- paste("Next ", mp_params$output_type, sep="")
yrange <- c(0, ymax)
# Need to set these depending on the HCR somehow
# From MP analysis type? Leave as B/K for now. Could set additional argument in mp_params like xlab?
xrange <- c(0, 1)
xlab <- "Estimated biomass"
# Plot empty axes
plot(x=xrange,y=yrange,type="n",xlab=xlab, ylab=ylab, xaxs="i", yaxs="i", ...)
grid()
# Add the B/K based reference points
if (show_ref_pts){
lines(x=rep(stock$lrp,2), y=c(0, ymax),lty=3, lwd=2, col="black")
lines(x=rep(stock$trp,2), y=c(0, ymax),lty=3, lwd=2, col="black")
}
# Add all HCR inputs and outputs so far
# The years that the HCR has been active
hcr_ip_yrs <- ((stock$last_historical_timestep+1):dim(stock$hcr_ip)[2]) - mp_params$timelag
hcr_op_yrs <- (stock$last_historical_timestep+1):dim(stock$hcr_ip)[2]
# Plot current timestep of first iter in a different shade
# Used in Introduction to HCR app
if (!is.null(timestep)){
# Only show the HCR years (from last historical timestep)
# Show ghosts
points(x=c(stock$hcr_ip[1,hcr_ip_yrs]), y=c(stock$hcr_op[1,hcr_op_yrs]), col="grey", pch=16, cex=2)
# Show current
points(x=c(stock$hcr_ip[1,timestep - mp_params$timelag]),
y=c(stock$hcr_op[1,timestep]), col="blue", pch=16, cex=2)
} else if (!is.null(iter)) {
# Used in Measuring and Comparing Performance apps
# Only show points if a projection has been run?
iters_run <- !is.na(stock$catch[,dim(stock$catch)[2]])
points(x=c(stock$hcr_ip[iters_run,hcr_ip_yrs]), y=c(stock$hcr_op[iters_run,hcr_op_yrs]), col=scales::alpha("black", 0.1), pch=16, cex=2)
if(length(iter) == 1){
points(x=c(stock$hcr_ip[iter,hcr_ip_yrs]), y=c(stock$hcr_op[iter,hcr_op_yrs]), col="blue", pch=16, cex=2)
}
}
# Plot HCR outline - depends on the HCR shape
# Constant catch
hcr_lwd <- 3
if (mp_params$hcr_shape == "constant"){
lines(x=xrange, y=c(mp_params$params["constant_level"], mp_params$params["constant_level"]), lwd=hcr_lwd, lty=1, col="red")
}
# Threshold
else if (mp_params$hcr_shape == "threshold"){
lines(x = c(0, mp_params$params["lim"], mp_params$params["elbow"], 1),
y = c(rep(mp_params$params["min"], 2), rep(mp_params$params["max"], 2)), lwd=hcr_lwd, lty=1, col="red")
}
else {
stop("In plot_model_based_hcr(). Trying to plot the HCR shape but hcr_shape is not recognised.")
}
# If timestep is present, show the IP and OP as lines for the 1st iter and timestep
# Used in Introduction to HCRs app
# Plotted last so they lie on top of everything else
if (!is.null(timestep)){
# timestep is the catch to be
last_ip <- c(stock$hcr_ip[1,timestep-mp_params$timelag])
last_op <- c(stock$hcr_op[1,timestep])
lines(x = c(last_ip, last_ip), y=c(0, last_op), lty=2, lwd=2, col="blue")
lines(x = c(0, last_ip), y=c(last_op, last_op), lty=2, lwd=2, col="blue")
}
}
# Stochasticity module
# stochasticity_module.R
# Handles the biological and estimation variability options in the user interface
#' stochParamsSetterUI
#'
#' stochParamsSetterUI() is the UI part for the stochasticity options.
#' Stochasticity is included in the projections in two areas: biological variability (e.g. recruitment)
#' and estimation error (to represent the difference between the 'true' status of the stock and the estimated status that is used by the HCR).
#' Estimation error includes bias and variability.
#' The arguments to this function allow only some of these elements to be shown.
#'
#' @param id Shiny magic id
#' @param show_var Show the variability options when app opens (default is FALSE).
#' @param show_biol_sigma Show the biological productivity variability option (default is TRUE).
#' @param show_est_sigma Show the estimation variability option (default is TRUE).
#' @param show_est_bias Show the estimation bias option (default is TRUE).
#' @param init_biol_sigma Default value for biological productivity variability (ignored if not shown).
#' @param init_est_sigma Default value for estimation variability (ignored if not shown).
#' @param init_est_bias Default value for estimation bias (ignored if not shown).
#' @return A taglist
#' @rdname stoch_module
#' @name Stochasticity module
stochParamsSetterUI <- function(id, show_var=FALSE, show_biol_sigma = TRUE, show_est_sigma = TRUE, show_est_bias = TRUE, init_biol_sigma=0.0, init_est_sigma=0.0, init_est_bias=0.0){
ns <- NS(id) # To save doing stuff like NS(id, "show_var")
# Check box for showing any of the stochasticity options
show_var <- checkboxInput(ns("show_var"), label = "Show variability options", value = show_var)
# Store which stochasticity options to show in a list
options <- list()
if (show_biol_sigma){
options[[length(options)+1]] <-
tags$span(title="Natural variability in the stock biological processes (e.g. recruitment and growth)",
numericInput(ns("biol_sigma"), label = "Biological variability", value = init_biol_sigma, min=0, max=1, step=0.05))
}
if (show_est_sigma){
options[[length(options)+1]] <-
tags$span(title="Simulating the difference between the 'true' biomass and the 'estimated' biomass used by the HCR by applying randomly generated noise",
numericInput(ns("est_sigma"), label = "Estimation variability", value = init_est_sigma, min=0, max=1, step=0.05))
}
if (show_est_bias){
options[[length(options)+1]] <-
tags$span(title="Simulating the difference between the 'true' biomass and the 'estimated' biomass used by the HCR by applying a continuous bias (positive or negative)",
numericInput(ns("est_bias"), label = "Estimation bias", value = init_est_bias, min=-0.5, max=0.5, step=0.05))
}
# Put together all of the selected options into a conditionalPanel
vars <- conditionalPanel(condition="input.show_var == true", ns=ns, options)
out <- tagList(show_var, vars)
return(out)
}
#' stochParamsSetterServer
#'
#' stochParamSetterServer() does the server side stuff for the stochasticity options.
#'
#' @param id The id (shiny magic)
#' @return A list of stochasticity options.
#' @rdname stoch_module
#' @name Stochasticity module
stochParamsSetterServer <- function(id){
moduleServer(id, function(input, output, session){
reactive({return(set_stoch_params(input))})
})
}
#' set_stoch_params
#'
#' set_stoch_params() sets up default values for the stochasticity parameters.
#' Defined as a separate function so it can be used for testing outside of a reactive environment.
#' @param input A list of stochasticity parameters.
#' @rdname stoch_module
#' @name Stochasticity module
set_stoch_params <- function(input){
# Create a list of the stochasticity parameters - if they are not in the input, set to 0
params <- c("biol_sigma", "est_sigma", "est_bias")
out <- lapply(params, function(x) ifelse(is.null(input[[x]]), 0.0, input[[x]]))
names(out) <- params
return(out)
}
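# Illustrative example (not run): elements missing from the input list default to 0:
# set_stoch_params(list(biol_sigma = 0.2))
# # list(biol_sigma = 0.2, est_sigma = 0, est_bias = 0)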
# The Stock class based on R6
# stock_class.R
# Author: Finlay Scott (SPC) <[email protected]>
# Soundtrack: The Heavens by Sedibus & The Orb
# Distributed under the terms of the GNU General Public License GPL (>= 3)
#' Correlated random noise
#'
#' Get the next value of correlated noise.
#' Not exported. For internal use only. Not a class method.
#' vt = b * vt-1 + s * sqrt(1 - b^2)
#' s is normally distributed random variable with mean = 0
#' b is the autocorrelation parameter > -1 and < 1
#' e.g. -0.8 very blue, 0.8 red
#' @param x Current value
#' @param b Correlation factor: -1 (red) to 1 (blue), 0 (white)
#' @param sd Standard deviation
#' @references Ranta and Kaitala 2001 Proc. R. Soc.
#' @importFrom stats "quantile" "reshape" "rlnorm" "rnorm"
#' @noRd
#' @keywords internal
next_corrnoise <- function(x, b, sd=0.1){
# Each iter needs a separate noise (i.e. not correlated across iters)
s <- rnorm(length(x),mean=0,sd=sd)
nextx <- b * x + s * sqrt(1 - b^2)
return(nextx)
}
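# A minimal usage sketch of next_corrnoise() (commented out so nothing runs
# when the package is sourced; the b and sd values are arbitrary examples):
# x <- numeric(20)
# for (i in 2:20) x[i] <- next_corrnoise(x[i - 1], b = 0.5, sd = 0.1)
# plot(x, type = "l") # b closer to 1 gives a redder (smoother) series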
# Note use of spurious importFrom to remove spurious note in R CMD check --as-cran
#' R6 Class representing a stock
#'
#' @description
#' A stock object has life history parameters, fields and methods for a biomass dynamic model.
#'
#' @details
#' A stock has biomass, effort, catch and hcr_ip and hcr_op fields as well as the life history parameters.
#' The population dynamics are a simple biomass dynamic model.
#' The Stock class is used for the Shiny apps in the AMPLE package.
#' @importFrom R6 "R6Class"
Stock <- R6::R6Class("Stock",
# R6 classes are not reactive, so even if you make a reactiveVal() of an instance of an R6 class
# changing that object will not invalidate the shiny magic and nothing reacts.
# Solution 1 is to use S3 classes but they are horrible
# Solution 2 is this workaround from Winston Chang - remember to thank him:
# https://community.rstudio.com/t/good-way-to-create-a-reactive-aware-r6-class/84890/8
# I will try this here. It involves adding an 'invalidate' function which triggers if certain methods are
# called. I want this class only to invalidate when the project method is called, because that means that
# something happened and the plots should update.
# Notes on using the reactive stock
# stock is a reactiveExpr. Evaluating stock() calls the Stock$reactive() method.
# This method accesses a reactiveVal (called reactiveDep) inside Stock and then returns self.
# This effectively makes stock a reactive version of a Stock as each call to it accesses reactiveDep.
# When reactiveDep changes, the stock object invalidates.
# A change to reactiveDep is added to the methods that should invalidate the stock.
# Here we add it to project() and reset(). When called, the last thing they do is change the
# value of reactiveDep using the invalidate() method.
# stock() therefore becomes invalid and triggers things in the Shiny app.
# Keeps track of whether object has been invalidated.
private = list(
reactiveDep = NULL,
reactiveExpr = NULL,
invalidate = function() {
private$count <- private$count + 1
private$reactiveDep(private$count)
invisible()
},
count = 0
),
public = list(
#' @field biomass Array of biomass
biomass = NULL,
#' @field catch Array of catches
catch = NULL,
#' @field effort Array of fishing effort
effort = NULL,
#' @field hcr_ip Array of HCR input signals
hcr_ip = NULL,
#' @field hcr_op Array of HCR output signals
hcr_op = NULL,
# #' @field estimated_cpue Array of estimated CPUE
#estimated_cpue = NULL,
#' @field msy MSY (default = 100).
msy = NULL,
#' @field r Growth rate (default = 0.6). Set by the user in the app.
r = NULL, # or slow 0.2 or fast 1.0 depending on input selection (default = 0.6).
#' @field k Carrying capacity (default = NULL - set by msy and r when object is initialised).
k = NULL,
#' @field p Shape of the production curve (default = 1).
p = NULL,
#' @field q Catchability (default = 1).
q = 1,
#' @field lrp Limit reference point, expressed as depletion (default = 0.2).
lrp = 0.2,
#' @field trp Target reference point, expressed as depletion (default = 0.5).
trp = 0.5,
#' @field b0 Virgin biomass (default = NULL - set by msy and r when object is initialised).
b0 = NULL,
#' @field current_corrnoise Stores the current values of the correlated noise (by iteration).
current_corrnoise = NULL,
#' @field biol_sigma Standard deviation of biological variability (default = 0).
biol_sigma = NULL,
#' @field last_historical_timestep The last historical timestep of catch and effort data.
last_historical_timestep = NULL,
#' @description
#' Create a new stock object, with fields of the right dimension and NA values (by calling the \code{reset()} method).
#' See the \code{reset()} method for more details.
#' @param stock_params A list of stock parameters with essential elements: r (growth rate, numeric), stock_history (string: "fully", "over", "under"), initial_year (integer), last_historical_timestep (integer), nyears (integer), biol_sigma (numeric).
#' @param mp_params A list of the MP parameters. Used to fill HCR ip and op.
#' @param niters The number of iters in the stock (default = 1).
#' @return A new Stock object.
initialize = function(stock_params, mp_params, niters = 1){
#print("Initialising stock with NAs")
# Set up the reactive dependency - has to be done in the main constructor.
private$reactiveDep <- function(x) NULL
# Make the fields and fill up the history
self$reset(stock_params = stock_params, mp_params = mp_params, niters = niters)
},
#' @description
#' Resets an existing stock object, by remaking all fields (possibly with different dimensions for the array fields).
#' Fills up the catch, effort and biomass fields in the historical period based on the stock history and
#' life history parameters in the \code{stock_params} argument.
#' This is a reactive method which invalidates a reactive instance of this class after it is called.
#' @param stock_params A list with essential elements: r (growth rate, numeric, default=0.6), stock_history (string: "fully", "over", "under", default="fully"), initial_year (integer, default=2000), last_historical_timestep (integer, default=10), nyears (integer, default=30), biol_sigma (numeric, default = 0).
#' @param mp_params A list of the MP parameters. Used to fill HCR ip and op.
#' @param niters The number of iters in the stock (default = 1).
#' @return The reset Stock object, invisibly.
reset = function(stock_params, mp_params, niters){
#print("Resetting existing stock")
nyears <- max(stock_params$last_historical_timestep+1, round(stock_params$nyears))
initial_array <- array(NA, dim=c(niters, nyears), dimnames=list(iter=1:niters, year=stock_params$initial_year:(stock_params$initial_year+nyears-1)))
self$biomass <- initial_array
self$hcr_ip <- initial_array
self$hcr_op <- initial_array
self$catch <- initial_array
self$effort <- initial_array
#self$estimated_cpue <- initial_array
self$p <- 1
self$r <- stock_params$r
self$msy <- 100
self$k <- 4 * self$msy / self$r
self$b0 <- self$k * 2/3
self$current_corrnoise <- rep(0, niters)
self$biol_sigma <- stock_params$biol_sigma
self$last_historical_timestep <- stock_params$last_historical_timestep
# Fill up the historical period
self$fill_history(stock_params = stock_params, mp_params = mp_params)
# Invalidate the object so Shiny gets triggered
private$invalidate()
invisible(self)
},
# Add option to initialiser to call this
#' @description
#' Method to create a reactive instance of a Stock.
#' @return a reactiveExpr.
reactive = function() {
# Ensure the reactive stuff is initialized.
if (is.null(private$reactiveExpr)) {
private$reactiveDep <- reactiveVal(0)
private$reactiveExpr <- reactive({
private$reactiveDep()
self
})
}
private$reactiveExpr
},
#' @description
#' Fills the historical period of the stock
#' @param stock_params Named list with last_historical_timestep and stock_history elements.
#' @param mp_params A list of the MP parameters. Used to fill HCR ip and op.
fill_history = function(stock_params, mp_params){
#print("Filling the historical period")
self$biomass[,1] <- self$b0
self$fill_catch_history(stock_params)
#print("Filling biomass history")
# Fill up biomass in the historical period - including the biomass at the start of projection period
# Also get the historical HCR ip and op over the time series
# This is so we plot the upcoming decision before it happens, and also show observed vs true stock status
for (ts in 2:(self$last_historical_timestep + 1)){
self$fill_biomass(ts)
self$hcr_ip[,ts] <- get_hcr_ip(stock = self, mp_params = mp_params, yr = ts)
self$hcr_op[,ts] <- get_hcr_op(stock = self, mp_params = mp_params, yr = ts)
}
# HCR ip and op also need the first time step
self$hcr_ip[,1] <- get_hcr_ip(stock = self, mp_params = mp_params, yr = 1)
self$hcr_op[,1] <- get_hcr_op(stock = self, mp_params = mp_params, yr = 1)
# Other members
self$effort <- self$catch / (self$biomass * self$q)
invisible(self)
},
# Make this private? So only called by fill historical?
#' @description
#' Fill up the historical period of catches with random values to simulate a catch history
#' @param stock_params A list with essential elements: r (growth rate, numeric), stock_history (string of the exploitation history: "fully" (default), "under" or "over"), initial_year (integer), last_historical_timestep (integer), nyears (integer).
fill_catch_history = function(stock_params){
#print("Filling catch history")
if (!(stock_params$stock_history %in% c("fully", "under", "over"))){
stop("stock_history parameter must be 'fully', 'under' or 'over'.")
}
# Base the catch history on the stock history
catch_history <- switch(stock_params$stock_history,
"under" = rep(2*self$msy/3, self$last_historical_timestep),
"fully" = rep(self$msy, self$last_historical_timestep),
"over" = seq(from=3*self$msy/4, to=4*self$msy/3, length=self$last_historical_timestep)
)
niters <- dim(self$catch)[1]
catch_history_iters <- array(NA,dim=c(niters, self$last_historical_timestep))
catch_history_iters[] <- rep(catch_history, each=niters)
# Sling a load of noise on it
# Set seed so that the initial noise is always the same
# And that the catch history for each iteration is the same
set.seed(666)
catch_history_iters <- catch_history_iters * rep(rlnorm(dim(catch_history_iters)[2],meanlog=0,sdlog=0.1), each=niters)
# Set a proper random seed
set.seed(as.numeric(Sys.time()))
# Or don't include noise in initial period
self$catch[,1:self$last_historical_timestep] <- catch_history_iters
invisible(self)
},
#' @description
#' Fills the biomass in the next timestep based on current biomass and catches
#' The surplus production model has the general form:
#' \code{Bt+1 = Bt + f(Bt) - Ct}
#' Where the production function f() is a Pella & Tomlinson model with shape
#' \code{f(Bt) = r/p Bt * (1 - (Bt/k)^p)}
#' Here p is fixed at 1 to give a Schaefer model
#' \code{cpue = Ct / Et = qBt}
#' @param ts The biomass time step to be filled (requires catch etc. in ts - 1).
#' @param iters The iterations to calculate the biomass for (optional - default is all of them).
fill_biomass = function(ts, iters = 1:dim(self$biomass)[1]){
# Check that ts > 1
if(ts < 2){
stop("Cannot get biomass in ts = 1 (as you need an initial biomass)")
}
# Get fB in previous timestep
fB <- (self$r / self$p) * self$biomass[iters,ts-1] * (1 - (self$biomass[iters,ts-1] / self$k) ^ self$p)
# Apply correlated noise to r if not in the historical period
# fB <- fB * process_variability
# A value of b = 0.5 gives red noise; make it redder by increasing b towards 1
# Currently not an input but could be
if (ts > self$last_historical_timestep){
b <- 0.5
# Update current_corrnoise
self$current_corrnoise[iters] <- next_corrnoise(self$current_corrnoise[iters], b=b, sd=self$biol_sigma)
fB <- fB * (self$current_corrnoise[iters] + 1)
}
# Update biomass
self$biomass[iters,ts] <- self$biomass[iters,ts-1] + fB - self$catch[iters,ts-1]
# Biomass cannot be less than 1e-6
self$biomass[iters,ts] <- pmax(self$biomass[iters,ts],1e-6)
invisible(self)
},
#' @description
#' Produces a data.frame of some of the array-based fields, like biomass.
#' Just used for testing purposes.
as_data_frame = function(){
out <- data.frame(
iter = rep(1:dim(self$biomass)[1], each=dim(self$biomass)[2]),
year = rep(1:dim(self$biomass)[2], times=dim(self$biomass)[1]),
biomass = c(t(self$biomass)),
catch = c(t(self$catch)),
effort = c(t(self$effort)),
hcr_ip = c(t(self$hcr_ip)),
hcr_op = c(t(self$hcr_op))
)
return(out)
},
#' @description
#' Projects the stock over the time steps given and updates the biomass, HCR ip / op and catches
#' It uses a simple biomass dynamic model where the catches or fishing effort are set every time step by the harvest control rule.
#'
#' @param timesteps The timesteps to project over. A vector of length 2 (start and end).
#' @param mp_params A vector of management procedure parameters.
#' @param iters A vector of iterations to be projected. Default is all the iterations in the stock
#' @return The projected Stock object, invisibly.
project = function(timesteps, mp_params, iters=1:dim(self$biomass)[1]){
# Check timesteps - should be a range of two values
if (length(timesteps) == 1){
timesteps <- rep(timesteps,2)
}
if (length(timesteps) != 2){
stop("In project(). timesteps argument should be of length 2.")
}
if((timesteps[1] - mp_params$timelag) < 1){
stop("In project(). Trying to access element in yr less than 1.")
}
# Check iters
if(!all(iters %in% 1:dim(self$biomass)[1])){
stop("In project(). Iterations outside of range.")
}
# Loop over the timesteps and update catch and biomass
# yr is the year we get catch for
# yr + 1 is the year we get true biomass and HCR op for
for (yr in timesteps[1]:timesteps[2]){
# Setting up future catch and effort depends on the output of the HCR
# Could be catch, relative effort or some empirical based one
# The HCR OP will have already been set up (either in initialisation or end of this loop)
# This gets converted into the catch and effort that will be realised
# Note the relationship between: effort, catch, q, and biomass
# Catch based HCRs - like the typical catch threshold HCR
if (mp_params$output_type == "catch"){
self$catch[iters,yr] <- self$hcr_op[iters,yr]
self$effort[iters,yr] <- self$catch[iters,yr] / (self$q * self$biomass[iters,yr])
}
# base_effort used by relative effort HCRs
base_effort <- self$effort[iters,self$last_historical_timestep]
if (mp_params$output_type == "relative effort"){
# HCR OP is relative to last historical effort
self$effort[iters,yr] <- base_effort * self$hcr_op[iters,yr]
self$catch[iters,yr] <- self$effort[iters,yr] * self$q * self$biomass[iters,yr]
}
# Only used with empirical HCR - not yet added
#if (mp_params$output_type == "catch multiplier"){
# # Careful - yr may be 1, so yr-1 may be null
# new_catch <- self$catch[,yr-1] * self$hcr_op[,yr]
# new_catch[new_catch < 10] <- 10 # A minimum catch - set for safety
# self$catch[,yr]<- new_catch
# self$effort[,yr] <- self$catch[,yr] / (self$q * self$biomass[,yr])
#}
# Sometimes you get crazy high efforts (when biomass is low)
# So we limit the maximum effort relative to the last historical year
# After applying effort limit you need to recalculate catch to reflect what really happened
rel_effort <- self$effort[iters,yr] / base_effort
max_rel_effort <- pmin(rel_effort, 10) # Max relative effort capped at 10 - could be lower
self$effort[iters,yr] <- max_rel_effort * base_effort
# Update catch based on the updated effort
self$catch[iters,yr] <- self$effort[iters,yr] * self$q * self$biomass[iters,yr] #
# What was estimated CPUE used for? Empirical HCR?
# Update estimated cpue too
#true_cpue <- stock$catch[,yr] / stock$effort[,yr]
#stock$estimated_cpue[,yr] <- estimation_error(input = true_cpue, sigma = stock_params$biol_est_sigma, bias = stock_params$biol_est_bias)
#
# If room get the biomass and evaluate the HCR in the next time step, yr+1
# Bt+1 = Bt + f(Bt) - Ct
# f(Bt) = r/p Bt * (1 - (Bt/k)^p)
if (yr < dim(self$biomass)[2]){
# Update biomass
self$fill_biomass(ts = yr+1, iters=iters)
# Evaluate the HCR in the next time step, yr+1, to set up the next catch limit
self$hcr_ip[iters,yr+1] <- get_hcr_ip(stock = self, mp_params = mp_params, yr = yr+1, iters=iters)
self$hcr_op[iters,yr+1] <- get_hcr_op(stock = self, mp_params = mp_params, yr = yr+1, iters=iters)
}
} # End of main project timestep for loop
# Invalidate the object so Shiny gets triggered
private$invalidate()
invisible(self)
}, # End of project()
#' @description
#' The catch per unit effort (CPUE, or catch rate) relative to the CPUE in the last historical period.
#' @return An array of same dims as the catch and effort fields.
relative_cpue = function(){
cpue <- self$catch / self$effort
base_cpue <- cpue[,self$last_historical_timestep]
rel_cpue <- sweep(cpue, 1, base_cpue, "/")
return(rel_cpue)
},
#' @description
#' The effort relative to the effort in the last historical period.
#' @return An array of same dims as the effort field.
relative_effort = function(){
base_effort <- self$effort[,self$last_historical_timestep]
rel_effort <- sweep(self$effort, 1, base_effort, "/")
return(rel_effort)
},
#' @description
#' Summarises the final year of each iteration. Only used for the Measuring Performance app.
#' @param iters The iterations to calculate the table values for (default is iteration 1).
#' @param quantiles Numeric vector of the quantile range. Default values are 0.05 and 0.95.
replicate_table = function(iters = 1, quantiles = c(0.05, 0.95)){
# How to deal with iters being empty
final_ts <- dim(self$biomass)[2]
sbsbf0 <- self$biomass[iters,final_ts] / self$k
catch <- self$catch[iters,final_ts]
rel_cpue <- self$relative_cpue()[iters, final_ts]
# Get median and percentiles
sig_figs <- 2 # Number of significant figures to display; could make an option but no point
sbsbf0_qs <- signif(quantile(sbsbf0, probs=c(quantiles[1], 0.5, quantiles[2])), sig_figs)
sbsbf0_summary <- paste(sbsbf0_qs[2], " (", sbsbf0_qs[1], ",", sbsbf0_qs[3], ")", sep="")
catch_qs <- signif(quantile(catch, probs=c(quantiles[1], 0.5, quantiles[2])), sig_figs)
catch_summary <- paste(catch_qs[2], " (", catch_qs[1], ",", catch_qs[3], ")", sep="")
rel_cpue_qs <- signif(quantile(rel_cpue, probs=c(quantiles[1], 0.5, quantiles[2])), sig_figs)
rel_cpue_summary <- paste(rel_cpue_qs[2], " (", rel_cpue_qs[1], ",", rel_cpue_qs[3], ")", sep="")
# Put it all together.
# Reorder and sort so that the average and range are at the top, and the iterations are reversed
out <- data.frame(Replicate= rev(c(1:length(sbsbf0),"Average and range")),
  sbsbf0=rev(c(signif(sbsbf0, sig_figs), sbsbf0_summary)),
  Catch=rev(c(signif(catch, sig_figs), catch_summary)),
  rel_cpue=rev(c(signif(rel_cpue, sig_figs), rel_cpue_summary)))
colnames(out)[2] <- "Final biomass"
colnames(out)[3] <- "Final catch"
colnames(out)[4] <- "Final relative CPUE"
return(out)
},
#' @description
#' Calculates the short, medium and long term periods to calculate the performance indicators over,
#' based on the last historic year of data and the number of years in the projection.
time_periods = function(){
nprojyears <- dim(self$catch)[2] - self$last_historical_timestep
period_length <- round(nprojyears / 3)
short_term <- (self$last_historical_timestep + 1):(self$last_historical_timestep + period_length)
medium_term <- short_term + period_length
long_term <- (max(medium_term) + 1):dim(self$catch)[2]
all_years <- dimnames(self$catch)$year
short_term <- all_years[short_term]
medium_term <- all_years[medium_term]
long_term <- all_years[long_term]
short_name <- paste("Short (", short_term[1], "-", short_term[length(short_term)],")", sep="")
medium_name <- paste("Medium (", medium_term[1], "-", medium_term[length(medium_term)],")", sep="")
long_name <- paste("Long (", long_term[1], "-", long_term[length(long_term)],")", sep="")
out <- list(short = short_term, medium = medium_term, long = long_term)
names(out) <- c(short_name, medium_name, long_name)
return(out)
},
#' @description
#' Gets the performance indicators across all indicators, for three time periods.
#' Used in the Measuring Performance and Comparing Performance apps.
#' @param iters The iterations to calculate the table values for (default is all of them).
#' @param quantiles Numeric vector of the quantile range. Default values are 0.05 and 0.95.
#' @return A data.frame
performance_indicators = function(iters = 1:dim(self$biomass)[1], quantiles=c(0.05, 0.95)){
niters <- length(iters)
sbsbf0 <- self$biomass[iters,,drop=FALSE] / self$k
catch <- self$catch[iters,,drop=FALSE]
rel_cpue <- self$relative_cpue()[iters,,drop=FALSE]
rel_effort <- self$relative_effort()[iters,,drop=FALSE]
# Add median to quantile range
if (!(0.5 %in% quantiles)){
quantiles <- c(quantiles, 0.5)
}
# Reorder so pi_table() knows what order the quantiles are coming in
quantiles <- sort(quantiles)
# Probability above LRP
# Apply drops the dimensions which is annoying
prob_lrp <- apply(sbsbf0 > self$lrp, 2, sum, na.rm=TRUE) / niters
prob_lrp <- array(prob_lrp, dim=c(1, length(prob_lrp)), dimnames=list(iter = 1, year=names(prob_lrp)))
# Catch stability
catch_diff <- abs(catch[,2:ncol(catch), drop=FALSE] - catch[,1:(ncol(catch)-1), drop=FALSE])
max_catch_diff <- self$k / 10 # For rescale - Has to be same for all stocks - arbitrary
catch_stab <- (max_catch_diff - catch_diff) / max_catch_diff # rescale
catch_stab[catch_stab < 0] <- 0
# Proximity to TRP
max_distance_from_trp <- max(self$trp, 1 - self$trp)
prox_trp <- pmax(1.0 - (abs(sbsbf0 - self$trp) / max_distance_from_trp), 0.0)
# Get the averages over the time periods
# This all gets a bit gnarly
time_periods <- self$time_periods()
#data <- list(sbsbf0 = sbsbf0, prob_lrp = prob_lrp, catch = catch, rel_cpue = rel_cpue, rel_effort = rel_effort, catch_stab = catch_stab, prox_trp = prox_trp)
# Give them proper names
data <- list("Biomass" = sbsbf0, "Prob. > LRP" = prob_lrp, "Catch" = catch, "Relative CPUE" = rel_cpue, "Relative effort" = rel_effort, "Catch stability"= catch_stab, "Proximity to TRP" = prox_trp)
t2 <- lapply(data, function(x) {
lapply(time_periods, function(y) {
iter_means <- apply(x[,y,drop=FALSE], 1, mean, na.rm=TRUE)
quants <- quantile(iter_means, probs = quantiles, na.rm=TRUE)
})
})
# Force the order of the time period and metrics
time_period_names <- factor(names(time_periods), levels=names(time_periods))
data_names <- factor(names(data), levels=names(data))
out <- data.frame(pi = rep(data_names, each = length(time_periods) * length(quantiles)),
time_period = rep(rep(time_period_names, length(data)), each=length(quantiles)),
quantiles = rep(quantiles,length(time_periods) * length(data)),
value = unlist(t2), row.names = NULL)
return(out)
},
#' @description
#' Makes a table of the performance indicators.
#' @param quantiles Numeric vector, length 2, of the low and high quantiles.
#' @param iters The iterations to calculate the table values for (default is all of them).
pi_table = function(iters = 1:dim(self$biomass)[1], quantiles = c(0.05, 0.95)){
if(length(quantiles) != 2){
stop("In Stock$pi_table(). quantiles must be length 2")
}
pis <- self$performance_indicators(iters=iters, quantiles=quantiles)
# Paste the median and quantile range together for each PI and time period
out <- tapply(signif(pis$value,2), INDEX = list(pis$pi, pis$time_period), FUN = function(x) paste0(x[2], " (",x[1], ",", x[3], ")"))
out <- as.data.frame(out)
return(out)
}
) # End of public fields
)
| /scratch/gouwar.j/cran-all/cranData/AMPLE/R/stock_class.R |
# Module for setting up the stock, including life history parameters
#' stockParamsSetterUI
#'
#' stockParamsSetterUI() is the interface for the stock options (e.g. life history and exploitation status).
#'
#' @return A taglist
#' @rdname stock_module
#' @name Stock module
stockParamsSetterUI <- function(id){
ns <- NS(id)
header <- tags$p("Here you can adjust the settings for the app, including the life history of the stock and length of the projections. Note that changing these settings will reset the app.")
# Life history parameters - used to determine the r and K in the production model
stock_lh <- tags$span(title="Choose the life history of the stock: slow, medium or fast growing. Some HCRs are more appropriate for different life histories.",
radioButtons(ns("stock_lh"), label= "Stock life history", choices = list("Slow growth" = "slow", "Medium growth" = "medium", "Fast growth" = "fast"), selected = "medium"))
# Has the stock been over, under or fully exploited
stock_history <- tags$span(title="Choose the history of the stock. Underexploited means that the stock could potentially be fished harder. Overexploited means that a recovery plan may be needed.",
radioButtons(ns("stock_history"), label= "Stock history", choices = list("Underexploited" = "under", "Fully exploited" = "fully", "Overexploited" = "over"), selected = "fully")
)
# Length of projection, number of historical years and initial year
initial_year <- tags$span(title="First year of the stock history. It has no effect, it just changes the time axis labels.",
numericInput(ns("initial_year"), label="First year", value=2010, min=2000, max=2020, step=1))
nyears <- tags$span(title="Total number of years in the projection, including historical period.",
numericInput(ns("nyears"), label="Number of years", value=30, min=20, max=50, step=1))
last_historical_timestep <- tags$span(title="The number of years that make up the historical period.",
numericInput(ns("last_historical_timestep"), label="Length of historical period", value=10, min=2, max=19, step=1))
return(tagList(header, stock_lh, stock_history, initial_year, nyears, last_historical_timestep))
}
#' stockParamsSetterServer
#'
#' stockParamsSetterServer() does the setting of the stock parameters in the server.
#'
#' @param id Shiny magic
#' @param get_stoch_params Reactive expression that accesses the stochasticity module server.
#' @return A list of stock options.
#' @rdname stock_module
#' @name Stock module
#' @export
stockParamsSetterServer <- function(id, get_stoch_params=NULL){
moduleServer(id, function(input, output, session){
reactive({
stoch_params <- list(biol_sigma = 0)
if(!is.null(get_stoch_params)){
stoch_params <- get_stoch_params()
}
return(get_stock_params(input, biol_sigma=stoch_params$biol_sigma))
})
})
}
# Defined outside of above reactive so we can call it elsewhere
#' get_stock_params
#'
#' get_stock_params() sets up the values for the stock, including the year range.
#' It's a separate function so it can be used and tested outside of a reactive environment.
#' @param input List of stock parameters taken from the shiny UI (stockParamsSetterUI()).
#' @param biol_sigma Standard deviation of the biological variability (default = 0).
#'
#' @rdname stock_module
#' @name Stock module
#' @export
get_stock_params <- function(input, biol_sigma=0){
# Set r depending on the stock life history radio button
# Set MSY to be 100 for each stock
msy <- 100
# MSY = rk/4
r <- switch(input$stock_lh,
"slow" = 0.2,
"medium" = 0.6,
"fast" = 1.0)
# Could add check year to ensure that nyears and last_historical_timestep don't clash
# Input selectors should ensure it is OK, but additional check here to make sure.
# Trying to prevent NAs in the numericInputs
req(input$initial_year, input$last_historical_timestep, input$nyears)
out <- list(
r = r,
stock_history = input$stock_history, # to set up the initial trajectory
nyears = input$nyears,
initial_year = input$initial_year,
last_historical_timestep = input$last_historical_timestep,
biol_sigma = biol_sigma
)
return(out)
}
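# A minimal usage sketch (commented out): calling get_stock_params() with a
# mock input list, e.g. for testing outside of a running app. shiny::req()
# is used internally, so the shiny package must be loaded:
# get_stock_params(list(stock_lh = "medium", stock_history = "fully",
#   initial_year = 2010, nyears = 30, last_historical_timestep = 10))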
| /scratch/gouwar.j/cran-all/cranData/AMPLE/R/stock_module.R |
# All the sidebars have the same width and SPC logo
# ui_funcs.R
# Author: Finlay Scott (SPC) <[email protected]>
# Soundtrack: Disintegration Dubs by G36 vs JK Flesh
# Distributed under the terms of the GNU General Public License GPL (>= 3)
#' Sidebar setup for the apps
#'
#' Internal function, not exported, to help maintain consistent sidebars in a tabbed app.
#' @param ... Other stuff to add to the \code{sidebarPanel} function.
#' @noRd
#' @keywords internal
sidebar_setup <- function(...){
sb <- sidebarPanel(
width=3,
br(),
# Dynamically resize SPC logo to side bar width
img(src = "img/spc.png", width= "60%"),
br(),
br(),
br(),
...
) # End of sidebarPanel
return(sb)
}
#' Maintainer and licence information.
#'
#' Show the maintainer and licence for use in the AMPLE applications.
#' Also show the 'About SPC' information.
#'
#' @return A shiny.tag for use in Shiny apps
#' @noRd
#' @keywords internal
ample_maintainer_and_licence <- function(){
out <- tags$html(
tags$h1("AMPLE"),
tags$p("Amazing Management Procedures expLoration Engine"),
tags$footer(
tags$p("Version 1.0.0 Tarantula Deadly Cargo"),
tags$p("Copyright 2021 Pacific Community (SPC)"),
tags$p("Distributed under the GPL 3"),
tags$a("Soure code", href="https://github.com/PacificCommunity/ofp-sam-amped/tree/master/AMPLE")
)
)
return(out)
}
#' SPC about information
#'
#' SPC about information included in all the Shiny apps.
#' @noRd
#' @keywords internal
spc_about <- function(){
out <- tags$html(
tags$p(style="opacity: 0.5", class="caption", align="center", HTML("©"), "Pacific Community, 2021"),
tags$h1("About us:"),
tags$p(align="justify", "The Pacific Community (SPC) is the principal scientific and technical organisation in the Pacific region, proudly supporting development since 1947. It is an international development organisation owned and governed by its 26 country and territory members. The members are: American Samoa, Australia, Cook Islands, Federated States of Micronesia, Fiji, France, French Polynesia, Guam, Kiribati, Marshall Islands, Nauru, New Caledonia, New Zealand, Niue, Northern Mariana Islands, Palau, Papua New Guinea, Pitcairn Islands, Samoa, Solomon Islands, Tokelau, Tonga, Tuvalu, United States of America, Vanuatu, and Wallis and Futuna."),
tags$p(align="justify", "In pursuit of sustainable development to benefit Pacific people, this unique organisation works across more than 25 sectors. SPC is renowned for its knowledge and innovation in such areas as fisheries science, public health surveillance, geoscience and conservation of plant genetic resources for food and agriculture."),
tags$p(align="justify", "Much of SPC's focus is on major cross-cutting issues, such as climate change, disaster risk management, food security, gender equality, human rights, non-communicable diseases and youth employment. Using a multi-sector approach in responding to its members' development priorities, SPC draws on skills and capabilities from around the region and internationally, and supports the empowerment of Pacific communities and sharing of expertise and skills between countries and territories."),
tags$p(align="justify", "With over 600 staff, SPC has its headquarters in Noumea, regional offices in Suva and Pohnpei, a country office in Honiara and field staff in other Pacific locations. Its working languages are English and French. See: ", a("https://www.spc.int", href="www.spc.int"))
)
}
| /scratch/gouwar.j/cran-all/cranData/AMPLE/R/ui_funcs.R |
# Helpful functions for testing
# Not exported
#' Make dummy stock params
#'
#' Make dummy stock params to help with testing
#' @noRd
#' @keywords internal
make_dummy_stock_params <- function(r = 0.5, stock_history="fully", nyears = 30, initial_year = 2010, last_historical_timestep = 10, biol_sigma=0){
out <- list(
r = r,
stock_history = stock_history,
nyears = nyears,
initial_year = initial_year,
last_historical_timestep = last_historical_timestep,
biol_sigma = biol_sigma
)
return(out)
}
#' Make dummy mp params
#'
#' Make dummy mp params to help with testing
#' @noRd
#' @keywords internal
make_dummy_mp_params <- function(hcr_shape = "threshold", mp_analysis = "assessment", mp_type="model", output_type="catch", name = "Dummy", params = c(lim = 0.2, elbow = 0.5, min = 10, max = 140), est_bias = 0.0, est_sigma = 0.0, timelag = 0){
out <- list(hcr_shape = hcr_shape,
mp_analysis = mp_analysis,
mp_type = mp_type,
output_type = output_type,
name = name,
params = params,
est_bias = est_bias,
est_sigma = est_sigma,
timelag = timelag)
return(out)
}
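# A minimal usage sketch (commented out): the dummy parameter makers can be
# combined to build a small Stock object for tests, e.g.
# stock <- Stock$new(stock_params = make_dummy_stock_params(),
#                    mp_params = make_dummy_mp_params(), niters = 5)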
| /scratch/gouwar.j/cran-all/cranData/AMPLE/R/utility_test_funcs.R |
# zzz.R
# Stuff so that the logo in the inst/www folder gets found
# Adapted from help found here:
# https://community.rstudio.com/t/trouble-including-image-jpeg-png-svg-etc-in-shiny-app-embedded-in-r-package/56156/2
# Thanks!
.onLoad <- function(libname, pkgname) {
shiny::addResourcePath(
prefix = "img",
directoryPath = system.file(
"www/img",
package = "AMPLE"
)
)
}
.onUnload <- function(libpath) {
shiny::removeResourcePath("img")
}
| /scratch/gouwar.j/cran-all/cranData/AMPLE/R/zzz.R |
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
echo=FALSE, warning=FALSE, message=FALSE, out.width='100%'
)
## ----setup, echo=TRUE, eval=FALSE---------------------------------------------
# library(AMPLE)
# comparing_performance()
## ----start, fig.cap="The opening screen of the 'Comparing performance' app."----
knitr::include_graphics("comp_perf_start.png")
## ----hcr1, fig.cap="The results from evaluating HCR 1."-----------------------
knitr::include_graphics("comp_perf_hcr1.png")
## ----compbar1, fig.cap="Using bar charts to compare the average values of performance indicators of three candidate HCRs."----
knitr::include_graphics("comp_perf_compbar1.png")
## ----compbar2, fig.cap="Using bar charts to compare the average values of only four performance indicators of three candidate HCRs."----
knitr::include_graphics("comp_perf_compbar2.png")
## ----compbar3, fig.cap="Using bar charts to compare the average values of only three performance indicators after dropping HCR 2."----
knitr::include_graphics("comp_perf_compbar3.png")
## ----compbox1, fig.cap="Using box plots to compare the range of expected values of three performance indicators for HCR 1 and HCR 3."----
knitr::include_graphics("comp_perf_compbox1.png")
| /scratch/gouwar.j/cran-all/cranData/AMPLE/inst/doc/comparing_performance.R |
---
title: "Comparing the performance of Harvest Control Rules (HCRs)"
author: "Finlay Scott - OFP, SPC"
date: "2021-04-11"
output:
bookdown::html_document2:
base_format: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{comparing_performance}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
echo=FALSE, warning=FALSE, message=FALSE, out.width='100%'
)
```
# Introduction
This tutorial explores how to compare the performance of several Harvest Control Rules (HCRs).
It uses the *Comparing performance* Shiny app from the *AMPLE* package.
There are three apps in the *AMPLE* package. This is the third one.
If you are unsure what an HCR is you should look at the first app, *Introduction to HCRs*.
The previous tutorial (*Measuring the performance of HCRs*) introduced the idea of performance indicators (PIs) and how they can be used to measure the performance of an HCR in relation to the management objectives.
The performance of a proposed HCR is tested and evaluated using computer simulations (known as Management Strategy Evaluation - MSE).
During these evaluations the performance is measured using a collection of PIs.
In this tutorial we will generate PIs for several candidate HCRs and compare them.
By considering what the objectives of the fishery are, we can then select the most appropriate HCR.
**Note** that the fishery used in this tutorial is not based on a real world fishery or stock.
It's just a toy example.
Also **note** that in this app the biomass is scaled between 0 and 1, where 0 means there is no biomass (a bad thing) and 1 is the amount of biomass when there is no fishing, i.e. the highest it can be.
Scaling the biomass between 0 and 1 can make it easier to understand if the stock is over or underexploited.
**Important note about management procedures**.
An HCR is part of a management procedure (MP), along with two other elements: the data collection and the estimation process.
Under the harvest strategy approach, when an MP is agreed and adopted by stakeholders all three elements of the MP are agreed together.
In this tutorial, and in the app, we assume that the data collection and estimation process are the same for each HCR that we try.
When we talk about comparing HCRs we are really comparing MPs.
# Getting started
If you want to use the app online, it is available at the following address:
[https://ofp-sam.shinyapps.io/ample-comparing-performance/](https://ofp-sam.shinyapps.io/ample-comparing-performance/)
Alternatively, if you are using your own version of R and have installed the *AMPLE* package, you can run this app by entering the following commands into the R console:
```{r setup, echo=TRUE, eval=FALSE}
library(AMPLE)
comparing_performance()
```
When you start the app you should see something similar to the *Measuring performance* app (Figure \@ref(fig:start)).
```{r start, fig.cap="The opening screen of the 'Comparing performance' app."}
knitr::include_graphics("comp_perf_start.png")
```
On the left side of the main panel there are plots of catch, biomass and CPUE relative to the CPUE in the last historical year (2019).
These three plots have 10 years of historical data, from 2010 to 2019.
There is an additional year of data for biomass as it shows the biomass at the start of the year.
The horizontal dashed lines on the biomass plot are the Target Reference Point (TRP = 0.5) and Limit Reference Point (LRP = 0.2).
The HCR is shown at the top of the right-hand panel. The HCR is the red line.
The area underneath the HCR plot is currently blank...
In the left-hand panel there are various controls, including the HCR parameters.
The initial values of the HCR parameters should be: *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140.
If they are not you can set them using the controls in the left panel.
This HCR is known as *HCR 1*.
Note that your plots might look slightly different due to variations in the historic catches.
In this app, *biological variability* is already switched on and set to a value of 0.2.
You can change the variability options by looking in the **Settings** tab (but don't do this yet).
# Evaluating the performance
We are going to compare the performance of several HCRs.
We will do this by running projections with uncertainty for different HCRs and calculating a range of performance indicators (PIs) for them.
The PIs of the different HCRs can then be compared in different ways.
The basic process we will follow is:
* Set up an HCR using the HCR parameters on the left-hand side.
* Project the stock forward in time under that HCR (by pressing the **Project** button).
* Have a quick check of the resulting time series plots and PI values.
* If you like the HCR, add it to the basket of candidate HCRs (by pressing the **Add HCR to basket** button).
* When you have several HCRs in the basket, go to the **Compare performance** tab at the top of the screen and take a look at their relative performance.
## Evaluating the first HCR
The initial values of the HCR parameters should be: *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140. If not, set these parameters.
This HCR will be referred to as *HCR 1*.
Press the **Project** button to run the projection.
In the previous tutorial, pressing **Project** ran a single projection.
Here, pressing **Project** runs 250 replicates (i.e. 250 separate projections). This may take a few seconds, depending on your machine.
As noted above, biological variability is switched on and set to 0.2 so that each replicate is slightly different.
This is the same as using the *Measuring performance* app and clicking **Project** 250 times.
After clicking **Project** the time series plots will show the results.
The grey ribbon shows the range of values of the replicates (it contains 90% of the full range of values).
The average value (the dashed line), and the final replicate as an example (the solid line) are also shown.
A table of performance indicators has also appeared underneath the HCR plot.
Here we calculate PIs over three different time periods: short-, medium- and long-term.
These time periods can be seen on the time series plots of catch, biomass and relative CPUE as vertical dashed lines.
Seven performance indicators are calculated (Table \@ref(tab:pis)).
Table: (\#tab:pis) The seven performance indicators used to measure performance in this app.
| Performance indicator | Description |
|:-------|:-------|
| Biomass | The biomass relative to the unfished biomass.|
| Probability of being above the LRP | This reflects the risk of the stock being overfished. |
| Catch | The expected catches. |
| Relative CPUE | The CPUE (or catch rate) relative to the CPUE in the last historical year. |
| Relative effort | The fishing effort relative to the effort in the last historical year. |
| Catch stability | How much the catches change over time. A value of 1 means that the catches are very stable and do not change at all. A low value, close to 0, means that the catches fluctuate a lot over time (probably not a good thing). |
| Proximity to the TRP | How close the biomass is to the TRP on average. A value of 1 means that the biomass is always at the TRP. A lower value means that the biomass spends a lot of time being higher, or lower, than the TRP. |
Generally, for most indicators, the higher the value the better (e.g. higher catches, and higher catch stability are assumed to be better than lower catches and catch levels that change a lot over time).
However, higher fishing effort is not necessarily better as it may mean higher costs of fishing.
Similarly, higher biomass might not be better. If the biomass is too high, it may mean you could have fished more.
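To give a flavour of how these indicators are calculated, the short sketch below mirrors the 'Proximity to the TRP' calculation used inside the app (the biomass values here are made up for illustration). A biomass exactly at the TRP scores 1, and the score falls away linearly towards 0 as the biomass moves away from the TRP.

```{r proxtrp, echo=TRUE, eval=FALSE}
sbsbf0 <- c(0.5, 0.35, 0.7) # example biomass values (scaled between 0 and 1)
trp <- 0.5                  # Target Reference Point
max_distance_from_trp <- max(trp, 1 - trp)
prox_trp <- pmax(1 - abs(sbsbf0 - trp) / max_distance_from_trp, 0)
prox_trp # 1.0, 0.7, 0.6
```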
By looking at the summary plots and the table of PIs, we can decide if we think the current HCR is worth considering further.
The HCR we have just tested, *HCR 1*, looks OK (e.g. there is a high chance of being above the LRP and the expected catches look OK).
We therefore consider *HCR 1* as a *candidate* HCR, i.e. a candidate for adoption.
Click on the **Add HCR to basket** button to add the HCR to the basket of candidate HCRs. You should see that the counter **Number of HCRs in basket** increases by 1.
```{r hcr1, fig.cap="The results from evaluating HCR 1."}
knitr::include_graphics("comp_perf_hcr1.png")
```
## Add more HCRs to the basket
We now have one HCR in our basket. We are going to add two more HCRs to the basket and then compare the results of all three of them.
Repeat the above process (set up the HCR, run the projection, add to the basket) for two more HCRs.
Use the following parameters:
* HCR2: *Blim* = 0.2, *Belbow* = 0.3, *Cmin* = 10 and *Cmax* = 130
* HCR3: *Blim* = 0.2, *Belbow* = 0.7, *Cmin* = 10 and *Cmax* = 150
You should now have three HCRs in your basket (check the counter and don't forget to **Add HCR to basket** after projecting).
# Comparing the performance
We can now compare the performance of the three HCRs.
Select the **Compare performance** tab at the top of the app window.
In the main panel you should see a lot of bar plots.
Each window shows a different PI for each of the HCRs in the basket.
Each HCR has a different colour.
The height of the bars is the average value of that PI.
The bars are grouped into the three different time periods (short-, medium- and long-term) (Figure \@ref(fig:compbar1)).
```{r compbar1, fig.cap="Using bar charts to compare the average values of performance indicators of three candidate HCRs."}
knitr::include_graphics("comp_perf_compbar1.png")
```
We are looking at the average values of seven PIs for three HCRs in three different time periods.
This is a lot of information!
We want to be able to choose which HCR best fits our objectives but it can be difficult when there is so much to look at.
To make things easier, we can drop PIs that we think are unimportant (perhaps they do not measure anything related to your management objectives) by deselecting them from the list in the left panel.
Bear in mind that a PI that is unimportant to you may be very important to someone else.
Similarly, candidate HCRs can be deselected if they do not meet the objectives.
For example, we might think that the *Biomass*, *Catch stability* and *Relative effort* are less important than the other PIs.
Deselect them from the list on the left.
This still gives us four PIs left to consider (Figure \@ref(fig:compbar2)).
```{r compbar2, fig.cap="Using bar charts to compare the average values of only four performance indicators of three candidate HCRs."}
knitr::include_graphics("comp_perf_compbar2.png")
```
The results for the probability of being above LRP look pretty similar for the three HCRs.
They all show a high probability of being above the LRP.
This means that this indicator is not providing us with any information to help us choose between the HCRs (note that it may be helpful for other HCRs).
We can deselect it, leaving us with just three PIs to consider.
Looking at the performance of *HCR 2*, we can see that in the short-term the average catches are higher than *HCR 1* and *HCR 3*.
However, in the medium- and long-term it has the lowest catches.
Additionally, it has the lowest relative CPUE and lowest 'proximity to the TRP' (i.e. it is the furthest away from the TRP) in all three time periods.
Unless high catches in the short-term is the absolute top priority, we can decide that *HCR 2* does not meet our management objectives as well as *HCR 1* and *HCR 3*.
Deselect *HCR 2* from the list on the left (Figure \@ref(fig:compbar3)).
```{r compbar3, fig.cap="Using bar charts to compare the average values of only three performance indicators after dropping HCR 2."}
knitr::include_graphics("comp_perf_compbar3.png")
```
We now have only two HCRs to consider.
The choice between *HCR 1* and *HCR 3* depends on what objectives you have.
This will depend on your *priorities* and what *trade-offs* you are prepared to accept.
Considering catches, *HCR 1* has slightly higher average catches in the short-term. The average catches in the medium- and long-term are similar for both HCRs.
*HCR 3* has higher average relative CPUE in all three time periods.
*HCR 3* also gets you slightly closer to the TRP in the medium- and long-term.
These are the kind of decisions that managers and stakeholders will need to make.
You should always have your management objectives in the back of your mind when reviewing the results.
## Box plots
So far, we have only used the bar charts that show the *average* values of the PIs.
This is only part of the story.
Click on the tab at the top that says **Performance indicators - box plots**.
Box plots give you the *range* of expected values of each indicator, and hence the **uncertainty** in their value.
The taller the box, and the longer the whiskers, the greater the range of expected values.
The black line across the middle of each box is the average value, the same that is in the bar charts in the previous tab (Figure \@ref(fig:compbox1)).
```{r compbox1, fig.cap="Using box plots to compare the range of expected values of three performance indicators for HCR 1 and HCR 3."}
knitr::include_graphics("comp_perf_compbox1.png")
```
For example, the average value of the catches in the medium- and long-term is about the same for both HCRs (the black line in the middle is at the same height).
However, the box for *HCR 1* is bigger than that of *HCR 3*.
This means that, although the average value might be the same, we are *less certain* about what the catches will actually be under *HCR 1*.
This can be an important factor when considering which HCR you prefer.
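The ranges shown by the boxes and whiskers are based on quantiles of the replicate values. As a sketch, the 90% range used in the app runs from the 5th to the 95th percentile, which can be calculated with R's `quantile()` function (the replicate values below are made up):

```{r quantilesketch, echo=TRUE, eval=FALSE}
catch_replicates <- rlnorm(250, meanlog = log(100), sdlog = 0.2) # made-up replicate catches
quantile(catch_replicates, probs = c(0.05, 0.5, 0.95)) # the 90% range and the median
```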
## Table
Finally, it is possible to see all the indicators for each HCR in a table (click on the **Performance indicators - tables** tab).
This might not look as pretty, but can be very useful for really digging down into the results, particularly if you have narrowed your choices to just a few HCRs and PIs.
# Exercises
Return to the *HCR selection* tab at the top, click the *Empty basket* button (and confirm that you are happy to do that).
This will empty all the HCRs from your basket.
## Exercise 1
Using a similar process as above, find an HCR that meets the following two conditions:
* Gives the highest possible average catches in the short-term and
* Always has a probability of the biomass being above the LRP of at least 0.8.
Try out as many HCRs as you want (5, 10, 20..., keep adding them to the basket), then use the different methods for exploring the performance to select the best one.
## Exercise 2
Now find an HCR that meets these two conditions:
* Gives the highest possible average relative CPUE in the long-term and
* Maintains average catches above 100 in all time-periods.
Write down your final HCR parameter settings and also why you think it is the best compared to the others.
# Summary
Choosing a preferred HCR is not a simple task.
It is possible to calculate many different performance indicators to measure their performance.
The ranges of the indicator values should be considered as well as their average value.
Additionally, you can have different time periods to consider.
It may not be possible to find a HCR that performs well for all the chosen PIs.
In this case PIs should be considered in order of their priority and trade-offs will need to be evaluated.
| /scratch/gouwar.j/cran-all/cranData/AMPLE/inst/doc/comparing_performance.Rmd |
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
echo=FALSE, warning=FALSE, message=FALSE, out.width='100%'
)
## ----plothcr, fig.cap="A threshold catch shape HCR. The shape is defined by four parameters: Cmin and Cmax (which determine the minimum and maximum catch limit) and Blim and Belbow (which determine the start and stop of sloping section).", fig.width=8, fig.height=6----
xrange <- c(0, 1)
yrange <- c(0, 150)
xlab <- "Estimated stock biomass"
ylab <- "Next catch limit (t)"
# Plot empty axes
plot(x=xrange,y=yrange,type="n",xlab=xlab, ylab=ylab, xaxs="i", yaxs="i")
lines(x = c(0, 0.2, 0.5, 1),
y = c(rep(10, 2), rep(140, 2)), lwd=2, lty=1, col="red")
grid()
# Label the parameters
text(x=0.3, y=10, labels = "Cmin", pos=4, col="blue")
lines(x=c(0.2, 0.3), y=c(10, 10), lty=2, col="blue")
text(x=0.4, y=140, labels = "Cmax", pos=2, col="blue")
lines(x=c(0.4, 0.5), y=c(140, 140), lty=2, col="blue")
text(x=0.2, y=50, labels = "Blim", pos=3, col="blue")
lines(x=c(0.2, 0.2), y=c(10, 50), lty=2, col="blue")
text(x=0.5, y=110, labels = "Belbow", pos=1, col="blue")
lines(x=c(0.5, 0.5), y=c(110, 140), lty=2, col="blue")
## ----setup, echo=TRUE, eval=FALSE---------------------------------------------
# library(AMPLE)
# intro_hcr()
## ----start, fig.cap="The opening screen of the 'Introduction to HCRs' app."----
knitr::include_graphics("intro_hcr_start.png")
## ----advance, fig.cap="After pressing Advance a single time, fishing has occurred in 2020 using the catch limit that was set by the HCR. The stock biomass has been affected by the new catch limit. The HCR uses the estimate of stock biomass at the start of 2021 to set the catch limit for 2021."----
knitr::include_graphics("intro_hcr_hcr11.png")
## ----hcr1, fig.cap="The results of running a full projection of HCR 1."-------
knitr::include_graphics("intro_hcr_hcr1.png")
## ----hcr2, fig.cap="The results of running a full projection with HCR 2."-----
knitr::include_graphics("intro_hcr_hcr2.png")
## ----hcr3, fig.cap="The results of running a full projection with HCR 3."-----
knitr::include_graphics("intro_hcr_hcr3.png")
## ----hcr1noise, fig.cap="The results of running a full projection of HCR 1 with biological variability (note that your plot may look different)."----
knitr::include_graphics("intro_hcr_hcr1_noise.png")
| /scratch/gouwar.j/cran-all/cranData/AMPLE/inst/doc/intro_hcr.R |
---
title: "Introduction to Harvest Control Rules (HCRs)"
author: "Finlay Scott - OFP, SPC"
date: "2021-02-11"
output:
bookdown::html_document2:
base_format: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{intro_hcr}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
echo=FALSE, warning=FALSE, message=FALSE, out.width='100%'
)
```
# Introduction to HCRs
This tutorial is a quick introduction to Harvest Control Rules (HCRs) and their use in fisheries management.
It uses the *Introduction to HCRs* Shiny app from the *AMPLE* package.
There are three apps in the *AMPLE* package. This is the first one and is the one you should start with if you are unsure what an HCR is.
In this tutorial we will cover what an HCR is, how they operate, and start to think about the impact of uncertainty on HCR performance.
**Note** that the fishery used in this tutorial is not based on a real world fishery or stock.
It's just a toy example.
# What is an HCR?
An HCR is a pre-agreed decision rule that is used to set fishing opportunities in the future.
The rule is agreed by stakeholders and managers of that fishery.
An HCR should be designed so that the management objectives of the fishery have the greatest chance of being achieved.
As the real world can be a very uncertain place, a good HCR is robust to different sorts of uncertainty (which we will discuss later on).
In this tutorial we will use a simple HCR to set the catch limit of a fishery in every year.
The HCR uses estimates of the current stock biomass to set that catch limit.
(HCRs can use information other than stock biomass but we will keep things simple for now.)
This rule will be applied every year in the future to set new catch limits in each year.
**Note** that in this app the biomass is scaled between 0 and 1, where 0 means there is no biomass (a bad thing) and 1 is the amount of biomass when there is no fishing, i.e. the highest it can be.
Scaling the biomass between 0 and 1 like this can make it easier to understand if the stock is over or underexploited.
HCRs can have different basic shapes.
In this tutorial we will use a shape called a *threshold catch*. This is sometimes known as a 'hockey stick'.
The catch limit is set according to the general rule shown in Figure \@ref(fig:plothcr).
```{r plothcr, fig.cap="A threshold catch shape HCR. The shape is defined by four parameters: Cmin and Cmax (which determine the minimum and maximum catch limit) and Blim and Belbow (which determine the start and stop of sloping section).", fig.width=8, fig.height=6}
xrange <- c(0, 1)
yrange <- c(0, 150)
xlab <- "Estimated stock biomass"
ylab <- "Next catch limit (t)"
# Plot empty axes
plot(x=xrange,y=yrange,type="n",xlab=xlab, ylab=ylab, xaxs="i", yaxs="i")
lines(x = c(0, 0.2, 0.5, 1),
y = c(rep(10, 2), rep(140, 2)), lwd=2, lty=1, col="red")
grid()
# Label the parameters
text(x=0.3, y=10, labels = "Cmin", pos=4, col="blue")
lines(x=c(0.2, 0.3), y=c(10, 10), lty=2, col="blue")
text(x=0.4, y=140, labels = "Cmax", pos=2, col="blue")
lines(x=c(0.4, 0.5), y=c(140, 140), lty=2, col="blue")
text(x=0.2, y=50, labels = "Blim", pos=3, col="blue")
lines(x=c(0.2, 0.2), y=c(10, 50), lty=2, col="blue")
text(x=0.5, y=110, labels = "Belbow", pos=1, col="blue")
lines(x=c(0.5, 0.5), y=c(110, 140), lty=2, col="blue")
```
The current stock biomass level is on x-axis along the bottom of the plot.
The catch limit that will be set in the next year is on the y-axis on the side.
The red line is the rule that sets the catch limit given the estimated stock biomass.
The basic idea is that if stock biomass is low the catch limit is low, and if the stock biomass is high the catch limit is high.
By pre-agreeing the shape of the rule, you know exactly what the catch limit will be, given the estimated status of the stock.
This is easier than all the stakeholders and managers arguing every time a catch limit needs to be set (which takes time and energy and can delay important decisions being made).
Choosing the right HCR is obviously very important.
The shape of this HCR is determined by 4 parameters: *Blim*, *Belbow*, *Cmin* and *Cmax*, shown in blue in the figure.
*Cmin* and *Cmax* determine the minimum and maximum catch limit.
*Blim* and *Belbow* determine the minimum and maximum biomass of the sloping section.
When the estimated stock biomass is greater than *Belbow* the catch limit is set at the maximum catch limit, *Cmax*.
When stock biomass is less than *Blim* the catch limit is set at minimum catch limit, *Cmin*.
When stock biomass is between *Blim* and *Belbow* the catch limit is set according to where it lies on the slope.
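To make the shape of the rule concrete, here is a minimal R sketch of the threshold catch rule (an illustration only, not the app's internal code; the function name `threshold_catch()` is made up for this example). It linearly interpolates between the four parameters:

```{r hcrsketch, echo=TRUE, eval=FALSE}
# Cmin below Blim, Cmax above Belbow, and a straight line in between
threshold_catch <- function(biomass, blim = 0.2, belbow = 0.5, cmin = 10, cmax = 140){
  approx(x = c(0, blim, belbow, 1), y = c(cmin, cmin, cmax, cmax), xout = biomass)$y
}
threshold_catch(biomass = 0.35) # halfway up the slope: a catch limit of 75
```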
**Important note about management procedures**.
An HCR is part of a management procedure (MP), along with two other elements: the data collection and the estimation process.
Under a harvest strategy approach, when an MP is agreed and adopted by stakeholders all three elements of the MP are agreed together.
In this tutorial, and in the app, we assume that the data collection and estimation process are the same for each HCR that we try.
When we talk about comparing HCRs we are really comparing MPs.
# Getting started
If you want to use the app online, it is available at the following address:
[https://ofp-sam.shinyapps.io/AMPLE-intro-hcr/](https://ofp-sam.shinyapps.io/AMPLE-intro-hcr/)
Alternatively, if you are using your own version of R and have installed the *AMPLE* package, you can run this app by entering the following commands into the R console:
```{r setup, echo=TRUE, eval=FALSE}
library(AMPLE)
intro_hcr()
```
When you start the app you should see three plotting windows and a blue arrow in the main panel, and some controls in the left-hand panel (Figure \@ref(fig:start)).
Your plots might look slightly different due to variations in the historic catches.
```{r start, fig.cap="The opening screen of the 'Introduction to HCRs' app."}
knitr::include_graphics("intro_hcr_start.png")
```
The two plots on the left-hand side of the main panel show the history of the catch and stock biomass.
There are 10 years of catch history from 2010 to 2019 (the biomass has an extra year as it is the biomass at the *start* of the year).
Remember that the biomass is scaled between 0 and 1, where 1 is the unfished stock biomass, i.e. the maximum biomass.
There are two horizontal dashed lines on the biomass plot. The higher one, at 0.5, is the Target Reference Point (TRP).
The TRP is a management objective. Ideally, the stock biomass should be close to this line.
The lower horizontal line, at 0.2, is the Limit Reference Point (LRP).
If the stock biomass falls below this level the stock is considered to be overfished and may be in serious trouble.
The HCR is shown in the top-right panel. It is the red line (ignore the blue dashed lines for the moment).
As mentioned above, the shape of the HCR is determined by 4 parameters: *Blim*, *Belbow*, *Cmin* and *Cmax*.
The HCR parameter values can be controlled using the sliders on the left-hand side of the screen.
The first thing to do is to set the parameters of the HCR.
Set these values (if not already done): *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140.
These are the default initial values, but make sure that they have been set correctly.
We will refer to this HCR as *HCR 1*.
**Note** that as you change the HCR parameters, the shape of the HCR will change.
# Using the HCR
The purpose of the HCR is to set the catch limit each year.
The HCR uses the current estimated value of stock biomass to set the next catch limit.
This rule will be applied every year in the future to set new catch limits in each year.
We start at the very beginning of 2020 and we want to use *HCR 1* to set the catch limit for 2020.
The stock biomass can be seen in the bottom-left plot.
The way the HCR operates can be seen by following the blue arrow from the biomass plot to the HCR plot.
The current estimated value of biomass is shown on the HCR plot as the blue dashed vertical line.
This is the same value of biomass as the latest point in the biomass plot.
The catch limit in the following year is set by reading the corresponding catch limit from the HCR.
This is where the blue dashed vertical line hits the red HCR line (marked by the blue dot).
The new catch limit is shown by the blue dashed horizontal line on the HCR plot.
The new catch limit is also shown on the catch plot at the top-left, as the blue dashed line.
This represents what the catches *will* be in 2020.
We can see how the HCR works by going forward a year.
Press the **Advance** button in the left panel.
You have now advanced forward in time by a year. Several things have happened (Figure \@ref(fig:advance)):
* The catch limit that was set by the HCR for 2020 has been used by the fishery. In the catch plot you should see that the catch in the year 2020 has reached this catch limit.
* The stock has responded to being fished in 2020. You should see that we now have an extra year in the biomass plot up to 2021.
* The shape of the blue arrow to the right of the biomass plot has shifted to reflect the new current estimate of biomass. The blue vertical dashed line on the HCR plot has moved left or right as well.
* The HCR has used this new value of biomass to set a new catch limit for 2021 (this may be the same value as the previous catch limit).
```{r advance, fig.cap="After pressing Advance a single time, fishing has occurred in 2020 using the catch limit that was set by the HCR. The stock biomass has been affected by the new catch limit. The HCR uses the estimate of stock biomass at the start of 2021 to set the catch limit for 2021."}
knitr::include_graphics("intro_hcr_hcr11.png")
```
Press the **Advance** button again. The catch limit that was set for 2021 is used by the fishery. The stock responds to being fished again and we have a new estimate of biomass for the beginning of 2022.
This new estimate of biomass is again used by the HCR to set the new catch limit for 2022 (the blue arrow will move again to reflect this).
Keep pressing the **Advance** button until you get to the end of the projection (in 2040).
You should see the cycle:
* The HCR sets the catch limit based on the current estimate of stock biomass
* Fishing at the catch limit affects the stock biomass
* The next estimate of stock biomass is used by the HCR to set a new catch limit.
The previous values set by the HCR are shown by grey dots on the HCR plot, and the previous catch limits are shown on the catch plot as grey dashed horizontal lines.
You should see that eventually the fishery settles down to a steady catch limit and stock abundance (Figure \@ref(fig:hcr1)).
```{r hcr1, fig.cap="The results of running a full projection of HCR 1."}
knitr::include_graphics("intro_hcr_hcr1.png")
```
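To see this settling behaviour numerically, here is a toy sketch of the annual cycle using the `threshold_catch()` function from the earlier sketch. The logistic growth term and the crude rescaling of the catch to the 0-1 biomass scale are made up for illustration; they are not the population model used by the app, so the settled values will differ from those in the app.
```{r cyclesketch, echo=TRUE}
# Toy projection loop (illustration only): the HCR sets the catch limit from
# the current biomass, fishing affects next year's biomass, and the cycle
# repeats. The growth term and the division by 1000 are invented.
b <- 0.4
for (yr in 2020:2027) {
  catch <- threshold_catch(b)                 # HCR sets the catch limit
  b <- b + 0.6 * b * (1 - b) - catch / 1000   # toy stock response
  cat(yr, ": catch limit =", round(catch), " next biomass =", round(b, 2), "\n")
}
```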
# Trying different HCRs
Press the **Reset** button in the left panel to clear the stock projection.
Run through the projection again by pressing the **Advance** button. Make sure that you understand how the HCR uses the latest value of biomass to set the catch limit.
Keep pressing the **Advance** button until you get to the end of the projection.
In the table below write down the final value of the catch and the final value of biomass that you see on the plots.
Also, note down anything interesting (if anything) that you see.
For example, what is the biggest change in catch that you see?
Different HCRs behave in different ways and some are better at achieving management objectives than others.
The parameters of the HCR that we just tried are: *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140.
<!-- Final catch = 100 Final B = 0.4 When does it settle: 10 yrs-->
Now we try a different HCR.
The general 'hockey stick' shape will be the same, but we will use different parameter values, set using the controls in the left panel.
Change *Belbow* to be 0.3 and *Cmax* to 130. Keep the other two parameters the same.
We will refer to this HCR as *HCR 2*.
You should see that the HCR plot has been updated to show the new shape of the HCR.
Notice that this HCR has a lower maximum catch limit than the previous HCR.
This limit will be set if the biomass is greater than 0.3 (the value of *Belbow*).
As before, repeatedly press the **Advance** button and follow the evolution of the stock and the catches (Figure \@ref(fig:hcr2)).
Note how the behaviour of the catch and biomass are different to the initial example with *HCR 1* (Figure \@ref(fig:hcr1)).
The biology of the fish stock is *exactly* the same as the example we ran earlier with *HCR 1*.
The *only* difference between the two projections is the shape of the HCR used to set future catch limits.
As before, write down the final values and any observations in the table at the end of this section.
```{r hcr2, fig.cap="The results of running a full projection with HCR 2."}
knitr::include_graphics("intro_hcr_hcr2.png")
```
As a final example set *Belbow* to 0.8 and *Cmax* to 150 and keep the other parameters the same.
We will refer to this HCR as *HCR 3*.
Notice that this HCR has a higher maximum catch limit but a much longer, shallower slope.
The catch limit will only be at this maximum catch limit if the biomass is greater than 0.8 (*Belbow*).
Again, repeatedly press the **Advance** button and follow the evolution of the stock and catch limits (Figure \@ref(fig:hcr3)).
Write down the behaviour and final values in the table.
```{r hcr3, fig.cap="The results of running a full projection with HCR 3."}
knitr::include_graphics("intro_hcr_hcr3.png")
```
Table: (\#tab:results) Results from testing the three HCRs. After running a full projection, enter the final values of the biomass, the catch, and any observations you have.
| HCR | Final biomass | Final catch | Anything interesting observed |
|:----|--------:|--------:|--------:|
| HCR 1 (Belbow=0.5, Cmax=140) | | | |
| HCR 2 (Belbow=0.3, Cmax=130) | | | |
| HCR 3 (Belbow=0.8, Cmax=150) | | | |
You should have seen that different parameterisations of the HCR lead to different fishery dynamics and final settled values.
You have now looked at the behaviour of three different HCRs.
* Which of these HCRs do you prefer and why?
* Which HCR gets the biomass closest to the TRP of 0.5?
* Which HCR results in the highest catches?
* Do any of them take the stock close to or below the LRP of 0.2?
# Introducing uncertainty
In the real world fisheries management is affected by different types of uncertainty.
By uncertainty, we mean things that we are not completely sure about, for example the exact status of the stock, and also things that are difficult to predict in the future, for example future recruitment.
The projections we have run so far have not considered uncertainty (they are known as *deterministic* simulations).
This means that if we run another projection with the same HCR, the outcome will be exactly the same.
Try this now and see.
It is very important to choose an HCR that is robust to different sources of uncertainty, otherwise the outcome may not be what you expected.
For example, an HCR that performs well when future stock recruitment is stable may not perform well when stock recruitment varies a lot.
We will look at this here.
Uncertainty is included in the app in two ways: biological variability in the stock dynamics (e.g. natural variability in recruitment) and through differences between the estimated and 'true' level of stock biomass.
These options are initially turned off.
Click on **Show variability options** in the panel on the left to show the uncertainty options.
## Biological variability
Biological variability reflects the natural variability in the stock dynamics, for example through variability in the recruitment, growth and natural mortality.
It means that the future states of these biological processes are very difficult to predict with any great accuracy.
Fisheries managers have no control over this source of uncertainty.
As such it is very important that an adopted HCR is robust to this uncertainty.
We saw in the previous examples that, without uncertainty, eventually the projected stock biomass settles down to a constant value.
What happens when we include natural variability?
Set the HCR parameters back to the values for *HCR 1* (*Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10, *Cmax* = 140).
Increase the **Biological variability** option to 0.2 and project forward through time using the **Advance** button.
You should see that, unlike before, the biomass now bounces around and is not perfectly flat (Figure \@ref(fig:hcr1noise)).
This is because the variability in the stock biology is affecting the abundance.
For example, in some years the stock may produce more recruits than in other years for the same level of biomass.
This causes fluctuations in the population abundance.
```{r hcr1noise, fig.cap="The results of running a full projection of HCR 1 with biological variability (note that your plot may look different)."}
knitr::include_graphics("intro_hcr_hcr1_noise.png")
```
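One common way to simulate this kind of variability is to multiply the annual growth by lognormal noise. Here is a sketch using the same toy dynamics as before (again, an illustration only, not the app's actual population model):
```{r biovarsketch, echo=TRUE}
# Illustration only: lognormal noise on the toy growth term makes every
# projection slightly different. sigma plays the role of the
# 'Biological variability' slider.
set.seed(42)
sigma <- 0.2
b <- 0.4
for (yr in 2020:2025) {
  noise <- exp(rnorm(1, mean = 0, sd = sigma) - sigma^2 / 2)  # mean-corrected
  b <- b + 0.6 * b * (1 - b) * noise - threshold_catch(b) / 1000
  cat(yr, ": biomass =", round(b, 2), "\n")
}
```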
As the HCR uses the estimate of biomass to set the catch limit, it means that the catch limit set by the HCR also bounces around.
The catch limits set by the HCR then go on to affect the stock biomass in the same way as before.
If you press the **Reset** button and run the projection again you should see that you get a different trajectory (uncertainty in action!).
Different HCRs respond differently to biological variability.
Try the following to see:
* Set up HCR 2 again (*Belbow* = 0.3, *Cmax* = 130) and run several complete projections with the biological variability set to 0.2. What do you notice? Are the catches more or less stable than with HCR 1?
* Set up HCR 3 again (*Belbow* = 0.8, *Cmax* = 150) and run several complete projections with the biological variability set to 0.2. What do you notice?
* From this very brief experiment, which of the three HCRs do you prefer and why?
## Estimation error
In the real world we do not know the true stock abundance.
This means that an HCR is not driven by the *true* value of biomass. Instead it is driven by an *estimated* value (we can never know the true value).
For example, the biomass can be estimated by a stock assessment model.
The HCR therefore uses an *estimated* value of the stock biomass to set the next catch limit, not the *true* value.
The difference between the estimated and true value of biomass is called the estimation error and it can have an important impact on the performance of an HCR.
Here estimation error is simulated in two ways: *variability* and *bias*. These can be combined.
To demonstrate these, turn the **Biological variability** back to 0 and increase the **Estimation variability** to 0.2 (leave **Estimation bias** as 0 for now).
This estimation variability is a crude way of simulating that we don't really know the true value of the biomass.
Project the stock forward several times using the **Advance** button.
You should see that the biomass plot now shows two lines. The black one is the *true* abundance and the blue one is the *estimated* abundance.
It is the estimated abundance that is used by the HCR to set the catch limit.
You should see that the stock and catch bounce around as you project forward. This variability is not caused by any biological variability (you have turned that off) but from the HCR using the uncertain *estimated* value of biomass to set the catch limit.
The catch limit, of course, affects the *real* stock abundance.
If you press **Reset** and run the same projection again by pressing **Advance** you should see a slightly different outcome. Try it.
Now turn the **Estimation variability** back to 0 and set the **Estimation bias** to 0.1.
This means that the estimated value of biomass is always 10% higher than the true value, i.e. we are always overestimating the stock abundance.
Project forward and see what happens. Compare the *true* final values of biomass (the black line on the plot) and catch to the numbers you wrote down in the table above for the first HCR.
Are the final values higher or lower than when there is no bias?
<!-- I get lower values than when no bias -->
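As a sketch of how these two kinds of estimation error could be simulated (an illustration only, not the app's internal code), the estimated biomass can be modelled as the true biomass multiplied by a bias term and by lognormal noise:
```{r estsketch, echo=TRUE}
# Illustration only: estimated biomass = true biomass x (1 + bias) x noise.
# est_sigma plays the role of the 'Estimation variability' slider and
# est_bias the role of the 'Estimation bias' slider.
est_biomass <- function(true_b, est_sigma = 0, est_bias = 0) {
  noise <- exp(rnorm(length(true_b), mean = 0, sd = est_sigma) - est_sigma^2 / 2)
  true_b * (1 + est_bias) * noise
}
set.seed(1)
est_biomass(0.5, est_sigma = 0.2)  # a noisy estimate of a true biomass of 0.5
est_biomass(0.5, est_bias = 0.1)   # a 10% overestimate
# The HCR would then be driven by the estimate, not the true value
```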
In the real world, both biological variability and estimation error are operating at the same time.
It is very important that the selected HCR is robust to this uncertainty so that the management objectives can be achieved.
# Summary
An HCR is a decision rule for setting future fishing opportunities.
In this app, the input to the rule is the *estimated* stock biomass and the output is the catch limit in the following year.
We have looked at three different HCRs and seen that they perform differently.
Uncertainty is a big concern in fisheries management.
Here we looked at biological and estimation uncertainty.
We have seen that they can change the performance of the HCR and the fishery.
It is very important that an HCR is robust to uncertainty.
An HCR that performs well in the absence of uncertainty may not perform as well when uncertainty is present.
The big question is: how do we know which HCR to use?
To answer this question, see the next tutorial *Measuring performance*.
| /scratch/gouwar.j/cran-all/cranData/AMPLE/inst/doc/intro_hcr.Rmd |
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
echo=FALSE, warning=FALSE, message=FALSE, out.width='100%'
)
## ----setup, echo=TRUE, eval=FALSE---------------------------------------------
# library(AMPLE)
# measuring_performance()
## ----start, fig.cap="The opening screen of the 'Measuring performance' app."----
knitr::include_graphics("meas_perf_start.png")
## ----hcr11, fig.cap="The result from running a single projection with no uncertainty with HCR 1."----
knitr::include_graphics("meas_perf_hcr1_1.png")
## ----hcr12, fig.cap="The result from running a single projection with biological variability with HCR 1."----
knitr::include_graphics("meas_perf_hcr1_2.png")
## ----hcr110, fig.cap="The result from running 10 replicates with biological variability with HCR 1."----
knitr::include_graphics("meas_perf_hcr1_10.png")
## ----hcr150, fig.cap="The performance indicators from running 50 replicates with biological uncertainty with HCR 1."----
knitr::include_graphics("meas_perf_hcr1_50pi.png")
| /scratch/gouwar.j/cran-all/cranData/AMPLE/inst/doc/measuring_performance.R |
---
title: "Measuring the performance of harvest control rules (HCRs)"
author: "Finlay Scott - OFP, SPC"
date: "2021-03-11"
output:
bookdown::html_document2:
base_format: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{measuring_performance}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
echo=FALSE, warning=FALSE, message=FALSE, out.width='100%'
)
```
# Introduction
This tutorial explores how the performance of a Harvest Control Rule (HCR) can be measured.
It uses the *Measuring performance* Shiny app from the *AMPLE* package.
There are three apps in the *AMPLE* package. This is the second one.
If you are unsure what an HCR is you should look at the first app, *Introduction to HCRs*.
The previous tutorial (*Introduction to HCRs*) introduced HCRs and how they can be used to set future fishing opportunities based on estimates of stock status.
This allowed us to explore how HCRs work and we saw that different HCRs can perform differently.
We also looked at two sources of uncertainty, *biological variability* and *estimation error*, and saw that they can affect the performance of a HCR.
Before an HCR is adopted its performance is tested and evaluated using computer simulations (a process called Management Strategy Evaluation - MSE).
During these evaluations, the performance of a proposed HCR is measured using a collection of indicators, known as performance indicators (PIs).
The PIs should relate to the agreed management objectives of the fishery, e.g. stock sustainability, good economic performance etc.
By comparing the PIs from different HCRs, the preferred HCR can be selected and put into operation.
We explore comparing PIs in the third tutorial.
Fisheries management is affected by many sources of uncertainty, including uncertainty about the biology of the stock and the stock status.
It is very important to understand how an HCR performs under uncertainty.
Therefore, the HCR evaluations have to consider different sources of uncertainty.
In this tutorial we start to look at how PIs can be used to measure the performance of an HCR.
Particular attention is paid to how uncertainty can be included in the evaluations and the effect it can have on the PI values.
To evaluate the impact of uncertainty on the performance of an HCR, many projections are performed (sometimes hundreds or even more).
Each projection is known as a *replicate*.
The PIs are calculated for each replicate and summaries, such as average values and ranges, are presented.
**Note** that the fishery used in this tutorial is not based on a real world fishery or stock.
It's just a toy example.
**Important note about management procedures**.
An HCR is part of a management procedure (MP), along with two other elements: the data collection and the estimation process.
Under a harvest strategy approach, when an MP is agreed and adopted by stakeholders all three elements of the MP are agreed together.
In this tutorial, and in the app, we assume that the data collection and estimation process are the same for each HCR that we try.
When we talk about comparing HCRs we are really comparing MPs.
# Getting started
If you want to use the app online, it is available at the following address:
[https://ofp-sam.shinyapps.io/AMPLE-measuring-performance/](https://ofp-sam.shinyapps.io/AMPLE-measuring-performance/)
Alternatively, if you are using your own version of R and have installed the *AMPLE* package, you can run this app by entering the following commands into the R console:
```{r setup, echo=TRUE, eval=FALSE}
library(AMPLE)
measuring_performance()
```
When you start the app you should see something similar to the *Introduction to HCRs* app (Figure \@ref(fig:start)).
```{r start, fig.cap="The opening screen of the 'Measuring performance' app."}
knitr::include_graphics("meas_perf_start.png")
```
On the left side of the main panel there are three time series plots.
Two of them, catch and biomass, are similar to those seen in the *Introduction to HCRs* app (ignore the vertical dashed lines in the plots for now).
The third time series plot is a new plot of *relative CPUE*. CPUE is the catch per unit of effort, sometimes known as the catch rate.
The relative CPUE plot is the CPUE relative to the CPUE in the final historical year, 2019.
The value in 2019 is therefore 1.
By making the CPUE relative to some point in the past, it can be easier to interpret.
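As a small illustration with made-up numbers, the relative CPUE is just the CPUE series divided by its value in the reference year:
```{r cpuesketch, echo=TRUE}
# Made-up CPUE values: dividing by the 2019 value gives the relative CPUE,
# so that the value in 2019 is exactly 1.
cpue <- c("2017" = 2.4, "2018" = 2.2, "2019" = 2.0, "2020" = 1.7)
round(cpue / cpue["2019"], 2)
```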
These three plots have 10 years of historical data, from 2010 to 2019.
There is an additional year of data for biomass as it shows the biomass at the start of the year.
The horizontal dashed lines on the biomass plot are the Target Reference Point (TRP - 0.5) and Limit Reference Point (LRP - 0.2).
**Note** that in this app the biomass is scaled between 0 and 1, where 0 means there is no biomass (a bad thing) and 1 is the amount of biomass when there is no fishing, i.e. the highest it can be.
Scaling the biomass between 0 and 1 can make it easier to understand if the stock is over or underexploited.
The HCR is shown at the top of the right-hand panel. The HCR is the red line.
Underneath the HCR plot is something about **Table selection**. The option **Each replicate** should be selected.
At the moment there is no table...
In the left-hand panel there are various controls, including the HCR parameters, similar to those seen in the *Introduction to HCRs* app.
The initial values of the HCR parameters should be: *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140.
If they are not you can set them using the controls.
This HCR is known as *HCR 1*.
Note that your plots might look slightly different due to variations in the historic catches.
# Run a projection
In the previous tutorial, when you clicked the **Advance** button, you projected forward by a single year.
This meant that to run a full projection you had to keep pressing the **Advance** button.
In this tutorial, when you run a projection you will project forward 20 years from 2019 to 2039 with a single button press.
The HCR is applied in each year of your projection, and the corresponding catch limit used.
It's exactly the same as what we were doing in the previous app, it's just that now it happens automatically without you having to press the **Advance** button lots of times.
Try this now by pressing the **Run projection** button in the left panel to perform a 20 year projection, from 2019 to 2039 (Figure \@ref(fig:hcr11)).
```{r hcr11, fig.cap="The result from running a single projection with no uncertainty with HCR 1."}
knitr::include_graphics("meas_perf_hcr1_1.png")
```
You should see that the time series plots for catch, biomass and relative CPUE now show the full time series until 2039.
The HCR was applied in each year of the projection and the HCR plot has blue points on it to show which bits were used during the projection.
With this app we will run multiple projections for the same HCR.
Each projection is known as a replicate.
A table should have appeared underneath the **Table selection** control.
This table shows the values of *Biomass*, *Catch* and *Relative CPUE* in the final year of the projection.
We will use these as performance indicators (PIs) to evaluate the performance of HCRs.
The table has two rows. The top row gives the average value over all the replicates we have run, as well as the range of values (in brackets) in which *most* of the replicates have fallen into (strictly speaking it contains 90% of the full range of values).
So far we have only run 1 replicate so this first row is not very useful right now.
The results for our first replicate are shown in the second row of the table.
As we run more replicates (by running more projections), more rows will appear in the table.
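As a small sketch of how such a summary can be computed (using made-up replicate values, and assuming the 90% range is taken as the 5th to 95th percentiles):
```{r pisummarysketch, echo=TRUE}
# Illustration only: summarising the final biomass over 50 made-up replicates
# as an average and an interval containing 90% of the values.
set.seed(2)
final_biomass <- rnorm(50, mean = 0.4, sd = 0.03)
round(c(mean = mean(final_biomass),
        quantile(final_biomass, probs = c(0.05, 0.95))), 2)
```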
Click **Run projection** again. This runs *exactly the same* projection as the last one.
The projection is the same as the last one because we have not included any uncertainty.
Another row has appeared in the table to show the final values of this replicate.
These values are the same as the first replicate (again, because there is no uncertainty).
The summary values in the first row are also exactly the same.
You may notice that the **Replicate** counter under the HCR plot also increased by one.
You can keep clicking **Run projection** to add more replicates to the table.
However, as we have no uncertainty in the projection we are just running exactly the same projection over and over again with the same result.
# Including uncertainty in the projections
Press the **Reset** button to clear out the plots and tables.
We want to know how well our HCR is going to perform under uncertainty.
We introduced uncertainty in the previous tutorial (*Introduction to HCRs*).
As in the previous tutorial here we have two sources of uncertainty: *Biological variability* and *Estimation error*.
For the remainder of this tutorial we will only focus on biological variability (which is not to say that estimation error is unimportant - it is very important! - we're just keeping things simple at the moment).
Click on **Show variability options** to show the uncertainty options.
Set **Biological variability** to 0.2.
Leave **Estimation variability** and **Estimation error bias** as 0.
Note that there is nothing special about the value of 0.2, it just gives enough variability to be a useful illustration.
Keep the HCR parameters the same as before.
Click **Run projection** to project over the full time series.
As we now have variability in the biological dynamics you should see that the plots are bumpy (Figure \@ref(fig:hcr12)).
This bumpiness is caused by the variability in the biological growth processes which causes variability in the stock status (the biomass).
The HCR responds to the variability in the stock status by setting variable catches.
A new row will also have appeared in the replicate table that records the final values of biomass, catch and relative CPUE.
These values should be different to when we ran the projections with no uncertainty.
```{r hcr12, fig.cap="The result from running a single projection with biological variability with HCR 1."}
knitr::include_graphics("meas_perf_hcr1_2.png")
```
Click **Run projection** again. You should get another line on the plots.
The new line is different to the previous one (the previous one is now in grey, the new one in black).
The difference between the replicates is caused by the biological variability.
Another row will also have appeared in the table to record the final values of this replicate.
The final values of this second replicate will be different to the values of the first replicate.
The summary row at the top of the table will have been updated.
This second projection has *exactly* the same stock and the same HCR as the first one but the outcome is slightly different.
The difference is a result of the uncertainty in the biological dynamics.
It is important to understand why they are different.
There is a lot of natural variability in fish stocks, for example with recruitment, and it is impossible for scientists and managers to predict exactly what will happen in the future.
To help understand the *possible* futures, variability is included in the biological dynamics to simulate the uncertainty.
By running many projections with uncertainty we will get slightly different results.
Looking at all of the results together gives us more understanding of what might happen.
This is more useful than running a single projection with no uncertainty.
Keep clicking **Run projection** until you have 10 replicates (Figure \@ref(fig:hcr110)).
More lines will appear on the plots and more rows will appear in the table.
Each line will be different because each projection is different due to the biological variability.
```{r hcr110, fig.cap="The result from running 10 replicates with biological variability with HCR 1."}
knitr::include_graphics("meas_perf_hcr1_10.png")
```
Keep clicking **Run projection** until you get 30 or more replicates.
You will see that, eventually, the average and range values in the first row of the table do not change by much with each new projection.
This indicates that we are starting to understand the *distribution of expected values* from this HCR.
Even though we don't know exactly what will happen in the future, we think we know the range of values that it will fall within.
This is very important when it comes to selecting an HCR for a fishery.
An HCR must be robust to uncertainty, otherwise it will not perform as well as expected.
When you get to 50 or more replicates (if you click that many times...), the grey lines on the plots will be replaced by grey ribbons.
The width of the grey ribbons summarises the range of most of the replicates that have been run (again, 90% of the full range - the same as the first row in the table).
Using the ribbons is neater than having loads of lines on a plot.
The average value and the last replicate are plotted over the ribbon for illustration purposes.
When the range of values has approximately settled down and doesn't change much as more projections are run, we can start to think about what might happen in the future.
The range of values in the plots and in the first row of the table tells us how certain we are about the future.
We think that the possible *real* future will be somewhere within this range.
If the range is wide, then there is a wide spread of values and we are not certain about what will happen.
If the range is narrow then we have more confidence in what we think will happen.
Considering the range of values is important because sometimes it is better to choose an HCR about which we have more certainty in the future, even though the expected catches might be lower.
For example, the average value of catches might be lower, but the range of possible values is small.
This helps with management and planning and might be more useful than having an HCR that *might* give a high future catch but also *might* give a low future catch.
These types of projections with uncertainty are different to running a single projection with no uncertainty (known as a *deterministic* projection).
If we want to understand how uncertainty may affect the performance of an HCR in the real world, running a deterministic projection is not enough.
# Measuring performance: exercise
In this exercise you will test the performance of three different HCRs and note down the values of the three PIs: *Catch*, *Biomass* and *Relative CPUE* in the final year.
Press the **Reset** button.
Use the same HCR parameters as above (*Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140).
This HCR is known as *HCR 1*
Use the same uncertainty settings as above (*Biological variability* = 0.2, *Estimation variability* = 0, *Estimation bias* = 0).
Run 30 replicates by pressing the **Run projection** button 30 times.
Look at the top row of the table and write down the average value and the range (the values in the brackets) of biomass, catch and relative CPUE in the final year.
For example, having just run this on my machine, for biomass I get an average value of 0.41 with a range of 0.33 to 0.45.
This means that the *most likely* final value of biomass will be 0.41, but the range of possible values is from 0.33 to 0.45.
Now try a different HCR.
Set these parameters: *Blim* = 0.2, *Belbow* = 0.3, *Cmin* = 10 and *Cmax* = 130 in the panel on the left.
This HCR is known as *HCR 2*.
Keep the same uncertainty settings.
Run 30 replicates of *HCR 2* by pressing the **Run projection** button 30 times and note down the range and average values of biomass, catch and CPUE.
<!--
biomass=0 0.26 (0.21, 0.28)
Catch 82 (21, 110)
CPUE 0.43 (0.34, 0.46)
-->
Finally, do the same thing for *HCR 3*: *Blim* = 0.2, *Belbow* = 0.7, *Cmin* = 10 and *Cmax* = 150.
Table: (\#tab:results) Results from testing the three HCRs. After running 30 replicates, enter the average and range of values (the values in brackets) of the biomass, catch and relative CPUE in the final year.
| HCR | Final biomass | Final catch | Final relative CPUE|
|----:|--------:|--------:|--------:|
| HCR 1 (Belbow=0.5, Cmax=140) | | | |
| HCR 2 (Belbow=0.3, Cmax=130) | | | |
| HCR 3 (Belbow=0.7, Cmax=150) | | | |
Using the results in the table, try answering these questions:
*Question 1:* Which of the three HCRs would you choose if you wanted to have the highest average relative CPUE in the final year?
*Question 2:* Which of the three HCRs would you choose if you wanted biomass in the final year to be as close as possible to the Target Reference Point (TRP) of 0.5?
*Question 3:* Which of the three HCRs has the highest *uncertainty* in the catch in the final year (i.e. the biggest range of catch values)?
# Introducing performance indicators
In the exercise above we compared the values of catch, biomass and relative CPUE in the final year.
We used these metrics as *performance indicators* (PIs) to evaluate the performance of the three HCRs.
Lots of different PIs are available that measure different things.
For example, PIs can measure catch levels, changes in effort, the probability of biomass being above the Limit Reference Point (LRP) etc.
When comparing HCRs, the chosen PIs should relate to the management objectives for the fishery.
This allows you to measure how well the fishery is performing in relation to those objectives.
Some HCRs will perform well for some PIs and poorly for others.
This is where the idea of prioritising PIs and evaluating trade-offs between them comes in.
In the previous exercise we only looked at what happens in the final year of the projection.
We have not considered what happens during the course of the projection.
When comparing HCRs we should also compare what happens in the short- and medium-term as well as the long-term.
Here we calculate PIs over three different time periods: short-, medium- and long-term.
These time periods can be seen on the time series plots of catch, biomass and relative CPUE as vertical dashed lines.
Seven performance indicators are calculated (Table \@ref(tab:pis)).
Table: (\#tab:pis) The seven performance indicators used to measure performance in this app.
| Performance indicator | Description |
|:-------|:-------|
| Biomass | The biomass relative to the unfished biomass.|
| Probability of being above the LRP | This reflects the risk of the stock being overfished. |
| Catch | The expected catches. |
| Relative CPUE | The CPUE (or catch rate) relative to the CPUE in the last historical year. |
| Relative effort | The fishing effort relative to the effort in the last historical year. |
| Catch stability | How much the catches change over time. A value of 1 means that the catches are very stable and do not change at all. A low value, close to 0, means that the catches fluctuate a lot over time (probably not a good thing). |
| Proximity to the TRP | How close the biomass is to the TRP on average. A value of 1 means that the biomass is always at the TRP. A lower value means that the biomass spends a lot of time being higher, or lower, than the TRP. |
Generally, for most indicators, the higher the value the better (e.g. higher catches, and higher catch stability are assumed to be better than lower catches and catch levels that change a lot over time).
However, higher fishing effort is not necessarily better as it may mean higher costs of fishing.
Similarly, higher biomass might not be better. If the biomass is too high, it may mean you could have fished more.
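The exact formulas used by the app for these indicators are not shown in this tutorial, but to give a feel for how such indicators can be constructed, here are hypothetical definitions of two of them (the app's real definitions may differ):
```{r pisketch, echo=TRUE}
# Hypothetical definitions, for illustration only.
catch_stability <- function(catch) {
  # 1 when catches never change; tends towards 0 as year-to-year changes grow
  1 / (1 + mean(abs(diff(catch))) / mean(catch))
}
proximity_to_trp <- function(biomass, trp = 0.5) {
  # 1 when biomass is always at the TRP; lower when it strays above or below
  1 - mean(abs(biomass - trp)) / trp
}
catch_stability(c(100, 100, 100, 100))  # perfectly stable catches
catch_stability(c(140, 60, 140, 60))    # very variable catches
proximity_to_trp(c(0.45, 0.5, 0.55))    # biomass close to the TRP
```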
# Calculating performance indicators
We will now start to look at the different PIs, over three time periods.
Press the **Reset** button.
Set the HCR parameters to: *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140 (back to *HCR 1*).
Leave the uncertainty parameters as they are (*Biological variability* = 0.2, *Estimation variability* = 0, *Estimation bias* = 0).
Select **Performance indicators** from the **Table selection** control.
Now press the **Run projection** button.
As before, new lines have been added to the three time series plots.
You should also see that a new table has appeared with various PIs in it.
Each row in the table is a different PI.
The PIs are measured over different time periods: short-, medium- and long-term.
These are the table columns.
The values in the table are the average of the PIs across the replicates, and the values in the brackets are the range.
It isn't very helpful to calculate the average value and the range for just one or two replicates.
Keep clicking **Run projection** until you have about 50 replicates.
You should see that the numbers in the table start to settle down (Figure \@ref(fig:hcr150)).
```{r hcr150, fig.cap="The performance indicators from running 50 replicates with biological uncertainty with HCR 1."}
knitr::include_graphics("meas_perf_hcr1_50pi.png")
```
In the previous exercise we only looked at the final values.
Now, you can see the difference between the short-, medium- and long-term values.
For example, you may find that the short-term catches are higher than the long-term catches.
Different HCRs will perform better over different time periods and this will affect which HCR you prefer.
# Performance indicators: exercise
We are going to look at some PIs to help us choose between the three HCRs we looked at above.
The three HCRs are:
* HCR 1 (*Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140)
* HCR 2 (*Blim* = 0.2, *Belbow* = 0.3, *Cmin* = 10 and *Cmax* = 130)
* HCR 3 (*Blim* = 0.2, *Belbow* = 0.7, *Cmin* = 10 and *Cmax* = 150)
To keep things simple we are going to record the values for only three of the PIs: *Catch*, *Catch stability* and *Prob. > LRP*, in the three different time periods, in the table below (Table \@ref(tab:results2)).
Use the same uncertainty settings as above (*Biological variability* = 0.2, *Estimation variability* = 0, *Estimation bias* = 0).
For each HCR, run 50 replicates (by pressing **Run projection** 50, yes 50, times).
I've included a line of the results that I got as an example.
Table: (\#tab:results2) Results from testing the three HCRs. After running 50 replicates, enter the average and range of values (the values in brackets) of the catch, catch stability and probability of being above the LRP in each time period.
| HCR | Time period | Catch | Catch stability | Prob. > LRP|
|:----|--------:|--------:|--------:|--------:|
| HCR 1 | Short-term | 120 (110, 130) | 0.79 (0.71, 0.86) | 1 (1, 1) |
| | Medium-term | | | |
| | Long-term | | | |
| HCR 2 | Short-term | | | |
| | Medium-term | | | |
| | Long-term | | | |
| HCR 3 | Short-term | | | |
| | Medium-term | | | |
| | Long-term | | | |
When you have completed the table, try to answer these questions.
*Question 1:* Which HCR has the highest catches in the short-term?
*Question 2:* Which HCR has the highest catches in the long-term?
*Question 3:* Which HCR has the highest catch stability in the short-term?
*Question 4:* Which HCR has the highest catch stability in the long-term?
*Question 5:* In this example, is the indicator *Prob. > LRP* helpful in choosing between the HCRs?
*Question 6:* By looking at the indicators in the different time periods, which of the HCRs do you prefer and why?
# Summary
In this tutorial we started to use performance indicators (PIs) to measure the performance of different HCRs.
The indicators allow us to compare the performance of candidate HCRs.
The performance in different time periods is important (it's not just the final destination, but the journey).
Additionally, HCRs need to be robust to uncertainty.
In this tutorial we have seen that when we include uncertainty in an HCR evaluation, it is necessary to run many projections to allow us to understand how the HCR is going to perform.
In this tutorial we have 7 PIs over 3 time periods giving us a total of 21 PIs to consider.
Each PI is reported as an average value plus a range.
This is a lot of information to process!
You can see that considering more and more indicators can quickly lead to an overwhelming amount of information.
How we try to understand all of it is covered in the next tutorial.
| /scratch/gouwar.j/cran-all/cranData/AMPLE/inst/doc/measuring_performance.Rmd |
---
title: "Comparing the performance of Harvest Control Rules (HCRs)"
author: "Finlay Scott - OFP, SPC"
date: "2021-04-11"
output:
bookdown::html_document2:
base_format: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{comparing_performance}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
echo=FALSE, warning=FALSE, message=FALSE, out.width='100%'
)
```
# Introduction
This tutorial explores how to compare the performance of several Harvest Control Rules (HCRs).
It uses the *Comparing performance* Shiny app from the *AMPLE* package.
There are three apps in the *AMPLE* package. This is the third one.
If you are unsure what an HCR is you should look at the first app, *Introduction to HCRs*.
The previous tutorial (*Measuring the performance of HCRs*) introduced the idea of performance indicators (PIs) and how they can be used to measure the performance of an HCR in relation to the management objectives.
The performance of a proposed HCR is tested and evaluated using computer simulations (known as Management Strategy Evaluation - MSE).
During these evaluations the performance is measured using a collection of PIs.
In this tutorial we will generate PIs for several candidate HCRs and compare them.
By considering what the objectives of the fishery are, we can then select the most appropriate HCR.
**Note** that the fishery used in this tutorial is not based on a real world fishery or stock.
It's just a toy example.
Also **note** that in this app the biomass is scaled between 0 and 1, where 0 means there is no biomass (a bad thing) and 1 is the amount of biomass when there is no fishing, i.e. the highest it can be.
Scaling the biomass between 0 and 1 can make it easier to understand if the stock is over or underexploited.
**Important note about management procedures**.
An HCR is part of a management procedure (MP), along with two other elements: the data collection and the estimation process.
Under the harvest strategy approach, when an MP is agreed and adopted by stakeholders all three elements of the MP are agreed together.
In this tutorial, and in the app, we assume that the data collection and estimation process are the same for each HCR that we try.
When we talk about comparing HCRs we are really comparing MPs.
# Getting started
If you want to use the app online, it is available at the following address:
[https://ofp-sam.shinyapps.io/ample-comparing-performance/](https://ofp-sam.shinyapps.io/ample-comparing-performance/)
Alternatively, if you are using your own version of R and have installed the *AMPLE* package, you can run this app by entering the following commands into the R console:
```{r setup, echo=TRUE, eval=FALSE}
library(AMPLE)
comparing_performance()
```
When you start the app you should see something similar to the *Measuring performance* app (Figure \@ref(fig:start)).
```{r start, fig.cap="The opening screen of the 'Comparing performance' app."}
knitr::include_graphics("comp_perf_start.png")
```
On the left side of the main panel there are plots of catch, biomass and CPUE relative to the CPUE in the last historical year (2019).
These three plots have 10 years of historical data, from 2010 to 2019.
There is an additional year of data for biomass as it shows the biomass at the start of the year.
The horizontal dashed lines on the biomass plot are the Target Reference Point (TRP = 0.5) and Limit Reference Point (LRP = 0.2).
The HCR is shown at the top of the right-hand panel. The HCR is the red line.
The space underneath the HCR plot is currently blank...
In the left-hand panel there are various controls, including the HCR parameters.
The initial values of the HCR parameters should be: *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140.
If they are not you can set them using the controls in the left panel.
This HCR is known as *HCR 1*.
Note that your plots might look slightly different due to variations in the historic catches.
In this app, *biological variability* is already switched on and set to a value of 0.2.
You can change the variability options by looking in the **Settings** tab (but don't do this yet).
# Evaluating the performance
We are going to compare the performance of several HCRs.
We will do this by running projections with uncertainty for different HCRs and calculating a range of performance indicators (PIs) for them.
The PIs of the different HCRs can then be compared in different ways.
The basic process we will follow is:
* Set up an HCR using the HCR parameters on the left-hand side.
* Project the stock forward in time under that HCR (by pressing the **Project** button).
* Have a quick check of the resulting time series plots and PI values.
* If you like the HCR, add it to the basket of candidate HCRs (by pressing the **Add HCR to basket** button).
* When you have several HCRs in the basket, go to the **Compare performance** tab at the top of the screen and take a look at their relative performance.
## Evaluating the first HCR
The initial values of the HCR parameters should be: *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140. If not, set these parameters.
This HCR will be referred to as *HCR 1*.
Press the **Project** button to run the projection.
In the previous tutorial, running a projection ran a single replicate.
Here, when you run a projection, 250 replicates (i.e. 250 projections) will be run. This may take a few seconds, depending on your machine.
As noted above, biological variability is switched on and set to 0.2 so that each replicate is slightly different.
This is the same as using the *Measuring performance* app and clicking **Project** 250 times.
After clicking **Project** the time series plots will show the results.
The grey ribbon shows the range of values of the replicates (it contains 90% of the full range of values).
The average value (the dashed line), and the final replicate as an example (the solid line) are also shown.
A table of performance indicators has also appeared underneath the HCR plot.
Here we calculate PIs over three different time periods: short-, medium- and long-term.
These time periods can be seen on the time series plots of catch, biomass and relative CPUE as vertical dashed lines.
Seven performance indicators are calculated (Table \@ref(tab:pis)).
Table: (\#tab:pis) The seven performance indicators used to measure performance in this app.
| Performance indicator | Description |
|:-------|:-------|
| Biomass | The biomass relative to the unfished biomass.|
| Probability of being above the LRP | This reflects the risk of the stock being overfished. |
| Catch | The expected catches. |
| Relative CPUE | The CPUE (or catch rate) relative to the CPUE in the last historical year. |
| Relative effort | The fishing effort relative to the effort in the last historical year. |
| Catch stability | How much the catches change over time. A value of 1 means that the catches are very stable and do not change at all. A low value, close to 0, means that the catches fluctuate a lot over time (probably not a good thing). |
| Proximity to the TRP | How close the biomass is to the TRP on average. A value of 1 means that the biomass is always at the TRP. A lower value means that the biomass spends a lot of time being higher, or lower, than the TRP. |
Generally, for most indicators, the higher the value the better (e.g. higher catches, and higher catch stability are assumed to be better than lower catches and catch levels that change a lot over time).
However, higher fishing effort is not necessarily better as it may mean higher costs of fishing.
Similarly, higher biomass might not be better. If the biomass is too high, it may mean you could have fished more.
By looking at the summary plots and the table of PIs, we can decide if we think the current HCR is worth considering further.
The HCR we have just tested, *HCR 1*, looks OK (e.g. there is a high chance of being above the LRP and the expected catches look OK).
We therefore consider *HCR 1* as a *candidate* HCR, i.e. a candidate for adoption.
Click on the **Add HCR to basket** button to add the HCR to the basket of candidate HCRs. You should see that the counter **Number of HCRs in basket** increases by 1.
```{r hcr1, fig.cap="The results from evaluating HCR 1."}
knitr::include_graphics("comp_perf_hcr1.png")
```
## Add more HCRs to the basket
We now have one HCR in our basket. We are going to add two more HCRs to the basket and then compare the results of all three of them.
Repeat the above process (set up the HCR, run the projection, add to the basket) for two more HCRs.
Use the following parameters:
* HCR2: *Blim* = 0.2, *Belbow* = 0.3, *Cmin* = 10 and *Cmax* = 130
* HCR3: *Blim* = 0.2, *Belbow* = 0.7, *Cmin* = 10 and *Cmax* = 150
You should now have three HCRs in your basket (check the counter and don't forget to **Add HCR to basket** after projecting).
# Comparing the performance
We can now compare the performance of the three HCRs.
Select the **Compare performance** tab at the top of the app window.
In the main panel you should see a lot of bar plots.
Each window shows a different PI for each of the HCRs in the basket.
Each HCR has a different colour.
The height of the bars is the average value of that PI.
The bars are grouped into the three different time periods (short-, medium- and long-term) (Figure \@ref(fig:compbar1)).
```{r compbar1, fig.cap="Using bar charts to compare the average values of performance indicators of three candidate HCRs."}
knitr::include_graphics("comp_perf_compbar1.png")
```
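If you want to reproduce this style of plot outside the app, here is a sketch of a grouped bar chart in base R, with made-up average catch values (these are not output from the app):
```{r barsketch, echo=TRUE, fig.width=6, fig.height=4}
# Made-up average catches for two HCRs over three time periods,
# drawn as a grouped bar chart.
catches <- matrix(c(110, 100, 95,    # HCR 1: short, medium, long
                    120, 90, 85),    # HCR 2: short, medium, long
                  nrow = 2, byrow = TRUE,
                  dimnames = list(c("HCR 1", "HCR 2"),
                                  c("Short", "Medium", "Long")))
barplot(catches, beside = TRUE, legend.text = TRUE,
        xlab = "Time period", ylab = "Average catch",
        args.legend = list(x = "topright"))
```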
We are looking at the average values of seven PIs for three HCRs in three different time periods.
This is a lot of information!
We want to be able to choose which HCR best fits our objectives but it can be difficult when there is so much to look at.
To make things easier, we can drop PIs that we think are unimportant (perhaps they do not measure anything related to your management objectives) by deselecting them from the list in the left panel.
Bear in mind, that a PI that may be unimportant to you, may actually be very important to someone else.
Similarly, candidate HCRs can be deselected if they do not meet the objectives.
For example, we might think that the *Biomass*, *Catch stability* and *Relative effort* are less important than the other PIs.
Deselect them from the list on the left.
This still gives us four PIs left to consider (Figure \@ref(fig:compbar2)).
```{r compbar2, fig.cap="Using bar charts to compare the average values of only four performance indicators of three candidate HCRs."}
knitr::include_graphics("comp_perf_compbar2.png")
```
The results for the probability of being above the LRP look pretty similar for the three HCRs.
They all show a high probability of being above the LRP.
This means that this indicator is not providing us with any information to help us choose between the HCRs (note that it may be helpful for other HCRs).
We can deselect it, leaving us with just three PIs to consider.
Looking at the performance of *HCR 2*, we can see that in the short-term the average catches are higher than *HCR 1* and *HCR 3*.
However, in the medium- and long-term it has the lowest catches.
Additionally, it has the lowest relative CPUE and lowest 'proximity to the TRP' (i.e. it is the furthest away from the TRP) in all three time periods.
Unless high catches in the short-term is the absolute top priority, we can decide that *HCR 2* does not meet our management objectives as well as *HCR 1* and *HCR 3*.
Deselect *HCR 2* from the list on the left (Figure \@ref(fig:compbar3)).
```{r compbar3, fig.cap="Using bar charts to compare the average values of only three performance indicators after dropping HCR 2."}
knitr::include_graphics("comp_perf_compbar3.png")
```
We now have only two HCRs to consider.
The choice between *HCR 1* and *HCR 3* depends on what objectives you have.
This will depend on your *priorities* and what *trade-offs* you are prepared to accept.
Considering catches, *HCR 1* has slightly higher average catches in the short-term. The average catches in the medium- and long-term are similar for both HCRs.
*HCR 3* has higher average relative CPUE in all three time periods.
*HCR 3* also gets you slightly closer to the TRP in the medium- and long-term.
These are the kind of decisions that managers and stakeholders will need to make.
You should always have your management objectives in the back of your mind when reviewing the results.
## Box plots
So far, we have only used the bar charts that show the *average* values of the PIs.
This is only part of the story.
Click on the tab at the top that says **Performance indicators - box plots**.
Box plots give you the *range* of expected values of each indicator, and hence the **uncertainty** in their value.
The taller the box, and the longer the whiskers, the greater the range of expected values.
The black line across the middle of each box is the average value, the same that is in the bar charts in the previous tab (Figure \@ref(fig:compbox1)).
```{r compbox1, fig.cap="Using box plots to compare the range of expected values of three performance indicators for HCR 1 and HCR 3."}
knitr::include_graphics("comp_perf_compbox1.png")
```
For example, the average value of the catches in the medium- and long-term is about the same for both HCRs (the black line in the middle is at the same height).
However, the box for *HCR 1* is bigger than that of *HCR 3*.
This means that, although the average value might be the same, we are *less certain* about what the catches will actually be under *HCR 1*.
This can be an important factor when considering which HCR you prefer.
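To illustrate this point with made-up numbers, here are two sets of replicate catches that have the same average but different spreads; the wider box means less certainty about the outcome:
```{r boxsketch, echo=TRUE, fig.width=6, fig.height=4}
# Illustration only: both HCRs have the same average long-term catch,
# but the first is much less certain (a taller box and longer whiskers).
set.seed(3)
boxplot(list(`HCR 1` = rnorm(250, mean = 100, sd = 15),
             `HCR 3` = rnorm(250, mean = 100, sd = 4)),
        ylab = "Long-term catch")
```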
## Table
Finally, it is possible to see all the indicators for each HCR in a table (click on the **Performance indicators - tables** tab).
This might not look as pretty, but can be very useful for really digging down into the results, particularly if you have narrowed your choices to just a few HCRs and PIs.
# Exercises
Return to the *HCR selection* tab at the top, click the *Empty basket* button (and confirm that you are happy to do that).
This will empty all the HCRs from your basket.
## Exercise 1
Using a similar process as above, find an HCR that meets the following two conditions:
* Gives the highest possible average catches in the short-term and
* Always has a probability of the biomass being above the LRP of at least 0.8.
Try out as many HCRs as you want (5, 10, 20..., keep adding them to the basket), then use the different methods for exploring the performance to select the best one.
## Exercise 2
Now find an HCR that meets these two conditions:
* Gives the highest possible average relative CPUE in the long-term and
* Maintains average catches above 100 in all time-periods.
Write down your final HCR parameter settings and also why you think it is the best compared to the others.
# Summary
Choosing a preferred HCR is not a simple task.
It is possible to calculate many different indicators to measure their performance.
The ranges of the indicator values should be considered as well as their average value.
Additionally, you can have different time periods to consider.
It may not be possible to find an HCR that performs well for all the chosen PIs.
In this case PIs should be considered in order of their priority and trade-offs will need to be evaluated.
| /scratch/gouwar.j/cran-all/cranData/AMPLE/vignettes/comparing_performance.Rmd |
---
title: "Introduction to Harvest Control Rules (HCRs)"
author: "Finlay Scott - OFP, SPC"
date: "2021-02-11"
output:
bookdown::html_document2:
base_format: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{intro_hcr}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
echo=FALSE, warning=FALSE, message=FALSE, out.width='100%'
)
```
# Introduction to HCRs
This tutorial is a quick introduction to Harvest Control Rules (HCRs) and their use in fisheries management.
It uses the *Introduction to HCRs* Shiny app from the *AMPLE* package.
There are three apps in the *AMPLE* package. This is the first one and is the one you should start with if you are unsure what an HCR is.
In this tutorial we will cover what an HCR is, how they operate, and start to think about the impact of uncertainty on HCR performance.
**Note** that the fishery used in this tutorial is not based on a real world fishery or stock.
It's just a toy example.
# What is an HCR?
An HCR is a pre-agreed decision rule that is used to set fishing opportunities in the future.
The rule is agreed by stakeholders and managers of that fishery.
An HCR should be designed so that the management objectives of the fishery have the greatest chance of being achieved.
As the real world can be a very uncertain place, a good HCR is robust to different sorts of uncertainty (which we will discuss later on).
In this tutorial we will use a simple HCR to set the catch limit of a fishery in every year.
The HCR uses estimates of the current stock biomass to set that catch limit
(HCRs can use information other than stock biomass, but we will keep things simple for now).
This rule will be applied every year in the future to set new catch limits in each year.
**Note** that in this app the biomass is scaled between 0 and 1, where 0 means there is no biomass (a bad thing) and 1 is the amount of biomass when there is no fishing, i.e. the highest it can be.
Scaling the biomass between 0 and 1 like this can make it easier to understand if the stock is over or underexploited.
HCRs can have different basic shapes.
In this tutorial we will use a shape called a *threshold catch*. This is sometimes known as a 'hockey stick'.
The catch limit is set according to the general rule shown in Figure \@ref(fig:plothcr).
```{r plothcr, fig.cap="A threshold catch shape HCR. The shape is defined by four parameters: Cmin and Cmax (which determine the minimum and maximum catch limit) and Blim and Belbow (which determine the start and end of the sloping section).", fig.width=8, fig.height=6}
xrange <- c(0, 1)
yrange <- c(0, 150)
xlab <- "Estimated stock biomass"
ylab <- "Next catch limit (t)"
# Plot empty axes
plot(x=xrange,y=yrange,type="n",xlab=xlab, ylab=ylab, xaxs="i", yaxs="i")
lines(x = c(0, 0.2, 0.5, 1),
y = c(rep(10, 2), rep(140, 2)), lwd=2, lty=1, col="red")
grid()
# Label the parameters
text(x=0.3, y=10, labels = "Cmin", pos=4, col="blue")
lines(x=c(0.2, 0.3), y=c(10, 10), lty=2, col="blue")
text(x=0.4, y=140, labels = "Cmax", pos=2, col="blue")
lines(x=c(0.4, 0.5), y=c(140, 140), lty=2, col="blue")
text(x=0.2, y=50, labels = "Blim", pos=3, col="blue")
lines(x=c(0.2, 0.2), y=c(10, 50), lty=2, col="blue")
text(x=0.5, y=110, labels = "Belbow", pos=1, col="blue")
lines(x=c(0.5, 0.5), y=c(110, 140), lty=2, col="blue")
```
The current stock biomass level is on the x-axis along the bottom of the plot.
The catch limit that will be set in the next year is on the y-axis on the left.
The red line is the rule that sets the catch limit given the estimated stock biomass.
The basic idea is that if stock biomass is low the catch limit is low, and if the stock biomass is high the catch limit is high.
By pre-agreeing the shape of the rule, you know exactly what the catch limit will be, given the estimated status of the stock.
This is easier than all the stakeholders and managers arguing every time a catch limit needs to be set (which takes time and energy and can delay important decisions being made).
Choosing the right HCR is obviously very important.
The shape of this HCR is determined by 4 parameters: *Blim*, *Belbow*, *Cmin* and *Cmax*, shown in blue in the figure.
*Cmin* and *Cmax* determine the minimum and maximum catch limit.
*Blim* and *Belbow* determine the minimum and maximum biomass of the sloping section.
When the estimated stock biomass is greater than *Belbow* the catch limit is set at the maximum catch limit, *Cmax*.
When stock biomass is less than *Blim* the catch limit is set at minimum catch limit, *Cmin*.
When stock biomass is between *Blim* and *Belbow* the catch limit is set according to where it lies on the slope.
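Written as code, the rule might look like the following minimal sketch (the function name and default values are ours, for illustration only; this is not a function from the *AMPLE* package):
```{r, echo=TRUE, eval=FALSE}
# Threshold catch ('hockey stick') HCR: maps estimated biomass to a catch limit
hcr_threshold_catch <- function(biomass, blim = 0.2, belbow = 0.5,
                                cmin = 10, cmax = 140) {
  if (biomass <= blim) {
    cmin # below Blim: the minimum catch limit
  } else if (biomass >= belbow) {
    cmax # above Belbow: the maximum catch limit
  } else {
    # on the slope: linear interpolation between (Blim, Cmin) and (Belbow, Cmax)
    cmin + (cmax - cmin) * (biomass - blim) / (belbow - blim)
  }
}
hcr_threshold_catch(0.35) # an estimated biomass of 0.35 gives a catch limit of 75
```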
**Important note about management procedures**.
An HCR is part of a management procedure (MP), along with two other elements: the data collection and the estimation process.
Under a harvest strategy approach, when an MP is agreed and adopted by stakeholders all three elements of the MP are agreed together.
In this tutorial, and in the app, we assume that the data collection and estimation process are the same for each HCR that we try.
When we talk about comparing HCRs we are really comparing MPs.
# Getting started
If you want to use the app online, it is available at the following address:
[https://ofp-sam.shinyapps.io/AMPLE-intro-hcr/](https://ofp-sam.shinyapps.io/AMPLE-intro-hcr/)
Alternatively, if you are using your own version of R and have installed the *AMPLE* package, you can run this app by entering the following commands into the R console:
```{r setup, echo=TRUE, eval=FALSE}
library(AMPLE)
intro_hcr()
```
When you start the app you should see three plotting windows and a blue arrow in the main panel, and some controls in the left-hand panel (Figure \@ref(fig:start)).
Your plots might look slightly different due to variations in the historic catches.
```{r start, fig.cap="The opening screen of the 'Introduction to HCRs' app."}
knitr::include_graphics("intro_hcr_start.png")
```
The two plots on the left-hand side of the main panel show the history of the catch and stock biomass.
There are 10 years of catch history from 2010 to 2019 (the biomass has an extra year as it is the biomass at the *start* of the year).
Remember that the biomass is scaled between 0 and 1, where 1 is the unfished stock biomass, i.e. the maximum biomass.
There are two horizontal dashed lines on the biomass plot. The higher one, at 0.5, is the Target Reference Point (TRP).
The TRP is a management objective. Ideally, the stock biomass should be close to this line.
The lower horizontal line, at 0.2, is the Limit Reference Point (LRP).
If the stock biomass falls below this level the stock is considered to be overfished and may be in serious trouble.
The HCR is shown in the top-right panel. It is the red line (ignore the blue dashed lines for the moment).
As mentioned above, the shape of the HCR is determined by 4 parameters: *Blim*, *Belbow*, *Cmin* and *Cmax*.
The HCR parameter values can be controlled using the sliders on the left-hand side of the screen.
The first thing to do is to set the parameters of the HCR.
Set these values (if not already done): *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140.
These are the default initial values, but make sure that they have been set correctly.
We will refer to this HCR as *HCR 1*.
**Note** that as you change the HCR parameters, the shape of the HCR will change.
# Using the HCR
The purpose of the HCR is to set the catch limit each year.
The HCR uses the current estimated value of stock biomass to set the next catch limit.
This rule will be applied every year in the future to set new catch limits in each year.
We start at the very beginning of 2020 and we want to use *HCR 1* to set the catch limit for 2020.
The stock biomass can be seen in the bottom-left plot.
The way the HCR operates can be seen by following the blue arrow from the biomass plot to the HCR plot.
The current estimated value of biomass is shown on the HCR plot as the blue dashed vertical line.
This shows the same value of biomass as seen in the latest value in the biomass plot.
The catch limit in the following year is set by reading the corresponding catch limit from the HCR.
This is where the blue dashed vertical line hits the red HCR line (marked by the blue dot).
The new catch limit is shown by the blue dashed horizontal line on the HCR plot.
The new catch limit is also shown on the catch plot at the top-left, as the blue dashed line.
This represents what the catches *will* be in 2020.
We can see how the HCR works by going forward a year.
Press the **Advance** button in the left panel.
You have now advanced forward in time by a year. Several things have happened (Figure \@ref(fig:advance)):
* The catch limit that was set by the HCR for 2020 has been used by the fishery. In the catch plot you should see that the catch in the year 2020 has reached this catch limit.
* The stock has responded to being fished in 2020. You should see that we now have an extra year in the biomass plot up to 2021.
* The shape of the blue arrow to the right of the biomass plot has shifted to reflect the new current estimate of biomass. The blue vertical dashed line on the HCR plot has moved left or right as well.
* The HCR has used this new value of biomass to set a new catch limit for 2021 (this may be the same value as the previous catch limit).
```{r advance, fig.cap="After pressing Advance a single time, fishing has occurred in 2020 using the catch limit that was set by the HCR. The stock biomass has been affected by the new catch limit. The HCR uses the estimate of stock biomass at the start of 2021 to set the catch limit for 2021."}
knitr::include_graphics("intro_hcr_hcr11.png")
```
Press the **Advance** button again. The catch limit that was set for 2021 is used by the fishery. The stock responds to being fished again and we have a new estimate of biomass for the beginning of 2022.
This new estimate of biomass is again used by the HCR to set the new catch limit for 2022 (the blue arrow will move again to reflect this).
Keep pressing the **Advance** button until you get to the end of the projection (in 2040).
You should see the cycle:
* The HCR sets the catch limit based on the current estimate of stock biomass
* Fishing at the catch limit affects the stock biomass
* The next estimate of stock biomass is used by the HCR to set a new catch limit (a code sketch of this cycle follows below).
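This annual cycle can be sketched in a few lines of R.
The following toy illustration reuses the hypothetical `hcr_threshold_catch()` function from the earlier sketch and assumes simple logistic growth with an assumed carrying capacity; it is *not* the model used inside the app:
```{r, echo=TRUE, eval=FALSE}
K <- 1000 # assumed unfished biomass in tonnes (illustrative only)
biomass <- 0.42
for (year in 2020:2040) {
  catch_limit <- hcr_threshold_catch(biomass) # 1. HCR sets the catch limit
  biomass <- biomass +
    0.6 * biomass * (1 - biomass) -           # 2. toy logistic growth
    catch_limit / K                           # 3. fishing removes the catch
}
round(biomass, 2) # the stock settles down to a steady level, as in the app
```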
The previous values set by the HCR are shown by grey dots on the HCR plot, and the previous catch limits are shown on the catch plot as grey dashed horizontal lines.
You should see that eventually the fishery settles down to a steady catch limit and stock abundance (Figure \@ref(fig:hcr1)).
```{r hcr1, fig.cap="The results of running a full projection of HCR 1."}
knitr::include_graphics("intro_hcr_hcr1.png")
```
# Trying different HCRs
Press the **Reset** button in the left panel to clear the stock projection.
Run through the projection again by pressing the **Advance** button. Make sure that you understand how the HCR uses the latest value of biomass to set the catch limit.
Keep pressing the **Advance** button until you get to the end of the projection.
In the table below write down the final value of the catch and the final value of biomass that you see on the plots.
Also, note down anything interesting (if anything) that you see.
For example, what is the biggest change in catch that you see?
Different HCRs behave in different ways and some are better at achieving management objectives than others.
The parameters of the HCR that we just tried are: *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140.
<!-- Final catch = 100 Final B = 0.4 When does it settle: 10 yrs-->
Now we try a different HCR.
The general 'hockey stick' shape will be the same, but we will use different HCR parameters by changing the HCR parameters in the left panel.
Change *Belbow* to be 0.3 and *Cmax* to 130. Keep the other two parameters the same.
We will refer to this HCR as *HCR 2*.
You should see that the HCR plot has been updated to show the new shape of the HCR.
Notice that this HCR has a lower maximum catch limit than the previous HCR.
This limit will be set if the biomass is greater than 0.3 (the value of *Belbow*).
As before, repeatedly press the **Advance** button and follow the evolution of the stock and the catches (Figure \@ref(fig:hcr2)).
Note how the behaviour of the catch and biomass are different to the initial example with *HCR 1* (Figure \@ref(fig:hcr1)).
The biology of the fish stock is *exactly* the same as the example we ran earlier with *HCR 1*.
The *only* difference between the two projections is the shape of the HCR used to set future catch limits.
As before, write down the final values and any observations in the table at the end of this section.
```{r hcr2, fig.cap="The results of running a full projection with HCR 2."}
knitr::include_graphics("intro_hcr_hcr2.png")
```
As a final example set *Belbow* to 0.8 and *Cmax* to 150 and keep the other parameters the same.
We will refer to this HCR as *HCR 3*.
Notice that this HCR has a higher maximum catch limit but a much longer, shallower slope.
The catch limit will only be at this maximum catch limit if the biomass is greater than 0.8 (*Belbow*).
Again, repeatedly press the **Advance** button and follow the evolution of the stock and catch limits (Figure \@ref(fig:hcr3)).
Write down the behaviour and final values in the table.
```{r hcr3, fig.cap="The results of running a full projection with HCR 3."}
knitr::include_graphics("intro_hcr_hcr3.png")
```
Table: (\#tab:results) Results from testing the three HCRs. After running a full projection, enter the final values of the biomass, the catch, and any observations you have.
| HCR | Final biomass | Final catch | Anything interesting observed |
|:----|--------:|--------:|--------:|
| HCR 1 (Belbow=0.5, Cmax=140) | | | |
| HCR 2 (Belbow=0.3, Cmax=130) | | | |
| HCR 3 (Belbow=0.8, Cmax=150) | | | |
You should have seen that different parameterisations of the HCR lead to different fishery dynamics and final settled values.
You have now looked at the behaviour of three different HCRs.
* Which of these HCRs do you prefer and why?
* Which HCR gets the biomass closest to the TRP of 0.5?
* Which HCR results in the highest catches?
* Do any of them take the stock close to or below the LRP of 0.2?
# Introducing uncertainty
In the real world fisheries management is affected by different types of uncertainty.
By uncertainty, we mean things that we are not completely sure about, for example the exact status of the stock, and also things that are difficult to predict in the future, for example future recruitment.
The projections we have run so far have not considered uncertainty (they are known as *deterministic* simulations).
This means that if we run another projection with the same HCR, the outcome will be exactly the same.
Try this now and see.
It is very important to choose an HCR that is robust to different sources of uncertainty, otherwise the outcome may not be what you expected.
For example, an HCR that performs well when future stock recruitment is stable may not perform well when stock recruitment varies a lot.
We will look at this here.
Uncertainty is included in the app in two ways: through biological variability in the stock dynamics (e.g. natural variability in recruitment) and through differences between the estimated and 'true' level of stock biomass.
These options are initially turned off.
Click on the **Show variability options** control in the panel on the left to show the uncertainty options.
## Biological variability
Biological variability reflects the natural variability in the stock dynamics, for example through variability in the recruitment, growth and natural mortality.
It means that the future states of these biological processes are very difficult to predict with any great accuracy.
Fisheries managers have no control over this source of uncertainty.
As such it is very important that an adopted HCR is robust to this uncertainty.
We saw in the previous examples that, without uncertainty, eventually the projected stock biomass settles down to a constant value.
What happens when we include natural variability?
Set the HCR parameters back to the values for *HCR 1* (*Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10, *Cmax* = 140).
Increase the **Biological variability** option to 0.2 and project forward through time using the **Advance** button.
You should see that, unlike before, the biomass now bounces around and is not perfectly flat (Figure \@ref(fig:hcr1noise)).
This is because the variability in the stock biology is affecting the abundance.
For example, in some years the stock may produce more recruits than in other years for the same level of biomass.
This causes fluctuations in the population abundance.
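To connect this to the earlier toy projection loop: one simple way to include such variability (again, our own sketch, not the model used in the app) is to multiply the growth term by lognormal noise, so that growth differs between years even at the same biomass:
```{r, echo=TRUE, eval=FALSE}
sigma <- 0.2 # corresponds to the 'Biological variability' slider (illustrative)
# inside the projection loop, replace the growth line with:
biomass <- biomass +
  0.6 * biomass * (1 - biomass) * exp(rnorm(1, 0, sigma)) - # noisy growth
  catch_limit / K
```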
```{r hcr1noise, fig.cap="The results of running a full projection of HCR 1 with biological variability (note that your plot may look different)."}
knitr::include_graphics("intro_hcr_hcr1_noise.png")
```
As the HCR uses the estimate of biomass to set the catch limit, it means that the catch limit set by the HCR also bounces around.
The catch limits set by the HCR then go onto affect the stock biomass in the same way as before.
If you press the **Reset** button and run the projection again you should see that you get a different trajectory (uncertainty in action!).
Different HCRs respond differently to biological variability.
Try the following to see:
* Set up HCR 2 again (*Belbow* = 0.3, *Cmax* = 130) and run several complete projections with the biological variability set to 0.2. What do you notice? Are the catches more or less stable than with HCR 1?
* Set up HCR 3 again (*Belbow* = 0.8, *Cmax* = 150) and run several complete projections with the biological variability set to 0.2. What do you notice?
* From this very brief experiment, which of the three HCRs do you prefer and why?
## Estimation error
In the real world we do not know the true stock abundance.
This means that an HCR is not driven by the *true* value of biomass. Instead it is driven by an *estimated* value (we can never know the true value).
For example, the biomass can be estimated by a stock assessment model.
The HCR therefore uses an *estimated* value of the stock biomass to set the next catch limit, not the *true* value.
The difference between the estimated and true value of biomass is called the estimation error and it can have an important impact on the performance of an HCR.
Here estimation error is simulated in two ways: *variability* and *bias*. These can be combined.
To demonstrate these turn the **Biological variability** back to 0 and increase the **Estimation variability** to 0.2 (leave **Estimation bias** as 0 for now).
This estimation variability is a crude way of simulating that we don't really know the true value of the biomass.
Project the stock forward several times using the **Advance** button.
You should see that the biomass plot now shows two lines. The black one is the *true* abundance and the blue one is the *estimated* abundance.
It is the estimated abundance that is used by the HCR to set the catch limit.
You should see that the stock and catch bounce around as you project forward. This variability is not caused by any biological variability (you have turned that off) but by the HCR using the uncertain *estimated* value of biomass to set the catch limit.
The catch limit, of course, affects the *real* stock abundance.
If you press **Reset** and run the same projection again by pressing **Advance** you should see a slightly different outcome. Try it.
Now turn the **Estimation variability** back to 0 and set the **Estimation bias** to 0.1.
This means that the estimated value of biomass is always 10% higher than the true value, i.e. we are always overestimating the stock abundance.
Project forward and see what happens. Compare the *true* final values of biomass (the black line on the plot) and catch to the numbers you wrote down in the table above for the first HCR.
Are the final values higher or lower than when there is no bias?
<!-- I get lower values than when no bias -->
In the real world, both biological variability and estimation error are operating at the same time.
It is very important that the selected HCR is robust to this uncertainty so that the management objectives can be achieved.
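As a rough illustration of the estimation error just described (our own sketch; the app's internal implementation may differ), it can be simulated by distorting the true biomass before it is passed to the HCR:
```{r, echo=TRUE, eval=FALSE}
true_biomass <- 0.45
est_bias <- 0.1 # the 'Estimation bias' slider: always 10% too high
est_sd <- 0.2   # the 'Estimation variability' slider
estimated_biomass <- true_biomass * (1 + est_bias) * exp(rnorm(1, 0, est_sd))
# The HCR then uses estimated_biomass, not true_biomass, to set the catch limit.
```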
# Summary
An HCR is a decision rule for setting future fishing opportunities.
In this app, the input to the rule is the *estimated* stock biomass and the output is the catch limit in the following year.
We have looked at three different HCRs and seen that they perform differently.
Uncertainty is a big concern in fisheries management.
Here we looked at biological and estimation uncertainty.
We have seen that they can change the performance of the HCR and the fishery.
It is very important that an HCR is robust to uncertainty.
An HCR that performs well in the absence of uncertainty may not perform as well when uncertainty is present.
The big question is: how do we know which HCR to use?
To answer this question, see the next tutorial *Measuring performance*.
---
title: "Measuring the performance of harvest control rules (HCRs)"
author: "Finlay Scott - OFP, SPC"
date: "2021-03-11"
output:
bookdown::html_document2:
base_format: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{measuring_performance}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
echo=FALSE, warning=FALSE, message=FALSE, out.width='100%'
)
```
# Introduction
This tutorial explores how the performance of a Harvest Control Rule (HCR) can be measured.
It uses the *Measuring performance* Shiny app from the *AMPLE* package.
There are three apps in the *AMPLE* package. This is the second one.
If you are unsure what an HCR is you should look at the first app, *Introduction to HCRs*.
The previous tutorial (*Introduction to HCRs*) introduced HCRs and how they can be used to set future fishing opportunities based on estimates of stock status.
This allowed us to explore how HCRs work and we saw that different HCRs can perform differently.
We also looked at two sources of uncertainty, *biological variability* and *estimation error*, and saw that they can affect the performance of a HCR.
Before an HCR is adopted its performance is tested and evaluated using computer simulations (a process called Management Strategy Evaluation - MSE).
During these evaluations, the performance of a proposed HCR is measured using a collection of indicators, known as performance indicators (PIs).
The PIs should relate to the agreed management objectives of the fishery, e.g. stock sustainability, good economic performance etc.
By comparing the PIs from different HCRs, the preferred HCR can be selected and put into operation.
We explore comparing PIs in the third tutorial.
Fisheries management is affected by many sources of uncertainty, including uncertainty about the biology of the stock and the stock status.
It is very important to understand how an HCR performs under uncertainty.
Therefore, the HCR evaluations have to consider different sources of uncertainty.
In this tutorial we start to look at how PIs can be used to measure the performance of an HCR.
Particular attention is paid to how uncertainty can be included in the evaluations and the effect it can have on the PI values.
To evaluate the impact of uncertainty on the performance of an HCR, many projections are performed (sometimes hundreds or even more).
Each projection is known as a *replicate*.
The PIs are calculated for each replicate and summaries, such as average values and ranges, are presented.
**Note** that the fishery used in this tutorial is not based on a real world fishery or stock.
It's just a toy example.
**Important note about management procedures**.
An HCR is part of a management procedure (MP), along with two other elements: the data collection and the estimation process.
Under a harvest strategy approach, when an MP is agreed and adopted by stakeholders all three elements of the MP are agreed together.
In this tutorial, and in the app, we assume that the data collection and estimation process are the same for each HCR that we try.
When we talk about comparing HCRs we are really comparing MPs.
# Getting started
If you want to use the app online, it is available at the following address:
[https://ofp-sam.shinyapps.io/AMPLE-measuring-performance/](https://ofp-sam.shinyapps.io/AMPLE-measuring-performance/)
Alternatively, if you are using your own version of R and have installed the *AMPLE* package, you can run this app by entering the following commands into the R console:
```{r setup, echo=TRUE, eval=FALSE}
library(AMPLE)
measuring_performance()
```
When you start the app you should see something similar to the *Introduction to HCRs* app (Figure \@ref(fig:start)).
```{r start, fig.cap="The opening screen of the 'Measuring performance' app."}
knitr::include_graphics("meas_perf_start.png")
```
On the left side of the main panel there are three time series plots.
Two of them, catch and biomass, are similar to those seen in the *Introduction to HCRs* app (ignore the vertical dashed lines in the plots for now).
The third time series plot is a new plot of *relative CPUE*. CPUE is the catch per unit of effort, sometimes known as the catch rate.
The relative CPUE plot is the CPUE relative to the CPUE in the final historical year, 2019.
The value in 2019 is therefore 1.
By making the CPUE relative to some point in the past, it can be easier to interpret.
These three plots have 10 years of historical data, from 2010 to 2019.
There is an additional year of data for biomass as it shows the biomass at the start of the year.
The horizontal dashed lines on the biomass plot are the Target Reference Point (TRP - 0.5) and Limit Reference Point (LRP - 0.2).
**Note** that in this app the biomass is scaled between 0 and 1, where 0 means there is no biomass (a bad thing) and 1 is the amount of biomass when there is no fishing, i.e. the highest it can be.
Scaling the biomass between 0 and 1 can make it easier to understand if the stock is over or underexploited.
The HCR is shown at the top of the right-hand panel. The HCR is the red line.
Underneath the HCR plot is something about **Table selection**. The option **Each replicate** should be selected.
At the moment there is no table...
In the left-hand panel there are various controls, including the HCR parameters, similar to those seen in the *Introduction to HCRs* app.
The initial values of the HCR parameters should be: *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140.
If they are not you can set them using the controls.
This HCR is known as *HCR 1*.
Note that your plots might look slightly different due to variations in the historic catches.
# Run a projection
In the previous tutorial, when you clicked the **Advance** button, you projected forward by a single year.
This meant that to run a full projection you had to keep pressing the **Advance** button.
In this tutorial, when you run a projection you will project forward 20 years from 2019 to 2039 with a single button press.
The HCR is applied in each year of your projection, and the corresponding catch limit is used.
It's exactly the same as what we were doing in the previous app, it's just that now it happens automatically without you having to press the **Advance** button lots of times.
Try this now by pressing the **Run projection** button in the left panel to perform a 20 year projection, from 2019 to 2039 (Figure \@ref(fig:hcr11)).
```{r hcr11, fig.cap="The result from running a single projection with no uncertainty with HCR 1."}
knitr::include_graphics("meas_perf_hcr1_1.png")
```
You should see that the time series plots for catch, biomass and relative CPUE now show the full time series until 2039.
The HCR was applied in each year of the projection and the HCR plot has blue points on it to show which bits were used during the projection.
With this app we will run multiple projections for the same HCR.
Each projection is known as a replicate.
A table should have appeared underneath the **Table selection** control.
This table shows the values of *Biomass*, *Catch* and *Relative CPUE* in the final year of the projection.
We will use these as performance indicators (PIs) to evaluate the performance of HCRs.
The table has two rows. The top row gives the average value over all the replicates we have run, as well as the range of values (in brackets) into which *most* of the replicates fall (strictly speaking, it contains 90% of the full range of values).
So far we have only run 1 replicate so this first row is not very useful right now.
The results for our first replicate are shown in the second row of the table.
As we run more replicates (by running more projections), more rows will appear in the table.
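To make the summary row concrete, here is how such a summary could be computed in R (a sketch using made-up replicate values; the object name is ours):
```{r, echo=TRUE, eval=FALSE}
# Final-year biomass from a handful of hypothetical replicates:
final_biomass <- c(0.41, 0.38, 0.44, 0.35, 0.42)
mean(final_biomass)                    # the average shown in the top row
quantile(final_biomass, c(0.05, 0.95)) # the range containing 90% of replicates
```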
Click **Run projection** again. This runs *exactly the same* projection as the last one.
The projection is the same as the last one because we have not included any uncertainty.
Another row has appeared in the table to show the final values of this replicate.
These values are the same as the first replicate (again, because there is no uncertainty).
The summary values in the first row are also exactly the same.
You may notice that the **Replicate** counter under the HCR plot also increased by one.
You can keep clicking **Run projection** to add more replicates to the table.
However, as we have no uncertainty in the projection we are just running exactly the same projection over and over again with the same result.
# Including uncertainty in the projections
Press the **Reset** button to clear out the plots and tables.
We want to know how well our HCR is going to perform under uncertainty.
We introduced uncertainty in the previous tutorial (*Introduction to HCRs*).
As in the previous tutorial here we have two sources of uncertainty: *Biological variability* and *Estimation error*.
For the remainder of this tutorial we will only focus on biological variability (which is not to say that estimation error is unimportant - it is very important! - we're just keeping things simple at the moment).
Click on the **Show variability options** to show the uncertainty options.
Set **Biological variability** to 0.2.
Leave **Estimation variability** and **Estimation error bias** as 0.
Note that there is nothing special about the value of 0.2, it just gives enough variability to be a useful illustration.
Keep the HCR parameters the same as before.
Click **Run projection** to project over the full time series.
As we now have variability in the biological dynamics you should see that the plots are bumpy (Figure \@ref(fig:hcr12)).
This bumpiness is caused by the variability in the biological growth processes which causes variability in the stock status (the biomass).
The HCR responds to the variability in the stock status by setting variable catches.
A new row will also have appeared in the replicate table that records the final values of biomass, catch and relative CPUE.
These values should be different to when we ran the projections with no uncertainty.
```{r hcr12, fig.cap="The result from running a single projection with biological variability with HCR 1."}
knitr::include_graphics("meas_perf_hcr1_2.png")
```
Click **Run projection** again. You should get another line on the plots.
The new line is different to the previous one (the previous one is now in grey, the new one in black).
The difference between the replicates is caused by the biological variability.
Another row will also have appeared in the table to record the final values of this replicate.
The final values of this second replicate will be different to the values of the first replicate.
The summary row at the top of the table will have been updated.
This second projection has *exactly* the same stock and the same HCR as the first one but the outcome is slightly different.
The difference is a result of the uncertainty in the biological dynamics.
It is important to understand why they are different.
There is a lot of natural variability in fish stocks, for example with recruitment, and it is impossible for scientists and managers to predict exactly what will happen in the future.
To help understand the *possible* futures, variability is included in the biological dynamics to simulate the uncertainty.
By running many projections with uncertainty we will get slightly different results.
Looking at all of the results together gives us more understanding of what might happen.
This is more useful than running a single projection with no uncertainty.
Keep clicking **Run projection** until you have 10 replicates (Figure \@ref(fig:hcr110)).
More lines will appear on the plots and more rows will appear in the table.
Each line will be different because each projection is different due to the biological variability.
```{r hcr110, fig.cap="The result from running 10 replicates with biological variability with HCR 1."}
knitr::include_graphics("meas_perf_hcr1_10.png")
```
Keep clicking **Run projection** until you get 30 or more replicates.
You will see that, eventually, the average and range values in the first row of the table do not change by much with each new projection.
This indicates that we are starting to understand the *distribution of expected values* from this HCR.
Even though we don't know exactly what will happen in the future, we think we know the range of values that it will fall within.
This is very important when it comes to selecting a HCR for a fishery.
An HCR must be robust to uncertainty, otherwise it will not perform as well as expected.
When you get to 50 or more replicates (if you click that many times...), the grey lines on the plots will be replaced by grey ribbons.
The width of the grey ribbons summarises the range of most of the replicates that have been run (again, 90% of the full range - the same as the first row in the table).
Using the ribbons is neater than having loads of lines on a plot.
The average value and the last replicate is plotted over the ribbon for illustration purposes.
When the range of values has approximately settled down and doesn't change much as more projections are run, we can start to think about what might happen in the future.
The range of values in the plots and in the first row of the table tells us how certain we are about the future.
We think that the possible *real* future will be somewhere within this range.
If the range is wide, then there is a wide spread of values and we are not certain about what will happen.
If the range is narrow then we have more confidence in what we think will happen.
Considering the range of values is important because sometimes it is better to choose an HCR about which we have more certainty in the future, even though the expected catches might be lower.
For example, the average value of catches might be lower, but the range of possible values is small.
This helps with management and planning and might be more useful than having an HCR that *might* give a high future catch but also *might* give a low future catch.
These types of projections with uncertainty are different to running a single projection with no uncertainty (known as a *deterministic* projection).
If we want to understand how uncertainty may affect the performance of an HCR in the real world, running a deterministic projection is not enough.
# Measuring performance: exercise
In this exercise you will test the performance of three different HCRs and note down the values of the three PIs: *Catch*, *Biomass* and *Relative CPUE* in the final year.
Press the **Reset** button.
Use the same HCR parameters as above (*Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140).
This HCR is known as *HCR 1*
Use the same uncertainty settings as above (*Biological variability* = 0.2, *Estimation variability* = 0, *Estimation bias* = 0).
Run 30 replicates by pressing the **Run projection** button 30 times.
Look at the top row of the table and write down the average value and the range (the values in the brackets) of biomass, catch and relative CPUE in the final year.
For example, having just run this on my machine, for biomass I get an average value of 0.41 with a range of 0.33 to 0.45.
This means that the *most likely* final value of biomass will be 0.41, but the range of possible values is from 0.33 to 0.45.
Now try a different HCR.
Set these parameters: *Blim* = 0.2, *Belbow* = 0.3, *Cmin* = 10 and *Cmax* = 130 in the panel on the left.
This HCR is known as *HCR 2*.
Keep the same uncertainty settings.
Run 30 replicates of *HCR 2* by pressing the **Run projection** button 30 times and note down the range and average values of biomass, catch and CPUE.
<!--
biomass=0 0.26 (0.21, 0.28)
Catch 82 (21, 110)
CPUE 0.43 (0.34, 0.46)
-->
Finally, do the same thing for *HCR 3*: *Blim* = 0.2, *Belbow* = 0.7, *Cmin* = 10 and *Cmax* = 150.
Table: (\#tab:results) Results from testing the three HCRs. After running 30 replicates, enter the average and range of values (the values in brackets) of the biomass, catch and relative CPUE in the final year.
| HCR | Final biomass | Final catch | Final relative CPUE|
|----:|--------:|--------:|--------:|
| HCR 1 (Belbow=0.5, Cmax=140) | | | |
| HCR 2 (Belbow=0.3, Cmax=130) | | | |
| HCR 3 (Belbow=0.7, Cmax=150) | | | |
Using the results in the table, try answering these questions:
*Question 1:* Which of the three HCRs would you choose if you wanted to have the highest average relative CPUE in the final year?
*Question 2:* Which of the three HCRs would you choose if you wanted biomass in the final year to be as close as possible to the Target Reference Point (TRP) of 0.5?
*Question 3:* Which of the three HCRs has the highest *uncertainty* in the catch in the final year (i.e. the biggest range of catch values)?
# Introducing performance indicators
In the exercise above we compared the values of catch, biomass and relative CPUE in the final year.
We used these metrics as *performance indicators* (PIs) to evaluate the performance of the three HCRs.
Lots of different PIs are available that measure different things.
For example, PIs can measure catch levels, changes in effort, the probability of biomass being above the Limit Reference Point (LRP) etc.
When comparing HCRs, the chosen PIs should relate to the management objectives for the fishery.
This allows you to measure how well the fishery is performing in relation to those objectives.
Some HCRs will perform well for some PIs and poorly for others.
This is where the idea of prioritising PIs and evaluating trade-offs between them comes in.
In the previous exercise we only looked at what happens in the final year of the projection.
We have not considered what happens during the course of the projection.
When comparing HCRs we should also compare what happens in the short- and medium-term as well as the long-term.
Here we calculate PIs over three different time periods: short-, medium- and long-term.
These time periods can be seen on the time series plots of catch, biomass and relative CPUE as vertical dashed lines.
Seven performance indicators are calculated (Table \@ref(tab:pis)).
Table: (\#tab:pis) The seven performance indicators used to measure performance in this app.
| Performance indicator | Description |
|:-------|:-------|
| Biomass | The biomass relative to the unfished biomass.|
| Probability of being above the LRP | This reflects the risk of the stock being overfished. |
| Catch | The expected catches. |
| Relative CPUE | The CPUE (or catch rate) relative to the CPUE in the last historical year. |
| Relative effort | The fishing effort relative to the effort in the last historical year. |
| Catch stability | How much the catches change over time. A value of 1 means that the catches are very stable and do not change at all. A low value, close to 0, means that the catches fluctuate a lot over time (probably not a good thing). |
| Proximity to the TRP | How close the biomass is to the TRP on average. A value of 1 means that the biomass is always at the TRP. A lower value means that the biomass spends a lot of time being higher, or lower, than the TRP. |
Generally, for most indicators, the higher the value the better (e.g. higher catches and higher catch stability are assumed to be better than lower catches and catch levels that change a lot over time).
However, higher fishing effort is not necessarily better as it may mean higher costs of fishing.
Similarly, higher biomass might not be better. If the biomass is too high, it may mean you could have fished more.
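For intuition, here are plausible calculations for two of these indicators, using made-up values (the app's exact formulas are not shown here, so treat these as sketches):
```{r, echo=TRUE, eval=FALSE}
# Probability of being above the LRP: the proportion of replicate-years
# in which the biomass exceeded 0.2
biomass <- c(0.45, 0.32, 0.19, 0.51, 0.40)
mean(biomass > 0.2)
# A plausible catch stability indicator based on year-to-year change,
# where a value of 1 means perfectly stable catches:
catch <- c(120, 118, 125, 122, 119)
1 - mean(abs(diff(catch))) / mean(catch)
```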
# Calculating performance indicators
We will now start to look at the different PIs, over three time periods.
Press the **Reset** button.
Set the HCR parameters to: *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140 (back to *HCR 1*).
Leave the uncertainty parameters as they are (*Biological variability* = 0.2, *Estimation variability* = 0, *Estimation bias* = 0).
Select **Performance indicators** from the **Table selection** control.
Now press the **Run projection** button.
As before, new lines have been added to the three time series plots.
You should also see that a new table has appeared with various PIs in it.
Each row in the table is a different PI.
The PIs are measured over different time periods: short-, medium- and long-term.
These are the table columns.
The values in the table are the average of the PIs across the replicates, and the values in the brackets are the range.
It isn't very helpful to calculate the average value and the range for just one or two replicates.
Keep clicking **Run projection** until you have about 50 replicates.
You should see that the numbers in the table start to settle down (Figure \@ref(fig:hcr150)).
```{r hcr150, fig.cap="The performance indicators from running 50 replicates with biological uncertainty with HCR 1."}
knitr::include_graphics("meas_perf_hcr1_50pi.png")
```
In the previous exercise we only looked at the final values.
Now, you can see the difference between the short-, medium- and long-term values.
For example, you may find that the short-term catches are higher than the long-term catches.
Different HCRs will perform better over different time periods and this will affect which HCR you prefer.
# Performance indicators: exercise
We are going to look at some PIs to help us choose between the three HCRs we looked at above.
The three HCRs are:
* HCR 1 (*Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140)
* HCR 2 (*Blim* = 0.2, *Belbow* = 0.3, *Cmin* = 10 and *Cmax* = 130)
* HCR 3 (*Blim* = 0.2, *Belbow* = 0.7, *Cmin* = 10 and *Cmax* = 150)
To keep things simple we are going to record the values for only three of the PIs: *Catch*, *Catch stability* and *Prob. > LRP*, in the three different time periods, in the table below (Table \@ref(tab:results2)).
Use the same uncertainty settings as above (*Biological variability* = 0.2, *Estimation variability* = 0, *Estimation bias* = 0).
For each HCR, run 50 replicates (by pressing **Run projection** 50, yes 50, times).
I've included a line of the results that I got as an example.
Table: (\#tab:results2) Results from testing the three HCRs. After running 50 replicates, enter the average and range of values (the values in brackets) of the catch, catch stability and probability of being above the LRP in each time period.
| HCR | Time period | Catch | Catch stability | Prob. > LRP|
|:----|--------:|--------:|--------:|--------:|
| HCR 1 | Short-term | 120 (110, 130) | 0.79 (0.71, 0.86) | 1 (1, 1) |
| | Medium-term | | | |
| | Long-term | | | |
| HCR 2 | Short-term | | | |
| | Medium-term | | | |
| | Long-term | | | |
| HCR 3 | Short-term | | | |
| | Medium-term | | | |
| | Long-term | | | |
When you have completed the table, try to answer these questions.
*Question 1:* Which HCR has the highest catches in the short-term?
*Question 2:* Which HCR has the highest catches in the long-term?
*Question 3:* Which HCR has the highest catch stability in the short-term?
*Question 4:* Which HCR has the highest catch stability in the long-term?
*Question 5:* In this example, is the indicator *Prob. > LRP* helpful in choosing between the HCRs?
*Question 6:* By looking at the indicators in the different time periods, which of the HCRs do you prefer and why?
# Summary
In this tutorial we started to use performance indicators (PIs) to measure the performance of different HCRs.
The indicators allow us to compare the performance of candidate HCRs.
The performance in different time periods is important (it's not just the final destination, but the journey).
Additionally, HCRs need to be robust to uncertainty.
We have also seen that when we include uncertainty in an HCR evaluation, it is necessary to run many projections to allow us to understand how the HCR is going to perform.
Here we have 7 PIs over 3 time periods, giving us a total of 21 PIs to consider.
Each PI is reported as an average value plus a range.
This is a lot of information to process!
You can see that considering more and more indicators can quickly lead to an overwhelming amount of information.
How we try to understand all of it is covered in the next tutorial.
# ==================================================================== #
# TITLE: #
# AMR: An R Package for Working with Antimicrobial Resistance Data #
# #
# SOURCE CODE: #
# https://github.com/msberends/AMR #
# #
# PLEASE CITE THIS SOFTWARE AS: #
# Berends MS, Luz CF, Friedrich AW, Sinha BNM, Albers CJ, Glasner C #
# (2022). AMR: An R Package for Working with Antimicrobial Resistance #
# Data. Journal of Statistical Software, 104(3), 1-31. #
# https://doi.org/10.18637/jss.v104.i03 #
# #
# Developed at the University of Groningen and the University Medical #
# Center Groningen in The Netherlands, in collaboration with many #
# colleagues from around the world, see our website. #
# #
# This R package is free software; you can freely use and distribute #
# it for both personal and commercial purposes under the terms of the #
# GNU General Public License version 2.0 (GNU GPL-2), as published by #
# the Free Software Foundation. #
# We created this package for both routine data analysis and academic #
# research and it was publicly released in the hope that it will be #
# useful, but it comes WITHOUT ANY WARRANTY OR LIABILITY. #
# #
# Visit our website for the full manual and a complete tutorial about #
# how to conduct AMR data analysis: https://msberends.github.io/AMR/ #
# ==================================================================== #
#' The `AMR` Package
#'
#' @description
#' Welcome to the `AMR` package.
#'
#' The `AMR` package is a [free and open-source](https://msberends.github.io/AMR/#copyright) R package with [zero dependencies](https://en.wikipedia.org/wiki/Dependency_hell) to simplify the analysis and prediction of Antimicrobial Resistance (AMR) and to work with microbial and antimicrobial data and properties, by using evidence-based methods. **Our aim is to provide a standard** for clean and reproducible AMR data analysis, that can therefore empower epidemiological analyses to continuously enable surveillance and treatment evaluation in any setting. [Many different researchers](https://msberends.github.io/AMR/authors.html) from around the globe are continually helping us to make this a successful and durable project!
#'
#' This work was published in the Journal of Statistical Software (Volume 104(3); \doi{10.18637/jss.v104.i03}) and formed the basis of two PhD theses (\doi{10.33612/diss.177417131} and \doi{10.33612/diss.192486375}).
#'
#' After installing this package, R knows [**`r format_included_data_number(AMR::microorganisms)` microorganisms**](https://msberends.github.io/AMR/reference/microorganisms.html) (updated `r format(TAXONOMY_VERSION$GBIF$accessed_date, "%B %Y")`) and all [**`r format_included_data_number(nrow(AMR::antibiotics) + nrow(AMR::antivirals))` antibiotic, antimycotic and antiviral drugs**](https://msberends.github.io/AMR/reference/antibiotics.html) by name and code (including ATC, EARS-Net, ASIARS-Net, PubChem, LOINC and SNOMED CT), and knows all about valid SIR and MIC values. The integral clinical breakpoint guidelines from CLSI and EUCAST are included, even with epidemiological cut-off (ECOFF) values. It supports and can read any data format, including WHONET data. This package works on Windows, macOS and Linux with all versions of R since R-3.0 (April 2013). **It was designed to work in any setting, including those with very limited resources**. It was created for both routine data analysis and academic research at the Faculty of Medical Sciences of the public [University of Groningen](https://www.rug.nl), in collaboration with non-profit organisations [Certe Medical Diagnostics and Advice Foundation](https://www.certe.nl) and [University Medical Center Groningen](https://www.umcg.nl).
#'
#' The `AMR` package is available in `r vector_and(vapply(FUN.VALUE = character(1), LANGUAGES_SUPPORTED_NAMES, function(x) x$exonym), quotes = FALSE, sort = FALSE)`. Antimicrobial drug (group) names and colloquial microorganism names are provided in these languages.
#' @section Reference Data Publicly Available:
#' All data sets in this `AMR` package (about microorganisms, antibiotics, SIR interpretation, EUCAST rules, etc.) are publicly and freely available for download in the following formats: R, MS Excel, Apache Feather, Apache Parquet, SPSS, SAS, and Stata. We also provide tab-separated plain text files that are machine-readable and suitable for input in any software program, such as laboratory information systems. Please visit [our website for the download links](https://msberends.github.io/AMR/articles/datasets.html). The actual files are of course available on [our GitHub repository](https://github.com/msberends/AMR/tree/main/data-raw).
#' @source
#' To cite AMR in publications use:
#'
#' Berends MS, Luz CF, Friedrich AW, Sinha BNM, Albers CJ, Glasner C (2022). "AMR: An R Package for Working with Antimicrobial Resistance Data." _Journal of Statistical Software_, *104*(3), 1-31. \doi{10.18637/jss.v104.i03}
#'
#' A BibTeX entry for LaTeX users is:
#'
#' \preformatted{
#' `r format(citation("AMR"), style = "bib")`
#' }
#' @name AMR
#' @keywords internal
#' @rdname AMR
"_PACKAGE"
# ==================================================================== #
# TITLE: #
# AMR: An R Package for Working with Antimicrobial Resistance Data #
# #
# SOURCE CODE: #
# https://github.com/msberends/AMR #
# #
# PLEASE CITE THIS SOFTWARE AS: #
# Berends MS, Luz CF, Friedrich AW, Sinha BNM, Albers CJ, Glasner C #
# (2022). AMR: An R Package for Working with Antimicrobial Resistance #
# Data. Journal of Statistical Software, 104(3), 1-31. #
# https://doi.org/10.18637/jss.v104.i03 #
# #
# Developed at the University of Groningen and the University Medical #
# Center Groningen in The Netherlands, in collaboration with many #
# colleagues from around the world, see our website. #
# #
# This R package is free software; you can freely use and distribute #
# it for both personal and commercial purposes under the terms of the #
# GNU General Public License version 2.0 (GNU GPL-2), as published by #
# the Free Software Foundation. #
# We created this package for both routine data analysis and academic #
# research and it was publicly released in the hope that it will be #
# useful, but it comes WITHOUT ANY WARRANTY OR LIABILITY. #
# #
# Visit our website for the full manual and a complete tutorial about #
# how to conduct AMR data analysis: https://msberends.github.io/AMR/ #
# ==================================================================== #
# add new version numbers here, and add the rules themselves to "data-raw/eucast_rules.tsv" and clinical_breakpoints
# (sourcing "data-raw/_pre_commit_hook.R" will process the TSV file)
EUCAST_VERSION_BREAKPOINTS <- list(
# "13.0" = list(
# version_txt = "v13.0",
# year = 2023,
# title = "'EUCAST Clinical Breakpoint Tables'",
# url = "https://www.eucast.org/clinical_breakpoints/"
# ),
"12.0" = list(
version_txt = "v12.0",
year = 2022,
title = "'EUCAST Clinical Breakpoint Tables'",
url = "https://www.eucast.org/clinical_breakpoints/"
),
"11.0" = list(
version_txt = "v11.0",
year = 2021,
title = "'EUCAST Clinical Breakpoint Tables'",
url = "https://www.eucast.org/clinical_breakpoints/"
),
"10.0" = list(
version_txt = "v10.0",
year = 2020,
title = "'EUCAST Clinical Breakpoint Tables'",
url = "https://www.eucast.org/ast_of_bacteria/previous_versions_of_documents/"
)
)
EUCAST_VERSION_EXPERT_RULES <- list(
"3.3" = list(
version_txt = "v3.3",
year = 2021,
title = "'EUCAST Expert Rules' and 'EUCAST Intrinsic Resistance and Unusual Phenotypes'",
url = "https://www.eucast.org/expert_rules_and_expected_phenotypes"
),
"3.2" = list(
version_txt = "v3.2",
year = 2020,
title = "'EUCAST Expert Rules' and 'EUCAST Intrinsic Resistance and Unusual Phenotypes'",
url = "https://www.eucast.org/expert_rules_and_expected_phenotypes"
),
"3.1" = list(
version_txt = "v3.1",
year = 2016,
title = "'EUCAST Expert Rules, Intrinsic Resistance and Exceptional Phenotypes'",
url = "https://www.eucast.org/expert_rules_and_expected_phenotypes"
)
)
# EUCAST_VERSION_RESISTANTPHENOTYPES <- list(
# "1.2" = list(
# version_txt = "v1.2",
# year = 2023,
# title = "'Expected Resistant Phenotypes'",
# url = "https://www.eucast.org/expert_rules_and_expected_phenotypes"
# )
# )
TAXONOMY_VERSION <- list(
GBIF = list(
accessed_date = as.Date("2022-12-11"),
citation = "GBIF Secretariat (2022). GBIF Backbone Taxonomy. Checklist dataset \\doi{10.15468/39omei}.",
url = "https://www.gbif.org"
),
LPSN = list(
accessed_date = as.Date("2022-12-11"),
citation = "Parte, AC *et al.* (2020). **List of Prokaryotic names with Standing in Nomenclature (LPSN) moves to the DSMZ.** International Journal of Systematic and Evolutionary Microbiology, 70, 5607-5612; \\doi{10.1099/ijsem.0.004332}.",
url = "https://lpsn.dsmz.de"
),
BacDive = list(
accessed_date = as.Date("2023-05-12"),
citation = "Reimer, LC *et al.* (2022). ***BacDive* in 2022: the knowledge base for standardized bacterial and archaeal data.** Nucleic Acids Res., 50(D1):D741-D74; \\doi{10.1093/nar/gkab961}.",
url = "https://bacdive.dsmz.de"
),
SNOMED = list(
accessed_date = as.Date("2021-07-01"),
citation = "Public Health Information Network Vocabulary Access and Distribution System (PHIN VADS). US Edition of SNOMED CT from 1 September 2020. Value Set Name 'Microorganism', OID 2.16.840.1.114222.4.11.1009 (v12).",
url = "https://phinvads.cdc.gov"
),
LOINC = list(
accessed_date = as.Date("2023-10-19"),
citation = "Logical Observation Identifiers Names and Codes (LOINC), Version 2.76 (18 September, 2023).",
url = "https://loinc.org"
)
)
globalVariables(c(
".GenericCallEnv",
".mo",
".rowid",
".syndromic_group",
"ab",
"ab_txt",
"affect_ab_name",
"affect_mo_name",
"angle",
"antibiotic",
"antibiotics",
"atc_group1",
"atc_group2",
"base_ab",
"ci_max",
"ci_min",
"clinical_breakpoints",
"code",
"cols",
"count",
"data",
"disk",
"dosage",
"dose",
"dose_times",
"fullname",
"fullname_lower",
"g_species",
"genus",
"gr",
"group",
"guideline",
"hjust",
"input",
"intrinsic_resistant",
"isolates",
"lang",
"language",
"lookup",
"method",
"mic ",
"mic",
"microorganism",
"microorganisms",
"microorganisms.codes",
"mo",
"name",
"new",
"numerator",
"observations",
"old",
"old_name",
"pattern",
"R",
"rank_index",
"ref_tbl",
"reference.rule",
"reference.rule_group",
"reference.version",
"rowid",
"rule_group",
"rule_name",
"se_max",
"se_min",
"SI",
"sir",
"species",
"syndromic_group",
"total",
"txt",
"type",
"value",
"varname",
"xvar",
"y",
"year",
"yvar"
))
# ==================================================================== #
# TITLE: #
# AMR: An R Package for Working with Antimicrobial Resistance Data #
# #
# SOURCE CODE: #
# https://github.com/msberends/AMR #
# #
# PLEASE CITE THIS SOFTWARE AS: #
# Berends MS, Luz CF, Friedrich AW, Sinha BNM, Albers CJ, Glasner C #
# (2022). AMR: An R Package for Working with Antimicrobial Resistance #
# Data. Journal of Statistical Software, 104(3), 1-31. #
# https://doi.org/10.18637/jss.v104.i03 #
# #
# Developed at the University of Groningen and the University Medical #
# Center Groningen in The Netherlands, in collaboration with many #
# colleagues from around the world, see our website. #
# #
# This R package is free software; you can freely use and distribute #
# it for both personal and commercial purposes under the terms of the #
# GNU General Public License version 2.0 (GNU GPL-2), as published by #
# the Free Software Foundation. #
# We created this package for both routine data analysis and academic #
# research and it was publicly released in the hope that it will be #
# useful, but it comes WITHOUT ANY WARRANTY OR LIABILITY. #
# #
# Visit our website for the full manual and a complete tutorial about #
# how to conduct AMR data analysis: https://msberends.github.io/AMR/ #
# ==================================================================== #
# a faster implementation of left_join than poorman's merge()-based version - we use match():
pm_left_join <- function(x, y, by = NULL, suffix = c(".x", ".y")) {
if (is.null(by)) {
by <- intersect(names(x), names(y))[1L]
if (is.na(by)) {
stop_("no common column found for pm_left_join()")
}
pm_join_message(by)
} else if (!is.null(names(by))) {
by <- unname(c(names(by), by))
}
if (length(by) == 1) {
by <- rep(by, 2)
}
int_x <- colnames(x) %in% colnames(y) & colnames(x) != by[1]
int_y <- colnames(y) %in% colnames(x) & colnames(y) != by[2]
colnames(x)[int_x] <- paste0(colnames(x)[int_x], suffix[1L])
colnames(y)[int_y] <- paste0(colnames(y)[int_y], suffix[2L])
merged <- cbind(
x,
y[
match(
x[, by[1], drop = TRUE],
y[, by[2], drop = TRUE]
),
colnames(y)[!colnames(y) %in% colnames(x) & !colnames(y) == by[2]],
drop = FALSE
]
)
rownames(merged) <- NULL
merged
}
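# Illustrative sketch of the behaviour (not run at package load; the data
# frames below are hypothetical):
#   x <- data.frame(mo = c("B_ESCHR_COLI", "B_STPHY_AURS"), n = c(40, 12))
#   y <- data.frame(mo = c("B_STPHY_AURS", "B_ESCHR_COLI"), gram = c("positive", "negative"))
#   pm_left_join(x, y, by = "mo")
#   #>             mo  n     gram
#   #> 1 B_ESCHR_COLI 40 negative
#   #> 2 B_STPHY_AURS 12 positive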
# support where() like tidyverse (this function will also be used when running `antibiogram()`):
where <- function(fn) {
# based on https://github.com/nathaneastwood/poorman/blob/52eb6947e0b4430cd588976ed8820013eddf955f/R/where.R#L17-L32
if (!is.function(fn)) {
stop_("`", deparse(substitute(fn)), "()` is not a valid predicate function.")
}
df <- pm_select_env$.data
cols <- pm_select_env$get_colnames()
if (is.null(df)) {
df <- get_current_data("where", call = FALSE)
cols <- colnames(df)
}
preds <- unlist(lapply(
df,
function(x, fn) {
do.call("fn", list(x))
},
fn
))
if (!is.logical(preds)) stop_("`where()` must be used with functions that return `TRUE` or `FALSE`.")
data_cols <- cols
cols <- data_cols[preds]
which(data_cols %in% cols)
}
# copied and slightly rewritten from {poorman} under permissive license (2021-10-15)
# https://github.com/nathaneastwood/poorman, MIT licensed, Nathan Eastwood, 2020
case_when_AMR <- function(...) {
fs <- list(...)
lapply(fs, function(x) {
if (!inherits(x, "formula")) {
stop("`case_when()` requires formula inputs.")
}
})
n <- length(fs)
if (n == 0L) {
stop("No cases provided.")
}
validate_case_when_length <- function(query, value, fs) {
lhs_lengths <- lengths(query)
rhs_lengths <- lengths(value)
all_lengths <- unique(c(lhs_lengths, rhs_lengths))
if (length(all_lengths) <= 1L) {
return(all_lengths[[1L]])
}
non_atomic_lengths <- all_lengths[all_lengths != 1L]
len <- non_atomic_lengths[[1L]]
if (length(non_atomic_lengths) == 1L) {
return(len)
}
inconsistent_lengths <- non_atomic_lengths[-1L]
lhs_problems <- lhs_lengths %in% inconsistent_lengths
rhs_problems <- rhs_lengths %in% inconsistent_lengths
problems <- lhs_problems | rhs_problems
if (any(problems)) {
stop("The following formulas must be length ", len, " or 1, not ",
paste(inconsistent_lengths, collapse = ", "), ".\n ",
paste(fs[problems], collapse = "\n "),
call. = FALSE
)
}
}
replace_with <- function(x, i, val, arg_name) {
if (is.null(val)) {
return(x)
}
i[is.na(i)] <- FALSE
if (length(val) == 1L) {
x[i] <- val
} else {
x[i] <- val[i]
}
x
}
query <- vector("list", n)
value <- vector("list", n)
default_env <- parent.frame()
for (i in seq_len(n)) {
query[[i]] <- eval(fs[[i]][[2]], envir = default_env)
value[[i]] <- eval(fs[[i]][[3]], envir = default_env)
if (!is.logical(query[[i]])) {
stop(fs[[i]][[2]], " does not return a `logical` vector.")
}
}
m <- validate_case_when_length(query, value, fs)
out <- value[[1]][rep(NA_integer_, m)]
replaced <- rep(FALSE, m)
for (i in seq_len(n)) {
out <- replace_with(
out, query[[i]] & !replaced, value[[i]],
NULL
)
replaced <- replaced | (query[[i]] & !is.na(query[[i]]))
}
out
}
rbind_AMR <- function(...) {
# this is just rbind(), but with the functionality of dplyr::bind_rows(),
# to allow differences in available columns
l <- list(...)
l_names <- unique(unlist(lapply(l, names)))
l_new <- lapply(l, function(df) {
rownames(df) <- NULL
for (col in l_names[!l_names %in% colnames(df)]) {
# create the new column, could also be length 0
df[, col] <- rep(NA, NROW(df))
}
df
})
do.call(rbind, l_new)
}
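# Illustrative sketch (not run; hypothetical data) - columns missing from one
# input are filled with NA, like dplyr::bind_rows():
#   rbind_AMR(data.frame(a = 1), data.frame(a = 2, b = "x"))
#   #>   a    b
#   #> 1 1 <NA>
#   #> 2 2    x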
# No export, no Rd
addin_insert_in <- function() {
import_fn("insertText", "rstudioapi")(" %in% ")
}
# No export, no Rd
addin_insert_like <- function() {
# we want Shift + Ctrl/Cmd + L to iterate over %like%, %unlike%, %like_case%, and %unlike_case%
getActiveDocumentContext <- import_fn("getActiveDocumentContext", "rstudioapi")
insertText <- import_fn("insertText", "rstudioapi")
modifyRange <- import_fn("modifyRange", "rstudioapi")
document_range <- import_fn("document_range", "rstudioapi")
document_position <- import_fn("document_position", "rstudioapi")
context <- getActiveDocumentContext()
current_row <- context$selection[[1]]$range$end[1]
current_col <- context$selection[[1]]$range$end[2]
current_row_txt <- context$contents[current_row]
if (is.null(current_row) || current_row_txt %unlike% "%(un)?like") {
insertText(" %like% ")
return(invisible())
}
pos_preceded_by <- function(txt) {
if (tryCatch(substr(current_row_txt, current_col - nchar(trimws(txt, which = "right")), current_col) == trimws(txt, which = "right"),
error = function(e) FALSE
)) {
return(TRUE)
}
tryCatch(substr(current_row_txt, current_col - nchar(txt), current_col) %like% paste0("^", txt),
error = function(e) FALSE
)
}
replace_pos <- function(old, with) {
modifyRange(
document_range(
document_position(current_row, current_col - nchar(old)),
document_position(current_row, current_col)
),
text = with,
id = context$id
)
}
if (pos_preceded_by(" %like% ")) {
replace_pos(" %like% ", with = " %unlike% ")
} else if (pos_preceded_by(" %unlike% ")) {
replace_pos(" %unlike% ", with = " %like_case% ")
} else if (pos_preceded_by(" %like_case% ")) {
replace_pos(" %like_case% ", with = " %unlike_case% ")
} else if (pos_preceded_by(" %unlike_case% ")) {
replace_pos(" %unlike_case% ", with = " %like% ")
} else {
insertText(" %like% ")
}
}
search_type_in_df <- function(x, type, info = TRUE) {
meet_criteria(x, allow_class = "data.frame")
meet_criteria(type, allow_class = "character", has_length = 1)
# try to find columns based on type
found <- NULL
# remove attributes from other packages
x <- as.data.frame(x, stringsAsFactors = FALSE)
colnames_formatted <- tolower(generalise_antibiotic_name(colnames(x)))
# -- mo
if (type == "mo") {
add_MO_lookup_to_AMR_env()
if (any(vapply(FUN.VALUE = logical(1), x, is.mo))) {
# take first 'mo' column
found <- colnames(x)[vapply(FUN.VALUE = logical(1), x, is.mo)]
} else if ("mo" %in% colnames_formatted &&
suppressWarnings(all(x$mo %in% c(NA, AMR_env$MO_lookup$mo)))) {
found <- "mo"
} else if (any(colnames_formatted %like_case% "^(mo|microorganism|organism|bacteria|ba[ck]terie)s?$")) {
found <- sort(colnames(x)[colnames_formatted %like_case% "^(mo|microorganism|organism|bacteria|ba[ck]terie)s?$"])
} else if (any(colnames_formatted %like_case% "^(microorganism|organism|bacteria|ba[ck]terie)")) {
found <- sort(colnames(x)[colnames_formatted %like_case% "^(microorganism|organism|bacteria|ba[ck]terie)"])
} else if (any(colnames_formatted %like_case% "species")) {
found <- sort(colnames(x)[colnames_formatted %like_case% "species"])
}
}
# -- key antibiotics
if (type %in% c("keyantibiotics", "keyantimicrobials")) {
if (any(colnames_formatted %like_case% "^key.*(ab|antibiotics|antimicrobials)")) {
found <- sort(colnames(x)[colnames_formatted %like_case% "^key.*(ab|antibiotics|antimicrobials)"])
}
}
# -- date
if (type == "date") {
if (any(colnames_formatted %like_case% "^(specimen date|specimen_date|spec_date)")) {
# WHONET support
found <- sort(colnames(x)[colnames_formatted %like_case% "^(specimen date|specimen_date|spec_date)"])
if (!inherits(pm_pull(x, found), c("Date", "POSIXct"))) {
stop(
font_red(paste0(
"Found column '", font_bold(found), "' to be used as input for `col_", type,
"`, but this column contains no valid dates. Transform its values to valid dates first."
)),
call. = FALSE
)
}
} else if (any(vapply(FUN.VALUE = logical(1), x, function(x) inherits(x, c("Date", "POSIXct"))))) {
# take first <Date> column
found <- colnames(x)[vapply(FUN.VALUE = logical(1), x, function(x) inherits(x, c("Date", "POSIXct")))]
}
}
# -- patient id
if (type == "patient_id") {
crit1 <- colnames_formatted %like_case% "^(patient|patid)"
if (any(crit1)) {
found <- colnames(x)[crit1]
} else {
crit2 <- colnames_formatted %like_case% "(identification |patient|pat.*id)"
if (any(crit2)) {
found <- colnames(x)[crit2]
}
}
}
# -- specimen
if (type == "specimen") {
if (any(colnames_formatted %like_case% "(specimen type|spec_type)")) {
found <- sort(colnames(x)[colnames_formatted %like_case% "(specimen type|spec_type)"])
} else if (any(colnames_formatted %like_case% "^(specimen)")) {
found <- sort(colnames(x)[colnames_formatted %like_case% "^(specimen)"])
}
}
# -- UTI (urinary tract infection)
if (type == "uti") {
if (any(colnames_formatted == "uti")) {
found <- colnames(x)[colnames_formatted == "uti"]
} else if (any(colnames_formatted %like_case% "(urine|urinary)")) {
found <- sort(colnames(x)[colnames_formatted %like_case% "(urine|urinary)"])
}
if (!is.null(found)) {
# this column should contain logicals
if (!is.logical(x[, found, drop = TRUE])) {
message_("Column '", font_bold(found), "' found as input for `col_", type,
"`, but this column does not contain 'logical' values (TRUE/FALSE) and was ignored.",
add_fn = font_red
)
found <- NULL
}
}
}
found <- found[1]
if (!is.null(found) && isTRUE(info)) {
if (message_not_thrown_before("search_in_type", type)) {
msg <- paste0("Using column '", font_bold(found), "' as input for `col_", type, "`.")
if (type %in% c("keyantibiotics", "keyantimicrobials", "specimen")) {
msg <- paste(msg, "Use", font_bold(paste0("col_", type), "= FALSE"), "to prevent this.")
}
message_(msg)
}
}
found
}
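# Illustrative usage (not run; assumes the 'example_isolates' data set of this
# package, which contains a <Date> column named "date"):
#   search_type_in_df(example_isolates, type = "date")
#   #> i Using column 'date' as input for `col_date`.
#   #> [1] "date"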
is_valid_regex <- function(x) {
regex_at_all <- tryCatch(
vapply(
FUN.VALUE = logical(1),
X = strsplit(x, "", fixed = TRUE),
FUN = function(y) {
any(
y %in% c(
"$", "(", ")", "*", "+", "-",
".", "?", "[", "]", "^", "{",
"|", "}", "\\"
),
na.rm = TRUE
)
},
USE.NAMES = FALSE
),
error = function(e) rep(TRUE, length(x))
)
regex_valid <- vapply(
FUN.VALUE = logical(1),
X = x,
FUN = function(y) {
!inherits(try(grepl(y, "", perl = TRUE), silent = TRUE), "try-error")
},
USE.NAMES = FALSE
)
regex_at_all & regex_valid
}
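# Illustrative sketch (not run): a value must both contain at least one regex
# character and compile as a regular expression to count as valid:
#   is_valid_regex(c("^Escherichia", "Escherichia", "[0-9"))
#   #> [1]  TRUE FALSE FALSE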
stop_ifnot_installed <- function(package) {
installed <- vapply(FUN.VALUE = logical(1), package, requireNamespace, quietly = TRUE)
if (any(!installed) && any(package == "rstudioapi")) {
stop("This function only works in RStudio when using R >= 3.2.", call. = FALSE)
} else if (any(!installed)) {
stop("This requires the ", vector_and(package[!installed]), " package.",
"\nTry to install with install.packages().",
call. = FALSE
)
} else {
return(invisible())
}
}
pkg_is_available <- function(pkg, also_load = FALSE, min_version = NULL) {
if (also_load == TRUE) {
out <- suppressWarnings(require(pkg, character.only = TRUE, warn.conflicts = FALSE))
} else {
out <- requireNamespace(pkg, quietly = TRUE)
}
if (!is.null(min_version)) {
out <- out && utils::packageVersion(pkg) >= min_version
}
isTRUE(out)
}
import_fn <- function(name, pkg, error_on_fail = TRUE) {
if (isTRUE(error_on_fail)) {
stop_ifnot_installed(pkg)
}
tryCatch(
# don't use get() to avoid fetching non-API functions
getExportedValue(name = name, ns = asNamespace(pkg)),
error = function(e) {
if (isTRUE(error_on_fail)) {
stop_("function `", name, "()` is not an exported object from package '", pkg,
"'. Please create an issue at ", font_url("https://github.com/msberends/AMR/issues"), ". Many thanks!",
call = FALSE
)
} else {
return(NULL)
}
}
)
}
# this alternative wrapper to the message(), warning() and stop() functions:
# - wraps text to never break lines within words
# - ignores formatted text while wrapping
# - adds indentation dependent on the type of message (such as NOTE)
# - can add additional formatting functions like blue or bold text
word_wrap <- function(...,
add_fn = list(),
as_note = FALSE,
width = 0.95 * getOption("width"),
extra_indent = 0) {
msg <- paste0(c(...), collapse = "")
if (isTRUE(as_note)) {
msg <- paste0(AMR_env$info_icon, " ", gsub("^note:? ?", "", msg, ignore.case = TRUE))
}
if (msg %like% "\n") {
    # run word_wrap() over every line here, bind them and return again
return(paste0(
vapply(
FUN.VALUE = character(1),
trimws(unlist(strsplit(msg, "\n", fixed = TRUE)), which = "right"),
word_wrap,
add_fn = add_fn,
as_note = FALSE,
width = width,
extra_indent = extra_indent
),
collapse = "\n"
))
}
# correct for operators (will add the space later on)
ops <- "([,./><\\]\\[])"
msg <- gsub(paste0(ops, " ", ops), "\\1\\2", msg, perl = TRUE)
# we need to correct for already applied style, that adds text like "\033[31m\"
msg_stripped <- font_stripstyle(msg)
# where are the spaces now?
msg_stripped_wrapped <- paste0(
strwrap(msg_stripped,
simplify = TRUE,
width = width
),
collapse = "\n"
)
msg_stripped_wrapped <- paste0(unlist(strsplit(msg_stripped_wrapped, "(\n|\\*\\|\\*)")),
collapse = "\n"
)
msg_stripped_spaces <- which(unlist(strsplit(msg_stripped, "", fixed = TRUE)) == " ")
msg_stripped_wrapped_spaces <- which(unlist(strsplit(msg_stripped_wrapped, "", fixed = TRUE)) != "\n")
# so these are the indices of spaces that need to be replaced
replace_spaces <- which(!msg_stripped_spaces %in% msg_stripped_wrapped_spaces)
# put it together
msg <- unlist(strsplit(msg, " ", fixed = TRUE))
msg[replace_spaces] <- paste0(msg[replace_spaces], "\n")
# add space around operators again
msg <- gsub(paste0(ops, ops), "\\1 \\2", msg, perl = TRUE)
msg <- paste0(msg, collapse = " ")
msg <- gsub("\n ", "\n", msg, fixed = TRUE)
if (msg_stripped %like% "\u2139 ") {
indentation <- 2 + extra_indent
} else if (msg_stripped %like% "^=> ") {
indentation <- 3 + extra_indent
} else {
indentation <- 0 + extra_indent
}
msg <- gsub("\n", paste0("\n", strrep(" ", indentation)), msg, fixed = TRUE)
# remove trailing empty characters
msg <- gsub("(\n| )+$", "", msg)
if (length(add_fn) > 0) {
if (!is.list(add_fn)) {
add_fn <- list(add_fn)
}
for (i in seq_len(length(add_fn))) {
msg <- add_fn[[i]](msg)
}
}
# format backticks
if (pkg_is_available("cli") &&
tryCatch(isTRUE(getExportedValue("ansi_has_hyperlink_support", ns = asNamespace("cli"))()), error = function(e) FALSE) &&
tryCatch(getExportedValue("isAvailable", ns = asNamespace("rstudioapi"))(), error = function(e) return(FALSE)) &&
tryCatch(getExportedValue("versionInfo", ns = asNamespace("rstudioapi"))()$version > "2023.6.0.0", error = function(e) return(FALSE))) {
# we are in a recent version of RStudio, so do something nice: add links to our help pages in the console.
parts <- strsplit(msg, "`", fixed = TRUE)[[1]]
cmds <- parts %in% paste0(ls(envir = asNamespace("AMR")), "()")
# functions with a dot are not allowed: https://github.com/rstudio/rstudio/issues/11273#issuecomment-1156193252
# lead them to the help page of our package
parts[cmds & parts %like% "[.]"] <- font_url(url = paste0("ide:help:AMR::", gsub("()", "", parts[cmds & parts %like% "[.]"], fixed = TRUE)),
txt = parts[cmds & parts %like% "[.]"])
# otherwise, give a 'click to run' popup
parts[cmds & parts %unlike% "[.]"] <- font_url(url = paste0("ide:run:AMR::", parts[cmds & parts %unlike% "[.]"]),
txt = parts[cmds & parts %unlike% "[.]"])
msg <- paste0(parts, collapse = "`")
}
msg <- gsub("`(.+?)`", font_grey_bg("\\1"), msg)
  # clean up introduced whitespace between full stops
msg <- gsub("[.] +[.]", "..", msg)
# remove extra space that was introduced (e.g. "Smith et al. , 2022")
msg <- gsub(". ,", ".,", msg, fixed = TRUE)
msg <- gsub("[ ,", "[,", msg, fixed = TRUE)
msg <- gsub("/ /", "//", msg, fixed = TRUE)
msg
}
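# Illustrative sketch (not run; the exact result depends on getOption("width")):
#   word_wrap("Note: a very long message is wrapped so that no line break",
#             " falls within a word, and continuation lines are indented.",
#             as_note = TRUE)
# returns a single string with embedded "\n" characters, the note icon
# prepended, and the "note:" prefix itself stripped.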
message_ <- function(...,
appendLF = TRUE,
add_fn = list(font_blue),
as_note = TRUE) {
message(
word_wrap(...,
add_fn = add_fn,
as_note = as_note
),
appendLF = appendLF
)
}
warning_ <- function(...,
add_fn = list(),
immediate = FALSE,
call = FALSE) {
warning(
trimws2(word_wrap(...,
add_fn = add_fn,
as_note = FALSE
)),
immediate. = immediate,
call. = call
)
}
# this alternative to the stop() function:
# - adds the function name where the error was thrown
# - wraps text to never break lines within words
stop_ <- function(..., call = TRUE) {
msg <- paste0(c(...), collapse = "")
if (!isFALSE(call)) {
if (isTRUE(call)) {
call <- as.character(sys.call(-1)[1])
} else {
# so you can go back more than 1 call, as used in sir_calc(), that now throws a reference to e.g. n_sir()
call <- as.character(sys.call(call)[1])
}
msg <- paste0("in ", call, "(): ", msg)
}
msg <- trimws2(word_wrap(msg, add_fn = list(), as_note = FALSE))
stop(msg, call. = FALSE)
}
stop_if <- function(expr, ..., call = TRUE) {
if (isTRUE(expr)) {
if (isTRUE(call)) {
call <- -1
}
if (!isFALSE(call)) {
# since we're calling stop_(), which is another call
call <- call - 1
}
stop_(..., call = call)
}
}
stop_ifnot <- function(expr, ..., call = TRUE) {
if (isFALSE(expr)) {
if (isTRUE(call)) {
call <- -1
}
if (!isFALSE(call)) {
# since we're calling stop_(), which is another call
call <- call - 1
}
stop_(..., call = call)
}
}
"%or%" <- function(x, y) {
if (is.null(x) || is.null(y)) {
if (is.null(x)) {
return(y)
} else {
return(x)
}
}
ifelse(is.na(x), y, x)
}
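# Illustrative sketch (not run): `%or%` falls back to `y` for NULL and NA:
#   NULL %or% "fallback"        # "fallback"
#   c("a", NA) %or% "unknown"   # "a" "unknown"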
return_after_integrity_check <- function(value, type, check_vector) {
if (!all(value[!is.na(value)] %in% check_vector)) {
warning_(paste0("invalid ", type, ", NA generated"))
value[!value %in% check_vector] <- NA
}
value
}
# transforms data set to a tibble with only ASCII values, to comply with CRAN policies
dataset_UTF8_to_ASCII <- function(df) {
trans <- function(vect) {
iconv(vect, from = "UTF-8", to = "ASCII//TRANSLIT")
}
df <- as.data.frame(df, stringsAsFactors = FALSE)
for (i in seq_len(NCOL(df))) {
col <- df[, i]
if (is.list(col)) {
col <- lapply(col, function(j) trans(j))
df[, i] <- list(col)
} else {
if (is.factor(col)) {
levels(col) <- trans(levels(col))
} else if (is.character(col)) {
col <- trans(col)
} else {
col
}
df[, i] <- col
}
}
import_fn("as_tibble", "tibble")(df)
}
documentation_date <- function(d) {
day <- as.integer(format(d, "%e"))
suffix <- rep("th", length(day))
suffix[day %in% c(1, 21, 31)] <- "st"
suffix[day %in% c(2, 22)] <- "nd"
suffix[day %in% c(3, 23)] <- "rd"
paste0(month.name[as.integer(format(d, "%m"))], " ", day, suffix, ", ", format(d, "%Y"))
}
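# Illustrative sketch (not run):
#   documentation_date(as.Date("2024-06-03"))   # "June 3rd, 2024"
#   documentation_date(as.Date("2024-06-21"))   # "June 21st, 2024"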
format_included_data_number <- function(data) {
if (is.numeric(data) && length(data) == 1) {
n <- data
} else if (is.data.frame(data)) {
n <- nrow(data)
} else {
n <- length(unique(data))
}
if (n > 10000) {
rounder <- -3 # round on thousands
} else if (n > 1000) {
rounder <- -2 # round on hundreds
} else if (n < 50) {
# do not round
rounder <- 0
} else {
rounder <- -1 # round on tens
}
paste0(ifelse(rounder == 0, "", "~"), format(round(n, rounder), decimal.mark = ".", big.mark = " "))
}
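# Illustrative sketch (not run): small counts stay exact, larger counts are
# rounded and prefixed with a tilde:
#   format_included_data_number(48)      # "48"
#   format_included_data_number(12345)   # "~12 000"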
# for eucast_rules() and mdro(), creates markdown output with URLs and names
create_eucast_ab_documentation <- function() {
x <- trimws(unique(toupper(unlist(strsplit(EUCAST_RULES_DF$then_change_these_antibiotics, ",", fixed = TRUE)))))
ab <- character()
for (val in x) {
if (paste0("AB_", val) %in% ls(envir = asNamespace("AMR"))) {
# antibiotic group names, as defined in data-raw/_pre_commit_hook.R, such as `CARBAPENEMS`
val <- eval(parse(text = paste0("AB_", val)), envir = asNamespace("AMR"))
} else if (val %in% AMR_env$AB_lookup$ab) {
# separate drugs, such as `AMX`
val <- as.ab(val)
} else {
val <- as.sir(NA)
}
ab <- c(ab, val)
}
ab <- unique(ab)
atcs <- ab_atc(ab, only_first = TRUE)
# only keep ABx with an ATC code:
ab <- ab[!is.na(atcs)]
atcs <- atcs[!is.na(atcs)]
# sort all vectors on name:
ab_names <- ab_name(ab, language = NULL, tolower = TRUE)
ab <- ab[order(ab_names)]
atcs <- atcs[order(ab_names)]
ab_names <- ab_names[order(ab_names)]
# create the text:
atc_txt <- paste0("[", atcs, "](", ab_url(ab), ")")
out <- paste0(ab_names, " (`", ab, "`, ", atc_txt, ")", collapse = ", ")
substr(out, 1, 1) <- toupper(substr(out, 1, 1))
out
}
vector_or <- function(v, quotes = TRUE, reverse = FALSE, sort = TRUE, initial_captital = FALSE, last_sep = " or ") {
  # makes unique and sorts, and this also removes NAs
v <- unique(v)
if (isTRUE(sort)) {
v <- sort(v)
}
if (isTRUE(reverse)) {
v <- rev(v)
}
if (isTRUE(quotes)) {
quotes <- '"'
} else if (isFALSE(quotes)) {
quotes <- ""
} else {
quotes <- quotes[1L]
}
if (isTRUE(initial_captital)) {
v[1] <- gsub("^([a-z])", "\\U\\1", v[1], perl = TRUE)
}
if (length(v) <= 1) {
return(paste0(quotes, v, quotes))
}
if (identical(v, c("I", "R", "S"))) {
# class 'sir' should be sorted like this
v <- c("S", "I", "R")
}
# oxford comma
if (last_sep %in% c(" or ", " and ") && length(v) > 2) {
last_sep <- paste0(",", last_sep)
}
# all commas except for last item, so will become '"val1", "val2", "val3" or "val4"'
paste0(
paste0(quotes, v[seq_len(length(v) - 1)], quotes, collapse = ", "),
last_sep, paste0(quotes, v[length(v)], quotes)
)
}
vector_and <- function(v, quotes = TRUE, reverse = FALSE, sort = TRUE, initial_captital = FALSE) {
vector_or(
v = v, quotes = quotes, reverse = reverse, sort = sort,
initial_captital = initial_captital, last_sep = " and "
)
}
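# Illustrative sketch (not run): values are made unique, sorted, quoted and
# joined, with an Oxford comma for three or more items:
#   vector_or(c("b", "c", "a"))   # "\"a\", \"b\", or \"c\""
#   vector_and(c("b", "a"))       # "\"a\" and \"b\""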
format_class <- function(class, plural = FALSE) {
class.bak <- class
class[class == "numeric"] <- "number"
class[class == "integer"] <- "whole number"
if (all(c("numeric", "integer") %in% class.bak, na.rm = TRUE)) {
class[class %in% c("number", "whole number")] <- "(whole) number"
}
class[class == "character"] <- "text string"
class[class == "Date"] <- "date"
class[class %in% c("POSIXt", "POSIXct", "POSIXlt")] <- "date/time"
class[class != class.bak] <- paste0(
ifelse(plural, "", "a "),
class[class != class.bak],
ifelse(plural, "s", "")
)
# exceptions
class[class == "logical"] <- ifelse(plural, "a vector of `TRUE`/`FALSE`", "`TRUE` or `FALSE`")
class[class == "data.frame"] <- "a data set"
if ("list" %in% class) {
class <- "a list"
}
if ("matrix" %in% class) {
class <- "a matrix"
}
if ("custom_eucast_rules" %in% class) {
class <- "input created with `custom_eucast_rules()`"
}
if (any(c("mo", "ab", "sir") %in% class)) {
class <- paste0("of class '", class[1L], "'")
}
class[class == class.bak] <- paste0("of class '", class[class == class.bak], "'")
# output
vector_or(class, quotes = FALSE, sort = FALSE)
}
# a check for every single argument in all functions
meet_criteria <- function(object, # can be literally `list(...)` for `allow_arguments_from`
allow_class = NULL,
has_length = NULL,
looks_like = NULL,
is_in = NULL,
is_positive = NULL,
is_positive_or_zero = NULL,
is_finite = NULL,
contains_column_class = NULL,
allow_NULL = FALSE,
allow_NA = FALSE,
ignore.case = FALSE,
allow_arguments_from = NULL, # 1 function, or a list of functions
.call_depth = 0) { # depth in calling
obj_name <- deparse(substitute(object))
call_depth <- -2 - abs(.call_depth)
# if object is missing, or another error:
tryCatch(invisible(object),
error = function(e) AMR_env$meet_criteria_error_txt <- e$message
)
if (!is.null(AMR_env$meet_criteria_error_txt)) {
error_txt <- AMR_env$meet_criteria_error_txt
AMR_env$meet_criteria_error_txt <- NULL
stop(error_txt, call. = FALSE) # don't use stop_() here, our pkg may not be loaded yet
}
AMR_env$meet_criteria_error_txt <- NULL
if (is.null(object)) {
stop_if(allow_NULL == FALSE, "argument `", obj_name, "` must not be NULL", call = call_depth)
return(invisible())
}
if (is.null(dim(object)) && length(object) == 1 && suppressWarnings(is.na(object))) { # suppressWarnings for functions
stop_if(allow_NA == FALSE, "argument `", obj_name, "` must not be NA", call = call_depth)
return(invisible())
}
if (!is.null(allow_class)) {
stop_ifnot(inherits(object, allow_class), "argument `", obj_name,
"` must be ", format_class(allow_class, plural = isTRUE(has_length > 1)),
", i.e. not be ", format_class(class(object), plural = isTRUE(has_length > 1)),
call = call_depth
)
# check data.frames for data
if (inherits(object, "data.frame")) {
stop_if(any(dim(object) == 0),
"the data provided in argument `", obj_name,
"` must contain rows and columns (current dimensions: ",
paste(dim(object), collapse = "x"), ")",
call = call_depth
)
}
}
if (!is.null(has_length)) {
stop_ifnot(length(object) %in% has_length, "argument `", obj_name,
"` must ", # ifelse(allow_NULL, "be NULL or must ", ""),
"be of length ", vector_or(has_length, quotes = FALSE),
", not ", length(object),
call = call_depth
)
}
if (!is.null(looks_like)) {
stop_ifnot(object %like% looks_like, "argument `", obj_name,
"` must ", # ifelse(allow_NULL, "be NULL or must ", ""),
"resemble the regular expression \"", looks_like, "\"",
call = call_depth
)
}
if (!is.null(is_in)) {
if (ignore.case == TRUE) {
object <- tolower(object)
is_in <- tolower(is_in)
}
stop_ifnot(all(object %in% is_in, na.rm = TRUE), "argument `", obj_name, "` ",
ifelse(!is.null(has_length) && length(has_length) == 1 && has_length == 1,
"must be either ",
"must only contain values "
),
vector_or(is_in, quotes = !isTRUE(any(c("double", "numeric", "integer") %in% allow_class))),
ifelse(allow_NA == TRUE, ", or NA", ""),
call = call_depth
)
}
if (isTRUE(is_positive)) {
stop_if(is.numeric(object) && !all(object > 0, na.rm = TRUE), "argument `", obj_name,
"` must ",
ifelse(!is.null(has_length) && length(has_length) == 1 && has_length == 1,
"be a number higher than zero",
"all be numbers higher than zero"
),
call = call_depth
)
}
if (isTRUE(is_positive_or_zero)) {
stop_if(is.numeric(object) && !all(object >= 0, na.rm = TRUE), "argument `", obj_name,
"` must ",
ifelse(!is.null(has_length) && length(has_length) == 1 && has_length == 1,
"be zero or a positive number",
"all be zero or numbers higher than zero"
),
call = call_depth
)
}
if (isTRUE(is_finite)) {
stop_if(is.numeric(object) && !all(is.finite(object[!is.na(object)]), na.rm = TRUE), "argument `", obj_name,
"` must ",
ifelse(!is.null(has_length) && length(has_length) == 1 && has_length == 1,
"be a finite number",
"all be finite numbers"
),
" (i.e. not be infinite)",
call = call_depth
)
}
if (!is.null(contains_column_class)) {
stop_ifnot(
any(vapply(
FUN.VALUE = logical(1),
object,
function(col, columns_class = contains_column_class) {
inherits(col, columns_class)
}
), na.rm = TRUE),
"the data provided in argument `", obj_name,
"` must contain at least one column of class '", contains_column_class[1L], "'. ",
"See `?as.", contains_column_class[1L], "`.",
call = call_depth
)
}
if (!is.null(allow_arguments_from) && !is.null(names(object))) {
args_given <- names(object)
if (is.function(allow_arguments_from)) {
allow_arguments_from <- list(allow_arguments_from)
}
args_allowed <- sort(unique(unlist(lapply(allow_arguments_from, function(x) names(formals(x))))))
args_allowed <- args_allowed[args_allowed != "..."]
disallowed <- args_given[!args_given %in% args_allowed]
stop_if(length(disallowed) > 0,
ifelse(length(disallowed) == 1,
paste("the argument", vector_and(disallowed), "is"),
paste("the arguments", vector_and(disallowed), "are")
),
" not valid. Valid arguments are: ",
vector_and(args_allowed), ".",
call = call_depth
)
}
return(invisible())
}
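# Illustrative usage as found throughout this package (not run; the second
# call is a hypothetical composition of the available checks):
#   meet_criteria(type, allow_class = "character", has_length = 1)
#   meet_criteria(x, allow_class = "data.frame", contains_column_class = "sir")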
get_current_data <- function(arg_name, call) {
valid_df <- function(x) {
!is.null(x) && is.data.frame(x)
}
frms <- sys.frames()
# check dplyr environments to support dplyr groups
with_mask <- vapply(FUN.VALUE = logical(1), frms, function(e) !is.null(e$mask))
for (env in frms[which(with_mask)]) {
if (is.function(env$mask$current_rows) && (valid_df(env$data) || valid_df(env$`.data`))) {
# an element `.data` or `data` (containing all data) and `mask` (containing functions) will be in the environment when using dplyr verbs
# we use their mask$current_rows() to get the group rows, since dplyr::cur_data_all() is deprecated and will be removed in the future
# e.g. for `example_isolates %>% group_by(ward) %>% mutate(first = first_isolate(.))`
if (valid_df(env$data)) {
# support for dplyr 1.1.x
df <- env$data
} else {
# support for dplyr 1.0.x
df <- env$`.data`
}
rows <- tryCatch(env$mask$current_rows(), error = function(e) seq_len(NROW(df)))
return(df[rows, , drop = FALSE])
}
}
# now go over all underlying environments looking for other dplyr, data.table and base R selection environments
with_generic <- vapply(FUN.VALUE = logical(1), frms, function(e) !is.null(e$`.Generic`))
for (env in frms[which(with_generic)]) {
if (valid_df(env$`.data`)) {
# an element `.data` will be in the environment when using dplyr::select()
return(env$`.data`)
} else if (valid_df(env$xx)) {
# an element `xx` will be in the environment for rows + cols in base R, e.g. `example_isolates[c(1:3), carbapenems()]`
return(env$xx)
} else if (valid_df(env$x)) {
# an element `x` will be in the environment for only cols in base R, e.g. `example_isolates[, carbapenems()]`
# this element will also be present in data.table environments where there's a .Generic available
return(env$x)
}
}
# now a special case for dplyr's 'scoped' variants
with_tbl <- vapply(FUN.VALUE = logical(1), frms, function(e) valid_df(e$`.tbl`))
for (env in frms[which(with_tbl)]) {
if (!is.null(names(env)) && all(c(".tbl", ".vars", ".cols") %in% names(env), na.rm = TRUE)) {
# an element `.tbl` will be in the environment when using scoped dplyr variants, with or without `dplyr::vars()`
# (e.g. `dplyr::summarise_at()` or `dplyr::mutate_at()`)
return(env$`.tbl`)
}
}
# no data.frame found, so an error must be returned:
if (is.na(arg_name)) {
if (isTRUE(is.numeric(call))) {
fn <- as.character(sys.call(call + 1)[1])
examples <- paste0(
", e.g.:\n",
" your_data %>% select(", fn, "())\n",
" your_data %>% select(column_a, column_b, ", fn, "())\n",
" your_data[, ", fn, "()]\n",
' your_data[, c("column_a", "column_b", ', fn, "())]"
)
} else {
examples <- ""
}
stop_("this function must be used inside a `dplyr` verb or `data.frame` call",
examples,
call = call
)
} else {
# mimic a base R error that the argument is missing
stop_("argument `", arg_name, "` is missing with no default", call = call)
}
}
get_current_column <- function() {
  # try dplyr::cur_column() first
cur_column <- import_fn("cur_column", "dplyr", error_on_fail = FALSE)
out <- tryCatch(cur_column(), error = function(e) NULL)
if (!is.null(out)) {
return(out)
}
  # cur_column() doesn't always work (dplyr only allows it under certain conditions), but the column name can often still be recovered from the call stack:
frms <- lapply(sys.frames(), function(env) {
if (tryCatch(!is.null(env$i), error = function(e) FALSE)) {
if (!is.null(env$tibble_vars)) {
# for mutate_if()
env$tibble_vars[env$i]
} else {
# for mutate(across())
df <- tryCatch(get_current_data(NA, 0), error = function(e) NULL)
if (is.data.frame(df)) {
colnames(df)[env$i]
} else {
env$i
}
}
} else {
NULL
}
})
vars <- unlist(frms)
if (length(vars) > 0) {
vars[length(vars)]
} else {
# not found, so:
NULL
}
}
is_null_or_grouped_tbl <- function(x) {
# class "grouped_data" is from {poorman}, see aa_helper_pm_functions.R
# class "grouped_df" is from {dplyr} and might change at one point, so only set in one place; here.
is.null(x) || inherits(x, "grouped_data") || inherits(x, "grouped_df")
}
get_group_names <- function(x) {
if ("pm_groups" %in% names(attributes(x))) {
pm_get_groups(x)
} else if (!is.null(x) && is_null_or_grouped_tbl(x)) {
grps <- colnames(attributes(x)$groups)
grps[!grps %in% c(".group_id", ".rows")]
} else {
character(0)
}
}
unique_call_id <- function(entire_session = FALSE, match_fn = NULL) {
if (entire_session == TRUE) {
return(c(envir = "session", call = "session"))
}
# combination of environment ID (such as "0x7fed4ee8c848")
# and relevant system call (where 'match_fn' is being called in)
calls <- sys.calls()
in_test <- any(as.character(calls[[1]]) %like_case% "run_test_dir|run_test_file|test_all|tinytest|test_package|testthat", na.rm = TRUE)
if (!isTRUE(in_test) && !is.null(match_fn)) {
for (i in seq_len(length(calls))) {
call_clean <- gsub("[^a-zA-Z0-9_().-]", "", as.character(calls[[i]]), perl = TRUE)
if (match_fn %in% call_clean || any(call_clean %like% paste0(match_fn, "\\("), na.rm = TRUE)) {
return(c(
envir = gsub("<environment: (.*)>", "\\1", utils::capture.output(sys.frames()[[1]]), perl = TRUE),
call = paste0(deparse(calls[[i]]), collapse = "")
))
}
}
}
c(
envir = paste0(sample(c(0:9, letters[1:6]), size = 32, replace = TRUE), collapse = ""),
call = paste0(sample(c(0:9, letters[1:6]), size = 32, replace = TRUE), collapse = "")
)
}
#' @noRd
#' @param fn name of the function as a character
#' @param ... character elements to be pasted together as a 'salt'
#' @param entire_session show message once per session
message_not_thrown_before <- function(fn, ..., entire_session = FALSE) {
# this is to prevent that messages/notes will be printed for every dplyr group or more than once per session
# e.g. this would show a msg 4 times: example_isolates %>% group_by(ward) %>% filter(mo_is_gram_negative())
salt <- gsub("[^a-zA-Z0-9|_-]", "?", substr(paste(c(...), sep = "|", collapse = "|"), 1, 512), perl = TRUE)
not_thrown_before <- is.null(AMR_env[[paste0("thrown_msg.", fn, ".", salt)]]) ||
!identical(
AMR_env[[paste0("thrown_msg.", fn, ".", salt)]],
unique_call_id(
entire_session = entire_session,
match_fn = fn
)
)
if (isTRUE(not_thrown_before)) {
# message was not thrown before - remember this so on the next run it will return FALSE:
assign(
x = paste0("thrown_msg.", fn, ".", salt),
value = unique_call_id(entire_session = entire_session, match_fn = fn),
envir = AMR_env
)
}
not_thrown_before
}
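# Illustrative usage (not run; the function name and salt are hypothetical):
#   if (message_not_thrown_before("my_fn", "some_salt")) {
#     message_("This note is printed only once per unique call.")
#   }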
has_colour <- function() {
# this is a base R version of crayon::has_color, but disables colours on emacs
if (Sys.getenv("EMACS") != "" || Sys.getenv("INSIDE_EMACS") != "") {
# disable on emacs, which only supports 8 colours
return(FALSE)
}
enabled <- getOption("crayon.enabled")
if (!is.null(enabled)) {
return(isTRUE(enabled))
}
rstudio_with_ansi_support <- function(x) {
if (Sys.getenv("RSTUDIO", "") == "") {
return(FALSE)
}
if ((cols <- Sys.getenv("RSTUDIO_CONSOLE_COLOR", "")) != "" && !is.na(as.double(cols))) {
return(TRUE)
}
tryCatch(getExportedValue("isAvailable", ns = asNamespace("rstudioapi"))(), error = function(e) {
return(FALSE)
}) &&
tryCatch(getExportedValue("hasFun", ns = asNamespace("rstudioapi"))("getConsoleHasColor"), error = function(e) {
return(FALSE)
})
}
if (rstudio_with_ansi_support() && sink.number() == 0) {
return(TRUE)
}
if (!isatty(stdout())) {
return(FALSE)
}
if (tolower(Sys.info()["sysname"]) == "windows") {
if (Sys.getenv("ConEmuANSI") == "ON") {
return(TRUE)
}
if (Sys.getenv("CMDER_ROOT") != "") {
return(TRUE)
}
return(FALSE)
}
if ("COLORTERM" %in% names(Sys.getenv())) {
return(TRUE)
}
if (Sys.getenv("TERM") == "dumb") {
return(FALSE)
}
grepl(
pattern = "^screen|^xterm|^vt100|color|ansi|cygwin|linux",
x = Sys.getenv("TERM"),
ignore.case = TRUE,
perl = TRUE
)
}
# set colours if console has_colour()
try_colour <- function(..., before, after, collapse = " ") {
if (length(c(...)) == 0) {
return(character(0))
}
txt <- paste0(c(...), collapse = collapse)
if (isTRUE(has_colour())) {
if (is.null(collapse)) {
paste0(before, txt, after, collapse = NULL)
} else {
paste0(before, txt, after, collapse = "")
}
} else {
txt
}
}
is_dark <- function() {
if (is.null(AMR_env$is_dark_theme)) {
AMR_env$is_dark_theme <- !has_colour() || tryCatch(isTRUE(getExportedValue("getThemeInfo", ns = asNamespace("rstudioapi"))()$dark), error = function(e) FALSE)
}
isTRUE(AMR_env$is_dark_theme)
}
font_black <- function(..., collapse = " ", adapt = TRUE) {
before <- "\033[38;5;232m"
after <- "\033[39m"
if (isTRUE(adapt) && is_dark()) {
# white
before <- "\033[37m"
after <- "\033[39m"
}
try_colour(..., before = before, after = after, collapse = collapse)
}
font_white <- function(..., collapse = " ", adapt = TRUE) {
before <- "\033[37m"
after <- "\033[39m"
if (isTRUE(adapt) && is_dark()) {
# black
before <- "\033[38;5;232m"
after <- "\033[39m"
}
try_colour(..., before = before, after = after, collapse = collapse)
}
font_blue <- function(..., collapse = " ") {
try_colour(..., before = "\033[34m", after = "\033[39m", collapse = collapse)
}
font_green <- function(..., collapse = " ") {
try_colour(..., before = "\033[32m", after = "\033[39m", collapse = collapse)
}
font_magenta <- function(..., collapse = " ") {
try_colour(..., before = "\033[35m", after = "\033[39m", collapse = collapse)
}
font_red <- function(..., collapse = " ") {
try_colour(..., before = "\033[31m", after = "\033[39m", collapse = collapse)
}
font_silver <- function(..., collapse = " ") {
try_colour(..., before = "\033[90m", after = "\033[39m", collapse = collapse)
}
font_yellow <- function(..., collapse = " ") {
try_colour(..., before = "\033[33m", after = "\033[39m", collapse = collapse)
}
font_subtle <- function(..., collapse = " ") {
try_colour(..., before = "\033[38;5;246m", after = "\033[39m", collapse = collapse)
}
font_grey <- function(..., collapse = " ") {
try_colour(..., before = "\033[38;5;249m", after = "\033[39m", collapse = collapse)
}
font_grey_bg <- function(..., collapse = " ") {
if (is_dark()) {
# similar to HTML #444444
try_colour(..., before = "\033[48;5;238m", after = "\033[49m", collapse = collapse)
} else {
# similar to HTML #f0f0f0
try_colour(..., before = "\033[48;5;255m", after = "\033[49m", collapse = collapse)
}
}
font_red_bg <- function(..., collapse = " ") {
# this is #ed553b (picked to be colourblind-safe with other SIR colours)
try_colour(font_black(..., collapse = collapse, adapt = FALSE), before = "\033[48;5;203m", after = "\033[49m", collapse = collapse)
}
font_orange_bg <- function(..., collapse = " ") {
# this is #f6d55c (picked to be colourblind-safe with other SIR colours)
try_colour(font_black(..., collapse = collapse, adapt = FALSE), before = "\033[48;5;222m", after = "\033[49m", collapse = collapse)
}
font_yellow_bg <- function(..., collapse = " ") {
try_colour(font_black(..., collapse = collapse, adapt = FALSE), before = "\033[48;5;228m", after = "\033[49m", collapse = collapse)
}
font_green_bg <- function(..., collapse = " ") {
# this is #3caea3 (picked to be colourblind-safe with other SIR colours)
try_colour(font_black(..., collapse = collapse, adapt = FALSE), before = "\033[48;5;79m", after = "\033[49m", collapse = collapse)
}
font_purple_bg <- function(..., collapse = " ") {
try_colour(font_black(..., collapse = collapse, adapt = FALSE), before = "\033[48;5;89m", after = "\033[49m", collapse = collapse)
}
font_rose_bg <- function(..., collapse = " ") {
try_colour(font_black(..., collapse = collapse, adapt = FALSE), before = "\033[48;5;217m", after = "\033[49m", collapse = collapse)
}
font_na <- function(..., collapse = " ") {
font_red(..., collapse = collapse)
}
font_bold <- function(..., collapse = " ") {
try_colour(..., before = "\033[1m", after = "\033[22m", collapse = collapse)
}
font_italic <- function(..., collapse = " ") {
try_colour(..., before = "\033[3m", after = "\033[23m", collapse = collapse)
}
font_underline <- function(..., collapse = " ") {
try_colour(..., before = "\033[4m", after = "\033[24m", collapse = collapse)
}
font_url <- function(url, txt = url) {
if (tryCatch(isTRUE(getExportedValue("ansi_has_hyperlink_support", ns = asNamespace("cli"))()), error = function(e) FALSE)) {
paste0("\033]8;;", url, "\a", txt, "\033]8;;\a")
} else {
url
}
}
font_stripstyle <- function(x) {
# remove URLs
x <- gsub("\033]8;;(.*?)\a.*?\033]8;;\a", "\\1", x)
# from crayon:::ansi_regex
x <- gsub("(?:(?:\\x{001b}\\[)|\\x{009b})(?:(?:[0-9]{1,3})?(?:(?:;[0-9]{0,3})*)?[A-M|f-m])|\\x{001b}[A-M]", "", x, perl = TRUE)
x
}
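# Illustrative sketch (not run; styling only applies when has_colour() is TRUE):
#   font_bold("text")                    # "\033[1mtext\033[22m"
#   font_stripstyle(font_bold("text"))   # "text"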
progress_ticker <- function(n = 1, n_min = 0, print = TRUE, clear = TRUE, title = "", only_bar_percent = FALSE, ...) {
if (print == FALSE || n < n_min) {
# create fake/empty object
pb <- list()
pb$tick <- function() {
invisible()
}
pb$kill <- function() {
invisible()
}
set_clean_class(pb, new_class = "txtProgressBar")
} else if (n >= n_min) {
# use `progress`, which also has a timer
progress_bar <- import_fn("progress_bar", "progress", error_on_fail = FALSE)
if (!is.null(progress_bar)) {
# so we use progress::progress_bar
# a close()-method was also added, see below for that
pb <- progress_bar$new(
format = paste0(title,
ifelse(only_bar_percent == TRUE, "[:bar] :percent", "[:bar] :percent (:current/:total,:eta)")),
clear = clear,
total = n
)
} else {
# use base R
pb <- utils::txtProgressBar(max = n, style = 3)
pb$tick <- function() {
pb$up(pb$getVal() + 1)
}
}
pb
}
}
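# Illustrative usage (not run):
#   pb <- progress_ticker(n = 100)
#   for (i in 1:100) pb$tick()
#   close(pb)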
#' @method close progress_bar
#' @export
#' @noRd
close.progress_bar <- function(con, ...) {
# for progress::progress_bar$new()
con$terminate()
}
set_clean_class <- function(x, new_class) {
# return the object with only the new class and no additional attributes where possible
if (is.null(x)) {
x <- NA_character_
}
if (is.factor(x)) {
# keep only levels and remove all other attributes
lvls <- levels(x)
attributes(x) <- NULL
levels(x) <- lvls
} else if (!is.list(x) && !is.function(x)) {
attributes(x) <- NULL
}
class(x) <- new_class
x
}
formatted_filesize <- function(...) {
size_kb <- file.size(...) / 1024
if (size_kb < 1) {
paste(round(size_kb, 1), "kB")
} else if (size_kb < 100) {
paste(round(size_kb, 0), "kB")
} else {
paste(round(size_kb / 1024, 1), "MB")
}
}
create_pillar_column <- function(x, ...) {
new_pillar_shaft_simple <- import_fn("new_pillar_shaft_simple", "pillar")
new_pillar_shaft_simple(x, ...)
}
as_original_data_class <- function(df, old_class = NULL, extra_class = NULL) {
if ("tbl_df" %in% old_class && pkg_is_available("tibble")) {
# this will then also remove groups
fn <- import_fn("as_tibble", "tibble")
} else if ("tbl_ts" %in% old_class && pkg_is_available("tsibble")) {
fn <- import_fn("as_tsibble", "tsibble")
} else if ("data.table" %in% old_class && pkg_is_available("data.table")) {
fn <- import_fn("as.data.table", "data.table")
} else if ("tabyl" %in% old_class && pkg_is_available("janitor")) {
fn <- import_fn("as_tabyl", "janitor")
} else {
    fn <- function(x) base::as.data.frame(x, stringsAsFactors = FALSE)
}
out <- fn(df)
if (!is.null(extra_class)) {
class(out) <- c(extra_class, class(out))
}
out
}
# works exactly like round(), but rounds `round2(44.55, 1)` to 44.6 instead of 44.5
# and adds decimal zeroes until `digits` is reached when force_zero = TRUE
round2 <- function(x, digits = 1, force_zero = TRUE) {
x <- as.double(x)
# https://stackoverflow.com/a/12688836/4575331
val <- (trunc((abs(x) * 10^digits) + 0.5) / 10^digits) * sign(x)
if (digits > 0 && force_zero == TRUE) {
values_trans <- val[val != as.integer(val) & !is.na(val)]
val[val != as.integer(val) & !is.na(val)] <- paste0(
values_trans,
strrep(
"0",
max(
0,
digits - nchar(
format(
as.double(
gsub(
".*[.](.*)$",
"\\1",
values_trans
)
),
scientific = FALSE
)
)
)
)
)
}
as.double(val)
}
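# Illustrative sketch (not run): halves are always rounded up, unlike base
# round() which rounds half to even:
#   round2(0.5, 0)     # 1
#   round(0.5, 0)      # 0
#   round2(44.55, 1)   # 44.6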
# percentage from our other package: 'cleaner'
percentage <- function(x, digits = NULL, ...) {
# getdecimalplaces() function
getdecimalplaces <- function(x, minimum = 0, maximum = 3) {
if (maximum < minimum) {
maximum <- minimum
}
if (minimum > maximum) {
minimum <- maximum
}
max_places <- max(unlist(lapply(
strsplit(sub(
"0+$", "",
as.character(x * 100)
), ".", fixed = TRUE),
function(y) ifelse(length(y) == 2, nchar(y[2]), 0)
)), na.rm = TRUE)
max(
min(max_places,
maximum,
na.rm = TRUE
),
minimum,
na.rm = TRUE
)
}
# format_percentage() function
format_percentage <- function(x, digits = NULL, ...) {
if (is.null(digits)) {
digits <- getdecimalplaces(x)
}
if (is.null(digits) || is.na(digits) || !is.numeric(digits)) {
digits <- 2
}
# round right: percentage(0.4455) and format(as.percentage(0.4455), 1) should return "44.6%", not "44.5%"
x_formatted <- format(round2(as.double(x), digits = digits + 2) * 100,
scientific = FALSE,
digits = max(1, digits),
nsmall = digits,
...
)
x_formatted <- paste0(x_formatted, "%")
x_formatted[!grepl(pattern = "^[0-9.,e-]+$", x = x)] <- NA_character_
x_formatted
}
# the actual working part
x <- as.double(x)
if (is.null(digits)) {
# max one digit if undefined
digits <- getdecimalplaces(x, minimum = 0, maximum = 1)
}
format_percentage(
structure(
.Data = as.double(x),
class = c("percentage", "numeric")
),
digits = digits, ...
)
}
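# Illustrative sketch (not run), following the rounding note above:
#   percentage(0.4455)   # "44.6%"
#   percentage(0.5)      # "50%"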
add_intrinsic_resistance_to_AMR_env <- function() {
# for mo_is_intrinsic_resistant() - saves a lot of time when executed on this vector
if (is.null(AMR_env$intrinsic_resistant)) {
AMR_env$intrinsic_resistant <- paste(AMR::intrinsic_resistant$mo, AMR::intrinsic_resistant$ab)
}
}
add_MO_lookup_to_AMR_env <- function() {
# for all MO functions, saves a lot of time on package load and in package size
if (is.null(AMR_env$MO_lookup)) {
MO_lookup <- AMR::microorganisms
MO_lookup$kingdom_index <- NA_real_
MO_lookup[which(MO_lookup$kingdom == "Bacteria" | MO_lookup$mo == "UNKNOWN"), "kingdom_index"] <- 1
MO_lookup[which(MO_lookup$kingdom == "Fungi"), "kingdom_index"] <- 1.25
MO_lookup[which(MO_lookup$kingdom == "Protozoa"), "kingdom_index"] <- 1.5
MO_lookup[which(MO_lookup$kingdom == "Archaea"), "kingdom_index"] <- 2
# all the rest
MO_lookup[which(is.na(MO_lookup$kingdom_index)), "kingdom_index"] <- 3
# the fullname lowercase, important for the internal algorithms in as.mo()
MO_lookup$fullname_lower <- tolower(trimws(paste(
MO_lookup$genus,
MO_lookup$species,
MO_lookup$subspecies
)))
ind <- MO_lookup$genus == "" | grepl("^[(]unknown ", MO_lookup$fullname, perl = TRUE)
MO_lookup[ind, "fullname_lower"] <- tolower(MO_lookup[ind, "fullname", drop = TRUE])
MO_lookup$fullname_lower <- trimws(gsub("[^.a-z0-9/ \\-]+", "", MO_lookup$fullname_lower, perl = TRUE))
# special for Salmonella - they have cities as subspecies but not the species (enterica) in the fullname:
MO_lookup$fullname_lower[which(MO_lookup$subspecies %like_case% "^[A-Z]")] <- gsub(" enterica ", " ", MO_lookup$fullname_lower[which(MO_lookup$subspecies %like_case% "^[A-Z]")], fixed = TRUE)
MO_lookup$full_first <- substr(MO_lookup$fullname_lower, 1, 1)
MO_lookup$species_first <- tolower(substr(MO_lookup$species, 1, 1)) # tolower for groups (Streptococcus, Salmonella)
MO_lookup$subspecies_first <- tolower(substr(MO_lookup$subspecies, 1, 1)) # tolower for Salmonella serovars
AMR_env$MO_lookup <- MO_lookup
}
}
trimws2 <- function(..., whitespace = "[\u0009\u000A\u000B\u000C\u000D\u0020\u0085\u00A0\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u200B\u200C\u200D\u2028\u2029\u202F\u205F\u2060\u3000\uFEFF]") {
  # this is trimws(), but with a much broader definition of whitespace than its default "[ \t\r\n]" (including Unicode spaces, zero-width characters and the BOM).
trimws(..., whitespace = whitespace)
}
totitle <- function(x) {
gsub("^(.)", "\\U\\1", x, perl = TRUE)
}
readRDS_AMR <- function(file, refhook = NULL) {
# this is readRDS with remote file support
con <- file(file)
on.exit(close(con))
readRDS(con, refhook = refhook)
}
# Faster data.table implementations ----
match <- function(x, table, ...) {
if (!is.null(AMR_env$chmatch) && inherits(x, "character") && inherits(table, "character")) {
# data.table::chmatch() is much faster than base::match() for character
AMR_env$chmatch(x, table, ...)
} else {
base::match(x, table, ...)
}
}
`%in%` <- function(x, table) {
if (!is.null(AMR_env$chin) && inherits(x, "character") && inherits(table, "character")) {
# data.table::`%chin%`() is much faster than base::`%in%`() for character
AMR_env$chin(x, table)
} else {
base::`%in%`(x, table)
}
}
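# Illustrative note: these masked versions are transparent to callers - when
# data.table is not available (AMR_env$chmatch / AMR_env$chin are NULL), they
# behave exactly like base::match() and base::`%in%`:
#   match(c("b", "a"), c("a", "b", "c"))   # 2 1
#   c("b", "a") %in% c("a", "c")           # FALSE TRUE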
# nolint start
# Register S3 methods ----
# copied from vctrs::s3_register by their permission:
# https://github.com/r-lib/vctrs/blob/05968ce8e669f73213e3e894b5f4424af4f46316/R/register-s3.R
s3_register <- function(generic, class, method = NULL) {
stopifnot(is.character(generic), length(generic) == 1)
stopifnot(is.character(class), length(class) == 1)
pieces <- strsplit(generic, "::")[[1]]
stopifnot(length(pieces) == 2)
package <- pieces[[1]]
generic <- pieces[[2]]
caller <- parent.frame()
get_method_env <- function() {
top <- topenv(caller)
if (isNamespace(top)) {
asNamespace(environmentName(top))
} else {
caller
}
}
get_method <- function(method, env) {
if (is.null(method)) {
get(paste0(generic, ".", class), envir = get_method_env())
} else {
method
}
}
method_fn <- get_method(method)
stopifnot(is.function(method_fn))
setHook(packageEvent(package, "onLoad"), function(...) {
ns <- asNamespace(package)
method_fn <- get_method(method)
registerS3method(generic, class, method_fn, envir = ns)
})
if (!isNamespaceLoaded(package)) {
return(invisible())
}
envir <- asNamespace(package)
if (exists(generic, envir)) {
registerS3method(generic, class, method_fn, envir = envir)
}
invisible()
}
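# Illustrative usage (not run; the generic/class pair is an assumption for the
# sake of example): register a method for a soft dependency, which becomes
# active as soon as that package is loaded:
#   s3_register("pillar::pillar_shaft", "ab")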
# Support old R versions ----
# these functions were not available in previous versions of R
# see here for the full list: https://github.com/r-lib/backports
if (getRversion() < "3.1.0") {
# R-3.0 does not contain these functions, set them here to prevent installation failure
# (required for extension of the 'mic' class)
cospi <- function(...) 1
sinpi <- function(...) 1
tanpi <- function(...) 1
}
if (getRversion() < "3.2.0") {
anyNA <- function(x, recursive = FALSE) {
if (isTRUE(recursive) && (is.list(x) || is.pairlist(x))) {
return(any(rapply(x, anyNA, how = "unlist", recursive = FALSE)))
}
any(is.na(x))
}
dir.exists <- function(paths) {
x <- base::file.info(paths)$isdir
!is.na(x) & x
}
file.size <- function(...) {
file.info(...)$size
}
file.mtime <- function(...) {
file.info(...)$mtime
}
isNamespaceLoaded <- function(pkg) {
pkg %in% loadedNamespaces()
}
lengths <- function(x, use.names = TRUE) {
vapply(x, length, FUN.VALUE = NA_integer_, USE.NAMES = use.names)
}
}
if (getRversion() < "3.3.0") {
strrep <- function(x, times) {
x <- as.character(x)
if (length(x) == 0L) {
return(x)
}
unlist(.mapply(function(x, times) {
if (is.na(x) || is.na(times)) {
return(NA_character_)
}
if (times <= 0L) {
return("")
}
paste0(replicate(times, x), collapse = "")
}, list(x = x, times = times), MoreArgs = list()), use.names = FALSE)
}
}
if (getRversion() < "3.5.0") {
isFALSE <- function(x) {
is.logical(x) && length(x) == 1L && !is.na(x) && !x
}
}
if (getRversion() < "3.6.0") {
str2lang <- function(s) {
stopifnot(length(s) == 1L)
ex <- parse(text = s, keep.source = FALSE)
stopifnot(length(ex) == 1L)
ex[[1L]]
}
  # trimws() was introduced in 3.3.0, but its argument `whitespace` only in 3.6.0
trimws <- function(x, which = c("both", "left", "right"), whitespace = "[ \t\r\n]") {
which <- match.arg(which)
mysub <- function(re, x) sub(re, "", x, perl = TRUE)
switch(which,
left = mysub(paste0("^", whitespace, "+"), x),
right = mysub(paste0(whitespace, "+$"), x),
both = mysub(paste0(whitespace, "+$"), mysub(paste0("^", whitespace, "+"), x))
)
}
}
if (getRversion() < "4.0.0") {
deparse1 <- function(expr, collapse = " ", width.cutoff = 500L, ...) {
paste(deparse(expr, width.cutoff, ...), collapse = collapse)
}
}
# nolint end
| /scratch/gouwar.j/cran-all/cranData/AMR/R/aa_helper_functions.R |
# ==================================================================== #
# TITLE: #
# AMR: An R Package for Working with Antimicrobial Resistance Data #
# #
# SOURCE CODE: #
# https://github.com/msberends/AMR #
# #
# PLEASE CITE THIS SOFTWARE AS: #
# Berends MS, Luz CF, Friedrich AW, Sinha BNM, Albers CJ, Glasner C #
# (2022). AMR: An R Package for Working with Antimicrobial Resistance #
# Data. Journal of Statistical Software, 104(3), 1-31. #
# https://doi.org/10.18637/jss.v104.i03 #
# #
# Developed at the University of Groningen and the University Medical #
# Center Groningen in The Netherlands, in collaboration with many #
# colleagues from around the world, see our website. #
# #
# This R package is free software; you can freely use and distribute #
# it for both personal and commercial purposes under the terms of the #
# GNU General Public License version 2.0 (GNU GPL-2), as published by #
# the Free Software Foundation. #
# We created this package for both routine data analysis and academic #
# research and it was publicly released in the hope that it will be #
# useful, but it comes WITHOUT ANY WARRANTY OR LIABILITY. #
# #
# Visit our website for the full manual and a complete tutorial about #
# how to conduct AMR data analysis: https://msberends.github.io/AMR/ #
# ==================================================================== #
# ------------------------------------------------
# THIS FILE WAS CREATED AUTOMATICALLY!
# Source file: data-raw/reproduction_of_poorman.R
# ------------------------------------------------
# poorman: a package to replace all dplyr functions with base R so we can lose dependency on dplyr.
# These functions were downloaded from https://github.com/nathaneastwood/poorman,
# from this commit: https://github.com/nathaneastwood/poorman/tree/52eb6947e0b4430cd588976ed8820013eddf955f.
#
# All functions are prefixed with 'pm_' to make it obvious that they are dplyr substitutes.
#
# All code below was released under MIT license, that permits 'free of charge, to any person obtaining a
# copy of the software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
# is furnished to do so', given that a copyright notice is given in the software.
#
# Copyright notice on 19 September 2020, the day this code was downloaded, as found on
# https://github.com/nathaneastwood/poorman/blob/52eb6947e0b4430cd588976ed8820013eddf955f/LICENSE:
# YEAR: 2020
# COPYRIGHT HOLDER: Nathan Eastwood
pm_arrange <- function(.data, ...) {
pm_check_is_dataframe(.data)
if ("grouped_data" %in% class(.data)) {
pm_arrange.grouped_data(.data, ...)
} else {
pm_arrange.default(.data, ...)
}
}
pm_arrange.default <- function(.data, ...) {
pm_context$setup(.data)
on.exit(pm_context$clean(), add = TRUE)
rows <- eval(substitute(order(...)), envir = pm_context$.data)
.data[rows, , drop = FALSE]
}
pm_arrange.grouped_data <- function(.data, ...) {
pm_apply_grouped_function("pm_arrange", .data, drop = TRUE, ...)
}
pm_between <- function(x, left, right) {
if (!is.null(attr(x, "class")) && !inherits(x, c("Date", "POSIXct"))) {
warning("`pm_between()` called on numeric vector with S3 class")
}
if (!is.double(x)) x <- as.numeric(x)
x >= as.numeric(left) & x <= as.numeric(right)
}
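# Illustrative sketch (not run):
#   pm_between(1:5, 2, 4)   # FALSE TRUE TRUE TRUE FALSE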
pm_context <- new.env()
# Data
pm_context$setup <- function(.data) pm_context$.data <- .data
pm_context$get_data <- function() pm_context$.data
pm_context$get_nrow <- function() nrow(pm_context$.data)
pm_context$get_colnames <- function() colnames(pm_context$.data)
pm_context$clean <- function() rm(list = c(".data"), envir = pm_context)
pm_n <- function() {
pm_check_group_pm_context("`pm_n()`")
pm_context$get_nrow()
}
pm_cur_data <- function() {
pm_check_group_pm_context("`pm_cur_data()`")
data <- pm_context$get_data()
data[, !(colnames(data) %in% pm_get_groups(data)), drop = FALSE]
}
pm_cur_group <- function() {
pm_check_group_pm_context("`pm_cur_group()`")
data <- pm_context$get_data()
res <- data[1L, pm_get_groups(data), drop = FALSE]
rownames(res) <- NULL
res
}
pm_cur_group_id <- function() {
pm_check_group_pm_context("`pm_cur_group_id()`")
data <- pm_context$get_data()
res <- data[1L, pm_get_groups(data), drop = FALSE]
details <- pm_get_group_details(data)
details[, ".group_id"] <- seq_len(nrow(details))
res <- suppressMessages(pm_semi_join(details, res))
list(res[, ".group_id"])
}
pm_cur_group_rows <- function() {
pm_check_group_pm_context("`pm_cur_group_rows()`")
data <- pm_context$get_data()
res <- data[1L, pm_get_groups(data), drop = FALSE]
res <- suppressMessages(pm_semi_join(pm_get_group_details(data), res))
unlist(res[, ".rows"])
}
pm_check_group_pm_context <- function(fn) {
if (is.null(pm_context$.data)) {
stop(fn, " must only be used inside poorman verbs")
}
}
pm_count <- function(x, ..., wt = NULL, sort = FALSE, name = NULL) {
pm_groups <- pm_get_groups(x)
if (!missing(...)) x <- pm_group_by(x, ..., .add = TRUE)
wt <- pm_deparse_var(wt)
res <- do.call(pm_tally, list(x, wt, sort, name))
if (length(pm_groups) > 0L) res <- do.call(pm_group_by, list(res, as.name(pm_groups)))
res
}
pm_tally <- function(x, wt = NULL, sort = FALSE, name = NULL) {
name <- pm_check_name(x, name)
wt <- pm_deparse_var(wt)
res <- do.call(pm_summarise, pm_set_names(list(x, pm_tally_n(x, wt)), c(".data", name)))
res <- pm_ungroup(res)
if (isTRUE(sort)) res <- do.call(pm_arrange, list(res, call("pm_desc", as.name(name))))
rownames(res) <- NULL
res
}
pm_add_count <- function(x, ..., wt = NULL, sort = FALSE, name = NULL) {
name <- pm_check_name(x, name)
row_names <- rownames(x)
wt <- pm_deparse_var(wt)
if (!missing(...)) x <- pm_group_by(x, ..., .add = TRUE)
res <- do.call(pm_add_tally, list(x, wt, sort, name))
res[row_names, ]
}
pm_add_tally <- function(x, wt = NULL, sort = FALSE, name = NULL) {
wt <- pm_deparse_var(wt)
pm_n <- pm_tally_n(x, wt)
name <- pm_check_name(x, name)
res <- do.call(pm_mutate, pm_set_names(list(x, pm_n), c(".data", name)))
if (isTRUE(sort)) {
do.call(pm_arrange, list(res, call("pm_desc", as.name(name))))
} else {
res
}
}
pm_tally_n <- function(x, wt) {
if (is.null(wt) && "pm_n" %in% colnames(x)) {
message("Using `pm_n` as weighting variable")
wt <- "pm_n"
}
pm_context$setup(.data = x)
on.exit(pm_context$clean(), add = TRUE)
if (is.null(wt)) {
call("pm_n")
} else {
call("sum", as.name(wt), na.rm = TRUE)
}
}
pm_check_name <- function(df, name) {
if (is.null(name)) {
if ("pm_n" %in% colnames(df)) {
stop(
"Column 'pm_n' is already present in output\n",
"* Use `name = \"new_name\"` to pick a new name"
)
}
return("pm_n")
}
if (!is.character(name) || length(name) != 1) {
stop("`name` must be a single string")
}
name
}
pm_desc <- function(x) -xtfrm(x)
pm_distinct <- function(.data, ...) {
pm_check_is_dataframe(.data)
if ("grouped_data" %in% class(.data)) {
pm_distinct.grouped_data(.data, ...)
} else {
pm_distinct.default(.data, ...)
}
}
pm_distinct.default <- function(.data, ..., .keep_all = FALSE) {
if (ncol(.data) == 0L) {
return(.data[1, ])
}
cols <- pm_deparse_dots(...)
col_names <- names(cols)
col_len <- length(cols)
if (is.null(col_names) && col_len > 0L) names(cols) <- cols
if (col_len == 0L) {
res <- .data
} else {
res <- pm_mutate(.data, ...)
col_names <- names(cols)
res <- if (!is.null(col_names)) {
zero_names <- nchar(col_names) == 0L
if (any(zero_names)) {
names(cols)[zero_names] <- cols[zero_names]
col_names <- names(cols)
}
suppressMessages(pm_select(res, col_names))
} else {
suppressMessages(pm_select(res, cols))
}
}
res <- unique(res)
if (isTRUE(.keep_all)) {
res <- cbind(res, .data[rownames(res), setdiff(colnames(.data), colnames(res)), drop = FALSE])
}
common_cols <- c(intersect(colnames(.data), colnames(res)), setdiff(col_names, colnames(.data)))
if (length(common_cols) > 0L) res[, common_cols, drop = FALSE] else res
}
pm_distinct.grouped_data <- function(.data, ..., .keep_all = FALSE) {
pm_apply_grouped_function("pm_distinct", .data, drop = TRUE, ..., .keep_all = .keep_all)
}
pm_eval_env <- new.env()
pm_filter <- function(.data, ...) {
pm_check_is_dataframe(.data)
if ("grouped_data" %in% class(.data)) {
pm_filter.grouped_data(.data, ...)
} else {
pm_filter.default(.data, ...)
}
}
pm_filter.default <- function(.data, ...) {
conditions <- pm_dotdotdot(...)
cond_class <- vapply(conditions, typeof, NA_character_)
if (any(cond_class != "language")) stop("Conditions must be logical vectors")
pm_context$setup(.data)
on.exit(pm_context$clean(), add = TRUE)
pm_eval_env$env <- parent.frame()
on.exit(rm(list = "env", envir = pm_eval_env), add = TRUE)
rows <- lapply(
conditions,
function(cond, frame) eval(cond, pm_context$.data, frame),
frame = pm_eval_env$env
)
rows <- Reduce("&", rows)
.data[rows & !is.na(rows), ]
}
pm_filter.grouped_data <- function(.data, ...) {
rows <- rownames(.data)
res <- pm_apply_grouped_function("pm_filter", .data, drop = TRUE, ...)
res[rows[rows %in% rownames(res)], ]
}
pm_group_by <- function(.data, ..., .add = FALSE) {
pm_check_is_dataframe(.data)
pre_groups <- pm_get_groups(.data)
pm_groups <- pm_deparse_dots(...)
if (isTRUE(.add)) pm_groups <- unique(c(pre_groups, pm_groups))
unknown <- !(pm_groups %in% colnames(.data))
if (any(unknown)) stop("Invalid pm_groups: ", pm_groups[unknown])
class(.data) <- c("grouped_data", class(.data))
pm_set_groups(.data, pm_groups)
}
pm_ungroup <- function(x, ...) {
pm_check_is_dataframe(x)
rm_groups <- pm_deparse_dots(...)
pm_groups <- pm_get_groups(x)
if (length(rm_groups) == 0L) rm_groups <- pm_groups
x <- pm_set_groups(x, pm_groups[!(pm_groups %in% rm_groups)])
if (length(attr(x, "pm_groups")) == 0L) {
attr(x, "pm_groups") <- NULL
class(x) <- class(x)[!(class(x) %in% "grouped_data")]
}
x
}
pm_set_groups <- function(x, pm_groups) {
attr(x, "pm_groups") <- if (is.null(pm_groups) || length(pm_groups) == 0L) {
NULL
} else {
pm_group_data_worker(x, pm_groups)
}
x
}
pm_get_groups <- function(x) {
pm_groups <- attr(x, "pm_groups", exact = TRUE)
if (is.null(pm_groups)) character(0) else colnames(pm_groups)[!colnames(pm_groups) %in% c(".group_id", ".rows")]
}
pm_get_group_details <- function(x) {
pm_groups <- attr(x, "pm_groups", exact = TRUE)
if (is.null(pm_groups)) character(0) else pm_groups
}
pm_has_groups <- function(x) {
pm_groups <- pm_get_groups(x)
if (length(pm_groups) == 0L) FALSE else TRUE
}
pm_apply_grouped_function <- function(fn, .data, drop = FALSE, ...) {
pm_groups <- pm_get_groups(.data)
grouped <- pm_split_into_groups(.data, pm_groups, drop)
res <- do.call(rbind, unname(lapply(grouped, fn, ...)))
if (any(pm_groups %in% colnames(res))) {
class(res) <- c("grouped_data", class(res))
res <- pm_set_groups(res, pm_groups[pm_groups %in% colnames(res)])
}
res
}
pm_print.grouped_data <- function(x, ..., digits = NULL, quote = FALSE, right = TRUE, row.names = TRUE, max = NULL) {
class(x) <- "data.frame"
print(x, ..., digits = digits, quote = quote, right = right, row.names = row.names, max = max)
cat("\nGroups: ", paste(pm_get_groups(x), collapse = ", "), "\n\n")
}
pm_group_data <- function(.data) {
if (!pm_has_groups(.data)) {
return(data.frame(.rows = I(list(seq_len(nrow(.data))))))
}
pm_groups <- pm_get_groups(.data)
pm_group_data_worker(.data, pm_groups)
}
pm_group_data_worker <- function(.data, pm_groups) {
res <- unique(.data[, pm_groups, drop = FALSE])
class(res) <- "data.frame"
nrow_res <- nrow(res)
rows <- rep(list(NA), nrow_res)
for (i in seq_len(nrow_res)) {
rows[[i]] <- which(interaction(.data[, pm_groups]) %in% interaction(res[i, pm_groups]))
}
res$`.rows` <- rows
res <- res[do.call(order, lapply(pm_groups, function(x) res[, x])), , drop = FALSE]
rownames(res) <- NULL
res
}
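# Illustration of the grouping metadata written by pm_set_groups() (a sketch
# in comments, not run): the "pm_groups" attribute holds one row per group
# plus a `.rows` list-column with the matching row indices, e.g.:
#
#   grouped <- pm_group_by(mtcars, cyl)
#   groups <- attr(grouped, "pm_groups")
#   groups$cyl            # 4 6 8
#   lengths(groups$.rows) # 11 7 14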
pm_group_rows <- function(.data) {
pm_group_data(.data)[[".rows"]]
}
pm_group_indices <- function(.data) {
if (!pm_has_groups(.data)) {
return(rep(1L, nrow(.data)))
}
pm_groups <- pm_get_groups(.data)
res <- unique(.data[, pm_groups, drop = FALSE])
res <- res[do.call(order, lapply(pm_groups, function(x) res[, x])), , drop = FALSE]
class(res) <- "data.frame"
nrow_data <- nrow(.data)
rows <- rep(NA, nrow_data)
for (i in seq_len(nrow_data)) {
rows[i] <- which(interaction(res[, pm_groups]) %in% interaction(.data[i, pm_groups]))
}
rows
}
pm_group_vars <- function(x) {
pm_get_groups(x)
}
pm_groups <- function(x) {
lapply(pm_get_groups(x), as.symbol)
}
pm_group_size <- function(x) {
lengths(pm_group_rows(x))
}
pm_n_groups <- function(x) {
nrow(pm_group_data(x))
}
# pm_group_split <- function(.data, ..., .keep = TRUE) {
# dots_len <- ...length() > 0L
# if (pm_has_groups(.data) && isTRUE(dots_len)) {
# warning("... is ignored in pm_group_split(<grouped_df>), please use pm_group_by(..., .add = TRUE) %pm>% pm_group_split()")
# }
# if (!pm_has_groups(.data) && isTRUE(dots_len)) {
# .data <- pm_group_by(.data, ...)
# }
# if (!pm_has_groups(.data) && isFALSE(dots_len)) {
# return(list(.data))
# }
# pm_context$setup(.data)
# on.exit(pm_context$clean(), add = TRUE)
# pm_groups <- pm_get_groups(.data)
# attr(pm_context$.data, "pm_groups") <- NULL
# res <- pm_split_into_groups(pm_context$.data, pm_groups)
# names(res) <- NULL
# if (isFALSE(.keep)) {
# res <- lapply(res, function(x) x[, !colnames(x) %in% pm_groups])
# }
# any_empty <- unlist(lapply(res, function(x) !(nrow(x) == 0L)))
# res[any_empty]
# }
pm_group_keys <- function(.data) {
pm_groups <- pm_get_groups(.data)
pm_context$setup(.data)
res <- pm_context$.data[, pm_context$get_colnames() %in% pm_groups, drop = FALSE]
res <- res[!duplicated(res), , drop = FALSE]
if (nrow(res) == 0L) {
return(res)
}
class(res) <- "data.frame"
res <- res[do.call(order, lapply(pm_groups, function(x) res[, x])), , drop = FALSE]
rownames(res) <- NULL
res
}
pm_split_into_groups <- function(.data, pm_groups, drop = FALSE, ...) {
class(.data) <- "data.frame"
group_factors <- lapply(pm_groups, function(x, .data) as.factor(.data[, x]), .data)
split(x = .data, f = group_factors, drop = drop, ...)
}
pm_if_else <- function(condition, true, false, missing = NULL) {
if (!is.logical(condition)) stop("`condition` must be a logical vector.")
cls_true <- class(true)
cls_false <- class(false)
cls_missing <- class(missing)
if (!identical(cls_true, cls_false)) {
stop("The class of `true` <", class(true), "> is not the same as the class of `false` <", class(false), ">")
}
if (!is.null(missing) && !identical(cls_true, cls_missing)) {
stop("`missing` must be a ", cls_true, " vector, not a ", cls_missing, " vector.")
}
res <- ifelse(condition, true, false)
if (!is.null(missing)) res[is.na(res)] <- missing
attributes(res) <- attributes(true)
res
}
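# A minimal sketch of pm_if_else() (in comments, not run): unlike base
# ifelse(), `true` and `false` must have identical classes, and `missing`
# fills in NA results:
#
#   pm_if_else(c(TRUE, FALSE, NA), "yes", "no", missing = "unknown")
#   #> [1] "yes"     "no"      "unknown"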
pm_anti_join <- function(x, y, by = NULL) {
pm_filter_join_worker(x, y, by, type = "anti")
}
pm_semi_join <- function(x, y, by = NULL) {
pm_filter_join_worker(x, y, by, type = "semi")
}
pm_filter_join_worker <- function(x, y, by = NULL, type = c("anti", "semi")) {
type <- match.arg(type, choices = c("anti", "semi"), several.ok = FALSE)
if (is.null(by)) {
by <- intersect(names(x), names(y))
pm_join_message(by)
}
rows <- interaction(x[, by]) %in% interaction(y[, by])
if (type == "anti") rows <- !rows
res <- x[rows, , drop = FALSE]
rownames(res) <- NULL
res
}
pm_inner_join <- function(x, y, by = NULL, suffix = c(".x", ".y")) {
pm_join_worker(x = x, y = y, by = by, suffix = suffix, sort = FALSE)
}
# pm_left_join <- function(x, y, by = NULL, suffix = c(".x", ".y")) {
# pm_join_worker(x = x, y = y, by = by, suffix = suffix, all.x = TRUE)
# }
pm_right_join <- function(x, y, by = NULL, suffix = c(".x", ".y")) {
pm_join_worker(x = x, y = y, by = by, suffix = suffix, all.y = TRUE)
}
pm_full_join <- function(x, y, by = NULL, suffix = c(".x", ".y")) {
pm_join_worker(x = x, y = y, by = by, suffix = suffix, all = TRUE)
}
pm_join_worker <- function(x, y, by = NULL, suffix = c(".x", ".y"), ...) {
x[, ".join_id"] <- seq_len(nrow(x))
if (is.null(by)) {
by <- intersect(names(x), names(y))
pm_join_message(by)
merged <- merge(x = x, y = y, by = by, suffixes = suffix, ...)[, union(names(x), names(y))]
} else if (is.null(names(by))) {
merged <- merge(x = x, y = y, by = by, suffixes = suffix, ...)
} else {
merged <- merge(x = x, y = y, by.x = names(by), by.y = by, suffixes = suffix, ...)
}
merged <- merged[order(merged[, ".join_id"]), colnames(merged) != ".join_id"]
rownames(merged) <- NULL
merged
}
pm_join_message <- function(by) {
if (length(by) > 1L) {
message("Joining, by = c(\"", paste0(by, collapse = "\", \""), "\")\n", sep = "")
} else {
message("Joining, by = \"", by, "\"\n", sep = "")
}
}
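# Usage sketch of the join helpers (in comments, not run; the two toy data
# frames below are made up for illustration):
#
#   x <- data.frame(name = c("Mick", "John", "Paul"), band = "yes")
#   y <- data.frame(name = c("John", "Paul", "Keith"), plays = "guitar")
#   pm_inner_join(x, y)  # message 'Joining, by = "name"'; rows John and Paul
#   pm_anti_join(x, y)   # rows of `x` without a match in `y`: Mick
#
# pm_join_worker() tags `x` with a temporary ".join_id" column so the output
# of base merge() can be restored to the original row order of `x`.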
pm_lag <- function(x, pm_n = 1L, default = NA) {
if (inherits(x, "ts")) stop("`x` must be a vector, not a `ts` object, do you want `stats::pm_lag()`?")
if (length(pm_n) != 1L || !is.numeric(pm_n) || pm_n < 0L) stop("`pm_n` must be a nonnegative integer scalar")
if (pm_n == 0L) {
return(x)
}
tryCatch(
storage.mode(default) <- typeof(x),
warning = function(w) {
stop("Cannot convert `default` <", typeof(default), "> to `x` <", typeof(x), ">")
}
)
xlen <- length(x)
pm_n <- pmin(pm_n, xlen)
res <- c(rep(default, pm_n), x[seq_len(xlen - pm_n)])
attributes(res) <- attributes(x)
res
}
pm_lead <- function(x, pm_n = 1L, default = NA) {
  if (length(pm_n) != 1L || !is.numeric(pm_n) || pm_n < 0L) stop("`pm_n` must be a nonnegative integer scalar")
if (pm_n == 0L) {
return(x)
}
tryCatch(
storage.mode(default) <- typeof(x),
warning = function(w) {
stop("Cannot convert `default` <", typeof(default), "> to `x` <", typeof(x), ">")
}
)
xlen <- length(x)
pm_n <- pmin(pm_n, xlen)
res <- c(x[-seq_len(pm_n)], rep(default, pm_n))
attributes(res) <- attributes(x)
res
}
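# Quick sketch of pm_lag()/pm_lead() (in comments, not run):
#
#   pm_lag(1:5)                         # NA  1  2  3  4
#   pm_lead(1:5)                        #  2  3  4  5 NA
#   pm_lag(1:5, pm_n = 2, default = 0L) #  0  0  1  2  3
#
# `default` is coerced to the type of `x`; an incompatible `default` (e.g. a
# character for an integer `x`) raises an error via the tryCatch() above.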
pm_mutate <- function(.data, ...) {
pm_check_is_dataframe(.data)
if ("grouped_data" %in% class(.data)) {
pm_mutate.grouped_data(.data, ...)
} else {
pm_mutate.default(.data, ...)
}
}
pm_mutate.default <- function(.data, ...) {
conditions <- pm_dotdotdot(..., .impute_names = TRUE)
.data[, setdiff(names(conditions), names(.data))] <- NA
pm_context$setup(.data)
on.exit(pm_context$clean(), add = TRUE)
for (i in seq_along(conditions)) {
pm_context$.data[, names(conditions)[i]] <- eval(conditions[[i]], envir = pm_context$.data)
}
pm_context$.data
}
pm_mutate.grouped_data <- function(.data, ...) {
rows <- rownames(.data)
res <- pm_apply_grouped_function("pm_mutate", .data, drop = TRUE, ...)
res[rows, ]
}
pm_n_distinct <- function(..., na.rm = FALSE) {
res <- c(...)
if (is.list(res)) {
return(nrow(unique(as.data.frame(res, stringsAsFactors = FALSE))))
}
if (isTRUE(na.rm)) res <- res[!is.na(res)]
length(unique(res))
}
pm_na_if <- function(x, y) {
y_len <- length(y)
x_len <- length(x)
if (!(y_len %in% c(1L, x_len))) stop("`y` must be length ", x_len, " (same as `x`) or 1, not ", y_len)
x[x == y] <- NA
x
}
pm_near <- function(x, y, tol = .Machine$double.eps^0.5) {
abs(x - y) < tol
}
`%pm>%` <- function(lhs, rhs) {
lhs <- substitute(lhs)
rhs <- substitute(rhs)
eval(as.call(c(rhs[[1L]], lhs, as.list(rhs[-1L]))), envir = parent.frame())
}
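# The `%pm>%` operator is a dependency-free stand-in for magrittr's `%>%`:
# it splices the left-hand side in as the first argument of the right-hand
# call. An end-to-end sketch (in comments, not run; `mtcars` is base R):
#
#   mtcars %pm>%
#     pm_group_by(cyl) %pm>%
#     pm_summarise(mean_mpg = mean(mpg))
#   #>   cyl mean_mpg
#   #> 1   4 26.66364
#   #> 2   6 19.74286
#   #> 3   8 15.10000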
pm_pull <- function(.data, var = -1) {
var_deparse <- pm_deparse_var(var)
col_names <- colnames(.data)
if (!(var_deparse %in% col_names) & grepl("^[[:digit:]]+L|[[:digit:]]", var_deparse)) {
var <- as.integer(gsub("L", "", var_deparse))
var <- pm_if_else(var < 1L, rev(col_names)[abs(var)], col_names[var])
} else if (var_deparse %in% col_names) {
var <- var_deparse
}
.data[, var, drop = TRUE]
}
pm_set_names <- function(object = nm, nm) {
names(object) <- nm
object
}
pm_vec_head <- function(x, pm_n = 6L, ...) {
stopifnot(length(pm_n) == 1L)
pm_n <- if (pm_n < 0L) max(length(x) + pm_n, 0L) else min(pm_n, length(x))
x[seq_len(pm_n)]
}
pm_relocate <- function(.data, ..., .before = NULL, .after = NULL) {
pm_check_is_dataframe(.data)
data_names <- colnames(.data)
col_pos <- pm_select_positions(.data, ...)
.before <- pm_deparse_var(.before)
.after <- pm_deparse_var(.after)
has_before <- !is.null(.before)
has_after <- !is.null(.after)
if (has_before && has_after) {
stop("You must supply only one of `.before` and `.after`")
} else if (has_before) {
pm_where <- min(match(.before, data_names))
col_pos <- c(setdiff(col_pos, pm_where), pm_where)
} else if (has_after) {
pm_where <- max(match(.after, data_names))
col_pos <- c(pm_where, setdiff(col_pos, pm_where))
} else {
pm_where <- 1L
col_pos <- union(col_pos, pm_where)
}
lhs <- setdiff(seq(1L, pm_where - 1L), col_pos)
rhs <- setdiff(seq(pm_where + 1L, ncol(.data)), col_pos)
col_pos <- unique(c(lhs, col_pos, rhs))
col_pos <- col_pos[col_pos <= length(data_names)]
res <- .data[col_pos]
if (pm_has_groups(.data)) res <- pm_set_groups(res, pm_get_groups(.data))
res
}
pm_rename <- function(.data, ...) {
pm_check_is_dataframe(.data)
new_names <- names(pm_deparse_dots(...))
if (length(new_names) == 0L) {
warning("You didn't give any new names")
return(.data)
}
col_pos <- pm_select_positions(.data, ...)
old_names <- colnames(.data)[col_pos]
new_names_zero <- nchar(new_names) == 0L
if (any(new_names_zero)) {
warning("You didn't provide new names for: ", paste0("`", old_names[new_names_zero], collapse = ", "), "`")
new_names[new_names_zero] <- old_names[new_names_zero]
}
colnames(.data)[col_pos] <- new_names
.data
}
pm_rename_with <- function(.data, .fn, .cols = pm_everything(), ...) {
if (!is.function(.fn)) stop("`", .fn, "` is not a valid function")
grouped <- inherits(.data, "grouped_data")
if (grouped) grp_pos <- which(colnames(.data) %in% pm_group_vars(.data))
col_pos <- eval(substitute(pm_select_positions(.data, .cols)))
cols <- colnames(.data)[col_pos]
new_cols <- .fn(cols, ...)
if (any(duplicated(new_cols))) {
stop("New names must be unique however `", deparse(substitute(.fn)), "` returns duplicate column names")
}
colnames(.data)[col_pos] <- new_cols
if (grouped) .data <- pm_set_groups(.data, colnames(.data)[grp_pos])
.data
}
pm_replace_with <- function(x, i, val, arg_name) {
if (is.null(val)) {
return(x)
}
pm_check_length(val, x, arg_name)
pm_check_type(val, x, arg_name)
pm_check_class(val, x, arg_name)
i[is.na(i)] <- FALSE
if (length(val) == 1L) {
x[i] <- val
} else {
x[i] <- val[i]
}
x
}
pm_check_length <- function(x, y, arg_name) {
length_x <- length(x)
length_y <- length(y)
if (all(length_x %in% c(1L, length_y))) {
return()
}
if (length_y == 1) {
stop(arg_name, " must be length 1, not ", paste(length_x, sep = ", "))
} else {
stop(arg_name, " must be length ", length_y, " or 1, not ", length_x)
}
}
pm_check_type <- function(x, y, arg_name) {
x_type <- typeof(x)
y_type <- typeof(y)
if (identical(x_type, y_type)) {
return()
}
stop(arg_name, " must be `", y_type, "`, not `", x_type, "`")
}
pm_check_class <- function(x, y, arg_name) {
if (!is.object(x)) {
return()
}
exp_classes <- class(y)
out_classes <- class(x)
if (identical(out_classes, exp_classes)) {
return()
}
stop(arg_name, " must have class `", exp_classes, "`, not class `", out_classes, "`")
}
pm_rownames_to_column <- function(.data, var = "rowname") {
pm_check_is_dataframe(.data)
col_names <- colnames(.data)
if (var %in% col_names) stop("Column `", var, "` already exists in `.data`")
.data[, var] <- rownames(.data)
rownames(.data) <- NULL
.data[, c(var, setdiff(col_names, var))]
}
pm_starts_with <- function(match, ignore.case = TRUE, vars = pm_peek_vars()) {
grep(pattern = paste0("^", paste0(match, collapse = "|^")), x = vars, ignore.case = ignore.case)
}
pm_ends_with <- function(match, ignore.case = TRUE, vars = pm_peek_vars()) {
grep(pattern = paste0(paste0(match, collapse = "$|"), "$"), x = vars, ignore.case = ignore.case)
}
pm_contains <- function(match, ignore.case = TRUE, vars = pm_peek_vars()) {
pm_matches <- lapply(
match,
function(x) {
if (isTRUE(ignore.case)) {
match_u <- toupper(x)
match_l <- tolower(x)
pos_u <- grep(pattern = match_u, x = toupper(vars), fixed = TRUE)
pos_l <- grep(pattern = match_l, x = tolower(vars), fixed = TRUE)
unique(c(pos_l, pos_u))
} else {
grep(pattern = x, x = vars, fixed = TRUE)
}
}
)
unique(unlist(pm_matches))
}
pm_matches <- function(match, ignore.case = TRUE, perl = FALSE, vars = pm_peek_vars()) {
grep(pattern = match, x = vars, ignore.case = ignore.case, perl = perl)
}
pm_num_range <- function(prefix, range, width = NULL, vars = pm_peek_vars()) {
if (!is.null(width)) {
range <- sprintf(paste0("%0", width, "d"), range)
}
find <- paste0(prefix, range)
if (any(duplicated(vars))) {
stop("Column names must be unique")
} else {
x <- match(find, vars)
x[!is.na(x)]
}
}
pm_all_of <- function(x, vars = pm_peek_vars()) {
x_ <- !x %in% vars
if (any(x_)) {
which_x_ <- which(x_)
if (length(which_x_) == 1L) {
stop("The column ", x[which_x_], " does not exist.")
} else {
stop("The columns ", paste(x[which_x_], collapse = ", "), " do not exist.")
}
} else {
which(vars %in% x)
}
}
pm_any_of <- function(x, vars = pm_peek_vars()) {
which(vars %in% x)
}
pm_everything <- function(vars = pm_peek_vars()) {
seq_along(vars)
}
pm_last_col <- function(offset = 0L, vars = pm_peek_vars()) {
if (!pm_is_wholenumber(offset)) stop("`offset` must be an integer")
pm_n <- length(vars)
if (offset && pm_n <= offset) {
stop("`offset` must be smaller than the number of `vars`")
} else if (pm_n == 0) {
stop("Can't pm_select last column when `vars` is empty")
} else {
pm_n - offset
}
}
pm_peek_vars <- function() {
pm_select_env$get_colnames()
}
pm_select_positions <- function(.data, ..., .group_pos = FALSE) {
cols <- pm_dotdotdot(...)
pm_select_env$setup(.data = .data, calling_frame = parent.frame(2L))
on.exit(pm_select_env$clean(), add = TRUE)
data_names <- pm_select_env$get_colnames()
pos <- unlist(lapply(cols, pm_eval_expr))
col_len <- pm_select_env$get_ncol()
if (any(pos > col_len)) {
oor <- pos[which(pos > col_len)]
oor_len <- length(oor)
stop(
"Location", if (oor_len > 1) "s " else " ", pm_collapse_to_sentence(oor),
if (oor_len > 1) " don't " else " doesn't ", "exist. There are only ", col_len, " columns."
)
}
if (isTRUE(.group_pos)) {
pm_groups <- pm_get_groups(.data)
missing_groups <- !(pm_groups %in% cols)
if (any(missing_groups)) {
sel_missing <- pm_groups[missing_groups]
message("Adding missing grouping variables: `", paste(sel_missing, collapse = "`, `"), "`")
readd <- match(sel_missing, data_names)
if (length(names(cols)) > 0L) names(readd) <- data_names[readd]
pos <- c(readd, pos)
}
}
pos[!duplicated(pos)]
}
pm_eval_expr <- function(x) {
type <- typeof(x)
switch(type,
"integer" = x,
"double" = as.integer(x),
"character" = pm_select_char(x),
"symbol" = pm_select_symbol(x),
"language" = pm_eval_call(x),
stop("Expressions of type <", typeof(x), "> cannot be evaluated for use when subsetting.")
)
}
pm_select_char <- function(expr) {
pos <- match(expr, pm_select_env$get_colnames())
if (is.na(pos)) stop("Column `", expr, "` does not exist")
pos
}
pm_select_symbol <- function(expr) {
expr_name <- as.character(expr)
if (grepl("^is\\.", expr_name) && pm_is_function(expr)) {
stop(
"Predicate functions must be wrapped in `pm_where()`.\n\n",
sprintf(" data %%pm>%% pm_select(pm_where(%s))", expr_name)
)
}
res <- try(pm_select_char(as.character(expr)), silent = TRUE)
if (inherits(res, "try-error")) {
res <- tryCatch(
unlist(lapply(eval(expr, envir = pm_select_env$calling_frame), pm_eval_expr)),
error = function(e) stop("Column ", expr, " does not exist.")
)
}
res
}
pm_eval_call <- function(x) {
type <- as.character(x[[1]])
switch(type,
`:` = pm_select_seq(x),
`!` = pm_select_negate(x),
`-` = pm_select_minus(x),
`c` = pm_select_c(x),
`(` = pm_select_bracket(x),
pm_select_pm_context(x)
)
}
pm_select_seq <- function(expr) {
x <- pm_eval_expr(expr[[2]])
y <- pm_eval_expr(expr[[3]])
x:y
}
pm_select_negate <- function(expr) {
x <- if (pm_is_negated_colon(expr)) {
expr <- call(":", expr[[2]][[2]], expr[[2]][[3]][[2]])
pm_eval_expr(expr)
} else {
pm_eval_expr(expr[[2]])
}
x * -1L
}
pm_is_negated_colon <- function(expr) {
expr[[1]] == "!" && length(expr[[2]]) > 1L && expr[[2]][[1]] == ":" && expr[[2]][[3]][[1]] == "!"
}
pm_select_minus <- function(expr) {
x <- pm_eval_expr(expr[[2]])
x * -1L
}
pm_select_c <- function(expr) {
lst_expr <- as.list(expr)
lst_expr[[1]] <- NULL
unlist(lapply(lst_expr, pm_eval_expr))
}
pm_select_bracket <- function(expr) {
pm_eval_expr(expr[[2]])
}
pm_select_pm_context <- function(expr) {
eval(expr, envir = pm_select_env$.data)
}
pm_select_env <- new.env()
pm_select_env$setup <- function(.data, calling_frame) {
pm_select_env$.data <- .data
pm_select_env$calling_frame <- calling_frame
}
pm_select_env$clean <- function() {
rm(list = c(".data", "calling_frame"), envir = pm_select_env)
}
pm_select_env$get_colnames <- function() colnames(pm_select_env$.data)
pm_select_env$get_nrow <- function() nrow(pm_select_env$.data)
pm_select_env$get_ncol <- function() ncol(pm_select_env$.data)
pm_select <- function(.data, ...) {
col_pos <- pm_select_positions(.data, ..., .group_pos = TRUE)
map_names <- names(col_pos)
map_names_length <- nchar(map_names)
if (any(map_names_length == 0L)) {
no_new_names <- which(map_names_length == 0L)
map_names[no_new_names] <- colnames(.data)[no_new_names]
}
res <- .data[, col_pos, drop = FALSE]
if (!is.null(map_names) && all(col_pos > 0L)) colnames(res) <- map_names
if (pm_has_groups(.data)) res <- pm_set_groups(res, pm_get_groups(.data))
res
}
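# pm_select() resolves its arguments through the small expression evaluator
# above (pm_eval_expr() and friends), so ranges, helpers, renames and
# negations all reduce to integer column positions. A sketch (in comments,
# not run):
#
#   pm_select(mtcars, mpg:disp)             # columns 1 to 3
#   pm_select(mtcars, pm_starts_with("d"))  # disp, drat
#   pm_select(mtcars, miles = mpg)          # select and rename in one go
#   pm_select(mtcars, -carb)                # drop a column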
pm_summarise <- function(.data, ...) {
pm_check_is_dataframe(.data)
if ("grouped_data" %in% class(.data)) {
pm_summarise.grouped_data(.data, ...)
} else {
pm_summarise.default(.data, ...)
}
}
pm_summarise.default <- function(.data, ...) {
fns <- pm_dotdotdot(...)
pm_context$setup(.data)
on.exit(pm_context$clean(), add = TRUE)
pm_groups_exist <- pm_has_groups(pm_context$.data)
if (pm_groups_exist) {
group <- unique(pm_context$.data[, pm_get_groups(pm_context$.data), drop = FALSE])
}
res <- lapply(
fns,
function(x) {
x_res <- do.call(with, list(pm_context$.data, x))
if (is.list(x_res)) I(x_res) else x_res
}
)
res <- as.data.frame(res, stringsAsFactors = FALSE)
fn_names <- names(fns)
colnames(res) <- if (is.null(fn_names)) fns else fn_names
if (pm_groups_exist) res <- cbind(group, res, row.names = NULL)
res
}
pm_summarise.grouped_data <- function(.data, ...) {
pm_groups <- pm_get_groups(.data)
res <- pm_apply_grouped_function("pm_summarise", .data, drop = TRUE, ...)
res <- res[do.call(order, lapply(pm_groups, function(x) res[, x])), ]
rownames(res) <- NULL
res
}
pm_transmute <- function(.data, ...) {
pm_check_is_dataframe(.data)
if ("grouped_data" %in% class(.data)) {
pm_transmute.grouped_data(.data, ...)
} else {
pm_transmute.default(.data, ...)
}
}
pm_transmute.default <- function(.data, ...) {
conditions <- pm_deparse_dots(...)
mutated <- pm_mutate(.data, ...)
mutated[, names(conditions), drop = FALSE]
}
pm_transmute.grouped_data <- function(.data, ...) {
rows <- rownames(.data)
res <- pm_apply_grouped_function("pm_transmute", .data, drop = TRUE, ...)
res[rows, ]
}
pm_dotdotdot <- function(..., .impute_names = FALSE) {
dots <- eval(substitute(alist(...)))
if (isTRUE(.impute_names)) {
pm_deparse_dots <- lapply(dots, deparse)
names_dots <- names(dots)
unnamed <- if (is.null(names_dots)) rep(TRUE, length(dots)) else nchar(names_dots) == 0L
names(dots)[unnamed] <- pm_deparse_dots[unnamed]
}
dots
}
pm_deparse_dots <- function(...) {
vapply(substitute(...()), deparse, NA_character_)
}
pm_deparse_var <- function(var, frame = if (is.null(pm_eval_env$env)) parent.frame() else pm_eval_env$env) {
sub_var <- eval(substitute(substitute(var)), frame)
if (is.symbol(sub_var)) var <- as.character(sub_var)
var
}
pm_check_is_dataframe <- function(.data) {
parent_fn <- all.names(sys.call(-1L), max.names = 1L)
if (!is.data.frame(.data)) stop(parent_fn, " must be given a data.frame")
invisible()
}
pm_is_wholenumber <- function(x) {
x %% 1L == 0L
}
pm_seq2 <- function(from, to) {
if (length(from) != 1) stop("`from` must be length one")
if (length(to) != 1) stop("`to` must be length one")
if (from > to) integer() else seq.int(from, to)
}
pm_is_function <- function(x, frame) {
res <- tryCatch(
is.function(x),
warning = function(w) FALSE,
error = function(e) FALSE
)
if (isTRUE(res)) {
return(res)
}
res <- tryCatch(
is.function(eval(x)),
warning = function(w) FALSE,
error = function(e) FALSE
)
if (isTRUE(res)) {
return(res)
}
res <- tryCatch(
is.function(eval(as.symbol(deparse(substitute(x))))),
warning = function(w) FALSE,
error = function(e) FALSE
)
if (isTRUE(res)) {
return(res)
}
FALSE
}
pm_collapse_to_sentence <- function(x) {
len_x <- length(x)
if (len_x == 0L) {
stop("Length of `x` is 0")
} else if (len_x == 1L) {
as.character(x)
} else if (len_x == 2L) {
paste(x, collapse = " and ")
} else {
paste(paste(x[1:(len_x - 1)], collapse = ", "), x[len_x], sep = " and ")
}
}
pm_where <- function(fn) {
if (!pm_is_function(fn)) {
stop(pm_deparse_var(fn), " is not a valid predicate function.")
}
preds <- unlist(lapply(
pm_select_env$.data,
function(x, fn) {
do.call("fn", list(x))
},
fn
))
if (!is.logical(preds)) stop("`pm_where()` must be used with functions that return `TRUE` or `FALSE`.")
data_cols <- pm_select_env$get_colnames()
cols <- data_cols[preds]
which(data_cols %in% cols)
}
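# pm_where() runs inside the select context set up by pm_select_positions(),
# applying the predicate to every column of the data being selected. Sketch
# (in comments, not run):
#
#   df <- data.frame(a = 1:2, b = c("x", "y"))
#   pm_select(df, pm_where(is.character))    # keeps only column b
#   pm_select(mtcars, pm_where(is.numeric))  # all columns of mtcars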
pm_cume_dist <- function(x) {
rank(x, ties.method = "max", na.last = "keep") / sum(!is.na(x))
}
pm_dense_rank <- function(x) {
match(x, sort(unique(x)))
}
pm_min_rank <- function(x) {
rank(x, ties.method = "min", na.last = "keep")
}
pm_ntile <- function(x = pm_row_number(), pm_n) {
if (!missing(x)) x <- pm_row_number(x)
len <- length(x) - sum(is.na(x))
pm_n <- as.integer(floor(pm_n))
if (len == 0L) {
rep(NA_integer_, length(x))
} else {
pm_n_larger <- as.integer(len %% pm_n)
pm_n_smaller <- as.integer(pm_n - pm_n_larger)
size <- len / pm_n
larger_size <- as.integer(ceiling(size))
smaller_size <- as.integer(floor(size))
larger_threshold <- larger_size * pm_n_larger
bins <- pm_if_else(
x <= larger_threshold,
(x + (larger_size - 1L)) / larger_size,
(x + (-larger_threshold + smaller_size - 1L)) / smaller_size + pm_n_larger
)
as.integer(floor(bins))
}
}
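# Worked example of the binning arithmetic in pm_ntile() (in comments, not
# run): for a length-5 input and pm_n = 2, len %% pm_n = 1 bin gets the
# larger size ceiling(5 / 2) = 3 and the remaining bin gets floor(5 / 2) = 2:
#
#   pm_ntile(c(10, 20, 30, 40, 50), pm_n = 2)
#   #> [1] 1 1 1 2 2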
pm_percent_rank <- function(x) {
(pm_min_rank(x) - 1) / (sum(!is.na(x)) - 1)
}
pm_row_number <- function(x) {
if (missing(x)) seq_len(pm_n()) else rank(x, ties.method = "first", na.last = "keep")
}
| /scratch/gouwar.j/cran-all/cranData/AMR/R/aa_helper_pm_functions.R |
# ==================================================================== #
# TITLE: #
# AMR: An R Package for Working with Antimicrobial Resistance Data #
# #
# SOURCE CODE: #
# https://github.com/msberends/AMR #
# #
# PLEASE CITE THIS SOFTWARE AS: #
# Berends MS, Luz CF, Friedrich AW, Sinha BNM, Albers CJ, Glasner C #
# (2022). AMR: An R Package for Working with Antimicrobial Resistance #
# Data. Journal of Statistical Software, 104(3), 1-31. #
# https://doi.org/10.18637/jss.v104.i03 #
# #
# Developed at the University of Groningen and the University Medical #
# Center Groningen in The Netherlands, in collaboration with many #
# colleagues from around the world, see our website. #
# #
# This R package is free software; you can freely use and distribute #
# it for both personal and commercial purposes under the terms of the #
# GNU General Public License version 2.0 (GNU GPL-2), as published by #
# the Free Software Foundation. #
# We created this package for both routine data analysis and academic #
# research and it was publicly released in the hope that it will be #
# useful, but it comes WITHOUT ANY WARRANTY OR LIABILITY. #
# #
# Visit our website for the full manual and a complete tutorial about #
# how to conduct AMR data analysis: https://msberends.github.io/AMR/ #
# ==================================================================== #
#' Options for the AMR package
#'
#' This is an overview of all the package-specific [options()] you can set in the `AMR` package.
#' @section Options:
#' * `AMR_custom_ab` \cr Allows the use of custom antimicrobial drugs with this package. This is explained in [add_custom_antimicrobials()].
#' * `AMR_custom_mo` \cr Allows the use of custom microorganisms with this package. This is explained in [add_custom_microorganisms()].
#' * `AMR_eucastrules` \cr Used for setting the default types of rules for the [eucast_rules()] function, must be one or more of: `"breakpoints"`, `"expert"`, `"other"`, `"custom"`, `"all"`, and defaults to `c("breakpoints", "expert")`.
#' * `AMR_guideline` \cr Used for setting the default guideline for interpreting MIC values and disk diffusion diameters with [as.sir()]. Can be either the guideline name (e.g., `"CLSI"`) or the name with a year (e.g. `"CLSI 2019"`). The default is the latest implemented EUCAST guideline, currently \code{"`r clinical_breakpoints$guideline[1]`"}. Supported guidelines are currently EUCAST (`r min(as.integer(gsub("[^0-9]", "", subset(clinical_breakpoints, guideline %like% "EUCAST")$guideline)))`-`r max(as.integer(gsub("[^0-9]", "", subset(clinical_breakpoints, guideline %like% "EUCAST")$guideline)))`) and CLSI (`r min(as.integer(gsub("[^0-9]", "", subset(clinical_breakpoints, guideline %like% "CLSI")$guideline)))`-`r max(as.integer(gsub("[^0-9]", "", subset(clinical_breakpoints, guideline %like% "CLSI")$guideline)))`).
#' * `AMR_ignore_pattern` \cr A [regular expression][base::regex] to ignore (i.e., make `NA`) any match given in [as.mo()] and all [`mo_*`][mo_property()] functions.
#' * `AMR_include_PKPD` \cr A [logical] to use in [as.sir()], to indicate that PK/PD clinical breakpoints must be applied as a last resort - the default is `TRUE`.
#' * `AMR_ecoff` \cr A [logical] to use in [as.sir()], to indicate that ECOFF (Epidemiological Cut-Off) values must be used - the default is `FALSE`.
#' * `AMR_include_screening` \cr A [logical] to use in [as.sir()], to indicate that clinical breakpoints for screening are allowed - the default is `FALSE`.
#' * `AMR_keep_synonyms` \cr A [logical] to use in [as.mo()] and all [`mo_*`][mo_property()] functions, to indicate if old, previously valid taxonomic names must be preserved and not be corrected to currently accepted names. The default is `FALSE`.
#' * `AMR_cleaning_regex` \cr A [regular expression][base::regex] (case-insensitive) to use in [as.mo()] and all [`mo_*`][mo_property()] functions, to clean the user input. The default is the outcome of [mo_cleaning_regex()], which removes texts between brackets and texts such as "species" and "serovar".
#' * `AMR_locale` \cr A language to use for the `AMR` package, can be one of these supported language names or ISO-639-1 codes: `r vector_or(paste0(sapply(LANGUAGES_SUPPORTED_NAMES, function(x) x[[1]]), " (" , LANGUAGES_SUPPORTED, ")"), quotes = FALSE, sort = FALSE)`. The default is the current system language (if supported).
#' * `AMR_mo_source` \cr A file location for a manual code list to be used in [as.mo()] and all [`mo_*`][mo_property()] functions. This is explained in [set_mo_source()].
#'
#' @section Saving Settings Between Sessions:
#' Settings in \R are not saved globally and are thus lost when \R is exited. You can save your options to your own `.Rprofile` file, which is a user-specific file. You can edit it using:
#'
#' ```r
#' utils::file.edit("~/.Rprofile")
#' ```
#'
#' In this file, you can set options such as:
#'
#' ```r
#' options(AMR_locale = "pt")
#' options(AMR_include_PKPD = TRUE)
#' ```
#'
#' to add Portuguese language support of antibiotics, and allow PK/PD rules when interpreting MIC values with [as.sir()].
#'
#' ### Share Options Within Team
#'
#' For a more global approach, e.g. within a data team, save an options file to a remote file location, such as a shared network drive. This would work in this way:
#'
#' 1. Save a plain text file to e.g. "X:/team_folder/R_options.R" and fill it with preferred settings.
#'
#' 2. For each user, open the `.Rprofile` file using `utils::file.edit("~/.Rprofile")` and put in there:
#'
#' ```r
#' source("X:/team_folder/R_options.R")
#' ```
#'
#' 3. Reload R/RStudio and check the settings with [getOption()], e.g. `getOption("AMR_locale")` if you have set that value.
#'
#' Now the team settings are configured in only one place, and can be maintained there.
#' @keywords internal
#' @name AMR-options
NULL
| /scratch/gouwar.j/cran-all/cranData/AMR/R/aa_options.R |
# ==================================================================== #
# TITLE: #
# AMR: An R Package for Working with Antimicrobial Resistance Data #
# #
# SOURCE CODE: #
# https://github.com/msberends/AMR #
# #
# PLEASE CITE THIS SOFTWARE AS: #
# Berends MS, Luz CF, Friedrich AW, Sinha BNM, Albers CJ, Glasner C #
# (2022). AMR: An R Package for Working with Antimicrobial Resistance #
# Data. Journal of Statistical Software, 104(3), 1-31. #
# https://doi.org/10.18637/jss.v104.i03 #
# #
# Developed at the University of Groningen and the University Medical #
# Center Groningen in The Netherlands, in collaboration with many #
# colleagues from around the world, see our website. #
# #
# This R package is free software; you can freely use and distribute #
# it for both personal and commercial purposes under the terms of the #
# GNU General Public License version 2.0 (GNU GPL-2), as published by #
# the Free Software Foundation. #
# We created this package for both routine data analysis and academic #
# research and it was publicly released in the hope that it will be #
# useful, but it comes WITHOUT ANY WARRANTY OR LIABILITY. #
# #
# Visit our website for the full manual and a complete tutorial about #
# how to conduct AMR data analysis: https://msberends.github.io/AMR/ #
# ==================================================================== #
#' Transform Input to an Antibiotic ID
#'
#' Use this function to determine the antibiotic drug code of one or more antibiotics. The data set [antibiotics] will be searched for abbreviations, official names and synonyms (brand names).
#' @param x a [character] vector to be transformed into an antibiotic ID
#' @param flag_multiple_results a [logical] to indicate whether a note should be printed to the console when more than one antibiotic drug code or name could be retrieved from a single input value.
#' @param info a [logical] to indicate whether a progress bar should be printed - the default is `TRUE` only in interactive mode
#' @param ... arguments passed on to internal functions
#' @rdname as.ab
#' @inheritSection WHOCC WHOCC
#' @details All entries in the [antibiotics] data set have three different identifiers: a human readable EARS-Net code (column `ab`, used by ECDC and WHONET), an ATC code (column `atc`, used by WHO), and a CID code (column `cid`, Compound ID, used by PubChem). The data set contains more than 5,000 official brand names from many different countries, as found in PubChem. Note that some drugs contain multiple ATC codes.
#'
#' The user input will be matched against all of these properties. The [as.ab()] function can correct for different forms of misspelling:
#'
#' * Wrong spelling of drug names (such as "tobramicin" or "gentamycin"), which corrects for most audible similarities such as f/ph, x/ks, c/z/s, t/th, etc.
#' * Too few or too many vowels or consonants
#' * Switching two characters (such as "mreopenem", often the case in clinical data, when doctors typed too fast)
#' * Digitalised paper records, leaving artefacts like 0/o/O (zero and O's), B/8, n/r, etc.
#'
#' Use the [`ab_*`][ab_property()] functions to get properties based on the returned antibiotic ID, see *Examples*.
#'
#' Note: the [as.ab()] and [`ab_*`][ab_property()] functions may use very long regular expressions to match brand names of antimicrobial drugs. This may fail on some systems.
#'
#' You can add your own manual codes to be considered by [as.ab()] and all [`ab_*`][ab_property()] functions, see [add_custom_antimicrobials()].
#' @section Source:
#' World Health Organization (WHO) Collaborating Centre for Drug Statistics Methodology: \url{https://www.whocc.no/atc_ddd_index/}
#'
#' European Commission Public Health PHARMACEUTICALS - COMMUNITY REGISTER: \url{https://ec.europa.eu/health/documents/community-register/html/reg_hum_atc.htm}
#' @aliases ab
#' @return A [character] [vector] with additional class [`ab`]
#' @seealso
#' * [antibiotics] for the [data.frame] that is being used to determine antibiotic drug codes
#' * [ab_from_text()] for a function to retrieve antimicrobial drugs from clinical text (from health care records)
#' @inheritSection AMR Reference Data Publicly Available
#' @export
#' @examples
#' # these examples all return "ERY", the ID of erythromycin:
#' as.ab("J01FA01")
#' as.ab("J 01 FA 01")
#' as.ab("Erythromycin")
#' as.ab("eryt")
#' as.ab(" eryt 123")
#' as.ab("ERYT")
#' as.ab("ERY")
#' as.ab("eritromicine") # spelled wrong, yet works
#' as.ab("Erythrocin") # trade name
#' as.ab("Romycin") # trade name
#'
#' # spelling from different languages and dyslexia are no problem
#' ab_atc("ceftriaxon")
#' ab_atc("cephtriaxone") # small spelling error
#' ab_atc("cephthriaxone") # or a bit more severe
#' ab_atc("seephthriaaksone") # and even this works
#'
#' # use ab_* functions to get a specific properties (see ?ab_property);
#' # they use as.ab() internally:
#' ab_name("J01FA01")
#' ab_name("eryt")
#'
#' \donttest{
#' if (require("dplyr")) {
#' # you can quickly rename 'sir' columns using set_ab_names() with dplyr:
#' example_isolates %>%
#' set_ab_names(where(is.sir), property = "atc")
#' }
#' }
as.ab <- function(x, flag_multiple_results = TRUE, info = interactive(), ...) {
meet_criteria(x, allow_class = c("character", "numeric", "integer", "factor"), allow_NA = TRUE)
meet_criteria(flag_multiple_results, allow_class = "logical", has_length = 1)
meet_criteria(info, allow_class = "logical", has_length = 1)
if (is.ab(x)) {
return(x)
}
if (all(x %in% c(AMR_env$AB_lookup$ab, NA))) {
# all valid AB codes, but not yet right class
return(set_clean_class(x,
new_class = c("ab", "character")
))
}
initial_search <- is.null(list(...)$initial_search)
already_regex <- isTRUE(list(...)$already_regex)
fast_mode <- isTRUE(list(...)$fast_mode)
x_bak <- x
x <- toupper(x)
# remove diacritics
x <- iconv(x, from = "UTF-8", to = "ASCII//TRANSLIT")
x <- gsub('"', "", x, fixed = TRUE)
x <- gsub("(specimen|specimen date|specimen_date|spec_date|gender|^dates?$)", "", x, ignore.case = TRUE, perl = TRUE)
  # penicillin is a special case: an unqualified "penicillin" is taken to mean benzylpenicillin
x[x %like_case% "^PENICILLIN" & x %unlike_case% "[ /+-]"] <- "benzylpenicillin"
x_bak_clean <- x
if (already_regex == FALSE) {
x_bak_clean <- generalise_antibiotic_name(x_bak_clean)
}
x <- unique(x_bak_clean) # this means that every x is in fact generalise_antibiotic_name(x)
x_new <- rep(NA_character_, length(x))
x_unknown <- character(0)
x_unknown_ATCs <- character(0)
note_if_more_than_one_found <- function(found, index, from_text) {
if (isTRUE(initial_search) && isTRUE(length(from_text) > 1)) {
abnames <- ab_name(from_text, tolower = TRUE, initial_search = FALSE)
if (ab_name(found[1L], language = NULL) %like% "(clavulanic acid|(avi|tazo|mono|vabor)bactam)") {
abnames <- abnames[!abnames %in% c("clavulanic acid", "avibactam", "tazobactam", "vaborbactam", "monobactam")]
}
if (length(abnames) > 1) {
message_(
"More than one result was found for item ", index, ": ",
vector_and(abnames, quotes = FALSE)
)
}
}
found[1L]
}
# Fill in names, AB codes, CID codes and ATC codes directly (`x` is already clean and uppercase)
known_names <- x %in% AMR_env$AB_lookup$generalised_name
x_new[known_names] <- AMR_env$AB_lookup$ab[match(x[known_names], AMR_env$AB_lookup$generalised_name)]
known_codes_ab <- x %in% AMR_env$AB_lookup$ab
known_codes_atc <- vapply(FUN.VALUE = logical(1), x, function(x_) x_ %in% unlist(AMR_env$AB_lookup$atc), USE.NAMES = FALSE)
known_codes_cid <- x %in% AMR_env$AB_lookup$cid
x_new[known_codes_ab] <- AMR_env$AB_lookup$ab[match(x[known_codes_ab], AMR_env$AB_lookup$ab)]
x_new[known_codes_atc] <- AMR_env$AB_lookup$ab[vapply(
FUN.VALUE = integer(1),
x[known_codes_atc],
function(x_) {
which(vapply(
FUN.VALUE = logical(1),
AMR_env$AB_lookup$atc,
function(atc) x_ %in% atc
))[1L]
},
USE.NAMES = FALSE
)]
x_new[known_codes_cid] <- AMR_env$AB_lookup$ab[match(x[known_codes_cid], AMR_env$AB_lookup$cid)]
previously_coerced <- x %in% AMR_env$ab_previously_coerced$x
x_new[previously_coerced & is.na(x_new)] <- AMR_env$ab_previously_coerced$ab[match(x[is.na(x_new) & x %in% AMR_env$ab_previously_coerced$x], AMR_env$ab_previously_coerced$x)]
already_known <- known_names | known_codes_ab | known_codes_atc | known_codes_cid | previously_coerced
# fix for NAs
x_new[is.na(x)] <- NA
already_known[is.na(x)] <- FALSE
if (isTRUE(initial_search) && sum(already_known) < length(x)) {
progress <- progress_ticker(n = sum(!already_known), n_min = 25, print = info) # start if n >= 25
on.exit(close(progress))
}
for (i in which(!already_known)) {
if (isTRUE(initial_search)) {
progress$tick()
}
if (is.na(x[i]) || is.null(x[i])) {
next
}
if (identical(x[i], "") ||
# prevent "bacteria" from coercing to TMP, since Bacterial is a brand name of it:
identical(tolower(x[i]), "bacteria")) {
x_unknown <- c(x_unknown, x_bak[x[i] == x_bak_clean][1])
next
}
if (x[i] %like_case% "[A-Z][0-9][0-9][A-Z][A-Z][0-9][0-9]") {
# seems an ATC code, but the available ones are in `already_known`, so:
x_unknown <- c(x_unknown, x[i])
x_unknown_ATCs <- c(x_unknown_ATCs, x[i])
x_new[i] <- NA_character_
next
}
if (fast_mode == FALSE && flag_multiple_results == TRUE && x[i] %like% "[ ]") {
from_text <- tryCatch(suppressWarnings(ab_from_text(x[i], initial_search = FALSE, translate_ab = FALSE)[[1]]),
error = function(e) character(0)
)
} else {
from_text <- character(0)
}
# old code for phenoxymethylpenicillin (Peni V)
if (x[i] == "PNV") {
x_new[i] <- "PHN"
next
}
# exact LOINC code
loinc_found <- unlist(lapply(
AMR_env$AB_lookup$generalised_loinc,
function(s) x[i] %in% s
))
found <- AMR_env$AB_lookup$ab[loinc_found == TRUE]
if (length(found) > 0) {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
# exact synonym
synonym_found <- unlist(lapply(
AMR_env$AB_lookup$generalised_synonyms,
function(s) x[i] %in% s
))
found <- AMR_env$AB_lookup$ab[synonym_found == TRUE]
if (length(found) > 0) {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
# exact abbreviation
abbr_found <- unlist(lapply(
AMR_env$AB_lookup$generalised_abbreviations,
# require at least 2 characters for abbreviations
function(s) x[i] %in% s && nchar(x[i]) >= 2
))
found <- AMR_env$AB_lookup$ab[abbr_found == TRUE]
if (length(found) > 0) {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
    # for longer inputs (at least 10 characters), allow a Levenshtein distance of at most 2
if (nchar(x[i]) >= 10) {
levenshtein <- as.double(utils::adist(x[i], AMR_env$AB_lookup$generalised_name))
if (any(levenshtein <= 2)) {
found <- AMR_env$AB_lookup$ab[which(levenshtein <= 2)]
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
}
# allow characters that resemble others, but only continue when having more than 3 characters
if (nchar(x[i]) <= 3) {
x_unknown <- c(x_unknown, x_bak[x[i] == x_bak_clean][1])
next
}
x_spelling <- x[i]
if (already_regex == FALSE) {
x_spelling <- gsub("[IY]+", "[IY]+", x_spelling, perl = TRUE)
x_spelling <- gsub("(C|K|Q|QU|S|Z|X|KS)+", "(C|K|Q|QU|S|Z|X|KS)+", x_spelling, perl = TRUE)
x_spelling <- gsub("(PH|F|V)+", "(PH|F|V)+", x_spelling, perl = TRUE)
x_spelling <- gsub("(TH|T)+", "(TH|T)+", x_spelling, perl = TRUE)
x_spelling <- gsub("A+", "A+", x_spelling, perl = TRUE)
x_spelling <- gsub("E+", "E+", x_spelling, perl = TRUE)
x_spelling <- gsub("O+", "O+", x_spelling, perl = TRUE)
# allow any ending of -in/-ine and -im/-ime
x_spelling <- gsub("(\\[IY\\]\\+(N|M)|\\[IY\\]\\+(N|M)E\\+?)$", "[IY]+(N|M)E*", x_spelling, perl = TRUE)
# allow any ending of -ol/-ole
x_spelling <- gsub("(O\\+L|O\\+LE\\+)$", "O+LE*", x_spelling, perl = TRUE)
# allow any ending of -on/-one
x_spelling <- gsub("(O\\+N|O\\+NE\\+)$", "O+NE*", x_spelling, perl = TRUE)
# replace multiple same characters to single one with '+', like "ll" -> "l+"
x_spelling <- gsub("(.)\\1+", "\\1+", x_spelling, perl = TRUE)
# replace spaces and slashes with a possibility on both
x_spelling <- gsub("[ /]", "( .*|.*/)", x_spelling, perl = TRUE)
# correct for digital reading text (OCR)
x_spelling <- gsub("[NRD8B]", "[NRD8B]", x_spelling, perl = TRUE)
x_spelling <- gsub("(O|0)", "(O|0)+", x_spelling, perl = TRUE)
x_spelling <- gsub("++", "+", x_spelling, fixed = TRUE)
}
# try if name starts with it
found <- AMR_env$AB_lookup[which(AMR_env$AB_lookup$generalised_name %like% paste0("^", x_spelling)), "ab", drop = TRUE]
if (length(found) > 0) {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
# try if name ends with it
found <- AMR_env$AB_lookup[which(AMR_env$AB_lookup$generalised_name %like% paste0(x_spelling, "$")), "ab", drop = TRUE]
if (nchar(x[i]) >= 4 && length(found) > 0) {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
# and try if any synonym starts with it
synonym_found <- unlist(lapply(
AMR_env$AB_lookup$generalised_synonyms,
function(s) any(s %like% paste0("^", x_spelling))
))
found <- AMR_env$AB_lookup$ab[synonym_found == TRUE]
if (length(found) > 0) {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
# INITIAL SEARCH - More uncertain results ----
if (isTRUE(initial_search) && fast_mode == FALSE) {
# only run on first try
# try by removing all spaces
if (x[i] %like% " ") {
found <- suppressWarnings(as.ab(gsub(" +", "", x[i], perl = TRUE), initial_search = FALSE))
if (length(found) > 0 && !is.na(found)) {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
}
# try by removing all spaces and numbers
if (x[i] %like% " " || x[i] %like% "[0-9]") {
found <- suppressWarnings(as.ab(gsub("[ 0-9]", "", x[i], perl = TRUE), initial_search = FALSE))
if (length(found) > 0 && !is.na(found)) {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
}
# transform back from other languages and try again
x_translated <- paste(
lapply(
strsplit(x[i], "[^A-Z0-9]"),
function(y) {
for (i in seq_len(length(y))) {
for (lang in LANGUAGES_SUPPORTED[LANGUAGES_SUPPORTED != "en"]) {
y[i] <- ifelse(tolower(y[i]) %in% tolower(TRANSLATIONS[, lang, drop = TRUE]),
TRANSLATIONS[which(tolower(TRANSLATIONS[, lang, drop = TRUE]) == tolower(y[i]) &
!isFALSE(TRANSLATIONS$fixed)), "pattern"],
y[i]
)
}
}
generalise_antibiotic_name(y)
}
)[[1]],
collapse = "/"
)
x_translated_guess <- suppressWarnings(as.ab(x_translated, initial_search = FALSE))
if (!is.na(x_translated_guess)) {
x_new[i] <- x_translated_guess
next
}
# now also try to coerce brandname combinations like "Amoxy/clavulanic acid"
x_translated <- paste(
lapply(
strsplit(x_translated, "[^A-Z0-9 ]"),
function(y) {
for (i in seq_len(length(y))) {
y_name <- suppressWarnings(ab_name(y[i], language = NULL, initial_search = FALSE))
y[i] <- ifelse(!is.na(y_name),
y_name,
y[i]
)
}
generalise_antibiotic_name(y)
}
)[[1]],
collapse = "/"
)
x_translated_guess <- suppressWarnings(as.ab(x_translated, initial_search = FALSE))
if (!is.na(x_translated_guess)) {
x_new[i] <- x_translated_guess
next
}
# try by removing all trailing capitals
if (x[i] %like_case% "[a-z]+[A-Z]+$") {
found <- suppressWarnings(as.ab(gsub("[A-Z]+$", "", x[i], perl = TRUE), initial_search = FALSE))
if (!is.na(found)) {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
}
# keep only letters
found <- suppressWarnings(as.ab(gsub("[^A-Z]", "", x[i], perl = TRUE), initial_search = FALSE))
if (!is.na(found)) {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
# try from a bigger text, like from a health care record, see ?ab_from_text
# already calculated above if flag_multiple_results = TRUE
if (flag_multiple_results == TRUE) {
found <- from_text[1L]
} else {
found <- tryCatch(suppressWarnings(ab_from_text(x[i], initial_search = FALSE, translate_ab = FALSE)[[1]][1L]),
error = function(e) NA_character_
)
}
if (!is.na(found)) {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
      # try the first 5 characters, except for cephalosporins (which share long common prefixes), then the first 7
found <- suppressWarnings(as.ab(substr(x[i], 1, 5), initial_search = FALSE))
if (!is.na(found) && ab_group(found, initial_search = FALSE) %unlike% "cephalosporins") {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
found <- suppressWarnings(as.ab(substr(x[i], 1, 7), initial_search = FALSE))
if (!is.na(found)) {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
# make all consonants facultative
search_str <- gsub("([BCDFGHJKLMNPQRSTVWXZ])", "\\1*", x[i], perl = TRUE)
found <- suppressWarnings(as.ab(search_str, initial_search = FALSE, already_regex = TRUE))
# keep at least 4 normal characters
if (nchar(gsub(".\\*", "", search_str, perl = TRUE)) < 4) {
found <- NA
}
if (!is.na(found)) {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
# make all vowels facultative
search_str <- gsub("([AEIOUY])", "\\1*", x[i], perl = TRUE)
found <- suppressWarnings(as.ab(search_str, initial_search = FALSE, already_regex = TRUE))
# keep at least 5 normal characters
if (nchar(gsub(".\\*", "", search_str, perl = TRUE)) < 5) {
found <- NA
}
if (!is.na(found)) {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
# allow misspelling of vowels
x_spelling <- gsub("A+", "[AEIOU]+", x_spelling, fixed = TRUE)
x_spelling <- gsub("E+", "[AEIOU]+", x_spelling, fixed = TRUE)
x_spelling <- gsub("I+", "[AEIOU]+", x_spelling, fixed = TRUE)
x_spelling <- gsub("O+", "[AEIOU]+", x_spelling, fixed = TRUE)
x_spelling <- gsub("U+", "[AEIOU]+", x_spelling, fixed = TRUE)
found <- suppressWarnings(as.ab(x_spelling, initial_search = FALSE, already_regex = TRUE))
if (!is.na(found)) {
x_new[i] <- note_if_more_than_one_found(found, i, from_text)
next
}
# try with switched character, like "mreopenem"
for (j in seq_len(nchar(x[i]))) {
x_switched <- paste0(
# beginning part:
substr(x[i], 1, j - 1),
# here is the switching of 2 characters:
substr(x[i], j + 1, j + 1),
substr(x[i], j, j),
# ending part:
substr(x[i], j + 2, nchar(x[i]))
)
found <- suppressWarnings(as.ab(x_switched, initial_search = FALSE))
if (!is.na(found)) {
break
}
}
if (!is.na(found)) {
x_new[i] <- found[1L]
next
}
} # end of initial_search = TRUE
# not found
x_unknown <- c(x_unknown, x_bak[x[i] == x_bak_clean][1])
}
if (isTRUE(initial_search) && sum(already_known) < length(x)) {
close(progress)
}
# save to package env to save time for next time
if (isTRUE(initial_search)) {
AMR_env$ab_previously_coerced <- AMR_env$ab_previously_coerced[which(!AMR_env$ab_previously_coerced$x %in% x), , drop = FALSE]
AMR_env$ab_previously_coerced <- unique(rbind_AMR(
AMR_env$ab_previously_coerced,
data.frame(
x = x,
ab = x_new,
x_bak = x_bak[match(x, x_bak_clean)],
stringsAsFactors = FALSE
)
))
}
# take failed ATC codes apart from rest
if (length(x_unknown_ATCs) > 0 && fast_mode == FALSE) {
warning_(
"in `as.ab()`: these ATC codes are not (yet) in the antibiotics data set: ",
vector_and(x_unknown_ATCs), "."
)
}
x_unknown <- x_unknown[!x_unknown %in% x_unknown_ATCs]
x_unknown <- c(
x_unknown,
AMR_env$ab_previously_coerced$x_bak[which(AMR_env$ab_previously_coerced$x %in% x & is.na(AMR_env$ab_previously_coerced$ab))]
)
if (length(x_unknown) > 0 && fast_mode == FALSE) {
warning_(
"in `as.ab()`: these values could not be coerced to a valid antimicrobial ID: ",
vector_and(x_unknown), "."
)
}
x_result <- x_new[match(x_bak_clean, x)]
if (length(x_result) == 0) {
x_result <- NA_character_
}
set_clean_class(x_result,
new_class = c("ab", "character")
)
}
#' @rdname as.ab
#' @export
is.ab <- function(x) {
inherits(x, "ab")
}
# will be exported using s3_register() in R/zzz.R
pillar_shaft.ab <- function(x, ...) {
out <- trimws(format(x))
out[is.na(x)] <- font_na(NA)
# add the names to the drugs as mouse-over!
if (tryCatch(isTRUE(getExportedValue("ansi_has_hyperlink_support", ns = asNamespace("cli"))()), error = function(e) FALSE)) {
out[!is.na(x)] <- font_url(url = paste0(x[!is.na(x)], ": ", ab_name(x[!is.na(x)])),
txt = out[!is.na(x)])
}
create_pillar_column(out, align = "left", min_width = 4)
}
# will be exported using s3_register() in R/zzz.R
type_sum.ab <- function(x, ...) {
"ab"
}
#' @method print ab
#' @export
#' @noRd
print.ab <- function(x, ...) {
cat("Class 'ab'\n")
print(as.character(x), quote = FALSE)
}
#' @method as.data.frame ab
#' @export
#' @noRd
as.data.frame.ab <- function(x, ...) {
nm <- deparse1(substitute(x))
if (!"nm" %in% names(list(...))) {
as.data.frame.vector(as.ab(x), ..., nm = nm)
} else {
as.data.frame.vector(as.ab(x), ...)
}
}
#' @method [ ab
#' @export
#' @noRd
"[.ab" <- function(x, ...) {
y <- NextMethod()
attributes(y) <- attributes(x)
y
}
#' @method [[ ab
#' @export
#' @noRd
"[[.ab" <- function(x, ...) {
y <- NextMethod()
attributes(y) <- attributes(x)
y
}
#' @method [<- ab
#' @export
#' @noRd
"[<-.ab" <- function(i, j, ..., value) {
y <- NextMethod()
attributes(y) <- attributes(i)
return_after_integrity_check(y, "antimicrobial drug code", AMR_env$AB_lookup$ab)
}
#' @method [[<- ab
#' @export
#' @noRd
"[[<-.ab" <- function(i, j, ..., value) {
y <- NextMethod()
attributes(y) <- attributes(i)
return_after_integrity_check(y, "antimicrobial drug code", AMR_env$AB_lookup$ab)
}
#' @method c ab
#' @export
#' @noRd
c.ab <- function(...) {
x <- list(...)[[1L]]
y <- NextMethod()
attributes(y) <- attributes(x)
return_after_integrity_check(y, "antimicrobial drug code", AMR_env$AB_lookup$ab)
}
#' @method unique ab
#' @export
#' @noRd
unique.ab <- function(x, incomparables = FALSE, ...) {
y <- NextMethod()
attributes(y) <- attributes(x)
y
}
#' @method rep ab
#' @export
#' @noRd
rep.ab <- function(x, ...) {
y <- NextMethod()
attributes(y) <- attributes(x)
y
}
generalise_antibiotic_name <- function(x) {
x <- toupper(x)
  # remove suffixes
x <- gsub("_(MIC|RSI|SIR|DIS[CK])$", "", x, perl = TRUE)
# remove disk concentrations, like LVX_NM -> LVX
x <- gsub("_[A-Z]{2}[0-9_.]{0,3}$", "", x, perl = TRUE)
# remove part between brackets if that's followed by another string
x <- gsub("(.*)+ [(].*[)]", "\\1", x)
# keep only max 1 space
x <- trimws2(gsub(" +", " ", x, perl = TRUE))
# non-character, space or number should be a slash
x <- gsub("[^A-Z0-9 -]", "/", x, perl = TRUE)
# spaces around non-characters must be removed: amox + clav -> amox/clav
x <- gsub("(.*[A-Z0-9]) ([^A-Z0-9].*)", "\\1\\2", x, perl = TRUE)
x <- gsub("(.*[^A-Z0-9]) ([A-Z0-9].*)", "\\1\\2", x, perl = TRUE)
# remove hyphen after a starting "co"
x <- gsub("^CO-", "CO", x, perl = TRUE)
# replace operators with a space
x <- gsub("(/| AND | WITH | W/|[+]|[-])+", " ", x, perl = TRUE)
x
}
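# Illustration of the normalisation above (in comments, not run): input is
# upper-cased, result-type suffixes (such as "_MIC") and disk concentration
# codes are stripped, and separators like "/", "+", "-" and " AND " are
# unified to a single space:
#
#   generalise_antibiotic_name("amoxicillin/clavulanic acid")
#   #> "AMOXICILLIN CLAVULANIC ACID"
#   generalise_antibiotic_name("AMC_ND20")
#   #> "AMC"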
get_translate_ab <- function(translate_ab) {
translate_ab <- as.character(translate_ab)[1L]
if (translate_ab %in% c("TRUE", "official")) {
return("name")
} else if (translate_ab %in% c(NA_character_, "FALSE")) {
return(FALSE)
} else {
translate_ab <- tolower(translate_ab)
stop_ifnot(translate_ab %in% colnames(AMR::antibiotics),
"invalid value for 'translate_ab', this must be a column name of the antibiotics data set\n",
"or TRUE (equals 'name') or FALSE to not translate at all.",
call = FALSE
)
translate_ab
}
}
| /scratch/gouwar.j/cran-all/cranData/AMR/R/ab.R |
# ==================================================================== #
# TITLE: #
# AMR: An R Package for Working with Antimicrobial Resistance Data #
# #
# SOURCE CODE: #
# https://github.com/msberends/AMR #
# #
# PLEASE CITE THIS SOFTWARE AS: #
# Berends MS, Luz CF, Friedrich AW, Sinha BNM, Albers CJ, Glasner C #
# (2022). AMR: An R Package for Working with Antimicrobial Resistance #
# Data. Journal of Statistical Software, 104(3), 1-31. #
# https://doi.org/10.18637/jss.v104.i03 #
# #
# Developed at the University of Groningen and the University Medical #
# Center Groningen in The Netherlands, in collaboration with many #
# colleagues from around the world, see our website. #
# #
# This R package is free software; you can freely use and distribute #
# it for both personal and commercial purposes under the terms of the #
# GNU General Public License version 2.0 (GNU GPL-2), as published by #
# the Free Software Foundation. #
# We created this package for both routine data analysis and academic #
# research and it was publicly released in the hope that it will be #
# useful, but it comes WITHOUT ANY WARRANTY OR LIABILITY. #
# #
# Visit our website for the full manual and a complete tutorial about #
# how to conduct AMR data analysis: https://msberends.github.io/AMR/ #
# ==================================================================== #
#' Retrieve Antimicrobial Drug Names and Doses from Clinical Text
#'
#' Use this function on e.g. clinical texts from health care records. It returns a [list] with all antimicrobial drugs, doses and forms of administration found in the texts.
#' @param text text to analyse
#' @param type type of property to search for, either `"drug"`, `"dose"` or `"administration"`, see *Examples*
#' @param collapse a [character] to pass on to `paste(, collapse = ...)` to only return one [character] per element of `text`, see *Examples*
#' @param translate_ab if `type = "drug"`: a column name of the [antibiotics] data set to translate the antibiotic abbreviations to, using [ab_property()]. The default is `FALSE`. Using `TRUE` is equivalent to using "name".
#' @param thorough_search a [logical] to indicate whether the input must be extensively searched for misspelling and other faulty input values. Setting this to `TRUE` will take considerably more time than when using `FALSE`. At default, it will turn `TRUE` when all input elements contain a maximum of three words.
#' @param info a [logical] to indicate whether a progress bar should be printed - the default is `TRUE` only in interactive mode
#' @param ... arguments passed on to [as.ab()]
#' @details This function is also internally used by [as.ab()], although it then only searches for the first drug name and will throw a note if more drug names could have been returned. Note: the [as.ab()] function may use very long regular expressions to match brand names of antimicrobial drugs; these may fail on some systems.
#'
#' ### Argument `type`
#' At default, the function will search for antimicrobial drug names. All text elements will be searched for official names, ATC codes and brand names. As it uses [as.ab()] internally, it will correct for misspelling.
#'
#' With `type = "dose"` (or similar, like "dosing", "doses"), all text elements will be searched for [numeric] values of at least 100 that do not resemble years. The output will be [numeric]. It supports any unit (g, mg, IE, etc.) and multiple values in one clinical text, see *Examples*.
#'
#' With `type = "administration"` (or abbreviations, like "admin", "adm"), all text elements will be searched for a form of drug administration. It supports the following forms (including common abbreviations): buccal, implant, inhalation, instillation, intravenous, nasal, oral, parenteral, rectal, sublingual, transdermal and vaginal. Abbreviations for oral (such as 'po', 'per os') will become "oral"; all values for intravenous (such as 'iv', 'intraven') will become "iv". It supports multiple values in one clinical text, see *Examples*.
#'
#' ### Argument `collapse`
#' Without using `collapse`, this function will return a [list]. This can be convenient to use e.g. inside a `mutate()` call:\cr
#' `df %>% mutate(abx = ab_from_text(clinical_text))`
#'
#' The returned AB codes can be transformed to official names, groups, etc. with all [`ab_*`][ab_property()] functions such as [ab_name()] and [ab_group()], or by using the `translate_ab` argument.
#'
#' When using `collapse`, this function will return a [character]:\cr
#' `df %>% mutate(abx = ab_from_text(clinical_text, collapse = "|"))`
#' @export
#' @return A [list], or a [character] if `collapse` is not `NULL`
#' @examples
#' # mind the bad spelling of amoxicillin in this line,
#' # straight from a true health care record:
#' ab_from_text("28/03/2020 regular amoxicilliin 500mg po tid")
#'
#' ab_from_text("500 mg amoxi po and 400mg cipro iv")
#' ab_from_text("500 mg amoxi po and 400mg cipro iv", type = "dose")
#' ab_from_text("500 mg amoxi po and 400mg cipro iv", type = "admin")
#'
#' ab_from_text("500 mg amoxi po and 400mg cipro iv", collapse = ", ")
#' \donttest{
#' # if you want to know which antibiotic groups were administered, do e.g.:
#' abx <- ab_from_text("500 mg amoxi po and 400mg cipro iv")
#' ab_group(abx[[1]])
#'
#' if (require("dplyr")) {
#' tibble(clinical_text = c(
#' "given 400mg cipro and 500 mg amox",
#' "started on doxy iv today"
#' )) %>%
#' mutate(
#' abx_codes = ab_from_text(clinical_text),
#' abx_doses = ab_from_text(clinical_text, type = "doses"),
#' abx_admin = ab_from_text(clinical_text, type = "admin"),
#' abx_coll = ab_from_text(clinical_text, collapse = "|"),
#' abx_coll_names = ab_from_text(clinical_text,
#' collapse = "|",
#' translate_ab = "name"
#' ),
#' abx_coll_doses = ab_from_text(clinical_text,
#' type = "doses",
#' collapse = "|"
#' ),
#' abx_coll_admin = ab_from_text(clinical_text,
#' type = "admin",
#' collapse = "|"
#' )
#' )
#' }
#' }
ab_from_text <- function(text,
type = c("drug", "dose", "administration"),
collapse = NULL,
translate_ab = FALSE,
thorough_search = NULL,
info = interactive(),
...) {
if (missing(type)) {
type <- type[1L]
}
meet_criteria(text)
meet_criteria(type, allow_class = "character", has_length = 1)
meet_criteria(collapse, has_length = 1, allow_NULL = TRUE)
meet_criteria(translate_ab, allow_NULL = FALSE) # get_translate_ab() will be more informative about what's allowed
meet_criteria(thorough_search, allow_class = "logical", has_length = 1, allow_NULL = TRUE)
meet_criteria(info, allow_class = "logical", has_length = 1)
type <- tolower(trimws2(type))
text <- tolower(as.character(text))
text_split_all <- strsplit(text, "[ ;.,:\\|]")
progress <- progress_ticker(n = length(text_split_all), n_min = 5, print = info)
on.exit(close(progress))
if (type %like% "(drug|ab|anti)") {
translate_ab <- get_translate_ab(translate_ab)
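    # use the thorough (slower) search when explicitly requested, or when it
    # was left NULL and every text element contains at most three words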
if (isTRUE(thorough_search) ||
(isTRUE(is.null(thorough_search)) && max(vapply(FUN.VALUE = double(1), text_split_all, length), na.rm = TRUE) <= 3)) {
text_split_all <- text_split_all[nchar(text_split_all) >= 4 & grepl("[a-z]+", text_split_all)]
result <- lapply(text_split_all, function(text_split) {
progress$tick()
suppressWarnings(
as.ab(text_split, ...)
)
})
} else {
# no thorough search
abbr <- unlist(AMR::antibiotics$abbreviations)
abbr <- abbr[nchar(abbr) >= 4]
names_atc <- substr(c(AMR::antibiotics$name, AMR::antibiotics$atc), 1, 5)
synonyms <- unlist(AMR::antibiotics$synonyms)
synonyms <- synonyms[nchar(synonyms) >= 4]
# regular expression must not be too long, so split synonyms in two:
synonyms_part1 <- synonyms[seq_len(0.5 * length(synonyms))]
synonyms_part2 <- synonyms[!synonyms %in% synonyms_part1]
to_regex <- function(x) {
paste0(
"^(",
paste0(unique(gsub("[^a-z0-9]+", "", sort(tolower(x)))), collapse = "|"),
").*"
)
}
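      # For example (illustrative): to_regex(c("Amoxi-cillin", "CIPRO")) builds
      # "^(amoxicillin|cipro).*", a prefix match on the cleaned, lower-cased
      # names (the input text was already lower-cased above)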
result <- lapply(text_split_all, function(text_split) {
progress$tick()
suppressWarnings(
as.ab(
unique(c(
text_split[text_split %like_case% to_regex(abbr)],
text_split[text_split %like_case% to_regex(names_atc)],
text_split[text_split %like_case% to_regex(synonyms_part1)],
text_split[text_split %like_case% to_regex(synonyms_part2)]
)),
...
)
)
})
}
close(progress)
result <- lapply(result, function(out) {
out <- out[!is.na(out)]
if (length(out) == 0) {
as.ab(NA)
} else {
if (!isFALSE(translate_ab)) {
out <- ab_property(out, property = translate_ab, initial_search = FALSE)
}
out
}
})
} else if (type %like% "dos") {
text_split_all <- strsplit(text, " ", fixed = TRUE)
result <- lapply(text_split_all, function(text_split) {
text_split <- text_split[text_split %like% "^[0-9]{2,}(/[0-9]+)?[a-z]*$"]
# only left part of "/", like 500 in "500/125"
text_split <- gsub("/.*", "", text_split)
text_split <- gsub(",", ".", text_split, fixed = TRUE) # foreign system using comma as decimal sep
text_split <- as.double(gsub("[^0-9.]", "", text_split))
      # doses are at least 100 units/mg; exclude values that are likely years, not doses
text_split <- text_split[text_split >= 100 & !text_split %in% c(1951:1999, 2001:2049)]
if (length(text_split) > 0) {
text_split
} else {
NA_real_
}
})
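    # Illustrative trace of the dose extraction above: for the input
    # "500 mg amoxi po and 400mg cipro iv", the tokens "500" and "400mg"
    # match, their units are stripped, and the result is c(500, 400)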
} else if (type %like% "adm") {
result <- lapply(text_split_all, function(text_split) {
text_split <- text_split[text_split %like% "(^iv$|intraven|^po$|per os|oral|implant|inhal|instill|nasal|paren|rectal|sublingual|buccal|trans.*dermal|vaginal)"]
if (length(text_split) > 0) {
text_split <- gsub("(^po$|.*per os.*)", "oral", text_split)
text_split <- gsub("(^iv$|.*intraven.*)", "iv", text_split)
text_split
} else {
NA_character_
}
})
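    # Illustrative trace of the administration extraction above: for the input
    # "500 mg amoxi po and 400mg cipro iv", the tokens "po" and "iv" match and
    # are normalised to c("oral", "iv")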
} else {
stop_("`type` must be either 'drug', 'dose' or 'administration'")
}
# collapse text if needed
if (!is.null(collapse)) {
result <- vapply(FUN.VALUE = character(1), result, function(x) {
      if (length(x) == 1 && all(is.na(x))) {
NA_character_
} else {
paste0(x, collapse = collapse)
}
})
}
result
}
| /scratch/gouwar.j/cran-all/cranData/AMR/R/ab_from_text.R |