#' evenInsert
#' A function to insert m elements evenly into a length n vector.
#'
#' @param m A numeric vector of length less than or equal to n. The elements to be inserted.
#' @param n A numeric vector. The vector into which the elements of m will be inserted.
#' @param verbose Logical. If TRUE, prints additional information. Default is FALSE.
#' @return Returns a numeric vector with the elements of m evenly inserted into n.
#' @details
#' The function takes two vectors, m and n, and inserts the elements of m evenly into n.
#' If the length of m is greater than the length of n, the vectors are swapped, and the insertion proceeds.
#' The resulting vector is a combination of m and n, with the elements of m evenly distributed within n.
#' @export
#' @seealso \code{\link{simulatePedigree}} for the main function that uses this supporting function.

evenInsert <- function(m, n, verbose = FALSE) {
  # Ensure m is the shorter vector; swap if necessary
  if (length(m) > length(n)) {
    temp <- m
    m <- n
    n <- temp
  }
  # Name each element of m with the (evenly spaced) position it should occupy in n
  for (i in seq_along(m)) {
    names(m)[i] <- ceiling(i * length(n) / length(m))
  }
  if (verbose) {
    print(m)
  }
  names(n) <- seq_along(n)
  if (verbose) {
    print(n)
  }
  # Interleave by sorting on the positional names, then strip the names
  vec <- c(m, n)
  vec <- vec[order(as.numeric(names(vec)))]
  vec <- unname(vec)
  return(vec)
}
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/evenInsert.R
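## ---- Usage sketch (illustrative; not part of the package source) ----
## Assuming the function above is loaded, the shorter vector is spread
## evenly through the longer one:
evenInsert(c(100, 200), 1:6)
#> [1]   1   2 100   3   4   5 200   6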
#' allGens
#' A function to calculate the number of individuals in each generation. This is a supporting function for \code{simulatePedigree}.
#' @param kpc Number of kids per couple (integer >= 2).
#' @param Ngen Number of generations (integer >= 1).
#' @param marR Mating rate (numeric value ranging from 0 to 1).
#' @return Returns a vector containing the number of individuals in every generation.
#' @export
allGens <- function(kpc, Ngen, marR) {
  # Check if the number of generations is valid
  if (Ngen < 1) {
    stop("The number of generations should be an integer greater than or equal to 1")
  }
  if (Ngen == 1) {
    allGens <- 2
  } else {
    allGens <- sizeAllGens(
      kpc = kpc, Ngen = Ngen, marR = marR
    )
  }
  return(allGens)
}

#' sizeAllGens
#' An internal supporting function for \code{simulatePedigree}.
#' @inheritParams allGens
#' @return Returns a vector including the number of individuals in every generation.
sizeAllGens <- function(kpc, Ngen, marR) {
  Nmid <- Ngen - 2
  midGens <- numeric(length = Nmid)
  # Calculate the number of individuals for the middle generations
  if (Nmid > 0) {
    for (i in 2:(Ngen - 1)) {
      midGens[i - 1] <- kpc^(i - 1) * marR^(i - 2) * (1 + marR)
      midGens[i - 1] <- ceiling(midGens[i - 1])
    }
  }
  # Calculate the number of individuals for the last generation
  lastGen <- ceiling(kpc^(Ngen - 1) * marR^(Ngen - 2))
  allGens <- c(2, midGens, lastGen)
  return(allGens)
}

#' famSizeCal
#' A function to calculate the total number of individuals in a pedigree given parameters. This is a supporting function for the function \code{simulatePedigree}.
#' @inheritParams allGens
#' @return Returns a numeric value indicating the total pedigree size.
#' @export
famSizeCal <- function(kpc, Ngen, marR) {
  if (Ngen < 1) {
    stop("The number of generations should be an integer greater than or equal to 1")
  } else if (Ngen == 1) {
    size <- 2
  } else {
    allGens <- sizeAllGens(
      kpc = kpc, Ngen = Ngen, marR = marR
    )
    size <- sum(allGens)
  }
  return(size)
}
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/famSizeCal.R
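## ---- Usage sketch (illustrative; not part of the package source) ----
## Expected generation sizes and total pedigree size for 4 generations,
## 3 kids per couple, and a 2/3 mating rate:
allGens(kpc = 3, Ngen = 4, marR = 2 / 3)
#> [1]  2  5 10 12
famSizeCal(kpc = 3, Ngen = 4, marR = 2 / 3)
#> [1] 29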
#' Add an extended family ID variable to a pedigree
#' @param ped a pedigree dataset. Needs ID, momID, and dadID columns
#' @param personID character. Name of the column in ped for the person ID variable
#' @param momID character. Name of the column in ped for the mother ID variable
#' @param dadID character. Name of the column in ped for the father ID variable
#' @param famID character. Name of the column to be created in ped for the family ID variable
#' @details
#' The general idea of this function is to use person ID, mother ID, and father ID to
#' create an extended family ID such that everyone with the same family ID is in the
#' same (perhaps very extended) pedigree. That is, a pair of people with the same family ID
#' have at least one traceable relation of any length to one another.
#'
#' This function works by turning the pedigree into a mathematical graph using the igraph
#' package. Once in graph form, the function uses weakly connected components to search
#' for all possible relationship paths that could connect anyone in the data to anyone
#' else in the data.
#'
#' @returns
#' A pedigree dataset with one additional column for the newly created extended family ID
#'
#' @export
#'
ped2fam <- function(ped, personID = "ID", momID = "momID", dadID = "dadID", famID = "famID") {
  # Call to wrapper function
  .ped2id(ped = ped, personID = personID, momID = momID, dadID = dadID, famID = famID, type = "parents")
}

.ped2id <- function(ped, personID = "ID", momID = "momID", dadID = "dadID", famID = "famID", type) {
  # Turn the pedigree into a graph
  pg <- ped2graph(ped = ped, personID = personID, momID = momID, dadID = dadID, adjacent = type)
  # Find weakly connected components of the graph
  wcc <- igraph::components(pg)
  fam <- data.frame(
    V1 = as.numeric(names(wcc$membership)),
    V2 = wcc$membership
  )
  names(fam) <- c(personID, famID)
  ped2 <- merge(fam, ped,
    by = personID,
    all.x = FALSE, all.y = TRUE
  )
  return(ped2)
}

#' Turn a pedigree into a graph
#' @param ped a pedigree dataset. Needs ID, momID, and dadID columns
#' @inheritParams ped2fam
#' @param directed Logical scalar. Default is TRUE. Indicates whether or not to create a directed graph.
#' @param adjacent Character. Relationship that defines adjacency in the graph: parents, mothers, or fathers
#' @details
#' The general idea of this function is to represent a pedigree as a graph using the igraph package.
#'
#' Once in graph form, several common pedigree tasks become much simpler.
#'
#' The \code{adjacent} argument allows for different kinds of graph structures.
#' When using \code{parents} for adjacency, the graph shows all parent-child relationships.
#' When using \code{mothers} for adjacency, the graph only shows mother-child relationships.
#' Similarly, when using \code{fathers} for adjacency, only father-child relationships appear in the graph.
#' Construct extended families from the parents graph, maternal lines from the mothers graph,
#' and paternal lines from the fathers graph.
#'
#' @returns
#' A graph
#'
#' @export
#'
ped2graph <- function(ped, personID = "ID", momID = "momID", dadID = "dadID",
                      directed = TRUE, adjacent = c("parents", "mothers", "fathers")) {
  # Check that ped is (or inherits from) a data.frame
  if (!inherits(ped, "data.frame")) stop("ped should be a data.frame or inherit from data.frame")
  # Handle the adjacent argument
  adjacent <- match.arg(tolower(adjacent)[1],
    choices = c("parents", "mothers", "fathers")
  )
  # Check that the needed IDs are in the data
  if (adjacent == "parents") {
    needIds <- c(personID, momID, dadID)
  } else if (adjacent == "mothers") {
    needIds <- c(personID, momID)
  } else if (adjacent == "fathers") {
    needIds <- c(personID, dadID)
  }
  if (!all(needIds %in% names(ped))) {
    msg <- paste0(
      "The following ID variables are needed but were not found:\n",
      paste(needIds[!(needIds %in% names(ped))], collapse = ", "), "\n",
      "Make sure you have the variable names correct."
    )
    stop(msg)
  }
  # Create nodes and edges
  if (adjacent == "parents") {
    nodes <- unique(
      stats::na.omit(
        as.character(c(ped[[personID]], ped[[momID]], ped[[dadID]]))
      )
    )
    edges <- rbind(
      as.matrix(data.frame(personID = as.character(ped[[personID]]), momID = as.character(ped[[momID]]))),
      as.matrix(data.frame(personID = as.character(ped[[personID]]), dadID = as.character(ped[[dadID]])))
    )
  } else if (adjacent == "mothers") {
    nodes <- unique(
      stats::na.omit(
        as.character(c(ped[[personID]], ped[[momID]]))
      )
    )
    edges <- as.matrix(data.frame(personID = as.character(ped[[personID]]), momID = as.character(ped[[momID]])))
  } else if (adjacent == "fathers") {
    nodes <- unique(
      stats::na.omit(
        as.character(c(ped[[personID]], ped[[dadID]]))
      )
    )
    edges <- as.matrix(data.frame(personID = as.character(ped[[personID]]), dadID = as.character(ped[[dadID]])))
  }
  edges <- edges[stats::complete.cases(edges), ]
  # Make the graph
  pg <- igraph::graph_from_data_frame(
    d = edges,
    directed = directed, # directed = TRUE looks better
    vertices = nodes
  )
  return(pg)
}

#' Add a maternal line ID variable to a pedigree
#' @inheritParams ped2fam
#' @param matID Character. Maternal line ID variable to be created and added to the pedigree
#' @details
#' Under various scenarios it is useful to know which people in a pedigree
#' belong to the same maternal lines. This function first turns a pedigree
#' into a graph where adjacency is defined by mother-child relationships.
#' Subsequently, the weakly connected components algorithm finds all the
#' separate maternal lines and gives them an ID variable.
#' @seealso [ped2fam()] for creating extended family IDs, and [ped2paternal()]
#' for creating paternal line IDs
#' @export
#'
ped2maternal <- function(ped, personID = "ID", momID = "momID", dadID = "dadID",
                         matID = "matID") {
  # Call to wrapper function
  .ped2id(ped = ped, personID = personID, momID = momID, dadID = dadID, famID = matID, type = "mothers")
}

#' Add a paternal line ID variable to a pedigree
#' @inheritParams ped2fam
#' @param patID Character. Paternal line ID variable to be created and added to the pedigree
#' @details
#' Under various scenarios it is useful to know which people in a pedigree
#' belong to the same paternal lines. This function first turns a pedigree
#' into a graph where adjacency is defined by father-child relationships.
#' Subsequently, the weakly connected components algorithm finds all the
#' separate paternal lines and gives them an ID variable.
#' @seealso [ped2fam()] for creating extended family IDs, and [ped2maternal()]
#' for creating maternal line IDs
#' @export
#'
ped2paternal <- function(ped, personID = "ID", momID = "momID", dadID = "dadID",
                         patID = "patID") {
  # Call to wrapper function
  .ped2id(ped = ped, personID = personID, momID = momID, dadID = dadID, famID = patID, type = "fathers")
}
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/family.R
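## ---- Usage sketch (illustrative; not part of the package source) ----
## A toy four-person pedigree using the default column names; assumes the
## igraph package is installed. ped2fam() links everyone reachable through
## any parent-child path; ped2maternal() uses mother-child links only.
ped <- data.frame(
  ID    = c(1, 2, 3, 4),
  momID = c(NA, NA, 1, 1),
  dadID = c(NA, NA, 2, 2)
)
ped2fam(ped)      # famID: all four fall in one extended family
ped2maternal(ped) # matID: {1, 3, 4} share a maternal line; 2 is on its own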
#' Calculate Relatedness Coefficient
#'
#' This function calculates the relatedness coefficient between two individuals based on their shared ancestry, as described by Wright (1922).
#'
#' @description
#' The relatedness coefficient between two people (b & c) is defined in relation to their common ancestors:
#' \eqn{r_{bc} = \sum \left(\frac{1}{2}\right)^{n+n'+1} (1+f_a)}
#'
#' @param generations Number of generations back of common ancestors the pair share.
#' @param path Traditional method to count common ancestry, which is twice the number of generations removed from the common ancestors. If not provided, it is calculated as 2 * generations.
#' @param full Logical. Indicates if the kin share both parents at the common ancestor's generation. Default is TRUE.
#' @param maternal Logical. Indicates if the maternal lineage should be considered in the calculation.
#' @param empirical Logical. Adjusts the coefficient based on empirical data, using the total number of nucleotides and other parameters.
#' @param segregating Logical. Adjusts for segregating genes.
#' @param total_a Numeric. Represents the total size of the autosomal genome in terms of nucleotides, used in the empirical adjustment. Default is 6800 * 1000000.
#' @param total_m Numeric. Represents the total size of the mitochondrial genome in terms of nucleotides, used in the empirical adjustment. Default is 16500.
#' @param weight_a Numeric. Represents the weight of phenotypic influence from additive genetic variance, used in the empirical adjustment.
#' @param weight_m Numeric. Represents the weight of phenotypic influence from mitochondrial effects, used in the empirical adjustment.
#' @param denom_m Logical. Indicates if `total_m` and `weight_m` should be included in the denominator of the empirical adjustment calculation.
#' @param ... Further named arguments that may be passed to another function.
#'
#' @return
#' Relatedness Coefficient (`coef`): A measure of the genetic relationship between two individuals.
#'
#' @examples
#' \dontrun{
#' # For full siblings, the relatedness coefficient is expected to be 0.5:
#' calculateRelatedness(generations = 1, full = TRUE)
#' # For half siblings, the relatedness coefficient is expected to be 0.25:
#' calculateRelatedness(generations = 1, full = FALSE)
#' }
#' @export
#'
calculateRelatedness <- function(generations = 2, path = NULL, full = TRUE, maternal = FALSE,
                                 empirical = FALSE, segregating = TRUE,
                                 total_a = 6800 * 1000000, total_m = 16500,
                                 weight_a = 1, weight_m = 1, denom_m = FALSE, ...) {
  # If path is not provided, it is calculated as twice the number of generations
  if (is.null(path)) {
    path <- generations * 2
  }
  # Calculate the coefficient based on the path
  coef <- .5^path
  # If full siblings, the coefficient is doubled
  if (full) {
    coef <- coef * 2
  }
  # If not considering segregating genes, adjust the coefficient
  if (!segregating) {
    coef <- coef * .01 + .99
  }
  # If an empirical adjustment is requested
  if (empirical) {
    coef <- (coef * total_a * weight_a + maternal * total_m * weight_m) /
      (denom_m * total_m * weight_m + total_a * weight_a)
  }
  return(coef)
}

#' Infer Relatedness Coefficient
#'
#' Infers the relatedness coefficient between two groups based on the observed correlation between their additive genetic variance and shared environmental variance.
#'
#' @description
#' The function uses the ACE framework to infer the relatedness between two individuals.
#'
#' @param obsR Numeric. Observed correlation between the two groups. Must be between -1 and 1.
#' @param aceA Numeric. Proportion of variance attributable to additive genetic variance. Must be between 0 and 1. Default is 0.9.
#' @param aceC Numeric. Proportion of variance attributable to shared environmental variance. Must be between 0 and 1. Default is 0.
#' @param sharedC Numeric. Proportion of shared environment shared between the two individuals. Must be between 0 and 1. Default is 0.
#'
#' @return
#' Calculated relatedness coefficient (`calc_r`).
#'
#' @examples
#' \dontrun{
#' # Infer the relatedness coefficient:
#' inferRelatedness(obsR = 0.5, aceA = 0.9, aceC = 0, sharedC = 0)
#' }
#' @export
inferRelatedness <- function(obsR, aceA = .9, aceC = 0, sharedC = 0) {
  if (aceA > 1 || aceA < 0 || aceC > 1 || aceC < 0) {
    stop("aceA and aceC must be proportions between 0 and 1")
  }
  # Solve obsR = calc_r * aceA + sharedC * aceC for calc_r
  calc_r <- (obsR - sharedC * aceC) / aceA
  return(calc_r)
}
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/formula.R
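## ---- Usage sketch (illustrative; not part of the package source) ----
## Wright's path-counting coefficient for common kin pairs, and the reverse
## calculation from an observed correlation under an AE model:
calculateRelatedness(generations = 1, full = TRUE)  # full siblings: 0.5
calculateRelatedness(generations = 1, full = FALSE) # half siblings: 0.25
inferRelatedness(obsR = 0.45, aceA = 0.9)           # (0.45 - 0) / 0.9 = 0.5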
#' Create Data Frame for Generation
#'
#' This function creates a data frame for a specific generation within the simulated pedigree.
#' It initializes the data frame with default values for family ID, individual ID, generation number,
#' paternal ID, maternal ID, spouse ID, and sex. All individuals are initially set with NA for paternal,
#' maternal, spouse IDs, and sex, awaiting further assignment.
#'
#' @param sizeGens A numeric vector containing the sizes of each generation within the pedigree.
#' @param genIndex An integer representing the current generation index for which the data frame is being created.
#' @param idGen A numeric vector containing the ID numbers to be assigned to individuals in the current generation.
#' @return A data frame representing the initial structure for the individuals in the specified generation
#' before any relationships (parental, spousal) are defined. The columns include family ID (`fam`),
#' individual ID (`id`), generation number (`gen`), father's ID (`pat`), mother's ID (`mat`),
#' spouse's ID (`spt`), and sex (`sex`), with NA values for paternal, maternal, and spouse IDs, and sex.
#' @examples
#' sizeGens <- c(3, 5, 4) # Example sizes for 3 generations
#' genIndex <- 2 # Creating data frame for the 2nd generation
#' idGen <- 101:105 # Example IDs for the 2nd generation
#' df_Ngen <- createGenDataFrame(sizeGens, genIndex, idGen)
#' print(df_Ngen)
#' @export
createGenDataFrame <- function(sizeGens, genIndex, idGen) {
  df_Ngen <- data.frame(
    # paste0 gives the intended "fam1"; previously sep = "" was mistakenly
    # passed to rep() rather than paste()
    fam = rep(paste0("fam", 1), sizeGens[genIndex]),
    id = idGen[1:sizeGens[genIndex]],
    gen = rep(genIndex, sizeGens[genIndex]),
    pat = rep(NA, sizeGens[genIndex]), # father id
    mat = rep(NA, sizeGens[genIndex]), # mother id
    spt = rep(NA, sizeGens[genIndex]), # spouse id
    sex = rep(NA, sizeGens[genIndex])
  )
  return(df_Ngen)
}

#' Determine Sex of Offspring
#'
#' This function assigns sexes to the offspring in a generation based on the specified sex ratio.
#'
#' @param idGen Vector of IDs for the generation.
#' @param sexR Numeric value indicating the sex ratio (proportion of males).
#' @return Vector of sexes ("M" for male, "F" for female) for the offspring.
#' @importFrom stats runif
#' @export
determineSex <- function(idGen, sexR) {
  # Randomize which sex absorbs the rounding remainder
  if (runif(1) > .5) {
    sexVec1 <- rep("M", floor(length(idGen) * sexR))
    sexVec2 <- rep("F", length(idGen) - length(sexVec1))
  } else {
    sexVec1 <- rep("F", floor(length(idGen) * (1 - sexR)))
    sexVec2 <- rep("M", length(idGen) - length(sexVec1))
  }
  sexVec <- sample(c(sexVec1, sexVec2))
  return(sexVec)
}

#' Assign Couple IDs
#'
#' This subfunction assigns a unique couple ID to each mated pair in the generation.
#' Unmated individuals are assigned NA for their couple ID.
#'
#' @param df_Ngen The dataframe for the current generation, including columns for individual IDs and spouse IDs.
#' @return The input dataframe augmented with a 'coupleId' column, where each mated pair has a unique identifier.
assignCoupleIds <- function(df_Ngen) {
  df_Ngen$coupleId <- NA_character_ # Initialize the coupleId column with NAs
  usedCoupleIds <- character() # Initialize an empty character vector to track used IDs
  for (j in seq_len(nrow(df_Ngen))) {
    if (!is.na(df_Ngen$spt[j]) && is.na(df_Ngen$coupleId[j])) {
      # Construct a potential couple ID from sorted individual and spouse IDs
      sortedIds <- sort(c(df_Ngen$id[j], df_Ngen$spt[j]))
      potentialCoupleId <- paste(sortedIds[1], sortedIds[2], sep = "_")
      # Check if the potentialCoupleId has not already been used
      if (!potentialCoupleId %in% usedCoupleIds) {
        # Assign the new couple ID to both partners
        df_Ngen$coupleId[j] <- potentialCoupleId
        spouseIndex <- which(df_Ngen$id == df_Ngen$spt[j])
        df_Ngen$coupleId[spouseIndex] <- potentialCoupleId
        # Add the new couple ID to the list of used IDs
        usedCoupleIds <- c(usedCoupleIds, potentialCoupleId)
      }
    }
  }
  return(df_Ngen)
}

#' Generate or Adjust Number of Kids per Couple Based on Mating Rate
#'
#' This function generates or adjusts the number of kids per couple in a generation
#' based on the specified average and whether the count should be randomly determined.
#'
#' @param nMates Integer, the number of mated pairs in the generation.
#' @inheritParams simulatePedigree
#'
#' @return A numeric vector with the generated or adjusted number of kids per couple.
adjustKidsPerCouple <- function(nMates, kpc, rd_kpc) {
  if (rd_kpc) {
    # Draw Poisson counts until the total is within nMates of the target
    diff <- nMates + 1
    while (diff > nMates) {
      random_numbers <- stats::rpois(nMates, kpc)
      diff <- abs(nMates * kpc - sum(random_numbers))
    }
    # Make sure the sum of kids per couple equals the number of kids in the i th generation
    if (sum(random_numbers) < nMates * kpc) {
      names(random_numbers) <- seq_along(random_numbers)
      random_numbers <- sort(random_numbers)
      random_numbers[1:diff] <- random_numbers[1:diff] + 1
      random_numbers <- random_numbers[order(names(random_numbers))]
    } else if (sum(random_numbers) > nMates * kpc) {
      names(random_numbers) <- seq_along(random_numbers)
      random_numbers <- sort(random_numbers, decreasing = TRUE)
      random_numbers[1:diff] <- random_numbers[1:diff] - 1
      random_numbers <- random_numbers[order(names(random_numbers))]
    }
  } else {
    random_numbers <- rep(kpc, nMates)
  }
  # Repair any negative counts introduced by the downward adjustment
  if (min(random_numbers) < 0) {
    random_numbers[random_numbers == -1] <- 0
    random_numbers[random_numbers == max(random_numbers)] <- max(random_numbers) - 1
  }
  return(random_numbers)
}

#' Mark and Assign Children
#'
#' This subfunction marks individuals in a generation as potential sons, daughters,
#' or parents based on their relationships and assigns unique couple IDs. It processes
#' the assignment of roles and relationships within and between generations in a pedigree simulation.
#'
#' @param df_Ngen A data frame for the current generation being processed.
#' It must include columns for individual IDs (`id`), spouse IDs (`spt`), sex (`sex`),
#' and any previously assigned roles (`ifparent`, `ifson`, `ifdau`).
#' @param i Integer, the index of the current generation being processed.
#' @param Ngen Integer, the total number of generations in the simulation.
#' @param sizeGens Numeric vector, containing the size (number of individuals) of each generation.
#' @param CoupleF Integer, the number of couples in the current generation whose female partner is to be marked as a daughter (i.e., linked to the previous generation).
#'
#' @return The data frame `df_Ngen` with updated columns for individual roles
#' (`ifparent`, `ifson`, `ifdau`), returned for integration into the larger pedigree data frame (`df_Fam`).
markPotentialChildren <- function(df_Ngen, i, Ngen, sizeGens, CoupleF) {
  # Step 2.1: mark a group of potential sons and daughters in the i th generation
  # Get all couple ids
  coupleID <- unique(df_Ngen$coupleId[!is.na(df_Ngen$coupleId)])
  if (i == Ngen) {
    CoupleF <- 0
  }
  coupleGirl <- sample(coupleID, CoupleF)
  coupleBoy <- coupleID[!coupleID %in% coupleGirl]
  # Single persons should all be sons or daughters;
  # update ifson and ifdau based on coupleGirl and coupleBoy
  for (j in 1:sizeGens[i]) {
    if (is.na(df_Ngen$spt[j])) {
      if (df_Ngen$sex[j] == "F") {
        df_Ngen$ifdau[j] <- TRUE
      } else {
        df_Ngen$ifson[j] <- TRUE
      }
    } else {
      if (df_Ngen$coupleId[j] %in% coupleBoy && df_Ngen$sex[j] == "M") {
        df_Ngen$ifson[j] <- TRUE
      } else if (df_Ngen$coupleId[j] %in% coupleGirl && df_Ngen$sex[j] == "F") {
        df_Ngen$ifdau[j] <- TRUE
      } else {
        next
      }
    }
  }
  # Restore the original row order and drop the temporary coupleId column
  df_Ngen <- df_Ngen[order(as.numeric(rownames(df_Ngen))), , drop = FALSE]
  df_Ngen <- df_Ngen[, -ncol(df_Ngen)]
  return(df_Ngen)
}
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/helpPedigree.R
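## ---- Usage sketch (illustrative; not part of the package source) ----
## Initialize the second generation of a pedigree and assign sexes; the
## ID values are arbitrary placeholders.
sizeGens <- c(2, 5)
df_gen2 <- createGenDataFrame(sizeGens, genIndex = 2, idGen = 10021:10025)
df_gen2$sex <- determineSex(idGen = df_gen2$id, sexR = 0.5)
df_gen2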
#' Error Function
#'
#' @param error error output
#' @keywords internal
#' @return Replaces the error message (\code{error}) with NA
#'
efunc <- function(error) {
  return(NA)
}

#' rmvn
#' @keywords internal
#' @param n Sample size
#' @param sigma Covariance matrix
#' @return Generates multivariate normal data of length \code{n} from a covariance matrix (\code{sigma})
#'
rmvn <- function(n, sigma) {
  sH <- with(svd(sigma), v %*% diag(sqrt(d)) %*% t(u))
  matrix(stats::rnorm(ncol(sigma) * n), ncol = ncol(sigma)) %*% sH
}

#' nullToNA
#' @keywords internal
#' @param x vector of any length
#' @return Replaces NULL values in a vector with NA
#'
nullToNA <- function(x) {
  if (length(x) == 0) {
    x <- NA
    # Handle case when x is a list
  } else if (is.list(x)) {
    for (i in seq_along(x)) {
      if (is.null(x[[i]])) {
        x[[i]] <- NA
      }
    }
  }
  return(x)
}

#' Modified tryCatch function
#'
#' @param x vector of any length
#' @keywords internal
#' @return Combines the nullToNA function with efunc, so errors and NULLs both become NA
#'
try_na <- function(x) {
  nullToNA(tryCatch(x, error = efunc))
}

#' Compute the null space of a matrix
#'
#' @param M a matrix of which the null space is desired
#' @keywords internal
#'
#' @details
#' The method uses the QR factorization to determine a basis for the null
#' space of a matrix. This is sometimes also called the orthogonal
#' complement of a matrix. As implemented, this function is identical
#' to the function of the same name in the MASS package.
#'
Null <- function(M) {
  tmp <- qr(M)
  set <- if (tmp$rank == 0L) {
    seq_len(ncol(M))
  } else {
    -seq_len(tmp$rank)
  }
  return(qr.Q(tmp, complete = TRUE)[, set, drop = FALSE])
}

#' Resample Elements of a Vector
#'
#' This function performs resampling of the elements in a vector `x`. It randomly
#' shuffles the elements of `x` and returns a vector of the resampled elements. If `x`
#' is empty, it returns `NA_integer_`.
#'
#' @param x A vector containing the elements to be resampled. If `x` is empty, the
#' function will return `NA_integer_`.
#' @param ... Additional arguments passed to `sample.int`, such as `size` for the
#' number of items to sample and `replace` indicating whether sampling should be with
#' replacement.
#'
#' @return A vector of resampled elements from `x`. If `x` is empty, returns
#' `NA_integer_`. The length and type of the returned vector depend on the input
#' vector `x` and the additional arguments provided via `...`.
#'
#' @export
resample <- function(x, ...) {
  if (length(x) == 0) {
    return(NA_integer_)
  }
  x[sample.int(length(x), ...)]
}

#' SimPed (Deprecated)
#'
#' This function is a wrapper around the new `simulatePedigree` function.
#' `SimPed` has been deprecated, and it's advised to use `simulatePedigree` directly.
#'
#' @param ... Arguments to be passed to `simulatePedigree`.
#' @return The same result as calling `simulatePedigree`.
#' @seealso \code{\link{simulatePedigree}} for the updated function.
#' @description When calling this function, a warning will be issued about its deprecation.
#' @keywords deprecated
#' @examples
#' \dontrun{
#' # This is an example of the deprecated function:
#' SimPed(...)
#' # It is recommended to use:
#' simulatePedigree(...)
#' }
#' @export
SimPed <- function(...) { # nolint: object_name_linter.
  warning("The 'SimPed' function is deprecated. Please use 'simulatePedigree' instead.")
  simulatePedigree(...)
}

#' related_coef (Deprecated)
#'
#' This function is a wrapper around the new `calculateRelatedness` function.
#' `related_coef` has been deprecated, and it's advised to use `calculateRelatedness` directly.
#'
#' @param ... Arguments to be passed to `calculateRelatedness`.
#' @return The same result as calling `calculateRelatedness`.
#' @seealso \code{\link{calculateRelatedness}} for the updated function.
#' @description When calling this function, a warning will be issued about its deprecation.
#' @keywords deprecated
#' @examples
#' \dontrun{
#' # This is an example of the deprecated function:
#' related_coef(...)
#' # It is recommended to use:
#' calculateRelatedness(...)
#' }
#' @export
related_coef <- function(...) {
  warning("The 'related_coef' function is deprecated. Please use 'calculateRelatedness' instead.")
  calculateRelatedness(...)
}

#' relatedness (Deprecated)
#'
#' This function is a wrapper around the new `inferRelatedness` function.
#' `relatedness` has been deprecated, and it's advised to use `inferRelatedness` directly.
#'
#' @param ... Arguments to be passed to `inferRelatedness`.
#' @return The same result as calling `inferRelatedness`.
#' @seealso \code{\link{inferRelatedness}} for the updated function.
#' @description When calling this function, a warning will be issued about its deprecation.
#' @keywords deprecated
#' @examples
#' \dontrun{
#' # This is an example of the deprecated function:
#' relatedness(...)
#' # It is recommended to use:
#' inferRelatedness(...)
#' }
#' @export
relatedness <- function(...) {
  warning("The 'relatedness' function is deprecated. Please use 'inferRelatedness' instead.")
  inferRelatedness(...)
}
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/helper.R
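## ---- Usage sketch (illustrative; not part of the package source) ----
## resample() behaves like sample() on the elements of x, but returns
## NA_integer_ for empty input instead of falling into sample()'s
## "sample from 1:n" surprise.
resample(c(11, 22, 33), size = 2)
resample(integer(0)) # NA_integer_
## Null() returns an orthonormal basis for the vectors orthogonal to the
## columns of M (as in MASS::Null):
Null(matrix(c(1, 1), ncol = 1)) # one basis vector, orthogonal to (1, 1)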
#' identifyComponentModel
#' Determine if a variance components model is identified
#'
#' @param ... Comma-separated relatedness component matrices representing the variance components of the model.
#' @param verbose logical. If FALSE, suppresses messages about identification; TRUE by default.
#' @return A list of length 2 containing:
#' \itemize{
#' \item \code{identified}: TRUE if the model is identified, FALSE otherwise.
#' \item \code{nidp}: A vector of non-identified parameters, specifying the names of components that are not simultaneously identified.
#' }
#'
#' @export
#'
#' @details
#' This function checks the identification status of a given variance components model
#' by examining the rank of the concatenated matrices of the components.
#' If any components are not identified, their names are returned in the output.
#'
#' @examples
#'
#' identifyComponentModel(A = list(matrix(1, 2, 2)), C = list(matrix(1, 2, 2)), E = diag(1, 2))
#'
identifyComponentModel <- function(..., verbose = TRUE) {
  # Collect the relatedness components
  dots <- list(...)
  nam <- names(dots)
  if (is.null(nam)) {
    nam <- paste0("Comp", seq_along(dots))
  }
  # Convert components to vectorized form
  compl <- lapply(dots, comp2vech, include.zeros = TRUE)
  compm <- do.call(cbind, compl)
  rank <- qr(compm)$rank
  if (rank != length(dots)) {
    if (verbose) cat("Component model is not identified.\n")
    # Non-identified parameters span the null space of the component matrix
    jacOC <- Null(t(compm))
    nidp <- nam[apply(jacOC, 1, function(x) {
      sum(x^2)
    }) > 1e-17]
    if (verbose) {
      cat(
        "Non-identified parameters are ",
        paste(nidp, collapse = ", "), "\n"
      )
    }
    return(list(identified = FALSE, nidp = nidp))
  } else {
    if (verbose) cat("Component model is identified.\n")
    return(list(identified = TRUE, nidp = character(0)))
  }
}

#' fitComponentModel
#' Fit the estimated variance components of a model to covariance data
#'
#' @param covmat The covariance matrix of the raw data, which may be blockwise.
#' @param ... Comma-separated relatedness component matrices representing the variance components of the model.
#' @return A regression (linear model fitted with \code{lm}). The coefficients of the regression represent the estimated variance components.
#' @export
#'
#' @details
#' This function fits the estimated variance components of a model to given covariance data.
#' The rank of the component matrices is checked to ensure that the variance components are all identified.
#' Warnings are issued if there are inconsistencies.
#'
#' @examples
#' \dontrun{
#' # install.packages("OpenMx")
#' data(twinData, package = "OpenMx")
#' selVars <- c("ht1", "ht2")
#' mzData <- subset(twinData, zyg %in% c(1), c(selVars, "zyg"))
#' dzData <- subset(twinData, zyg %in% c(3), c(selVars, "zyg"))
#'
#' fitComponentModel(
#'   covmat = list(cov(mzData[, selVars], use = "pair"), cov(dzData[, selVars], use = "pair")),
#'   A = list(matrix(1, nrow = 2, ncol = 2), matrix(c(1, 0.5, 0.5, 1), nrow = 2, ncol = 2)),
#'   C = list(matrix(1, nrow = 2, ncol = 2), matrix(1, nrow = 2, ncol = 2)),
#'   E = list(diag(1, nrow = 2), diag(1, nrow = 2))
#' )
#' }
#'
fitComponentModel <- function(covmat, ...) {
  dots <- list(...)
  compl <- lapply(dots, comp2vech, include.zeros = TRUE)
  compm <- do.call(cbind, compl)
  rank <- qr(compm)$rank
  y <- comp2vech(covmat, include.zeros = TRUE)
  if (rank != length(dots)) {
    msg <- paste(
      "Variance components are not all identified.",
      "Try identifyComponentModel()."
    )
    stop(msg)
  }
  if (rank > length(y)) {
    msg <- paste0(
      "Trying to estimate ", rank, " variance components when at most ",
      length(y), " are possible with the data given.\n"
    )
    warning(msg)
  }
  stats::lm(y ~ 0 + compm)
}

#' vech
#' Create the half-vectorization of a matrix
#'
#' @param x a matrix, the half-vectorization of which is desired
#' @return A vector containing the lower triangle of the matrix, including the diagonal.
#' @export
#'
#' @details
#' This function returns the vectorized form of the lower triangle of a matrix, including the diagonal.
#' The upper triangle is ignored, with no checking that the provided matrix is symmetric.
#'
#' @examples
#'
#' vech(matrix(c(1, 0.5, 0.5, 1), nrow = 2, ncol = 2))
#'
vech <- function(x) {
  x[lower.tri(x, diag = TRUE)]
}

#' comp2vech
#' Turn a variance component relatedness matrix into its half-vectorization
#'
#' @param x Relatedness component matrix (can be a matrix, list, or object that inherits from 'Matrix').
#' @param include.zeros logical. Whether to include all-zero rows. Default is FALSE.
#' @export
#' @return The half-vectorization of the relatedness component matrix.
#' @details
#' This function is a wrapper around the \code{vech} function, extending it to allow for blockwise matrices and specific classes.
#' It facilitates the conversion of a variance component relatedness matrix into a half-vectorized form.
#'
#' @examples comp2vech(list(matrix(c(1, .5, .5, 1), 2, 2), matrix(1, 2, 2)))
#'
comp2vech <- function(x, include.zeros = FALSE) {
  if (is.matrix(x)) {
    return(vech(x))
  } else if (is.list(x)) {
    if (include.zeros) {
      # Blockwise matrices: bind into one block-diagonal matrix, then vectorize
      return(vech(as.matrix(Matrix::bdiag(x))))
    } else {
      return(do.call(c, lapply(x, vech)))
    }
  } else if (inherits(x, "Matrix")) {
    return(vech(as.matrix(x)))
  } else {
    msg <- paste(
      "Can't make component into a half vectorization:",
      "x is neither a list nor a matrix."
    )
    stop(msg)
  }
}
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/identifyModel.R
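## ---- Usage sketch (illustrative; not part of the package source) ----
## With MZ twin data alone, the A and C components produce identical expected
## covariance patterns, so they are confounded; adding a DZ group (r = .5 for
## A) makes the ACE components simultaneously identifiable.
identifyComponentModel(A = list(matrix(1, 2, 2)), C = list(matrix(1, 2, 2)), E = diag(1, 2))
identifyComponentModel(
  A = list(matrix(1, 2, 2), matrix(c(1, .5, .5, 1), 2, 2)),
  C = list(matrix(1, 2, 2), matrix(1, 2, 2)),
  E = list(diag(2), diag(2))
)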
#' plotPedigree
#' A wrapped function to plot a simulated pedigree from the function \code{simulatePedigree}. This function requires the installation of the package \code{kinship2}.
#' @import kinship2
#' @param ped The simulated pedigree data.frame from the function \code{simulatePedigree}, or a pedigree data.frame with the same column names as a data.frame simulated from \code{simulatePedigree}.
#' @param cex The font size of the IDs for each individual in the plot.
#' @param verbose logical. If TRUE, prints additional information. Default is FALSE.
#' @param code_male This optional input allows you to indicate what value in the sex variable codes for male. Will be recoded as "M" (Male). If \code{NULL}, no recoding is performed.
#' @param affected This optional parameter can either be a string specifying the column name that indicates affected status or a numeric/logical vector of the same length as the number of rows in 'ped'. If \code{NULL}, no affected status is assigned.
#' @inheritParams kinship2::plot.pedigree
#' @return A plot of the provided pedigree
#' @export
plotPedigree <- function(ped,
                         # optional data management
                         code_male = NULL,
                         verbose = FALSE,
                         affected = NULL,
                         # optional inputs for the pedigree plot
                         cex = .5,
                         col = 1,
                         symbolsize = 1, branch = 0.6,
                         packed = TRUE, align = c(1.5, 2), width = 8,
                         density = c(-1, 35, 65, 20), mar = c(2.1, 1, 2.1, 1),
                         angle = c(90, 65, 40, 0), keep.par = FALSE,
                         pconnect = .5,
                         ...) {
  # Standardize column names in the input dataframe
  ped <- standardizeColnames(ped)

  # Define required columns
  simulated_vars <- c("fam", "ID", "dadID", "momID", "sex")

  # Check if the dataframe contains the required columns
  if (all(simulated_vars %in% names(ped))) {
    p <- ped[, c("fam", "ID", "dadID", "momID", "sex")]
    colnames(p) <- c("ped", "id", "father", "mother", "sex")
    # data conversion
    p[is.na(p)] <- 0
    # add affected status if present
    if (is.null(affected)) {
      p$affected <- 0
    } else {
      # Check if 'affected' is a character (indicating a column name)
      if (is.character(affected)) {
        # Check if the data frame contains a column that matches the 'affected' string
        if (affected %in% names(ped)) {
          p$affected <- ped[[affected]]
        } else {
          stop(paste("Column", affected, "does not exist in the data frame"))
        }
        # Check if 'affected' is a numeric or logical vector
      } else if (is.numeric(affected) || is.logical(affected)) {
        # Check if the length of the vector matches the number of rows in the data frame
        if (length(affected) == nrow(p)) {
          p$affected <- affected
        } else {
          stop("Length of the 'affected' vector does not match the number of rows in the data frame")
        }
        # If 'affected' is neither a string nor a numeric/logical vector
      } else {
        stop("The 'affected' parameter must be either a string (column name) or a numeric/logical vector")
      }
    }
    p$avail <- 0
    # recode sex values
    p <- recodeSex(p, code_male = code_male)
    # family id
    if (length(unique(p$ped)) == 1) { # only one family
      p$ped <- 1
    } else {
      # Assign a unique numeric ID for each unique family
      unique_families <- unique(p$ped)
      named_families <- seq_along(unique_families)
      p$ped <- named_families[match(p$ped, unique_families)]
    }
    p2 <- kinship2::pedigree(
      id = p$id,
      dadid = p$father,
      momid = p$mother,
      sex = p$sex,
      famid = p$ped
    )
    p3 <- p2["1"]
    if (verbose) {
      print(p3)
      return(kinship2::plot.pedigree(p3,
        cex = cex,
        col = col,
        symbolsize = symbolsize,
        branch = branch,
        packed = packed, align = align,
        width = width,
        density = density,
        angle = angle, keep.par = keep.par,
        pconnect = pconnect,
        mar = mar
      ))
    } else {
      # TODO: consistently suppress the printing of the pedigree comments
      # Determine the null device based on the OS
      # null_device <- ifelse(.Platform$OS.type == "windows", "nul", "/dev/null")
      # Start redirecting the standard output to suppress messages
      # sink(file = null_device, type = "output")
      # Ensure the output is reverted back to the console when the function exits
      # on.exit(if (sink.number() > 0) sink(), add = TRUE)
      plot_picture <- kinship2::plot.pedigree(p3,
        cex = cex,
        col = col,
        symbolsize = symbolsize,
        branch = branch,
        packed = packed, align = align,
        width = width,
        density = density,
        angle = angle, keep.par = keep.par,
        pconnect = pconnect,
        mar = mar
      )
      # Explicitly revert the standard output back to the console
      # if (sink.number() > 0) {
      #   sink()
      # }
      return(plot_picture)
    }
  } else {
    stop("The structure of the provided pedigree data does not match the expected structure.")
  }
}
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/plotPedigree.R
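## ---- Usage sketch (illustrative; not part of the package source) ----
## Simulate a small pedigree and plot it; assumes the full BGmisc package
## (for standardizeColnames/recodeSex) and kinship2 are installed.
set.seed(1)
ped <- simulatePedigree(kpc = 3, Ngen = 3)
plotPedigree(ped, cex = .6)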
#' Process Generations for Pedigree Simulation
#'
#' This function iterates through generations in a pedigree simulation, assigning IDs,
#' creating data frames, determining sexes, and managing pairing within each generation.
#'
#' @inheritParams simulatePedigree
#' @inheritParams createGenDataFrame
#' @return A data frame representing the simulated pedigree, including columns for family ID (`fam`),
#' individual ID (`id`), generation (`gen`), father's ID (`pat`), mother's ID (`mat`),
#' spouse's ID (`spt`), and sex (`sex`).
buildWithinGenerations <- function(sizeGens, marR, sexR, Ngen) {
  for (i in 1:Ngen) {
    idGen <- as.numeric(paste(100, i, 1:sizeGens[i], sep = ""))

    ### For each generation, create a separate dataframe
    df_Ngen <- createGenDataFrame(
      sizeGens = sizeGens,
      genIndex = i,
      idGen = idGen
    )

    ### Assign sex within each generation first
    df_Ngen$sex <- determineSex(idGen = idGen, sexR = sexR)

    # The first generation is always a single fertilized couple
    if (i == 1) {
      df_Ngen$spt[1] <- df_Ngen$id[2]
      df_Ngen$spt[2] <- df_Ngen$id[1]
      df_Ngen$sex[1] <- "F"
      df_Ngen$sex[2] <- "M"
    }

    ## Connect males and females into couples in each generation
    marR_crt <- (1 + marR) / 2
    usedFemaleIds <- numeric()
    usedMaleIds <- numeric()

    # reserve the single persons
    if (i != 1 && i != Ngen) {
      nMarriedFemale <- round(sum(df_Ngen$sex == "F") * marR_crt)
      nMarriedMale <- round(sum(df_Ngen$sex == "M") * marR_crt)
      # make sure there are equal numbers of married males and females
      if (nMarriedFemale >= nMarriedMale) {
        nMarriedFemale <- nMarriedMale
      } else {
        nMarriedMale <- nMarriedFemale
      }
      # get the number of single males and females
      nSingleFemale <- sum(df_Ngen$sex == "F") - nMarriedFemale
      nSingleMale <- sum(df_Ngen$sex == "M") - nMarriedMale
      # sample single ids from the male ids and female ids
      usedFemaleIds <- sample(df_Ngen$id[df_Ngen$sex == "F"], nSingleFemale)
      usedMaleIds <- sample(df_Ngen$id[df_Ngen$sex == "M"], nSingleMale)
      usedIds <- c(usedFemaleIds, usedMaleIds)

      # Create spouses: pair each unreserved person with the first unreserved
      # person of the opposite sex
      for (j in seq_len(nrow(df_Ngen))) {
        if (df_Ngen$id[j] %in% usedIds) {
          next
        } else {
          if (df_Ngen$sex[j] == "F") {
            for (k in seq_len(nrow(df_Ngen))) {
              idr <- df_Ngen$id[k]
              tgt <- (!(idr %in% usedIds)) & df_Ngen$sex[k] == "M"
              if (tgt) {
                df_Ngen$spt[j] <- df_Ngen$id[k]
                df_Ngen$spt[k] <- df_Ngen$id[j]
                usedIds <- c(usedIds, df_Ngen$id[j], df_Ngen$id[k])
                break
              } else {
                next
              }
            }
          } else {
            for (k in seq_len(nrow(df_Ngen))) {
              idr <- df_Ngen$id[k]
              tgt <- (!(idr %in% usedIds)) & df_Ngen$sex[k] == "F"
              if (tgt) {
                df_Ngen$spt[j] <- df_Ngen$id[k]
                df_Ngen$spt[k] <- df_Ngen$id[j]
                usedIds <- c(usedIds, df_Ngen$id[j], df_Ngen$id[k])
                break
              } else {
                next
              }
            }
          }
        }
      }
    }
    if (i == 1) {
      df_Fam <- df_Ngen
    } else {
      df_Fam <- rbind(df_Fam, df_Ngen)
    }
  }
  return(df_Fam)
}

#' Process Generation Connections
#'
#' This function processes connections between each two generations in a pedigree simulation.
#' It marks individuals as parents, sons, or daughters based on their generational position and relationships.
#' The function also handles the assignment of couple IDs, manages single and coupled individuals,
#' and establishes parent-offspring links across generations.
#' @param df_Fam A data frame containing the simulated pedigree information up to the current generation.
#' Must include columns for family ID, individual ID, generation number, spouse ID (spt),
#' and sex. This data frame is updated to include flags for parental status (ifparent),
#' son status (ifson), and daughter status (ifdau), as well as couple IDs.
#' @inheritParams simulatePedigree
#' @inheritParams createGenDataFrame
#'
#' @details
#' The function iterates through each generation, starting from the second, to establish connections based on mating and parentage.
#' For the first generation, it sets the parental status directly. For subsequent generations, it calculates the number of couples,
#' the expected number of offspring, and assigns offspring to parents. It handles sex-based assignments for sons and daughters,
#' and deals with the nuances of single individuals and couple formation. The function relies on the external functions `assignCoupleIds`
#' and `adjustKidsPerCouple` to handle couple ID assignment and offspring number adjustment, respectively.
#'
#' @return The updated `df_Fam` data frame, with added or modified columns related to parental and offspring status,
#' as well as unique couple IDs.
#'
buildBetweenGenerations <- function(df_Fam, Ngen, sizeGens, verbose, marR, sexR, kpc, rd_kpc) {
  df_Fam$ifparent <- FALSE
  df_Fam$ifson <- FALSE
  df_Fam$ifdau <- FALSE
  for (i in 1:Ngen) {
    # generation 1 doesn't need any mother and father
    if (i == 1) {
      df_Ngen <- df_Fam[df_Fam$gen == i, ]
      df_Ngen$ifparent <- TRUE
      df_Ngen$ifson <- FALSE
      df_Ngen$ifdau <- FALSE
      df_Fam[df_Fam$gen == i, ] <- df_Ngen
    } else {
      # calculate the number of couples in the i-1 th generation
      N_couples <- (sizeGens[i - 1] - sum(is.na(df_Fam$spt[df_Fam$gen == i - 1]))) * 0.5
      # calculate the number of members in the i th generation that have a link to the couples in the i-1 th generation
      N_LinkedMem <- N_couples * kpc
      # decompose the linked members into females and males respectively
      N_LinkedFemale <- round(N_LinkedMem * (1 - sexR))
      N_LinkedMale <- N_LinkedMem - N_LinkedFemale
      # Create pools for used male and female children respectively
      usedFemaleIds <- numeric()
      usedMaleIds <- numeric()
      usedIds <- c(usedFemaleIds, usedMaleIds)
      # get the df for the i th generation
      df_Ngen <- df_Fam[df_Fam$gen == i, ]
      df_Ngen$ifparent <- FALSE
      df_Ngen$ifson <- FALSE
      df_Ngen$ifdau <- FALSE
      df_Ngen$coupleId <- NA_character_
      df_Ngen <- df_Ngen[sample(nrow(df_Ngen)), ]

      # Start to connect children with mother and father
      if (verbose) {
        print("Step 2.1: mark a group of potential sons and daughters in the i th generation")
      }
      # count the number of couples in the i th gen
      countCouple <- (nrow(df_Ngen) - sum(is.na(df_Ngen$spt))) * .5
      # Now, assign couple IDs for the current generation
      df_Ngen <- assignCoupleIds(df_Ngen)

      # get the number of linked female and male children after excluding the single children;
      # get a vector of single person ids in the i th generation
      IdSingle <- df_Ngen$id[is.na(df_Ngen$spt)]
      SingleF <- sum(df_Ngen$sex == "F" & is.na(df_Ngen$spt))
      CoupleF <- N_LinkedFemale - SingleF
      SingleM <- sum(df_Ngen$sex == "M" & is.na(df_Ngen$spt))
      CoupleM <- N_LinkedMale - SingleM

      df_Fam[df_Fam$gen == i, ] <- markPotentialChildren(
        df_Ngen = df_Ngen,
        i = i,
        Ngen = Ngen,
        sizeGens = sizeGens,
        CoupleF = CoupleF
      )

      if (verbose) {
        print("Step 2.2: mark a group of potential parents in the i-1 th generation")
      }
      df_Ngen <- df_Fam[df_Fam$gen == i - 1, ]
      df_Ngen$ifparent <- FALSE
      df_Ngen$ifson <- FALSE
      df_Ngen$ifdau <- FALSE
      df_Ngen <- df_Ngen[sample(nrow(df_Ngen)), ]
      # Create a pool for the used parents
      usedParentIds <- numeric()
      for (k in 1:sizeGens[i - 1]) {
        # first check if the number of married couples surpasses the marriage rate
        if (sum(df_Ngen$ifparent) / nrow(df_Ngen) >= marR) {
          break
        } else {
          # check if the id is used and if the member is married
          if (!(df_Ngen$id[k] %in% usedParentIds) & !is.na(df_Ngen$spt[k])) {
            df_Ngen$ifparent[k] <- TRUE
            df_Ngen$ifparent[df_Ngen$spt == df_Ngen$id[k]] <- TRUE
            usedParentIds <- c(usedParentIds, df_Ngen$id[k], df_Ngen$spt[k])
          } else {
            next
          }
        }
      }
      df_Ngen <- df_Ngen[order(as.numeric(rownames(df_Ngen))), , drop = FALSE]
      df_Fam[df_Fam$gen == i - 1, ] <- df_Ngen

      if (verbose) {
        print("Step 2.3: connect the i and i-1 th generation")
      }
      if (i == 1) {
        next
      } else {
        # get the df for the i and i-1 th generations
        df_Ngen <- df_Fam[df_Fam$gen %in% c(i, i - 1), ]
        sizeI <- sizeGens[i - 1]
        sizeII <- sizeGens[i]
        # create a vector with ordered ids that should be connected to a parent
        IdSon <- df_Ngen$id[df_Ngen$ifson == TRUE & df_Ngen$gen == i]
        IdDau <- df_Ngen$id[df_Ngen$ifdau == TRUE & df_Ngen$gen == i]
        IdOfp <- evenInsert(IdSon, IdDau)

        # generate the number of kids to link to each couple
        random_numbers <- adjustKidsPerCouple(nMates = sum(df_Ngen$ifparent) / 2, kpc = kpc, rd_kpc = rd_kpc)

        # create two vectors for maId and paId; replicate the ids to match the same length as IdOfp
        IdMa <- numeric()
        IdPa <- numeric()
        usedIds <- numeric()
        idx <- 1
        for (l in 1:sizeI) {
          # check if the id is used
          if (!df_Ngen$id[l] %in% usedIds) {
            # check if the member can be a parent
            if (df_Ngen$ifparent[l] == TRUE && df_Ngen$sex[l] == "F") {
              usedIds <- c(usedIds, df_Ngen$id[l], df_Ngen$spt[l])
              IdMa <- c(IdMa, rep(df_Ngen$id[l], random_numbers[idx]))
              IdPa <- c(IdPa, rep(df_Ngen$spt[l], random_numbers[idx]))
              idx <- idx + 1
            } else if (df_Ngen$ifparent[l] == TRUE && df_Ngen$sex[l] == "M") {
              usedIds <- c(usedIds, df_Ngen$id[l], df_Ngen$spt[l])
              IdPa <- c(IdPa, rep(df_Ngen$id[l], random_numbers[idx]))
              IdMa <- c(IdMa, rep(df_Ngen$spt[l], random_numbers[idx]))
              idx <- idx + 1
            } else {
              next
            }
          } else {
            next
          }
        }
        # The IdMa and IdPa vectors can be longer than the offspring vector, so
        # truncate them, making sure to sample out single people rather than couples
        if (length(IdPa) - length(IdOfp) > 0) {
          IdRm <- sample.int(length(IdPa), size = length(IdPa) - length(IdOfp))
          IdPa <- IdPa[-IdRm]
          IdMa <- IdMa[-IdRm]
        } else if (length(IdPa) - length(IdOfp) < 0) {
          IdRm <- resample(IdSingle, size = length(IdOfp) - length(IdPa))
          IdOfp <- IdOfp[!(IdOfp %in% IdRm)]
        }
        # write IdMa and IdPa into df_Fam for the corresponding offspring ids
        for (m in seq_along(IdOfp)) {
          df_Ngen[df_Ngen$id == IdOfp[m], "pat"] <- IdPa[m]
          df_Ngen[df_Ngen$id == IdOfp[m], "mat"] <- IdMa[m]
        }
        df_Fam[df_Fam$gen == i, ] <- df_Ngen[df_Ngen$gen == i, ]
        df_Fam[df_Fam$gen == i - 1, ] <- df_Ngen[df_Ngen$gen == i - 1, ]
      }
    }
  }
  return(df_Fam)
}

#' Simulate Pedigrees
#' This function simulates "balanced" pedigrees based on a group of parameters:
#' 1) k - Kids per couple;
#' 2) G - Number of generations;
#' 3) p - Proportion of males in offspring;
#' 4) r - Mating rate.
#'
#' @importFrom stats runif
#' @param kpc Number of kids per couple. An integer >= 2 that determines how many kids each fertilized mated couple will have in the pedigree. Default value is 3. Returns an error when kpc equals 1.
#' @param Ngen Number of generations. An integer >= 2 that determines how many generations the simulated pedigree will have. The first generation is always a fertilized couple. The last generation has no mated individuals.
#' @param sexR Sex ratio of offspring. A numeric value ranging from 0 to 1 that determines the proportion of males in all offspring in this pedigree. For instance, 0.4 means 40 percent of the offspring will be male.
#' @param marR Mating rate. A numeric value ranging from 0 to 1 which determines the proportion of mated (fertilized) couples in the pedigree within each generation. For instance, marR = 0.5 suggests 50 percent of the offspring in a specific generation will be mated and have their own offspring.
#' @param rd_kpc logical. If TRUE, the number of kids per mate will be randomly generated from a Poisson distribution with mean kpc. If FALSE, the number of kids per mate will be fixed at kpc.
#' @param balancedSex Not fully developed yet. Always \code{TRUE} in the current version.
#' @param balancedMar Not fully developed yet. Always \code{TRUE} in the current version.
#' @param verbose logical. If TRUE, print progress through stages of the algorithm.
#' @return A \code{data.frame} with each row representing a simulated individual. The columns are as follows:
#' \itemize{
#' \item{fam: The family id of each simulated individual. It is 'fam1' in a single simulated pedigree.}
#' \item{ID: The unique personal ID of each simulated individual. The first digit is the fam id; the fourth digit is the generation the individual is in; the following digits represent the order of the individual within his/her generation. For example, 100411 means this individual has a family id of 1, is in the 4th generation, and is the 11th individual in the 4th generation.}
#' \item{gen: The generation the simulated individual is in.}
#' \item{dadID: Personal ID of the individual's father.}
#' \item{momID: Personal ID of the individual's mother.}
#' \item{spt: Personal ID of the individual's mate.}
#' \item{sex: Biological sex of the individual. F - female; M - male.}
#' }
#' @export
simulatePedigree <- function(kpc = 3,
                             Ngen = 4,
                             sexR = .5,
                             marR = 2 / 3,
                             rd_kpc = FALSE,
                             balancedSex = TRUE,
                             balancedMar = TRUE,
                             verbose = FALSE) {
  # Calculate the expected family size in each generation
  sizeGens <- allGens(kpc = kpc, Ngen = Ngen, marR = marR)

  if (verbose) {
    print("Step 1: Let's build the connections within each generation first")
  }
  df_Fam <- buildWithinGenerations(
    sizeGens = sizeGens, Ngen = Ngen, sexR = sexR, marR = marR
  )

  if (verbose) {
    print("Step 2: Let's try to build connections between each two generations")
  }
  df_Fam <- buildBetweenGenerations(
    df_Fam = df_Fam, Ngen = Ngen, sizeGens = sizeGens, verbose = verbose,
    marR = marR, sexR = sexR, kpc = kpc, rd_kpc = rd_kpc
  )

  # Keep the core columns and drop members with no links at all
  df_Fam <- df_Fam[, 1:7]
  df_Fam <- df_Fam[!(is.na(df_Fam$pat) & is.na(df_Fam$mat) & is.na(df_Fam$spt)), ]
  colnames(df_Fam)[c(2, 4, 5)] <- c("ID", "dadID", "momID")

  # connect the detached members (not yet implemented)
  # df_Fam[is.na(df_Fam$momID) & is.na(df_Fam$dadID) & df_Fam$gen > 1, ]

  # If the sex ratio is .5, males and females could be randomly swapped; this
  # is disabled because it sometimes led to moms being classified as dads.
  # if (sexR == .5 & runif(1) > .5) {
  #   df_Fam$sex[df_Fam$sex == "M"] <- "F1"
  #   df_Fam$sex[df_Fam$sex == "F"] <- "M"
  #   df_Fam$sex[df_Fam$sex == "F1"] <- "F"
  # }
  return(df_Fam)
}
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/simulatePedigree.R
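## ---- Usage sketch (illustrative; not part of the package source) ----
## A four-generation pedigree with 3 kids per couple, a 50/50 sex ratio,
## and a 2/3 mating rate:
set.seed(2023)
ped <- simulatePedigree(kpc = 3, Ngen = 4, sexR = .5, marR = 2 / 3)
str(ped) # columns: fam, ID, gen, dadID, momID, spt, sex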
#' makeTwins
#' A function to impute twins in the simulated pedigree \code{data.frame}.
#' Twins can be imputed by specifying their IDs or by specifying the generation the twin should be imputed.
#' This is a supplementary function for \code{simulatePedigree}.
#' @param ped A \code{data.frame} in the same format as the output of \code{simulatePedigree}.
#' @param ID_twin1 A vector of \code{ID} of the first twin.
#' @param ID_twin2 A vector of \code{ID} of the second twin.
#' @param verbose logical. If TRUE, print progress through stages of algorithm
#' @param gen_twin A vector of \code{generation} of the twin to be imputed.
#' @return Returns a \code{data.frame} with MZ twin information added as a new column.
#' @export
makeTwins <- function(ped, ID_twin1 = NA_integer_, ID_twin2 = NA_integer_, gen_twin = 2, verbose = FALSE) {
  # Check if the ped is in the same format as the output of simulatePedigree
  if (paste0(colnames(ped), collapse = "") != paste0(c(
    "fam", "ID", "gen",
    "dadID", "momID",
    "spt", "sex"
  ), collapse = "")) {
    ped <- standardizeColnames(ped)
    if (verbose) {
      cat("The input pedigree is not in the same format as the output of simulatePedigree\n")
    }
  }
  ped$MZtwin <- NA_integer_
  # Check if the two IDs are provided
  if (is.na(ID_twin1) || is.na(ID_twin2)) {
    # Check if the generation is provided
    if (is.na(gen_twin)) {
      stop("You should provide either the IDs of the twins or the generation of the twins")
    } else {
      # Check if the generation is valid
      if (gen_twin < 2 || gen_twin > max(ped$gen)) {
        stop("The generation of the twins should be an integer between 2 and the maximum generation in the pedigree")
      } else {
        idx <- nrow(ped[ped$gen == gen_twin & !is.na(ped$dadID), ])
        usedID <- c()
        # randomly loop through the individuals in the generation until finding an individual
        # who is the same sex and shares the same dadID and momID with another individual
        for (i in 1:idx) {
          if (verbose) {
            cat("loop", i, "\n")
          }
          usedID <- c(usedID, ID_twin1)
          # check whether there are still unused candidates in the generation
          if (i < idx) {
            # randomly select one individual from the generation
            ID_twin1 <- resample(ped$ID[ped$gen == gen_twin & !(ped$ID %in% usedID) & !is.na(ped$dadID)], 1)
            # find one same-sex sibling who has the same dadID and momID as the selected individual
            twin2_Pool <- ped$ID[ped$ID != ID_twin1 &
              ped$gen == gen_twin &
              ped$sex == ped$sex[ped$ID == ID_twin1] &
              ped$dadID == ped$dadID[ped$ID == ID_twin1] &
              ped$momID == ped$momID[ped$ID == ID_twin1]]
            # if there is a non-NA value in twin2_Pool, get rid of the NA values
            if (all(is.na(twin2_Pool))) {
              if (verbose) {
                cat("twin2_Pool is all NA\n")
              }
              next
            } else {
              twin2_Pool <- twin2_Pool[!is.na(twin2_Pool)]
              ID_twin2 <- resample(twin2_Pool, 1)
              break
            }
          } else {
            # randomly select all males or all females in the generation and put them in a vector
            selectGender <- ped$ID[ped$gen == gen_twin &
              ped$sex == resample(c("M", "F"), 1) &
              !is.na(ped$dadID) & !is.na(ped$momID)]
            # randomly select two individuals from the vector
            ID_DoubleTwin <- sample(selectGender, 2)
            # change the second person's dadID and momID to the first person's dadID and momID
            ped$dadID[ped$ID == ID_DoubleTwin[2]] <- ped$dadID[ped$ID == ID_DoubleTwin[1]]
            ped$momID[ped$ID == ID_DoubleTwin[2]] <- ped$momID[ped$ID == ID_DoubleTwin[1]]
            # let the two individuals be twins!
            ID_twin1 <- ID_DoubleTwin[1]
            ID_twin2 <- ID_DoubleTwin[2]
            break
          }
        }
        # Impute the IDs of the twins in the MZtwin column
        ped$MZtwin[ped$ID == ID_twin1] <- ID_twin2
        ped$MZtwin[ped$ID == ID_twin2] <- ID_twin1
      }
    }
  } else {
    # Impute the IDs of the twins in the MZtwin column
    ped$MZtwin[ped$ID == ID_twin1] <- ID_twin2
    ped$MZtwin[ped$ID == ID_twin2] <- ID_twin1
  }
  if (verbose) {
    cat("twin1", ID_twin1, "\n")
    cat("twin2", ID_twin2, "\n")
  }
  return(ped)
}

#' makeInbreeding
#' A function to create inbred mates in the simulated pedigree \code{data.frame}.
#' Inbred mates can be created by specifying their IDs or the generation in which the inbred mates should be created.
#' When specifying the generation, inbreeding between siblings or 1st cousins needs to be specified.
#' This is a supplementary function for \code{simulatePedigree}.
#' @param ped A \code{data.frame} in the same format as the output of \code{simulatePedigree}.
#' @param ID_mate1 A vector of \code{ID} of the first mate. If not provided, the function will randomly select two individuals from the second generation.
#' @param ID_mate2 A vector of \code{ID} of the second mate.
#' @param verbose logical. If TRUE, print progress through stages of algorithm
#' @param gen_inbred A vector of \code{generation} of the inbred mates to be created.
#' @param type_inbred A character vector indicating the type of inbreeding. "sib" for sibling inbreeding and "cousin" for cousin inbreeding.
#' @return Returns a \code{data.frame} with some inbred mates.
#' @details
#' This function creates inbred mates in the simulated pedigree \code{data.frame}. Its purpose is to evaluate the effect of inbreeding on model fitting and parameter estimation. In case it needs to be said, we do not condone inbreeding in real life. But we recognize that it is a common practice in some fields to create inbred strains for research purposes.
#' @export
makeInbreeding <- function(ped,
                           ID_mate1 = NA_integer_,
                           ID_mate2 = NA_integer_,
                           verbose = FALSE,
                           gen_inbred = 2,
                           type_inbred = "sib") {
  # check if the ped is in the same format as the output of simulatePedigree
  if (paste0(colnames(ped), collapse = "") != paste0(
    c("fam", "ID", "gen", "dadID", "momID", "spt", "sex"),
    collapse = ""
  )) {
    ped <- standardizeColnames(ped)
    if (verbose) {
      cat("The input pedigree is not in the same format as the output of simulatePedigree\n")
    }
  }
  # check if the type of inbreeding is valid
  if (type_inbred %in% c("siblings", "sib", "sibling")) {
    type_inbred <- "sib"
  } else if (type_inbred %in% c("cousins", "cousin")) {
    stop("Cousin inbreeding is not supported yet. Please use 'sib' for sibling inbreeding.")
  } else {
    stop("The type of inbreeding should be either 'sib' or 'cousin'")
  }
  # check if the two IDs are provided
  if (is.na(ID_mate1) || is.na(ID_mate2)) {
    # Check if the generation is provided
    if (is.na(gen_inbred)) {
      stop("You should provide either the IDs of the inbred mates or the generation of the inbred mates")
    } else {
      # Check if the generation is valid
      if (gen_inbred < 2 || gen_inbred > max(ped$gen)) {
        stop("The generation of the mates should be an integer between 2 and the maximum generation in the pedigree")
      } else {
        if (type_inbred == "sib") {
          # loop through all the nuclear families in the generation
          idx <- nrow(ped[ped$gen == gen_inbred & !is.na(ped$dadID), ])
          usedID <- c()
          ID_mate2 <- NA_integer_
          for (i in 1:idx) {
            # if ID_mate2 is not missing, break the loop
            if (!is.na(ID_mate2)) {
              break
            }
            ID_pool_mate1 <- ped$ID[ped$gen == gen_inbred &
              !is.na(ped$dadID) & !is.na(ped$momID) &
              is.na(ped$spt) & !(ped$ID %in% usedID)]
            # if the pool is empty, fall back to individuals with parents in the pedigree who are already mated
            if (length(ID_pool_mate1) == 0) {
              ID_pool_mate1 <- ped$ID[ped$gen == gen_inbred &
                !is.na(ped$dadID) & !is.na(ped$momID) &
                !(ped$ID %in% usedID)]
            }
            ID_mate1 <- resample(ID_pool_mate1, 1)
            usedID <- c(usedID, ID_mate1)
            # try to find one opposite-sex individual who has the same dadID and momID as the selected individual, preferably not mated
            ID_pool_mate2 <- ped$ID[ped$gen == gen_inbred &
              ped$sex != ped$sex[ped$ID == ID_mate1] &
              ped$dadID == ped$dadID[ped$ID == ID_mate1] &
              ped$momID == ped$momID[ped$ID == ID_mate1] &
              is.na(ped$spt)]
            # if the pool is not empty, randomly select one individual from the pool
            if (length(ID_pool_mate2) > 0) {
              ID_mate2 <- resample(ID_pool_mate2, 1)
            } else {
              # if the pool is empty, consider full siblings who are already mated
              ID_pool_mate2 <- ped$ID[ped$gen == gen_inbred &
                ped$sex != ped$sex[ped$ID == ID_mate1] &
                ped$dadID == ped$dadID[ped$ID == ID_mate1] &
                ped$momID == ped$momID[ped$ID == ID_mate1]]
              # if the pool is not empty, randomly select one individual from the pool
              if (length(ID_pool_mate2) > 0) {
                ID_mate2 <- resample(ID_pool_mate2, 1)
              } else {
                next
              }
            }
          }
        } else if (type_inbred == "cousin") {
          stop("cousin inbreeding is not supported yet\n")
        } else {
          stop("The type of inbreeding should be either sib or cousin")
        }
      }
    }
  }
  # save the two individuals' former mates' IDs if they have any
  ID_mate1_former_mate <- ped$spt[ped$ID == ID_mate1]
  ID_mate2_former_mate <- ped$spt[ped$ID == ID_mate2]
  # remove the two individuals' former mates from the pedigree if they have any
  ped$spt[ped$ID == ID_mate1] <- NA_integer_
  ped$spt[ped$ID == ID_mate2] <- NA_integer_
  # change the spouse ID of ID_mate1 and ID_mate2 to each other
  ped$spt[ped$ID == ID_mate1] <- ID_mate2
  ped$spt[ped$ID == ID_mate2] <- ID_mate1
  # reassign individuals in the next generation whose dadID or momID is one of the former mates to ID_mate1 or ID_mate2
  for (j in seq_len(nrow(ped))) {
    if (!is.na(ped$dadID[j]) && !is.na(ID_mate1_former_mate) && ped$dadID[j] == ID_mate1_former_mate) {
      ped$dadID[j] <- ID_mate2
    }
    if (!is.na(ped$momID[j]) && !is.na(ID_mate1_former_mate) && ped$momID[j] == ID_mate1_former_mate) {
      ped$momID[j] <- ID_mate2
    }
    if (!is.na(ped$dadID[j]) && !is.na(ID_mate2_former_mate) && ped$dadID[j] == ID_mate2_former_mate) {
      ped$dadID[j] <- ID_mate1
    }
    if (!is.na(ped$momID[j]) && !is.na(ID_mate2_former_mate) && ped$momID[j] == ID_mate2_former_mate) {
      ped$momID[j] <- ID_mate1
    }
  }
  return(ped)
}

#' dropLink
#' A function to drop a person from his/her parents in the simulated pedigree \code{data.frame}.
#' The person can be dropped by specifying his/her ID or by specifying the generation from which a random person is dropped.
#' The function can separate one pedigree into two pedigrees. Separating into smaller pieces should be done by running the function multiple times.
#' This is a supplementary function for \code{simulatePedigree}.
#' @param ped a pedigree simulated from the simulatePedigree function, or one in the same format
#' @param ID_drop the ID of the person to be dropped from his/her parents.
#' @param gen_drop the generation from which the randomly dropped person is selected. Only used if `ID_drop` is not specified.
#' @param sex_drop the biological sex of the randomly dropped person.
#' @param n_drop the number of individuals to drop (i.e., the number of times the link-dropping mutation happens).
#' @return a pedigree with the dropped person's `dadID` and `momID` set to NA.
#' @export
dropLink <- function(ped,
                     ID_drop = NA_integer_,
                     gen_drop = 2,
                     sex_drop = NA_character_,
                     n_drop = 1) {
  # check if ID_drop is specified
  if (all(is.na(ID_drop))) {
    # check if sex_drop is specified
    if (is.na(sex_drop)) {
      ID_drop <- resample(ped$ID[ped$gen == gen_drop & !is.na(ped$dadID) & !is.na(ped$momID)], n_drop)
    } else {
      ID_drop <- resample(ped$ID[ped$gen == gen_drop & !is.na(ped$dadID) & !is.na(ped$momID) & ped$sex == sex_drop], n_drop)
    }
    # any() guards against a vector condition when n_drop > 1
    if (any(!is.na(ID_drop))) {
      ped[ped$ID %in% ID_drop, c("dadID", "momID")] <- NA_integer_
    } else {
      warning("No individual is dropped from his/her parents.")
    }
  } else {
    ped[ped$ID == ID_drop, c("dadID", "momID")] <- NA_integer_
  }
  return(ped)
}
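# Illustrative usage sketch (not run; the parameter choices below are
# arbitrary assumptions for demonstration, not package defaults):
# ped <- simulatePedigree(kpc = 4, Ngen = 4, sexR = .5, marR = .7)
# ped_twins <- makeTwins(ped, gen_twin = 3)                 # impute an MZ pair in generation 3
# ped_inbred <- makeInbreeding(ped, gen_inbred = 2, type_inbred = "sib")
# ped_cut <- dropLink(ped, gen_drop = 2, n_drop = 1)        # detach one person from their parents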
/scratch/gouwar.j/cran-all/cranData/BGmisc/R/tweakPedigree.R
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
options(rmarkdown.html_vignette.check_title = FALSE)

## ----setup--------------------------------------------------------------------
library(BGmisc)

## -----------------------------------------------------------------------------
# Example usage:
# For full siblings, the relatedness coefficient is expected to be 0.5:
calculateRelatedness(generations = 1, full = TRUE)

# For half siblings, the relatedness coefficient is expected to be 0.25:
calculateRelatedness(generations = 1, full = FALSE)

## -----------------------------------------------------------------------------
# Example usage:
# Infer the relatedness coefficient:
inferRelatedness(obsR = 0.5, aceA = 0.9, aceC = 0, sharedC = 0)
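## -----------------------------------------------------------------------------
# Illustrative sketch (our assumption about the arithmetic, not package
# output): with no shared environment, obsR = r * aceA, so the inferred
# coefficient is obsR / aceA:
0.5 / 0.9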
/scratch/gouwar.j/cran-all/cranData/BGmisc/inst/doc/analyticrelatedness.R
--- title: "Calculating and Inferring Relatedness Coefficients with BGmisc" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Relatedness Coefficients} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) options(rmarkdown.html_vignette.check_title = FALSE) ``` # Introduction This vignette focuses on the calculation and inference of relatedness coefficients using the `BGmisc` package. The relatedness coefficient is a measure of the genetic relationship between two individuals. Here, we introduce two functions: `calculateRelatedness` and `inferRelatedness`, which allow users to compute and infer the relatedness coefficient respectively. ## Loading Required Libraries ```{r setup} library(BGmisc) ``` # Calculating Relatedness Coefficient The `calculateRelatedness` function offers a method to compute the relatedness coefficient based on shared ancestry, as described by Wright (1922). This function utilizes the formula: \[ r_{bc} = \sum \left(\frac{1}{2}\right)^{n+n'+1} (1+f_a) \] Where \( n \) and \( n' \) represent the number of generations back of common ancestors the pair share. ```{r} # Example usage: # For full siblings, the relatedness coefficient is expected to be 0.5: calculateRelatedness(generations = 1, full = TRUE) # For half siblings, the relatedness coefficient is expected to be 0.25: calculateRelatedness(generations = 1, full = FALSE) ``` # Inferring Relatedness Coefficient The `inferRelatedness` function is designed to infer the relatedness coefficient between two groups based on the observed correlation between their additive genetic variance and shared environmental variance. This function leverages the ACE framework. ```{r} # Example usage: # Infer the relatedness coefficient: inferRelatedness(obsR = 0.5, aceA = 0.9, aceC = 0, sharedC = 0) ```
/scratch/gouwar.j/cran-all/cranData/BGmisc/inst/doc/analyticrelatedness.Rmd
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
options(rmarkdown.html_vignette.check_title = FALSE)

## ----setup, include=FALSE-----------------------------------------------------
library(BGmisc)
require(EasyMx)
require(OpenMx)

## -----------------------------------------------------------------------------
library(BGmisc)
library(EasyMx)
library(OpenMx)

## -----------------------------------------------------------------------------
comp2vech(list(
  matrix(c(1, .5, .5, 1), 2, 2),
  matrix(1, 2, 2)
))

## -----------------------------------------------------------------------------
identifyComponentModel(
  A = list(matrix(1, 2, 2)),
  C = list(matrix(1, 2, 2)),
  E = diag(1, 2)
)

## -----------------------------------------------------------------------------
identifyComponentModel(
  A = list(matrix(c(1, .5, .5, 1), 2, 2), matrix(1, 2, 2)),
  C = list(matrix(1, 2, 2), matrix(1, 2, 2)),
  E = diag(1, 4)
)

## -----------------------------------------------------------------------------
require(dplyr)
# require(purrr)
data(twinData, package = "OpenMx")
selVars <- c("ht1", "ht2")
mzdzData <- subset(
  twinData, zyg %in% c(1, 3),
  c(selVars, "zyg")
)
mzdzData$RCoef <- c(1, NA, .5)[mzdzData$zyg]

mzData <- mzdzData %>% filter(zyg == 1)

## -----------------------------------------------------------------------------
run1 <- emxTwinModel(
  model = "Cholesky",
  relatedness = "RCoef",
  data = mzData, use = selVars,
  run = TRUE, name = "TwCh"
)

summary(run1)

## -----------------------------------------------------------------------------
run2 <- emxTwinModel(
  model = "Cholesky",
  relatedness = "RCoef",
  data = mzdzData, use = selVars,
  run = TRUE, name = "TwCh"
)

summary(run2)
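## -----------------------------------------------------------------------------
# Illustrative sketch: identification check for a reduced AE model (no shared
# environment), assuming identifyComponentModel accepts any named components,
# as its use above suggests:
identifyComponentModel(
  A = list(matrix(c(1, .5, .5, 1), 2, 2), matrix(1, 2, 2)),
  E = diag(1, 4)
)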
/scratch/gouwar.j/cran-all/cranData/BGmisc/inst/doc/modelingrelatedness.R
--- title: "Modeling and Relatedness" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{modelingandrelatedness} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) options(rmarkdown.html_vignette.check_title = FALSE) ``` # Introduction This vignette provides a detailed guide to specific functions within the `BGmisc` package that aid in the identification and fitting of variance component models common in behavior genetics. We will explore key functions such as `identifyComponentModel`, providing practical examples and theoretical background. Identification ensures a unique set of parameters that define the model-implied covariance matrix, preventing free parameters from trading off one another. ## Loading Required Libraries ```{r setup, include=FALSE} library(BGmisc) require(EasyMx) require(OpenMx) ``` Ensure that the `BGmisc` package is installed and loaded. Ensure that the following dependencies are installed before proceeding as they provide us with behavior genetic data and models: - `EasyMx` - `OpenMx` ```{r} library(BGmisc) library(EasyMx) library(OpenMx) ``` Note: If any of the libraries are not installed, you can install them using install.packages("`package_name`"). # Working with Variance Component Models In this section, we will demonstrate core functions related to the identification and fitting of variance component models. ## Using `comp2vech` Function The `comp2vech` function is used to vectorize a components model. The function is often used in conjunction with the identification process. In this example, we apply it to a list of matrices: ```{r} comp2vech(list( matrix(c(1, .5, .5, 1), 2, 2), matrix(1, 2, 2) )) ``` The result showcases how the matrices have been transformed, reflecting their role in subsequent variance component analysis. ## Using `identifyComponentModel` Function The `identifyComponentModel` function helps determine if a variance components model is identified. It accepts relatedness component matrices and returns information about identified and non-identified parameters. Here's an example using the classical twin model *with only MZ twins*: ```{r} identifyComponentModel( A = list(matrix(1, 2, 2)), C = list(matrix(1, 2, 2)), E = diag(1, 2) ) ``` As you can see, the model is not identified. We need to add an additional group so that we have sufficient information. Let us add the rest of the classical twin model, in this case DZ twins. ```{r} identifyComponentModel( A = list(matrix(c(1, .5, .5, 1), 2, 2), matrix(1, 2, 2)), C = list(matrix(1, 2, 2), matrix(1, 2, 2)), E = diag(1, 4) ) ``` As you can see the model is identified, now that we've added another group. Let us confirm by fitting a model. First we prepare the data. ```{r} require(dplyr) # require(purrr) data(twinData, package = "OpenMx") selVars <- c("ht1", "ht2") mzdzData <- subset( twinData, zyg %in% c(1, 3), c(selVars, "zyg") ) mzdzData$RCoef <- c(1, NA, .5)[mzdzData$zyg] mzData <- mzdzData %>% filter(zyg == 1) ``` Let us fit the data with MZ twins by themselves. ```{r} run1 <- emxTwinModel( model = "Cholesky", relatedness = "RCoef", data = mzData, use = selVars, run = TRUE, name = "TwCh" ) summary(run1) ``` As you can see the model was unsuccessful because it was not identified. But when we add another group, so that the model is identified, the model now fits. 
```{r} run2 <- emxTwinModel( model = "Cholesky", relatedness = "RCoef", data = mzdzData, use = selVars, run = TRUE, name = "TwCh" ) summary(run2) ```
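As a further illustration, the same check can be applied to a reduced AE model (dropping the shared environment) fit to the MZ and DZ groups together. This sketch assumes, as the calls above suggest, that `identifyComponentModel` accepts any named set of relatedness components:

```{r}
identifyComponentModel(
  A = list(matrix(c(1, .5, .5, 1), 2, 2), matrix(1, 2, 2)),
  E = diag(1, 4)
)
```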
/scratch/gouwar.j/cran-all/cranData/BGmisc/inst/doc/modelingrelatedness.Rmd
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
options(rmarkdown.html_vignette.check_title = FALSE)

## ----setup--------------------------------------------------------------------
library(BGmisc)
data(potter)

## ----echo=FALSE, results='hide', out.width='50%', fig.cap="Potter Family Pedigree"----
plotPedigree(potter, code_male = 1, verbose = TRUE)

## -----------------------------------------------------------------------------
df_potter <- potter
names(df_potter)[names(df_potter) == "famID"] <- "oldfam"

ds <- ped2fam(df_potter, famID = "famID", personID = "personID")

table(ds$famID, ds$oldfam)

## -----------------------------------------------------------------------------
add <- ped2add(potter)

## -----------------------------------------------------------------------------
add[1:7, 1:7]

## -----------------------------------------------------------------------------
table(add)

## -----------------------------------------------------------------------------
add_list <- lapply(
  unique(potter$famID),
  function(d) {
    tmp <- potter[potter$famID %in% d, ]
    ped2add(tmp)
  }
)

## -----------------------------------------------------------------------------
mit <- ped2mit(potter)
mit[1:7, 1:7]
table(mit)

## -----------------------------------------------------------------------------
commonNuclear <- ped2cn(potter)
commonNuclear[1:7, 1:7]

table(commonNuclear)

## -----------------------------------------------------------------------------
extendedFamilyEnvironment <- ped2ce(potter)
extendedFamilyEnvironment[1:7, 1:7]

table(extendedFamilyEnvironment)

## ----echo=FALSE, results='hide', out.width='50%', fig.cap="Potter Subset Pedigree"----
names(potter)[names(potter) == "oldfam"] <- "famID"

subset_rows <- c(1:8, 11:36)
subset_potter <- potter[subset_rows, ]
subset_potter$dadID[subset_potter$dadID %in% c(9, 10)] <- NA
subset_potter$momID[subset_potter$momID %in% c(9, 10)] <- NA

plotPedigree(subset_potter, code_male = 1, verbose = TRUE)

## -----------------------------------------------------------------------------
subset_rows <- c(1:5, 31:36)
subset_potter <- potter[subset_rows, ]

## ----echo=FALSE, results='hide', out.width='50%', fig.cap="Potter Subset Pedigree"----
plotPedigree(subset_potter, code_male = 1, verbose = TRUE)
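## -----------------------------------------------------------------------------
# Illustrative sketch: recompute additive relatedness on the subsetted
# pedigree and inspect its distribution
add_subset <- ped2add(subset_potter)
table(add_subset)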
/scratch/gouwar.j/cran-all/cranData/BGmisc/inst/doc/network.R
--- title: "Network tools for finding extended pedigrees and path tracing" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Network} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) options(rmarkdown.html_vignette.check_title = FALSE) ``` # Introduction This vignette showcases two key features that capitalize on the network structure inherent in pedigrees: 1. Finding extended families with *any* connecting relationships between members. This feature strictly uses a person's ID, mother's ID, and father's ID to find out which people in a dataset are remotely related by any path, effectively finding all separable extended families in a dataset. 2. Using path tracing rules to quantify the *amount* of relatedness between all pairs of individuals in a dataset. The amount of relatedness can be characterized by additive nuclear DNA, shared mitochondrial DNA, sharing both parents, or being part of the same extended pedigree. ## Loading Required Libraries and Data ```{r setup} library(BGmisc) data(potter) ``` # Finding Extended Families Many pedigree datasets only contain information on the person, their mother, and their father, often without nuclear or extended family IDs. Recognizing which sets of people are unrelated simplifies many pedigree-related tasks. This function facilitates those tasks by finding all the extended families. People within the same extended family have at least some form of relation, however distant, while those in different extended families have no relations. ```{r, echo=FALSE, results='hide', out.width='50%', fig.cap="Potter Family Pedigree"} plotPedigree(potter, code_male = 1, verbose = TRUE) ``` We will use the `potter` pedigree data as an example. For convenience, we've renamed the family ID variable to `oldfam` to avoid confusion with the new family ID variable we will create. ```{r} df_potter <- potter names(df_potter)[names(df_potter) == "famID"] <- "oldfam" ds <- ped2fam(df_potter, famID = "famID", personID = "personID") table(ds$famID, ds$oldfam) ``` Because the `potter` data already had a family ID variable, we compare our newly created variable to the pre-existing one. They match! # Computing Relatedness Once you know which sets of people are related at all to one another, you'll likely want to know how much. For additive genetic relatedness, you can use the `ped2add()` function. ```{r} add <- ped2add(potter) ``` This computes the additive genetic relatedness for everyone in the data. It returns a square, symmetric matrix that has as many rows and columns as there are IDs. ```{r} add[1:7, 1:7] ``` The entry in the ith row and the jth column gives the relatedness between person i and person j. For example, person 1 (`r potter$name[1]`) shares `r add[1,6]` of their nuclear DNA with person 6 (`r potter$name[6]`), shares `r add[1,2]` of their nuclear DNA with person 2 (`r potter$name[2]`). ```{r} table(add) ``` It's probably fine to do this on the whole dataset when your data have fewer than 10,000 people. When the data get large, however, it's much more efficient to compute this relatedness separately for each extended family. ```{r} add_list <- lapply( unique(potter$famID), function(d) { tmp <- potter[potter$famID %in% d, ] ped2add(tmp) } ) ``` ## Other relatedness measures The function works similarly for mitochondrial (`ped2mit`), common nuclear environment through sharing both parents (`ped2cn`), and common extended family environment (`ped2ce`). 
### Computing mitochondrial relatedness Here we calculate the mitochondrial relatedness between all pairs of individuals in the `potter` dataset. ```{r} mit <- ped2mit(potter) mit[1:7, 1:7] table(mit) ``` As you can see, some of the family members share mitochondrial DNA, such as person 2 and person 3 `r mit[2,3]`, whereas person 1 and person 3 do not. ### Computing relatedness through common nuclear environment Here we calculate the relatedness between all pairs of individuals in the `potter` dataset through sharing both parents. ```{r} commonNuclear <- ped2cn(potter) commonNuclear[1:7, 1:7] table(commonNuclear) ``` ### Computing relatedness through common extended family environment Here we calculate the relatedness between all pairs of individuals in the `potter` dataset through sharing an extended family. ```{r} extendedFamilyEnvironment <- ped2ce(potter) extendedFamilyEnvironment[1:7, 1:7] table(extendedFamilyEnvironment) ``` # Subsetting Pedigrees Subsetting a pedigree allows researchers to focus on specific family lines or individuals within a larger dataset. This can be particularly useful for data validation as well as simplifying complex pedigrees for visualization. However, subsetting a pedigree can result in the underestimation of relatedness between individuals. This is because the subsetted pedigree may not contain all the individuals that connect two people together. For example if we were to remove Arthur Weasley (person 9) and Molly Prewett (person 10) from the `potter` dataset, we would lose the connections amongst their children. ```{r, echo=FALSE, results='hide', out.width='50%', fig.cap="Potter Subset Pedigree"} names(potter)[names(potter) == "oldfam"] <- "famID" subset_rows <- c(1:8, 11:36) subset_potter <- potter[subset_rows, ] subset_potter$dadID[subset_potter$dadID %in% c(9, 10)] <- NA subset_potter$momID[subset_potter$momID %in% c(9, 10)] <- NA plotPedigree(subset_potter, code_male = 1, verbose = TRUE) ``` In the plot above, we have removed Arthur Weasley (person 9) and Molly Prewett (person 10) from the `potter` dataset. As a result, the connections between their children are lost. Similarly, if we remove the children of Vernon Dursey (1) and Petunia Evans (3) from the `potter` dataset, we would lose the connections between the two individuals. However, this subset does not plot the relationship between spouses (such as the marriage between Vernon Dursey and Petunia Evans), as there are not children to connect the two individuals together yet. ```{r} subset_rows <- c(1:5, 31:36) subset_potter <- potter[subset_rows, ] ``` ```{r, echo=FALSE, results='hide', out.width='50%', fig.cap="Potter Subset Pedigree"} plotPedigree(subset_potter, code_male = 1, verbose = TRUE) ```
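To quantify what a subset preserves, one can recompute a relatedness matrix on the subsetted pedigree and compare its distribution of values to the full-pedigree version. A brief illustrative sketch:

```{r}
add_subset <- ped2add(subset_potter)
table(add_subset)
```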
/scratch/gouwar.j/cran-all/cranData/BGmisc/inst/doc/network.Rmd
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
options(rmarkdown.html_vignette.check_title = FALSE)

## -----------------------------------------------------------------------------
## Loading Required Libraries
library(BGmisc)

set.seed(5)
df_ped <- simulatePedigree(
  kpc = 4,
  Ngen = 4,
  sexR = .5,
  marR = .7
)
summary(df_ped)

## -----------------------------------------------------------------------------
df_ped[21, ]

## ----fig.width=8,fig.height=6-------------------------------------------------
# Plot the simulated pedigree
plotPedigree(df_ped)

## ----fig.width=8, fig.height=6------------------------------------------------
set.seed(8)
# Simulate a family with 3 generations
df_ped_3 <- simulatePedigree(Ngen = 3)

# Simulate a family with 4 generations
df_ped_4 <- simulatePedigree(Ngen = 4)

# Set up plotting parameters for side-by-side display
par(mfrow = c(1, 2))

# Plot the 3-generation pedigree
plotPedigree(df_ped_3, width = 3)

# Plot the 4-generation pedigree
plotPedigree(df_ped_4, width = 1)
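## -----------------------------------------------------------------------------
# Illustrative sketch: tabulate simulated individuals per generation
table(df_ped_4$gen)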
/scratch/gouwar.j/cran-all/cranData/BGmisc/inst/doc/pedigree.R
--- title: "Pedigree Simulation and Visualization with BGmisc" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Pedigree} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) options(rmarkdown.html_vignette.check_title = FALSE) ``` # Introduction Unlike Tolstoy, where *only* happy families are alike, all pedigrees are alike -- or at least, all simulated pedigrees are alike. The `simulatePedigree` function generates a pedigree with a user-specified number of generations and individuals per generation. This function provides users the opportunity to test family models in pedigrees with a customized pedigree length and width. These pedigrees can be simulated as a function of several parameters, including the number of children per mate, generations, sex ratio of newborns, and mating rate. Given that large family pedigrees are difficult to collect or access, simulated pedigrees serve as an efficient tool for researchers. These simulated pedigrees are useful for building family-based statistical models, and evaluating their statistical properties, such as power, bias, and computational efficiency. To illustrate this functionality, let us generate a pedigree. This pedigree has a total of four generations (`Ngen`), in which each person who "mates", grows a family with four offspring (`kpc`). In our scenario, the number of male and female newborns is equal, but can be adjusted via (`sexR`). In this illustration 70% of individuals will mate and bear offspring (`marR`). Such a pedigree structure can be simulated by running the following code: ```{r} ## Loading Required Libraries library(BGmisc) set.seed(5) df_ped <- simulatePedigree( kpc = 4, Ngen = 4, sexR = .5, marR = .7 ) summary(df_ped) ``` The simulation output is a `data.frame` with `r length(df_ped$ID)` rows and `r length(df_ped)` columns. Each row corresponds to a simulated individual. ```{r} df_ped[21, ] ``` The columns represents the individual's family ID, the individual's personal ID, the generation the individual is in, the IDs of their father and mother, the ID of their spouse, and the biological sex of the individual, respectively. ## Plotting Pedigree Pedigrees are visual diagrams that represent family relationships across generations. They are commonly used in genetics to trace the inheritance of specific traits or conditions. This vignette will guide you through visualizing simulated pedigrees using the `plotPedigree` function. This function is a wrapper function for `Kinship2`'s base R plotting. ### Single Pedigree Visualization To visualize a single simulated pedigree, use the `plotPedigree()` function. ```{r,fig.width=8,fig.height=6} # Plot the simulated pedigree plotPedigree(df_ped) ``` In the resulting plot, biological males are represented by squares, while biological females are represented by circles, following the standard pedigree conventions. ### Visualizing Multiple Pedigrees Side-by-Side If you wish to compare different pedigrees side by side, you can plot them together. For instance, let's visualize pedigrees for families spanning three and four generations, respectively. 
```{r,fig.width=8, fig.height=6} set.seed(8) # Simulate a family with 3 generations df_ped_3 <- simulatePedigree(Ngen = 3) # Simulate a family with 4 generations df_ped_4 <- simulatePedigree(Ngen = 4) # Set up plotting parameters for side-by-side display par(mfrow = c(1, 2)) # Plot the 3-generation pedigree plotPedigree(df_ped_3, width = 3) # Plot the 4-generation pedigree plotPedigree(df_ped_4, width = 1) ``` By examining the side-by-side plots, you can contrast and analyze the structures of different families, tracing the inheritance of specific traits or conditions if needed.
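Beyond visual inspection, the generation structure can also be checked numerically. This short illustrative chunk tabulates how many simulated individuals fall into each generation of the four-generation pedigree created above:

```{r}
table(df_ped_4$gen)
```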
/scratch/gouwar.j/cran-all/cranData/BGmisc/inst/doc/pedigree.Rmd
--- title: "Calculating and Inferring Relatedness Coefficients with BGmisc" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Relatedness Coefficients} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) options(rmarkdown.html_vignette.check_title = FALSE) ``` # Introduction This vignette focuses on the calculation and inference of relatedness coefficients using the `BGmisc` package. The relatedness coefficient is a measure of the genetic relationship between two individuals. Here, we introduce two functions: `calculateRelatedness` and `inferRelatedness`, which allow users to compute and infer the relatedness coefficient respectively. ## Loading Required Libraries ```{r setup} library(BGmisc) ``` # Calculating Relatedness Coefficient The `calculateRelatedness` function offers a method to compute the relatedness coefficient based on shared ancestry, as described by Wright (1922). This function utilizes the formula: \[ r_{bc} = \sum \left(\frac{1}{2}\right)^{n+n'+1} (1+f_a) \] Where \( n \) and \( n' \) represent the number of generations back of common ancestors the pair share. ```{r} # Example usage: # For full siblings, the relatedness coefficient is expected to be 0.5: calculateRelatedness(generations = 1, full = TRUE) # For half siblings, the relatedness coefficient is expected to be 0.25: calculateRelatedness(generations = 1, full = FALSE) ``` # Inferring Relatedness Coefficient The `inferRelatedness` function is designed to infer the relatedness coefficient between two groups based on the observed correlation between their additive genetic variance and shared environmental variance. This function leverages the ACE framework. ```{r} # Example usage: # Infer the relatedness coefficient: inferRelatedness(obsR = 0.5, aceA = 0.9, aceC = 0, sharedC = 0) ```
/scratch/gouwar.j/cran-all/cranData/BGmisc/vignettes/analyticrelatedness.Rmd
--- title: "Modeling and Relatedness" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{modelingandrelatedness} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) options(rmarkdown.html_vignette.check_title = FALSE) ``` # Introduction This vignette provides a detailed guide to specific functions within the `BGmisc` package that aid in the identification and fitting of variance component models common in behavior genetics. We will explore key functions such as `identifyComponentModel`, providing practical examples and theoretical background. Identification ensures a unique set of parameters that define the model-implied covariance matrix, preventing free parameters from trading off one another. ## Loading Required Libraries ```{r setup, include=FALSE} library(BGmisc) require(EasyMx) require(OpenMx) ``` Ensure that the `BGmisc` package is installed and loaded. Ensure that the following dependencies are installed before proceeding as they provide us with behavior genetic data and models: - `EasyMx` - `OpenMx` ```{r} library(BGmisc) library(EasyMx) library(OpenMx) ``` Note: If any of the libraries are not installed, you can install them using install.packages("`package_name`"). # Working with Variance Component Models In this section, we will demonstrate core functions related to the identification and fitting of variance component models. ## Using `comp2vech` Function The `comp2vech` function is used to vectorize a components model. The function is often used in conjunction with the identification process. In this example, we apply it to a list of matrices: ```{r} comp2vech(list( matrix(c(1, .5, .5, 1), 2, 2), matrix(1, 2, 2) )) ``` The result showcases how the matrices have been transformed, reflecting their role in subsequent variance component analysis. ## Using `identifyComponentModel` Function The `identifyComponentModel` function helps determine if a variance components model is identified. It accepts relatedness component matrices and returns information about identified and non-identified parameters. Here's an example using the classical twin model *with only MZ twins*: ```{r} identifyComponentModel( A = list(matrix(1, 2, 2)), C = list(matrix(1, 2, 2)), E = diag(1, 2) ) ``` As you can see, the model is not identified. We need to add an additional group so that we have sufficient information. Let us add the rest of the classical twin model, in this case DZ twins. ```{r} identifyComponentModel( A = list(matrix(c(1, .5, .5, 1), 2, 2), matrix(1, 2, 2)), C = list(matrix(1, 2, 2), matrix(1, 2, 2)), E = diag(1, 4) ) ``` As you can see the model is identified, now that we've added another group. Let us confirm by fitting a model. First we prepare the data. ```{r} require(dplyr) # require(purrr) data(twinData, package = "OpenMx") selVars <- c("ht1", "ht2") mzdzData <- subset( twinData, zyg %in% c(1, 3), c(selVars, "zyg") ) mzdzData$RCoef <- c(1, NA, .5)[mzdzData$zyg] mzData <- mzdzData %>% filter(zyg == 1) ``` Let us fit the data with MZ twins by themselves. ```{r} run1 <- emxTwinModel( model = "Cholesky", relatedness = "RCoef", data = mzData, use = selVars, run = TRUE, name = "TwCh" ) summary(run1) ``` As you can see the model was unsuccessful because it was not identified. But when we add another group, so that the model is identified, the model now fits. 
```{r} run2 <- emxTwinModel( model = "Cholesky", relatedness = "RCoef", data = mzdzData, use = selVars, run = TRUE, name = "TwCh" ) summary(run2) ```
/scratch/gouwar.j/cran-all/cranData/BGmisc/vignettes/modelingrelatedness.Rmd
--- title: "Network tools for finding extended pedigrees and path tracing" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Network} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) options(rmarkdown.html_vignette.check_title = FALSE) ``` # Introduction This vignette showcases two key features that capitalize on the network structure inherent in pedigrees: 1. Finding extended families with *any* connecting relationships between members. This feature strictly uses a person's ID, mother's ID, and father's ID to find out which people in a dataset are remotely related by any path, effectively finding all separable extended families in a dataset. 2. Using path tracing rules to quantify the *amount* of relatedness between all pairs of individuals in a dataset. The amount of relatedness can be characterized by additive nuclear DNA, shared mitochondrial DNA, sharing both parents, or being part of the same extended pedigree. ## Loading Required Libraries and Data ```{r setup} library(BGmisc) data(potter) ``` # Finding Extended Families Many pedigree datasets only contain information on the person, their mother, and their father, often without nuclear or extended family IDs. Recognizing which sets of people are unrelated simplifies many pedigree-related tasks. This function facilitates those tasks by finding all the extended families. People within the same extended family have at least some form of relation, however distant, while those in different extended families have no relations. ```{r, echo=FALSE, results='hide', out.width='50%', fig.cap="Potter Family Pedigree"} plotPedigree(potter, code_male = 1, verbose = TRUE) ``` We will use the `potter` pedigree data as an example. For convenience, we've renamed the family ID variable to `oldfam` to avoid confusion with the new family ID variable we will create. ```{r} df_potter <- potter names(df_potter)[names(df_potter) == "famID"] <- "oldfam" ds <- ped2fam(df_potter, famID = "famID", personID = "personID") table(ds$famID, ds$oldfam) ``` Because the `potter` data already had a family ID variable, we compare our newly created variable to the pre-existing one. They match! # Computing Relatedness Once you know which sets of people are related at all to one another, you'll likely want to know how much. For additive genetic relatedness, you can use the `ped2add()` function. ```{r} add <- ped2add(potter) ``` This computes the additive genetic relatedness for everyone in the data. It returns a square, symmetric matrix that has as many rows and columns as there are IDs. ```{r} add[1:7, 1:7] ``` The entry in the ith row and the jth column gives the relatedness between person i and person j. For example, person 1 (`r potter$name[1]`) shares `r add[1,6]` of their nuclear DNA with person 6 (`r potter$name[6]`), shares `r add[1,2]` of their nuclear DNA with person 2 (`r potter$name[2]`). ```{r} table(add) ``` It's probably fine to do this on the whole dataset when your data have fewer than 10,000 people. When the data get large, however, it's much more efficient to compute this relatedness separately for each extended family. ```{r} add_list <- lapply( unique(potter$famID), function(d) { tmp <- potter[potter$famID %in% d, ] ped2add(tmp) } ) ``` ## Other relatedness measures The function works similarly for mitochondrial (`ped2mit`), common nuclear environment through sharing both parents (`ped2cn`), and common extended family environment (`ped2ce`). 
### Computing mitochondrial relatedness Here we calculate the mitochondrial relatedness between all pairs of individuals in the `potter` dataset. ```{r} mit <- ped2mit(potter) mit[1:7, 1:7] table(mit) ``` As you can see, some of the family members share mitochondrial DNA, such as person 2 and person 3 `r mit[2,3]`, whereas person 1 and person 3 do not. ### Computing relatedness through common nuclear environment Here we calculate the relatedness between all pairs of individuals in the `potter` dataset through sharing both parents. ```{r} commonNuclear <- ped2cn(potter) commonNuclear[1:7, 1:7] table(commonNuclear) ``` ### Computing relatedness through common extended family environment Here we calculate the relatedness between all pairs of individuals in the `potter` dataset through sharing an extended family. ```{r} extendedFamilyEnvironment <- ped2ce(potter) extendedFamilyEnvironment[1:7, 1:7] table(extendedFamilyEnvironment) ``` # Subsetting Pedigrees Subsetting a pedigree allows researchers to focus on specific family lines or individuals within a larger dataset. This can be particularly useful for data validation as well as simplifying complex pedigrees for visualization. However, subsetting a pedigree can result in the underestimation of relatedness between individuals. This is because the subsetted pedigree may not contain all the individuals that connect two people together. For example if we were to remove Arthur Weasley (person 9) and Molly Prewett (person 10) from the `potter` dataset, we would lose the connections amongst their children. ```{r, echo=FALSE, results='hide', out.width='50%', fig.cap="Potter Subset Pedigree"} names(potter)[names(potter) == "oldfam"] <- "famID" subset_rows <- c(1:8, 11:36) subset_potter <- potter[subset_rows, ] subset_potter$dadID[subset_potter$dadID %in% c(9, 10)] <- NA subset_potter$momID[subset_potter$momID %in% c(9, 10)] <- NA plotPedigree(subset_potter, code_male = 1, verbose = TRUE) ``` In the plot above, we have removed Arthur Weasley (person 9) and Molly Prewett (person 10) from the `potter` dataset. As a result, the connections between their children are lost. Similarly, if we remove the children of Vernon Dursey (1) and Petunia Evans (3) from the `potter` dataset, we would lose the connections between the two individuals. However, this subset does not plot the relationship between spouses (such as the marriage between Vernon Dursey and Petunia Evans), as there are not children to connect the two individuals together yet. ```{r} subset_rows <- c(1:5, 31:36) subset_potter <- potter[subset_rows, ] ``` ```{r, echo=FALSE, results='hide', out.width='50%', fig.cap="Potter Subset Pedigree"} plotPedigree(subset_potter, code_male = 1, verbose = TRUE) ```
/scratch/gouwar.j/cran-all/cranData/BGmisc/vignettes/network.Rmd
--- title: "Pedigree Simulation and Visualization with BGmisc" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Pedigree} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) options(rmarkdown.html_vignette.check_title = FALSE) ``` # Introduction Unlike Tolstoy, where *only* happy families are alike, all pedigrees are alike -- or at least, all simulated pedigrees are alike. The `simulatePedigree` function generates a pedigree with a user-specified number of generations and individuals per generation. This function provides users the opportunity to test family models in pedigrees with a customized pedigree length and width. These pedigrees can be simulated as a function of several parameters, including the number of children per mate, generations, sex ratio of newborns, and mating rate. Given that large family pedigrees are difficult to collect or access, simulated pedigrees serve as an efficient tool for researchers. These simulated pedigrees are useful for building family-based statistical models, and evaluating their statistical properties, such as power, bias, and computational efficiency. To illustrate this functionality, let us generate a pedigree. This pedigree has a total of four generations (`Ngen`), in which each person who "mates", grows a family with four offspring (`kpc`). In our scenario, the number of male and female newborns is equal, but can be adjusted via (`sexR`). In this illustration 70% of individuals will mate and bear offspring (`marR`). Such a pedigree structure can be simulated by running the following code: ```{r} ## Loading Required Libraries library(BGmisc) set.seed(5) df_ped <- simulatePedigree( kpc = 4, Ngen = 4, sexR = .5, marR = .7 ) summary(df_ped) ``` The simulation output is a `data.frame` with `r length(df_ped$ID)` rows and `r length(df_ped)` columns. Each row corresponds to a simulated individual. ```{r} df_ped[21, ] ``` The columns represents the individual's family ID, the individual's personal ID, the generation the individual is in, the IDs of their father and mother, the ID of their spouse, and the biological sex of the individual, respectively. ## Plotting Pedigree Pedigrees are visual diagrams that represent family relationships across generations. They are commonly used in genetics to trace the inheritance of specific traits or conditions. This vignette will guide you through visualizing simulated pedigrees using the `plotPedigree` function. This function is a wrapper function for `Kinship2`'s base R plotting. ### Single Pedigree Visualization To visualize a single simulated pedigree, use the `plotPedigree()` function. ```{r,fig.width=8,fig.height=6} # Plot the simulated pedigree plotPedigree(df_ped) ``` In the resulting plot, biological males are represented by squares, while biological females are represented by circles, following the standard pedigree conventions. ### Visualizing Multiple Pedigrees Side-by-Side If you wish to compare different pedigrees side by side, you can plot them together. For instance, let's visualize pedigrees for families spanning three and four generations, respectively. 
```{r,fig.width=8, fig.height=6} set.seed(8) # Simulate a family with 3 generations df_ped_3 <- simulatePedigree(Ngen = 3) # Simulate a family with 4 generations df_ped_4 <- simulatePedigree(Ngen = 4) # Set up plotting parameters for side-by-side display par(mfrow = c(1, 2)) # Plot the 3-generation pedigree plotPedigree(df_ped_3, width = 3) # Plot the 4-generation pedigree plotPedigree(df_ped_4, width = 1) ``` By examining the side-by-side plots, you can contrast and analyze the structures of different families, tracing the inheritance of specific traits or conditions if needed.
/scratch/gouwar.j/cran-all/cranData/BGmisc/vignettes/pedigree.Rmd
Rd_files <- vignettes <- R_files <- description <- list(encoding = "UTF-8", language = "en", dictionaries = c("en_stats", "BH"))
/scratch/gouwar.j/cran-all/cranData/BH/.aspell/defaults.R
#' BHAI:
#'
#' The \strong{BHAI} package
#'
#' @section BHAI functions:
#' \code{\link{bhai}}:
#'
#' @docType package
#' @name BHAI
#' @importFrom prevtoinc "calculate_I_smooth"
#' @importFrom MCMCpack "MCmultinomdirichlet"
#' @importFrom plotrix "draw.circle"
#' @importFrom methods "new"
#' @import stats
#' @import graphics
#' @import grDevices
NULL

#' Aggregated data of the ECDC PPS 2010-2011.
#'
#' @docType data
#' @keywords datasets
#' @name eu_pps
#' @usage data(eu_pps_2011)
#' @format A PPS object.
NULL

#' Aggregated data of the German PPS 2010-2011 (convenience sample).
#'
#' @docType data
#' @keywords datasets
#' @name german_pps_conv
#' @usage data(german_pps_2011_conv)
#' @format A PPS object.
NULL

#' Hospital discharges in Germany (2011)
#'
#' @docType data
#' @keywords datasets
#' @name hospital_discharges
#' @usage data(german_pps_2011_repr)
#' @format A PPS object.
NULL

#' Average length of stay of survey patients in the German PPS 2011 (representative sample)
#'
#' @docType data
#' @keywords datasets
#' @name length_of_stay
#' @usage data(german_pps_2011_repr)
#' @format A PPS object.
NULL

#' A list containing lengths of infection from all patients in the German PPS 2011 representative sample.
#'
#' @docType data
#' @keywords datasets
#' @name loi_pps
#' @usage data(german_pps_2011_repr)
#' @format A PPS object.
NULL

#' Named list containing remaining life expectancies for each McCabe score (NONFATAL, ULTFATAL, RAPFATAL).
#'
#' @docType data
#' @keywords datasets
#' @name mccabe_life_exp
#' @usage data(german_pps_2011_repr)
#' @format A PPS object.
NULL

#' The observed McCabe scores (counts) for each infection, age and gender stratum from the ECDC PPS 2011-2012.
#'
#' @docType data
#' @keywords datasets
#' @name mccabe_scores_distr
#' @usage data(german_pps_2011_repr)
#' @format A PPS object.
NULL

#' Number of cases for each infection in the German PPS 2011 (representative sample)
#'
#' @docType data
#' @keywords datasets
#' @name num_hai_patients
#' @usage data(german_pps_2011_repr)
#' @format A PPS object.
NULL

#' Stratified number of cases for each infection in the German PPS 2011 (representative sample)
#'
#' @docType data
#' @keywords datasets
#' @name num_hai_patients_by_stratum
#' @usage data(german_pps_2011_repr)
#' @format A PPS object.
NULL

#' Stratified number of cases for each infection in the German PPS 2011 (convenience sample). This distribution is used as a prior for the representative sample.
#'
#' @docType data
#' @keywords datasets
#' @name num_hai_patients_by_stratum_prior
#' @usage data(german_pps_2011_repr)
#' @format A PPS object.
NULL

#' Number of survey patients in the German PPS 2011 (representative sample).
#'
#' @docType data
#' @keywords datasets
#' @name num_survey_patients
#' @usage data(german_pps_2011_repr)
#' @format A PPS object.
NULL

#' Population size of Germany in 2011.
#'
#' @docType data
#' @keywords datasets
#' @name population
#' @usage data(german_pps_2011_repr)
#' @format A PPS object.
NULL

#' Simulated/subsampled data sets from the European PPS
#'
#' @docType data
#' @keywords datasets
#' @name sim_pps
#' @usage data(simulations)
#' @format A PPS object.
NULL

#' BHAI with stratified sampling was applied to simulated/subsampled data sets from the European PPS
#'
#' @docType data
#' @keywords datasets
#' @name sim_pps_stratified
#' @usage data(simulations)
#' @format A PPS object.
NULL

#' BHAI with default options was applied to simulated/subsampled data sets from the European PPS
#'
#' @docType data
#' @keywords datasets
#' @name sim_pps_bhai
#' @usage data(simulations)
#' @format A PPS object.
NULL

#' BHAI with prior was applied to simulated/subsampled data sets from the European PPS
#'
#' @docType data
#' @keywords datasets
#' @name sim_pps_bhai_prior
#' @usage data(simulations)
#' @format A PPS object.
NULL
/scratch/gouwar.j/cran-all/cranData/BHAI/R/BHAI-package.R
#'
#' This class is a generic container for PPS data sets.
#'
#' @slot infections Character vector storing names of infections in PPS
#' @slot num_hai_patients Named numeric containing patients having healthcare-associated infections.
#' @slot num_survey_patients Number of patients in point prevalence survey.
#' @slot length_of_stay Length of stay of all patients in hospitals. This is needed for the prevalence to incidence conversion with the Rhame-Sudderth formula.
#' @slot loi_pps A list containing lengths of infection from all patients in the PPS. In PPS this is usually calculated as the time from infection onset until the date of the survey.
#' @slot hospital_discharges The number of hospital discharges.
#' @slot num_hai_patients_by_stratum A list containing for each infection the number of patients in each age and gender stratum.
#' @slot num_hai_patients_by_stratum_prior The prior weight (counts) for each infection, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @slot mccabe_scores_distr The observed McCabe scores (counts) for each infection, age and gender stratum from the PPS.
#' @slot mccabe_by_stratum_prior The prior weight (counts) for each infection, McCabe score, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @slot mccabe_life_exp Named list containing remaining life expectancies for each McCabe score (NONFATAL, ULTFATAL, RAPFATAL).
#' @slot num_survey_patients_by_stratum Number of survey patients stratified by infection, age and gender. If this parameter is provided the methodology described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150> is applied.
#' @slot population Population size
#' @slot country Name of the country in which PPS was conducted
#' @slot bhai_options Options with which bhai was run. If bhai was not run yet, this is an empty list.
#' @slot bhai_summary Summary statistics of bhai. If bhai was not run yet, this is an empty list.
#' @exportClass PPS
.PPS <- setClass("PPS",
    representation(infections = "character",
        num_hai_patients = "numeric",
        num_survey_patients = "numeric",
        length_of_stay = "numeric",
        loi_pps = "list",
        hospital_discharges = "matrix",
        num_hai_patients_by_stratum = "list",
        num_hai_patients_by_stratum_prior = "list",
        mccabe_scores_distr = "list",
        mccabe_by_stratum_prior = "list",
        mccabe_life_exp = "list",
        num_survey_patients_by_stratum = "list",
        population = "numeric",
        country = "character",
        bhai_options = "list",
        bhai_summary = "list"))


#' Checks validity of PPS parameters
#'
#' @keywords internal
#' @noRd
checkPPSParams = function(num_hai_patients, num_survey_patients, length_of_stay, loi_pps, hospital_discharges,
                          num_hai_patients_by_stratum, num_hai_patients_by_stratum_prior, mccabe_scores_distr,
                          mccabe_by_stratum_prior, mccabe_life_exp, num_survey_patients_by_stratum, population, country) {

    if(! inherits(num_hai_patients, what=c("numeric", "integer")))
        stop("num_hai_patients must be numeric or integer!")
    if(! inherits(num_survey_patients, what=c("numeric", "integer")))
        stop("num_survey_patients must be numeric or integer!")
    if(! inherits(length_of_stay, what=c("numeric", "integer")))
        stop("length_of_stay must be numeric or integer!")
    if(length(length_of_stay) != 1)
        stop("length_of_stay must have length 1!")

    if(! inherits(loi_pps, what="list"))
        stop("loi_pps must be a list!")
    sapply(loi_pps, function(x) if(! inherits(x, what=c("numeric", "integer"))) {
        stop("Elements of loi_pps must be numeric or integer vectors!")
    })

    if(! inherits(hospital_discharges, what=c("numeric", "integer", "matrix")))
        stop("hospital_discharges must be numeric or integer or a matrix!")
    if(inherits(hospital_discharges, what=c("matrix"))) {
        checkMatrix(hospital_discharges, "hospital_discharges")
    } else {
        if(length(hospital_discharges) != 1)
            stop("hospital_discharges must have length 1!")
    }

    if(! inherits(num_hai_patients_by_stratum, what="list"))
        stop("num_hai_patients_by_stratum must be a list!")
    sapply(num_hai_patients_by_stratum, function(x) checkMatrix(x, "num_hai_patients_by_stratum"))

    if(! inherits(num_hai_patients_by_stratum_prior, what="list"))
        stop("num_hai_patients_by_stratum_prior must be a list!")
    if(length(num_hai_patients_by_stratum_prior) > 0) {
        if(is.null(names(num_hai_patients_by_stratum_prior)))
            stop("num_hai_patients_by_stratum_prior must be named!")
        sapply(num_hai_patients_by_stratum_prior, function(x) checkMatrix(x, "num_hai_patients_by_stratum_prior"))
        sapply(num_hai_patients_by_stratum_prior, function(x) if(! all(names(x) %in% c("NONFATAL", "RAPFATAL", "ULTFATAL"))) {
            stop("num_hai_patients_by_stratum_prior must contain McCabe scores: NONFATAL, RAPFATAL, ULTFATAL")
        })
        if(! all(names(num_hai_patients_by_stratum_prior) %in% names(num_hai_patients)))
            stop("Names of num_hai_patients_by_stratum_prior do not match names of num_hai_patients!")
    }

    if(! inherits(mccabe_by_stratum_prior, what="list"))
        stop("mccabe_by_stratum_prior must be a list!")
    if(length(mccabe_by_stratum_prior) > 0) {
        if(is.null(names(mccabe_by_stratum_prior)))
            stop("mccabe_by_stratum_prior must be named!")
        sapply(mccabe_by_stratum_prior, function(x) sapply(x, function(y) checkMatrix(y, "mccabe_by_stratum_prior")))
        sapply(mccabe_by_stratum_prior, function(x) if(! all(names(x) %in% c("NONFATAL", "RAPFATAL", "ULTFATAL"))) {
            stop("mccabe_by_stratum_prior must contain McCabe scores: NONFATAL, RAPFATAL, ULTFATAL")
        })
        if(! all(names(mccabe_by_stratum_prior) %in% names(num_hai_patients)))
            stop("Names of mccabe_by_stratum_prior do not match names of num_hai_patients!")
    }

    if(! inherits(mccabe_scores_distr, what="list"))
        stop("mccabe_scores_distr must be a list!")
    if(is.null(names(mccabe_scores_distr)))
        stop("mccabe_scores_distr must be named!")
    sapply(mccabe_scores_distr, function(x) sapply(x, function(y) checkMatrix(y, "mccabe_scores_distr")))
    sapply(mccabe_scores_distr, function(x) if(! all(names(x) %in% c("NONFATAL", "RAPFATAL", "ULTFATAL"))) {
        stop("mccabe_scores_distr must contain McCabe scores: NONFATAL, RAPFATAL, ULTFATAL")
    })
    if(! all(names(mccabe_scores_distr) %in% names(num_hai_patients)))
        stop("Names of mccabe_scores_distr do not match names of num_hai_patients!")

    if(! inherits(mccabe_life_exp, what="list"))
        stop("mccabe_life_exp must be a list!")
    if(is.null(names(mccabe_life_exp)))
        stop("mccabe_life_exp must be named!")
    sapply(mccabe_life_exp, function(x) checkMatrix(x, "mccabe_life_exp"))
    if(! all(names(mccabe_life_exp) %in% c("NONFATAL", "RAPFATAL", "ULTFATAL"))) {
        stop("mccabe_life_exp must contain McCabe scores: NONFATAL, RAPFATAL, ULTFATAL")
    }

    if(! inherits(num_survey_patients_by_stratum, what="list"))
        stop("num_survey_patients_by_stratum must be a list!")
    if(length(num_survey_patients_by_stratum) > 0) {
        if(is.null(names(num_survey_patients_by_stratum)))
            stop("num_survey_patients_by_stratum must be named!")
        sapply(num_survey_patients_by_stratum, function(x) checkMatrix(x, "num_survey_patients_by_stratum"))
        sapply(num_survey_patients_by_stratum, function(x) if(! all(names(x) %in% c("NONFATAL", "RAPFATAL", "ULTFATAL"))) {
            stop("num_survey_patients_by_stratum must contain McCabe scores: NONFATAL, RAPFATAL, ULTFATAL")
        })
        if(! all(names(num_survey_patients_by_stratum) %in% names(num_hai_patients)))
            stop("Names of num_survey_patients_by_stratum do not match names of num_hai_patients!")
    }

    if(! inherits(population, what=c("numeric", "integer")))
        stop("population must be numeric or integer!")
    if(! inherits(country, what=c("character")))
        stop("country must be a character!")

    if(is.null(names(num_hai_patients)))
        stop("num_hai_patients must be named!")
    if(is.null(names(loi_pps)))
        stop("loi_pps must be named!")
    if(! all(names(loi_pps) %in% names(num_hai_patients)))
        stop("Names of loi_pps do not match names of num_hai_patients!")
    if(is.null(names(num_hai_patients_by_stratum)))
        stop("num_hai_patients_by_stratum must be named!")
    if(! all(names(num_hai_patients_by_stratum) %in% names(num_hai_patients)))
        stop("Names of num_hai_patients_by_stratum do not match names of num_hai_patients!")
}

checkMatrix = function(x, mat_name) {
    if(! inherits(x, what=c("matrix", "data.frame"))) {
        stop(paste("Elements of ", mat_name, " must be a matrix!", sep=""))
    }
    apply(x, 2, function(y) if(! inherits(y, what=c("numeric", "integer"))) {
        stop(paste(mat_name, " elements must be numeric or integer!", sep=""))
    })
    if(! all(colnames(x) == c("F", "M") | colnames(x) == c("Female", "Male"))) {
        stop(paste("Colnames of ", mat_name, " must be c(\"F\", \"M\")", sep=""))
    }
    rowN_print = "c(\"[0;0]\", \"[1;4]\", \"[5;9]\", \"[10;14]\", \"[15;19]\", \"[20;24]\", \"[25;29]\", \"[30;34]\", \"[35;39]\", \"[40;44]\", \"[45;49]\", \"[50;54]\", \"[55;59]\", \"[60;64]\", \"[65;69]\", \"[70;74]\", \"[75;79]\", \"[80;84]\", \"[85;Inf]\")"
    rowN = c("[0;0]", "[1;4]", "[5;9]", "[10;14]", "[15;19]", "[20;24]", "[25;29]", "[30;34]", "[35;39]", "[40;44]", "[45;49]", "[50;54]", "[55;59]", "[60;64]", "[65;69]", "[70;74]", "[75;79]", "[80;84]", "[85;Inf]")
    if(! all(rownames(x) == rowN)) {
        stop(paste("Rownames of ", mat_name, " must be ", rowN_print, sep=""))
    }
}

#'
#' This function creates a PPS object.
#' @title Create a PPS object
#'
#' @param num_hai_patients Named numeric containing patients having healthcare-associated infections.
#' @param num_survey_patients Number of patients in point prevalence survey.
#' @param length_of_stay Length of stay of all patients in hospitals. This is needed for the prevalence to incidence conversion with the Rhame-Sudderth formula.
#' @param loi_pps A list containing lengths of infection from all patients in the PPS. In PPS this is usually approximated as the time from infection onset until the date of the survey.
#' @param hospital_discharges The number of hospital discharges.
#' @param num_hai_patients_by_stratum A list containing for each infection the number of patients in each age and gender stratum.
#' @param num_hai_patients_by_stratum_prior The prior weight (counts) for each infection, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param mccabe_scores_distr The observed McCabe scores (counts) for each infection, age and gender stratum from the PPS.
#' @param mccabe_by_stratum_prior The prior weight (counts) for each infection, McCabe score, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param mccabe_life_exp Named list containing remaining life expectancies for each McCabe score (NONFATAL, ULTFATAL, RAPFATAL).
#' @param num_survey_patients_by_stratum Number of survey patients stratified by infection, age and gender. If this parameter is provided the methodology described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150> is applied.
#' @param population Population size.
#' @param country Name of the country.
#'
#' @return A PPS class object.
#'
#' @seealso \code{\linkS4class{PPS}}
#'
#' @examples
#'
#' data(german_pps_2011_repr)
#' german_pps_repr = PPS(num_hai_patients = num_hai_patients,
#'         num_hai_patients_by_stratum = num_hai_patients_by_stratum,
#'         num_hai_patients_by_stratum_prior = num_hai_patients_by_stratum_prior,
#'         num_survey_patients = num_survey_patients,
#'         length_of_stay = length_of_stay,
#'         loi_pps = loi_pps,
#'         mccabe_scores_distr = mccabe_scores_distr,
#'         mccabe_life_exp = mccabe_life_exp,
#'         hospital_discharges = hospital_discharges,
#'         population = population,
#'         country="Germany (representative sample)")
#' german_pps_repr
#'
#' @export
PPS <- function(num_hai_patients = NULL,
                num_survey_patients = NULL,
                length_of_stay = NULL,
                loi_pps = NULL,
                hospital_discharges = NULL,
                num_hai_patients_by_stratum = NULL,
                num_hai_patients_by_stratum_prior = NULL,
                mccabe_scores_distr = NULL,
                mccabe_by_stratum_prior = NULL,
                mccabe_life_exp = NULL,
                num_survey_patients_by_stratum = NULL,
                population = NULL,
                country = "") {

    if(is.null(num_hai_patients_by_stratum_prior)) {
        num_hai_patients_by_stratum_prior = list()
    }
    if(is.null(mccabe_by_stratum_prior)) {
        mccabe_by_stratum_prior = list()
    }
    if(is.null(num_survey_patients_by_stratum)) {
        num_survey_patients_by_stratum = list()
    }

    checkPPSParams(num_hai_patients, num_survey_patients, length_of_stay, loi_pps, hospital_discharges,
                   num_hai_patients_by_stratum, num_hai_patients_by_stratum_prior, mccabe_scores_distr,
                   mccabe_by_stratum_prior, mccabe_life_exp, num_survey_patients_by_stratum, population, country)

    if(inherits(hospital_discharges, what="numeric")) {
        hospital_discharges = matrix(hospital_discharges, nrow=1, ncol=1)
    }

    .PPS(infections = names(num_hai_patients),
        num_hai_patients = num_hai_patients,
        num_survey_patients = num_survey_patients,
        length_of_stay = length_of_stay,
        loi_pps = loi_pps,
        hospital_discharges = hospital_discharges,
        num_hai_patients_by_stratum = num_hai_patients_by_stratum,
        num_hai_patients_by_stratum_prior = num_hai_patients_by_stratum_prior,
        mccabe_scores_distr = mccabe_scores_distr,
        mccabe_by_stratum_prior = as.list(mccabe_by_stratum_prior),
        mccabe_life_exp = mccabe_life_exp,
        num_survey_patients_by_stratum = as.list(num_survey_patients_by_stratum),
        population = population,
        country = country,
        bhai_options = list(),
        bhai_summary = list())
}

#' Prints description of PPS object
#'
#' @keywords internal
#' @noRd
setMethod(f = "show",
    signature = c("PPS"),
    function(object) {
        if(object@country != "") {
            cat(" Country: ", object@country, "\n", sep="")
        }
        cat(" Patients in PPS: ", prettyNum(object@num_survey_patients, big.mark=","), "\n", sep = "")
        cat(" Patients with HAI: ", prettyNum(sum(object@num_hai_patients), big.mark=","), "\n", sep = "")
        cat(" HAIs: ", paste(names(object@num_hai_patients), collapse=", "), "\n", sep="")
        if(length(object@bhai_summary) > 0) {
            mat = matrix(bhai.prettyTable(object)["ALL", c("Cases", "Deaths", "DALY")], ncol=1)
            rownames(mat) = c(" Cases:", " Deaths:", " DALY:")
            colnames(mat) = ""
            print(mat, quote=FALSE)
            if(object@bhai_options$stratified_sampling) {
                cat("Note: Results were obtained using stratified sampling (not recommended)!")
            }
        }
    })

#' Estimation of the burden of healthcare-associated infections
#'
#' @name bhai
#' @rdname bhai-methods
#'
#' @title Main function of the package to estimate the burden of healthcare-associated infections
#'
#' @param pps The PPS object containing the data.
#' @param nsim Number of Monte Carlo simulations, default: 1000.
#' @param pop.sampling Specifying whether parameters of the disease outcome trees should be sampled on population level, default: TRUE.
#' @param sample_distr Distribution used for prevalence sampling, default: "rbetamix".
#' @param estimate_loi_fun Function used for estimation of the length of infection, default: bootstrap_mean_gren (recommended!).
#' @param stratified_sampling Specifying whether stratified sampling should be done.
#' @param summarize_strata Specifying whether stratum-specific summary statistics should be computed.
#' @param use_prior Specifying whether prior distributions should be used for computations.
#'
#' @return A PPS class object.
#'
#' @seealso \code{\linkS4class{PPS}}
#'
#' @examples
#'
#' data(german_pps_2011_repr)
#' german_pps_repr = PPS(num_hai_patients = num_hai_patients,
#'         num_hai_patients_by_stratum = num_hai_patients_by_stratum,
#'         num_hai_patients_by_stratum_prior = num_hai_patients_by_stratum_prior,
#'         num_survey_patients = num_survey_patients,
#'         length_of_stay = length_of_stay,
#'         loi_pps = loi_pps,
#'         mccabe_scores_distr = mccabe_scores_distr,
#'         mccabe_life_exp = mccabe_life_exp,
#'         hospital_discharges = hospital_discharges,
#'         population = population,
#'         country="Germany (representative sample)")
#' german_pps_repr
#'
#' set.seed(3)
#' # The following example is run only for illustrative reasons
#' # Note that you should never run the function with only 10 Monte-Carlo simulations in practice!
#' bhai(german_pps_repr, nsim=10) #' #' @export #' @docType methods #' @rdname bhai-methods setGeneric("bhai", function(pps, nsim=1000, pop.sampling=TRUE, sample_distr="rbetamix", estimate_loi_fun=bootstrap_mean_gren, stratified_sampling=FALSE, summarize_strata=TRUE, use_prior=TRUE) { standardGeneric("bhai") }) #' @rdname bhai-methods #' @aliases bhai,PPS,ANY-method setMethod("bhai", "PPS", function(pps, nsim=1000, pop.sampling=TRUE, sample_distr="rbetamix", estimate_loi_fun=bootstrap_mean_gren, stratified_sampling=FALSE, summarize_strata=TRUE, use_prior=TRUE) { if(!use_prior) { pps@mccabe_by_stratum_prior = list() pps@mccabe_by_stratum_prior = list() } res = bhai.run(pps@num_hai_patients, pps@num_survey_patients, pps@length_of_stay, pps@loi_pps, pps@hospital_discharges, pps@num_hai_patients_by_stratum, num_hai_patients_by_stratum_prior=pps@num_hai_patients_by_stratum_prior, pps@mccabe_scores_distr, mccabe_by_stratum_prior=pps@mccabe_by_stratum_prior, pps@mccabe_life_exp, pop.sampling=pop.sampling, nsim=nsim, sample_distr=sample_distr, infections=names(pps@num_hai_patients), num_survey_patients_by_stratum=pps@num_survey_patients_by_stratum, estimate_loi_fun=estimate_loi_fun, stratified_sampling=stratified_sampling) sim_options = list(nsim=nsim, pop.sampling=pop.sampling, sample_distr=sample_distr, estimate_loi_fun=estimate_loi_fun, stratified_sampling=stratified_sampling) summary_bhai = summary.bhai(res, pps@population, summarize_strata) pps@bhai_options = sim_options pps@bhai_summary = summary_bhai pps })
/scratch/gouwar.j/cran-all/cranData/BHAI/R/PPSclass.R
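# A minimal sketch (not part of the package; counts invented) of the stratum
# matrix layout that checkMatrix() above enforces: the 19 age groups as rows and
# the two genders as columns, with exactly the required dimnames.
ageGroups = c("[0;0]", "[1;4]", "[5;9]", "[10;14]", "[15;19]", "[20;24]",
              "[25;29]", "[30;34]", "[35;39]", "[40;44]", "[45;49]", "[50;54]",
              "[55;59]", "[60;64]", "[65;69]", "[70;74]", "[75;79]", "[80;84]", "[85;Inf]")
exampleStratum = matrix(0, nrow=19, ncol=2, dimnames=list(ageGroups, c("F", "M")))
exampleStratum["[65;69]", "F"] = 3  # e.g. three female cases aged 65-69
# checkMatrix(exampleStratum, "example")  # passes: correct col- and rownames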
#'
#' Estimate the burden of healthcare-associated infections (bhai) from point prevalence surveys
#'
#' @title Burden of healthcare-associated infections
#'
#' @param num_hai_patients Named numeric containing the number of patients with healthcare-associated infections.
#' @param num_survey_patients Number of patients in the point prevalence survey.
#' @param length_of_stay Length of stay of all patients in hospitals. This is needed for the prevalence-to-incidence conversion with the Rhame-Sudderth formula.
#' @param loi_pps A list containing the lengths of infection of all patients in the PPS. In a PPS this is usually approximated as the time from infection onset until the date of the survey.
#' @param hospital_discharges The number of hospital discharges.
#' @param num_hai_patients_by_stratum A list containing for each infection the number of patients in each age and gender stratum.
#' @param num_hai_patients_by_stratum_prior The prior weight (counts) for each infection, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param mccabe_scores_distr The observed McCabe scores (counts) for each infection, age and gender stratum from the PPS.
#' @param mccabe_by_stratum_prior The prior weight (counts) for each infection, McCabe score, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param mccabe_life_exp Named list containing remaining life expectancies for each McCabe score (NONFATAL, ULTFATAL, RAPFATAL).
#' @param pop.sampling Indicating whether the sampling of the outcome tree parameters (e.g. probability of death, disability weights) should be carried out in each stratum separately or for the whole population.
#' @param nsim Number of Monte Carlo simulations.
#' @param sample_distr The distribution for the prevalence sampling. Default is 'rbetamix', which uses the mid-P Clopper-Pearson interval. Alternatives are 'rpert' - which uses the PERT distribution - and 'bcode', which uses the sampling procedure described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150>.
#' @param infections Infections for which the burden should be calculated. Default is names(num_hai_patients).
#' @param num_survey_patients_by_stratum Number of survey patients stratified by infection, age and gender. If this parameter is provided, the methodology described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150> is applied.
#' @param estimate_loi_fun Function to use for the estimation of the length of infection. Default is bootstrap_mean_gren.
#' @param stratified_sampling Specifying whether stratified sampling should be done.
#'
#' @usage bhai.run(num_hai_patients, num_survey_patients, length_of_stay, loi_pps,
#' hospital_discharges, num_hai_patients_by_stratum, num_hai_patients_by_stratum_prior=NULL,
#' mccabe_scores_distr, mccabe_by_stratum_prior=NULL, mccabe_life_exp, pop.sampling=TRUE,
#' nsim=1000, sample_distr="rbetamix", infections=names(num_hai_patients),
#' num_survey_patients_by_stratum=NULL, estimate_loi_fun=bootstrap_mean_gren,
#' stratified_sampling=FALSE)
#'
#' @return A list containing estimates for all input infections.
#'
#' @keywords internal
#' @noRd
bhai.run = function(num_hai_patients, num_survey_patients, length_of_stay, loi_pps, hospital_discharges,
                    num_hai_patients_by_stratum, num_hai_patients_by_stratum_prior=NULL,
                    mccabe_scores_distr, mccabe_by_stratum_prior=NULL, mccabe_life_exp,
                    pop.sampling=TRUE, nsim=1000, sample_distr="rbetamix",
                    infections=names(num_hai_patients), num_survey_patients_by_stratum=NULL,
                    estimate_loi_fun=bootstrap_mean_gren, stratified_sampling=FALSE) {

  ncases_by_stratum = num_hai_patients_by_stratum
  age_prior = num_hai_patients_by_stratum_prior
  if(length(age_prior) > 0) {
    # pseudocount = Reduce("+", age_prior)
    # pseudocount = pseudocount/sum(pseudocount)+1e-3
    for(i in 1:length(age_prior)) {
      age_prior[[i]] = age_prior[[i]] + max(c(1e-3, 1e-3*sum(age_prior[[i]])))
    }
  }
  nhai = num_hai_patients
  npatients = num_survey_patients

  if(length(mccabe_scores_distr) > 0) {
    for(n in names(mccabe_scores_distr)) {
      mccabe_overall = sapply(mccabe_scores_distr[[n]], sum)
      mccabe_overall = mccabe_overall/sum(mccabe_overall)
      for(i in 1:length(mccabe_scores_distr[[n]])) {
        # Add a pseudocount to make sure that no cases are lost due to empty strata
        # Empty strata will have the McCabe distribution of the infection average
        pseudocount = 1e-3*mccabe_overall[i] + 1e-3
        mccabe_scores_distr[[n]][[i]] = mccabe_scores_distr[[n]][[i]] + pseudocount
      }
    }
  }

  mccabe_prior = mccabe_by_stratum_prior
  if(length(mccabe_prior) > 0) {
    for(n in names(mccabe_prior)) {
      for(i in 1:length(mccabe_prior[[n]])) {
        # Add a pseudocount to make sure that no cases are lost due to empty strata
        mccabe_prior[[n]][[i]] = mccabe_prior[[n]][[i]] + max(c(1e-3, 1e-3*sum(mccabe_prior[[n]][[i]])))
      }
    }
  }

  # The following have to be > 0. Otherwise cases might be 'lost' when distributed into strata and McCabe categories
  if(any(unlist(mccabe_prior) == 0)) warning("Empty strata in McCabe prior! Might lead to flawed estimation!")
  if(any(unlist(mccabe_scores_distr) == 0)) warning("Empty strata in McCabe distribution! Might lead to flawed estimation!")
  if(any(unlist(age_prior) == 0)) warning("Empty strata in age-gender prior! Might lead to flawed estimation!")

  discharges = hospital_discharges
  la = length_of_stay
  loi = loi_pps
  pop.level = pop.sampling

  p_age = NULL
  if(length(num_survey_patients_by_stratum) > 0 && stratified_sampling) {
    p_age = num_survey_patients_by_stratum # /sum(num_survey_patients_by_stratum)
  } else if(length(num_survey_patients_by_stratum) == 0 && stratified_sampling) {
    warning("Stratified sampling cannot be performed: stratified_sampling==TRUE but num_survey_patients_by_stratum is empty in PPS object!\n")
  }

  allsim = list()
  for(n in infections) {
    # print(n)
    allsim[[n]] = list()
    currloi = NULL
    if(is.list(loi) && length(loi) == 2) {
      currloi = sort(unlist(sapply(loi, function(x) x[n])))
    } else {
      currloi = loi[n]
    }
    # print(currloi)
    for(k in 1:nsim) {
      allsim[[n]][[k]] = switch(n,
        HAP = sim.pn(nhai[n], npatients, la, currloi, discharges, mccabe_life_exp,
                     mccabe_scores_distr[[n]], ncases_by_stratum[[n]], age_prior=age_prior[[n]],
                     p_age=p_age, pop.level=pop.level, sample_distr=sample_distr,
                     mccabe_prior[[n]], estimate_loi=estimate_loi_fun),
        BSI = sim.bsi(nhai[n], npatients, la, currloi, discharges, mccabe_life_exp,
                      mccabe_scores_distr[[n]], ncases_by_stratum[[n]], age_prior=age_prior[[n]],
                      p_age=p_age, pop.level=pop.level, sample_distr=sample_distr,
                      mccabe_prior[[n]], estimate_loi=estimate_loi_fun),
        SSI = sim.ssi(nhai[n], npatients, la, currloi, discharges, mccabe_life_exp,
                      mccabe_scores_distr[[n]], ncases_by_stratum[[n]], age_prior=age_prior[[n]],
                      p_age=p_age, pop.level=pop.level, sample_distr=sample_distr,
                      mccabe_prior[[n]], estimate_loi=estimate_loi_fun),
        CDI = sim.cdi(nhai[n], npatients, la, currloi, discharges, mccabe_life_exp,
                      mccabe_scores_distr[[n]], ncases_by_stratum[[n]], age_prior=age_prior[[n]],
                      p_age=p_age, pop.level=pop.level, sample_distr=sample_distr,
                      mccabe_prior[[n]], estimate_loi=estimate_loi_fun),
        UTI = sim.uti(nhai[n], npatients, la, currloi, discharges, mccabe_life_exp,
                      mccabe_scores_distr[[n]], ncases_by_stratum[[n]], age_prior=age_prior[[n]],
                      p_age=p_age, pop.level=pop.level, sample_distr=sample_distr,
                      mccabe_prior[[n]], estimate_loi=estimate_loi_fun))
    }
  }
  allsim
}
/scratch/gouwar.j/cran-all/cranData/BHAI/R/bhai.run.R
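# A toy illustration (all values invented) of the Rhame-Sudderth conversion that
# underlies bhai.run()/sample.ncases(): the sampled prevalence is rescaled by the
# ratio of the average length of stay (la) to the average length of infection
# (loi), and multiplied by the yearly hospital discharges to obtain an incidence.
prevalence = 80 / 10000   # 80 HAI patients among 10,000 surveyed
la = 7.5                  # average length of stay in days
loi = 9.2                 # estimated average length of infection in days
discharges = 18500000     # yearly hospital discharges
incidence = prevalence * la/loi * discharges
incidence                 # expected number of HAI cases per year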
#'
#' Estimate the length of infection using the rearrangement and Grenander estimators
#'
#' @title Estimate the length of infection
#'
#' @param loi Numeric vector containing the lengths of infection (LOI_PPS).
#'
#' @usage runif_bootstrap_rear_gren(loi)
#'
#' @return Estimated length of infection.
#'
#' @keywords internal
#' @noRd
runif_bootstrap_rear_gren = function(loi) {
  loi = sample(loi, length(loi), replace = TRUE)
  loi = c(calculate_I_smooth(data = data.frame(A.loi=loi), method = "rear")$x.loi.hat,
          calculate_I_smooth(data = data.frame(A.loi=loi), method = "gren")$x.loi.hat)
  loi = sort(loi)
  # Draw uniformly between the rearrangement and Grenander estimates
  loi = runif(1, loi[1], loi[2])
  loi
}

#'
#' Estimate the length of infection using the bootstrapped mean
#'
#' @title Estimate the length of infection
#'
#' @param loi Numeric vector containing the lengths of infection (LOI_PPS).
#'
#' @usage bootstrap_mean(loi)
#'
#' @return Estimated length of infection.
#'
#' @keywords internal
#' @noRd
bootstrap_mean = function(loi) {
  loi = mean(sample(loi, length(loi), replace = TRUE))
  loi
}

#'
#' Estimate the length of infection using a sample-size dependent mixture of the
#' bootstrapped mean and the Grenander estimator
#'
#' @title Estimate the length of infection
#'
#' @param loi Numeric vector containing the lengths of infection (LOI_PPS).
#'
#' @usage bootstrap_mean_gren(loi)
#'
#' @return Estimated length of infection.
#'
#' @keywords internal
#' @noRd
bootstrap_mean_gren = function(loi) {
  loi = sample(loi, length(loi), replace = TRUE)
  n = length(loi) # sample size; drives the weighting below
  loi = c(mean(loi), calculate_I_smooth(data = data.frame(A.loi=loi), method = "gren")$x.loi.hat)
  a = 0.01
  b = 500
  # Logistic weight: the Grenander estimate gains weight as the sample size grows
  alpha <- exp(a*(n-b))/(1+exp(a*(n-b)))
  loi = sum(loi*c(1-alpha, alpha))
  loi
}
/scratch/gouwar.j/cran-all/cranData/BHAI/R/loi.estimation.R
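# A small numeric sketch (standalone, not package code) of the logistic weight
# used in bootstrap_mean_gren() above: with a = 0.01 and b = 500, the Grenander
# estimate only starts to dominate the bootstrapped mean once several hundred
# lengths of infection have been observed.
a = 0.01
b = 500
n = c(50, 250, 500, 750, 1000)
alpha = exp(a*(n-b)) / (1 + exp(a*(n-b)))
round(alpha, 3)  # weight of the Grenander estimate: 0.011 0.076 0.500 0.924 0.993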
#' Summary plot of number of infections, deaths and DALYs
#'
#' @title Summary plot of number of infections, deaths and DALYs
#'
#' @param pps The PPS object containing the data.
#' @param infections Infections to be plotted.
#' @param main Title of plot.
#' @param xlim Limits of x-axis.
#' @param ylim Limits of y-axis.
#'
#' @usage bhai.circleplot(pps, infections=NULL, main="", xlim=NULL, ylim=NULL)
#'
#' @seealso \code{\linkS4class{PPS}}
#'
#' @examples
#'
#' data(german_pps_2011_repr)
#' german_pps_repr = PPS(num_hai_patients = num_hai_patients,
#'                       num_hai_patients_by_stratum = num_hai_patients_by_stratum,
#'                       num_hai_patients_by_stratum_prior = num_hai_patients_by_stratum_prior,
#'                       num_survey_patients = num_survey_patients,
#'                       length_of_stay = length_of_stay,
#'                       loi_pps = loi_pps,
#'                       mccabe_scores_distr = mccabe_scores_distr,
#'                       mccabe_life_exp = mccabe_life_exp,
#'                       hospital_discharges = hospital_discharges,
#'                       population = population,
#'                       country="Germany (representative sample)")
#' german_pps_repr
#'
#' set.seed(3)
#' # The following example is run for illustrative purposes only
#' # Note that you should never run the function with only 10 Monte Carlo simulations in practice!
#' result = bhai(german_pps_repr, nsim=10)
#' bhai.circleplot(pps=result)
#'
#' @export
bhai.circleplot = function(pps, infections=NULL, main="", xlim=NULL, ylim=NULL) {
  bhai_summary = pps@bhai_summary
  # Plot all infections except the overall summary unless a subset was requested
  if(is.null(infections)) {
    infections = names(bhai_summary)
    infections = infections[infections != "ALL"]
  }
  ncases_by_hai = sapply(bhai_summary[infections], function(x) x$TOTAL["Cases", 2]) # /pop_size*100000
  ndeath_by_hai = sapply(bhai_summary[infections], function(x) x$TOTAL["Deaths", 2]) # /pop_size*100000
  dalys_by_hai = sapply(bhai_summary[infections], function(x) x$TOTAL["DALY", 2]) # /pop_size*100000
  # Rescale DALYs so that they can serve as circle radii on the cases axis
  dalys_by_hai = dalys_by_hai/sum(dalys_by_hai)*mean(ncases_by_hai)

  xLim = range(ncases_by_hai*min(c(dalys_by_hai/ncases_by_hai, ncases_by_hai/dalys_by_hai)),
               ncases_by_hai + 1.5*dalys_by_hai)
  yLim = range(ndeath_by_hai)
  xyasp <- par("pin")
  xycr <- diff(c(xLim, yLim))[c(1, 3)]
  ymult <- xyasp[1]/xyasp[2] * xycr[2]/xycr[1]
  yLim = range(c(ndeath_by_hai - dalys_by_hai*1.2*ymult, ndeath_by_hai + dalys_by_hai*1.2*ymult))
  if(!is.null(ylim)) {
    yLim = ylim
  }
  if(!is.null(xlim)) {
    xLim = xlim
  }
  xAxis = axisTicks(xLim, log=FALSE)
  yAxis = axisTicks(yLim, log=FALSE)

  plot(ncases_by_hai, ndeath_by_hai, pch="", xlim=xLim, ylim=yLim, xaxt="n", yaxt="n",
       xlab="Number of cases", ylab="Number of deaths", main=main)
  axis(1, xAxis, xAxis)
  axis(2, yAxis, yAxis)
  abline(v=xAxis, col="grey")
  abline(h=yAxis, col="grey")
  # segments(x0=xAxis, y0=rep(yLim[1]-dalys_by_hai*1.2, length(xAxis)), x1=xAxis, y1=rep(yLim[2]+dalys_by_hai*1.2, length(xAxis)), col="grey")
  # segments(y0=yAxis, x0=rep(xLim[1]-dalys_by_hai*1.2, length(yAxis)), y1=yAxis, x1=rep(xLim[2]+dalys_by_hai*1.2, length(yAxis)), col="grey")
  for(i in names(ncases_by_hai)) {
    draw.circle(ncases_by_hai[i], ndeath_by_hai[i], radius=dalys_by_hai[i], nv=10000,
                col="lightblue", border="darkblue")
    text(ncases_by_hai[i], ndeath_by_hai[i], i)
  }
}

#' Stratified barplot of cases, deaths and DALYs.
#'
#' @title Stratified barplot of cases, deaths and DALYs.
#'
#' @param pps The PPS object containing the data.
#' @param infection Infection to be plotted.
#' @param what One of c("Cases", "Deaths", "DALY")
#' @param col Color used to fill the bars.
#' @param errors Specifying whether error bars should be plotted, default: TRUE.
#' @param lwd.errors Line width of error bars.
#' @param xlab X-axis labels.
#' @param ... Further plotting arguments
#'
#' @usage bhai.strataplot(pps, infection, what, col=NULL, errors=TRUE, lwd.errors=2, xlab=NULL, ...)
#'
#' @seealso \code{\linkS4class{PPS}}
#'
#' @examples
#'
#' data(german_pps_2011_repr)
#' german_pps_repr = PPS(num_hai_patients = num_hai_patients,
#'                       num_hai_patients_by_stratum = num_hai_patients_by_stratum,
#'                       num_hai_patients_by_stratum_prior = num_hai_patients_by_stratum_prior,
#'                       num_survey_patients = num_survey_patients,
#'                       length_of_stay = length_of_stay,
#'                       loi_pps = loi_pps,
#'                       mccabe_scores_distr = mccabe_scores_distr,
#'                       mccabe_life_exp = mccabe_life_exp,
#'                       hospital_discharges = hospital_discharges,
#'                       population = population,
#'                       country="Germany (representative sample)")
#' german_pps_repr
#'
#' set.seed(3)
#' # The following example is run for illustrative purposes only
#' # Note that you should never run the function with only 10 Monte Carlo simulations in practice!
#' result = bhai(german_pps_repr, nsim=10)
#' bhai.strataplot(pps=result, infection="HAP", what="Cases")
#'
#' @export
bhai.strataplot = function(pps, infection, what, col=NULL, errors=TRUE, lwd.errors=2, xlab=NULL, ...) {
  bhai_summary = pps@bhai_summary[[infection]]
  data_by_stratum2 = NULL
  if(what == "DALY") {
    data_by_stratum = bhai_summary$stratum_specific_results$DALY
    data_by_stratum2 = bhai_summary$stratum_specific_results$YLL
    if(is.null(col)) {
      col = c("blue", "red")
    }
    col1 = col[1]
    col2 = col[2]
  }
  if(what == "Cases") {
    data_by_stratum = bhai_summary$stratum_specific_results$ncases
    if(is.null(col)) {
      col = c("grey")
    }
    col1 = col[1]
    col2 = col[1]
  }
  if(what == "Deaths") {
    data_by_stratum = bhai_summary$stratum_specific_results$ndeath
    if(is.null(col)) {
      col = c("grey")
    }
    col1 = col[1]
    col2 = col[1]
  }
  if(is.null(xlab)) {
    xlab = switch(what,
                  Cases = "No. of cases ",
                  Deaths = "No. of deaths ",
                  DALY = "DALY ")
    xlab = paste(xlab, "(", infection, ")", sep="")
  }
  oldpar <- par(mfrow=c(1,2), mar=c(5, 2, 4, 2.4))
  on.exit(par(oldpar))
  # Female strata are drawn to the left (negative axis), male strata to the right
  barcenters = barplot(-t(data_by_stratum[["F"]][,2]), main="Female", legend=TRUE,
                       names.arg=rep("", 19), args.legend=list(x="topleft"), ylab="", las=2,
                       xlab=xlab, xlim=c(-max(unlist(data_by_stratum), na.rm=TRUE), 0),
                       horiz=TRUE, col=col1, axes=FALSE, ...)
  if(!is.null(data_by_stratum2)) {
    barplot(-t(data_by_stratum2[["F"]][,2]), col=col2, add=TRUE, horiz=TRUE, axes=FALSE,
            names.arg=rep("", 19), ...)
  }
  if(errors) {
    segments(-t(data_by_stratum[["F"]][,1]), barcenters, -t(data_by_stratum[["F"]][,3]), barcenters, lwd = lwd.errors)
    segments(-t(data_by_stratum[["F"]][,1]), barcenters+0.25, -t(data_by_stratum[["F"]][,1]), barcenters-0.25, lwd = lwd.errors)
    segments(-t(data_by_stratum[["F"]][,3]), barcenters+0.25, -t(data_by_stratum[["F"]][,3]), barcenters-0.25, lwd = lwd.errors)
  }
  currTicks = axisTicks(c(0, max(unlist(data_by_stratum), na.rm=TRUE)), log=FALSE)
  axis(1, at=-currTicks, currTicks, cex.axis=0.8)
  mynames = gsub("-0", "", gsub("-Inf", "+", gsub(";", "-", gsub("\\[|\\]", "", rownames(data_by_stratum[["F"]])))))
  barcenters = barplot(t(data_by_stratum[["M"]][,2]), main="Male", legend=TRUE, names.arg=mynames,
                       args.legend=list(x="topleft"), ylab="", las=2, xlab=xlab,
                       xlim=c(0, max(unlist(data_by_stratum), na.rm=TRUE)), horiz=TRUE, col=col1,
                       cex.axis=0.75, axes=FALSE, ...)
  if(!is.null(data_by_stratum2)) {
    barplot(t(data_by_stratum2[["M"]][,2]), col=col2, add=TRUE, horiz=TRUE, axes=FALSE,
            names.arg=rep("", 19), ...)
  }
  if(errors) {
    segments(t(data_by_stratum[["M"]][,1]), barcenters, t(data_by_stratum[["M"]][,3]), barcenters, lwd = lwd.errors)
    segments(t(data_by_stratum[["M"]][,1]), barcenters+0.25, t(data_by_stratum[["M"]][,1]), barcenters-0.25, lwd = lwd.errors)
    segments(t(data_by_stratum[["M"]][,3]), barcenters+0.25, t(data_by_stratum[["M"]][,3]), barcenters-0.25, lwd = lwd.errors)
  }
  axis(1, at=currTicks, currTicks, cex.axis=0.8)
}

#' Barplot of cases, deaths and DALYs.
#'
#' @title Barplot of cases, deaths and DALYs.
#'
#' @param ... PPS objects whose summaries should be plotted; alternatively a single list of PPS objects.
#' @param what One of c("Cases", "Deaths", "DALY")
#' @param infections If specified, only a subset of the infections in the PPS summaries is plotted.
#' @param cols1 Color used to fill the bars.
#' @param cols2 Specifies colors of YLDs when plotting DALYs.
#' @param ylab Y-axis labels.
#' @param ylim Limits of y-axis.
#' @param legend_labs Labels of legend.
#' @param main Title of plot
#' @param names.inf Specifying whether names of infections should be plotted.
#' @param cex.names Font size of labels.
#' @param border The color to be used for the border of the bars, default: par("fg").
#' @param lwd.errors Line width of error bars.
#'
#' @usage bhai.barplot(..., what, infections=NULL, cols1=NULL, cols2=NULL, ylab=NULL, ylim=NULL,
#' legend_labs=NULL, main="", names.inf=TRUE, cex.names=1, border=par("fg"), lwd.errors=2)
#'
#' @seealso \code{\linkS4class{PPS}}
#'
#' @examples
#'
#' data(german_pps_2011_repr)
#' german_pps_repr = PPS(num_hai_patients = num_hai_patients,
#'                       num_hai_patients_by_stratum = num_hai_patients_by_stratum,
#'                       num_hai_patients_by_stratum_prior = num_hai_patients_by_stratum_prior,
#'                       num_survey_patients = num_survey_patients,
#'                       length_of_stay = length_of_stay,
#'                       loi_pps = loi_pps,
#'                       mccabe_scores_distr = mccabe_scores_distr,
#'                       mccabe_life_exp = mccabe_life_exp,
#'                       hospital_discharges = hospital_discharges,
#'                       population = population,
#'                       country="Germany (representative sample)")
#' german_pps_repr
#'
#' set.seed(3)
#' # The following example is run for illustrative purposes only
#' # Note that you should never run the function with only 10 Monte Carlo simulations in practice!
#' result_ger = bhai(german_pps_repr, nsim=10)
#'
#' bhai.barplot(result_ger, what="Cases")
#'
#' @export
bhai.barplot = function(..., what, infections=NULL, cols1=NULL, cols2=NULL, ylab=NULL, ylim=NULL,
                        legend_labs=NULL, main="", names.inf=TRUE, cex.names=1, border=par("fg"),
                        lwd.errors=2) {
  pps_objects = list(...)
  # A single list of PPS objects may be supplied instead of individual objects
  if(is.list(pps_objects[[1]])) {
    pps_objects = pps_objects[[1]]
  }
  bhai_summaries = lapply(pps_objects, function(x) x@bhai_summary)
  names(bhai_summaries) = sapply(pps_objects, function(x) x@country)
  pop_demographics_list = lapply(pps_objects, function(x) x@population)
  if(is.null(ylab)) {
    ylab = switch(what,
                  Cases = "No. of cases per 100,000",
                  Deaths = "No. of deaths per 100,000",
                  DALY = "DALY per 100,000")
  }
  if(is.null(infections)) {
    infections = names(bhai_summaries[[1]])
    infections = infections[infections != "ALL"]
  }
  # Rescale all estimates to rates per 100,000 population
  pop_norm = sapply(pop_demographics_list, function(x) 1/sum(x)*100000)
  median_all = do.call("rbind", lapply(infections, function(x)
    c(sapply(1:length(bhai_summaries), function(y) bhai_summaries[[y]][[x]]$TOTAL[what, "50%"]*pop_norm[y]), NA)))
  confint_lower = as.vector(t(do.call("rbind", lapply(infections, function(x)
    c(sapply(1:length(bhai_summaries), function(y) bhai_summaries[[y]][[x]]$TOTAL[what, "2.5%"]*pop_norm[y]), NA)))))
  confint_upper = as.vector(t(do.call("rbind", lapply(infections, function(x)
    c(sapply(1:length(bhai_summaries), function(y) bhai_summaries[[y]][[x]]$TOTAL[what, "97.5%"]*pop_norm[y]), NA)))))
  if(is.null(cols1)) {
    cols1 = colorRampPalette(c("gray40", "lightgrey"))(length(bhai_summaries))
  } else {
    if(length(cols1) == 1) {
      cols1 = rep(cols1, length(bhai_summaries))
    }
  }
  median_all_2 = NULL
  if(what == "DALY") {
    median_all_2 = do.call("rbind", lapply(infections, function(x)
      c(sapply(1:length(bhai_summaries), function(y) bhai_summaries[[y]][[x]]$TOTAL["YLL", "50%"]*pop_norm[y]), NA)))
    if(is.null(cols2)) {
      cols1 = colorRampPalette(c("red", "gold"))(length(bhai_summaries))
      cols2 = colorRampPalette(c("blue", "lightblue"))(length(bhai_summaries))
    } else {
      if(length(cols2) == 1) {
        cols2 = rep(cols2, length(bhai_summaries))
      }
    }
  }
  confint_all = list()
  for(i in 1:length(bhai_summaries)) {
    confint_all[[i]] = do.call("rbind", lapply(infections, function(x)
      bhai_summaries[[i]][[x]]$TOTAL[what, c("2.5%", "97.5%")]))*pop_norm[[i]]
  }
  if(is.null(ylim)) {
    ylim = c(0, max(unlist(confint_upper), na.rm=TRUE))*1.2
  }
  curr_barplot = barplot(as.vector(t(median_all)), col=c(cols1, NA), ylab=ylab, ylim=ylim,
                         beside=TRUE, main=main, border=border)
  if(names.inf) {
    axis(1, at=curr_barplot[seq(ceiling(length(bhai_summaries)/2),
                                length(curr_barplot)-ceiling(length(bhai_summaries)/2),
                                length=length(infections))],
         labels=infections, cex.axis=cex.names)
  } else {
    axis(1, at=curr_barplot, colnames(median_all), las=2, cex.axis=cex.names)
  }
  # axis(1, at=seq(range(curr_barplot)[1], range(curr_barplot)[2], length=5), labels=infections)
  yAxis = axisTicks(ylim, log=FALSE)
  abline(h=yAxis, col="grey")
  barplot(as.vector(t(median_all)), col=c(cols1, NA), add=TRUE, beside=TRUE, border=border)
  if(!is.null(median_all_2)) {
    barplot(as.vector(t(median_all_2)), col=c(cols2, NA), add=TRUE, border=border)
  }
  segments(curr_barplot, confint_lower, curr_barplot, confint_upper, lwd = lwd.errors)
  segments(curr_barplot-0.1, confint_lower, curr_barplot+0.1, confint_lower, lwd = lwd.errors)
  segments(curr_barplot-0.1, confint_upper, curr_barplot+0.1, confint_upper, lwd = lwd.errors)
  if(what == "DALY") {
    if(is.null(legend_labs)) {
      legend_labs = unlist(lapply(unique(names(bhai_summaries)), function(x) c(paste(x, ":", sep=""), "YLD", "YLL")))
    }
    legend("topright", legend_labs, col=sapply(1:length(bhai_summaries), function(x) c(NA, cols1[x], cols2[x])), pch=15, bty="n")
  } else {
    if(is.null(legend_labs)) {
      legend_labs = unique(names(bhai_summaries))
    }
    legend("topright", legend_labs, col=cols1, pch=15, bty="n")
  }
}
/scratch/gouwar.j/cran-all/cranData/BHAI/R/plotting.R
#' Simulate PPS data
#'
#' @title Simulate PPS data
#'
#' @param pps_data The PPS object containing the data. Parameters for the simulations are extracted from this data.
#' @param num_survey_patients Numeric vector indicating sample sizes for the simulations.
#'
#' @seealso \code{\linkS4class{PPS}}
#'
#' @return A list of simulated PPS objects, one per requested sample size.
#'
#' @examples
#'
#' # Specify the number of survey patients
#' sim_survey_patients = 10000
#' # Subsample data sets from the European PPS
#' sim_pps = sample.pps(eu_pps, num_survey_patients = sim_survey_patients)
#'
#' @export
sample.pps = function(pps_data, num_survey_patients) {
  pps_list = list()
  for(curr_num_survey_patients in num_survey_patients) {
    # Get (fixed) parameters from PPS data
    prev = pps_data@num_hai_patients/pps_data@num_survey_patients
    length_of_stay = pps_data@length_of_stay
    # Get number of patients for current sample size
    num_hai_patients = round(curr_num_survey_patients*prev)
    # Sample length of infection
    loi_pps = lapply(names(pps_data@loi_pps), function(x) sample(pps_data@loi_pps[[x]], num_hai_patients[x], replace=TRUE))
    names(loi_pps) = names(pps_data@loi_pps)
    # Calculate observed age-gender distribution from PPS data
    prob_hai_patients_by_stratum = lapply(pps_data@num_hai_patients_by_stratum, function(x) x/sum(x))
    # Distribute sampled cases into the 38 age-gender categories (19 age groups x 2 genders)
    num_hai_patients_by_stratum = lapply(names(prob_hai_patients_by_stratum), function(x)
      matrix(table(factor(sample(1:38, num_hai_patients[x], prob=prob_hai_patients_by_stratum[[x]], replace=TRUE), levels=1:38)), ncol=2))
    names(num_hai_patients_by_stratum) = names(prob_hai_patients_by_stratum)
    # Create a sample of McCabe scores
    mccabe_scores_distr = list()
    for(curr_inf in names(pps_data@mccabe_scores_distr)) {
      # Strata with > 0 cases are sampled
      sample_this = as.vector(num_hai_patients_by_stratum[[curr_inf]])
      # McCabe score distribution for each stratum
      all_probs = apply(do.call("rbind", lapply(pps_data@mccabe_scores_distr[[curr_inf]], as.vector)), 2, function(x) x/sum(x))
      # Create McCabe score distribution for current sample
      new_mccabe = list()
      for(i in as.character(1:3)) {
        new_mccabe[[i]] = matrix(0, nrow=19, ncol=2)
      }
      for(i in 1:ncol(all_probs)) {
        # Strata with > 0 cases are sampled
        if(sample_this[i] > 0) {
          tab = table(factor(sample(1:3, sample_this[i], prob=all_probs[,i], replace=TRUE), levels=1:3))
          for(j in as.character(1:3)) {
            new_mccabe[[j]][i] = tab[j]
          }
        }
      }
      names(new_mccabe) = names(pps_data@mccabe_scores_distr[[curr_inf]])
      mccabe_scores_distr[[curr_inf]] = new_mccabe
    }
    # Denominator for strata
    num_survey_patients_by_stratum = pps_data@num_survey_patients_by_stratum
    prob_survey_patients_by_stratum = lapply(num_survey_patients_by_stratum, function(x) x/sum(x))
    cases_mccabe = lapply(names(prob_survey_patients_by_stratum), function(x)
      Reduce("+", lapply(mccabe_scores_distr, function(y) y[[x]])))
    names(cases_mccabe) = names(prob_survey_patients_by_stratum)
    # Distribute sampled survey patients into age-gender categories
    num_survey_patients_by_stratum = table(factor(sample(1:length(unlist(prob_survey_patients_by_stratum)),
                                                         curr_num_survey_patients-sum(unlist(cases_mccabe)),
                                                         prob=unlist(prob_survey_patients_by_stratum), replace=TRUE),
                                                  levels=1:length(unlist(prob_survey_patients_by_stratum))))
    num_survey_patients_by_stratum = list(matrix(num_survey_patients_by_stratum[as.character(1:38)], ncol=2),
                                          matrix(num_survey_patients_by_stratum[as.character(39:76)], ncol=2),
                                          matrix(num_survey_patients_by_stratum[as.character(77:114)], ncol=2))
    names(num_survey_patients_by_stratum) = names(prob_survey_patients_by_stratum)
    for(i in names(num_survey_patients_by_stratum)) {
      num_survey_patients_by_stratum[[i]] = num_survey_patients_by_stratum[[i]]+cases_mccabe[[i]]
    }
    curr_pps_data = pps_data
    curr_pps_data@num_hai_patients = num_hai_patients
    curr_pps_data@num_survey_patients = curr_num_survey_patients
    curr_pps_data@loi_pps = loi_pps
    curr_pps_data@num_hai_patients_by_stratum = num_hai_patients_by_stratum
    curr_pps_data@mccabe_scores_distr = mccabe_scores_distr
    curr_pps_data@num_hai_patients_by_stratum_prior = curr_pps_data@num_hai_patients_by_stratum
    curr_pps_data@mccabe_by_stratum_prior = pps_data@mccabe_scores_distr
    curr_pps_data@num_survey_patients_by_stratum = num_survey_patients_by_stratum
    pps_list[[as.character(curr_num_survey_patients)]] = curr_pps_data
  }
  pps_list
}
/scratch/gouwar.j/cran-all/cranData/BHAI/R/sample.pps.R
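# A compact sketch (toy numbers, not package code) of the idiom sample.pps() uses
# to distribute a given number of cases over the 38 age-gender strata: draw
# stratum indices multinomially via sample(), then tabulate with fixed factor
# levels so that empty strata keep a zero count instead of being dropped.
set.seed(42)
probs = rep(1/38, 38)                     # stratum probabilities (here uniform)
idx = sample(1:38, 25, prob=probs, replace=TRUE)
counts = matrix(table(factor(idx, levels=1:38)), ncol=2)  # 19 age groups x 2 genders
sum(counts)  # all 25 simulated cases are preserved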
#'
#' Calculate lower bound of the Clopper-Pearson confidence interval for proportions
#' code is taken from binom.test()
#'
#' @title Calculate lower bound of the Clopper-Pearson confidence interval for proportions
#'
#' @param x Number of positive cases
#' @param alpha One-sided significance level
#' @param n Sample size
#'
#' @keywords internal
#' @noRd
p.L <- function(x, alpha, n) {
  if (x == 0) 0 else qbeta(alpha, x, n - x + 1)
}

#'
#' Calculate upper bound of the Clopper-Pearson confidence interval for proportions
#' code is taken from binom.test()
#'
#' @title Calculate upper bound of the Clopper-Pearson confidence interval for proportions
#'
#' @param x Number of positive cases
#' @param alpha One-sided significance level
#' @param n Sample size
#'
#' @keywords internal
#' @noRd
p.U <- function(x, alpha, n) {
  if (x == n) 1 else qbeta(1 - alpha, x + 1, n - x)
}

#'
#' Sample HAI prevalence from a mixture of beta distributions
#'
#' @title Sample HAI prevalence from a mixture of beta distributions
#'
#' @param size Number of values to sample
#' @param x Number of positive cases
#' @param n Sample size
#' @param alpha Mixture probability
#'
#' @keywords internal
#' @noRd
rbetamix <- function(size, x, n, alpha=0.5) {
  # With alpha=0.5 this mixes the lower- and upper-tail beta posteriors of the
  # Clopper-Pearson interval, i.e. the mid-P variant
  U <- runif(size)
  I <- as.numeric(U < alpha)
  y <- I*rbeta(size, x, n - x + 1) + (1-I)*rbeta(size, x + 1, n - x)
  return(y)
}

#'
#' Sample HAI prevalence from a PERT distribution
#' Code is taken from https://www.riskamp.com/beta-pert
#'
#' @title Sample HAI prevalence from a PERT distribution
#'
#' @param n Number of values to sample
#' @param x.min Lower bound of distribution
#' @param x.max Upper bound of distribution
#' @param x.mode Mode of distribution
#' @param lambda Shape parameter
#'
#' @keywords internal
#' @noRd
rpert <- function(n, x.min, x.max, x.mode, lambda = 4) {
  if(x.min > x.max || x.mode > x.max || x.mode < x.min) stop("invalid parameters")
  x.range <- x.max - x.min
  if(x.range == 0) return(rep(x.min, n))
  mu <- (x.min + x.max + lambda * x.mode) / (lambda + 2)
  # special case if mu == mode
  if(mu == x.mode) {
    v <- (lambda / 2) + 1
  } else {
    v <- ((mu - x.min) * (2 * x.mode - x.min - x.max)) / ((x.mode - mu) * (x.max - x.min))
  }
  w <- (v * (x.max - mu)) / (mu - x.min)
  return(rbeta(n, v, w) * x.range + x.min)
}

#'
#' Sample disease outcome tree parameters from a uniform distribution
#'
#' @title Sample disease outcome tree parameters from a uniform distribution
#'
#' @param nstrata Number of strata, i.e. size of the sample to draw
#' @param lower Lower bound of uniform distribution
#' @param upper Upper bound of uniform distribution
#' @param pop.level Indicating whether outcome trees are sampled on population level
#' @param list.names Names of output list (McCabe scores)
#' @param n Number of McCabe scores
#'
#' @keywords internal
#' @noRd
sample.runif = function(nstrata, lower, upper, pop.level, list.names=NULL, n=3) {
  curr_sample = list()
  if(pop.level) {
    # One common draw shared by all strata
    curr_values = rep(runif(1, lower, upper), nstrata)
    for(i in 1:n) {
      curr_sample[[i]] = matrix(curr_values, ncol=2)
    }
  } else {
    # An independent draw for every stratum
    for(i in 1:n) {
      curr_sample[[i]] = matrix(runif(nstrata, lower, upper), ncol=2)
    }
  }
  names(curr_sample) = list.names
  curr_sample
}

#'
#' Sample disease outcome tree parameters from a PERT distribution
#'
#' @title Sample disease outcome tree parameters from a PERT distribution
#'
#' @param nstrata Number of strata, i.e. size of the sample to draw
#' @param min Lower bound of PERT distribution
#' @param max Upper bound of PERT distribution
#' @param mode Mode of PERT distribution
#' @param pop.level Indicating whether outcome trees are sampled on population level
#' @param list.names Names of output list (McCabe scores)
#' @param n Number of McCabe scores
#'
#' @keywords internal
#' @noRd
sample.rpert = function(nstrata, min, max, mode, pop.level, list.names=NULL, n=3) {
  curr_sample = list()
  if(pop.level) {
    curr_values = rep(rpert(1, min, max, mode), nstrata)
    for(i in 1:n) {
      curr_sample[[i]] = matrix(curr_values, ncol=2)
    }
  } else {
    for(i in 1:n) {
      curr_sample[[i]] = matrix(rpert(nstrata, min, max, mode), ncol=2)
    }
  }
  names(curr_sample) = list.names
  curr_sample
}

#'
#' Sample number of cases for Monte Carlo simulation
#'
#' @title Sample number of cases for Monte Carlo simulation
#'
#' @param nhai Number of patients in PPS with HAI
#' @param npatients Total number of patients in PPS
#' @param la Average length of stay of patients in PPS
#' @param loi Length of infection from PPS as a numeric vector
#' @param discharges Number of yearly hospital discharges
#' @param mccabe_life_exp A list containing life expectancy according to McCabe scores
#' @param mccabe_scores_distr Number of cases by HAI and McCabe scores
#' @param ncases_by_stratum Number of cases stratified by HAI, age and gender
#' @param sample_distr The distribution for the prevalence sampling. Default is 'rbetamix', which uses the mid-P Clopper-Pearson interval. Alternatives are 'rpert' - which uses the PERT distribution - and 'bcode', which uses the sampling procedure described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150>.
#' @param age_prior The prior weight (counts) for each infection, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param p_age Number of survey patients stratified by infection, age and gender. If this parameter is provided, the methodology described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150> is applied.
#' @param mccabe_prior The prior weight (counts) for each infection, McCabe score, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param estimate_loi Function to use for the estimation of the length of infection.
#'
#' @keywords internal
#' @noRd
sample.ncases = function(nhai, npatients, la, loi, discharges, mccabe_life_exp, mccabe_scores_distr,
                         ncases_by_stratum, sample_distr="rbetamix", age_prior=NULL, p_age=NULL,
                         mccabe_prior=NULL, estimate_loi) {
  loi = estimate_loi(loi[[1]])
  ncases_hai = NULL
  if(sample_distr == "rpert") {
    point_estimate_hai = nhai/npatients * la/loi * sum(discharges)
    point_estimate_confint = c(p.L(nhai, 0.025, npatients), p.U(nhai, 0.025, npatients)) * la/loi * sum(discharges)
    ncases_hai = rpert(1, point_estimate_confint[1], point_estimate_confint[2], point_estimate_hai)
  } else if(sample_distr == "rbetamix") {
    ncases_hai = rbetamix(1, nhai, npatients) * la/loi * sum(discharges)
  } else if(sample_distr == "bcode") {
    point_estimate_hai = nhai/npatients * la/loi * sum(discharges)
    point_estimate_confint = c(p.L(nhai, 0.025, npatients), p.U(nhai, 0.025, npatients)) * la/loi * sum(discharges)
    ncases_hai = rpert(1, point_estimate_confint[1], point_estimate_confint[2], point_estimate_hai)
    if(point_estimate_hai == 0) {
      ncases_hai = runif(1, point_estimate_hai, point_estimate_confint[2])
    }
  } else {
    stop("sample_distr must be one of c(\"rpert\", \"rbetamix\", \"bcode\")")
  }

  hai_age_distr = ncases_by_stratum
  if(length(age_prior) > 0) {
    age_gender_prior = age_prior
    hai_age_distr = matrix(MCmultinomdirichlet(as.vector(ncases_by_stratum), as.vector(age_gender_prior), mc=1), ncol=2)
    rownames(hai_age_distr) = rownames(ncases_by_stratum)
    colnames(hai_age_distr) = colnames(ncases_by_stratum)
  } else {
    hai_age_distr = hai_age_distr # + (1/length(hai_age_distr))
    hai_age_distr = hai_age_distr/sum(hai_age_distr)
  }

  if(length(mccabe_prior) > 0) {
    mccabe_scores_distr = mccabe_scores_distr[names(mccabe_prior)]
    for(i in 1:nrow(mccabe_prior[[1]])) {
      for(j in 1:ncol(mccabe_prior[[1]])) {
        curr_sample = as.vector(MCmultinomdirichlet(as.vector(sapply(mccabe_scores_distr, function(x) x[i,j])),
                                                    as.vector(sapply(mccabe_prior, function(x) x[i,j])), mc=1))
        names(curr_sample) = names(mccabe_prior)
        for(k in names(mccabe_prior)) {
          mccabe_scores_distr[[k]][i,j] = curr_sample[k]
        }
      }
    }
  } else {
    all_mccabe = Reduce("+", mccabe_scores_distr)
    for(i in 1:length(mccabe_scores_distr)) {
      mccabe_scores_distr[[i]] = mccabe_scores_distr[[i]]/all_mccabe
      mccabe_scores_distr[[i]][is.na(mccabe_scores_distr[[i]])] = 0
    }
  }

  ncases_hai = lapply(mccabe_scores_distr, function(x) x*hai_age_distr*ncases_hai)

  if(length(p_age) > 0) {
    p_age_all = sum(unlist(p_age))
    p_age_red = Reduce("+", p_age)
    p_age_mccabe = p_age
    for(k in names(mccabe_scores_distr)) {
      p_age_mccabe[[k]] = p_age[[k]]/p_age_red
      p_age[[k]] = p_age[[k]]/p_age_all
      curr_ncases_hai = ncases_by_stratum
      for(i in 1:nrow(ncases_by_stratum)) { # iterate over age groups
        for(j in 1:ncol(ncases_by_stratum)) { # iterate over gender
          curr_n = round(npatients * p_age[[k]][i,j])
          if(curr_n == 0) {
            curr_ncases_hai[i,j] = 0
          } else {
            # calculate prevalence for each stratum
            curr_ncases = round(ncases_by_stratum[i,j] * mccabe_scores_distr[[k]][i,j])
            if(curr_n < curr_ncases) {
              warning("Number of sampled cases exceeds the stratum sample size!")
            }
            curr_prev = (curr_ncases / curr_n) # / p_age[i,j]
            curr_conf_int = c(p.L(curr_ncases, 0.025, curr_n), p.U(curr_ncases, 0.025, curr_n))
            curr_prev = curr_prev * la/loi * discharges[i,j] * p_age_mccabe[[k]][i,j]
            curr_conf_int = curr_conf_int * la/loi * discharges[i,j] * p_age_mccabe[[k]][i,j]
            if(sample_distr == "rpert") {
              curr_ncases_hai[i,j] = rpert(1, curr_conf_int[1], curr_conf_int[2], curr_prev)
            }
            if(sample_distr == "bcode") {
              curr_ncases_hai[i,j] = rpert(1, curr_conf_int[1], curr_conf_int[2], curr_prev)
              if(curr_prev == 0) {
                curr_ncases_hai[i,j] = runif(1, curr_prev, curr_conf_int[2])
              }
            }
            if(sample_distr == "rbetamix") {
              curr_ncases_hai[i,j] = rbetamix(1, curr_ncases, curr_n) * la/loi * discharges[i,j] * p_age_mccabe[[k]][i,j]
            }
          }
        }
      }
      ncases_hai[[k]] = curr_ncases_hai
    }
  }
  names(ncases_hai) = names(mccabe_scores_distr)
  ncases_hai
}
/scratch/gouwar.j/cran-all/cranData/BHAI/R/sampling.R
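# A short standalone sketch (invented numbers) of the prevalence samplers defined
# above: rbetamix() draws from the mixture of the two beta distributions that make
# up the mid-P Clopper-Pearson interval, while rpert() draws from a PERT
# distribution spanning the exact Clopper-Pearson interval around the point estimate.
set.seed(1)
x = 80      # patients with HAI in the survey
n = 10000   # surveyed patients
prev_betamix = rbetamix(5, x, n)
prev_pert = rpert(5, x.min=p.L(x, 0.025, n), x.max=p.U(x, 0.025, n), x.mode=x/n)
round(cbind(prev_betamix, prev_pert), 5)  # both hover around the crude prevalence 0.008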
#' Performs simulation of the outcome tree of bloodstream infection (BSI) for DALY calculation
#'
#' @param nhai Named numeric containing the number of patients with healthcare-associated infections.
#' @param npatients Number of patients in the point prevalence survey.
#' @param la Length of stay of all patients in hospitals. This is needed for the prevalence-to-incidence conversion with the Rhame-Sudderth formula.
#' @param loi A list containing the lengths of infection of all patients in the PPS. In a PPS this is usually approximated as the time from infection onset until the date of the survey.
#' @param discharges The number of hospital discharges.
#' @param ncases_by_stratum A list containing for each infection the number of patients in each age and gender stratum.
#' @param age_prior The prior weight (counts) for each infection, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param mccabe_scores_distr The observed McCabe scores (counts) for each infection, age and gender stratum from the PPS.
#' @param mccabe_prior The prior weight (counts) for each infection, McCabe score, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param mccabe_life_exp Named list containing remaining life expectancies for each McCabe score (NONFATAL, ULTFATAL, RAPFATAL).
#' @param pop.level Indicating whether the sampling of the outcome tree parameters (e.g. probability of death, disability weights) should be carried out in each stratum separately or for the whole population.
#' @param sample_distr The distribution for the prevalence sampling. Default is 'rbetamix', which uses the mid-P Clopper-Pearson interval. Alternatives are 'rpert' - which uses the PERT distribution - and 'bcode', which uses the sampling procedure described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150>.
#' @param p_age Number of survey patients stratified by infection, age and gender. If this parameter is provided, the methodology described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150> is applied.
#' @param estimate_loi Function to use for the estimation of the length of infection.
#'
#' @seealso
#' Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150>
#' Colzani et al. (2017) <doi:https://doi.org/10.1371/journal.pone.0170662>
#'
#' @keywords internal
#' @noRd
sim.bsi = function(nhai, npatients, la, loi, discharges, mccabe_life_exp, mccabe_scores_distr,
                   ncases_by_stratum, sample_distr="rbetamix", age_prior, p_age=NULL, pop.level,
                   mccabe_prior, estimate_loi) {
  ncases_hai = sample.ncases(nhai, npatients, la, loi, discharges, mccabe_life_exp, mccabe_scores_distr,
                             ncases_by_stratum, sample_distr, age_prior, p_age=p_age, mccabe_prior,
                             estimate_loi=estimate_loi)
  output = list()
  output$ncases_hai = ncases_hai # sum(unlist(ncases_hai))

  # Deaths and years of life lost (YLL)
  prob_death = sample.runif(19*2, 0.094, 0.203, pop.level, names(ncases_hai))
  ndeath_hai = list()
  for(i in names(ncases_hai)) {
    ndeath_hai[[i]] = ncases_hai[[i]]*prob_death[[i]]
  }
  names(ndeath_hai) = names(ncases_hai)
  output$ndeath_hai = ndeath_hai # sum(unlist(ndeath_hai))
  yll_hai = ndeath_hai
  for(i in names(mccabe_life_exp)) {
    yll_hai[[i]] = yll_hai[[i]]*mccabe_life_exp[[i]]
  }
  output$yll_hai = yll_hai # sum(unlist(yll_hai))

  # Symptomatic infection (uncomplicated and complicated courses)
  prop_cases_uncomplicated = sample.runif(19*2, 0.53, 0.66, pop.level, names(ncases_hai))
  weight_symptomatic_infection_uncomplicated = sample.rpert(19*2, 0.104, 0.152, 0.125, pop.level, names(ncases_hai))
  duration_symptomatic_infection_uncomplicated = sample.runif(19*2, 0.027, 0.036, pop.level, names(ncases_hai))
  weight_symptomatic_infection_complicated = sample.rpert(19*2, 0.579, 0.727, 0.655, pop.level, names(ncases_hai))
  duration_symptomatic_infection_complicated = sample.runif(19*2, 0.027, 0.036, pop.level, names(ncases_hai))
  daly_symptomatic_infection_uncomplicated = list()
  daly_symptomatic_infection_complicated = list()
  prop_cases_complicated = list()
  for(i in names(ncases_hai)) {
    prop_cases_complicated[[i]] = 1-prop_cases_uncomplicated[[i]]
    daly_symptomatic_infection_uncomplicated[[i]] = ncases_hai[[i]]*prop_cases_uncomplicated[[i]]*
      weight_symptomatic_infection_uncomplicated[[i]]*duration_symptomatic_infection_uncomplicated[[i]]
    daly_symptomatic_infection_complicated[[i]] = ncases_hai[[i]]*prop_cases_complicated[[i]]*
      weight_symptomatic_infection_complicated[[i]]*duration_symptomatic_infection_complicated[[i]]
  }
  output$daly_symptomatic_infection_uncomplicated = daly_symptomatic_infection_uncomplicated # sum(unlist(daly_symptomatic_infection_uncomplicated))
  output$daly_symptomatic_infection_complicated = daly_symptomatic_infection_complicated # sum(unlist(daly_symptomatic_infection_complicated))

  # Post-traumatic stress disorder
  prob_post_traumatic_disorder = sample.runif(19*2, 0.13, 0.21, pop.level, names(ncases_hai))
  weight_post_traumatic_disorder = sample.rpert(19*2, 0.07, 0.108, 0.088, pop.level, names(ncases_hai))
  post_traumatic_disorder = list()
  for(i in names(ncases_hai)) {
    post_traumatic_disorder_trans = prop_cases_complicated[[i]]*prob_post_traumatic_disorder[[i]]
    post_traumatic_disorder[[i]] = ncases_hai[[i]]*post_traumatic_disorder_trans* # Transition probability
      weight_post_traumatic_disorder[[i]]
    post_traumatic_disorder[[i]] = post_traumatic_disorder[[i]]*mccabe_life_exp[[i]]
  }
  output$post_traumatic_disorder = post_traumatic_disorder # sum(unlist(post_traumatic_disorder))

  # Cognitive impairment
  prob_cognitive_impairment = sample.runif(19*2, 0.11, 0.47, pop.level, names(ncases_hai))
  weight_cognitive_impairment = sample.rpert(19*2, 0.026, 0.064, 0.043, pop.level, names(ncases_hai))
  cognitive_impairment = list()
  for(i in names(ncases_hai)) {
    cognitive_impairment_trans = prop_cases_complicated[[i]]*prob_cognitive_impairment[[i]]
    cognitive_impairment[[i]] = ncases_hai[[i]]*cognitive_impairment_trans* # Transition probability
      weight_cognitive_impairment[[i]]
    cognitive_impairment[[i]] = cognitive_impairment[[i]]*mccabe_life_exp[[i]]
  }
  output$cognitive_impairment = cognitive_impairment # sum(unlist(cognitive_impairment))

  # Physical impairment
  weight_physical_impairment = sample.runif(19*2, 0.011, 0.053, pop.level, names(ncases_hai))
  physical_impairment = list()
  for(i in names(ncases_hai)) {
    physical_impairment_trans = prop_cases_complicated[[i]]
    physical_impairment[[i]] = ncases_hai[[i]]*physical_impairment_trans* # Transition probability
      weight_physical_impairment[[i]]
    physical_impairment[[i]] = physical_impairment[[i]]*mccabe_life_exp[[i]]
  }
  output$physical_impairment = physical_impairment # sum(unlist(physical_impairment))

  # Renal failure
  prob_renal_failure = sample.runif(19*2, 0.009, 0.013, pop.level, names(ncases_hai))
  weight_renal_failure = sample.runif(19*2, 0.03, 0.487, pop.level, names(ncases_hai))
  renal_failure = list()
  for(i in names(ncases_hai)) {
    renal_failure_trans = prop_cases_complicated[[i]]*prob_renal_failure[[i]]
    renal_failure[[i]] = ncases_hai[[i]]*renal_failure_trans* # Transition probability
      weight_renal_failure[[i]]
    renal_failure[[i]] = renal_failure[[i]]*mccabe_life_exp[[i]]
  }
  output$renal_failure = renal_failure # sum(unlist(renal_failure))

  output
}
/scratch/gouwar.j/cran-all/cranData/BHAI/R/sim.bsi.R
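# A toy numeric sketch (all values invented) of the DALY bookkeeping used in the
# sim.* functions above: each health outcome contributes
# cases x transition probability x disability weight x duration (YLD), and each
# death contributes the remaining life expectancy of its McCabe stratum (YLL).
cases = 1000
prob_death = 0.15
life_exp = 12      # remaining life expectancy in years
yll = cases * prob_death * life_exp
trans_prob = 0.4   # e.g. proportion of complicated courses
weight = 0.125     # disability weight of the health state
duration = 0.03    # duration in years
yld = cases * trans_prob * weight * duration
daly = yll + yld   # 1800 + 1.5 = 1801.5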
#' Performs simulation of the outcome tree of C. difficile infection (CDI) for DALY calculation
#'
#' @param nhai Named numeric containing the number of patients with healthcare-associated infections.
#' @param npatients Number of patients in the point prevalence survey.
#' @param la Length of stay of all patients in hospitals. This is needed for the prevalence-to-incidence conversion with the Rhame-Sudderth formula.
#' @param loi A list containing the lengths of infection of all patients in the PPS. In a PPS this is usually approximated as the time from infection onset until the date of the survey.
#' @param discharges The number of hospital discharges.
#' @param ncases_by_stratum A list containing for each infection the number of patients in each age and gender stratum.
#' @param age_prior The prior weight (counts) for each infection, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param mccabe_scores_distr The observed McCabe scores (counts) for each infection, age and gender stratum from the PPS.
#' @param mccabe_prior The prior weight (counts) for each infection, McCabe score, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param mccabe_life_exp Named list containing remaining life expectancies for each McCabe score (NONFATAL, ULTFATAL, RAPFATAL).
#' @param pop.level Indicating whether the sampling of the outcome tree parameters (e.g. probability of death, disability weights) should be carried out in each stratum separately or for the whole population.
#' @param sample_distr The distribution for the prevalence sampling. Default is 'rbetamix', which uses the mid-P Clopper-Pearson interval. Alternatives are 'rpert' - which uses the PERT distribution - and 'bcode', which uses the sampling procedure described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150>.
#' @param p_age Number of survey patients stratified by infection, age and gender. If this parameter is provided, the methodology described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150> is applied.
#' @param estimate_loi Function to use for the estimation of the length of infection.
#'
#' @seealso
#' Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150>
#' Colzani et al. (2017) <doi:https://doi.org/10.1371/journal.pone.0170662>
#'
#' @keywords internal
#' @noRd
sim.cdi = function(nhai, npatients, la, loi, discharges, mccabe_life_exp, mccabe_scores_distr,
                   ncases_by_stratum, sample_distr="rbetamix", age_prior, p_age=NULL, pop.level,
                   mccabe_prior, estimate_loi) {
  ncases_hai = sample.ncases(nhai, npatients, la, loi, discharges, mccabe_life_exp, mccabe_scores_distr,
                             ncases_by_stratum, sample_distr, age_prior, p_age=p_age, mccabe_prior,
                             estimate_loi=estimate_loi)
  output = list()
  output$ncases_hai = ncases_hai # sum(unlist(ncases_hai))

  # Deaths and years of life lost (YLL)
  prob_death = sample.runif(19*2, 0, 0.11, pop.level, names(ncases_hai))
  ndeath_hai = list()
  for(i in names(ncases_hai)) {
    ndeath_hai[[i]] = ncases_hai[[i]]*prob_death[[i]]
  }
  names(ndeath_hai) = names(ncases_hai)
  output$ndeath_hai = ndeath_hai # sum(unlist(ndeath_hai))
  yll_hai = ndeath_hai
  for(i in names(mccabe_life_exp)) {
    yll_hai[[i]] = yll_hai[[i]]*mccabe_life_exp[[i]]
  }
  output$yll_hai = yll_hai # sum(unlist(yll_hai))

  # Symptomatic infection (uncomplicated and complicated courses)
  prop_cases_uncomplicated = sample.runif(19*2, 0.85, 0.985, pop.level, names(ncases_hai))
  weight_symptomatic_infection_uncomplicated = sample.runif(19*2, 0.073, 0.149, pop.level, names(ncases_hai))
  duration_symptomatic_infection_uncomplicated = sample.runif(19*2, 0, 0.0219, pop.level, names(ncases_hai))
  weight_symptomatic_infection_complicated = sample.rpert(19*2, 0.202, 0.285, 0.239, pop.level, names(ncases_hai))
  duration_symptomatic_infection_complicated = sample.runif(19*2, 0, 0.0219, pop.level, names(ncases_hai))
  daly_symptomatic_infection_uncomplicated = list()
  daly_symptomatic_infection_complicated = list()
  for(i in names(ncases_hai)) {
    prop_cases_complicated = 1-prop_cases_uncomplicated[[i]]
    daly_symptomatic_infection_uncomplicated[[i]] = ncases_hai[[i]]*prop_cases_uncomplicated[[i]]*
      weight_symptomatic_infection_uncomplicated[[i]]*duration_symptomatic_infection_uncomplicated[[i]]
    daly_symptomatic_infection_complicated[[i]] = ncases_hai[[i]]*prop_cases_complicated*
      weight_symptomatic_infection_complicated[[i]]*duration_symptomatic_infection_complicated[[i]]
  }
  output$daly_symptomatic_infection_uncomplicated = daly_symptomatic_infection_uncomplicated # sum(unlist(daly_symptomatic_infection_uncomplicated))
  output$daly_symptomatic_infection_complicated = daly_symptomatic_infection_complicated # sum(unlist(daly_symptomatic_infection_complicated))

  # Post-colectomy state
  prob_post_colectomy_state = sample.runif(19*2, 0.002, 0.038, pop.level, names(ncases_hai))
  weight_post_colectomy_state = sample.rpert(19*2, 0.104, 0.155, 0.125, pop.level, names(ncases_hai))
  post_colectomy_state = list()
  for(i in names(ncases_hai)) {
    post_colectomy_state_trans = prob_post_colectomy_state[[i]]
    post_colectomy_state[[i]] = ncases_hai[[i]]*post_colectomy_state_trans* # Transition probability
      weight_post_colectomy_state[[i]]
    post_colectomy_state[[i]] = post_colectomy_state[[i]]*mccabe_life_exp[[i]]
  }
  output$post_colectomy_state = post_colectomy_state # sum(unlist(post_colectomy_state))

  output
}
/scratch/gouwar.j/cran-all/cranData/BHAI/R/sim.cdi.R
#' Performs simulation of the outcome tree of healthcare-associated pneumonia (HAP) for DALY calculation
#'
#' @param nhai Named numeric containing patients having healthcare-associated infections.
#' @param npatients Number of patients in point prevalence survey.
#' @param la Length of stay of all patients in hospitals. This is needed for the prevalence to incidence conversion with the Rhame-Sudderth formula.
#' @param loi A list containing length of infections from all patients in the PPS. The length of infection of all healthcare-associated infections. In PPS this is usually approximated as the time from infection onset until the date of the survey.
#' @param discharges The number of hospital discharges.
#' @param ncases_by_stratum A list containing for each infection the number of patients in each age and gender stratum.
#' @param age_prior The prior weight (counts) for each infection, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param mccabe_scores_distr The observed McCabe scores (counts) for each infection, age and gender stratum from the PPS.
#' @param mccabe_prior The prior weight (counts) for each infection, McCabe score, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param mccabe_life_exp Named list containing remaining life expectancies for each McCabe score (NONFATAL, ULTFATAL, RAPFATAL).
#' @param pop.level Indicating whether the sampling of the outcome tree parameters (e.g. probability of death, disability weights) should be carried out in each stratum separately or for the whole population.
#' @param sample_distr The distribution for the prevalence sampling. Default is 'rbetamix' which uses the mid-P-Pearson-Clopper interval. Alternatives are 'rpert' - which uses the PERT distribution - and 'bcode' which uses the sampling procedures as described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150>.
#' @param infections Infections for which the burden should be calculated. Default is names(num_hai_patients).
#' @param p_age Number of survey patients stratified by infection, age and gender. If this parameter is provided the methodology described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150> is applied.
#' @param estimate_loi Function to use for the estimation of length of infection. Default is runif_bootstrap_rear_gren.
#'
#' @keywords internal
#' @seealso
#' Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150>
#' Colzani et al. (2017) <doi:https://doi.org/10.1371/journal.pone.0170662>
#'
#' @keywords internal
#' @noRd
sim.pn = function(nhai, npatients, la, loi, discharges, mccabe_life_exp,
  mccabe_scores_distr, ncases_by_stratum, sample_distr="rbetamix", age_prior,
  p_age=NULL, pop.level, mccabe_prior, estimate_loi) {

  ncases_hai = sample.ncases(nhai, npatients, la, loi, discharges,
    mccabe_life_exp, mccabe_scores_distr, ncases_by_stratum, sample_distr,
    age_prior, p_age=p_age, mccabe_prior, estimate_loi=estimate_loi)

  output = list()
  output$ncases_hai = ncases_hai
  # sum(unlist(ncases_hai))

  prob_death = sample.rpert(19*2, 0.001, 0.09, 0.035, pop.level, names(ncases_hai))
  ndeath_hai = list()
  for(i in names(ncases_hai)) {
    ndeath_hai[[i]] = ncases_hai[[i]]*prob_death[[i]]
  }
  names(ndeath_hai) = names(ncases_hai)
  output$ndeath_hai = ndeath_hai
  # sum(unlist(ndeath_hai))

  yll_hai = ndeath_hai
  for(i in names(mccabe_life_exp)) {
    yll_hai[[i]] = yll_hai[[i]]*mccabe_life_exp[[i]]
  }
  output$yll_hai = yll_hai
  # sum(unlist(yll_hai))

  weight_symptomatic_infection = sample.rpert(19*2, 0.104, 0.152, 0.125, pop.level, names(ncases_hai))
  duration_symptomatic_infection = sample.runif(19*2, 0.019, 0.031, pop.level, names(ncases_hai))
  daly_symptomatic_infection = list()
  for(i in names(ncases_hai)) {
    daly_symptomatic_infection[[i]] = ncases_hai[[i]]*weight_symptomatic_infection[[i]]*duration_symptomatic_infection[[i]]
  }
  output$daly_symptomatic_infection = daly_symptomatic_infection
  # sum(unlist(daly_symptomatic_infection))

  weight_severe_sepsis_shock = sample.rpert(19*2, 0.579, 0.727, 0.655, pop.level, names(ncases_hai))
  duration_severe_sepsis_shock = sample.runif(19*2, 0.027, 0.036, pop.level, names(ncases_hai))
  severe_sepsis_shock_trans = list()
  severe_sepsis_shock = list()
  for(i in names(ncases_hai)) {
    severe_sepsis_shock_trans[[i]] = matrix(rep(0.39,19*2), ncol=2)
    severe_sepsis_shock[[i]] = ncases_hai[[i]]*severe_sepsis_shock_trans[[i]]*  # Transition probability
      weight_severe_sepsis_shock[[i]]*duration_severe_sepsis_shock[[i]]         # DALYs
  }
  output$severe_sepsis_shock = severe_sepsis_shock
  # sum(unlist(severe_sepsis_shock))

  #
  prob_post_traumatic_disorder = sample.runif(19*2, 0.13, 0.21, pop.level, names(ncases_hai))
  weight_post_traumatic_disorder = sample.rpert(19*2, 0.07, 0.108, 0.088, pop.level, names(ncases_hai))
  post_traumatic_disorder = list()
  for(i in names(ncases_hai)) {
    post_traumatic_disorder_trans = severe_sepsis_shock_trans[[i]]*prob_post_traumatic_disorder[[i]]
    post_traumatic_disorder[[i]] = ncases_hai[[i]]*post_traumatic_disorder_trans*  # Transition probability
      weight_post_traumatic_disorder[[i]]
    post_traumatic_disorder[[i]] = post_traumatic_disorder[[i]]*mccabe_life_exp[[i]]
  }
  output$post_traumatic_disorder = post_traumatic_disorder
  # sum(unlist(post_traumatic_disorder))

  #
  prob_cognitive_impairment = sample.runif(19*2, 0.11, 0.47, pop.level, names(ncases_hai))
  weight_cognitive_impairment = sample.rpert(19*2, 0.026, 0.064, 0.043, pop.level, names(ncases_hai))
  cognitive_impairment = list()
  for(i in names(ncases_hai)) {
    cognitive_impairment_trans = severe_sepsis_shock_trans[[i]]*prob_cognitive_impairment[[i]]
    cognitive_impairment[[i]] = ncases_hai[[i]]*cognitive_impairment_trans*  # Transition probability
      weight_cognitive_impairment[[i]]
    cognitive_impairment[[i]] = cognitive_impairment[[i]]*mccabe_life_exp[[i]]
  }
  output$cognitive_impairment = cognitive_impairment
  # sum(unlist(cognitive_impairment))

  #
  weight_physical_impairment = sample.runif(19*2, 0.011, 0.053, pop.level, names(ncases_hai))
  physical_impairment = list()
  for(i in names(ncases_hai)) {
    physical_impairment_trans = severe_sepsis_shock_trans[[i]]*matrix(rep(1,19*2), ncol=2)
    physical_impairment[[i]] = ncases_hai[[i]]*physical_impairment_trans*  # Transition probability
      weight_physical_impairment[[i]]
    physical_impairment[[i]] = physical_impairment[[i]]*mccabe_life_exp[[i]]
  }
  output$physical_impairment = physical_impairment
  # sum(unlist(physical_impairment))

  #
  prob_renal_failure = sample.runif(19*2, 0.009, 0.013, pop.level, names(ncases_hai))
  weight_renal_failure = sample.runif(19*2, 0.03, 0.487, pop.level, names(ncases_hai))
  renal_failure = list()
  for(i in names(ncases_hai)) {
    renal_failure_trans = severe_sepsis_shock_trans[[i]]*prob_renal_failure[[i]]
    renal_failure[[i]] = ncases_hai[[i]]*renal_failure_trans*  # Transition probability
      weight_renal_failure[[i]]
    renal_failure[[i]] = renal_failure[[i]]*mccabe_life_exp[[i]]
  }
  output$renal_failure = renal_failure
  # sum(unlist(renal_failure))

  output
}
/scratch/gouwar.j/cran-all/cranData/BHAI/R/sim.pn.R
#' Performs simulation of the outcome tree of surgical site infection (SSI) for DALY calculation
#'
#' @param nhai Named numeric containing patients having healthcare-associated infections.
#' @param npatients Number of patients in point prevalence survey.
#' @param la Length of stay of all patients in hospitals. This is needed for the prevalence to incidence conversion with the Rhame-Sudderth formula.
#' @param loi A list containing length of infections from all patients in the PPS. The length of infection of all healthcare-associated infections. In PPS this is usually approximated as the time from infection onset until the date of the survey.
#' @param discharges The number of hospital discharges.
#' @param ncases_by_stratum A list containing for each infection the number of patients in each age and gender stratum.
#' @param age_prior The prior weight (counts) for each infection, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param mccabe_scores_distr The observed McCabe scores (counts) for each infection, age and gender stratum from the PPS.
#' @param mccabe_prior The prior weight (counts) for each infection, McCabe score, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param mccabe_life_exp Named list containing remaining life expectancies for each McCabe score (NONFATAL, ULTFATAL, RAPFATAL).
#' @param pop.level Indicating whether the sampling of the outcome tree parameters (e.g. probability of death, disability weights) should be carried out in each stratum separately or for the whole population.
#' @param sample_distr The distribution for the prevalence sampling. Default is 'rbetamix' which uses the mid-P-Pearson-Clopper interval. Alternatives are 'rpert' - which uses the PERT distribution - and 'bcode' which uses the sampling procedures as described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150>.
#' @param infections Infections for which the burden should be calculated. Default is names(num_hai_patients).
#' @param p_age Number of survey patients stratified by infection, age and gender. If this parameter is provided the methodology described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150> is applied.
#' @param estimate_loi Function to use for the estimation of length of infection. Default is runif_bootstrap_rear_gren.
#'
#' @keywords internal
#' @seealso
#' Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150>
#' Colzani et al. (2017) <doi:https://doi.org/10.1371/journal.pone.0170662>
#'
#' @keywords internal
#' @noRd
sim.ssi = function(nhai, npatients, la, loi, discharges, mccabe_life_exp,
  mccabe_scores_distr, ncases_by_stratum, sample_distr="rbetamix", age_prior,
  p_age=NULL, pop.level, mccabe_prior, estimate_loi) {

  ncases_hai = sample.ncases(nhai, npatients, la, loi, discharges,
    mccabe_life_exp, mccabe_scores_distr, ncases_by_stratum, sample_distr,
    age_prior, p_age=p_age, mccabe_prior, estimate_loi=estimate_loi)

  output = list()
  output$ncases_hai = ncases_hai
  # sum(unlist(ncases_hai))

  #
  prob_death = matrix(rep(c(rep(9e-3,14), rep(0.036,5)),2), ncol=2)
  ndeath_hai = list()
  for(i in names(ncases_hai)) {
    ndeath_hai[[i]] = ncases_hai[[i]]*prob_death
  }
  names(ndeath_hai) = names(ncases_hai)
  output$ndeath_hai = ndeath_hai
  # sum(unlist(ndeath_hai))

  #
  yll_hai = ndeath_hai
  for(i in names(mccabe_life_exp)) {
    yll_hai[[i]] = yll_hai[[i]]*mccabe_life_exp[[i]]
  }
  output$yll_hai = yll_hai
  # sum(unlist(yll_hai))

  #
  #
  weight_symptomatic_infection = sample.rpert(19*2, 0.039, 0.06, 0.051, pop.level, names(ncases_hai))
  duration_symptomatic_infection = 0.096
  daly_symptomatic_infection = list()
  for(i in names(ncases_hai)) {
    daly_symptomatic_infection[[i]] = ncases_hai[[i]]*weight_symptomatic_infection[[i]]*duration_symptomatic_infection
  }
  output$daly_symptomatic_infection = daly_symptomatic_infection
  # sum(unlist(daly_symptomatic_infection))

  #
  output
}
/scratch/gouwar.j/cran-all/cranData/BHAI/R/sim.ssi.R
#' Performs simulation of the outcome tree of urinary tract infection (UTI) for DALY calculation
#'
#' @param nhai Named numeric containing patients having healthcare-associated infections.
#' @param npatients Number of patients in point prevalence survey.
#' @param la Length of stay of all patients in hospitals. This is needed for the prevalence to incidence conversion with the Rhame-Sudderth formula.
#' @param loi A list containing length of infections from all patients in the PPS. The length of infection of all healthcare-associated infections. In PPS this is usually approximated as the time from infection onset until the date of the survey.
#' @param discharges The number of hospital discharges.
#' @param ncases_by_stratum A list containing for each infection the number of patients in each age and gender stratum.
#' @param age_prior The prior weight (counts) for each infection, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param mccabe_scores_distr The observed McCabe scores (counts) for each infection, age and gender stratum from the PPS.
#' @param mccabe_prior The prior weight (counts) for each infection, McCabe score, age and gender stratum. This is used for smoothing the age and gender distribution when small numbers are observed.
#' @param mccabe_life_exp Named list containing remaining life expectancies for each McCabe score (NONFATAL, ULTFATAL, RAPFATAL).
#' @param pop.level Indicating whether the sampling of the outcome tree parameters (e.g. probability of death, disability weights) should be carried out in each stratum separately or for the whole population.
#' @param sample_distr The distribution for the prevalence sampling. Default is 'rbetamix' which uses the mid-P-Pearson-Clopper interval. Alternatives are 'rpert' - which uses the PERT distribution - and 'bcode' which uses the sampling procedures as described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150>.
#' @param infections Infections for which the burden should be calculated. Default is names(num_hai_patients).
#' @param p_age Number of survey patients stratified by infection, age and gender. If this parameter is provided the methodology described in Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150> is applied.
#' @param estimate_loi Function to use for the estimation of length of infection. Default is runif_bootstrap_rear_gren.
#'
#' @keywords internal
#' @seealso
#' Cassini et al. (2016) <doi:https://doi.org/10.1371/journal.pmed.1002150>
#' Colzani et al. (2017) <doi:https://doi.org/10.1371/journal.pone.0170662>
#'
#' @keywords internal
#' @noRd
sim.uti = function(nhai, npatients, la, loi, discharges, mccabe_life_exp,
  mccabe_scores_distr, ncases_by_stratum, sample_distr="rbetamix", age_prior,
  p_age=NULL, pop.level, mccabe_prior, estimate_loi) {

  ncases_hai = sample.ncases(nhai, npatients, la, loi, discharges,
    mccabe_life_exp, mccabe_scores_distr, ncases_by_stratum, sample_distr,
    age_prior, p_age=p_age, mccabe_prior, estimate_loi=estimate_loi)

  output = list()
  output$ncases_hai = ncases_hai
  # sum(unlist(ncases_hai))

  weight_symptomatic_infection = sample.rpert(19*2, 0.039, 0.06, 0.051, pop.level, names(ncases_hai))
  duration_symptomatic_infection = 0.007
  daly_symptomatic_infection = list()
  for(i in names(ncases_hai)) {
    daly_symptomatic_infection[[i]] = ncases_hai[[i]]*weight_symptomatic_infection[[i]]*duration_symptomatic_infection
  }
  output$daly_symptomatic_infection = daly_symptomatic_infection
  # sum(unlist(daly_symptomatic_infection))

  secondary_bacteraemia_urosepsis_trans = sample.runif(19*2, 0.0542, 0.2, pop.level, names(ncases_hai))
  weight_secondary_bacteraemia_urosepsis = sample.rpert(19*2, 0.579, 0.727, 0.655, pop.level, names(ncases_hai))
  duration_secondary_bacteraemia_urosepsis = sample.runif(19*2, 0.027, 0.036, pop.level, names(ncases_hai))
  secondary_bacteraemia_urosepsis = list()
  for(i in names(ncases_hai)) {
    secondary_bacteraemia_urosepsis[[i]] = ncases_hai[[i]]*secondary_bacteraemia_urosepsis_trans[[i]]*  # Transition probability
      weight_secondary_bacteraemia_urosepsis[[i]]*duration_secondary_bacteraemia_urosepsis[[i]]
  }
  output$secondary_bacteraemia_urosepsis = secondary_bacteraemia_urosepsis
  # sum(unlist(secondary_bacteraemia_urosepsis))

  #
  prob_death = sample.runif(19*2, 0.09, 0.2, pop.level, names(ncases_hai))
  ndeath_hai = list()
  for(i in names(ncases_hai)) {
    ndeath_hai[[i]] = ncases_hai[[i]]*secondary_bacteraemia_urosepsis_trans[[i]]*prob_death[[i]]
  }
  names(ndeath_hai) = names(ncases_hai)
  output$ndeath_hai = ndeath_hai
  # sum(unlist(ndeath_hai))

  yll_hai = ndeath_hai
  for(i in names(mccabe_life_exp)) {
    yll_hai[[i]] = yll_hai[[i]]*mccabe_life_exp[[i]]
  }
  output$yll_hai = yll_hai
  # sum(unlist(yll_hai))

  #
  prob_post_traumatic_disorder = sample.runif(19*2, 0.13, 0.21, pop.level, names(ncases_hai))
  weight_post_traumatic_disorder = sample.rpert(19*2, 0.07, 0.108, 0.088, pop.level, names(ncases_hai))
  post_traumatic_disorder = list()
  for(i in names(ncases_hai)) {
    post_traumatic_disorder_trans = secondary_bacteraemia_urosepsis_trans[[i]]*prob_post_traumatic_disorder[[i]]
    post_traumatic_disorder[[i]] = ncases_hai[[i]]*post_traumatic_disorder_trans*  # Transition probability
      weight_post_traumatic_disorder[[i]]
    post_traumatic_disorder[[i]] = post_traumatic_disorder[[i]]*mccabe_life_exp[[i]]
  }
  output$post_traumatic_disorder = post_traumatic_disorder
  # sum(unlist(post_traumatic_disorder))

  #
  prob_cognitive_impairment = sample.runif(19*2, 0.11, 0.47, pop.level, names(ncases_hai))
  weight_cognitive_impairment = sample.rpert(19*2, 0.026, 0.064, 0.043, pop.level, names(ncases_hai))
  cognitive_impairment = list()
  for(i in names(ncases_hai)) {
    cognitive_impairment_trans = secondary_bacteraemia_urosepsis_trans[[i]]*prob_cognitive_impairment[[i]]
    cognitive_impairment[[i]] = ncases_hai[[i]]*cognitive_impairment_trans*  # Transition probability
      weight_cognitive_impairment[[i]]
    cognitive_impairment[[i]] = cognitive_impairment[[i]]*mccabe_life_exp[[i]]
  }
  output$cognitive_impairment = cognitive_impairment
  # sum(unlist(cognitive_impairment))

  #
  weight_physical_impairment = sample.runif(19*2, 0.011, 0.053, pop.level, names(ncases_hai))
  physical_impairment = list()
  for(i in names(ncases_hai)) {
    physical_impairment_trans = secondary_bacteraemia_urosepsis_trans[[i]]*matrix(rep(1,19*2), ncol=2)
    physical_impairment[[i]] = ncases_hai[[i]]*physical_impairment_trans*  # Transition probability
      weight_physical_impairment[[i]]
    physical_impairment[[i]] = physical_impairment[[i]]*mccabe_life_exp[[i]]
  }
  output$physical_impairment = physical_impairment
  # sum(unlist(physical_impairment))

  #
  prob_renal_failure = sample.runif(19*2, 0.009, 0.013, pop.level, names(ncases_hai))
  weight_renal_failure = sample.runif(19*2, 0.03, 0.487, pop.level, names(ncases_hai))
  renal_failure = list()
  for(i in names(ncases_hai)) {
    renal_failure_trans = secondary_bacteraemia_urosepsis_trans[[i]]*prob_renal_failure[[i]]
    renal_failure[[i]] = ncases_hai[[i]]*renal_failure_trans*  # Transition probability
      weight_renal_failure[[i]]
    renal_failure[[i]] = renal_failure[[i]]*mccabe_life_exp[[i]]
  }
  output$renal_failure = renal_failure
  # sum(unlist(renal_failure))

  output[c(1,4,5,2,3,6:9)]
}
/scratch/gouwar.j/cran-all/cranData/BHAI/R/sim.uti.R
#' Create BHAI summary table
#'
#' @title Create summary table
#'
#' @param pps The PPS object containing the data.
#' @param pop_norm Indicating whether statistics should be computed per 100,000 population, default: FALSE.
#' @param conf.int Specifying whether confidence intervals should be computed, default: TRUE.
#'
#' @usage bhai.prettyTable(pps, pop_norm=FALSE, conf.int=TRUE)
#'
#' @seealso \code{\linkS4class{PPS}}
#'
#' @return A matrix containing the summarised results.
#'
#' @examples
#'
#' data(german_pps_2011_repr)
#' german_pps_repr = PPS(num_hai_patients = num_hai_patients,
#'                       num_hai_patients_by_stratum = num_hai_patients_by_stratum,
#'                       num_hai_patients_by_stratum_prior = num_hai_patients_by_stratum_prior,
#'                       num_survey_patients = num_survey_patients,
#'                       length_of_stay = length_of_stay,
#'                       loi_pps = loi_pps,
#'                       mccabe_scores_distr = mccabe_scores_distr,
#'                       mccabe_life_exp = mccabe_life_exp,
#'                       hospital_discharges = hospital_discharges,
#'                       population = population,
#'                       country="Germany (representative sample)")
#' german_pps_repr
#'
#' set.seed(3)
#' # The following example is run for illustrative purposes only.
#' # Note that you should never run the function with only 10 Monte-Carlo simulations in practice!
#' result = bhai(german_pps_repr, nsim=10)
#' bhai.prettyTable(result)
#'
#' @export
bhai.prettyTable = function(pps, pop_norm=FALSE, conf.int=TRUE) {
  summary_bhai = pps@bhai_summary
  pop_size = pps@population
  if(pop_norm) {
    pop_size = pop_size/100000
  } else {
    pop_size = 1
  }
  sum_tab = lapply(c("Cases", "Deaths", "DALY", "YLL", "YLD"), function(n)
    sapply(summary_bhai, function(x)
      paste(prettyNum(round(x$TOTAL[n,2]/pop_size, ifelse(pop_size==1,0,1)), big.mark=","),
            ifelse(conf.int," (",""),
            ifelse(conf.int,prettyNum(round(x$TOTAL[n,1]/pop_size, ifelse(pop_size==1,0,1)), big.mark=","),""),
            ifelse(conf.int," - ",""),
            ifelse(conf.int,prettyNum(round(x$TOTAL[n,3]/pop_size, ifelse(pop_size==1,0,1)), big.mark=","),""),
            ifelse(conf.int,")",""), sep="")))
  sum_tab = do.call("rbind", sum_tab)
  rownames(sum_tab) = c("Cases", "Deaths", "DALY", "YLL", "YLD")
  sum_tab = t(sum_tab)
  sum_tab
}

#'
#' Internal function for bhai summary calculation
#'
#' @param allsim Monte Carlo simulations.
#' @param pop_size Population size.
#' @param summarize_strata Specifying whether stratum-specific summary statistics should be computed.
#'
#' @keywords internal
#' @noRd
summary.bhai = function(allsim, pop_size, summarize_strata=TRUE) {
  out = c(summary_by_disease(allsim, pop_size, summarize_strata),
          list("ALL"=c(summary_total(allsim),
                       ifelse(summarize_strata, summary_by_stratum(allsim),
                              list("stratum_specific_results"=NULL)))))
  names(out[["ALL"]]) = c("TOTAL", "stratum_specific_results")
  out
}

#'
#' Internal function for bhai summary calculation for a single infection
#'
#' @param allsim Monte Carlo simulations.
#' @param pop_size Population size.
#' @param summarize_strata Specifying whether stratum-specific summary statistics should be computed.
#'
#' @keywords internal
#' @noRd
summary_by_disease = function(allsim, pop_size, summarize_strata=TRUE) {
  out = list()
  summary_out = list()
  for(n in names(allsim)) {
    # print(n)
    out[[n]] = list()
    currsim = allsim[[n]]
    for(curr in names(currsim[[1]])) {
      # print(curr)
      currval = list()
      for(s in 1:length(currsim)) {
        currval[[s]] = c(sapply(currsim[[s]][[curr]], sum), "TOTAL"=sum(unlist(currsim[[s]][[curr]])))
      }
      out[[n]][[curr]] = t(apply(do.call("rbind", currval), 2, quantile, prob=c(0.025, 0.5, 0.975)))
      out[[n]][[curr]] = rbind(out[[n]][[curr]], apply(out[[n]][[curr]][1:3,], 2, sum))
      rownames(out[[n]][[curr]])[5] = "CHECK-WRONG"
    }
    alldaly = list()
    allcases = list()
    alldeath = list()
    allyll = list()
    allyld = list()
    for(s in 1:length(currsim)) {
      alldaly[[s]] = sum(sapply(currsim[[s]][3:length(currsim[[s]])], function(y) sum(unlist(y))))
      allyll[[s]] = sum(sapply(currsim[[s]][3], function(y) sum(unlist(y))))
      allyld[[s]] = sum(sapply(currsim[[s]][4:length(currsim[[s]])], function(y) sum(unlist(y))))
      allcases[[s]] = sum(sapply(currsim[[s]][1], function(y) sum(unlist(y))))
      alldeath[[s]] = sum(sapply(currsim[[s]][2], function(y) sum(unlist(y))))
    }
    outmat = rbind(quantile(unlist(alldaly), prob=c(0.025, 0.5, 0.975)),
                   quantile(unlist(allyll), prob=c(0.025, 0.5, 0.975)),
                   quantile(unlist(allyld), prob=c(0.025, 0.5, 0.975)),
                   quantile(unlist(allcases), prob=c(0.025, 0.5, 0.975)),  #/sum(pop_demographics[,2:3])*100000
                   quantile(unlist(alldeath), prob=c(0.025, 0.5, 0.975)))  #/sum(pop_demographics[,2:3])*100000
    rownames(outmat) = c("DALY", "YLL", "YLD", "Cases", "Deaths")
    stratum_specific = NULL
    if(summarize_strata) {
      ncases_by_stratum = do.call("rbind", lapply(currsim, function(y) as.vector(Reduce("+", y$ncases_hai))))
      allncases_by_stratum = apply(ncases_by_stratum, 2, quantile, prob=c(0.025, 0.5, 0.975))
      colnames(allncases_by_stratum) = rep(rownames(currsim[[1]]$ncases_hai[[1]]), 2)
      allncases_by_stratum = list("F"=t(allncases_by_stratum[,1:19]), "M"=t(allncases_by_stratum[,20:38]))
      ndeath_by_stratum = do.call("rbind", lapply(currsim, function(y) as.vector(Reduce("+", y$ndeath_hai))))
      allndeath_by_stratum = apply(ndeath_by_stratum, 2, quantile, prob=c(0.025, 0.5, 0.975))
      colnames(allndeath_by_stratum) = rep(rownames(currsim[[1]]$ndeath_hai[[1]]), 2)
      allndeath_by_stratum = list("F"=t(allndeath_by_stratum[,1:19]), "M"=t(allndeath_by_stratum[,20:38]))
      daly_strat = do.call("rbind", lapply(currsim, function(x) unlist((Reduce("+", unlist(x[3:length(x)], recursive=FALSE))))))
      daly_strat = apply(daly_strat, 2, quantile, prob=c(0.025,0.5,0.975))
      daly_strat = list(F=t(daly_strat[,1:19]), M=t(daly_strat[,20:38]))
      rownames(daly_strat[[1]]) = rownames(currsim[[1]][[1]][[1]])
      rownames(daly_strat[[2]]) = rownames(currsim[[1]][[1]][[1]])
      yll_strat = do.call("rbind", lapply(currsim, function(x) unlist((Reduce("+", unlist(x[3], recursive=FALSE))))))
      yll_strat = apply(yll_strat, 2, quantile, prob=c(0.025,0.5,0.975))
      yll_strat = list(F=t(yll_strat[,1:19]), M=t(yll_strat[,20:38]))
      rownames(yll_strat[[1]]) = rownames(currsim[[1]][[1]][[1]])
      rownames(yll_strat[[2]]) = rownames(currsim[[1]][[1]][[1]])
      yld_strat = do.call("rbind", lapply(currsim, function(x) unlist((Reduce("+", unlist(x[4:length(x)], recursive=FALSE))))))
      if(n == "SSI") {
        yld_strat = do.call("rbind", lapply(currsim, function(x) as.vector((Reduce("+", unlist(x[4:length(x)], recursive=FALSE))))))
      }
      yld_strat = apply(yld_strat, 2, quantile, prob=c(0.025,0.5,0.975))
      yld_strat = list(F=t(yld_strat[,1:19]), M=t(yld_strat[,20:38]))
      rownames(yld_strat[[1]]) = rownames(currsim[[1]][[1]][[1]])
      rownames(yld_strat[[2]]) = rownames(currsim[[1]][[1]][[1]])
      stratum_specific = list(ncases=allncases_by_stratum, ndeath=allndeath_by_stratum, DALY=daly_strat, YLL=yll_strat, YLD=yld_strat)
    }
    summary_out[[n]] = list(TOTAL=outmat, stratum_specific_results=stratum_specific)
  }
  summary_out
}

#'
#' Internal function for bhai summary calculation over all infections in the PPS
#'
#' @param allsim Monte Carlo simulations.
#'
#' @keywords internal
#' @noRd
summary_total = function(allsim) {
  all_out = list(
    "Cases"=quantile(apply(sapply(allsim, function(x) (sapply(x, function(y) sum(unlist(y$ncases_hai))))), 1, sum), prob=c(0.025, 0.5, 0.975)),
    "Deaths"=quantile(apply(sapply(allsim, function(x) (sapply(x, function(y) sum(unlist(y$ndeath_hai))))), 1, sum), prob=c(0.025, 0.5, 0.975)),
    "DALY"=quantile(apply(sapply(allsim, function(x) (sapply(x, function(y) sum(unlist(y[3:length(y)]))))), 1, sum), prob=c(0.025, 0.5, 0.975)),
    "YLL"=quantile(apply(sapply(allsim, function(x) (sapply(x, function(y) sum(unlist(y[3]))))), 1, sum), prob=c(0.025, 0.5, 0.975)),
    "YLD"=quantile(apply(sapply(allsim, function(x) (sapply(x, function(y) sum(unlist(y[4:length(y)]))))), 1, sum), prob=c(0.025, 0.5, 0.975)))
  list("TOTAL"=do.call("rbind", all_out))
}

#'
#' Internal function that calculates stratum-specific summary statistics
#'
#' @param allsim Monte Carlo simulations.
#'
#' @keywords internal
#' @noRd
summary_by_stratum = function(allsim) {
  ncases_by_stratum = lapply(allsim, function(x) do.call("rbind", lapply(x, function(y) as.vector(Reduce("+", y$ncases_hai)))))
  allncases_by_stratum = apply(Reduce("+", ncases_by_stratum), 2, quantile, prob=c(0.025, 0.5, 0.975))
  colnames(allncases_by_stratum) = rep(rownames(allsim[[1]][[1]][[1]][[1]]), 2)
  allncases_by_stratum = list("F"=t(allncases_by_stratum[,1:19]), "M"=t(allncases_by_stratum[,20:38]))
  ndeath_by_stratum = lapply(allsim, function(x) do.call("rbind", lapply(x, function(y) as.vector(Reduce("+", y$ndeath_hai)))))
  allndeath_by_stratum = apply(Reduce("+", ndeath_by_stratum), 2, quantile, prob=c(0.025, 0.5, 0.975))
  colnames(allndeath_by_stratum) = rep(rownames(allsim[[1]][[1]][[1]][[1]]), 2)
  allndeath_by_stratum = list("F"=t(allndeath_by_stratum[,1:19]), "M"=t(allndeath_by_stratum[,20:38]))
  yll_by_stratum = lapply(allsim, function(x) do.call("rbind", lapply(x, function(y) unlist(Reduce("+", y$yll_hai)))))
  allyll_by_stratum = apply(Reduce("+", yll_by_stratum), 2, quantile, prob=c(0.025, 0.5, 0.975))
  colnames(allyll_by_stratum) = rep(rownames(allsim[[1]][[1]][[1]][[1]]), 2)
  allyll_by_stratum = list("F"=t(allyll_by_stratum[,1:19]), "M"=t(allyll_by_stratum[,20:38]))
  daly_by_stratum = lapply(allsim, function(x) do.call("rbind", lapply(x, function(y) unlist(Reduce("+", unlist(y[3:length(y)], recursive=FALSE))))))
  alldaly_by_stratum = apply(Reduce("+", daly_by_stratum), 2, quantile, prob=c(0.025, 0.5, 0.975))
  colnames(alldaly_by_stratum) = rep(rownames(allsim[[1]][[1]][[1]][[1]]), 2)
  alldaly_by_stratum = list("F"=t(alldaly_by_stratum[,1:19]), "M"=t(alldaly_by_stratum[,20:38]))
  list("stratum_specific_results"=
    list("ncases"=allncases_by_stratum, "ndeath"=allndeath_by_stratum, "YLL"=allyll_by_stratum, "DALY"=alldaly_by_stratum))
}
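# Illustrative sketch (not part of the package): the summaries above all
# follow the same pattern - reduce a matrix of Monte Carlo draws (rows =
# simulations, columns = strata) to the 2.5%/50%/97.5% quantiles, i.e. a
# median with a 95% uncertainty interval, using quantile() inside apply():
# draws <- matrix(rexp(1000 * 4), nrow = 1000)           # hypothetical draws
# apply(draws, 2, quantile, prob = c(0.025, 0.5, 0.975)) # 3 x 4 summary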
/scratch/gouwar.j/cran-all/cranData/BHAI/R/summary.R
"anovaPlot" <- function (obj, stacked = TRUE, base = TRUE, axes = TRUE, faclab = TRUE, labels = FALSE, cex = par("cex"), cex.lab = par("cex.lab"), ...) { if (!any(class(obj) == "lm")) stop(paste("Object", deparse(substitute(obj)), "should be of class 'lm'")) if (!any(class(obj) == "aov")) obj <- do.call("aov", as.list(obj$call[-1])) tables <- model.tables(obj, type = "effects")[[1]] Residuals <- resid(obj) factors <- names(tables) k <- length(factors) Df <- anova(obj)[, "Df"] Scale.Factor <- sqrt(Df[length(Df)]/Df[-length(Df)]) lst <- lst.dev <- lst.names <- list() for (i in 1:k) { nf <- length(unlist(strsplit(names(tables[i]), ":"))) if (nf == 1) { label <- dimnames(tables[i][[1]])[[1]] eff <- as.numeric(tables[i][[1]]) names(eff) <- label } else { label <- dimnames(tables[i][[1]])[[1]] eff <- as.numeric(tables[i][[1]]) for (j in 2:nf) { lab <- dimnames(tables[i][[1]])[[j]] label <- paste(rep(label, length(lab)), rep(lab, each = length(label)), sep = ":") } names(eff) <- label } lst.dev[[factors[i]]] <- Scale.Factor[i] * eff } xmax <- max(abs(range(c(unlist(lst.dev), Residuals)))) xlim <- c(-xmax, +xmax) xpd <- par("xpd") on.exit(par(xpd = xpd)) par(xpd = TRUE) plot(c(0, 1), c(0, 1), xlim = xlim, type = "n", xlab = "", ylab = "", frame = FALSE, axes = FALSE, ...) if (stacked) h <- 1/(k + 2) else h <- 1/(k + 1) hinc <- h/(k + 1) for (i in 1:k) { y <- (k - i + 2) * h if (labels) lab <- names(lst.dev[[i]]) else lab <- NULL lst[[factors[i]]] <- dots(lst.dev[[i]], y = y + hinc, xlim = xlim, hmax = y + hinc + h, stacked = stacked, base = base, axes = FALSE, labels = lab, cex = cex) if (faclab) text(0, y, labels = paste("scaled <", factors[i], "> deviations"), adj = 0.5, cex = cex.lab) } lst[["Rediduals"]] <- dots(Residuals, y = 0, xlim = xlim, hmax = 2 * h, stacked = stacked, base = FALSE, axes = FALSE, labels = NULL) if (axes) axis(1, at = pretty(xlim), mgp = c(1.5, 0.5, 0), line = -1 + 2 * hinc) if (faclab & axes) mtext("Residuals", side = 1, cex = cex.lab, line = 1 + hinc) if (faclab & !axes) mtext("Residuals", side = 1, cex = cex.lab, line = 0) invisible(lst) }
/scratch/gouwar.j/cran-all/cranData/BHH2/R/anovaPlot.R
"dotPlot" <- function (x, y = 0, xlim = range(x, na.rm = TRUE), xlab = NULL, scatter = FALSE, hmax = 1, base = TRUE, axes = TRUE, frame = FALSE, pch = 21, pch.size = "x", labels = NULL, hcex = 1, cex = par("cex"), cex.axis = par("cex.axis"), ...) { if (is.null(xlab)) xlab <- deparse(substitute(x)) x <- x[!is.na(x)] xpd <- par("xpd") par(xpd = TRUE) on.exit(par(xpd = xpd)) plot(c(0, 1), c(0, 1), xlim = xlim, type = "n", axes = FALSE, cex = cex, cex.axis = cex.axis, frame = frame, xlab = xlab, ylab = "", ...) if (axes) axis(1, cex.axis = cex.axis) if (scatter) { dots(x, y = y, xlim = xlim, stacked = FALSE, hmax = hmax, base = base, axes = FALSE, pch = pch, pch.size = pch.size, labels = labels, hcex = hcex, cex = cex, cex.axis = cex.axis) y <- y + 2 * strheight(pch.size, units = "user") xlab <- "" axes <- FALSE base = FALSE } coord <- dots(x, y, xlim = xlim, stacked = TRUE, hmax = hmax, base = base, axes = FALSE, pch = pch, pch.size = pch.size, labels = labels, hcex = hcex, cex = cex, cex.axis = cex.axis) invisible(coord) }
/scratch/gouwar.j/cran-all/cranData/BHH2/R/dotPlot.R
"dots" <- function (x, y = 0.1, xlim = range(x, na.rm = TRUE), stacked = FALSE, hmax = 0.5, base = TRUE, axes = FALSE, pch = 21, pch.size = "x", labels = NULL, hcex = 1, cex = par("cex"), cex.axis = par("cex.axis")) { x <- x[!is.na(x)] hdots <- y xmin <- xlim[1] xmax <- xlim[2] x <- x[(x >= xmin) & (x <= xmax)] b <- strwidth(pch.size, units = "user", cex = cex) h <- strheight(pch.size, units = "user", cex = hcex * cex) if (stacked) { if (xmax - xmin < b) { stop("x-dimension resolution problem") } else { xu <- seq(xmin, xmax, by = b) } m <- length(xu) tab <- data.frame(j = 1:m, k = rep(0, m), xu = xu) n <- length(x) y <- rep(0, n) for (i in 1:n) { l <- max(tab$j[tab$xu <= x[i]]) x[i] <- xu[l] + b/2 tab$k[l] <- 1 + tab$k[l] y[i] <- tab$k[l] } y <- y * h u <- hdots + max(y) if (hmax <= hdots) warning(paste("dot base <hdots=", hdots, "> higher than maximum column height <hmax=", hmax, ">...", sep = "")) if (u > hmax) y <- (hmax - hdots) * y/u y <- hdots + y } else { y <- rep(hdots, length(x)) } if (!is.null(labels)) text(x, y, labels = labels, cex = cex) else points(x, y, pch = pch, cex = cex) points.coord <- data.frame(x, y) if (axes) { segments(xmin - b, hdots - h/4, xmax + b, hdots - h/4) x <- pretty(x, n = 3, h = 0.5) x <- x[(x > xmin - b) & (x < xmax + b)] for (i in seq(x)) segments(x[i], hdots - h/4, x[i], hdots - h/2) y <- rep(hdots - h, length(x)) text(x, y, labels = x, cex = cex.axis) } if (base && !axes) segments(xmin - b, hdots - h/4, xmax + b, hdots - h/4) invisible(points.coord) }
/scratch/gouwar.j/cran-all/cranData/BHH2/R/dots.R
"ffDesMatrix" <- function (k, gen = NULL) { N <- 2^k X <- matrix(NA, nrow = N, ncol = k) for (j in 1:k) X[, j] <- rep(sort(rep(c(-1, 1), N/2^j)), 2^(j - 1)) X <- X[, ncol(X):1, drop=FALSE] if (is.null(gen)) return(X) for (i in 1:length(gen)) { ind <- trunc(gen[[i]]) if (any(abs(ind) > k)) stop(paste("generator:", paste(ind[1], "=", paste(ind[-1], collapse = "*")), "includes undefined columns")) x <- rep(sign(ind[1]), N) for (j in ind[-1]) x <- x * X[, j, drop=FALSE] X[, abs(ind[1])] <- x } X <- unique(X) X }
/scratch/gouwar.j/cran-all/cranData/BHH2/R/ffDesMatrix.R
"ffFullMatrix" <- function (X, x, maxInt, blk = NULL) { if (!is.data.frame(X)) X <- as.data.frame(X) Z <- data.frame(one = rep(1, nrow(X))) k <- 0 if (!is.null(blk)) { blk <- as.matrix(blk) if (nrow(blk) != nrow(X)) stop("Matrix and block should have the same number of rows.") k <- ncol(blk) Z <- cbind(Z, blk) names(Z)[-1] <- paste("bk", seq(k), sep = "") } ord <- min(length(x), maxInt) nT <- rep(0, ord) for (i in seq(ord)) { tt <- subsets(length(x), i, x) if (is.null(dim(tt))) tt <- matrix(tt, nrow = 1) for (j in 1:nrow(tt)) { nT[i] <- nT[i] + 1 Z <- cbind(Z, eval(parse(text = (paste("X[", tt[j, ], "]", collapse = "*", sep = ""))))) names(Z)[ncol(Z)] <- paste("x", tt[j, ], collapse = "*", sep = "") } } nT <- c(k, nT) if (ord > 1) names(nT) <- c("blk", "main", paste("int", 2:ord, sep = ".")) else names(nT) <- c("blk", "main") list(Xa = as.matrix(Z), x = x, maxInt = ord, nTerms = nT) }
/scratch/gouwar.j/cran-all/cranData/BHH2/R/ffFullMatrix.R
"lambdaPlot" <- function (mod, lambda = seq(-1, 1, by = 0.1), stat = "F", global = TRUE, cex = par("cex"), ...) { if (stat == "F") { org.fit <- mod y <- org.fit$model[, 1] resp <- names(org.fit$model)[1] print(resp) dat <- org.fit$model form <- as.formula(org.fit$call$formula) tt <- lm(form, data = dat) sav <- anova(tt) n <- nrow(sav) numdf <- sum(sav[-n, "Df"]) dendf <- sav[n, "Df"] k <- length(lambda) if (global) { f.lambda <- matrix(NA, nrow = 1, ncol = k) dimnames(f.lambda) <- list("Model", paste("l", round(lambda, 2), sep = "")) for (j in seq(lambda)) { l <- lambda[j] if (l == 0) dat[, resp] <- log(y) else dat[, resp] <- (y^l - 1)/l tt <- lm(form, data = dat) f.lambda[1, j] <- (sum(anova(tt)[-n, "Sum Sq"])/numdf)/(anova(tt)[n, "Sum Sq"]/dendf) } } else { f.lambda <- matrix(NA, nrow = n, k) dimnames(f.lambda) <- list(dimnames(sav)[[1]], paste("l", round(lambda, 2), sep = "")) for (j in seq(lambda)) { l <- lambda[j] if (l == 0) dat[, resp] <- log(y) else dat[, resp] <- (y^l - 1)/l tt <- lm(form, data = dat) f.lambda[, j] <- anova(tt)[, "F value"] } f.lambda <- f.lambda[-n, ] } Labels <- data.frame(term = dimnames(f.lambda)[[1]], label = LETTERS[seq(nrow(f.lambda))]) plot(lambda, lambda, xlim = range(lambda), ylim = range(f.lambda), type = "n", xlab = "lambda", ylab = "F", ...) for (i in 1:nrow(f.lambda)) lines(lambda, f.lambda[i, ]) xlab <- lambda[k] ylab <- f.lambda[, k] lab <- paste(" ", as.character(Labels[, "label"]), sep = "") for (i in 1:nrow(f.lambda)) text(lambda[k], f.lambda[, k], labels = lab, adj = 0) lab <- paste(as.character(Labels[, "label"]), " ", sep = "") for (i in 1:nrow(f.lambda)) text(lambda[1], f.lambda[, 1], labels = lab, adj = 1) print(Labels) invisible(list(lambda = lambda, f.lambda = f.lambda)) } else if (stat == "t") { y <- mod$model[, 1] org.fit <- lm(y ~ ., qr = TRUE, data = mod$model[, -1]) QR <- org.fit$qr n <- length(y) p <- length(coef(org.fit)) idx <- 1:p rdf <- n - p coef.lambda <- matrix(NA, nrow = p, ncol = length(lambda)) dimnames(coef.lambda) <- list(names(coef(org.fit)), paste("l", round(lambda, 2), sep = "")) t.lambda <- se.lambda <- coef.lambda for (j in seq(lambda)) { l <- lambda[j] if (l == 0) y.lambda <- log(y) else y.lambda <- (y^l - 1)/l resvar <- sum(qr.resid(QR, y.lambda)^2)/rdf coef.lambda[, j] <- qr.coef(QR, y.lambda) R <- chol2inv(QR$qr[idx, idx, drop = FALSE]) se.lambda[, j] <- sqrt(diag(R) * resvar) } t.lambda <- coef.lambda/se.lambda Labels <- data.frame(term = names(coef(org.fit)), label = c(" ", LETTERS[seq(nrow(t.lambda) - 1)])) plot(lambda, lambda, xlim = range(lambda), ylim = range(t.lambda[-1, ]), type = "n", xlab = "lambda", ylab = "t", ...) for (i in 2:nrow(t.lambda)) lines(lambda, t.lambda[i, ]) xlab <- lambda[length(lambda)] ylab <- t.lambda[, ncol(t.lambda)] lab <- paste(" ", as.character(Labels[, "label"]), sep = "") for (i in 2:nrow(t.lambda)) text(lambda[length(lambda)], t.lambda[, ncol(t.lambda)], labels = lab, adj = 0) lab <- paste(as.character(Labels[, "label"]), " ", sep = "") for (i in 2:nrow(t.lambda)) text(lambda[1], t.lambda[, 1], labels = lab, adj = 1) print(Labels) invisible(list(lambda = lambda, coef = coef.lambda, se = se.lambda)) } else { warning("argument stat should be either \"F\" or \"t\"") invisible(NULL) } }
/scratch/gouwar.j/cran-all/cranData/BHH2/R/lambdaPlot.R
"permtest" <- function (x, y = NULL) { if (is.null(y)) { mx <- mean(x) n <- length(x) t.obs <- mx/sqrt(var(x)/n) N <- 2^n mat <- matrix(NA, nrow = N, ncol = n) for (j in 1:n) { m <- 2^j mat[, j] <- rep(c(rep(-1, N/m), rep(+1, N/m)), m/2) } d <- as.numeric(mat %*% x/n) k <- length(which(d > mx)) return(c(N = N, t.obs = t.obs, "t-Dist-P(>t)" = 1 - pt(t.obs, n - 1), "PermDist-P(>t)" = k/N)) } else { x <- x[!is.na(x)] y <- y[!is.na(y)] nx <- length(x) ny <- length(y) S2x <- sum(x^2) - sum(x)^2/nx S2y <- sum(y^2) - sum(y)^2/ny t.stat <- function(x, y) { (mean(x) - mean(y))/sqrt((S2x + S2y)/(nx + ny - 2) * (1/nx + 1/ny)) } f.stat <- function(x, y) { (S2x/(nx - 1))/(S2y/(ny - 1)) } t.obs <- t.stat(x, y) f.obs <- f.stat(x, y) z <- c(x, y) n <- nx + ny mat <- subsets(n, nx) N <- nrow(mat) kt <- kf <- 0 for (i in 1:nrow(mat)) { x <- z[mat[i, ]] y <- z[-mat[i, ]] S2x <- sum(x^2) - sum(x)^2/nx S2y <- sum(y^2) - sum(y)^2/ny if (t.obs < t.stat(x, y)) kt <- kt + 1 if (f.obs < f.stat(x, y)) kf <- kf + 1 } return(c(N = N, t.obs = t.obs, "t-Dist:P(>t)" = 1 - pt(t.obs, nx + ny - 2), "PermDist:P(>t)" = kt/N, F.obs = f.obs, "F-Dist:P(>F)" = 1 - pf(f.obs, nx - 1, ny - 1), "PermDist:P(>F)" = kf/N)) } }
/scratch/gouwar.j/cran-all/cranData/BHH2/R/permtest.R
"subsets" <- function (n, r, v = 1:n) if (r <= 0) vector(mode(v), 0) else if (r >= n) v[1:n] else { rbind(cbind(v[1], Recall(n - 1, r - 1, v[-1])), Recall(n - 1, r, v[-1])) }
/scratch/gouwar.j/cran-all/cranData/BHH2/R/subsets.R
### R code from vignette source 'BHH2.Rnw'

###################################################
### code chunk number 1: preliminaries
###################################################
options(width=76)
library(BHH2)


###################################################
### code chunk number 2: PermutationTest
###################################################
# Permutation test for Tomato Data
#cat("Tomato Data (not paired):\n")
data(tomato.data)
attach(tomato.data)
a <- pounds[fertilizer=="A"]
b <- pounds[fertilizer=="B"]
permtest(b,a)
detach()


###################################################
### code chunk number 3: PermutationTest
###################################################
# Permutation test for Boy's Shoes Example
#cat("Shoes Data (paired):\n")
data(shoes.data)
permtest(shoes.data$matB-shoes.data$matA)


###################################################
### code chunk number 4: BHH2.Rnw:142-146
###################################################
par(mar=c(4,1,2,1),mgp=c(2,1,0),cex=0.7)
data(tab03B1)
#stem(tab03B1$yield)
dotPlot(tab03B1$yield,main="Dot plot: Industrial Process Example",xlab="yield")


###################################################
### code chunk number 5: BHH2.Rnw:155-161
###################################################
par(mar=c(4,1,2,1),mgp=c(2,1,0),cex=0.7)
data(tab03B2)
plt <- dotPlot(tab03B2$diff10,xlim=2.55*c(-1,+1),xlab="differences",
    main="Dot plot: Reference Distribution of Differences")
segments(1.3,0,1.3,max(plt$y),lty=2) #vertical line at x=1.3
text(1.3,max(plt$y),labels=" 1.30",adj=0)


###################################################
### code chunk number 6: BHH2.Rnw:193-198
###################################################
par(mfrow=c(1,1),mar=c(3,1,2,1),cex=0.7)
data(penicillin.data)
penicillin.aov <- aov(yield~blend+treat,data=penicillin.data)
anovaPlot(penicillin.aov,main="Anova plot: Penicillin Manufacturing Example",
    labels=TRUE,cex.lab=0.6)


###################################################
### code chunk number 7: BHH2.Rnw:209-213
###################################################
par(mfrow=c(1,1),mar=c(3,1,2,1),cex=0.7)
data(poison.data)
poison.lm <- lm(y~poison*treat,data=poison.data)
anovaPlot(poison.lm,main="Anova plot: Toxic Agents Example",cex.lab=0.6)


###################################################
### code chunk number 8: BHH2.Rnw:233-238
###################################################
par(mfrow=c(1,1),mar=c(3,1,2,1),cex=0.7)
data(corrosion.data)
corrosion.aov <- aov(resistance~heats+run+coating+heats:coating,data=corrosion.data)
anovaPlot(corrosion.aov,main="Anova plot: Corrosion Resistance Example",
    cex.lab=0.6)


###################################################
### code chunk number 9: BHH2.Rnw:260-265
###################################################
par(mar=c(4,3,2,1),mgp=c(2,1,0),cex=0.7)
# Lambda Plot tracing F values.
data(poison.data)
lambdaPlot(poison.lm,lambda=seq(-2,1,by=.1),stat="F",global=FALSE,cex=0.6,
    main="Lambda Plot: Toxic Agents Example")


###################################################
### code chunk number 10: BHH2.Rnw:280-286
###################################################
# Lambda Plot tracing t values.
par(mar=c(4,3,2,1),mgp=c(2,1,0),cex=0.7)
data(woolen.data)
woolen.lm <- lm(y~x1+x2+x3+I(x1^2)+I(x2^2)+I(x3^2)+I(x1*x2)+I(x1*x3)+I(x2*x3)+I(x1*x2*x3),data=woolen.data)
lambdaPlot(woolen.lm,main="Lambda plot: Woolen Thread Example (2nd order model)",
    stat="t",cex=0.6)


###################################################
### code chunk number 11: BHH2.Rnw:294-298
###################################################
par(mar=c(4,3,2,1),mgp=c(2,1,0),cex=0.7)
# Lambda Plot tracing F values.
lambdaPlot(lm(y~x1+x2+x3,data=woolen.data),lambda=seq(-1,1,length=31),
    main="Lambda plot: Woolen Thread Example (1st order model)",stat="F",global=TRUE)


###################################################
### code chunk number 12: DesignMatrix
###################################################
print(X <- ffDesMatrix(5,gen=list(c(-5,1,2,3,4))))


###################################################
### code chunk number 13: DesignMatrix
###################################################
ffFullMatrix(X,x=c(1,2,3,4),maxInt=2,blk=X[,5])$Xa


###################################################
### code chunk number 14: Subsets
###################################################
subsets(n=5,r=3,v=c("x","y","z","A","B"))
/scratch/gouwar.j/cran-all/cranData/BHH2/inst/doc/BHH2.R
############# BHMSMAfMRI package R functions #############

# Function to compute GLM (general linear model) coefficients of the regressor variables for all voxels of a single brain slice for each subject
#
glmcoef = function( n, grid, data, designmat )
{
  glm_coef_standardized = glm_coef_se = array( 0, dim = c( n, grid, grid, ncol(designmat) ) )
  for(i in 1:n)
  {
    out = glmcoef_sub(grid, data[i,,,], designmat)
    glm_coef_standardized[i,,,] = out$GLM_coef_st
    glm_coef_se[i,,,] = out$GLM_coef_se
  }
  return( list( GLMCoefStandardized = glm_coef_standardized, GLMCoefSE = glm_coef_se ) )
}

# Function to compute wavelet transform (coefficients) of a 2D GLM coefficient map (e.g., corresponding to a single brain slice) of the regressor of interest for each subject
#
waveletcoef = function( n, grid, glmcoefstd, wave.family = "DaubLeAsymm", filter.number = 6, bc = "periodic" )
{
  # ....................... Wavelet Transform ................
  WaveletCoefficientMatrix = matrix( nrow = n, ncol = grid^2-1 )
  for(i in 1:n)
  {
    dwt = imwd(glmcoefstd[i,,], type = "wavelet", family = wave.family,
      filter.number = filter.number, bc = bc, RetFather = TRUE, verbose = FALSE)
    WaveletCoefficientMatrix[i,] = c(dwt$w0L1, dwt$w0L2, dwt$w0L3,
      dwt$w1L1, dwt$w1L2, dwt$w1L3, dwt$w2L1, dwt$w2L2, dwt$w2L3,
      dwt$w3L1, dwt$w3L2, dwt$w3L3, dwt$w4L1, dwt$w4L2, dwt$w4L3,
      dwt$w5L1, dwt$w5L2, dwt$w5L3, dwt$w6L1, dwt$w6L2, dwt$w6L3,
      dwt$w7L1, dwt$w7L2, dwt$w7L3, dwt$w8L1, dwt$w8L2, dwt$w8L3)
    #.....Note: Using up to w8. So, applicable only up to 2^9 by 2^9 data
  }
  return( list(WaveletCoefficientMatrix = WaveletCoefficientMatrix) )
}

# Function to compute estimates of the hyperparameters that appear in the BHMSMA model based on multi-subject or single subject analyses (see References)
#
hyperparamest = function( n, grid, waveletcoefmat, analysis="multi" )
{
  #....................... Estimating C0, C1, C2, C3, C4 and C5..................
  if(analysis == "multi")
  {
    C0 = C1 = C2 = C3 = C4 = C5 = 1
    for(j in 1:50)
    {
      max_likelihood = nlminb(start = C5, objective=minus_ll, lower=0, upper=Inf, C0=C0, C1=C1, C2=C2, C3=C3, C4=C4, subs=1:n, grid=grid, waveletcoefmat=waveletcoefmat)
      C5 = max_likelihood$par
      max_likelihood = nlminb(start = C4, objective=minus_ll, lower=0, upper=Inf, C0=C0, C1=C1, C2=C2, C3=C3, C5=C5, subs=1:n, grid=grid, waveletcoefmat=waveletcoefmat)
      C4 = max_likelihood$par
      max_likelihood = nlminb(start = C0, objective=minus_ll, lower=0, upper=Inf, C1=C1, C2=C2, C3=C3, C4=C4, C5=C5, subs=1:n, grid=grid, waveletcoefmat=waveletcoefmat)
      C0 = max_likelihood$par
      max_likelihood = nlminb(start = C1, objective=minus_ll, lower=0, upper=Inf, C0=C0, C2=C2, C3=C3, C4=C4, C5=C5, subs=1:n, grid=grid, waveletcoefmat=waveletcoefmat)
      C1 = max_likelihood$par
      max_likelihood = nlminb(start = C2, objective=minus_ll, lower=0, upper=Inf, C0=C0, C1=C1, C3=C3, C4=C4, C5=C5, subs=1:n, grid=grid, waveletcoefmat=waveletcoefmat)
      C2 = max_likelihood$par
      max_likelihood = nlminb(start = C3, objective=minus_ll, lower=-Inf, upper=Inf, C0=C0, C1=C1, C2=C2, C4=C4, C5=C5, subs=1:n, grid=grid, waveletcoefmat=waveletcoefmat)
      C3 = max_likelihood$par
    }

    # ........... Computing MLE variance estimates................
    h = sqrt(.Machine$double.eps)*c(C0,C1,C2,C3,C4,C5)
    if(C0==0) h[1]= 1.490116e-12
    if(C1==0) h[2]= 1.490116e-12
    if(C2==0) h[3]= 1.490116e-12
    if(C3==0) h[4]= 1.490116e-12
    if(C4==0) h[5]= 1.490116e-12
    if(C5==0) h[6]= 1.490116e-12
    VarMLE = var_mle(C0,C1,C2,C3,C4,C5,n,grid,waveletcoefmat,h)
    # Note: taking tol=1e-030. Not setting appropriate tolerance level may show the "system is computationally singular" error.

    return(list(hyperparam=c(C0,C1,C2,C3,C4,C5),hyperparamVar=VarMLE))
  } else
  #
  #
  #
  #
  if(analysis == "single")
  {
    hyperparam = matrix(NA, nrow=n, ncol=6)
    hyperparamVar = array(dim = c(n,6,6))
    for(i in 1:n)
    {
      C0 = C1 = C2 = C3 = C4 = C5 = 1
      for(j in 1:50)
      {
        max_likelihood = nlminb(start = C5, objective=minus_ll, lower=0, upper=Inf, C0=C0, C1=C1, C2=C2, C3=C3, C4=C4, subs=i, grid=grid, waveletcoefmat=waveletcoefmat)
        C5 = max_likelihood$par
        max_likelihood = nlminb(start = C4, objective=minus_ll, lower=0, upper=Inf, C0=C0, C1=C1, C2=C2, C3=C3, C5=C5, subs=i, grid=grid, waveletcoefmat=waveletcoefmat)
        C4 = max_likelihood$par
        max_likelihood = nlminb(start = C0, objective=minus_ll, lower=0, upper=Inf, C1=C1, C2=C2, C3=C3, C4=C4, C5=C5, subs=i, grid=grid, waveletcoefmat=waveletcoefmat)
        C0 = max_likelihood$par
        max_likelihood = nlminb(start = C1, objective=minus_ll, lower=0, upper=Inf, C0=C0, C2=C2, C3=C3, C4=C4, C5=C5, subs=i, grid=grid, waveletcoefmat=waveletcoefmat)
        C1 = max_likelihood$par
        max_likelihood = nlminb(start = C2, objective=minus_ll, lower=0, upper=Inf, C0=C0, C1=C1, C3=C3, C4=C4, C5=C5, subs=i, grid=grid, waveletcoefmat=waveletcoefmat)
        C2 = max_likelihood$par
        max_likelihood = nlminb(start = C3, objective=minus_ll, lower=-Inf, upper=Inf, C0=C0, C1=C1, C2=C2, C4=C4, C5=C5, subs=i, grid=grid, waveletcoefmat=waveletcoefmat)
        C3 = max_likelihood$par
      }

      # ........... Computing MLE variance estimates ..........
      h = sqrt(.Machine$double.eps)*c(C0,C1,C2,C3,C4,C5)
      if(C0==0) h[1]= 1.490116e-12
      if(C1==0) h[2]= 1.490116e-12
      if(C2==0) h[3]= 1.490116e-12
      if(C3==0) h[4]= 1.490116e-12
      if(C4==0) h[5]= 1.490116e-12
      if(C5==0) h[6]= 1.490116e-12
      VarMLE = var_mle(C0,C1,C2,C3,C4,C5,i,grid,waveletcoefmat,h)
      # Note: taking tol=1e-030. Not setting appropriate tolerance level may show the "system is computationally singular" error.
      hyperparam[i,] = c(C0,C1,C2,C3,C4,C5)
      hyperparamVar[i,,] = VarMLE
    }
    return(list(hyperparam=hyperparam,hyperparamVar=hyperparamVar))
  }
}

# Function to compute the mixture probabilities, which define the marginal posterior distribution of the wavelet coefficients of the BHMSMA model, for each subject based on multi-subject or single subject analyses (see References)
postmixprob = function(n, grid, waveletcoefmat, hyperparam, analysis="multi")
{
  # ....................... piklj bar by Trapezoidal Rule ...................................
  if(analysis == "multi")
  {
    C0 = hyperparam[1]
    C1 = hyperparam[2]
    C2 = hyperparam[3]
    C3 = hyperparam[4]
    C4 = hyperparam[5]
    C5 = hyperparam[6]
    pkljbar = pklj_bar(grid, n, waveletcoefmat, C0, C1, C2, C3, C4, C5)
    return(list(pkljbar=pkljbar))
  } else
  #
  #
  #
  #
  #
  #
  if(analysis == "single")
  {
    pkljbar = matrix(NA,nrow=n, ncol=grid^2-1)
    for(i in 1:n)
    {
      C0 = hyperparam[i,1]
      C1 = hyperparam[i,2]
      C2 = hyperparam[i,3]
      C3 = hyperparam[i,4]
      C4 = hyperparam[i,5]
      C5 = hyperparam[i,6]
      pkljbar[i,] = pklj_bar(grid, 1, waveletcoefmat[i,,drop=F], C0, C1, C2, C3, C4, C5)
    }
    return(list(pkljbar=pkljbar))
  }
}

# Function to compute the posterior estimates (mean and median) of the wavelet coefficients of the BHMSMA model for each subject based on multi-subject or single subject analyses (see References)
postwaveletcoef = function( n, grid, waveletcoefmat, hyperparam, pkljbar, analysis="multi" )
{
  # ......................... Posterior Mean of Wavelet Coefficients ..............................
  if(analysis == "multi")
  {
    C4 = hyperparam[5]
    C5 = hyperparam[6]
    out = post_wavelet_coef(grid, n, waveletcoefmat, pkljbar, C4, C5)
    return(out)
  } else
  #
  #
  #
  #
  #
  #
  {
    PostMeanWaveletCoef = matrix( nrow=n, ncol=grid^2-1 )
    PostMedianWaveletCoef = matrix( nrow=n, ncol=grid^2-1 )
    for(i in 1:n)
    {
      C4 = hyperparam[i,5]
      C5 = hyperparam[i,6]
      out = post_wavelet_coef(grid, 1, waveletcoefmat[i,,drop=F], pkljbar, C4, C5)
      PostMeanWaveletCoef[i,] = out$PostMeanWaveletCoef[1,]
      PostMedianWaveletCoef[i,] = out$PostMedianWaveletCoef[1,]
    }
    return(list(PostMeanWaveletCoef=PostMeanWaveletCoef, PostMedianWaveletCoef=PostMedianWaveletCoef))
  }
}

# Function that substitutes the wavelet coefficients stored in a wavelet object with user given values and returns the modified wavelet object
substituteWaveletCoef = function(grid, waveletobj, values)
{
  out = waveletobj
  out$w0L1 = values[1]
  out$w0L2 = values[2]
  out$w0L3 = values[3]
  if(grid>2)
  {
    out$w1L1 = values[4:7]
    out$w1L2 = values[8:11]
    out$w1L3 = values[12:15]
  }
  if(grid>4)
  {
    out$w2L1 = values[16:31]
    out$w2L2 = values[32:47]
    out$w2L3 = values[48:63]
  }
  if(grid>8)
  {
    out$w3L1 = values[64:127]
    out$w3L2 = values[128:191]
    out$w3L3 = values[192:255]
  }
  if(grid>16)
  {
    out$w4L1 = values[256:511]
    out$w4L2 = values[512:767]
    out$w4L3 = values[768:1023]
  }
  if(grid>32)
  {
    out$w5L1 = values[1024:2047]
    out$w5L2 = values[2048:3071]
    out$w5L3 = values[3072:4095]
  }
  if(grid>64)
  {
    out$w6L1 = values[4096:8191]
    out$w6L2 = values[8192:12287]
    out$w6L3 = values[12288:16383]
  }
  if(grid>128)
  {
    out$w7L1 = values[16384:32767]
    out$w7L2 = values[32768:49151]
    out$w7L3 = values[49152:65535]
  }
  if(grid>256)
  {
    out$w8L1 = values[65536:131071]
    out$w8L2 = values[131072:196607]
    out$w8L3 = values[196608:262143]
  }
  out$w0Lconstant = waveletobj$w0Lconstant
  return(out)
}

# Function to compute the posterior estimates (mean and median) of the 2D GLM coefficients map (corresponding to a single brain slice) of the regressor of interest in the BHMSMA model for each subject based on multi-subject or single subject analyses (see References)
postglmcoef = function(n, grid, glmcoefstd, postmeanwaveletcoef, wave.family="DaubLeAsymm", filter.number=6, bc="periodic" )
{
  # ............................. Posterior Mean of GLM coefficients .............................
  PostMeanRecons = array(dim=c(n,grid,grid))
  for(i in 1:n)
  {
    dwt = imwd(glmcoefstd[i,,], type="wavelet", family=wave.family, filter.number=filter.number, bc=bc, RetFather=TRUE, verbose=FALSE)
    dwt_new = substituteWaveletCoef(grid,dwt,postmeanwaveletcoef[i,])
    PostMeanRecons[i,,] = imwr(dwt_new)
  }
  return(list(GLMcoefposterior=PostMeanRecons))
}

#1: Function to perform the complete BHMSMA analysis (see References) of a 2D GLM coefficient map (corresponding to a single brain slice) of the regressor of interest based on multi-subject or single subject analyses
BHMSMA = function( n, grid, data, designmat, k, analysis="multi", truecoef=NULL, wave.family="DaubLeAsymm", filter.number=6, bc="periodic")
{
  if(!is.matrix(designmat)) stop("designmat must be a matrix")
  if(n!=nrow(data)) stop(cat("data doesn't have n=",n," rows.\n"))
  if(ncol(designmat)==1) cat("Warning: designmat has only one column. The function doesn't add any intercept column by itself.")
  if(k > ncol(designmat)) stop("Fix the input k.")
  # if( !length( unique(designmat[,k])) == 1)  #if kth column is not an intercept column, add an intercept column
  #   designmat = cbind( rep(1,nrow(designmat)), designmat[,k,drop=F] )

  glmmap = glmcoef(n, grid, data, designmat)

  wavecoefglmmap = waveletcoef(n, grid, glmmap$GLMCoefStandardized[,,,k], wave.family, filter.number, bc)

  hyperest = hyperparamest( n, grid, wavecoefglmmap$WaveletCoefficientMatrix, analysis )

  pkljbar = postmixprob(n, grid, wavecoefglmmap$WaveletCoefficientMatrix, hyperest$hyperparam, analysis)

  postwavecoefglmmap = postwaveletcoef( n, grid, wavecoefglmmap$WaveletCoefficientMatrix, hyperest$hyperparam, pkljbar$pkljbar, analysis )

  postglmmap = postglmcoef(n, grid, glmmap$GLMCoefStandardized[,,,k], postwavecoefglmmap$PostMeanWaveletCoef, wave.family, filter.number, bc)

  if(! is.null(truecoef))
  {
    MSE = NULL
    for(i in 1:n)
    {
      MSE[i] = sum( ( as.vector(truecoef[i,,]/glmmap$GLMCoefSE[i,,,k]) - as.vector(postglmmap$GLMcoefposterior[i,,]) )^2 )
    }
  }

  if(!is.null(truecoef))
    return(list( GLMCoefStandardized = glmmap$GLMCoefStandardized,
      GLMCoefSE = glmmap$GLMCoefSE,
      WaveletCoefficientMatrix = wavecoefglmmap$WaveletCoefficientMatrix,
      hyperparam = hyperest$hyperparam,
      hyperparamVar = hyperest$hyperparamVar,
      posteriorMixProb = pkljbar$pkljbar,
      Waveletcoefposterior = postwavecoefglmmap$PostMeanWaveletCoef,
      GLMcoefposterior = postglmmap$GLMcoefposterior,
      MSE = MSE))
  if(is.null(truecoef))
    return(list( GLMCoefStandardized = glmmap$GLMCoefStandardized,
      GLMCoefSE = glmmap$GLMCoefSE,
      WaveletCoefficientMatrix = wavecoefglmmap$WaveletCoefficientMatrix,
      hyperparam = hyperest$hyperparam,
      hyperparamVar = hyperest$hyperparamVar,
      posteriorMixProb = pkljbar$pkljbar,
      Waveletcoefposterior = postwavecoefglmmap$PostMeanWaveletCoef,
      GLMcoefposterior = postglmmap$GLMcoefposterior))
}

# Function to obtain samples from the posterior distribution of a 2D GLM coefficient map (corresponding to a single brain slice) of the regressor of interest in the BHMSMA model for each subject based on multi-subject or single subject analyses (see References)
postsamples = function(nsample, n, grid, glmcoefstd, waveletcoefmat, hyperparam, pkljbar, analysis, wave.family="DaubLeAsymm", filter.number=6, bc="periodic", seed)
{
  if(analysis == "multi")
  {
    C4 = hyperparam[5]
    C5 = hyperparam[6]

    # ................. Simulating d_iklj..............
    idwt = array( NA, dim=c(n, grid, grid, nsample))
    postdiscovery = array( dim=c(n, grid, grid) )
    d.sim = post_samp(nsample, grid, n, waveletcoefmat, pkljbar, C4, C5, seed)
    for(i in 1:n)
    {
      dwt = imwd(glmcoefstd[i,,], type="wavelet", family=wave.family, filter.number=filter.number, bc=bc, RetFather=TRUE, verbose=FALSE)
      for(g in 1:nsample)
      {
        dwt_new = substituteWaveletCoef(grid,dwt,d.sim[i,,g])
        idwt[i,,,g] = imwr(dwt_new)
      }
      delta = 1
      phi = 0.999
      for(j in 1:grid)
        for(l in 1:grid)
        {
          v = abs(idwt[i,j,l,])
          p = length(v[v>delta])/length(v)
          if(p>phi) postdiscovery[i,j,l] = p else postdiscovery[i,j,l] = 0
        }
    }
    return(list(samples = idwt, postdiscovery = postdiscovery))
  } else
  #
  #
  #
  #
  #
  #
  if(analysis == "single")
  {
    idwt = array( NA, dim=c(n, grid, grid, nsample))
    postdiscovery = array( dim=c(n, grid, grid) )
    for(i in 1:n)
    {
      C4 = hyperparam[i,5]
      C5 = hyperparam[i,6]
      dwt = imwd(glmcoefstd[i,,], type="wavelet", family=wave.family, filter.number=filter.number, bc=bc, RetFather=TRUE, verbose=FALSE)
      d.sim = post_samp(nsample, grid, i, waveletcoefmat, pkljbar, C4, C5, seed)
      for(g in 1:nsample)
      {
        dwt_new = substituteWaveletCoef(grid,dwt,d.sim[1,,g])
        idwt[i,,,g] = imwr(dwt_new)
      }
      delta = 1
      phi = 0.999
      for(j in 1:grid)
        for(l in 1:grid)
        {
          v = abs(idwt[i,j,l,])
          p = length(v[v>delta])/length(v)
          if(p>phi) postdiscovery[i,j,l] = p else postdiscovery[i,j,l] = 0
        }
    }
    return(list(samples = idwt, postdiscovery = postdiscovery))
  }
}

# Function to compute the posterior estimates (mean and median) of the 2D GLM coefficient group map (corresponding to a single brain slice) of the regressor of interest in the BHMSMA model based on multi-subject or single subject analyses (see References)
postgroupglmcoef = function( n, grid, glmcoefstd, postmeanwaveletcoef, wave.family="DaubLeAsymm", filter.number=6, bc="periodic" )
{
  postmeanwaveletcoef = apply(postmeanwaveletcoef, 2, mean, na.rm=TRUE)
  scaling = NULL
  for(i in 1:n)
  {
    dwt = imwd(glmcoefstd[i,,], type="wavelet", family=wave.family, filter.number=filter.number, bc=bc, RetFather=TRUE, verbose=FALSE)
    scaling = c(scaling,dwt$w0Lconstant)
  }
  dwt_new = substituteWaveletCoef(grid,dwt,postmeanwaveletcoef)
  dwt_new$w0Lconstant = mean(scaling)
  PostMeanReconsgroup = imwr(dwt_new)
  return(list(groupcoef=PostMeanReconsgroup))
}

# Function to read and import fMRI data from several file types
readfmridata = function( directory, format, prefix, nimages, dim.image, nii=TRUE )
{
  fmridata = array(NA, dim=c(dim.image,nimages))
  if(format=="Analyze")
  {
    for(i in 1:nimages)
    {
      if (i <= 9) aux = paste0("000",i,".img")
      if ((i > 9) && (i <= 99)) aux = paste0("00",i,".img")
      if ((i > 99) && (i <= 999)) aux = paste0("0",i,".img")
      if (i > 999) aux = paste0(i,".img")
      a = readANALYZE(paste0(directory, "/", prefix, aux))
      fmridata[,,,i] = a[,,,1]
    }
    return(fmridata)
  }
  if(format=="Nifti")
  {
    for(i in 1:nimages)
    {
      if(nii==TRUE) ext = ".nii" else ext = ".img"
      if (i <= 9) aux = paste0("000",i,ext)
      if ((i > 9) && (i <= 99)) aux = paste0("00",i,ext)
      if ((i > 99) && (i <= 999)) aux = paste0("0",i,ext)
      if (i > 999) aux = paste0(i,ext)
      fmridata[,,,i] = readNIfTI(paste0(directory, "/", prefix, aux))
    }
    return(fmridata)
  }
  if(format=="Afni")
  {
    fmridata = readAFNI(paste0(directory, "/", prefix))
    return(fmridata)
  }
}
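# Illustrative sketch (not part of the package; it mirrors the package
# vignette): the individual steps wrapped by BHMSMA() above can also be run
# one at a time, which is useful for inspecting intermediate results.
# glmmap   <- glmcoef(n, grid, data, designmat)
# wavecoef <- waveletcoef(n, grid, glmmap$GLMCoefStandardized[,,,k])
# hyperest <- hyperparamest(n, grid, wavecoef$WaveletCoefficientMatrix, "multi")
# pkljbar  <- postmixprob(n, grid, wavecoef$WaveletCoefficientMatrix,
#                         hyperest$hyperparam, "multi")
# postwave <- postwaveletcoef(n, grid, wavecoef$WaveletCoefficientMatrix,
#                             hyperest$hyperparam, pkljbar$pkljbar, "multi")
# postglm  <- postglmcoef(n, grid, glmmap$GLMCoefStandardized[,,,k],
#                         postwave$PostMeanWaveletCoef)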
/scratch/gouwar.j/cran-all/cranData/BHMSMAfMRI/R/BHMSMA.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

glmcoef_sub <- function(grid, D_sub, X) {
    .Call(`_BHMSMAfMRI_glmcoef_sub`, grid, D_sub, X)
}

minus_ll <- function(C0, C1, C2, C3, C4, C5, subs, grid, waveletcoefmat) {
    .Call(`_BHMSMAfMRI_minus_ll`, C0, C1, C2, C3, C4, C5, subs, grid, waveletcoefmat)
}

ll <- function(C0, C1, C2, C3, C4, C5, subs, grid, waveletcoefmat) {
    .Call(`_BHMSMAfMRI_ll`, C0, C1, C2, C3, C4, C5, subs, grid, waveletcoefmat)
}

var_mle <- function(C0, C1, C2, C3, C4, C5, subs, grid, waveletcoefmat, h) {
    .Call(`_BHMSMAfMRI_var_mle`, C0, C1, C2, C3, C4, C5, subs, grid, waveletcoefmat, h)
}

LL <- function(w, l, j, n, waveletcoefmat, C0, C1, C2, C3, C4, C5) {
    .Call(`_BHMSMAfMRI_LL`, w, l, j, n, waveletcoefmat, C0, C1, C2, C3, C4, C5)
}

pklj <- function(w, l, j, i, waveletcoefmat, C4, C5) {
    .Call(`_BHMSMAfMRI_pklj`, w, l, j, i, waveletcoefmat, C4, C5)
}

pklj_bar <- function(grid, n, waveletcoefmat, C0, C1, C2, C3, C4, C5) {
    .Call(`_BHMSMAfMRI_pklj_bar`, grid, n, waveletcoefmat, C0, C1, C2, C3, C4, C5)
}

post_wavelet_coef <- function(grid, n, waveletcoefmat, pkljbar, C4, C5) {
    .Call(`_BHMSMAfMRI_post_wavelet_coef`, grid, n, waveletcoefmat, pkljbar, C4, C5)
}

set_seed <- function(seed) {
    invisible(.Call(`_BHMSMAfMRI_set_seed`, seed))
}

post_samp <- function(nsample, grid, n, waveletcoefmat, pkljbar, C4, C5, seed) {
    .Call(`_BHMSMAfMRI_post_samp`, nsample, grid, n, waveletcoefmat, pkljbar, C4, C5, seed)
}
/scratch/gouwar.j/cran-all/cranData/BHMSMAfMRI/R/RcppExports.R
.onAttach <- function(libname, pkgname) {
  message <- c("\n Welcome! Thanks for trying BHMSMAfMRI.",
               "\n \n Website: https://nilotpalsanyal.github.io/BHMSMAfMRI/",
               "\n Bug report: https://github.com/nilotpalsanyal/BHMSMAfMRI/issues")
  packageStartupMessage(message)
}
/scratch/gouwar.j/cran-all/cranData/BHMSMAfMRI/R/welcome_msg.R
## ---- out.lines = 10, eval=F--------------------------------------------------
# library(BHMSMAfMRI)
# BHMSMAmulti <- BHMSMA(n, grid, data, designmat, k, "multi", truecoef)
# names(BHMSMAmulti)
# [1] "GLMCoefStandardized"      "GLMCoefSE"
# [3] "WaveletCoefficientMatrix" "hyperparam"
# [5] "hyperparamVar"            "posteriorMixProb"
# [7] "Waveletcoefposterior"     "GLMcoefposterior"

## ---- eval=T------------------------------------------------------------------
library(BHMSMAfMRI)
fpath <- system.file("extdata", package="BHMSMAfMRI")
untar(paste0(fpath,"/fmridata.tar"), exdir=tempdir())
n <- 3
grid <- 32
ntime <- 9
data <- array(dim=c(n,grid,grid,ntime))
for(subject in 1:n)
{
  directory <- paste0(tempdir(),"/fmridata","/s0",subject,"/")
  a <- readfmridata(directory, format="Analyze",
    prefix=paste0("s0",subject,"_t"), nimages=ntime,
    dim.image=c(grid,grid,1))
  data[subject,,,] <- a[,,1,]
}
dim(a)

## ---- eval=T------------------------------------------------------------------
data(fmridata)
names(fmridata)
truecoef <- fmridata$TrueCoeff
designmat <- fmridata$DesignMatrix
dim(truecoef)
dim(designmat)

## ----TrueCoef, fig.cap = "True regression coefficient images for the 3 subjects", fig.width=12, fig.height=4.2, fig.align="center"----
par(mfrow=c(1,n), cex=1)
for(subject in 1:n)
  image(truecoef[subject,,], main=paste0("Subject ",subject),
    col=heat.colors(8))

## ---- eval=T------------------------------------------------------------------
glmmap <- glmcoef(n, grid, data, designmat)
names(glmmap)
dim(glmmap$GLMCoefStandardized)
dim(glmmap$GLMCoefSE)

## ----GLMCoef, fig.cap = "Standardized regression coefficient estimates images for the second regressor for all subjects", fig.width=12, fig.height=4.2, fig.align="center"----
k <- 2
par(mfrow=c(1,n), cex=1)
for(subject in 1:n)
  image(abs(glmmap$GLMCoefStandardized[subject,,,k]), col=heat.colors(8),
    zlim=c(0,6), main=paste0("Subject ",subject))

## ---- eval=T------------------------------------------------------------------
wavecoefglmmap <- waveletcoef(n, grid, glmmap$GLMCoefStandardized[,,,k],
  wave.family="DaubLeAsymm", filter.number=6, bc="periodic")
names(wavecoefglmmap)
dim(wavecoefglmmap$WaveletCoefficientMatrix)

## ---- eval=T--------------------------------------------------------------------------------------
options(width = 100)
hyperest <- hyperparamest(n, grid, wavecoefglmmap$WaveletCoefficientMatrix,
  analysis = "multi")
names(hyperest)
round(hyperest$hyperparam,3)
signif(hyperest$hyperparamVar,4)

## ---- eval=T--------------------------------------------------------------------------------------
a.kl <- hyperest$hyperparam[1] * 2^(-hyperest$hyperparam[2] * (0:4))
b.kl <- hyperest$hyperparam[3] * 2^(-hyperest$hyperparam[4] * (0:4))
c.kl <- hyperest$hyperparam[5] * 2^(-hyperest$hyperparam[6] * (0:4))
round(a.kl,3)

## ---- eval=T--------------------------------------------------------------------------------------
pkljbar <- postmixprob(n, grid, wavecoefglmmap$WaveletCoefficientMatrix,
  hyperest$hyperparam, analysis = "multi")
names(pkljbar)
dim(pkljbar$pkljbar)
round(pkljbar$pkljbar[1,1:10],4)

## ---- eval=T--------------------------------------------------------------------------------------
postwavecoefglmmap <- postwaveletcoef(n, grid,
  wavecoefglmmap$WaveletCoefficientMatrix, hyperest$hyperparam,
  pkljbar$pkljbar, analysis = "multi")
names(postwavecoefglmmap)
dim(postwavecoefglmmap$PostMeanWaveletCoef)
dim(postwavecoefglmmap$PostMedianWaveletCoeff)

## ---- eval=T--------------------------------------------------------------------------------------
postglmmap <- postglmcoef(n, grid, glmmap$GLMCoefStandardized[,,,k],
  postwavecoefglmmap$PostMeanWaveletCoef, wave.family="DaubLeAsymm",
  filter.number=6, bc="periodic")
str(postglmmap,vec.len = 3, digits.d = 2)

## ----PostCoef, fig.cap = "Posterior standardized regression coefficient images for the 3 subjects obtained by BHMSMA", fig.width=12, fig.height=4.2, fig.align="center"----
par(mfrow=c(1,n), cex=1)
for(subject in 1:n)
  image(abs(postglmmap$GLMcoefposterior[subject,,]), col=heat.colors(8),
    zlim=c(0,6), main=paste0("Subject ",subject))

## ---- eval=T--------------------------------------------------------------------------------------
MSE <- c()
for (i in 1:n)
  MSE[i] <- sum((as.vector(truecoef[i,,]/glmmap$GLMCoefSE[i,,,2]) -
    as.vector(postglmmap$GLMcoefposterior[i,,]))^2)
round(MSE,3)

## ---- eval=T, fig.width=12, fig.height=4.2--------------------------------------------------------
Postsamp <- postsamples( nsample=50, n, grid, glmmap$GLMCoefStandardized[,,,k],
  wavecoefglmmap$WaveletCoefficientMatrix, hyperest$hyperparam,
  pkljbar$pkljbar, "multi", seed=123)
names(Postsamp)
dim(Postsamp$samples)
dim(Postsamp$postdiscovery)

## ----PostDiscovery, eval=T, fig.cap = "Posterior discovery images for the 3 subjects", fig.width=12, fig.height=4.2, fig.align="center"----
par(mfrow=c(1,n), cex=1)
for(subject in 1:n)
  image(Postsamp$postdiscovery[subject,,], col=heat.colors(8),
    main=paste0("Subject ",subject))

## ---- eval=T--------------------------------------------------------------------------------------
postsd <- array(dim=c(n,grid,grid))
for(subject in 1:n)
  postsd[subject,,] <- apply(Postsamp$samples[subject,,,], 1:2, sd)
round(postsd[1,1:5,1:5],3)

## ---- eval=T--------------------------------------------------------------------------------------
postgroup <- postgroupglmcoef( n, grid, glmmap$GLMCoefStandardized[,,,k],
  postwavecoefglmmap$PostMeanWaveletCoef)
names(postgroup)
dim(postgroup$groupcoef)

## ----PostGroupCoef, fig.cap = "Posterior group regression coefficient image", fig.width=2.5, fig.height=2.5, fig.align="center"----
par(mfrow=c(1,1),cex=0.5)
image(abs(postgroup$groupcoef),col=heat.colors(8),zlim=c(0,6))
/scratch/gouwar.j/cran-all/cranData/BHMSMAfMRI/inst/doc/BHMSMAfMRIvignette.R
---
title: "`BHMSMAfMRI` User Guide"
author: Nilotpal Sanyal
date: "`r format(Sys.time(), '%B %d, %Y')`"
output: 
  bookdown::pdf_document2:
    fig_caption: yes
    toc: yes
    toc_depth: 3
    number_sections: true
fontsize: 12pt
bibliography: mybib.bib
vignette: >
  %\VignetteIndexEntry{BHMSMAfMRI User Guide}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

<!-- \setlength{\textwidth}{6in}
\setlength{\textheight}{8.5in}
\setlength{\parskip}{.2cm plus4mm minus2mm} -->

<!-- ```{r set-options, echo=FALSE, cache=FALSE}
options(width = 200)
``` -->

$\hspace{5cm}$

Welcome to **BHMSMAfMRI**, an R package to analyze functional MRI (fMRI) data, or other multiscale data. This manual shows how the **BHMSMAfMRI** package functions are used to analyze fMRI data and should be helpful for the first-time user. In Section \@ref(secintro), we give a short introduction and non-mathematical overview of the methodology, and in Section \@ref(secuse), we discuss the package functions in a systematic way and apply them to analyze a simulated fMRI dataset.

# Introduction and overview {#secintro}

The **BHMSMAfMRI** package performs Bayesian hierarchical multi-subject multiscale analysis (BHMSMA) of fMRI data [@sanyal:ferreira:2012], or other multiscale data. Though fMRI data are generally 3D, currently **BHMSMAfMRI** considers analysis of 2D slices only. The main features of the BHMSMA method are that:

- it takes into account both the temporal and the spatial information contained in the fMRI data,

- it performs multi-subject analysis and borrows strength across subjects for precise estimation of the effect sizes (i.e., brain activations), and provides a straightforward way to obtain a group activation map, and

- it does not use Markov chain Monte Carlo (MCMC) simulation and is fast.

BHMSMA models the temporal variation present in the fMRI data through a general linear model (GLM) and then considers the discrete wavelet transform of the standardized regression coefficients for harnessing the spatial information. In the wavelet domain, each wavelet coefficient is assigned a mixture prior that is a combination of a Gaussian density and a point mass at zero. This prior specification takes into account the sparsity of the wavelet coefficients. For the mixture probabilities BHMSMA considers a prior that depends on a few hyperparameters. Inference is carried out by an empirical Bayes methodology without using MCMC methods. The inference uses approximation of one-dimensional integrals only. The posterior means of the regression coefficients are obtained by using the posterior means of the wavelet coefficients in the inverse discrete wavelet transform. Further, the posterior wavelet coefficients are averaged over subjects and used in the inverse discrete wavelet transform to obtain a posterior group estimate of the regression coefficients. Posterior uncertainty is assessed based on simulations from the posterior distribution of the regression coefficients.

The **BHMSMAfMRI** package fits the BHMSMA model to the fMRI data and provides estimates of the hyperparameters along with their standard errors, posterior means of the wavelet coefficients, posterior means of the regression coefficients, samples from the posterior distribution of the regression coefficients and a posterior group estimate of the regression coefficients. The posterior samples can be used to compute estimates of the posterior standard deviation and posterior probability maps.
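As a schematic summary (suppressing the subject-, level-, and location-specific subscripts used in @sanyal:ferreira:2012), each wavelet coefficient $d$ receives a spike-and-slab prior of the form

$$
d \mid p, \tau^2 \;\sim\; p \, N(0, \tau^2) \, + \, (1 - p) \, \delta_0,
$$

where $\delta_0$ denotes a point mass at zero and the mixture probability $p$ is governed by the model hyperparameters; see @sanyal:ferreira:2012 for the exact level-specific parameterization.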
# Use of package functions with examples {#secuse}

In this section we illustrate the use of the package functions. We assume that prior to applying our methodology, the fMRI data have been preprocessed for necessary corrections like realignment or motion correction, slice-timing correction, coregistration with the anatomical image and normalization. However, the data *must not be* spatially filtered before applying BHMSMA, because our approach is to include the spatial information into the modeling instead of filtering it out. Preprocessing can be performed by using available software packages like SPM [@SPM], BrainVoyager [@BrainVoyager], AFNI [@AFNI], and FSL [@FSL].

In the following subsections, we show the use of the package functions in a systematic way.

## The main function `BHMSMA`

`BHMSMA` is the main function of the **BHMSMAfMRI** package. `BHMSMA` accepts fMRI data as a 4D array. The data can be imported from various image files by the `readfmridata` function. `BHMSMA` successively calls the following functions to perform the whole analysis --- `glmcoef` to obtain the regression coefficient map of the regressor of interest, `waveletcoef` to perform the 2D wavelet transformation of the regression coefficient map, `hyperparamest` to obtain estimates of the BHMSMA model hyperparameters, `postmixprob` to obtain estimates of the mixture probabilities that define the posterior distribution of the wavelet coefficients, `postwaveletcoef` to obtain the posterior estimates of the wavelet coefficients and finally `postglmcoef` to obtain the posterior smoothed version of the regression coefficient map. If true coefficients are given, `BHMSMA` also returns mean squared error (MSE) estimates. Here is a quick look at the usage and outputs of the function.

```{r, out.lines = 10, eval=F}
library(BHMSMAfMRI)
BHMSMAmulti <- BHMSMA(n, grid, data, designmat, k, "multi", truecoef)
names(BHMSMAmulti)
[1] "GLMCoefStandardized"      "GLMCoefSE"               
[3] "WaveletCoefficientMatrix" "hyperparam"              
[5] "hyperparamVar"            "posteriorMixProb"        
[7] "Waveletcoefposterior"     "GLMcoefposterior"
```

The dimension of `data` is `c(n,grid,grid,ntime)` where `n` is the number of subjects, `grid^2` is the total number of voxels in the data and `ntime` is the length of the time series for each voxel. The argument `k` selects the regressor of interest from the design matrix `designmat`, which can have multiple regressor columns. The following sections break down the whole analysis shown above by showing specific uses and outputs of all the above functions and several others of the **BHMSMAfMRI** package.

## Reading fMRI Data: `readfmridata`

The function `readfmridata` can read fMRI data file(s) stored in ANALYZE format (.img/.hdr files), NIFTI format (.img/.hdr files or .nii files) or AFNI format (.BRIK/.HEAD files). The reading of the fMRI data files is done using the R package **oro.nifti** [@oronifti], which is loaded when the **BHMSMAfMRI** package is loaded.

For illustration, we consider a simulated fMRI dataset stored in ANALYZE format and provided within the **BHMSMAfMRI** package. The dataset contains noisy fMRI data collected over a $32 \times 32$ grid of a single axial brain slice and at 9 timepoints for 3 subjects. The following code illustrates how the function `readfmridata` can be used to import the data from the image files. The simulated dataset is extracted in the directory `fmridata` within the R temporary folder.
```{r, eval=T}
library(BHMSMAfMRI)
fpath <- system.file("extdata", package="BHMSMAfMRI")
untar(paste0(fpath,"/fmridata.tar"), exdir=tempdir())
n <- 3
grid <- 32
ntime <- 9
data <- array(dim=c(n,grid,grid,ntime))
for(subject in 1:n)
{
  directory <- paste0(tempdir(),"/fmridata","/s0",subject,"/")
  a <- readfmridata(directory, format="Analyze",
    prefix=paste0("s0",subject,"_t"), nimages=ntime,
    dim.image=c(grid,grid,1))
  data[subject,,,] <- a[,,1,]
}
dim(a)
```

The above code reads all the data files for all subjects into a 4D array `data`. For each subject, the data were generated by adding Gaussian random noise to the true regression coefficient image with activation in 3 regions. The positions of 2 activation regions were varied across subjects. The underlying design was a block design. The true regression coefficient images and the design matrix are also included in the package and can be read as follows.

```{r, eval=T}
data(fmridata)
names(fmridata)
truecoef <- fmridata$TrueCoeff
designmat <- fmridata$DesignMatrix
dim(truecoef)
dim(designmat)
```

Now, we have `truecoef`, which is an array of dimension (3, 32, 32) containing the true regression coefficients, `data`, which is an array of dimension (3, 32, 32, 9) containing the time series of noisy observations for all the subjects, and `designmat`, which is the design matrix used to generate the data. Note that the R package **neuRosim** [@neuRosim] can be used to generate fMRI data.

```{r TrueCoef, fig.cap = "True regression coefficient images for the 3 subjects", fig.width=12, fig.height=4.2, fig.align="center"}
par(mfrow=c(1,n), cex=1)
for(subject in 1:n)
  image(truecoef[subject,,], main=paste0("Subject ",subject),
    col=heat.colors(8))
```

## Temporal modeling through GLM: `glmcoef`

Now, we fit a general linear model to the time series of each voxel and obtain the estimated regression coefficients for all the regressors included in `designmat` by using the function `glmcoef` as follows.

```{r, eval=T}
glmmap <- glmcoef(n, grid, data, designmat)
names(glmmap)
dim(glmmap$GLMCoefStandardized)
dim(glmmap$GLMCoefSE)
```

The output `glmmap` contains the estimated standardized regression coefficients and their standard error estimates. From now on, we focus on the 2$^\text{nd}$ regressor as the regressor of interest. Figure \@ref(fig:GLMCoef), obtained by the following code, shows the images of its standardized regression coefficient estimates for the 3 subjects.

```{r GLMCoef, fig.cap = "Standardized regression coefficient estimates images for the second regressor for all subjects", fig.width=12, fig.height=4.2, fig.align="center"}
k <- 2
par(mfrow=c(1,n), cex=1)
for(subject in 1:n)
  image(abs(glmmap$GLMCoefStandardized[subject,,,k]), col=heat.colors(8),
    zlim=c(0,6), main=paste0("Subject ",subject))
```

## Wavelet transform of the GLM coefficients: `waveletcoef`

Next, we apply the discrete wavelet transform to the standardized regression coefficient images of each subject. The wavelet transformation is performed using the R package **wavethresh** [@wavethresh], which is loaded when the **BHMSMAfMRI** package is loaded. The function `waveletcoef` returns the wavelet coefficients for the selected regressor for all the subjects as a matrix. Below it is illustrated.
```{r, eval=T}
wavecoefglmmap <- waveletcoef(n, grid, glmmap$GLMCoefStandardized[,,,k],
  wave.family="DaubLeAsymm", filter.number=6, bc="periodic")
names(wavecoefglmmap)
dim(wavecoefglmmap$WaveletCoefficientMatrix)
```

In the wavelet transform, the user can choose the wavelet family (one of `DaubLeAsymm` and `DaubExPhase`), the number of vanishing moments (`filter.number`) and the boundary condition (`symmetric` or `periodic`) to be applied. For fMRI data, we recommend the use of the Daubechies least asymmetric wavelet transform (`DaubLeAsymm`) with 6 vanishing moments and periodic boundary condition.

## Estimating the model hyperparameters: `hyperparamest`

The BHMSMA model has six hyperparameters, which are estimated by their maximum likelihood estimates (MLEs) following an empirical Bayes approach. We can estimate the hyperparameters by performing multi-subject analysis or single subject analysis. In multi-subject analysis, the likelihood function of the hyperparameters is constructed over all subjects and maximized to obtain their estimates. In single subject analysis, a separate likelihood function of the hyperparameters is constructed and maximized for each subject. Hence, for single subject analysis, we obtain a set of estimates of the hyperparameters for each subject. Clearly, multi-subject analysis benefits from being able to borrow strength across subjects and produces more precise estimates.

The function `hyperparamest` computes the hyperparameter estimates and their standard error estimates. The type of analysis must be specified as `analysis="multi"` or `"single"`. The following code illustrates the use of the function `hyperparamest` and the output.

```{r, eval=T}
options(width = 100)
hyperest <- hyperparamest(n, grid, wavecoefglmmap$WaveletCoefficientMatrix,
  analysis = "multi")
names(hyperest)
round(hyperest$hyperparam,3)
signif(hyperest$hyperparamVar,4)
```

From the hyperparameter estimates, we can compute the estimates of $a_{kl}$, $b_{kl}$ and $c_{kl}$ [@sanyal:ferreira:2012] for all levels as follows.

```{r, eval=T}
a.kl <- hyperest$hyperparam[1] * 2^(-hyperest$hyperparam[2] * (0:4))
b.kl <- hyperest$hyperparam[3] * 2^(-hyperest$hyperparam[4] * (0:4))
c.kl <- hyperest$hyperparam[5] * 2^(-hyperest$hyperparam[6] * (0:4))
round(a.kl,3)
```

## Computing posterior distribution of the wavelet coefficients: `postmixprob`, `postwaveletcoef`

Given the values of the hyperparameters, the marginal posterior distribution of the wavelet coefficients is a mixture of a Gaussian and a point mass at zero with mixture probabilities $\bar{p}_{iklj}$. BHMSMA computes the $\bar{p}_{iklj}$ values using the Newton-Cotes numerical integration method. The function `postmixprob` computes the values $\bar{p}_{iklj}$ for all subjects and returns them in a matrix. The following code illustrates it.

```{r, eval=T}
pkljbar <- postmixprob(n, grid, wavecoefglmmap$WaveletCoefficientMatrix,
  hyperest$hyperparam, analysis = "multi")
names(pkljbar)
dim(pkljbar$pkljbar)
round(pkljbar$pkljbar[1,1:10],4)
```

Once the $\bar{p}_{iklj}$ values are obtained, the marginal posterior distributions of the wavelet coefficients are entirely known. With the hyperparameter estimates and the $\bar{p}_{iklj}$ values, the function `postwaveletcoef` computes the posterior mean and the posterior median of the wavelet coefficients. The following code illustrates it.
```{r, eval=T}
postwavecoefglmmap <- postwaveletcoef(n, grid,
  wavecoefglmmap$WaveletCoefficientMatrix, hyperest$hyperparam,
  pkljbar$pkljbar, analysis = "multi")
names(postwavecoefglmmap)
dim(postwavecoefglmmap$PostMeanWaveletCoef)
dim(postwavecoefglmmap$PostMedianWaveletCoeff)
```

## Computing posterior mean of the regression coefficients: `postglmcoef`

Given the posterior means of the wavelet coefficients, the function `postglmcoef` is used to obtain the posterior means of the regression coefficients. The following code shows its use.

```{r, eval=T}
postglmmap <- postglmcoef(n, grid, glmmap$GLMCoefStandardized[,,,k],
  postwavecoefglmmap$PostMeanWaveletCoef, wave.family="DaubLeAsymm",
  filter.number=6, bc="periodic")
str(postglmmap,vec.len = 3, digits.d = 2)
```

Figure \@ref(fig:PostCoef), obtained by the following code, shows the images of the posterior standardized regression coefficients for the 3 subjects.

```{r PostCoef, fig.cap = "Posterior standardized regression coefficient images for the 3 subjects obtained by BHMSMA", fig.width=12, fig.height=4.2, fig.align="center"}
par(mfrow=c(1,n), cex=1)
for(subject in 1:n)
  image(abs(postglmmap$GLMcoefposterior[subject,,]), col=heat.colors(8),
    zlim=c(0,6), main=paste0("Subject ",subject))
```

As the true coefficients are known, we can compute the mean squared error (MSE) using the following code.

```{r, eval=T}
MSE <- c()
for (i in 1:n)
  MSE[i] <- sum((as.vector(truecoef[i,,]/glmmap$GLMCoefSE[i,,,2]) -
    as.vector(postglmmap$GLMcoefposterior[i,,]))^2)
round(MSE,3)
```

In @sanyal:ferreira:2012, we show that our multi-subject methodology performs better than some existing methodologies in terms of MSE.

## Posterior simulation and uncertainty estimation: `postsamples`

In order to simulate observations from the posterior distribution of the regression coefficients, the function `postsamples` can be used. The type of analysis must be specified. The code below shows its use.

```{r, eval=T, fig.width=12, fig.height=4.2}
Postsamp <- postsamples( nsample=50, n, grid, glmmap$GLMCoefStandardized[,,,k],
  wavecoefglmmap$WaveletCoefficientMatrix, hyperest$hyperparam,
  pkljbar$pkljbar, "multi", seed=123)
names(Postsamp)
dim(Postsamp$samples)
dim(Postsamp$postdiscovery)
```

The argument `nsample` denotes the number of samples to be drawn. We can see `postsamples` returns the posterior samples and the probabilities of posterior discovery [@morris:et:al:2011] for all the subjects. Figure \@ref(fig:PostDiscovery), obtained by the following code, shows the posterior discovery images based on the above 50 samples for the 3 subjects.

```{r PostDiscovery, eval=T, fig.cap = "Posterior discovery images for the 3 subjects", fig.width=12, fig.height=4.2, fig.align="center"}
par(mfrow=c(1,n), cex=1)
for(subject in 1:n)
  image(Postsamp$postdiscovery[subject,,], col=heat.colors(8),
    main=paste0("Subject ",subject))
```

From the posterior samples, the posterior standard deviations of the regression coefficients can be computed as follows.

```{r, eval=T}
postsd <- array(dim=c(n,grid,grid))
for(subject in 1:n)
  postsd[subject,,] <- apply(Postsamp$samples[subject,,,], 1:2, sd)
round(postsd[1,1:5,1:5],3)
```

## Posterior group estimates: `postgroupglmcoef`

Posterior group coefficients can be obtained by using the function `postgroupglmcoef` as follows.
```{r, eval=T}
postgroup <- postgroupglmcoef( n, grid, glmmap$GLMCoefStandardized[,,,k],
  postwavecoefglmmap$PostMeanWaveletCoef)
names(postgroup)
dim(postgroup$groupcoef)
```

Figure \@ref(fig:PostGroupCoef), obtained by the following code, shows the posterior group coefficient image for the simulated dataset.

```{r PostGroupCoef, fig.cap = "Posterior group regression coefficient image", fig.width=2.5, fig.height=2.5, fig.align="center"}
par(mfrow=c(1,1),cex=0.5)
image(abs(postgroup$groupcoef),col=heat.colors(8),zlim=c(0,6))
```

## References
/scratch/gouwar.j/cran-all/cranData/BHMSMAfMRI/inst/doc/BHMSMAfMRIvignette.Rmd
/scratch/gouwar.j/cran-all/cranData/BHMSMAfMRI/vignettes/BHMSMAfMRIvignette.Rmd
#' @details See vignette.
#' @references Baumeister, C., & Hamilton, J.D. (2015). Sign restrictions, structural vector autoregressions, and useful prior information. \emph{Econometrica}, 83(5), 1963-1999.
#' @references Baumeister, C., & Hamilton, J.D. (2017). Structural interpretation of vector autoregressions with incomplete identification: Revisiting the role of oil supply and demand shocks (No. w24167). National Bureau of Economic Research.
#' @references Baumeister, C., & Hamilton, J.D. (2018). Inference in structural vector autoregressions when the identifying assumptions are not fully believed: Re-evaluating the role of monetary policy in economic fluctuations. \emph{Journal of Monetary Economics}, 100, 48-65.
#' @seealso Dr. Christiane Baumeister's website \href{https://sites.google.com/site/cjsbaumeister/}{https://sites.google.com/site/cjsbaumeister/}.
#' @seealso Dr. James D. Hamilton's website \href{https://econweb.ucsd.edu/~jhamilton/}{https://econweb.ucsd.edu/~jhamilton/}.
"_PACKAGE"
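# Dimension reference (not run): shapes accepted by the BH_SBVAR argument
# checks implemented in this package's helper functions, for n endogenous
# variables and nlags lags. This block is a reading aid only; see the
# vignette for worked examples.
#
#   y      : T x n data matrix with T > (n * nlags) + 1
#   pA     : (n, n, 8) array, prior for the structural matrix A
#   pdetA  : (1, 1, 6) array, prior for det(A)
#   pH     : (n, n, 6) array, prior for H = solve(A)
#   pP     : ((n * nlags) + 1) x n matrix, prior for the lag coefficients
#   pP_sig : ((n * nlags) + 1) square, symmetric, nonnegative matrix
#   kappa1 : 1 x n matrix with nonnegative elements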
/scratch/gouwar.j/cran-all/cranData/BHSBVAR/R/BHSBVAR-package.R
# Create matrices containing dependent and independent variables.
#' @keywords internal
getXY <- function(data1, nlags) {
  varnames <- colnames(data1)
  data1 <- data1 - (matrix(data = 1, nrow = nrow(data1), ncol = ncol(data1)) %*% diag(x = colMeans(x = data1, na.rm = FALSE, dims = 1)))
  colnames(data1) <- varnames
  X <- matrix(data = NA_real_, nrow = (nrow(data1) - nlags), ncol = (ncol(data1) * nlags))
  for (k in 1:nlags) {
    X[, (ncol(data1) * (k - 1) + 1):(ncol(data1) * k)] <- data1[(nlags - k + 1):(nrow(data1) - k), ]
  }
  X <- cbind(X, 1)
  colnames(X) <- c(paste(rep(colnames(data1), nlags), "_L", sort(rep(seq(from = 1, to = nlags, by = 1), times = ncol(data1)), decreasing = FALSE), sep = ""), "cons")
  Y <- data1[(nlags + 1):nrow(data1), ]
  list1 <- list("X" = X, "Y" = Y)
  return(list1)
}

# Posterior Density Function used to Determine Starting Values for A.
#' @keywords internal
#' @import Rcpp
post_A_optim <- function(par, pA, pdetA, pH, pP, pP_sig, pR_sig, kappa1, y1, x1, omega, somega, nlags) {
  nrow1 <- dim(pA)[1]
  ncol1 <- dim(pA)[2]
  A_temp <- c(pA[, , 3])
  A_temp[which(c(!is.na(pA[, , 1])))] <- par
  A_test <- matrix(data = A_temp, nrow = nrow1, ncol = ncol1)
  B_test <- matrix(data = NA_real_, nrow = ((ncol1 * nlags) + 1), ncol = ncol1)
  zeta_test <- matrix(data = 0, nrow = nrow1, ncol = ncol1)
  pR <- array(data = 0, dim = c(((ncol1 * nlags) + 1), ncol1, ncol1))
  temp0 <- t(x1) %*% x1 + pP_sig
  temp1 <- t(x1) %*% y1 + pP_sig %*% pP
  temp2 <- t(y1) %*% y1 + t(pP) %*% pP_sig %*% pP
  Phi0 <- solve(temp0, temp1)
  temp4 <- temp2 - t(Phi0) %*% temp1
  temp5 <- matrix(data = NA_real_, nrow = ((ncol1 * nlags) + 1), ncol = 1)
  nR <- matrix(data = 0, nrow = ncol1, ncol = 1)
  for (i in 1:ncol1) {
    if (any(pR_sig[, , i] > 0)) {
      nR[i, 1] <- 1
    }
  }
  if (all(nR == 0)) {
    B_test <- Phi0 %*% A_test
    diag(zeta_test) <- diag(t(A_test) %*% temp4 %*% A_test)
  } else {
    for (i in 1:ncol1) {
      if (nR[i, 1] == 0) {
        B_test[, i] <- Phi0 %*% A_test[, i]
        zeta_test[i, i] <- c(t(A_test[, i]) %*% temp4 %*% A_test[, i])
      } else {
        for (j in 1:nrow1) {
          if (is.finite(pA[j, i, 7])) {
            pR[j, i, i] <- A_test[j, i]
          }
        }
        temp5[, ] <- pR_sig[, , i] %*% pR[, i, i]
        B_test[, i] <- solve((temp0 + pR_sig[, , i])) %*% ((temp1 %*% A_test[, i]) + temp5)
        zeta_test[i, i] <- ((t(A_test[, i]) %*% temp2 %*% A_test[, i]) + (t(pR[, i, i]) %*% temp5)) - (t((temp1 %*% A_test[, i]) + temp5) %*% solve(temp0 + pR_sig[, , i]) %*% ((temp1 %*% A_test[, i]) + temp5))
      }
    }
  }
  # compute posterior density
  priors <- sum_log_prior_densities(A_test = A_test, pA = pA, pdetA = pdetA, pH = pH)
  likelihood <- log_likelihood_function(A_test = A_test, kappa1 = kappa1, y1 = y1, omega = omega, zeta_test = zeta_test, somega = somega)
  posterior <- -(priors + likelihood)
  return(posterior)
}
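# Example (not run): a toy call of the getXY helper above, assuming a 20 x 2
# data matrix and 2 lags. getXY demeans the data, stacks the lagged
# regressors, and appends a constant column named "cons".
#
#   set.seed(1)
#   y <- matrix(rnorm(40), nrow = 20, ncol = 2,
#               dimnames = list(NULL, c("y1", "y2")))
#   xy <- getXY(data1 = y, nlags = 2)
#   dim(xy$X)  # 18 x 5: (20 - 2) rows, 2 * 2 lag columns plus "cons"
#   dim(xy$Y)  # 18 x 2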
#' @keywords internal check_integers <- function(list1) { #testing inputs that should be integers for (i in 1:length(list1)) { if (!is.null(list1[[i]])) { if (((!is.numeric(list1[[i]])) & (!is.integer(list1[[i]]))) || (!is.finite(list1[[i]]))) { return(paste(names(list1[i]), ": Must be finite 'numeric' or 'integer'.", sep = "")) } if (length(list1[[i]]) != 1) { return(paste(names(list1[i]), ": Must be finite 'numeric' or 'integer'.", sep = "")) } if ((list1[[i]] %% 1) != 0) { return(paste(names(list1[i]), ": Must be a whole number.", sep = "")) } if ((names(list1[i]) == "nlags") && (list1[[i]] <= 0)) { return(paste(names(list1[i]), ": Must be greater than 0.", sep = "")) } if ((names(list1[i]) == "itr") && (list1[[i]] < 100)) { return(paste(names(list1[i]), ": Must be greater than 100.", sep = "")) } if ((names(list1[i]) == "burn") && (list1[[i]] < 0)) { return(paste(names(list1[i]), ": Must be greater than or equal to 0.", sep = "")) } if ((names(list1[i]) == "thin") && (list1[[i]] <= 0)) { return(paste(names(list1[i]), ": Must be greater than 0.", sep = "")) } if ((names(list1[i]) == "h1_irf") && (list1[[i]] < 4)) { return(paste(names(list1[i]), ": Must be greater than or equal to 3.", sep = "")) } } else { return(paste(names(list1[i]), ": Must be finite 'numeric' or 'integer'.", sep = "")) } } return("pass") } # Check arguments from the BH_SBVAR function that should be doubles. #' @keywords internal check_doubles <- function(list1) { #testing inputs that could be doubles for (i in 1:length(list1)) { if (!is.null(list1[[i]])) { if (((!is.numeric(list1[[i]])) & (!is.integer(list1[[i]]))) || (!is.finite(list1[[i]]))) { return(paste(names(list1[i]), ": Must be finite 'numeric' or 'integer'.", sep = "")) } if (length(list1[[i]]) != 1) { return(paste(names(list1[i]), ": Must be finite 'numeric' or 'integer'.", sep = "")) } if ((names(list1[i]) == "ci") && ((list1[[i]] < 0.7) | (list1[[i]] > 1))) { return(paste(names(list1[i]), ": Must be greater than or equal to 0.7 and less than or equal to 1.", sep = "")) } if ((names(list1[i]) == "cri") && ((list1[[i]] < 0.4) | (list1[[i]] > 1))) { return(paste(names(list1[i]), ": Must be greater than or equal to 0.4 and less than or equal to 1.", sep = "")) } } else { if (names(list1[i]) == "cri") { return(paste(names(list1[i]), ": Must be finite 'numeric' or 'integer'.", sep = "")) } } } return("pass") } # Check arguments from the BH_SBVAR function that should be matrices. #' @keywords internal check_matrices <- function(list1, nlags) { #testing inputs that should be matrices for (i in 1:length(list1)) { if (!is.matrix(list1[[i]])) { return(paste(names(list1[i]), ": Must be a matrix.", sep = "")) } if (any(!is.finite(list1[[i]]))) { return(paste(names(list1[i]), ": Elements must be finite numeric values", sep = "")) } if ((names(list1[i]) == "y") && (nrow(list1[[i]]) <= ncol(list1[[i]]))) { return("y: The number of rows must be greater than the number of columns.") } if ((names(list1[i]) == "y") && (ncol(list1[[i]]) < 2)) { return(paste("y: The number of columns or endogenous variables must be greater than 1.", sep = "")) } if ((names(list1[i]) == "y") && (((ncol(list1[[i]]) * nlags) + 1) >= (nrow(list1[[i]])))) { return(paste("y: The number observations must be greater than ", ((ncol(list1[[i]]) * nlags) + 1),". 
Reduce the number of lags or increase the number of observations.", sep = "")) } if ((names(list1[i]) == "pP") && (nrow(list1[[i]]) != ((nlags * ncol(list1$y)) + 1))) { return(paste("pP: The number of rows must equal ", ((nlags * ncol(list1$y)) + 1), ".", sep = "")) } if ((names(list1[i]) == "pP") && (ncol(list1[[i]]) != ncol(list1$y))) { return(paste("pP: The number of columns must equal ", (ncol(list1$y)), ".", sep = "")) } if ((names(list1[i]) == "pP_sig") && (nrow(list1[[i]]) != ((nlags * ncol(list1$y)) + 1))) { return(paste("pP_sig: The number of rows must equal ", ((nlags * ncol(list1$y)) + 1), ".", sep = "")) } if ((names(list1[i]) == "pP_sig") && (ncol(list1[[i]]) != ((nlags * ncol(list1$y)) + 1))) { return(paste("pP_sig: The number of columns must equal ",((nlags * ncol(list1$y)) + 1), ".", sep = "")) } if ((names(list1[i]) == "pP_sig") && (any(list1[[i]] < 0))) { return(paste("pP_sig: Elements must be greater than or equal to 0.", sep = "")) } if ((names(list1[i]) == "pP_sig") && (!isSymmetric(list1[[i]]))) { return(paste("pP_sig: Must be symmetric.", sep = "")) } if ((names(list1[i]) == "kappa1") && (nrow(list1[[i]]) != 1)) { return(paste("kappa1: The number of rows must equal 1.", sep = "")) } if ((names(list1[i]) == "kappa1") && (ncol(list1[[i]]) != ncol(list1$y))) { return(paste("kappa1: The number of columns must equal ", ncol(list1$y), ".", sep = "")) } if ((names(list1[i]) == "kappa1") && (any(list1[[i]] < 0))) { return(paste("kappa1: Elements must be greater than or equal to 0.", sep = "")) } } return("pass") } # Check arguments from the BH_SBVAR function that should be arrays. #' @keywords internal check_arrays <- function(list1, y) { for (i in 1:length(list1)) { if (!is.array(list1[[i]])) { return(paste(names(list1[i]), ": Must be an array.", sep = "")) } if (!is.numeric(list1[[i]])) { return(paste(names(list1[i]), ": Should contain 'numeric' elements for arrays specifying prior distributions. 
Use 'NA_real_' for elements in arrays that contain all NAs.", sep = "")) } if ((names(list1[i]) == "pA") && (all(is.na(list1[[i]][, , 1])))) { return(paste(names(list1[i]), "[, , 1]: Should indicate at least one parameter to be estimated.", sep = "")) } if ((names(list1[i]) == "pA") && ((dim(list1[[i]])[1] != ncol(y)) | (dim(list1[[i]])[2] != ncol(y)) | (dim(list1[[i]])[3] != 8))) { return(paste(names(list1[i]), ": Should be a (", ncol(y), ", ", ncol(y), ", 8) array.", sep = "")) } if ((names(list1[i]) == "pdetA") && ((dim(list1[[i]])[1] != 1) | (dim(list1[[i]])[2] != 1) | (dim(list1[[i]])[3] != 6))) { return(paste(names(list1[i]), ": Should be a (1, 1, 6) array.", sep = "")) } if ((names(list1[i]) == "pH") && ((dim(list1[[i]])[1] != ncol(y)) | (dim(list1[[i]])[2] != ncol(y)) | (dim(list1[[i]])[3] != 6))) { return(paste(names(list1[i]), ": Should be a (", ncol(y), ", ", ncol(y), ", 6) array.", sep = "")) } for (j in 1:(dim(list1[[i]])[1])) { for (k in 1:(dim(list1[[i]])[2])) { if (is.na(list1[[i]][j, k, 1])) { #if distribution is not specified, no other parameters should be specified if ((names(list1[i]) == "pA") && (any(!is.na(list1[[i]][j, k, c(1:2, 4:8)])))) { return(paste(names(list1[i]), "[", j, ", ", k, ", 1]: Indicates no prior distribution so sign, scale, degrees of freedom, skew, long-run restriction, and proposal scaling parameter (", names(list1[i]),"[", j, ", ", k, ", c(2,4:7)]) should all be NA.", sep = "")) } if ((names(list1[i]) == "pA") && (!is.finite(list1[[i]][j, k, 3]))) { return(paste(names(list1[i]), "[", j, ", ", k, ", 1]: Indicates no prior distribution so position (", names(list1[i]), "[", j, ", ", k, ", 3]) should be some constant value.", sep = "")) } if ((names(list1[i]) != "pA") && (any(!is.na(list1[[i]][j, k, 1:6])))) { return(paste(names(list1[i]), "[", j, ", ", k, ", 1]: Indicates no prior distribution so sign, position, scale, degrees of freedom, skew (", names(list1[i]), "[", j, ", ", k, ", 1:6]) should all be NA.", sep = "")) } } else if (list1[[i]][j, k, 1] == 0) { #if distribution is 0 (symmetric t-distribution), parameters in slices 2:5 must be specified if ((!is.na(list1[[i]][j, k, 2])) && ((list1[[i]][j, k, 2] != 1) & (list1[[i]][j, k, 2] != (-1)))) { return(paste(names(list1[i]), "[", j, ", ", k, ", 2]: Sign should be indicated with a NA, 1, or -1.", sep = "")) } if (!is.finite(list1[[i]][j, k, 3])) { return(paste(names(list1[i]), "[", j, ", ", k, ", 3]: Position should be indicated with a finite number.", sep = "")) } if ((!is.na(list1[[i]][j, k, 2])) && ((list1[[i]][j, k, 3]) != 0) && ((list1[[i]][j, k, 2]) != ((list1[[i]][j, k, 3]) / abs(list1[[i]][j, k, 3])))) { return(paste(names(list1[i]), "[", j, ", ", k, ", 3]: Position should have the same sign as sign (", names(list1[i]), "[", j, ", ", k, ", 2]).", sep = "")) } if ((!is.finite(list1[[i]][j, k, 4])) || (list1[[i]][j, k, 4] <= 0)) { return(paste(names(list1[i]), "[", j, ", ", k, ", 4]: Scale should be indicated with a finite number greater than 0.", sep = "")) } if ((!is.finite(list1[[i]][j, k, 5])) || (list1[[i]][j, k, 5] <= 2)) { return(paste(names(list1[i]), "[", j, ", ", k, ", 5]: Degrees of freedom should be indicated with a finite number greater than 2.", sep = "")) } if ((!is.na(list1[[i]][j, k, 6]))) { return(paste(names(list1[i]), "[", j, ", ", k, ", 6]: Skew should be NA.", sep = "")) } if ((names(list1[i]) == "pA") && ((!is.na(list1[[i]][j, k, 7])) && ((!is.finite(list1[[i]][j, k, 7])) || (list1[[i]][j, k, 7] != 1)))) { return(paste(names(list1[i]), "[", j, ", ", k, ", 7]: Long-run 
restriction should be indicated with an NA (no long-run restriction) or a 1 (long-run restriction).", sep = "")) } if ((names(list1[i]) == "pA") && ((is.na(list1[[i]][j, k, 8])) || (!is.finite(list1[[i]][j, k, 8])) || (list1[[i]][j, k, 8] < 0.1))) { return(paste(names(list1[i]), "[", j, ", ", k, ", 8]: Proposal scaling parameter should be greater than or equal to 0.1.", sep = "")) } } else if (list1[[i]][j, k, 1] == 1) { #if distribution is 1 (non-central t-distribution), parameters in slices 2:6 must be specified if (!is.na(list1[[i]][j, k, 2])) { return(paste(names(list1[i]), "[", j, ", ", k, ", 2]: Sign should be NA.", sep = "")) } if (!is.finite(list1[[i]][j, k, 3])) { return(paste(names(list1[i]), "[", j, ", ", k, ", 3]: Position should be indicated with a finite number.", sep = "")) } if ((!is.finite(list1[[i]][j, k, 4])) || (list1[[i]][j, k, 4] <= 0)) { return(paste(names(list1[i]), "[", j, ", ", k, ", 4]: Scale should be indicated with a finite number greater than 0.", sep = "")) } if ((!is.finite(list1[[i]][j, k, 5])) || (list1[[i]][j, k, 5] <= 2)) { return(paste(names(list1[i]), "[", j, ", ", k, ", 5]: Degrees of freedom should be indicated with a finite number greater than 2.", sep = "")) } if (!is.finite(list1[[i]][j, k, 6])) { return(paste(names(list1[i]), "[", j, ", ", k, ", 6]: Skew should be indicated with a finite number.", sep = "")) } if ((names(list1[i]) == "pA") && ((!is.na(list1[[i]][j, k, 7])) && ((!is.finite(list1[[i]][j, k, 7])) || (list1[[i]][j, k, 7] != 1)))) { return(paste(names(list1[i]), "[", j, ", ", k, ", 7]: Long-run restriction should be indicated with an NA (no long-run restriction) or a 1 (long-run restriction).", sep = "")) } if ((names(list1[i]) == "pA") && ((is.na(list1[[i]][j, k, 8])) || (!is.finite(list1[[i]][j, k, 8])) || (list1[[i]][j, k, 8] < 0.1))) { return(paste(names(list1[i]), "[", j, ", ", k, ", 8]: Proposal scaling parameter should be greater than or equal to 0.1.", sep = "")) } } else if ((list1[[i]][j, k, 1] == 2) | (list1[[i]][j, k, 1] == 3)) { #if distribution is 2 or 3 (inverted beta-distribution or beta-distribution), parameters in slices 2:5 must be specified if ((is.na(list1[[i]][j, k, 2])) || ((list1[[i]][j, k, 2] != 1) & (list1[[i]][j, k, 2] != (-1)))) { return(paste(names(list1[i]), "[", j, ", ", k, ", 2]: Sign should be indicated with a 1, or -1.", sep = "")) } if ((!is.finite(list1[[i]][j, k, 4])) || ((list1[[i]][j, k, 4] <= 1))) { return(paste(names(list1[i]), "[", j, ", ", k, ", 4]: Shape1 should be indicated with a finite number greater than 1.", sep = "")) } if ((!is.finite(list1[[i]][j, k, 5])) || ((list1[[i]][j, k, 5] <= 1))) { return(paste(names(list1[i]), "[", j, ", ", k, ", 5]: Shape2 should be indicated with a finite number greater than 1.", sep = "")) } if (!is.na(list1[[i]][j, k, 6])) { return(paste(names(list1[i]), "[", j, ", ", k, ", 6]: Should be NA.", sep = "")) } if (list1[[i]][j, k, 1] == 2) { if (round(x = list1[[i]][j, k, 3], digits = 2) != round(x = (list1[[i]][j, k, 2] * ((list1[[i]][j, k, 4] + list1[[i]][j, k, 5]) / (list1[[i]][j, k, 5] + 1))), digits = 2)) { return(paste(names(list1[i]), "[", j, ", ", k, ", 3]: Position should be (", list1[[i]][j, k, 2], ") * (", names(list1[i]), "[", j, ", ", k, ", 4] + ", names(list1[i]), "[", j, ", ", k, ", 5]) / (", names(list1[i]), "[", j, ", ", k, ", 5] + 1).", sep = "")) } } else if (list1[[i]][j, k, 1] == 3) { if (round(x = list1[[i]][j, k, 3], digits = 2) != round(x = (list1[[i]][j, k, 2] * ((list1[[i]][j, k, 4] - 1) / (list1[[i]][j, k, 4] + list1[[i]][j, k, 5] 
- 2))), digits = 2)) {
              return(paste(names(list1[i]), "[", j, ", ", k, ", 3]: Position should be (", list1[[i]][j, k, 2], ") * (", names(list1[i]), "[", j, ", ", k, ", 4] - 1) / (", names(list1[i]), "[", j, ", ", k, ", 4] + ", names(list1[i]), "[", j, ", ", k, ", 5] - 2).", sep = ""))
            }
          }
          if ((names(list1[i]) == "pA") && ((!is.na(list1[[i]][j, k, 7])) && ((!is.finite(list1[[i]][j, k, 7])) || (list1[[i]][j, k, 7] != 1)))) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 7]: Long-run restriction should be indicated with an NA (no long-run restriction) or a 1 (long-run restriction).", sep = ""))
          }
          if ((names(list1[i]) == "pA") && ((is.na(list1[[i]][j, k, 8])) || (!is.finite(list1[[i]][j, k, 8])) || (list1[[i]][j, k, 8] < 0.1))) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 8]: Proposal scaling parameter should be greater than or equal to 0.1.", sep = ""))
          }
        } else {
          return(paste(names(list1[i]), "[", j, ", ", k, ", 1]: Distribution should be indicated with an NA (no prior), 0 (symmetric t-distribution), 1 (non-central t-distribution), 2 (inverted beta-distribution), or 3 (beta-distribution).", sep = ""))
        }
      }
    }
  }
  if ((is.finite(list1[["pdetA"]][1, 1, 3])) && (list1[["pdetA"]][1, 1, 3] == 0)) {
    return("pdetA[1, 1, 3]: Should be a finite value other than 0.")
  }
  list2 <- list(pH = list1[["pH"]], pdetA = list1[["pdetA"]])
  list3 <- list(H = solve(list1[["pA"]][, , 3]), detA = matrix(data = det(list1[["pA"]][, , 3])))
  den1 <- 0
  for (i in 1:length(list2)) {
    if (any(is.finite(list2[[i]][, , 1]))) {
      for (j in 1:(dim(list2[[i]])[1])) {
        for (k in 1:(dim(list2[[i]])[2])) {
          if (is.finite(list2[[i]][j, k, 1])) {
            if (list2[[i]][j, k, 1] == 0) {
              if (!is.finite(list2[[i]][j, k, 2])) {
                den1 <- prior_t(list3[[i]][j, k], list2[[i]][j, k, 3], list2[[i]][j, k, 4], list2[[i]][j, k, 5])
                if (den1 == 0) {
                  if (names(list2[i]) == "pH") {
                    return(paste0("pH[", j, ", ", k, ", ]: The (", j, ", ", k, ") element of the inverse of pA[, , 3] produces a prior density of 0."))
                  }
                  if (names(list2[i]) == "pdetA") {
                    return(paste0("pdetA[", j, ", ", k, ", ]: The determinant of pA[, , 3] produces a prior density of 0."))
                  }
                }
              } else {
                if ((list3[[i]][j, k] != 0) && (list2[[i]][j, k, 2] != (list3[[i]][j, k] / abs(list3[[i]][j, k])))) {
                  if (names(list2[i]) == "pH") {
                    return(paste0("pH[", j, ", ", k, ", 2:3]: Must have the same sign as the (", j, ", ", k, ") element of the inverse of pA[, , 3]."))
                  }
                  if (names(list2[i]) == "pdetA") {
                    return(paste0("pdetA[", j, ", ", k, ", 2:3]: Must have the same sign as the determinant of pA[, , 3]."))
                  }
                }
                if (list2[[i]][j, k, 2] == 1) {
                  den1 <- prior_t_p(list3[[i]][j, k], list2[[i]][j, k, 3], list2[[i]][j, k, 4], list2[[i]][j, k, 5])
                }
                if (list2[[i]][j, k, 2] == -1) {
                  den1 <- prior_t_n(list3[[i]][j, k], list2[[i]][j, k, 3], list2[[i]][j, k, 4], list2[[i]][j, k, 5])
                }
                if (den1 == 0) {
                  if (names(list2[i]) == "pH") {
                    return(paste0("pH[", j, ", ", k, ", ]: The (", j, ", ", k, ") element of the inverse of pA[, , 3] produces a prior density of 0."))
                  }
                  if (names(list2[i]) == "pdetA") {
                    return(paste0("pdetA[", j, ", ", k, ", ]: The determinant of pA[, , 3] produces a prior density of 0."))
                  }
                }
              }
            }
            if (list2[[i]][j, k, 1] == 1) {
              den1 <- prior_nonc_t(list3[[i]][j, k], list2[[i]][j, k, 3], list2[[i]][j, k, 4], list2[[i]][j, k, 5], list2[[i]][j, k, 6])
              if (den1 == 0) {
                if (names(list2[i]) == "pH") {
                  return(paste0("pH[", j, ", ", k, ", ]: The (", j, ", ", k, ") element of the inverse of pA[, , 3] produces a prior density of 0."))
                }
                if (names(list2[i]) == "pdetA") {
                  return(paste0("pdetA[", j, ", ", k, ", ]: The determinant of pA[, , 3] produces a prior density of 0."))
                }
              }
            }
            if ((list2[[i]][j, k, 1] == 2) | (list2[[i]][j, k, 1] == 3)) {
              if ((list3[[i]][j, k] != 0) && (list2[[i]][j, k, 2] != (list3[[i]][j, k] / abs(list3[[i]][j, k])))) {
                if (names(list2[i]) == "pH") {
                  return(paste0("pH[", j, ", ", k, ", 2:3]: Must have the same sign as the (", j, ", ", k, ") element of the inverse of pA[, , 3]."))
                }
                if (names(list2[i]) == "pdetA") {
                  return(paste0("pdetA[", j, ", ", k, ", 2:3]: Must have the same sign as the determinant of pA[, , 3]."))
                }
              }
              if (list2[[i]][j, k, 1] == 2) {
                den1 <- prior_ibeta((list2[[i]][j, k, 2] * list3[[i]][j, k]), list2[[i]][j, k, 4], list2[[i]][j, k, 5])
              }
              if (list2[[i]][j, k, 1] == 3) {
                den1 <- prior_beta((list2[[i]][j, k, 2] * list3[[i]][j, k]), list2[[i]][j, k, 4], list2[[i]][j, k, 5])
              }
              if (den1 == 0) {
                if (names(list2[i]) == "pH") {
                  return(paste0("pH[", j, ", ", k, ", ]: The (", j, ", ", k, ") element of the inverse of pA[, , 3] produces a prior density of 0."))
                }
                if (names(list2[i]) == "pdetA") {
                  return(paste0("pdetA[", j, ", ", k, ", ]: The determinant of pA[, , 3] produces a prior density of 0."))
                }
              }
            }
            den1 <- 0
          }
        }
      }
    }
  }
  return("pass")
}

# Check arguments from the BH_SBVAR function
#' @keywords internal
arguments_check <- function(y, nlags, pA, pdetA, pH, pP, pP_sig, pR_sig, kappa1, itr, burn, thin, cri) {
  test <- check_integers(list1 = list(nlags = nlags, itr = itr, burn = burn, thin = thin))
  if (test != "pass") {
    return(test)
  }
  test <- check_doubles(list1 = list(cri = cri))
  if (test != "pass") {
    return(test)
  }
  if (floor((itr - burn) / thin) < 5000) {
    return(paste("'floor((itr-burn)/thin)' must be greater than or equal to 5000.", sep = ""))
  }
  test <- check_matrices(list1 = list(y = y, pP = pP, pP_sig = pP_sig, kappa1 = kappa1), nlags)
  if (test != "pass") {
    return(test)
  }
  test <- check_arrays(list1 = list(pA = pA, pdetA = pdetA, pH = pH), y)
  if (test != "pass") {
    return(test)
  }
  # check pR_sig
  if (!is.array(pR_sig)) {
    return("pR_sig: Must be an array.")
  }
  if (any(!is.finite(pR_sig)) || (any(pR_sig < 0))) {
    return("pR_sig: Must contain finite values greater than or equal to 0.")
  }
  if ((dim(pR_sig)[1] != ((nlags * ncol(y)) + 1)) | (dim(pR_sig)[2] != ((nlags * ncol(y)) + 1)) | (dim(pR_sig)[3] != ncol(y))) {
    return(paste("pR_sig: Dimensions should be (", ((nlags * ncol(y)) + 1), ", ", ((nlags * ncol(y)) + 1), ", ", (ncol(y)), ").", sep = ""))
  }
  for (i in 1:ncol(y)) {
    if (any(is.finite(pA[, i, 7]))) {
      n <- which(is.finite(pA[, i, 7]))
      for (j in 1:ncol(y)) {
        if (any(n == j)) {
          if (any(pR_sig[-seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), i] != 0)) {
            return(paste0("pR_sig[-c(", gsub(pattern = " ", replacement = ", ", x = trimws(paste0(seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), " ", collapse = ""), which = "right")), "), c(", gsub(pattern = " ", replacement = ", ", x = trimws(paste0(seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), " ", collapse = ""), which = "right")), "), ", i, "]: Must be 0."))
          }
          if (any(pR_sig[seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), -seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), i] != 0)) {
            return(paste0("pR_sig[c(", gsub(pattern = " ", replacement = ", ", x = trimws(paste0(seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), " ", collapse = ""), which = "right")), "), -c(", gsub(pattern = " ", replacement = ", ", x = trimws(paste0(seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), " ", collapse = ""), which = "right")), "), ", i, "]: Must be 0."))
          }
          if (any(pR_sig[seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), i] == 0)) {
            return(paste0("pR_sig[c(", gsub(pattern = " ", replacement = ", ", x = trimws(paste0(seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), " ", collapse = ""), which = "right")), "), c(", gsub(pattern = " ", replacement = ", ", x = trimws(paste0(seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), " ", collapse = ""), which = "right")), "), ", i, "]: Must be greater than 0 since pA[", j, ", ", i, ", 7] = ", pA[j, i, 7], "."))
          }
        } else {
          if (any(pR_sig[, seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), i] != 0)) {
            return(paste0("pR_sig[, c(", gsub(pattern = " ", replacement = ", ", x = trimws(paste0(seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), " ", collapse = ""), which = "right")), "), ", i, "]: Must be 0 since pA[", j, ", ", i, ", 7] = ", pA[j, i, 7], "."))
          }
          if (any(pR_sig[seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), , i] != 0)) {
            return(paste0("pR_sig[c(", gsub(pattern = " ", replacement = ", ", x = trimws(paste0(seq(from = j, to = (nlags * ncol(y)), by = ncol(y)), " ", collapse = ""), which = "right")), "), , ", i, "]: Must be 0 since pA[", j, ", ", i, ", 7] = ", pA[j, i, 7], "."))
          }
        }
      }
    } else {
      if (any(pR_sig[, , i] != 0)) {
        return(paste0("pR_sig[, , ", i, "]: Must be 0 since pA[, ", i, ", 7] = ", paste(pA[, i, 7], collapse = ", "), "."))
      }
    }
    if (!isSymmetric(pR_sig[, , i])) {
      return(paste0("pR_sig[, , ", i, "]: Must be symmetric."))
    }
  }
  return("pass")
}

# Line Plot
#' @keywords internal
line_plot <- function(data1, prior_name, i, j) {
  if (any(data1 != data1[1])) {
    if (prior_name == "pA") {
      elast <- -1
    } else {
      elast <- 1
    }
    graphics::plot(x = (elast * data1), type = "l", col = "black", yaxs = "r", xaxs = "i", xlab = "Iteration", ylab = "Estimate")
    if (prior_name == "pA") {
      graphics::title(main = paste("-A(", i, ",", j, ")", sep = ""), col.main = "black")
    } else if (prior_name == "pH") {
      graphics::title(main = paste("H(", i, ",", j, ")", sep = ""), col.main = "black")
    } else if (prior_name == "pdetA") {
      graphics::title(main = paste("Determinant of A"), col.main = "black")
    }
    Sys.sleep(0.25)
  }
}

# Line Plots
#' @keywords internal
Line_Plots <- function(raw_array, priors_array, prior_name) {
  n_rows <- nrow(raw_array)
  n_cols <- ncol(raw_array)
  for (i in 1:n_rows) {
    for (j in 1:n_cols) {
      if (all(is.finite(raw_array[i, j, ]))) {
        line_plot(data1 = raw_array[i, j, ], prior_name = prior_name, i = i, j = j)
      }
    }
  }
}

# Autocorrelation Plot
#' @keywords internal
acf_plot <- function(data1, prior_name, i, j) {
  if (any(data1 != data1[1])) {
    stats::acf(x = stats::ts(data1), lag.max = NULL, plot = TRUE, type = c("correlation"), demean = TRUE, main = "", xlab = "Lag Length", ylab = "Correlation", ci = 0)
    if (prior_name == "pA") {
      graphics::title(main = paste("-A(", i, ",", j, ")", sep = ""), col.main = "black")
    } else if (prior_name == "pH") {
      graphics::title(main = paste("H(", i, ",", j, ")", sep = ""), col.main = "black")
    } else if (prior_name == "pdetA") {
      graphics::title(main = paste("Determinant of A"), col.main = "black")
    }
    Sys.sleep(0.25)
  }
}

# Autocorrelation Plots
#' @keywords internal
ACF_Plots <- function(raw_array, priors_array, prior_name) {
  n_rows <- nrow(raw_array)
  n_cols <- ncol(raw_array)
  for (i in 1:n_rows) {
    for (j in 1:n_cols) {
      if (all(is.finite(raw_array[i, j, ]))) {
        acf_plot(data1 = raw_array[i, j, ], prior_name = prior_name, i = i, j = j)
      }
    }
  }
}

#' Structural Bayesian Vector Autoregression
#'
#' Estimates the parameters of a Structural Bayesian Vector Autoregression model with the
method developed by Baumeister and Hamilton (2015/2017/2018). #' @author Paul Richardson #' @export #' @import Rcpp #' @name BH_SBVAR #' @param y \emph{(T x n)} matrix containing the endogenous variables. \emph{T} is the number of observations and \emph{n} is the number of endogenous variables. #' @param nlags Integer specifying the lag order. #' @param pA \emph{(n x n x 8)} array where \emph{n} is the number of endogenous variables and each slice of the third dimension contains the prior distributions (NA - no prior, 0 - symmetric t-distribution, 1 - non-central t-distribution, 2 - inverted beta distribution, 3 - beta distribution), sign restrictions (NA - no restriction, 1 - positive restriction, -1 - negative restriction), distribution position parameters, distribution scale or shape1 parameters for t-distributions or inverted beta and beta distributions, distribution degrees of freedom or shape2 parameters for t-distributions or inverted beta and beta distributions, distribution skew parameters for t-distributions, indication for long-run restrictions (NA - no long-run restriction, 1 - long-run restriction), and random-walk proposal scale parameters for \emph{A}, respectively. #' @param pdetA \emph{(1 x 1 x 6)} array where each slice of the third dimension contains the prior distributions (NA - no prior, 0 - symmetric t-distribution, 1 - non-central t-distribution, 2 - inverted beta distribution, 3 - beta distribution), sign restrictions (NA - no restriction, 1 - positive restriction, -1 - negative restriction), distribution position parameters, distribution scale or shape1 parameters for t-distributions or inverted beta and beta distributions, distribution degrees of freedom or shape2 parameters for t-distributions or inverted beta and beta distributions, and distribution skew parameters for t-distributions for the determinant of \emph{A}, respectively (default = NULL). NULL indicates no priors for the determinant of \emph{A}. #' @param pH \emph{(n x n x 6)} array where \emph{n} is the number of endogenous variables and each slice of the third dimension contains the prior distributions (NA - no prior, 0 - symmetric t-distribution, 1 - non-central t-distribution, 2 - inverted beta distribution, 3 - beta distribution), sign restrictions (NA - no restriction, 1 - positive restriction, -1 - negative restriction), distribution position parameters, distribution scale or shape1 parameters for t-distributions or inverted beta and beta distributions, distribution degrees of freedom or shape2 parameters for t-distributions or inverted beta and beta distributions, and distribution skew parameters for t-distributions for \emph{H}, the inverse of \emph{A}, respectively (default = NULL). NULL indicates no priors for the inverse of \emph{A}. #' @param pP \emph{(k x n)} matrix containing the prior position parameters for the reduced form lagged coefficient matrix \emph{\eqn{\Phi}} (default = NULL). \emph{\eqn{k = n L + 1}}, \emph{n} is the number of endogenous variables, and \emph{L} is the lag length. NULL indicates no priors for \emph{\eqn{\Phi}}. #' @param pP_sig \emph{(k x k)} matrix containing values indicating confidence in the priors for \emph{\eqn{\Phi}} (default = NULL). \emph{\eqn{k = n L + 1}}, \emph{n} is the number of endogenous variables, and \emph{L} is the lag length. NULL indicates no priors for \emph{\eqn{\Phi}}. 
#' @param pR_sig \emph{(k x k x n)} array containing values indicating confidence in long-run restrictions on the lagged structural coefficient matrix \emph{B} (default = NULL). \emph{\eqn{k = n L + 1}}, \emph{n} is the number of endogenous variables, and \emph{L} is the lag length. NULL indicates no long-run restrictions. #' @param kappa1 \emph{(1 x n)} matrix containing values indicating confidence in priors for the structural variances (default = NULL). \emph{n} is the number of endogenous variables. NULL indicates no priors for structural variances. #' @param itr Integer specifying the total number of iterations for the algorithm (default = 5000). #' @param burn Integer specifying the number of draws to throw out at the beginning of the algorithm (default = 0). #' @param thin Integer specifying the thinning parameter (default = 1). All draws beyond burn are kept when thin = 1. Draw 1, draw 3, etc. beyond burn are kept when thin = 2. #' @param cri credibility intervals for the estimates to be returned (default = 0.95). A value of 0.95 will return 95\% credibility intervals. A value of 0.90 will return 90\% credibility intervals. #' @details Estimates the parameters of a Structural Bayesian Vector Autoregression model with the method developed in Baumeister and Hamilton (2015/2017/2018). The function returns a list containing the results. #' @return A list containing the following: #' @return accept_rate: Acceptance rate of the algorithm. #' @return y and x: Matrices containing the endogenous variables and their lags. #' @return nlags: Numeric value indicating the number of lags included in the model. #' @return pA, pdetA, pH, pP, pP_sig, pR, pR_sig, tau1, and kappa1: Matrices and arrays containing prior information. #' @return A_start: Matrix containing estimates of the parameters in \emph{A} from the optimization routine. #' @return A, detA, H, B, Phi, and D: Arrays containing estimates of the model parameters. The first, second, and third slices of the third dimension are lower, median, and upper bounds of the estimates. #' @return A_den, detA_den, and H_den: Lists containing the horizontal and vertical axis coordinates of posterior densities of \emph{A}, \emph{det(A)}, and \emph{H}. #' @return A_chain, B_chain, D_chain, detA_chain, H_chain: Arrays containing the raw results for \emph{A}, \emph{B}, \emph{D}, \emph{detA}, \emph{H}. #' @return Line and ACF plots of the estimates for \emph{A}, \emph{det(A)}, and \emph{H}. #' @references Baumeister, C., & Hamilton, J.D. (2015). Sign restrictions, structural vector autoregressions, and useful prior information. \emph{Econometrica}, 83(5), 1963-1999. #' @references Baumeister, C., & Hamilton, J.D. (2017). Structural interpretation of vector autoregressions with incomplete identification: Revisiting the role of oil supply and demand shocks (No. w24167). National Bureau of Economic Research. #' @references Baumeister, C., & Hamilton, J.D. (2018). Inference in structural vector autoregressions when the identifying assumptions are not fully believed: Re-evaluating the role of monetary policy in economic fluctuations. \emph{Journal of Monetary Economics}, 100, 48-65. #' @seealso Dr. Christiane Baumeister's website \href{https://sites.google.com/site/cjsbaumeister/}{https://sites.google.com/site/cjsbaumeister/}. #' @seealso Dr. James D. Hamilton's website \href{https://econweb.ucsd.edu/~jhamilton/}{https://econweb.ucsd.edu/~jhamilton/}. 
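#' @section Quick reference for the prior arrays:
#' As a compact restatement of the slice layout documented in \code{pA} above
#' (slices of the third dimension): 1 = distribution type, 2 = sign
#' restriction, 3 = position, 4 = scale/shape1, 5 = degrees of freedom/shape2,
#' 6 = skew, 7 = long-run restriction indicator, 8 = proposal scaling
#' parameter. \code{pdetA} and \code{pH} use the same layout for slices 1
#' through 6.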
#' @examples #' # Import data #' library(BHSBVAR) #' set.seed(123) #' data(USLMData) #' y0 <- matrix(data = c(USLMData$Wage, USLMData$Employment), ncol = 2) #' y <- y0 - (matrix(data = 1, nrow = nrow(y0), ncol = ncol(y0)) %*% #' diag(x = colMeans(x = y0, na.rm = FALSE, dims = 1))) #' colnames(y) <- c("Wage", "Employment") #' #' # Set function arguments #' nlags <- 8 #' itr <- 5000 #' burn <- 0 #' thin <- 1 #' cri <- 0.95 #' #' # Priors for A #' pA <- array(data = NA, dim = c(2, 2, 8)) #' pA[, , 1] <- c(0, NA, 0, NA) #' pA[, , 2] <- c(1, NA, -1, NA) #' pA[, , 3] <- c(0.6, 1, -0.6, 1) #' pA[, , 4] <- c(0.6, NA, 0.6, NA) #' pA[, , 5] <- c(3, NA, 3, NA) #' pA[, , 6] <- c(NA, NA, NA, NA) #' pA[, , 7] <- c(NA, NA, 1, NA) #' pA[, , 8] <- c(2, NA, 2, NA) #' #' # Position priors for Phi #' pP <- matrix(data = 0, nrow = ((nlags * ncol(pA)) + 1), ncol = ncol(pA)) #' pP[1:nrow(pA), 1:ncol(pA)] <- #' diag(x = 1, nrow = nrow(pA), ncol = ncol(pA)) #' #' # Confidence in the priors for Phi #' x1 <- #' matrix(data = NA, nrow = (nrow(y) - nlags), #' ncol = (ncol(y) * nlags)) #' for (k in 1:nlags) { #' x1[, (ncol(y) * (k - 1) + 1):(ncol(y) * k)] <- #' y[(nlags - k + 1):(nrow(y) - k),] #' } #' x1 <- cbind(x1, 1) #' colnames(x1) <- #' c(paste(rep(colnames(y), nlags), #' "_L", #' sort(rep(seq(from = 1, to = nlags, by = 1), times = ncol(y)), #' decreasing = FALSE), #' sep = ""), #' "cons") #' y1 <- y[(nlags + 1):nrow(y),] #' ee <- matrix(data = NA, nrow = nrow(y1), ncol = ncol(y1)) #' for (i in 1:ncol(y1)) { #' xx <- cbind(x1[, seq(from = i, to = (ncol(x1) - 1), by = ncol(y1))], 1) #' yy <- matrix(data = y1[, i], ncol = 1) #' phi <- solve(t(xx) %*% xx, t(xx) %*% yy) #' ee[, i] <- yy - (xx %*% phi) #' } #' somega <- (t(ee) %*% ee) / nrow(ee) #' lambda0 <- 0.2 #' lambda1 <- 1 #' lambda3 <- 100 #' v1 <- matrix(data = (1:nlags), nrow = nlags, ncol = 1) #' v1 <- v1^((-2) * lambda1) #' v2 <- matrix(data = diag(solve(diag(diag(somega)))), ncol = 1) #' v3 <- kronecker(v1, v2) #' v3 <- (lambda0^2) * rbind(v3, (lambda3^2)) #' v3 <- 1 / v3 #' pP_sig <- diag(x = 1, nrow = nrow(v3), ncol = nrow(v3)) #' diag(pP_sig) <- v3 #' #' # Confidence in long-run restriction priors #' pR_sig <- #' array(data = 0, #' dim = c(((nlags * ncol(y)) + 1), #' ((nlags * ncol(y)) + 1), #' ncol(y))) #' Ri <- #' cbind(kronecker(matrix(data = 1, nrow = 1, ncol = nlags), #' matrix(data = c(1, 0), nrow = 1)), #' 0) #' pR_sig[, , 2] <- (t(Ri) %*% Ri) / 0.1 #' #' # Confidence in priors for D #' kappa1 <- matrix(data = 2, nrow = 1, ncol = ncol(y)) #' #' # Set graphical parameters #' par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif", #' mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1) #' #' # Estimate the parameters of the model #' results1 <- #' BH_SBVAR(y = y, nlags = nlags, pA = pA, pP = pP, pP_sig = pP_sig, #' pR_sig = pR_sig, kappa1 = kappa1, itr = itr, burn = burn, #' thin = thin, cri = cri) BH_SBVAR <- function(y, nlags, pA, pdetA = NULL, pH = NULL, pP = NULL, pP_sig = NULL, pR_sig = NULL, kappa1 = NULL, itr = 5000, burn = 0, thin = 1, cri = 0.95) { results <- tryCatch( expr = { #construct objects from NULL inputs if (is.null(pdetA)) { pdetA <- array(data = NA_real_, dim = c(1, 1, 6)) } if (is.null(pH)) { pH <- array(data = NA_real_, dim = c(ncol(y), ncol(y), 6)) } if (is.null(pP) | is.null(pP_sig)) { pP <- matrix(data = 0, nrow = ((nlags * ncol(y)) + 1), ncol = ncol(y)) pP_sig <- matrix(data = 0, nrow = ((nlags * ncol(y)) + 1), ncol = ((nlags * ncol(y)) + 1)) } if (is.null(pR_sig)) { pR_sig <- array(data = 0, dim = c(((nlags * 
ncol(y)) + 1), ((nlags * ncol(y)) + 1), ncol(y))) } if (is.null(kappa1)) { kappa1 <- matrix(data = 0, nrow = 1, ncol = ncol(y)) } #check BH_SBVAR function arguments test <- arguments_check(y = y, nlags = nlags, pA = pA, pdetA = pdetA, pH = pH, pP = pP, pP_sig = pP_sig, pR_sig = pR_sig, kappa1 = kappa1, itr = itr, burn = burn, thin = thin, cri = cri) if (test != "pass") { stop(test) } ci <- (1.0 - ((1.0 - cri) / 2.0)) #create proposal scale matrix scale_ar <- diag(x = c(pA[, , 8])[which(!is.na(c(pA[, , 1])))], nrow = length(which(!is.na(c(pA[, , 1])))), ncol = length(which(!is.na(c(pA[, , 1]))))) #trim pA pA <- pA[, , 1:7] #check for variable names if (is.null(colnames(y))) { colnames(y) <- paste("y", 1:ncol(y), sep = "") } else { colnames(y) <- make.names(names = colnames(y), unique = TRUE) } rownames(y) <- NULL #get variable names varnames <- colnames(y) #get x and y data matrices list1 <- getXY(data1 = y, nlags = nlags) x1 <- list1$X y1 <- list1$Y #omega omega <- ((t(y1) %*% y1) - (t(y1) %*% x1) %*% solve(t(x1) %*% x1) %*% t(t(y1) %*% x1)) / nrow(y1) #somega ee <- matrix(data = NA_real_, nrow = nrow(y1), ncol = ncol(y1), dimnames = list(rownames(y1), varnames)) for (i in 1:ncol(y1)) { xx <- cbind(x1[, seq(from = i, to = (ncol(x1) - 1), by = ncol(y1))], 1) yy <- matrix(data = y1[, i], ncol = 1) phi <- solve((t(xx) %*% xx), (t(xx) %*% yy)) ee[, i] <- yy - (xx %*% phi) } somega <- (t(ee) %*% ee) / nrow(ee) #optimization startvalues <- c(pA[, , 3])[which(!is.na(c(pA[, , 1])))] A_optim <- list(par = NULL, value = NULL, counts = NULL, convergence = 1, message = NULL, hessian = diag(x = 1, nrow = length(startvalues), ncol = length(startvalues))) A_optim[c("par", "value", "counts", "convergence", "message")] <- stats::optim(par = startvalues, fn = post_A_optim, pA = pA, pdetA = pdetA, pH = pH, pP = pP, pP_sig = pP_sig, pR_sig = pR_sig, kappa1 = kappa1, y1 = y1, x1 = x1, omega = omega, somega = somega, nlags = nlags, method = "Nelder-Mead", control = list(maxit = 2500))[c("par", "value", "counts", "convergence", "message")] if (A_optim$convergence != 0) { if ((all(!is.null(A_optim$par))) && (all(is.finite(A_optim$par)))) { A_optim[c("par", "value", "counts", "convergence", "message")] <- stats::optim(par = A_optim$par, fn = post_A_optim, pA = pA, pdetA = pdetA, pH = pH, pP = pP, pP_sig = pP_sig, pR_sig = pR_sig, kappa1 = kappa1, y1 = y1, x1 = x1, omega = omega, somega = somega, nlags = nlags, method = "Nelder-Mead", control = list(maxit = 500))[c("par", "value", "counts", "convergence", "message")] } else { stop("Optimization routine convergence was not successful.") } } if (A_optim$convergence != 0) { stop("Optimization routine convergence was not successful.") } A_optim$hessian <- stats::optimHess(par = A_optim$par, fn = post_A_optim, pA = pA, pdetA = pdetA, pH = pH, pP = pP, pP_sig = pP_sig, pR_sig = pR_sig, kappa1 = kappa1, y1 = y1, x1 = x1, omega = omega, somega = somega, nlags = nlags) #optimum values in A A_temp <- c(pA[, , 3]) A_temp[which(!is.na(c(pA[, , 1])))] <- A_optim$par A_start <- matrix(data = A_temp, nrow = dim(pA)[1], ncol = dim(pA)[2], dimnames = list(varnames, varnames)) #test that optimized starting values are consistent with sign restrictions H_max <- solve(A_start) for (i in 1:nrow(pA)) { for (j in 1:ncol(pA)) { if ((!is.na(pA[i, j, 1])) && ((pA[i, j, 1] == 0) | (pA[i, j, 1] == 2) | (pA[i, j, 1] == 3)) && (!is.na(pA[i, j, 2])) && (A_start[i, j] != 0) && (pA[i, j, 2] != (A_start[i, j] / abs(A_start[i, j])))) { warning("Optimization routine produced values for the elements in 
A that are not consistent with sign restrictions.", immediate. = TRUE) } if ((!is.na(pH[i, j, 1])) && ((pH[i, j, 1] == 0) | (pH[i, j, 1] == 2) | (pH[i, j, 1] == 3)) && (!is.na(pH[i, j, 2])) && (H_max[i, j] != 0) && (pH[i, j, 2] != (H_max[i, j] / abs(H_max[i, j])))) { warning("Optimization routine produced values for the elements in H that are not consistent with sign restrictions.", immediate. = TRUE) } } } if ((!is.na(pdetA[1, 1, 1])) && ((pdetA[1, 1, 1] == 0) | (pdetA[1, 1, 1] == 2) | (pdetA[1, 1, 1] == 3)) && (!is.na(pdetA[1, 1, 2])) && (pdetA[1, 1, 2] != (det(A_start) / abs(det(A_start))))) { warning("Optimization routine produced values for the determinant of A that are not consistent with sign restrictions.", immediate. = TRUE) } #scale H0 <- A_optim$hessian if (min(eigen(solve(H0))[[1]]) > 0) { PH <- t(chol(solve(H0))) } else { PH <- diag(x = 1, nrow = nrow(H0)) } scale1 <- PH * scale_ar #Metropolis-Hastings Algorithm results <- MAIN(y1 = y1, x1 = x1, omega = omega, somega = somega, nlags = nlags, pA = pA, pdetA = pdetA, pH = pH, pP = pP, pP_sig = pP_sig, pR_sig = pR_sig, kappa1 = kappa1, A_start = A_start, itr = itr, burn = burn, thin = thin, scale1 = scale1, ci = ci) dimnames(results$y) <- dimnames(y1) dimnames(results$x) <- dimnames(x1) dimnames(results$pA) <- list(varnames, paste0(varnames, "_Eq"), c("Dist", "Sign", "Posn", "Dist_Arg1", "Dist_Arg2", "Dist_Arg3", "LR")) dimnames(results$pdetA) <- list("detA", "detA", c("Dist", "Sign", "Posn", "Dist_Arg1", "Dist_Arg2", "Dist_Arg3")) dimnames(results$pH) <- list(varnames, paste0(varnames, "_Eq"), c("Dist", "Sign", "Posn", "Dist_Arg1", "Dist_Arg2", "Dist_Arg3")) dimnames(results$pP) <- list(colnames(x1), paste0(varnames, "_Eq")) dimnames(results$pP_sig) <- list(colnames(x1), colnames(x1)) dimnames(results$pR) <- list(colnames(x1), paste0(varnames, "_Eq"), paste0(varnames, "_Eq")) dimnames(results$pR_sig) <- list(colnames(x1), colnames(x1), paste0(varnames, "_Eq")) dimnames(results$tau1) <- list(varnames, varnames) dimnames(results$kappa1) <- list(varnames, varnames) dimnames(results$A_start) <- list(varnames, paste0(varnames, "_Eq")) dimnames(results$A) <- list(varnames, paste0(varnames, "_Eq"), paste0(c(((1 - ci) * 100), 50, (ci * 100)),"%")) dimnames(results$detA) <- list("detA", "detA", paste0(c(((1 - ci) * 100), 50, (ci * 100)),"%")) dimnames(results$H) <- list(varnames, paste0(varnames, "_Eq"), paste0(c(((1 - ci) * 100), 50, (ci * 100)),"%")) dimnames(results$B) <- list(colnames(x1), paste0(colnames(y1), "_Eq"), paste0(c(((1 - ci) * 100), 50, (ci * 100)),"%")) dimnames(results$Phi) <- list(colnames(x1), paste0(colnames(y1), "_Eq"), paste0(c(((1 - ci) * 100), 50, (ci * 100)),"%")) dimnames(results$D) <- list(varnames, paste0(varnames, "_Eq"), paste0(c(((1 - ci) * 100), 50, (ci * 100)),"%")) dimnames(results$A_den$hori) <- list(varnames, paste0(varnames, "_Eq"), NULL) dimnames(results$A_den$vert) <- list(varnames, paste0(varnames, "_Eq"), NULL) dimnames(results$detA_den$hori) <- list("detA", "detA", NULL) dimnames(results$detA_den$vert) <- list("detA", "detA", NULL) dimnames(results$H_den$hori) <- list(varnames, paste0(varnames, "_Eq"), NULL) dimnames(results$H_den$vert) <- list(varnames, paste0(varnames, "_Eq"), NULL) Line_Plots(raw_array = results$A_chain, priors_array = results$pA, prior_name = "pA") ACF_Plots(raw_array = results$A_chain, priors_array = results$pA, prior_name = "pA") return(results) }, error = function(e) {e} ) return(results) } # Density Plots #' @keywords internal den_plot <- function(list2, den1, elast, lb, 
ub, nticks0, A_titles, H_titles, xlab, ylab, k, j, i) {
  yticks <- signif(((max(den1[, 2]) - min(den1[, 2])) / nticks0), 2)
  graphics::plot(x = (elast * den1[, 1]), y = den1[, 2], type = "l", col = "black", yaxs = "i", xaxs = "r", yaxt = "n", xlab = xlab, ylab = ylab, xlim = c(lb, ub), ylim = c(0, (yticks * (nticks0 + 1))))
  if (names(list2[k]) == "pA") {
    graphics::title(main = A_titles[i, j], col.main = "black")
  } else if (names(list2[k]) == "pH") {
    graphics::title(main = H_titles[i, j], col.main = "black")
  } else if (names(list2[k]) == "pdetA") {
    graphics::title(main = paste("Determinant of A"), col.main = "black")
  }
  graphics::axis(side = 2, at = seq(from = -yticks, to = (nticks0 * yticks), by = yticks), labels = seq(from = -yticks, to = (nticks0 * yticks), by = yticks))
  graphics::polygon(x = (elast * den1[, 1]), y = den1[, 2], col = "blue")
}

#' Plot Posterior Distributions Against Priors
#'
#' Plot Posterior Distributions Against Priors.
#' @author Paul Richardson
#' @export
#' @import Rcpp
#' @name Dist_Plots
#' @param results List containing the results from running BH_SBVAR().
#' @param A_titles \emph{(n x n)} matrix containing the titles for the plots of the estimated parameters in the coefficient matrix \emph{A}. \emph{n} is the number of endogenous variables.
#' @param H_titles \emph{(n x n)} matrix containing the titles for the plots of the estimated parameters in the coefficient matrix \emph{H} (default = NULL). \emph{n} is the number of endogenous variables.
#' @param xlab Character label for the horizontal axis of the posterior and prior density plots (default = NULL). Default produces plots without a label for the horizontal axis.
#' @param ylab Character label for the vertical axis of the posterior and prior density plots (default = NULL). Default produces plots without a label for the vertical axis.
#' @details Plots posterior distributions against prior distributions.
#' @examples #' # Import data #' library(BHSBVAR) #' set.seed(123) #' data(USLMData) #' y0 <- matrix(data = c(USLMData$Wage, USLMData$Employment), ncol = 2) #' y <- y0 - (matrix(data = 1, nrow = nrow(y0), ncol = ncol(y0)) %*% #' diag(x = colMeans(x = y0, na.rm = FALSE, dims = 1))) #' colnames(y) <- c("Wage", "Employment") #' #' # Set function arguments #' nlags <- 8 #' itr <- 5000 #' burn <- 0 #' thin <- 1 #' acc <- TRUE #' h <- 20 #' cri <- 0.95 #' #' # Priors for A #' pA <- array(data = NA, dim = c(2, 2, 8)) #' pA[, , 1] <- c(0, NA, 0, NA) #' pA[, , 2] <- c(1, NA, -1, NA) #' pA[, , 3] <- c(0.6, 1, -0.6, 1) #' pA[, , 4] <- c(0.6, NA, 0.6, NA) #' pA[, , 5] <- c(3, NA, 3, NA) #' pA[, , 6] <- c(NA, NA, NA, NA) #' pA[, , 7] <- c(NA, NA, 1, NA) #' pA[, , 8] <- c(2, NA, 2, NA) #' #' # Position priors for Phi #' pP <- matrix(data = 0, nrow = ((nlags * ncol(pA)) + 1), ncol = ncol(pA)) #' pP[1:nrow(pA), 1:ncol(pA)] <- #' diag(x = 1, nrow = nrow(pA), ncol = ncol(pA)) #' #' # Confidence in the priors for Phi #' x1 <- #' matrix(data = NA, nrow = (nrow(y) - nlags), #' ncol = (ncol(y) * nlags)) #' for (k in 1:nlags) { #' x1[, (ncol(y) * (k - 1) + 1):(ncol(y) * k)] <- #' y[(nlags - k + 1):(nrow(y) - k),] #' } #' x1 <- cbind(x1, 1) #' colnames(x1) <- #' c(paste(rep(colnames(y), nlags), #' "_L", #' sort(rep(seq(from = 1, to = nlags, by = 1), times = ncol(y)), #' decreasing = FALSE), #' sep = ""), #' "cons") #' y1 <- y[(nlags + 1):nrow(y),] #' ee <- matrix(data = NA, nrow = nrow(y1), ncol = ncol(y1)) #' for (i in 1:ncol(y1)) { #' xx <- cbind(x1[, seq(from = i, to = (ncol(x1) - 1), by = ncol(y1))], 1) #' yy <- matrix(data = y1[, i], ncol = 1) #' phi <- solve(t(xx) %*% xx, t(xx) %*% yy) #' ee[, i] <- yy - (xx %*% phi) #' } #' somega <- (t(ee) %*% ee) / nrow(ee) #' lambda0 <- 0.2 #' lambda1 <- 1 #' lambda3 <- 100 #' v1 <- matrix(data = (1:nlags), nrow = nlags, ncol = 1) #' v1 <- v1^((-2) * lambda1) #' v2 <- matrix(data = diag(solve(diag(diag(somega)))), ncol = 1) #' v3 <- kronecker(v1, v2) #' v3 <- (lambda0^2) * rbind(v3, (lambda3^2)) #' v3 <- 1 / v3 #' pP_sig <- diag(x = 1, nrow = nrow(v3), ncol = nrow(v3)) #' diag(pP_sig) <- v3 #' #' # Confidence in long-run restriction priors #' pR_sig <- #' array(data = 0, #' dim = c(((nlags * ncol(y)) + 1), #' ((nlags * ncol(y)) + 1), #' ncol(y))) #' Ri <- #' cbind(kronecker(matrix(data = 1, nrow = 1, ncol = nlags), #' matrix(data = c(1, 0), nrow = 1)), #' 0) #' pR_sig[, , 2] <- (t(Ri) %*% Ri) / 0.1 #' #' # Confidence in priors for D #' kappa1 <- matrix(data = 2, nrow = 1, ncol = ncol(y)) #' #' # Set graphical parameters #' par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif", #' mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1) #' #' # Estimate the parameters of the model #' results1 <- #' BH_SBVAR(y = y, nlags = nlags, pA = pA, pP = pP, pP_sig = pP_sig, #' pR_sig = pR_sig, kappa1 = kappa1, itr = itr, burn = burn, #' thin = thin, cri = cri) #' #' # Plot Posterior and Prior Densities #' A_titles <- #' matrix(data = NA_character_, nrow = dim(pA)[1], ncol = dim(pA)[2]) #' A_titles[1, 1] <- "Wage Elasticity of Labor Demand" #' A_titles[1, 2] <- "Wage Elasticity of Labor Supply" #' par(mfcol = c(1, 2)) #' dist_results <- #' Dist_Plots(results = results1, A_titles = A_titles) Dist_Plots <- function(results, A_titles, H_titles = NULL, xlab = NULL, ylab = NULL) { #test arguments test <- plot_funs_args_check(results = results, xlab = xlab, ylab = ylab) if (test != "pass") { stop(test) } if (is.null(xlab)) { xlab <- "" } if (is.null(ylab)) { ylab <- "" } pA <- 
results$pA pdetA <- results$pdetA pH <- results$pH A_den <- results$A_den detA_den <- results$detA_den H_den <- results$H_den if (!is.matrix(A_titles) || ((nrow(A_titles) != dim(pA)[1]) | (ncol(A_titles) != dim(pA)[2]))) { stop(paste("A_titles: Must be a matrix with row and column length each equal to the number of endogenous variables.", sep = "")) } if (is.null(H_titles)) { H_titles <- matrix(data = NA_character_, nrow = dim(pA)[1], ncol = dim(pA)[2]) } if (!is.matrix(H_titles) || ((nrow(H_titles) != dim(pH)[1]) | (ncol(H_titles) != dim(pH)[2]))) { stop(paste("H_titles: Must be a matrix with row and column length each equal to the number of endogenous variables.", sep = "")) } for (i in 1:dim(pA)[1]) { for (j in 1:dim(pA)[2]) { if ((is.na(pA[i, j, 1])) && (!is.na(A_titles[i, j]))) { stop(paste("A_titles: A_titles[", i, ", ", j, "] should be empty since pA[", i, ", ", j, ", ", 1, "] is empty.", sep = "")) } if ((!is.na(pA[i, j, 1])) && (is.na(A_titles[i, j]))) { stop(paste("A_titles: A_titles[", i, ", ", j, "] is missing.", sep = "")) } if ((is.na(pH[i, j, 1])) && (!is.na(H_titles[i, j]))) { stop(paste("H_titles: H_titles[", i, ", ", j, "] should be empty since pH[", i, ", ", j, ", ", 1, "] is empty.", sep = "")) } if ((!is.na(pH[i, j, 1])) && (is.na(H_titles[i, j]))) { stop(paste("H_titles: H_titles[", i, ", ", j, "] is missing.", sep = "")) } } } nticks0 <- 3 list1 <- list(A_den = A_den, H_den = H_den) list2 <- list(pA = pA, pH = pH) hori <- matrix(data = NA_real_, nrow = dim(A_den$hori)[3], ncol = 1) vert <- matrix(data = NA_real_, nrow = dim(A_den$vert)[3], ncol = 1) den1 <- matrix(data = NA_real_, nrow = dim(A_den$vert)[3], ncol = 2) prior_den <- matrix(data = NA_real_, nrow = 501, ncol = 2) max_distance <- matrix(data = 0, nrow = dim(pA)[1], ncol = 1) crr_lb <- 0.05 for (k in 1:length(list1)) { if (names(list2[k]) == "pA") { elast <- -1 } else { elast <- 1 } hori[, ] <- NA_real_ vert[, ] <- NA_real_ den1[, ] <- NA_real_ prior_den[, ] <- NA_real_ max_distance[, ] <- 0 distance <- 0 for (i in 1:(dim(list2[[k]])[1])) { #equations are by column for (j in 1:(dim(list2[[k]])[2])) { hori[, ] <- list1[[k]]$hori[i, j, ] vert[, ] <- list1[[k]]$vert[i, j, ] if (any(!is.na(hori))) { distance <- ceiling(max(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1], na.rm = TRUE) - min(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1], na.rm = TRUE)) + 1 } else { distance <- 0 } if (distance > max_distance[i, 1]) { max_distance[i, 1] <- distance } } } for (i in 1:(dim(list2[[k]])[1])) { #equations are by column for (j in 1:(dim(list2[[k]])[2])) { hori[, ] <- list1[[k]]$hori[i, j, ] vert[, ] <- list1[[k]]$vert[i, j, ] if (!is.na(list2[[k]][i, j, 1])) { if (is.na(list2[[k]][i, j, 2])) { ub <- (elast * round(x = mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0)) + (max_distance[i, 1] * 0.5) lb <- (elast * round(x = mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0)) - (max_distance[i, 1] * 0.5) } else if (list2[[k]][i, j, 2] == 1) { if ((round(x = mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0) - (max_distance[i, 1] * 0.5)) < 1) { if (names(list2[k]) == "pA") { ub <- 0 lb <- (-1) * max_distance[i, 1] } else { ub <- max_distance[i, 1] lb <- 0 } } else { ub <- (elast * round(x = mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0)) + (max_distance[i, 1] * 0.5) lb <- (elast * round(x = mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0)) - (max_distance[i, 1] * 0.5) 
} } else if (list2[[k]][i,j,2] == (-1)) { if ((round(x = mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0) + (max_distance[i, 1] * 0.5)) > (-1)) { if (names(list2[k]) == "pA") { ub <- max_distance[i, 1] lb <- 0 } else { ub <- 0 lb <- (-1) * max_distance[i, 1] } } else { ub <- (elast * round(x = mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0)) + (max_distance[i, 1] * 0.5) lb <- (elast * round(x = mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0)) - (max_distance[i, 1] * 0.5) } } den1[, ] <- cbind(hori[, 1], vert[, 1]) den_plot(list2 = list2, den1 = den1, elast = elast, lb = lb, ub = ub, nticks0 = nticks0, A_titles = A_titles, H_titles = H_titles, xlab = xlab, ylab = ylab, k = k, j = j, i = i) prior_den[, 1] <- seq(from = (elast * lb), to = (elast * ub), by = (elast * (ub - lb) / 500)) if (list2[[k]][i, j, 1] == 0) { if (is.na(list2[[k]][i, j, 2])) { for (h in 1:nrow(prior_den)) { prior_den[h, 2] <- prior_t(prior_den[h, 1], list2[[k]][i, j, 3], list2[[k]][i, j, 4], list2[[k]][i, j, 5]) } } else if (list2[[k]][i, j, 2] == 1) { for (h in 1:nrow(prior_den)) { prior_den[h, 2] <- prior_t_p(prior_den[h, 1], list2[[k]][i, j, 3], list2[[k]][i, j, 4], list2[[k]][i, j, 5]) } } else if (list2[[k]][i, j, 2] == (-1)) { for (h in 1:nrow(prior_den)) { prior_den[h, 2] <- prior_t_n(prior_den[h, 1], list2[[k]][i, j, 3], list2[[k]][i, j, 4], list2[[k]][i, j, 5]) } } } else if (list2[[k]][i, j, 1] == 1) { for (h in 1:nrow(prior_den)) { prior_den[h, 2] <- prior_nonc_t(prior_den[h, 1], list2[[k]][i, j, 3], list2[[k]][i, j, 4], list2[[k]][i, j, 5], list2[[k]][i, j, 6]) } } else if (list2[[k]][i, j, 1] == 2) { for (h in 1:nrow(prior_den)) { if (((list2[[k]][i, j, 2] == 1) & (prior_den[h, 1] >= 1)) | ((list2[[k]][i, j, 2] == -1) & (prior_den[h, 1] <= -1))) { prior_den[h, 2] <- prior_ibeta(abs(prior_den[h, 1]), list2[[k]][i, j, 4], list2[[k]][i, j, 5]) } else { prior_den[h, 2] <- 0 } } } else if (list2[[k]][i, j, 1] == 3) { for (h in 1:nrow(prior_den)) { if (((list2[[k]][i, j, 2] == 1) & (prior_den[h, 1] >= 0) & (prior_den[h, 1] <= 1)) | ((list2[[k]][i, j, 2] == -1) & (prior_den[h, 1] <= 0) & (prior_den[h, 1] >= -1))) { prior_den[h, 2] <- prior_beta(abs(prior_den[h, 1]), list2[[k]][i, j, 4], list2[[k]][i, j, 5]) } else { prior_den[h, 2] <- 0 } } } graphics::lines(x = (elast * prior_den[, 1]), y = prior_den[, 2], type = "l", col = "red") } } } } list2 <- list(pdetA = pdetA) hori[, ] <- NA_real_ vert[, ] <- NA_real_ den1[, ] <- NA_real_ prior_den[, ] <- NA_real_ max_distance[, ] <- 0 elast <- 1 if (!is.na(pdetA[1, 1, 1])) { hori[, ] <- detA_den$hori[1, 1, ] vert[, ] <- detA_den$vert[1, 1, ] max_distance[1 ,1] <- ceiling(max(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1], na.rm = TRUE) - min(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1], na.rm = TRUE)) + 1 if (is.na(list2[[1]][1, 1, 2])) { ub <- (elast * round(x = mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0)) + (max_distance[1 ,1] * 0.5) lb <- (elast * round(x = mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0)) - (max_distance[1 ,1] * 0.5) } else if (list2[[1]][1, 1, 2] == 1) { if ((round(x = mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0) - (max_distance[1 ,1] * 0.5)) < 0) { ub <- max_distance[1 ,1] lb <- 0 } else { ub <- (elast * round(x = mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0)) + (max_distance[1 ,1] * 0.5) lb <- (elast * round(x = 
mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0)) - (max_distance[1 ,1] * 0.5) } } else if (list2[[1]][1, 1, 2] == (-1)) { if ((round(x = mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0) + (max_distance[1 ,1] * 0.5)) > 0) { ub <- 0 lb <- (-1) * max_distance[1 ,1] } else { ub <- (elast * round(x = mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0)) + (max_distance[1 ,1] * 0.5) lb <- (elast * round(x = mean(hori[which(vert > (max(vert, na.rm = TRUE) * crr_lb)), 1]), digits = 0)) - (max_distance[1 ,1] * 0.5) } } den1[, ] <- cbind(hori[, 1], vert[, 1]) den_plot(list2 = list2, den1 = den1, elast = elast, lb = lb, ub = ub, nticks0 = nticks0, A_titles = A_titles, H_titles = H_titles, xlab = xlab, ylab = ylab, k = 1, j = 1, i = 1) prior_den[, 1] <- seq(from = (elast * lb), to = (elast * ub), by = (elast * (ub - lb) / 500)) if (pdetA[1, 1, 1] == 0) { if (is.na(pdetA[1, 1, 2])) { for (h in 1:nrow(prior_den)) { prior_den[h, 2] <- prior_t(prior_den[h, 1], list2[[1]][1, 1, 3], list2[[1]][1, 1, 4], list2[[1]][1, 1, 5]) } } else if (pdetA[1, 1, 2] == 1) { for (h in 1:nrow(prior_den)) { prior_den[h, 2] <- prior_t_p(prior_den[h, 1], list2[[1]][1, 1, 3], list2[[1]][1, 1, 4], list2[[1]][1, 1, 5]) } } else if (pdetA[1, 1, 2] == (-1)) { for (h in 1:nrow(prior_den)) { prior_den[h, 2] <- prior_t_n(prior_den[h, 1], list2[[1]][1, 1, 3], list2[[1]][1, 1, 4], list2[[1]][1, 1, 5]) } } } else if (pdetA[1, 1, 1] == 1) { for (h in 1:nrow(prior_den)) { prior_den[h, 2] <- prior_nonc_t(prior_den[h, 1], list2[[1]][1, 1, 3], list2[[1]][1, 1, 4], list2[[1]][1, 1, 5], list2[[1]][1, 1, 6]) } } else if (pdetA[1, 1, 1] == 2) { for (h in 1:nrow(prior_den)) { if (((list2[[1]][1, 1, 2] == 1) & (prior_den[h, 1] >= 1)) | ((list2[[1]][1, 1, 2] == -1) & (prior_den[h, 1] <= -1))) { prior_den[h, 2] <- prior_ibeta(abs(prior_den[h, 1]), list2[[1]][1, 1, 4], list2[[1]][1, 1, 5]) } else { prior_den[h, 2] <- 0 } } } else if (pdetA[1, 1, 1] == 3) { for (h in 1:nrow(prior_den)) { if (((list2[[1]][1, 1, 2] == 1) & (prior_den[h, 1] >= 0) & (prior_den[h, 1] <= 1)) | ((list2[[1]][1, 1, 2] == -1) & (prior_den[h, 1] <= 0) & (prior_den[h, 1] >= -1))) { prior_den[h, 2] <- prior_beta(abs(prior_den[h, 1]), list2[[1]][1, 1, 4], list2[[1]][1, 1, 5]) } else { prior_den[h, 2] <- 0 } } } graphics::lines(x = (elast * prior_den[, 1]), y = prior_den[, 2], type = "l", col = "red") } } # Check results from BH_SBVAR. 
#' @keywords internal
BH_SBVAR_results_check <- function(results) {
  if (!is.list(results)) {
    return("results: Must be a list.")
  }
  if (!is.matrix(results$y)) {
    return("results: Must be a list containing a matrix 'y'.")
  }
  if (!is.array(results$A_chain) | !is.array(results$B_chain) | !is.array(results$D_chain)) {
    return("results: Must be a list containing an array 'A_chain', 'B_chain', and 'D_chain'.")
  }
  if ((length(dim(results$A_chain)) != 3) | (length(dim(results$B_chain)) != 3) | (length(dim(results$D_chain)) != 3)) {
    return("results: 'A_chain', 'B_chain', and 'D_chain' must have 3 dimensions.")
  }
  if ((dim(results$A_chain)[3] != dim(results$B_chain)[3]) | (dim(results$A_chain)[3] != dim(results$D_chain)[3])) {
    return("results: The number of slices of the third dimension of 'A_chain' must equal the number of slices of the third dimension of 'B_chain' (or 'D_chain').")
  }
  if ((ncol(results$y) != ncol(results$A_chain)) | (ncol(results$y) != nrow(results$A_chain))) {
    return("results: The number of columns and the number of rows of 'A_chain' must be equal to the number of columns in 'y'.")
  }
  if ((ncol(results$y) != ncol(results$D_chain)) | (ncol(results$y) != nrow(results$D_chain))) {
    return("results: The number of columns (or rows) of 'D_chain' must be equal to the number of columns in 'y'.")
  }
  if (all(!is.numeric(results$nlags)) || (length(results$nlags) > 1)) {
    return("results: 'nlags' should be a single number.")
  }
  if ((results$nlags != round(x = results$nlags, digits = 0)) | (results$nlags < 2)) {
    return("results: 'nlags' should be a positive integer greater than or equal to 2.")
  }
  if ((ncol(results$y) != ncol(results$B_chain)) || (((results$nlags * ncol(results$y)) + 1) != nrow(results$B_chain))) {
    return(paste0("results: 'B_chain' should have ", ((results$nlags * ncol(results$y)) + 1), " ((results$nlags * ncol(results$y)) + 1) rows and ", ncol(results$y), " (ncol(results$y)) columns."))
  }
  return("pass")
}

# Check arguments from the IRF_Plots, HD_Plots, Dist_Plots functions.
#' @keywords internal
plot_funs_args_check <- function(results, xlab, ylab) {
  if (is.array(results) && (length(dim(results)) == 3)) {
    if (length(results) == 0) {
      return(paste("results: Must be an array obtained from running IRF(), HD(), or FEVD().", sep = ""))
    }
    if ((any(!is.finite(results))) || (dim(results)[1] < 4) || (dim(results)[3] != 3) || ((dim(results)[2] / sqrt(dim(results)[2])) != (sqrt(dim(results)[2])))) {
      return(paste("results: Results from IRF() or FEVD() are not present.", sep = ""))
    }
  } else if (is.list(results)) {
    if (any(names(results) == "HD")) {
      if (!is.array(results$HD) || (length(results$HD) == 0)) {
        return(paste("results: Must be an array obtained from running HD().", sep = ""))
      }
      if ((any(!is.finite(results$HD))) || (dim(results$HD)[1] < 4) || (dim(results$HD)[3] != 3) || ((dim(results$HD)[2] / sqrt(dim(results$HD)[2])) != (sqrt(dim(results$HD)[2])))) {
        return(paste("results: Results from HD() are not present.", sep = ""))
      }
    } else {
      if ((is.null(results$A)) || (!is.array(results$A)) || (any(!is.finite(results$A))) || (dim(results$A)[1] != dim(results$A)[2]) || (dim(results$A)[3] != 3) || (dim(results$A)[2] != ncol(results$y)) || (dim(results$A)[2] < 2)) {
        return(paste("results: A from BH_SBVAR() is not present.", sep = ""))
      }
    }
  } else {
    return(paste0("results: Results must be an array or a list containing an array."))
  }
  if ((!is.null(xlab)) && ((!is.character(xlab)) || (length(xlab) != 1))) {
    return(paste("xlab: Must be a character vector of length one containing the label for the horizontal axis.", sep = ""))
  }
  if ((!is.null(ylab)) && ((!is.character(ylab)) || (length(ylab) != 1))) {
    return(paste("ylab: Must be a character vector of length one containing the label for the vertical axis.", sep = ""))
  }
  return("pass")
}
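
# Illustrative note (not part of the exported API): the internal check
# functions above all follow the same convention of returning the string
# "pass" on success and a diagnostic message on failure, so callers can
# forward the message directly, for example:
#   test <- plot_funs_args_check(results = results1, xlab = NULL, ylab = NULL)
#   if (test != "pass") {
#     stop(test)
#   }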
/scratch/gouwar.j/cran-all/cranData/BHSBVAR/R/BHSBVAR.R
#' Forecast Error Variance Decompositions #' #' Forecast Error Variance Decompositions #' @author Paul Richardson #' @export #' @import Rcpp #' @name FEVD #' @param results List containing the results from running BH_SBVAR(). #' @param h Integer specifying the time horizon for computing impulse responses (default = 12). #' @param acc Boolean indicating whether accumulated impulse responses are to be returned (default = TRUE). #' @param cri credibility intervals for the estimates to be returned (default = 0.95). A value of 0.95 will return 95\% credibility intervals. A value of 0.90 will return 90\% credibility intervals. #' @details Computes forecast error variance decomposition estimates. #' @return An array containing forecast error variance decomposition estimates. #' @examples #' # Import data #' library(BHSBVAR) #' set.seed(123) #' data(USLMData) #' y0 <- matrix(data = c(USLMData$Wage, USLMData$Employment), ncol = 2) #' y <- y0 - (matrix(data = 1, nrow = nrow(y0), ncol = ncol(y0)) %*% #' diag(x = colMeans(x = y0, na.rm = FALSE, dims = 1))) #' colnames(y) <- c("Wage", "Employment") #' #' # Set function arguments #' nlags <- 8 #' itr <- 5000 #' burn <- 0 #' thin <- 1 #' acc <- TRUE #' h <- 20 #' cri <- 0.95 #' #' # Priors for A #' pA <- array(data = NA, dim = c(2, 2, 8)) #' pA[, , 1] <- c(0, NA, 0, NA) #' pA[, , 2] <- c(1, NA, -1, NA) #' pA[, , 3] <- c(0.6, 1, -0.6, 1) #' pA[, , 4] <- c(0.6, NA, 0.6, NA) #' pA[, , 5] <- c(3, NA, 3, NA) #' pA[, , 6] <- c(NA, NA, NA, NA) #' pA[, , 7] <- c(NA, NA, 1, NA) #' pA[, , 8] <- c(2, NA, 2, NA) #' #' # Position priors for Phi #' pP <- matrix(data = 0, nrow = ((nlags * ncol(pA)) + 1), ncol = ncol(pA)) #' pP[1:nrow(pA), 1:ncol(pA)] <- #' diag(x = 1, nrow = nrow(pA), ncol = ncol(pA)) #' #' # Confidence in the priors for Phi #' x1 <- #' matrix(data = NA, nrow = (nrow(y) - nlags), #' ncol = (ncol(y) * nlags)) #' for (k in 1:nlags) { #' x1[, (ncol(y) * (k - 1) + 1):(ncol(y) * k)] <- #' y[(nlags - k + 1):(nrow(y) - k),] #' } #' x1 <- cbind(x1, 1) #' colnames(x1) <- #' c(paste(rep(colnames(y), nlags), #' "_L", #' sort(rep(seq(from = 1, to = nlags, by = 1), times = ncol(y)), #' decreasing = FALSE), #' sep = ""), #' "cons") #' y1 <- y[(nlags + 1):nrow(y),] #' ee <- matrix(data = NA, nrow = nrow(y1), ncol = ncol(y1)) #' for (i in 1:ncol(y1)) { #' xx <- cbind(x1[, seq(from = i, to = (ncol(x1) - 1), by = ncol(y1))], 1) #' yy <- matrix(data = y1[, i], ncol = 1) #' phi <- solve(t(xx) %*% xx, t(xx) %*% yy) #' ee[, i] <- yy - (xx %*% phi) #' } #' somega <- (t(ee) %*% ee) / nrow(ee) #' lambda0 <- 0.2 #' lambda1 <- 1 #' lambda3 <- 100 #' v1 <- matrix(data = (1:nlags), nrow = nlags, ncol = 1) #' v1 <- v1^((-2) * lambda1) #' v2 <- matrix(data = diag(solve(diag(diag(somega)))), ncol = 1) #' v3 <- kronecker(v1, v2) #' v3 <- (lambda0^2) * rbind(v3, (lambda3^2)) #' v3 <- 1 / v3 #' pP_sig <- diag(x = 1, nrow = nrow(v3), ncol = nrow(v3)) #' diag(pP_sig) <- v3 #' #' # Confidence in long-run restriction priors #' pR_sig <- #' array(data = 0, #' dim = c(((nlags * ncol(y)) + 1), #' ((nlags * ncol(y)) + 1), #' ncol(y))) #' Ri <- #' cbind(kronecker(matrix(data = 1, nrow = 1, ncol = nlags), #' matrix(data = c(1, 0), nrow = 1)), #' 0) #' pR_sig[, , 2] <- (t(Ri) %*% Ri) / 0.1 #' #' # Confidence in priors for D #' kappa1 <- matrix(data = 2, nrow = 1, ncol = ncol(y)) #' #' # Set graphical parameters #' par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif", #' mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1) #' #' # Estimate the parameters of the model #' results1 <- #' 
BH_SBVAR(y = y, nlags = nlags, pA = pA, pP = pP, pP_sig = pP_sig,
#'            pR_sig = pR_sig, kappa1 = kappa1, itr = itr, burn = burn,
#'            thin = thin, cri = cri)
#'
#' fevd <- FEVD(results = results1, h = h, acc = acc, cri = cri)
#'
#' # Plot forecast error variance decompositions
#' varnames <- colnames(USLMData)[2:3]
#' shocknames <- c("Labor Demand","Labor Supply")
#' fevd_results <-
#'   FEVD_Plots(results = fevd, varnames = varnames,
#'              shocknames = shocknames)
FEVD <- function(results, h = 12, acc = TRUE, cri = 0.95) {
  test <- BH_SBVAR_results_check(results = results)
  if (test != "pass") {
    stop(test)
  }
  if ((all(!is.numeric(h))) || (length(h) > 1)) {
    stop("h: Should be a single number.")
  }
  if ((h != round(x = h, digits = 0)) | (h < 3)) {
    stop("h: Should be a positive integer greater than or equal to 3.")
  }
  if ((all(!is.numeric(cri))) || (length(cri) > 1)) {
    stop("cri: Should be a single number.")
  }
  if ((cri > 1) | (cri < 0.5)) {
    stop("cri: Should be a positive value between 0.5 and 1.")
  }
  if ((!is.logical(acc)) || (is.na(acc))) {
    stop(paste("acc: Must be logical 'TRUE' or 'FALSE'.", sep = ""))
  }
  varnames <- colnames(results$y)
  ci <- (1.0 - ((1.0 - cri) / 2.0))
  nvar <- dim(results$A_chain[, , ])[2]
  nlags <- results$nlags
  nsli <- dim(results$A_chain[, , ])[3]
  fevd <- fevd_estimates(A_chain = results$A_chain[, , ], B_chain = results$B_chain[, , ], D_chain = results$D_chain[, , ], nlags = nlags, h = h, acc = acc, ci = ci)
  dimnames(fevd) <- list(NULL, paste0("Res_", varnames, paste0("_Shk_", varnames[c(sort(x = rep(x = c(1:nvar), times = nvar)))])), paste0(c(((1 - ci) * 100), 50, (ci * 100)), "%"))
  return(fevd)
}

#' Plot Forecast Error Variance Decompositions
#'
#' Plot Forecast Error Variance Decompositions
#' @author Paul Richardson
#' @export
#' @name FEVD_Plots
#' @param results Array containing the forecast error variance decomposition estimates obtained from running FEVD().
#' @param varnames Character vector containing the names of the endogenous variables.
#' @param shocknames Character vector containing the names of the shocks.
#' @param xlab Character label for the horizontal axis of forecast error variance decomposition plots (default = NULL). Default produces plots without a label for the horizontal axis.
#' @param ylab Character label for the vertical axis of forecast error variance decomposition plots (default = NULL). Default produces plots without a label for the vertical axis.
#' @param rel Boolean indicating whether to display forecast error variance explained by the shock as a percent of total forecast error variance (default = TRUE).
#' @details Plots forecast error variance decompositions and returns a list containing the actual processed data used to create the plots.
#' @return A list containing forecast error variance decompositions.
#' @examples #' # Import data #' library(BHSBVAR) #' set.seed(123) #' data(USLMData) #' y0 <- matrix(data = c(USLMData$Wage, USLMData$Employment), ncol = 2) #' y <- y0 - (matrix(data = 1, nrow = nrow(y0), ncol = ncol(y0)) %*% #' diag(x = colMeans(x = y0, na.rm = FALSE, dims = 1))) #' colnames(y) <- c("Wage", "Employment") #' #' # Set function arguments #' nlags <- 8 #' itr <- 5000 #' burn <- 0 #' thin <- 1 #' acc <- TRUE #' h <- 20 #' cri <- 0.95 #' #' # Priors for A #' pA <- array(data = NA, dim = c(2, 2, 8)) #' pA[, , 1] <- c(0, NA, 0, NA) #' pA[, , 2] <- c(1, NA, -1, NA) #' pA[, , 3] <- c(0.6, 1, -0.6, 1) #' pA[, , 4] <- c(0.6, NA, 0.6, NA) #' pA[, , 5] <- c(3, NA, 3, NA) #' pA[, , 6] <- c(NA, NA, NA, NA) #' pA[, , 7] <- c(NA, NA, 1, NA) #' pA[, , 8] <- c(2, NA, 2, NA) #' #' # Position priors for Phi #' pP <- matrix(data = 0, nrow = ((nlags * ncol(pA)) + 1), ncol = ncol(pA)) #' pP[1:nrow(pA), 1:ncol(pA)] <- #' diag(x = 1, nrow = nrow(pA), ncol = ncol(pA)) #' #' # Confidence in the priors for Phi #' x1 <- #' matrix(data = NA, nrow = (nrow(y) - nlags), #' ncol = (ncol(y) * nlags)) #' for (k in 1:nlags) { #' x1[, (ncol(y) * (k - 1) + 1):(ncol(y) * k)] <- #' y[(nlags - k + 1):(nrow(y) - k),] #' } #' x1 <- cbind(x1, 1) #' colnames(x1) <- #' c(paste(rep(colnames(y), nlags), #' "_L", #' sort(rep(seq(from = 1, to = nlags, by = 1), times = ncol(y)), #' decreasing = FALSE), #' sep = ""), #' "cons") #' y1 <- y[(nlags + 1):nrow(y),] #' ee <- matrix(data = NA, nrow = nrow(y1), ncol = ncol(y1)) #' for (i in 1:ncol(y1)) { #' xx <- cbind(x1[, seq(from = i, to = (ncol(x1) - 1), by = ncol(y1))], 1) #' yy <- matrix(data = y1[, i], ncol = 1) #' phi <- solve(t(xx) %*% xx, t(xx) %*% yy) #' ee[, i] <- yy - (xx %*% phi) #' } #' somega <- (t(ee) %*% ee) / nrow(ee) #' lambda0 <- 0.2 #' lambda1 <- 1 #' lambda3 <- 100 #' v1 <- matrix(data = (1:nlags), nrow = nlags, ncol = 1) #' v1 <- v1^((-2) * lambda1) #' v2 <- matrix(data = diag(solve(diag(diag(somega)))), ncol = 1) #' v3 <- kronecker(v1, v2) #' v3 <- (lambda0^2) * rbind(v3, (lambda3^2)) #' v3 <- 1 / v3 #' pP_sig <- diag(x = 1, nrow = nrow(v3), ncol = nrow(v3)) #' diag(pP_sig) <- v3 #' #' # Confidence in long-run restriction priors #' pR_sig <- #' array(data = 0, #' dim = c(((nlags * ncol(y)) + 1), #' ((nlags * ncol(y)) + 1), #' ncol(y))) #' Ri <- #' cbind(kronecker(matrix(data = 1, nrow = 1, ncol = nlags), #' matrix(data = c(1, 0), nrow = 1)), #' 0) #' pR_sig[, , 2] <- (t(Ri) %*% Ri) / 0.1 #' #' # Confidence in priors for D #' kappa1 <- matrix(data = 2, nrow = 1, ncol = ncol(y)) #' #' # Set graphical parameters #' par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif", #' mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1) #' #' # Estimate the parameters of the model #' results1 <- #' BH_SBVAR(y = y, nlags = nlags, pA = pA, pP = pP, pP_sig = pP_sig, #' pR_sig = pR_sig, kappa1 = kappa1, itr = itr, burn = burn, #' thin = thin, cri = cri) #' #' fevd <- FEVD(results = results1, h = h, acc = acc, cri = cri) #' #' # Plot impulse responses #' varnames <- colnames(USLMData)[2:3] #' shocknames <- c("Labor Demand","Labor Supply") #' fevd_results <- #' FEVD_Plots(results = fevd, varnames = varnames, #' shocknames = shocknames) FEVD_Plots <- function(results, varnames, shocknames = NULL, xlab = NULL, ylab = NULL, rel = TRUE) { #test arguments test <- plot_funs_args_check(results = results, xlab = xlab, ylab = ylab) if (test != "pass") { stop(test) } if (!is.vector(varnames) || (!is.character(varnames)) || (length(varnames) != sqrt(dim(results)[2]))) { 
stop(paste("varnames: Must be a character vector containing the names of the endogenous variables.", sep = "")) } if (is.null(shocknames)) { shocknames <- varnames } if (!is.vector(shocknames) || (!is.character(shocknames)) || (length(shocknames) != sqrt(dim(results)[2]))) { stop(paste("shocknames: Must be a character vector containing the names of the shocks.", sep = "")) } if (is.null(xlab)) { xlab <- "" } if (is.null(ylab)) { ylab <- "" } if (!isTRUE(rel) & !isFALSE(rel)) { stop("rel: Must be TRUE or FALSE.") } fevd <- results nvar <- round(sqrt(dim(results)[2])) xticks <- floor(dim(fevd)[1] / 4) fevdcolnames <- dimnames(fevd)[[2]] colors1 <- c("black", "red", "black", "red") lty1 <- c(1, 2, 1, 2) if (rel) { colors1[] <- rep(x = "black", times = length(colors1)) lty1[] <- rep(x = 1, times = length(lty1)) ind1 <- rep(x = 0, times = sqrt(dim(fevd)[2])) for (i in 1:sqrt(dim(fevd)[2])) { ind1[] <- seq(from = i, to = dim(fevd)[2], by = sqrt(dim(fevd)[2])) fevd[, ind1, "50%"] <- fevd[, ind1, "50%"] * matrix(data = rep(x = (100 / rowSums(fevd[, ind1, "50%"])), times = nvar), ncol = nvar, dimnames = list(NULL, fevdcolnames[ind1])) } fevd[, , c(1, 3)] <- fevd[, , "50%"] } dimnames(fevd)[[3]] <- rep(x = "50%", times = 3) #store results from fevd responses fevd_results <- vector(mode = "list", length = (nvar * nvar)) for (j in 1:nvar) { for (i in 1:nvar) { #fevd responses names(fevd_results)[((nvar * (j - 1)) + i)] <- fevdcolnames[((nvar * (j - 1)) + i)] fevd_results[[((nvar * (j - 1)) + i)]] <- fevd[, ((nvar * (j - 1)) + i), ] #fevd response plots mat_ts <- stats::ts(cbind(0, fevd_results[[((nvar * (j - 1)) + i)]])) colnames(mat_ts) <- c("Series1", "Series2", "Series3", "Series4") stats::ts.plot(mat_ts, col = colors1, gpars = list(xlab = xlab, ylab = ylab, xaxs = "i", yaxs = "r", xaxt = "n", lty = lty1)) graphics::title(main = paste("Response of ", varnames[i], " to ", shocknames[j], sep = ""), col.main = "black") graphics::axis(side = 1, at = seq(from = 1, to = nrow(mat_ts), by = xticks), labels = seq(from = 0, to = (nrow(mat_ts) - 1),by = xticks)) } } return(fevd_results) }
/scratch/gouwar.j/cran-all/cranData/BHSBVAR/R/FEVD.R
#' Historical Decompositions #' #' Historical Decompositions #' @author Paul Richardson #' @export #' @import Rcpp #' @name HD #' @param results List containing the results from running BH_SBVAR(). #' @param cri credibility intervals for the estimates to be returned (default = 0.95). A value of 0.95 will return 95\% credibility intervals. A value of 0.90 will return 90\% credibility intervals. #' @details Computes historical decomposition estimates. #' @return An array containing historical decomposition estimates. #' @examples #' # Import data #' library(BHSBVAR) #' set.seed(123) #' data(USLMData) #' y0 <- matrix(data = c(USLMData$Wage, USLMData$Employment), ncol = 2) #' y <- y0 - (matrix(data = 1, nrow = nrow(y0), ncol = ncol(y0)) %*% #' diag(x = colMeans(x = y0, na.rm = FALSE, dims = 1))) #' colnames(y) <- c("Wage", "Employment") #' #' # Set function arguments #' nlags <- 8 #' itr <- 5000 #' burn <- 0 #' thin <- 1 #' acc <- TRUE #' h <- 20 #' cri <- 0.95 #' #' # Priors for A #' pA <- array(data = NA, dim = c(2, 2, 8)) #' pA[, , 1] <- c(0, NA, 0, NA) #' pA[, , 2] <- c(1, NA, -1, NA) #' pA[, , 3] <- c(0.6, 1, -0.6, 1) #' pA[, , 4] <- c(0.6, NA, 0.6, NA) #' pA[, , 5] <- c(3, NA, 3, NA) #' pA[, , 6] <- c(NA, NA, NA, NA) #' pA[, , 7] <- c(NA, NA, 1, NA) #' pA[, , 8] <- c(2, NA, 2, NA) #' #' # Position priors for Phi #' pP <- matrix(data = 0, nrow = ((nlags * ncol(pA)) + 1), ncol = ncol(pA)) #' pP[1:nrow(pA), 1:ncol(pA)] <- #' diag(x = 1, nrow = nrow(pA), ncol = ncol(pA)) #' #' # Confidence in the priors for Phi #' x1 <- #' matrix(data = NA, nrow = (nrow(y) - nlags), #' ncol = (ncol(y) * nlags)) #' for (k in 1:nlags) { #' x1[, (ncol(y) * (k - 1) + 1):(ncol(y) * k)] <- #' y[(nlags - k + 1):(nrow(y) - k),] #' } #' x1 <- cbind(x1, 1) #' colnames(x1) <- #' c(paste(rep(colnames(y), nlags), #' "_L", #' sort(rep(seq(from = 1, to = nlags, by = 1), times = ncol(y)), #' decreasing = FALSE), #' sep = ""), #' "cons") #' y1 <- y[(nlags + 1):nrow(y),] #' ee <- matrix(data = NA, nrow = nrow(y1), ncol = ncol(y1)) #' for (i in 1:ncol(y1)) { #' xx <- cbind(x1[, seq(from = i, to = (ncol(x1) - 1), by = ncol(y1))], 1) #' yy <- matrix(data = y1[, i], ncol = 1) #' phi <- solve(t(xx) %*% xx, t(xx) %*% yy) #' ee[, i] <- yy - (xx %*% phi) #' } #' somega <- (t(ee) %*% ee) / nrow(ee) #' lambda0 <- 0.2 #' lambda1 <- 1 #' lambda3 <- 100 #' v1 <- matrix(data = (1:nlags), nrow = nlags, ncol = 1) #' v1 <- v1^((-2) * lambda1) #' v2 <- matrix(data = diag(solve(diag(diag(somega)))), ncol = 1) #' v3 <- kronecker(v1, v2) #' v3 <- (lambda0^2) * rbind(v3, (lambda3^2)) #' v3 <- 1 / v3 #' pP_sig <- diag(x = 1, nrow = nrow(v3), ncol = nrow(v3)) #' diag(pP_sig) <- v3 #' #' # Confidence in long-run restriction priors #' pR_sig <- #' array(data = 0, #' dim = c(((nlags * ncol(y)) + 1), #' ((nlags * ncol(y)) + 1), #' ncol(y))) #' Ri <- #' cbind(kronecker(matrix(data = 1, nrow = 1, ncol = nlags), #' matrix(data = c(1, 0), nrow = 1)), #' 0) #' pR_sig[, , 2] <- (t(Ri) %*% Ri) / 0.1 #' #' # Confidence in priors for D #' kappa1 <- matrix(data = 2, nrow = 1, ncol = ncol(y)) #' #' # Set graphical parameters #' par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif", #' mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1) #' #' # Estimate the parameters of the model #' results1 <- #' BH_SBVAR(y = y, nlags = nlags, pA = pA, pP = pP, pP_sig = pP_sig, #' pR_sig = pR_sig, kappa1 = kappa1, itr = itr, burn = burn, #' thin = thin, cri = cri) #' #' hd <- HD(results = results1, cri = cri) #' #' # Plot historical decompositions #' varnames <- 
colnames(USLMData)[2:3] #' shocknames <- c("Labor Demand","Labor Supply") #' freq <- 4 #' start_date <- #' c(floor(USLMData[(nlags + 1), 1]), #' round(((USLMData[(nlags + 1), 1] %% 1) * freq), digits = 0)) #' hd_results <- #' HD_Plots(results = hd, varnames = varnames, #' shocknames = shocknames, #' freq = freq, start_date = start_date) HD <- function(results, cri = 0.95) { test <- BH_SBVAR_results_check(results = results) if (test != "pass") { stop(test) } if ((all(!is.numeric(cri))) || (length(cri) > 1)) { stop("cri: Should be a single number.") } if ((cri > 1) | (cri < 0.5)) { stop("cri: Should be a positive value between 0.5 and 1.") } varnames <- colnames(results$y) ci <- (1.0 - ((1.0 - cri) / 2.0)) nvar <- dim(results$A_chain[, , ])[2] nlags <- results$nlags nsli <- dim(results$A_chain[, , ])[3] hd <- list("y" = NULL, "HD" = NULL) hd$HD <- hd_estimates(A_chain = results$A_chain, B_chain = results$B_chain, y1 = results$y, x1 = results$x, nlags = nlags, ci = ci) dimnames(hd$HD) <- list(NULL, paste0("Res_", varnames, paste0("_Shk_", varnames[c(sort(x = rep(x = c(1:nvar), times = nvar)))])), paste0(c(((1 - ci) * 100), 50, (ci * 100)),"%")) hd$y <- results$y return(hd) } #' Plot Historical Decompositions #' #' Plot Historical Decompositions. #' @author Paul Richardson #' @export #' @name HD_Plots #' @param results List containing the results from running HD(). #' @param varnames Character vector containing the names of the endogenous variables. #' @param shocknames Character vector containing the names of the shocks. #' @param xlab Character label for the horizontal axis of historical decomposition plots (default = NULL). Default produces plots without a label for the horizontal axis. #' @param ylab Character label for the vertical axis of historical decomposition plots (default = NULL). Default produces plots without a label for the vertical axis. #' @param freq Numeric value indicating the frequency of the data. #' @param start_date Numeric vector indicating the date of the first observation of the endogenous variables included in the model. #' @details Plots historical decompositions and returns a list containing the actual processed data used to create the plots. #' @return A list containing historical decompositions. 
#' @examples #' # Import data #' library(BHSBVAR) #' set.seed(123) #' data(USLMData) #' y0 <- matrix(data = c(USLMData$Wage, USLMData$Employment), ncol = 2) #' y <- y0 - (matrix(data = 1, nrow = nrow(y0), ncol = ncol(y0)) %*% #' diag(x = colMeans(x = y0, na.rm = FALSE, dims = 1))) #' colnames(y) <- c("Wage", "Employment") #' #' # Set function arguments #' nlags <- 8 #' itr <- 5000 #' burn <- 0 #' thin <- 1 #' acc <- TRUE #' h <- 20 #' cri <- 0.95 #' #' # Priors for A #' pA <- array(data = NA, dim = c(2, 2, 8)) #' pA[, , 1] <- c(0, NA, 0, NA) #' pA[, , 2] <- c(1, NA, -1, NA) #' pA[, , 3] <- c(0.6, 1, -0.6, 1) #' pA[, , 4] <- c(0.6, NA, 0.6, NA) #' pA[, , 5] <- c(3, NA, 3, NA) #' pA[, , 6] <- c(NA, NA, NA, NA) #' pA[, , 7] <- c(NA, NA, 1, NA) #' pA[, , 8] <- c(2, NA, 2, NA) #' #' # Position priors for Phi #' pP <- matrix(data = 0, nrow = ((nlags * ncol(pA)) + 1), ncol = ncol(pA)) #' pP[1:nrow(pA), 1:ncol(pA)] <- #' diag(x = 1, nrow = nrow(pA), ncol = ncol(pA)) #' #' # Confidence in the priors for Phi #' x1 <- #' matrix(data = NA, nrow = (nrow(y) - nlags), #' ncol = (ncol(y) * nlags)) #' for (k in 1:nlags) { #' x1[, (ncol(y) * (k - 1) + 1):(ncol(y) * k)] <- #' y[(nlags - k + 1):(nrow(y) - k),] #' } #' x1 <- cbind(x1, 1) #' colnames(x1) <- #' c(paste(rep(colnames(y), nlags), #' "_L", #' sort(rep(seq(from = 1, to = nlags, by = 1), times = ncol(y)), #' decreasing = FALSE), #' sep = ""), #' "cons") #' y1 <- y[(nlags + 1):nrow(y),] #' ee <- matrix(data = NA, nrow = nrow(y1), ncol = ncol(y1)) #' for (i in 1:ncol(y1)) { #' xx <- cbind(x1[, seq(from = i, to = (ncol(x1) - 1), by = ncol(y1))], 1) #' yy <- matrix(data = y1[, i], ncol = 1) #' phi <- solve(t(xx) %*% xx, t(xx) %*% yy) #' ee[, i] <- yy - (xx %*% phi) #' } #' somega <- (t(ee) %*% ee) / nrow(ee) #' lambda0 <- 0.2 #' lambda1 <- 1 #' lambda3 <- 100 #' v1 <- matrix(data = (1:nlags), nrow = nlags, ncol = 1) #' v1 <- v1^((-2) * lambda1) #' v2 <- matrix(data = diag(solve(diag(diag(somega)))), ncol = 1) #' v3 <- kronecker(v1, v2) #' v3 <- (lambda0^2) * rbind(v3, (lambda3^2)) #' v3 <- 1 / v3 #' pP_sig <- diag(x = 1, nrow = nrow(v3), ncol = nrow(v3)) #' diag(pP_sig) <- v3 #' #' # Confidence in long-run restriction priors #' pR_sig <- #' array(data = 0, #' dim = c(((nlags * ncol(y)) + 1), #' ((nlags * ncol(y)) + 1), #' ncol(y))) #' Ri <- #' cbind(kronecker(matrix(data = 1, nrow = 1, ncol = nlags), #' matrix(data = c(1, 0), nrow = 1)), #' 0) #' pR_sig[, , 2] <- (t(Ri) %*% Ri) / 0.1 #' #' # Confidence in priors for D #' kappa1 <- matrix(data = 2, nrow = 1, ncol = ncol(y)) #' #' # Set graphical parameters #' par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif", #' mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1) #' #' # Estimate the parameters of the model #' results1 <- #' BH_SBVAR(y = y, nlags = nlags, pA = pA, pP = pP, pP_sig = pP_sig, #' pR_sig = pR_sig, kappa1 = kappa1, itr = itr, burn = burn, #' thin = thin, cri = cri) #' #' hd <- HD(results = results1, cri = cri) #' #' # Plot historical decompositions #' varnames <- colnames(USLMData)[2:3] #' shocknames <- c("Labor Demand","Labor Supply") #' freq <- 4 #' start_date <- #' c(floor(USLMData[(nlags + 1), 1]), #' round(((USLMData[(nlags + 1), 1] %% 1) * freq), digits = 0)) #' hd_results <- #' HD_Plots(results = hd, varnames = varnames, #' shocknames = shocknames, #' freq = freq, start_date = start_date) HD_Plots <- function(results, varnames, shocknames = NULL, xlab = NULL, ylab = NULL, freq, start_date) { #test arguments test <- plot_funs_args_check(results = results, xlab = xlab, ylab = 
ylab) if (test != "pass") { stop(test) } if (!is.vector(varnames) || (!is.character(varnames)) || (length(varnames) != sqrt(dim(results$HD)[2])) || (length(varnames) != ncol(results$y))) { stop(paste("varnames: Must be a character vector containing the names of the endogenous variables.", sep = "")) } if (is.null(shocknames)) { shocknames <- varnames } if (!is.vector(shocknames) || (!is.character(shocknames)) || (length(shocknames) != sqrt(dim(results$HD)[2])) || (length(shocknames) != ncol(results$y))) { stop(paste("shocknames: Must be a character vector containing the names of the shocks.", sep = "")) } if ((!is.numeric(freq)) || (!is.finite(freq)) || (length(freq) != 1) || ((freq %% 1) != 0) || (freq < 1)) { stop("freq: Must be a finite whole number greater than 0.") } if ((!is.numeric(start_date)) || (any(!is.finite(start_date))) || (length(start_date) != 2) || (any((start_date %% 1) != 0)) || (any(start_date < 0))) { stop("start_date: Must be a numeric vector containing finite whole numbers greater than or equal to 0.") } if (is.null(xlab)) { xlab <- "" } if (is.null(ylab)) { ylab <- "" } y <- results$y hd <- results$HD nvar <- dim(results$y)[2] #store results from historical decompositions hd_results <- vector(mode = "list", length = (nvar * nvar)) for (j in 1:nvar) { for (i in 1:nvar) { #historical decomposition names(hd_results)[((nvar * (j - 1)) + i)] <- dimnames(hd)[[2]][((nvar * (j - 1)) + i)] hd_results[[((nvar * (j - 1)) + i)]] <- hd[, ((nvar * (j - 1)) + i), ] #historical decomposition plots mat_ts <- stats::ts(cbind(0, y[, i], hd_results[[((nvar * (j - 1)) + i)]]), frequency = freq, start = start_date) colnames(mat_ts) <- c("Series1", "Series2", "Series3", "Series4", "Series5") stats::ts.plot(mat_ts, col = c("black", "black", "red", "red", "red"), gpars = list(xlab = xlab, ylab = ylab, xaxs = "i", yaxs = "r", lty = c(1, 1, 2, 1, 2))) graphics::title(main = paste("Contribution of ", shocknames[j], " Shocks to ", varnames[i], sep = ""), col.main = "black") } } return(hd_results) }
/scratch/gouwar.j/cran-all/cranData/BHSBVAR/R/HD.R
#' Impulse Responses #' #' Impulse Responses #' @author Paul Richardson #' @export #' @import Rcpp #' @name IRF #' @param results List containing the results from running BH_SBVAR(). #' @param h Integer specifying the time horizon for computing impulse responses (default = 12). #' @param acc Boolean indicating whether accumulated impulse responses are to be returned (default = TRUE). #' @param cri credibility intervals for the estimates to be returned (default = 0.95). A value of 0.95 will return 95\% credibility intervals. A value of 0.90 will return 90\% credibility intervals. #' @details Computes impulse response estimates. #' @return An Array containing the impulse response estimates. #' @examples #' # Import data #' library(BHSBVAR) #' set.seed(123) #' data(USLMData) #' y0 <- matrix(data = c(USLMData$Wage, USLMData$Employment), ncol = 2) #' y <- y0 - (matrix(data = 1, nrow = nrow(y0), ncol = ncol(y0)) %*% #' diag(x = colMeans(x = y0, na.rm = FALSE, dims = 1))) #' colnames(y) <- c("Wage", "Employment") #' #' # Set function arguments #' nlags <- 8 #' itr <- 5000 #' burn <- 0 #' thin <- 1 #' acc <- TRUE #' h <- 20 #' cri <- 0.95 #' #' # Priors for A #' pA <- array(data = NA, dim = c(2, 2, 8)) #' pA[, , 1] <- c(0, NA, 0, NA) #' pA[, , 2] <- c(1, NA, -1, NA) #' pA[, , 3] <- c(0.6, 1, -0.6, 1) #' pA[, , 4] <- c(0.6, NA, 0.6, NA) #' pA[, , 5] <- c(3, NA, 3, NA) #' pA[, , 6] <- c(NA, NA, NA, NA) #' pA[, , 7] <- c(NA, NA, 1, NA) #' pA[, , 8] <- c(2, NA, 2, NA) #' #' # Position priors for Phi #' pP <- matrix(data = 0, nrow = ((nlags * ncol(pA)) + 1), ncol = ncol(pA)) #' pP[1:nrow(pA), 1:ncol(pA)] <- #' diag(x = 1, nrow = nrow(pA), ncol = ncol(pA)) #' #' # Confidence in the priors for Phi #' x1 <- #' matrix(data = NA, nrow = (nrow(y) - nlags), #' ncol = (ncol(y) * nlags)) #' for (k in 1:nlags) { #' x1[, (ncol(y) * (k - 1) + 1):(ncol(y) * k)] <- #' y[(nlags - k + 1):(nrow(y) - k),] #' } #' x1 <- cbind(x1, 1) #' colnames(x1) <- #' c(paste(rep(colnames(y), nlags), #' "_L", #' sort(rep(seq(from = 1, to = nlags, by = 1), times = ncol(y)), #' decreasing = FALSE), #' sep = ""), #' "cons") #' y1 <- y[(nlags + 1):nrow(y),] #' ee <- matrix(data = NA, nrow = nrow(y1), ncol = ncol(y1)) #' for (i in 1:ncol(y1)) { #' xx <- cbind(x1[, seq(from = i, to = (ncol(x1) - 1), by = ncol(y1))], 1) #' yy <- matrix(data = y1[, i], ncol = 1) #' phi <- solve(t(xx) %*% xx, t(xx) %*% yy) #' ee[, i] <- yy - (xx %*% phi) #' } #' somega <- (t(ee) %*% ee) / nrow(ee) #' lambda0 <- 0.2 #' lambda1 <- 1 #' lambda3 <- 100 #' v1 <- matrix(data = (1:nlags), nrow = nlags, ncol = 1) #' v1 <- v1^((-2) * lambda1) #' v2 <- matrix(data = diag(solve(diag(diag(somega)))), ncol = 1) #' v3 <- kronecker(v1, v2) #' v3 <- (lambda0^2) * rbind(v3, (lambda3^2)) #' v3 <- 1 / v3 #' pP_sig <- diag(x = 1, nrow = nrow(v3), ncol = nrow(v3)) #' diag(pP_sig) <- v3 #' #' # Confidence in long-run restriction priors #' pR_sig <- #' array(data = 0, #' dim = c(((nlags * ncol(y)) + 1), #' ((nlags * ncol(y)) + 1), #' ncol(y))) #' Ri <- #' cbind(kronecker(matrix(data = 1, nrow = 1, ncol = nlags), #' matrix(data = c(1, 0), nrow = 1)), #' 0) #' pR_sig[, , 2] <- (t(Ri) %*% Ri) / 0.1 #' #' # Confidence in priors for D #' kappa1 <- matrix(data = 2, nrow = 1, ncol = ncol(y)) #' #' # Set graphical parameters #' par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif", #' mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1) #' #' # Estimate the parameters of the model #' results1 <- #' BH_SBVAR(y = y, nlags = nlags, pA = pA, pP = pP, pP_sig = pP_sig, #' pR_sig = pR_sig, 
kappa1 = kappa1, itr = itr, burn = burn, #' thin = thin, cri = cri) #' #' irf <- IRF(results = results1, h = h, acc = acc, cri = cri) #' #' # Plot impulse responses #' varnames <- colnames(USLMData)[2:3] #' shocknames <- c("Labor Demand","Labor Supply") #' irf_results <- #' IRF_Plots(results = irf, varnames = varnames, #' shocknames = shocknames) IRF <- function(results, h = 12, acc = TRUE, cri = 0.95) { test <- BH_SBVAR_results_check(results = results) if (test != "pass") { stop(test) } if ((all(!is.numeric(h))) || (length(h) > 1)) { stop("h: Should be a single number.") } if ((h != round(x = h, digits = 0)) | (h < 3)) { stop("h: Should be a positive integer greater than or equal to 3.") } if ((all(!is.numeric(cri))) || (length(cri) > 1)) { stop("cri: Should be a single number.") } if ((cri > 1) | (cri < 0.5)) { stop("cri: Should be a positive value between 0.5 and 1.") } if ((!is.logical(acc)) || (is.na(acc))) { stop(paste("acc: Must be logical 'TRUE' or 'FALSE'.", sep = "")) } varnames <- colnames(results$y) ci <- (1.0 - ((1.0 - cri) / 2.0)) nvar <- dim(results$A_chain[, , ])[2] nlags <- results$nlags nsli <- dim(results$A_chain[, , ])[3] irf <- irf_estimates(A_chain = results$A_chain[, , ], B_chain = results$B_chain[, , ], nlags = nlags, h = h, acc = acc, ci = ci) dimnames(irf) <- list(NULL, paste0("Res_", varnames, paste0("_Shk_", varnames[c(sort(x = rep(x = c(1:nvar), times = nvar)))])), paste0(c(((1 - ci) * 100), 50, (ci * 100)),"%")) return(irf) } #' Plot Impulse Responses #' #' Plot Impulse Responses. #' @author Paul Richardson #' @export #' @name IRF_Plots #' @param results Array containing the results from running IRF(). #' @param varnames Character vector containing the names of the endogenous variables. #' @param shocknames Character vector containing the names of the shocks. #' @param xlab Character label for the horizontal axis of impulse response plots (default = NULL). Default produces plots without a label for the horizontal axis. #' @param ylab Character label for the vertical axis of impulse response plots (default = NULL). Default produces plots without a label for the vertical axis. #' @details Plots impulse responses and returns a list containing the actual processed data used to create the plots. #' @return A list containing impulse responses. 
#' @examples #' # Import data #' library(BHSBVAR) #' set.seed(123) #' data(USLMData) #' y0 <- matrix(data = c(USLMData$Wage, USLMData$Employment), ncol = 2) #' y <- y0 - (matrix(data = 1, nrow = nrow(y0), ncol = ncol(y0)) %*% #' diag(x = colMeans(x = y0, na.rm = FALSE, dims = 1))) #' colnames(y) <- c("Wage", "Employment") #' #' # Set function arguments #' nlags <- 8 #' itr <- 5000 #' burn <- 0 #' thin <- 1 #' acc <- TRUE #' h <- 20 #' cri <- 0.95 #' #' # Priors for A #' pA <- array(data = NA, dim = c(2, 2, 8)) #' pA[, , 1] <- c(0, NA, 0, NA) #' pA[, , 2] <- c(1, NA, -1, NA) #' pA[, , 3] <- c(0.6, 1, -0.6, 1) #' pA[, , 4] <- c(0.6, NA, 0.6, NA) #' pA[, , 5] <- c(3, NA, 3, NA) #' pA[, , 6] <- c(NA, NA, NA, NA) #' pA[, , 7] <- c(NA, NA, 1, NA) #' pA[, , 8] <- c(2, NA, 2, NA) #' #' # Position priors for Phi #' pP <- matrix(data = 0, nrow = ((nlags * ncol(pA)) + 1), ncol = ncol(pA)) #' pP[1:nrow(pA), 1:ncol(pA)] <- #' diag(x = 1, nrow = nrow(pA), ncol = ncol(pA)) #' #' # Confidence in the priors for Phi #' x1 <- #' matrix(data = NA, nrow = (nrow(y) - nlags), #' ncol = (ncol(y) * nlags)) #' for (k in 1:nlags) { #' x1[, (ncol(y) * (k - 1) + 1):(ncol(y) * k)] <- #' y[(nlags - k + 1):(nrow(y) - k),] #' } #' x1 <- cbind(x1, 1) #' colnames(x1) <- #' c(paste(rep(colnames(y), nlags), #' "_L", #' sort(rep(seq(from = 1, to = nlags, by = 1), times = ncol(y)), #' decreasing = FALSE), #' sep = ""), #' "cons") #' y1 <- y[(nlags + 1):nrow(y),] #' ee <- matrix(data = NA, nrow = nrow(y1), ncol = ncol(y1)) #' for (i in 1:ncol(y1)) { #' xx <- cbind(x1[, seq(from = i, to = (ncol(x1) - 1), by = ncol(y1))], 1) #' yy <- matrix(data = y1[, i], ncol = 1) #' phi <- solve(t(xx) %*% xx, t(xx) %*% yy) #' ee[, i] <- yy - (xx %*% phi) #' } #' somega <- (t(ee) %*% ee) / nrow(ee) #' lambda0 <- 0.2 #' lambda1 <- 1 #' lambda3 <- 100 #' v1 <- matrix(data = (1:nlags), nrow = nlags, ncol = 1) #' v1 <- v1^((-2) * lambda1) #' v2 <- matrix(data = diag(solve(diag(diag(somega)))), ncol = 1) #' v3 <- kronecker(v1, v2) #' v3 <- (lambda0^2) * rbind(v3, (lambda3^2)) #' v3 <- 1 / v3 #' pP_sig <- diag(x = 1, nrow = nrow(v3), ncol = nrow(v3)) #' diag(pP_sig) <- v3 #' #' # Confidence in long-run restriction priors #' pR_sig <- #' array(data = 0, #' dim = c(((nlags * ncol(y)) + 1), #' ((nlags * ncol(y)) + 1), #' ncol(y))) #' Ri <- #' cbind(kronecker(matrix(data = 1, nrow = 1, ncol = nlags), #' matrix(data = c(1, 0), nrow = 1)), #' 0) #' pR_sig[, , 2] <- (t(Ri) %*% Ri) / 0.1 #' #' # Confidence in priors for D #' kappa1 <- matrix(data = 2, nrow = 1, ncol = ncol(y)) #' #' # Set graphical parameters #' par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif", #' mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1) #' #' # Estimate the parameters of the model #' results1 <- #' BH_SBVAR(y = y, nlags = nlags, pA = pA, pP = pP, pP_sig = pP_sig, #' pR_sig = pR_sig, kappa1 = kappa1, itr = itr, burn = burn, #' thin = thin, cri = cri) #' #' irf <- IRF(results = results1, h = h, acc = acc, cri = cri) #' #' # Plot impulse responses #' varnames <- colnames(USLMData)[2:3] #' shocknames <- c("Labor Demand","Labor Supply") #' irf_results <- #' IRF_Plots(results = irf, varnames = varnames, #' shocknames = shocknames) IRF_Plots <- function(results, varnames, shocknames = NULL, xlab = NULL, ylab = NULL) { #test arguments test <- plot_funs_args_check(results = results, xlab = xlab, ylab = ylab) if (test != "pass") { stop(test) } if (!is.vector(varnames) || (!is.character(varnames)) || (length(varnames) != sqrt(dim(results)[2]))) { stop(paste("varnames: Must be a 
character vector containing the names of the endogenous variables.", sep = "")) } if (is.null(shocknames)) { shocknames <- varnames } if (!is.vector(shocknames) || (!is.character(shocknames)) || (length(shocknames) != sqrt(dim(results)[2]))) { stop(paste("shocknames: Must be a character vector containing the names of the shocks.", sep = "")) } if (is.null(xlab)) { xlab <- "" } if (is.null(ylab)) { ylab <- "" } irf <- results nvar <- round(sqrt(dim(results)[2])) xticks <- floor(dim(results)[1] / 4) #store results from impulse responses irf_results <- vector(mode = "list", length = (nvar * nvar)) for (j in 1:nvar) { for (i in 1:nvar) { #impulse responses names(irf_results)[((nvar * (j - 1)) + i)] <- dimnames(irf)[[2]][((nvar * (j - 1)) + i)] irf_results[[((nvar * (j - 1)) + i)]] <- irf[, ((nvar * (j - 1)) + i), ] #impulse response plots mat_ts <- stats::ts(cbind(0, irf_results[[((nvar * (j - 1)) + i)]])) colnames(mat_ts) <- c("Series1", "Series2", "Series3", "Series4") stats::ts.plot(mat_ts, col = c("black", "red", "black", "red"), gpars = list(xlab = xlab, ylab = ylab, xaxs = "i", yaxs = "r", xaxt = "n", lty = c(1, 2, 1, 2))) graphics::title(main = paste("Response of ", varnames[i], " to ", shocknames[j], sep = ""), col.main = "black") graphics::axis(side = 1, at = seq(from = 1, to = nrow(mat_ts), by = xticks), labels = seq(from = 0, to = (nrow(mat_ts) - 1),by = xticks)) } } return(irf_results) }
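# Illustrative follow-up (not part of the original source; assumes the objects
# from the roxygen example above): IRF_Plots() returns a named list with one
# matrix per response/shock pair, e.g.
#   head(irf_results[["Res_Wage_Shk_Employment"]])
# shows the lower band, median, and upper band of the response of Wage to the
# Employment-equation shock at each horizon.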
/scratch/gouwar.j/cran-all/cranData/BHSBVAR/R/IRF.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #' @useDynLib BHSBVAR, .registration = TRUE #' @keywords internal prior_nonc_t <- function(a1, p1, sigma1, nu, lam1) { .Call(`_BHSBVAR_prior_nonc_t`, a1, p1, sigma1, nu, lam1) } #' @useDynLib BHSBVAR, .registration = TRUE #' @keywords internal prior_t_n <- function(a1, p1, sigma1, nu) { .Call(`_BHSBVAR_prior_t_n`, a1, p1, sigma1, nu) } #' @useDynLib BHSBVAR, .registration = TRUE #' @keywords internal prior_t_p <- function(a1, p1, sigma1, nu) { .Call(`_BHSBVAR_prior_t_p`, a1, p1, sigma1, nu) } #' @useDynLib BHSBVAR, .registration = TRUE #' @keywords internal prior_t <- function(a1, p1, sigma1, nu) { .Call(`_BHSBVAR_prior_t`, a1, p1, sigma1, nu) } #' @useDynLib BHSBVAR, .registration = TRUE #' @keywords internal prior_beta <- function(a1, sh1, sh2) { .Call(`_BHSBVAR_prior_beta`, a1, sh1, sh2) } #' @useDynLib BHSBVAR, .registration = TRUE #' @keywords internal prior_ibeta <- function(a1, sh1, sh2) { .Call(`_BHSBVAR_prior_ibeta`, a1, sh1, sh2) } #' @useDynLib BHSBVAR, .registration = TRUE #' @keywords internal sum_log_prior_densities <- function(A_test, pA, pdetA, pH) { .Call(`_BHSBVAR_sum_log_prior_densities`, A_test, pA, pdetA, pH) } #' @useDynLib BHSBVAR, .registration = TRUE #' @keywords internal log_likelihood_function <- function(A_test, kappa1, y1, omega, zeta_test, somega) { .Call(`_BHSBVAR_log_likelihood_function`, A_test, kappa1, y1, omega, zeta_test, somega) } #' @useDynLib BHSBVAR, .registration = TRUE #' @keywords internal hd_estimates <- function(A_chain, B_chain, y1, x1, nlags, ci) { .Call(`_BHSBVAR_hd_estimates`, A_chain, B_chain, y1, x1, nlags, ci) } #' @useDynLib BHSBVAR, .registration = TRUE #' @keywords internal irf_estimates <- function(A_chain, B_chain, nlags, h, acc, ci) { .Call(`_BHSBVAR_irf_estimates`, A_chain, B_chain, nlags, h, acc, ci) } #' @useDynLib BHSBVAR, .registration = TRUE #' @keywords internal fevd_estimates <- function(A_chain, B_chain, D_chain, nlags, h, acc, ci) { .Call(`_BHSBVAR_fevd_estimates`, A_chain, B_chain, D_chain, nlags, h, acc, ci) } #' @useDynLib BHSBVAR, .registration = TRUE #' @keywords internal MAIN <- function(y1, x1, omega, somega, nlags, pA, pdetA, pH, pP, pP_sig, pR_sig, kappa1, A_start, itr, burn, thin, scale1, ci) { .Call(`_BHSBVAR_MAIN`, y1, x1, omega, somega, nlags, pA, pdetA, pH, pP, pP_sig, pR_sig, kappa1, A_start, itr, burn, thin, scale1, ci) }
/scratch/gouwar.j/cran-all/cranData/BHSBVAR/R/RcppExports.R
#' @docType data #' @title U.S. Labor Market Data #' @description Quarterly U.S. labor market time-series data. These data are the data used in Baumeister and Hamilton (2015). #' @source Dr. Christiane Baumeister's website \href{https://sites.google.com/site/cjsbaumeister/}{https://sites.google.com/site/cjsbaumeister/}. #' @source Dr. James D. Hamilton's website \href{https://econweb.ucsd.edu/~jhamilton/}{https://econweb.ucsd.edu/~jhamilton/}. #' @usage data(USLMData) #' @format Data frame object that includes "Date", "Wage", and "Employment" variables. These data are the percent change in U.S. real wage and employment and were created by taking the difference of the natural log of U.S. real wage and employment levels and multiplying by 100. #' @keywords datasets #' @references Baumeister, C., & Hamilton, J.D. (2015). Sign restrictions, structural vector autoregressions, and useful prior information. \emph{Econometrica}, 83(5), 1963-1999. #' @examples #' data(USLMData) "USLMData"
/scratch/gouwar.j/cran-all/cranData/BHSBVAR/R/USLMData.R
### R code from vignette source 'BHSBVAR.Rnw' ################################################### ### code chunk number 1: Setup ################################################### knitr::opts_knit$set(self.contained = TRUE, concordance = TRUE) knitr::opts_chunk$set(fig.path = "fig/", prompt = TRUE) ################################################### ### code chunk number 2: Data ################################################### rm(list = ls()) library(BHSBVAR) set.seed(123) data(USLMData) y0 <- matrix(data = c(USLMData$Wage, USLMData$Employment), ncol = 2) y <- y0 - (matrix(data = 1, nrow = nrow(y0), ncol = ncol(y0)) %*% diag(x = colMeans(x = y0, na.rm = FALSE, dims = 1))) colnames(y) <- c("Wage", "Employment") ################################################### ### code chunk number 3: Inputs ################################################### nlags <- 8 itr <- 200000 burn <- 0 thin <- 20 acc <- TRUE h <- 20 cri <- 0.95 ################################################### ### code chunk number 4: pA ################################################### pA <- array(data = NA, dim = c(ncol(y), ncol(y), 8)) pA[, , 1] <- c(0, NA, 0, NA) pA[, , 2] <- c(1, NA, -1, NA) pA[, , 3] <- c(0.6, 1, -0.6, 1) pA[, , 4] <- c(0.6, NA, 0.6, NA) pA[, , 5] <- c(3, NA, 3, NA) pA[, , 6] <- c(NA, NA, NA, NA) pA[, , 7] <- c(NA, NA, 1, NA) pA[, , 8] <- c(2, NA, 2, NA) ################################################### ### code chunk number 5: pP_pP_sig ################################################### pP <- matrix(data = 0, nrow = ((nlags * ncol(pA)) + 1), ncol = ncol(pA)) pP[1:nrow(pA), 1:ncol(pA)] <- diag(x = 1, nrow = nrow(pA), ncol = ncol(pA)) x1 <- matrix(data = NA, nrow = (nrow(y) - nlags), ncol = (ncol(y) * nlags)) for (k in 1:nlags) { x1[, (ncol(y) * (k - 1) + 1):(ncol(y) * k)] <- y[(nlags - k + 1):(nrow(y) - k),] } x1 <- cbind(x1, 1) colnames(x1) <- c(paste(rep(colnames(y), nlags), "_L", sort(rep(seq(from = 1, to = nlags, by = 1), times = ncol(y)), decreasing = FALSE), sep = ""), "cons") y1 <- y[(nlags + 1):nrow(y),] ee <- matrix(data = NA, nrow = nrow(y1), ncol = ncol(y1)) for (i in 1:ncol(y1)) { xx <- cbind(x1[, seq(from = i, to = (ncol(x1) - 1), by = ncol(y1))], 1) yy <- matrix(data = y1[, i], ncol = 1) phi <- solve(t(xx) %*% xx, t(xx) %*% yy) ee[, i] <- yy - (xx %*% phi) } somega <- (t(ee) %*% ee) / nrow(ee) lambda0 <- 0.2 lambda1 <- 1 lambda3 <- 100 v1 <- matrix(data = (1:nlags), nrow = nlags, ncol = 1) v1 <- v1^((-2) * lambda1) v2 <- matrix(data = diag(solve(diag(diag(somega)))), ncol = 1) v3 <- kronecker(v1, v2) v3 <- (lambda0^2) * rbind(v3, (lambda3^2)) v3 <- 1 / v3 pP_sig <- diag(x = c(v3), nrow = nrow(v3), ncol = nrow(v3)) ################################################### ### code chunk number 6: pR_pR_sig_kappa1 ################################################### pR_sig <- array(data = 0, dim = c(((nlags * ncol(y)) + 1), ((nlags * ncol(y)) + 1), ncol(y))) Ri <- cbind(kronecker(matrix(data = 1, nrow = 1, ncol = nlags), matrix(data = c(1, 0), nrow = 1)), 0) pR_sig[, , 2] <- (t(Ri) %*% Ri) / 0.1 kappa1 <- matrix(data = 2, nrow = 1, ncol = ncol(y)) ################################################### ### code chunk number 7: Model ################################################### par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif", mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1) results1 <- BH_SBVAR(y = y, nlags = nlags, pA = pA, pP = pP, pP_sig = pP_sig, pR_sig = pR_sig, kappa1 = kappa1, itr = itr, burn = burn, thin = thin, cri = cri) 
################################################### ### code chunk number 8: IRF_plots ################################################### irf <- IRF(results = results1, h = h, acc = acc, cri = cri) varnames <- colnames(USLMData)[2:3] shocknames <- c("Labor Demand","Labor Supply") par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif", mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1) irf_results <- IRF_Plots(results = irf, varnames = varnames, shocknames = shocknames) ################################################### ### code chunk number 9: FEVD_plots ################################################### fevd <- FEVD(results = results1, h = h, acc = acc, cri = cri) varnames <- colnames(USLMData)[2:3] shocknames <- c("Labor Demand","Labor Supply") par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif", mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1) fevd_results <- FEVD_Plots(results = fevd, varnames = varnames, shocknames = shocknames) ################################################### ### code chunk number 10: HD_plots ################################################### hd <- HD(results = results1, cri = cri) varnames <- colnames(USLMData)[2:3] shocknames <- c("Labor Demand","Labor Supply") freq <- 4 start_date <- c(floor(USLMData[(nlags + 1), 1]), (floor(((USLMData[(nlags + 1), 1] %% 1) * freq)) + 1)) par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif", mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1) hd_results <- HD_Plots(results = hd, varnames = varnames, shocknames = shocknames, freq = freq, start_date = start_date) ################################################### ### code chunk number 11: Dist_plots ################################################### A_titles <- matrix(data = NA_character_, nrow = dim(pA)[1], ncol = dim(pA)[2]) A_titles[1, 1] <- "Wage Elasticity of Labor Demand" A_titles[1, 2] <- "Wage Elasticity of Labor Supply" par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif", mfrow = c(1, 2), mar = c(2, 2.2, 2, 1), las = 1) Dist_Plots(results = results1, A_titles = A_titles) ################################################### ### code chunk number 12: Density1 (eval = FALSE) ################################################### ## density <- ## dt(x = ((a1 - p1) / sigma1), df = nu, ncp = 0, log = FALSE) / sigma1 ################################################### ### code chunk number 13: Density2 (eval = FALSE) ################################################### ## density <- ## dt(x = ((a1 - p1) / sigma1), df = nu, ncp = lam1, log = FALSE) / sigma1 ################################################### ### code chunk number 14: Density3 (eval = FALSE) ################################################### ## density <- ## dt(x = ((a1 - p1) / sigma1), df = nu, ncp = 0, log = FALSE) / ## (sigma1 * ## (1 - pt(q = ((-p1) / sigma1), df = nu, ncp = 0, lower.tail = TRUE, ## log.p = FALSE))) ################################################### ### code chunk number 15: Density5 (eval = FALSE) ################################################### ## density <- dt(x = ((a1 - p1) / sigma1), df = nu, ncp = 0, log = FALSE) / ## (sigma1 * pt(q = ((-p1) / sigma1), df = nu, ncp = 0, lower.tail = TRUE, ## log.p = FALSE)) ################################################### ### code chunk number 16: Density6 (eval = FALSE) ################################################### ## density <- 0 ## if (a1 >= 1) { ## density <- exp( ## ((sh2 - 1) * log((a1 - 1))) + ## (((-1) * (sh2 + sh1)) * log((1 + (a1 - 1)))) - ## log(beta(sh2, sh1)) ## ) ## } 
################################################### ### code chunk number 17: Density4 (eval = FALSE) ################################################### ## density <- dbeta(x = a1, shape1 = sh1, shape2 = sh2, ncp = 0, log = FALSE)
/scratch/gouwar.j/cran-all/cranData/BHSBVAR/inst/doc/BHSBVAR.R
abfun = function(m, v){ a = m^2/v + 2 b = m^3/v + m return(list(a=a, b=b)) }
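# Quick sanity check (illustrative, not part of the original source): abfun()
# moment-matches the shape (a) and rate (b) of an inverse-gamma distribution
# to a target mean m and variance v, using mean = b/(a-1) and
# variance = b^2/((a-1)^2*(a-2)).
#   abfun(1, 0.5)                   # list(a = 4, b = 3)
#   3/(4 - 1)                       # mean check: 1
#   3^2/((4 - 1)^2*(4 - 2))         # variance check: 0.5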
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/abfun.R
alpha.u = function(nu, a0, b0, H){ M = length(nu) a = a0 + M*(H-1) b = b0 - sum(unlist(lapply(nu, function(x){log(1-x[-H])}))) return(rgamma(1, a, b)) }
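# Illustrative example (assumed values, not from the original source):
# alpha.u() draws the Dirichlet-process concentration parameter from its
# conditional gamma posterior given the plate-level stick-breaking weights nu.
#   nu <- list(rep(0.5, 3), rep(0.5, 3))  # two plates, H = 3 sticks each
#   set.seed(1)
#   alpha.u(nu, a0 = 1, b0 = 1, H = 3)    # a draw from Gamma(5, 1 - 4*log(0.5))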
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/alpha.u.R
b.u = function(hatpai){ return(rbinom(n=length(hatpai), size=1, prob=hatpai)) }
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/b.u.R
bhts = function(Z, iters, H, K, mu00=NULL, mu10=NULL, a.alpha, b.alpha, a.tau, b.tau, pnorm=FALSE, s=NULL, store=FALSE){ if (!is.null(s)) set.seed(s) if (pnorm) Z = lapply(Z, function(x){(x-mean(x))/sd(x)}) a0.alpha = a.alpha a1.alpha = a.alpha b0.alpha = b.alpha b1.alpha = b.alpha a0.tau = a.tau a1.tau = a.tau b0.tau = b.tau b1.tau = b.tau burn = floor(iters/2) sampl = 1 M = length(Z) n = unlist(lapply(Z, length)) z = unlist(Z) a = abfun(var(z), 10^-4)[["a"]] b = abfun(var(z), 10^-4)[["b"]] a0 = a a1 = a b0 = b b1 = b if (is.null(mu00) | is.null(mu10)){ mu00 = mean(z) / 2 mu10 = 3 * mu00 } # parameters to be saved b.s = rep(0, sum(n)) hatpai.s = rep(0, sum(n)) # initialize parameters a.pai = 0.5*sum(n) b.pai = 0.5*sum(n) sigma0 = 1/rgamma(n=K, shape=a0, rate=b0) mu0 = rnorm(K, mu00, 1) sigma1 = 1/rgamma(n=K, shape=a1, rate=b1) mu1 = rnorm(K, mu10, 1) pai = rbeta(1, a.pai, b.pai) hatpai = runif(sum(n)) b = rbinom(n=length(hatpai), size=1, prob=hatpai) ncs = c(0,cumsum(n)) b.aux = lapply(2:length(ncs), function(x){b[(ncs[x-1]+1):ncs[x]]}) ih0 = lapply(n, function(x){sample(H, x, replace=TRUE)}) ih1 = lapply(n, function(x){sample(H, x, replace=TRUE)}) hk0 = matrix(sample(K, M*H, replace=TRUE), M, H) hk1 = matrix(sample(K, M*H, replace=TRUE), M, H) alpha0 = rgamma(1, 1, 1) alpha1 = rgamma(1, 1, 1) tau0 = rgamma(1, 1, 1) tau1 = rgamma(1, 1, 1) nu.h0 = lapply(ih0, nu.u, alpha0, H) nu.h1 = lapply(ih1, nu.u, alpha1, H) nu.k0 = nu.u(hk0, tau0, K) nu.k1 = nu.u(hk1, tau1, K) ph0 = lapply(nu.h0, lambda.u) ph1 = lapply(nu.h1, lambda.u) pk0 = lambda.u(nu.k0) pk1 = lambda.u(nu.k1) ik0 = unlist(lapply(1:M, function(x){hk0[x,ih0[[x]]]})) ik1 = unlist(lapply(1:M, function(x){hk1[x,ih1[[x]]]})) h.pr0 = h.pr.u(z[b==0], unlist(ih0)[b==0], mu0, sigma0, pk0, K, H, unlist(lapply(b.aux, function(x){sum(1-x)}))) h.pr1 = h.pr.u(z[b==1], unlist(ih1)[b==1], mu1, sigma1, pk1, K, H, unlist(lapply(b.aux, sum))) z.pr0 = z.pr.u(z, as.vector(t(hk0)), mu0, sigma0, unlist(ph0), H, n) z.pr1 = z.pr.u(z, as.vector(t(hk1)), mu1, sigma1, unlist(ph1), H, n) if (store){ # variables to be stored sigma0.store = matrix(rep(0, K*iters), iters, K) mu0.store = sigma0.store sigma1.store = sigma0.store mu1.store = sigma0.store pk0.store = sigma0.store pk1.store = sigma0.store tau1.store = rep(0, iters) tau0.store = tau1.store nu.k1.store = matrix(rep(0, K*iters), iters, K) nu.k0.store = matrix(rep(0, K*iters), iters, K) } iter = 1 stop = FALSE while (!stop){ if (iter%%10==0){ cat("iter=", iter, ", ", sep="") if (iter%%100==0) cat("\n") } if (store){ sigma0.store[iter,] = sigma0 sigma1.store[iter,] = sigma1 mu0.store[iter,] = mu0 mu1.store[iter,] = mu1 pk0.store[iter,] = pk0 pk1.store[iter,] = pk1 tau1.store[iter] = tau1 tau0.store[iter] = tau0 nu.k1.store[iter,] = nu.k1 nu.k0.store[iter,] = nu.k0 } # update pai pai = pai.u(b, a.pai, b.pai) # update hatpai hatpai = hatpai.u(z, as.vector(t(hk1)), as.vector(t(hk0)), unlist(ph1), unlist(ph0), sigma1, sigma0, mu1, mu0, pai, H, n) # update b b = b.u(hatpai) # update sigma sigma0 = sapply(1:K, sig.k.u, ik0[b==0], z[b==0], mu00, a0, b0) sigma1 = sapply(1:K, sig.k.u, ik1[b==1], z[b==1], mu10, a1, b1) # update mu mu0 = sapply(1:K, mu.k.u, ik0[b==0], z[b==0], sigma0, mu00) mu1 = sapply(1:K, mu.k.u, ik1[b==1], z[b==1], sigma1, mu10) # update z.pr z.pr0 = z.pr.u(z, as.vector(t(hk0)), mu0, sigma0, unlist(ph0), H, n) z.pr1 = z.pr.u(z, as.vector(t(hk1)), mu1, sigma1, unlist(ph1), H, n) # update ih ih0 = ind.u(z.pr0) ih1 = ind.u(z.pr1) ncs = c(0,cumsum(n)) b.aux = lapply(2:length(ncs), 
function(x){b[(ncs[x-1]+1):ncs[x]]}) # update h.pr if (sum(b==0)!=0) h.pr0 = h.pr.u(z[b==0], ih0[b==0], mu0, sigma0, pk0, K, H, unlist(lapply(b.aux, function(x){sum(1-x)}))) else h.pr0 = matrix(rep(1/K, M*H*K), M*H, K) if (sum(b==1)!=0) h.pr1 = h.pr.u(z[b==1], ih1[b==1], mu1, sigma1, pk1, K, H, unlist(lapply(b.aux, sum))) else h.pr1 = matrix(rep(1/K, M*H*K), M*H, K) # reorganize ih into list ih0 = lapply(2:length(ncs), function(x){ih0[(ncs[x-1]+1):ncs[x]]}) ih1 = lapply(2:length(ncs), function(x){ih1[(ncs[x-1]+1):ncs[x]]}) # update hk hk0 = matrix(ind.u(h.pr0), M, H, byrow=TRUE) hk1 = matrix(ind.u(h.pr1), M, H, byrow=TRUE) # update ik ik0 = unlist(lapply(1:M, function(x){hk0[x,ih0[[x]]]})) ik1 = unlist(lapply(1:M, function(x){hk1[x,ih1[[x]]]})) # update nu.h nu.h0 = lapply(1:M, function(x){nu.u(ih0[[x]][b.aux[[x]]==0], alpha0, H)}) nu.h1 = lapply(1:M, function(x){nu.u(ih1[[x]][b.aux[[x]]==1], alpha1, H)}) # update nu.k nu.k0 = nu.u(hk0, tau0, K) nu.k1 = nu.u(hk1, tau1, K) # update alpha alpha0 = alpha.u(nu.h0, a0.alpha, b0.alpha, H) alpha1 = alpha.u(nu.h1, a1.alpha, b1.alpha, H) # update tau tau0 = tau.u(nu.k0, a0.tau, b0.tau) tau1 = tau.u(nu.k1, a1.tau, b1.tau) # update ph ph0 = lapply(nu.h0, lambda.u) ph1 = lapply(nu.h1, lambda.u) # update pk pk0 = lambda.u(nu.k0) pk1 = lambda.u(nu.k1) if (iter > burn & iter%%sampl==0){ b.s = b.s + b hatpai.s = hatpai.s + hatpai } if (iter == iters) stop = TRUE else iter = iter + 1 } sampl.num = as.integer((iters-burn)/sampl) hatpai = hatpai.s / sampl.num ncs = c(0,cumsum(n)) hatpai = lapply(2:length(ncs), function(x){hatpai[(ncs[x-1]+1):ncs[x]]}) hatpai = lapply(1:length(n), function(x){v=hatpai[[x]]; names(v)=names(Z[[x]]); return(v);}) names(hatpai) = names(Z) if (store){ dat.store = list(sigma0=sigma0.store, sigma1 = sigma1.store, mu0 = mu0.store, mu1 = mu1.store, pk0 = pk0.store, pk1 = pk1.store, tau1 = tau1.store, tau0 = tau0.store, nu.k1 = nu.k1.store, nu.k0 = nu.k0.store) return(list(hatpai=hatpai, dat.store=dat.store)) } else return(list(hatpai=hatpai)) }
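# Typical call (mirrors the package vignette; `Z` is assumed to be a named list
# of plate vectors such as the one returned by data.create()):
#   b.est <- bhts(Z, iters = 7000, H = 10, K = 10, a.alpha = 10, b.alpha = 5,
#                 a.tau = 10, b.tau = 5, s = 1234)
#   hatpai <- unlist(b.est[["hatpai"]])  # posterior hit probability per compound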
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/bhts.R
bhts2HTML = function(dat, dir, fname, title=NULL, bgcolor="#BBBBEE"){ dat = data.frame(compound=rownames(dat), dat) rownames(dat) = NULL target = HTMLInitFile(dir, filename=fname, Title=title, BackGroundColor=bgcolor) HTML(xtable(dat), file=target) HTMLEndFile() }
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/bhts2HTML.R
data.create = function(N, nr, nc, M, p, s=NULL, covrow=NULL, covcol=NULL, c=0.0001, mat=FALSE){ set.seed(s) nm = N*M mscale = c(1, 1.2, 1.4, 1.6) vscale = c(1, 1.1, 1.2, 1.3) mean00 = 0.1 mean0 = rep(mean00, 4) * mscale var00 = 0.01 var0 = rep(var00, 4) * vscale mean10 = 0.2 mean1 = rep(mean10, 4) * mscale var10 = 0.002 var1 = rep(var10, 4) * vscale hitpr = rep(1/4, 4) B = rbinom(n=N*M, size=1, prob=p) I = sapply(1:nm, function(x){which(rmultinom(1,1,hitpr)==1)}) Z = sapply(1:nm, function(x){if(B[x]==0) return(rlnorm(1, lg.mu.sig(mean0[I[x]],var0[I[x]])[["mu"]], lg.mu.sig(mean0[I[x]],var0[I[x]])[["sig"]])) else return(rlnorm(1, lg.mu.sig(mean1[I[x]],var1[I[x]])[["mu"]], lg.mu.sig(mean1[I[x]],var1[I[x]])[["sig"]]))}) ncs = c(0,cumsum(rep(N, M))) Z = lapply(2:length(ncs), function(x){Z[(ncs[x-1]+1):ncs[x]]}) Z = lapply(Z, function(x){d=matrix(x, nr, nc, byrow=TRUE); rownames(d)=LETTERS[seq(1,nrow(d))]; colnames(d)=seq(1,ncol(d)); return(d);}) I = lapply(2:length(ncs), function(x){I[(ncs[x-1]+1):ncs[x]]}) I = lapply(I, function(x){d=matrix(x, nr, nc, byrow=TRUE); rownames(d)=LETTERS[seq(1,nrow(d))]; colnames(d)=seq(1,ncol(d)); return(d);}) names(I) = paste("Plate", seq(1, length(Z)), sep="") I = lapply(I, function(x){d=as.vector(x); names(d)=sapply(colnames(x), function(y){paste(rownames(x), y, sep="")}); return(d);}) B = lapply(2:length(ncs), function(x){B[(ncs[x-1]+1):ncs[x]]}) B = lapply(B, function(x){d=matrix(x, nr, nc, byrow=TRUE); rownames(d)=LETTERS[seq(1,nrow(d))]; colnames(d)=seq(1,ncol(d)); return(d);}) names(B) = paste("Plate", seq(1, length(Z)), sep="") B = lapply(B, function(x){d=as.vector(x); names(d)=sapply(colnames(x), function(y){paste(rownames(x), y, sep="")}); return(d);}) ### incorporate noise if (!is.null(covrow) & !is.null(covcol)){ set.seed(s) covrow_chol = chol(covrow) covcol_chol = chol(covcol) dnoise = lapply(1:M, function(x){matrix(rnorm(nr*nc), nr, nc)}) dnoise = lapply(dnoise, function(x){c*covrow_chol%*%x%*%covcol_chol}) dnoise = lapply(dnoise, function(x){d=as.vector(x); names(d)=sapply(colnames(x), function(y){paste(rownames(x), y, sep="")}); return(d);}) #add to compound data Z = lapply(1:M, function(x){Z[[x]]+dnoise[[x]]}) } names(Z) = paste("Plate", seq(1, length(Z)), sep="") if (!mat) Z = lapply(Z, function(x){d=as.vector(x); names(d)=sapply(colnames(x), function(y){paste(rownames(x), y, sep="")}); return(d);}) return(list(Z=Z, I=I, B=B)) }
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/data.create.R
fdr.r = function(r, hatpai, fdr){
  res = sapply(hatpai, function(x){ifelse(x>r,1,0)})
  res = sum(res*(1-hatpai)) / sum(res)
  # res is NaN (0/0) when no compound exceeds the threshold r
  if (!is.nan(res)) return(res-fdr) else return(1)
}
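# Worked example (assumed values, not from the original source): with posterior
# hit probabilities hatpai = c(0.9, 0.8, 0.2), the threshold r = 0.5 calls the
# first two compounds as hits, giving an estimated FDR of
# ((1-0.9) + (1-0.8))/2 = 0.15, so
#   fdr.r(0.5, c(0.9, 0.8, 0.2), fdr = 0.05)   # returns 0.15 - 0.05 = 0.10
# r.fdr() finds the threshold where this function crosses zero via uniroot().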
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/fdr.r.R
h.pr.u = function(z, ih, mu, sigma, pk, K, H, n){ M = length(n) out <- .Call("stick_multnorm_h", z, ih-1, pk, sigma, mu, n, H, PACKAGE="BHTSpack") return(matrix(out, H*M, K, byrow=TRUE)) }
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/h.pr.u.R
hatpai.u = function(z, hk1, hk0, ph1, ph0, sigma1, sigma0, mu1, mu0, pai, H, n){ sigma1 = sigma1[hk1] mu1 = mu1[hk1] sigma0 = sigma0[hk0] mu0 = mu0[hk0] out <- .Call("hat_pai", z, ph1, ph0, mu1, mu0, sigma1, sigma0, pai, n, H, PACKAGE="BHTSpack") return(out) }
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/hatpai.u.R
ind.u = function(pr){ n = nrow(pr) K = ncol(pr) output = rep(0, n) out<-.C("multinomind", as.double(t(pr)), as.integer(n), as.integer(K), as.integer(output), PACKAGE="BHTSpack") return(out[[4]]) }
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/ind.u.R
lambda.u = function(nu){ H = length(nu) ph = rep(0, H) ph <- .C("lambda", as.double(nu), as.integer(H), as.double(ph), PACKAGE="BHTSpack")[[3]] return(ph) }
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/lambda.u.R
lg.mu.sig = function(m, v){ mu = log(m^2/sqrt(m^2+v)) sig = sqrt(log((m^2+v)/m^2)) return(list(mu=mu, sig=sig)) }
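# Quick check (illustrative, not part of the original source): lg.mu.sig()
# converts a target mean m and variance v on the raw scale into the meanlog
# and sdlog parameters of a lognormal distribution; this is how data.create()
# parameterizes its rlnorm() draws.
#   p <- lg.mu.sig(0.1, 0.01)
#   mean(rlnorm(1e6, p$mu, p$sig))  # approximately 0.1
#   var(rlnorm(1e6, p$mu, p$sig))   # approximately 0.01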
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/lg.mu.sig.R
mu.k.u = function(k, ik, z, sigma, mu0){ nk = sum(ik==k) z = z[ik==k] if (nk > 0){ z.mean = mean(z) mean.aux = (nk*z.mean+mu0) / (nk+1) sigma.aux = sigma[k]/(nk+1) } else{ mean.aux = mu0 sigma.aux = sigma[k] } return(rnorm(1, mean.aux, sqrt(sigma.aux))) }
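# Illustrative example (assumed values, not from the original source):
# mu.k.u() draws the mean of mixture component k from its conjugate normal
# posterior. With z = c(1, 2, 3) all assigned to component k (nk = 3, mean 2),
# prior mean mu0 = 0, and sigma[k] = 1, the draw comes from
# N((3*2 + 0)/(3 + 1), 1/(3 + 1)), i.e. N(1.5, 0.25); with nk = 0 it falls
# back to the prior N(mu0, sigma[k]).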
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/mu.k.u.R
nu.u = function(ind, tau, H){ n = length(ind) nu = rep(0, H) nu <- .C("abfun", as.integer(ind-1), as.integer(n), as.double(tau), as.integer(H), as.double(nu), PACKAGE="BHTSpack")[[5]] return(nu) }
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/nu.u.R
pai.u = function(b, a.pai, b.pai){ pai = rbeta(1, a.pai+sum(b), b.pai+sum(1-b)) return(pai) }
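# Illustrative example (assumed values, not from the original source): pai.u()
# draws the overall hit probability from its conjugate beta posterior.
#   set.seed(1)
#   pai.u(c(1, 0, 0, 1), a.pai = 1, b.pai = 1)  # a draw from Beta(1+2, 1+2)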
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/pai.u.R
ptrace = function(res, var, ndisc, nr, nc, type="trace"){ res = res[["dat.store"]][[var]] if (type=="trace"){ if (var == "pk0") var = expression(paste({lambda[k]^{(0)}})) else if (var == "pk1") var = expression(paste({lambda[k]^{(1)}})) else if (var == "mu0") var = expression(paste({mu[0][k]})) else if (var == "mu1") var = expression(paste({mu[1][k]})) else if (var == "sigma0") var = expression(paste({sigma[0][k]^2})) else if (var == "sigma1") var = expression(paste({sigma[1][k]^2})) else stop("Unknown var") } if (type=="acf"){ if (var == "pk0") var = expression(paste("ACF (", {lambda[k]^{(0)}}, ")", sep="")) else if (var == "pk1") var = expression(paste("ACF (", {lambda[k]^{(1)}}, ")", sep="")) else if (var == "mu0") var = expression(paste("ACF (", {mu[0][k]}, ")", sep="")) else if (var == "mu1") var = expression(paste("ACF (", {mu[1][k]}, ")", sep="")) else if (var == "sigma0") var = expression(paste("ACF (", {sigma[0][k]^2}, ")", sep="")) else if (var == "sigma1") var = expression(paste("ACF (", {sigma[1][k]^2}, ")", sep="")) else stop("Unknown var") } if (!is.null(ncol(res))){ ncomp = ncol(res) if (ncomp > nr*nc) stop("Insufficient Number of Plot Cells. Increase nr or nc") res = res[-seq(1, ndisc),] res = t(apply(res, 1, function(x){sort(x)})) layout(matrix(seq(1, nr*nc), nr, nc, byrow=TRUE)) if (type=="trace") invisible(sapply(1:ncomp, function(x){d=res[,x]; plot(d, type="l", xlab="iteration", ylab=var, ylim=range(res), main=paste("k =", x, sep=" "), cex.lab=1);})) if (type=="acf") invisible(sapply(1:ncomp, function(x){d=res[,x]; acf(d,lag.max=length(d),ylab=var,main=paste("k =", x, sep=" "), cex.lab=1);})) } else{ res = res[-seq(1, ndisc)] plot(res, xlab="iteration", pch=16, ylab=var) } }
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/ptrace.R
r.fdr = function(res, fdr=0.05){ hatpai = unlist(res[["hatpai"]]) if (fdr < 1-max(hatpai)) stop("minimum achievable FDR: ", 1-max(hatpai)) res = data.frame(ID=names(hatpai), hatpai) ind = sort(res[["hatpai"]], decreasing=TRUE, index.return=TRUE)[["ix"]] res = res[ind,] rownames(res) = NULL r = uniroot(fdr.r, hatpai=res[["hatpai"]], fdr=fdr, interval=c(0,max(hatpai)-10^-4))[["root"]] res = subset(res, hatpai>r) return(list(res=res, r=r)) }
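# Typical usage (mirrors the package vignette; `b.est` is assumed to be the
# list returned by bhts()):
#   out <- r.fdr(b.est, fdr = 0.05)
#   out[["r"]]         # posterior-probability threshold achieving FDR = 0.05
#   head(out[["res"]]) # compounds whose hit probability exceeds the threshold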
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/r.fdr.R
sig.k.u = function(k, ik, z, mu0, a0, b0){ nk = sum(ik==k) z = z[ik==k] if (nk > 0){ z.mean = mean(z) a = a0+nk b = b0 + 0.5*nk/(nk+1)*((mu0-z.mean)^2) + 0.5*sum((z-z.mean)^2) } else{ a = a0 b = b0 } return(1/rgamma(n=1, shape=a, rate=b)) }
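# Note (illustrative, not part of the original source): sig.k.u() draws the
# variance of mixture component k from its conjugate inverse-gamma posterior,
# 1/Gamma(a0 + nk, b0 + 0.5*nk/(nk+1)*(mu0 - zbar)^2 + 0.5*sum((z - zbar)^2));
# with nk = 0 it falls back to the 1/Gamma(a0, b0) prior.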
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/sig.k.u.R
tau.u = function(nu, a0, b0){ K = length(nu) a = a0 + K - 1 b = b0 - sum(log(1-nu[-K])) return(rgamma(1, a, b)) }
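# Note (illustrative, not part of the original source): tau.u() is the
# K-component analogue of alpha.u(), drawing the stick-breaking concentration
# for the shared component weights from
# Gamma(a0 + K - 1, b0 - sum(log(1 - nu[-K]))).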
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/tau.u.R
z.pr.u = function(z, hk, mu, sigma, ph, H, n){ sigma = sigma[hk] mu = mu[hk] out <- .Call("stick_multnorm_z", z, ph, sigma, mu, n, H, PACKAGE="BHTSpack") return(matrix(out, sum(n), H, byrow=TRUE)) }
/scratch/gouwar.j/cran-all/cranData/BHTSpack/R/z.pr.u.R
## ----echo=TRUE---------------------------------------------------------------- # loading library library(BHTSpack) # Generating a data set of 100 8x10 plates, each plate containing 80 compounds. # A total of 8000 compounds. 40% of the compounds are hits. Z = data.create(N=80, nr=8, nc=10, M=100, p=0.4, s=1234) # Generating the data set as before, but this time adding plate noise to all compounds Z = data.create(N=80, nr=8, nc=10, M=100, p=0.4, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv")) # Running the model with 200 iterations system.time(b.est <- bhts(Z[["Z"]], iters=200, H=10, K=10, a.alpha=10, b.alpha=5, a.tau=10, b.tau=5, s=1234, store=TRUE)) # Compute threshold (r) for significant hit probabilities at FDR=0.05 res = r.fdr(b.est, fdr=0.05) names(res) res[["r"]] # Significant compound hit list head(res[["res"]]) # Trace plots of hit compound activity ptrace(b.est, "mu1", ndisc=100, nr=3, nc=4) # ACF plots of hit compound activity ptrace(b.est, "mu1", ndisc=100, nr=3, nc=4, type="acf") sessionInfo() ## ----echo=TRUE---------------------------------------------------------------- # loading library library(BHTSpack) # Generating a data set of 100 8x10 plates, each plate containing 80 compounds. # A total of 8000 compounds. 40% of the compounds are hits. Z = data.create(N=80, nr=8, nc=10, M=100, p=0.4, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv")) # Running the model with 200 iterations b.est = bhts(Z[["Z"]], iters=200, H=10, K=10, a.alpha=10, b.alpha=5, a.tau=10, b.tau=5, s=1234, store=TRUE) # create an html file #bhts2HTML(res, dir="/dir/", fname="tophits") ## ----echo=TRUE---------------------------------------------------------------- library(BHTSpack) Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.4, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv")) I = unlist(Z[["I"]]) B = unlist(Z[["B"]]) Z = unlist(Z[["Z"]]) plot(density(Z[I==1 & B==0]), xlim=range(Z), ylim=c(0,6), col="black", lty=2, ylab="Density", main="", xlab="Raw Value") lines(density(Z[I==1 & B==0]), col="blue", lty=2) lines(density(Z[I==2 & B==0]), col="green", lty=2) lines(density(Z[I==3 & B==0]), col="yellow", lty=2) lines(density(Z[I==4 & B==0]), col="red", lty=2) lines(density(Z[B==0]), col="black", lty=2, lwd=2) lines(density(Z[I==1 & B==1]), col="blue", lty=3) lines(density(Z[I==2 & B==1]), col="green", lty=3) lines(density(Z[I==3 & B==1]), col="yellow", lty=3) lines(density(Z[I==4 & B==1]), col="red", lty=3) lines(density(Z[B==1]), col="black", lty=3, lwd=2) legend("topright", legend=c("Component 1", "Component 2", "Component 3", "Component 4", "All Components", "Non-Hits", "Hits"), col=c("blue", "green", "yellow", "red", "black", "black", "black"), lty=c(1, 1, 1, 1, 1, 2, 3), lwd=c(1, 1, 1, 1, 1, 2, 2)) ## ----echo=TRUE---------------------------------------------------------------- #library(BHTSpack) #library(pROC) #library(sights) #score = function(t, sdat, B){ # res = unlist(lapply(sdat, as.vector)) # ind = rep(0, length(res)) # ind[res>t] = 1 # a = auc(B, ind) # return(a) #} ### Left Column #Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.1, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv")) #system.time(b.est <- bhts(Z[["Z"]], iters=7000, H=10, K=10, a.alpha=10, b.alpha=5, a.tau=10, b.tau=5, s=1234, store=TRUE)) #hatpai = unlist(b.est[["hatpai"]]) #res = data.frame(IDmatch=names(hatpai), hatpai) #Btab = data.frame(IDmatch=names(unlist(Z[["B"]])), hitind=unlist(Z[["B"]])) #res = merge(res, Btab, by="IDmatch") #Z = 
data.create(N=80, nr=8, nc=10, M=1000, p=0.1, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"), mat=TRUE)

## Top plot
#bs = unlist(lapply(Z[["Z"]], function(x){medpolish(x)[["residuals"]]/mad(x)}))
#summary(bs)
#rs = unlist(lapply(Z[["Z"]], function(x){matrix(normR(as.vector(t(x)), 8, 10), 8, 10, byrow=TRUE)}))
#summary(rs)
#r = seq(-4, 21, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, bs, unlist(Z[["B"]]))}))
#summary(AUC)
#btmax = r[which.max(AUC)]
#plot(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="red", ylim=c(0.5, 0.75))
#abline(v=btmax, col="red", lty=2)
#axis(1, at=c(-5, 5, 10, 15))
#axis(1, at=btmax)
#r = seq(-4, 21, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, rs, unlist(Z[["B"]]))}))
#summary(AUC)
#rtmax = r[which.max(AUC)]
#lines(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="green")
#axis(1, at=c(-5, 5, 10, 15))
#axis(1, at=rtmax)
#legend("topright", legend=c("R-score", "B-score"), col=c("green", "red"), lty=c(1,1))

## Bottom plot
#rhitind = rep(0, length(rs))
#rhitind[rs>rtmax] = 1
#bhitind = rep(0, length(bs))
#bhitind[bs>btmax] = 1
#plot.roc(res[["hitind"]], res[["hatpai"]], col="blue")
#lines.roc(unlist(Z[["B"]]), bhitind, col="red")
#lines.roc(unlist(Z[["B"]]), rhitind, col="green")
#legend("bottomright", legend=c(paste("BHTS", " (AUC=", round(auc(res[["hitind"]], res[["hatpai"]]), 3), ")", sep=""), paste("R-score",
#" (AUC=", round(auc(unlist(Z[["B"]]), rhitind), 3), ")", sep=""), paste("B-score", " (AUC=", round(auc(unlist(Z[["B"]]), bhitind), 3),
#")", sep="")), col=c("blue", "green", "red"), lty=c(1,1,1))

### Middle Column
#Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.05, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
#system.time(b.est <- bhts(Z[["Z"]], iters=7000, H=10, K=10, a.alpha=10, b.alpha=5, a.tau=10, b.tau=5, s=1234, store=TRUE))
#hatpai = unlist(b.est[["hatpai"]])
#res = data.frame(IDmatch=names(hatpai), hatpai)
#Btab = data.frame(IDmatch=names(unlist(Z[["B"]])), hitind=unlist(Z[["B"]]))
#res = merge(res, Btab, by="IDmatch")
#Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.05, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"), mat=TRUE)

## Top plot
#bs = unlist(lapply(Z[["Z"]], function(x){medpolish(x)[["residuals"]]/mad(x)}))
#summary(bs)
#rs = unlist(lapply(Z[["Z"]], function(x){matrix(normR(as.vector(t(x)), 8, 10), 8, 10, byrow=TRUE)}))
#summary(rs)
#r = seq(-4, 21, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, bs, unlist(Z[["B"]]))}))
#summary(AUC)
#btmax = r[which.max(AUC)]
#plot(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="red", ylim=c(0.5, 0.75))
#abline(v=btmax, col="red", lty=2)
#axis(1, at=c(-5, 5, 10, 15))
#axis(1, at=btmax)
#r = seq(-5, 26, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, rs, unlist(Z[["B"]]))}))
#summary(AUC)
#rtmax = r[which.max(AUC)]
#lines(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="green")
#axis(1, at=c(-5, 5, 10, 15))
#axis(1, at=rtmax)
#legend("topright", legend=c("R-score", "B-score"), col=c("green", "red"), lty=c(1,1))

## Bottom plot
#rhitind = rep(0, length(rs))
#rhitind[rs>rtmax] = 1
#bhitind = rep(0, length(bs))
#bhitind[bs>btmax] = 1
#plot.roc(res[["hitind"]], res[["hatpai"]], col="blue")
#lines.roc(unlist(Z[["B"]]), bhitind, col="red")
#lines.roc(unlist(Z[["B"]]), rhitind, col="green")
#legend("bottomright", legend=c(paste("BHTS", " (AUC=", round(auc(res[["hitind"]], res[["hatpai"]]), 3), ")", sep=""), paste("R-score",
#" (AUC=", round(auc(unlist(Z[["B"]]), rhitind), 3), ")", sep=""), paste("B-score", " (AUC=", round(auc(unlist(Z[["B"]]), bhitind), 3),
#")", sep="")), col=c("blue", "green", "red"), lty=c(1,1,1))

### Right Column
#Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.01, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
#system.time(b.est <- bhts(Z[["Z"]], iters=7000, H=10, K=10, a.alpha=10, b.alpha=5, a.tau=10, b.tau=5, s=1234, store=TRUE))
#hatpai = unlist(b.est[["hatpai"]])
#res = data.frame(IDmatch=names(hatpai), hatpai)
#Btab = data.frame(IDmatch=names(unlist(Z[["B"]])), hitind=unlist(Z[["B"]]))
#res = merge(res, Btab, by="IDmatch")
#Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.01, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"), mat=TRUE)

## Top plot
#bs = unlist(lapply(Z[["Z"]], function(x){medpolish(x)[["residuals"]]/mad(x)}))
#summary(bs)
#rs = unlist(lapply(Z[["Z"]], function(x){matrix(normR(as.vector(t(x)), 8, 10), 8, 10, byrow=TRUE)}))
#summary(rs)
#r = seq(-4, 23, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, bs, unlist(Z[["B"]]))}))
#summary(AUC)
#btmax = r[which.max(AUC)]
#plot(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="red", ylim=c(0.5, 0.75))
#abline(v=btmax, col="red", lty=2)
#axis(1, at=c(-5, 5, 10, 15))
#axis(1, at=btmax)
#r = seq(-5, 28, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, rs, unlist(Z[["B"]]))}))
#summary(AUC)
#rtmax = r[which.max(AUC)]
#lines(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="green")
#axis(1, at=c(-5, 5, 10, 15))
#axis(1, at=rtmax)
#legend("topright", legend=c("R-score", "B-score"), col=c("green", "red"), lty=c(1,1))

## Bottom plot
#rhitind = rep(0, length(rs))
#rhitind[rs>rtmax] = 1
#bhitind = rep(0, length(bs))
#bhitind[bs>btmax] = 1
#plot.roc(res[["hitind"]], res[["hatpai"]], col="blue")
#lines.roc(unlist(Z[["B"]]), bhitind, col="red")
#lines.roc(unlist(Z[["B"]]), rhitind, col="green")
#legend("bottomright", legend=c(paste("BHTS", " (AUC=", round(auc(res[["hitind"]], res[["hatpai"]]), 3), ")", sep=""), paste("R-score",
#" (AUC=", round(auc(unlist(Z[["B"]]), rhitind), 3), ")", sep=""), paste("B-score", " (AUC=", round(auc(unlist(Z[["B"]]), bhitind), 3),
#")", sep="")), col=c("blue", "green", "red"), lty=c(1,1,1))

## ----echo=TRUE----------------------------------------------------------------
library(BHTSpack)
#library(pROC)

aucfunc = function(dat, B){
  Btab = data.frame(hitind=unlist(B))
  Btab = data.frame(IDmatch=rownames(Btab), Btab)
  Res = merge(dat, Btab, by="IDmatch")
  return(auc(Res[["hitind"]], Res[["hatpai"]]))
}

## Left plot
Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.1, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
mu = mean(unlist(Z[["Z"]]))
mu00 = seq(mu, 0, -mu/25)
mu10 = seq(mu, 2*mu, mu/25)
#res = lapply(1:25, function(x){print(x); res=bhts(Z[["Z"]], iters=7000, H=10, K=10, mu00[x], mu10[x], a.alpha=10,
#b.alpha=5, a.tau=10, b.tau=5, s=1234); return(res);})
#hatpai = lapply(res, function(x){unlist(x[["hatpai"]])})
#hatpai = lapply(hatpai, function(x){data.frame(IDmatch=names(x), hatpai=x)})
#AUC = unlist(lapply(hatpai, aucfunc, Z[["B"]]))
#plot((mu10-mu00)[1:25], AUC, pch=16, xlab=expression(paste(mu[1][0]-mu[0][0])), cex=1.5, cex.lab=1.5, ylim=c(0.8, 0.9))
#abline(v=mu, col="red", lty=2, lwd=2)

## Middle plot
Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.05, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
mu = mean(unlist(Z[["Z"]]))
mu00 = seq(mu, 0, -mu/25)
mu10 = seq(mu, 2*mu, mu/25)
#res = lapply(1:25, function(x){print(x); res=bhts(Z[["Z"]], iters=7000, H=10, K=10, mu00[x], mu10[x], a.alpha=10,
#b.alpha=5, a.tau=10, b.tau=5, s=1234); return(res);})
#hatpai = lapply(res, function(x){unlist(x[["hatpai"]])})
#hatpai = lapply(hatpai, function(x){data.frame(IDmatch=names(x), hatpai=x)})
#AUC = unlist(lapply(hatpai, aucfunc, Z[["B"]]))
#plot((mu10-mu00)[1:25], AUC, pch=16, xlab=expression(paste(mu[1][0]-mu[0][0])), cex=1.5, cex.lab=1.5, ylim=c(0.8, 0.9))
#abline(v=mu, col="red", lty=2, lwd=2)

## Right plot
Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.01, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
mu = mean(unlist(Z[["Z"]]))
mu00 = seq(mu, 0, -mu/25)
mu10 = seq(mu, 2*mu, mu/25)
#res = lapply(1:25, function(x){print(x); res=bhts(Z[["Z"]], iters=7000, H=10, K=10, mu00[x], mu10[x], a.alpha=10,
#b.alpha=5, a.tau=10, b.tau=5, s=1234); return(res);})
#hatpai = lapply(res, function(x){unlist(x[["hatpai"]])})
#hatpai = lapply(hatpai, function(x){data.frame(IDmatch=names(x), hatpai=x)})
#AUC = unlist(lapply(hatpai, aucfunc, Z[["B"]]))
#plot((mu10-mu00)[1:25], AUC, pch=16, xlab=expression(paste(mu[1][0]-mu[0][0])), cex=1.5, cex.lab=1.5, ylim=c(0.8, 0.9))
#abline(v=mu, col="red", lty=2, lwd=2)

## Right plot
Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.01, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
mu = mean(unlist(Z[["Z"]]))
mu00 = seq(mu, 0, -mu/25)
mu10 = seq(mu, 2*mu, mu/25)
#res = lapply(1:25, function(x){print(x); res=bhts(Z[["Z"]], iters=7000, H=10, K=10, mu00[x], mu10[x], a.alpha=10,
#b.alpha=5, a.tau=10, b.tau=5, s=1234); return(res);})
#hatpai = lapply(res, function(x){unlist(x[["hatpai"]])})
#hatpai = lapply(hatpai, function(x){data.frame(IDmatch=names(x), hatpai=x)})
#AUC = unlist(lapply(hatpai, aucfunc, Z[["B"]]))
#plot((mu10-mu00)[1:25], AUC, pch=16, xlab=expression(paste(mu[1][0]-mu[0][0])), cex=1.5, cex.lab=1.5, ylim=c(0.8, 0.9))
#abline(v=mu, col="red", lty=2, lwd=2)

## ----echo=TRUE----------------------------------------------------------------
#library(BHTSpack)
#library(pROC)
#library(sights)

#score = function(t, sdat, B){
#  res = unlist(lapply(sdat, as.vector))
#  ind = rep(0, length(res))
#  ind[res>t] = 1
#  a = auc(B, ind)
#  return(a)
#}

#Z = data.create(N=80, nr=8, nc=10, M=5000, p=0.00021, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
#system.time(b.est <- bhts(Z[["Z"]], iters=7000, H=10, K=10, a.alpha=10, b.alpha=5, a.tau=10, b.tau=5, s=1234, store=TRUE))
#hatpai = unlist(b.est[["hatpai"]])
#res = data.frame(IDmatch=names(hatpai), hatpai)
#Btab = data.frame(IDmatch=names(unlist(Z[["B"]])), hitind=unlist(Z[["B"]]))
#res = merge(res, Btab, by="IDmatch")
#Z = data.create(N=80, nr=8, nc=10, M=5000, p=0.00021, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"), mat=TRUE)

## Top plot
#bs = unlist(lapply(Z[["Z"]], function(x){medpolish(x)[["residuals"]]/mad(x)}))
#summary(bs)
#rs = unlist(lapply(Z[["Z"]], function(x){matrix(normR(as.vector(t(x)), 8, 10), 8, 10, byrow=TRUE)}))
#summary(rs)
#r = seq(-4, 30, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, bs, unlist(Z[["B"]]))}))
#summary(AUC)
#btmax = r[which.max(AUC)]
#r = seq(-5, 29, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, rs, unlist(Z[["B"]]))}))
#summary(AUC)
#rtmax = r[which.max(AUC)]
#rhitind = rep(0, length(rs))
#rhitind[rs>rtmax] = 1
#bhitind = rep(0, length(bs))
#bhitind[bs>btmax] = 1
#plot.roc(res[["hitind"]], res[["hatpai"]], col="blue")
#lines.roc(unlist(Z[["B"]]), bhitind, col="red")
#lines.roc(unlist(Z[["B"]]), rhitind, col="green")
#legend("bottomright", legend=c(paste("BHTS", " (AUC=", round(auc(res[["hitind"]], res[["hatpai"]]), 3), ")", sep=""), paste("R-score",
#" (AUC=", round(auc(unlist(Z[["B"]]), rhitind), 3), ")", sep=""), paste("B-score", " (AUC=", round(auc(unlist(Z[["B"]]), bhitind), 3),
#")", sep="")), col=c("blue", "green", "red"), lty=c(1,1,1))

## ----echo=TRUE----------------------------------------------------------------
#library(BHTSpack)
#library(pROC)
#library(sights)
#library(gdata)

#score = function(t, s, B){
#  ind = rep(0, length(s))
#  ind[s>t] = 1
#  a = auc(B, ind)
#  return(a)
#}

## It is assumed that data files are in a folder "temp"

## read data
#dat = read.csv("temp/EColiFilamentation2006_screeningdata.csv", sep="\t")
#dim(dat)

## read hit indicators
#hits = read.csv("temp/CompoundSearchResults.csv", sep=",")
#dim(hits)
#hits = data.frame(hits, hits=rep(1,nrow(hits)))
#hits = data.frame(ChembankId=hits[["ChemBank.Id"]], hitind=rep(1,nrow(hits)))

## merge with hit indicator
#dat = merge(dat, hits, by="ChembankId", all.x=TRUE)
#dim(dat)
#dat[["hitind"]][is.na(dat[["hitind"]])] = 0

## merge with map
#map = read.xls("map.xlsx")
#dat = merge(dat, map, by="AssayName")

## Organism DRC39 at 24h
#dat = subset(dat, Organism=="DRC39" & ExpTime=="24h")
#plates = unique(as.character(dat[["Plate"]]))
#unique(as.character(dat[["WellType"]]))
#dat = subset(dat, WellType=="compound-treatment")
#dat = lapply(plates, function(x){d=subset(dat, Plate==x)})
#names(dat) = plates
#l = unlist(lapply(dat, nrow))
#table(l)

## include only 352-well plates
#dat = dat[l==352]
#unique(as.character(unlist(lapply(dat, function(x){x$AssayName}))))
#sum(is.na(unlist(lapply(dat, function(x){x$RawValueA}))))
#sum(unlist(lapply(dat, function(x){x$hitind})))
#sum(!is.na(unlist(lapply(dat, function(x){x$RawValueA}))))

## sorting wells row-wise
#dat = lapply(dat, function(x){ix=sort.int(as.character(x[["Well"]]), index.return=TRUE)[["ix"]]; return(x[ix,]);})

## extracting raw values, hit indicators and well names
#Z = lapply(dat, function(x){x[["RawValueA"]]})
#B = lapply(dat, function(x){x[["hitind"]]})
#W = lapply(dat, function(x){x[["Well"]]})

## constructing plates of raw values, row-wise
#Z = lapply(Z, function(x){matrix(x, 16, 22, byrow=TRUE)})

## naming rows and columns of plates
#Z = lapply(Z, function(x){rownames(x)=LETTERS[1:16]; colnames(x)=formatC(seq(1,22),flag=0,digits=1); return(x);})

## constructing plates of indicator variables (row-wise) and vectorizing (column-wise) each plate
#B = lapply(B, function(x){as.vector(matrix(x, 16, 22, byrow=TRUE))})

## constructing plates of well names (row-wise) and vectorizing (column-wise) each plate
#W = lapply(W, function(x){as.vector(matrix(x, 16, 22, byrow=TRUE))})

## Left plot
#plot(density(unlist(Z)[unlist(B)==0]), col="blue", ylab="Density", main="", xlim=range(unlist(Z)), xlab="Raw Value")
#lines(density(unlist(Z)[unlist(B)==1]), col="red")
#legend("topright", legend=c("Non-Hits", "Hits"), col=c("blue", "red"), lty=c(1,1))

## normalizing plates of raw values
#Z = lapply(Z, function(x){(x-mean(x))/sd(x)})

## naming indicator variables
#bn = names(B)
#B = lapply(1:length(B), function(x){names(B[[x]])=W[[x]]; return(B[[x]]);})
#names(B) = bn

## construct object for B-score and R-score methods
#Zmat = list(Z=Z, B=B)

## construct object for BHTS method
## vectorizing (column-wise) each plate of raw values and naming them with well names
#zn = names(Z)
#Z = lapply(1:length(Z), function(x){d=as.vector(Z[[x]]); names(d)=W[[x]]; return(d);})
#names(Z) = zn
#Z = list(Z=Z, B=B)

## Run BHTS
#system.time(b.est <- bhts(Z[["Z"]], iters=7000, H=10, K=10, a.alpha=10, b.alpha=5, a.tau=10, b.tau=5, s=1234, store=TRUE))
#hatpai = unlist(b.est[["hatpai"]])
#res = data.frame(IDmatch=names(hatpai), hatpai)
#Btab = data.frame(IDmatch=names(unlist(Z[["B"]])), hitind=unlist(Z[["B"]]))
#res = merge(res, Btab, by="IDmatch")

## Run B-score
#bs = unlist(lapply(Zmat[["Z"]], function(x){medpolish(x)[["residuals"]]/mad(x)}))
#summary(bs)

## Middle plot
#r = seq(-31, 9, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, bs, unlist(Zmat[["B"]]))}))
#summary(AUC)
#btmax = r[which.max(AUC)]
#plot(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="red", ylim=c(0.44, 0.56))
#abline(v=btmax, col="red", lty=2)
#axis(1)
#axis(1, at=btmax)

## Run R-score
#rs = unlist(lapply(Zmat[["Z"]], function(x){matrix(normR(as.vector(t(x)), 16, 22), 16, 22, byrow=TRUE)}))
#summary(rs)
#r = seq(-45, 29, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, rs, unlist(Zmat[["B"]]))}))
#summary(AUC)
#rtmax = r[which.max(AUC)]
#lines(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="green")
#abline(v=rtmax, col="green", lty=2)
#axis(1, at=rtmax)
#legend("topright", legend=c("R-score", "B-score"), col=c("green", "red"), lty=c(1,1))

## Right plot
#rhitind = rep(0, length(rs))
#rhitind[rs>rtmax] = 1
#bhitind = rep(0, length(bs))
#bhitind[bs>btmax] = 1
#plot.roc(res[["hitind"]], res[["hatpai"]], col="blue")
#lines.roc(unlist(Zmat[["B"]]), bhitind, col="red")
#lines.roc(unlist(Zmat[["B"]]), rhitind, col="green")
#legend("bottomright", legend=c(paste("BHTS", " (AUC=", round(auc(res[["hitind"]], res[["hatpai"]]), 3), ")", sep=""), paste("R-score",
#" (AUC=", round(auc(unlist(Zmat[["B"]]), rhitind), 3), ")", sep=""), paste("B-score", " (AUC=", round(auc(unlist(Zmat[["B"]]), bhitind),
#3), ")", sep="")), col=c("blue", "green", "red"), lty=c(1,1,1))
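
## A minimal self-contained sketch (toy data, not the BHTS objects above) of the
## threshold scan used repeatedly in this file: binarize scores at each threshold,
## record the AUC, and keep the threshold that maximizes it. All names below are
## illustrative assumptions, not package data.
#library(pROC)
#set.seed(1)
#s = rnorm(200)                                            # hypothetical continuous scores
#b = rbinom(200, 1, plogis(2*s))                           # hypothetical hit indicators
#score = function(t, s, B){ind = rep(0, length(s)); ind[s>t] = 1; auc(B, ind)}
#r = seq(-3, 3, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, s, b)}))
#r[which.max(AUC)]                                         # threshold with maximal AUC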
/scratch/gouwar.j/cran-all/cranData/BHTSpack/inst/doc/BHTSpackManual.R
#########################################################################################
## BI.r
## Blinding Assessment Indices for Randomized, Controlled, Clinical Trials

## Copyright 2010 - 2020: Original 2010 R Code by Nate Mercaldo ([email protected])
## Copyright 2021 - 2022: Updates by Marc Schwartz ([email protected])

## This software is distributed under the terms of the GNU General Public License
## Version 3, June 2007.

## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation.

## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.

## You should have received a copy of the GNU General Public License
## along with this program. If not, see https://www.gnu.org/licenses/gpl-3.0.html
#########################################################################################

BI <- function(x, weights = NULL, conf.level = 0.95,
               alternative.J = c("two.sided", "less", "greater"),
               alternative.B = c("two.sided", "less", "greater"),
               group.names = c("Treatment", "Placebo")) {

  ## The format of the 3 x 2 'x' count matrix,
  ## or the 3 x 2 'weights' matrix, if specified, should be:
  ##              Treatment  Placebo
  ## Treatment       xxx       xxx
  ## Placebo         xxx       xxx
  ## Don't Know      xxx       xxx
  ## where the rows are the assignment guesses by the surveyed party,
  ## and the columns are the actual assignments

  if (!identical(dim(x), c(3L, 2L))) {
    stop("'x' must be a 3 row by 2 column integer matrix of cross-tabulated counts. At present only 3 blinding query response levels and 2 treatment arms are supported.")
  }

  ## Use default 1996 James weights for 3 x 2 unless alternative
  ## weights are specified. Check dim(weights) otherwise.
  ## Correct guesses are assigned a weight of 0
  ## Incorrect guesses are assigned a weight of 0.5
  ## Don't Know guesses are assigned a weight of 1
  ## Thus, row and column ordering is critical here
  ## and must match the above structure 1:1 for the count matrix 'x'

  if (is.null(weights)) {
    weights <- matrix(c(0, 0.5,
                        0.5, 0,
                        1, 1),
                      nrow = 3, ncol = 2, byrow = TRUE)
  } else if (!identical(dim(weights), c(3L, 2L))) {
    stop("'weights' must be a 3 row by 2 column numeric matrix specifying alternative James weights for each cell in 'x'.")
  }

  if (length(conf.level) != 1 | (conf.level <= 0) | (conf.level >= 1)) {
    stop("'conf.level' must be a scalar numeric value >0 and <1")
  }

  alternative.J <- match.arg(alternative.J)
  alternative.B <- match.arg(alternative.B)

  if (alternative.J == "two.sided") {
    alpha.J <- (1 - conf.level) / 2
    Sided.J <- "2-Sided"
  } else if (alternative.J == "less") {
    alpha.J <- 1 - conf.level
    Sided.J <- "1-Sided"
  } else if (alternative.J == "greater") {
    alpha.J <- 1 - conf.level
    Sided.J <- "1-Sided"
  }

  if (alternative.B == "two.sided") {
    alpha.B <- (1 - conf.level) / 2
    Sided.B <- "2-Sided"
  } else if (alternative.B == "less") {
    alpha.B <- 1 - conf.level
    Sided.B <- "1-Sided"
  } else if (alternative.B == "greater") {
    alpha.B <- 1 - conf.level
    Sided.B <- "1-Sided"
  }

  CI.J <- qnorm(alpha.J, lower.tail = FALSE)
  CI.B <- qnorm(alpha.B, lower.tail = FALSE)

  #########################################################################################
  ## Compute James' Blinding Index
  ## Use 'x' here

  ## First, need to check for the edge case where all positive (>0) counts in both
  ## arms are DK only, i.e. the correct and incorrect counts for each arm are 0s.
  ## Based upon the James 1996 paper and the Bang 2004 paper, this results in a
  ## fixed James index of 1, with an SE of 0. If this is the case, then skip the
  ## index code below and set fixed values for BI.est and BI.se

  if (all(x[1:2, ] == 0) & all(x[3, ] > 0)) {
    BI.est <- 1
    BI.se <- 0
  } else {
    x1 <- addmargins(x)
    P <- x1 / max(x1)
    Pdk <- P[nrow(P) - 1, ncol(P)]
    Pdo <- Pde <- v <- term1.denom <- 0

    for (i in 1:(nrow(P) - 2)) {
      for (j in 1:(ncol(P) - 1)) {
        Pdo <- Pdo + ((weights[i, j] * P[i, j]) / (1 - Pdk))
        Pde <- Pde + ((weights[i, j] * P[i, ncol(P)] * (P[nrow(P), j] - P[nrow(P) - 1, j])) / (1 - Pdk) ^ 2)
        term1.denom <- term1.denom + weights[i, j] * P[i, ncol(P)] * (P[nrow(P), j] - P[nrow(P) - 1, j])
      }
    }

    Kd <- (Pdo - Pde) / Pde
    term1.denom <- 4 * term1.denom ^ 2
    term1.num <- 0

    for (i in 1:(nrow(P) - 2)) {
      for (j in 1:(ncol(P) - 1)) {
        extra <- 0
        for (r in 1:(ncol(P) - 1)) {
          extra <- extra + (weights[r, j] * P[r, ncol(P)] + weights[i, r] * (P[nrow(P), r] - P[nrow(P) - 1, r]))
        }
        term1.num <- term1.num + ((P[i, j] * (1 - Pdk) ^ 2 * ((1 - Pdk) * weights[i, j] - (1 + Kd) * extra) ^ 2))
      }
    }

    v.1 <- term1.num / term1.denom
    v.2 <- Pdk * (1 - Pdk) - (1 - Pdk) * (1 + Kd) * (Pdk + ((1 - Pdk) * (1 + Kd)) / 4)
    v <- (v.1 + v.2) / x1[nrow(x1), ncol(x1)]

    BI.est <- (1 + Pdk + (1 - Pdk) * Kd) / 2
    BI.se <- sqrt(v)
  }

  if (alternative.J == "two.sided") {
    BI.CI.Lower <- BI.est - (CI.J * BI.se)
    BI.CI.Upper <- BI.est + (CI.J * BI.se)
  } else if (alternative.J == "less") {
    BI.CI.Lower <- 0
    BI.CI.Upper <- BI.est + (CI.J * BI.se)
  } else if (alternative.J == "greater") {
    BI.CI.Lower <- BI.est - (CI.J * BI.se)
    BI.CI.Upper <- 1
  }

  BI.James <- matrix(c(BI.est, BI.se, BI.CI.Lower, BI.CI.Upper), nrow = 1)

  #########################################################################################
  ## Compute Bang Blinding Index
  ## Use t(x) here

  x2 <- addmargins(t(x))

  ## pre-allocate vectors for loop results
  BI.est <- numeric(2)
  BI.se <- numeric(2)

  for (i in 1:(nrow(x2) - 1)) {
    BI.est[i] <- (2 * (x2[i, i] / (x2[i, 1] + x2[i, 2])) - 1) *
                 ((x2[i, 1] + x2[i, 2]) / (x2[i, 1] + x2[i, 2] + x2[i, 3]))

    BI.se[i] <- sqrt(((x2[i, 1] / x2[i, ncol(x2)]) * (1 - (x2[i, 1] / x2[i, ncol(x2)])) +
                      (x2[i, 2] / x2[i, ncol(x2)]) * (1 - (x2[i, 2] / x2[i, ncol(x2)])) +
                      2 * (x2[i, 1] / x2[i, ncol(x2)]) * (x2[i, 2] / x2[i, ncol(x2)])) / x2[i, ncol(x2)])
  }

  if (alternative.B == "two.sided") {
    BI.CI.Lower <- BI.est - (CI.B * BI.se)
    BI.CI.Upper <- BI.est + (CI.B * BI.se)
  } else if (alternative.B == "less") {
    BI.CI.Lower <- c(-1, -1)
    BI.CI.Upper <- BI.est + (CI.B * BI.se)
  } else if (alternative.B == "greater") {
    BI.CI.Lower <- BI.est - (CI.B * BI.se)
    BI.CI.Upper <- c(1, 1)
  }

  BI.Bang <- matrix(c(BI.est, BI.se, BI.CI.Lower, BI.CI.Upper), nrow = 2)

  #########################################################################################
  ## Format for output

  rownames(BI.James) <- "Overall"
  colnames(BI.James) <- c("Estimate", "Std. Error",
                          paste(conf.level * 100, "% ", c("LCL", "UCL"), " (", Sided.J, ")", sep = ""))

  rownames(BI.Bang) <- group.names
  colnames(BI.Bang) <- c("Estimate", "Std. Error",
                         paste(conf.level * 100, "% ", c("LCL", "UCL"), " (", Sided.B, ")", sep = ""))

  ## list results
  BI <- list(BI.James, BI.Bang)
  names(BI) <- c("JamesBI", "BangBI")

  ## Return list
  BI
}
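
#########################################################################################
## Example usage (hypothetical counts, for illustration only):
## rows are guesses (Treatment, Placebo, Don't Know), columns are actual assignments.
## x <- matrix(c(48, 22,
##               12, 30,
##               40, 48), nrow = 3, ncol = 2, byrow = TRUE)
## BI(x)                                             # 2-sided 95% CIs by default
## BI(x, conf.level = 0.90, alternative.B = "greater")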
/scratch/gouwar.j/cran-all/cranData/BI/R/BI.r
#' Regulation Strength Sampling Function
#'
#' Function 'A_sampling' estimates a regulation strength for each sampled binding event in C,
#' according to a posterior Gaussian distribution.
#' @param Y gene expression data matrix
#' @param C sampled TF-gene binding network
#' @param A_old regulatory strength sampled from the previous round,
#' used as a prior in current function
#' @param X sampled transcription factor activity matrix
#' @param base_line sampled gene expression baseline activity
#' @param C_prior prior TF-gene binding network
#' @param sigma_noise variance of gene expression fitting residuals
#' @param sigma_A variance of regulatory strength
#' @param sigma_baseline variance of gene expression baseline activity
#' @param sigma_X variance of transcription factor activity
#' @import stats
#' @keywords Regulation_Strength

A_sampling<-function(Y, C, A_old, X, base_line, C_prior, sigma_noise, sigma_A, sigma_baseline, sigma_X){

  Num_genes=nrow(Y)
  Num_samples=ncol(Y)
  Num_TFs=ncol(C_prior)

  A_new=matrix(0, nrow=Num_genes, ncol=Num_TFs)

  for (n in 1:Num_genes){
    for (t2 in 1:Num_TFs){
      # expected expression of gene n from all currently sampled TF effects
      temp=matrix(0, nrow=1, ncol=Num_samples)
      for (m in 1:Num_samples){
        for (t1 in 1:Num_TFs){
          temp[m]=temp[m]+A_old[n,t1]*C[n,t1]*X[t1,m]
        }
      }
      if (C[n,t2]==1){
        # remove the focal TF's contribution across all samples
        temp_c=temp-A_old[n,t2]*X[t2,]
        variance_A=Num_samples*sigma_A*sigma_noise/(sum(X[t2,]^2)*sigma_A+Num_samples*sigma_noise)
        mean_A=sum((Y[n,]-temp_c-base_line[n])*X[t2,])/sigma_noise/Num_samples*variance_A
        A_temp=rnorm(1, mean=mean_A, sd=sqrt(variance_A))
        A_new[n,t2] = A_temp
      }else A_new[n,t2]=0
    }
  }
  return(A_new)
}
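
## Minimal sketch (toy values, not package data) of the conjugate Gaussian update
## implemented above for a single regulatory strength: given the residual r after
## removing all other TF contributions and the baseline, and the focal TF's
## activity x across M samples,
##   variance = M*sigma_A*sigma_noise/(sum(x^2)*sigma_A + M*sigma_noise)
##   mean     = sum(r*x)/(sigma_noise*M)*variance
## and the strength is drawn from Normal(mean, variance).
# set.seed(1)
# M <- 10; sigma_A <- 1; sigma_noise <- 0.5
# x <- rnorm(M)                                 # focal TF activity across samples
# r <- 0.8*x + rnorm(M, sd = sqrt(sigma_noise)) # hypothetical residual expression
# v <- M*sigma_A*sigma_noise/(sum(x^2)*sigma_A + M*sigma_noise)
# m <- sum(r*x)/(sigma_noise*M)*v
# rnorm(1, mean = m, sd = sqrt(v))              # one posterior draw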
/scratch/gouwar.j/cran-all/cranData/BICORN/R/A_sampling.R
#' BICORN Algorithm Function
#'
#' Function 'BICORN' infers a posterior module-gene regulatory network by iteratively sampling regulatory strength,
#' transcription factor activity and several key model parameters.
#' @param BICORN_input this list structure contains TF symbols, gene symbols and candidate modules
#' @param L total rounds of Gibbs Sampling.
#' @param output_threshold number of rounds after which we start to record results.
#' @keywords BICORN sampling
#' @export
#' @examples
#'
#' # load in the sample data input
#' data("sample.input")
#'
#' # Data initialization (Integrate prior binding network and gene expression data)
#' BICORN_input<-data_integration(Binding_matrix = Binding_matrix, Binding_TFs = Binding_TFs,
#' Binding_genes = Binding_genes, Exp_data = Exp_data, Exp_genes = Exp_genes,
#' Minimum_gene_per_module_regulate = 2)
#'
#' # Infer cis-regulatory modules (TF combinations) and their target genes
#' BICORN_output<-BICORN(BICORN_input, L = 2, output_threshold = 1)

BICORN<-function(BICORN_input = NULL, L = 100, output_threshold = 10){

  Y=BICORN_input$expression_data

  C_prior=BICORN_input$gene_cluster_mapping%*%BICORN_input$C_candidate_clusters
  C_prior[which(C_prior>0)]=0.9
  C_prior[which(C_prior==0)]=0.1

  #*******************************known value********************************
  alpha=1
  beta=1
  sigma_A=1
  sigma_baseline=1
  sigma_X=1

  Num_genes=nrow(Y)# number of genes
  Num_samples=ncol(Y)# number of samples
  Num_TFs=ncol(C_prior)# number of TFs
  Num_clusters=nrow(BICORN_input$C_candidate_clusters)

  #**********************initialize the baseline activity********************
  base_line_old=matrix(0, nrow=1,ncol=Num_genes)
  for (n in 1:Num_genes){
    base_line_old[1,n] = rnorm(1, mean=0, sd=sqrt(sigma_baseline))
  }

  #**********************initialize binding matrix and binding strength******
  C_old=matrix(0, nrow=Num_genes, ncol=Num_TFs)
  A_old=matrix(0, nrow=Num_genes, ncol=Num_TFs)
  threshold=0.5
  for (n in 1:Num_genes){
    for (t in 1:Num_TFs){
      if (C_prior[n,t]>threshold){
        C_old[n,t]=1
        A_old[n,t]=rnorm(1, mean=0, sd=sqrt(sigma_A))
      }else{
        C_old[n,t]=0
        A_old[n,t]=0
      }
    }
  }

  #**********************initialize TF activity******************************
  X_old=matrix(0, nrow=Num_TFs, ncol=Num_samples)
  for (t in 1:Num_TFs){
    for (m in 1:Num_samples){
      X_old[t,m]=rnorm(1, mean=0, sd=sqrt(sigma_X))
    }
  }

  #*************************Gibbs sampling***********************************
  sigma_noise_summition=matrix(0, nrow=L, ncol=1)
  C_summition=matrix(0, nrow=Num_genes,ncol=Num_TFs)
  C_cluster_summition=matrix(0, nrow=Num_genes,ncol=Num_clusters)
  A_summition=matrix(0, nrow=Num_genes,ncol=Num_TFs)
  X_summition=matrix(0, nrow=Num_TFs,ncol=Num_samples)

  for (i in 1:L){

    cat(paste("Starting round", i, "of", L, "rounds of sampling\n"))

    #****sample noise variance *****************
    sigma_noise_new<-sigmanoise_sampling(Y, C_old, A_old, X_old, base_line_old, C_prior, sigma_A, sigma_baseline, sigma_X, alpha, beta)

    #****sample baseline expression ************
    base_line_new<-baseline_sampling(Y, C_old, A_old, X_old, base_line_old, C_prior, sigma_noise_new, sigma_A, sigma_baseline, sigma_X)

    #****sample binding matrix C and update A with C_old=0 if C_new=1******
    Sampled<-C_sampling_cluster(Y, C_old, A_old, X_old, base_line_new, C_prior, sigma_noise_new, sigma_A, sigma_baseline, sigma_X, BICORN_input)

    #****sample binding strength matrix A ******
    A_new<-A_sampling(Y, Sampled$C, Sampled$A, X_old, base_line_new, C_prior, sigma_noise_new, sigma_A, sigma_baseline, sigma_X)

    #****sample TFA matrix X *******************
    X_new<-X_sampling(Y, Sampled$C, A_new, X_old, base_line_new, C_prior, sigma_noise_new, sigma_A, sigma_baseline, sigma_X)

    C_old=Sampled$C
    A_old=A_new
    X_old=X_new
    base_line_old=base_line_new

    if (i>output_threshold){
      C_summition=C_summition+Sampled$C
      C_cluster_summition=C_cluster_summition+Sampled$C_cluster
      A_summition=A_summition+A_new
      X_summition=X_summition+X_new
    }
  }

  C_cluster_summition=C_cluster_summition/(L-output_threshold)
  C_summition=C_summition/(L-output_threshold)

  BICORN_output=list('Posterior_module_gene_regulatory_network'=C_cluster_summition,
                     'Posterior_TF_gene_regulatory_network'=C_summition,
                     'Modules'=BICORN_input$C_candidate_clusters,
                     'TFs'=BICORN_input$TFs,
                     'Genes'=BICORN_input$genes)

  colnames(BICORN_output$Modules) <- BICORN_output$TFs
  rownames(BICORN_output$Modules) <- c(1:nrow(BICORN_output$Modules))

  colnames(BICORN_output$Posterior_module_gene_regulatory_network) <- c(1:nrow(BICORN_output$Modules))
  rownames(BICORN_output$Posterior_module_gene_regulatory_network) <- BICORN_output$Genes

  colnames(BICORN_output$Posterior_TF_gene_regulatory_network) <- BICORN_output$TFs
  rownames(BICORN_output$Posterior_TF_gene_regulatory_network) <- BICORN_output$Genes

  return(BICORN_output)
}
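
## A short sketch (assuming the 'BICORN_output' object from the example above) of
## inspecting results: rank candidate modules by their total posterior support
## across all genes.
# module_support <- colSums(BICORN_output$Posterior_module_gene_regulatory_network)
# head(BICORN_output$Modules[order(module_support, decreasing = TRUE), ])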
/scratch/gouwar.j/cran-all/cranData/BICORN/R/BICORN.function.R
#' cis-Regulatory Module Sampling Function
#'
#' Function 'C_sampling_cluster' samples a candidate cis-regulatory module for each gene,
#' according to a discrete posterior probability distribution.
#' @param Y gene expression data matrix
#' @param C_old TF-gene binding network sampled from the previous round
#' @param A_old regulatory strength matrix sampled from the previous round
#' @param X_old transcription factor activity matrix sampled from the previous round
#' @param base_line_old gene expression baseline activity sampled from the previous round
#' @param C_prior prior TF-gene binding network
#' @param sigma_noise variance of gene expression fitting residuals
#' @param sigma_A variance of regulatory strength
#' @param sigma_baseline variance of gene expression baseline activity
#' @param sigma_X variance of transcription factor activity
#' @param BICORN_input this list structure contains TF symbols, gene symbols and candidate modules
#' @import stats
#' @keywords Module sampling

C_sampling_cluster<-function(Y, C_old, A_old, X_old, base_line_old, C_prior, sigma_noise, sigma_A, sigma_baseline, sigma_X, BICORN_input){

  #Output
  #Sampled: list structure
  #Sampled$C: sampled TF-gene regulatory network
  #Sampled$A: sampled TF-gene regulatory strength (RS) matrix
  #Sampled$C_cluster: sampled module-gene regulatory network

  Num_genes=nrow(Y)# number of genes
  Num_samples=ncol(Y)# number of samples
  Num_TFs=ncol(C_prior)# number of TFs
  Num_clusters=nrow(BICORN_input$C_candidate_clusters)

  C=C_old
  A=A_old
  C_cluster=matrix(0, nrow=Num_genes, ncol=Num_clusters)

  for (n in 1:Num_genes){#gene

    post_probability=matrix(0, nrow=1,ncol=Num_clusters)

    A_candidate=matrix(0,nrow=Num_clusters,ncol=Num_TFs)
    for (s in 1:Num_clusters){#cluster values based on previous state
      for (t1 in 1:Num_TFs){
        if (C_old[n,t1]==1 && BICORN_input$C_candidate_clusters[s,t1]==1){
          A_candidate[s,t1]=A_old[n,t1]
        }else if (C_old[n,t1]==0 && BICORN_input$C_candidate_clusters[s,t1]==1){
          A_candidate[s,t1]=0
        }else if (C_old[n,t1]==1 && BICORN_input$C_candidate_clusters[s,t1]==0){
          A_candidate[s,t1]=0
        }else{
          A_candidate[s,t1]=0
        }
      }
    }

    for (s in 1:Num_clusters){# sample clusters
      for (t2 in 1:Num_TFs){
        temp_expression=matrix(0, nrow=1, ncol=Num_samples)
        for (m in 1:Num_samples){
          temp_expression[m]=sum(A_candidate[s,]*BICORN_input$C_candidate_clusters[s,]*X_old[,m])#based on previous binding state
        }
        if (C_old[n,t2]==1 && BICORN_input$C_candidate_clusters[s,t2]==1){
          temp_expression_c=temp_expression-A_candidate[s,t2]*BICORN_input$C_candidate_clusters[s,t2]*X_old[t2,]
          variance_A=Num_samples*sigma_A*sigma_noise/(sum(X_old[t2,]^2)*sigma_A+Num_samples*sigma_noise)
          mean_A=sum((Y[n,]-temp_expression_c-base_line_old[n])*X_old[t2,])/sigma_noise/Num_samples*variance_A
          probability_binding=-log(variance_A)/2-(A_candidate[s,t2]-mean_A)^2/(2*variance_A)+log(as.numeric(C_prior[n,t2]))
        }else if (C_old[n,t2]==0 && BICORN_input$C_candidate_clusters[s,t2]==1){
          temp_expression_c=temp_expression-A_candidate[s,t2]*BICORN_input$C_candidate_clusters[s,t2]*X_old[t2,]
          variance_A=Num_samples*sigma_A*sigma_noise/(sum(X_old[t2,]^2)*sigma_A+Num_samples*sigma_noise)
          mean_A=sum((Y[n,]-temp_expression_c-base_line_old[n])*X_old[t2,])/sigma_noise/Num_samples*variance_A
          A_candidate[s,t2]=rnorm(1, mean=mean_A, sd=sqrt(variance_A))
          probability_binding=-log(variance_A)/2-(A_candidate[s,t2]-mean_A)^2/(2*variance_A)+log(as.numeric(C_prior[n,t2]))
        }else if (C_old[n,t2]==1 && BICORN_input$C_candidate_clusters[s,t2]==0){
          temp_expression_c=temp_expression
          variance_A=Num_samples*sigma_A*sigma_noise/(sum(X_old[t2,]^2)*sigma_A+Num_samples*sigma_noise)
          mean_A=sum((Y[n,]-temp_expression_c-base_line_old[n])*X_old[t2,])/sigma_noise/Num_samples*variance_A
          A_candidate[s,t2]=0
          probability_binding=-log(variance_A)/2-(0-mean_A)^2/(2*variance_A)+log(as.numeric(1-C_prior[n,t2]))
        }else {#C_old[n,t2]==0 && BICORN_input$C_candidate_clusters[s,t2]==0
          temp_expression_c=temp_expression
          variance_A=Num_samples*sigma_A*sigma_noise/(sum(X_old[t2,]^2)*sigma_A+Num_samples*sigma_noise)
          mean_A=sum((Y[n,]-temp_expression_c-base_line_old[n])*X_old[t2,])/sigma_noise/Num_samples*variance_A
          A_candidate[s,t2]=0
          probability_binding=-log(variance_A)/2-(0-mean_A)^2/(2*variance_A)+log(as.numeric(1-C_prior[n,t2]))
        }
        post_probability[s] = post_probability[s]+probability_binding#log format of probability
      }
      post_probability[s]=max(c(exp(post_probability[s]), 1e-6))
    }

    post_probability=post_probability/sum(post_probability)
    candidate_state=sample(Num_clusters, 1, prob=post_probability)#sample state according to probability

    C[n,]=BICORN_input$C_candidate_clusters[candidate_state,]
    A[n,]=A_candidate[candidate_state,]
    C_cluster[n,candidate_state]=1
  }

  Sampled=list('C'=C, 'A'=A, 'C_cluster'=C_cluster)
  return(Sampled)
}
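
## Minimal sketch (toy values) of the categorical draw used above: per-module log
## posterior weights are exponentiated (floored at 1e-6), normalized, and a module
## index is drawn with sample().
# logp <- c(-3.2, -1.1, -8.0)              # hypothetical per-module log weights
# p <- pmax(exp(logp), 1e-6)
# p <- p/sum(p)
# sample(length(p), 1, prob = p)           # index of the sampled module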
/scratch/gouwar.j/cran-all/cranData/BICORN/R/C_sampling_cluster.R
#' Transcription Factor Activity Sampling Function
#'
#' Function 'X_sampling' estimates the hidden activities of each transcription factor,
#' according to a posterior Gaussian random process.
#' @param Y gene expression data matrix
#' @param C sampled TF-gene binding network
#' @param A sampled regulatory strength matrix
#' @param X_old sampled transcription factor activity matrix from the previous round
#' @param base_line sampled gene expression baseline activity
#' @param C_prior prior TF-gene binding network
#' @param sigma_noise variance of gene expression fitting residuals
#' @param sigma_A variance of regulatory strength
#' @param sigma_baseline variance of gene expression baseline activity
#' @param sigma_X variance of transcription factor activity
#' @import stats
#' @keywords TFA sampling

X_sampling<-function(Y, C, A, X_old, base_line, C_prior, sigma_noise, sigma_A, sigma_baseline, sigma_X){

  Num_genes=nrow(Y)
  Num_samples=ncol(Y)
  Num_TFs=ncol(C_prior)

  X_new=matrix(0, nrow=Num_TFs, ncol=Num_samples)

  for (t2 in 1:Num_TFs){
    for (m in 1:Num_samples){
      temp_y=0
      for (n in 1:Num_genes){
        temp=0
        for (t1 in 1:Num_TFs){
          temp=temp+A[n,t1]*C[n,t1]*X_old[t1,m]
        }
        temp=temp-A[n,t2]*C[n,t2]*X_old[t2,m]
        temp_y=temp_y+(Y[n,m]-temp-base_line[n])*A[n,t2]*C[n,t2]
      }
      variance_X=sigma_X*sigma_noise*Num_genes/(sum(A[,t2]^2)*sigma_X+Num_genes*sigma_noise)
      mean_X=temp_y/sigma_noise/Num_genes*variance_X
      X_new[t2,m]=rnorm(1, mean=mean_X, sd=sqrt(variance_X))
    }
  }
  return(X_new)
}
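
## The update above is the same conjugate Gaussian form as in A_sampling, with the
## roles of strengths and activities swapped: for sample m of TF t, the regressor
## is the column of effective strengths A[,t]*C[,t] and the response is the
## residual expression. A toy one-draw sketch (hypothetical values):
# set.seed(1)
# G <- 20; sigma_X <- 1; sigma_noise <- 0.5
# a <- rnorm(G)                                    # effective strengths for one TF
# resid <- 0.5*a + rnorm(G, sd = sqrt(sigma_noise))
# v <- sigma_X*sigma_noise*G/(sum(a^2)*sigma_X + G*sigma_noise)
# rnorm(1, mean = sum(resid*a)/(sigma_noise*G)*v, sd = sqrt(v))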
/scratch/gouwar.j/cran-all/cranData/BICORN/R/X_sampling.R
#' Gene Baseline Expression Sampling Function
#'
#' Function 'baseline_sampling' estimates a baseline expression for each gene,
#' according to a posterior Gaussian distribution.
#' @param Y gene expression data matrix
#' @param C sampled TF-gene binding network
#' @param A sampled regulatory strength matrix
#' @param X sampled transcription factor activity matrix
#' @param base_line_old prior gene expression baseline activity
#' @param C_prior prior TF-gene binding network
#' @param sigma_noise variance of gene expression fitting residuals
#' @param sigma_A variance of regulatory strength
#' @param sigma_baseline variance of gene expression baseline activity
#' @param sigma_X variance of transcription factor activity
#' @import stats
#' @keywords Baseline sampling

baseline_sampling<-function(Y, C, A, X, base_line_old, C_prior, sigma_noise, sigma_A, sigma_baseline, sigma_X){

  Num_genes=nrow(Y)
  Num_samples=ncol(Y)
  Num_TFs=ncol(C_prior)

  baseline_new=matrix(0, nrow=1, ncol=Num_genes)

  for (n in 1:Num_genes){
    # expected expression of gene n from all currently sampled TF effects
    temp=matrix(0, nrow=1, ncol=Num_samples)
    for (m in 1:Num_samples){
      for (t in 1:Num_TFs){
        temp[m]=temp[m]+A[n,t]*X[t,m]
      }
    }
    # conjugate Gaussian posterior for the baseline
    mean_baseline = sum(Y[n,]-temp)/Num_samples*sigma_baseline/(sigma_noise+sigma_baseline)
    variance_baseline = sigma_baseline*sigma_noise/(sigma_noise+sigma_baseline)
    baseline_new[n]=rnorm(1, mean=mean_baseline, sd=sqrt(variance_baseline))
  }
  return(baseline_new)
}
/scratch/gouwar.j/cran-all/cranData/BICORN/R/baseline_sampling.R
#' Data Initialization for BICORN
#'
#' Function 'data_integration' integrates the prior TF-gene binding network and gene expression data together. It will remove any genes missing either TF bindings or gene expression and identify a list of candidate cis-regulatory modules.
#' @param Binding_matrix loaded prior binding network
#' @param Binding_TFs loaded transcription factors
#' @param Binding_genes loaded genes in the prior binding network
#' @param Exp_data loaded properly normalized gene expression data
#' @param Exp_genes loaded genes in the gene expression data
#' @param Minimum_gene_per_module_regulate the minimum number of genes regulated by each module, used for candidate module filtering.
#' @keywords Initialization
#' @export
#' @examples
#'
#' # load in the sample data input
#' data("sample.input")
#'
#' # Data initialization (Integrate prior binding network and gene expression data)
#' BICORN_input<-data_integration(Binding_matrix = Binding_matrix, Binding_TFs = Binding_TFs,
#' Binding_genes = Binding_genes, Exp_data = Exp_data, Exp_genes = Exp_genes,
#' Minimum_gene_per_module_regulate = 2)

data_integration<-function(Binding_matrix = NULL, Binding_TFs = NULL, Binding_genes = NULL, Exp_data, Exp_genes = NULL, Minimum_gene_per_module_regulate = 2){

  # keep only binding regions whose genes also have expression data
  match_list<-match(Binding_genes, Exp_genes)
  index=which(match_list>0)
  match_list=match_list[index]

  Sampling_window_flag<-Binding_matrix[index,c(1:length(Binding_TFs))]

  # count how many regions carry each unique binding pattern
  Cluster_pattern=unique(Sampling_window_flag)
  Candidate_cluster_num=matrix(0, nrow=nrow(Cluster_pattern), ncol=1)

  index_temp=which(rowSums(abs(sweep(Sampling_window_flag,2,Cluster_pattern[1,])))==0)
  if (sum(index_temp)>0){
    Candidate_cluster_num[1,1]=length(index_temp)
  }
  for (k in 2:nrow(Cluster_pattern)){
    index_temp=which(rowSums(abs(sweep(Sampling_window_flag,2,Cluster_pattern[k,])))==0)
    if (sum(index_temp)>0){
      Candidate_cluster_num[k,1]=length(index_temp)
    }
  }

  # keep patterns regulating at least the minimum number of genes
  C_candidate_clusters=Cluster_pattern[which(Candidate_cluster_num>=Minimum_gene_per_module_regulate),]

  TF_index=which(colSums(C_candidate_clusters)>0)
  Candidate_TFs=Binding_TFs[TF_index]
  C_candidate_clusters=C_candidate_clusters[,TF_index]
  Sampling_window_flag<-Sampling_window_flag[,TF_index]

  # append the empty (no-binding) module
  C_candidate_clusters=rbind(C_candidate_clusters, 0)

  Num_regions=nrow(Sampling_window_flag)
  Num_clusters=nrow(C_candidate_clusters)

  # map each region to the candidate modules it is compatible with
  Cluster_mapping=matrix(0, nrow=Num_regions, ncol=Num_clusters)
  for (k in 1:Num_regions){
    for (c in 1:Num_clusters){
      if (sum(abs(C_candidate_clusters[c,]-Sampling_window_flag[k,]*C_candidate_clusters[c,]))==0){
        Cluster_mapping[k,c]=1
      }
    }
  }

  sig_gene_index=unique(match_list)

  genes=Exp_genes[sig_gene_index]
  expression_data=Exp_data[sig_gene_index,]
  TFs=Candidate_TFs

  # collapse region-level mappings to gene-level mappings
  gene_cluster_mapping=matrix(0, nrow=length(sig_gene_index), ncol=Num_clusters)
  for (k in 1:length(sig_gene_index)){
    index_temp=which(match_list==sig_gene_index[k])
    if (length(index_temp)==1){
      gene_cluster_mapping[k,]=Cluster_mapping[index_temp,]
    }else{
      gene_cluster_mapping[k,]=colSums(Cluster_mapping[index_temp,])
    }
  }
  gene_cluster_mapping[which(gene_cluster_mapping>0)]=1

  BICORN_input=list('genes'=genes, 'expression_data'=expression_data, 'TFs'=TFs, 'gene_cluster_mapping'=gene_cluster_mapping, 'C_candidate_clusters'=C_candidate_clusters)

  return(BICORN_input)
}
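
## Minimal sketch (toy data, not the packaged sample input) of the expected input
## shapes: a binary gene-by-TF binding matrix and an expression matrix sharing
## gene symbols; gene symbols in the binding matrix may repeat.
# Binding_matrix <- matrix(c(1,0, 1,1, 0,1), nrow = 3, byrow = TRUE,
#                          dimnames = list(c("g1","g2","g3"), c("TF1","TF2")))
# Exp_data <- matrix(rnorm(9), nrow = 3, dimnames = list(c("g1","g2","g3"), NULL))
# BICORN_input <- data_integration(Binding_matrix, colnames(Binding_matrix),
#                                  rownames(Binding_matrix), Exp_data,
#                                  rownames(Exp_data),
#                                  Minimum_gene_per_module_regulate = 1)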
/scratch/gouwar.j/cran-all/cranData/BICORN/R/data.initialization.R
#' Fitting Residual Variance Sampling Function
#'
#' Function 'sigmanoise_sampling' estimates the variance of overall gene expression fitting residuals,
#' according to an inverse-gamma distribution.
#' @param Y gene expression data matrix
#' @param C sampled TF-gene binding network
#' @param A sampled regulatory strength matrix
#' @param X sampled transcription factor activity matrix
#' @param base_line sampled gene expression baseline activity
#' @param C_prior prior TF-gene binding network
#' @param sigma_A variance of regulatory strength
#' @param sigma_baseline variance of gene expression baseline activity
#' @param sigma_X variance of transcription factor activity
#' @param alpha hyper-parameter for inverse-gamma distribution
#' @param beta hyper-parameter for inverse-gamma distribution
#' @import stats
#' @keywords Noise sampling

sigmanoise_sampling<-function(Y, C, A, X, base_line, C_prior, sigma_A, sigma_baseline, sigma_X, alpha, beta){

  Num_genes=nrow(Y)# number of genes
  Num_samples=ncol(Y)# number of samples
  Num_TFs=ncol(C_prior)# number of TFs

  alpha_new=alpha+1/2

  # accumulate half the squared fitting residuals over all genes and samples
  beta_temp=0
  for (n in 1:Num_genes){
    temp=matrix(0, nrow=1,ncol=Num_samples)
    for (m in 1:Num_samples){
      for (t in 1:Num_TFs){
        temp[m]=temp[m]+A[n,t]*X[t,m]
      }
    }
    temp_y=Y[n,]-temp-base_line[n]
    temp_y=temp_y^2
    beta_temp=beta_temp+sum(temp_y)/2
  }
  beta_new=beta+beta_temp/(Num_samples*Num_genes)

  sigmanoise_new=1/rgamma(1, alpha_new,1/beta_new)
  #sigmanoise_new=beta_new/rchisq(1, 2*alpha_new)

  return(sigmanoise_new)
}
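
## Sketch of the inverse-gamma draw (toy values; the parameterization here is an
## assumption, not taken from the package): if s2 ~ Inv-Gamma(shape a, scale b)
## then 1/s2 ~ Gamma(shape a, rate b), so a draw is 1/rgamma(1, shape = a,
## rate = b), with theoretical mean b/(a-1).
# a <- 2.5; b <- 1.2
# draws <- 1/rgamma(1e4, shape = a, rate = b)
# c(mean(draws), b/(a - 1))    # empirical vs. theoretical mean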
/scratch/gouwar.j/cran-all/cranData/BICORN/R/sigmanoise_sampling.R
## ----eval=F--------------------------------------------------------------
#  #Load the binary binding network
#  Binding_matrix <- as.matrix(read.table('path_to_prior_bindings/prior_bindings.txt', row.names = 1, header= TRUE))
#
#  #Extract TF symbols
#  Binding_TFs = colnames(Binding_matrix)
#
#  #Extract Gene symbols (can be redundant)
#  Binding_genes = rownames(Binding_matrix)

## ----eval=F--------------------------------------------------------------
#  #Load normalized gene expression data
#  Exp_data <- as.matrix(read.table('path_to_gene_expression/gene_expression.txt', row.names = 1, header= TRUE))
#
#  #Extract gene symbols
#  Exp_genes=rownames(Exp_data)

## ----eval = F------------------------------------------------------------
#  library(BICORN)
#
#  data(sample.input)

## ----eval = F------------------------------------------------------------
#  # Integrate the binary binding network and gene expression data
#  BICORN_input<-data_integration(Binding_matrix, Binding_TFs, Binding_genes, Exp_data, Exp_genes, Minimum_gene_per_module_regulate = 2)

## ----eval = F------------------------------------------------------------
#
#  # Infer cis-regulatory modules and their target genes
#  BICORN_output<-BICORN(BICORN_input, L =100, output_threshold = 10)

## ----eval = F------------------------------------------------------------
#  # Output candidate cis-regulatory modules
#  write.csv(BICORN_output$Modules, file = 'BICORN_cis_regulatory_modules.csv', quote = FALSE)
#
#  # Output a weighted module-gene regulatory network
#  write.csv(BICORN_output$Posterior_module_gene_regulatory_network, file = 'BICORN_module2target_regulatory_network.csv', quote = FALSE)
#
#  # Output a weighted TF-gene regulatory network
#  write.csv(BICORN_output$Posterior_TF_gene_regulatory_network, file = 'BICORN_TF2gene_regulatory_network.csv', quote = FALSE)
/scratch/gouwar.j/cran-all/cranData/BICORN/inst/doc/my-vignette.R
---
title: "An Introduction to BICORN"
author: "Xi Chen"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{An Introduction to BICORN}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

The BICORN package infers de novo cis-regulatory modules by integrating prior transcription factor binding information and gene expression data.

## Loading prior bindings and gene expression data

To run a BICORN analysis, users need to provide a prior binding file and a gene expression file.

The prior binding file should be a tab-separated text file:

1). The first column of this file must be official gene symbols. They can be redundant, considering that one gene's promoter or enhancer region can be partitioned into several bins, and co-regulation of binding events falling into each bin, rather than the whole region, is studied.

2). The column names (row 1, column B, C, ...) are TF symbols and should be unique;

3). Each unit must be either 1 (binding) or 0 (non-binding).

```{r,eval=F}
#Load the binary binding network
Binding_matrix <- as.matrix(read.table('path_to_prior_bindings/prior_bindings.txt', row.names = 1, header= TRUE))

#Extract TF symbols
Binding_TFs = colnames(Binding_matrix)

#Extract Gene symbols (can be redundant)
Binding_genes = rownames(Binding_matrix)
```

The gene expression data file should be a tab-separated text file:

1). The row names of the file should be official gene symbols and MUST be unique;

2). The column names (row 1, column B, C, ...) are expression sample names;

3). Each gene should be properly normalized, with zero mean and unit standard deviation across all samples.

```{r,eval=F}
#Load normalized gene expression data
Exp_data <- as.matrix(read.table('path_to_gene_expression/gene_expression.txt', row.names = 1, header= TRUE))

#Extract gene symbols
Exp_genes=rownames(Exp_data)
```

To load the BICORN demo data, use the command below:

```{r,eval = F}
library(BICORN)

data(sample.input)
```

## Initializing BICORN

BICORN pre-processes the loaded data by assigning prior bindings to target genes and extracting candidate modules. The minimum number of target genes **'Minimum_gene_per_module_regulate'** must be provided to filter candidate modules.

```{r,eval = F}
# Integrate the binary binding network and gene expression data
BICORN_input<-data_integration(Binding_matrix, Binding_TFs, Binding_genes, Exp_data, Exp_genes, Minimum_gene_per_module_regulate = 2)
```

## Running BICORN

To run the key functions of BICORN, two parameters should be provided to control the sampling process. **'L'** defines the total number of rounds of Gibbs sampling and **'output_threshold'** defines the initial burn-in period.

```{r,eval = F}
# Infer cis-regulatory modules and their target genes
BICORN_output<-BICORN(BICORN_input, L =100, output_threshold = 10)
```

## BICORN outputs

A list of cis-regulatory modules (non-ranked), a posterior module-gene regulatory network (genes as rows and modules as columns, with the order of modules the same as in the cis-regulatory module file), and a posterior TF-gene regulatory network (genes as rows and TFs as columns) are printed to the following files.

```{r,eval = F}
# Output candidate cis-regulatory modules
write.csv(BICORN_output$Modules, file = 'BICORN_cis_regulatory_modules.csv', quote = FALSE)

# Output a weighted module-gene regulatory network
write.csv(BICORN_output$Posterior_module_gene_regulatory_network, file = 'BICORN_module2target_regulatory_network.csv', quote = FALSE)

# Output a weighted TF-gene regulatory network
write.csv(BICORN_output$Posterior_TF_gene_regulatory_network, file = 'BICORN_TF2gene_regulatory_network.csv', quote = FALSE)
```
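
As a quick sanity check (a sketch assuming the files written above), the weighted TF-gene network can be read back in and its highest-confidence edges inspected:

```{r,eval = F}
# Read the weighted TF-gene network back in (genes as rows, TFs as columns)
tf2gene <- read.csv('BICORN_TF2gene_regulatory_network.csv', row.names = 1)

# List the ten highest-confidence TF-gene edges
m <- as.matrix(tf2gene)
edges <- which(m >= sort(m, decreasing = TRUE)[10], arr.ind = TRUE)
data.frame(gene = rownames(m)[edges[, 1]], TF = colnames(m)[edges[, 2]], weight = m[edges])
```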
/scratch/gouwar.j/cran-all/cranData/BICORN/inst/doc/my-vignette.Rmd
---
title: "An Introduction to BICORN"
author: "Xi Chen"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{An Introduction to BICORN}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

The BICORN package infers de novo cis-regulatory modules by integrating prior transcription factor binding information and gene expression data.

## Loading prior bindings and gene expression data

To run a BICORN analysis, users need to provide a prior binding file and a gene expression file.

The prior binding file should be a tab-separated text file:

1). The first column of this file must be official gene symbols. They can be redundant, considering that one gene's promoter or enhancer region can be partitioned into several bins, and co-regulation of binding events falling into each bin, rather than the whole region, is studied.

2). The column names (row 1, column B, C, ...) are TF symbols and should be unique;

3). Each unit must be either 1 (binding) or 0 (non-binding).

```{r,eval=F}
#Load the binary binding network
Binding_matrix <- as.matrix(read.table('path_to_prior_bindings/prior_bindings.txt', row.names = 1, header= TRUE))

#Extract TF symbols
Binding_TFs = colnames(Binding_matrix)

#Extract Gene symbols (can be redundant)
Binding_genes = rownames(Binding_matrix)
```

The gene expression data file should be a tab-separated text file:

1). The row names of the file should be official gene symbols and MUST be unique;

2). The column names (row 1, column B, C, ...) are expression sample names;

3). Each gene should be properly normalized, with zero mean and unit standard deviation across all samples.

```{r,eval=F}
#Load normalized gene expression data
Exp_data <- as.matrix(read.table('path_to_gene_expression/gene_expression.txt', row.names = 1, header= TRUE))

#Extract gene symbols
Exp_genes=rownames(Exp_data)
```

To load the BICORN demo data, use the command below:

```{r,eval = F}
library(BICORN)

data(sample.input)
```

## Initializing BICORN

BICORN pre-processes the loaded data by assigning prior bindings to target genes and extracting candidate modules. The minimum number of target genes **'Minimum_gene_per_module_regulate'** must be provided to filter candidate modules.

```{r,eval = F}
# Integrate the binary binding network and gene expression data
BICORN_input<-data_integration(Binding_matrix, Binding_TFs, Binding_genes, Exp_data, Exp_genes, Minimum_gene_per_module_regulate = 2)
```

## Running BICORN

To run the key functions of BICORN, two parameters should be provided to control the sampling process. **'L'** defines the total number of rounds of Gibbs sampling and **'output_threshold'** defines the initial burn-in period.

```{r,eval = F}
# Infer cis-regulatory modules and their target genes
BICORN_output<-BICORN(BICORN_input, L =100, output_threshold = 10)
```

## BICORN outputs

A list of cis-regulatory modules (non-ranked), a posterior module-gene regulatory network (genes as rows and modules as columns, with the order of modules the same as in the cis-regulatory module file), and a posterior TF-gene regulatory network (genes as rows and TFs as columns) are printed to the following files.

```{r,eval = F}
# Output candidate cis-regulatory modules
write.csv(BICORN_output$Modules, file = 'BICORN_cis_regulatory_modules.csv', quote = FALSE)

# Output a weighted module-gene regulatory network
write.csv(BICORN_output$Posterior_module_gene_regulatory_network, file = 'BICORN_module2target_regulatory_network.csv', quote = FALSE)

# Output a weighted TF-gene regulatory network
write.csv(BICORN_output$Posterior_TF_gene_regulatory_network, file = 'BICORN_TF2gene_regulatory_network.csv', quote = FALSE)
```
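
As a quick sanity check (a sketch assuming the files written above), the weighted TF-gene network can be read back in and its highest-confidence edges inspected:

```{r,eval = F}
# Read the weighted TF-gene network back in (genes as rows, TFs as columns)
tf2gene <- read.csv('BICORN_TF2gene_regulatory_network.csv', row.names = 1)

# List the ten highest-confidence TF-gene edges
m <- as.matrix(tf2gene)
edges <- which(m >= sort(m, decreasing = TRUE)[10], arr.ind = TRUE)
data.frame(gene = rownames(m)[edges[, 1]], TF = colnames(m)[edges[, 2]], weight = m[edges])
```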
/scratch/gouwar.j/cran-all/cranData/BICORN/vignettes/my-vignette.Rmd
##################### #'Extract occurrence data for specified species from BIEN #' #'BIEN_occurrence_species downloads occurrence records for specific species from the BIEN database. #' @param species A single species, or a vector of species. Genus and species should be separated by a space. Genus should be capitalized. #' @param only.geovalid Should the returned records be limited to those with validated coordinates? Default is TRUE #' @template occurrence #' @return Dataframe containing occurrence records for the specified species. #' @examples \dontrun{ #' BIEN_occurrence_species("Abies amabilis") #' species_vector<-c("Abies amabilis", "Acer nigrum") #' BIEN_occurrence_species(species_vector) #' BIEN_occurrence_species(species_vector,all.taxonomy = TRUE)} #' @family occurrence functions #' @export BIEN_occurrence_species<-function(species, cultivated = FALSE, new.world = NULL, all.taxonomy = FALSE, native.status = FALSE, natives.only = TRUE, observation.type = FALSE, political.boundaries = FALSE, collection.info = FALSE, only.geovalid = TRUE, ...){ #Test input .is_log(cultivated) .is_log_or_null(new.world) .is_log(all.taxonomy) .is_char(species) .is_log(native.status) .is_log(observation.type) .is_log(political.boundaries) .is_log(natives.only) .is_log(collection.info) .is_log(only.geovalid) #set conditions for query cultivated_<-.cultivated_check(cultivated) newworld_<-.newworld_check(new.world) taxonomy_<-.taxonomy_check(all.taxonomy) native_<-.native_check(native.status) observation_<-.observation_check(observation.type) political_<-.political_check(political.boundaries) natives_<-.natives_check(natives.only) collection_<-.collection_check(collection.info) geovalid_<-.geovalid_check(only.geovalid) # set the query query <- paste("SELECT scrubbed_species_binomial",taxonomy_$select,native_$select,political_$select," ,latitude, longitude,date_collected, datasource,dataset,dataowner,custodial_institution_codes,collection_code,view_full_occurrence_individual.datasource_id", collection_$select,cultivated_$select,newworld_$select,observation_$select,geovalid_$select," FROM view_full_occurrence_individual WHERE scrubbed_species_binomial in (", paste(shQuote(species, type = "sh"),collapse = ', '), ")", cultivated_$query,newworld_$query,natives_$query,observation_$query, geovalid_$query, " AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi') AND (georef_protocol is NULL OR georef_protocol<>'county centroid') AND (is_centroid IS NULL OR is_centroid=0) AND scrubbed_species_binomial IS NOT NULL ORDER BY scrubbed_species_binomial ;") return(.BIEN_sql(query, ...)) } ############## #'Extract occurrence data for specified sf polygon #' #'BIEN_occurrence_sf downloads occurrence records falling within a user-specified sf polygon #' @param sf An object of class sf. Note that the projection must be WGS84. #' @template occurrence #' @return Dataframe containing occurrence records falling within the polygon. #' @examples \dontrun{ #' library(sf) #' #' # first, we download an example shapefile to use (a species range) #' #' BIEN_ranges_species("Carnegiea gigantea")#saves range to the current working directory #' #' # load the range map as an sf object #' #' sf <- st_read(dsn = ".",layer = "Carnegiea_gigantea") #' #' # get the occurrences that occur within the polygon. 
#' #' species_occurrences <- BIEN_occurrence_sf(sf = sf) #' } #' @family occurrence functions #' @importFrom sf st_geometry st_as_text st_bbox #' @export BIEN_occurrence_sf <- function(sf, cultivated = FALSE, new.world = NULL, all.taxonomy = FALSE, native.status = FALSE, natives.only = TRUE, observation.type = FALSE, political.boundaries = FALSE, collection.info = FALSE, ...){ .is_log(cultivated) .is_log_or_null(new.world) .is_log(all.taxonomy) .is_log(native.status) .is_log(observation.type) .is_log(political.boundaries) .is_log(natives.only) .is_log(collection.info) # Convert the sf to wkt (needed for sql query) wkt <- sf |> st_geometry() |> st_as_text() # Get bounding box of sf (used as a sort of index to make query a bit faster) sf_bbox <- sf |> st_bbox() long_min <- sf_bbox["xmin"] long_max <- sf_bbox["xmax"] lat_min <- sf_bbox["ymin"] lat_max <- sf_bbox["ymax"] #set conditions for query cultivated_ <- .cultivated_check(cultivated) newworld_ <- .newworld_check(new.world) taxonomy_ <- .taxonomy_check(all.taxonomy) native_ <- .native_check(native.status) observation_ <- .observation_check(observation.type) political_ <- .political_check(political.boundaries) natives_ <- .natives_check(natives.only) collection_ <- .collection_check(collection.info) # set the query query <- paste("SELECT scrubbed_species_binomial",taxonomy_$select,native_$select,political_$select," , latitude, longitude, date_collected,datasource,dataset,dataowner,custodial_institution_codes,collection_code,a.datasource_id",collection_$select,cultivated_$select, newworld_$select,observation_$select," FROM (SELECT * FROM view_full_occurrence_individual WHERE higher_plant_group NOT IN ('Algae','Bacteria','Fungi') AND is_geovalid = 1 AND (georef_protocol is NULL OR georef_protocol<>'county centroid') AND (is_centroid IS NULL OR is_centroid=0) AND observation_type IN ('plot','specimen','literature','checklist') AND latitude BETWEEN ",lat_min," AND ",lat_max,"AND longitude BETWEEN ",long_min," AND ",long_max,") a WHERE st_intersects(ST_GeographyFromText('SRID=4326;",paste(wkt),"'),a.geom)",cultivated_$query,newworld_$query,natives_$query, " AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi') AND is_geovalid = 1 AND (georef_protocol is NULL OR georef_protocol<>'county centroid') AND (is_centroid IS NULL OR is_centroid=0) AND observation_type IN ('plot','specimen','literature','checklist') AND scrubbed_species_binomial IS NOT NULL ;") # create query to retrieve df <- .BIEN_sql(query, ...) if(length(df) == 0){ message("No occurrences found") return(invisible(NULL)) }else{ return(df) } } ############################### #'Extract species list by country #' #'BIEN_list_country downloads a list of all species within a country or countries from the BIEN database. #' @param country A single country or a vector of countries. #' @param country.code A single country code or a vector of country codes equal in length to the vector of states/province codes. #' @template list #' @note Political division (or political division code) spelling needs to be exact and case-sensitive, see \code{\link{BIEN_metadata_list_political_names}} for a list of political divisions and associated codes. #' @return Dataframe containing species list(s) for the specified country or countries. 
#' @examples \dontrun{ #' BIEN_list_country("Canada") #' country_vector<-c("Canada","United States") #' BIEN_list_country(country_vector)} #' @family list functions #' @export BIEN_list_country <- function(country = NULL, country.code = NULL, cultivated = FALSE, new.world = NULL, ...){ .is_log(cultivated) .is_log_or_null(new.world) .is_char(country) .is_char(country.code) if(is.null(country) & is.null(country.code)) { stop("Please supply either a country name or 2-digit ISO code") } newworld_ <- .newworld_check(new.world) #set base query components sql_select <- paste("SELECT DISTINCT country, scrubbed_species_binomial ") sql_from <- paste(" FROM species_by_political_division ") if(is.null(country.code)){ sql_where <- paste(" WHERE country in (", paste(shQuote(country, type = "sh"),collapse = ', '), ") AND scrubbed_species_binomial IS NOT NULL") }else{ sql_where <- paste(" WHERE country in (SELECT country FROM country WHERE iso in (", paste(shQuote(country.code, type = "sh"),collapse = ', '), ")) AND scrubbed_species_binomial IS NOT NULL") } sql_order_by <- paste(" ORDER BY scrubbed_species_binomial ") # adjust for optional parameters if(!cultivated){ # sql_where <- paste(sql_where, " AND (is_cultivated_observation = 0 OR is_cultivated_observation IS NULL) ") }else{ sql_select <- paste(sql_select, ",is_cultivated_in_region") } #if(!new.world){ # sql_select <- paste(sql_select,",is_new_world") # sql_where <- paste(sql_where, "AND is_new_world = 1 ") #}else{ # sql_where <- paste(sql_where, "AND is_new_world = 1 ") #} # form the final query query <- paste(sql_select,newworld_$select, sql_from, sql_where,newworld_$query, sql_order_by, " ;") return(.BIEN_sql(query, ...)) #return(.BIEN_sql(query)) } ############################ #'Extract a species list by state/province #' #'BIEN_list_state produces a list of all species with geovalidated occurrences falling within specified state(s) or province(s). #' @param state A state or vector of states (or other primary political divisions, e.g. provinces). #' @param country A single country or a vector of countries equal in length to the vector of states/provinces. #' @param state.code A single state/province code, or a vector of states/province codes. #' @param country.code A single country code or a vector of country codes equal in length to the vector of states/province codes. #' @template list #' @return Dataframe containing species list(s) for the specified states/provinces. #' @note Political division (or political division code) spelling needs to be exact and case-sensitive, see \code{\link{BIEN_metadata_list_political_names}} for a list of political divisions and associated codes. 
#' @examples \dontrun{ #' BIEN_list_state("United States","Michigan") #' state_vector<-c("Michigan","Arizona") #' BIEN_list_state(country="United States", state= state_vector)} #' @family list functions #' @export BIEN_list_state <- function(country = NULL, country.code = NULL, state = NULL, state.code = NULL, cultivated = FALSE, new.world = NULL, ...){ .is_char(country) .is_char(country.code) .is_char(state) .is_char(state.code) .is_log(cultivated) .is_log_or_null(new.world) if(is.null(country)& is.null(country.code)) { stop("Please supply either a country name or 2-digit ISO code") } # set base query components sql_select <- paste("SELECT DISTINCT country, state_province, scrubbed_species_binomial ") sql_from <- paste(" FROM species_by_political_division ") #if supplying country names if(is.null(country.code) & is.null(state.code)){ if(length(country)==1){ sql_where <- paste(" WHERE country in (", paste(shQuote(country, type = "sh"),collapse = ', '), ") AND state_province in (", paste(shQuote(state, type = "sh"),collapse = ', '), ") AND scrubbed_species_binomial IS NOT NULL") }else{ if(length(country)==length(state)){ sql_where<-"WHERE (" for(i in 1:length(country)){ condition_i<- paste("(country = ", paste(shQuote(country[i], type = "sh"),collapse = ', '), " AND state_province = ", paste(shQuote(state[i], type = "sh"),collapse = ', '), ")") if(i!=1){condition_i<- paste("OR ",condition_i)}#stick OR onto the condition where needed sql_where<-paste(sql_where,condition_i) }#for i sql_where<-paste(sql_where,") AND scrubbed_species_binomial IS NOT NULL") }else{ stop("If supplying more than one country, the function requires a vector of countries corresponding to the vector of states") } }#if length(country>1) }else{ if(length(country.code)==1){ sql_where <- paste(" WHERE country in (SELECT country FROM country WHERE iso in (", paste(shQuote(country.code, type = "sh"),collapse = ', '), ")) AND state_province in (SELECT state_province_ascii FROM county_parish WHERE admin1code in (", paste(shQuote(state.code, type = "sh"),collapse = ', '), ")) AND scrubbed_species_binomial IS NOT NULL") }else{ if(length(country.code)==length(state.code)){ sql_where<-"WHERE (" for(i in 1:length(country.code)){ condition_i<- paste("country in (SELECT country FROM country WHERE iso in (", paste(shQuote(country.code[i], type = "sh"),collapse = ', '), ")) AND state_province in (SELECT state_province_ascii FROM county_parish WHERE admin1code in (", paste(shQuote(state.code[i], type = "sh"),collapse = ', '), "))") if(i!=1){condition_i<- paste("OR ",condition_i)}#stick OR onto the condition where needed sql_where<-paste(sql_where,condition_i) }#for i sql_where<-paste(sql_where,") AND scrubbed_species_binomial IS NOT NULL") }else{ stop("If supplying more than one country, the function requires a vector of countries corresponding to the vector of states") } }#if length(country>1) } sql_order_by <- paste(" ORDER BY scrubbed_species_binomial ") # adjust for optional parameters if(!cultivated){ #sql_where <- paste(sql_where, " AND (is_cultivated_observation = 0 OR is_cultivated_observation IS NULL) ") }else{ sql_select <- paste(sql_select, ",is_cultivated_in_region") } #if(!new.world){ # sql_select <- paste(sql_select,",is_new_world") #}else{ # sql_where <- paste(sql_where, "AND is_new_world = 1 ") #} newworld_ <- .newworld_check(new.world) # form the final query query <- paste(sql_select,newworld_$select, sql_from, sql_where,newworld_$query, sql_order_by, " ;") ## form the final query #query <- paste(sql_select, sql_from, 
# sql_where, sql_order_by, " ;")

  return(.BIEN_sql(query, ...))

}

###########################

#'Extract a species list by county.
#'
#'BIEN_list_county produces a list of all species with geovalidated occurrences falling within specified county or counties.
#' @param country A single country or vector of countries.
#' @param state A state or vector of states (or other primary political divisions, e.g. provinces).
#' @param county A single county (or other secondary administrative boundary) or vector of counties.
#' @param state.code A single state/province code, or a vector of states/province codes.
#' @param country.code A single country (or other primary administrative boundary) code or a vector of country codes equal in length to the vector of states/province codes.
#' @param county.code A single county (or other secondary administrative boundary) code or a vector of county codes equal in length to the vectors of states/province codes and country codes.
#' @note Political division (or political division code) spelling needs to be exact and case-sensitive, see \code{\link{BIEN_metadata_list_political_names}} for a list of political divisions and associated codes.
#' @note We recommend using country, state, and county rather than codes, since county names have not been fully standardized.
#' @template list
#' @return Dataframe containing species list(s) for the specified counties.
#' @note This function requires you supply either 1) a single state and country with one or more counties, or 2) vectors of equal length for each political level.
#' @examples \dontrun{
#' BIEN_list_county("United States", "Michigan", "Kent")
#' BIEN_list_county(country = "United States", state = "Michigan", county = "Kent")
#' county_vector<-c("Kent","Kalamazoo")
#' BIEN_list_county(country = "United States", state = "Michigan", county = county_vector)}
#' @family list functions
#' @export
BIEN_list_county <- function(country = NULL, state = NULL, county = NULL, country.code = NULL, state.code = NULL, county.code = NULL, cultivated = FALSE, new.world = NULL, ...){

  .is_char(country.code)
  .is_char(state.code)
  .is_char(county.code)
  .is_char(country)
  .is_char(state)
  .is_char(county)
  .is_log(cultivated)
  .is_log_or_null(new.world)

  # set base query components
  sql_select <- paste("SELECT DISTINCT country, state_province, county, scrubbed_species_binomial ")
  sql_from <- paste(" FROM species_by_political_division ")

  if(is.null(country.code) & is.null(state.code) & is.null(county.code)){

    #sql where
    if(length(country)==1 & length(state)==1){
      sql_where <- paste(" WHERE country in (", paste(shQuote(country, type = "sh"),collapse = ', '), ")
                         AND state_province in (", paste(shQuote(state, type = "sh"),collapse = ', '), ")
                         AND county in (", paste(shQuote(county, type = "sh"),collapse = ', '), ")
                         AND scrubbed_species_binomial IS NOT NULL")
    }else{
      if(length(country)==length(state) & length(country)==length(county)){
        sql_where<-"WHERE ("

        for(i in 1:length(country)){
          condition_i<- paste("(country = ", paste(shQuote(country[i], type = "sh"),collapse = ', '), "
                              AND state_province = ", paste(shQuote(state[i], type = "sh"),collapse = ', '), "
                              AND county = ", paste(shQuote(county[i], type = "sh"),collapse = ', '), ") ")
          if(i!=1){condition_i<- paste("OR ",condition_i)}#stick OR onto the condition where needed
          sql_where<-paste(sql_where,condition_i)
        }#for i

        sql_where<-paste(sql_where,") AND scrubbed_species_binomial IS NOT NULL")
      }else{
        stop("If supplying more than one country and/or state the function requires matching vectors of
             countries, states and counties.")
      }
    }#if length(country>1)

  }else{

    #sql where
    if(length(country.code)==1 & length(state.code)==1){
      sql_where <- paste(" WHERE country in (SELECT country FROM country WHERE iso in (", paste(shQuote(country.code, type = "sh"),collapse = ', '), "))
                         AND state_province in (SELECT state_province_ascii FROM county_parish WHERE admin1code in (", paste(shQuote(state.code, type = "sh"),collapse = ', '), "))
                         AND county in (SELECT county_parish_ascii FROM county_parish WHERE admin2code in (", paste(shQuote(county.code, type = "sh"),collapse = ', '), "))
                         AND scrubbed_species_binomial IS NOT NULL")
    }else{
      if(length(country.code)==length(state.code) & length(country.code)==length(county.code)){
        sql_where<-"WHERE ("

        for(i in 1:length(country.code)){
          condition_i<- paste("(country = (SELECT country FROM country WHERE iso in (", paste(shQuote(country.code[i], type = "sh"),collapse = ', '), "))
                              AND state_province = (SELECT state_province_ascii FROM county_parish WHERE admin1code in (", paste(shQuote(state.code[i], type = "sh"),collapse = ', '), "))
                              AND county = (SELECT county_parish_ascii FROM county_parish WHERE admin2code in (", paste(shQuote(county.code[i], type = "sh"),collapse = ', '), "))" )
          if(i!=1){condition_i<- paste("OR ",condition_i)}#stick OR onto the condition where needed
          sql_where<-paste(sql_where,condition_i)
        }#for i

        sql_where<-paste(sql_where,") AND scrubbed_species_binomial IS NOT NULL")
      }else{
        stop("If supplying more than one country and/or state the function requires matching vectors of countries, states and counties.")
      }
    }#if length(country>1)
  }

  sql_order_by <- paste(" ORDER BY scrubbed_species_binomial ")

  # adjust for optional parameters
  if(!cultivated){
    #sql_where <- paste(sql_where, " AND (is_cultivated_observation = 0 OR is_cultivated_observation IS NULL) ")
  }else{
    sql_select  <- paste(sql_select, ",is_cultivated_in_region")
  }

  #if(!new.world){
  #  sql_select <- paste(sql_select,",is_new_world")
  #}else{
  #  sql_where <- paste(sql_where, "AND is_new_world = 1 ")
  #}

  newworld_ <- .newworld_check(new.world)

  # form the final query
  query <- paste(sql_select,newworld_$select, sql_from, sql_where,newworld_$query, sql_order_by, " ;")

  ## form the final query
  #query <- paste(sql_select, sql_from, sql_where, sql_order_by, " ;")

  return(.BIEN_sql(query, ...))

}

###########################

#'Extract a list of all species in the BIEN database.
#'
#'BIEN_list_all produces a list of all species in the BIEN database.
#' @param ... Additional arguments passed to internal functions.
#' @return Dataframe containing a list of all species in the BIEN database.
#' @examples \dontrun{
#' species_list<-BIEN_list_all()}
#' @family list functions
#' @export
BIEN_list_all<-function( ...){

  query <- paste("SELECT species FROM bien_species_all ORDER BY species ;")

  return(.BIEN_sql(query, ...))

}

###########################

#'Extract a list of species within a given sf polygon.
#'
#'BIEN_list_sf produces a list of all species with occurrence records falling within a user-supplied sf object.
#' @param sf An object of class sf. Note that the object must be in WGS84.
#' @template list
#' @return Dataframe containing a list of all species with occurrences in the supplied sf object.
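#' @details
#' A short sketch (assumes \code{my_polygon} is an existing sf object; the
#' query runs in WGS84, so reproject first if your data use another CRS):
#' \preformatted{
#' my_polygon <- sf::st_transform(my_polygon, crs = 4326)
#' species_in_polygon <- BIEN_list_sf(sf = my_polygon)
#' }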
#' @examples \dontrun{ #' library(sf) #' #' BIEN_ranges_species("Carnegiea gigantea") # saves ranges to the current working directory #' #' sf <- st_read(dsn = ".", #' layer = "Carnegiea_gigantea") #' #' species_list <- BIEN_list_sf(sf = sf) #' } #' @family list functions #' @importFrom sf st_geometry st_as_text st_bbox #' @export BIEN_list_sf <- function(sf, cultivated = FALSE, new.world = NULL, ...){ .is_log(cultivated) .is_log_or_null(new.world) # Convert the sf to wkt (needed for sql query) wkt <- sf |> st_geometry() |> st_as_text() # Get bounding box of sf (used as a sort of index to make query a bit faster) sf_bbox <- sf |> st_bbox() long_min <- sf_bbox["xmin"] long_max <- sf_bbox["xmax"] lat_min <- sf_bbox["ymin"] lat_max <- sf_bbox["ymax"] # adjust for optional parameters if(!cultivated){ cultivated_query <- "AND (is_cultivated_observation = 0 OR is_cultivated_observation IS NULL)" cultivated_select <- "" }else{ cultivated_query <- "" cultivated_select <- ",is_cultivated_observation,is_cultivated_in_region" } newworld_ <- .newworld_check(new.world) #rangeQuery <- paste("SELECT species FROM ranges WHERE species in (", paste(shQuote(species, type = "sh"),collapse = ', '), ") ORDER BY species ;") query <- paste("SELECT DISTINCT scrubbed_species_binomial",cultivated_select,newworld_$select ," FROM (SELECT * FROM view_full_occurrence_individual WHERE higher_plant_group NOT IN ('Algae','Bacteria','Fungi') AND is_geovalid = 1 AND (georef_protocol is NULL OR georef_protocol<>'county centroid') AND (is_centroid IS NULL OR is_centroid=0) AND observation_type IN ('plot','specimen','literature','checklist') AND scrubbed_species_binomial IS NOT NULL AND latitude BETWEEN ",lat_min," AND ",lat_max,"AND longitude BETWEEN ",long_min," AND ",long_max,") a WHERE st_intersects(ST_GeographyFromText('SRID=4326;",paste(wkt),"'),a.geom)",cultivated_query,newworld_$query ," ;") # create query to retrieve df <- .BIEN_sql(query, ...) #df <- .BIEN_sql(query) if(length(df) == 0){ message("No species found") return(invisible(NULL)) }else{ return(df) } } ########################### ########################### #'Extract occurrence data from BIEN for specified genera #' #'BIEN_occurrence_genus downloads occurrence records for specific genus/genera from the BIEN database. #' @param genus A single genus, or a vector of genera. Genera should be capitalized. #' @template occurrence #' @return Dataframe containing occurrence records for the specified genera. 
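#' @details
#' A hedged sketch of toggling the optional output columns (assumes a live
#' connection; the extra columns follow the template arguments documented
#' above):
#' \preformatted{
#' ab <- BIEN_occurrence_genus("Abutilon",
#'                             native.status = TRUE,
#'                             political.boundaries = TRUE)
#' # how many records per species were returned?
#' table(ab$scrubbed_species_binomial)
#' }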
#' @examples \dontrun{ #' BIEN_occurrence_genus("Abutilon") #' genus_vector<-c("Abutilon","Abronia") #' BIEN_occurrence_genus(genus_vector) #' BIEN_occurrence_genus(genus = "Abutilon",cultivated = TRUE,new.world = FALSE)} #' @family occurrence functions #' @export BIEN_occurrence_genus <- function(genus, cultivated = FALSE, new.world = NULL, all.taxonomy = FALSE, native.status = FALSE, natives.only = TRUE, observation.type = FALSE, political.boundaries = FALSE, collection.info = FALSE, ...){ .is_char(genus) .is_log(cultivated) .is_log(all.taxonomy) .is_log_or_null(new.world) .is_log(native.status) .is_log(observation.type) .is_log(political.boundaries) .is_log(natives.only) .is_log(collection.info) cultivated_<-.cultivated_check(cultivated) newworld_<-.newworld_check(new.world) taxonomy_<-.taxonomy_check(all.taxonomy) native_<-.native_check(native.status) observation_<-.observation_check(observation.type) political_<-.political_check(political.boundaries) natives_<-.natives_check(natives.only) collection_<-.collection_check(collection.info) # set the query query <- paste("SELECT scrubbed_genus, scrubbed_species_binomial",taxonomy_$select,native_$select,political_$select," ,latitude, longitude,date_collected,datasource,dataset,dataowner,custodial_institution_codes,collection_code,view_full_occurrence_individual.datasource_id",collection_$select,cultivated_$select,newworld_$select,observation_$select, " FROM view_full_occurrence_individual WHERE scrubbed_genus in (", paste(shQuote(genus, type = "sh"),collapse = ', '), ")",cultivated_$query,newworld_$query,natives_$query," AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi') AND is_geovalid = 1 AND (georef_protocol is NULL OR georef_protocol<>'county centroid') AND (is_centroid IS NULL OR is_centroid=0) AND observation_type IN ('plot','specimen','literature','checklist') AND scrubbed_species_binomial IS NOT NULL ;") return(.BIEN_sql(query, ...)) } ############################ #'Extract species occurrences by family. #' #'BIEN_occurrence_family extracts all occurrences for a given family (or families) from the BIEN database. #' @param family A single family or a vector of families. #' @template occurrence #' @return Dataframe containing occurrence records for the specified family/families. 
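#' @details
#' A small sketch of a quick post-download check (assumes a live connection):
#' \preformatted{
#' thea <- BIEN_occurrence_family("Theaceae", collection.info = TRUE)
#' # tally records by contributing data source
#' table(thea$datasource)
#' }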
#' @examples \dontrun{ #' BIEN_occurrence_family("Theaceae") #' family_vector<-c("Theaceae","Ericaceae") #' BIEN_occurrence_family(family_vector)} #' @family occurrence functions #' @export BIEN_occurrence_family <- function(family, cultivated = FALSE, new.world = NULL, observation.type = FALSE, all.taxonomy = FALSE, native.status = FALSE, natives.only = TRUE, political.boundaries = FALSE, collection.info = FALSE, ...){ .is_char(family) .is_log(cultivated) .is_log_or_null(new.world) .is_log(observation.type) .is_log(all.taxonomy) .is_log(native.status) .is_log(natives.only) .is_log(political.boundaries) .is_log(collection.info) #set conditions for query cultivated_<-.cultivated_check(cultivated) newworld_<-.newworld_check(new.world) taxonomy_<-.taxonomy_check(all.taxonomy) native_<-.native_check(native.status) observation_<-.observation_check(observation.type) political_<-.political_check(political.boundaries) natives_<-.natives_check(natives.only) collection_<-.collection_check(collection.info) # set the query query <- paste("SELECT scrubbed_family",taxonomy_$select,native_$select,political_$select,", scrubbed_species_binomial, latitude, longitude,date_collected,datasource,dataset,dataowner,custodial_institution_codes,collection_code,view_full_occurrence_individual.datasource_id",collection_$select,cultivated_$select,newworld_$select,observation_$select," FROM view_full_occurrence_individual WHERE scrubbed_family in (", paste(shQuote(family, type = "sh"),collapse = ', '), ")",cultivated_$query,newworld_$query,natives_$query, " AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi') AND is_geovalid = 1 AND (georef_protocol is NULL OR georef_protocol<>'county centroid') AND (is_centroid IS NULL OR is_centroid=0) AND observation_type IN ('plot','specimen','literature','checklist') AND scrubbed_species_binomial IS NOT NULL ;") return(.BIEN_sql(query, ...)) } ####################### #'Extract species occurrence records by state. #' #'BIEN_occurrence_state extracts occurrences records for the specified state(s). #' @param state A state or vector of states (or other primary political divisions, e.g. provinces). #' @param country A single country or vector of countries. #' @param state.code A single state/province code, or a vector of states/province codes. #' @param country.code A single country (or other primary administrative boundary) code or a vector of country codes equal in length to the vector of states/province codes. #' @template occurrence #' @note Political division (or political division code) spelling needs to be exact and case-sensitive, see \code{\link{BIEN_metadata_list_political_names}} for a list of political divisions and associated codes. #' @note This function requires you supply either 1) a single country with one or more states, or 2) vectors of equal length for each political level. #' @return Dataframe containing occurrence records for the specified states/provinces. 
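#' @details
#' A sketch of the code-based interface (the code values shown are
#' hypothetical; look up the actual values with
#' \code{\link{BIEN_metadata_list_political_names}} before use):
#' \preformatted{
#' codes <- BIEN_metadata_list_political_names()
#' ri <- BIEN_occurrence_state(country.code = "US", state.code = "US-RI")
#' }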
#' @examples \dontrun{ #' BIEN_occurrence_state("United States","Rhode Island") #' state_vector<-c("Rhode Island","Maryland") #' BIEN_occurrence_state(country="United States",state=state_vector)} #' @family occurrence functions #' @export BIEN_occurrence_state <- function(country = NULL, state = NULL, country.code = NULL, state.code = NULL, cultivated = FALSE, new.world = NULL, all.taxonomy = FALSE, native.status = FALSE, natives.only = TRUE, observation.type = FALSE, political.boundaries = FALSE, collection.info = FALSE, ...){ .is_char(country) .is_char(state) .is_char(country.code) .is_char(state.code) .is_log(cultivated) .is_log_or_null(new.world) .is_log(all.taxonomy) .is_log(native.status) .is_log(observation.type) .is_log(political.boundaries) .is_log(natives.only) .is_log(collection.info) #set conditions for query cultivated_ <- .cultivated_check(cultivated) newworld_ <- .newworld_check(new.world) taxonomy_ <- .taxonomy_check(all.taxonomy) native_ <- .native_check(native.status) observation_ <- .observation_check(observation.type) political_ <- .political_check(political.boundaries) natives_ <- .natives_check(natives.only) collection_ <- .collection_check(collection.info) if(is.null(country.code) & is.null(state.code)){ ##state where if(length(country) == 1){ sql_where <- paste(" WHERE country in (", paste(shQuote(country, type = "sh"),collapse = ', '), ") AND state_province in (", paste(shQuote(state, type = "sh"),collapse = ', '), ") AND scrubbed_species_binomial IS NOT NULL") }else{ if(length(country) == length(state)){ sql_where <- "WHERE (" for(i in 1:length(country)){ condition_i <- paste("(country = ", paste(shQuote(country[i], type = "sh"),collapse = ', '), " AND state_province = ", paste(shQuote(state[i], type = "sh"),collapse = ', '), ")") if(i != 1){condition_i <- paste("OR ", condition_i)} #stick OR onto the condition where needed sql_where <- paste(sql_where, condition_i) }#for i sql_where <- paste(sql_where, ") AND scrubbed_species_binomial IS NOT NULL") }else{ stop("If supplying more than one country, the function requires a vector of countries corresponding to the vector of states") } }#if length(country>1) }else{ ##state where if(length(country.code) == 1){ sql_where <- paste(" WHERE country in (SELECT country FROM country WHERE iso in (", paste(shQuote(country.code, type = "sh"),collapse = ', '), ")) AND state_province in (SELECT state_province_ascii FROM county_parish WHERE admin1code in (", paste(shQuote(state.code, type = "sh"),collapse = ', '), ")) AND scrubbed_species_binomial IS NOT NULL") }else{ if(length(country.code) == length(state.code)){ sql_where <- "WHERE (" for(i in 1:length(country.code)){ condition_i <- paste("country in (SELECT country FROM country WHERE iso in (", paste(shQuote(country.code[i], type = "sh"),collapse = ', '), ")) AND state_province in (SELECT state_province_ascii FROM county_parish WHERE admin1code in (", paste(shQuote(state.code[i], type = "sh"),collapse = ', '), "))") if(i != 1){condition_i <- paste("OR ",condition_i)}#stick OR onto the condition where needed sql_where <- paste(sql_where, condition_i) }#for i sql_where<-paste(sql_where,") AND scrubbed_species_binomial IS NOT NULL") }else{ stop("If supplying more than one country, the function requires a vector of countries corresponding to the vector of states") } }#if length(country>1) } # set the query query <- paste("SELECT scrubbed_species_binomial" ,taxonomy_$select,political_$select, ", latitude, longitude,date_collected,datasource, 
                 dataset,dataowner,custodial_institution_codes,collection_code,view_full_occurrence_individual.datasource_id",
                 collection_$select,cultivated_$select,newworld_$select,native_$select,observation_$select,"
                 FROM view_full_occurrence_individual ",
                 sql_where,cultivated_$query,newworld_$query,natives_$query,"
                 AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi')
                 AND is_geovalid = 1
                 AND (georef_protocol is NULL OR georef_protocol<>'county centroid')
                 AND (is_centroid IS NULL OR is_centroid=0)
                 AND observation_type IN ('plot','specimen','literature','checklist')
                 AND scrubbed_species_binomial IS NOT NULL ;")

  return(.BIEN_sql(query, ...))

}

#############################

#'Extract species occurrence records by country.
#'
#'BIEN_occurrence_country extracts occurrence records for the specified country/countries.
#' @param country A single country or a vector of countries.
#' @param country.code A single country code or a vector of country codes equal in length to the vector of states/province codes.
#' @template occurrence
#' @note Political division (or political division code) spelling needs to be exact and case-sensitive, see \code{\link{BIEN_metadata_list_political_names}} for a list of political divisions and associated codes.
#' @return Dataframe containing occurrence records for the specified country.
#' @examples \dontrun{
#' BIEN_occurrence_country("Cuba")
#' country_vector<-c("Cuba","Bahamas")
#' BIEN_occurrence_country(country_vector)}
#' @family occurrence functions
#' @export
BIEN_occurrence_country <- function(country = NULL, country.code = NULL, cultivated = FALSE, new.world = NULL, all.taxonomy = FALSE, native.status = FALSE, natives.only = TRUE, observation.type = FALSE, political.boundaries = FALSE, collection.info = FALSE, ...){

  .is_char(country)
  .is_char(country.code)
  .is_log(cultivated)
  .is_log_or_null(new.world)
  .is_log(all.taxonomy)
  .is_log(native.status)
  .is_log(natives.only)
  .is_log(observation.type)
  .is_log(political.boundaries)
  .is_log(collection.info)

  if(is.null(country)& is.null(country.code)) {
    stop("Please supply either a country or 2-digit ISO code")}

  #set conditions for query
  cultivated_ <- .cultivated_check(cultivated)
  newworld_ <- .newworld_check(new.world)
  taxonomy_ <- .taxonomy_check(all.taxonomy)
  native_ <- .native_check(native.status)
  observation_ <- .observation_check(observation.type)
  political_ <- .political_check(political.boundaries)
  natives_ <- .natives_check(natives.only)
  collection_ <- .collection_check(collection.info)

  # set the query
  if(is.null(country.code)){query <- paste("SELECT scrubbed_species_binomial",taxonomy_$select,political_$select,native_$select,",
                                           latitude, longitude,date_collected,
                                           datasource,dataset,dataowner,custodial_institution_codes,collection_code,
                                           view_full_occurrence_individual.datasource_id",collection_$select,cultivated_$select,newworld_$select,observation_$select,"
                                           FROM view_full_occurrence_individual
                                           WHERE country in (", paste(shQuote(country, type = "sh"),collapse = ', '), ")",cultivated_$query,newworld_$query,natives_$query,"
                                           AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi')
                                           AND is_geovalid = 1
                                           AND (georef_protocol is NULL OR georef_protocol<>'county centroid')
                                           AND (is_centroid IS NULL OR is_centroid=0)
                                           AND observation_type IN ('plot','specimen','literature','checklist') ;")
  }else{
    query <- paste("SELECT scrubbed_species_binomial",taxonomy_$select,political_$select,native_$select,",
                   latitude, longitude,
                   date_collected,datasource,dataset,dataowner,custodial_institution_codes,collection_code,
                   view_full_occurrence_individual.datasource_id",collection_$select,cultivated_$select,newworld_$select,observation_$select,"
                   FROM view_full_occurrence_individual
                   WHERE country in (SELECT country FROM country WHERE iso in (", paste(shQuote(country.code, type = "sh"),collapse = ', '), ")) ",cultivated_$query,newworld_$query,natives_$query,"
                   AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi')
                   AND is_geovalid = 1
                   AND (georef_protocol is NULL OR georef_protocol<>'county centroid')
                   AND (is_centroid IS NULL OR is_centroid=0)
                   AND observation_type IN ('plot','specimen','literature','checklist')
                   AND scrubbed_species_binomial IS NOT NULL ;")
  }

  return(.BIEN_sql(query, ...))

}

##############################

#'Extract species occurrence records by county.
#'
#'BIEN_occurrence_county extracts occurrence records for the specified county or counties.
#' @param country A single country or vector of countries.
#' @param state A state or vector of states (or other primary political divisions, e.g. provinces).
#' @param county A single county or a vector of counties (or other secondary political division, e.g. parish).
#' @param state.code A single state/province code, or a vector of states/province codes.
#' @param country.code A single country (or other primary administrative boundary) code or a vector of country codes equal in length to the vector of states/province codes.
#' @param county.code A single county (or other secondary administrative boundary) code or a vector of county codes equal in length to the vectors of states/province codes and country codes.
#' @note Political division (or political division code) spelling needs to be exact and case-sensitive, see \code{\link{BIEN_metadata_list_political_names}} for a list of political divisions and associated codes.
#' @template occurrence
#' @note This function requires you supply either 1) a single country and state with one or more counties, or 2) vectors of equal length for each political level.
#' @return Dataframe containing occurrence records for the specified counties.
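#' @details
#' A minimal sketch of the positional matching (assumes a live connection):
#' county[i] is searched within state[i], which is searched within
#' country[i].
#' \preformatted{
#' occ <- BIEN_occurrence_county(country = c("United States", "United States"),
#'                               state   = c("Arizona", "Michigan"),
#'                               county  = c("Pima", "Kent"))
#' # quick look at the coordinates returned
#' plot(occ$longitude, occ$latitude)
#' }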
#' @examples \dontrun{ #' BIEN_occurrence_county("United States","Arizona","Pima") #' country_vector<-c("United States","United States") #' state_vector<-c("Arizona","Michigan") #' county_vector<-c("Pima","Kent") #' BIEN_occurrence_county(country=country_vector, state = state_vector, county = county_vector)} #' @family occurrence functions #' @export BIEN_occurrence_county <- function(country = NULL, state = NULL, county = NULL, country.code = NULL, state.code = NULL, county.code = NULL, cultivated = FALSE, new.world = NULL, all.taxonomy = FALSE, native.status = FALSE, natives.only = TRUE, observation.type = FALSE, political.boundaries = FALSE, collection.info = FALSE, ...){ .is_char(country) .is_char(state) .is_char(county) .is_char(country.code) .is_char(state.code) .is_char(county.code) .is_log(cultivated) .is_log_or_null(new.world) .is_log(all.taxonomy) .is_log(native.status) .is_log(natives.only) .is_log(observation.type) .is_log(political.boundaries) .is_log(collection.info) #set conditions for query cultivated_<-.cultivated_check(cultivated) newworld_<-.newworld_check(new.world) taxonomy_<-.taxonomy_check(all.taxonomy) native_<-.native_check(native.status) observation_<-.observation_check(observation.type) political_<-.political_check(political.boundaries) natives_<-.natives_check(natives.only) collection_<-.collection_check(collection.info) if(is.null(country.code) & is.null(state.code) & is.null(county.code)){ #sql where if(length(country) ==1 & length(state) == 1){ sql_where <- paste(" WHERE country in (", paste(shQuote(country, type = "sh"),collapse = ', '), ") AND state_province in (", paste(shQuote(state, type = "sh"),collapse = ', '), ") AND county in (", paste(shQuote(county, type = "sh"),collapse = ', '), ") AND scrubbed_species_binomial IS NOT NULL") }else{ if(length(country)==length(state) & length(country)==length(county)){ sql_where<-"WHERE (" for(i in 1:length(country)){ condition_i<- paste("(country = ", paste(shQuote(country[i], type = "sh"),collapse = ', '), " AND state_province = ", paste(shQuote(state[i], type = "sh"),collapse = ', '), " AND county = ", paste(shQuote(county[i], type = "sh"),collapse = ', '), ") ") if(i!=1){condition_i<- paste("OR ",condition_i)}#stick OR onto the condition where needed sql_where<-paste(sql_where,condition_i) }#for i sql_where<-paste(sql_where,") AND scrubbed_species_binomial IS NOT NULL") }else{ stop("If supplying more than one country and/or state the function requires matching vectors of countries, states and counties.") } }#if length(country>1) }else{ #sql where if(length(country.code) == 1 & length(state.code) == 1){ sql_where <- paste(" WHERE country in (SELECT country FROM country WHERE iso in (", paste(shQuote(country.code, type = "sh"),collapse = ', '), ")) AND state_province in (SELECT state_province_ascii FROM county_parish WHERE admin1code in (", paste(shQuote(state.code, type = "sh"),collapse = ', '), ")) AND county in (SELECT county_parish_ascii FROM county_parish WHERE admin2code in (", paste(shQuote(county.code, type = "sh"),collapse = ', '), ")) AND scrubbed_species_binomial IS NOT NULL") }else{ if(length(country) == length(state) & length(country) == length(county)){ sql_where <- "WHERE (" for(i in 1:length(country)){ condition_i <- paste("(country = (SELECT country FROM country WHERE iso in (", paste(shQuote(country.code, type = "sh"),collapse = ', '), ")) AND state_province = (SELECT state_province_ascii FROM county_parish WHERE admin1code in (", paste(shQuote(state.code, type = "sh"),collapse = ', '), ")) AND 
county = (SELECT county_parish_ascii FROM county_parish WHERE admin2code in (", paste(shQuote(county.code, type = "sh"),collapse = ', '), "))" ) if(i != 1){condition_i <- paste("OR ",condition_i)}#stick OR onto the condition where needed sql_where<-paste(sql_where,condition_i) }#for i sql_where<-paste(sql_where,") AND scrubbed_species_binomial IS NOT NULL") }else{ stop("If supplying more than one country and/or state the function requires matching vectors of countries, states and counties.") } }#if length(country>1) }#if codes are not null # set the query query <- paste("SELECT scrubbed_species_binomial" ,taxonomy_$select,political_$select , ",latitude, longitude,date_collected,datasource, dataset,dataowner,custodial_institution_codes,collection_code,view_full_occurrence_individual.datasource_id", collection_$select,cultivated_$select,newworld_$select,native_$select,observation_$select," FROM view_full_occurrence_individual ", sql_where,cultivated_$query,newworld_$query,natives_$query," AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi') AND is_geovalid = 1 AND (georef_protocol is NULL OR georef_protocol<>'county centroid') AND (is_centroid IS NULL OR is_centroid=0) AND observation_type IN ('plot','specimen','literature','checklist') AND scrubbed_species_binomial IS NOT NULL ;") return(.BIEN_sql(query, ...)) } ############################ #'Extract species occurrence records by a latitude/longitude bounding box. #' #'BIEN_occurrence_box extracts occurrences records falling within the specific area. #' @param min.lat Minimum latitude #' @param max.lat Maximum latitude #' @param min.long Minimum longitude #' @param max.long Maximum longitude #' @param species Optional. A single species or a vector of species. #' @param genus Optional. A single genus or a vector of genera. #' @template occurrence #' @return Dataframe containing occurrence records for the specified area. #' @note Specifying species and/or genera will limit records returned to that set of taxa. 
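#' @details
#' A short sketch of restricting the box to particular taxa (coordinates are
#' decimal degrees, WGS84; the genus shown is an arbitrary example):
#' \preformatted{
#' boxed <- BIEN_occurrence_box(min.lat = 32, max.lat = 33,
#'                              min.long = -114, max.long = -113,
#'                              genus = "Prosopis")
#' }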
#' @examples \dontrun{ #' output_test<- #' BIEN_occurrence_box(min.lat = 32,max.lat = 33,min.long = -114,max.long = -113, #' cultivated = TRUE, new.world = FALSE)} #' @family occurrence functions #' @export BIEN_occurrence_box<-function(min.lat, max.lat, min.long, max.long, species = NULL, genus = NULL, cultivated = FALSE, new.world = NULL, all.taxonomy = FALSE, native.status = FALSE, natives.only = TRUE, observation.type = FALSE, political.boundaries = TRUE, collection.info = FALSE, ...){ .is_num(min.lat) .is_num(max.lat) .is_num(min.long) .is_num(max.long) .is_log(cultivated) .is_log_or_null(new.world) .is_log(all.taxonomy) .is_log(native.status) .is_log(natives.only) .is_log(observation.type) .is_log(collection.info) .is_char(species) .is_char(genus) #set conditions for query cultivated_ <- .cultivated_check(cultivated) newworld_ <- .newworld_check(new.world) taxonomy_ <- .taxonomy_check(all.taxonomy) native_ <- .native_check(native.status) observation_ <- .observation_check(observation.type) political_ <- .political_check(political.boundaries) natives_ <- .natives_check(natives.only) collection_ <- .collection_check(collection.info) species_ <- .species_check(species) genus_ <- .genus_check(genus) # set the query query <- paste("SELECT scrubbed_species_binomial", taxonomy_$select,political_$select,native_$select,",latitude, longitude, date_collected,datasource,dataset,dataowner,custodial_institution_codes,collection_code,view_full_occurrence_individual.datasource_id", collection_$select,cultivated_$select,newworld_$select,observation_$select," FROM view_full_occurrence_individual WHERE latitude between " , paste(shQuote(min.lat, type = "sh"),collapse = ', '), "AND " , paste(shQuote(max.lat, type = "sh"),collapse = ', ')," AND longitude between ", paste(shQuote(min.long, type = "sh"),collapse = ', '), "AND " , paste(shQuote(max.long, type = "sh"),collapse = ', '), cultivated_$query,newworld_$query,natives_$query, species_$query, genus_$query , " AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi') AND is_geovalid = 1 AND (georef_protocol is NULL OR georef_protocol<>'county centroid') AND (is_centroid IS NULL OR is_centroid=0) AND observation_type IN ('plot','specimen','literature','checklist') AND scrubbed_species_binomial IS NOT NULL ;") return(.BIEN_sql(query, ...)) } ##### #'Download range maps for given species. #' #'BIEN_ranges_species extracts range maps for the specified species. #' @param species A single species or a vector of species. #' @template ranges #' @return Range maps for specified species. 
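#' @details
#' Species names may be written with either spaces or underscores; the
#' function converts spaces to underscores internally. A minimal sketch that
#' writes shapefiles to a temporary directory:
#' \preformatted{
#' td <- file.path(tempdir(), "ranges")
#' dir.create(td)
#' BIEN_ranges_species("Abies amabilis", directory = td)
#' list.files(td)
#' }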
#' @examples \dontrun{ #' library(sf) #' library(maps) #a convenient source of maps #' species_vector <- c("Abies_lasiocarpa","Abies_amabilis") #' BIEN_ranges_species(species_vector) #' BIEN_ranges_species(species_vector, match_names_only = TRUE) #' temp_dir <- file.path(tempdir(), "BIEN_temp")#Set a working directory #' BIEN_ranges_species(species = species_vector, #' directory = temp_dir)#saves ranges to a temporary directory #' BIEN_ranges_species("Abies_lasiocarpa") #' BIEN_ranges_species("Abies_lasiocarpa", #' directory = temp_dir) #' #' #Reading files #' #' Abies_poly <- st_read(dsn = temp_dir, #' layer = "Abies_lasiocarpa") #' #' #Plotting files #' plot(Abies_poly[1])#plots the range, but doesn't mean much without any reference #' map('world', fill = TRUE, col = "grey")#plots a world map (WGS84 projection), in grey #' #' plot(Abies_poly[1], #' col = "forest green", #' add = TRUE) #adds the range of Abies lasiocarpa to the map #' #' # Getting data from the files (currently only species names and a BIEN ID field) #' Abies_poly$species#gives the species name associated with "Abies_poly"}#' #' @family range functions #' @importFrom sf st_as_sf st_write #' @export BIEN_ranges_species <- function(species, directory = NULL, matched = TRUE, match_names_only = FALSE, include.gid = FALSE, ...){ .is_char(species) .is_log(matched) .is_log(match_names_only) #make sure there are no spaces in the species names species <- gsub(" ","_",species) if(match_names_only == FALSE){ #record original working directory,change to specified directory if given if(is.null(directory)){ directory <- getwd() } # set the query query <- paste("SELECT ST_AsText(geom),species,gid FROM ranges WHERE species in (", paste(shQuote(species, type = "sh"),collapse = ', '), ") ORDER BY species ;") # create query to retrieve df <- .BIEN_sql(query, ...) #df <- .BIEN_sql(query) if(length(df) == 0){ message("No species matched") }else{ for(l in 1:length(df$species)){ sp_range <- st_as_sf(x = df[l,, drop = FALSE], wkt = "st_astext", crs = "epsg:4326") #Make sure that the directory doesn't have a "/" at the end-this confuses rgdal. Probably a more eloquent way to do this with regex... # if(unlist(strsplit(directory,""))[length(unlist(strsplit(directory,"")))]=="/"){ # directory<-paste(unlist(strsplit(directory,""))[-length(unlist(strsplit(directory,"")))],collapse = "") # } if(include.gid == TRUE){ st_write(obj = sp_range, dsn = directory, layer = paste(df$species[l],"_",df$gid[l],sep=""), driver = "ESRI Shapefile", append = FALSE, quiet=TRUE) }else{ st_write(obj = sp_range, dsn = directory, layer = paste(df$species[l]), driver = "ESRI Shapefile", append = FALSE, quiet=TRUE) } #save output }#for species in df loop }#else #list matched species if(matched == TRUE){ found <- as.data.frame(cbind(species,matrix(nrow=length(species),ncol=1,data="No"))) colnames(found) <- c("Species","Range_map_downloaded?") found$`Range_map_downloaded?` <- as.character(found$`Range_map_downloaded?`) found$`Range_map_downloaded?`[which(species%in%df$species)] <- "Yes" return(found) }#matched = true }#match names only if statement if(match_names_only == TRUE){ rangeQuery <- paste("SELECT species FROM ranges WHERE species in (", paste(shQuote(species, type = "sh"),collapse = ', '), ") ORDER BY species ;") # create query to retrieve df <- .BIEN_sql(rangeQuery, ...) 
#df <- .BIEN_sql(rangeQuery) if(length(df) == 0){ message("No species matched") }else{ found <- as.data.frame(cbind(species,matrix(nrow=length(species),ncol=1,data="No"))) colnames(found) <- c("Species","Range_map_available?") found$`Range_map_available?` <- as.character(found$`Range_map_available?`) found$`Range_map_available?`[which(species%in%df$species)] <- "Yes" return(found) } } #matched_names_only == TRUE } #################################### #'Extract range data for large numbers of species #' #'BIEN_ranges_species_bulk downloads ranges for a large number of species using parallel processing. #' @param species A vector of species or NULL (the default). If NULL, all available ranges will be used. #' @param directory The directory where range shapefiles will be stored. If NULL, a temporary directory will be used. #' @param batch_size The number of ranges to download at once. #' @param return_directory Should the directory be returned? Default is TRUE #' @param use_parallel Logical. Should batches be downloaded in parallel? If set to TRUE, AND if parallel and foreach are available, parallel processing of downloads will use n-1 clusters. #' @return Optionally, the directory to which the files were saved. #' @note This function may take a long time (hours) to run depending on the number of cores, download speed, etc. #' @examples \dontrun{ #' #To download all BIEN ranges maps: #' BIEN_ranges_species_bulk() #' } #' @family range functions #' @export #' @import foreach #' @import doParallel BIEN_ranges_species_bulk <- function(species = NULL, directory = NULL, batch_size = 1000, return_directory = TRUE, use_parallel = FALSE){ #Set species list and directory if NULL if(is.null(species)){ species <- BIEN_ranges_list()$species } if(is.null(directory)){directory <- file.path(tempdir(), "BIEN_temp") print(paste("Files will be saved to ",directory))} if(!file.exists(directory)){ dir.create(directory) } if(nzchar(system.file(package = "doParallel")) & nzchar(system.file(package = "foreach")) & use_parallel){ #Download range maps cl <- parallel::makePSOCKcluster(parallel::detectCores()) doParallel::registerDoParallel(cl = cl, cores = parallel::detectCores() - 1) foreach::foreach(i = 1:ceiling(length(species)/batch_size )) %dopar% BIEN_ranges_species(species = species[(((i-1)*batch_size)+1):(i*batch_size)], directory = file.path(directory,i), matched = FALSE) parallel::stopCluster(cl) rm(cl) }else{ for(i in 1:ceiling(length(species)/batch_size )){ BIEN_ranges_species(species = species[(((i-1)*batch_size)+1):(i*batch_size)], directory = file.path(directory,i), matched = FALSE) } } if(return_directory){return(directory)} }#end fx #################################### #'Download range maps for given genus. #' #'BIEN_ranges_genus extracts range maps for the specified genera. #' @param genus A single genus or a vector of genera. #' @template ranges #' @return Range maps for all available species within the specified genera. 
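#' @details
#' Matching is done by regular expression on the stored species names (each
#' genus is internally wrapped as "(Genus_)"), so capitalization must match
#' the database exactly. A short sketch (assumes a live connection):
#' \preformatted{
#' # list which species-level maps exist for a genus before downloading
#' BIEN_ranges_genus("Abies", match_names_only = TRUE)
#' }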
#' @examples \dontrun{ #' library(maps) #' library(sf) #' #' genus_vector <- c("Abies","Acer") #' #' temp_dir <- file.path(tempdir(), "BIEN_temp")#Set a working directory #' #' BIEN_ranges_genus(genus_vector) #' #' BIEN_ranges_genus(genus = genus_vector, #' match_names_only = TRUE) #' #' BIEN_ranges_genus(genus = genus_vector, #' directory = temp_dir) #saves ranges to a specified working directory #' #' BIEN_ranges_genus("Abies") #' #' BIEN_ranges_genus(genus = "Abies", #' directory = temp_dir) #' #' #Reading files #' #' Abies_poly <- read_sf(dsn = temp_dir,layer = "Abies_lasiocarpa") #' #' #Plotting files #' #' plot(Abies_poly[1]) #plots the range, but doesn't mean much without any reference #' #' map('world', fill = TRUE, col = "grey") #plots a world map (WGS84 projection), in grey #' #' plot(Abies_poly[1], #' col="forest green", #' add = TRUE) #adds the range of Abies lasiocarpa to the map #' #' # Getting data from the files (currently only species names) #' #' Abies_poly$species#gives the species name associated with "Abies_poly" #' } #' @family range functions #' @importFrom sf st_as_sf st_write #' @export BIEN_ranges_genus <- function(genus, directory = NULL, matched = TRUE, match_names_only = FALSE, include.gid = FALSE, ...){ .is_char(genus) .is_log(matched) .is_log(match_names_only) .is_log(include.gid) #modify the genus list to make searching easier genus <- paste("(",genus,"_",")",sep = "") if(match_names_only == FALSE){ #record original working directory,change to specified directory if given if(is.null(directory)){ directory <- getwd() } # set the query query <- paste("SELECT ST_AsText(geom),species,gid FROM ranges WHERE species ~ '",paste(genus,collapse="|"),"' ORDER BY species ;",sep="") # create query to retrieve df <- .BIEN_sql(query, ...) #df <- .BIEN_sql(query) if(length(df) == 0){ message("No species matched") }else{ for(l in 1:length(df$species)){ sp_range <- st_as_sf(x = df[l,, drop = FALSE], wkt = "st_astext", crs = "epsg:4326") #Make sure that the directory doesn't have a "/" at the end-this confuses rgdal. Probably a more eloquent way to do this with regex... # if(unlist(strsplit(directory,""))[length(unlist(strsplit(directory,"")))]=="/"){ # directory<-paste(unlist(strsplit(directory,""))[-length(unlist(strsplit(directory,"")))],collapse = "") # } if(include.gid == TRUE){ st_write(obj = sp_range, dsn = directory, layer = paste(df$species[l],"_",df$gid[l],sep=""), driver = "ESRI Shapefile", append = FALSE, quiet=TRUE) }else{ st_write(obj = sp_range, dsn = directory, layer = paste(df$species[l]), driver = "ESRI Shapefile", append = FALSE, quiet=TRUE) } #save output }#for species in df loop }#else #list matched species if(matched == TRUE){ found <- as.data.frame(df$species) return(found) }#matched = true }#match names only if statement if(match_names_only == TRUE){ query <- paste("SELECT species FROM ranges WHERE species ~ '",paste(genus,collapse="|"),"' ORDER BY species ;",sep="") # create query to retrieve df <- .BIEN_sql(query, ...) if(length(df) == 0){ message("No species matched") }else{ found<-as.data.frame(df$species) return(found) } } #matched_names_only == TRUE } ####################################### #'Download range maps that intersect a specified bounding box. #' #'BIEN_ranges_box extracts range maps for a specified bounding box. #' @param min.lat Minimum latitude of the ranges included. #' @param max.lat Maximum latitude of the ranges included. #' @param min.long Minimum longitude of the ranges included. 
#' @param max.long Maximum longitude of the ranges included.
#' @param crop.ranges Should the ranges be cropped to the focal area? Default is FALSE.
#' @template ranges_spatial
#' @return Range maps for all available species within the specified bounding box.
#' @examples \dontrun{
#' temp_dir <- file.path(tempdir(), "BIEN_temp") #Set a working directory
#' BIEN_ranges_box(42,43,-85,-84,species.names.only = TRUE)
#' BIEN_ranges_box(42,43,-85,-84,directory = temp_dir)}
#' @family range functions
#' @importFrom sf st_as_sf st_write
#' @export
BIEN_ranges_box <- function(min.lat, max.lat, min.long, max.long, directory = NULL, species.names.only = FALSE, return.species.list = TRUE, crop.ranges = FALSE, include.gid = FALSE, ...){

  .is_num(min.lat)
  .is_num(max.lat)
  .is_num(min.long)
  .is_num(max.long)
  .is_log(include.gid)
  .is_log(return.species.list)
  .is_log(species.names.only)

  if(species.names.only == FALSE){

    #record original working directory,change to specified directory if given
    if(is.null(directory)){
      directory <- getwd()
    }

    # set the query
    if(crop.ranges){
      query <- paste("SELECT ST_AsText(ST_intersection(geom,ST_MakeEnvelope(",min.long, ",",min.lat,",",max.long,",",max.lat,",4326))),species,gid FROM ranges WHERE st_intersects(ST_MakeEnvelope(",min.long, ",",min.lat,",",max.long,",",max.lat,",4326),geom)")
    }else{
      query <- paste("SELECT ST_AsText(geom),species,gid FROM ranges WHERE st_intersects(ST_MakeEnvelope(",min.long, ",",min.lat,",",max.long,",",max.lat,",4326),geom)")
    }

    # create query to retrieve
    df <- .BIEN_sql(query, ...)
    #df <- .BIEN_sql(query)

    if(nrow(df) == 0){
      message("No species matched")
    }else{
      for(l in 1:length(df$species)){
        sp_range <- st_as_sf(x = df[l,, drop = FALSE], wkt = "st_astext", crs = "epsg:4326")

        if(include.gid == TRUE){
          st_write(obj = sp_range, dsn = directory, layer = paste(df$species[l],"_",df$gid[l],sep=""), driver = "ESRI Shapefile", append = FALSE, quiet=TRUE)
        }else{
          st_write(obj = sp_range, dsn = directory, layer = paste(df$species[l]), driver = "ESRI Shapefile", append = FALSE, quiet=TRUE)
        } #save output
      }#for species in df loop

      if(return.species.list){
        return(df$species)
      }#if return.species.list
    }#else
  }#species names only if statement

  if(species.names.only == TRUE){
    # create query to retrieve
    query <- paste("SELECT species FROM ranges WHERE st_intersects(ST_MakeEnvelope(",min.long, ",",min.lat,",",max.long,",",max.lat,",4326),geom)")
    df <- .BIEN_sql(query, ...)

    if(length(df) == 0){
      message("No species found")
    }else{
      return(df)
    }
  } #species.names.only == TRUE
}

#######################################

#'Download range maps that intersect the range of a given species.
#'
#'BIEN_ranges_intersect_species extracts range maps that intersect the range(s) of the specified focal species.
#' @param species Focal species (or a vector of species) for which to extract intersecting ranges.
#' @param include.focal Should a range for the focal species be downloaded? Default is TRUE.
#' @template ranges_spatial
#' @return Range maps for all available species that intersect the range of the focal species.
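#' @details
#' A hedged sketch of listing overlaps without downloading shapefiles
#' (assumes a live connection):
#' \preformatted{
#' # which ranges overlap the focal range, excluding the focal species itself?
#' overlaps <- BIEN_ranges_intersect_species("Carnegiea_gigantea",
#'                                           species.names.only = TRUE,
#'                                           include.focal = FALSE)
#' }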
#' @examples \dontrun{ #' temp_dir <- file.path(tempdir(), "BIEN_temp") #Set a working directory #' BIEN_ranges_intersect_species(species = "Carnegiea_gigantea", #' directory = temp_dir,include.focal = TRUE) #' species_vector<-c("Carnegiea_gigantea","Echinocereus coccineus") #' BIEN_ranges_intersect_species(species = species_vector,species.names.only = TRUE) #' } #' @family range functions #' @importFrom sf st_as_sf st_write #' @export BIEN_ranges_intersect_species <- function(species, directory = NULL, species.names.only = FALSE, include.focal = TRUE, return.species.list = TRUE, include.gid = FALSE, ...){ .is_char(species) .is_log(species.names.only) .is_log(include.focal) .is_log(include.gid) #make sure there are no spaces in the species names species <- gsub(" ","_",species) #set query chunk to include focal species if(include.focal){ focal.query <- "" }else{ focal.query <- "a.species != b.species AND" } if(species.names.only == FALSE){ #set directory for saving if(is.null(directory)){ directory <- getwd() } # set the query query <- paste("SELECT b.species AS focal_species, a.species AS intersecting_species,a.species,a.gid, ST_AsText(a.geom) AS geom FROM ranges AS a, (SELECT species, geom FROM ranges WHERE species in (",paste(shQuote(species, type = "sh"),collapse = ', '),")) b WHERE", focal.query," ST_Intersects(a.geom, b.geom) ;") # create query to retrieve df <- .BIEN_sql(query, ...) #df <- .BIEN_sql(query) #df <- .BIEN_sql(query,limit = limit) if(length(df) == 0){ message("No species matched") }else{ for(l in 1:nrow(df)){ sp_range <- st_as_sf(x = df[l,, drop = FALSE], wkt = "geom", crs = "epsg:4326") if(include.gid == TRUE){ suppressWarnings( st_write(obj = sp_range, dsn = directory, layer = paste(df$species[l],"_",df$gid[l],sep=""), driver = "ESRI Shapefile", append = FALSE, quiet=TRUE)) }else{ suppressWarnings( st_write(obj = sp_range, dsn = directory, layer = paste(df$species[l]), driver = "ESRI Shapefile", append = FALSE, quiet=TRUE)) } #save output }#for species in df loop if(return.species.list){ return(df$species) }#if return.species.list }#else }#species names only if statement if(species.names.only == TRUE){ query <- paste("SELECT b.species AS focal_species, a.species AS intersecting_species FROM ranges AS a, (SELECT species, geom FROM ranges WHERE species in (",paste(shQuote(species, type = "sh"),collapse = ', '),")) b WHERE", focal.query," ST_Intersects(a.geom, b.geom) ;") # create query to retrieve df <- .BIEN_sql(query, ...) if(length(df) == 0){ message("No species found") }else{ return(df) } } #species.names.only == TRUE } ####################################### #'Download range maps that intersect a user-supplied sf object. #' #'BIEN_ranges_sf extracts range maps that intersect a specified simple features (sf) object. #' @param sf An object of class sf. #' @param crop.ranges Should the ranges be cropped to the focal area? Default is FALSE. #' @template ranges_spatial #' @return All range maps that intersect the user-supplied sf object. #' @examples \dontrun{ #' #' # Here we use a range map as our example polygon #' #' BIEN_ranges_species("Carnegiea gigantea") #saves ranges to the current working directory #' #' # Read in the polygon with sf #' sf <- sf::st_read(dsn = ".", #' layer = "Carnegiea_gigantea") #' #' BIEN_ranges_sf(sf = sf, #' limit = 10) #' # We use the limit argument to return only 10 range maps. #' # Omit the limit argument to get all ranges #' #' #Note that this will save many shapefiles to the working directory. 
#' } #' @family range functions #' @importFrom sf st_geometry st_as_text st_as_sf st_write #' @export BIEN_ranges_sf <- function(sf, directory = NULL, species.names.only = FALSE, return.species.list = TRUE, crop.ranges = FALSE, include.gid = FALSE, ...){ .is_log(return.species.list) .is_log(species.names.only) .is_log(crop.ranges) .is_log(include.gid) wkt <- sf |> st_geometry() |> st_as_text() if(species.names.only == FALSE){ #set directory for saving if(is.null(directory)){ directory <- getwd() } # set the query if(crop.ranges){ query <- paste("SELECT ST_AsText(ST_intersection(geom,ST_GeographyFromText('SRID=4326;",paste(wkt),"'))),species,gid FROM ranges WHERE st_intersects(ST_GeographyFromText('SRID=4326;",paste(wkt),"'),geom) ;") }else{ query <- paste("SELECT ST_AsText(geom),species,gid FROM ranges WHERE st_intersects(ST_GeographyFromText('SRID=4326;",paste(wkt),"'),geom) ;") } # create query to retrieve df <- .BIEN_sql(query, ...) if(length(df) == 0){ message("No species matched") }else{ for(l in 1:length(df$species)){ sp_range <- st_as_sf(x = df[l,, drop = FALSE], wkt = "st_astext", crs = "epsg:4326") if(include.gid == TRUE){ st_write(obj = sp_range, dsn = directory, layer = paste(df$species[l],"_",df$gid[l],sep=""), driver = "ESRI Shapefile", append = FALSE, quiet=TRUE) }else{ st_write(obj = sp_range, dsn = directory, layer = paste(df$species[l]), driver = "ESRI Shapefile", append = FALSE, quiet=TRUE) } #save output }#for species in df loop }#else }#species names only if statement if(species.names.only == TRUE){ query <- paste("SELECT species FROM ranges WHERE st_intersects(ST_GeographyFromText('SRID=4326;",paste(wkt),"'),geom) ;") # create query to retrieve df <- .BIEN_sql(query, ...) if(length(df) == 0){ message("No species found") }else{ return(df) } } #species.names.only == TRUE } ####################################### #'Load range maps for specified species. #' #'BIEN_ranges_load_species returns spatial data for the specified species. #' @param species A single species or a vector of species. #' @param ... Additional arguments passed to internal functions. #' @return A sf containing range maps for the specified species. #' @examples \dontrun{ #' library(maps) #' species_vector<-c("Abies_lasiocarpa","Abies_amabilis") #' abies_maps <- BIEN_ranges_load_species(species = species_vector) #' xanthium_strumarium <- BIEN_ranges_load_species(species = "Xanthium strumarium") #' #' #Plotting files #' plot(abies_maps) # plots the sf, but doesn't mean much without any reference #' map('world', fill = TRUE, col = "grey")#plots a world map (WGS84 projection), in grey #' plot(xanthium_strumarium,col="forest green",add = TRUE) #adds the range of X. strumarium #' plot(abies_maps[1,], add = TRUE, col ="light green") #' } #' @family range functions #' @importFrom sf st_as_sf #' @export BIEN_ranges_load_species <- function(species, ...){ .is_char(species) #make sure there are no spaces in the species names species <- gsub(" ","_",species) # set the query query <- paste("SELECT ST_AsText(geom) as geometry,species,gid FROM ranges WHERE species in (", paste(shQuote(species, type = "sh"),collapse = ', '), ") ORDER BY species ;") # create query to retrieve df <- .BIEN_sql(query, ...) 
#df <- .BIEN_sql(query)

  if(length(df) == 0){
    message("No species matched")
    return(invisible(NULL))
  }else{
    poly <- st_as_sf(x = df, wkt = "geometry", crs = "epsg:4326")
    return(poly)
  }#else
}

###############################

#'List available range maps
#'
#'BIEN_ranges_list returns a data.frame listing all range maps currently available.
#' @param ... Additional arguments passed to internal functions.
#' @return A data.frame containing the available species and their associated GIDs.
#' @examples \dontrun{
#' available_maps<-BIEN_ranges_list()}
#' @family range functions
#' @family metadata functions
#' @export
BIEN_ranges_list <- function( ...){

  # set the query
  query <- paste("SELECT species,gid FROM ranges ORDER BY species ;")

  # create query to retrieve
  return(.BIEN_sql(query, ...))

}

########################################

#'Extract range data and convert to smaller "skinny" format
#'
#'BIEN_ranges_shapefile_to_skinny converts ranges to a "skinny" format to save space.
#' @param directory The directory where range shapefiles will be stored. If NULL, a temporary directory will be used.
#' @param raster A raster (which must have a CRS specified) to be used for rasterizing the ranges.
#' @param skinny_ranges_file A filename to which the skinny ranges will be written (RDS format). If NULL, this will not be written.
#' @return Matrix containing 2 columns: 1) Species name; and 2) the raster cell number it occurs within.
#' @examples \dontrun{
#' BIEN_ranges_shapefile_to_skinny(directory = BIEN_ranges_species_bulk(species = c("Acer rubrum")),
#' raster = terra::rast(crs = "+proj=laea +lat_0=15 +lon_0=-80 +x_0=0 +y_0=0 +datum=WGS84
#' +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0",
#' extent = terra::ext(c(-5261554,5038446,-7434988,7165012)),
#' resolution = c(100000,100000)
#' )
#' )
#' }
#' @family range functions
#' @importFrom sf read_sf st_transform st_crs
#' @importFrom fasterize fasterize
#' @importFrom raster raster
#' @importFrom terra values
#' @export
BIEN_ranges_shapefile_to_skinny <- function(directory, raster, skinny_ranges_file = NULL){

  range_maps <- list.files(path = directory, pattern = ".shp", full.names = TRUE, recursive = TRUE)

  skinny_occurrences <- NULL

  raster <- raster(raster) #can be removed once fasterize is updated to include terra

  for(i in range_maps){
    #print(i)
    raster_i <- read_sf(i) |>
      st_transform(crs = st_crs(raster)) |>
      fasterize(raster = raster, fun = "any")

    if(length(which(values(raster_i) > 0)) > 0){
      skinny_occurrences <- rbind(skinny_occurrences,
                                  cbind(read_sf(i)$Species, which(values(raster_i) > 0)))
    }#end if statement
  }#end i loop

  #Save skinny occurrences if filename specified
  if(!is.null(skinny_ranges_file)){
    saveRDS(object = skinny_occurrences, file = skinny_ranges_file)
  }

  #return skinny occurrences
  return(skinny_occurrences)
}#end fx

########################################

#'Build a richness raster from a skinny range file
#'
#'BIEN_ranges_skinny_ranges_to_richness_raster takes in "skinny" range data and converts it to a richness raster.
#' @param skinny_ranges A matrix output by the function \code{BIEN_ranges_shapefile_to_skinny} or equivalent methods.
#' @param raster The raster that was used in building the skinny_ranges matrix.
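#' @details
#' The skinny format is a two-column matrix of (species, cell number), so
#' richness in a cell is simply the number of distinct species recorded for
#' that cell. A toy illustration of that counting step (made-up data, no
#' download needed):
#' \preformatted{
#' skinny <- cbind(c("sp1", "sp2", "sp1"), c(5, 5, 9))
#' tapply(skinny[, 1], skinny[, 2], function(x) length(unique(x)))
#' #> 5 9
#' #> 2 1
#' }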
#' @return Raster
#' @examples \dontrun{
#'
#' template_raster <- terra::rast(
#'  crs = "+proj=laea +lat_0=15 +lon_0=-80 +x_0=0 +y_0=0 +datum=WGS84
#' +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0",
#'  extent = terra::ext(c(-5261554, 5038446, -7434988, 7165012)),
#'  resolution = c(100000, 100000))
#'
#' #Download ranges and convert to a "skinny" format
#' skinny_ranges <- BIEN_ranges_shapefile_to_skinny(
#'  directory = BIEN_ranges_species_bulk(species = c("Acer rubrum")),
#'  raster = template_raster)
#'
#' #Convert from skinny format to richness raster
#' richness_raster <- BIEN_ranges_skinny_ranges_to_richness_raster(
#'  skinny_ranges = skinny_ranges, raster = template_raster)
#'
#' plot(richness_raster)
#' }
#' @family range functions
#' @export
#' @importFrom terra values
BIEN_ranges_skinny_ranges_to_richness_raster <- function(skinny_ranges, raster){

  #Create empty output raster
  output_raster <- raster
  terra::values(output_raster) <- NA #note that if this "terra::" is omitted, cran checks raise a note

  #iterate through all cells with at least one occurrence, record
  output_raster[as.numeric(unique(skinny_ranges[,2]))] <-
    sapply(X = unique(skinny_ranges[,2]),
           FUN = function(x){ length(unique(skinny_ranges[which(skinny_ranges[,2] == x), 1]))} )

  #return output raster
  return(output_raster)
}

########################################
########################################

#'Download trait data for given species.
#'
#'BIEN_trait_species extracts trait data for the species specified.
#' @param species A single species or a vector of species.
#' @template trait
#' @return A dataframe of all available trait data for the given species.
#' @examples \dontrun{
#' BIEN_trait_species("Poa annua")
#' species_vector<-c("Poa annua","Juncus trifidus")
#' BIEN_trait_species(species_vector)}
#' @family trait functions
#' @export
BIEN_trait_species <- function(species, all.taxonomy = FALSE, political.boundaries = FALSE, source.citation = FALSE, ...){

  .is_char(species)
  .is_log(all.taxonomy)
  .is_log(political.boundaries)
  .is_log(source.citation)

  # set the query
  taxonomy_ <- .taxonomy_check_traits(all.taxonomy)
  political_ <- .political_check_traits(political.boundaries)
  source_ <- .source_check_traits(source.citation)

  query <- paste("SELECT scrubbed_species_binomial, trait_name, trait_value, unit, method, latitude, longitude, elevation_m, url_source",source_$select ,", project_pi, project_pi_contact", political_$select, taxonomy_$select,", access, id
                 FROM agg_traits
                 WHERE scrubbed_species_binomial in (", paste(shQuote(species, type = "sh"),collapse = ', '), ") ;")

  return(.BIEN_sql(query, ...))

}

############################

#'Calculates species mean values for a given trait, using Genus or Family level data where Species level data is lacking.
#'
#'BIEN_trait_mean estimates species mean values for a given trait, using Genus or Family level data where Species level data is absent.
#' @param species A single species or a vector of species.
#' @param trait A single trait.
#' @param ... Additional arguments passed to internal functions.
#' @note Trait spelling needs to be exact and case-sensitive, see \code{\link{BIEN_trait_list}} for a list of traits.
#' @return A dataframe of estimated trait means and associated metadata for the given species.
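#' @details
#' Values are filled in from the lowest taxonomic level with data:
#' species-level records are preferred, then genus, then family; the level
#' actually used is reported in the \code{level_used} column of the output.
#' A minimal sketch (assumes a live connection):
#' \preformatted{
#' ldmc <- BIEN_trait_mean(species = "Poa annua",
#'                         trait = "leaf dry mass per leaf fresh mass")
#' ldmc$level_used
#' }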
#' @examples \dontrun{
#' BIEN_trait_mean(species=c("Poa annua","Juncus trifidus"),trait="leaf dry mass per leaf fresh mass")
#' }
#' @family trait functions
#' @export
BIEN_trait_mean <- function(species,
                            trait,
                            ...){

  #first, get taxonomic info for the species

  .is_char(trait)
  .is_char(species)

  #make sure there is only one trait
  if(length(trait) > 1){stop("Multiple traits submitted. This function only handles one trait at a time.")}

  #make sure trait exists
  traits_available <- BIEN_trait_list(...)
  if(!trait %in% traits_available$trait_name){stop("Trait not found.")}

  # create query to retrieve taxonomic info
  genera <- unlist(lapply(X = strsplit(species, " "), FUN = function(x){x[1]}))

  query <- paste("SELECT DISTINCT scrubbed_family,scrubbed_genus,scrubbed_species_binomial
                 FROM bien_taxonomy
                 WHERE scrubbed_species_binomial in (", paste(shQuote(species, type = "sh"), collapse = ', '), ")
                 or scrubbed_genus in (", paste(shQuote(genera, type = "sh"), collapse = ', '), ") ;")

  taxonomy_for_traits <- .BIEN_sql(query, ...)
  #taxonomy_for_traits <- .BIEN_sql(query)

  if(length(taxonomy_for_traits) == 0){stop("Taxonomic data missing, check species name(s)")}

  #then, query the various taxonomic levels to get trait data

  #old query <- paste("SELECT * FROM agg_traits WHERE trait_name in (", paste(shQuote(trait, type = "sh"),collapse = ', '), ") AND family in (", paste(shQuote(unique(taxonomy_for_traits$scrubbed_family) , type = "sh"),collapse = ', '), ") ORDER BY family,taxon,trait_name ;")

  query <- paste("SELECT *
                 FROM agg_traits
                 WHERE trait_name in (", paste(shQuote(trait, type = "sh"), collapse = ', '), ")
                 AND (scrubbed_family in (", paste(shQuote(unique(taxonomy_for_traits$scrubbed_family), type = "sh"), collapse = ', '), ")
                 or scrubbed_genus in (", paste(shQuote(unique(taxonomy_for_traits$scrubbed_genus), type = "sh"), collapse = ', '), "))
                 ORDER BY scrubbed_family,scrubbed_species_binomial,trait_name ;")

  traits_df <- suppressWarnings(.BIEN_sql(query, ...)) #suppress warnings to avoid the geom message
  #traits_df <- suppressWarnings(.BIEN_sql(query))

  if(length(traits_df) == 0){stop("No matching trait data for these taxa.")}

  #finally, choose the best available trait data

  output_data <- NULL

  for(i in 1:length(species)){

    species_i_data <- list()

    species_i_data[[1]] <- cbind(traits_df$trait_value[which(traits_df$scrubbed_species_binomial == species[i])],
                                 traits_df$id[which(traits_df$scrubbed_species_binomial == species[i])])

    species_i_data[[2]] <- cbind(traits_df$trait_value[which(traits_df$scrubbed_genus == taxonomy_for_traits$scrubbed_genus[which(taxonomy_for_traits$scrubbed_species_binomial == species[i])])],
                                 traits_df$id[which(traits_df$scrubbed_genus == taxonomy_for_traits$scrubbed_genus[which(taxonomy_for_traits$scrubbed_species_binomial == species[i])])])

    if(length(species_i_data[[2]]) == 0){

      species_i_data[[2]] <- cbind(traits_df$trait_value[which(traits_df$scrubbed_genus == strsplit(species[i], " ")[[1]][1])],
                                   traits_df$id[which(traits_df$scrubbed_genus == strsplit(species[i], " ")[[1]][1])])

    }

    #species_i_data[[3]]<-traits_df$trait_value[which(traits_df$family==taxonomy_for_traits$scrubbed_family[i])]

    species_i_data[[3]] <- cbind(traits_df$trait_value[which(traits_df$scrubbed_family == taxonomy_for_traits$scrubbed_family[which(taxonomy_for_traits$scrubbed_species_binomial == species[i])])],
                                 traits_df$id[which(traits_df$scrubbed_family == taxonomy_for_traits$scrubbed_family[which(taxonomy_for_traits$scrubbed_species_binomial == species[i])])])

    if(length(species_i_data[[3]]) == 0){
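
      # The species itself may be absent from the taxonomy table; in that case,
      # fall back to the family associated with the genus name instead.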
      species_i_data[[3]] <- cbind(traits_df$trait_value[which(traits_df$scrubbed_family == unique(taxonomy_for_traits$scrubbed_family[which(taxonomy_for_traits$scrubbed_genus == strsplit(species[i], " ")[[1]][1])]))],
                                   traits_df$id[which(traits_df$scrubbed_family == unique(taxonomy_for_traits$scrubbed_family[which(taxonomy_for_traits$scrubbed_genus == strsplit(species[i], " ")[[1]][1])]))])

    }

    species_i_data[[4]] <- "NA"
    names(species_i_data) <- c("Species", "Genus", "Family", "NA")
    species_i_data <- species_i_data[which(lengths(species_i_data) > 0)] #prunes list to include only taxonomic levels with data

    #trait_mean <- species_i_data[1]

    if(length(species_i_data) > 0){

      level_used <- names(species_i_data[1])
      if(species_i_data[[1]][1] == "NA"){sample_size <- 0}else{sample_size <- nrow(species_i_data[[1]])}
      if(species_i_data[[1]][1] == "NA"){mean_value <- "NA"}else{mean_value <- mean(as.numeric(species_i_data[[1]][,1]), na.rm = TRUE)}
      if(species_i_data[[1]][1] == "NA"){ids <- "NA"}else{ids <- paste(as.numeric(species_i_data[[1]][,2]), collapse = ",")}
      unit <- unique(traits_df$unit)

      output_data <- rbind(output_data,
                           cbind(species[i], mean_value, trait, unit, level_used, sample_size, ids))

    }#if data is available

  }#i loop

  colnames(output_data)[1] <- "species"
  output_data <- as.data.frame(output_data)
  return(output_data)

}

############################

#'Download all measurements of specific trait(s).
#'
#'BIEN_trait_trait downloads all measurements of the trait(s) specified.
#' @param trait A single trait or a vector of traits.
#' @template trait
#' @note Trait spelling needs to be exact and case-sensitive, see \code{\link{BIEN_trait_list}} for a list of traits.
#' @return A dataframe of all available trait data for the given trait(s).
#' @examples \dontrun{
#' BIEN_trait_trait("whole plant height")
#' trait_vector<-c("whole plant height", "leaf dry mass per leaf fresh mass")
#' BIEN_trait_trait(trait_vector)}
#' @family trait functions
#' @export
BIEN_trait_trait <- function(trait,
                             all.taxonomy = FALSE,
                             political.boundaries = FALSE,
                             source.citation = FALSE,
                             ...){

  .is_char(trait)
  .is_log(all.taxonomy)
  .is_log(political.boundaries)
  .is_log(source.citation)

  # set the query
  taxonomy_ <- .taxonomy_check_traits(all.taxonomy)
  political_ <- .political_check_traits(political.boundaries)
  source_ <- .source_check_traits(source.citation)

  query <- paste("SELECT scrubbed_species_binomial, trait_name, trait_value, unit, method, latitude, longitude, elevation_m, url_source",
                 source_$select, ", project_pi, project_pi_contact", political_$select, taxonomy_$select, ", access, id
                 FROM agg_traits
                 WHERE trait_name in (", paste(shQuote(trait, type = "sh"), collapse = ', '), ") ;")

  return(.BIEN_sql(query, ...))

}

############################

#'Download trait data for given species and trait.
#'
#'BIEN_trait_traitbyspecies extracts entries that contain the specified species and trait(s).
#' @param species A single species or a vector of species.
#' @param trait A single trait or a vector of traits.
#' @template trait
#' @note Trait spelling needs to be exact and case-sensitive, see \code{\link{BIEN_trait_list}} for a list of traits.
#' @return A dataframe of all data matching the specified trait(s) and species.
#' @examples \dontrun{
#' BIEN_trait_traitbyspecies(trait = "whole plant height", species = "Carex capitata")
#' trait_vector<-c("whole plant height", "leaf area")
#' species_vector<-c("Carex capitata","Betula nana")
#' BIEN_trait_traitbyspecies(trait=trait_vector,species=species_vector)}
#' @family trait functions
#' @export
BIEN_trait_traitbyspecies <- function(species,
                                      trait,
                                      all.taxonomy = FALSE,
                                      political.boundaries = FALSE,
                                      source.citation = FALSE,
                                      ...){

  .is_char(species)
  .is_char(trait)
  .is_log(all.taxonomy)
  .is_log(political.boundaries)
  .is_log(source.citation)

  # set the query
  taxonomy_ <- .taxonomy_check_traits(all.taxonomy)
  political_ <- .political_check_traits(political.boundaries)
  source_ <- .source_check_traits(source.citation)

  query <- paste("SELECT scrubbed_species_binomial, trait_name, trait_value, unit, method, latitude, longitude, elevation_m, url_source",
                 source_$select, ", project_pi, project_pi_contact", political_$select, taxonomy_$select, ", access, id
                 FROM agg_traits
                 WHERE scrubbed_species_binomial in (", paste(shQuote(species, type = "sh"), collapse = ', '), ")
                 AND trait_name in (", paste(shQuote(trait, type = "sh"), collapse = ', '), ") ;")

  return(.BIEN_sql(query, ...))

}

###########################

#'Download trait data for given genus/genera and trait(s).
#'
#'BIEN_trait_traitbygenus extracts entries that contain the specified genus/genera and trait(s).
#' @param genus A single genus or a vector of genera.
#' @param trait A single trait or a vector of traits.
#' @template trait
#' @note Trait spelling needs to be exact and case-sensitive, see \code{\link{BIEN_trait_list}} for a list of traits.
#' @return A dataframe of all data matching the specified trait(s) and genus/genera.
#' @examples \dontrun{
#' BIEN_trait_traitbygenus(trait = "whole plant height", genus = "Carex")
#' trait_vector<-c("whole plant height", "leaf area")
#' genus_vector<-c("Carex","Betula")
#' BIEN_trait_traitbygenus(trait=trait_vector,genus=genus_vector)}
#' @family trait functions
#' @export
BIEN_trait_traitbygenus <- function(genus,
                                    trait,
                                    all.taxonomy = FALSE,
                                    political.boundaries = FALSE,
                                    source.citation = FALSE,
                                    ...){

  .is_char(genus)
  .is_char(trait)
  .is_log(all.taxonomy)
  .is_log(political.boundaries)
  .is_log(source.citation)

  # set the query
  taxonomy_ <- .taxonomy_check_traits(all.taxonomy)
  political_ <- .political_check_traits(political.boundaries)
  source_ <- .source_check_traits(source.citation)

  query <- paste("SELECT scrubbed_genus, scrubbed_species_binomial, trait_name, trait_value, unit, method, latitude, longitude, elevation_m, url_source",
                 source_$select, ", project_pi, project_pi_contact", political_$select, taxonomy_$select, ", access, id
                 FROM agg_traits
                 WHERE scrubbed_genus in (", paste(shQuote(genus, type = "sh"), collapse = ', '), ")
                 AND trait_name in (", paste(shQuote(trait, type = "sh"), collapse = ', '), ") ;")

  return(.BIEN_sql(query, ...))

}

###########################

#'Download trait data for given families and traits.
#'
#'BIEN_trait_traitbyfamily extracts entries that contain the specified families and trait(s).
#' @param family A single family or a vector of families.
#' @param trait A single trait or a vector of traits.
#' @template trait
#' @note Trait spelling needs to be exact and case-sensitive, see \code{\link{BIEN_trait_list}} for a list of traits.
#' @return A dataframe of all data matching the specified trait(s) and family/families.
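#' @note All supplied traits are returned for all supplied families; the trait and family vectors are not matched pairwise.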
#' @examples \dontrun{
#' BIEN_trait_traitbyfamily(trait = "whole plant height", family = "Poaceae")
#' trait_vector <- c("whole plant height", "leaf fresh mass")
#' family_vector <- c("Orchidaceae","Poaceae")
#' BIEN_trait_traitbyfamily(trait = trait_vector, family = family_vector)}
#' @family trait functions
#' @export
BIEN_trait_traitbyfamily <- function(family,
                                     trait,
                                     all.taxonomy = FALSE,
                                     political.boundaries = FALSE,
                                     source.citation = FALSE,
                                     ...){

  .is_char(family)
  .is_char(trait)
  .is_log(all.taxonomy)
  .is_log(political.boundaries)
  .is_log(source.citation)

  # set the query
  taxonomy_ <- .taxonomy_check_traits(all.taxonomy)
  political_ <- .political_check_traits(political.boundaries)
  source_ <- .source_check_traits(source.citation)

  query <- paste("SELECT scrubbed_family, scrubbed_genus, scrubbed_species_binomial, trait_name, trait_value, unit, method, latitude, longitude, elevation_m, url_source",
                 source_$select, ", project_pi, project_pi_contact", political_$select, taxonomy_$select, ", access, id
                 FROM agg_traits
                 WHERE scrubbed_family in (", paste(shQuote(family, type = "sh"), collapse = ', '), ")
                 AND trait_name in (", paste(shQuote(trait, type = "sh"), collapse = ', '), ") ;")

  return(.BIEN_sql(query, ...))

}

############################

#'Download trait data for given genera.
#'
#'BIEN_trait_genus extracts entries that contain the specified genera.
#' @param genus A single genus or a vector of genera.
#' @template trait
#' @return A dataframe of all data matching the specified genera.
#' @examples \dontrun{
#' BIEN_trait_genus("Acer")
#' genus_vector <- c("Acer","Abies")
#' BIEN_trait_genus(genus_vector)}
#' @family trait functions
#' @export
BIEN_trait_genus <- function(genus,
                             all.taxonomy = FALSE,
                             political.boundaries = FALSE,
                             source.citation = FALSE,
                             ...){

  .is_char(genus)
  .is_log(all.taxonomy)
  .is_log(political.boundaries)
  .is_log(source.citation)

  # set the query
  taxonomy_ <- .taxonomy_check_traits(all.taxonomy)
  political_ <- .political_check_traits(political.boundaries)
  source_ <- .source_check_traits(source.citation)

  query <- paste("SELECT scrubbed_genus, scrubbed_species_binomial, trait_name, trait_value, unit, method, latitude, longitude, elevation_m, url_source",
                 source_$select, ", project_pi, project_pi_contact", political_$select, taxonomy_$select, ", access, id
                 FROM agg_traits
                 WHERE scrubbed_genus in (", paste(shQuote(genus, type = "sh"), collapse = ', '), ") ;")

  return(.BIEN_sql(query, ...))

}

###########################

#'Download trait data for given families.
#'
#'BIEN_trait_family extracts all trait data for the specified families.
#' @param family A single family or a vector of families.
#' @template trait
#' @return A dataframe of all data matching the specified families.
#' @examples \dontrun{
#' BIEN_trait_family("Poaceae")
#' family_vector<-c("Poaceae","Orchidaceae")
#' BIEN_trait_family(family_vector)}
#' @family trait functions
#' @export
BIEN_trait_family <- function(family,
                              all.taxonomy = FALSE,
                              political.boundaries = FALSE,
                              source.citation = FALSE,
                              ...){

  .is_char(family)
  .is_log(all.taxonomy)
  .is_log(political.boundaries)
  .is_log(source.citation)

  # set the query
  taxonomy_ <- .taxonomy_check_traits(all.taxonomy)
  political_ <- .political_check_traits(political.boundaries)
  source_ <- .source_check_traits(source.citation)

  query <- paste("SELECT scrubbed_family, scrubbed_genus, scrubbed_species_binomial, trait_name, trait_value, unit, method, latitude, longitude, elevation_m, url_source",
                 source_$select, ", project_pi, project_pi_contact", political_$select, taxonomy_$select, ", access, id
                 FROM agg_traits
                 WHERE scrubbed_family in (", paste(shQuote(family, type = "sh"), collapse = ', '), ") ;")

  return(.BIEN_sql(query, ...))

}

############################

#'List all available types of trait data
#'
#'BIEN_trait_list produces a dataframe of all available types of trait data.
#' @param ... Additional arguments passed to internal functions.
#' @return A dataframe containing all currently available types of trait data and details on measurement.
#' @examples \dontrun{
#' BIEN_trait_list()}
#' @family trait functions
#' @export
BIEN_trait_list <- function(...){

  # set the query
  query <- paste("SELECT DISTINCT trait_name FROM agg_traits ORDER BY trait_name ;")

  return(.BIEN_sql(query, ...))

}

###################################

#'Download trait data for given country.
#'
#'BIEN_trait_country extracts trait data for the given country or countries.
#' @param country A single country or a vector of countries.
#' @param trait.name Optional. The trait or traits you want returned. If left blank, all traits will be returned.
#' @template trait
#' @return A dataframe of all available trait data for the given country.
#' @examples \dontrun{
#' BIEN_trait_country("South Africa")
#' BIEN_trait_country(country="South Africa",trait.name="whole plant growth form")}
#' @family trait functions
#' @export
BIEN_trait_country <- function(country,
                               trait.name = NULL,
                               all.taxonomy = FALSE,
                               political.boundaries = TRUE,
                               source.citation = FALSE,
                               ...){

  .is_char(country)
  .is_log(all.taxonomy)
  .is_log(political.boundaries)
  .is_log(source.citation)
  .is_char(trait.name)

  # set the query
  taxonomy_ <- .taxonomy_check_traits(all.taxonomy)
  political_ <- .political_check_traits(political.boundaries)
  source_ <- .source_check_traits(source.citation)

  if(!is.null(trait.name)){

    trait_select <- paste(" AND", "trait_name in (", paste(shQuote(trait.name, type = "sh"), collapse = ', '), ") ")

  }else{trait_select <- ""}

  query <- paste("SELECT scrubbed_species_binomial, trait_name, trait_value, unit, method, latitude, longitude, elevation_m, url_source",
                 source_$select, ", project_pi, project_pi_contact", political_$select, taxonomy_$select, ", access, id
                 FROM agg_traits
                 WHERE country in (", paste(shQuote(country, type = "sh"), collapse = ', '), ")", trait_select, " ;")

  return(.BIEN_sql(query, ...))

}

#############################

#'Count the number of (geovalid) occurrence records for each species in BIEN
#'
#'BIEN_occurrence_records_per_species downloads a count of the number of geovalidated occurrence records for each species in the BIEN database.
#' @param species A single species, or vector of species. If NULL, the default, it will return counts for all species.
#' @param ... Additional arguments passed to internal functions.
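#' @note When a species vector is supplied, counts additionally exclude centroid records and are restricted to plot, specimen, literature, and checklist records; counts for all species (species = NULL) apply only the geovalidation and coordinate filters.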
#' @return A dataframe listing the number of geovalidated occurrence records for each species in the BIEN database.
#' @examples \dontrun{
#' occurrence_counts<-BIEN_occurrence_records_per_species()}
#' @family occurrence functions
#' @export
BIEN_occurrence_records_per_species <- function(species = NULL,
                                                ...){

  if(is.null(species)){

    # set the query
    query <- paste("SELECT DISTINCT scrubbed_species_binomial,count(*)
                   FROM view_full_occurrence_individual
                   WHERE is_geovalid = 1 AND latitude IS NOT NULL AND LONGITUDE IS NOT NULL
                   GROUP BY scrubbed_species_binomial ;")

  }

  if(is.character(species)){

    query <- paste("SELECT scrubbed_species_binomial,count(*)
                   FROM view_full_occurrence_individual
                   WHERE scrubbed_species_binomial in (", paste(shQuote(species, type = "sh"), collapse = ', '), ")
                   AND is_geovalid = 1
                   AND (georef_protocol is NULL OR georef_protocol<>'county centroid')
                   AND (is_centroid IS NULL OR is_centroid=0)
                   AND observation_type IN ('plot','specimen','literature','checklist')
                   GROUP BY scrubbed_species_binomial ;")

  }

  return(.BIEN_sql(query, ...))

}

###############################################

#'Count the number of trait observations for each species in the BIEN database
#'
#'BIEN_trait_traits_per_species downloads a count of the number of records for each trait for each species in the BIEN database.
#' @param species Optional species or vector of species. If left blank, returns counts for all species.
#' @param ... Additional arguments passed to internal functions.
#' @return Returns a dataframe containing the number of trait records for each species in the BIEN database.
#' @examples \dontrun{
#' trait_observation_counts<-BIEN_trait_traits_per_species()}
#' @family trait functions
#' @export
BIEN_trait_traits_per_species <- function(species = NULL,
                                          ...){

  if(!is.null(species)){

    species_query <- paste("WHERE scrubbed_species_binomial in (", paste(shQuote(species, type = "sh"), collapse = ', '), ")")

  }else{

    species_query <- ""

  }

  # set the query
  query <- paste("SELECT DISTINCT scrubbed_species_binomial, trait_name,count(*)
                 FROM agg_traits",
                 species_query,
                 "GROUP BY trait_name,scrubbed_species_binomial
                 ORDER BY scrubbed_species_binomial,trait_name ;")

  return(.BIEN_sql(query, ...))

}

################################

#Plot queries

##############################

#'Download plot data from a given datasource.
#'
#'BIEN_plot_datasource downloads all plot data from a given datasource.
#' @param datasource A datasource. See \code{\link{BIEN_plot_list_datasource}} for options.
#' @template plot
#' @return A dataframe containing all data from the specified datasource.
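#' @note Datasource names are case-sensitive; available datasources can be listed with \code{\link{BIEN_plot_list_datasource}}.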
#' @examples \dontrun{
#' BIEN_plot_datasource("SALVIAS")}
#' @family plot functions
#' @export
BIEN_plot_datasource <- function(datasource,
                                 cultivated = FALSE,
                                 new.world = NULL,
                                 all.taxonomy = FALSE,
                                 native.status = FALSE,
                                 natives.only = TRUE,
                                 political.boundaries = FALSE,
                                 collection.info = FALSE,
                                 all.metadata = FALSE,
                                 ...){

  .is_log(cultivated)
  .is_log_or_null(new.world)
  .is_log(all.taxonomy)
  .is_char(datasource)
  .is_log(native.status)
  .is_log(natives.only)
  .is_log(political.boundaries)
  .is_log(collection.info)
  .is_log(all.metadata)

  #set conditions for query
  cultivated_ <- .cultivated_check_plot(cultivated)
  newworld_ <- .newworld_check_plot(new.world)
  taxonomy_ <- .taxonomy_check_plot(all.taxonomy)
  native_ <- .native_check_plot(native.status)
  natives_ <- .natives_check_plot(natives.only)
  political_ <- .political_check_plot(political.boundaries)
  collection_ <- .collection_check_plot(collection.info)
  md_ <- .md_check_plot(all.metadata)

  # set the query
  query <- paste("SELECT view_full_occurrence_individual.plot_name,view_full_occurrence_individual.subplot, view_full_occurrence_individual.elevation_m,
                 view_full_occurrence_individual.plot_area_ha,view_full_occurrence_individual.sampling_protocol, view_full_occurrence_individual.recorded_by,
                 view_full_occurrence_individual.scrubbed_species_binomial, view_full_occurrence_individual.individual_count",
                 taxonomy_$select, political_$select, native_$select, " ,view_full_occurrence_individual.latitude,
                 view_full_occurrence_individual.longitude,view_full_occurrence_individual.date_collected,view_full_occurrence_individual.datasource,
                 view_full_occurrence_individual.dataset,view_full_occurrence_individual.dataowner,view_full_occurrence_individual.custodial_institution_codes,
                 view_full_occurrence_individual.collection_code,view_full_occurrence_individual.datasource_id",
                 collection_$select, cultivated_$select, newworld_$select, md_$select, "
                 FROM
                 (SELECT * FROM view_full_occurrence_individual
                 WHERE view_full_occurrence_individual.datasource in (", paste(shQuote(datasource, type = "sh"), collapse = ', '), ")",
                 cultivated_$query, newworld_$query, natives_$query, "
                 AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi')
                 AND (view_full_occurrence_individual.is_geovalid = 1 )
                 AND (view_full_occurrence_individual.georef_protocol is NULL OR view_full_occurrence_individual.georef_protocol<>'county centroid')
                 AND (view_full_occurrence_individual.is_centroid IS NULL OR view_full_occurrence_individual.is_centroid=0)
                 AND observation_type='plot'
                 AND scrubbed_species_binomial IS NOT NULL ) as view_full_occurrence_individual
                 JOIN plot_metadata ON
                 (view_full_occurrence_individual.plot_metadata_id=plot_metadata.plot_metadata_id) ;")

  # create query to retrieve
  return(.BIEN_sql(query, ...))

}

##################################

#'List available datasources.
#'
#'BIEN_plot_list_datasource lists all plot datasources in the BIEN database.
#' @param ... Additional arguments passed to internal functions.
#' @return A vector of available datasources.
#' @examples \dontrun{
#' BIEN_plot_list_datasource()}
#' @family plot functions
#' @export
BIEN_plot_list_datasource <- function(...){

  query <- paste("SELECT DISTINCT plot_metadata.datasource FROM plot_metadata ;")

  return(.BIEN_sql(query, ...))

}

###############################

#'Download plot data from specified countries.
#'
#'BIEN_plot_country downloads all plot data from specified countries.
#' @param country A country or vector of countries.
#' @param country.code A single country code or a vector of country codes equal in length to the vector of states/province codes.
#' @template plot
#' @return A dataframe containing all data from the specified countries.
#' @note Political division (or political division code) spelling needs to be exact and case-sensitive, see \code{\link{BIEN_metadata_list_political_names}} for a list of political divisions and associated codes.
#' @examples \dontrun{
#' BIEN_plot_country("Costa Rica")
#' BIEN_plot_country(c("Costa Rica","Panama"))}
#' @family plot functions
#' @export
BIEN_plot_country <- function(country = NULL,
                              country.code = NULL,
                              cultivated = FALSE,
                              new.world = NULL,
                              all.taxonomy = FALSE,
                              native.status = FALSE,
                              natives.only = TRUE,
                              political.boundaries = FALSE,
                              collection.info = FALSE,
                              all.metadata = FALSE,
                              ...){

  .is_char(country.code)
  .is_log(cultivated)
  .is_log_or_null(new.world)
  .is_log(all.taxonomy)
  .is_char(country)
  .is_log(native.status)
  .is_log(natives.only)
  .is_log(political.boundaries)
  .is_log(collection.info)
  .is_log(all.metadata)

  if(is.null(country) & is.null(country.code)){stop("Please supply either a country name or 2-digit ISO code")}

  #set conditions for query
  cultivated_ <- .cultivated_check_plot(cultivated)
  newworld_ <- .newworld_check_plot(new.world)
  taxonomy_ <- .taxonomy_check_plot(all.taxonomy)
  native_ <- .native_check_plot(native.status)
  natives_ <- .natives_check_plot(natives.only)
  collection_ <- .collection_check_plot(collection.info)
  md_ <- .md_check_plot(all.metadata)

  if(!political.boundaries){
    political_select <- "view_full_occurrence_individual.country,"
  }else{
    political_select <- "view_full_occurrence_individual.country,view_full_occurrence_individual.state_province,view_full_occurrence_individual.county,view_full_occurrence_individual.locality,"
  }

  # set the query
  if(is.null(country.code)){

    query <- paste("SELECT ", political_select, " view_full_occurrence_individual.plot_name,view_full_occurrence_individual.subplot,
                   view_full_occurrence_individual.elevation_m, view_full_occurrence_individual.plot_area_ha,
                   view_full_occurrence_individual.sampling_protocol,view_full_occurrence_individual.recorded_by,
                   view_full_occurrence_individual.scrubbed_species_binomial,view_full_occurrence_individual.individual_count",
                   taxonomy_$select, native_$select, ",
                   view_full_occurrence_individual.latitude, view_full_occurrence_individual.longitude, view_full_occurrence_individual.date_collected,
                   view_full_occurrence_individual.datasource,view_full_occurrence_individual.dataset,view_full_occurrence_individual.dataowner,
                   view_full_occurrence_individual.custodial_institution_codes,view_full_occurrence_individual.collection_code,view_full_occurrence_individual.datasource_id",
                   collection_$select, cultivated_$select, newworld_$select, md_$select, "
                   FROM
                   (SELECT * FROM view_full_occurrence_individual
                   WHERE view_full_occurrence_individual.country in (", paste(shQuote(country, type = "sh"), collapse = ', '), ")",
                   cultivated_$query, newworld_$query, natives_$query, "
                   AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi')
                   AND (view_full_occurrence_individual.is_geovalid = 1 )
                   AND (view_full_occurrence_individual.georef_protocol is NULL OR view_full_occurrence_individual.georef_protocol<>'county centroid')
                   AND (view_full_occurrence_individual.is_centroid IS NULL OR view_full_occurrence_individual.is_centroid=0)
                   AND observation_type='plot'
                   AND scrubbed_species_binomial IS NOT NULL) as view_full_occurrence_individual
                   LEFT JOIN plot_metadata ON
                   (view_full_occurrence_individual.plot_metadata_id=plot_metadata.plot_metadata_id) ;")

  }else{

    query <- paste("SELECT ", political_select, " view_full_occurrence_individual.plot_name,view_full_occurrence_individual.subplot,
                   view_full_occurrence_individual.elevation_m, view_full_occurrence_individual.plot_area_ha,
                   view_full_occurrence_individual.sampling_protocol,view_full_occurrence_individual.recorded_by,
                   view_full_occurrence_individual.scrubbed_species_binomial,view_full_occurrence_individual.individual_count",
                   taxonomy_$select, native_$select, ",
                   view_full_occurrence_individual.latitude, view_full_occurrence_individual.longitude, view_full_occurrence_individual.date_collected,
                   view_full_occurrence_individual.datasource,view_full_occurrence_individual.dataset,view_full_occurrence_individual.dataowner,
                   view_full_occurrence_individual.custodial_institution_codes,view_full_occurrence_individual.collection_code,view_full_occurrence_individual.datasource_id",
                   collection_$select, cultivated_$select, newworld_$select, md_$select, "
                   FROM
                   (SELECT * FROM view_full_occurrence_individual
                   WHERE view_full_occurrence_individual.country in (SELECT country FROM country WHERE iso in (", paste(shQuote(country.code, type = "sh"), collapse = ', '), "))",
                   cultivated_$query, newworld_$query, natives_$query, "
                   AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi')
                   AND (view_full_occurrence_individual.is_geovalid = 1 )
                   AND (view_full_occurrence_individual.georef_protocol is NULL OR view_full_occurrence_individual.georef_protocol<>'county centroid')
                   AND (view_full_occurrence_individual.is_centroid IS NULL OR view_full_occurrence_individual.is_centroid=0)
                   AND observation_type='plot'
                   AND view_full_occurrence_individual.scrubbed_species_binomial IS NOT NULL) as view_full_occurrence_individual
                   LEFT JOIN plot_metadata ON
                   (view_full_occurrence_individual.plot_metadata_id=plot_metadata.plot_metadata_id) ;")

  }

  # create query to retrieve
  return(.BIEN_sql(query, ...))

}

###############################

#'Download plot data from specified states/provinces.
#'
#'BIEN_plot_state downloads all plot data from specified states/provinces.
#' @param country A single country.
#' @param state A state or vector of states (or other primary political divisions).
#' @param state.code A single state/province code, or a vector of states/province codes.
#' @param country.code A single country code or a vector of country codes equal in length to the vector of states/province codes.
#' @template plot
#' @note Political division (or political division code) spelling needs to be exact and case-sensitive, see \code{\link{BIEN_metadata_list_political_names}} for a list of political divisions and associated codes.
#' @note This function requires you supply either 1) a single country with one or more states, or 2) vectors of equal length for each political level.
#' @return A dataframe containing all data from the specified states.
#' @examples \dontrun{
#' BIEN_plot_state(country="United States", state="Colorado")
#' BIEN_plot_state(country="United States",state= c("Colorado","California"))}
#' @family plot functions
#' @export
BIEN_plot_state <- function(country = NULL,
                            state = NULL,
                            country.code = NULL,
                            state.code = NULL,
                            cultivated = FALSE,
                            new.world = NULL,
                            all.taxonomy = FALSE,
                            native.status = FALSE,
                            natives.only = TRUE,
                            political.boundaries = TRUE,
                            collection.info = FALSE,
                            all.metadata = FALSE,
                            ...){

  .is_char(country)
  .is_log(cultivated)
  .is_log_or_null(new.world)
  .is_log(all.taxonomy)
  .is_char(state)
  .is_char(state.code)
  .is_char(country.code)
  .is_log(native.status)
  .is_log(natives.only)
  .is_log(political.boundaries)
  .is_log(collection.info)
  .is_log(all.metadata)

  #set conditions for query
  cultivated_ <- .cultivated_check_plot(cultivated)
  newworld_ <- .newworld_check_plot(new.world)
  taxonomy_ <- .taxonomy_check_plot(all.taxonomy)
  native_ <- .native_check_plot(native.status)
  natives_ <- .natives_check_plot(natives.only)
  collection_ <- .collection_check_plot(collection.info)
  md_ <- .md_check_plot(all.metadata)

  if(!political.boundaries){
    political_select <- "view_full_occurrence_individual.country,view_full_occurrence_individual.state_province,"
  }else{
    political_select <- "view_full_occurrence_individual.country,view_full_occurrence_individual.state_province,view_full_occurrence_individual.county,view_full_occurrence_individual.locality,"
  }

  if(is.null(country.code) & is.null(state.code)){

    #state where
    if(length(country) == 1){

      sql_where <- paste(" WHERE country in (", paste(shQuote(country, type = "sh"), collapse = ', '), ")
                         AND state_province in (", paste(shQuote(state, type = "sh"), collapse = ', '), ")
                         AND scrubbed_species_binomial IS NOT NULL")

    }else{

      if(length(country) == length(state)){

        sql_where <- "WHERE ("

        for(i in 1:length(country)){

          condition_i <- paste("(country = ", paste(shQuote(country[i], type = "sh"), collapse = ', '), "
                               AND state_province = ", paste(shQuote(state[i], type = "sh"), collapse = ', '), ")")
          if(i != 1){condition_i <- paste("OR ", condition_i)} #stick OR onto the condition where needed
          sql_where <- paste(sql_where, condition_i)

        }#for i

        sql_where <- paste(sql_where, ") AND scrubbed_species_binomial IS NOT NULL")

      }else{

        stop("If supplying more than one country, the function requires a vector of countries corresponding to the vector of states")

      }

    }#if length(country>1)

  }else{

    if(length(country.code) == 1){

      sql_where <- paste(" WHERE country in (SELECT country FROM country WHERE iso in (", paste(shQuote(country.code, type = "sh"), collapse = ', '), "))
                         AND state_province in (SELECT state_province_ascii FROM county_parish WHERE admin1code in (", paste(shQuote(state.code, type = "sh"), collapse = ', '), "))
                         AND scrubbed_species_binomial IS NOT NULL")

    }else{

      if(length(country.code) == length(state.code)){

        sql_where <- "WHERE ("

        for(i in 1:length(country.code)){

          condition_i <- paste("country in (SELECT country FROM country WHERE iso in (", paste(shQuote(country.code[i], type = "sh"), collapse = ', '), "))
                               AND state_province in (SELECT state_province_ascii FROM county_parish WHERE admin1code in (", paste(shQuote(state.code[i], type = "sh"), collapse = ', '), "))")
          if(i != 1){condition_i <- paste("OR ", condition_i)} #stick OR onto the condition where needed
          sql_where <- paste(sql_where, condition_i)

        }#for i

        sql_where <- paste(sql_where, ") AND scrubbed_species_binomial IS NOT NULL")

      }else{

        stop("If supplying more than one country, the function requires a vector of countries corresponding to the vector of states")

      }

    }#if length(country>1)

  }

  # set the query
  query <- paste("SELECT ", political_select, " view_full_occurrence_individual.plot_name,subplot, view_full_occurrence_individual.elevation_m,
                 view_full_occurrence_individual.plot_area_ha,view_full_occurrence_individual.sampling_protocol,recorded_by, scrubbed_species_binomial,individual_count",
                 taxonomy_$select, native_$select, " ,view_full_occurrence_individual.latitude, view_full_occurrence_individual.longitude,view_full_occurrence_individual.date_collected,
                 view_full_occurrence_individual.datasource,view_full_occurrence_individual.dataset,view_full_occurrence_individual.dataowner,custodial_institution_codes,
                 collection_code,view_full_occurrence_individual.datasource_id",
                 collection_$select, cultivated_$select, newworld_$select, md_$select, "
                 FROM
                 (SELECT * FROM view_full_occurrence_individual ",
                 sql_where, cultivated_$query, newworld_$query, natives_$query, "
                 AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi')
                 AND (view_full_occurrence_individual.is_geovalid = 1 )
                 AND (view_full_occurrence_individual.georef_protocol is NULL OR view_full_occurrence_individual.georef_protocol<>'county centroid')
                 AND (view_full_occurrence_individual.is_centroid IS NULL OR view_full_occurrence_individual.is_centroid=0)
                 AND observation_type='plot'
                 AND view_full_occurrence_individual.scrubbed_species_binomial IS NOT NULL) as view_full_occurrence_individual
                 JOIN plot_metadata ON
                 (view_full_occurrence_individual.plot_metadata_id=plot_metadata.plot_metadata_id) ;")

  # create query to retrieve
  return(.BIEN_sql(query, ...))

}

###############################

#'Download plot data from specified sf object.
#'
#'BIEN_plot_sf downloads all plot data falling within a supplied sf polygon.
#' @param sf An object of class sf. Note that the projection must be WGS84.
#' @template plot
#' @return A dataframe containing all plot data from within the specified sf polygon.
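#' @note The sf object is converted to WKT and compared against occurrence coordinates in WGS84 (SRID 4326); objects in other coordinate reference systems should be transformed first (e.g. with sf::st_transform).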
#' @examples \dontrun{
#' library(sf)
#'
#' BIEN_ranges_species("Carnegiea gigantea") #saves ranges to the current working directory
#'
#' sf <- st_read(dsn = ".",
#'               layer = "Carnegiea_gigantea")
#'
#' saguaro_plot_data <- BIEN_plot_sf(sf = sf)
#' }
#' @family plot functions
#' @importFrom sf st_geometry st_as_text
#' @export
BIEN_plot_sf <- function(sf,
                         cultivated = FALSE,
                         new.world = NULL,
                         all.taxonomy = FALSE,
                         native.status = FALSE,
                         natives.only = TRUE,
                         political.boundaries = TRUE,
                         collection.info = FALSE,
                         all.metadata = FALSE,
                         ...){

  .is_log(cultivated)
  .is_log_or_null(new.world)
  .is_log(all.taxonomy)
  .is_log(native.status)
  .is_log(natives.only)
  .is_log(political.boundaries)
  .is_log(collection.info)
  .is_log(all.metadata)

  # Convert the sf to wkt (needed for sql query)
  wkt <- sf |>
    st_geometry() |>
    st_as_text()

  #set conditions for query
  cultivated_ <- .cultivated_check_plot(cultivated)
  newworld_ <- .newworld_check_plot(new.world)
  taxonomy_ <- .taxonomy_check_plot(all.taxonomy)
  native_ <- .native_check_plot(native.status)
  natives_ <- .natives_check_plot(natives.only)
  collection_ <- .collection_check_plot(collection.info)
  md_ <- .md_check_plot(all.metadata)

  if(!political.boundaries){
    political_select <- "view_full_occurrence_individual.country,"
  }else{
    political_select <- "view_full_occurrence_individual.country,view_full_occurrence_individual.state_province,view_full_occurrence_individual.county,view_full_occurrence_individual.locality,"
  }

  # set the query
  query <- paste("SELECT ", political_select, " view_full_occurrence_individual.plot_name,subplot, view_full_occurrence_individual.elevation_m,
                 view_full_occurrence_individual.plot_area_ha,view_full_occurrence_individual.sampling_protocol,recorded_by, scrubbed_species_binomial,individual_count",
                 taxonomy_$select, native_$select, " ,view_full_occurrence_individual.latitude, view_full_occurrence_individual.longitude,view_full_occurrence_individual.date_collected,
                 view_full_occurrence_individual.datasource,view_full_occurrence_individual.dataset,view_full_occurrence_individual.dataowner,custodial_institution_codes,
                 collection_code,view_full_occurrence_individual.datasource_id",
                 collection_$select, cultivated_$select, newworld_$select, md_$select, "
                 FROM
                 (SELECT * FROM view_full_occurrence_individual ",
                 "WHERE st_intersects(ST_GeographyFromText('SRID=4326;", paste(wkt), "'),geom) ",
                 cultivated_$query, newworld_$query, natives_$query, "
                 AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi')
                 AND (view_full_occurrence_individual.is_geovalid = 1 )
                 AND (view_full_occurrence_individual.georef_protocol is NULL OR view_full_occurrence_individual.georef_protocol<>'county centroid')
                 AND (view_full_occurrence_individual.is_centroid IS NULL OR view_full_occurrence_individual.is_centroid=0)
                 AND observation_type='plot'
                 AND view_full_occurrence_individual.scrubbed_species_binomial IS NOT NULL ) as view_full_occurrence_individual
                 JOIN plot_metadata ON
                 (view_full_occurrence_individual.plot_metadata_id=plot_metadata.plot_metadata_id) ;")

  # create query to retrieve
  return(.BIEN_sql(query, ...))
  #return(.BIEN_sql(query))

}

###############################

#'List available sampling protocols.
#'
#'BIEN_plot_list_sampling_protocols lists all available sampling protocols.
#' @param ... Additional arguments passed to internal functions.
#' @return A vector of available sampling protocols.
#' @examples \dontrun{
#' BIEN_plot_list_sampling_protocols()}
#' @family plot functions
#' @export
BIEN_plot_list_sampling_protocols <- function(...){

  query <- paste("SELECT DISTINCT sampling_protocol FROM plot_metadata ;")

  return(.BIEN_sql(query, ...))

}

################################

#'Download plot data using a specified sampling protocol.
#'
#'BIEN_plot_sampling_protocol downloads all plot data using a specified sampling protocol.
#' @param sampling_protocol A sampling protocol or vector of sampling protocols. See \code{\link{BIEN_plot_list_sampling_protocols}} for options.
#' @template plot
#' @return A dataframe containing all data collected using the specified sampling protocol(s).
#' @examples \dontrun{
#' BIEN_plot_sampling_protocol("Point-intercept")}
#' @family plot functions
#' @export
BIEN_plot_sampling_protocol <- function(sampling_protocol,
                                        cultivated = FALSE,
                                        new.world = FALSE,
                                        all.taxonomy = FALSE,
                                        native.status = FALSE,
                                        natives.only = TRUE,
                                        political.boundaries = FALSE,
                                        collection.info = FALSE,
                                        all.metadata = FALSE,
                                        ...){

  .is_log(cultivated)
  .is_log_or_null(new.world)
  .is_log(all.taxonomy)
  .is_char(sampling_protocol)
  .is_log(native.status)
  .is_log(natives.only)
  .is_log(political.boundaries)
  .is_log(collection.info)
  .is_log(all.metadata)

  #set conditions for query
  cultivated_ <- .cultivated_check_plot(cultivated)
  newworld_ <- .newworld_check_plot(new.world)
  taxonomy_ <- .taxonomy_check_plot(all.taxonomy)
  native_ <- .native_check_plot(native.status)
  natives_ <- .natives_check_plot(natives.only)
  political_ <- .political_check_plot(political.boundaries)
  collection_ <- .collection_check_plot(collection.info)
  md_ <- .md_check_plot(all.metadata)

  query <- paste("SELECT view_full_occurrence_individual.plot_name,subplot, view_full_occurrence_individual.elevation_m, view_full_occurrence_individual.plot_area_ha,
                 view_full_occurrence_individual.sampling_protocol,recorded_by, scrubbed_species_binomial,individual_count",
                 taxonomy_$select, native_$select, political_$select, ",
                 view_full_occurrence_individual.latitude, view_full_occurrence_individual.longitude,date_collected,view_full_occurrence_individual.datasource,
                 view_full_occurrence_individual.dataset,view_full_occurrence_individual.dataowner,custodial_institution_codes,collection_code,
                 view_full_occurrence_individual.datasource_id",
                 collection_$select, cultivated_$select, newworld_$select, md_$select, "
                 FROM
                 (SELECT * FROM view_full_occurrence_individual
                 WHERE view_full_occurrence_individual.sampling_protocol in (", paste(shQuote(sampling_protocol, type = "sh"), collapse = ", "), ")",
                 cultivated_$query, newworld_$query, natives_$query, "
                 AND view_full_occurrence_individual.higher_plant_group NOT IN ('Algae','Bacteria','Fungi')
                 AND (view_full_occurrence_individual.is_geovalid = 1 )
                 AND (view_full_occurrence_individual.georef_protocol is NULL OR view_full_occurrence_individual.georef_protocol<>'county centroid')
                 AND (view_full_occurrence_individual.is_centroid IS NULL OR view_full_occurrence_individual.is_centroid=0)
                 AND view_full_occurrence_individual.observation_type='plot'
                 AND view_full_occurrence_individual.scrubbed_species_binomial IS NOT NULL) as view_full_occurrence_individual
                 JOIN plot_metadata ON
                 (view_full_occurrence_individual.plot_metadata_id=plot_metadata.plot_metadata_id) ;")

  return(.BIEN_sql(query, ...))

}

#################################

#'Download plot data by plot name.
#'
#'BIEN_plot_name downloads all plot data for a set of plot names.
#' @param plot.name A plot name or vector of names. See BIEN_plot_metadata for more information on plots.
#' @template plot
#' @note Plot names can be looked up with \code{\link{BIEN_plot_metadata}}.
#' @return A dataframe containing all data from the specified plot(s).
#' @examples \dontrun{
#' BIEN_plot_name("SR-1")}
#' @family plot functions
#' @export
BIEN_plot_name <- function(plot.name,
                           cultivated = FALSE,
                           new.world = NULL,
                           all.taxonomy = FALSE,
                           native.status = FALSE,
                           natives.only = TRUE,
                           political.boundaries = FALSE,
                           collection.info = FALSE,
                           all.metadata = FALSE,
                           ...){

  .is_log(cultivated)
  .is_log_or_null(new.world)
  .is_log(all.taxonomy)
  .is_char(plot.name)
  .is_log(native.status)
  .is_log(natives.only)
  .is_log(political.boundaries)
  .is_log(collection.info)
  .is_log(all.metadata)

  #set conditions for query
  cultivated_ <- .cultivated_check_plot(cultivated)
  newworld_ <- .newworld_check_plot(new.world)
  taxonomy_ <- .taxonomy_check_plot(all.taxonomy)
  native_ <- .native_check_plot(native.status)
  natives_ <- .natives_check_plot(natives.only)
  political_ <- .political_check_plot(political.boundaries)
  collection_ <- .collection_check_plot(collection.info)
  md_ <- .md_check_plot(all.metadata)

  # set the query
  query <- paste("SELECT view_full_occurrence_individual.plot_name,subplot, view_full_occurrence_individual.elevation_m, view_full_occurrence_individual.plot_area_ha,
                 view_full_occurrence_individual.sampling_protocol,view_full_occurrence_individual.recorded_by, view_full_occurrence_individual.scrubbed_species_binomial,
                 view_full_occurrence_individual.individual_count",
                 taxonomy_$select, native_$select, political_$select, ",
                 view_full_occurrence_individual.latitude, view_full_occurrence_individual.longitude,view_full_occurrence_individual.date_collected,
                 view_full_occurrence_individual.datasource,view_full_occurrence_individual.dataset,view_full_occurrence_individual.dataowner,
                 view_full_occurrence_individual.custodial_institution_codes,collection_code,view_full_occurrence_individual.datasource_id",
                 collection_$select, cultivated_$select, newworld_$select, md_$select, "
                 FROM
                 (SELECT * FROM view_full_occurrence_individual
                 WHERE view_full_occurrence_individual.plot_name in (", paste(shQuote(plot.name, type = "sh"), collapse = ', '), ")",
                 cultivated_$query, newworld_$query, natives_$query, "
                 AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi')
                 AND is_geovalid = 1
                 AND (georef_protocol is NULL OR georef_protocol<>'county centroid')
                 AND (is_centroid IS NULL OR is_centroid=0)
                 AND observation_type='plot'
                 AND view_full_occurrence_individual.scrubbed_species_binomial IS NOT NULL ) as view_full_occurrence_individual
                 LEFT JOIN plot_metadata ON
                 (view_full_occurrence_individual.plot_metadata_id=plot_metadata.plot_metadata_id) ;")

  # create query to retrieve
  return(.BIEN_sql(query, ...))

}

#####################

#'Download plot data by dataset.
#'
#'BIEN_plot_dataset downloads all plot data for a given dataset or datasets.
#' @param dataset A plot dataset or vector of datasets. See BIEN_plot_metadata for more information on plots.
#' @template plot
#' @return A dataframe containing all data from the specified dataset.
#' @note Datasets and related information can be looked up with \code{\link{BIEN_plot_metadata}}
#' @examples \dontrun{
#' BIEN_plot_dataset("Gentry Transect Dataset")}
#' @family plot functions
#' @export
BIEN_plot_dataset <- function(dataset,
                              cultivated = FALSE,
                              new.world = NULL,
                              all.taxonomy = FALSE,
                              native.status = FALSE,
                              natives.only = TRUE,
                              political.boundaries = FALSE,
                              collection.info = FALSE,
                              all.metadata = FALSE,
                              ...){

  .is_log(cultivated)
  .is_log_or_null(new.world)
  .is_log(all.taxonomy)
  .is_char(dataset)
  .is_log(native.status)
  .is_log(natives.only)
  .is_log(political.boundaries)
  .is_log(collection.info)
  .is_log(all.metadata)

  #set conditions for query
  cultivated_ <- .cultivated_check_plot(cultivated)
  newworld_ <- .newworld_check_plot(new.world)
  taxonomy_ <- .taxonomy_check_plot(all.taxonomy)
  native_ <- .native_check_plot(native.status)
  natives_ <- .natives_check_plot(natives.only)
  political_ <- .political_check_plot(political.boundaries)
  collection_ <- .collection_check_plot(collection.info)
  md_ <- .md_check_plot(all.metadata)

  # set the query
  query <- paste("SELECT view_full_occurrence_individual.plot_name,subplot, view_full_occurrence_individual.elevation_m, view_full_occurrence_individual.plot_area_ha,
                 view_full_occurrence_individual.sampling_protocol,recorded_by, scrubbed_species_binomial,individual_count",
                 taxonomy_$select, native_$select, political_$select, ",
                 view_full_occurrence_individual.latitude, view_full_occurrence_individual.longitude,view_full_occurrence_individual.date_collected,
                 view_full_occurrence_individual.datasource,view_full_occurrence_individual.dataset, view_full_occurrence_individual.dataowner,custodial_institution_codes,collection_code,view_full_occurrence_individual.datasource_id",
                 collection_$select, cultivated_$select, newworld_$select, md_$select, "
                 FROM
                 (SELECT * FROM view_full_occurrence_individual
                 WHERE view_full_occurrence_individual.dataset in (", paste(shQuote(dataset, type = "sh"), collapse = ', '), ")",
                 cultivated_$query, newworld_$query, natives_$query, "
                 AND higher_plant_group NOT IN ('Algae','Bacteria','Fungi')
                 AND is_geovalid = 1
                 AND (georef_protocol is NULL OR georef_protocol<>'county centroid')
                 AND (is_centroid IS NULL OR is_centroid=0)
                 AND observation_type='plot'
                 AND view_full_occurrence_individual.scrubbed_species_binomial IS NOT NULL ) as view_full_occurrence_individual
                 LEFT JOIN plot_metadata ON
                 (view_full_occurrence_individual.plot_metadata_id=plot_metadata.plot_metadata_id) ;")

  # create query to retrieve
  return(.BIEN_sql(query, ...))

}

##############################

#'Download plot metadata
#'
#'BIEN_plot_metadata downloads the plot metadata table.
#' @param ... Additional arguments passed to internal functions.
#' @return A dataframe containing plot metadata.
#' @examples \dontrun{
#' BIEN_plot_metadata()}
#' @family plot functions
#' @family metadata functions
#' @export
BIEN_plot_metadata <- function(...){

  # set the query
  query <- "SELECT * FROM plot_metadata ;"

  # create query to retrieve
  return(.BIEN_sql(query, ...))

}

##############################
################################

#Taxonomy queries

#'Extract taxonomic information for species
#'
#'BIEN_taxonomy_species downloads a dataframe of all taxonomic information for given species.
#' @param species A single species or a vector of species.
#' @template taxonomy
#' @return Dataframe containing taxonomic information for the specified species.
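#' @note A single species may return multiple rows, e.g. where more than one author or taxonomic status is associated with the name.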
#' @examples \dontrun{
#' BIEN_taxonomy_species("Cannabis sativa")
#' species_vector<-c("Acer nigrum","Cannabis sativa")
#' BIEN_taxonomy_species(species_vector)}
#' @family taxonomy functions
#' @export
BIEN_taxonomy_species <- function(species, ...){

  .is_char(species)

  #set base query components
  sql_select <- paste('SELECT DISTINCT higher_plant_group, "class", superorder, "order", scrubbed_family,scrubbed_genus,scrubbed_species_binomial,scrubbed_author,scrubbed_taxonomic_status')
  sql_from <- paste(' FROM bien_taxonomy')
  sql_where <- paste(' WHERE scrubbed_species_binomial in (', paste(shQuote(species, type = "sh"), collapse = ', '), ') AND scrubbed_species_binomial IS NOT NULL')
  sql_order_by <- paste(' ORDER BY higher_plant_group,scrubbed_family,scrubbed_genus,scrubbed_species_binomial,scrubbed_author ')

  # form the final query
  query <- paste(sql_select, sql_from, sql_where, sql_order_by, " ;")

  # execute the query
  return(.BIEN_sql(query, ...))

}

#################

#'Extract taxonomic information for genera
#'
#'BIEN_taxonomy_genus downloads a dataframe of all taxonomic information for given genera.
#' @param genus A single genus or a vector of genera.
#' @template taxonomy
#' @return Dataframe containing taxonomic information for the specified genera.
#' @examples \dontrun{
#' BIEN_taxonomy_genus("Acer")
#' genus_vector<-c("Acer","Quercus")
#' BIEN_taxonomy_genus(genus_vector)}
#' @family taxonomy functions
#' @export
BIEN_taxonomy_genus <- function(genus, ...){

  .is_char(genus)

  #set base query components
  sql_select <- paste('SELECT DISTINCT higher_plant_group, "class", superorder, "order", scrubbed_family,scrubbed_genus,scrubbed_species_binomial,scrubbed_author,scrubbed_taxonomic_status')
  sql_from <- paste(" FROM bien_taxonomy")
  sql_where <- paste(" WHERE scrubbed_genus in (", paste(shQuote(genus, type = "sh"), collapse = ', '), ") AND scrubbed_species_binomial IS NOT NULL")
  sql_order_by <- paste(" ORDER BY higher_plant_group,scrubbed_family,scrubbed_genus,scrubbed_species_binomial,scrubbed_author ")

  # form the final query
  query <- paste(sql_select, sql_from, sql_where, sql_order_by, " ;")

  # execute the query
  return(.BIEN_sql(query, ...))

}

###########

#'Extract taxonomic information for families
#'
#'BIEN_taxonomy_family downloads a dataframe of all taxonomic information for given families.
#' @param family A single family or a vector of families.
#' @template taxonomy
#' @return Dataframe containing taxonomic information for the specified families.
#' @examples \dontrun{
#' BIEN_taxonomy_family("Orchidaceae")
#' family_vector<-c("Orchidaceae","Poaceae")
#' BIEN_taxonomy_family(family_vector)}
#' @family taxonomy functions
#' @export
BIEN_taxonomy_family <- function(family, ...){

  .is_char(family)

  #set base query components
  sql_select <- paste('SELECT DISTINCT higher_plant_group, "class", superorder, "order", scrubbed_family,scrubbed_genus,scrubbed_species_binomial,scrubbed_author,scrubbed_taxonomic_status')
  sql_from <- paste(" FROM bien_taxonomy")
  sql_where <- paste(" WHERE scrubbed_family in (", paste(shQuote(family, type = "sh"), collapse = ', '), ") AND scrubbed_species_binomial IS NOT NULL")
  sql_order_by <- paste(" ORDER BY higher_plant_group,scrubbed_family,scrubbed_genus,scrubbed_species_binomial,scrubbed_author ")

  # form the final query
  query <- paste(sql_select, sql_from, sql_where, sql_order_by, " ;")
  #print(query)

  # execute the query
  return(.BIEN_sql(query, ...))

}

################################
###############################

#Phylogeny fxs

#'Download the complete BIEN phylogenies
#'
#'BIEN_phylogeny_complete downloads a specified number of the BIEN phylogeny replicates.
#' @param n_phylogenies The number of phylogenies to download. Should be an integer between 1 and 100. Default is 1.
#' @param seed Argument passed to set.seed. Useful for replicating work with random phylogeny sets.
#' @param replicates The specific replicated phylogenies to return. Should be a numeric vector of integers between 1 and 100.
#' @template phylogeny
#' @return A phylo or multiphylo object containing the specified phylogenies
#' @examples \dontrun{
#' phylos<-BIEN_phylogeny_complete(n_phylogenies = 10,seed = 1)
#' phylos<-BIEN_phylogeny_complete(replicates = c(1,2,99,100))}
#' @family phylogeny functions
#' @importFrom ape read.tree
#' @export
BIEN_phylogeny_complete <- function(n_phylogenies = 1,
                                    seed = NULL,
                                    replicates = NULL,
                                    ...){

  .is_num(n_phylogenies)

  if(!is.null(replicates)){

    replicates <- replicates[which(replicates %in% 1:100)]

    query <- paste("SELECT * FROM phylogeny WHERE phylogeny_version = 'BIEN_2016_complete'
                   AND replicate in (", paste(shQuote(replicates, type = "sh"), collapse = ', '), ")")

    df <- .BIEN_sql(query, ...)

    tree <- read.tree(text = df$phylogeny,
                      tree.names = df$replicate)

    return(tree)

  }

  set.seed(seed)

  if(n_phylogenies > 100){

    message("n_phylogenies must be an integer between 1 and 100. Setting n_phylogenies to 100")
    n_phylogenies <- 100

  }

  if(n_phylogenies < 1){

    message("n_phylogenies must be an integer between 1 and 100. Setting n_phylogenies to 1")
    n_phylogenies <- 1

  }

  phylo_sample <- sample(x = 1:100,
                         size = n_phylogenies,
                         replace = FALSE)

  query <- paste("SELECT * FROM phylogeny WHERE phylogeny_version = 'BIEN_2016_complete'
                 AND replicate in (", paste(shQuote(phylo_sample, type = "sh"), collapse = ', '), ")")

  df <- .BIEN_sql(query, ...)

  tree <- read.tree(text = df$phylogeny,
                    tree.names = df$replicate)

  return(tree)

}

###############################

#'Download the conservative BIEN phylogeny
#'
#'BIEN_phylogeny_conservative downloads the conservative BIEN phylogeny, which only includes species with molecular data available.
#' @template phylogeny
#' @return A phylo object containing the BIEN conservative phylogeny
#' @examples \dontrun{
#' BIEN_phylo<-BIEN_phylogeny_conservative()}
#' @family phylogeny functions
#' @importFrom ape read.tree
#' @export
BIEN_phylogeny_conservative <- function(...){

  query <- paste("SELECT * FROM phylogeny WHERE phylogeny_version = 'BIEN_2016_conservative' ;")

  df <- .BIEN_sql(query, ...)
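
  # Each record stores the phylogeny as a newick string; parse it into a phylo object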
  tree <- read.tree(text = df$phylogeny,
                    tree.names = df$replicate)

  return(tree)

}

#################################

#'Label nodes on a phylogeny
#'
#'BIEN_phylogeny_label_nodes will label the nodes on a phylogeny based on either the BIEN taxonomy or user-supplied taxa.
#' @param phylogeny A single phylogeny.
#' @param family Should family-level nodes be labeled? Default is TRUE.
#' @param genus Should genus-level nodes be labeled? Default is FALSE. Overwrites family-level nodes where a family contains a single genus.
#' @param other_taxa A dataframe containing two columns: 1) the taxa to be labelled; 2) the species associated with each taxon.
#' @template phylogeny
#' @return Input phylogeny with labeled nodes.
#' @examples \dontrun{
#' phylogeny<-BIEN_phylogeny_conservative()
#'
#'phylogeny<-drop.tip(phy = phylogeny,tip = 101:length(phylogeny$tip.label))
#'plot.phylo(x = phylogeny,show.tip.label = FALSE)
#'
#'fam_nodes<-BIEN_phylogeny_label_nodes(phylogeny = phylogeny,family = TRUE)
#'plot.phylo(x = fam_nodes,show.tip.label = FALSE, show.node.label = TRUE)
#'
#'gen_nodes<-BIEN_phylogeny_label_nodes(phylogeny = phylogeny, family = FALSE, genus = TRUE)
#'plot.phylo(x = gen_nodes, show.tip.label = FALSE, show.node.label = TRUE)
#'
#'other_taxa <- as.data.frame(matrix(nrow = 10,ncol = 2))
#'colnames(other_taxa)<-c("taxon","species")
#'other_taxa$taxon[1:5]<-"A" #Randomly assign a few species to taxon A
#'other_taxa$taxon[6:10]<-"B" #Randomly assign a few species to taxon B
#'other_taxa$species<-gsub("_"," ",phylogeny$tip.label[1:10]) #Assign species to each taxon (here, simply the first ten tips)
#'tax_nodes <-
#' BIEN_phylogeny_label_nodes(phylogeny = phylogeny,
#'                            family = FALSE, genus = FALSE, other_taxa = other_taxa)
#'plot.phylo(x = tax_nodes,show.tip.label = FALSE,show.node.label = TRUE)}
#' @family phylogeny functions
#' @importFrom ape getMRCA
#' @export
BIEN_phylogeny_label_nodes <- function(phylogeny,
                                       family = TRUE,
                                       genus = FALSE,
                                       other_taxa = NULL,
                                       ...){

  if(is.null(phylogeny$node.label)){
    phylogeny$node.label[1:phylogeny$Nnode] <- NA
  }

  taxonomy <- BIEN_taxonomy_species(species = gsub(pattern = "_", replacement = " ", x = phylogeny$tip.label))

  if(family == TRUE){

    for(i in 1:length(unique(taxonomy$scrubbed_family))){

      fam_i <- unique(taxonomy$scrubbed_family)[i]
      spp_i <- taxonomy$scrubbed_species_binomial[which(taxonomy$scrubbed_family == fam_i)]
      mrca_i <- getMRCA(phy = phylogeny,
                        tip = which(phylogeny$tip.label %in% gsub(pattern = " ", replacement = "_", x = spp_i)))
      phylogeny$node.label[mrca_i - length(phylogeny$tip.label)] <- fam_i

    }}

  if(genus == TRUE){

    for(i in 1:length(unique(taxonomy$scrubbed_genus))){

      gen_i <- unique(taxonomy$scrubbed_genus)[i]
      spp_i <- taxonomy$scrubbed_species_binomial[which(taxonomy$scrubbed_genus == gen_i)]
      mrca_i <- getMRCA(phy = phylogeny,
                        tip = which(phylogeny$tip.label %in% gsub(pattern = " ", replacement = "_", x = spp_i)))
      phylogeny$node.label[mrca_i - length(phylogeny$tip.label)] <- gen_i

    }}

  if(!is.null(other_taxa)){

    for(i in 1:length(unique(other_taxa[,1]))){

      tax_i <- unique(other_taxa[,1])[i]
      spp_i <- other_taxa[,2][which(other_taxa[,1] == tax_i)]
      mrca_i <- getMRCA(phy = phylogeny,
                        tip = which(phylogeny$tip.label %in% gsub(pattern = " ", replacement = "_", x = spp_i)))
      phylogeny$node.label[mrca_i - length(phylogeny$tip.label)] <- tax_i

    }}

  return(phylogeny)

}#end fx

#################################
#################################

#'Download the current BIEN database version and release date
#'
#'BIEN_metadata_database_version downloads the current version number and release date for the BIEN database.
#' @param ... Additional arguments passed to internal functions.
#' @return A data frame containing the current version number and release date for the BIEN database. #' @family metadata functions #' @examples \dontrun{ #' BIEN_metadata_database_version()} #' @export BIEN_metadata_database_version <- function(...){ query <- "SELECT db_version, db_release_date FROM bien_metadata a JOIN (SELECT MAX(bien_metadata_id) as max_id FROM bien_metadata) AS b ON a.bien_metadata_id=b.max_id ;" .BIEN_sql(query, ...) } ################################ ################################ #'Check for differing records between old and new dataframes. #' #'BIEN_metadata_match_data compares old and new dataframes, and can check whether they are identical or be used to select rows that are unique to the old or new versions. #' @param old A dataframe that is to be compared to a (typically) newer dataframe. #' @param new A dataframe that is to be compared to a (typically) older dataframe. #' @param return What information should be returned? Current options are: "identical" (Logical, are the two dataframes identical?), "additions" (numeric, which rows are new?), "deletions" (numeric, which rows are no longer present?), "logical" (logical, which rows of the new dataframe are present in the old one?). #' @return A logical value or vector, or a numeric vector of row indices, depending on the choice of "return" parameter. #' @note Since comparisons are done by row (except when using return="identical"), this function may fail to flag additions or deletions if they are exact duplicates of existing rows. #' @family metadata functions #' @examples \dontrun{ #' new<-BIEN_occurrence_species("Acer nigrum") #' old<-new[-1:-4,]#simulate having an older dataset by removing four rows #' BIEN_metadata_match_data(old,new,return="identical") #' BIEN_metadata_match_data(old,new,return="additions")} #' @export BIEN_metadata_match_data <- function(old, new, return = "identical"){ if(return %in% c("identical","logical","additions","deletions")){ old <- apply(old,MARGIN = 1,FUN = toString) new <- apply(new,MARGIN = 1,FUN = toString) if(return == "logical"){ elements <- is.element(new,old) return(elements) }#returns TRUE where elements are in the old set, false where they are not if(return == "additions"){ elements <- is.element(new,old) return(which(elements == FALSE)) }#returns index of new elements if(return == "deletions"){ elements <- is.element(old,new) return(which(elements == FALSE)) }#returns index of deleted elements if(return == "identical"){ return(identical(old,new)) }#returns true if identical, false otherwise }else{message("Please specify either 'identical','logical','additions' or 'deletions' for the value of the return argument")} } ################################ #'Generate citations for data extracted from BIEN. #' #'BIEN_metadata_citation guides a user through the proper documentation for data downloaded from the BIEN database. #' @param dataframe A data.frame of occurrence data downloaded from the BIEN R package. #' @param trait.dataframe A data.frame of trait data downloaded from the BIEN R package. #' @param trait.mean.dataframe A data.frame of species mean trait data from the function BIEN_trait_mean. #' @param bibtex_file Output file for writing bibtex citations. #' @param acknowledgement_file Output file for writing acknowledgements. #' @param ... Additional arguments passed to internal functions. #' @return A list object containing information needed for data attribution.
#' @note Full information for herbaria is available at http://sweetgum.nybg.org/science/ih/ #' @examples \dontrun{ #' BIEN_metadata_citation()#If you are referencing the phylogeny or range maps. #' Xanthium_data<-BIEN_occurrence_species("Xanthium strumarium") #' citations<-BIEN_metadata_citation(dataframe=Xanthium_data)#If you are referencing occurrence data} #' @family metadata functions #' @export BIEN_metadata_citation <- function(dataframe = NULL, trait.dataframe = NULL, trait.mean.dataframe = NULL, bibtex_file = NULL, acknowledgement_file = NULL, ...){ BIEN_cite <- '@ARTICLE{Enquist_undated-aw, title = "Botanical big data shows that plant diversity in the New World is driven by climatic-linked differences in evolutionary rates and biotic exclusion", author = "Enquist, B J and Sandel, B and Boyle, B and Svenning, J-C and McGill, B J and Donoghue, J C and Hinchliff, C E and Jorgensen, P M and Kraft, N J B and Marcuse-Kubitza, A and Merow, C and Morueta-Holme, N and Peet, R K and Schildhauer, M and Spencer, N and Regetz, J and Simova, I and Smith, S A and Thiers, B and Violle, C and Wiser, S K and Andelman, S and Casler, N and Condit, R and Dolins, S and Guaderrama, D and Maitner, B S and Narro, M L and Ott, J E and Phillips, O and Sloat, L L and ter Steege, H"}' BIEN_cite <- gsub(pattern = "\n", replacement = "", BIEN_cite) R_package_cite <- '@article{doi:10.1111/2041-210X.12861, author = {Maitner Brian S. and Boyle Brad and Casler Nathan and Condit Rick and Donoghue John and Duran Sandra M. and Guaderrama Daniel and Hinchliff Cody E. and Jorgensen Peter M. and Kraft Nathan J.B. and McGill Brian and Merow Cory and Morueta-Holme Naia and Peet Robert K. and Sandel Brody and Schildhauer Mark and Smith Stephen A. and Svenning Jens-Christian and Thiers Barbara and Violle Cyrille and Wiser Susan and Enquist Brian J.}, title = {The bien r package: A tool to access the Botanical Information and Ecology Network (BIEN) database}, journal = {Methods in Ecology and Evolution}, volume = {9}, number = {2}, pages = {373-379}, keywords = {biodiversity, community plot, ecoinformatics, functional traits, herbarium records, occurrence, phylogeny, plants, presence, R, range maps}, doi = {10.1111/2041-210X.12861}, url = {https://besjournals.onlinelibrary.wiley.com/doi/abs/10.1111/2041-210X.12861}, eprint = {https://besjournals.onlinelibrary.wiley.com/doi/pdf/10.1111/2041-210X.12861}, abstract = {Abstract There is an urgent need for large-scale botanical data to improve our understanding of community assembly, coexistence, biogeography, evolution, and many other fundamental biological processes. Understanding these processes is critical for predicting and handling human-biodiversity interactions and global change dynamics such as food and energy security, ecosystem services, climate change, and species invasions. The Botanical Information and Ecology Network (BIEN) database comprises an unprecedented wealth of cleaned and standardised botanical data, containing roughly 81 million occurrence records from c. 375,000 species, c. 915,000 trait observations across 28 traits from c. 93,000 species, and co-occurrence records from 110,000 ecological plots globally, as well as 100,000 range maps and 100 replicated phylogenies (each containing 81,274 species) for New World species. Here, we describe an r package that provides easy access to these data. The bien r package allows users to access the multiple types of data in the BIEN database.
Functions in this package query the BIEN database by turning user inputs into optimised PostgreSQL functions. Function names follow a convention designed to make it easy to understand what each function does. We have also developed a protocol for providing customised citations and herbarium acknowledgements for data downloaded through the bien r package. The development of the BIEN database represents a significant achievement in biological data integration, cleaning and standardization. Likewise, the bien r package represents an important tool for open science that makes the BIEN database freely and easily accessible to everyone.} }' R_package_cite <- gsub(pattern = "\n",replacement = "",R_package_cite) if(!is.null(trait.dataframe)){ trait.query<-paste("SELECT DISTINCT citation_bibtex,source_citation,source, url_source, access, project_pi, project_pi_contact FROM agg_traits WHERE id in (", paste(shQuote(as.integer(trait.dataframe$id), type = "sh"),collapse = ', '),") ;") trait.sources<-.BIEN_sql(trait.query, ...)} if(!is.null(trait.mean.dataframe)){ ids<-paste(trait.mean.dataframe$ids,collapse = ",") ids<-unique(unlist(strsplit(x = ids,split = ","))) ids<-ids[which(ids!="NA")] trait.mean.query<-paste("SELECT DISTINCT citation_bibtex,source_citation,source, url_source, access, project_pi, project_pi_contact FROM agg_traits WHERE id in (", paste(shQuote(as.integer(ids), type = "sh"),collapse = ', '),") ;") trait.mean.sources <- .BIEN_sql(trait.mean.query, ...) #trait.mean.sources <- .BIEN_sql(trait.mean.query) } if(!is.null(trait.dataframe) & !is.null(trait.mean.dataframe)){ trait.sources<- rbind(trait.sources,trait.mean.sources) trait.sources<-unique(trait.sources) } if(is.null(trait.dataframe) & !is.null(trait.mean.dataframe)){ trait.sources <- trait.mean.sources } ######### ########## #If an occurrence dataframe is supplied: if(!is.null(dataframe)){ datasources<-unique(dataframe$datasource_id[!is.na(dataframe$datasource_id)]) query<-paste("WITH a AS (SELECT * FROM datasource where datasource_id in (", paste(shQuote(datasources, type = "sh"),collapse = ', '),")) SELECT * FROM datasource where datasource_id in (SELECT proximate_provider_datasource_id FROM a) OR datasource_id in (SELECT datasource_id FROM a) ;") sources<-.BIEN_sql(query, ...) citation<-list() citation[[1]]<-general<-"Public BIEN data is licensed via a CC-BY-NC-ND license. Please see BIENdata.org for more information. The references in this list should be added to any publication using these data. This is most easily done by specifying a bibtex_file and importing the bibtex formatted references into a reference manager. The acknowledgements in this list should be pasted into the acknowledgements of any resulting publications. Be sure to check for a 'data owners to contact' section in this list, as any authors listed there need to be contacted prior to publishing with their data." citation[[1]]<-gsub(pattern = "\n",replacement = "",citation[[1]]) #Cleaning up the bibtex so that it loads properly into reference managers. Better too many new lines than not enough...for some reason... 
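# Pool the provider-supplied bibtex citations (occurrence sources plus any trait sources), drop missing entries, and de-duplicate before appending them to the BIEN and R-package citations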
dl_cites<-unique(sources$source_citation[which(!is.na(sources$source_citation))]) if(!is.null(trait.dataframe)){dl_cites<-c(dl_cites,trait.sources$citation_bibtex)} dl_cites<-gsub(dl_cites,pattern = '"@',replacement = '@') dl_cites<-gsub(dl_cites,pattern = '" @',replacement = '@') dl_cites<-unique(dl_cites[which(!is.na(dl_cites))]) citation[[2]]<-c(BIEN_cite,R_package_cite,dl_cites) citation[[2]]<-gsub(citation[[2]],pattern = "author", replacement = "\nauthor") citation[[2]]<-gsub(citation[[2]],pattern = "title", replacement = "\ntitle") citation[[2]]<-gsub(citation[[2]],pattern = "year", replacement = "\nyear") citation[[2]]<-gsub(citation[[2]],pattern = "organization", replacement = "\norganization") citation[[2]]<-gsub(citation[[2]],pattern = "address", replacement = "\naddress") citation[[2]]<-gsub(citation[[2]],pattern = "url", replacement = "\nurl") citation[[2]]<-gsub(citation[[2]],pattern = "journal", replacement = "\njournal") citation[[2]]<-gsub(citation[[2]],pattern = "note", replacement = "\nnote") citation[[2]]<-iconv(citation[[2]],to="ASCII//TRANSLIT") citation[[2]]<-gsub(citation[[2]],pattern = '\n}\"', replacement = '\n}') citation[[2]]<-gsub(citation[[2]],pattern = '\"\\\nurl', replacement = '\"\\url', fixed = TRUE) if(length(unique(sources$source_name[which(sources$is_herbarium==1)]))>0){ citation[[3]]<-paste("We acknowledge the herbaria that contributed data to this work: ",paste(unique(sources$source_name[which(sources$is_herbarium==1)]),collapse = ", "),".",collapse = "",sep="") } if(length(unique(sources$source_name[which(sources$is_herbarium==1)]))==0){ citation[[3]]<-data.frame() } citation[[4]]<-sources[which(sources$access_conditions=="contact authors"),] citation[[4]]<-citation[[4]][c('primary_contact_fullname','primary_contact_email','access_conditions','source_fullname','source_citation')] if(!is.null(trait.dataframe)){ ack_trait_sources<-trait.sources[which(trait.sources$access=='public (notify the PIs)'),] ack_trait_sources<-ack_trait_sources[c('project_pi','project_pi_contact','access','source_citation','citation_bibtex')] colnames(ack_trait_sources)<-c('primary_contact_fullname','primary_contact_email','access_conditions','source_fullname','source_citation') citation[[4]]<-rbind(citation[[4]],ack_trait_sources) } names(citation)<-c("general information","references","acknowledgements","data owners to contact") #Write acknowledgements if(nrow(citation[[4]])==0){citation[[4]]<-NULL} if(!is.null(acknowledgement_file)){ if(length(unique(sources$source_name[which(sources$is_herbarium==1)]))>0){ writeLines(text = citation$acknowledgements,con = acknowledgement_file)}else{ message("No herbarium records found, not generating an herbarium acknowledgement file.") } } #Write author contact warning and info if("contact authors"%in%sources$access_conditions & is.null(trait.dataframe)){ affected_datasource_id<-sources$datasource_id[which(sources$access_conditions=='contact authors')] n_affected_records<-length(which(dataframe$datasource_id%in%affected_datasource_id)) pct_affected_records<-round(x =( n_affected_records/(length(which(!dataframe$datasource_id%in%affected_datasource_id))+n_affected_records))*100,digits = 2) n_affected_sources<-nrow(citation$`data owners to contact`) pct_affected_sources<-round(x = (n_affected_sources/nrow(sources))*100,digits = 2) message(paste("NOTE: You have references that require you to contact the data owners before publication. 
This applies to ", n_affected_records, " records (",pct_affected_records,"%) from ",n_affected_sources," sources (",pct_affected_sources,"%).",sep="")) }#if need to contact authors of a study if("contact authors"%in%sources$access_conditions & !is.null(trait.dataframe)){ affected_datasource_id<-sources$datasource_id[which(sources$access_conditions=='contact authors')] #using author to identify datasource here. Not perfect, but should generally work affected_trait__datasource_id<-trait.sources$project_pi_contact[which(trait.sources$access=='public (notify the PIs)')] n_affected_records<-length(which(dataframe$datasource_id%in%affected_datasource_id))+length(which(trait.dataframe$access%in%'public (notify the PIs)')) pct_affected_records<-round(x =( n_affected_records/(nrow(dataframe)+nrow(trait.dataframe) ))*100,digits = 2) n_affected_sources<-nrow(citation$`data owners to contact`) pct_affected_sources<-round(x = (n_affected_sources/(nrow(sources)+nrow(trait.sources)))*100,digits = 2) message(paste("NOTE: You have references that require you to contact the data owners before publication. This applies to ", n_affected_records, " records (",pct_affected_records,"%) from ",n_affected_sources," sources (",pct_affected_sources,"%).",sep="")) }#if need to contact authors of a study } #if a dataframe is supplied ########## ######### #If no dataframe or trait dataframe supplied if(is.null(dataframe) & is.null(trait.dataframe)){ citation<-list() citation[[1]]<-general<-"Public BIEN data is licensed via a CC-BY-NC-ND license. Please see BIENdata.org for more information. The references in this list should be added to any publication using these data. This is most easily done by specifying a bibtex_file and importing the bibtex formatted references into a reference manager. The acknowledgements in this list should be pasted into the acknowledgements of any resulting publications. Be sure to check for a 'data owners to contact' section in this list, as any authors listed there need to be contacted prior to publishing with their data." citation[[1]]<-gsub(pattern = "\n",replacement = "",citation[[1]]) #Cleaning up the bibtex so that it loads properly into reference managers. Better too many new lines than not enough...for some reason... citation[[2]]<-c(BIEN_cite,R_package_cite) citation[[2]]<-gsub(citation[[2]],pattern = "author", replacement = "\nauthor") citation[[2]]<-gsub(citation[[2]],pattern = "title", replacement = "\ntitle") citation[[2]]<-gsub(citation[[2]],pattern = "year", replacement = "\nyear") citation[[2]]<-gsub(citation[[2]],pattern = "organization", replacement = "\norganization") citation[[2]]<-gsub(citation[[2]],pattern = "address", replacement = "\naddress") citation[[2]]<-gsub(citation[[2]],pattern = "url", replacement = "\nurl") citation[[2]]<-gsub(citation[[2]],pattern = "journal", replacement = "\njournal") citation[[2]]<-gsub(citation[[2]],pattern = "note", replacement = "\nnote") citation[[2]]<-iconv(citation[[2]],to="ASCII//TRANSLIT") names(citation)<-c("general information","references") }#if dataframe is null ####### ##### if((!is.null(trait.dataframe) |!is.null(trait.mean.dataframe)) & is.null(dataframe)){ citation<-list() citation[[1]]<-general<-"Public BIEN data is licensed via a CC-BY-NC-ND license. Please see BIENdata.org for more information. The references in this list should be added to any publication using these data. This is most easily done by specifying a bibtex_file and importing the bibtex formatted references into a reference manager. 
The acknowledgements in this list should be pasted into the acknowledgements of any resulting publications. Be sure to check for a 'data owners to contact' section in this list, as any authors listed there need to be contacted prior to publishing with their data." citation[[1]]<-gsub(pattern = "\n",replacement = "",citation[[1]]) #Cleaning up the bibtex so that it loads properly into reference managers. Better too many new lines than not enough...for some reason... if(!is.null(trait.dataframe)| !is.null(trait.mean.dataframe)){dl_cites<-c(trait.sources$citation_bibtex)} dl_cites<-gsub(dl_cites,pattern = '"@',replacement = '@') dl_cites<-gsub(dl_cites,pattern = '" @',replacement = '@') dl_cites<-unique(dl_cites[which(!is.na(dl_cites))]) citation[[2]]<-c(BIEN_cite,R_package_cite,dl_cites) citation[[2]]<-gsub(citation[[2]],pattern = "author", replacement = "\nauthor") citation[[2]]<-gsub(citation[[2]],pattern = "title", replacement = "\ntitle") citation[[2]]<-gsub(citation[[2]],pattern = "year", replacement = "\nyear") citation[[2]]<-gsub(citation[[2]],pattern = "organization", replacement = "\norganization") citation[[2]]<-gsub(citation[[2]],pattern = "address", replacement = "\naddress") citation[[2]]<-gsub(citation[[2]],pattern = "url", replacement = "\nurl") citation[[2]]<-gsub(citation[[2]],pattern = "journal", replacement = "\njournal") citation[[2]]<-gsub(citation[[2]],pattern = "note", replacement = "\nnote") citation[[2]]<-iconv(citation[[2]],to="ASCII//TRANSLIT") citation[[2]]<-gsub(citation[[2]],pattern = '\n}\"', replacement = '\n}') citation[[2]]<-gsub(citation[[2]],pattern = '\"\\\nurl', replacement = '\"\\url', fixed = TRUE) citation[[3]]<-data.frame() ack_trait_sources<-trait.sources[which(trait.sources$access=='public (notify the PIs)'),] ack_trait_sources<-ack_trait_sources[c('project_pi','project_pi_contact','access','source_citation','citation_bibtex')] citation[[4]]<-ack_trait_sources colnames(citation[[4]])<-c('primary_contact_fullname','primary_contact_email','access_conditions','source_fullname','source_citation') names(citation)<-c("general information","references","acknowledgements","data owners to contact") #Write acknowledgements #add code here if we decide to do trait acknowledgements if(nrow(citation[[4]])==0){citation[[4]]<-NULL} #Write author contact warning and info if('public (notify the PIs)'%in%trait.sources$access){ affected_trait__datasource_id<-trait.sources$project_pi_contact[which(trait.sources$access=='public (notify the PIs)')] n_affected_records<-length(which(trait.dataframe$access%in%'public (notify the PIs)')) pct_affected_records<-round(x =( n_affected_records/(nrow(trait.dataframe) ))*100,digits = 2) n_affected_sources<-nrow(citation$`data owners to contact`) pct_affected_sources<-round(x = (n_affected_sources/(nrow(trait.sources)))*100,digits = 2) message(paste("NOTE: You have references that require you to contact the data owners before publication. This applies to ", n_affected_records, " records (",pct_affected_records,"%) from ",n_affected_sources," sources (",pct_affected_sources,"%).",sep="")) }#if need to contact authors of a study } #if only a trait dataframe is supplied ####### ###### #Write bibtex output if(!is.null(bibtex_file)){ writeLines(text = citation[[2]],con=bibtex_file) } #Return the citation list return(citation) } ##################### #'List political divisions and associated geonames codes. #' #'BIEN_metadata_list_political_names downloads country, state, and county names and associated codes used by BIEN. #' @param ... 
Additional arguments passed to internal functions. #' @return A dataframe containing political division names and their associated codes. #' @note Political names and codes follow http://www.geonames.org/ #' @examples \dontrun{ #' BIEN_metadata_list_political_names()} #' @family metadata functions #' @export BIEN_metadata_list_political_names <- function(...){ query<-'SELECT country,country_iso, state_province, state_province_ascii,state_province_code AS "state_code", county_parish,county_parish_ascii,county_parish_code AS "county_code" FROM county_parish ;' .BIEN_sql(query, ...) } ################################ ############################ #Stem functions #'Extract stem data for specified species from BIEN #' #'BIEN_stem_species downloads occurrence records for specific species from the BIEN database. #' @param species A single species, or a vector of species. Genus and species should be separated by a space. Genus should be capitalized. #' @template stem #' @return Dataframe containing stem data for the specified species. #' @note Setting either "cultivated" or "native.status" to TRUE will significantly slow the speed of a query. #' @examples \dontrun{ #' BIEN_stem_species("Abies amabilis") #' species_vector<-c("Abies amabilis", "Acer nigrum") #' BIEN_stem_species(species_vector) #' BIEN_stem_species(species_vector,all.taxonomy = TRUE)} #' @family stem functions #' @export BIEN_stem_species <- function(species, cultivated = FALSE, new.world = NULL, all.taxonomy = FALSE, native.status = FALSE, natives.only = TRUE, political.boundaries = FALSE, collection.info = FALSE, all.metadata = FALSE, ...){ .is_log(all.metadata) .is_log(cultivated) .is_log_or_null(new.world) .is_log(all.taxonomy) .is_char(species) .is_log(native.status) .is_log(natives.only) .is_log(political.boundaries) .is_log(collection.info) #set conditions for query cultivated_ <- .cultivated_check_stem(cultivated) newworld_ <- .newworld_check_stem(new.world) taxonomy_ <- .taxonomy_check_stem(all.taxonomy) native_ <- .native_check_stem(native.status) natives_ <- .natives_check_stem(natives.only) political_ <- .political_check_stem(political.boundaries) collection_ <- .collection_check_stem(collection.info) vfoi_ <- .vfoi_check_stem(native.status,cultivated,natives.only,collection.info) md_ <- .md_check_stem(all.metadata) # set the query query <- paste("SELECT analytical_stem.scrubbed_species_binomial",taxonomy_$select,native_$select,political_$select," ,analytical_stem.latitude, analytical_stem.longitude,analytical_stem.date_collected, analytical_stem.relative_x_m, analytical_stem.relative_y_m, analytical_stem.taxonobservation_id,analytical_stem.stem_code, analytical_stem.stem_dbh_cm, analytical_stem.stem_height_m, plot_metadata.dataset,plot_metadata.datasource,plot_metadata.dataowner,analytical_stem.custodial_institution_codes, analytical_stem.collection_code,analytical_stem.datasource_id",collection_$select,cultivated_$select,newworld_$select,md_$select," FROM (SELECT * FROM analytical_stem WHERE scrubbed_species_binomial in (", paste(shQuote(species, type = "sh"),collapse = ', '), ")) AS analytical_stem JOIN plot_metadata ON (analytical_stem.plot_metadata_id= plot_metadata.plot_metadata_id)", vfoi_$join ," WHERE analytical_stem.scrubbed_species_binomial in (", paste(shQuote(species, type = "sh"),collapse = ', '), ")", cultivated_$query,newworld_$query,natives_$query, "AND analytical_stem.higher_plant_group NOT IN ('Algae','Bacteria','Fungi') AND (analytical_stem.is_geovalid = 1) AND (analytical_stem.georef_protocol is NULL 
OR analytical_stem.georef_protocol<>'county centroid') AND (analytical_stem.is_centroid IS NULL OR analytical_stem.is_centroid=0) ORDER BY analytical_stem.scrubbed_species_binomial ;") return(.BIEN_sql(query, ...)) } ####################### #'Extract stem data for specified families from BIEN #' #'BIEN_stem_family downloads occurrence records for specific families from the BIEN database. #' @param family A single family, or a vector of families. Families should be capitalized. #' @template stem #' @return Dataframe containing stem data for the specified families. #' @note Setting either "cultivated" or "native.status" to TRUE will significantly slow the speed of a query. #' @examples \dontrun{ #' BIEN_stem_family(family = "Marantaceae") #' family_vector<-c("Marantaceae", "Buxaceae") #' BIEN_stem_family(family = family_vector) #' BIEN_stem_family(family = family_vector, all.taxonomy = TRUE, native.status = TRUE)} #' @family stem functions #' @export BIEN_stem_family <- function(family, cultivated = FALSE, new.world = NULL, all.taxonomy = FALSE, native.status = FALSE, natives.only = TRUE, political.boundaries = FALSE, collection.info = FALSE, all.metadata = FALSE, ...){ .is_log(all.metadata) .is_log(cultivated) .is_log_or_null(new.world) .is_log(all.taxonomy) .is_char(family) .is_log(native.status) .is_log(natives.only) .is_log(political.boundaries) .is_log(collection.info) #set conditions for query cultivated_<-.cultivated_check_stem(cultivated) newworld_<-.newworld_check_stem(new.world) taxonomy_<-.taxonomy_check_stem(all.taxonomy) native_<-.native_check_stem(native.status) natives_<-.natives_check_stem(natives.only) political_<-.political_check_stem(political.boundaries) collection_<-.collection_check_stem(collection.info) vfoi_<-.vfoi_check_stem(native.status,cultivated,natives.only,collection.info) md_<-.md_check_stem(all.metadata) # set the query query <- paste("SELECT analytical_stem.scrubbed_family, analytical_stem.scrubbed_genus,analytical_stem.scrubbed_species_binomial",taxonomy_$select,native_$select, political_$select,", analytical_stem.latitude, analytical_stem.longitude,analytical_stem.date_collected,analytical_stem.relative_x_m, analytical_stem.relative_y_m, analytical_stem.taxonobservation_id, analytical_stem.stem_code, analytical_stem.stem_dbh_cm, analytical_stem.stem_height_m, plot_metadata.dataset,plot_metadata.datasource,plot_metadata.dataowner,analytical_stem.custodial_institution_codes, analytical_stem.collection_code,analytical_stem.datasource_id",collection_$select,cultivated_$select,newworld_$select,md_$select," FROM (SELECT * FROM analytical_stem WHERE scrubbed_family in (", paste(shQuote(family, type = "sh"),collapse = ', '), ")) AS analytical_stem JOIN plot_metadata ON (analytical_stem.plot_metadata_id= plot_metadata.plot_metadata_id)", vfoi_$join ," WHERE analytical_stem.scrubbed_family in (", paste(shQuote(family, type = "sh"),collapse = ', '), ")", cultivated_$query,newworld_$query,natives_$query, "AND analytical_stem.higher_plant_group NOT IN ('Algae','Bacteria','Fungi') AND (analytical_stem.is_geovalid = 1) AND (analytical_stem.georef_protocol is NULL OR analytical_stem.georef_protocol<>'county centroid') AND (analytical_stem.is_centroid IS NULL OR analytical_stem.is_centroid=0) ORDER BY analytical_stem.scrubbed_genus, analytical_stem.scrubbed_species_binomial ;") return(.BIEN_sql(query, ...)) } ####################################### #'Extract stem data for specified genera from BIEN #' #'BIEN_stem_genus downloads occurrence records for specific genera from 
the BIEN database. #' @param genus A single genus, or a vector of genera. Genera should be capitalized. #' @template stem #' @return Dataframe containing stem data for the specified genera. #' @note Setting either "cultivated" or "native.status" to TRUE will significantly slow the speed of a query. #' @examples \dontrun{ #' BIEN_stem_genus(genus = "Tovomita") #' genus_vector<-c("Tovomita", "Myrcia") #' BIEN_stem_genus(genus = genus_vector) #' BIEN_stem_genus(genus = genus_vector, all.taxonomy = TRUE)} #' @family stem functions #' @export BIEN_stem_genus <- function(genus, cultivated = FALSE, new.world = NULL, all.taxonomy = FALSE, native.status = FALSE, natives.only = TRUE, political.boundaries = FALSE, collection.info = FALSE, all.metadata = FALSE, ...){ .is_log(all.metadata) .is_log(cultivated) .is_log_or_null(new.world) .is_log(all.taxonomy) .is_char(genus) .is_log(native.status) .is_log(natives.only) .is_log(political.boundaries) .is_log(collection.info) #set conditions for query cultivated_ <- .cultivated_check_stem(cultivated) newworld_ <- .newworld_check_stem(new.world) taxonomy_ <- .taxonomy_check_stem(all.taxonomy) native_ <- .native_check_stem(native.status) natives_ <- .natives_check_stem(natives.only) political_ <- .political_check_stem(political.boundaries) collection_ <- .collection_check_stem(collection.info) vfoi_ <- .vfoi_check_stem(native.status,cultivated,natives.only,collection.info) md_ <- .md_check_stem(all.metadata) # set the query query <- paste("SELECT analytical_stem.scrubbed_genus,analytical_stem.scrubbed_species_binomial",taxonomy_$select,native_$select,political_$select," , analytical_stem.latitude, analytical_stem.longitude,analytical_stem.date_collected, analytical_stem.relative_x_m, analytical_stem.relative_y_m, analytical_stem.taxonobservation_id, analytical_stem.stem_code, analytical_stem.stem_dbh_cm, analytical_stem.stem_height_m, plot_metadata.dataset, plot_metadata.datasource,plot_metadata.dataowner, analytical_stem.custodial_institution_codes, analytical_stem.collection_code, analytical_stem.datasource_id",collection_$select,cultivated_$select,newworld_$select,md_$select," FROM (SELECT * FROM analytical_stem WHERE scrubbed_genus in (", paste(shQuote(genus, type = "sh"),collapse = ', '), ")) AS analytical_stem JOIN plot_metadata ON (analytical_stem.plot_metadata_id= plot_metadata.plot_metadata_id)", vfoi_$join ," WHERE analytical_stem.scrubbed_genus in (", paste(shQuote(genus, type = "sh"),collapse = ', '), ")", cultivated_$query,newworld_$query,natives_$query, "AND analytical_stem.higher_plant_group NOT IN ('Algae','Bacteria','Fungi') AND (analytical_stem.is_geovalid = 1) AND (analytical_stem.georef_protocol is NULL OR analytical_stem.georef_protocol<>'county centroid') AND (analytical_stem.is_centroid IS NULL OR analytical_stem.is_centroid=0) ORDER BY analytical_stem.scrubbed_genus, analytical_stem.scrubbed_species_binomial ;") return(.BIEN_sql(query, ...)) } ########### #'Extract stem data for a given datasource from BIEN #' #'BIEN_stem_datasource downloads occurrence records for specific datasources from the BIEN database. #' @param datasource A single datasource, or a vector of datasources. #' @template stem #' @return Dataframe containing stem data for the specified datasource. #' @note Setting either "cultivated" or "native.status" to TRUE will significantly slow the speed of a query. #' @note For a list of available datasources, use \code{\link{BIEN_plot_list_datasource}}.
#' @examples \dontrun{ #' BIEN_stem_datasource(datasource = "SALVIAS")} #' @family stem functions #' @export BIEN_stem_datasource <- function(datasource, cultivated = FALSE, new.world = NULL, all.taxonomy = FALSE, native.status = FALSE, natives.only = TRUE, political.boundaries = FALSE, collection.info = FALSE, all.metadata = FALSE, ...){ .is_log(all.metadata) .is_log(cultivated) .is_log_or_null(new.world) .is_log(all.taxonomy) .is_char(datasource) .is_log(native.status) .is_log(natives.only) .is_log(political.boundaries) .is_log(collection.info) #set conditions for query cultivated_<-.cultivated_check_stem(cultivated) newworld_<-.newworld_check_stem(new.world) taxonomy_<-.taxonomy_check_stem(all.taxonomy) native_<-.native_check_stem(native.status) natives_<-.natives_check_stem(natives.only) political_<-.political_check_stem(political.boundaries) collection_<-.collection_check_stem(collection.info) vfoi_<-.vfoi_check_stem(native.status,cultivated,natives.only,collection.info) md_<-.md_check_stem(all.metadata) # set the query query <- paste("SELECT analytical_stem.plot_name,analytical_stem.subplot, analytical_stem.elevation_m, analytical_stem.plot_area_ha,analytical_stem.sampling_protocol, analytical_stem.recorded_by,analytical_stem.scrubbed_species_binomial",taxonomy_$select,native_$select,political_$select," ,analytical_stem.latitude, analytical_stem.longitude,analytical_stem.date_collected,analytical_stem.relative_x_m, analytical_stem.relative_y_m, analytical_stem.taxonobservation_id, analytical_stem.stem_code, analytical_stem.stem_dbh_cm, analytical_stem.stem_height_m, plot_metadata.dataset,plot_metadata.datasource,plot_metadata.dataowner, analytical_stem.custodial_institution_codes, analytical_stem.collection_code,analytical_stem.datasource_id",collection_$select,cultivated_$select,newworld_$select,md_$select," FROM (SELECT * FROM analytical_stem WHERE datasource in (", paste(shQuote(datasource, type = "sh"),collapse = ', '), ")) AS analytical_stem JOIN plot_metadata ON (analytical_stem.plot_metadata_id= plot_metadata.plot_metadata_id)", vfoi_$join ," WHERE analytical_stem.datasource in (", paste(shQuote(datasource, type = "sh"),collapse = ', '), ")", cultivated_$query,newworld_$query,natives_$query, "AND analytical_stem.higher_plant_group NOT IN ('Algae','Bacteria','Fungi') AND (analytical_stem.is_geovalid = 1) AND (analytical_stem.georef_protocol is NULL OR analytical_stem.georef_protocol<>'county centroid') AND (analytical_stem.is_centroid IS NULL OR analytical_stem.is_centroid=0) ORDER BY analytical_stem.scrubbed_species_binomial ;") return(.BIEN_sql(query, ...)) } ########################## #'Download stem data using a specified sampling protocol. #' #'BIEN_stem_sampling_protocol downloads plot-based stem data using a specified sampling protocol. #' @param sampling_protocol A sampling protocol or vector of sampling protocols. See \code{\link{BIEN_plot_list_sampling_protocols}} for options. #' @template stem #' @return A dataframe containing all data from the specified sampling protocol.
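#' @note Setting either "cultivated" or "native.status" to TRUE will significantly slow the speed of a query.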
#' @examples \dontrun{ #' BIEN_stem_sampling_protocol("Point-intercept")} #' @family stem functions #' @export BIEN_stem_sampling_protocol <- function(sampling_protocol, cultivated = FALSE, new.world = NULL, all.taxonomy = FALSE, native.status = FALSE, natives.only = TRUE, political.boundaries = FALSE, collection.info = FALSE, all.metadata = FALSE, ...){ .is_log(all.metadata) .is_log(cultivated) .is_log_or_null(new.world) .is_log(all.taxonomy) .is_char(sampling_protocol) .is_log(native.status) .is_log(natives.only) .is_log(political.boundaries) .is_log(collection.info) #set conditions for query cultivated_<-.cultivated_check_stem(cultivated) newworld_<-.newworld_check_stem(new.world) taxonomy_<-.taxonomy_check_stem(all.taxonomy) native_<-.native_check_stem(native.status) natives_<-.natives_check_stem(natives.only) political_<-.political_check_stem(political.boundaries) collection_<-.collection_check_stem(collection.info) vfoi_<-.vfoi_check_stem(native.status,cultivated,natives.only,collection.info) md_<-.md_check_stem(all.metadata) # set the query query <- paste("SELECT analytical_stem.scrubbed_species_binomial",taxonomy_$select,native_$select,political_$select," ,analytical_stem.latitude, analytical_stem.longitude,analytical_stem.date_collected, analytical_stem.relative_x_m, analytical_stem.relative_y_m, analytical_stem.taxonobservation_id,analytical_stem.stem_code, analytical_stem.stem_dbh_cm, analytical_stem.stem_height_m, plot_metadata.dataset,plot_metadata.datasource,plot_metadata.dataowner,analytical_stem.custodial_institution_codes, analytical_stem.collection_code,analytical_stem.datasource_id,view_full_occurrence_individual.plot_name,view_full_occurrence_individual.subplot, view_full_occurrence_individual.elevation_m, view_full_occurrence_individual.plot_area_ha, view_full_occurrence_individual.sampling_protocol,view_full_occurrence_individual.recorded_by, view_full_occurrence_individual.individual_count",collection_$select,cultivated_$select,newworld_$select,md_$select," FROM (SELECT * FROM analytical_stem WHERE sampling_protocol in (", paste(shQuote(sampling_protocol, type = "sh"),collapse = ', '), ")) AS analytical_stem JOIN plot_metadata ON (analytical_stem.plot_metadata_id= plot_metadata.plot_metadata_id)", vfoi_$join ," WHERE analytical_stem.sampling_protocol in (", paste(shQuote(sampling_protocol, type = "sh"),collapse = ', '), ")", cultivated_$query,newworld_$query,natives_$query, "AND analytical_stem.higher_plant_group NOT IN ('Algae','Bacteria','Fungi') AND (analytical_stem.is_geovalid = 1) AND (analytical_stem.georef_protocol is NULL OR analytical_stem.georef_protocol<>'county centroid') AND (analytical_stem.is_centroid IS NULL OR analytical_stem.is_centroid=0) ORDER BY analytical_stem.scrubbed_species_binomial ;") return(.BIEN_sql(query, ...)) } #######################################
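#A minimal sketch (not run) of a typical stem workflow combining the functions above;
#the species name matches the examples, and the bibtex filename is hypothetical:
# stems <- BIEN_stem_species("Abies amabilis", political.boundaries = TRUE)
# cites <- BIEN_metadata_citation(dataframe = stems, bibtex_file = "bien_stem_refs.bib")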
#' BIEN: Tools for accessing the BIEN database. #' #' @description The Botanical Information and Ecology Network (BIEN) R package provides access to the BIEN database as well as useful tools for working with the BIEN data. #' #' @section Getting started: #' Type vignette("BIEN") to view the vignette, which contains useful information on the BIEN package. #' #' @references Maitner BS, Boyle B, Casler N, et al. The BIEN R package: A tool to access the Botanical Information and Ecology Network (BIEN) Database. Methods Ecol Evol. 2018;9:373-379. https://doi.org/10.1111/2041-210X.12861 #' #' @docType package #' @name BIEN #' @aliases BIEN-package NULL ################### .onAttach <- function(libname, pkgname) { packageStartupMessage('Type vignette("BIEN") or vignette("BIEN_tutorial") to get started') # Check for any package notes posted to the development repository and display them on attach suppressWarnings(x <- try(readLines("https://raw.githubusercontent.com/bmaitner/RBIEN/master/NOTES", warn = FALSE), silent = TRUE)) if(inherits(x, "character")){ if(length(x) != 0){ packageStartupMessage(x, appendLF = TRUE) } } } #######################
#'Run an SQL query on the BIEN database. #' #'.BIEN_sql is an internal function used to submit SQL queries. #' @param query A PostgreSQL query. #' @param view_full_occurrence_individual Alternative value to be substituted for "view_full_occurrence_individual" in queries when not NULL. #' @param agg_traits Alternative value to be substituted for "agg_traits" in queries when not NULL. #' @param species_by_political_division Alternative value to be substituted for "species_by_political_division" in queries when not NULL. #' @param bien_species_all Alternative value to be substituted for "bien_species_all" in queries when not NULL. #' @param ranges Alternative value to be substituted for "ranges" in queries when not NULL. #' @param bien_taxonomy Alternative value to be substituted for "bien_taxonomy" in queries when not NULL. #' @param phylogeny Alternative value to be substituted for "phylogeny" in queries when not NULL. #' @param bien_metadata Alternative value to be substituted for "bien_metadata" in queries when not NULL. #' @param plot_metadata Alternative value to be substituted for "plot_metadata" in queries when not NULL. #' @param analytical_stem Alternative value to be substituted for "analytical_stem" in queries when not NULL. #' @param datasource Alternative value to be substituted for "datasource" in queries when not NULL. #' @param centroid Alternative value to be substituted for "centroid" in queries when not NULL. #' @param limit A limit on the number of records to be returned. Should be a single number or NULL (the default). #' @param return.query Should the query used be returned rather than executed? Default is FALSE #' @param schema An alternative schema to be accessed. Used for testing purposes. #' @param print.query Should the query used be printed? Default is FALSE #' @param fetch.query If TRUE (the default) query is executed using dbFetch. If FALSE, dbGetQuery is used. #' @note Using fetch.query = TRUE provides better error handling, but fetch.query = FALSE results in a more useful (but uncatchable) error. #' @import RPostgreSQL #' @importFrom DBI dbDriver dbFetch #' @return A dataframe returned by the query. 
#' @keywords internal #' @examples \dontrun{ #' .BIEN_sql("SELECT DISTINCT country, scrubbed_species_binomial FROM view_full_occurrence_individual #' WHERE country in ( 'United States' );")} .BIEN_sql <- function(query, view_full_occurrence_individual = NULL, agg_traits = NULL, species_by_political_division = NULL, bien_species_all = NULL, ranges = NULL, bien_taxonomy = NULL, phylogeny = NULL, bien_metadata = NULL, plot_metadata = NULL, analytical_stem = NULL, datasource = NULL, centroid = NULL, limit = NULL, return.query = FALSE, schema = NULL, print.query = FALSE, fetch.query = TRUE){ .is_char(query) if(print.query){ query <- gsub(pattern = "\n",replacement = "",query) query <- gsub("(?<=[\\s])\\s*|^\\s+|\\s+$", "", query, perl = TRUE) print(query) } if(!is.null(schema)){ view_full_occurrence_individual <- paste(schema,"view_full_occurrence_individual",sep = ".") agg_traits <- paste(schema,"agg_traits",sep = ".") species_by_political_division <- paste(schema,"species_by_political_division",sep = ".") bien_species_all <- paste(schema,"bien_species_all",sep = ".") ranges <- paste(schema,"ranges",sep = ".") bien_taxonomy <- paste(schema,"bien_taxonomy",sep = ".") phylogeny <- paste(schema,"phylogeny",sep = ".") bien_metadata <- paste(schema,"bien_metadata",sep = ".") plot_metadata <- paste(schema,"plot_metadata",sep = ".") analytical_stem <- paste(schema,"analytical_stem",sep = ".") datasource <- paste(schema,"datasource",sep = ".") centroid <- paste(schema,"centroid",sep = ".") } if(!is.null(view_full_occurrence_individual)){ #query<-gsub(pattern = "\\<view_full_occurrence_individual\\>",replacement = view_full_occurrence_individual,x = query)} query <- gsub(pattern = "(?<!as |AS )(?<!\\S)view_full_occurrence_individual(?!\\S)", replacement = view_full_occurrence_individual, x = query, perl = TRUE) } if(!is.null(plot_metadata)){ #query<-gsub(pattern = "\\<view_full_occurrence_individual\\>",replacement = view_full_occurrence_individual,x = query)} query <- gsub(pattern = "(?<!as |AS )(?<!\\S)plot_metadata(?!\\S)", replacement = plot_metadata, x = query, perl = TRUE) } if(!is.null(analytical_stem)){ #query<-gsub(pattern = "\\<view_full_occurrence_individual\\>",replacement = view_full_occurrence_individual,x = query)} query <- gsub(pattern = "(?<!as |AS )(?<!\\S)analytical_stem(?!\\S)", replacement = analytical_stem, x = query, perl = TRUE) } if(!is.null(agg_traits)){ query <- gsub(pattern = "agg_traits", replacement = agg_traits, x = query) } if(!is.null(species_by_political_division)){ query <- gsub(pattern = "species_by_political_division", replacement = species_by_political_division, x = query) } if(!is.null(bien_species_all)){ query <- gsub(pattern = "bien_species_all", replacement = bien_species_all, x = query) } if(!is.null(ranges)){ query <- gsub(pattern = "ranges", replacement = ranges, x = query) } if(!is.null(bien_taxonomy)){ query <- gsub(pattern = "bien_taxonomy", replacement = bien_taxonomy, x = query) } if(!is.null(phylogeny)){ query <- gsub(pattern = "\\<phylogeny\\>", replacement = phylogeny, x = query) } if(!is.null(bien_metadata)){ query <- gsub(pattern = "\\<bien_metadata\\>", replacement = bien_metadata, x = query) } if(!is.null(datasource)){ query <- gsub(pattern = "(?<=\\s)datasource(?=\\s)", replacement = datasource, x = query, perl = TRUE) } if(!is.null(centroid)){ query <- gsub(pattern = "(?<=\\s)centroid(?=\\s)", replacement = centroid, x = query, perl = TRUE) } if(!is.null(limit)){ query <- gsub(pattern = " ;", replacement = paste(" LIMIT ",limit,";"), x = 
query) } host <- 'vegbiendev.nceas.ucsb.edu' dbname <- 'public_vegbien' user <- 'public_bien' password <- 'bien_public' # Name the database type that will be used drv <- dbDriver('PostgreSQL') # establish connection with database con <- dbConnect(drv, host = host, dbname = dbname, user = user, password = password) if(return.query){ query <- gsub(pattern = "\n", replacement = "", query) query <- gsub("(?<=[\\s])\\s*|^\\s+|\\s+$", "", query, perl = TRUE) dbDisconnect(con) return(query) } # Execute the query if(fetch.query){ suppressWarnings( df <- tryCatch(expr = dbFetch(res = dbSendQuery(conn = con, statement = query)), error = function(e){e} )) }else{ df <- dbGetQuery(con, statement = query); #dbGetQuery doesn't allow error catching, so we've stopped using it by default } # Disconnect from the database dbDisconnect(con) # Optionally print the query if(print.query){ query <- gsub(pattern = "\n",replacement = "", query) query <- gsub("(?<=[\\s])\\s*|^\\s+|\\s+$", "", query, perl = TRUE) print(query) } #Check whether query worked if("error" %in% class(df)){ message("\nThere was a problem with the query. This is most often due to internet connection issues, but may also be due other factors such as an outdated version of the package.") return(invisible(NULL)) }else{ return(df) } }
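#A minimal sketch (not run) of how the exported functions use .BIEN_sql. Note that the
#limit argument is applied by rewriting the trailing " ;", so queries are assumed to end in " ;":
# .BIEN_sql("SELECT scrubbed_species_binomial FROM bien_taxonomy ;", limit = 10)
# .BIEN_sql("SELECT scrubbed_species_binomial FROM bien_taxonomy ;", return.query = TRUE) #returns the final SQL without executing it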
#Internal functions ###################### #'Set query details #' #'Helper function to set query components. #' @keywords internal .cultivated_check<-function(cultivated){ if(!cultivated){ query <- "AND (is_cultivated_observation = 0 OR is_cultivated_observation IS NULL) AND is_location_cultivated IS NULL" select <- "" }else{ query <- "" select <- ",is_cultivated_observation,is_cultivated_in_region, is_location_cultivated" } output <- as.data.frame(cbind(query,select), stringsAsFactors = FALSE) colnames(output) <- c("query","select") return(output) } ## #'Set query details #' #'Helper function to set query components. #' @keywords internal .newworld_check <- function(new.world){ if(is.null(new.world)){ query <- "" select <- "" output <- as.data.frame(cbind(query,select),stringsAsFactors = FALSE) colnames(output) <- c("query", "select") return(output) } if(!new.world){ query <- "AND is_new_world = 0 " select <- ", is_new_world"} if(new.world){ query <- "AND is_new_world = 1 " select <- ", is_new_world"} output <- as.data.frame(cbind(query,select), stringsAsFactors = FALSE) colnames(output) <- c("query", "select") return(output) } ## #'Set query details #' #'Helper function to set query components. #' @keywords internal .geovalid_check<-function(only.geovalid){ if(!only.geovalid){ query <- "" select <- ",is_geovalid" }else{ query <- "AND is_geovalid = 1" select <- "" } output <- as.data.frame(cbind(query,select), stringsAsFactors = FALSE) colnames(output) <- c("query", "select") return(output) } ## #'Set query details #' #'Helper function to set query components. #' @keywords internal .taxonomy_check <- function(all.taxonomy){ if(!all.taxonomy){ select <- "" }else{ select <- ", verbatim_family,verbatim_scientific_name,family_matched,name_matched,name_matched_author,higher_plant_group,scrubbed_taxonomic_status,scrubbed_family,scrubbed_author" } output <- as.data.frame(cbind(select), stringsAsFactors = FALSE) colnames(output) <- c("select") return(output) } ## #'Set query details #' #'Helper function to set query components. #' @keywords internal .native_check <- function(native.status){ if(!native.status){ select <- "" }else{ select <- ",native_status, native_status_reason,native_status_sources,is_introduced,native_status_country,native_status_state_province,native_status_county_parish" } output <- as.data.frame(cbind(select), stringsAsFactors = FALSE) colnames(output) <- c("select") return(output) } ## #'Set query details #' #'Helper function to set query components. #' @keywords internal .observation_check <- function(observation.type){ if(!observation.type){ query <- "AND observation_type IN ('plot','specimen','literature','checklist')" select <- "" }else{ query <- "" select <- ",observation_type" } output <- as.data.frame(cbind(query,select), stringsAsFactors = FALSE) colnames(output) <- c("query","select") return(output) } ## #'Set query details #' #'Helper function to set query components. #' @keywords internal .political_check <- function(political.boundaries){ if(!political.boundaries){ select <- "" }else{ select <- ", country,state_province,county,locality,elevation_m" } output <- as.data.frame(cbind(select),stringsAsFactors = FALSE) colnames(output) <- c("select") return(output) } ## #'Set query details #' #'Helper function to set query components. 
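#' Returns a one-row data.frame whose "query" element holds a WHERE-clause fragment excluding introduced species when natives.only is TRUE.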
#' @keywords internal .natives_check <- function(natives.only){ if(!natives.only){ query <- "" }else{ query <- "AND (is_introduced=0 OR is_introduced IS NULL) " } output <- as.data.frame(cbind(query), stringsAsFactors = FALSE) colnames(output) <- c("query") return(output) } ## #'Set query details #' #'Helper function to set query components. #' @keywords internal .collection_check <- function(collection.info){ if(!collection.info){ select <- "" }else{ select <- ",catalog_number, recorded_by, record_number, date_collected, identified_by, date_identified, identification_remarks " } output <- as.data.frame(cbind(select), stringsAsFactors = FALSE) colnames(output) <- c("select") return(output) } ########################## #Traits #'Set query details #' #'Helper function to set query components. #' @keywords internal .political_check_traits <- function(political.boundaries){ if(political.boundaries){ select <- "region, country, state_province, locality_description" }else{ select <- "" } output <- as.data.frame(cbind(select),stringsAsFactors = FALSE) colnames(output) <- c("select") return(output) } ## #'Set query details #' #'Helper function to set query components. #' @keywords internal .taxonomy_check_traits <- function(all.taxonomy){ if(all.taxonomy){ select <- "verbatim_family, verbatim_scientific_name, name_submitted, family_matched, name_matched, name_matched_author, higher_plant_group, tnrs_warning, matched_taxonomic_status, scrubbed_taxonomic_status, scrubbed_family, scrubbed_genus, scrubbed_specific_epithet, scrubbed_taxon_name_no_author, scrubbed_taxon_canonical, scrubbed_author, scrubbed_taxon_name_with_author, scrubbed_species_binomial_with_morphospecies" }else{ select <- "" } output <- as.data.frame(cbind(select), stringsAsFactors = FALSE) colnames(output) <- c("select") return(output) } ## #'Set query details #' #'Helper function to set query components. #' @keywords internal .source_check_traits <- function(source.citation){ if(source.citation){ select <- ",source_citation" }else{ select <- "" } output <- as.data.frame(cbind(select), stringsAsFactors = FALSE) colnames(output) <- c("select") return(output) } ##################################### #Plots ## #'Set query details #' #'Helper function to set query components. #' @keywords internal .cultivated_check_plot <- function(cultivated){ if(!cultivated){ query <- "AND (view_full_occurrence_individual.is_cultivated_observation = 0 OR view_full_occurrence_individual.is_cultivated_observation IS NULL) AND view_full_occurrence_individual.is_location_cultivated IS NULL" select <- "" }else{ query <- "" select <- ",view_full_occurrence_individual.is_cultivated_observation,view_full_occurrence_individual.is_cultivated_in_region,view_full_occurrence_individual.is_location_cultivated" } output <- as.data.frame(cbind(query, select), stringsAsFactors = FALSE) colnames(output) <- c("query","select") return(output) } ## ## #'Set query details #' #'Helper function to set query components. 
#' @keywords internal .newworld_check_plot <- function(new.world){ if(is.null(new.world)){ query <- "" select <- "" output <- as.data.frame(cbind(query,select),stringsAsFactors = FALSE) colnames(output) <- c("query","select") return(output) } if(!new.world){ query <- "AND view_full_occurrence_individual.is_new_world = 0 " select <- ",view_full_occurrence_individual.is_new_world"} if(new.world){ query <- "AND view_full_occurrence_individual.is_new_world = 1 " select <- ",view_full_occurrence_individual.is_new_world"} output <- as.data.frame(cbind(query,select),stringsAsFactors = FALSE) colnames(output) <- c("query","select") return(output) } ## ## #'Set query details #' #'Helper function to set query components. #' @keywords internal .taxonomy_check_plot <- function(all.taxonomy){ if(!all.taxonomy){ select <- "" }else{ select <- ",view_full_occurrence_individual.verbatim_family,view_full_occurrence_individual.verbatim_scientific_name,view_full_occurrence_individual.family_matched,view_full_occurrence_individual.name_matched,view_full_occurrence_individual.name_matched_author,view_full_occurrence_individual.higher_plant_group,view_full_occurrence_individual.scrubbed_taxonomic_status,view_full_occurrence_individual.scrubbed_family,view_full_occurrence_individual.scrubbed_author" } output <- as.data.frame(cbind(select), stringsAsFactors = FALSE) colnames(output) <- c("select") return(output) } ## ## #'Set query details #' #'Helper function to set query components. #' @keywords internal .native_check_plot <- function(native.status){ if(!native.status){ select <- "" }else{ select <- ",view_full_occurrence_individual.native_status,view_full_occurrence_individual.native_status_reason,view_full_occurrence_individual.native_status_sources,view_full_occurrence_individual.is_introduced,view_full_occurrence_individual.native_status_country,view_full_occurrence_individual.native_status_state_province,view_full_occurrence_individual.native_status_county_parish" } output <- as.data.frame(cbind(select), stringsAsFactors = FALSE) colnames(output) <- c("select") return(output) } ## ## #'Set query details #' #'Helper function to set query components. #' @keywords internal .natives_check_plot <- function(natives.only){ if(!natives.only){ query <- "" }else{ query <- "AND (view_full_occurrence_individual.is_introduced=0 OR view_full_occurrence_individual.is_introduced IS NULL) " } output <- as.data.frame(cbind(query), stringsAsFactors = FALSE) colnames(output) <- c("query") return(output) } ## ## #'Set query details #' #'Helper function to set query components. #' @keywords internal .political_check_plot <- function(political.boundaries){ if(!political.boundaries){ select <- "" }else{ select <- ",view_full_occurrence_individual.country,view_full_occurrence_individual.state_province,view_full_occurrence_individual.county,view_full_occurrence_individual.locality" } output <- as.data.frame(cbind(select), stringsAsFactors = FALSE) colnames(output) <- c("select") return(output) } ## ## #'Set query details #' #'Helper function to set query components. 
#' @keywords internal .collection_check_plot <- function(collection.info){ if(!collection.info){ select <- "" }else{ select <- ",view_full_occurrence_individual.catalog_number, view_full_occurrence_individual.recorded_by, view_full_occurrence_individual.record_number, view_full_occurrence_individual.date_collected, view_full_occurrence_individual.identified_by, view_full_occurrence_individual.date_identified, view_full_occurrence_individual.identification_remarks " } output <- as.data.frame(cbind(select),stringsAsFactors = FALSE) colnames(output) <- c("select") return(output) } ## ## #'Set query details #' #'Helper function to set query components. #' @keywords internal .md_check_plot <- function(all.metadata){ if(!all.metadata){ select <- "" }else{ select <- ",plot_metadata.coord_uncertainty_m,plot_metadata.methodology_reference,plot_metadata.methodology_description,growth_forms_included_all, growth_forms_included_trees, growth_forms_included_shrubs, growth_forms_included_lianas, growth_forms_included_herbs, growth_forms_included_epiphytes, growth_forms_included_notes, taxa_included_all, taxa_included_seed_plants, taxa_included_ferns_lycophytes, taxa_included_bryophytes,taxa_included_exclusions" } output <- as.data.frame(cbind(select), stringsAsFactors = FALSE) colnames(output) <- c("select") return(output) } # #'Set query details #' #'Helper function to set query components. #' @param species Single species or vector of species. #' @keywords internal .species_check <- function(species){ if(is.null(species)){ query <- "" }else{ query <- paste(" AND", "scrubbed_species_binomial in (", paste(shQuote(species, type = "sh"),collapse = ', '), ") ") } output <- as.data.frame(cbind(query), stringsAsFactors = FALSE) colnames(output) <- c("query") return(output) } # #'Set query details #' #'Helper function to set query components. #' @param genus Single genus or vector of genera. #' @keywords internal .genus_check <- function(genus){ if(is.null(genus)){ query <- "" }else{ query <- paste(" AND", "scrubbed_genus in (", paste(shQuote(genus, type = "sh"),collapse = ', '), ") ") } output <- as.data.frame(cbind(query), stringsAsFactors = FALSE) colnames(output) <- c("query") return(output) } ######################################## #Stem #'Set query details #' #'Helper function to set query components. #' @keywords internal .cultivated_check_stem <- function(cultivated){ if(!cultivated){ query <- "AND (analytical_stem.is_cultivated_observation = 0 OR analytical_stem.is_cultivated_observation IS NULL) AND analytical_stem.is_location_cultivated IS NULL" select <- "" }else{ query <- "" select <- ",analytical_stem.is_cultivated_observation,view_full_occurrence_individual.is_cultivated_in_region,analytical_stem.is_location_cultivated" } output <- as.data.frame(cbind(query,select), stringsAsFactors = FALSE) colnames(output) <- c("query", "select") return(output) } # #'Set query details #' #'Helper function to set query components.
#' @keywords internal
.newworld_check_stem <- function(new.world){

  if(is.null(new.world)){
    query <- ""
    select <- ""
    output <- as.data.frame(cbind(query, select), stringsAsFactors = FALSE)
    colnames(output) <- c("query", "select")
    return(output)
  }

  if(!new.world){
    query <- "AND analytical_stem.is_new_world = 0 "
    select <- ",analytical_stem.is_new_world"
  }

  if(new.world){
    query <- "AND analytical_stem.is_new_world = 1 "
    select <- ",analytical_stem.is_new_world"
  }

  output <- as.data.frame(cbind(query, select), stringsAsFactors = FALSE)
  colnames(output) <- c("query", "select")
  return(output)

}

#

#'Set query details
#'
#'Helper function to set query components.
#' @keywords internal
.taxonomy_check_stem <- function(all.taxonomy){

  if(!all.taxonomy){
    select <- ""
  }else{
    select <- ",analytical_stem.verbatim_family,analytical_stem.verbatim_scientific_name,analytical_stem.family_matched,analytical_stem.name_matched,analytical_stem.name_matched_author,analytical_stem.higher_plant_group,analytical_stem.scrubbed_taxonomic_status,analytical_stem.scrubbed_family,analytical_stem.scrubbed_author"
  }

  output <- as.data.frame(cbind(select), stringsAsFactors = FALSE)
  colnames(output) <- c("select")
  return(output)

}

#

#'Set query details
#'
#'Helper function to set query components.
#' @keywords internal
.native_check_stem <- function(native.status){

  if(!native.status){
    select <- ""
  }else{
    select <- ",analytical_stem.native_status,analytical_stem.native_status_reason,analytical_stem.native_status_sources,analytical_stem.is_introduced,analytical_stem.native_status_country,analytical_stem.native_status_state_province,analytical_stem.native_status_county_parish"
  }

  output <- as.data.frame(cbind(select), stringsAsFactors = FALSE)
  colnames(output) <- c("select")
  return(output)

}

#

#'Set query details
#'
#'Helper function to set query components.
#' @keywords internal
.natives_check_stem <- function(natives.only){

  if(!natives.only){
    query <- ""
  }else{
    query <- "AND (view_full_occurrence_individual.is_introduced=0 OR view_full_occurrence_individual.is_introduced IS NULL)"
  }

  output <- as.data.frame(cbind(query), stringsAsFactors = FALSE)
  colnames(output) <- c("query")
  return(output)

}

#

#'Set query details
#'
#'Helper function to set query components.
#' @keywords internal
.political_check_stem <- function(political.boundaries){

  if(!political.boundaries){
    select <- ""
  }else{
    select <- ",analytical_stem.country,analytical_stem.state_province,analytical_stem.county,analytical_stem.locality"
  }

  output <- as.data.frame(cbind(select), stringsAsFactors = FALSE)
  colnames(output) <- c("select")
  return(output)

}

#

#'Set query details
#'
#'Helper function to set query components.
#' @keywords internal
.collection_check_stem <- function(collection.info){

  if(!collection.info){
    select <- ""
  }else{
    select <- ",view_full_occurrence_individual.catalog_number, view_full_occurrence_individual.recorded_by, view_full_occurrence_individual.record_number, view_full_occurrence_individual.date_collected, view_full_occurrence_individual.identified_by, view_full_occurrence_individual.date_identified, view_full_occurrence_individual.identification_remarks "
  }

  output <- as.data.frame(cbind(select), stringsAsFactors = FALSE)
  colnames(output) <- c("select")
  return(output)

}

#

#'Set query details
#'
#'Helper function to set query components.
#' @keywords internal
.vfoi_check_stem <- function(native.status, cultivated, natives.only, collection.info){

  if(native.status | cultivated | natives.only | collection.info){
    join <- " JOIN view_full_occurrence_individual ON (analytical_stem.taxonobservation_id = view_full_occurrence_individual.taxonobservation_id)"
  }else{
    join <- ""
  }

  output <- as.data.frame(cbind(join), stringsAsFactors = FALSE)
  colnames(output) <- c("join")
  return(output)

}

#

#'Set query details
#'
#'Helper function to set query components.
#' @keywords internal
.md_check_stem <- function(all.metadata){

  if(!all.metadata){
    select <- ""
  }else{
    select <- ",plot_metadata.coord_uncertainty_m,plot_metadata.methodology_reference,plot_metadata.methodology_description,growth_forms_included_all, growth_forms_included_trees, growth_forms_included_shrubs, growth_forms_included_lianas, growth_forms_included_herbs, growth_forms_included_epiphytes, growth_forms_included_notes, taxa_included_all, taxa_included_seed_plants, taxa_included_ferns_lycophytes, taxa_included_bryophytes,taxa_included_exclusions"
  }

  output <- as.data.frame(cbind(select), stringsAsFactors = FALSE)
  colnames(output) <- c("select")
  return(output)

}

#########################################
#Value checkers
#########################################

##################################

#'Check that value is logical
#'
#'Helper function to check data format.
#' @keywords internal
#' @examples \dontrun{
#' is_log(TRUE)}
.is_log <- function(x) {
  if (!inherits(x, 'logical')) {
    stop(sys.call()[-1], " should be logical", call. = FALSE)
  }
}

##################################

#'Check that value is logical or null
#'
#'Helper function to check data format.
#' @keywords internal
#' @examples \dontrun{
#' is_log_or_null(new.world)}
.is_log_or_null <- function(x) {
  if (!inherits(x, c('logical', 'NULL'))) {
    stop(sys.call()[-1], " should be logical or NULL", call. = FALSE)
  }
}

###################################

#'Check that value is character
#'
#'Helper function to check data format.
#' @keywords internal
#' @examples \dontrun{
#' is_char(species)}
.is_char <- function(x) {
  if (!inherits(x, c("character", "NULL"))) {
    stop(sys.call()[-1], " should be character", call. = FALSE)
  }
}

###################################

#'Check that value is numeric
#'
#'Helper function to check data format.
#' @keywords internal
#' @examples \dontrun{
#' is_num(min.lat)}
.is_num <- function(x) {
  if (!inherits(x, 'numeric')) {
    stop(sys.call()[-1], " should be numeric", call. = FALSE)
  }
}

#################################
#################################
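##
#Note: the .*_check_* helpers above each return a one-row data frame of SQL
#fragments ("query", "select", or "join" pieces) that the exported BIEN_*
#functions paste together into a full query. A minimal, purely illustrative
#sketch of that composition (the real query templates live in the exported
#functions, not here):
#
# taxonomy   <- .taxonomy_check_stem(all.taxonomy = FALSE)
# cultivated <- .cultivated_check_stem(cultivated = FALSE)
# query <- paste("SELECT analytical_stem.scrubbed_species_binomial",
#                taxonomy$select,
#                "FROM analytical_stem WHERE 1=1",
#                cultivated$query, ";")
#
#The .is_* checkers above are lightweight argument validators used at the top
#of the exported functions: .is_log(TRUE) passes silently, while .is_log("yes")
#throws an informative error that names the offending argument.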
/scratch/gouwar.j/cran-all/cranData/BIEN/R/internals.R
--- title: "BIEN R package" author: "Brian Maitner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BIEN R package} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The Botanical Information and Ecology Network(BIEN) R package provides access to the BIEN database as well as useful tools for working with the BIEN data. ##Data Usage Agreement Please read the data usage agreement, available at: https://bien.nceas.ucsb.edu/bien/biendata/data-use-policy/ ##Data Usage: Warnings and Caveats Please be aware of quality and appropriateness of BIEN data for the analyses that you may wish to perform or information that you would like to obtain Versioning - data available via BIENdata.org and the the BIEN R package (BIEN) are being updated and improved. This is an active collaborative group who is aiming to continue to improve the quality of botanical information and data. As a result, users should take note of the version number (using the function BIEN_metadata_database_version() ) and be sure to check back periodically for updates. Checking for new results to a query can be done using the function BIEN_metadata_match_data(). Data quality - All data are presented ‘as is’. Default BIEN downloads return all data associated with a given taxonomic name string. There may be additional issues associated with a given name string. Please be aware that data returned may also include data whose geographic coordinates and cultivated status are uncertain or unverifiable. Hybrids - Hybrid species are assigned the specific epithet "x" by the TNRS. To view the full name information for these occurrences, set "all.taxonomy = TRUE". ##Database connection issues Some institution and computer programs (e.g. some antivirus programs) block the SQL connections that this package relies on. While we are exploring ways around this issue, at present the simplest method is to use the package on a computer/network that doesn't block SQL connections. ##Function Names Function names in the BIEN package follow a naming convention which was designed to make locating the desired function easier: * All function names begin with the prefix "BIEN_" * Functions to download occurrence records begin with the prefix "BIEN_occurrence_" * Functions to download range maps begin with the prefix "BIEN_ranges_" * Functions to download trait data begin with the prefix "BIEN_trait_" * Functions to download species lists (or other lists) begin with the prefix "BIEN_list_" * Functions to download plot data begin with the prefix "BIEN_plot_" * Functions to download stem data begin with the prefix "BIEN_stem_" * Functions to download taxonomic information begin with the prefix "BIEN_taxonomy_" * Functions to download phylogenies begin with the prefix "BIEN_phylogeny_" * Functions to access metadata begin with the prefix "BIEN_metadata_" ##Function Directory For full information on each function, see the associated help file. ###BIEN_metadata functions * `BIEN_metadata_database_version` Returns the BIEN database version number and release date * `BIEN_metadata_match_data` Compares the stored results of two (old vs new) queries to check for additions or deletions. * `BIEN_metadata_citation` Generates information needed to cite data downloaded from BIEN. * `BIEN_metadata_list_political_names` Provides a list of political division names used by BIEN. 
###BIEN_list functions

* `BIEN_list_all` Lists all species present in the BIEN database
* `BIEN_list_country` Produces species lists by country
* `BIEN_list_county` Produces species lists by county
* `BIEN_list_state` Produces species lists by state/province
* `BIEN_list_sf` Produces species lists using an sf object

###BIEN_occurrence functions

* `BIEN_occurrence_box` Returns all occurrence records within a GIS bounding box
* `BIEN_occurrence_country` Returns all occurrence records within a given country
* `BIEN_occurrence_family` Returns all occurrence records for a specified family
* `BIEN_occurrence_genus` Returns all occurrence records for a specified genus
* `BIEN_occurrence_species` Returns all occurrence records for a specified species
* `BIEN_occurrence_state` Returns all occurrence records within a given state/province
* `BIEN_occurrence_occurrences_per_species` Returns data on the number of occurrence records for species in the BIEN database
* `BIEN_occurrence_sf` Returns occurrence records falling within the boundaries of an sf object.

###BIEN_ranges functions

* `BIEN_ranges_genus` Downloads range maps for all the species within a given genus/genera
* `BIEN_ranges_species` Downloads range maps for given species
* `BIEN_ranges_box` Downloads range maps intersecting a given GIS bounding box
* `BIEN_ranges_list` Lists available range maps
* `BIEN_ranges_sf` Downloads range maps intersecting a user-supplied sf object.

###BIEN_trait functions

* `BIEN_trait_family` Returns all available trait data for given families
* `BIEN_trait_genus` Returns all available trait data for given genera
* `BIEN_trait_list` Lists all available trait types
* `BIEN_trait_mean` Estimates species mean trait values using genus- or family-level data where needed
* `BIEN_trait_species` Returns all available trait data for given species
* `BIEN_trait_trait` Returns all available trait data for given trait(s)
* `BIEN_trait_traitbyfamily` Returns specific trait data for given families
* `BIEN_trait_traitbygenus` Returns specific trait data for given genera
* `BIEN_trait_traitbyspecies` Returns specific trait data for given species
* `BIEN_trait_traits_per_species` Returns data on the number of trait observations for each trait for each species in the BIEN database

###BIEN_plot functions

* `BIEN_plot_datasource` Downloads all plot data from a given datasource.
* `BIEN_plot_list_datasource` Lists all datasources in the BIEN database.
* `BIEN_plot_country` Downloads all plot data from specified countries.
* `BIEN_plot_state` Downloads all plot data from specified states/provinces.
* `BIEN_plot_list_sampling_protocols` Lists all available sampling protocols.
* `BIEN_plot_sampling_protocol` Downloads all plot data using a specified sampling protocol.

###BIEN_stem functions

* `BIEN_stem_species` Downloads all stem data for given species.
* `BIEN_stem_genus` Downloads all stem data for given genera.
* `BIEN_stem_family` Downloads all stem data for given families.

###BIEN_taxonomy functions

* `BIEN_taxonomy_species` Downloads all taxonomic information for given species.
* `BIEN_taxonomy_genus` Downloads all taxonomic information for given genera.
* `BIEN_taxonomy_family` Downloads all taxonomic information for given families.

###BIEN_phylogeny functions

* `BIEN_phylogeny_complete` Downloads a specified number of the BIEN phylogeny replicates.
* `BIEN_phylogeny_conservative` Downloads the conservative BIEN phylogeny, which only includes species with molecular data available.
/scratch/gouwar.j/cran-all/cranData/BIEN/inst/doc/BIEN.Rmd
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)

## ----load-packages, message = F,warning = FALSE, results = 'hide'-------------
library(BIEN)
library(ape) #Package for working with phylogenies in R
library(maps) #Useful for making quick maps of occurrences
library(sf) # A package for spatial data

## ----load-vignette, eval = FALSE----------------------------------------------
# vignette("BIEN")

## ----xs-occurrences------------------------------------------------------------
Xanthium_strumarium <- BIEN_occurrence_species(species = "Xanthium strumarium")

## ----view-xs-occurrences--------------------------------------------------------
str(Xanthium_strumarium)
head(Xanthium_strumarium)

## ----xs-occurrences-pt-2, eval = FALSE-------------------------------------------
# Xanthium_strumarium_full <- BIEN_occurrence_species(species = "Xanthium strumarium",
#                                                     cultivated = TRUE,
#                                                     all.taxonomy = TRUE,
#                                                     native.status = TRUE,
#                                                     observation.type = TRUE,
#                                                     political.boundaries = TRUE)

## ----map-xs, eval = FALSE----------------------------------------------------------
# # Make a quick map to plot our points on
# map('world', fill = TRUE, col= "grey", bg = "light blue")
#
# # Plot the points from the full query in red
# points(cbind(Xanthium_strumarium_full$longitude,
#              Xanthium_strumarium_full$latitude),
#        col = "red",
#        pch = 20,
#        cex = 1)
#
# # Plot the points from the default query in blue
# points(cbind(Xanthium_strumarium$longitude,
#              Xanthium_strumarium$latitude),
#        col = "blue",
#        pch = 20,
#        cex = 1)

## ----occs-bahamas, eval = FALSE------------------------------------------------------
# Bahamas <- BIEN_occurrence_country(country = "Bahamas")
#
# #Let's see how many species we have
# length(unique(Bahamas$scrubbed_species_binomial))
# #About 400 species with valid occurrence records.
#
# #Now, let's take a look at where those occurrences are:
# map(regions = "Bahamas" ,
#     fill = TRUE ,
#     col= "grey",
#     bg = "light blue")
#
# points(cbind(Bahamas$longitude,Bahamas$latitude),
#        col = "blue",
#        pch = 20,
#        cex = 1)
#
# #Looks like some islands are considerably better sampled than others.
## ----xs-range--------------------------------------------------------------------
Xanthium_strumarium_range <- BIEN_ranges_load_species(species = "Xanthium strumarium")

## ----xs-range-map-----------------------------------------------------------------
#First, let's add a base map so that our range has some context:
map('world', fill = TRUE , col= "grey", bg = "light blue",
    xlim = c(-180, -20), ylim = c(-60, 80))

#Now, we can add the range map:
plot(Xanthium_strumarium_range[1], col = "green", add = TRUE)

## ----xs-range-and-points------------------------------------------------------------
map('world', fill = TRUE , col = "grey", bg = "light blue",
    xlim = c(-180, -20), ylim = c(-60, 80))

plot(Xanthium_strumarium_range[1], col = "green", add = TRUE)

points(cbind(Xanthium_strumarium$longitude,Xanthium_strumarium$latitude),
       col = "blue",
       pch = 20,
       cex = 1)

## ----luq-occs, eval = FALSE-----------------------------------------------------------
# LUQUILLO <- BIEN_plot_name(plot.name = "LUQUILLO")
#
# head(LUQUILLO)

## ---- eval = FALSE---------------------------------------------------------------------
# LUQUILLO_full <- BIEN_plot_name(plot.name = "LUQUILLO",
#                                 cultivated = TRUE,
#                                 all.taxonomy = TRUE,
#                                 native.status = TRUE,
#                                 political.boundaries = TRUE,
#                                 all.metadata = TRUE)

## ----salix-traits, eval = FALSE----------------------------------------------------------
# Salix_traits <- BIEN_trait_genus(genus = "Salix")

## ----trait-list, eval = FALSE------------------------------------------------------------
# BIEN_trait_list()

## ----leaf-area, eval = FALSE--------------------------------------------------------------
# leaf_area <- BIEN_trait_trait(trait = "leaf area")

## ----taxonomy--------------------------------------------------------------------------------
Asclepias_taxonomy <- BIEN_taxonomy_genus(genus = "Asclepias")

#We see that the genus Asclepias falls within the family Apocynaceae and the order Gentianales.

#You'll also notice that a given species may appear more than once (due to multiple circumscriptions, some of which may be illegitimate).

#If we'd just like to know all the species that aren't illegitimate:
Asclepias_species <- unique(Asclepias_taxonomy$scrubbed_species_binomial[Asclepias_taxonomy$scrubbed_taxonomic_status %in% c("accepted", "no opinion")])

## ----phylogeny, eval = FALSE-----------------------------------------------------------------
# phylo <- BIEN_phylogeny_conservative()
#
# #Let's make sure it looks alright
# plot.phylo(x = phylo, show.tip.label = FALSE)
#
# #If we just want to see which species are included
# phylo_species <- phylo$tip.label

## ----stems, eval = FALSE-----------------------------------------------------------------------
# Cupressus_arizonica_stems <- BIEN_stem_species("Cupressus arizonica")

## ----lists, eval = FALSE-----------------------------------------------------------------------
# Bahamas_species_list <- BIEN_list_country(country = "Bahamas")
#
# #Notice that we find many more species listed than we found occurrence records for. What happened? There are many records coming from the Bahamas that lack coordinates. These records are used in the "_list_" functions, but not the occurrence functions.
## ----lists-multi-country, eval = FALSE---------------------------------------------------------
# country_vector <- c("Haiti","Dominican Republic")
#
# Haiti_DR <- BIEN_list_country(country = country_vector)

## ----lists-pol-divs------------------------------------------------------------------------------
#To see all of the political division names, and associated codes, we can use this function:
political_names <- BIEN_metadata_list_political_names()

#Let's take a look at what the dataframe contains:
head(political_names)

#In addition to the standardized country, state (state_province_ascii) and county (county_parish_ascii) names, we have the associated codes that can be used in BIEN functions.

#Note that 'state' refers to any primary political division (e.g. province), and 'county' refers to any secondary political division (e.g. parish).

#Looking at the political_names dataframe, we see that the Dominican Republic has country code "DO", and Haiti has country code "HT"
Haiti_DR_from_codes <- BIEN_list_country(country.code = c("HT","DO"))

## ----md-1-------------------------------------------------------------------------------------------
BIEN_metadata_database_version()

## ----selaginella-occs, eval = FALSE------------------------------------------------------------------
# Selaginella_selaginoides_occurrences <- BIEN_occurrence_species("Selaginella selaginoides", new.world = NULL)

## ----selaginella-md, eval = FALSE--------------------------------------------------------------------
# citation_info <- BIEN_metadata_citation(dataframe = Selaginella_selaginoides_occurrences)

## ----md-2, eval = FALSE--------------------------------------------------------------------------------
# temp_dir <- file.path(tempdir(), "BIEN_temp") #Set a temporary working directory
#
# citation_info <- BIEN_metadata_citation(dataframe = Selaginella_selaginoides_occurrences,
#                                         bibtex_file = file.path(temp_dir,"selaginella_selaginoides.bib"),
#                                         acknowledgement_file = file.path(temp_dir,"selaginella_selaginoides.txt"))

## ----md-3, eval = FALSE----------------------------------------------------------------------------------
# #First, let's get some trait data:
# selaginella_selaginoides_traits <- BIEN_trait_species(species = "Selaginella selaginoides")
#
# #Now, we just need to modify our previous bit of code to include the trait data as well:
# temp_dir <- file.path(tempdir(), "BIEN_temp")
#
# citation_info <- BIEN_metadata_citation(dataframe = Selaginella_selaginoides_occurrences,
#                                         trait.dataframe = selaginella_selaginoides_traits,
#                                         bibtex_file = file.path(temp_dir,"selaginella_selaginoides.bib"),
#                                         acknowledgement_file = file.path(temp_dir,"selaginella_selaginoides.txt"))
/scratch/gouwar.j/cran-all/cranData/BIEN/inst/doc/BIEN_tutorial.R
<!--
%\VignetteEngine{knitr::knitr}
%\VignetteIndexEntry{BIEN tutorial}
%\VignetteEncoding{UTF-8}
-->

---
title: "BIEN tutorial"
---

```{r setup, include = FALSE}
knitr::opts_chunk$set(echo = TRUE)
```

# Setup

```{r load-packages, message = F,warning = FALSE, results = 'hide'}
library(BIEN)
library(ape) #Package for working with phylogenies in R
library(maps) #Useful for making quick maps of occurrences
library(sf) # A package for spatial data
```

# An overview of the package

We try to make this package as easy and intuitive to use as possible, but it is still often easiest to start with our vignette. Particularly useful are the "Function Names" and "Function Directory" sections.

```{r load-vignette, eval = FALSE}
vignette("BIEN")
```

The function names follow a consistent naming strategy, and mostly consist of 3 parts:

1. The prefix "BIEN_"
2. The type of data being accessed, e.g. "occurrence_"
3. How you'll be querying the data. For example, the suffix "state" refers to functions that return data for a specified state.

As a complete example, the function `BIEN_occurrence_species` returns occurrence records for a given species (or set of species).

# Function Families

Currently we have 9 function families in RBIEN. These are sets of functions that access a given type of data.

1. occurrence records (`BIEN_occurrence_...`)
2. range maps (`BIEN_ranges_...`)
3. plot data (`BIEN_plot_...`)
4. trait data (`BIEN_trait_...`)
5. taxonomic information (`BIEN_taxonomy_...`)
6. phylogenies (`BIEN_phylogeny_...`)
7. stem data (`BIEN_stem_...`)
8. species lists (`BIEN_list_...`)
9. metadata (`BIEN_metadata_...`)

We'll walk through each of the function families and take a look at some of the options available within each.

# Occurrence records

These functions begin with the prefix `BIEN_occurrence_...` and allow you to query occurrences by either taxonomy or geography. Functions include:

1. `BIEN_occurrence_country` Returns all occurrence records within a given country
2. `BIEN_occurrence_state` Returns all occurrence records within a given state/province
3. `BIEN_occurrence_county` Returns all occurrence records within a given county
4. `BIEN_occurrence_family` Returns all occurrence records for a specified family
5. `BIEN_occurrence_genus` Returns all occurrence records for a specified genus
6. `BIEN_occurrence_species` Returns all occurrence records for a specified species

Each of these functions has a number of different arguments that modify your query, either refining your search criteria or returning more data for each record. These arguments include:

1. `cultivated` If `TRUE`, records known to be cultivated will be returned.
2. `new.world` If `TRUE`, records returned are limited to those in North and South America, where greater data cleaning and validation has been done. If `FALSE`, records will be limited to the Old World. If `NULL` (the default), global records will be returned.
    * Note that the arguments `cultivated` and `new.world` may change the number of records returned.
3. `all.taxonomy` If `TRUE`, the query will return additional taxonomic data, including the uncorrected taxonomic information for those records.
4. `native.status` If `TRUE`, additional information will be returned regarding whether a species is native in a given region.
5. `natives.only` If `TRUE` (the default), information for occurrences flagged as introduced will not be returned.
6. `observation.type` If `TRUE`, the query will return whether each record is from either a plot or a specimen.
This may be useful if a user believes one type of information may be more accurate.
7. `political.boundaries` If `TRUE`, the query will return information on which country, state, etc. an occurrence is found within.
8. `collection.info` If `TRUE`, the query will return additional information about the collection and identification of that specimen.

**Example 1: Occurrence records for a species**

Okay, enough reading. Let's get some data. Let's say we're interested in the species *Xanthium strumarium*, and we'd like some occurrence data. We'll use the function `BIEN_occurrence_species` to grab the occurrence data.

```{r xs-occurrences}
Xanthium_strumarium <- BIEN_occurrence_species(species = "Xanthium strumarium")
```

Take a moment to view the dataframe and take a look at its structure:

```{r view-xs-occurrences}
str(Xanthium_strumarium)
head(Xanthium_strumarium)
```

The default data that is returned consists of the latitude, longitude and date collected, along with a set of attribution data. The meaning of some of these columns is obvious (e.g. latitude, longitude); however, others may be less clear. The meanings of these columns and the information within them are explained in more detail in our data dictionary, available at https://bien.nceas.ucsb.edu/bien/tools/rbien/data-dictionary/

If we want more information on these occurrences, we just need to change the arguments:

```{r xs-occurrences-pt-2, eval = FALSE}
Xanthium_strumarium_full <- BIEN_occurrence_species(species = "Xanthium strumarium",
                                                    cultivated = TRUE,
                                                    all.taxonomy = TRUE,
                                                    native.status = TRUE,
                                                    observation.type = TRUE,
                                                    political.boundaries = TRUE)
```

We now have considerably more information. If we want to take a quick look at where those occurrences are, we could use:

```{r map-xs, eval = FALSE}
# Make a quick map to plot our points on
map('world', fill = TRUE, col= "grey", bg = "light blue")

# Plot the points from the full query in red
points(cbind(Xanthium_strumarium_full$longitude,
             Xanthium_strumarium_full$latitude),
       col = "red",
       pch = 20,
       cex = 1)

# Plot the points from the default query in blue
points(cbind(Xanthium_strumarium$longitude,
             Xanthium_strumarium$latitude),
       col = "blue",
       pch = 20,
       cex = 1)
```

**Example 2: Occurrence records for a country**

Since we may be interested in a particular geographic area, rather than a particular set of species, there are also options to easily extract data by political region as well. We'll choose a relatively small region, the Bahamas, for our demonstration.

```{r occs-bahamas, eval = FALSE}
Bahamas <- BIEN_occurrence_country(country = "Bahamas")

#Let's see how many species we have
length(unique(Bahamas$scrubbed_species_binomial))
#About 400 species with valid occurrence records.

#Now, let's take a look at where those occurrences are:
map(regions = "Bahamas" ,
    fill = TRUE ,
    col= "grey",
    bg = "light blue")

points(cbind(Bahamas$longitude,Bahamas$latitude),
       col = "blue",
       pch = 20,
       cex = 1)

#Looks like some islands are considerably better sampled than others.
```

# Range maps

These functions begin with the prefix `BIEN_ranges_...` and return (unsurprisingly) species ranges. Most of these functions work by saving the downloaded ranges to a specified directory in shapefile format, rather than by loading them into the R environment. Functions include:

1. `BIEN_ranges_species` Downloads range maps for given species and saves them to a specified directory.
2. `BIEN_ranges_genus` Saves range maps for all species within a genus to a specified directory.
3. `BIEN_ranges_load_species` This function returns the ranges for a set of species as an sf object.
4. `BIEN_ranges_sf` This function returns all ranges that intersect a user-specified sf object.

The range functions have different arguments than we have seen so far, including:

1. `directory` This is where the function will save the shapefiles you download.
2. `matched` If `TRUE`, the function will return a dataframe listing which species ranges were downloaded and which weren't.
3. `match_names_only` If `TRUE`, the function will check whether a map is available for each species without actually downloading it.
4. `include.gid` If `TRUE`, the function will append a unique gid number to each range map's filename. This argument is designed to allow forward compatibility when BIEN contains multiple range maps for each species.

**Example 3: Range maps and occurrence points**

If we have a species we're interested in, and would like to load the range map into the environment, we can use the function `BIEN_ranges_load_species`. Let's try this for *Xanthium strumarium*.

```{r xs-range}
Xanthium_strumarium_range <- BIEN_ranges_load_species(species = "Xanthium strumarium")
```

The range map is now in our global environment as an sf object. Let's plot the map and see what it looks like.

```{r xs-range-map}
#First, let's add a base map so that our range has some context:
map('world', fill = TRUE , col= "grey", bg = "light blue",
    xlim = c(-180, -20), ylim = c(-60, 80))

#Now, we can add the range map:
plot(Xanthium_strumarium_range[1], col = "green", add = TRUE)
```

Now, let's add those occurrence points from earlier to this map:

```{r xs-range-and-points}
map('world', fill = TRUE , col = "grey", bg = "light blue",
    xlim = c(-180, -20), ylim = c(-60, 80))

plot(Xanthium_strumarium_range[1], col = "green", add = TRUE)

points(cbind(Xanthium_strumarium$longitude,Xanthium_strumarium$latitude),
       col = "blue",
       pch = 20,
       cex = 1)
```

# Plot data

These functions begin with the prefix "BIEN_plot_" and return ecological plot data. Functions include:

1. `BIEN_plot_list_sampling_protocols` Returns the different plot sampling protocols found in the BIEN database.
2. `BIEN_plot_list_datasource` Returns the different datasources that are available in the BIEN database.
    * These first two functions are useful for identifying plots with comparable sampling methods.
3. `BIEN_plot_sampling_protocol` Downloads data for a specified sampling protocol.
4. `BIEN_plot_datasource` Downloads data for a specific datasource.
    * These next two functions are then useful for downloading datasets with consistent methodology.
5. `BIEN_plot_country`
6. `BIEN_plot_state`
7. `BIEN_plot_dataset` Downloads data for a given dataset (which is nested within a datasource).
8. `BIEN_plot_name` Downloads data for a specific plot name (these are nested within a given dataset).

Again, we have some of the same arguments available for these queries that we saw for the occurrence functions. We also have the new argument `all.metadata`, which causes the functions to return more metadata for each plot.

**Example 4: Plot data by plot name**

Let's take a look at the data for an individual plot.

```{r luq-occs, eval = FALSE}
LUQUILLO <- BIEN_plot_name(plot.name = "LUQUILLO")

head(LUQUILLO)
```

We can see that this is a 0.1 hectare transect where stems >= 2.5 cm diameter at breast height were included.
If we'd like more detail, we can use additional arguments:

```{r, eval = FALSE}
LUQUILLO_full <- BIEN_plot_name(plot.name = "LUQUILLO",
                                cultivated = TRUE,
                                all.taxonomy = TRUE,
                                native.status = TRUE,
                                political.boundaries = TRUE,
                                all.metadata = TRUE)
```

The dataframe `LUQUILLO_full` contains more useful information, including metadata on which taxa were included, which growth forms were included, and whether species are known to be native or introduced.

# Trait data

These functions begin with the prefix `BIEN_trait_...` and access the BIEN trait database. Note that the spelling of the trait names must be precise, so we recommend using the function `BIEN_trait_list` first. Trait names are standardized to follow https://www.top-thesaurus.org/ where available. Trait units have been standardized for each trait. Functions include:

1. `BIEN_trait_list` Start with this. It returns a dataframe of the traits available.
2. `BIEN_trait_family` Returns a dataframe of all trait data for a given family (or families).
3. `BIEN_trait_genus`
4. `BIEN_trait_species`
5. `BIEN_trait_trait` Downloads all records of a specified trait (or traits).
6. `BIEN_trait_mean` Estimates species mean trait values using genus- or family-level means where species-level data is absent.
7. `BIEN_trait_traitbyfamily` Downloads data for a given family (or families) and trait(s).
8. `BIEN_trait_traitbygenus`
9. `BIEN_trait_traitbyspecies`

**Example 5: Accessing trait data**

If you're interested in accessing all traits for a taxon, say the genus *Salix*, just go ahead and use the corresponding function:

```{r salix-traits, eval = FALSE}
Salix_traits <- BIEN_trait_genus(genus = "Salix")
```

If instead we're interested in a particular trait, the first step is to check whether that trait is present and verify the spelling using the function `BIEN_trait_list`.

```{r trait-list, eval = FALSE}
BIEN_trait_list()
```

If we're interested in leaf area, we see that this is indeed called "leaf area" in the database. Now that we know the proper spelling, we can use the function `BIEN_trait_trait` to download all observations of that trait.

```{r leaf-area, eval = FALSE}
leaf_area <- BIEN_trait_trait(trait = "leaf area")
```

Note that the units have been standardized and that there is a full set of attribution data for each trait.

# Taxonomy Data

While there are existing packages that query taxonomic data (e.g. those included in the excellent taxize package), the RBIEN taxonomy functions access the taxonomic information that underlies the BIEN database, ensuring consistency.

1. `BIEN_taxonomy_family` Downloads all taxonomic information for a given family.
2. `BIEN_taxonomy_genus`
3. `BIEN_taxonomy_species`

**Example 6: Taxonomic data**

Let's say we're interested in the genus *Asclepias*, and we'd like to get an idea of how many species there are in this genus and what higher taxa it falls within.

```{r taxonomy}
Asclepias_taxonomy <- BIEN_taxonomy_genus(genus = "Asclepias")

#We see that the genus Asclepias falls within the family Apocynaceae and the order Gentianales.

#You'll also notice that a given species may appear more than once (due to multiple circumscriptions, some of which may be illegitimate).

#If we'd just like to know all the species that aren't illegitimate:
Asclepias_species <- unique(Asclepias_taxonomy$scrubbed_species_binomial[Asclepias_taxonomy$scrubbed_taxonomic_status %in% c("accepted", "no opinion")])
```

# Phylogenies

The BIEN database currently contains 101 phylogenies for New World plants.
This includes 100 replicated phylogenies that include a large fraction of New World plant species ("complete phylogenies") and 1 phylogeny containing only those New World plant species for which molecular data were available (the "conservative phylogeny"). Currently, there are only 2 functions available:

1. `BIEN_phylogeny_complete` This function will return a specified number of the replicated "complete" phylogenies. Note that each phylogeny is several Mb in size, so downloading many may take a while on slow connections.
2. `BIEN_phylogeny_conservative` This function returns the conservative phylogeny.

Arguments:

The function `BIEN_phylogeny_complete` has a few arguments that are worth explaining:

1. `n_phylogenies` This is the number of replicated phylogenies that you want to download (between 1 and 100).
2. `seed` This argument sets the seed for the random number generator before randomly drawing the phylogenies to be downloaded. This is useful for replicating analyses.
3. `replicates` This argument allows you to specify WHICH of the 100 phylogenies to download, rather than having them selected randomly.

**Example 7: Phylogenies**

Let's say we want to download the conservative phylogeny.

```{r phylogeny, eval = FALSE}
phylo <- BIEN_phylogeny_conservative()

#Let's make sure it looks alright
plot.phylo(x = phylo, show.tip.label = FALSE)

#If we just want to see which species are included
phylo_species <- phylo$tip.label
```

# Stem Data

The BIEN database contains stem data associated with many of the plots. This is typically either diameter at breast height or diameter at ground height. At present, there are only a few stem functions (although expect more in the future):

1. `BIEN_stem_species` This function downloads all of the stem data for a given species (or set of species).
2. `BIEN_stem_genus`
3. `BIEN_stem_family`
4. `BIEN_stem_datasource` This function downloads all of the stem data for a given datasource.

Arguments:

The arguments for these functions are the same ones we have seen in the occurrence and plot functions.

**Example 8: Stem data**

If we'd like stem data for the species *Cupressus arizonica*:

```{r stems, eval = FALSE}
Cupressus_arizonica_stems <- BIEN_stem_species("Cupressus arizonica")
```

# Species lists

These functions begin with the prefix `BIEN_list_` and allow you to quickly get a list of all the species in a geographic unit. Functions include:

1. `BIEN_list_country` Returns all species found within a country.
2. `BIEN_list_state` Returns all species found within a given state/province or other 2nd level political division.
3. `BIEN_list_county` Returns all species found within a given county/parish or other 3rd level political division.

Some of the same arguments we saw in the occurrence functions appear here as well, including `cultivated` and `new.world`.

**Example 9: Species list for a country**

Let's return to our previous example. What if we just need a list of the species in the Bahamas, rather than the specific details of each occurrence record? We can instead use the function `BIEN_list_country` to download a list of species, which should be much faster than using `BIEN_occurrence_country` to get a species list.

```{r lists, eval = FALSE}
Bahamas_species_list <- BIEN_list_country(country = "Bahamas")

#Notice that we find many more species listed than we found occurrence records for. What happened? There are many records coming from the Bahamas that lack coordinates. These records are used in the "_list_" functions, but not the occurrence functions.
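#For example, we could compare the two species counts directly (assuming the
#Bahamas occurrence download from Example 2 is still in memory, and assuming
#the list output also uses a scrubbed_species_binomial column):

length(unique(Bahamas$scrubbed_species_binomial)) #count from occurrence records
length(unique(Bahamas_species_list$scrubbed_species_binomial)) #count from the species list, typically larger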
```

If we wanted to retrieve the results for multiple countries at once, that is simple as well. We just need to supply a vector of countries.

```{r lists-multi-country, eval = FALSE}
country_vector <- c("Haiti","Dominican Republic")

Haiti_DR <- BIEN_list_country(country = country_vector)
```

We can also use political division codes (from geonames.org) instead of writing out the full country names.

```{r lists-pol-divs}
#To see all of the political division names, and associated codes, we can use this function:
political_names <- BIEN_metadata_list_political_names()

#Let's take a look at what the dataframe contains:
head(political_names)

#In addition to the standardized country, state (state_province_ascii) and county (county_parish_ascii) names, we have the associated codes that can be used in BIEN functions.

#Note that 'state' refers to any primary political division (e.g. province), and 'county' refers to any secondary political division (e.g. parish).

#Looking at the political_names dataframe, we see that the Dominican Republic has country code "DO", and Haiti has country code "HT"
Haiti_DR_from_codes <- BIEN_list_country(country.code = c("HT","DO"))
```

# Metadata

The BIEN metadata functions start with the prefix `BIEN_metadata_...` and provide useful metadata for the BIEN database.

1. `BIEN_metadata_database_version` Returns the current version number of the BIEN database and the release date.
2. `BIEN_metadata_match_data` Rudimentary function to check for changed records between old and current queries.
3. `BIEN_metadata_citation` Function to generate bibtex citations for use in reference managers.
4. `BIEN_metadata_list_political_names` Returns a dataframe containing political division names and associated codes.

**Example 10: Metadata**

To check what the current version of the BIEN database is (which we recommend reporting when using BIEN data):

```{r md-1}
BIEN_metadata_database_version()
```

**Example 11: Citations**

One of the more innovative features of the BIEN package is that it will generate custom attribution data for you based on what data you downloaded through the package. Let's say we're interested in *Selaginella selaginoides*, and we'd like to download some occurrence data:

```{r selaginella-occs, eval = FALSE}
Selaginella_selaginoides_occurrences <- BIEN_occurrence_species("Selaginella selaginoides", new.world = NULL)
```

If we plan on using those data in a publication, we'll need proper attribution. We can use `BIEN_metadata_citation` to do this for us:

```{r selaginella-md, eval = FALSE}
citation_info <- BIEN_metadata_citation(dataframe = Selaginella_selaginoides_occurrences)
```

`citation_info` is a list that contains 3 elements:

1. A bit of general information on how to use the list.
2. A set of bibtex formatted references.
3. Acknowledgement text.

To make things even easier on ourselves, we can use some of the additional functionality of the `BIEN_metadata_citation` function:

```{r md-2, eval = FALSE}
temp_dir <- file.path(tempdir(), "BIEN_temp") #Set a temporary working directory

citation_info <- BIEN_metadata_citation(dataframe = Selaginella_selaginoides_occurrences,
                                        bibtex_file = file.path(temp_dir,"selaginella_selaginoides.bib"),
                                        acknowledgement_file = file.path(temp_dir,"selaginella_selaginoides.txt"))
```

Now, we have a bibtex file, `selaginella_selaginoides.bib`, that can be loaded into a reference manager
(e.g. Endnote, Paperpile, etc.), and a text file, `selaginella_selaginoides.txt`, containing text that can be pasted into the acknowledgements section of a publication.

What if we also have some trait data? No problem there, the code handles that as well:

```{r md-3, eval = FALSE}
#First, let's get some trait data:
selaginella_selaginoides_traits <- BIEN_trait_species(species = "Selaginella selaginoides")

#Now, we just need to modify our previous bit of code to include the trait data as well:
temp_dir <- file.path(tempdir(), "BIEN_temp")

citation_info <- BIEN_metadata_citation(dataframe = Selaginella_selaginoides_occurrences,
                                        trait.dataframe = selaginella_selaginoides_traits,
                                        bibtex_file = file.path(temp_dir,"selaginella_selaginoides.bib"),
                                        acknowledgement_file = file.path(temp_dir,"selaginella_selaginoides.txt"))
```

The updated citation information will now contain references for both trait and occurrence records.

# Combining Queries

**Example 12: Putting it all together**

Coming soon!
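In the meantime, here is a minimal sketch of what a combined workflow might look like, reusing only functions already demonstrated earlier in this tutorial (occurrences, a range map, trait data, and attribution for a single species):

```{r combining-sketch, eval = FALSE}
species <- "Xanthium strumarium"

#Occurrence records, range map, and trait records for the same species
occurrences <- BIEN_occurrence_species(species = species)
range_map <- BIEN_ranges_load_species(species = species)
traits <- BIEN_trait_species(species = species)

#Plot the range with the occurrences overlaid
map('world', fill = TRUE, col = "grey", bg = "light blue",
    xlim = c(-180, -20), ylim = c(-60, 80))
plot(range_map[1], col = "green", add = TRUE)
points(cbind(occurrences$longitude, occurrences$latitude),
       col = "blue", pch = 20, cex = 1)

#And generate attribution information for everything we downloaded
citation_info <- BIEN_metadata_citation(dataframe = occurrences,
                                        trait.dataframe = traits)
```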
/scratch/gouwar.j/cran-all/cranData/BIEN/inst/doc/BIEN_tutorial.Rmd
--- title: "BIEN R package" author: "Brian Maitner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BIEN R package} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- The Botanical Information and Ecology Network(BIEN) R package provides access to the BIEN database as well as useful tools for working with the BIEN data. ##Data Usage Agreement Please read the data usage agreement, available at: https://bien.nceas.ucsb.edu/bien/biendata/data-use-policy/ ##Data Usage: Warnings and Caveats Please be aware of quality and appropriateness of BIEN data for the analyses that you may wish to perform or information that you would like to obtain Versioning - data available via BIENdata.org and the the BIEN R package (BIEN) are being updated and improved. This is an active collaborative group who is aiming to continue to improve the quality of botanical information and data. As a result, users should take note of the version number (using the function BIEN_metadata_database_version() ) and be sure to check back periodically for updates. Checking for new results to a query can be done using the function BIEN_metadata_match_data(). Data quality - All data are presented ‘as is’. Default BIEN downloads return all data associated with a given taxonomic name string. There may be additional issues associated with a given name string. Please be aware that data returned may also include data whose geographic coordinates and cultivated status are uncertain or unverifiable. Hybrids - Hybrid species are assigned the specific epithet "x" by the TNRS. To view the full name information for these occurrences, set "all.taxonomy = TRUE". ##Database connection issues Some institution and computer programs (e.g. some antivirus programs) block the SQL connections that this package relies on. While we are exploring ways around this issue, at present the simplest method is to use the package on a computer/network that doesn't block SQL connections. ##Function Names Function names in the BIEN package follow a naming convention which was designed to make locating the desired function easier: * All function names begin with the prefix "BIEN_" * Functions to download occurrence records begin with the prefix "BIEN_occurrence_" * Functions to download range maps begin with the prefix "BIEN_ranges_" * Functions to download trait data begin with the prefix "BIEN_trait_" * Functions to download species lists (or other lists) begin with the prefix "BIEN_list_" * Functions to download plot data begin with the prefix "BIEN_plot_" * Functions to download stem data begin with the prefix "BIEN_stem_" * Functions to download taxonomic information begin with the prefix "BIEN_taxonomy_" * Functions to download phylogenies begin with the prefix "BIEN_phylogeny_" * Functions to access metadata begin with the prefix "BIEN_metadata_" ##Function Directory For full information on each function, see the associated help file. ###BIEN_metadata functions * `BIEN_metadata_database_version` Returns the BIEN database version number and release date * `BIEN_metadata_match_data` Compares the stored results of two (old vs new) queries to check for additions or deletions. * `BIEN_metadata_citation` Generates information needed to cite data downloaded from BIEN. * `BIEN_metadata_list_political_names` Provides a list of political division names used by BIEN. 
###BIEN_list functions * `BIEN_list_all` Lists all species present in the BIEN database * `BIEN_list_country` Produces species lists by country * `BIEN_list_county` Produces species lists by county * `BIEN_list_state` Produces species lists by state/province * `BIEN_list_sf` Produces species lists using an sf object ###BIEN_occurrence functions * `BIEN_occurrence_box` Returns all occurrence records within a GIS bounding box * `BIEN_occurrence_country` Returns all occurrence records within a given country * `BIEN_occurrence_family` Returns all occurrence records for a specified family * `BIEN_occurrence_genus` Returns all occurrence records for a specified genus * `BIEN_occurrence_species` Returns all occurrence records for a specified species * `BIEN_occurrence_state` Returns all occurrences records within a given state/province * `BIEN_occurrence_occurrences_per_species` Returns data on the number of occurrence records for species in the BIEN database * `BIEN_occurrence_sf` Returns occurrence records falling within the boundaries of an sf object. ###BIEN_ranges functions * `BIEN_ranges_genus` Downloads range maps for all the species within a given genus/genera * `BIEN_ranges_species` Downloads range maps for given species * `BIEN_ranges_box` Downloads range maps intersecting a given GIS bounding box * `BIEN_ranges_list` Lists available range maps * `BIEN_ranges_sf` Downloads range maps intersecting a user-supplied sf object. ###BIEN_trait functions * `BIEN_trait_family` Returns all available trait data for given families * `BIEN_trait_genus` Returns all available trait data for given genera * `BIEN_trait_list` Lists all available types of trait * `BIEN_trait_mean` Estimates species mean trait values using Genus or Family level data where needed * `BIEN_trait_species` Returns all available trait data for given species * `BIEN_trait_trait` Returns all available trait data for given trait(s) * `BIEN_trait_traitbyfamily` Returns specific trait data for given family * `BIEN_trait_traitbygenus` Returns specific trait data for given genus * `BIEN_trait_traitbyspecies` Returns specific trait data for given species * `BIEN_trait_traits_per_species` Returns data on the number of trait observations for each trait for each species in the BIEN database ###BIEN_plot functions * `BIEN_plot_datasource` Downloads all plot data from a given datasource. * `BIEN_plot_list_datasource` List all datasources in the BIEN database. * `BIEN_plot_country` Downloads all plot data from specified countries. * `BIEN_plot_state` Downloads all plot data from specified states/provinces. * `BIEN_plot_list_sampling_protocols` List all available sampling protocols. * `BIEN_plot_sampling_protocol` Downloads all plot data using a specified sampling protocol. ###BIEN_stem functions * `BIEN_stem_species` Downloads all stem data for given species. * `BIEN_stem_genus` Downloads all stem data for given genera. * `BIEN_stem_family` Downloads all stem data for given families. ###BIEN_taxonomy functions * `BIEN_taxonomy_species` Downloads all taxonomic information for given species. * `BIEN_taxonomy_genus` Downloads all taxonomic information for given genera. * `BIEN_taxonomy_family` Downloads all taxonomic information for given families. ###BIEN_phylogeny functions * `BIEN_phylogeny_complete` Downloads a specified number of the BIEN phylogeny replicates. * `BIEN_phylogeny_conservative` Downloads the conservative BIEN phylogeny, which only includes species with molecular data available.
/scratch/gouwar.j/cran-all/cranData/BIEN/vignettes/BIEN.Rmd
<!-- %\VignetteEngine{knitr::knitr} %\VignetteIndexEntry{BIEN tutorial} %\VignetteEncoding{UTF-8} --> --- title: "BIEN tutorial" --- ```{r setup, include = FALSE} knitr::opts_chunk$set(echo = TRUE) ``` # Setup ```{r load-packages, message = F,warning = FALSE, results = 'hide'} library(BIEN) library(ape) #Package for working with phylogenies in R library(maps) #Useful for making quick maps of occurrences library(sf) # A package for spatial data ``` # An overview of the package We try to make this package as easy and intuitive to use as possible, but it is still often easiest to start with our vignette. Particularly useful are the "Function Names" and "Function Directory" sections. ```{r load-vignette, eval = FALSE} vignette("BIEN") ``` The function names follow a consistent naming strategy, and mostly consist of 3 parts: 1. The prefix "BIEN_" 2. The type of data being accessed, e.g. "occurrence_" 3. How you'll be querying the data. For example, the suffix "state" refers to functions that return data for a specified state. As a complete example, the function `BIEN_occurrence_species` returns occurrence records for a given species (or set of species). # Function Families Currently we have 9 function families in RBIEN. These are sets of functions that access a given type of data. 1. occurrence records (`BIEN_occurrence_...`) 2. range maps (`BIEN_ranges_...`) 3. plot data (`BIEN_plot_...`) 4. trait data (`BIEN_trait_...`) 5. taxonomic information (`BIEN_taxonomy_...`) 6. phylogenies (`BIEN_phylogeny_...`) 7. stem data (`BIEN_stem_...`) 8. species lists (`BIEN_list_...`) 9. metadata (`BIEN_metadata_...`) We'll walk through each of the function families and take a look at some the options available within each. # Occurrence records These functions begin with the prefix `BIEN_occurrence_...` and allow you to query occurrences by either taxonomy or geography. Functions include: 1. `BIEN_occurrence_country` Returns all occurrence records within a given country 2. `BIEN_occurrence_state` Returns all occurrences records within a given state/province 3. `BIEN_occurrence_county` Returns all occurrences records within a given state/province 4. `BIEN_occurrence_family` Returns all occurrence records for a specified family 5. `BIEN_occurrence_genus` Returns all occurrence records for a specified genus 6. `BIEN_occurrence_species` Returns all occurrence records for a specified species Each of these functions has a number of different arguments that modify your query, either refining your search criteria or returning more data for each record. These arguments include: 1. `cultivated` If `TRUE`, records known to be cultivated will be returned. 2. `new.world` If `TRUE`, records returned are limited to those in North and South America, where greater data cleaning and validation has been done. IF `FALSE`, records will be limited to the Old World. If `NULL` (the default), global records will be returned. * Note that the arguments cultivated and new.world may change the number of records returned. 3. `all.taxonomy` If `TRUE`, the query will return additional taxonomic data, including the uncorrected taxonomic information for those records. 4. `native.status` If `TRUE`, additional information will be returned regarding whether a species is native in a given region. 5. `natives.only` If `TRUE`, the default, information for occurrences flagged as introduced will not be returned. 6. `observation.type` If `TRUE`, the query will return whether each record is from either a plot or a specimen. 
This may be useful if a user believes one type of information may be more accurate. 7. `political.boundaries` If `TRUE`, the query will return information on which country, state, etc. that an occurrence is found within. 8. `collection.info` If `TRUE`, the quest will return additional information about the collection and identification of that specimen. **Example 1: Occurrence records for a species** Okay, enough reading. Let's get some data. Let's say we're interested in the species *Xanthium strumarium* and we'd like some occurrence data. We'll use the function `BIEN_occurrence_species` to grab the occurrence data. ```{r xs-occurrences} Xanthium_strumarium <- BIEN_occurrence_species(species = "Xanthium strumarium") ``` Take a moment and view the dataframe and take a look at the structure ```{r view-xs-occurrences} str(Xanthium_strumarium) head(Xanthium_strumarium) ``` The default data that is returned consists of the latitude, longitude and date collected, along with a set of attribution data. The meaning of some of these columns is obvious (e.g. latitude, longitude), however others may be less clear. The meanings of these columns and the information within is explained in more detail in our data dictionary, available at https://bien.nceas.ucsb.edu/bien/tools/rbien/data-dictionary/ If we want more information on these occurrences, we just need to change the arguments: ```{r xs-occurrences-pt-2, eval = FALSE} Xanthium_strumarium_full <- BIEN_occurrence_species(species = "Xanthium strumarium", cultivated = TRUE, all.taxonomy = TRUE, native.status = TRUE, observation.type = TRUE, political.boundaries = TRUE) ``` We now have considerably more information. If we want to take a quick look at where those occurrences are we could use: ```{r map-xs, eval = FALSE} # Make a quick map to plot our points on map('world', fill = TRUE, col= "grey", bg = "light blue") # Plot the points from the full query in red points(cbind(Xanthium_strumarium_full$longitude, Xanthium_strumarium_full$latitude), col = "red", pch = 20, cex = 1) # Plot the points from the default query in blue points(cbind(Xanthium_strumarium$longitude, Xanthium_strumarium$latitude), col = "blue", pch = 20, cex = 1) ``` **Example 2: Occurrence records for a country** Since we may be interested in a particular geographic area, rather than a particular set of species, there are also options to easily extract data by political region as well. We'll choose a relatively small region, the Bahamas, for our demonstration. ```{r occs-bahamas, eval = FALSE} Bahamas <- BIEN_occurrence_country(country = "Bahamas") #Let's see how many species we have length(unique(Bahamas$scrubbed_species_binomial)) #About 400 species with valid occurrence records. #Now, let's take a look at where those occurrences are: map(regions = "Bahamas" , fill = TRUE , col= "grey", bg = "light blue") points(cbind(Bahamas$longitude,Bahamas$latitude), col = "blue", pch = 20, cex = 1) #Looks like some islands are considerably better sampled than others. ``` # Range maps These functions begin with the prefix `BIEN_ranges_...` and return (unsurprisingly) species ranges. Most of these functions work by saving the downloaded ranges to a specified directory in shapefile format, rather than by loading them into the R environment. Functions include: 1. `BIEN_ranges_species` Downloads range maps for given species and save them to a specified directory. 2. `BIEN_ranges_genus` Saves range maps for all species within a genus to a specified directory. 3. 
# Range maps

These functions begin with the prefix `BIEN_ranges_...` and return (unsurprisingly) species ranges. Most of these functions work by saving the downloaded ranges to a specified directory in shapefile format, rather than by loading them into the R environment. Functions include:

1. `BIEN_ranges_species` Downloads range maps for given species and saves them to a specified directory.
2. `BIEN_ranges_genus` Saves range maps for all species within a genus to a specified directory.
3. `BIEN_ranges_load_species` This function returns the ranges for a set of species as an sf object.
4. `BIEN_ranges_sf` This function returns all ranges that intersect a user-specified sf object.

The range functions have different arguments than we have seen so far, including:

1. `directory` This is where the function will save the shapefiles you download.
2. `matched` If `TRUE`, the function will return a dataframe listing which species ranges were downloaded and which weren't.
3. `match_names_only` If `TRUE`, the function will check whether a map is available for each species without actually downloading it (a sketch follows Example 3 below).
4. `include.gid` If `TRUE`, the function will append a unique gid number to each range map's filename. This argument is designed to allow forward compatibility when BIEN contains multiple range maps for each species.

**Example 3: Range maps and occurrence points**

If we have a species we're interested in and would like to load the range map into the environment, we can use the function `BIEN_ranges_load_species`. Let's try this for *Xanthium strumarium*.

```{r xs-range}
Xanthium_strumarium_range <- BIEN_ranges_load_species(species = "Xanthium strumarium")
```

The range map is now in our global environment as an sf object. Let's plot the map and see what it looks like.

```{r xs-range-map}
# First, let's add a base map so that our range has some context:
map('world', fill = TRUE, col = "grey", bg = "light blue",
    xlim = c(-180, -20), ylim = c(-60, 80))

# Now, we can add the range map:
plot(Xanthium_strumarium_range[1], col = "green", add = TRUE)
```

Now, let's add those occurrence points from earlier to this map:

```{r xs-range-and-points}
map('world', fill = TRUE, col = "grey", bg = "light blue",
    xlim = c(-180, -20), ylim = c(-60, 80))

plot(Xanthium_strumarium_range[1], col = "green", add = TRUE)

points(cbind(Xanthium_strumarium$longitude, Xanthium_strumarium$latitude),
       col = "blue", pch = 20, cex = 1)
```
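As promised above, here is a minimal sketch of checking map availability with `match_names_only` before downloading anything, and then saving a range to disk. Writing to a temporary directory is our choice for illustration; any writable directory works:

```{r ranges-availability, eval = FALSE}
# Check whether range maps are available, without downloading them
availability <- BIEN_ranges_species(species = c("Xanthium strumarium", "Acer rubrum"),
                                    match_names_only = TRUE)
availability

# If maps are available, download the shapefiles to a directory
temp_dir <- file.path(tempdir(), "BIEN_ranges")
dir.create(temp_dir, showWarnings = FALSE)

BIEN_ranges_species(species = "Xanthium strumarium", directory = temp_dir)
```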
# Plot data

These functions begin with the prefix `BIEN_plot_...` and return ecological plot data. Functions include:

1. `BIEN_plot_list_sampling_protocols` Returns the different plot sampling protocols found in the BIEN database.
2. `BIEN_plot_list_datasource` Returns the different datasources that are available in the BIEN database.
    * These first two functions are useful for identifying plots with comparable sampling methods; see the sketch after Example 4 below.
3. `BIEN_plot_sampling_protocol` Downloads data for a specified sampling protocol.
4. `BIEN_plot_datasource` Downloads data for a specific datasource.
    * These next two functions are then useful for downloading datasets with consistent methodology.
5. `BIEN_plot_country` Downloads plot data for a given country.
6. `BIEN_plot_state` Downloads plot data for a given state/province.
7. `BIEN_plot_dataset` Downloads data for a given dataset (which is nested within a datasource).
8. `BIEN_plot_name` Downloads data for a specific plot name (these are nested within a given dataset).

Again, we have some of the same arguments available for these queries that we saw for the occurrence functions. We also have the new argument `all.metadata`, which causes the functions to return more metadata for each plot.

**Example 4: Plot data by plot name**

Let's take a look at the data for an individual plot.

```{r luq-occs, eval = FALSE}
LUQUILLO <- BIEN_plot_name(plot.name = "LUQUILLO")

head(LUQUILLO)
```

We can see that this is a 0.1 hectare transect where stems >= 2.5 cm diameter at breast height were included. If we'd like more detail, we can use additional arguments:

```{r luq-full, eval = FALSE}
LUQUILLO_full <- BIEN_plot_name(plot.name = "LUQUILLO",
                                cultivated = TRUE,
                                all.taxonomy = TRUE,
                                native.status = TRUE,
                                political.boundaries = TRUE,
                                all.metadata = TRUE)
```

The dataframe `LUQUILLO_full` contains more useful information, including metadata on which taxa were included, which growth forms were included, and whether species are known to be native or introduced.
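As noted above, the two `BIEN_plot_list_...` functions are a good starting point for finding comparable plots. A minimal sketch follows; we assume the argument to `BIEN_plot_sampling_protocol` is named `sampling_protocol`, and the protocol string shown is only illustrative, so take the exact value from the output of the first call (and see the function's help page):

```{r plot-protocols, eval = FALSE}
# List the sampling protocols represented in the BIEN database
protocols <- BIEN_plot_list_sampling_protocols()
protocols

# Download all plot data collected under one of those protocols
# (replace the string below with a value taken from `protocols`)
transect_plots <- BIEN_plot_sampling_protocol(sampling_protocol = "Point-intercept")
```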
# Trait data

These functions begin with the prefix `BIEN_trait_...` and access the BIEN trait database. Note that the spelling of the trait names must be precise, so we recommend using the function `BIEN_trait_list` first. Trait names are standardized to follow https://www.top-thesaurus.org/ where available. Trait units have been standardized for each trait.

Functions include:

1. `BIEN_trait_list` Start with this. It returns a dataframe of the traits available.
2. `BIEN_trait_family` Returns a dataframe of all trait data for a given family (or families).
3. `BIEN_trait_genus`
4. `BIEN_trait_species`
5. `BIEN_trait_trait` Downloads all records of a specified trait (or traits).
6. `BIEN_trait_mean` Estimates species mean trait values, using genus- or family-level means where species-level data are absent (see the sketch at the end of this section).
7. `BIEN_trait_traitbyfamily` Downloads data for a given family (or families) and trait(s).
8. `BIEN_trait_traitbygenus`
9. `BIEN_trait_traitbyspecies`

**Example 5: Accessing trait data**

If you're interested in accessing all traits for a taxon, say the genus *Salix*, just go ahead and use the corresponding function:

```{r salix-traits, eval = FALSE}
Salix_traits <- BIEN_trait_genus(genus = "Salix")
```

If instead we're interested in a particular trait, the first step is to check whether that trait is present and verify its spelling using the function `BIEN_trait_list`.

```{r trait-list, eval = FALSE}
BIEN_trait_list()
```

If we're interested in leaf area, we see that this is indeed called "leaf area" in the database. Now that we know the proper spelling, we can use the function `BIEN_trait_trait` to download all observations of that trait.

```{r leaf-area, eval = FALSE}
leaf_area <- BIEN_trait_trait(trait = "leaf area")
```

Note that the units have been standardized and that there is a full set of attribution data for each trait.
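Finally, a minimal sketch of `BIEN_trait_mean`, which is handy when you need one value per species even where species-level measurements are missing. The species chosen is purely illustrative, and we assume the `species` and `trait` argument names from the pattern of the other trait functions:

```{r trait-mean, eval = FALSE}
# Species-level mean leaf area, falling back on genus- or family-level
# means where no species-level data exist
mean_leaf_area <- BIEN_trait_mean(species = "Salix alba", trait = "leaf area")

mean_leaf_area
```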
# Taxonomy Data

While there are existing packages that query taxonomic data (e.g. those included in the excellent taxize package), the RBIEN taxonomy functions access the taxonomic information that underlies the BIEN database, ensuring consistency.

1. `BIEN_taxonomy_family` Downloads all taxonomic information for a given family.
2. `BIEN_taxonomy_genus`
3. `BIEN_taxonomy_species`

**Example 6: Taxonomic data**

Let's say we're interested in the genus *Asclepias*, and we'd like to get an idea of how many species there are in this genus and what higher taxa it falls within.

```{r taxonomy}
Asclepias_taxonomy <- BIEN_taxonomy_genus(genus = "Asclepias")

# We see that the genus Asclepias falls within the family Apocynaceae and the order Gentianales.

# You'll also notice that a given species may appear more than once
# (due to multiple circumscriptions, some of which may be illegitimate).

# If we'd just like to know all the species that aren't illegitimate:
Asclepias_species <- unique(Asclepias_taxonomy$scrubbed_species_binomial[Asclepias_taxonomy$scrubbed_taxonomic_status %in% c("accepted", "no opinion")])
```

# Phylogenies

The BIEN database currently contains 101 phylogenies for New World plants. This includes 100 replicated phylogenies that include a large fraction of New World plant species ("complete phylogenies") and 1 phylogeny containing only those New World plant species for which molecular data were available ("conservative phylogeny").

Currently, there are only 2 functions available:

1. `BIEN_phylogeny_complete` This function will return a specified number of the replicated "complete" phylogenies. Note that each phylogeny is several Mb in size, so downloading many may take a while on slow connections.
2. `BIEN_phylogeny_conservative` This function returns the conservative phylogeny.

Arguments: The function `BIEN_phylogeny_complete` has a few arguments that are worth explaining:

1. `n_phylogenies` The number of replicated phylogenies that you want to download (between 1 and 100).
2. `seed` Sets the seed for the random number generator before randomly drawing the phylogenies to be downloaded. This is useful for replicating analyses.
3. `replicates` Allows you to specify WHICH of the 100 phylogenies to download, rather than having them selected randomly (see the sketch after Example 7).

**Example 7: Phylogenies**

Let's say we want to download the conservative phylogeny.

```{r phylogeny, eval = FALSE}
phylo <- BIEN_phylogeny_conservative()

# Let's make sure it looks alright
plot.phylo(x = phylo, show.tip.label = FALSE)

# If we just want to see which species are included
phylo_species <- phylo$tip.label
```
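And here is a minimal sketch of the complete phylogenies, using `seed` for reproducibility. The numbers of trees and replicates chosen are arbitrary, and we assume `replicates` overrides the random draw, per the argument description above:

```{r phylogeny-complete, eval = FALSE}
# Download 5 randomly chosen complete phylogenies, reproducibly
complete_phylos <- BIEN_phylogeny_complete(n_phylogenies = 5, seed = 42)

# Or request specific replicates instead of a random draw
chosen_phylos <- BIEN_phylogeny_complete(replicates = c(1, 2, 3))
```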
# Stem Data

The BIEN database contains stem data associated with many of the plots. This is typically either diameter at breast height or diameter at ground height. At present, there are only a few stem functions (although expect more in the future):

1. `BIEN_stem_species` Downloads all of the stem data for a given species (or set of species).
2. `BIEN_stem_genus`
3. `BIEN_stem_family`
4. `BIEN_stem_datasource` Downloads all of the stem data for a given datasource.

Arguments: The arguments for these functions are the same as we have seen in the occurrence and plot functions.

**Example 8: Stem data**

If we'd like stem data for the species *Cupressus arizonica*:

```{r stems, eval = FALSE}
Cupressus_arizonica_stems <- BIEN_stem_species("Cupressus arizonica")
```

# Species lists

These functions begin with the prefix `BIEN_list_...` and allow you to quickly get a list of all the species in a geographic unit. Functions include:

1. `BIEN_list_country` Returns all species found within a country.
2. `BIEN_list_state` Returns all species found within a given state, province, or other primary political division.
3. `BIEN_list_county` Returns all species found within a given county, parish, or other secondary political division.

Some of the same arguments we saw in the occurrence functions appear here as well, including `cultivated` and `new.world`.

**Example 9: Species list for a country**

Let's return to our previous example. What if we just need a list of the species in the Bahamas, rather than the specific details of each occurrence record? We can instead use the function `BIEN_list_country` to download a list of species, which should be much faster than using `BIEN_occurrence_country` to get a species list.

```{r lists, eval = FALSE}
Bahamas_species_list <- BIEN_list_country(country = "Bahamas")

# Notice that we find many more species listed than we found occurrence records for. What happened?
# There are many records coming from the Bahamas that lack coordinates. These records are used in
# the "_list_" functions, but not the occurrence functions.
```

If we wanted to retrieve the results for multiple countries at once, that is simple as well. We just need to supply a vector of countries.

```{r lists-multi-country, eval = FALSE}
country_vector <- c("Haiti", "Dominican Republic")

Haiti_DR <- BIEN_list_country(country = country_vector)
```

We can also use political division codes (from geonames.org) instead of writing out the full country names.

```{r lists-pol-divs}
# To see all of the political division names, and associated codes, we can use this function:
political_names <- BIEN_metadata_list_political_names()

# Let's take a look at what the dataframe contains:
head(political_names)

# In addition to the standardized country, state (state_province_ascii), and county (county_parish_ascii)
# names, we have the associated codes that can be used in BIEN functions.

# Note that 'state' refers to any primary political division (e.g. province),
# and 'county' refers to any secondary political division (e.g. parish).

# Looking at the political_names dataframe, we see that the Dominican Republic has country code "DO",
# and Haiti has country code "HT"
Haiti_DR_from_codes <- BIEN_list_country(country.code = c("HT", "DO"))
```

# Metadata

The BIEN metadata functions start with the prefix `BIEN_metadata_...` and provide useful metadata for the BIEN database.

1. `BIEN_metadata_database_version` Returns the current version number of the BIEN database and the release date.
2. `BIEN_metadata_match_data` Rudimentary function to check for changed records between old and current queries.
3. `BIEN_metadata_citation` Generates bibtex citations for use in reference managers.
4. `BIEN_metadata_list_political_names` Returns a dataframe containing political division names and associated codes.

**Example 10: Metadata**

To check what the current version of the BIEN database is (which we recommend reporting when using BIEN data):

```{r md-1}
BIEN_metadata_database_version()
```

**Example 11: Citations**

One of the more innovative features of the BIEN package is that it will generate custom attribution data for you, based on what data you downloaded through the package. Let's say we're interested in *Selaginella selaginoides*, and we'd like to download some occurrence data:

```{r selaginella-occs, eval = FALSE}
Selaginella_selaginoides_occurrences <- BIEN_occurrence_species("Selaginella selaginoides",
                                                                new.world = NULL)
```

If we plan on using those data in a publication, we'll need proper attribution. We can use `BIEN_metadata_citation` to do this for us:

```{r selaginella-md, eval = FALSE}
citation_info <- BIEN_metadata_citation(dataframe = Selaginella_selaginoides_occurrences)
```

`citation_info` is a list that contains 3 elements:

1. A bit of general information on how to use the list.
2. A set of bibtex-formatted references.
3. Acknowledgement text.

To make things even easier on ourselves, we can use some of the additional functionality of the `BIEN_metadata_citation` function:

```{r md-2, eval = FALSE}
temp_dir <- file.path(tempdir(), "BIEN_temp") # Set a temporary working directory

citation_info <- BIEN_metadata_citation(dataframe = Selaginella_selaginoides_occurrences,
                                        bibtex_file = file.path(temp_dir, "selaginella_selaginoides.bib"),
                                        acknowledgement_file = file.path(temp_dir, "selaginella_selaginoides.txt"))
```

Now we have a bibtex file, `selaginella_selaginoides.bib`, that can be loaded into a reference manager (e.g. Endnote, Paperpile, etc.), and a text file, `selaginella_selaginoides.txt`, containing text that can be pasted into the acknowledgements section of a publication.

What if we also have some trait data? No problem there, the code handles that as well:

```{r md-3, eval = FALSE}
# First, let's get some trait data:
selaginella_selaginoides_traits <- BIEN_trait_species(species = "Selaginella selaginoides")

# Now, we just need to modify our previous bit of code to include the trait data as well:
temp_dir <- file.path(tempdir(), "BIEN_temp")

citation_info <- BIEN_metadata_citation(dataframe = Selaginella_selaginoides_occurrences,
                                        trait.dataframe = selaginella_selaginoides_traits,
                                        bibtex_file = file.path(temp_dir, "selaginella_selaginoides.bib"),
                                        acknowledgement_file = file.path(temp_dir, "selaginella_selaginoides.txt"))
```

The updated citation information will now contain references for both the trait and occurrence records.

# Combining Queries

**Example 12: Putting it all together**

Coming soon!