#' Extract all YAML fragments from all files in a directory #' #' This function extracts all YAML fragments from all files in a #' directory, returning a list of character vectors containing the #' extracted fragments. #' #' @param path The path containing the files. #' @param recursive Whether to also process subdirectories (`TRUE`) #' or not (`FALSE`). #' @param fileRegexes A vector of regular expressions to match the files #' against: only files matching one or more regular expressions in this #' vector are processed. The default regex (`^[^\.]+.*$`) matches all #' files except those that start with a period (`.`). #' @inheritParams extract_yaml_fragments #' #' @return A list of character vectors. #' @examples ### First get the directory where 'yum' is installed #' yumDir <- system.file(package="yum"); #' ### Specify the path of some example files #' examplePath <- file.path(yumDir, "extdata"); #' ### Show files (should be three .dct files) #' list.files(examplePath); #' ### Load these files #' yum::extract_yaml_dir(path=examplePath); #' @export extract_yaml_dir <- function(path, recursive = TRUE, fileRegexes = c("^[^\\.]+.*$"), delimiterRegEx = "^---$", ignoreOddDelimiters = FALSE, encoding="UTF-8", silent=TRUE) { if (!dir.exists(path)) { stop("Directory '", path, "' does not exist!"); } fileList <- list.files(path=path, pattern=fileRegexes, recursive=recursive, full.names=TRUE); res <- lapply(fileList, extract_yaml_fragments, delimiterRegEx = delimiterRegEx, ignoreOddDelimiters = ignoreOddDelimiters, silent=silent); names(res) <- fileList; class(res) <- c("yamlFragmentsFromDir", "list"); return(res); }
/scratch/gouwar.j/cran-all/cranData/yum/R/extract_yaml_dir.R
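A minimal usage sketch (not part of the package source) showing how `fileRegexes` could restrict the scan to the `.dct` example files mentioned in the documentation above; the directory is the package's own `extdata` folder.

```r
### Restrict the directory scan to files ending in '.dct'
examplePath <- file.path(system.file(package = "yum"), "extdata");
yum::extract_yaml_dir(path = examplePath,
                      fileRegexes = "\\.dct$");
```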
#' Extract all YAML fragments from a file #' #' This function extracts all YAML fragments from a file, #' returning a list of character vectors containing the extracted #' fragments. #' #' @param text,file As `text` or `file`, you can specify a `file` to read with #' encoding `encoding`, which will then be read using [base::readLines()]. If the #' argument is named `text`, whether it is the path to an existing file is checked #' first, and if it is, that file is read. If the argument is named `file`, and it #' does not point to an existing file, an error is produced (useful if calling #' from other functions). A `text` should be a character vector where every #' element is a line of the original source (as provided by [base::readLines()]); #' although if a character vector of one element *and* including at least one #' newline character (`\\n`) is provided as `text`, it is split at the newline #' characters using [base::strsplit()]. Basically, this behavior means that the #' first argument can be either a character vector or the path to a file; and if #' you're specifying a file and you want to be certain that an error is thrown if #' it doesn't exist, make sure to name it `file`. #' @param delimiterRegEx The regular expression used to locate YAML #' fragments. #' @param ignoreOddDelimiters Whether to throw an error (FALSE) or #' delete the last delimiter (TRUE) if an odd number of delimiters is #' encountered. #' @param encoding The encoding to use when calling [readLines()]. Set to #' NULL to let [readLines()] guess. #' @param silent Whether to be silent (`TRUE`) or informative (`FALSE`). #' #' @return A list of character vectors, where each vector corresponds to #' one YAML fragment in the source file or text. #' @examples #' extract_yaml_fragments(text=" #' --- #' First: YAML fragment #' id: firstFragment #' --- #' Outside of YAML #' --- #' Second: YAML fragment #' id: secondFragment #' parentId: firstFragment #' --- #' Also outside of YAML #' "); #' @export extract_yaml_fragments <- function(text, file, delimiterRegEx = "^---$", ignoreOddDelimiters = FALSE, encoding="UTF-8", silent=TRUE) { if (missing(file)) { if (missing(text)) { stop("Provide either a `file` or a `text` to scan!"); } else { if ((length(text) == 1) && file.exists(text)) { allLines <- readLines(text, encoding=encoding, warn=FALSE); } else { allLines <- text; if ((length(allLines) == 1) && grepl('\n', allLines)) { allLines <- strsplit(allLines, "\n")[[1]]; } } } } else { if (file.exists(file)) { allLines <- readLines(file, encoding=encoding, warn=FALSE); } else { stop("The file you specified in argument `file` ('", paste0(file, collapse=" "), "') does not exist. If you meant to provide a text ", "to process, please use argument `text`"); } } yamlFragments <- grep(delimiterRegEx, allLines); if (length(yamlFragments) == 0) { return(NULL); } else { if (!silent) { cat("Identified ", length(yamlFragments), " lines matching delimiterRegEx '", delimiterRegEx, "': ", vecTxt(yamlFragments), ".\n", sep=""); } } if (!is.even(length(yamlFragments))) { if (ignoreOddDelimiters) { yamlFragments <- yamlFragments[-length(yamlFragments)]; } else { stop("Extracted an uneven number of lines with specifications ", "(the regular expression for the specification ", "delimiter that was specified was '", delimiterRegEx, "'). 
To ignore the last delimiter, specify ", "'ignoreOddDelimiters=TRUE'."); } } yamlFragmentIndices <- seq_along(yamlFragments); if (length(yamlFragmentIndices) == 2) { indexSets <- list(seq(yamlFragments[1], yamlFragments[2])); } else { indexSets <- mapply(seq, yamlFragments[is.odd(yamlFragmentIndices)], yamlFragments[is.even(yamlFragmentIndices)], SIMPLIFY=FALSE); } res <- lapply(indexSets, function(i, x=allLines) { return(structure(x[i], class="yamlFragment")); }); class(res) <- c("yamlFragments", "list"); return(res); }
/scratch/gouwar.j/cran-all/cranData/yum/R/extract_yaml_fragments.R
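A minimal sketch (not part of the package source) of the three input forms described in the `text`/`file` documentation above; the file path in the last call is hypothetical.

```r
### 1. A character vector with one element per line
yum::extract_yaml_fragments(
  text = c("---", "id: firstFragment", "---", "Outside of YAML")
);

### 2. A single string containing newlines (split internally on "\n")
yum::extract_yaml_fragments(
  text = "---\nid: firstFragment\n---\nOutside of YAML"
);

### 3. A path to an existing file; naming the argument `file` guarantees
###    an error if the file does not exist (hypothetical path)
# yum::extract_yaml_fragments(file = "some/existing/file.rock");
```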
#' Find the indices ('line numbers') of all YAML fragments from a file #' #' This function finds all YAML fragments from a file, returning #' their start and end indices or all indices of all lines in the (non-)YAML #' fragments. #' #' @param file The path to a file to scan; if provided, takes precedence #' over `text`. #' @param text A character vector to scan, where every element should #' represent one line in the file; can be specified instead of `file`. #' @param invert Set to `TRUE` to return the indices of the character #' vector that are *not* YAML fragments. #' @param returnFragmentIndices Set to `TRUE` to return all indices of the #' relevant fragments (i.e. including intermediate indices). #' @param returnPairedIndices Whether to return two vectors with the #' start and end indices, or pair them up in vectors of 2. #' @param delimiterRegEx The regular expression used to locate YAML #' fragments. #' @param ignoreOddDelimiters Whether to throw an error (FALSE) or #' delete the last delimiter (TRUE) if an odd number of delimiters is #' encountered. #' @param silent Whether to be silent (TRUE) or informative (FALSE). #' #' @return A list of numeric vectors with start and end indices #' @examples ### Create simple text vector with the right delimiters #' simpleExampleText <- #' c( #' "---", #' "First YAML fragment", #' "---", #' "Outside of YAML", #' "This, too.", #' "---", #' "Second fragment", #' "---", #' "Also outside of YAML", #' "Another one outside", #' "Last one" #' ); #' #' yum::find_yaml_fragment_indices( #' text=simpleExampleText #' ); #' #' yum::find_yaml_fragment_indices( #' text=simpleExampleText, #' returnFragmentIndices = FALSE #' ); #' #' yum::find_yaml_fragment_indices( #' text=simpleExampleText, #' invert = TRUE #' ); #' #' @export find_yaml_fragment_indices <- function(file, text, invert = FALSE, returnFragmentIndices = TRUE, returnPairedIndices = TRUE, delimiterRegEx = "^---$", ignoreOddDelimiters = FALSE, silent=TRUE) { if (missing(file)) { if (missing(text)) { stop("Provide either a `file` or a `text` to scan!"); } else { allLines <- text; } } else { allLines <- readLines(file); } yamlFragments <- grep(delimiterRegEx, allLines); if (length(yamlFragments) == 0) { if (returnFragmentIndices) { if (invert) { return(seq_along(allLines)); } else { return(NULL); } } else { if (invert) { if (returnPairedIndices) { return(list(c(0, length(allLines)))); } else { return(list(0, length(allLines))); } } else { if (returnPairedIndices) { return(NULL); } else { return(NULL); } } } } if (!is.even(length(yamlFragments))) { if (ignoreOddDelimiters) { yamlFragments <- yamlFragments[-length(yamlFragments)]; } else { stop("Extracted an uneven number of lines with specifications ", "(the regular expression for the specification ", "delimiter that was specified was '", delimiterRegEx, "'). 
To ignore the last delimiter, specify ", "'ignoreOddDelimiters=TRUE'."); } } yamlFragmentIndices <- seq_along(yamlFragments); indexPairIndices <- list(yamlFragments[is.odd(yamlFragmentIndices)], yamlFragments[is.even(yamlFragmentIndices)]); if (invert) { corrected_yamlStarts <- indexPairIndices[[1]] - 1; corrected_yamlEnds <- indexPairIndices[[2]] + 1; if (corrected_yamlStarts[1] < 2) { firstRegularLine <- corrected_yamlEnds[1]; corrected_yamlStarts <- corrected_yamlStarts[-1]; } else { firstRegularLine <- 1; } if (utils::tail(corrected_yamlEnds, 1) > length(allLines)) { lastRegularLine <- utils::tail(corrected_yamlStarts, 1); corrected_yamlEnds <- utils::head(corrected_yamlEnds, -1); } else { lastRegularLine <- length(allLines); } nonYamlStarts <- unique(c(firstRegularLine, corrected_yamlEnds)); nonYamlEnds <- unique(c(corrected_yamlStarts, lastRegularLine)); indexPairIndices <- list(nonYamlStarts, nonYamlEnds); } if (returnFragmentIndices) { res <- lapply(seq_along(indexPairIndices[[1]]), function(pair) { return(seq(indexPairIndices[[1]][pair], indexPairIndices[[2]][pair])); }); } else if (returnPairedIndices) { res <- lapply(seq_along(indexPairIndices[[1]]), function(pair) { return(c(indexPairIndices[[1]][pair], indexPairIndices[[2]][pair])); }); } else { res <- indexPairIndices; } return(res); }
/scratch/gouwar.j/cran-all/cranData/yum/R/find_yaml_fragment_indices.R
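A small sketch (not part of the package source) of the difference between paired and unpaired index output, reusing the structure of `simpleExampleText` from the example above.

```r
simpleExampleText <- c("---", "First YAML fragment", "---",
                       "Outside of YAML",
                       "---", "Second fragment", "---");

### One c(start, end) pair per YAML fragment
yum::find_yaml_fragment_indices(text = simpleExampleText,
                                returnFragmentIndices = FALSE,
                                returnPairedIndices = TRUE);

### Two vectors: all start indices, then all end indices
yum::find_yaml_fragment_indices(text = simpleExampleText,
                                returnFragmentIndices = FALSE,
                                returnPairedIndices = FALSE);
```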
#' Flatten a list of lists to a list of atomic vectors #' #' This function takes a hierarchical structure of lists and #' extracts all atomic vectors, returning one flat list of all #' those vectors. #' #' @param x The list of lists. #' #' @return A list of atomic vectors. #' @export #' #' @examples ### First create a list of lists #' listOfLists <- #' list(list(list(1:3, 8:5), 7:7), list(1:4, 8:2)); #' yum::flatten_list_of_lists(listOfLists); flatten_list_of_lists <- function(x) { if (is.atomic(x)) { return(x); } else if (all(unlist(lapply(x, is.atomic)))) { return(x); } else { return(do.call(c, lapply(x, flatten_list_of_lists))); } }
/scratch/gouwar.j/cran-all/cranData/yum/R/flatten_list_of_lists.R
### Basically what Marc Schwartz suggested at Thu Jul 1 19:10:28 CEST 2010 ### on the R-help mailing list, see https://stat.ethz.ch/pipermail/r-help/2010-July/244299.html #' Checking whether numbers are odd or even #' #' @param vector The vector to process #' #' @return A logical vector. #' @rdname oddOrEven #' #' @examples is.odd(4); # is.even(4); #' @export is.odd <- function(vector) { return((vector %% 2) != 0); } #'@rdname oddOrEven #'@export is.even <- function(vector) { return((vector %% 2) == 0); }
/scratch/gouwar.j/cran-all/cranData/yum/R/is.odd_is.even.R
#' Load YAML fragments in one or multiple files and simplify them #' #' These functions extract all YAML fragments from a file or text (`load_and_simplify`) #' or from all files in a directory (`load_and_simplify_dir`) and load them #' by calling [load_yaml_fragments()], and then call [simplify_by_flattening()] #' on the result, returning the resulting list. #' #' @inheritParams load_yaml_fragments #' @inheritParams load_yaml_dir #' @inheritParams simplify_by_flattening #' #' @return A list of objects, where each object corresponds to one #' item specified in the read YAML fragment(s) from the source file #' or text. If the convention of the `rock`, `dct` and `justifier` #' packages is followed, each object in this list contains one or #' more named objects (lists), where the name indicates the type #' of information contained. Each of those objects (lists) then #' contains one or more objects of that type, such as metadata or #' codes for `rock`, a decentralized construct taxonomy element #' for `dct`, and a justification, decision, assertion, or source #' for `justifier`. #' @rdname load_and_simplify #' @examples #' yum::load_and_simplify(text=" #' --- #' firstObject: #' id: firstFragment #' --- #' Outside of YAML #' --- #' otherObjectType: #' - #' id: secondFragment #' parentId: firstFragment #' - #' id: thirdFragment #' parentId: firstFragment #' --- #' Also outside of YAML"); #' #' @export load_and_simplify <- function(text, file, yamlFragments=NULL, select=".*", simplify = ".*", delimiterRegEx = "^---$", ignoreOddDelimiters = FALSE, encoding="UTF-8", silent=TRUE) { # if (!requireNamespace("yaml", quietly = TRUE)) { # stop("To parse YAML content, the \"yaml\" package is required. ", # "Please install it using `install.packages('yaml');`.", # call. = FALSE); # } ### When calling these functions, we do not yet select any elements; ### we do this afterwards ourselves. if (!is.null(yamlFragments)) { res <- load_yaml_fragments(yamlFragments=yamlFragments, select=".*", delimiterRegEx=delimiterRegEx, ignoreOddDelimiters=ignoreOddDelimiters, encoding=encoding, silent=silent); } else if ((!missing(file)) || (!missing(text))) { if (!missing(text)) { res <- load_yaml_fragments(text=text, select=".*", delimiterRegEx=delimiterRegEx, ignoreOddDelimiters=ignoreOddDelimiters, encoding=encoding, silent=silent); } else if (!missing(file)) { res <- load_yaml_fragments(file=file, select=".*", delimiterRegEx=delimiterRegEx, ignoreOddDelimiters=ignoreOddDelimiters, encoding=encoding, silent=silent); } } else { stop("Provide either a `file` or a `text` to scan!"); } res <- simplify_by_flattening(res, simplify = simplify); res <- res[grep(select, names(res))]; if (is.null(res)) { res <- list(); } class(res) <- c("simplifiedYum", "list"); return(res); }
/scratch/gouwar.j/cran-all/cranData/yum/R/load_and_simplify.R
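A brief sketch (not from the package source) of combining `select` with `load_and_simplify()` to keep only the simplified objects whose names match a regular expression.

```r
yum::load_and_simplify(
  text = c("---",
           "firstObject:",
           "  id: firstFragment",
           "---",
           "Outside of YAML",
           "---",
           "otherObjectType:",
           "  -",
           "    id: secondFragment",
           "---"),
  select = "otherObjectType"
);
```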
#' @rdname load_and_simplify #' @export load_and_simplify_dir <- function(path, recursive = TRUE, fileRegexes = c("^[^\\.]+.*$"), select=".*", simplify = ".*", delimiterRegEx = "^---$", ignoreOddDelimiters = FALSE, encoding="UTF-8", silent=TRUE) { ### Do not yet select any elements; ### we do this afterwards ourselves. res <- load_yaml_dir(path=path, recursive=recursive, fileRegexes=fileRegexes, select=".*", delimiterRegEx = delimiterRegEx, ignoreOddDelimiters = ignoreOddDelimiters, encoding = encoding, silent=silent); ### First remove the names of this list; `load_yaml_dir' names the ### elements using the filenames names(res) <- NULL; res <- simplify_by_flattening(res, simplify = simplify); res <- res[grep(select, names(res))]; if (is.null(res)) { res <- list(); } class(res) <- c("simplifiedYum", "list"); return(res); }
/scratch/gouwar.j/cran-all/cranData/yum/R/load_and_simplify_dir.R
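A short sketch (not from the package source): applying `load_and_simplify_dir()` to the example `.dct` files shipped in the package's `extdata` directory, as used in the other examples in this package.

```r
examplePath <- file.path(system.file(package = "yum"), "extdata");
yum::load_and_simplify_dir(path = examplePath);
```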
#' Load all YAML fragments from all files in a directory #' #' This function extracts all YAML fragments from all files in a #' directory, returning a list of character vectors containing the #' extracted fragments. #' #' This function extracts all YAML fragments from all files in a #' directory and then calls [yaml::yaml.load()] to parse them. It #' then returns a list where each element is a list with the parsed #' fragments in a file. #' #' @param path The path containing the files. #' @param recursive Whether to also process subdirectories (`TRUE`) #' or not (`FALSE`). #' @param fileRegexes A vector of regular expressions to match the files #' against: only files matching one or more regular expressions in this #' vector are processed. The default regex (`^[^\.]+.*$`) matches all #' files except those that start with a period (`.`). #' @inheritParams extract_yaml_dir #' @inheritParams load_yaml_fragments #' #' @return A list of lists of objects. #' @examples ### First get the directory where 'yum' is installed #' yumDir <- system.file(package="yum"); #' ### Specify the path of some example files #' examplePath <- file.path(yumDir, "extdata"); #' ### Show files (should be three .dct files) #' list.files(examplePath); #' ### Load these files #' yum::load_yaml_dir(path=examplePath); #' @export load_yaml_dir <- function(path, recursive = TRUE, fileRegexes = c("^[^\\.]+.*$"), select=".*", delimiterRegEx = "^---$", ignoreOddDelimiters = FALSE, encoding="UTF-8", silent=TRUE) { if (!dir.exists(path)) { stop("Directory '", path, "' does not exist!"); } fileList <- list.files(path=path, pattern=fileRegexes, recursive=recursive, full.names=TRUE); res <- lapply(fileList, load_yaml_fragments, select=select, delimiterRegEx = delimiterRegEx, ignoreOddDelimiters = ignoreOddDelimiters, encoding = encoding, silent=silent); names(res) <- fileList; class(res) <- c("yumFromDir", "list"); return(res); }
/scratch/gouwar.j/cran-all/cranData/yum/R/load_yaml_dir.R
#' Load all YAML fragments from a file #' #' This function extracts all YAML fragments from a file and then #' calls [yaml::yaml.load()] to parse them. It then returns a list #' of the parsed fragments. #' #' @inheritParams extract_yaml_fragments #' @param yamlFragments A character vector of class `yamlFragment` where #' every element corresponds to one line of the YAML fragments, or a list #' of multiple such character vectors (of class `yamlFragments`). Specify #' either `yamlFragments` (which, if specified, takes precedence over `file` #' and `text`), `file`, or `text` (`file` takes precedence over `text`). #' @param select A vector of regular expressions specifying object names #' to retain. The default (`.*`) matches everything, so by default, all #' objects are retained. #' #' @return A list of objects, where each object corresponds to one #' YAML fragment from the source file or text. If the convention of #' the `rock`, `dct` and `justifier` packages is followed, each object #' in this list contains one or more named objects (lists), where the #' name indicates the type of information contained. Each of those #' objects (lists) then contains one or more objects of that type, #' such as metadata or codes for `rock`, a decentralized construct #' taxonomy element for `dct`, and a justification for `justifier`. #' @examples #' yum::load_yaml_fragments(text=" #' --- #' - #' id: firstFragment #' --- #' Outside of YAML #' --- #' - #' id: secondFragment #' parentId: firstFragment #' --- #' Also outside of YAML"); #' #' @export load_yaml_fragments <- function(text, file, yamlFragments=NULL, select=".*", delimiterRegEx = "^---$", ignoreOddDelimiters = FALSE, encoding="UTF-8", silent=TRUE) { # if (!requireNamespace("yaml", quietly = TRUE)) { # stop("To parse YAML content, the \"yaml\" package is required. ", # "Please install it using `install.packages('yaml');`.", # call. = FALSE); # } if (!is.null(yamlFragments)) { if (("yamlFragment" %in% class(yamlFragments)) || ("yamlFragments" %in% class(yamlFragments))) { yamlLineSets <- yamlFragments; loadedFrom <- 'list'; } else if ("yamlFragmentsFromDir" %in% class(yamlFragments)) { return(load_yaml_list(x=yamlFragments, recursive = TRUE, select=select, delimiterRegEx = delimiterRegEx, ignoreOddDelimiters = ignoreOddDelimiters, encoding=encoding, silent=silent)); } else { stop("If passing a list of YAML line sets as produced by ", "the yum `extract_yaml_*` functions, they must have the ", "class `yamlFragment`, `yamlFragments` or `yamlFragmentsFromDir`."); } } else if ((!missing(file)) || (!missing(text))) { if (!missing(text)) { yamlLineSets <- extract_yaml_fragments(text=text, delimiterRegEx=delimiterRegEx, ignoreOddDelimiters=ignoreOddDelimiters, silent=TRUE); if ((length(text)==1) && file.exists(text)) { loadedFrom <- text; } else { loadedFrom <- 'text'; } } else if (!missing(file)) { yamlLineSets <- extract_yaml_fragments(file=file, delimiterRegEx=delimiterRegEx, ignoreOddDelimiters=ignoreOddDelimiters, silent=TRUE); loadedFrom <- file; } } else { stop("Provide either a `file` or a `text` to scan!"); } if ("yamlFragment" %in% class(yamlLineSets)) { rawSpecs <- yaml::yaml.load(yamlLineSets); } else { rawSpecs <- lapply(yamlLineSets, function(lineSets) { tryCatch({ res <- yaml::yaml.load(lineSets); }, error = function(e) { if ((loadedFrom == 'text') || (loadedFrom == 'list')) { stop("The `yaml::yaml.load` function encountered an ", "error processing the provided ", loadedFrom, ". 
This implies ", "malformed YAML, in other words, ", "maybe a space is omitted or there is some other ", "syntax error. The error it reported is '", e$message, "'."); } else { stop("The `yaml::yaml.load` function encountered an ", "error processing file '", loadedFrom, "'. This implies malformed YAML, in other words, ", "maybe a space is omitted or there is some other ", "syntax error. The error it reported is '", e$message, "'."); } }); return(res); }); } if (!silent) { if (!missing(file)) { cat("Loaded ", length(rawSpecs), " YAML fragments from file '", file, "'.\n", sep=""); } else if (!missing(text) && !silent) { cat("Loaded ", length(rawSpecs), " YAML fragments from the supplied `text` argument.\n", sep=""); } } specNames <- lapply(rawSpecs, names); if (length(select) > 0) { if (!silent) { cat("Applying the selection ", ifelse(length(select)==1, "criterion", "criteria"), " specified in the `select` argument (specifically,", vecTxtQ(select), ").\n", sep=""); } combinedSelect <- paste0(select, collapse="|"); rawSpecs <- lapply(rawSpecs, function(spec) { selectedElements <- union(which(is.null(names(spec))), grep(combinedSelect, names(spec), perl=TRUE)); return(spec[selectedElements]); }); if (!silent) { cat("Selected ", sum(unlist(lapply(rawSpecs, length))), " YAML fragments.\n", sep=""); } } else { if (!silent) { cat("No selection criteria were specified, so ", "returning all objects."); } } rawSpecs <- rawSpecs[unlist(lapply(rawSpecs, length)) > 0]; class(rawSpecs) <- c("yumFromFile", "list"); return(rawSpecs); }
/scratch/gouwar.j/cran-all/cranData/yum/R/load_yaml_fragments.R
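A minimal sketch (not from the package source) of the `select` argument: only objects whose names match one of the supplied regular expressions are retained.

```r
yum::load_yaml_fragments(
  text = c("---",
           "metadata:",
           "  title: Example",
           "codes:",
           "  -",
           "    id: someCode",
           "---"),
  select = "codes"
);
```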
#' Load all YAML fragments from all character vectors in a list #' #' This function extracts all YAML fragments from character vectors #' in a list, returning a list of character vectors containing the #' extracted fragments. #' #' This function calls [yaml::yaml.load()] on all character vectors #' in a list. It then returns a list where each element is a list #' with the parsed fragments from one character vector in the list. #' #' @param x The list containing the character vectors. #' @param recursive Whether to first `unlist` the list (`TRUE`) #' or not (`FALSE`). #' @inheritParams load_yaml_fragments #' #' @return A list of lists of objects. #' @examples #' yamlList <- list(c( #' "---", #' "-", #' " id: firstFragment", #' "---"), c( #' "---", #' "-", #' " id: secondFragment", #' " parentId: firstFragment", #' "---")); #' yum::load_yaml_list(yamlList); #' @export load_yaml_list <- function(x, recursive = TRUE, select=".*", delimiterRegEx = "^---$", ignoreOddDelimiters = FALSE, encoding="UTF-8", silent=TRUE) { if (recursive) { x <- flatten_list_of_lists(x); } res <- lapply(x, function(obj) { if (("yamlFragment" %in% class(obj)) || ("yamlFragments" %in% class(obj))) { return(load_yaml_fragments(yamlFragments=obj, text=NULL, file=NULL, select=select, delimiterRegEx = delimiterRegEx, ignoreOddDelimiters = ignoreOddDelimiters, encoding = encoding, silent=silent)); } }); names(res) <- names(x); class(res) <- c("yumFromList", "list"); return(res); }
/scratch/gouwar.j/cran-all/cranData/yum/R/load_yaml_list.R
#' Simplify the structure of extracted YAML fragments #' #' This function does some cleaning and simplifying to allow #' efficient specification of elements in the YAML fragments. #' #' @param x Extracted (and loaded) YAML fragments #' @param simplify A regular expression specifying which elements to #' simplify (default is everything) # #' @param stopOnError Whether to give an error and stop when encountering # #' an unexpected structure or only give a warning. #' @param .level Internal argument to enable slightly-less-than-elegant 'recursion'. #' #' @return A simplified list (but still a list) #' @export #' #' @examples yamlFragmentExample <- ' #' --- #' source: #' - #' id: src_1 #' label: "Label 1" #' - #' id: src_2 #' label: "Label 2" #' assertion: #' - #' id: assertion_1 #' label: "Assertion 1" #' - #' id: assertion_2 #' label: "Assertion 2" #' --- #' '; #' loadedExampleFragments <- #' load_yaml_fragments(yamlFragmentExample); #' simplified <- #' simplify_by_flattening(loadedExampleFragments); #' #' ### Pre-simplification: #' str(loadedExampleFragments); #' #' ### Post-simplification: #' str(simplified); #' simplify_by_flattening <- function(x, simplify = ".*", # stopOnError=TRUE, .level=1) { ### Normally, if the elements of the provided object (at 'root level') ### have no names, we've been provided with an object containing ### several distinct YAML fragments. Process those separately and ### concatenate the resulting objects together. if (is.null(names(x))) { # if (.level == 1) { return(do.call(c, lapply(x, simplify_by_flattening, simplify=simplify, .level=2))); # } else { # errMsg <- # "Objects at the second level must have names to be able to simplify, but they don't!"; # if (stopOnError) { # stop(errMsg); # } else { # warning(errMsg); # } # } } ### If we get to this point, the elements of this entity have names; ### therefore, this is the 'second call' if we've originally been called ### with an object containing several YAML fragments. Therefore, ### apply all names of these objects to children without names return(do.call(c, lapply(names(x), function(currentName) { ### If the children have names as well, return this ### object as is (which effectively lifts it to the ### 'root' level) if (!is.null(names(x[[currentName]]))) { ### Add one level that will then be removed when ### concatenating after having returned the result res <- list(x[[currentName]]); names(res) <- currentName; return(res); } else { ### In this case, the children have no names (aw...), and ### so either give them all the current name if we match ### the regex (assume these are all objects of that type), ### or add a level if we don't match. ### Only rename children if we satisfy the regex if (grepl(simplify, currentName)) { names(x[[currentName]]) <- rep(currentName, length(x[[currentName]])); return(x[[currentName]]); } else { ### If we don't rename and extract here, add one ### level that will then be removed when concatenating res <- list(x[[currentName]]); names(res) <- currentName; return(res); } } }))); }
/scratch/gouwar.j/cran-all/cranData/yum/R/simplify_by_flattening.R
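A small sketch (not from the package source) of the `simplify` argument, reusing `loadedExampleFragments` from the example above: only element names matching the regular expression are flattened.

```r
partiallySimplified <-
  simplify_by_flattening(loadedExampleFragments,
                         simplify = "^source$");
### 'source' entries are flattened to the top level;
### 'assertion' keeps its own nested level
str(partiallySimplified);
```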
#' Easily parse a vector into a character value #' #' @param vector The vector to process. #' @param delimiter,firstDelimiter,lastDelimiter The delimiters #' to use for respectively the middle, first #' `firstElements`, and last `lastElements` elements. #' @param useQuote This character string is pre- and appended to all elements; #' so use this to quote all elements (`useQuote="'"`), doublequote all #' elements (`useQuote='"'`), or anything else (e.g. `useQuote='|'`). #' The only difference between `vecTxt` and `vecTxtQ` is that the #' latter by default quotes the elements. #' @param firstElements,lastElements The number of elements for which to use #' the first and last delimiters, respectively #' @param lastHasPrecedence If the vector is very short, it's possible that the #' sum of firstElements and lastElements is larger than the vector length. In #' that case, downwardly adjust the number of elements to separate with the #' first delimiter (`TRUE`) or the number of elements to separate with the #' last delimiter (`FALSE`)? #' @param ... Any additional arguments to `vecTxtQ` are passed on to #' `vecTxt`. #' #' @return A character vector of length 1. #' @export #' #' @examples vecTxtQ(names(mtcars)); vecTxt <- function(vector, delimiter = ", ", useQuote = "", firstDelimiter = NULL, lastDelimiter = " & ", firstElements = 0, lastElements = 1, lastHasPrecedence = TRUE) { vector <- paste0(useQuote, vector, useQuote); if (length(vector) == 1) { return(vector); } if (firstElements + lastElements > length(vector)) { if (lastHasPrecedence) { firstElements <- length(vector) - lastElements; } else { lastElements <- length(vector) - firstElements; } } firstTxt <- lastTxt <- ""; if (is.null(firstDelimiter)) { firstDelimiter <- delimiter; } if (is.null(lastDelimiter)) { lastDelimiter <- delimiter; } midBit <- vector; if (firstElements > 0) { firstBit <- utils::head(vector, firstElements); midBit <- utils::tail(vector, -firstElements); firstTxt <- paste0(paste0(firstBit, collapse=firstDelimiter), firstDelimiter); } if (lastElements > 0) { lastBit <- utils::tail(vector, lastElements); midBit <- utils::head(midBit, -lastElements); lastTxt <- paste0(lastDelimiter, paste0(lastBit, collapse=lastDelimiter)); } midTxt <- paste0(midBit, collapse=delimiter); return(paste0(firstTxt, midTxt, lastTxt)); } #'@rdname vecTxt #'@export vecTxtQ <- function(vector, useQuote = "'", ...) { return(vecTxt(vector, useQuote = useQuote, ...)); }
/scratch/gouwar.j/cran-all/cranData/yum/R/vecTxt.R
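Two quick sketches (not from the package source) of the delimiter behaviour: the last element is joined with `lastDelimiter`, and `vecTxtQ()` quotes every element.

```r
vecTxt(c("apples", "pears", "bananas"));
#> [1] "apples, pears & bananas"

vecTxtQ(c("apples", "pears", "bananas"), lastDelimiter = " and ");
#> [1] "'apples', 'pears' and 'bananas'"
```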
#' @useDynLib yyjsonr, .registration=TRUE NULL
/scratch/gouwar.j/cran-all/cranData/yyjsonr/R/aaa.R
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' Advanced: Values for setting internal options directly on YYJSON library #' #' This is a list of integer values used for setting flags on the \code{yyjson} #' code directly. This is an ADVANCED option and should be used with caution. #' #' Some of these settings overlap and conflict with code needed to handle #' the translation of JSON values to R. #' #" Pass multiple options with #' \code{opts_read_json(yyjson_read_flag = c(yyjson_read_flag$x, yyjson_read_flag$y, ...))} #' #' \describe{ #' \item{YYJSON_READ_NOFLAG}{ #' Default option (RFC 8259 compliant): #' \itemize{ #' \item{Read positive integer as uint64_t.} #' \item{Read negative integer as int64_t.} #' \item{Read floating-point number as double with round-to-nearest mode.} #' \item{Read integer which cannot fit in uint64_t or int64_t as double.} #' \item{Report error if double number is infinity.} #' \item{Report error if string contains invalid UTF-8 character or BOM.} #' \item{Report error on trailing commas, comments, inf and nan literals.} #' } #' } #' #' \item{YYJSON_READ_INSITU}{ #' Read the input data in-situ. #' This option allows the reader to modify and use input data to store string #' values, which can increase reading speed slightly. #' The caller should hold the input data before free the document. #' The input data must be padded by at least \code{YYJSON_PADDING_SIZE} bytes. #' For example: \code{"[1,2]"} should be \code{"[1,2]\0\0\0\0"}, input length should be 5. #' } #' #' \item{YYJSON_READ_STOP_WHEN_DONE}{ #' Stop when done instead of issuing an error if there's additional content #' after a JSON document. This option may be used to parse small pieces of JSON #' in larger data, such as "NDJSON" #' } #' #' \item{YYJSON_READ_ALLOW_TRAILING_COMMAS}{ #' Allow single trailing comma at the end of an object or array, #' such as \code{"[1,2,3,]"} #' } #' #' \item{YYJSON_READ_ALLOW_COMMENTS}{ #' Allow C-style single line and multiple line comments (non-standard). #' } #' #' \item{YYJSON_READ_ALLOW_INF_AND_NAN}{ #' Allow inf/nan number and literal, case-insensitive, #' such as 1e999, NaN, inf, -Infinity (non-standard). #' } #' #' \item{YYJSON_READ_NUMBER_AS_RAW}{ #' Read all numbers as raw strings (value with "YYJSON_TYPE_RAW" type), #' inf/nan literal is also read as raw with "ALLOW_INF_AND_NAN" flag. #' } #' #' \item{YYJSON_READ_ALLOW_INVALID_UNICODE}{ #' Allow reading invalid unicode when parsing string values (non-standard). #' Invalid characters will be allowed to appear in the string values, but #' invalid escape sequences will still be reported as errors. #' This flag does not affect the performance of correctly encoded strings. #' WARNING: Strings in JSON values may contain incorrect encoding when this #' option is used, you need to handle these strings carefully to avoid security #' risks. #' } #' #' \item{YYJSON_READ_BIGNUM_AS_RAW}{ #' Read big numbers as raw strings. These big numbers include integers that #' cannot be represented by "int64_t" and "uint64_t", and floating-point #' numbers that cannot be represented by finite "double". #' The flag will be overridden by "YYJSON_READ_NUMBER_AS_RAW" flag. 
#' } #' } #' #' @export #' #' @examples #' read_json_str( #' '[12.3]', #' opts = opts_read_json(yyjson_read_flag = yyjson_read_flag$YYJSON_READ_ALLOW_TRAILING_COMMAS) #' ) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ yyjson_read_flag <- list( YYJSON_READ_NOFLAG = 0L, YYJSON_READ_INSITU = 1L, YYJSON_READ_STOP_WHEN_DONE = 2L, YYJSON_READ_ALLOW_TRAILING_COMMAS = 4L, YYJSON_READ_ALLOW_COMMENTS = 8L, YYJSON_READ_ALLOW_INF_AND_NAN = 16L, YYJSON_READ_NUMBER_AS_RAW = 32L, YYJSON_READ_ALLOW_INVALID_UNICODE = 64L, YYJSON_READ_BIGNUM_AS_RAW = 128L ) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' Advanced: Values for setting internal options directly on YYJSON library #' #' This is a list of integer values used for setting flags on the \code{yyjson} #' code directly. This is an ADVANCED option and should be used with caution. #' #' Some of these settings overlap and conflict with code needed to handle #' the translation of JSON values to R. #' #" Pass multiple options with #' \code{opts_write_json(yyjson_write_flag = c(write_flag$x, write_flag$y, ...))} #' #' \describe{ #' \item{YYJSON_WRITE_NOFLAG}{ #' Default value. #' \itemize{ #' \item{Write JSON minify.} #' \item{Report error on inf or nan number.} #' \item{Report error on invalid UTF-8 string.} #' \item{Do not escape unicode or slash.} #' } #' } #'\item{YYJSON_WRITE_PRETTY}{Write JSON pretty with 4 space indent.} #'\item{YYJSON_WRITE_ESCAPE_UNICODE}{Escape unicode as `uXXXX`, make the #'output ASCII only.} #'\item{YYJSON_WRITE_ESCAPE_SLASHES}{Escape '/' as '\/'.} #'\item{YYJSON_WRITE_ALLOW_INF_AND_NAN}{Write inf and nan number as 'Infinity' #'and 'NaN' literal (non-standard).} #'\item{YYJSON_WRITE_INF_AND_NAN_AS_NULL}{Write inf and nan number as null literal. #' This flag will override `YYJSON_WRITE_ALLOW_INF_AND_NAN` flag.} #' \item{YYJSON_WRITE_ALLOW_INVALID_UNICODE}{Allow invalid unicode when encoding #' string values (non-standard). #' Invalid characters in string value will be copied byte by byte. #' If `YYJSON_WRITE_ESCAPE_UNICODE` flag is also set, invalid character will be #' escaped as `U+FFFD` (replacement character). #' This flag does not affect the performance of correctly encoded strings.} #' \item{YYJSON_WRITE_PRETTY_TWO_SPACES}{Write JSON pretty with 2 space indent. #' This flag will override `YYJSON_WRITE_PRETTY` flag.} #' } #' #' @export #' #' @examples #' write_json_str("hello/there", opts = opts_write_json( #' yyjson_write_flag = yyjson_write_flag$YYJSON_WRITE_ESCAPE_SLASHES #' )) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ yyjson_write_flag <- list( YYJSON_WRITE_NOFLAG = 0L, YYJSON_WRITE_PRETTY = 1L, YYJSON_WRITE_ESCAPE_UNICODE = 2L, YYJSON_WRITE_ESCAPE_SLASHES = 4L, YYJSON_WRITE_ALLOW_INF_AND_NAN = 8L, YYJSON_WRITE_INF_AND_NAN_AS_NULL = 16L, YYJSON_WRITE_ALLOW_INVALID_UNICODE = 32L, YYJSON_WRITE_PRETTY_TWO_SPACES = 64L ) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' Create named list of options for parsing R from JSON #' #' @param int64 how to encode large integers which do not fit into R's integer #' type. 'string' imports them as a character vector. 'double' will #' convert the integer to a double precision numeric value. 'bit64' will #' use the 'integer64' type from the 'bit64' package. Note that the #' 'integer64' type is a \emph{signed} integer type, and a warning will #' be issued if JSON contains an \emph{unsigned} integer which cannot #' be stored in this type. 
#' @param df_missing_list_elem R value to use when elements are missing in list #' columns in data.frames. Default: NULL #' @param obj_of_arrs_to_df logical. Should a named list of equal-length #' vectors be promoted to a data.frame? Default: TRUE. If FALSE, then #' result will be left as a list. #' @param arr_of_objs_to_df logical. Should an array of objects be promoted to #' a data.frame? Default: TRUE. If FALSE, then results will be read as a #' list-of-lists. #' @param yyjson_read_flag integer vector of internal \code{yyjson} #' options. See \code{yyjson_read_flag} in this package, and read #' the yyjson API documentation for more information. This is considered #' an advanced option. #' @param str_specials Should \code{'NA'} in a JSON string be converted to the \code{'special'} #' \code{NA} value in R, or left as a \code{'string'}. Default: 'string' #' @param num_specials Should JSON strings 'NA'/'Inf'/'NaN' in a numeric context #' be converted to the \code{'special'} R numeric values #' \code{NA, Inf, NaN}, or left as a \code{'string'}. Default: 'special' #' @param promote_num_to_string Should numeric values be promoted to strings #' when they occur within an array with other string values? Default: FALSE #' means to keep numerics as numeric values and promote the \emph{container} to #' be a \code{list} rather than an atomic vector when types are mixed. If \code{TRUE} #' then arrays of mixed string/numeric types will be promoted to all #' string values and returned as an atomic character vector. Set this to \code{TRUE} #' if you want to emulate the behaviour of \code{jsonlite::fromJSON()} #' @param length1_array_asis logical. Should JSON arrays with length = 1 be #' marked with class \code{AsIs}? Default: FALSE #' #' @seealso [yyjson_read_flag()] #' @return Named list of options for reading JSON #' @export #' #' @examples #' opts_read_json() #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ opts_read_json <- function( promote_num_to_string = FALSE, df_missing_list_elem = NULL, obj_of_arrs_to_df = TRUE, arr_of_objs_to_df = TRUE, str_specials = c('string', 'special'), num_specials = c('special', 'string'), int64 = c('string', 'double', 'bit64'), length1_array_asis = FALSE, yyjson_read_flag = 0L ) { structure( list( promote_num_to_string = isTRUE(promote_num_to_string), df_missing_list_elem = df_missing_list_elem, obj_of_arrs_to_df = isTRUE(obj_of_arrs_to_df), arr_of_objs_to_df = isTRUE(arr_of_objs_to_df), length1_array_asis = isTRUE(length1_array_asis), str_specials = match.arg(str_specials), num_specials = match.arg(num_specials), int64 = match.arg(int64), yyjson_read_flag = as.integer(yyjson_read_flag) ), class = "opts_read_json" ) } #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' Create named list of options for serializing R to JSON #' #' @param digits decimal places to keep for floating point numbers. Default: -1. #' Positive values specify number of decimal places. Using zero will #' write the numeric value as an integer. Values less than zero mean that #' the floating point value should be written as-is (the default). #' @param dataframe how to encode data.frame objects. Options 'rows' or #' 'columns'. Default: 'rows' #' @param factor how to encode factor objects: must be one of 'string' or 'integer' #' Default: 'string' #' @param auto_unbox automatically unbox all atomic vectors of length 1 such that #' they appear as atomic elements in JSON rather than arrays of length 1. 
#' @param pretty Logical value indicating if the created JSON string should have #' whitespace for indentation and linebreaks. Default: FALSE. #' Note: this option is equivalent to \code{yyjson_write_flag = write_flag$YYJSON_WRITE_PRETTY} #' @param name_repair How should unnamed items in a partially named list be handled? #' 'none' means to leave their names blank in JSON (which may not be valid JSON). #' 'minimal' means to use the integer position index of the item as its name if #' it is missing. Default: 'none' #' @param str_specials Should a special value of \code{NA} in a character vector #' be converted to a #' JSON \code{null} value, or converted to a string "NA"? Default: 'null' #' @param num_specials Should special numeric values (i.e. NA, NaN, Inf) be #' converted to a JSON \code{null} value or converted to a string #' representation e.g. "NA"/"NaN" etc. Default: 'null' #' @param yyjson_write_flag integer vector corresponding to internal \code{yyjson} #' options. See \code{yyjson_write_flag} in this package, and read #' the yyjson API documentation for more information. This is considered #' an advanced option. #' #' @seealso [yyjson_write_flag()] #' @return Named list of options for writing JSON #' @export #' #' @examples #' write_json_str(head(iris, 3), opts = opts_write_json(factor = 'integer')) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ opts_write_json <- function( digits = -1, pretty = FALSE, auto_unbox = FALSE, dataframe = c("rows", "columns"), factor = c("string", "integer"), name_repair = c('none', 'minimal'), num_specials = c('null', 'string'), str_specials = c('null', 'string'), yyjson_write_flag = 0L) { structure( list( digits = as.integer(digits), dataframe = match.arg(dataframe), factor = match.arg(factor), auto_unbox = isTRUE(auto_unbox), pretty = isTRUE(pretty), name_repair = match.arg(name_repair), str_specials = match.arg(str_specials), num_specials = match.arg(num_specials), yyjson_write_flag = as.integer(yyjson_write_flag) ), class = "opts_write_json" ) }
/scratch/gouwar.j/cran-all/cranData/yyjsonr/R/json-opts.R
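A minimal sketch, following the documented pattern above, of passing more than one internal `yyjson` write flag at once.

```r
opts <- opts_write_json(
  yyjson_write_flag = c(
    yyjson_write_flag$YYJSON_WRITE_PRETTY_TWO_SPACES,
    yyjson_write_flag$YYJSON_WRITE_ESCAPE_SLASHES
  )
)
write_json_str(list(path = "a/b"), opts = opts) |> cat()
```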
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' Convert JSON in a character string to R #' #' @param str a single character string #' @param opts Named list of options for parsing. Usually created by \code{opts_read_json()} #' @param ... Other named options can be used to override any options in \code{opts}. #' The valid named options are identical to arguments to [opts_read_json()] #' #' @family JSON Parsers #' @return R object #' @export #' #' @examples #' read_json_str("4294967297", opts = opts_read_json(int64 = 'string')) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ read_json_str <- function(str, opts = list(), ...) { .Call( parse_from_str_, str, modify_list(opts, list(...)) ) } #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' Convert JSON in a raw vector to R #' #' @inheritParams read_json_str #' @param raw_vec raw vector #' #' @family JSON Parsers #' @return R object #' @export #' #' @examples #' raw_str <- as.raw(utf8ToInt('[1, 2, 3, "four"]')) #' read_json_raw(raw_str) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ read_json_raw <- function(raw_vec, opts = list(), ...) { .Call( parse_from_raw_, raw_vec, modify_list(opts, list(...)) ) } #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' Convert JSON to R #' #' @inheritParams read_json_str #' @param filename full path to text file containing JSON. #' #' @family JSON Parsers #' @return R object #' @export #' #' @examples #' tmp <- tempfile() #' write_json_file(head(iris, 3), tmp) #' read_json_file(tmp) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ read_json_file <- function(filename, opts = list(), ...) { .Call( parse_from_file_, filename, modify_list(opts, list(...)) ) } #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' Parse JSON from an R connection object. #' #' Currently, this is not very efficient as the entire contents of the connection are #' read into R as a string and then the JSON parsed from there. #' #' For plain text files it is faster to use #' \code{read_json_file()}. #' #' @inheritParams read_json_str #' @param conn connection object. e.g. \code{url('https://jsonplaceholder.typicode.com/todos/1')} #' #' #' @examples #' if (interactive()) { #' read_json_conn(url("https://api.github.com/users/hadley/repos")) #' } #' #' #' @family JSON Parsers #' @return R object #' @export #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ read_json_conn <- function(conn, opts = list(), ...) { str <- paste(readLines(conn), collapse = "") read_json_str(str, opts, ...) } #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' Convert R object to JSON string #' #' @param x the object to be encoded #' @param opts Named list of serialization options. Usually created by [opts_write_json()] #' @param ... Other named options can be used to override any options in \code{opts}. #' The valid named options are identical to arguments to [opts_write_json()] #' #' @return Single string containing JSON #' #' @family JSON Serializer #' @export #' #' @examples #' write_json_str(head(iris, 3), pretty = TRUE) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ write_json_str <- function(x, opts = list(), ...) 
{ .Call( serialize_to_str_, x, modify_list(opts, list(...)) ) } #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' Convert R object to JSON file #' #' @inheritParams write_json_str #' @param filename filename #' #' @return None #' @family JSON Serializer #' @export #' #' @examples #' tmp <- tempfile() #' write_json_file(head(iris, 3), tmp) #' read_json_file(tmp) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ write_json_file <- function(x, filename, opts = list(), ...) { .Call( serialize_to_file_, x, filename, modify_list(opts, list(...)) ) invisible(NULL) } #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' Validate JSON in file or string #' #' @inheritParams read_json_file #' @param filename path to file containing JSON #' @param str character string containing JSON #' @param verbose logical. If the JSON is not valid, should a warning be #' shown giving details? #' #' @return Logical value. TRUE if JSON validates as OK, otherwise FALSE #' @export #' #' @examples #' tmp <- tempfile() #' write_json_file(head(iris, 3), tmp) #' validate_json_file(tmp) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ validate_json_file <- function(filename, verbose = FALSE, opts = list(), ...) { opts <- modify_list(opts, list(...)) .Call( validate_json_file_, filename, verbose, opts ) } #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' @rdname validate_json_file #' @export #' #' @examples #' str <- write_json_str(iris) #' validate_json_str(str) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ validate_json_str <- function(str, verbose = FALSE, opts = list(), ...) { opts <- modify_list(opts, list(...)) .Call( validate_json_str_, str, verbose, opts ) }
/scratch/gouwar.j/cran-all/cranData/yyjsonr/R/json.R
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' Work-a-like replacement for built-in 'modifyList' #' #' @param old,new lists #' @return updated list #' @noRd #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ modify_list <- function(old, new) { for (nm in names(new)) old[[nm]] <- new[[nm]] old }
/scratch/gouwar.j/cran-all/cranData/yyjsonr/R/utils.R
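A tiny sketch (not part of the package source) of how `modify_list()` merges per-call overrides into an options list, mirroring its use in `read_json_str()` and friends.

```r
old <- list(int64 = "string", num_specials = "special")
new <- list(int64 = "double")
modify_list(old, new)
#> $int64
#> [1] "double"
#>
#> $num_specials
#> [1] "special"
```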
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = FALSE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- suppressPackageStartupMessages({ library(yyjsonr) }) ## ----eval=FALSE--------------------------------------------------------------- # read_json_str(str) # read_json_str(str, opts = list()) # read_json_str(str, opts = opts_read_json()) ## ----eval=FALSE--------------------------------------------------------------- # read_json_str(str, opts = list(str_specials = 'string')) # read_json_str(str, opts = opts_read_json(str_specials = 'string')) # read_json_str(str, str_specials = 'string') ## ----------------------------------------------------------------------------- json <- '[1,2,3.1,"apple", null]' read_json_str(json) ## ----------------------------------------------------------------------------- yyjsonr::read_json_str(json, promote_num_to_string = TRUE) ## ----------------------------------------------------------------------------- str <- '[{"a":1, "b":2}, {"a":3, "b":4}]' read_json_str(str) ## ----------------------------------------------------------------------------- str <- '[{"a":1, "b":[1,2]}, {"a":3, "b":2}]' read_json_str(str) ## ----------------------------------------------------------------------------- str <- '[{"a":1, "b":[1,2]}, {"a":2}]' read_json_str(str) read_json_str(str, df_missing_list_elem = NA) ## ----------------------------------------------------------------------------- str <- '{"a":[1,2],"b":["apple", "banana"]}' read_json_str(str) read_json_str(str, obj_of_arrs_to_df = FALSE) ## ----------------------------------------------------------------------------- str_unequal <- '{"a":[1,2],"b":["apple", "banana", "carrot"]}' read_json_str(str_unequal) ## ----------------------------------------------------------------------------- str <- '[{"a":1, "b":2}, {"a":3, "b":4}]' read_json_str(str) read_json_str(str, arr_of_objs_to_df = FALSE) ## ----------------------------------------------------------------------------- str <- '[{"a":1, "b":2}, {"a":3, "b":4, "c":99}]' read_json_str(str) ## ----------------------------------------------------------------------------- str <- '["hello", "NA", null]' read_json_str(str) # default: str_specials = 'string' read_json_str(str, str_specials = 'special') ## ----------------------------------------------------------------------------- str <- '[1.23, "NA", "NaN", "Inf", "-Inf", null]' read_json_str(str) # default: num_specials = 'special' read_json_str(str, num_specials = 'string') ## ----echo=FALSE--------------------------------------------------------------- suppressPackageStartupMessages( library(bit64) ) ## ----------------------------------------------------------------------------- str <- '[1, 274877906944]' # default: int64 = 'string' # Since result is a mix of types, a list is returned read_json_str(str) # Read large integer as double robj <- read_json_str(str, int64 = 'double') class(robj) robj # Read large integer as 'bit64::integer64' type library(bit64) read_json_str(str, int64 = 'bit64') ## ----------------------------------------------------------------------------- read_json_str('67') |> str() read_json_str('[67]') |> str() read_json_str('67' , length1_array_asis = TRUE) |> str() read_json_str('[67]', length1_array_asis = TRUE) |> str() # Has 'AsIs' class ## ----------------------------------------------------------------------------- str <- '{"a":67, "b":[67], "c":[1,2]}' # Length-1 vectors output as 
JSON arrays read_json_str(str) |> write_json_str(auto_unbox = FALSE) |> cat() # Length-1 vectors output as JSON scalars read_json_str(str) |> write_json_str(auto_unbox = TRUE) |> cat() # Length-1 vectors output as JSON arrays read_json_str(str, length1_array_asis = TRUE) |> write_json_str(auto_unbox = FALSE) |> cat() # !!!! # Those values marked with 'AsIs' class when reading are output # as length-1 JSON arrays read_json_str(str, length1_array_asis = TRUE) |> write_json_str(auto_unbox = TRUE) |> cat() ## ----------------------------------------------------------------------------- # A reference list of all the possible YYJSON options yyjsonr::yyjson_read_flag read_json_str( "[1, 2, 3, ] // A JSON comment not allowed by the standard", opts = opts_read_json(yyjson_read_flag = c( yyjson_read_flag$YYJSON_READ_ALLOW_TRAILING_COMMAS, yyjson_read_flag$YYJSON_READ_ALLOW_COMMENTS )) )
/scratch/gouwar.j/cran-all/cranData/yyjsonr/inst/doc/from_json_options.R
--- title: "Configuration Options for Parsing from JSON" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Configuration Options for Parsing from JSON} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = FALSE, comment = "#>" ) ``` ```{r setup} suppressPackageStartupMessages({ library(yyjsonr) }) ``` Overview ----------------------------------------------------------------------------- This vignette: * introduces the `opts` argument for reading JSON with the `read_json_X()` family of functions. * outlines the creation of default options with `opts_read_json()` * provides extended examples of how these options control parsing of JSON The `opts` argument - Specifying options when reading JSON ----------------------------------------------------------------------------- All `read_json_x()` functions have an `opts` argument. `opts` takes a named list of options used to configure the way `yyjsonr` parses JSON into R objects. The default argument for `opts` is an empty list, which internally sets the default options for parsing. The default options for parsing can also be viewed by running `opts_read_json()`. The following three function calls are all equivalent ways of calling `read_json_str()` using the default options: ```{r eval=FALSE} read_json_str(str) read_json_str(str, opts = list()) read_json_str(str, opts = opts_read_json()) ``` Setting arguments to override the default options ----------------------------------------------------------------------------- Setting a single option (and keeping all other options at their default value) can be done in a number of ways. The following three function calls are all equivalent: ```{r eval=FALSE} read_json_str(str, opts = list(str_specials = 'string')) read_json_str(str, opts = opts_read_json(str_specials = 'string')) read_json_str(str, str_specials = 'string') ``` Option `promote_num_to_string` - mixtures of numeric and string types ----------------------------------------------------------------------------- By default, `yyjsonr` does not promote string values to numerica values i.e. `promote_num_to_string = FALSE`. If an array contains mixed types, then an R *list* will be returned, so that all JSON values retain their original type. ```{r} json <- '[1,2,3.1,"apple", null]' read_json_str(json) ``` If `promote_num_to_string` is set to `TRUE`, then `yyjsonr` will promote numeric types to strings if the following conditions are met: * values are stored in a JSON array * the JSON array only contains numerics, strings or the JSON `null` value ```{r} yyjsonr::read_json_str(json, promote_num_to_string = TRUE) ``` Option `df_missing_list_elem` - Missing list elements (when parsing data.frames) ----------------------------------------------------------------------------- When JSON data is being parsed into an R data.frame some columns become *list-columns* if there are mixed types in the original JSON. It is possible that some values are completely missing in the JSON representation, and the `df_missing_list_elem` specifies the replacement for this missing value in the R data.frame. The default value is `df_missing_list_elem = NULL`. 
### JSON to data.frame (no *list columns* needed) ```{r} str <- '[{"a":1, "b":2}, {"a":3, "b":4}]' read_json_str(str) ``` ### JSON to data.frame - *list-columns* required ```{r} str <- '[{"a":1, "b":[1,2]}, {"a":3, "b":2}]' read_json_str(str) ``` ```{r} str <- '[{"a":1, "b":[1,2]}, {"a":2}]' read_json_str(str) read_json_str(str, df_missing_list_elem = NA) ``` Option `obj_of_arrs_to_df` - Reading JSON as a data.frame ----------------------------------------------------------------------------- By default, if JSON looks like it represents a data.frame it will be loaded as such. That is, a JSON `{}` object which contains only `[]` arrays (all of equal length) will be treated as data.frame. This is the default i.e. `obj_of_arrs_to_df = TRUE`. If `obj_of_arrs_to_df = FALSE` then this data will be read in as a named list. In addition, if the `[]` arrays are not all the same length, then the data will also be read in as a named list as no inference of missing values will be done. ```{r} str <- '{"a":[1,2],"b":["apple", "banana"]}' read_json_str(str) read_json_str(str, obj_of_arrs_to_df = FALSE) ``` ```{r} str_unequal <- '{"a":[1,2],"b":["apple", "banana", "carrot"]}' read_json_str(str_unequal) ``` Option `arr_of_objs_to_df` - Reading JSON as a data.frame ----------------------------------------------------------------------------- ```{r} str <- '[{"a":1, "b":2}, {"a":3, "b":4}]' read_json_str(str) read_json_str(str, arr_of_objs_to_df = FALSE) ``` ```{r} str <- '[{"a":1, "b":2}, {"a":3, "b":4, "c":99}]' read_json_str(str) ``` Option `str_specials` - Reading string `"NA"` from JSON ----------------------------------------------------------------------------- JSON only really has the value `null` for representing special missing values, and this is converted to an R `NA_character_` value when it is encountered in a string-ish context. When `yyjsonr` encounters a literal `"NA"` value in a string-ish context, its conversion to an R value is controlled by the `str_specials` options The possible values for the `str_specials` argument are: * `string` read in as the literal character string `"NA"` (the default behaviour) * `special` read in as `NA_character_` ```{r} str <- '["hello", "NA", null]' read_json_str(str) # default: str_specials = 'string' read_json_str(str, str_specials = 'special') ``` Option `num_specials` - Reading numeric `"NA"`, `"NaN"` and `"Inf"` ----------------------------------------------------------------------------- JSON only really has the value `null` for representing special missing values, and this is converted to an R `NA_integer_` or `NA_real_` value when it is encountered in a number-ish context. When `yyjsonr` encounters a literal `"NA"`, `"NaN"` or `"Inf"` value in a number-ish context, its conversion to an R value is controlled by the `num_specials` options. The possible values for the `num_specials` argument are: * `special` read in as an actual numeric `NA`, `NaN` or `Inf` value (the default behaviour) * `string` read in as the literal character string `"NA"` etc ```{r} str <- '[1.23, "NA", "NaN", "Inf", "-Inf", null]' read_json_str(str) # default: num_specials = 'special' read_json_str(str, num_specials = 'string') ``` Option `int64` - large integer support ----------------------------------------------------------------------------- JSON supports large integers outside the range of R's 32-bit integer type. When such a large value is encountered in JSON, the `int64` option controls the value's representation in R. 
The possible values for the `int64` option are:

* `string` stores the JSON integer as a string in R
* `double` stores the JSON integer as a double-precision numeric.  If the integer is outside the range +/- 2^53, then it may not be stored perfectly in the double.
* `bit64` converts to a 64-bit integer supported by the [`{bit64}`](https://cran.r-project.org/package=bit64) package.

```{r echo=FALSE}
suppressPackageStartupMessages(
  library(bit64)
)
```

```{r}
str <- '[1, 274877906944]'

# default: int64 = 'string'
# Since result is a mix of types, a list is returned
read_json_str(str)

# Read large integer as double
robj <- read_json_str(str, int64 = 'double')
class(robj)
robj

# Read large integer as 'bit64::integer64' type
library(bit64)
read_json_str(str, int64 = 'bit64')
```

Option `length1_array_asis` - distinguishing scalars from length-1 vectors
-----------------------------------------------------------------------------

JSON supports the concept of both scalar and vector values, i.e. in JSON the scalar `67` is different from the length-1 array `[67]`.

The `length1_array_asis` option is for situations where it is important to distinguish these value types in R.  However, R does not make this distinction between scalars and vectors of length 1.

To assist in translating objects from JSON to R and back to JSON, setting `length1_array_asis = TRUE` will mark JSON arrays of length 1 with the class `AsIs`.  This option defaults to `FALSE`.

```{r}
read_json_str('67') |> str()
read_json_str('[67]') |> str()
read_json_str('67' , length1_array_asis = TRUE) |> str()
read_json_str('[67]', length1_array_asis = TRUE) |> str()   # Has 'AsIs' class
```

This option is then used with the option `auto_unbox` when writing JSON in order to control how length-1 R vectors are written.  As shown below, if a length-1 vector was marked with the `AsIs` class when reading, then when it is written back out with `auto_unbox = TRUE` it is still output as a length-1 JSON array rather than a scalar.

In the following example, only the second value (`[67]`) is affected by the option `length1_array_asis`.  When the option is `TRUE` the value is tagged with a class of `AsIs`.  Then when the created R object is subsequently written out to a JSON string, its structure is determined by `auto_unbox`, which understands how to handle this class.

```{r}
str <- '{"a":67, "b":[67], "c":[1,2]}'

# Length-1 vectors output as JSON arrays
read_json_str(str) |> write_json_str(auto_unbox = FALSE) |> cat()

# Length-1 vectors output as JSON scalars
read_json_str(str) |> write_json_str(auto_unbox = TRUE) |> cat()

# Length-1 vectors output as JSON arrays
read_json_str(str, length1_array_asis = TRUE) |> write_json_str(auto_unbox = FALSE) |> cat()

# !!!!
# Those values marked with 'AsIs' class when reading are output
# as length-1 JSON arrays
read_json_str(str, length1_array_asis = TRUE) |> write_json_str(auto_unbox = TRUE) |> cat()
```

Option `yyjson_read_flag` - internal `YYJSON` C library options
-----------------------------------------------------------------------------

The `yyjson` C library supports a number of internal options for reading JSON.

These options are considered advanced, and the user is referred to the [`yyjson` documentation](https://ibireme.github.io/yyjson/doc/doxygen/html/md_doc__a_p_i.html#autotoc_md36) for further explanation on what they control.

**Warning**: some of these advanced options do not make sense for interfacing with R, or otherwise conflict with how this package converts JSON to R objects.
```{r} # A reference list of all the possible YYJSON options yyjsonr::yyjson_read_flag read_json_str( "[1, 2, 3, ] // A JSON comment not allowed by the standard", opts = opts_read_json(yyjson_read_flag = c( yyjson_read_flag$YYJSON_READ_ALLOW_TRAILING_COMMAS, yyjson_read_flag$YYJSON_READ_ALLOW_COMMENTS )) ) ```
/scratch/gouwar.j/cran-all/cranData/yyjsonr/inst/doc/from_json_options.Rmd
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(yyjsonr) ## ----------------------------------------------------------------------------- # A simple 3D array mat <- array(1:12, dim = c(2,3,2)) mat ## ----------------------------------------------------------------------------- # jsonlite's serialization of matrices is internally consistent and re-parses # to the initial matrix. str <- jsonlite::toJSON(mat, pretty = TRUE) cat(str) jsonlite::fromJSON(str) ## ----------------------------------------------------------------------------- # yyjsonr's serialization of matrices is internally consistent and re-parses # to the initial matrix. # But note that it is *different* to what jsonlite does. str <- yyjsonr::write_json_str(mat, pretty = TRUE) cat(str) yyjsonr::read_json_str(str)
/scratch/gouwar.j/cran-all/cranData/yyjsonr/inst/doc/jsonlite-comparison.R
--- title: "Comparison to jsonlite parsing" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Comparison to jsonlite parsing} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(yyjsonr) ``` Parsing differences compared to `{jsonlite}` ============================================================================= `{jsonlite}` and `{yyjsonr}` may read and write some JSON differently due to varying assumptions, data configurations or option settings. This document keeps a record of major differences to be aware of. In `yyjsonr` 3-d arrays are parsed as multiple 2-d matrices and combined ----------------------------------------------------------------------------- In `{yyjsonr}` the order in which elements in an array are serialized to JSON correspond to a JSON `[]` array of row-major matrices in human-readable order. `{jsonlite}` does things differently. The array formats are internally consistent within each package, but not cross-compatible between them i.e. you cannot serialize an array in `{yyjsonr}` and re-create it exactly using `{jsonlite}`. In the examples below, a simple 3d matrix is serialized with both `jsonlite` and `yyjsonr`. ```{r} # A simple 3D array mat <- array(1:12, dim = c(2,3,2)) mat ``` ```{r} # jsonlite's serialization of matrices is internally consistent and re-parses # to the initial matrix. str <- jsonlite::toJSON(mat, pretty = TRUE) cat(str) jsonlite::fromJSON(str) ``` ```{r} # yyjsonr's serialization of matrices is internally consistent and re-parses # to the initial matrix. # But note that it is *different* to what jsonlite does. str <- yyjsonr::write_json_str(mat, pretty = TRUE) cat(str) yyjsonr::read_json_str(str) ```
/scratch/gouwar.j/cran-all/cranData/yyjsonr/inst/doc/jsonlite-comparison.Rmd
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = FALSE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- suppressPackageStartupMessages({ library(yyjsonr) }) ## ----eval=FALSE--------------------------------------------------------------- # write_json_str(iris) # write_json_str(iris, opts = list()) # write_json_str(iris, opts = opts_write_json()) ## ----eval=FALSE--------------------------------------------------------------- # write_json_str(iris, opts = list(str_specials = 'string')) # write_json_str(iris, opts = opts_write_json(str_specials = 'string')) # write_json_str(iris, str_specials = 'string') ## ----------------------------------------------------------------------------- robj <- c(1, 1.23, 3.141592654) write_json_str(robj) write_json_str(robj, digits = 2) write_json_str(robj, digits = 0) ## ----------------------------------------------------------------------------- robj <- head(iris, 2) write_json_str(robj) |> cat() write_json_str(robj, pretty = TRUE) |> cat() ## ----------------------------------------------------------------------------- robj <- list(1, c(1, 2), NA) write_json_str(robj) |> cat() write_json_str(robj, auto_unbox = TRUE) |> cat() ## ----------------------------------------------------------------------------- robj <- head(iris, 3) write_json_str(robj, pretty = TRUE) |> cat() write_json_str(robj, pretty = TRUE, dataframe = "cols") |> cat() ## ----echo=FALSE--------------------------------------------------------------- set.seed(1) ## ----------------------------------------------------------------------------- robj <- sample(iris$Species, 10) write_json_str(robj) |> cat() write_json_str(robj, factor = 'integer') |> cat() ## ----------------------------------------------------------------------------- robj <- list(a = 1, b = 2, 67) write_json_str(robj, pretty = TRUE) |> cat() write_json_str(robj, pretty = TRUE, name_repair = 'minimal') |> cat() ## ----------------------------------------------------------------------------- robj <- c(1.23, NA_real_, NaN, Inf, -Inf) write_json_str(robj) |> cat() write_json_str(robj, num_specials = 'string') |> cat() ## ----------------------------------------------------------------------------- robj <- c("hello", NA_character_) write_json_str(robj) |> cat() write_json_str(robj, str_specials = 'string') |> cat() ## ----------------------------------------------------------------------------- # A reference list of all the possible YYJSON options yyjsonr::yyjson_write_flag write_json_str( c('hello / there', '#RStats'), opts = opts_write_json(yyjson_write_flag = c( yyjson_write_flag$YYJSON_WRITE_ESCAPE_SLASHES )) ) |> cat()
/scratch/gouwar.j/cran-all/cranData/yyjsonr/inst/doc/to_json_options.R
--- title: "Configuration Options for Serializing to JSON" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Configuration Options for Serializing to JSON} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = FALSE, comment = "#>" ) ``` ```{r setup} suppressPackageStartupMessages({ library(yyjsonr) }) ``` Overview ----------------------------------------------------------------------------- This vignette: * introduces the `opts` argument for writing JSON with the `write_json_X()` family of functions. * outlines the creation of default options with `opts_write_json()` * provides extended examples of how these options control writing JSON The `opts` argument - Specifying options when reading JSON ----------------------------------------------------------------------------- All `write_json_x()` functions have an `opts` argument. `opts` takes a named list of options used to configure the way `yyjsonr` writes JSON from R objects. The default argument for `opts` is an empty list, which internally sets the default options for writing. The default options for writing JSON can also be viewed by running `opts_write_json()`. The following three function calls are all equivalent ways of calling `write_json_str()` using the default options: ```{r eval=FALSE} write_json_str(iris) write_json_str(iris, opts = list()) write_json_str(iris, opts = opts_write_json()) ``` Setting arguments to override the default options ----------------------------------------------------------------------------- Setting a single option (and keeping all other options at their default value) can be done in a number of ways. The following three function calls are all equivalent: ```{r eval=FALSE} write_json_str(iris, opts = list(str_specials = 'string')) write_json_str(iris, opts = opts_write_json(str_specials = 'string')) write_json_str(iris, str_specials = 'string') ``` Option `digits` - Number of decimal places for numeric values ------------------------------------------------------------------------------- The `digits` option controls the number of decimal places output for numeric values. The default value of `digits = -1` indicates that the internal `yyjson` C library formatting should be used. ```{r} robj <- c(1, 1.23, 3.141592654) write_json_str(robj) write_json_str(robj, digits = 2) write_json_str(robj, digits = 0) ``` Option `pretty` - Use whitespace to make the JSON pretty ------------------------------------------------------------------------------- The `pretty` option is a logical value indicating whether or not whitespace should be used to make the resulting JSON more readable. ```{r} robj <- head(iris, 2) write_json_str(robj) |> cat() write_json_str(robj, pretty = TRUE) |> cat() ``` Option `auto_unbox` - Handling for R vectors of length 1 ------------------------------------------------------------------------------- The `auto_unbox` option is a logical value indicating whether single values should be written as JSON scalars or JSON arrays (with length 1). When `auto_unbox = FALSE` (the default), single values are always written as a JSON array i.e. within `[]` brackets. 
When `auto_unbox = TRUE`, single values are written as bare JSON scalar values ```{r} robj <- list(1, c(1, 2), NA) write_json_str(robj) |> cat() write_json_str(robj, auto_unbox = TRUE) |> cat() ``` Option `dataframe` - Orientation of data.frame output ------------------------------------------------------------------------------- The `dataframe` option controls the orientation of the data output to JSON: * `dataframe = "rows"` (the default) writes the data one-row-at-a-time as a JSON `[]` array containing a JSON `{}` object for each row. * `dataframe = "cols"` writes the data one-column-at-a-time as a JSON `{}` object containing JSON `[]` arrays. ```{r} robj <- head(iris, 3) write_json_str(robj, pretty = TRUE) |> cat() write_json_str(robj, pretty = TRUE, dataframe = "cols") |> cat() ``` Option `factor` - factor representation ------------------------------------------------------------------------------- The `factor` option indicates whether factors should be output as `string` (the default) or `integer` values. ```{r echo=FALSE} set.seed(1) ``` ```{r} robj <- sample(iris$Species, 10) write_json_str(robj) |> cat() write_json_str(robj, factor = 'integer') |> cat() ``` Option `name_repair` - Dealing with missing names in lists ------------------------------------------------------------------------------- When writing R lists which are only partially named, `name_repair` controls the names which are generated for the JSON output. * `name_repair = "none"` (the default) means that no names are created, and an empty string will be used as the key. * `name_repair = "minimal"` will generate default names for each unnamed list item based upon its position in the list. ```{r} robj <- list(a = 1, b = 2, 67) write_json_str(robj, pretty = TRUE) |> cat() write_json_str(robj, pretty = TRUE, name_repair = 'minimal') |> cat() ``` Option `num_specials` - Writing numeric `NA`, `NaN` and `Inf` ------------------------------------------------------------------------------- JSON only has a single `null` value as a representation of missing-ness or special-ness of a value. That is, it has no natural representations to distinguish the special R numeric values like `NA`, `NaN` and `Inf`. The `num_specials` option configures handling of these values in the JSON output: * `num_specials = "null"` (the default) will write special numeric values as JSON `null` values. * `num_specials = "string"` will write string representations of these values. ```{r} robj <- c(1.23, NA_real_, NaN, Inf, -Inf) write_json_str(robj) |> cat() write_json_str(robj, num_specials = 'string') |> cat() ``` Option `str_specials` - Writing character `NA` ------------------------------------------------------------------------------- JSON only has a single `null` value as a representation of missing-ness or special-ness of a value. That is, it has no specific representation of `NA_character_`. The `str_specials` option configures handling of `NA_character_` values in the JSON output: * `str_specials = "null"` (the default) will write `NA_character_` as JSON `null`. * `str_specials = "string"` will write `NA_character_` as `"NA"`. ```{r} robj <- c("hello", NA_character_) write_json_str(robj) |> cat() write_json_str(robj, str_specials = 'string') |> cat() ``` Option `yyjson_write_flag` - internal `YYJSON` C library options ----------------------------------------------------------------------------- The `yyjson` C library supports a number of internal options for writing JSON. 
These options are considered advanced, and the user is referred to the [`yyjson` documentation](https://ibireme.github.io/yyjson/doc/doxygen/html/md_doc__a_p_i.html#autotoc_md43) for further explanation on what they control. **Warning**: some of these advanced options do not make sense for interfacing with R, or otherwise conflict with how this package converts R objects to JSON. ```{r} # A reference list of all the possible YYJSON options yyjsonr::yyjson_write_flag write_json_str( c('hello / there', '#RStats'), opts = opts_write_json(yyjson_write_flag = c( yyjson_write_flag$YYJSON_WRITE_ESCAPE_SLASHES )) ) |> cat() ```
/scratch/gouwar.j/cran-all/cranData/yyjsonr/inst/doc/to_json_options.Rmd
--- title: "Benchmarks" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Benchmarks} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", fig.width = 6, fig.height = 4 ) ``` ```{r setup} library(yyjsonr) library(bench) library(ggplot2) library(tidyr) library(ggbeeswarm) library(geojsonsf) library(sf) ``` Benchmark overview ============================================================================== * Benchmarking was done an Apple M2 Silicon. * Test-cases were drawn from other packages and other examples seen in the wild Validate JSON String ---------------------------------------------------------------------------- ```{r} json_str <- write_json_str(iris) res00 <- bench::mark( jsonlite = jsonlite::validate(json_str), jsonify = jsonify::validate_json(json_str), yyjsonr = yyjsonr::validate_json_str(json_str), check = TRUE ) ``` ```{r echo=FALSE} res00$benchmark <- 'Validate "iris" in JSON String' knitr::kable(res00[,1:5]) plot(res00) + theme_bw() + theme(legend.position = 'none') ``` To JSON String ---------------------------------------------------------------------------- ```{r} res01 <- bench::mark( jsonlite = jsonlite::toJSON(iris), jsonify = jsonify::to_json(iris), yyjsonr = yyjsonr::write_json_str(iris), check = FALSE ) ``` ```{r echo=FALSE} res01$benchmark <- 'Convert "iris" to JSON String' knitr::kable(res01[,1:5]) plot(res01) + theme_bw() + theme(legend.position = 'none') ``` From JSON String ---------------------------------------------------------------------------- ```{r} json_str <- write_json_str(iris) res02 <- bench::mark( jsonlite = jsonlite::fromJSON(json_str), jsonify = jsonify::from_json(json_str), yyjsonr = yyjsonr::read_json_str(json_str), check = TRUE ) ``` ```{r echo=FALSE} res02$benchmark <- 'Read "iris" from JSON String' knitr::kable(res02[,1:5]) plot(res02) + theme_bw() + theme(legend.position = 'none') ``` From JSON raw vector ---------------------------------------------------------------------------- ```{r warning=FALSE} # a <- nanonext::ncurl("https://postman-echo.com/get", convert = FALSE) # raw_data <- a$data raw_data <- as.raw(c(0x7b, 0x0a, 0x20, 0x20, 0x22, 0x61, 0x72, 0x67, 0x73, 0x22, 0x3a, 0x20, 0x7b, 0x7d, 0x2c, 0x0a, 0x20, 0x20, 0x22, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0x3a, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2d, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x3a, 0x20, 0x22, 0x68, 0x74, 0x74, 0x70, 0x73, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2d, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x2d, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x3a, 0x20, 0x22, 0x34, 0x34, 0x33, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x22, 0x68, 0x6f, 0x73, 0x74, 0x22, 0x3a, 0x20, 0x22, 0x70, 0x6f, 0x73, 0x74, 0x6d, 0x61, 0x6e, 0x2d, 0x65, 0x63, 0x68, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2d, 0x61, 0x6d, 0x7a, 0x6e, 0x2d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2d, 0x69, 0x64, 0x22, 0x3a, 0x20, 0x22, 0x52, 0x6f, 0x6f, 0x74, 0x3d, 0x31, 0x2d, 0x36, 0x35, 0x33, 0x62, 0x61, 0x33, 0x38, 0x65, 0x2d, 0x35, 0x65, 0x65, 0x66, 0x32, 0x39, 0x64, 0x38, 0x30, 0x61, 0x35, 0x63, 0x65, 0x62, 0x32, 0x30, 0x33, 0x65, 0x36, 0x64, 0x32, 0x64, 0x35, 0x61, 0x22, 0x0a, 0x20, 0x20, 0x7d, 0x2c, 0x0a, 0x20, 0x20, 0x22, 0x75, 0x72, 0x6c, 0x22, 0x3a, 0x20, 0x22, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x70, 0x6f, 0x73, 0x74, 0x6d, 0x61, 0x6e, 
0x2d, 0x65, 0x63, 0x68, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x65, 0x74, 0x22, 0x0a, 0x7d )) res03 <- bench::mark( jsonlite = jsonlite::fromJSON(rawConnection(raw_data)), yyjsonr = yyjsonr::read_json_raw(raw_data), check = FALSE ) ``` ```{r echo=FALSE} res03$benchmark <- 'From JSON Raw Vector' knitr::kable(res03[,1:5]) plot(res03) + theme_bw() + theme(legend.position = 'none') ``` To JSON File ---------------------------------------------------------------------------- ```{r} json_file <- tempfile() res04 <- bench::mark( jsonlite = jsonlite::write_json(iris, json_file), yyjsonr = yyjsonr::write_json_file(iris, json_file), check = FALSE ) ``` ```{r echo=FALSE} res04$benchmark <- 'Write "iris" to JSON File' knitr::kable(res04[, 1:5]) plot(res04) + theme_bw() + theme(legend.position = 'none') ``` From JSON File ------------------------------------------------------------------------------ ```{r} json_file <- tempfile() jsonlite::write_json(iris, json_file) res05 <- bench::mark( jsonlite = jsonlite::fromJSON(file(json_file)), jsonify = jsonify::from_json(json_file), yyjsonr = yyjsonr::read_json_file(json_file), check = TRUE ) ``` ```{r echo=FALSE} res05$benchmark <- 'Read "iris" from JSON File' knitr::kable(res05[, 1:5]) plot(res05) + theme_bw() + theme(legend.position = 'none') ``` Write modest data.frame to string (10 thousand rows) ---------------------------------------------------------------------------- ```{r} n <- 1e5 df <- data.frame( id = 1:n , value = sample(letters, size = n, replace = T) , val2 = rnorm(n = n) , log = sample(c(T,F), size = n, replace = T) , stringsAsFactors = FALSE ) res10 <- bench::mark( jsonlite = jsonlite::toJSON( df ), jsonify = jsonify::to_json( df ), yyjsonr = yyjsonr::write_json_str( df ), check = FALSE ) ``` ```{r echo=FALSE} res10$benchmark <- '10k row data.frame to file' knitr::kable(res10[,1:5]) plot(res10) + theme_bw() + theme(legend.position = 'none') ``` Read modest data.frame from string (10 thousand rows) ---------------------------------------------------------------------------- ```{r} str <- jsonlite::toJSON( df ) res11 <- bench::mark( jsonlite = jsonlite::fromJSON( str ), jsonify = jsonify::from_json( str ), # rcppsimdjson = RcppSimdJson::fparse( str ), yyjsonr = yyjsonr::read_json_str( str ), check = TRUE ) ``` ```{r echo=FALSE} res11$benchmark <- '10k row data.frame from string' knitr::kable(res11[,1:5]) plot(res11) + theme_bw() + theme(legend.position = 'none') ``` Summary =============================================================================== ```{r echo = FALSE, fig.width = 8, fig.height = 6} library(dplyr) plot_df <- bind_rows( res00, res01, res02, res03, res04, res05, # res13, res14, # geojson #res06, res07, res08, # res09, res10, res11 #, res12, ) plot_df$benchmark <- factor( plot_df$benchmark, levels = unique(plot_df$benchmark) ) plot_df <- plot_df %>% mutate( package = as.character(expression), iters = `itr/sec`, speed = iters ) %>% select(benchmark, package, iters, speed) plot_df <- plot_df %>% group_by(benchmark) %>% mutate( ref_speed = speed[which(package %in% c('jsonlite', 'geojsonsf'))], speed = speed / ref_speed ) %>% ungroup() ggplot(plot_df) + geom_col(aes(package, speed, fill = package), position = position_dodge2(preserve = "single")) + facet_wrap(~benchmark, scales = 'free_y', ncol = 3) + theme_bw() + theme(legend.position = 'none') + scale_fill_manual(values = c(rep(grey(0.5), 2), 'dodgerblue3')) + geom_hline(yintercept = 1, color = 'red', alpha = 0.5, linetype = 2) + labs( x = NULL, y = "Factor speed increase 
over reference implementation", title = "Speed-up compared to reference implementation", subtitle = "Red line indicates reference implementation {jsonlite}" ) if (FALSE) { ggsave("./man/figures/benchmark-summary.png", width = 12, height = 10) saveRDS(plot_df, "man/benchmark/cache-df-types.rds") } ```
/scratch/gouwar.j/cran-all/cranData/yyjsonr/man/benchmark/benchmarks.Rmd
--- title: "Configuration Options for Parsing from JSON" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Configuration Options for Parsing from JSON} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = FALSE, comment = "#>" ) ``` ```{r setup} suppressPackageStartupMessages({ library(yyjsonr) }) ``` Overview ----------------------------------------------------------------------------- This vignette: * introduces the `opts` argument for reading JSON with the `read_json_X()` family of functions. * outlines the creation of default options with `opts_read_json()` * provides extended examples of how these options control parsing of JSON The `opts` argument - Specifying options when reading JSON ----------------------------------------------------------------------------- All `read_json_x()` functions have an `opts` argument. `opts` takes a named list of options used to configure the way `yyjsonr` parses JSON into R objects. The default argument for `opts` is an empty list, which internally sets the default options for parsing. The default options for parsing can also be viewed by running `opts_read_json()`. The following three function calls are all equivalent ways of calling `read_json_str()` using the default options: ```{r eval=FALSE} read_json_str(str) read_json_str(str, opts = list()) read_json_str(str, opts = opts_read_json()) ``` Setting arguments to override the default options ----------------------------------------------------------------------------- Setting a single option (and keeping all other options at their default value) can be done in a number of ways. The following three function calls are all equivalent: ```{r eval=FALSE} read_json_str(str, opts = list(str_specials = 'string')) read_json_str(str, opts = opts_read_json(str_specials = 'string')) read_json_str(str, str_specials = 'string') ``` Option `promote_num_to_string` - mixtures of numeric and string types ----------------------------------------------------------------------------- By default, `yyjsonr` does not promote string values to numerica values i.e. `promote_num_to_string = FALSE`. If an array contains mixed types, then an R *list* will be returned, so that all JSON values retain their original type. ```{r} json <- '[1,2,3.1,"apple", null]' read_json_str(json) ``` If `promote_num_to_string` is set to `TRUE`, then `yyjsonr` will promote numeric types to strings if the following conditions are met: * values are stored in a JSON array * the JSON array only contains numerics, strings or the JSON `null` value ```{r} yyjsonr::read_json_str(json, promote_num_to_string = TRUE) ``` Option `df_missing_list_elem` - Missing list elements (when parsing data.frames) ----------------------------------------------------------------------------- When JSON data is being parsed into an R data.frame some columns become *list-columns* if there are mixed types in the original JSON. It is possible that some values are completely missing in the JSON representation, and the `df_missing_list_elem` specifies the replacement for this missing value in the R data.frame. The default value is `df_missing_list_elem = NULL`. 
### JSON to data.frame (no *list columns* needed) ```{r} str <- '[{"a":1, "b":2}, {"a":3, "b":4}]' read_json_str(str) ``` ### JSON to data.frame - *list-columns* required ```{r} str <- '[{"a":1, "b":[1,2]}, {"a":3, "b":2}]' read_json_str(str) ``` ```{r} str <- '[{"a":1, "b":[1,2]}, {"a":2}]' read_json_str(str) read_json_str(str, df_missing_list_elem = NA) ``` Option `obj_of_arrs_to_df` - Reading JSON as a data.frame ----------------------------------------------------------------------------- By default, if JSON looks like it represents a data.frame it will be loaded as such. That is, a JSON `{}` object which contains only `[]` arrays (all of equal length) will be treated as data.frame. This is the default i.e. `obj_of_arrs_to_df = TRUE`. If `obj_of_arrs_to_df = FALSE` then this data will be read in as a named list. In addition, if the `[]` arrays are not all the same length, then the data will also be read in as a named list as no inference of missing values will be done. ```{r} str <- '{"a":[1,2],"b":["apple", "banana"]}' read_json_str(str) read_json_str(str, obj_of_arrs_to_df = FALSE) ``` ```{r} str_unequal <- '{"a":[1,2],"b":["apple", "banana", "carrot"]}' read_json_str(str_unequal) ``` Option `arr_of_objs_to_df` - Reading JSON as a data.frame ----------------------------------------------------------------------------- ```{r} str <- '[{"a":1, "b":2}, {"a":3, "b":4}]' read_json_str(str) read_json_str(str, arr_of_objs_to_df = FALSE) ``` ```{r} str <- '[{"a":1, "b":2}, {"a":3, "b":4, "c":99}]' read_json_str(str) ``` Option `str_specials` - Reading string `"NA"` from JSON ----------------------------------------------------------------------------- JSON only really has the value `null` for representing special missing values, and this is converted to an R `NA_character_` value when it is encountered in a string-ish context. When `yyjsonr` encounters a literal `"NA"` value in a string-ish context, its conversion to an R value is controlled by the `str_specials` options The possible values for the `str_specials` argument are: * `string` read in as the literal character string `"NA"` (the default behaviour) * `special` read in as `NA_character_` ```{r} str <- '["hello", "NA", null]' read_json_str(str) # default: str_specials = 'string' read_json_str(str, str_specials = 'special') ``` Option `num_specials` - Reading numeric `"NA"`, `"NaN"` and `"Inf"` ----------------------------------------------------------------------------- JSON only really has the value `null` for representing special missing values, and this is converted to an R `NA_integer_` or `NA_real_` value when it is encountered in a number-ish context. When `yyjsonr` encounters a literal `"NA"`, `"NaN"` or `"Inf"` value in a number-ish context, its conversion to an R value is controlled by the `num_specials` options. The possible values for the `num_specials` argument are: * `special` read in as an actual numeric `NA`, `NaN` or `Inf` value (the default behaviour) * `string` read in as the literal character string `"NA"` etc ```{r} str <- '[1.23, "NA", "NaN", "Inf", "-Inf", null]' read_json_str(str) # default: num_specials = 'special' read_json_str(str, num_specials = 'string') ``` Option `int64` - large integer support ----------------------------------------------------------------------------- JSON supports large integers outside the range of R's 32-bit integer type. When such a large value is encountered in JSON, the `int64` option controls the value's representation in R. 
The possible values for the `int64` option are: * `string` store JSON integer as a string in R * `double` will store the JSON integer as a double precisision numeric. If the integer is outside the range +/- 2^53, then it may not be stored perfectly in the double. * `bit64` convert to a 64-bit integer supported by the [`{bit64}`](https://cran.r-project.org/package=bit64) package. ```{r echo=FALSE} suppressPackageStartupMessages( library(bit64) ) ``` ```{r} str <- '[1, 274877906944]' # default: int64 = 'string' # Since result is a mix of types, a list is returned read_json_str(str) # Read large integer as double robj <- read_json_str(str, int64 = 'double') class(robj) robj # Read large integer as 'bit64::integer64' type library(bit64) read_json_str(str, int64 = 'bit64') ``` Option `length1_array_asis` - distinguishing scalars from length-1 vectors ----------------------------------------------------------------------------- JSON supports the concept of both scalar and vector values i.e. in JSON scalar `67` is different from an array of length 1 `[67]`. The `length1_array_asis` option is for situations where it is important to distinguish these value types in R. However, R does not make this distinction between scalars and vectors of length 1. To assist in translating objects from JSON to R and back to JSON, setting `length1_array_asis = TRUE` will mark JSON arrays of length 1 with the class `AsIs`. This option defaults to `FALSE`. ```{r} read_json_str('67') |> str() read_json_str('[67]') |> str() read_json_str('67' , length1_array_asis = TRUE) |> str() read_json_str('[67]', length1_array_asis = TRUE) |> str() # Has 'AsIs' class ``` This option is then used with the option `auto_unbox` when writing JSON in order to control how length-1 R vectors are written. Shown below, if the length-1 vector is marked with `AsIs` class when reading, then when writing out to JSON with `auto_unbox = TRUE` it becomes a JSON vector value. In the following example, only the second value (`[67]`) is affected by the option `length1_array_asis`. When the option is `TRUE` the value is tagged with a class of `AsIs`. Then when the created R object is subsequently written out to a JSON string, its structure is determined by `auto_unbox` which understands how to handle this class. ```{r} str <- '{"a":67, "b":[67], "c":[1,2]}' # Length-1 vectors output as JSON arrays read_json_str(str) |> write_json_str(auto_unbox = FALSE) |> cat() # Length-1 vectors output as JSON scalars read_json_str(str) |> write_json_str(auto_unbox = TRUE) |> cat() # Length-1 vectors output as JSON arrays read_json_str(str, length1_array_asis = TRUE) |> write_json_str(auto_unbox = FALSE) |> cat() # !!!! # Those values marked with 'AsIs' class when reading are output # as length-1 JSON arrays read_json_str(str, length1_array_asis = TRUE) |> write_json_str(auto_unbox = TRUE) |> cat() ``` Option `yyjson_read_flag` - internal `YYJSON` C library options ----------------------------------------------------------------------------- The `yyjson` C library supports a number of internal options for reading JSON. These options are considered advanced, and the user is referred to the [`yyjson` documentation](https://ibireme.github.io/yyjson/doc/doxygen/html/md_doc__a_p_i.html#autotoc_md36) for further explanation on what they control. **Warning**: some of these advanced options do not make sense for interfacing with R, or otherwise conflict with how this package converts JSON to R objects. 
```{r} # A reference list of all the possible YYJSON options yyjsonr::yyjson_read_flag read_json_str( "[1, 2, 3, ] // A JSON comment not allowed by the standard", opts = opts_read_json(yyjson_read_flag = c( yyjson_read_flag$YYJSON_READ_ALLOW_TRAILING_COMMAS, yyjson_read_flag$YYJSON_READ_ALLOW_COMMENTS )) ) ```
/scratch/gouwar.j/cran-all/cranData/yyjsonr/vignettes/from_json_options.Rmd
--- title: "Comparison to jsonlite parsing" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Comparison to jsonlite parsing} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(yyjsonr) ``` Parsing differences compared to `{jsonlite}` ============================================================================= `{jsonlite}` and `{yyjsonr}` may read and write some JSON differently due to varying assumptions, data configurations or option settings. This document keeps a record of major differences to be aware of. In `yyjsonr` 3-d arrays are parsed as multiple 2-d matrices and combined ----------------------------------------------------------------------------- In `{yyjsonr}` the order in which elements in an array are serialized to JSON correspond to a JSON `[]` array of row-major matrices in human-readable order. `{jsonlite}` does things differently. The array formats are internally consistent within each package, but not cross-compatible between them i.e. you cannot serialize an array in `{yyjsonr}` and re-create it exactly using `{jsonlite}`. In the examples below, a simple 3d matrix is serialized with both `jsonlite` and `yyjsonr`. ```{r} # A simple 3D array mat <- array(1:12, dim = c(2,3,2)) mat ``` ```{r} # jsonlite's serialization of matrices is internally consistent and re-parses # to the initial matrix. str <- jsonlite::toJSON(mat, pretty = TRUE) cat(str) jsonlite::fromJSON(str) ``` ```{r} # yyjsonr's serialization of matrices is internally consistent and re-parses # to the initial matrix. # But note that it is *different* to what jsonlite does. str <- yyjsonr::write_json_str(mat, pretty = TRUE) cat(str) yyjsonr::read_json_str(str) ```
/scratch/gouwar.j/cran-all/cranData/yyjsonr/vignettes/jsonlite-comparison.Rmd
--- title: "Configuration Options for Serializing to JSON" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Configuration Options for Serializing to JSON} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = FALSE, comment = "#>" ) ``` ```{r setup} suppressPackageStartupMessages({ library(yyjsonr) }) ``` Overview ----------------------------------------------------------------------------- This vignette: * introduces the `opts` argument for writing JSON with the `write_json_X()` family of functions. * outlines the creation of default options with `opts_write_json()` * provides extended examples of how these options control writing JSON The `opts` argument - Specifying options when reading JSON ----------------------------------------------------------------------------- All `write_json_x()` functions have an `opts` argument. `opts` takes a named list of options used to configure the way `yyjsonr` writes JSON from R objects. The default argument for `opts` is an empty list, which internally sets the default options for writing. The default options for writing JSON can also be viewed by running `opts_write_json()`. The following three function calls are all equivalent ways of calling `write_json_str()` using the default options: ```{r eval=FALSE} write_json_str(iris) write_json_str(iris, opts = list()) write_json_str(iris, opts = opts_write_json()) ``` Setting arguments to override the default options ----------------------------------------------------------------------------- Setting a single option (and keeping all other options at their default value) can be done in a number of ways. The following three function calls are all equivalent: ```{r eval=FALSE} write_json_str(iris, opts = list(str_specials = 'string')) write_json_str(iris, opts = opts_write_json(str_specials = 'string')) write_json_str(iris, str_specials = 'string') ``` Option `digits` - Number of decimal places for numeric values ------------------------------------------------------------------------------- The `digits` option controls the number of decimal places output for numeric values. The default value of `digits = -1` indicates that the internal `yyjson` C library formatting should be used. ```{r} robj <- c(1, 1.23, 3.141592654) write_json_str(robj) write_json_str(robj, digits = 2) write_json_str(robj, digits = 0) ``` Option `pretty` - Use whitespace to make the JSON pretty ------------------------------------------------------------------------------- The `pretty` option is a logical value indicating whether or not whitespace should be used to make the resulting JSON more readable. ```{r} robj <- head(iris, 2) write_json_str(robj) |> cat() write_json_str(robj, pretty = TRUE) |> cat() ``` Option `auto_unbox` - Handling for R vectors of length 1 ------------------------------------------------------------------------------- The `auto_unbox` option is a logical value indicating whether single values should be written as JSON scalars or JSON arrays (with length 1). When `auto_unbox = FALSE` (the default), single values are always written as a JSON array i.e. within `[]` brackets. 
When `auto_unbox = TRUE`, single values are written as bare JSON scalar values ```{r} robj <- list(1, c(1, 2), NA) write_json_str(robj) |> cat() write_json_str(robj, auto_unbox = TRUE) |> cat() ``` Option `dataframe` - Orientation of data.frame output ------------------------------------------------------------------------------- The `dataframe` option controls the orientation of the data output to JSON: * `dataframe = "rows"` (the default) writes the data one-row-at-a-time as a JSON `[]` array containing a JSON `{}` object for each row. * `dataframe = "cols"` writes the data one-column-at-a-time as a JSON `{}` object containing JSON `[]` arrays. ```{r} robj <- head(iris, 3) write_json_str(robj, pretty = TRUE) |> cat() write_json_str(robj, pretty = TRUE, dataframe = "cols") |> cat() ``` Option `factor` - factor representation ------------------------------------------------------------------------------- The `factor` option indicates whether factors should be output as `string` (the default) or `integer` values. ```{r echo=FALSE} set.seed(1) ``` ```{r} robj <- sample(iris$Species, 10) write_json_str(robj) |> cat() write_json_str(robj, factor = 'integer') |> cat() ``` Option `name_repair` - Dealing with missing names in lists ------------------------------------------------------------------------------- When writing R lists which are only partially named, `name_repair` controls the names which are generated for the JSON output. * `name_repair = "none"` (the default) means that no names are created, and an empty string will be used as the key. * `name_repair = "minimal"` will generate default names for each unnamed list item based upon its position in the list. ```{r} robj <- list(a = 1, b = 2, 67) write_json_str(robj, pretty = TRUE) |> cat() write_json_str(robj, pretty = TRUE, name_repair = 'minimal') |> cat() ``` Option `num_specials` - Writing numeric `NA`, `NaN` and `Inf` ------------------------------------------------------------------------------- JSON only has a single `null` value as a representation of missing-ness or special-ness of a value. That is, it has no natural representations to distinguish the special R numeric values like `NA`, `NaN` and `Inf`. The `num_specials` option configures handling of these values in the JSON output: * `num_specials = "null"` (the default) will write special numeric values as JSON `null` values. * `num_specials = "string"` will write string representations of these values. ```{r} robj <- c(1.23, NA_real_, NaN, Inf, -Inf) write_json_str(robj) |> cat() write_json_str(robj, num_specials = 'string') |> cat() ``` Option `str_specials` - Writing character `NA` ------------------------------------------------------------------------------- JSON only has a single `null` value as a representation of missing-ness or special-ness of a value. That is, it has no specific representation of `NA_character_`. The `str_specials` option configures handling of `NA_character_` values in the JSON output: * `str_specials = "null"` (the default) will write `NA_character_` as JSON `null`. * `str_specials = "string"` will write `NA_character_` as `"NA"`. ```{r} robj <- c("hello", NA_character_) write_json_str(robj) |> cat() write_json_str(robj, str_specials = 'string') |> cat() ``` Option `yyjson_write_flag` - internal `YYJSON` C library options ----------------------------------------------------------------------------- The `yyjson` C library supports a number of internal options for writing JSON. 
These options are considered advanced, and the user is referred to the [`yyjson` documentation](https://ibireme.github.io/yyjson/doc/doxygen/html/md_doc__a_p_i.html#autotoc_md43) for further explanation on what they control. **Warning**: some of these advanced options do not make sense for interfacing with R, or otherwise conflict with how this package converts R objects to JSON. ```{r} # A reference list of all the possible YYJSON options yyjsonr::yyjson_write_flag write_json_str( c('hello / there', '#RStats'), opts = opts_write_json(yyjson_write_flag = c( yyjson_write_flag$YYJSON_WRITE_ESCAPE_SLASHES )) ) |> cat() ```
/scratch/gouwar.j/cran-all/cranData/yyjsonr/vignettes/to_json_options.Rmd
cmultRepl <- function(X, label= 0, method= c("GBM","SQ","BL","CZM","user"), output= c("prop","p-counts"), frac= 0.65, threshold= 0.5, adjust= TRUE, t= NULL, s= NULL, z.warning= 0.8, z.delete= TRUE, suppress.print= FALSE, delta= NULL) { if (any(X<0, na.rm=T)) stop("X contains negative values") if (is.vector(X) | is.character(X) | (nrow(X)==1)) stop("X must be a data matrix") if (!is.na(label)) { if (!any(X==label,na.rm=T)) stop(paste("Label",label,"was not found in the data set")) if (label!=0 & any(X==0,na.rm=T)) stop("Zero values not labelled as count zeros were found in the data set") if (any(is.na(X))) stop(paste("NA values not labelled as count zeros were found in the data set")) } if (is.na(label)) { if (any(X==0,na.rm=T)) stop("Zero values not labelled as count zeros were found in the data set") if (!any(is.na(X),na.rm=T)) stop(paste("Label",label,"was not found in the data set")) } if (!missing("delta")) { warning("The delta argument is deprecated, use frac instead: frac has been set equal to delta.") frac <- delta } X <- as.data.frame(X, stringsAsFactors=TRUE) X[X==label] <- NA checkNumZerosCol <- apply(X, 2, function(x) sum(is.na(x))) if (any(checkNumZerosCol/nrow(X) > z.warning)) { cases <- which(checkNumZerosCol/nrow(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (ncol(X)-2)) { stop(paste("Almost all columns contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[,-cases] action <- "deleted" warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. Check out with zPatterns()).\n", sep="")) } } checkNumZerosRow <- apply(X, 1, function(x) sum(is.na(x))) if (any(checkNumZerosRow/ncol(X) > z.warning)) { cases <- which(checkNumZerosRow/ncol(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (nrow(X)-2)) { stop(paste("Almost all rows contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[-cases,] action <- "deleted" warning(paste("Row no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Row no. ", cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. 
Check out with zPatterns()).\n", sep="")) } } N <- nrow(X); D <- ncol(X) n <- apply(X,1,sum,na.rm=TRUE) # Determining t and s method <- match.arg(method) output <- match.arg(output) if (method!="CZM"){ if (method=="user") {t <- t} else { alpha <- matrix(0,nrow=N,ncol=D) for (i in 1:N){ alpha[i,] <- apply(X,2,function(x) sum(x[-i],na.rm=T)) } t <- alpha/rowSums(alpha) if ((method=="GBM") && (any(t==0))) {stop("GBM method: not enough information to compute t hyper-parameter, probably there are columns with < 2 positive values.")} } s <- switch(method, GBM = 1/apply(t,1,function(x) exp(mean(log(x)))), SQ = sqrt(n), BL = D, user = s) repl <- t*(s/(n+s)) } if (method=="CZM"){ repl <- frac*matrix(1,ncol=D,nrow=N)*(threshold/n) } # Multiplicative replacement on the closed data X2 <- t(apply(X,1,function(x) x/sum(x,na.rm=T))) colmins <- apply(X2,2,function(x) min(x,na.rm=T)) adjusted <- 0 for (i in 1:N){ if (any(is.na(X2[i,]))){ z <- which(is.na(X2[i,])) X2[i,z] <- repl[i,z] if (adjust==TRUE){ if (any(X2[i,z] > colmins[z])){ f <- which(X2[i,z] > colmins[z]) X2[i,z][f] <- frac*colmins[z][f] adjusted <- adjusted + length(f) } } X2[i,-z] <- (1-(sum(X2[i,z])))*X2[i,-z] } } # Rescale to p-counts if required if (output=="p-counts"){ for (i in 1:N){ if (any(is.na(X[i,]))){ zero <- which(is.na(X[i,])) pos <- setdiff(1:D,zero)[1] X[i,zero] <- (X[i,pos]/X2[i,pos])*X2[i,zero] } } res <- X } else {res <- X2} if (suppress.print == FALSE){ if ((adjust==TRUE) & (adjusted > 0)) {cat(paste("No. adjusted imputations: ",adjusted,"\n"))} } return(as.data.frame(res,stringsAsFactors=TRUE)) }
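## Illustrative usage sketch: the toy count matrix 'X' below is invented for
## illustration only. Count zeros labelled 0 are imputed with the default GBM
## prior and the result is returned as imputed proportions.
#
# X <- matrix(c(26, 8, 0, 4,
#                5, 0, 6, 3,
#               14, 2, 2, 9), byrow = TRUE, ncol = 4)
# cmultRepl(X, label = 0, method = "GBM", output = "prop")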
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/cmultRepl.R
#' @title Log-contrast homogeneity test #' #' @description This function tests for homogeneity across groups of means and variances of #' user-defined log-contrasts. Groups can be defined by either zero/unobserved data patterns or by a grouping #' factor in fully observed zero-free data sets. #' #' @details Homogeneity of log-contrast means and variances across groups is tested using either parametric or non-parametric tests. When #' \code{method = "parametric"}, ordinary analysis of variance and Bartlett's tests are used. Alternatively, #' Kruskal-Wallis and Fligner-Killen tests are used instead when \code{method = "nonparametric"}. The results of a permutation test of homogeneity of variation #' arrays based on total weighted squared relative errors are also provided (see \code{\link{zVarArrayTest}} for more details). #' The log-contrast is specified by the \code{lc} argument using a vector of codes 1, -1 and 0 for components #' in the numerator, denominator and omitted respectively. #' #' @param X Compositional data set (\code{\link{matrix}} or \code{\link{data.frame}} class). #' @param label Unique label (\code{\link{numeric}} or \code{\link{character}}) used to denote zero or unobserved data in \code{X} (\code{label = 0}, default). #' @param groups Grouping factor in fully observed zero-free data sets (\code{groups = NULL}, default). #' @param lc User-defined log-contrast (see details below). #' @param method Approach used for mean and variance homogeneity testing (\code{method = "parametric"}, default). #' @param b Number of bootstrap resamples used by permutation test (\code{b = 1000}, default). #' #' @return Test p-values for log-contrast means and variances. #' #' @seealso \code{\link{zPatterns}}, \code{\link{zVarArray}}, \code{\link{zVarArrayError}}. #' #' @examples #' data(Water) #' zPatterns(Water, label = 0) #' #' # Test of homogeneity in log-contrast Potassium/Arsenic*Calcium #' lcTest(Water, label = 0, lc = c(1,-1,-1,0)) lcTest <- function(X, label = 0, groups = NULL, lc = NULL, method = c("parametric", "nonparametric"), b = 1000){ if (any(X<0, na.rm=T)) stop("X contains negative values") if (is.vector(X)) stop("X must be a matrix or data.frame class object") if (is.null(label)) stop("A value for label must be given") if (!is.null(groups)){ if (any(X == label, na.rm = T)) stop(paste("Label", label, "was found in the data set. 
No zeros or unobserved values are allowed when a grouping factor is specified")) } if (!is.na(label)) { if (!any(X == label, na.rm = T) & (is.null(groups))) stop(paste("Label", label, "was not found in the data set")) if (label != 0 & any(X == 0, na.rm = T)) stop("Zero values not labelled as such were found in the data set") if (any(is.na(X))) stop(paste( "NA values not labelled as zero values were found in the data set" )) } if (is.na(label)) { if (any(X == 0, na.rm = T)) stop("Zero values not labelled as such were found in the data set") if (!any(is.na(X), na.rm = T) & (is.null(groups))) stop(paste("Label", label, "was not found in the data set")) } if(is.null(lc)){stop("A sensible log-contrast must be specified to use this function")} if(length(lc) != ncol(X)) { stop("The number of columns in X and lc do not agree")} if((all(lc >= 0)) | (all(lc <= 0))) { stop(paste("The log-contrast is not correctly defined"))} method <- match.arg(method) X <- as.data.frame(X,stringsAsFactors=TRUE) if (is.null(groups)){ g <- zPatterns(X,label = label, plot = FALSE, suppress.print = TRUE) ifelse(is.na(label), X[is.na(X)] <- 0, X[X == label] <- 0) } else{ g <- as.factor(groups) levNames <- levels(g) levels(g) <- 1:length(levels(g)) } unobs <- sapply(split(X,g),function(x) sum(1*(x[1,]==0)),simplify = TRUE) if (any(unobs > (ncol(X)-2))) { warning("Some groups have less than two components available and NAs were produced (use zPatterns to check out)") } ni <- table(g); col <- ncol(X); nind <- nrow(X) numPat <- length(levels(g)) p <- as.numeric(levels(g)) pi <- ni / nind # % obs in each group usepart <- (lc!=0) usePat <- vector("numeric") Xfeas <- vector("numeric") nifeas <- pifeas <- gfeas <- vector("numeric") for (pat in 1:numPat){ x <- X[g == pat,usepart] if (all(x[1,] > 0)){ usePat <- cbind(usePat,pat,stringsAsFactors=TRUE) Xfeas <- rbind(Xfeas,x) gfeas <- c(gfeas,g[g==pat]) nifeas <- c(nifeas,ni[pat]) pifeas <- c(pifeas,pi[pat]) }#end if }# end for gfeas <- as.factor(gfeas) pfeas <- as.numeric(levels(gfeas)) Apval <- Bpval <- pvalExp <- pvalVar <- NA # check if there is a pattern if (length(usePat) > 0){ # log-contrasts of data bal <- rep(0,ncol(Xfeas)) den <- sum(lc==-1) num <- sum(lc==1) bal[lc[usepart]==1] <- sqrt(den/((den+num)*num)) bal[lc[usepart]==-1] <- -sqrt(num/((den+num)*den)) Yfeas <- YfeasV <- as.matrix(log(Xfeas))%*%bal # center to zero each group for variance test numpatj <- nlevels(gfeas) for (k in 1:numpatj){ YfeasV[gfeas==pfeas[k]] <- scale(Yfeas[gfeas==pfeas[k]],TRUE,FALSE) } if (length(usePat) > 1){ if (method == "parametric"){ # anova test of location if (min(nifeas)>1) {Apval<-anova(lm(Yfeas~gfeas))$"Pr(>F)"[1]} # Bartlett test of variances if (min(nifeas)>1) {Bpval<-bartlett.test(YfeasV~gfeas)$p.value} } if (method == "nonparametric"){ # Kruskal-Wallis test of location if (min(nifeas)>1) {Apval <- kruskal.test(Yfeas ~ gfeas)$p.value} # Fligner-Killeen test of variances if (min(nifeas)>1) {Bpval <- fligner.test(YfeasV ~ gfeas)$p.value} } if (is.na(Apval) | is.na(Bpval)) warning("Log-contrast present in groups of < 2 observations and NAs produced (use zPatterns to check out)") # Permutation test ErrVar <- ErrExp<-rep(0,b) for (rept in 1:b){ lx <- sample(Yfeas) lxV <- sample(YfeasV) # exp and var EbP <- tapply(lx,gfeas,mean) #expectation by Pattern VbP <- tapply(lxV,gfeas,var) #variance by Pattern EbP[is.na(VbP)] <- NA # NA for both if based on only one value # By pattern VarByP <- ((nifeas[!is.na(VbP)]-1)*VbP[!is.na(VbP)])/(nifeas[!is.na(VbP)]) ExpByP <- EbP[!is.na(EbP)] # Overall 
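# Pooled mean and variance of the log-contrast across all retained observations;
# the n-denominator variance is used to match the per-pattern variances above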
VarTot <- as.numeric((length(YfeasV)-1)*var(YfeasV)/length(YfeasV)) # Var ExpTot <- as.numeric(mean(Yfeas)) # Exp # squared relative error ErrVar[rept] <- sum((pi[pfeas[!is.na(VbP)]])*((1-(VarByP[!is.na(VbP)]/VarTot))^2)) ErrExp[rept] <- sum((pi[pfeas[!is.na(EbP)]])*((1-(ExpByP[!is.na(EbP)]/ExpTot))^2)) }# end for Permutation test # Original errors EbP <- tapply(Yfeas,gfeas,mean) #expectation by Pattern VbP <- tapply(YfeasV,gfeas,var) #variance by Pattern EbP[is.na(VbP)] <- NA # NA for both if based on only one value # By pattern VarByP <- ((nifeas[!is.na(VbP)]-1)*VbP[!is.na(VbP)])/(nifeas[!is.na(VbP)]) ExpByP <- EbP[!is.na(EbP)] # Overall VarTot <- as.numeric((length(YfeasV)-1)*var(YfeasV)/length(YfeasV)) # Var ExpTot <- as.numeric(mean(Yfeas)) # Exp # Squared relative error ErrVarOr <- sum((pi[pfeas[!is.na(VbP)]])*((1-(VarByP/VarTot))^2)) ErrExpOr <- sum((pi[pfeas[!is.na(EbP)]])*((1-(ExpByP/ExpTot))^2)) # p-value (add 1 for the original sample, it is included) pvalExp <- (sum(ErrExp>ErrExpOr)+1)/(b+1) pvalVar <- (sum(ErrVar>ErrVarOr)+1)/(b+1) } else{stop("Log-contrast only available for one pattern or group")} } else{stop("Log-contrast not available for any pattern or group")} lcstringn <- vector("character") lcstringd <- vector("character") lcnamesn <- names(X)[which(lc==1)] for (i in 1:length(lcnamesn)) lcstringn <- paste(lcstringn,lcnamesn[i],sep="*") lcstringn <- substring(lcstringn,2,nchar(lcstringn)) lcnamesd <- names(X)[which(lc==-1)] for (i in 1:length(lcnamesd)) lcstringd <- paste(lcstringd,lcnamesd[i],sep="*") lcstringd <- substring(lcstringd,2,nchar(lcstringd)) cat("\n") cat("Log-contrast homogeneity tests \n") cat("------------------------------ \n") cat(paste("Number of groups:",nlevels(g),"\n")) cat(paste("Log-contrast: ","(",lcstringn,")"," / ","(",lcstringd,")","\n",sep="")) if (method == "nonparametric"){ cat(paste("Kruskal-Wallis test of log-contrast means:",round(Apval,4),"\n")) cat(paste("Fligner-Killeen test of log-contrast variances:",round(Bpval,4),"\n")) } if (method == "parametric"){ cat(paste("ANOVA test of log-contrast means:",round(Apval,4),"\n")) cat(paste("Bartlett test of log-contrast variances:",round(Bpval,4),"\n")) } cat(paste("Permutation test of total weighted SRE in log-contrast means:",round(pvalExp,4),"\n")) cat(paste("Permutation test of total weighted SRE in log-contrast variances:",round(pvalVar,4),"\n")) }
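## Illustrative sketch of the 'groups' argument: the simulated zero-free
## composition 'Xc' and the grouping factor 'grp' below are invented for
## illustration only and are not part of the package examples.
#
# set.seed(1)
# Xc <- matrix(rlnorm(40 * 4), ncol = 4,
#              dimnames = list(NULL, c("As", "Ca", "K", "Mg")))
# grp <- gl(2, 20, labels = c("A", "B"))
# lcTest(Xc, label = 0, groups = grp, lc = c(1, -1, -1, 0),
#        method = "nonparametric")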
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/lcTest.R
lrDA <- function(X,label=NULL,dl=NULL,ini.cov=c("lrEM","complete.obs","multRepl"),frac=0.65, imp.missing=FALSE,n.iters=1000,m=1,store.mi=FALSE,closure=NULL,z.warning=0.8, z.delete=TRUE,delta=NULL){ if (any(X<0, na.rm=T)) stop("X contains negative values") if ((is.vector(X)) | (nrow(X)==1)) stop("X must be a data matrix") if (is.null(label)) stop("A value for label must be given") if (!is.na(label)){ if (!any(X==label,na.rm=T)) stop(paste("Label",label,"was not found in the data set")) if (label!=0 & any(X==0,na.rm=T)) stop("Zero values not labelled as censored or missing values were found in the data set") if (any(is.na(X))) stop(paste("NA values not labelled as censored or missing values were found in the data set")) } if (is.na(label)){ if (any(X==0,na.rm=T)) stop("Zero values not labelled as censored or missing values were found in the data set") if (!any(is.na(X),na.rm=T)) stop(paste("Label",label,"was not found in the data set")) } if (imp.missing==FALSE){ if (is.character(dl)) stop("dl must be a numeric vector or matrix") if (is.null(dl)){ # If dl not given use min per column dl <- apply(X,2, function(x) min(x[x!=label])) warning("No dl vector or matrix provided. The minimum observed values for each column used as detection limits.") } if (is.vector(dl)) dl <- matrix(dl,nrow=1) dl <- as.matrix(dl) # Avoids problems when dl might be multiple classes } if (imp.missing==FALSE){ if (ncol(dl)!=ncol(X)) stop("The number of columns in X and dl do not agree") if ((nrow(dl)>1) & (nrow(dl)!=nrow(X))) stop("The number of rows in X and dl do not agree") } if ((store.mi==TRUE) & (m==1)) store.mi <- FALSE if (!missing("delta")){ warning("The delta argument is deprecated, use frac instead: frac has been set equal to delta.") frac <- delta } lm.sweep <- function(M,C,varobs){ sweep.matrix <- function(A,ind){ nn <- nrow(A); p <- ncol(A) S <- A for (j in ind){ S[j,j] <- -1/A[j,j] for (i in 1:p) { if (i != j){ S[i,j] <- -A[i,j]*S[j,j] S[j,i] <- S[i,j] } } for (i in 1:p){ if (i != j){ for (k in 1:p){ if (k != j){ S[i,k] <- A[i,k] - S[i,j]*A[j,k] S[k,i] <- S[i,k] } } } } A <- S } return(A) } p <- length(M) q <- length(varobs) i <- rep(1,p) i[varobs] <- i[varobs]-1 dep <- which(i!=0) ndep <- length(dep) A <- matrix(0,p+1,p+1) A[1,1] <- -1 A[1,2:(p+1)] <- M A[2:(p+1),1] <- matrix(M,ncol=1) A[2:(p+1),2:(p+1)] <- C reor <- c(1,varobs+1,dep+1) A <- A[reor,reor] A <- sweep.matrix(A,1:(q+1)) B <- A[1:(q+1),(q+2):(p+1)] CR <- A[(q+2):(p+1),(q+2):(p+1)] return(list(betas=B,resid=CR)) } inv.raw <- function(Y,X,pos,closed,nn,c){ inv.alr <- function(x,pos){ ad<-1/(rowSums(exp(x))+1) ax<-exp(x)*ad if(pos==1) { a<-cbind(ad,ax,stringsAsFactors=TRUE) } else { if (dim(x)[2] < pos){ a<-cbind(ax,ad,stringsAsFactors=TRUE) } else { a<-cbind(ax[,1:(pos-1)],ad,ax[,pos:(dim(x)[2])],stringsAsFactors=TRUE) } } return(a) } Y <- inv.alr(Y,pos) for (i in 1:nn){ if (any(is.na(X[i,]))){ vbdl <- which(is.na(X[i,])) X[i,vbdl] <- (X[i,pos]/Y[i,pos])*Y[i,vbdl] } } if (closed==1){ X <- t(apply(X,1,function(x) x/sum(x)*c[1])) } return(as.data.frame(X,stringsAsFactors=TRUE)) } riwish <- function(v,S){ # From ratematrix package S <- solve(S) if (!is.matrix(S)) S <- matrix(S) if (v < nrow(S)) { stop(message = "v is less than the dimension of S in rwish().\n") } p <- nrow(S) CC <- chol(S) Z <- matrix(0, p, p) diag(Z) <- sqrt(stats::rchisq(p, v:(v - p + 1))) if (p > 1) { pseq <- 1:(p - 1) Z[rep(p * pseq, pseq) + unlist(lapply(pseq, seq))] <- stats::rnorm(p *(p - 1)/2) } out <- crossprod(Z %*% CC) return(solve(out)) } ini.cov <- match.arg(ini.cov) 
X <- as.data.frame(X,stringsAsFactors=TRUE)
nn <- nrow(X); p <- ncol(X)
if (nn <= p) stop("The lrDA algorithm works on regular data sets (no. rows > no. columns). You can consider lrSVD for wide data sets.")
X[X==label] <- NA
X <- apply(X,2,as.numeric)
c <- apply(X,1,sum,na.rm=TRUE)
checkNumZerosCol <- apply(X, 2, function(x) sum(is.na(x)))
if (any(checkNumZerosCol/nrow(X) > z.warning)) {
  cases <- which(checkNumZerosCol/nrow(X) > z.warning)
  if (z.delete == TRUE) {
    if (length(cases) > (ncol(X)-2)) {
      stop(paste("Almost all columns contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep=""))
    }
    X <- X[,-cases]
    action <- "deleted"
    warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep=""))
  } else {
    action <- "found"
    warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. Check out with zPatterns()).\n", sep=""))
  }
}
checkNumZerosRow <- apply(X, 1, function(x) sum(is.na(x)))
if (any(checkNumZerosRow/ncol(X) > z.warning)) {
  cases <- which(checkNumZerosRow/ncol(X) > z.warning)
  if (z.delete == TRUE) {
    if (length(cases) > (nrow(X)-2)) {
      stop(paste("Almost all rows contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep=""))
    }
    X <- X[-cases,]
    action <- "deleted"
    warning(paste("Row no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep=""))
  } else {
    action <- "found"
    warning(paste("Row no. ", cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. Check out with zPatterns()).\n", sep=""))
  }
}
if (imp.missing==FALSE) {if (nrow(dl)==1) dl <- matrix(rep(1,nn),ncol=1)%*%dl}
# Check for closure
closed <- 0
if (all( abs(c - mean(c)) < .Machine$double.eps^0.3 )) closed <- 1
pos <- which(!is.na(colSums(X)))[1]
if (is.na(pos)) stop("lrDA requires at least one fully observed column")
if (imp.missing==FALSE){
  cpoints <- log(dl)-log(X[,pos])-.Machine$double.eps
  cpoints <- cpoints[,-pos]
}
X_alr <- log(X)-log(X[,pos]); X_alr <- as.matrix(X_alr[,-pos])
nn <- nrow(X_alr); p <- ncol(X_alr)
if (ini.cov == "complete.obs"){
  if (inherits(try(solve(cov(X,use=ini.cov)),silent=TRUE),"try-error")) stop("ini.cov: too few complete cases for using 'complete.obs'")
  M <- matrix(colMeans(X_alr,na.rm=T),ncol=1)
  C <- cov(X_alr,use=ini.cov)}
if (ini.cov == "multRepl"){
  X.mr <- multRepl(X,label=NA,dl=dl,frac=frac,imp.missing=imp.missing,closure=closure,z.warning=z.warning,z.delete=z.delete)
  X.mr_alr <- t(apply(X.mr,1,function(x) log(x)-log(x[pos])))[,-pos]
  M <- matrix(colMeans(X.mr_alr,na.rm=T),ncol=1)
  C <- cov(X.mr_alr)}
if (ini.cov == "lrEM"){
  X.em <- lrEM(X,label=NA,dl=dl,ini.cov="multRepl",frac=frac,imp.missing=imp.missing,closure=closure,suppress.print=TRUE, z.warning=z.warning,z.delete=z.delete)
  X.em_alr <- t(apply(X.em,1,function(x) log(x)-log(x[pos])))[,-pos]
  M <- matrix(colMeans(X.em_alr,na.rm=T),ncol=1)
  C <- cov(X.em_alr)}
misspat <- as.data.frame(is.na(X)*1,stringsAsFactors=TRUE)
misspat <- as.factor(do.call(paste,c(misspat,sep="")))
levels(misspat) <- 1:(length(levels(misspat)))
t <- 0
k <- 0
runs <- 0
alt.in <- FALSE
alt.pat <- 0
alt.mr <- 0
if (m > 1){
  imputed <- matrix(0,nrow=m,ncol=sum(is.na(X_alr)))
  if (store.mi==TRUE) mi.list <- vector(mode="list",m)
}
while (t <= n.iters*m){
  Y <- X_alr
  runs <- runs + 1
  # I-step
for (npat in 1:length(levels(misspat))){ i <- which(misspat==npat) varmiss <- which(is.na(X_alr[i[1],])) if (length(varmiss) == 0) {next} # Skip first pattern if all obs varobs <- which(!is.na(X_alr[i[1],])) if (length(varobs) == 0){ alt.in <- TRUE temp <- multRepl(X[i,,drop=FALSE],label=NA,dl=dl[i,,drop=FALSE],frac=frac,imp.missing=imp.missing,closure=closure,z.warning=z.warning,z.delete=z.delete) Y[i,] <- t(apply(temp,1,function(x) log(x)-log(x[pos])))[,-pos] if (runs == 1){ alt.pat <- c(alt.pat,npat) alt.mr <- list(alt.mr,i) } break } sigmas <- matrix(0,ncol=p) B <- matrix(lm.sweep(M,C,varobs)[[1]],ncol=length(varmiss)) CR <- lm.sweep(M,C,varobs)[[2]] Y[i,varmiss] <- matrix(1,nrow=length(i))%*%B[1,] + X_alr[i, varobs, drop=FALSE]%*%B[2:(length(varobs)+1),] sigmas[varmiss] <- sqrt(diag(as.matrix(CR))) if (imp.missing==FALSE){ for (j in 1:length(varmiss)){ sigma <- sigmas[varmiss[j]] Y[i,varmiss[j]] <- rtruncnorm(1,-Inf,cpoints[i,varmiss[j]],Y[i,varmiss[j]],sigma) } } if (imp.missing==TRUE){ for (j in 1:length(varmiss)){ sigma <- sigmas[varmiss[j]] Y[i,varmiss[j]] <- rnorm(1,Y[i,varmiss[j]],sigma) } } } if ((t%in%((1:m)*n.iters)) & (m > 1)){ k <- k + 1 imputed[k,] <- Y[which(is.na(X_alr))] if (store.mi==TRUE){ mi.list[[k]] <- Y } } # P-step C <- riwish(nn-1,nn*cov(Y)) M <- mvrnorm(1,colMeans(Y),(1/nn)*C) t <- t + 1 } if ((m > 1) & (store.mi == FALSE)) Y[which(is.na(X_alr))] <- colMeans(imputed) # MI estimates if (store.mi==FALSE) X <- inv.raw(Y,X,pos,closed,nn,c) if (store.mi==TRUE) X <- lapply(mi.list,FUN=function(x) inv.raw(x,X,pos,closed,nn,c)) if (alt.in) { if (imp.missing==FALSE){ cat("Warning: samples with only one observed component were found \n") for (i in 2:length(alt.pat)){ cat(paste(" Pattern no.",alt.pat[i],"was imputed using multiplicative simple replacement \n")) cat(" Affected samples id: "); cat(alt.mr[[i]]); cat("\n\n") } } } return(X) }
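## ----------------------------------------------------------------------------
## Illustrative usage sketch (added for exposition; not part of the original
## lrDA.R source). A hypothetical call on invented toy data: values below a
## detection limit are coded as 0 (label = 0), dl gives per-column limits, and
## m > 1 requests multiple imputations that are averaged in the log-ratio space.
## Columns 1 and 4 are left fully observed because the alr-based algorithm
## requires at least one complete column. The if (FALSE) guard keeps the
## example from running if this file is sourced.
if (FALSE) {
  library(zCompositions)
  set.seed(2)
  X <- matrix(exp(rnorm(40 * 4)), ncol = 4)
  X <- 100 * X / rowSums(X)              # toy compositions closed to 100
  X[c(4, 9, 17), 2] <- 0                 # pretend these fell below dl[2]
  X[c(6, 21), 3] <- 0                    # and these below dl[3]
  dl <- c(1, 2, 2, 1)                    # hypothetical detection limits
  X.mi <- lrDA(X, label = 0, dl = dl, n.iters = 200, m = 3)
}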
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/lrDA.R
lrEM <- function(X,label=NULL,dl=NULL,rob=FALSE,ini.cov=c("complete.obs","multRepl"),frac=0.65,tolerance=0.0001, max.iter=50,rlm.maxit=150,imp.missing=FALSE,suppress.print=FALSE, closure=NULL,z.warning=0.8,z.delete=TRUE,delta=NULL){ if (any(X<0, na.rm=T)) stop("X contains negative values") if ((is.vector(X)) | (nrow(X)==1)) stop("X must be a data matrix") if (is.null(label)) stop("A value for label must be given") if (!is.na(label)){ if (!any(X==label,na.rm=T)) stop(paste("Label",label,"was not found in the data set")) if (label!=0 & any(X==0,na.rm=T)) stop("Zero values not labelled as censored or missing values were found in the data set") if (any(is.na(X))) stop(paste("NA values not labelled as censored or missing values were found in the data set")) } if (is.na(label)){ if (any(X==0,na.rm=T)) stop("Zero values not labelled as censored or missing values were found in the data set") if (!any(is.na(X),na.rm=T)) stop(paste("Label",label,"was not found in the data set")) } if (imp.missing==FALSE){ if (is.character(dl)) stop("dl must be a numeric vector or matrix") if (is.null(dl)){ # If dl not given use min per column dl <- apply(X,2, function(x) min(x[x!=label])) warning("No dl vector or matrix provided. The minimum observed values for each column used as detection limits.") } if (is.vector(dl)) dl <- matrix(dl,nrow=1) dl <- as.matrix(dl) # Avoids problems when dl might be multiple classes } if (imp.missing==FALSE){ if (ncol(dl)!=ncol(X)) stop("The number of columns in X and dl do not agree") if ((nrow(dl)>1) & (nrow(dl)!=nrow(X))) stop("The number of rows in X and dl do not agree") } if (!missing("delta")){ warning("The delta argument is deprecated, use frac instead: frac has been set equal to delta.") frac <- delta } ini.cov <- match.arg(ini.cov) lm.sweep <- function(M,C,varobs){ sweep.matrix <- function(A,ind){ nn <- nrow(A); D <- ncol(A) S <- A for (j in ind){ S[j,j] <- -1/A[j,j] for (i in 1:D) { if (i != j){ S[i,j] <- -A[i,j]*S[j,j] S[j,i] <- S[i,j] } } for (i in 1:D){ if (i != j){ for (k in 1:D){ if (k != j){ S[i,k] <- A[i,k] - S[i,j]*A[j,k] S[k,i] <- S[i,k] } } } } A <- S } return(A) } D <- length(M) q <- length(varobs) i <- rep(1,D) i[varobs] <- i[varobs]-1 dep <- which(i!=0) ndep <- length(dep) A <- matrix(0,D+1,D+1) A[1,1] <- -1 A[1,2:(D+1)] <- M A[2:(D+1),1] <- matrix(M,ncol=1) A[2:(D+1),2:(D+1)] <- C reor <- c(1,varobs+1,dep+1) A <- A[reor,reor] A <- sweep.matrix(A,1:(q+1)) B <- A[1:(q+1),(q+2):(D+1)] CR <- A[(q+2):(D+1),(q+2):(D+1)] return(list(betas=B,resid=CR)) } inv.alr <- function(x,pos){ ad<-1/(rowSums(exp(x))+1) ax<-exp(x)*ad if(pos==1) { a<-cbind(ad,ax,stringsAsFactors=TRUE) } else { if (dim(x)[2] < pos){ a<-cbind(ax,ad,stringsAsFactors=TRUE) } else { a<-cbind(ax[,1:(pos-1)],ad,ax[,pos:(dim(x)[2])],stringsAsFactors=TRUE) } } return(a) } ilr <- function (x){ D <- length(x) z <- vector(mode="double",length=D-1) for (i in 1:(D-1)){ z[i] <- sqrt((D-i)/(D-i+1))*log(x[i]/((prod(x[(i+1):D]))^(1/(D-i)))) } return(as.numeric(z)) } inv.ilr <- function(z){ D <- length(z) + 1 x <- vector(mode="double",length=D) x[1] <- exp(sqrt((D-1)/D)*z[1]) for (i in 2:(D-1)){ x[i] <- exp(-sum((1/sqrt((D-1:(i-1)+1)*(D-1:(i-1))))*z[1:(i-1)]) + sqrt(D-i)/sqrt(D-i+1)*z[i]) } x[D] <- exp(-sum((1/sqrt((D-1:(D-1)+1)*(D-1:(D-1))))*z[1:(D-1)])) x <- x/sum(x) return(x) } ## Preliminaries ---- X <- as.data.frame(X,stringsAsFactors=TRUE) nn <- nrow(X); D <- ncol(X) if (nn <= D) stop("The lrEM algorithm works on regular data sets (no. rows > no. columns). 
You can consider lrSVD for wide data sets.")
X[X==label] <- NA
X <- as.data.frame(apply(X,2,as.numeric),stringsAsFactors=TRUE)
c <- apply(X,1,sum,na.rm=TRUE)
checkNumZerosCol <- apply(X, 2, function(x) sum(is.na(x)))
if (any(checkNumZerosCol/nrow(X) > z.warning)) {
  cases <- which(checkNumZerosCol/nrow(X) > z.warning)
  if (z.delete == TRUE) {
    if (length(cases) > (ncol(X)-2)) {
      stop(paste("Almost all columns contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep=""))
    }
    X <- X[,-cases]
    action <- "deleted"
    warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep=""))
  } else {
    action <- "found"
    warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. Check out with zPatterns()).\n", sep=""))
  }
}
checkNumZerosRow <- apply(X, 1, function(x) sum(is.na(x)))
if (any(checkNumZerosRow/ncol(X) > z.warning)) {
  cases <- which(checkNumZerosRow/ncol(X) > z.warning)
  if (z.delete == TRUE) {
    if (length(cases) > (nrow(X)-2)) {
      stop(paste("Almost all rows contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep=""))
    }
    X <- X[-cases,]
    action <- "deleted"
    warning(paste("Row no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep=""))
  } else {
    action <- "found"
    warning(paste("Row no. ", cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. Check out with zPatterns()).\n", sep=""))
  }
}
if (imp.missing==FALSE) {if (nrow(dl)==1) dl <- matrix(rep(1,nn),ncol=1)%*%dl}
# Check for closure
closed <- 0
if (all( abs(c - mean(c)) < .Machine$double.eps^0.3 )) closed <- 1
misspat <- as.data.frame(is.na(X)*1,stringsAsFactors=TRUE)
misspat <- as.factor(do.call(paste,c(misspat,sep="")))
levels(misspat) <- 1:(length(levels(misspat)))
## Ordinary lrEM ----
if (rob==FALSE){
  pos <- which(!is.na(colSums(X)))[1]
  if (is.na(pos)) stop("lrEM based on alr requires at least one complete column")
  if (imp.missing==FALSE){
    cpoints <- log(dl)-log(X[,pos])-.Machine$double.eps
    cpoints <- cpoints[,-pos]
  }
  X_alr <- log(X)-log(X[,pos]); X_alr <- as.matrix(X_alr[,-pos])
  nn <- nrow(X_alr); D <- ncol(X_alr)
  if (ini.cov != "multRepl"){
    if (inherits(try(solve(cov(X_alr,use=ini.cov)),silent=TRUE),"try-error")) stop("ini.cov: singular initial covariance matrix. 
Probably too few complete rows in data set for using 'complete.obs'") M <- matrix(colMeans(X_alr,na.rm=T),ncol=1) C <- cov(X_alr,use=ini.cov)} else { X.mr <- multRepl(X,label=NA,dl=dl,frac=frac,imp.missing=imp.missing,closure=closure,z.warning=z.warning,z.delete=z.delete) if (any(X.mr < 0)) {stop("ini.cov: negative values produced using multRepl (please check out closure argument and multRepl help for advice)")} X.mr_alr <- t(apply(X.mr,1,function(x) log(x)-log(x[pos])))[,-pos] M <- matrix(colMeans(X.mr_alr,na.rm=T),ncol=1) C <- cov(X.mr_alr) } iter_again <- 1 niters <- 0 alt.in <- FALSE alt.pat <- 0 alt.mr <- 0 while (iter_again == 1){ niters <- niters + 1 Mnew <- M Cnew <- C Y <- X_alr v <- matrix(0,D,D) for (npat in 1:length(levels(misspat))){ i <- which(misspat==npat) varmiss <- which(is.na(X_alr[i[1],])) if (length(varmiss) == 0) {next} # Skip first pattern if all obs varobs <- which(!is.na(X_alr[i[1],])) if (length(varobs) == 0){ alt.in <- TRUE temp <- multRepl(X[i,,drop=FALSE],label=NA,dl=dl[i,,drop=FALSE],frac=frac,imp.missing=imp.missing,closure=closure,z.warning=z.warning,z.delete=z.delete) Y[i,] <- t(apply(temp,1,function(x) log(x)-log(x[pos])))[,-pos] if (niters == 1){ alt.pat <- c(alt.pat,npat) alt.mr <- list(alt.mr,i) } break } sigmas <- matrix(0,ncol=D) B <- matrix(lm.sweep(M,C,varobs)[[1]],ncol=length(varmiss)) CR <- lm.sweep(M,C,varobs)[[2]] Y[i,varmiss] <- matrix(1,nrow=length(i))%*%B[1,] + X_alr[i, varobs, drop=FALSE]%*%B[2:(length(varobs)+1),] sigmas[varmiss] <- sqrt(diag(as.matrix(CR))) if (imp.missing==FALSE){ for (j in 1:length(varmiss)){ sigma <- sigmas[varmiss[j]] fdN01 <- dnorm((cpoints[i,varmiss[j]]-Y[i,varmiss[j]])/sigma) fdistN01 <- pnorm((cpoints[i,varmiss[j]]-Y[i,varmiss[j]])/sigma) Y[i,varmiss[j]] <- Y[i,varmiss[j]]-sigma*(fdN01/fdistN01) } } v[varmiss,varmiss] <- v[varmiss,varmiss] + CR*length(i) } M <- matrix(colMeans(Y),ncol=1) dif <- Y - matrix(1,nrow=nn)%*%t(M) PC <- t(dif)%*%dif C <- (PC+v)/(nn-1) # Convergence check Mdif <- max(abs(M-Mnew)) Cdif <- max(max(abs(C-Cnew))) if ((max(c(Mdif,Cdif)) < tolerance) | (niters == max.iter)) iter_again <- 0 } Y <- inv.alr(Y,pos) for (i in 1:nn){ if (any(is.na(X[i,]))){ vbdl <- which(is.na(X[i,])) X[i,vbdl] <- (X[i,pos]/Y[i,pos])*Y[i,vbdl] } } } # End ordinary lrEM ## Robust lrEM ---- if (rob==TRUE){ if (ini.cov == "multRepl"){ if (imp.missing == TRUE){ X.mr <- multRepl(X,label=NA,imp.missing=T,closure=closure,z.warning=z.warning,z.delete=z.delete) if (any(X.mr < 0)) {stop("ini.cov: negative values produced using multRepl (please check out closure argument and multRepl help for advice)")} } else {X.mr <- multRepl(X,label=NA,dl=dl,frac=frac,closure=closure,z.warning=z.warning,z.delete=z.delete) if (any(X.mr < 0)) {stop("ini.cov: negative values produced using multRepl (please check out closure argument and multRepl help for advice)")} } } miss <- by(X,misspat,function(x) which(is.na(x[1,]))) obs <- by(X,misspat,function(x) which(!is.na(x[1,]))) iter_again <- 1 niters <- 0 X.old <- X alt.in <- FALSE alt.pat <- 0 alt.mr <- 0 nnn <- 0 while (iter_again == 1){ niters <- niters+1 if (niters > 1) {X.old <- X; C.old <- C} for (npat in 1:length(levels(misspat))){ if (length(miss[[npat]]) == 0) {next} # Skip first pattern if all obs if ((length(obs[[npat]]) == 1) & (!any(npat==alt.pat))){ alt.in <- TRUE if (imp.missing==FALSE){ X[misspat==npat,] <- multRepl(X.old[misspat==npat,,drop=FALSE], label=NA,dl=dl[misspat==npat,,drop=FALSE], frac=frac,closure=closure,z.warning=z.warning,z.delete=z.delete) } if (imp.missing==TRUE){ 
stop("Please remove samples with only one observed component (check it out using zPatterns).") } alt.pat <- c(alt.pat,npat) alt.mr <- list(alt.mr,which(misspat==npat)) } if (length(obs[[npat]]) > 1) { feeder <- X.old[,obs[[npat]]] for (m in 1:length(miss[[npat]])){ p <- miss[[npat]][m] target <- X.old[,p] if (imp.missing==FALSE){ phi <- t(apply(cbind(dl=dl[misspat==npat,p],feeder[misspat==npat,],stringsAsFactors=TRUE),1,ilr)) } regbasis <- as.data.frame(t(apply(cbind(target,feeder),1,ilr)),stringsAsFactors=TRUE) if (niters == 1){ if (ini.cov == "complete.obs"){ if (nrow(regbasis[misspat==1,]) > ncol(regbasis[misspat==1,])) robreg <- rlm(V1 ~ .,data=regbasis[misspat==1,],method="MM",maxit = rlm.maxit) else stop("ini.cov: singular initial covariance matrix. Probably too few complete rows in data set. Use ini.cov = 'multRepl' instead") } if (ini.cov == "multRepl"){ target <- X.mr[,p] feeder <- X.mr[,obs[[npat]]] regbasis.mr <- as.data.frame(t(apply(cbind(target,feeder),1,ilr)),stringsAsFactors=TRUE) robreg <- rlm(V1 ~ .,data=regbasis.mr,method="MM",maxit = rlm.maxit) } } else robreg <- rlm(V1 ~ .,data=regbasis,method="MM",maxit = rlm.maxit) B <- matrix(robreg$coefficients,ncol=1) sigma <- robreg$s est <- cbind(V1=B[1,] + as.matrix(regbasis[misspat==npat,-1])%*%B[-1,],regbasis[misspat==npat,-1],stringsAsFactors=TRUE) if (imp.missing==FALSE){ est[,1] <- est[,1] - sigma*(dnorm((phi[,1]-est[,1])/sigma)/pnorm((phi[,1]-est[,1])/sigma)) } est <- t(apply(est,1,inv.ilr)) est <- est[,1]*(feeder[misspat==npat,1]/est[,2]) X[misspat==npat,p] <- est } } } C <- cov(t(apply(X,1,ilr))) # Convergence check if (niters > 1) if((norm(C-C.old,type="F") < tolerance) | (niters == max.iter)) iter_again <- 0 } } # End robust lrEM ## Final section ---- if (closed==1){ X <- t(apply(X,1,function(x) x/sum(x)*c[1])) } if (suppress.print==FALSE){ if (alt.in) { if (imp.missing==FALSE){ warning("Censoring patterns with only one observed component in the data set.") cat("Censored samples with only one observed component imputed by simple multiplicative replacement. \n") for (i in 2:length(alt.pat)){ cat("Row numbers: "); cat(alt.mr[[i]]); cat("\n\n") } } } cat(paste("No. iterations to converge: ",niters,"\n\n")) } return(as.data.frame(X,stringsAsFactors=TRUE)) }
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/lrEM.R
lrEMplus <- function(X, dl = NULL, rob = FALSE, ini.cov = c("complete.obs", "multRepl"), frac = 0.65, tolerance = 0.0001, max.iter = 50, rlm.maxit=150, suppress.print = FALSE, closure=NULL, z.warning=0.8, z.delete=TRUE, delta=NULL){ if (any(X<0, na.rm=T)) stop("X contains negative values") if (is.character(dl)) stop("dl must be a numeric vector or matrix") if (is.null(dl)){ # If dl not given use min per column dl <- apply(X,2, function(x) min(x[x!=0])) warning("No dl vector or matrix provided. The minimum observed values for each column used as detection limits.") } if (is.vector(dl)) dl <- matrix(dl,nrow=1) dl <- as.matrix(dl) # Avoids problems when dl might be multiple classes if ((is.vector(X)) | (nrow(X)==1)) stop("X must be a data matrix") if (ncol(dl)!=ncol(X)) stop("The number of columns in X and dl do not agree") if ((nrow(dl)>1) & (nrow(dl)!=nrow(X))) stop("The number of rows in X and dl do not agree") if (any(is.na(X))==FALSE) stop("No missing data were found in the data set") if (any(X==0, na.rm=T)==FALSE) stop("No zeros were found in the data set") if (!missing("delta")){ warning("The delta argument is deprecated, use frac instead: frac has been set equal to delta.") frac <- delta } ini.cov <- match.arg(ini.cov) gm <- function(x, na.rm=TRUE){ exp(sum(log(x), na.rm=na.rm) / length(x)) } ## Preliminaries ---- X <- as.data.frame(X,stringsAsFactors=TRUE) nn <- nrow(X); D <- ncol(X) X <- as.data.frame(apply(X,2,as.numeric),stringsAsFactors=TRUE) c <- apply(X,1,sum,na.rm=TRUE) checkNumZerosCol <- apply(X, 2, function(x) sum(is.na(x))) if (any(checkNumZerosCol/nrow(X) > z.warning)) { cases <- which(checkNumZerosCol/nrow(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (ncol(X)-2)) { stop(paste("Almost all columns contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[,-cases] action <- "deleted" warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. Check out with zPatterns()).\n", sep="")) } } checkNumZerosRow <- apply(X, 1, function(x) sum(is.na(x))) if (any(checkNumZerosRow/ncol(X) > z.warning)) { cases <- which(checkNumZerosRow/ncol(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (nrow(X)-2)) { stop(paste("Almost all rows contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[-cases,] action <- "deleted" warning(paste("Row no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Row no. ", cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. 
Check out with zPatterns()).\n", sep="")) } } if (nrow(dl)==1) dl <- matrix(rep(1,nn),ncol=1)%*%dl # Check for closure closed <- 0 if (all( abs(c - mean(c)) < .Machine$double.eps^0.3 )) closed <- 1 if (sum(is.na(X)) > sum(X==0,na.rm=T)){ X.old <- X # Initial simple imputation of zero for (i in 1:nn){ if (any(X.old[i, ]==0,na.rm=T)){ z <- which(X.old[i, ]==0) X.old[i,z] <- frac*dl[i,z] } } # Initial lrEM imputation of missing data X.old <- lrEM(X.old, label = NA, imp.missing = TRUE, ini.cov = ini.cov, rob = rob, tolerance = tolerance, max.iter = max.iter, rlm.maxit = rlm.maxit, suppress.print = TRUE, closure = closure, z.warning = z.warning, z.delete = z.delete) } if (sum(is.na(X)) <= sum(X==0,na.rm=T)){ X.old <- X # Initial ordinary geo mean imputation of missing (ignores 0s in the column if any) gmeans <- apply(X.old,2,function(x) gm(x[x!=0])) for (i in 1:nn){ if (any(is.na(X.old[i, ]))){ z <- which(is.na(X.old[i, ])) X.old[i,z] <- gmeans[z] } } # Initial lrEM imputation of zeros X.old <- lrEM(X.old, label = 0, dl = dl, ini.cov = ini.cov, rob = rob, tolerance = tolerance, max.iter = max.iter, rlm.maxit = rlm.maxit, suppress.print = TRUE, closure = closure, z.warning = z.warning, z.delete = z.delete) } # Initial parameter estimates X.old_alr <- log(X.old)-log(X.old[,D]) X.old_alr <- as.matrix(X.old_alr[,-D]) M.old <- matrix(colMeans(X.old_alr),ncol=1) C.old <- cov(X.old_alr) iter_again <- 1 niters <- 0 while (iter_again == 1){ niters <- niters+1 if (niters > 1) {X.old <- X.new; M.old <- M.new; C.old <- C.new} X.old <- as.matrix(X.old) X.old[which(X==0)] <- 0 X.new <- lrEM(X.old, label = 0, dl = dl, ini.cov = ini.cov, rob = rob, tolerance = tolerance, max.iter = max.iter, rlm.maxit = rlm.maxit, suppress.print = TRUE, closure = closure, z.warning = z.warning, z.delete = z.delete) X.new[is.na(X)] <- NA X.new <- lrEM(X.new, label = NA, imp.missing = TRUE, ini.cov = ini.cov, rob = rob, tolerance = tolerance, max.iter = max.iter, rlm.maxit = rlm.maxit, suppress.print = TRUE, closure = closure, z.warning = z.warning, z.delete = z.delete) X.new_alr <- log(X.new)-log(X.new[,D]) X.new_alr <- as.matrix(X.new_alr[,-D]) M.new <- matrix(colMeans(X.new_alr),ncol=1) C.new <- cov(X.new_alr) # Convergence check Mdif <- max(abs(M.new-M.old)) Cdif <- max(max(abs(C.new-C.old))) if ((max(c(Mdif,Cdif)) < tolerance) | (niters == max.iter)) iter_again <- 0 } ## Final section ---- if (closed==1) X.new <- t(apply(X.new,1,function(x) x/sum(x)*c[1])) # If not closed lrEM above takes care of it if (suppress.print==FALSE) cat(paste("No. iterations to converge: ",niters,"\n\n")) return(as.data.frame(X.new,stringsAsFactors=TRUE)) }
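## ----------------------------------------------------------------------------
## Illustrative usage sketch (added for exposition; not part of the original
## lrEMplus.R source). lrEMplus expects a data set containing both zeros
## (values below the detection limits in dl) and NAs (missing values); unlike
## lrEM there is no label argument. The toy data below are invented. The
## if (FALSE) guard keeps the example from running if this file is sourced.
if (FALSE) {
  library(zCompositions)
  set.seed(4)
  X <- matrix(exp(rnorm(40 * 4)), ncol = 4)
  X <- 100 * X / rowSums(X)              # toy compositions closed to 100
  X[c(3, 16), 2] <- 0                    # censored: below dl[2]
  X[c(8, 22, 31), 3] <- NA               # missing values
  dl <- c(1, 2, 2, 1)                    # hypothetical detection limits
  X.imp <- lrEMplus(X, dl = dl)
}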
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/lrEMplus.R
lrSVD <- function(X, label = NULL, dl = NULL, frac = 0.65, ncp = 2, imp.missing = FALSE, beta = 0.5, method = c("ridge", "EM"), row.w = NULL, coeff.ridge = 1, threshold = 1e-4, seed = NULL, nb.init = 1, max.iter = 1000, z.warning = 0.8, z.delete=TRUE, ...) { if (any(X < 0, na.rm = T)) stop("X contains negative values") if ((is.vector(X)) | (nrow(X) == 1)) stop("X must be a data matrix") if (is.null(label)) stop("A value for label must be given") if (!is.na(label)) { if (!any(X == label, na.rm = T)) stop(paste("Label", label, "was not found in the data set")) if (label != 0 & any(X == 0, na.rm = T)) stop("Zero values not labelled as censored values were found in the data set") if (any(is.na(X))) stop(paste("NA values not labelled as censored values were found in the data set")) } if (is.na(label)) { if (any(X == 0, na.rm = T)) stop("Zero values not labelled as censored values were found in the data set") if (!any(is.na(X), na.rm = T)) stop(paste("Label", label, "was not found in the data set")) } if (imp.missing==FALSE){ if (is.character(dl)) stop("dl must be a numeric vector or matrix") if (is.null(dl)){ # If dl not given use min per column dl <- apply(X,2, function(x) min(x[x!=label])) warning("No dl vector or matrix provided. The minimum observed values for each column used as detection limits.") } if (is.vector(dl)) dl <- matrix(dl,nrow=1) dl <- as.matrix(dl) # Avoids problems when dl might be multiple classes } if (imp.missing == FALSE) { if (ncol(dl) != ncol(X)) stop("The number of columns in X and dl do not agree") if ((nrow(dl) > 1) & (nrow(dl) != nrow(X))) stop("The number of rows in X and dl do not agree") } if (is.numeric(ncp)){ if (ncp > min(nrow(X) - 2, ncol(X) - 2)) stop("ncp is too large for the size of the data matrix") } if (is.null(row.w)) row.w = rep(1, nrow(X)) / nrow(X) # Equal weight for all rows svd.triplet <- function(X, row.w = NULL, col.w = NULL, ncp = Inf) # From FactoMineR package { tryCatch.W.E <- function(expr) { W <- NULL w.handler <- function(w) { W <<- w invokeRestart("muffleWarning") } list(value = withCallingHandlers(tryCatch(expr, error = function(e) e), warning = w.handler), warning = W) } if (is.null(row.w)) row.w <- rep(1/nrow(X), nrow(X)) if (is.null(col.w)) col.w <- rep(1, ncol(X)) ncp <- min(ncp, nrow(X) - 1, ncol(X)) row.w <- row.w/sum(row.w) X <- t(t(X) * sqrt(col.w)) * sqrt(row.w) if (ncol(X) < nrow(X)) { svd.usuelle <- tryCatch.W.E(svd(X, nu = ncp, nv = ncp))$val if (names(svd.usuelle)[[1]] == "message") { svd.usuelle <- tryCatch.W.E(svd(t(X), nu = ncp, nv = ncp))$val if (names(svd.usuelle)[[1]] == "d") { aux <- svd.usuelle$u svd.usuelle$u <- svd.usuelle$v svd.usuelle$v <- aux } else { bb <- eigen(crossprod(X, X), symmetric = TRUE) svd.usuelle <- vector(mode = "list", length = 3) svd.usuelle$d[svd.usuelle$d < 0] = 0 svd.usuelle$d <- sqrt(svd.usuelle$d) svd.usuelle$v <- bb$vec[, 1:ncp] svd.usuelle$u <- t(t(crossprod(t(X), svd.usuelle$v))/svd.usuelle$d[1:ncp]) } } U <- svd.usuelle$u V <- svd.usuelle$v if (ncp > 1) { mult <- sign(as.vector(crossprod(rep(1, nrow(V)), as.matrix(V)))) mult[mult == 0] <- 1 U <- t(t(U) * mult) V <- t(t(V) * mult) } U <- U/sqrt(row.w) V <- V/sqrt(col.w) } else { svd.usuelle <- tryCatch.W.E(svd(t(X), nu = ncp, nv = ncp))$val if (names(svd.usuelle)[[1]] == "message") { svd.usuelle <- tryCatch.W.E(svd(X, nu = ncp, nv = ncp))$val if (names(svd.usuelle)[[1]] == "d") { aux <- svd.usuelle$u svd.usuelle$u <- svd.usuelle$v svd.usuelle$v <- aux } else { bb <- eigen(crossprod(t(X), t(X)), symmetric = TRUE) svd.usuelle <- 
vector(mode = "list", length = 3) svd.usuelle$d[svd.usuelle$d < 0] = 0 svd.usuelle$d <- sqrt(svd.usuelle$d) svd.usuelle$v <- bb$vec[, 1:ncp] svd.usuelle$u <- t(t(crossprod(X, svd.usuelle$v))/svd.usuelle$d[1:ncp]) } } U <- svd.usuelle$v V <- svd.usuelle$u mult <- sign(as.vector(crossprod(rep(1, nrow(V)), as.matrix(V)))) mult[mult == 0] <- 1 V <- t(t(V) * mult)/sqrt(col.w) U <- t(t(U) * mult)/sqrt(row.w) } vs <- svd.usuelle$d[1:min(ncol(X), nrow(X) - 1)] num <- which(vs[1:ncp] < 1e-15) if (length(num) == 1) { U[, num] <- U[, num, drop = FALSE] * vs[num] V[, num] <- V[, num, drop = FALSE] * vs[num] } if (length(num) > 1) { U[, num] <- t(t(U[, num]) * vs[num]) V[, num] <- t(t(V[, num]) * vs[num]) } res <- list(vs = vs, U = U, V = V) return(res) } impute <- function(X = NULL, dl = NULL, bal = NULL, frac = 0.65, ncp = 2, beta = 0.5, method=c("ridge","EM"), row.w = NULL, coeff.ridge = 1, threshold = 1e-4, seed = NULL, max.iter = 1000, init = 1, ...) { # (scale argument removed: no scaling of olr columns by (weighted) variance allowed) # Weighted AVERAGE = moyenne poids moy.p <- function(V, poids) { res <- sum(V * poids,na.rm = TRUE)/sum(poids[!is.na(V)]) } # Geometric mean by columns (for missing data case) gm <- function(x, na.rm = TRUE){ exp(sum(log(x), na.rm = na.rm)/length(x)) } nb.iter <- 1 old <- Inf objective <- 0 if (!is.null(seed)) {set.seed(seed)} # fix seed to have same results # OLR of initial data matrix # Missing pattern missRaw <- which(is.na(X)) obsRaw <- which(!is.na(X)) X <- as.matrix(X) Xaux <- X # copy original raw data with NA caux <- apply(Xaux, 1, sum, na.rm = TRUE) # Initial imputation if (imp.missing == FALSE) { if (init == 1) {X[missRaw] <- frac*dl[missRaw]} # mult repl else {X[missRaw] <- runif(1,0.50,0.8)*dl[missRaw]} # random initialisations if nb.init/init > 1 } else{ # Initial geo mean imputation of missing gmeans <- apply(Xaux,2,function(x) gm(x)) nn <- nrow(X) gmeans <- matrix(rep(1, nn), ncol = 1) %*% gmeans X[missRaw] <- gmeans[missRaw] } # Xhat: OLR-coordinates Xhat <- t(bal %*% t(log(X))) # Number of components ncp <- min(ncp, ncol(Xhat), nrow(Xhat) - 1) # Weighted column mean mean.p.or=mean.p <- apply(Xhat, 2, moy.p,row.w) # Matrix centring Xhat <- t(t(Xhat) - mean.p) # Update X: olr.inv(Xhat) X <- exp(t(t(bal) %*% t(Xhat))) X <- X / apply(X, 1, sum) # Aux data matrix for observed and non-observed data fittedX <- fittedXus <- Xhat fittedXRaw <- fittedXusRaw <- X if (ncp == 0) {nb.iter <- 0} while (nb.iter > 0) { # Update data matrix X[missRaw] <- fittedXRaw[missRaw] # Xhat: OLR-coordinates Xhat <- t(bal %*% t(log(X))) # Recover the centre Xhat <- t(t(Xhat) + mean.p) mean.p <- apply(Xhat, 2, moy.p, row.w) # violations check X <- exp(t(t(bal) %*% t(Xhat))) X <- X/apply(X, 1, sum) Xaux2 <- X * caux viol <- which(Xaux2 > dl) Xaux2[viol] <- dl[viol] Xhat <- t(bal %*% t(log(Xaux2))) mean.p <- apply(Xhat, 2, moy.p, row.w) # Update X: olr.inv(Xhat) X <- exp(t(t(bal) %*% t(Xhat))) X <- X / apply(X, 1, sum) # Impute observed values fittedXusRC <- t(t(fittedXus) + mean.p)# recover centre # RAW values # INV-OLR # fittedXusRCRaw <- exp(t(t(bal) %*% t(fittedXusRC))) fittedXusRCRaw <- fittedXusRCRaw / apply(fittedXusRCRaw, 1, sum) X[obsRaw] <- ((fittedXusRCRaw[obsRaw]) ^ (1 - beta)) * ((X[obsRaw]) ^ beta) # check DL X <- X/apply(X,1,sum) Xaux2 <- X*caux # re-scaled to original viol <- which(Xaux2 > dl) Xaux2[viol] <- dl[viol] # Xhat: OLR-coordinates Xhat <- t(bal %*% t(log(Xaux2))) # Update mean mean.p <- apply(Xhat, 2, moy.p, row.w) # Centring Xhat <- t(t(Xhat) - mean.p) # 
Update X # INV-OLR X <- exp(t(t(bal) %*% t(Xhat))) X <- X / apply(X, 1, sum) # SVD calculation WEIGHTED by row.w and rank ncp svd.res <- svd.triplet(Xhat, row.w = row.w, ncp = ncp) sigma2 <- nrow(Xhat) * ncol(Xhat) / min(ncol(Xhat), nrow(Xhat) - 1) * sum((svd.res$vs[-c(1:ncp)] ^ 2) / ((nrow(Xhat) - 1) * ncol(Xhat) - (nrow(Xhat) - 1) * ncp - ncol(Xhat) * ncp + ncp ^ 2)) sigma2 <- min(sigma2 * coeff.ridge, svd.res$vs[ncp + 1] ^ 2) if (method == "EM") sigma2 <- 0 # usual lambda lambda.us <- svd.res$vs[1:ncp] # calculate the usual new matrix fittedXus <- tcrossprod(t(t(svd.res$U[, 1:ncp, drop = FALSE] * row.w) * lambda.us), svd.res$V[, 1:ncp, drop = FALSE]) fittedXus <- fittedXus / row.w # lambda for regularisation lambda.shrinked <- (svd.res$vs[1:ncp] ^ 2 - sigma2) / svd.res$vs[1:ncp] # calculate the new matrix for regularisation fittedX <- tcrossprod(t(t(svd.res$U[, 1:ncp, drop = FALSE] * row.w) * lambda.shrinked), svd.res$V[, 1:ncp, drop = FALSE]) fittedX <- fittedX / row.w # calculate the Frobenius norm of the difference between iterations (convergence) # INV-OLR fittedXRaw <- exp(t(t(bal) %*% t(fittedX))) fittedXRaw <- fittedXRaw / apply(fittedXRaw, 1, sum) diffRaw <- X / fittedXRaw diffRaw[missRaw] <- 1 # OLR-coordinates diff <- t(bal %*% t(log(diffRaw))) objective <- sum(diff^2*row.w) # objective <- mean((Xhat[-missing]-fittedX[-missing])^2) # Convergence criterion <- abs(1 - objective/old) old <- objective nb.iter <- nb.iter + 1 if (!is.nan(criterion)) { if ((criterion < threshold) && (nb.iter > 5)) nb.iter <- 0 if ((objective < threshold) && (nb.iter > 5)) nb.iter <- 0 } if (nb.iter > max.iter) { nb.iter <- 0 warning(paste("Stopped after ",max.iter," iterations")) } } # END LOOP WHILE # Preparing the results Xhat <- t(t(Xhat) + mean.p.or) # Update X # INV-OLR X <- exp(t(t(bal) %*% t(Xhat))) X <- X/apply(X,1,sum) # completeObs completeObs <- Xaux / apply(Xaux, 1, sum, na.rm = TRUE) completeObs[missRaw] <- X[missRaw] completeObs <- completeObs / apply(completeObs, 1, sum) # violation dl check completeObs <- completeObs * caux viol <- which(completeObs > dl) completeObs[viol] <- dl[viol] completeObs[obsRaw] <- Xaux[obsRaw] completeObs <- completeObs/apply(completeObs, 1, sum) # fittedX <- t(t(fittedX) + mean.p) # INV-OLR fittedXRaw <- exp(t(t(bal) %*% t(fittedX))) fittedXRaw <- fittedXRaw/apply(fittedXRaw,1,sum) # Return complete matrix and imputed matrix result <- list() result$completeObs <- completeObs result$fittedX <- fittedXRaw return(result) } method <- match.arg(method) ## Preliminaries ---- X <- as.data.frame(X, stringsAsFactors = TRUE) nn <- nrow(X) D <- ncol(X) X[X == label] <- NA X <- as.data.frame(apply(X, 2, as.numeric), stringsAsFactors = TRUE) c <- apply(X, 1, sum, na.rm = TRUE) checkNumZerosCol <- apply(X, 2, function(x) sum(is.na(x))) if (any(checkNumZerosCol/nrow(X) > z.warning)) { cases <- which(checkNumZerosCol/nrow(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (ncol(X)-2)) { stop(paste("Almost all columns contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[,-cases] action <- "deleted" warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. 
Check out with zPatterns()).\n", sep="")) } } checkNumZerosRow <- apply(X, 1, function(x) sum(is.na(x))) if (any(checkNumZerosRow/ncol(X) > z.warning)) { cases <- which(checkNumZerosRow/ncol(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (nrow(X)-2)) { stop(paste("Almost all rows contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[-cases,] action <- "deleted" warning(paste("Row no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Row no. ", cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. Check out with zPatterns()).\n", sep="")) } } # Check for closure closed <- 0 if (all(abs(c - mean(c)) < .Machine$double.eps^0.3)) closed <- 1 # Sort columns decreasingly according to observed cells Xaux <- as.matrix(X) XauxClosed <- Xaux / apply(Xaux, 1, sum, na.rm = TRUE) #as.matrix(X) # copy original data set # Replace zeros by NA Xna <- X # copy of original data #colnames(Xna) pz <- apply(apply(Xna, 2, is.na), 2, sum) / nn # Ordered decreasingly by number of zeros (NA) X <- Xna[, order(-pz)] if (imp.missing == FALSE) { if (nrow(dl) == 1) {dl <- matrix(dl[, order(-pz)], nrow = 1)} else{dl <- dl[, order(-pz)]} } # Balance matrix for olr Smat <- diag(rep(1,D)) Smat[upper.tri(Smat)] <- -1 Smat <- Smat[-D,] bal <- Smat numsbp <- dim(Smat)[1] for (f in 1:numsbp) { den <- sum(bal[f, ] == -1) num <- sum(bal[f, ] == 1) bal[f, bal[f, ] == 1] <- sqrt(den / ((den + num) * num)) bal[f, bal[f, ] == -1] <- -sqrt(num / ((den + num) * den)) } # Build dl matrix for SVD imputation if (imp.missing == FALSE) { if (nrow(dl) == 1) dl <- matrix(rep(1, nn), ncol = 1) %*% dl } else {dl <- matrix(0, nrow = nn, ncol = D)} # fake dl matrix for the case of missing # Set observed data as upper bound for estimates of observed values observedRaw <- which(!is.na(X)) missingRaw <- which(is.na(X)) Xaux2 <- as.matrix(X) # DL observed and missing values = maximum value = observed = infinity upper bound Xmax <- apply(X, 2, max, na.rm = TRUE) Xmax <- matrix(rep(1, nn), ncol = 1) %*% Xmax dl[observedRaw] <- Xmax[observedRaw] if (imp.missing == TRUE) { dl[missingRaw] <- Xmax[missingRaw] } colnames(dl) <- colnames(X) ## Imputation --- for (i in 1:nb.init) { if (!any(is.na(X))) return(X) res.impute <- impute(X = X, dl = dl, bal = bal, frac = frac, ncp = ncp, beta = beta, method = method, row.w = row.w, coeff.ridge = coeff.ridge, threshold = threshold, seed = if (!is.null(seed)) { (seed * (i - 1))} else {NULL},max.iter = max.iter,init = i) } ## Final section --- # Re-scale to original units Y <- res.impute$completeObs XauxClosed <- XauxClosed[, order(-pz)] XauxClosed[missingRaw] <- Y[missingRaw] X <- XauxClosed * c if (closed == 1) { X <- (X / apply(X, 1, sum)) * c[1] } # Original order X <- X[, colnames(Xna)] return(as.data.frame(X, stringsAsFactors = TRUE)) }
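## ----------------------------------------------------------------------------
## Illustrative usage sketch (added for exposition; not part of the original
## lrSVD.R source). lrSVD imputes left-censored zeros through a low-rank model
## (ncp components) fitted in olr coordinates and, unlike the alr-based
## routines, also handles wide matrices. The toy data and detection limits are
## invented; ncp must not exceed min(nrow(X) - 2, ncol(X) - 2). The if (FALSE)
## guard keeps the example from running if this file is sourced.
if (FALSE) {
  library(zCompositions)
  set.seed(5)
  X <- matrix(exp(rnorm(15 * 6)), ncol = 6)
  X <- 100 * X / rowSums(X)              # toy compositions closed to 100
  X[c(2, 7), 3] <- 0                     # pretend these fell below dl[3]
  X[c(4, 11), 5] <- 0                    # and these below dl[5]
  dl <- c(1, 1, 2, 1, 2, 1)              # hypothetical detection limits
  X.svd <- lrSVD(X, label = 0, dl = dl, ncp = 2)
}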
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/lrSVD.R
lrSVDplus <- function(X, dl = NULL, frac = 0.65, ncp = 2, beta = 0.5, method = c("ridge", "EM"), row.w = NULL, coeff.ridge = 1, threshold = 1e-4, seed = NULL, nb.init = 1, max.iter = 1000, z.warning=0.8, z.delete=TRUE, ...) { if (any(X < 0, na.rm = T)) stop("X contains negative values") if (is.character(dl)) stop("dl must be a numeric vector or matrix") if (is.null(dl)){ # If dl not given use min per column dl <- apply(X,2, function(x) min(x[x!=0])) warning("No dl vector or matrix provided. The minimum observed values for each column used as detection limits.") } if (is.vector(dl)) dl <- matrix(dl,nrow=1) dl <- as.matrix(dl) # Avoids problems when dl might be multiple classes if ((is.vector(X)) | (nrow(X) == 1)) stop("X must be a data matrix") if (any(is.na(X)) == FALSE) stop("No missing data were found in the data set") if (any(X == 0, na.rm = T) == FALSE) stop("No zeros were found in the data set") if (ncol(dl) != ncol(X)) stop("The number of columns in X and dl do not agree") if ((nrow(dl) > 1) & (nrow(dl) != nrow(X))) stop("The number of rows in X and dl do not agree") if (ncp > min(nrow(X) - 2, ncol(X) - 2)) stop("ncp is too large for the size of the data matrix") if (is.null(row.w)) row.w = rep(1, nrow(X)) / nrow(X) # Equal weight for all rows svd.triplet <- function(X, row.w = NULL, col.w = NULL, ncp = Inf) # From FactoMineR package { tryCatch.W.E <- function(expr) { W <- NULL w.handler <- function(w) { W <<- w invokeRestart("muffleWarning") } list(value = withCallingHandlers(tryCatch( expr, error = function(e) e ), warning = w.handler), warning = W) } if (is.null(row.w)) row.w <- rep(1 / nrow(X), nrow(X)) if (is.null(col.w)) col.w <- rep(1, ncol(X)) ncp <- min(ncp, nrow(X) - 1, ncol(X)) row.w <- row.w / sum(row.w) X <- t(t(X) * sqrt(col.w)) * sqrt(row.w) if (ncol(X) < nrow(X)) { svd.usuelle <- tryCatch.W.E(svd(X, nu = ncp, nv = ncp))$val if (names(svd.usuelle)[[1]] == "message") { svd.usuelle <- tryCatch.W.E(svd(t(X), nu = ncp, nv = ncp))$val if (names(svd.usuelle)[[1]] == "d") { aux <- svd.usuelle$u svd.usuelle$u <- svd.usuelle$v svd.usuelle$v <- aux } else { bb <- eigen(crossprod(X, X), symmetric = TRUE) svd.usuelle <- vector(mode = "list", length = 3) svd.usuelle$d[svd.usuelle$d < 0] = 0 svd.usuelle$d <- sqrt(svd.usuelle$d) svd.usuelle$v <- bb$vec[, 1:ncp] svd.usuelle$u <- t(t(crossprod(t(X), svd.usuelle$v)) / svd.usuelle$d[1:ncp]) } } U <- svd.usuelle$u V <- svd.usuelle$v if (ncp > 1) { mult <- sign(as.vector(crossprod(rep(1, nrow( V )), as.matrix(V)))) mult[mult == 0] <- 1 U <- t(t(U) * mult) V <- t(t(V) * mult) } U <- U / sqrt(row.w) V <- V / sqrt(col.w) } else { svd.usuelle <- tryCatch.W.E(svd(t(X), nu = ncp, nv = ncp))$val if (names(svd.usuelle)[[1]] == "message") { svd.usuelle <- tryCatch.W.E(svd(X, nu = ncp, nv = ncp))$val if (names(svd.usuelle)[[1]] == "d") { aux <- svd.usuelle$u svd.usuelle$u <- svd.usuelle$v svd.usuelle$v <- aux } else { bb <- eigen(crossprod(t(X), t(X)), symmetric = TRUE) svd.usuelle <- vector(mode = "list", length = 3) svd.usuelle$d[svd.usuelle$d < 0] = 0 svd.usuelle$d <- sqrt(svd.usuelle$d) svd.usuelle$v <- bb$vec[, 1:ncp] svd.usuelle$u <- t(t(crossprod(X, svd.usuelle$v)) / svd.usuelle$d[1:ncp]) } } U <- svd.usuelle$v V <- svd.usuelle$u mult <- sign(as.vector(crossprod(rep(1, nrow( V )), as.matrix(V)))) mult[mult == 0] <- 1 V <- t(t(V) * mult) / sqrt(col.w) U <- t(t(U) * mult) / sqrt(row.w) } vs <- svd.usuelle$d[1:min(ncol(X), nrow(X) - 1)] num <- which(vs[1:ncp] < 1e-15) if (length(num) == 1) { U[, num] <- U[, num, drop = FALSE] * vs[num] V[, num] 
<- V[, num, drop = FALSE] * vs[num] } if (length(num) > 1) { U[, num] <- t(t(U[, num]) * vs[num]) V[, num] <- t(t(V[, num]) * vs[num]) } res <- list(vs = vs, U = U, V = V) return(res) } impute <- function(X = NULL, dl = NULL, bal = NULL, frac = 0.65, ncp = 2, beta = 0.5, method = c("ridge", "EM"), row.w = NULL, coeff.ridge = 1, threshold = 1e-4, seed = NULL, max.iter = 1000, init = 1, ...) { # (scale argument removed: no scaling of olr columns by (weighted) variance allowed) # Weighted AVERAGE = moyenne poids moy.p <- function(V, poids) { res <- sum(V * poids, na.rm = TRUE) / sum(poids[!is.na(V)]) } # Geometric mean by columns gm <- function(x, na.rm = TRUE) { exp(sum(log(x), na.rm = na.rm) / length(x)) } nb.iter <- 1 old <- Inf objective <- 0 if (!is.null(seed)) {set.seed(seed)} # fix seed to have same results # OLR of initial DATA MATRIX # patterns missRaw <- which(is.na(X)) zeRaw <- which(X == 0) obsRaw <- which((!is.na(X)) & (!(X == 0))) X <- as.matrix(X) Xaux <- X # copy original raw data with NA and zeros caux <- apply(Xaux, 1, sum, na.rm = TRUE) # Initial imputation of zeros if (init == 1) {X[zeRaw] <- frac * dl[zeRaw]} # mult repl else {X[zeRaw] <- runif(1, 0.50, 0.8) * dl[zeRaw]} # random initialisations if nb.init/init > 1 # Initial geo mean imputation of missing (ignores 0s in original column if any) gmeans <- apply(Xaux, 2, function(x) gm(x[x != 0])) nn <- nrow(X) gmeans <- matrix(rep(1, nn), ncol = 1) %*% gmeans X[missRaw] <- gmeans[missRaw] # Xhat: OLR-coordinates Xhat <- t(bal %*% t(log(X))) # Number of components ncp <- min(ncp, ncol(Xhat), nrow(Xhat) - 1) # Weighted column mean mean.p.or=mean.p <- apply(Xhat, 2, moy.p, row.w) # Matrix centring Xhat <- t(t(Xhat) - mean.p) # update X: olr.inv(Xhat) X <- exp(t(t(bal) %*% t(Xhat))) X <- X / apply(X, 1, sum) # aux data matrix for observed and non-observed data fittedX <- fittedXus <- Xhat fittedXRaw <- fittedXusRaw <- X if (ncp == 0) {nb.iter <- 0} while (nb.iter > 0) { # Update data matrix X[missRaw] <- fittedXRaw[missRaw] X[zeRaw] <- fittedXRaw[zeRaw] # Xhat: OLR-coordinates Xhat <- t(bal %*% t(log(X))) # Recover the centre Xhat <- t(t(Xhat) + mean.p) mean.p <- apply(Xhat, 2, moy.p, row.w) # violations check X <- exp(t(t(bal) %*% t(Xhat))) X <- X/apply(X, 1, sum) Xaux2 <- X * caux viol <- which(Xaux2 > dl) Xaux2[viol] <- dl[viol] Xhat <- t(bal %*% t(log(Xaux2))) mean.p <- apply(Xhat, 2, moy.p, row.w) # Update X: olr.inv(Xhat) X <- exp(t(t(bal) %*% t(Xhat))) X <- X / apply(X, 1, sum) # Impute observed values fittedXusRC <- t(t(fittedXus) + mean.p)# recover centre # RAW values # INV-OLR fittedXusRCRaw <- exp(t(t(bal) %*% t(fittedXusRC))) fittedXusRCRaw <- fittedXusRCRaw / apply(fittedXusRCRaw, 1, sum) X[obsRaw] <- ((fittedXusRCRaw[obsRaw]) ^ (1 - beta)) * ((X[obsRaw]) ^ beta) # check DL X <- X / apply(X, 1, sum) Xaux2 <- X * caux # re-scaled to original viol <- which(Xaux2 > dl) Xaux2[viol] <- dl[viol] # Xhat: OLR-coordinates Xhat <- t(bal %*% t(log(Xaux2))) # Update mean mean.p <- apply(Xhat, 2, moy.p, row.w) # Centring Xhat <- t(t(Xhat) - mean.p) # Update X # INV-OLR X <- exp(t(t(bal) %*% t(Xhat))) X <- X / apply(X, 1, sum) # SVD calculation WEIGHTED by row.w and rank ncp svd.res <- svd.triplet(Xhat, row.w = row.w, ncp = ncp) sigma2 <- nrow(Xhat) * ncol(Xhat) / min(ncol(Xhat), nrow(Xhat) - 1) * sum((svd.res$vs[-c(1:ncp)] ^ 2) / ((nrow(Xhat) - 1) * ncol(Xhat) - (nrow(Xhat) - 1) * ncp - ncol(Xhat) * ncp + ncp ^ 2 )) sigma2 <- min(sigma2 * coeff.ridge, svd.res$vs[ncp + 1] ^ 2) if (method == "EM") sigma2 <- 0 # Usual lambda 
lambda.us <- svd.res$vs[1:ncp] # Calculate the usual new matrix fittedXus <- tcrossprod(t(t(svd.res$U[, 1:ncp, drop = FALSE] * row.w) * lambda.us), svd.res$V[, 1:ncp, drop = FALSE]) fittedXus <- fittedXus / row.w # Lambda for regularisation lambda.shrinked <- (svd.res$vs[1:ncp] ^ 2 - sigma2) / svd.res$vs[1:ncp] # Calculate new matrix for regularisation fittedX <- tcrossprod(t(t(svd.res$U[, 1:ncp, drop = FALSE] * row.w) * lambda.shrinked), svd.res$V[, 1:ncp, drop = FALSE]) fittedX <- fittedX / row.w # Calculate Frobenius norm of the difference between iterations (convergence) # INV-OLR fittedXRaw <- exp(t(t(bal) %*% t(fittedX))) fittedXRaw <- fittedXRaw / apply(fittedXRaw, 1, sum) diffRaw <- X / fittedXRaw diffRaw[missRaw] <- 1 diffRaw[zeRaw] <- 1 # OLR-coordinates diff <- t(bal %*% t(log(diffRaw))) objective <- sum(diff ^ 2 * row.w) # objective <- mean((Xhat[-missing]-fittedX[-missing])^2) # Convergence criterion <- abs(1 - objective / old) old <- objective nb.iter <- nb.iter + 1 if (!is.nan(criterion)) { if ((criterion < threshold) && (nb.iter > 5)) nb.iter <- 0 if ((objective < threshold) && (nb.iter > 5)) nb.iter <- 0 } if (nb.iter > max.iter) { nb.iter <- 0 warning(paste("Stopped after ", max.iter, " iterations")) } } # END LOOP WHILE # Preparing the result Xhat <- t(t(Xhat) + mean.p.or) # Update X # INV-OLR X <- exp(t(t(bal) %*% t(Xhat))) X <- X / apply(X, 1, sum) # completeObs completeObs <- Xaux / apply(Xaux, 1, sum, na.rm = TRUE) completeObs[missRaw] <- X[missRaw] completeObs[zeRaw] <- X[zeRaw] completeObs <- completeObs / apply(completeObs, 1, sum) # violation dl check completeObs <- completeObs * caux viol <- which(completeObs > dl) completeObs[viol] <- dl[viol] completeObs[obsRaw] <- Xaux[obsRaw] completeObs <- completeObs/apply(completeObs, 1, sum) # fittedX <- t(t(fittedX) + mean.p) # INV-OLR fittedXRaw <- exp(t(t(bal) %*% t(fittedX))) fittedXRaw <- fittedXRaw / apply(fittedXRaw, 1, sum) # Return complete matrix and imputed matrix result <- list() result$completeObs <- completeObs result$fittedX <- fittedXRaw return(result) } # end IMPUTE function method <- match.arg(method) ## Preliminaries ---- X <- as.data.frame(X, stringsAsFactors = TRUE) nn <- nrow(X) D <- ncol(X) X <- as.data.frame(apply(X, 2, as.numeric), stringsAsFactors = TRUE) c <- apply(X, 1, sum, na.rm = TRUE) # Check for closure closed <- 0 if (all(abs(c - mean(c)) < .Machine$double.eps ^ 0.3)) closed <- 1 Xaux <- as.matrix(X) XauxClosed <- Xaux / apply(Xaux, 1, sum, na.rm = TRUE) # Order columns by number of zeros Xtmp <- X # copy of original data Xtmp[X == 0] <- NA pz <- apply(apply(Xtmp, 2, is.na), 2, sum) / nn X <- X[, order(-pz)] # decreasing order if (nrow(dl) == 1) { dl <- matrix(dl[, order(-pz)], nrow = 1) } else { dl <- dl[, order(-pz)] } # Indexes type of values missingRaw <- which(is.na(X)) # missing zeroRaw <- which(X == 0) # zeros observedRaw <- which((!is.na(X)) & (!(X == 0))) # observed checkNumZerosCol <- apply(X, 2, function(x) sum(is.na(x))) if (any(checkNumZerosCol/nrow(X) > z.warning)) { cases <- which(checkNumZerosCol/nrow(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (ncol(X)-2)) { stop(paste("Almost all columns contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[,-cases] action <- "deleted" warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Column no. 
",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. Check out with zPatterns()).\n", sep="")) } } checkNumZerosRow <- apply(X, 1, function(x) sum(is.na(x))) if (any(checkNumZerosRow/ncol(X) > z.warning)) { cases <- which(checkNumZerosRow/ncol(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (nrow(X)-2)) { stop(paste("Almost all rows contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[-cases,] action <- "deleted" warning(paste("Row no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Row no. ", cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. Check out with zPatterns()).\n", sep="")) } } # Balance matrix for olr Smat <- diag(rep(1, D)) Smat[upper.tri(Smat)] <- -1 Smat <- Smat[-D, ] bal <- Smat numsbp <- dim(Smat)[1] for (f in 1:numsbp) { den <- sum(bal[f, ] == -1) num <- sum(bal[f, ] == 1) bal[f, bal[f, ] == 1] <- sqrt(den / ((den + num) * num)) bal[f, bal[f, ] == -1] <- -sqrt(num / ((den + num) * den)) } # Build dl matrix for SVD imputation if (nrow(dl) == 1) dl <- matrix(rep(1, nn), ncol = 1) %*% dl # Set observed data as upper bound for estimates of observed values Xaux2 <- as.matrix(X) # DL observed and missing values = maximum value = observed = infinity upper bound Xmax <- apply(X, 2, max, na.rm = TRUE) Xmax <- matrix(rep(1, nn), ncol = 1) %*% Xmax dl[observedRaw] <- Xmax[observedRaw] # dl observed dl[missingRaw] <- Xmax[missingRaw] # dl missing colnames(dl) <- colnames(X) ## Imputation --- for (i in 1:nb.init) { if (!any(is.na(X))) return(X) res.impute <- impute(X = X, dl = dl, bal = bal, frac = frac, ncp = ncp, beta = beta, method = method, row.w = row.w, coeff.ridge = coeff.ridge, threshold = threshold, seed = if (!is.null(seed)) {(seed * (i - 1))} else {NULL}, max.iter = max.iter, init = i) } ## Final section --- # Re-scale to original units Y <- res.impute$completeObs XauxClosed <- XauxClosed[, order(-pz)] XauxClosed[missingRaw] <- Y[missingRaw] XauxClosed[zeroRaw] <- Y[zeroRaw] X <- XauxClosed * c if (closed == 1) { X <- (X / apply(X, 1, sum)) * c[1] } # Original order X <- X[, colnames(Xtmp)] return(as.data.frame(X, stringsAsFactors = TRUE)) }
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/lrSVDplus.R
multKM <- function (X,label=NULL,dl=NULL,n.draws=1000,n.knots=NULL,z.warning=0.8,z.delete=TRUE) { if (any(X<0, na.rm=T)) stop("X contains negative values") if ((is.vector(X)) | (nrow(X)==1)) stop("X must be a data matrix") if (is.null(label)) stop("A value for label must be given") if (!is.na(label)){ if (!any(X==label,na.rm=T)) stop(paste("Label",label,"was not found in the data set")) if (label!=0 & any(X==0,na.rm=T)) stop("Zero values not labelled as censored values were found in the data set") if (any(is.na(X))) stop(paste("NA values not labelled as censored values were found in the data set")) } if (is.na(label)){ if (any(X==0,na.rm=T)) stop("Zero values not labelled as censored values were found in the data set") if (!any(is.na(X),na.rm=T)) stop(paste("Label",label,"was not found in the data set")) } if (is.character(dl)) stop("dl must be a numeric vector or matrix") if (is.null(dl)){ # If dl not given use min per column dl <- apply(X,2, function(x) min(x[x!=label])) warning("No dl vector or matrix provided. The minimum observed values for each column used as detection limits.") } if (is.vector(dl)) dl <- matrix(dl,nrow=1) dl <- as.matrix(dl) # Avoids problems when dl might be multiple classes if (ncol(dl)!=ncol(X)) stop("The number of columns in X and dl do not agree") if ((nrow(dl)>1) & (nrow(dl)!=nrow(X))) stop("The number of rows in X and dl do not agree") if ((!is.null(n.knots)) & (length(n.knots)!=1) & (length(n.knots)!=ncol(X))) stop("The dimensions of n.knots and X do not agree") if ((!is.null(n.knots)) & (length(n.knots)==1)) {n.knots <- rep(list(n.knots),ncol(X))} km.imp <- function(x,dl,...){ who <- is.na(x); w <- which(who) xcen <- ifelse(who,TRUE,FALSE) x[who] <- dl[who] km.ecdf <- cenfit(x,xcen) x.km <- rev(km.ecdf@survfit$time) y.km <- rev(km.ecdf@survfit$surv) if (is.null(n.knots.part)) {scdf <- smooth.spline(x.km,y.km)} if (!is.null(n.knots.part)) {scdf <- smooth.spline(x.km,y.km,nknots=n.knots.part)} scdf.fun <- approxfun(scdf$x,scdf$y) inv.scdf <- approxfun(scdf$y,scdf$x) for (i in 1:length(w)){ if (dl[w[i]] > min(x[!who])){ temp <- inv.scdf(runif(n.draws,0,scdf.fun(dl[w[i]]))) x[w[i]] <- exp(mean(log(temp),na.rm=T)) } } return(as.numeric(x)) } X[X==label] <- NA X <- apply(X,2,as.numeric) checkNumZerosCol <- apply(X, 2, function(x) sum(is.na(x))) if (any(checkNumZerosCol/nrow(X) > z.warning)) { cases <- which(checkNumZerosCol/nrow(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (ncol(X)-2)) { stop(paste("Almost all columns contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[,-cases] action <- "deleted" warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. Check out with zPatterns()).\n", sep="")) } } checkNumZerosRow <- apply(X, 1, function(x) sum(is.na(x))) if (any(checkNumZerosRow/ncol(X) > z.warning)) { cases <- which(checkNumZerosRow/ncol(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (nrow(X)-2)) { stop(paste("Almost all rows contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[-cases,] action <- "deleted" warning(paste("Row no. 
",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Row no. ", cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. Check out with zPatterns()).\n", sep="")) } } nn <- nrow(X); p <- ncol(X) c <- apply(X,1,sum,na.rm=TRUE) # Check for closure closed <- 0 if (all( abs(c - mean(c)) < .Machine$double.eps^0.3 )) closed <- 1 if (nrow(dl)==1){ dl <- matrix(rep(1,nn),ncol=1)%*%dl est <- dl } else est <- dl for (part in 1:p) { if (any(is.na(X[,part]))) { n.knots.part <- n.knots[[part]] est[,part] <- km.imp(X[,part],dl[,part],n.draws,n.knots.part) } else {est[,part] <- 0} } Y <- X for (i in 1:nn){ if (any(is.na(X[i,]))){ z <- which(is.na(X[i,])) Y[i,z] <- est[i,z] Y[i,-z] <- (1-(sum(Y[i,z]))/c[i])*X[i,-z] X[i,z] <- as.numeric((X[i,-z][1]/Y[i,-z][1]))*Y[i,z] } } if (closed==1){ X <- t(apply(X,1,function(x) x/sum(x)*c[1])) } return(as.data.frame(X,stringsAsFactors=TRUE)) }
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/multKM.R
multLN <- function (X,label=NULL,dl=NULL,rob=FALSE,random=FALSE,z.warning=0.8,z.delete=TRUE) { if (any(X<0, na.rm=T)) stop("X contains negative values") if ((is.vector(X)) | (nrow(X)==1)) stop("X must be a data matrix") if (is.null(label)) stop("A value for label must be given") if (!is.na(label)){ if (!any(X==label,na.rm=T)) stop(paste("Label",label,"was not found in the data set")) if (label!=0 & any(X==0,na.rm=T)) stop("Zero values not labelled as censored values were found in the data set") if (any(is.na(X))) stop(paste("NA values not labelled as censored values were found in the data set")) } if (is.na(label)){ if (any(X==0,na.rm=T)) stop("Zero values not labelled as censored values were found in the data set") if (!any(is.na(X),na.rm=T)) stop(paste("Label",label,"was not found in the data set")) } if (is.character(dl)) stop("dl must be a numeric vector or matrix") if (is.null(dl)){ # If dl not given use min per column dl <- apply(X,2, function(x) min(x[x!=label])) warning("No dl vector or matrix provided. The minimum observed values for each column used as detection limits.") } if (is.vector(dl)) dl <- matrix(dl,nrow=1) dl <- as.matrix(dl) # Avoids problems when dl might be multiple classes if (ncol(dl)!=ncol(X)) stop("The number of columns in X and dl do not agree") if ((nrow(dl)>1) & (nrow(dl)!=nrow(X))) stop("The number of rows in X and dl do not agree") X[X==label] <- NA X <- apply(X,2,as.numeric) checkNumZerosCol <- apply(X, 2, function(x) sum(is.na(x))) if (any(checkNumZerosCol/nrow(X) > z.warning)) { cases <- which(checkNumZerosCol/nrow(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (ncol(X)-2)) { stop(paste("Almost all columns contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[,-cases] action <- "deleted" warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. Check out with zPatterns()).\n", sep="")) } } checkNumZerosRow <- apply(X, 1, function(x) sum(is.na(x))) if (any(checkNumZerosRow/ncol(X) > z.warning)) { cases <- which(checkNumZerosRow/ncol(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (nrow(X)-2)) { stop(paste("Almost all rows contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[-cases,] action <- "deleted" warning(paste("Row no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Row no. ", cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. 
Check out with zPatterns()).\n", sep="")) } } nn <- nrow(X); p <- ncol(X) c <- apply(X,1,sum,na.rm=TRUE) # Check for closure closed <- 0 if (all( abs(c - mean(c)) < .Machine$double.eps^0.3 )) closed <- 1 if (nrow(dl)==1){ dl <- matrix(rep(1,nn),ncol=1)%*%dl est <- dl } else est <- dl if (random==FALSE){ cenGeoMean <- function(x,dl,...){ xcen <- ifelse(is.na(x),TRUE,FALSE) x[is.na(x)] <- dl[is.na(x)] if (rob) {ymean <- summary(cenros(x,xcen))$coefficients[1]; ysd <- summary(cenros(x,xcen))$coefficients[2]} else {ymean <- mean(suppressWarnings(cenmle(log(x),xcen,dist="gaussian")))[1]; ysd <- sd(suppressWarnings(cenmle(log(x),xcen,dist="gaussian")))[1]} fdl <- dnorm((log(dl)-ymean)/ysd, mean = 0, sd = 1, log = FALSE) Pdl <- pnorm((log(dl)-ymean)/ysd, mean = 0, sd = 1, log.p = FALSE) gmeancen <- exp(ymean-ysd*(fdl/Pdl)) return(as.numeric(gmeancen)) } for (part in 1:p) { if (any(is.na(X[,part]))) { est[,part] <- cenGeoMean(X[,part],dl[,part],rob) } else {est[,part] <- 0} } Y <- X for (i in 1:nn){ if (any(is.na(X[i,]))){ z <- which(is.na(X[i,])) Y[i,z] <- est[i,z] Y[i,-z] <- (1-(sum(Y[i,z]))/c[i])*X[i,-z] X[i,z] <- as.numeric((X[i,-z][1]/Y[i,-z][1]))*Y[i,z] } } } # End if not random else{ # If random meanln <- rep(0,p); sdln <- rep(0,p) for (j in 1:p){ x <- X[,j] xcen <- ifelse(is.na(X[,j]),TRUE,FALSE) x[is.na(X[,j])] <- dl[is.na(X[,j]),j] if (rob) {ymean <- summary(cenros(x,xcen))$coefficients[1]; ysd <- summary(cenros(x,xcen))$coefficients[2]} else {ymean <- mean(suppressWarnings(cenmle(log(x),xcen,dist="gaussian")))[1]; ysd <- sd(suppressWarnings(cenmle(log(x),xcen,dist="gaussian")))[1]} meanln[j] <- ymean sdln[j] <- ysd } Y <- X for (i in 1:nn){ if (any(is.na(X[i,]))){ z <- which(is.na(X[i,])) for (j in 1:length(z)){ Y[i,z[j]] <- exp(rtruncnorm(1,-Inf,log(dl[i,z[j]]),meanln[z[j]],sdln[z[j]])) } Y[i,-z] <- (1-(sum(Y[i,z]))/c[i])*X[i,-z] X[i,z] <- as.numeric((X[i,-z][1]/Y[i,-z][1]))*Y[i,z] } } } # End if random if (closed==1){ X <- t(apply(X,1,function(x) x/sum(x)*c[1])) } return(as.data.frame(X,stringsAsFactors=TRUE)) }
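## A minimal usage sketch for multLN(), using the same Water/mdl inputs as the
## multKM() sketch above (a plausible but unofficial pairing). Setting
## random = TRUE replaces each nondetect by a random draw from the fitted
## lognormal distribution truncated at its detection limit, rather than by the
## conditional geometric mean used by default.
data(Water)
data(mdl)
Water_multLN  <- multLN(Water, label = 0, dl = mdl)
Water_multLNr <- multLN(Water, label = 0, dl = mdl, random = TRUE)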
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/multLN.R
multRepl <- function(X,label=NULL,dl=NULL,frac=0.65,imp.missing=FALSE,closure=NULL,z.warning=0.8,z.delete=TRUE,delta=NULL){ if (any(X<0, na.rm=T)) stop("X contains negative values") if (is.character(X)) stop("X is not a valid data matrix or vector.") if (is.null(label)) stop("A value for label must be given") if (!is.na(label)){ if (!any(X==label,na.rm=T)) stop(paste("Label",label,"was not found in the data set")) if (label!=0 & any(X==0,na.rm=T)) stop("Zero values not labelled as censored or missing values were found in the data set") if (any(is.na(X))) stop(paste("NA values not labelled as censored or missing values were found in the data set")) } if (is.na(label)){ if (any(X==0,na.rm=T)) stop("Zero values not labelled as censored or missing values were found in the data set") if (!any(is.na(X),na.rm=T)) stop(paste("Label",label,"was not found in the data set")) } if (imp.missing==FALSE){ if (is.character(dl)) stop("dl must be a numeric vector or matrix") if (is.null(dl)){ # If dl not given use min per column dl <- apply(X,2, function(x) min(x[x!=label])) warning("No dl vector or matrix provided. The minimum observed values for each column used as detection limits.") } if (is.vector(dl)) dl <- matrix(dl,nrow=1) dl <- as.matrix(dl) # Avoids problems when dl might be multiple classes } if (is.vector(X)){ if (imp.missing==TRUE) stop("Data matrix required: missing values cannot be imputed in single vectors") if (ncol(dl)!=ncol(as.data.frame(matrix(X,ncol=length(X)),stringsAsFactors=TRUE))) stop("The number of columns in X and dl do not agree") } if (!is.vector(X)){ if (imp.missing==FALSE){ if (ncol(dl)!=ncol(X)) stop("The number of columns in X and dl do not agree") if ((nrow(dl)>1) & (nrow(dl)!=nrow(X))) stop("The number of rows in X and dl do not agree") } } if (!missing("delta")){ warning("The delta argument is deprecated, use frac instead: frac has been set equal to delta.") frac <- delta } gm <- function(x, na.rm=TRUE){ exp(sum(log(x), na.rm=na.rm) / length(x[!is.na(x)])) } nam <- NULL if (!is.null(names(X))) nam <- names(X) if (is.vector(X)) X <- as.data.frame(matrix(X,ncol=length(X)),stringsAsFactors=TRUE) X[X==label] <- NA X <- apply(X,2,as.numeric) if (is.vector(X)) X <- as.data.frame(matrix(X,ncol=length(X)),stringsAsFactors=TRUE) if (nrow(X) > 1){ checkNumZerosCol <- apply(X, 2, function(x) sum(is.na(x))) if (any(checkNumZerosCol/nrow(X) > z.warning)) { cases <- which(checkNumZerosCol/nrow(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (ncol(X)-2)) { stop(paste("Almost all columns contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[,-cases] action <- "deleted" warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. Check out with zPatterns()).\n", sep="")) } } checkNumZerosRow <- apply(X, 1, function(x) sum(is.na(x))) if (any(checkNumZerosRow/ncol(X) > z.warning)) { cases <- which(checkNumZerosRow/ncol(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (nrow(X)-2)) { stop(paste("Almost all rows contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[-cases,] action <- "deleted" warning(paste("Row no. 
",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Row no. ", cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. Check out with zPatterns()).\n", sep="")) } } } nn <- nrow(X); D <- ncol(X) c <- apply(X,1,sum,na.rm=TRUE) # Check for closure closed <- 0 if (all(abs(c - mean(c)) < .Machine$double.eps^0.3)) closed <- 1 if (imp.missing==FALSE){ if (nrow(dl)==1) dl <- matrix(rep(1,nn),ncol=1)%*%dl } Y <- X if (!is.null(closure)){ if (closed == 1) {stop("closure: The data are already closed to ",c[1])} resid <- apply(X,1, function(x) closure-sum(x, na.rm = TRUE)) Xresid <- cbind(X,resid,stringsAsFactors=TRUE) c <- rep(closure,nn) Y <- Xresid } if (imp.missing==FALSE){ for (i in 1:nn){ if (any(is.na(X[i,]))){ z <- which(is.na(X[i,])) Y[i,z] <- frac*dl[i,z] if (!is.null(closure)){ Y[i,-z] <- (1-(sum(Y[i,z]))/c[i])*Xresid[i,-z] tmp <- Y[i,-(D+1)] X[i,z] <- as.numeric((X[i,-z][1]/tmp[-z][1]))*Y[i,z] } else{ Y[i,-z] <- (1-(sum(Y[i,z]))/c[i])*X[i,-z] X[i,z] <- as.numeric((X[i,-z][1]/Y[i,-z][1]))*Y[i,z] } } } } if (imp.missing==TRUE){ gms <- apply(X,2,gm) for (i in 1:nn){ if (any(is.na(X[i,]))){ z <- which(is.na(X[i,])) Y[i,z] <- gms[z] if (!is.null(closure)){ Y[i,-z] <- ((c[i]-(sum(Y[i,z])))/sum(Xresid[i,-z]))*Xresid[i,-z] tmp <- Y[i,-(D+1)] X[i,z] <- as.numeric((X[i,-z][1]/tmp[-z][1]))*Y[i,z] } else{ Y[i,-z] <- ((c[i]-(sum(Y[i,z])))/sum(X[i,-z]))*X[i,-z] X[i,z] <- as.numeric((X[i,-z][1]/Y[i,-z][1]))*Y[i,z] } } } } if (!is.null(nam)) names(X) <- nam if (closed==1){ X <- t(apply(X,1,function(x) x/sum(x)*c[1])) } if (any(X < 0)) warning("multRepl: negative imputed values were generated (please check out help for advice)") return(as.data.frame(X,stringsAsFactors=TRUE)) }
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/multRepl.R
multReplus <- function(X, dl = NULL, frac = 0.65, closure = NULL, z.warning = 0.8, z.delete = TRUE, delta = NULL){ if (any(X<0, na.rm=T)) stop("X contains negative values") if (is.character(dl)) stop("dl must be a numeric vector or matrix") if (is.null(dl)){ # If dl not given use min per column dl <- apply(X,2, function(x) min(x[x!=0])) warning("No dl vector or matrix provided. The minimum observed values for each column used as detection limits.") } if (is.vector(dl)) dl <- matrix(dl,nrow=1) dl <- as.matrix(dl) # Avoids problems when dl might be multiple classes if ((is.vector(X)) | (nrow(X)==1)) stop("X must be a data matrix") if (ncol(dl)!=ncol(X)) stop("The number of columns in X and dl do not agree") if ((nrow(dl)>1) & (nrow(dl)!=nrow(X))) stop("The number of rows in X and dl do not agree") if (any(is.na(X))==FALSE) stop("No missing data were found in the data set") if (any(X==0, na.rm=T)==FALSE) stop("No zeros were found in the data set") if (!missing("delta")){ warning("The delta argument is deprecated, use frac instead: frac has been set equal to delta.") frac <- delta } gm <- function(x, na.rm=TRUE){ exp(sum(log(x), na.rm=na.rm) / length(x[!is.na(x)])) } nam <- NULL if (!is.null(names(X))) nam <- names(X) if (is.vector(X)) X <- as.data.frame(matrix(X,ncol=length(X)),stringsAsFactors=TRUE) ## Preliminaries ---- X <- as.data.frame(X,stringsAsFactors=TRUE) nn <- nrow(X); D <- ncol(X) X <- as.data.frame(apply(X,2,as.numeric),stringsAsFactors=TRUE) c <- apply(X,1,sum,na.rm=TRUE) checkNumZerosCol <- apply(X, 2, function(x) sum(is.na(x))) if (any(checkNumZerosCol/nrow(X) > z.warning)) { cases <- which(checkNumZerosCol/nrow(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (ncol(X)-2)) { stop(paste("Almost all columns contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[,-cases] action <- "deleted" warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Column no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. Check out with zPatterns()).\n", sep="")) } } checkNumZerosRow <- apply(X, 1, function(x) sum(is.na(x))) if (any(checkNumZerosRow/ncol(X) > z.warning)) { cases <- which(checkNumZerosRow/ncol(X) > z.warning) if (z.delete == TRUE) { if (length(cases) > (nrow(X)-2)) { stop(paste("Almost all rows contain >", z.warning*100, "% zeros/unobserved values (see arguments z.warning and z.delete).", sep="")) } X <- X[-cases,] action <- "deleted" warning(paste("Row no. ",cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete).\n", sep="")) } else { action <- "found" warning(paste("Row no. ", cases," containing >", z.warning*100, "% zeros/unobserved values ", action, " (see arguments z.warning and z.delete. 
Check out with zPatterns()).\n", sep="")) } } if (nrow(dl)==1) dl <- matrix(rep(1,nn),ncol=1)%*%dl # Check for closure closed <- 0 if (all( abs(c - mean(c)) < .Machine$double.eps^0.3 )) closed <- 1 Y <- X if (!is.null(closure)){ if (closed == 1) {stop("closure: The data are already closed to ",c[1])} resid <- apply(X,1, function(x) closure-sum(x, na.rm = TRUE)) Xresid <- cbind(X,resid,stringsAsFactors=TRUE) c <- rep(closure,nn) Y <- Xresid } ## Imputation of missing (ignoring 0s in column if any) ---- gms <- apply(X,2,function(x) gm(x[x!=0])) for (i in 1:nn){ if (any(is.na(X[i,]))){ z <- which(is.na(X[i,])) Y[i,z] <- gms[z] if (!is.null(closure)){ Y[i,-z] <- ((c[i]-(sum(Y[i,z])))/sum(Xresid[i,-z]))*Xresid[i,-z] tmp <- Y[i,-(D+1)] nz_idx <- which(tmp[-z]!=0)[1] # Use a non-zero part for adjustment X[i,z] <- as.numeric((X[i,-z][nz_idx]/tmp[-z][nz_idx]))*Y[i,z] } else{ Y[i,-z] <- ((c[i]-(sum(Y[i,z])))/sum(X[i,-z]))*X[i,-z] nz_idx <- which(Y[i,-z]!=0)[1] # Use a non-zero part for adjustment X[i,z] <- as.numeric((X[i,-z][nz_idx]/Y[i,-z][nz_idx]))*Y[i,z] } } } if (closed==1){ X <- t(apply(X,1,function(x) x/sum(x)*c[1])) } if (any(X < 0, na.rm = T)) stop("multRepl: negative imputed values were generated (please check out help for advice)") ## Imputation of zeros ---- X <- multRepl(X,label=0,dl=dl,frac=frac,closure=closure,z.warning=z.warning,z.delete=z.delete) ## Final section ---- if (!is.null(nam)) names(X) <- nam return(as.data.frame(X,stringsAsFactors=TRUE)) }
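## A minimal, self-contained sketch for multReplus(): the input must contain
## both zeros (nondetects, handed on to multRepl) and NAs (missing values,
## replaced by the geometric mean of the observed values in each column before
## the multiplicative adjustment). The toy data below are made up purely for
## illustration.
set.seed(2)
toyZM <- matrix(abs(rnorm(60, mean = 10)), ncol = 4,
                dimnames = list(NULL, paste0("c", 1:4)))
toyZM[2, 1] <- 0                        # nondetect, coded as zero
toyZM[5, 3] <- NA                       # missing value
toyZM_imp <- multReplus(toyZM, dl = rep(1, 4), frac = 0.65)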
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/multReplus.R
splineKM <- function(x,label=NULL,dl=NULL,n.knots=NULL, legend.pos="bottomright", ylab="ECDF", xlab="Value", col.km="black",lty.km=1,lwd.km=1, col.sm="red",lty.sm=2,lwd.sm=2,...){ if (is.character(dl) || is.null(dl)) stop("dl must be a numeric vector or matrix") if (length(dl)!=length(x)) stop("x and dl must be two vectors of the same length") if (is.null(label)) stop("A value for label must be given") if (!is.na(label)){ if (label!=0 & any(x==0,na.rm=T)) stop("Zero values not labelled as censored values were found in the data") if (any(is.na(x))) stop(paste("NA values not labelled as censored values were found in the data")) } if (is.na(label)){ if (any(x==0,na.rm=T)) stop("Zero values not labelled as censored values were found in the data") if (!any(is.na(x),na.rm=T)) stop(paste("Label",label,"was not found in the data")) } if ((!is.null(n.knots)) & (length(n.knots)!=1)) stop("n.knots must contain a single value") x[x==label] <- NA who <- is.na(x); w <- which(who) xcen <- ifelse(who,TRUE,FALSE) x[who] <- dl[who] dat <- data.frame(x,xcen,stringsAsFactors=TRUE) km.ecdf <- cenfit(dat$x,dat$xcen) x <- rev(km.ecdf@survfit$time) y <- rev(km.ecdf@survfit$surv) if (is.null(n.knots)) {scdf <- smooth.spline(x,y)} if (!is.null(n.knots)) {scdf <- smooth.spline(x,y,nknots=n.knots)} scdf <- approxfun(scdf$x,scdf$y) plot(km.ecdf@survfit,conf.int=FALSE,ylab=ylab,xlab=xlab, col=col.km,lty=lty.km,lwd=lwd.km, ...) lines(x,scdf(x),type="l", col=col.sm,lty=lty.sm,lwd=lwd.sm) abline(h=1,col="white",lwd=4) legend(legend.pos,bty="n", legend=c("KM estimate","KMSS estimate"), lty=c(lty.km,lty.sm),col=c(col.km,col.sm),lwd=c(lwd.km,lwd.sm)) }
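## A minimal, self-contained sketch for splineKM(): visualise how the smoothing
## spline (KMSS) tracks the Kaplan-Meier empirical cumulative distribution of a
## single left-censored variable. The data below are simulated purely for
## illustration; about 15% of the values are censored at a common detection
## limit and coded as zeros.
set.seed(3)
xs <- rlnorm(100, meanlog = 2, sdlog = 0.5)
dl_common <- as.numeric(quantile(xs, 0.15))
dls <- rep(dl_common, length(xs))
xs[xs < dls] <- 0                       # censor values below the limit
splineKM(xs, label = 0, dl = dls)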
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/splineKM.R
zPatterns <- function(X,label=NULL,plot=TRUE, axis.labels=c("Component","Pattern ID"), bar.ordered=as.character(c(FALSE,FALSE)), bar.colors=c("red3","red3"), bar.labels=FALSE, show.means=FALSE,round.means=2, cex.means=1, type.means=c("cgm","am"), cell.colors=c("dodgerblue","white"), cell.labels=c(label,paste("No",label)), cex.axis=1.1, grid.color="black", grid.lty="dotted", legend=TRUE, suppress.print=FALSE, ...){ cgm <- function(X, round.means = round.means) { ms <- apply(X,2,function(x){ if (all(is.na(x))) {x <- NA} else {exp(mean(log(x),na.rm=T))} }) ms[is.na(ms)] <- 0 round(ms/sum(ms)*100,round.means) } am <- function(X, round.means = round.means) { ms <- apply(X,2,function(x){ if (all(is.na(x))) {x <- NA} else {mean(x,na.rm=T)} }) round(ms,round.means) } plot.patterns <- function(a, show.means = show.means, round.means = round.means, cex.means=cex.means, X = X, pat = pat.ID, ...) { zones <- matrix(c(2,4,1,3), ncol=2, byrow=TRUE) layout(zones, widths=c(4/5,1.5/5), heights=c(2/5,3.5/5)) par(mar=c(3,3,0.5,0.5)) a <- as.matrix(a[rev(rownames(a)),]) image(1:ncol(a),1:nrow(a),t(a),col=rev(cell.colors),axes=F) mtext(side=1,text=axis.labels[1],line=1.75) mtext(side=2,text=axis.labels[2],line=1.75) par(mgp=c(3, .3, 0)) axis(side = 1,at = seq(1,ncol(a),by=1),labels=colnames(a),tck=0,cex.axis=cex.axis) axis(side = 2,at = seq(1,nrow(a),by=1),labels=rownames(a),las=2,tck=0,cex.axis=cex.axis) box() grid(ncol(a),nrow(a),col=grid.color,lty=grid.lty) if (show.means == TRUE){ if (type.means == "cgm"){ cgmp <- by(X,pat.ID,cgm,round.means=round.means) cgmp <- cgmp[pat.ID2] # Reorder as in patterns table cgmp <- do.call(rbind,cgmp) cgmp <- cgmp[nrow(a):1,] cgmp[cgmp==0] <- NA for (i in 1:nrow(a)){ for (j in 1:ncol(a)){ text(j,i,label=cgmp[i,j],cex=cex.means) } } } if (type.means == "am"){ amp <- by(X,pat.ID,am,round.means=round.means) amp <- amp[pat.ID2] amp <- do.call(rbind,amp) amp <- amp[nrow(a):1,] for (i in 1:nrow(a)){ for (j in 1:ncol(a)){ text(j,i,label=amp[i,j],cex=cex.means) } } } } par(mar=c(0,3.25,1,0.75)) a <- barplot(as.vector(prop.col),axes=F,col=bar.colors[1],xaxs="i", ylim=c(0,max(as.vector(prop.col)+0.2*max(as.vector(prop.col))))) if (bar.labels==TRUE) text(a,as.vector(prop.col),labels=as.vector(prop.col),cex=0.85,pos=3) par(mar=c(3.25,0,0.75,0.75)) a <- barplot(rev(as.vector(pat.freq)),horiz=T,axes=F,col=bar.colors[2],yaxs="i", xlim=c(0,max(as.vector(pat.freq)+0.3*max(as.vector(pat.freq))))) if (bar.labels==TRUE) text(rev(as.vector(pat.freq)),a,labels=rev(as.vector(pat.freq)),cex=0.85,pos=4) par(mar=c(0,0,3,0)) plot.new() if (legend==TRUE){ if (any(is.na(cell.labels))) cell.labels[is.na(cell.labels)] <- "NA" legend("topleft",cell.labels,pch=c(22,22),bty="n", pt.bg=cell.colors,pt.cex=2,cex=1.1)} } type.means <- match.arg(type.means) if (any(X<0, na.rm=T)) stop("X contains negative values") if (is.vector(X)) stop("X must be a matrix or data.frame class object") if (is.null(label)) stop("A value for label must be given") if (!is.na(label)){ if (!any(X==label,na.rm=T)) stop(paste("Label",label,"was not found in the data set")) if (label!=0 & any(X==0,na.rm=T)) warning("Unidentified zero values were found and will be ignored") if (any(is.na(X))) warning(paste("Unidentified NA values were found in the data set and will be ignored")) } if (is.na(label)){ if (any(X==0,na.rm=T)) warning("Unidentified zero values were found in the data set and will be ignored") if (!any(is.na(X),na.rm=T)) stop(paste("Label",label,"was not found in the data set")) } X <- as.data.frame(X,stringsAsFactors=TRUE) n <- 
nrow(X); p <- ncol(X) if (is.na(label)) miss <- as.data.frame(is.na(X)*1,stringsAsFactors=TRUE) else miss <- as.data.frame((X==label)*1,stringsAsFactors=TRUE) miss[is.na(miss)] <- 0 # Ignore any unlabelled NAs/zeros to graph patterns miss <- cbind(miss,pat=do.call(paste,c(miss,sep="")),stringsAsFactors=TRUE) tmp <-data.frame(Pattern=names(table(miss$pat)),ID=1:nlevels(miss$pat),stringsAsFactors=TRUE) pat.ID <- as.factor(tmp$ID[match(miss$pat,tmp$Pattern)]) # IDs in original row order miss <- miss[order(miss$pat),] # Order according to patterns pat.freq <- round(as.vector(table(miss$pat))/n*100,2) # Rel freq of ordered patterns prop.col <- round(colSums(miss[,1:p])/n*100,2) prop <- round(sum(miss[,1:p])/(n*p)*100,2) tab <- miss[!duplicated(miss),1:p] pat.ID2 <- levels(pat.ID) rownames(tab) <- pat.ID2 if (bar.ordered[1]==TRUE){ tab <- tab[order(pat.freq,decreasing=T),] pat.ID2 <- pat.ID2[order(pat.freq,decreasing=T)] pat.freq <- pat.freq[order(pat.freq,decreasing=T)] } if (bar.ordered[2]==TRUE){ tab <- tab[,order(prop.col,decreasing=T)] X <- X[,order(prop.col,decreasing=T)] prop.col <- prop.col[order(prop.col,decreasing=T)] } tab.num <- tab tab[tab==1] <- "+" tab[tab==0] <- "-" # Summary X[X==label] <- NA; X[X==0] <- NA # Ignore labelled/NAs/zeros for summaries if (plot==TRUE) plot.patterns(tab.num, show.means=show.means,round.means=round.means, cex.means=cex.means,X = X, pat = pat.ID2, ...) tab <- cbind(Patt.ID=pat.ID2, tab, No.Unobs=rowSums(tab[,1:p]=="+"), Patt.Perc=pat.freq) if (suppress.print==FALSE){ cat("Patterns ('+' means ",cell.labels[1],", '-' means ",cell.labels[2],") \n\n",sep="") print(tab,row.names=FALSE) cat("\n") cat("Percentage cells by component \n") print(prop.col) cat("\n") cat(paste("Overall percentage cells: ",prop,"% \n",sep="")) } invisible(pat.ID) }
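## A minimal usage sketch for zPatterns(): tabulate and plot the patterns of
## zeros in the package's Water data set (this is the call used in the examples
## of the zVarArray family below). The pattern IDs are returned invisibly.
data(Water)
patternIDs <- zPatterns(Water, label = 0)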
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/zPatterns.R
#' @title Variation array for grouped data #' #' @description This function returns overall and separate variation arrays for groups #' in a compositional data set. Groups can be defined by either zero/unobserved data patterns or #' by a grouping factor in fully observed zero-free data sets. #' #' @details This function is mainly aimed to investigate heterogeneous relative variation #' structures in compositional data sets containing zeros or unobserved values. For each pattern of zero or unobserved values, #' log-ratio variances (upper triangle of variation matrix) and means (lower triangle of variation matrix) are computed from the #' available data. Note that (1) NAs are produced for log-ratio variances and means in groups containing less than two observations, #' and (2) at least two components must be available in each group to compute log-ratios. #' #' The overall estimate is obtained across groups by pairwise deletion. Note that, unlike the ordinary \code{\link{var}} #' function, maximum likelihood estimates of the variances are computed. That is, #' the observed sum of squares is divided by the corresponding number of observations n and not by n-1. #' #' Group-wise variation arrays can be obtained from fully observed zero-free data by setting a grouping factor #' using the argument \code{groups}. #' #' @seealso \code{\link{zPatterns}}. #' #' @param X Compositional data set (\code{\link{matrix}} or \code{\link{data.frame}} class). #' @param label Unique label (\code{\link{numeric}} or \code{\link{character}}) used to denote zeros/unobserved data in \code{X} (\code{label = 0}, default). #' @param groups Grouping factor in fully observed zero-free data sets (\code{groups = NULL}, default). #' @param suppress.print Suppress printed feedback (\code{suppress.print = FALSE}, default). #' @return List of variation arrays by pattern/group and overall. #' #' @examples #' data(Water) #' zPatterns(Water, label = 0) #' zVarArray(Water) #' #' # From a completed data set #' #' data(mdl) # matrix of limits of detection for Water #' Water_multKM <- multKM(Water,label=0,dl=mdl) # nondetects imputation #' #' # Results split by two ficticious groups A and B #' zVarArray(Water_multKM,groups=rep(c("A","B"),each=50)) zVarArray <- function(X, label = 0, groups = NULL, suppress.print = FALSE) { if (any(X<0, na.rm=T)) stop("X contains negative values") if (is.vector(X)) stop("X must be a matrix or data.frame class object") if (is.null(label)) stop("A value for label must be given") if (!is.null(groups)){ if (any(X == label, na.rm = T)) stop(paste("Label", label, "was found in the data set. 
No zeros or unobserved values are allowed when a grouping factor is specified")) } if (!is.na(label)) { if (!any(X == label, na.rm = T) & (is.null(groups))) stop(paste("Label", label, "was not found in the data set")) if (label != 0 & any(X == 0, na.rm = T)) stop("Zero values not labelled as such were found in the data set") if (any(is.na(X))) stop(paste( "NA values not labelled as zero values were found in the data set" )) } if (is.na(label)) { if (any(X == 0, na.rm = T)) stop("Zero values not labelled as such were found in the data set") if (!any(is.na(X), na.rm = T) & (is.null(groups))) stop(paste("Label", label, "was not found in the data set")) } X <- as.data.frame(X,stringsAsFactors=TRUE) if (is.null(groups)){ g <- zPatterns(X,label = label, plot = FALSE, suppress.print = TRUE) ifelse(is.na(label), X[is.na(X)] <- 0, X[X == label] <- 0) } else{ g <- as.factor(groups) levNames <- levels(g) levels(g) <- 1:length(levels(g)) } unobs <- sapply(split(X,g),function(x) sum(1*(x[1,]==0)),simplify = TRUE) if (any(unobs > (ncol(X)-2))) { warning("Some groups have less than two components available and NAs were produced (use zPatterns to check out)") } if (any(table(g) < 2)) warning("Some groups contain less than two observations and NAs were produced (use zPatterns to check out)") ni <- table(g); col <- ncol(X); nind <- nrow(X) numPat <- length(levels(g)) p <- as.numeric(levels(g)) pi <- ni / nind # % obs in each pattern VarArrByP <- array(NA, c(col, col, numPat)) # variation by pattern VarArrTot <- nvartot <- matrix(0, col, col) # overall variation matrix colnames(VarArrTot) <- rownames(VarArrTot) <- colnames(X) for (pat in 1:numPat) {diag(VarArrByP[,,pat])<-rep(0,col)} # Variation array by pair of logratios for (di in 1:(col - 1)) { # by rows denominator di VariatMat Xdi <- X[X[, di] > 0, ] # subset of observed Xdi gdi <- g[X[, di] > 0] # subset pattern number of obs Xdi for (nj in (di + 1):col) { # by columns numerator nj VariatMat if (any(Xdi[, nj] > 0)) { Xnj <- Xdi[Xdi[, nj] > 0, c(di, nj)] # subset common di and nj gnj <- factor(gdi[Xdi[, nj] > 0]) # subset common patterns ngnj <- table(gnj) lxdinj <- lxdinjV <- log(Xnj[, 2] / Xnj[, 1]) p <- as.numeric(levels(gnj)) # by pattern # exp and var EbP <- -tapply(lxdinj, gnj, mean) #expectation by Pattern VbP <- tapply(lxdinj, gnj, var) #variance by Pattern EbP[is.na(VbP)] <- NA # NA for both if based on only one value VarArrByP[di, nj, p[!is.na(VbP)]] <- ((ngnj[!is.na(VbP)] - 1) * VbP[!is.na(VbP)]) / (ngnj[!is.na(VbP)]) VarArrByP[nj, di, p[!is.na(EbP)]] <- EbP[!is.na(EbP)] # whole # center to zero each group for variance test numpatj <- nlevels(gnj) for (k in 1:numpatj) { lxdinjV[gnj == p[k]] <- scale(lxdinj[gnj == p[k]], TRUE, FALSE) } # VarArrTot[di, nj] <- as.numeric((length(lxdinjV) - 1) * var(lxdinjV) / length(lxdinjV)) VarArrTot[nj, di] <- -as.numeric(mean(lxdinj)) } # end if: control if common no-zero data data exists }# end for 2 by columns numerator nj VariatMat }# end for 1: by rows denominato di VariatMat # Formatting VarArrByP <- lapply(seq(dim(VarArrByP)[3]), function(x) VarArrByP[ , , x]) for (i in 1:length(VarArrByP)){ colnames(VarArrByP[[i]]) <- colnames(X) rownames(VarArrByP[[i]]) <- colnames(X) } if (is.null(groups)){ names(VarArrByP) <- paste("Pattern",seq(length(VarArrByP)),sep="") } else{ names(VarArrByP) <- levNames } VarArrByP[["Overall"]] <- VarArrTot result <- VarArrByP if (suppress.print == FALSE) { print(lapply(result,round,4)) } invisible(result) }
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/zVarArray.R
#' @title Variation array relative error #' #' @description This function computes squared relative errors of variation arrays per group with respect to the overall variation array #' based on observed data in a compositional data set. Groups can be defined by either zero/unobserved data patterns or #' by a grouping factor in fully observed zero-free data sets. #' #' @details Squared relative errors (SRE) are calculated by confronting variation arrays (log-ratio variances and means) obtained per group and #' the overall variation array based on observed data. Raw SREs are computed for each available pair-wise log-ratio. The weighted version uses #' the corresponding group sizes to weight raw SREs. Total SRE is obtained as the sum of weighted SREs for each log-ratio. Further details by group are #' provided by setting \code{breakdown = TRUE}. #' #' @param X Compositional data set (\code{\link{matrix}} or \code{\link{data.frame}} class). #' @param label Unique label (\code{\link{numeric}} or \code{\link{character}}) used to denote zeros/unobserved data in \code{X} (\code{label = 0}, default). #' @param groups Grouping factor in fully observed zero-free data sets (\code{groups = NULL}, default). #' @param breakdown Logical value. Show results broken down by group (\code{breakdown = FALSE}, default). #' @param suppress.print Suppress printed feedback (\code{suppress.print = FALSE}, default). #' @return 1. SRE for each log-ratio variance and mean. #' 2. Weighted SRE for each log-ratio variance and mean. #' 3. Total SRE across log-ratio variances and means. #' 4. Percentage contribution of each log-ratio to SRE in log-ratio variances and means. #' If \code{breakdown = TRUE}: #' 4. SREs per group. #' 5. Weighted SREs per group. #' 6. Percentage contribution of each group to total SRE. #' #' @seealso \code{\link{zPatterns}}, \code{\link{zVarArray}}. #' #' @examples #' data(Water) #' zPatterns(Water, label = 0) #' zVarArrayError(Water) #' zVarArrayError(Water, breakdown = TRUE) #' #' # From a completed data set #' #' data(mdl) # matrix of limits of detection for Water #' Water_multKM <- multKM(Water,label=0,dl=mdl) # nondetects imputation #' #' # Results split by two ficticious groups A and B #' zVarArrayError(Water_multKM,groups=rep(c("A","B"),each=50)) zVarArrayError <- function(X, label = 0, groups = NULL, breakdown = FALSE, suppress.print = FALSE) { if (any(X<0, na.rm=T)) stop("X contains negative values") if (is.vector(X)) stop("X must be a matrix or data.frame class object") if (is.null(label)) stop("A value for label must be given") if (!is.null(groups)){ if (any(X == label, na.rm = T)) stop(paste("Label", label, "was found in the data set. 
No zeros or unobserved values are allowed when a grouping factor is specified")) } if (!is.na(label)) { if (!any(X == label, na.rm = T) & (is.null(groups))) stop(paste("Label", label, "was not found in the data set")) if (label != 0 & any(X == 0, na.rm = T)) stop("Zero values not labelled as such were found in the data set") if (any(is.na(X))) stop(paste( "NA values not labelled as zero values were found in the data set" )) } if (is.na(label)) { if (any(X == 0, na.rm = T)) stop("Zero values not labelled as such were found in the data set") if (!any(is.na(X), na.rm = T) & (is.null(groups))) stop(paste("Label", label, "was not found in the data set")) } X <- as.data.frame(X,stringsAsFactors=TRUE) if (is.null(groups)){ g <- zPatterns(X,label = label, plot = FALSE, suppress.print = TRUE) ifelse(is.na(label), X[is.na(X)] <- 0, X[X == label] <- 0) } else{ g <- as.factor(groups) levNames <- levels(g) levels(g) <- 1:length(levels(g)) } ni <- table(g); col <- ncol(X); nind <- nrow(X) numPat <- length(levels(g)) p <- as.numeric(levels(g)) pi <- ni / nind # % obs in each group # Error matrices ErrMat <- matrix(NA,col,col) # raw relative error ErrPropMat<-matrix(NA,col,col) # relative error proportional to number of obs in group (pi) # Raw relative error by group ErrArr <- array(NA,c(col, col, numPat)) # Relative error proportional to pi by group ErrPropArr <- array(NA,c(col, col, numPat)) # Names rownames(ErrPropMat) <- colnames(ErrPropMat) <- colnames(X) rownames(ErrMat) <- colnames(ErrMat) <-colnames(X) ## Call zVarArray VMZ <- zVarArray(X = X, label = 0, groups = groups, suppress.print = TRUE) vErrExp <- vErrVar <- 0 ## Squared relative errors for each pair-wise log-ratio # Compute VARIATION ARRAY BY pair of components for (di in 1:(col-1)){ # by rows denominator di for (nj in (di+1):col){ # by columns numerator nj # Overall VarArrTotV <- VMZ$Overall[di,nj] # Var VarArrTotE <- VMZ$Overall[nj,di] # Exp # By group (values for the given log-ratio across groups) VbP <- sapply(VMZ[1:(length(VMZ)-1)],function(x) x[di,nj],simplify=TRUE) # Var EbP <- sapply(VMZ[1:(length(VMZ)-1)],function(x) x[nj,di],simplify=TRUE) # Exp # Raw relative error by group ErrArr[di,nj,p[!is.na(VbP)]] <- (1-(VbP[!is.na(VbP)]/VarArrTotV))^2 # Var ErrArr[nj,di,p[!is.na(EbP)]] <- (1-(EbP[!is.na(EbP)]/VarArrTotE))^2 # Exp # Raw relative error (the previous added up across groups) ErrMat[di,nj] <- sum(ErrArr[di,nj,p[!is.na(VbP)]]) # Var ErrMat[nj,di] <- sum(ErrArr[nj,di,p[!is.na(EbP)]]) # Exp # Relative error proportional to number of obs per group (pi) # By group ErrPropArr[di,nj,p[!is.na(VbP)]] <- pi[p[!is.na(VbP)]]*ErrArr[di,nj,p[!is.na(VbP)]] ErrPropArr[nj,di,p[!is.na(EbP)]] <- pi[p[!is.na(EbP)]]*ErrArr[nj,di,p[!is.na(EbP)]] # Summed up across groups ErrPropMat[di,nj] <- sum(ErrPropArr[di,nj,p[!is.na(VbP)]]) ErrPropMat[nj,di] <- sum(ErrPropArr[nj,di,p[!is.na(EbP)]]) # Total squared relative error for log-ratio vars (Var) and means (Exp) (based on weighted SRE) vErrVar <- vErrVar + ErrPropMat[di,nj] vErrExp <- vErrExp + ErrPropMat[nj,di] }# end for 2 by columns numerator nj VariatMat }# end for 1 by rows denominator di VariatMat # Percentage contribution of each log-ratio var and mean A <- ErrPropMat A[upper.tri(A)] <- A[upper.tri(A)]/sum(A[upper.tri(A)])*100 A[lower.tri(A)] <- A[lower.tri(A)]/sum(A[lower.tri(A)])*100 # Percentage error of each group sExp <- sVar <- rep(NA,numPat) for (pat in 1:numPat){ RL <- RU <- ErrPropArr[,,pat] # Var log-ratio RU[!upper.tri(ErrPropArr[,,pat])] <- NA # Exp log-ratio 
RL[!lower.tri(ErrPropArr[,,pat])] <- NA # Total if (sum(!is.na(RU))>0) {sVar[pat] <- sum(RU[!is.na(RU)])} if (sum(!is.na(RL))>0) {sExp[pat] <- sum(RL[!is.na(RL)])} }# end for Pat # In percentage sVarPer <- sVar/sum(sVar,na.rm=T)*100; names(sVarPer) <- names(VMZ)[1:(length(VMZ)-1)] sExpPer <- sExp/sum(sExp,na.rm=T)*100; names(sExpPer) <- names(VMZ)[1:(length(VMZ)-1)] # Formatting ErrArr ErrArr <- lapply(seq(dim(ErrArr)[3]), function(x) ErrArr[ , , x]) for (i in 1:length(ErrArr)){ colnames(ErrArr[[i]]) <- colnames(X) rownames(ErrArr[[i]]) <- colnames(X) } names(ErrArr) <- names(VMZ)[1:(length(VMZ)-1)] # Formatting ErrPropArr ErrPropArr <- lapply(seq(dim(ErrPropArr)[3]), function(x) ErrPropArr[ , , x]) for (i in 1:length(ErrPropArr)){ colnames(ErrPropArr[[i]]) <- colnames(X) rownames(ErrPropArr[[i]]) <- colnames(X) } names(ErrPropArr) <- names(VMZ)[1:(length(VMZ)-1)] if (breakdown==FALSE){ result <- list(SRE=ErrMat, WeightedSRE=ErrPropMat, TotalSREvars=vErrVar, TotalSREmeans=vErrExp, PercContribSRE=A) } else{ result <- list(SRE=ErrMat, WeightedSRE=ErrPropMat, TotalSREvars=vErrVar, TotalSREmeans=vErrExp, PercContribSRE=A, SREbyGroup=ErrArr, WeightedSREbyGroup=ErrPropArr, PercContribTotalSREbyGroupVars=sVarPer, PercContribTotalSREbyGroupMeans=sExpPer) } if (suppress.print == FALSE) { print(lapply(result,function(x){ if (is(x, "list")) {lapply(x,round,4)} else {round(x,4)} })) } invisible(result) }
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/zVarArrayError.R
#' @title Variation array homogeneity test #' #' @description This function performs a permutation test of the homogeneity of group-wise and overall variation arrays from all #' pair-wise log-ratios in a compositional data set. Groups can be defined by either zero/unobserved data patterns or by a grouping #' factor in fully observed zero-free data sets. #' #' @details The permutation test of homogeneity is based on total weighted squared relative errors (SRE) reflecting on divergence #' between group-wise variation arrays and overall (see \code{\link{zVarArrayError}} and #' \code{\link{zVarArray}} for more details). Note that for groups including less than two observations SRE is set to NA. #' #' @param X Compositional data set (\code{\link{matrix}} or \code{\link{data.frame}} class). #' @param label Unique label (\code{\link{numeric}} or \code{\link{character}}) used to denote zeros/unobserved data in \code{X} (\code{label = 0}, default). #' @param groups Grouping factor in fully observed zero-free data sets (\code{groups = NULL}, default). #' @param b Number of bootstrap resamples used (\code{b = 1000}, default). #' #' @return Test p-values for log-ratio variances and means. #' #' @seealso \code{\link{zPatterns}}, \code{\link{zVarArray}}, \code{\link{zVarArrayError}}. #' #' @examples #' data(Water) #' zPatterns(Water, label = 0) #' zVarArrayTest(Water) zVarArrayTest <- function(X, label = 0, groups = NULL, b = 1000){ if (any(X<0, na.rm=T)) stop("X contains negative values") X <- as.data.frame(X,stringsAsFactors=TRUE) if (is.null(groups)){ g <- zPatterns(X,label = label, plot = FALSE, suppress.print = TRUE) ifelse(is.na(label), X[is.na(X)] <- 0, X[X == label] <- 0) } else{ g <- as.factor(groups) levNames <- levels(g) levels(g) <- 1:length(levels(g)) } ni <- table(g); col <- ncol(X); nind <- nrow(X) numPat <- length(levels(g)) p <- as.numeric(levels(g)) pi <- ni / nind # % obs in each group # Weighted SRE for the original data set resE <- zVarArrayError(X, label = 0, groups = groups, suppress.print = TRUE) vErrExp <- vErrVar <- rep(0,b) ## Weighted SRE for EACH LOG-RATIO # VARIATION ARRAY BY pair of logratios for (di in 1:(col-1)){ # by rows denominator di VariatMat Xdi <- X[X[,di]>0,] # subset of observed Xdi gdi <- g[X[,di]>0] # subset pattern number of obs Xdi for (nj in (di+1):col){ # by columns numerator nj VariatMat vErrExpij <- vErrVarij <- rep(0,b) if (any(Xdi[,nj]>0)){ Xnj <- Xdi[Xdi[,nj]>0,c(di,nj)] # subset common di and nj gnj <- factor(gdi[Xdi[,nj]>0]) ngnj <- table(gnj) p <- as.numeric(levels(gnj)) lxdinjOr <- lxdinjOrV <- log(Xnj[,2]/Xnj[,1]) # center to zero each group for variance test numpatj <- nlevels(gnj) for (k in 1:numpatj){ lxdinjOrV[gnj==p[k]] <- scale(lxdinjOr[gnj==p[k]],TRUE,FALSE) } # PERMUTATION TEST for (rept in 1:b){ lxdinj <- sample(lxdinjOr) lxdinjV <- sample(lxdinjOrV) # var and exp VbP <- tapply(lxdinjV,gnj,var) #variance by group EbP <- -tapply(lxdinj,gnj,mean) #expectation by group EbP[is.na(VbP)] <- NA # NA for both if based on only one value # By group VarArrByPV <- ((ngnj[!is.na(VbP)]-1)*VbP[!is.na(VbP)])/(ngnj[!is.na(VbP)]) VarArrByPE <- EbP[!is.na(EbP)] # Overall VarArrTotV <- as.numeric((length(lxdinjOrV)-1)*var(lxdinjOrV)/length(lxdinjOrV)) # Var VarArrTotE <- -as.numeric(mean(lxdinjOr)) # Exp # Weighted squared relative error vErrVarij[rept] <- sum((pi[p[!is.na(VbP)]])*((1-(VarArrByPV/VarArrTotV))^2)) vErrExpij[rept] <- sum((pi[p[!is.na(EbP)]])*((1-(VarArrByPE/VarArrTotE))^2)) }# end of Permutation test vErrExp <- vErrExp + vErrExpij vErrVar <- 
vErrVar + vErrVarij } # end if: control if common no-zero data data exists }# end for 2 by columns numerator nj VariatMat } # end for 1 by rows denominator di VariatMat # p-value (add 1 for the original sample, it is included) pvalExp <- (sum(vErrExp >= resE$TotalSREmeans)+1)/(b+1) pvalVar <- (sum(vErrVar >= resE$TotalSREvars)+1)/(b+1) cat("\n") cat("Variation array homogeneity test \n") cat("------------------------------- \n") cat(paste("Number of groups:",nlevels(g),"\n")) cat(paste("P-value for homogeneity of log-ratio variances:",round(pvalVar,4),"\n")) cat(paste("P-value for homogeneity of log-ratio means:",round(pvalExp,4),"\n")) }
/scratch/gouwar.j/cran-all/cranData/zCompositions/R/zVarArrayTest.R
# toLongDate Copyright (C) 2009, Oliver Kirchkamp
# This program comes with ABSOLUTELY NO WARRANTY
# This is free software, and you are welcome to redistribute it
# under certain conditions. See the enclosed LICENSE for details.
#
# Convert z-Tree short date stamps to a long "yyyymmdd-HH:MM" string.
# Two formats are handled:
#   * 11 characters, "yymmdd?hhmm": the 7th character is skipped and the hour
#     and minute are copied verbatim.
#   * 8 characters, "yymmddXY": the time is encoded in the last two characters
#     (a letter, then a character from 0-9A-Z) and is decoded arithmetically
#     into a minute count that is then formatted as HH:MM.
# Two-digit years below 80 are interpreted as 20xx, otherwise as 19xx.
# Anything else is returned unchanged.
toLongDate <- function (shortDate) {
  sapply(as.character(shortDate),function(zz) {
    pre <- ifelse(substr(zz,1,2)<"80","20","19")
    if (nchar(zz)==8) {
      # hour <- which(LETTERS==substr(zz,7,7))-1
      minute<- 60*which(LETTERS==substr(zz,7,7)) + (which(c(as.character(0:9),LETTERS)==substr(zz,8,8)))*2 - 21
      sprintf("%s%s-%02d:%02d",pre,substr(zz,1,6),minute%/%60,minute%%60)
    } else if (nchar(zz)==11)
      sprintf("%s%s-%s:%s",pre,substr(zz,1,6),substr(zz,8,9),substr(zz,10,11))
    else zz
  })
}
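## A small illustration of toLongDate(): both the 11-character "yymmdd?hhmm"
## stamp and the older 8-character encoded form are expanded to
## "yyyymmdd-HH:MM"; other strings pass through unchanged.
toLongDate("190304_1015")               # "20190304-10:15"
toLongDate("070915AB")                  # 8-character form with an encoded time
toLongDate("not a date")                # returned as-is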
/scratch/gouwar.j/cran-all/cranData/zTree/R/toLongDate.R
# zTreeSbj Copyright (C) 2009, Oliver Kirchkamp # This program comes with ABSOLUTELY NO WARRANTY # This is free software, and you are welcome to redistribute it # under certain conditions. See the enclosed LICENSE for details. zTreeSbj <- function(files,sep="\t",zTree.silent=getOption("zTree.silent"),zTree.encoding=getOption("zTree.encoding"),ignore.errors=FALSE) { myWarnings<-NULL w.handler <- function(w) { myWarnings<<-c(myWarnings,list(w)) invokeRestart("muffleWarning") } if(is.null(zTree.silent)) zTree.silent <- FALSE if(is.null(zTree.encoding)) zTree.encoding <- getOption("encoding") wrong.Enc<-NULL sbj<-plyr::ldply (files, function(filename) { if(!zTree.silent) cat("reading ",filename,"...\n") Tfile<-file(filename,"r",encoding=zTree.encoding) aa<-withCallingHandlers(readLines(Tfile),warning=w.handler) ## catch warning for invalid encoding: if(length(myWarnings)>0) if(sum(sapply(myWarnings,function(x) startsWith(x$message,"invalid input")))>0) wrong.Enc<<-c(wrong.Enc,filename) close(Tfile) aa.i<-iconv(aa,sub="byte") ## try to convert to native encoding anyway if(sum(aa.i!=aa)>0) { wrong.Enc<<-c(wrong.Enc,filename) if(!ignore.errors) stop("You are currently using encoding \"",zTree.encoding,"\". Your data seem to use a different encoding. *** Read the manual! Change \"zTree.encoding\"! ***") } Date<-sub(".sbj$","",sub(".*/","",filename)) aa2<-strsplit(aa.i[-1],sep) cols<-max(unlist(lapply(aa2,length))) aa3<-t(matrix(unlist(lapply(aa2,function(x) c(x,rep("",cols-length(x))))),byrow=TRUE,nrow=length(aa2))) colnames(aa3)<-aa3[1,] data.frame(cbind(Date=Date,aa3[-1,])) }) if(length(wrong.Enc)>0) { wText <- paste("You are currently using encoding \"",zTree.encoding,"\".\nYour data seem to use a different encoding.\n*** Some of your data could not be translated. ***\n*** Read the manual! Change \"zTree.encoding\"! ***",sep="") if(ignore.errors) { warning(wText) } else { stop(wText) } } sbj }
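## A hypothetical usage sketch for zTreeSbj(): read several z-Tree .sbj files
## into one data frame keyed by the Date column. The file names below are
## placeholders only; substitute the paths of your own session files.
sbj_files <- c("190304_1015.sbj", "190305_0930.sbj")
sbj <- zTreeSbj(sbj_files)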
/scratch/gouwar.j/cran-all/cranData/zTree/R/zTreeSbj.R
# zTreeTables Copyright (C) 2009, Oliver Kirchkamp # This program comes with ABSOLUTELY NO WARRANTY # This is free software, and you are welcome to redistribute it # under certain conditions. See the enclosed LICENSE for details. zTreeTables <- function(files,tables=c("globals","subjects"),sep = "\t",zTree.silent=getOption("zTree.silent"),zTree.encoding=getOption("zTree.encoding"),ignore.errors=FALSE) { if(is.null(zTree.silent)) zTree.silent <- FALSE if(is.null(zTree.encoding)) zTree.encoding <- getOption("encoding") manipulated.files <- list() wrong.Enc<-NULL splittable <- function(filename,tables=c("globals","subjects")) { getTable <- function(start, stop) { if (!is.na(stop) && !is.na(start)) { names<-f.table[[start]][-3] names[1]<-"Date" names[2]<-"Treatment" if(stop==start+1) ## empty table return(data.frame(NULL)) tab<-as.data.frame(matrix(nrow=stop-start-1,ncol=length(names))) colnames(tab)<-names for( i in 1:(stop-start-1)) { tab[i,] <- f.table[[start+i]][-3] } for (n in colnames(tab)) { if (is.character(tab[[n]])) { tab[[n]][tab[[n]]=="-"] <- NA mm<-tryCatch(mean(as.numeric(tab[[n]]),na.rm=TRUE),warning=function(x) NA) if (!is.na(mm)) { tab[[n]]<-as.numeric(tab[[n]]) } } } tab } } getTables <- function(name) { tab<-NULL for (i in which ((splitname==name))) { new<-getTable(splitpoints[i],splitpoints[i+1]) fail<-names(new)=="" if (sum(fail)>0) manipulated.files[[filename]] <<- unique(c(unlist(manipulated.files[filename]),name)) new<-new[,!fail] if (length(new)>0) { if (is.null(tab)) { tab<-new } else { tab <- plyr::rbind.fill(tab,new) } } } tab } myWarnings<-NULL w.handler <- function(w) { myWarnings<<-c(myWarnings,list(w)) invokeRestart("muffleWarning") } ## advanceTable <- function() { last<<-last+1 splitpoints[last]<<-i splitname[last]<<-f.table[[i]][3] splittreat[last]<<-f.table[[i]][2] splitcols[last]<<-length(f.table[[i]]) } ## if(!zTree.silent) cat("reading ",filename,"...\n") f.size<-file.info(filename)$size f.content <- readChar(filename,f.size,useBytes=TRUE) Encoding(f.content)<-zTree.encoding fc.content <- iconv(f.content,zTree.encoding,sub="byte") if(fc.content != f.content) { wrong.Enc<<-c(wrong.Enc,filename) if(!ignore.errors) stop("You are currently using encoding \"",zTree.encoding,"\". Your data seem to use a different encoding. *** Read the manual! Change \"zTree.encoding\"! ***") } ## f.lines <- unlist(strsplit(fc.content,"\\n")) ## figure out table names with linebreaks: f.rr <- grep("\\r\\r$",f.lines) ## which lines end with \\r\\r f.rr <- f.rr[grep("\\t",f.lines[f.rr+1])] ## make sure that next line starts with \\t if(length(f.rr>0)) { warning(paste0("*** Table name with linebreaks in lines ",paste(f.rr,collapse=", ")," ***")); f.lines[f.rr] <- paste(sub("\\r\\r$","",f.lines[f.rr]),f.lines[f.rr+1],sep="") ## copy up f.lines <- f.lines[-(f.rr+1)] ## drop lines that we have copied up } f.lines <- sub("\r","",f.lines) ## f.table<-strsplit(f.lines,sep) if(length(f.table[[1]])<3) stop(sprintf("*** cells are not separated by '%s'. Proper z-Tree files use \\t as a separator. Use the \"sep\" option! 
***",ifelse(sep=="\t","\\t",sep))) splitpoints<-array() splitname<-array() splittreat<-array() table(splitname) splitcols<-array() last<-0 for (i in 1:length(f.table)) { if (length(f.table[[i]])<3) stop(paste0("*** incomplete entry in line ",i," ***")); ## if (last==0) {## the first line in the file advanceTable(); } else if ((f.table[[i]][3] != f.table[[i-1]][3]) || ## name of table has changed sum(f.table[[i]] != f.table[[ splitpoints[last] ]]) == 0) { ## same name, but new header advanceTable(); } splitpoints[last+1]<-i+1 } result<-list() if(is.null(tables)) tables<-unique(splitname); do <- intersect(splitname,tables) miss <- setdiff(splitname,tables) if(!zTree.silent & length(miss>0)) cat ("Skipping:",miss,"\n") for (name in do) { if(!zTree.silent) cat ("Doing:",name,"\n") aTable<-getTables(name) if (!is.null(aTable)) result[[name]]<-aTable } if(!is.null(manipulated.files[[filename]])) { ## add statistics on structure of table in case of manipulation myRange<-range(sapply(f.table,length)) manipulated.files[[filename]] <<- c(manipulated.files[[filename]],sprintf("[%d,%d]",myRange[1],myRange[2])) } result } z<-splittable(files[1],tables) for (name in files[-1]) { if(!zTree.silent) cat (sprintf("*** %s is file %d / %d ***\n",name,which(name==files),length(files))) a<-splittable(name,tables) for(t in union(names(a),names(z))) { if (!is.null(a[[t]])) # there is no such table z[[t]]<-plyr::rbind.fill(z[[t]],a[[t]]) } } ## wrong encoding: if(length(wrong.Enc)>0) { wText <- paste("You are currently using encoding \"",zTree.encoding,"\".\n Your data seem to use a different encoding.\n*** Some of your data could not be translated. ***\n*** Read the manual! Use the option \"zTree.encoding\"! ***",sep="") warning(wText) } ## manipulated Files: wText<-paste(sapply(names(manipulated.files),function(file) sprintf("*** File %s contains empty cells in %s. This is not a z-Tree file ***", file,paste(manipulated.files[[file]],collapse=", "))),collapse="\n") if(length(manipulated.files)>0) { if (ignore.errors) { warning(wText) } else { stop(paste(wText,"\n*** use \"ignore.errors\" to continue ***")) } } ## try to convert characters to numbers if this does not introduce more NAs: for (t in names(z)) { for(n in names(z[[t]])) if(typeof(z[[t]][[n]])=="character") if(!is.null(q<-tryCatch(as.numeric(z[[t]][[n]]),warning=function(x) NULL))) z[[t]][[n]]<-q } z }
/scratch/gouwar.j/cran-all/cranData/zTree/R/zTreeTables.R
# Register the package's default options on load (zTree.silent, zTree.encoding)
# without overwriting any values the user has already set.
.onLoad <- function (libname, pkgname) {
  op <- options()
  op.zTree <- list(zTree.silent=FALSE,zTree.encoding="latin1")
  toset <- !(names(op.zTree) %in% names(op))
  if(any(toset)) options(op.zTree[toset])
  invisible()
}
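## The defaults registered above can be overridden for the current session,
## e.g. to silence the per-file progress messages and to read files written in
## a different encoding:
options(zTree.silent = TRUE, zTree.encoding = "UTF-8")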
/scratch/gouwar.j/cran-all/cranData/zTree/R/zzz.R
#' Dataset containing an example LD profile #' #' A simulated LD profile, containing example LD statistics for #' genetic distances of 0 to 0.0049, in bins of size 0.0001. #' #' @docType data #' #' @usage data(LDprofile) #' #' @format A data frame with 50 rows and 5 variables: #' \describe{ #' \item{bin}{the lower bound of each bin} #' \item{rsq}{the expected \eqn{r^2}{r^2} value for a pair of SNPs, where the genetic distance between them falls in the given bin} #' \item{sd}{the standard deviation of the expected \eqn{r^2}{r^2} value} #' \item{Beta_a}{the first shape parameter for the Beta distribution fitted for this bin} #' \item{Beta_b}{the second shape parameter for the Beta distribution fitted for this bin} #' } "LDprofile"
/scratch/gouwar.j/cran-all/cranData/zalpha/R/LDprofile-data.R
#' Runs the LR function #' #' Returns the \code{|L||R|} value for each SNP location supplied to the function, where \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws}. #' For more information about the \code{|L||R|} diversity statistic, please see Jacobs (2016). #' #' @param pos A numeric vector of SNP locations #' @param ws The window size which the \code{LR} statistic will be calculated over. This should be on the same scale as the \code{pos} vector. #' @param X Optional. Specify a region of the chromosome to calculate LR for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate LR for every SNP in the \code{pos} vector. #' #' @return A list containing the SNP positions and the \code{LR} values for those SNPs #' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 1807 #' @examples #' ## load the snps example dataset #' data(snps) #' ## run LR over all the SNPs with a window size of 3000 bp #' LR(snps$bp_positions,3000) #' ## only return results for SNPs between locations 600 and 1500 bp #' LR(snps$bp_positions,3000,X=c(600,1500)) #' #' @export LR <- function(pos, ws, X = NULL) { #Check things are in the correct format #Check pos is a numeric vector if (is.numeric(pos) ==FALSE || is.vector(pos)==FALSE){ stop("pos must be a numeric vector") } #Check windowsize is a number greater than 0 if(is.numeric(ws) ==FALSE || ws <= 0){ stop("ws must be a number greater than 0") } #If X is specified, check it is in the correct format if (is.null(X)==FALSE){ if(is.numeric(X)==FALSE || is.vector(X)==FALSE){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { if (length(X) != 2){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { # X is in the correct format # Check that X will actually return a result (i.e. that the region specied by X overlaps with pos) if ((length(pos[pos>=X[1] & pos <= X[2]])>0) == FALSE){ stop("The region specified by X is outside the region contained in the pos vector") } } } } else { # Set X equal to the extremes of pos X<-c(pos[1],pos[length(pos)]) } # Set up output list outputLength<-length(pos[pos>=X[1] & pos <= X[2]]) outputList<-list(position=pos[pos>=X[1] & pos <= X[2]],LR=rep(NA,outputLength)) # Loop over each position in the output list and calculate LR for (i in 1:outputLength){ # Current physical position in chromosome currentPos<-outputList$position[i] ## get L, R and LR noL <- length(pos[pos>=currentPos-ws/2 & pos < currentPos]) ## Number of SNPs to the left of the current SNP noR <- length(pos[pos<=currentPos+ws/2 & pos > currentPos]) ## Number of SNPs to the right of the current SNP outputList$LR[i]<-noL*noR } return(outputList) }
/scratch/gouwar.j/cran-all/cranData/zalpha/R/LR.R
#' Runs the L_plus_R function #' #' Returns the \eqn{{|L| \choose 2} + {|R| \choose 2}}{(|L| choose 2) + (|R| choose 2)} value for each SNP location supplied to the function. \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws}. #' For more information about the \code{L_plus_R} diversity statistic, please see Jacobs (2016). #' #' #' #' @param pos A numeric vector of SNP locations #' @param ws The window size which the \code{L_plus_R} statistic will be calculated over. This should be on the same scale as the \code{pos} vector. #' @param X Optional. Specify a region of the chromosome to calculate \code{L_plus_R} for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate L_plus_R for every SNP in the \code{pos} vector. #' #' @return A list containing the SNP positions and the \code{L_plus_R }values for those SNPs #' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 1807 #' @examples #' ## load the snps example dataset #' data(snps) #' ## run L_plus_R over all the SNPs with a window size of 3000 bp #' L_plus_R(snps$bp_positions,3000) #' ## only return results for SNPs between locations 600 and 1500 bp #' L_plus_R(snps$bp_positions,3000,X=c(600,1500)) #' #' @export L_plus_R <- function(pos, ws, X = NULL) { #Check things are in the correct format #Check pos is a numeric vector if (is.numeric(pos) ==FALSE || is.vector(pos)==FALSE){ stop("pos must be a numeric vector") } #Check windowsize is a number greater than 0 if(is.numeric(ws) ==FALSE || ws <= 0){ stop("ws must be a number greater than 0") } #If X is specified, check it is in the correct format if (is.null(X)==FALSE){ if(is.numeric(X)==FALSE || is.vector(X)==FALSE){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { if (length(X) != 2){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { # X is in the correct format # Check that X will actually return a result (i.e. that the region specied by X overlaps with pos) if ((length(pos[pos>=X[1] & pos <= X[2]])>0) == FALSE){ stop("The region specified by X is outside the region contained in the pos vector") } } } } else { # Set X equal to the extremes of pos X<-c(pos[1],pos[length(pos)]) } # Set up output list outputLength<-length(pos[pos>=X[1] & pos <= X[2]]) outputList<-list(position=pos[pos>=X[1] & pos <= X[2]],L_plus_R=rep(NA,outputLength)) # Loop over each position in the output list and calculate L_plus_R for (i in 1:outputLength){ # Current physical position in chromosome currentPos<-outputList$position[i] ## get L, R and L_plus_R noL <- length(pos[pos>=currentPos-ws/2 & pos < currentPos]) ## Number of SNPs to the left of the current SNP noR <- length(pos[pos<=currentPos+ws/2 & pos > currentPos]) ## Number of SNPs to the right of the current SNP ## if n < 2 then choose function will return 0 outputList$L_plus_R[i]<-choose(noL,2)+choose(noR,2) } return(outputList) }
/scratch/gouwar.j/cran-all/cranData/zalpha/R/L_plus_R.R
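As a quick arithmetic check of the formula above: with |L| = 5 SNPs to the left and |R| = 3 to the right, the statistic is choose(5,2) + choose(3,2) = 10 + 3 = 13. This is purely illustrative arithmetic, not package code.

## arithmetic illustration of the L_plus_R formula for |L| = 5, |R| = 3
choose(5, 2) + choose(3, 2)  # returns 13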
#' Runs the Zalpha function
#'
#' Returns a \eqn{Z_{\alpha}}{Zalpha} value for each SNP location supplied to the function.
#' For more information about the \eqn{Z_{\alpha}}{Zalpha} statistic, please see Jacobs (2016).
#' The \eqn{Z_{\alpha}}{Zalpha} statistic is defined as:
#' \deqn{Z_{\alpha}=\frac{{|L| \choose 2}^{-1}\sum_{i,j \in L}r^2_{i,j} + {|R| \choose 2}^{-1}\sum_{i,j \in R}r^2_{i,j}}{2}}
#' where \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws}, and \eqn{r^2}{r^2} is equal to the squared correlation between a pair of SNPs
#'
#' @importFrom stats cor na.omit
#'
#' @param pos A numeric vector of SNP locations
#' @param ws The window size which the \eqn{Z_{\alpha}}{Zalpha} statistic will be calculated over. This should be on the same scale as the \code{pos} vector.
#' @param x A matrix of SNP values. Columns represent chromosomes; rows are SNP locations. Hence, the number of rows should equal the length of the \code{pos} vector. SNPs should all be biallelic.
#' @param minRandL Minimum number of SNPs in each set R and L for the statistic to be calculated. Default is 4.
#' @param minRL Minimum value for the product of the set sizes for R and L. Default is 25.
#' @param X Optional. Specify a region of the chromosome to calculate \eqn{Z_{\alpha}}{Zalpha} for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate \eqn{Z_{\alpha}}{Zalpha} for every SNP in the \code{pos} vector.
#'
#' @return A list containing the SNP positions and the \eqn{Z_{\alpha}}{Zalpha} values for those SNPs
#' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 1807
#' @examples
#' ## load the snps example dataset
#' data(snps)
#' ## run Zalpha over all the SNPs with a window size of 3000 bp
#' Zalpha(snps$bp_positions,3000,as.matrix(snps[,3:12]))
#' ## only return results for SNPs between locations 600 and 1500 bp
#' Zalpha(snps$bp_positions,3000,as.matrix(snps[,3:12]),X=c(600,1500))
#'
#' @export
Zalpha <- function(pos, ws, x, minRandL = 4, minRL = 25, X = NULL) {

  ## Check things are in the correct format

  # Check pos is a numeric vector
  if (is.numeric(pos)==FALSE || is.vector(pos)==FALSE){ stop("pos must be a numeric vector") }
  # Check x is a matrix
  if (is.matrix(x)==FALSE){ stop("x must be a matrix") }
  # Check x has rows equal to the length of pos
  if (length(pos) != nrow(x)){ stop("The number of rows in x must equal the number of SNP locations given in pos") }
  # Check SNPs are all biallelic
  if (sum(apply(x,1,function(x){length(na.omit(unique(x)))}) != 2)>0){ stop("SNPs must all be biallelic") }
  # Check window size is a number greater than 0
  if (is.numeric(ws)==FALSE || ws <= 0){ stop("ws must be a number greater than 0") }
  # Check minRandL is 0 or greater
  if (is.numeric(minRandL)==FALSE || minRandL < 0){ stop("minRandL must be a number greater than or equal to 0") }
  # Check minRL is 0 or greater
  if (is.numeric(minRL)==FALSE || minRL < 0){ stop("minRL must be a number greater than or equal to 0") }
  # If X is specified, check it is in the correct format
  if (is.null(X)==FALSE){
    if (is.numeric(X)==FALSE || is.vector(X)==FALSE || length(X) != 2){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") }
    # X is in the correct format
    # Check that X will actually return a result (i.e. that the region specified by X overlaps with pos)
    if (length(pos[pos>=X[1] & pos<=X[2]])==0){ stop("The region specified by X is outside the region contained in the pos vector") }
  } else {
    # Set X equal to the extremes of pos
    X<-c(pos[1],pos[length(pos)])
  }

  # Change matrix x to numeric if it isn't already
  if (is.numeric(x)==FALSE){
    x<-matrix(as.numeric(factor(x)),nrow=dim(x)[1])
  }

  # Set up output list
  outputLength<-length(pos[pos>=X[1] & pos<=X[2]])
  outputList<-list(position=pos[pos>=X[1] & pos<=X[2]],Zalpha=rep(NA,outputLength))

  # Loop over each position in the output list and calculate Zalpha
  for (i in 1:outputLength){
    # Current physical position in chromosome
    currentPos<-outputList$position[i]
    ## check L, R and LR
    noL <- length(pos[pos>=currentPos-ws/2 & pos < currentPos]) ## Number of SNPs to the left of the current SNP
    noR <- length(pos[pos<=currentPos+ws/2 & pos > currentPos]) ## Number of SNPs to the right of the current SNP
    if (noL < minRandL || noR < minRandL || noL*noR < minRL){
      # NA
      outputList$Zalpha[i]<-NA
    } else {
      ## Left
      LrsqSum<-sum(lower_triangle(cor(t(x[pos>=currentPos-ws/2 & pos < currentPos,]),use="pairwise.complete.obs")^2))
      ## Right
      RrsqSum<-sum(lower_triangle(cor(t(x[pos<=currentPos+ws/2 & pos > currentPos,]),use="pairwise.complete.obs")^2))
      outputList$Zalpha[i]<-(LrsqSum/choose(noL,2)+RrsqSum/choose(noR,2))/2
    }
  }

  if (sum(is.na(outputList$Zalpha))==outputLength){
    warning("No Zalpha values were calculated, try reducing minRandL and minRL or increasing the window size")
  }

  return(outputList)
}
/scratch/gouwar.j/cran-all/cranData/zalpha/R/Zalpha.R
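Zalpha() relies on a lower_triangle() helper that is defined elsewhere in the package and not shown in this file. The sketch below captures the behaviour the calls above assume: one r-squared value per unordered pair of SNPs, i.e. the strictly-below-diagonal entries of the correlation matrix. Treat it as an illustrative assumption, not the package's own implementation.

## Assumed behaviour of the lower_triangle() helper used above (illustrative sketch only)
lower_triangle <- function(m) {
  # return the strictly-below-diagonal elements of a square matrix,
  # giving one value per unordered pair of SNPs (hence division by choose(n, 2))
  m[lower.tri(m, diag = FALSE)]
}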
#' Runs the Zalpha function using a cumulative beta distribution function on the r-squared values for the region
#'
#' Returns a \eqn{Z_{\alpha}^{BetaCDF}}{Zalpha} value for each SNP location supplied to the function, based on
#' the expected \eqn{r^2} values given an LD profile and genetic distances.
#' For more information about the \eqn{Z_{\alpha}^{BetaCDF}}{Zalpha} statistic, please see Jacobs (2016).
#' The \eqn{Z_{\alpha}^{BetaCDF}}{Zalpha} statistic is defined as:
#' \deqn{{Z_{\alpha}^{BetaCDF}}=\frac{{|L| \choose 2}^{-1}\sum_{i,j \in L}\frac{B(r^2_{i,j};a,b)}{B(a,b)} + {|R| \choose 2}^{-1}\sum_{i,j \in R}\frac{B(r^2_{i,j};a,b)}{B(a,b)}}{2}}
#' where \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws}, \eqn{r^2}{r^2} is equal to
#' the squared correlation between a pair of SNPs, and \eqn{\frac{B(r^2_{i,j};a,b)}{B(a,b)}} is the cumulative distribution function for the Beta distribution given
#' the estimated a and b parameters from the LD profile.
#'
#' The LD profile describes the expected correlation between SNPs at a given genetic distance, generated using simulations or
#' real data. Care should be taken to utilise an LD profile that is representative of the population in question. The LD
#' profile should consist of evenly sized bins of distances (for example 0.0001 cM per bin), where the value given is the (inclusive) lower
#' bound of the bin. Ideally, an LD profile would be generated using data from a null population with no selection, however one can be generated
#' using this data. See the \code{\link{create_LDprofile}} function for more information on how to create an LD profile.
#'
#' @importFrom stats cor pbeta na.omit
#'
#' @param pos A numeric vector of SNP locations
#' @param ws The window size which the \eqn{Z_{\alpha}^{BetaCDF}}{Zalpha} statistic will be calculated over. This should be on the same scale as the \code{pos} vector.
#' @param x A matrix of SNP values. Columns represent chromosomes; rows are SNP locations. Hence, the number of rows should equal the length of the \code{pos} vector. SNPs should all be biallelic.
#' @param dist A numeric vector of genetic distances (e.g. cM, LDU). This should be the same length as \code{pos}.
#' @param LDprofile_bins A numeric vector containing the lower bound of the bins used in the LD profile. These should be of equal size.
#' @param LDprofile_Beta_a A numeric vector containing the first estimated Beta parameter for the corresponding bin in the LD profile.
#' @param LDprofile_Beta_b A numeric vector containing the second estimated Beta parameter for the corresponding bin in the LD profile.
#' @param minRandL Minimum number of SNPs in each set R and L for the statistic to be calculated. Default is 4.
#' @param minRL Minimum value for the product of the set sizes for R and L. Default is 25.
#' @param X Optional. Specify a region of the chromosome to calculate \eqn{Z_{\alpha}^{BetaCDF}}{Zalpha} for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate \eqn{Z_{\alpha}^{BetaCDF}}{Zalpha} for every SNP in the \code{pos} vector.
#'
#' @return A list containing the SNP positions and the \eqn{Z_{\alpha}^{BetaCDF}}{Zalpha} values for those SNPs
#' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 1807
#' @examples
#' ## load the snps and LDprofile example datasets
#' data(snps)
#' data(LDprofile)
#' ## run Zalpha_BetaCDF over all the SNPs with a window size of 3000 bp
#' Zalpha_BetaCDF(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances,
#' LDprofile$bin,LDprofile$Beta_a,LDprofile$Beta_b)
#' ## only return results for SNPs between locations 600 and 1500 bp
#' Zalpha_BetaCDF(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances,
#' LDprofile$bin,LDprofile$Beta_a,LDprofile$Beta_b,X=c(600,1500))
#'
#' @export
#' @seealso \code{\link{create_LDprofile}}
Zalpha_BetaCDF<-function(pos, ws, x, dist, LDprofile_bins, LDprofile_Beta_a, LDprofile_Beta_b, minRandL = 4, minRL = 25, X = NULL){

  ## Check things are in the correct format

  # Check pos is a numeric vector
  if (is.numeric(pos)==FALSE || is.vector(pos)==FALSE){ stop("pos must be a numeric vector") }
  # Check x is a matrix
  if (is.matrix(x)==FALSE){ stop("x must be a matrix") }
  # Check x has rows equal to the length of pos
  if (length(pos) != nrow(x)){ stop("The number of rows in x must equal the number of SNP locations given in pos") }
  # Check SNPs are all biallelic
  if (sum(apply(x,1,function(x){length(na.omit(unique(x)))}) != 2)>0){ stop("SNPs must all be biallelic") }
  # Check dist is a numeric vector
  if (is.numeric(dist)==FALSE || is.vector(dist)==FALSE){ stop("dist must be a numeric vector") }
  # Check dist is the same length as pos
  if (length(pos) != length(dist)){ stop("The number of values in dist must equal the number of SNP locations given in pos") }
  # Check window size is a number greater than 0
  if (is.numeric(ws)==FALSE || ws <= 0){ stop("ws must be a number greater than 0") }
  # Check LDprofile_bins is a numeric vector
  if (is.numeric(LDprofile_bins)==FALSE || is.vector(LDprofile_bins)==FALSE){ stop("LDprofile_bins must be a numeric vector") }
  # Get bin size from LDprofile_bins
  bin_size<-LDprofile_bins[2]-LDprofile_bins[1]
  # Check LDprofile_bins are of equal size
  if (isTRUE(all.equal(diff(LDprofile_bins),rep(bin_size,length(LDprofile_bins)-1)))==FALSE){ stop("LDprofile_bins must be of equal size") }
  # Check LDprofile_Beta_a is a numeric vector
  if (is.numeric(LDprofile_Beta_a)==FALSE || is.vector(LDprofile_Beta_a)==FALSE){ stop("LDprofile_Beta_a must be a numeric vector") }
  # Check LDprofile_Beta_b is a numeric vector
  if (is.numeric(LDprofile_Beta_b)==FALSE || is.vector(LDprofile_Beta_b)==FALSE){ stop("LDprofile_Beta_b must be a numeric vector") }
  # Check that the LDprofile vectors are the same length
  if (length(LDprofile_bins) != length(LDprofile_Beta_a)){ stop("LDprofile_Beta_a must contain the same number of values as there are bins given in LDprofile_bins") }
  if (length(LDprofile_bins) != length(LDprofile_Beta_b)){ stop("LDprofile_Beta_b must contain the same number of values as there are bins given in LDprofile_bins") }
  # Check minRandL is 0 or greater
  if (is.numeric(minRandL)==FALSE || minRandL < 0){ stop("minRandL must be a number greater than or equal to 0") }
  # Check minRL is 0 or greater
  if (is.numeric(minRL)==FALSE || minRL < 0){ stop("minRL must be a number greater than or equal to 0") }
  # If X is specified, check it is in the correct format
  if (is.null(X)==FALSE){
    if (is.numeric(X)==FALSE || is.vector(X)==FALSE || length(X) != 2){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") }
    # X is in the correct format
    # Check that X will actually return a result (i.e. that the region specified by X overlaps with pos)
    if (length(pos[pos>=X[1] & pos<=X[2]])==0){ stop("The region specified by X is outside the region contained in the pos vector") }
  } else {
    # Set X equal to the extremes of pos
    X<-c(pos[1],pos[length(pos)])
  }

  # Force the R code to print decimals in full rather than in scientific format
  oldOptions<-options(scipen=999)
  on.exit(options(oldOptions))

  # Change matrix x to numeric if it isn't already
  if (is.numeric(x)==FALSE){
    x<-matrix(as.numeric(factor(x)),nrow=dim(x)[1])
  }

  # Set up output list
  outputLength<-length(pos[pos>=X[1] & pos<=X[2]])
  outputList<-list(position=pos[pos>=X[1] & pos<=X[2]],Zalpha_BetaCDF=rep(NA,outputLength))

  # Loop over each position in the output list and calculate the expected Zalpha
  for (i in 1:outputLength){
    # Current physical position in chromosome
    currentPos<-outputList$position[i]
    ## check L, R and LR
    noL <- length(pos[pos>=currentPos-ws/2 & pos < currentPos]) ## Number of SNPs to the left of the current SNP
    noR <- length(pos[pos<=currentPos+ws/2 & pos > currentPos]) ## Number of SNPs to the right of the current SNP
    if (noL < minRandL || noR < minRandL || noL*noR < minRL){
      # NA
      outputList$Zalpha_BetaCDF[i]<-NA
    } else {
      ## Left
      # Find distances between each SNP in L and round to bin size
      bins<-sapply(lower_triangle(outer(dist[pos>=currentPos-ws/2 & pos < currentPos],dist[pos>=currentPos-ws/2 & pos < currentPos],"-")),assign_bins,bin_size=bin_size)
      bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins)
      Lrsq<-lower_triangle(cor(t(x[pos>=currentPos-ws/2 & pos < currentPos,]),use="pairwise.complete.obs")^2)
      LrsqExp<-merge(data.frame(bins=as.character(bins),Lrsq),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_Beta_a,LDprofile_Beta_b),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE)
      LrsqSum<-sum(pbeta(LrsqExp$Lrsq,LrsqExp$LDprofile_Beta_a,LrsqExp$LDprofile_Beta_b))
      ## Right
      bins<-sapply(lower_triangle(outer(dist[pos<=currentPos+ws/2 & pos > currentPos],dist[pos<=currentPos+ws/2 & pos > currentPos],"-")),assign_bins,bin_size=bin_size)
      bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins)
      Rrsq<-lower_triangle(cor(t(x[pos<=currentPos+ws/2 & pos > currentPos,]),use="pairwise.complete.obs")^2)
      RrsqExp<-merge(data.frame(bins=as.character(bins),Rrsq),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_Beta_a,LDprofile_Beta_b),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE)
      RrsqSum<-sum(pbeta(RrsqExp$Rrsq,RrsqExp$LDprofile_Beta_a,RrsqExp$LDprofile_Beta_b))
      outputList$Zalpha_BetaCDF[i]<-(LrsqSum/choose(noL,2)+RrsqSum/choose(noR,2))/2
    }
  }

  if (sum(is.na(outputList$Zalpha_BetaCDF))==outputLength){
    warning("No Zalpha_BetaCDF values were calculated, try reducing minRandL and minRL or increasing the window size")
  }

  return(outputList)
}
/scratch/gouwar.j/cran-all/cranData/zalpha/R/Zalpha_BetaCDF.R
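The binning step above also depends on an assign_bins() helper defined elsewhere in the package. The sketch below captures the behaviour the calls assume: mapping a pairwise genetic distance onto the inclusive lower bound of its LD-profile bin so it can be matched against LDprofile_bins. It is an illustrative assumption, not the package's actual source.

## Assumed behaviour of the assign_bins() helper used above (illustrative sketch only)
assign_bins <- function(distance, bin_size) {
  # round a pairwise genetic distance down to the lower bound of its bin,
  # so that it can be matched against the LD profile's bin column
  floor(distance / bin_size) * bin_size
}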
#' Runs the Zalpha function using the Z score of the r-squared values for the region
#'
#' Returns a \eqn{Z_{\alpha}^{Zscore}}{Zalpha} value for each SNP location supplied to the function, based on
#' the expected \eqn{r^2} values given an LD profile and genetic distances.
#' For more information about the \eqn{Z_{\alpha}^{Zscore}}{Zalpha} statistic, please see Jacobs (2016).
#' The \eqn{Z_{\alpha}^{Zscore}}{Zalpha} statistic is defined as:
#' \deqn{{Z_{\alpha}^{Zscore}}=\frac{{|L| \choose 2}^{-1}\sum_{i,j \in L}\frac{r^2_{i,j}-E[r^2_{i,j}]}{\sigma[r^2_{i,j}]} + {|R| \choose 2}^{-1}\sum_{i,j \in R}\frac{r^2_{i,j}-E[r^2_{i,j}]}{\sigma[r^2_{i,j}]}}{2}}
#' where \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws}, \eqn{r^2}{r^2} is equal to
#' the squared correlation between a pair of SNPs, \eqn{E[r^2]}{E[r^2]} is equal to the expected squared correlation between a pair of SNPs, given an LD profile, and \eqn{\sigma[r^2]}{\sigma[r^2]} is the standard deviation.
#'
#' The LD profile describes the expected correlation between SNPs at a given genetic distance, generated using simulations or
#' real data. Care should be taken to utilise an LD profile that is representative of the population in question. The LD
#' profile should consist of evenly sized bins of distances (for example 0.0001 cM per bin), where the value given is the (inclusive) lower
#' bound of the bin. Ideally, an LD profile would be generated using data from a null population with no selection, however one can be generated
#' using this data. See the \code{\link{create_LDprofile}} function for more information on how to create an LD profile.
#'
#' @importFrom stats cor na.omit
#'
#' @param pos A numeric vector of SNP locations
#' @param ws The window size which the \eqn{Z_{\alpha}^{Zscore}}{Zalpha} statistic will be calculated over. This should be on the same scale as the \code{pos} vector.
#' @param x A matrix of SNP values. Columns represent chromosomes; rows are SNP locations. Hence, the number of rows should equal the length of the \code{pos} vector. SNPs should all be biallelic.
#' @param dist A numeric vector of genetic distances (e.g. cM, LDU). This should be the same length as \code{pos}.
#' @param LDprofile_bins A numeric vector containing the lower bound of the bins used in the LD profile. These should be of equal size.
#' @param LDprofile_rsq A numeric vector containing the expected \eqn{r^2}{r^2} values for the corresponding bin in the LD profile. Must be between 0 and 1.
#' @param LDprofile_sd A numeric vector containing the standard deviation of the \eqn{r^2}{r^2} values for the corresponding bin in the LD profile.
#' @param minRandL Minimum number of SNPs in each set R and L for the statistic to be calculated. Default is 4.
#' @param minRL Minimum value for the product of the set sizes for R and L. Default is 25.
#' @param X Optional. Specify a region of the chromosome to calculate \eqn{Z_{\alpha}^{Zscore}}{Zalpha} for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate \eqn{Z_{\alpha}^{Zscore}}{Zalpha} for every SNP in the \code{pos} vector.
#'
#' @return A list containing the SNP positions and the \eqn{Z_{\alpha}^{Zscore}}{Zalpha} values for those SNPs
#' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 1807
#' @examples
#' ## load the snps and LDprofile example datasets
#' data(snps)
#' data(LDprofile)
#' ## run Zalpha_Zscore over all the SNPs with a window size of 3000 bp
#' Zalpha_Zscore(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances,
#' LDprofile$bin,LDprofile$rsq,LDprofile$sd)
#' ## only return results for SNPs between locations 600 and 1500 bp
#' Zalpha_Zscore(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances,
#' LDprofile$bin,LDprofile$rsq,LDprofile$sd,X=c(600,1500))
#'
#' @export
#' @seealso \code{\link{create_LDprofile}}
Zalpha_Zscore<-function(pos, ws, x, dist, LDprofile_bins, LDprofile_rsq, LDprofile_sd, minRandL = 4, minRL = 25, X = NULL){

  ## Check things are in the correct format

  # Check pos is a numeric vector
  if (is.numeric(pos)==FALSE || is.vector(pos)==FALSE){ stop("pos must be a numeric vector") }
  # Check x is a matrix
  if (is.matrix(x)==FALSE){ stop("x must be a matrix") }
  # Check x has rows equal to the length of pos
  if (length(pos) != nrow(x)){ stop("The number of rows in x must equal the number of SNP locations given in pos") }
  # Check SNPs are all biallelic
  if (sum(apply(x,1,function(x){length(na.omit(unique(x)))}) != 2)>0){ stop("SNPs must all be biallelic") }
  # Check dist is a numeric vector
  if (is.numeric(dist)==FALSE || is.vector(dist)==FALSE){ stop("dist must be a numeric vector") }
  # Check dist is the same length as pos
  if (length(pos) != length(dist)){ stop("The number of values in dist must equal the number of SNP locations given in pos") }
  # Check window size is a number greater than 0
  if (is.numeric(ws)==FALSE || ws <= 0){ stop("ws must be a number greater than 0") }
  # Check LDprofile_bins is a numeric vector
  if (is.numeric(LDprofile_bins)==FALSE || is.vector(LDprofile_bins)==FALSE){ stop("LDprofile_bins must be a numeric vector") }
  # Get bin size from LDprofile_bins
  bin_size<-LDprofile_bins[2]-LDprofile_bins[1]
  # Check LDprofile_bins are of equal size
  if (isTRUE(all.equal(diff(LDprofile_bins),rep(bin_size,length(LDprofile_bins)-1)))==FALSE){ stop("LDprofile_bins must be of equal size") }
  # Check LDprofile_rsq is a numeric vector
  if (is.numeric(LDprofile_rsq)==FALSE || is.vector(LDprofile_rsq)==FALSE){ stop("LDprofile_rsq must be a numeric vector") }
  # Check LDprofile_sd is a numeric vector
  if (is.numeric(LDprofile_sd)==FALSE || is.vector(LDprofile_sd)==FALSE){ stop("LDprofile_sd must be a numeric vector") }
  # Check values of LDprofile_rsq are between 0 and 1
  if (sum(LDprofile_rsq<0 | LDprofile_rsq>1)>0){ stop("Values stored in LDprofile_rsq must be between 0 and 1") }
  # Check that the LDprofile vectors are the same length
  if (length(LDprofile_bins) != length(LDprofile_rsq)){ stop("LDprofile_rsq must contain the same number of values as there are bins given in LDprofile_bins") }
  if (length(LDprofile_bins) != length(LDprofile_sd)){ stop("LDprofile_sd must contain the same number of values as there are bins given in LDprofile_bins") }
  # Check minRandL is 0 or greater
  if (is.numeric(minRandL)==FALSE || minRandL < 0){ stop("minRandL must be a number greater than or equal to 0") }
  # Check minRL is 0 or greater
  if (is.numeric(minRL)==FALSE || minRL < 0){ stop("minRL must be a number greater than or equal to 0") }
  # If X is specified, check it is in the correct format
  if (is.null(X)==FALSE){
    if (is.numeric(X)==FALSE || is.vector(X)==FALSE || length(X) != 2){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") }
    # X is in the correct format
    # Check that X will actually return a result (i.e. that the region specified by X overlaps with pos)
    if (length(pos[pos>=X[1] & pos<=X[2]])==0){ stop("The region specified by X is outside the region contained in the pos vector") }
  } else {
    # Set X equal to the extremes of pos
    X<-c(pos[1],pos[length(pos)])
  }

  # Force the R code to print decimals in full rather than in scientific format
  oldOptions<-options(scipen=999)
  on.exit(options(oldOptions))

  # Change matrix x to numeric if it isn't already
  if (is.numeric(x)==FALSE){
    x<-matrix(as.numeric(factor(x)),nrow=dim(x)[1])
  }

  # Set up output list
  outputLength<-length(pos[pos>=X[1] & pos<=X[2]])
  outputList<-list(position=pos[pos>=X[1] & pos<=X[2]],Zalpha_Zscore=rep(NA,outputLength))

  # Loop over each position in the output list and calculate the expected Zalpha
  for (i in 1:outputLength){
    # Current physical position in chromosome
    currentPos<-outputList$position[i]
    ## check L, R and LR
    noL <- length(pos[pos>=currentPos-ws/2 & pos < currentPos]) ## Number of SNPs to the left of the current SNP
    noR <- length(pos[pos<=currentPos+ws/2 & pos > currentPos]) ## Number of SNPs to the right of the current SNP
    if (noL < minRandL || noR < minRandL || noL*noR < minRL){
      # NA
      outputList$Zalpha_Zscore[i]<-NA
    } else {
      ## Left
      # Find distances between each SNP in L and round to bin size
      bins<-sapply(lower_triangle(outer(dist[pos>=currentPos-ws/2 & pos < currentPos],dist[pos>=currentPos-ws/2 & pos < currentPos],"-")),assign_bins,bin_size=bin_size)
      bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins)
      Lrsq<-lower_triangle(cor(t(x[pos>=currentPos-ws/2 & pos < currentPos,]),use="pairwise.complete.obs")^2)
      LrsqExp<-merge(data.frame(bins=as.character(bins),Lrsq),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_rsq,LDprofile_sd),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE)
      LrsqSum<-sum((LrsqExp$Lrsq-LrsqExp$LDprofile_rsq)/LrsqExp$LDprofile_sd)
      ## Right
      bins<-sapply(lower_triangle(outer(dist[pos<=currentPos+ws/2 & pos > currentPos],dist[pos<=currentPos+ws/2 & pos > currentPos],"-")),assign_bins,bin_size=bin_size)
      bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins)
      Rrsq<-lower_triangle(cor(t(x[pos<=currentPos+ws/2 & pos > currentPos,]),use="pairwise.complete.obs")^2)
      RrsqExp<-merge(data.frame(bins=as.character(bins),Rrsq),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_rsq,LDprofile_sd),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE)
      RrsqSum<-sum((RrsqExp$Rrsq-RrsqExp$LDprofile_rsq)/RrsqExp$LDprofile_sd)
      outputList$Zalpha_Zscore[i]<-(LrsqSum/choose(noL,2)+RrsqSum/choose(noR,2))/2
    }
  }

  if (sum(is.na(outputList$Zalpha_Zscore))==outputLength){
    warning("No Zalpha_Zscore values were calculated, try reducing minRandL and minRL or increasing the window size")
  }

  return(outputList)
}
/scratch/gouwar.j/cran-all/cranData/zalpha/R/Zalpha_Zscore.R
#' Runs all the statistics in the zalpha package
#'
#' Returns every statistic for each SNP location, given the appropriate parameters. See Details for more information.
#'
#' Not all statistics will be returned, depending on the parameters supplied to the function.\cr
#' If \code{x} is not supplied, only \code{\link{Zalpha_expected}}, \code{\link{Zbeta_expected}}, \code{\link{LR}} and \code{\link{L_plus_R}} will be calculated.\cr
#' For any of the statistics which use an expected \eqn{r^2}{r^2} value, the parameters \code{dist}, \code{LDprofile_bins} and \code{LDprofile_rsq} must be supplied.
#' This includes the statistics: \code{\link{Zalpha_expected}}, \code{\link{Zalpha_rsq_over_expected}}, \code{\link{Zalpha_log_rsq_over_expected}}, \code{\link{Zalpha_Zscore}}, \code{\link{Zalpha_BetaCDF}}, \code{\link{Zbeta_expected}}, \code{\link{Zbeta_rsq_over_expected}}, \code{\link{Zbeta_log_rsq_over_expected}}, \code{\link{Zbeta_Zscore}} and \code{\link{Zbeta_BetaCDF}}.
#' \itemize{
#' \item For \code{\link{Zalpha_Zscore}} and \code{\link{Zbeta_Zscore}} to be calculated, the parameter \code{LDprofile_sd} must also be supplied.
#' \item For \code{\link{Zalpha_BetaCDF}} and \code{\link{Zbeta_BetaCDF}} to be calculated, the parameters \code{LDprofile_Beta_a} and \code{LDprofile_Beta_b} must also be supplied.
#' }
#' The LD profile describes the expected correlation between SNPs at a given genetic distance, generated using simulations or
#' real data. Care should be taken to utilise an LD profile that is representative of the population in question. The LD
#' profile should consist of evenly sized bins of distances (for example 0.0001 cM per bin), where the value given is the (inclusive) lower
#' bound of the bin. Ideally, an LD profile would be generated using data from a null population with no selection, however one can be generated
#' using this data. See the \code{\link{create_LDprofile}} function for more information on how to create an LD profile.
#' For more information about the statistics, please see Jacobs (2016).
#'
#' @importFrom stats cor pbeta na.omit
#'
#' @param pos A numeric vector of SNP locations
#' @param ws The window size which the statistics will be calculated over. This should be on the same scale as the \code{pos} vector.
#' @param x Optional. A matrix of SNP values. Columns represent chromosomes; rows are SNP locations. Hence, the number of rows should equal the length of the \code{pos} vector. SNPs should all be biallelic.
#' @param dist Optional. A numeric vector of genetic distances (e.g. cM, LDU). This should be the same length as \code{pos}.
#' @param LDprofile_bins Optional. A numeric vector containing the lower bound of the bins used in the LD profile. These should be of equal size.
#' @param LDprofile_rsq Optional. A numeric vector containing the expected \eqn{r^2}{r^2} values for the corresponding bin in the LD profile. Must be between 0 and 1.
#' @param LDprofile_sd Optional. A numeric vector containing the standard deviation of the \eqn{r^2}{r^2} values for the corresponding bin in the LD profile.
#' @param LDprofile_Beta_a Optional. A numeric vector containing the first estimated Beta parameter for the corresponding bin in the LD profile.
#' @param LDprofile_Beta_b Optional. A numeric vector containing the second estimated Beta parameter for the corresponding bin in the LD profile.
#' @param minRandL Minimum number of SNPs in each set R and L for the statistics to be calculated. L is the set of SNPs to the left of the target SNP and R to the right, within the given window size \code{ws}. Default is 4.
#' @param minRL Minimum value for the product of the set sizes for R and L. Default is 25.
#' @param X Optional. Specify a region of the chromosome to calculate the statistics for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate the statistics for every SNP in the \code{pos} vector.
#'
#' @return A list containing the SNP positions and the statistics for those SNPs
#' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 1807
#' @examples
#' ## load the snps and LDprofile example datasets
#' data(snps)
#' data(LDprofile)
#' ## run Zalpha_all over all the SNPs with a window size of 3000 bp
#' ## will return all 15 statistics
#' Zalpha_all(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances,
#' LDprofile$bin,LDprofile$rsq,LDprofile$sd,LDprofile$Beta_a,LDprofile$Beta_b)
#' ## only return results for SNPs between locations 600 and 1500 bp
#' Zalpha_all(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances,
#' LDprofile$bin,LDprofile$rsq,LDprofile$sd,LDprofile$Beta_a,LDprofile$Beta_b,X=c(600,1500))
#' ## will only return statistics not requiring an LD profile
#' Zalpha_all(snps$bp_positions,3000,as.matrix(snps[,3:12]))
#'
#' @export
#' @seealso \code{\link{Zalpha}}, \code{\link{Zalpha_expected}}, \code{\link{Zalpha_rsq_over_expected}}, \code{\link{Zalpha_log_rsq_over_expected}}, \code{\link{Zalpha_Zscore}}, \code{\link{Zalpha_BetaCDF}}, \code{\link{Zbeta}}, \code{\link{Zbeta_expected}}, \code{\link{Zbeta_rsq_over_expected}}, \code{\link{Zbeta_log_rsq_over_expected}}, \code{\link{Zbeta_Zscore}}, \code{\link{Zbeta_BetaCDF}}, \code{\link{LR}}, \code{\link{L_plus_R}}, \code{\link{create_LDprofile}}.
Zalpha_all <- function(pos, ws, x=NULL, dist=NULL, LDprofile_bins=NULL, LDprofile_rsq=NULL, LDprofile_sd=NULL, LDprofile_Beta_a=NULL, LDprofile_Beta_b=NULL, minRandL = 4, minRL = 25, X = NULL) {

  ## Check things are in the correct format

  # Check pos is a numeric vector
  if (is.numeric(pos)==FALSE || is.vector(pos)==FALSE){ stop("pos must be a numeric vector") }
  # If x is supplied
  if (is.null(x)==FALSE){
    # Check x is a matrix
    if (is.matrix(x)==FALSE){ stop("x must be a matrix") }
    # Check x has rows equal to the length of pos
    if (length(pos) != nrow(x)){ stop("The number of rows in x must equal the number of SNP locations given in pos") }
    # Check SNPs are all biallelic
    if (sum(apply(x,1,function(x){length(na.omit(unique(x)))}) != 2)>0){ stop("SNPs must all be biallelic") }
    # Change matrix x to numeric if it isn't already
    if (is.numeric(x)==FALSE){
      x<-matrix(as.numeric(factor(x)),nrow=dim(x)[1])
    }
  }
  # Check window size is a number greater than 0
  if (is.numeric(ws)==FALSE || ws <= 0){ stop("ws must be a number greater than 0") }
  if (is.null(dist)==FALSE){
    # Check dist is a numeric vector
    if (is.numeric(dist)==FALSE || is.vector(dist)==FALSE){ stop("dist must be a numeric vector") }
    # Check dist is the same length as pos
    if (length(pos) != length(dist)){ stop("The number of values in dist must equal the number of SNP locations given in pos") }
  }
  if (is.null(LDprofile_bins)==FALSE){
    # Check LDprofile_bins is a numeric vector
    if (is.numeric(LDprofile_bins)==FALSE || is.vector(LDprofile_bins)==FALSE){ stop("LDprofile_bins must be a numeric vector") }
    # Get bin size from LDprofile_bins
    bin_size<-LDprofile_bins[2]-LDprofile_bins[1]
    # Check LDprofile_bins are of equal size
    if (isTRUE(all.equal(diff(LDprofile_bins),rep(bin_size,length(LDprofile_bins)-1)))==FALSE){ stop("LDprofile_bins must be of equal size") }
  }
  if (is.null(LDprofile_rsq)==FALSE & is.null(LDprofile_bins)==FALSE){
    # Check LDprofile_rsq is a numeric vector
    if (is.numeric(LDprofile_rsq)==FALSE || is.vector(LDprofile_rsq)==FALSE){ stop("LDprofile_rsq must be a numeric vector") }
    # Check values of LDprofile_rsq are between 0 and 1
    if (sum(LDprofile_rsq<0 | LDprofile_rsq>1)>0){ stop("Values stored in LDprofile_rsq must be between 0 and 1") }
    # Check that the LDprofile vectors are the same length
    if (length(LDprofile_bins) != length(LDprofile_rsq)){ stop("LDprofile_rsq must contain the same number of values as there are bins given in LDprofile_bins") }
  }
  if (is.null(LDprofile_sd)==FALSE & is.null(LDprofile_bins)==FALSE){
    # Check LDprofile_sd is a numeric vector
    if (is.numeric(LDprofile_sd)==FALSE || is.vector(LDprofile_sd)==FALSE){ stop("LDprofile_sd must be a numeric vector") }
    # Check that the LDprofile vectors are the same length
    if (length(LDprofile_bins) != length(LDprofile_sd)){ stop("LDprofile_sd must contain the same number of values as there are bins given in LDprofile_bins") }
  }
  if (is.null(LDprofile_Beta_a)==FALSE & is.null(LDprofile_Beta_b)==FALSE & is.null(LDprofile_bins)==FALSE){
    # Check LDprofile_Beta_a is a numeric vector
    if (is.numeric(LDprofile_Beta_a)==FALSE || is.vector(LDprofile_Beta_a)==FALSE){ stop("LDprofile_Beta_a must be a numeric vector") }
    # Check LDprofile_Beta_b is a numeric vector
    if (is.numeric(LDprofile_Beta_b)==FALSE || is.vector(LDprofile_Beta_b)==FALSE){ stop("LDprofile_Beta_b must be a numeric vector") }
    # Check that the LDprofile vectors are the same length
    if (length(LDprofile_bins) != length(LDprofile_Beta_a)){ stop("LDprofile_Beta_a must contain the same number of values as there are bins given in LDprofile_bins") }
    if (length(LDprofile_bins) != length(LDprofile_Beta_b)){ stop("LDprofile_Beta_b must contain the same number of values as there are bins given in LDprofile_bins") }
  }
  # Check minRandL is 0 or greater
  if (is.numeric(minRandL)==FALSE || minRandL < 0){ stop("minRandL must be a number greater than or equal to 0") }
  # Check minRL is 0 or greater
  if (is.numeric(minRL)==FALSE || minRL < 0){ stop("minRL must be a number greater than or equal to 0") }
  # If X is specified, check it is in the correct format
  if (is.null(X)==FALSE){
    if (is.numeric(X)==FALSE || is.vector(X)==FALSE || length(X) != 2){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") }
    # X is in the correct format
    # Check that X will actually return a result (i.e. that the region specified by X overlaps with pos)
    if (length(pos[pos>=X[1] & pos<=X[2]])==0){ stop("The region specified by X is outside the region contained in the pos vector") }
  } else {
    # Set X equal to the extremes of pos
    X<-c(pos[1],pos[length(pos)])
  }

  # Force the R code to print decimals in full rather than in scientific format
  oldOptions<-options(scipen=999)
  on.exit(options(oldOptions))

  # Set up output list
  outputLength<-length(pos[pos>=X[1] & pos<=X[2]])
  outputList<-list(position=pos[pos>=X[1] & pos<=X[2]],LR=rep(NA,outputLength),L_plus_R=rep(NA,outputLength))
  if (is.null(dist)==FALSE & is.null(LDprofile_bins)==FALSE & is.null(LDprofile_rsq)==FALSE){
    outputList$Zalpha_expected<-rep(NA,outputLength)
    outputList$Zbeta_expected<-rep(NA,outputLength)
  }
  if (is.null(x)==FALSE){
    outputList$Zalpha<-rep(NA,outputLength)
    outputList$Zbeta<-rep(NA,outputLength)
    if (is.null(dist)==FALSE & is.null(LDprofile_bins)==FALSE & is.null(LDprofile_rsq)==FALSE){
      outputList$Zalpha_rsq_over_expected<-rep(NA,outputLength)
      outputList$Zalpha_log_rsq_over_expected<-rep(NA,outputLength)
      outputList$Zbeta_rsq_over_expected<-rep(NA,outputLength)
      outputList$Zbeta_log_rsq_over_expected<-rep(NA,outputLength)
      if (is.null(LDprofile_sd)==FALSE){
        outputList$Zalpha_Zscore<-rep(NA,outputLength)
        outputList$Zbeta_Zscore<-rep(NA,outputLength)
      }
      if (is.null(LDprofile_Beta_a)==FALSE & is.null(LDprofile_Beta_b)==FALSE){
        outputList$Zalpha_BetaCDF<-rep(NA,outputLength)
        outputList$Zbeta_BetaCDF<-rep(NA,outputLength)
      }
    }
  }

  # Loop over each position in the output list and calculate the statistics
  for (i in 1:outputLength){
    # Current physical position in chromosome
    currentPos<-outputList$position[i]
    ## check L, R and LR
    noL <- length(pos[pos>=currentPos-ws/2 & pos < currentPos]) ## Number of SNPs to the left of the current SNP
    noR <- length(pos[pos<=currentPos+ws/2 & pos > currentPos]) ## Number of SNPs to the right of the current SNP
    outputList$LR[i]<-noL*noR
    outputList$L_plus_R[i]<-choose(noL,2)+choose(noR,2)
    if (noL < minRandL || noR < minRandL || noL*noR < minRL){
      # NA for everything - leave as is
    } else {
      if (is.null(x)==FALSE){
        ## Left
        Lrsq<-lower_triangle(cor(t(x[pos>=currentPos-ws/2 & pos < currentPos,]),use="pairwise.complete.obs")^2)
        ## Right
        Rrsq<-lower_triangle(cor(t(x[pos<=currentPos+ws/2 & pos > currentPos,]),use="pairwise.complete.obs")^2)
        ## Over
        rsq<-as.vector(t((cor(t(x[pos>=currentPos-ws/2 & pos<=currentPos+ws/2,]),use="pairwise.complete.obs")^2)[1:noL,(noL+2):(noL+noR+1)]))
        outputList$Zalpha[i]<-(sum(Lrsq)/choose(noL,2)+sum(Rrsq)/choose(noR,2))/2
        outputList$Zbeta[i]<-sum(rsq)/(noL*noR)
      } else {
        Lrsq<-NA
        Rrsq<-NA
        rsq<-NA
      }
      if (is.null(dist)==FALSE & is.null(LDprofile_bins)==FALSE & is.null(LDprofile_rsq)==FALSE){
        # Left
        bins<-sapply(lower_triangle(outer(dist[pos>=currentPos-ws/2 & pos < currentPos],dist[pos>=currentPos-ws/2 & pos < currentPos],"-")),assign_bins,bin_size=bin_size)
        bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins)
        LrsqExp<-merge(data.frame(bins=as.character(bins),Lrsq),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_rsq),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE)
        # Right
        bins<-sapply(lower_triangle(outer(dist[pos<=currentPos+ws/2 & pos > currentPos],dist[pos<=currentPos+ws/2 & pos > currentPos],"-")),assign_bins,bin_size=bin_size)
        bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins)
        RrsqExp<-merge(data.frame(bins=as.character(bins),Rrsq),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_rsq),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE)
        # Over
        bins<-sapply(outer(dist[pos<=currentPos+ws/2 & pos > currentPos],dist[pos>=currentPos-ws/2 & pos < currentPos],"-"),assign_bins,bin_size=bin_size)
        bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins)
        rsqExp<-merge(data.frame(bins=as.character(bins),rsq),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_rsq),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE)
        outputList$Zalpha_expected[i]<-(sum(LrsqExp$LDprofile_rsq)/choose(noL,2)+sum(RrsqExp$LDprofile_rsq)/choose(noR,2))/2
        outputList$Zbeta_expected[i]<-sum(rsqExp$LDprofile_rsq)/(noL*noR)
        if (is.null(x)==FALSE){
          outputList$Zalpha_rsq_over_expected[i]<-(sum(LrsqExp$Lrsq/LrsqExp$LDprofile_rsq)/choose(noL,2)+sum(RrsqExp$Rrsq/RrsqExp$LDprofile_rsq)/choose(noR,2))/2
          outputList$Zbeta_rsq_over_expected[i]<-sum(rsqExp$rsq/rsqExp$LDprofile_rsq)/(noL*noR)
          # removes zeros by replacing with lowest correlation greater than zero, for logging
          LrsqExplog <- LrsqExp
          RrsqExplog <- RrsqExp
          rsqExplog <- rsqExp
          LrsqExplog$Lrsq[LrsqExplog$Lrsq==0]<-min(LrsqExplog$Lrsq[LrsqExplog$Lrsq>0])
          RrsqExplog$Rrsq[RrsqExplog$Rrsq==0]<-min(RrsqExplog$Rrsq[RrsqExplog$Rrsq>0])
          rsqExplog$rsq[rsqExplog$rsq==0]<-min(rsqExplog$rsq[rsqExplog$rsq>0])
          outputList$Zalpha_log_rsq_over_expected[i]<-(sum(log10(LrsqExplog$Lrsq/LrsqExp$LDprofile_rsq))/choose(noL,2)+sum(log10(RrsqExplog$Rrsq/RrsqExp$LDprofile_rsq))/choose(noR,2))/2
          outputList$Zbeta_log_rsq_over_expected[i]<-sum(log10(rsqExplog$rsq/rsqExp$LDprofile_rsq))/(noL*noR)
          if (is.null(LDprofile_sd)==FALSE){
            LrsqExp<-merge(LrsqExp,data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_sd),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE)
            RrsqExp<-merge(RrsqExp,data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_sd),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE)
            rsqExp<-merge(rsqExp,data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_sd),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE)
            outputList$Zalpha_Zscore[i]<-(sum((LrsqExp$Lrsq-LrsqExp$LDprofile_rsq)/LrsqExp$LDprofile_sd)/choose(noL,2)+sum((RrsqExp$Rrsq-RrsqExp$LDprofile_rsq)/RrsqExp$LDprofile_sd)/choose(noR,2))/2
            outputList$Zbeta_Zscore[i]<-sum((rsqExp$rsq-rsqExp$LDprofile_rsq)/rsqExp$LDprofile_sd)/(noL*noR)
          }
          if (is.null(LDprofile_Beta_a)==FALSE & is.null(LDprofile_Beta_b)==FALSE){
            LrsqExp<-merge(LrsqExp,data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_Beta_a,LDprofile_Beta_b),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE)
            RrsqExp<-merge(RrsqExp,data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_Beta_a,LDprofile_Beta_b),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE)
            rsqExp<-merge(rsqExp,data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_Beta_a,LDprofile_Beta_b),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE)
            outputList$Zalpha_BetaCDF[i]<-(sum(pbeta(LrsqExp$Lrsq,LrsqExp$LDprofile_Beta_a,LrsqExp$LDprofile_Beta_b))/choose(noL,2)+sum(pbeta(RrsqExp$Rrsq,RrsqExp$LDprofile_Beta_a,RrsqExp$LDprofile_Beta_b))/choose(noR,2))/2
            outputList$Zbeta_BetaCDF[i]<-sum(pbeta(rsqExp$rsq,rsqExp$LDprofile_Beta_a,rsqExp$LDprofile_Beta_b))/(noL*noR)
          }
        }
      }
    }
  }

  if (length(outputList)>3){
    if (sum(sapply(outputList[-c(1:3)],function(x) sum(is.na(x))==outputLength))==length(outputList)-3){
      warning("No statistics were calculated, try reducing minRandL and minRL or increasing the window size")
    }
  }

  return(outputList)
}
/scratch/gouwar.j/cran-all/cranData/zalpha/R/Zalpha_all.R
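Because the contents of the list returned by Zalpha_all() grow with the optional arguments supplied, a quick way to see which statistics were actually computed is to inspect the names of the result. The snippet below is a usage illustration based on the documented behaviour above, not additional package code.

## Inspect which statistics Zalpha_all() returned (illustrative usage)
data(snps)
data(LDprofile)
## with only positions and a window size: position, LR and L_plus_R
names(Zalpha_all(snps$bp_positions, 3000))
## with genotypes and a full LD profile: all fifteen statistics
names(Zalpha_all(snps$bp_positions, 3000, as.matrix(snps[, 3:12]), snps$cM_distances,
                 LDprofile$bin, LDprofile$rsq, LDprofile$sd, LDprofile$Beta_a, LDprofile$Beta_b))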
#' Runs the Zalpha function on the expected r-squared values for the region
#'
#' Returns a \eqn{Z_{\alpha}^{E[r^2]}}{Zalpha} value for each SNP location supplied to the function, based on
#' the expected \eqn{r^2} values given an LD profile and genetic distances.
#' For more information about the \eqn{Z_{\alpha}^{E[r^2]}}{Zalpha} statistic, please see Jacobs (2016).
#' The \eqn{Z_{\alpha}^{E[r^2]}} statistic is defined as:
#' \deqn{{Z_{\alpha}^{E[r^2]}}=\frac{{|L| \choose 2}^{-1}\sum_{i,j \in L}E[r^2_{i,j}] + {|R| \choose 2}^{-1}\sum_{i,j \in R}E[r^2_{i,j}]}{2}}
#' where \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws},
#' and \eqn{E[r^2]}{E[r^2]} is equal to the expected squared correlation between a pair of SNPs, given an LD profile.
#'
#' The LD profile describes the expected correlation between SNPs at a given genetic distance, generated using simulations or
#' real data. Care should be taken to utilise an LD profile that is representative of the population in question. The LD
#' profile should consist of evenly sized bins of distances (for example 0.0001 cM per bin), where the value given is the (inclusive) lower
#' bound of the bin. Ideally, an LD profile would be generated using data from a null population with no selection, however one can be generated
#' using this data. See the \code{\link{create_LDprofile}} function for more information on how to create an LD profile.
#'
#' @param pos A numeric vector of SNP locations
#' @param ws The window size which the \eqn{Z_{\alpha}^{E[r^2]}}{Zalpha} statistic will be calculated over. This should be on the same scale as the \code{pos} vector.
#' @param dist A numeric vector of genetic distances (e.g. cM, LDU). This should be the same length as \code{pos}.
#' @param LDprofile_bins A numeric vector containing the lower bound of the bins used in the LD profile. These should be of equal size.
#' @param LDprofile_rsq A numeric vector containing the expected \eqn{r^2}{r^2} values for the corresponding bin in the LD profile. Must be between 0 and 1.
#' @param minRandL Minimum number of SNPs in each set R and L for the statistic to be calculated. Default is 4.
#' @param minRL Minimum value for the product of the set sizes for R and L. Default is 25.
#' @param X Optional. Specify a region of the chromosome to calculate \eqn{Z_{\alpha}^{E[r^2]}}{Zalpha} for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate \eqn{Z_{\alpha}^{E[r^2]}}{Zalpha} for every SNP in the \code{pos} vector.
#'
#' @return A list containing the SNP positions and the \eqn{Z_{\alpha}^{E[r^2]}}{Zalpha} values for those SNPs
#' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 1807
#' @examples
#' ## load the snps and LDprofile example datasets
#' data(snps)
#' data(LDprofile)
#' ## run Zalpha_expected over all the SNPs with a window size of 3000 bp
#' Zalpha_expected(snps$bp_positions,3000,snps$cM_distances,LDprofile$bin,LDprofile$rsq)
#' ## only return results for SNPs between locations 600 and 1500 bp
#' Zalpha_expected(snps$bp_positions,3000,snps$cM_distances,LDprofile$bin,LDprofile$rsq,X=c(600,1500))
#'
#' @export
#' @seealso \code{\link{create_LDprofile}}
Zalpha_expected<-function(pos, ws, dist, LDprofile_bins, LDprofile_rsq, minRandL = 4, minRL = 25, X = NULL) {

  ## Check things are in the correct format

  # Check pos is a numeric vector
  if (is.numeric(pos)==FALSE || is.vector(pos)==FALSE){ stop("pos must be a numeric vector") }
  # Check dist is a numeric vector
  if (is.numeric(dist)==FALSE || is.vector(dist)==FALSE){ stop("dist must be a numeric vector") }
  # Check dist is the same length as pos
  if (length(pos) != length(dist)){ stop("The number of values in dist must equal the number of SNP locations given in pos") }
  # Check window size is a number greater than 0
  if (is.numeric(ws)==FALSE || ws <= 0){ stop("ws must be a number greater than 0") }
  # Check LDprofile_bins is a numeric vector
  if (is.numeric(LDprofile_bins)==FALSE || is.vector(LDprofile_bins)==FALSE){ stop("LDprofile_bins must be a numeric vector") }
  # Get bin size from LDprofile_bins
  bin_size<-LDprofile_bins[2]-LDprofile_bins[1]
  # Check LDprofile_bins are of equal size
  if (isTRUE(all.equal(diff(LDprofile_bins),rep(bin_size,length(LDprofile_bins)-1)))==FALSE){ stop("LDprofile_bins must be of equal size") }
  # Check LDprofile_rsq is a numeric vector
  if (is.numeric(LDprofile_rsq)==FALSE || is.vector(LDprofile_rsq)==FALSE){ stop("LDprofile_rsq must be a numeric vector") }
  # Check values of LDprofile_rsq are between 0 and 1
  if (sum(LDprofile_rsq<0 | LDprofile_rsq>1)>0){ stop("Values stored in LDprofile_rsq must be between 0 and 1") }
  # Check that the LDprofile vectors are the same length
  if (length(LDprofile_bins) != length(LDprofile_rsq)){ stop("LDprofile_rsq must contain the same number of values as there are bins given in LDprofile_bins") }
  # Check minRandL is 0 or greater
  if (is.numeric(minRandL)==FALSE || minRandL < 0){ stop("minRandL must be a number greater than or equal to 0") }
  # Check minRL is 0 or greater
  if (is.numeric(minRL)==FALSE || minRL < 0){ stop("minRL must be a number greater than or equal to 0") }
  # If X is specified, check it is in the correct format
  if (is.null(X)==FALSE){
    if (is.numeric(X)==FALSE || is.vector(X)==FALSE || length(X) != 2){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") }
    # X is in the correct format
    # Check that X will actually return a result (i.e. that the region specified by X overlaps with pos)
    if (length(pos[pos>=X[1] & pos<=X[2]])==0){ stop("The region specified by X is outside the region contained in the pos vector") }
  } else {
    # Set X equal to the extremes of pos
    X<-c(pos[1],pos[length(pos)])
  }

  # Force the R code to print decimals in full rather than in scientific format
  oldOptions<-options(scipen=999)
  on.exit(options(oldOptions))

  # Set up output list
  outputLength<-length(pos[pos>=X[1] & pos<=X[2]])
  outputList<-list(position=pos[pos>=X[1] & pos<=X[2]],Zalpha_expected=rep(NA,outputLength))

  # Loop over each position in the output list and calculate the expected Zalpha
  for (i in 1:outputLength){
    # Current physical position in chromosome
    currentPos<-outputList$position[i]
    ## check L, R and LR
    noL <- length(pos[pos>=currentPos-ws/2 & pos < currentPos]) ## Number of SNPs to the left of the current SNP
    noR <- length(pos[pos<=currentPos+ws/2 & pos > currentPos]) ## Number of SNPs to the right of the current SNP
    if (noL < minRandL || noR < minRandL || noL*noR < minRL){
      # NA
      outputList$Zalpha_expected[i]<-NA
    } else {
      ## Left
      # Find distances between each SNP in L and round to bin size
      bins<-sapply(lower_triangle(outer(dist[pos>=currentPos-ws/2 & pos < currentPos],dist[pos>=currentPos-ws/2 & pos < currentPos],"-")),assign_bins,bin_size=bin_size)
      bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins)
      LrsqSum<-sum(merge(data.frame(bins=as.character(bins)),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_rsq),by.x="bins",by.y="LDprofile_bins",all.x=TRUE)[,2])
      ## Right
      bins<-sapply(lower_triangle(outer(dist[pos<=currentPos+ws/2 & pos > currentPos],dist[pos<=currentPos+ws/2 & pos > currentPos],"-")),assign_bins,bin_size=bin_size)
      bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins)
      RrsqSum<-sum(merge(data.frame(bins=as.character(bins)),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_rsq),by.x="bins",by.y="LDprofile_bins",all.x=TRUE)[,2])
      outputList$Zalpha_expected[i]<-(LrsqSum/choose(noL,2)+RrsqSum/choose(noR,2))/2
    }
  }

  if (sum(is.na(outputList$Zalpha_expected))==outputLength){
    warning("No Zalpha_expected values were calculated, try reducing minRandL and minRL or increasing the window size")
  }

  return(outputList)
}
/scratch/gouwar.j/cran-all/cranData/zalpha/R/Zalpha_expected.R
#' Runs the Zalpha function on the log of the r-squared values over the expected r-squared values for the region #' #' Returns a \eqn{Z_{\alpha}^{log_{10}(r^2/E[r^2])}}{Zalpha} value for each SNP location supplied to the function, based on #' the expected \eqn{r^2} values given an LD profile and genetic distances. #' For more information about the \eqn{Z_{\alpha}^{log_{10}(r^2/E[r^2])}}{Zalpha} statistic, please see Jacobs (2016). #' The \eqn{Z_{\alpha}^{log_{10}(r^2/E[r^2])}}{Zalpha} statistic is defined as: #' \deqn{{Z_{\alpha}^{log_{10}(r^2/E[r^2])}}=\frac{{|L| \choose 2}^{-1}\sum_{i,j \in L}log_{10}(r^2_{i,j}/E[r^2_{i,j}]) + {|R| \choose 2}^{-1}\sum_{i,j \in R}log_{10}(r^2_{i,j}/E[r^2_{i,j}])}{2}} #' where \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws}, \eqn{r^2}{r^2} is equal to #' the squared correlation between a pair of SNPs, and \eqn{E[r^2]}{E[r^2]} is equal to the expected squared correlation between a pair of SNPs, given an LD profile. #' #' The LD profile describes the expected correlation between SNPs at a given genetic distance, generated using simulations or #' real data. Care should be taken to utilise an LD profile that is representative of the population in question. The LD #' profile should consist of evenly sized bins of distances (for example 0.0001 cM per bin), where the value given is the (inclusive) lower #' bound of the bin. Ideally, an LD profile would be generated using data from a null population with no selection, however one can be generated #' using this data. See the \code{\link{create_LDprofile}} function for more information on how to create an LD profile. #' #' @importFrom stats cor na.omit #' #' @param pos A numeric vector of SNP locations #' @param ws The window size which the \eqn{Z_{\alpha}^{log_{10}(r^2/E[r^2])}}{Zalpha} statistic will be calculated over. This should be on the same scale as the \code{pos} vector. #' @param x A matrix of SNP values. Columns represent chromosomes; rows are SNP locations. Hence, the number of rows should equal the length of the \code{pos} vector. SNPs should all be biallelic. #' @param dist A numeric vector of genetic distances (e.g. cM, LDU). This should be the same length as \code{pos}. #' @param LDprofile_bins A numeric vector containing the lower bound of the bins used in the LD profile. These should be of equal size. #' @param LDprofile_rsq A numeric vector containing the expected \eqn{r^2}{r^2} values for the corresponding bin in the LD profile. Must be between 0 and 1. #' @param minRandL Minimum number of SNPs in each set R and L for the statistic to be calculated. Default is 4. #' @param minRL Minimum value for the product of the set sizes for R and L. Default is 25. #' @param X Optional. Specify a region of the chromosome to calculate \eqn{Z_{\alpha}^{log_{10}(r^2/E[r^2])}}{Zalpha} for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate \eqn{Z_{\alpha}^{log_{10}(r^2/E[r^2])}}{Zalpha} for every SNP in the \code{pos} vector. #' #' @return A list containing the SNP positions and the \eqn{Z_{\alpha}^{log_{10}(r^2/E[r^2])}}{Zalpha} values for those SNPs #' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 
1807 #' @examples #' ## load the snps and LDprofile example datasets #' data(snps) #' data(LDprofile) #' ## run Zalpha_log_rsq_over_expected over all the SNPs with a window size of 3000 bp #' Zalpha_log_rsq_over_expected(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances, #' LDprofile$bin,LDprofile$rsq) #' ## only return results for SNPs between locations 600 and 1500 bp #' Zalpha_log_rsq_over_expected(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances, #' LDprofile$bin,LDprofile$rsq,X=c(600,1500)) #' #' @export #' @seealso \code{\link{create_LDprofile}} Zalpha_log_rsq_over_expected<-function(pos, ws, x, dist, LDprofile_bins, LDprofile_rsq, minRandL = 4, minRL = 25, X = NULL){ #Check things are in the correct format #Check pos is a numeric vector if (is.numeric(pos) ==FALSE || is.vector(pos)==FALSE){ stop("pos must be a numeric vector") } #Check x is a matrix if (is.matrix(x)==FALSE){ stop("x must be a matrix") } #Check x has rows equal to the length of pos if (length(pos) != nrow(x)){ stop("The number of rows in x must equal the number of SNP locations given in pos") } #Check SNPs are all biallelic if(sum(apply(x,1,function(x){length(na.omit(unique(x)))}) != 2)>0){ stop("SNPs must all be biallelic") } #Check dist is a numeric vector if (is.numeric(dist) ==FALSE || is.vector(dist)==FALSE){ stop("dist must be a numeric vector") } #Check dist is the same length as pos if (length(pos) != length(dist)){ stop("The number of values in dist must equal the number of SNP locations given in pos") } #Check windowsize is a number greater than 0 if(is.numeric(ws) ==FALSE || ws <= 0){ stop("ws must be a number greater than 0") } #Check LDprofile_bins is a numeric vector if (is.numeric(LDprofile_bins) ==FALSE || is.vector(LDprofile_bins)==FALSE){ stop("LDprofile_bins must be a numeric vector") } #Get bin size from LDprofile_bins bin_size<-LDprofile_bins[2]-LDprofile_bins[1] #Check LDprofile_bins are of equal size if (isTRUE(all.equal(diff(LDprofile_bins),rep(bin_size,length(LDprofile_bins)-1)))==FALSE){ stop("LDprofile_bins must be of equal size") } #Check LDprofile_rsq is a numeric vector if (is.numeric(LDprofile_rsq) ==FALSE || is.vector(LDprofile_rsq)==FALSE){ stop("LDprofile_rsq must be a numeric vector") } #Check values of LDprofile_rsq are between 0 and 1 if (sum(LDprofile_rsq<0 | LDprofile_rsq>1)>0){ stop("Values stored in LDprofile_rsq must be between 0 and 1") } #Check that the LDprofile vectors are the same length if (length(LDprofile_bins) != length(LDprofile_rsq)){ stop("LDprofile_rsq must contain the same number of values as there are bins given in LDprofile_bins") } #Check minRandL is 0 or greater if(is.numeric(minRandL) ==FALSE || minRandL < 0){ stop("minRandL must be a number greater than or equal to 0") } #Check minRL is 0 or greater if(is.numeric(minRL) ==FALSE || minRL < 0){ stop("minRL must be a number greater than or equal to 0") } #If X is specified, check it is in the correct format if (is.null(X)==FALSE){ if(is.numeric(X)==FALSE || is.vector(X)==FALSE){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { if (length(X) != 2){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { # X is in the correct format # Check that X will actually return a result (i.e. 
that the region specified by X overlaps with pos) if ((length(pos[pos>=X[1] & pos <= X[2]])>0) == FALSE){ stop("The region specified by X is outside the region contained in the pos vector") } } } } else { # Set X equal to the extremes of pos X<-c(pos[1],pos[length(pos)]) } # Force the R code to print decimals in full rather than in scientific format oldOptions<-options(scipen=999) on.exit(options(oldOptions)) #Change matrix x to numeric if it isn't already if (is.numeric(x)==FALSE){ x<-matrix(as.numeric(factor(x)),nrow=dim(x)[1]) } # Set up output list outputLength<-length(pos[pos>=X[1] & pos <= X[2]]) outputList<-list(position=pos[pos>=X[1] & pos <= X[2]],Zalpha_log_rsq_over_expected=rep(NA,outputLength)) # Loop over each position in the output list and calculate the expected Zalpha for (i in 1:outputLength){ # Current physical position in chromosome currentPos<-outputList$position[i] ## check L, R and LR noL <- length(pos[pos>=currentPos-ws/2 & pos < currentPos]) ## Number of SNPs to the left of the current SNP noR <- length(pos[pos<=currentPos+ws/2 & pos > currentPos]) ## Number of SNPs to the right of the current SNP if (noL < minRandL || noR < minRandL || noL*noR < minRL){ #NA outputList$Zalpha_log_rsq_over_expected[i]<-NA } else { ##Left # Find distances between each SNP in L and round to bin size bins<-sapply(lower_triangle(outer(dist[pos>=currentPos-ws/2 & pos < currentPos],dist[pos>=currentPos-ws/2 & pos < currentPos],"-")),assign_bins,bin_size=bin_size) bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins) Lrsq<- lower_triangle(cor(t(x[pos>=currentPos-ws/2 & pos < currentPos,]),use="pairwise.complete.obs")^2) Lrsq[Lrsq==0]<-min(Lrsq[Lrsq>0]) #removes zeros by replacing with lowest correlation greater than zero LrsqExp<-merge(data.frame(bins=as.character(bins),Lrsq),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_rsq),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE) LrsqSum<-sum(log10(LrsqExp$Lrsq/LrsqExp$LDprofile_rsq)) ##Right bins<-sapply(lower_triangle(outer(dist[pos<=currentPos+ws/2 & pos > currentPos],dist[pos<=currentPos+ws/2 & pos > currentPos],"-")),assign_bins,bin_size=bin_size) bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins) Rrsq<-lower_triangle(cor(t(x[pos<=currentPos+ws/2 & pos > currentPos,]),use="pairwise.complete.obs")^2) Rrsq[Rrsq==0]<-min(Rrsq[Rrsq>0]) #removes zeros by replacing with lowest correlation greater than zero RrsqExp<-merge(data.frame(bins=as.character(bins),Rrsq),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_rsq),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE) RrsqSum<-sum(log10(RrsqExp$Rrsq/RrsqExp$LDprofile_rsq)) outputList$Zalpha_log_rsq_over_expected[i]<-(LrsqSum/choose(noL,2)+RrsqSum/choose(noR,2))/2 } } if (sum(is.na(outputList$Zalpha_log_rsq_over_expected))==outputLength){ warning("No Zalpha_log_rsq_over_expected values were calculated, try reducing minRandL and minRL or increasing the window size") } return(outputList) }
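# Illustrative sketch (not part of the package source): why the lines above replace
# zero r-squared values before taking log10. An r^2 of exactly 0 would contribute
# log10(0/E[r^2]) = -Inf and swamp the sum, so the smallest non-zero r^2 in the window
# is substituted instead. The numbers below are invented purely for illustration.
rsq <- c(0, 0.2, 0.65, 1)
rsq[rsq == 0] <- min(rsq[rsq > 0])  # the zero becomes 0.2, mirroring Lrsq/Rrsq above
log10(rsq / 0.5)                    # all finite; 0.5 stands in for the expected r^2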
/scratch/gouwar.j/cran-all/cranData/zalpha/R/Zalpha_log_rsq_over_expected.R
#' Runs the Zalpha function on the r-squared values over the expected r-squared values for the region #' #' Returns a \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} value for each SNP location supplied to the function, based on #' the expected \eqn{r^2} values given an LD profile and genetic distances. #' For more information about the \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} statistic, please see Jacobs (2016). #' The \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} statistic is defined as: #' \deqn{{Z_{\alpha}^{r^2/E[r^2]}}=\frac{{|L| \choose 2}^{-1}\sum_{i,j \in L}r^2_{i,j}/E[r^2_{i,j}] + {|R| \choose 2}^{-1}\sum_{i,j \in R}r^2_{i,j}/E[r^2_{i,j}]}{2}} #' where \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws}, \eqn{r^2}{r^2} is equal to #' the squared correlation between a pair of SNPs, and \eqn{E[r^2]}{E[r^2]} is equal to the expected squared correlation between a pair of SNPs, given an LD profile. #' #' The LD profile describes the expected correlation between SNPs at a given genetic distance, generated using simulations or #' real data. Care should be taken to utilise an LD profile that is representative of the population in question. The LD #' profile should consist of evenly sized bins of distances (for example 0.0001 cM per bin), where the value given is the (inclusive) lower #' bound of the bin. Ideally, an LD profile would be generated using data from a null population with no selection, however one can be generated #' using this data. See the \code{\link{create_LDprofile}} function for more information on how to create an LD profile. #' #' @importFrom stats cor na.omit #' #' @param pos A numeric vector of SNP locations #' @param ws The window size which the \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} statistic will be calculated over. This should be on the same scale as the \code{pos} vector. #' @param x A matrix of SNP values. Columns represent chromosomes; rows are SNP locations. Hence, the number of rows should equal the length of the \code{pos} vector. SNPs should all be biallelic. #' @param dist A numeric vector of genetic distances (e.g. cM, LDU). This should be the same length as \code{pos}. #' @param LDprofile_bins A numeric vector containing the lower bound of the bins used in the LD profile. These should be of equal size. #' @param LDprofile_rsq A numeric vector containing the expected \eqn{r^2}{r^2} values for the corresponding bin in the LD profile. Must be between 0 and 1. #' @param minRandL Minimum number of SNPs in each set R and L for the statistic to be calculated. Default is 4. #' @param minRL Minimum value for the product of the set sizes for R and L. Default is 25. #' @param X Optional. Specify a region of the chromosome to calculate \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} for every SNP in the \code{pos} vector. #' #' @return A list containing the SNP positions and the \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} values for those SNPs #' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 
1807 #' @examples #' ## load the snps and LDprofile example datasets #' data(snps) #' data(LDprofile) #' ## run Zalpha_rsq_over_expected over all the SNPs with a window size of 3000 bp #' Zalpha_rsq_over_expected(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances, #' LDprofile$bin,LDprofile$rsq) #' ## only return results for SNPs between locations 600 and 1500 bp #' Zalpha_rsq_over_expected(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances, #' LDprofile$bin,LDprofile$rsq,X=c(600,1500)) #' #' @export #' @seealso \code{\link{create_LDprofile}} Zalpha_rsq_over_expected<-function(pos, ws, x, dist, LDprofile_bins, LDprofile_rsq, minRandL = 4, minRL = 25, X = NULL){ #Check things are in the correct format #Check pos is a numeric vector if (is.numeric(pos) ==FALSE || is.vector(pos)==FALSE){ stop("pos must be a numeric vector") } #Check x is a matrix if (is.matrix(x)==FALSE){ stop("x must be a matrix") } #Check x has rows equal to the length of pos if (length(pos) != nrow(x)){ stop("The number of rows in x must equal the number of SNP locations given in pos") } #Check SNPs are all biallelic if(sum(apply(x,1,function(x){length(na.omit(unique(x)))}) != 2)>0){ stop("SNPs must all be biallelic") } #Check dist is a numeric vector if (is.numeric(dist) ==FALSE || is.vector(dist)==FALSE){ stop("dist must be a numeric vector") } #Check dist is the same length as pos if (length(pos) != length(dist)){ stop("The number of values in dist must equal the number of SNP locations given in pos") } #Check windowsize is a number greater than 0 if(is.numeric(ws) ==FALSE || ws <= 0){ stop("ws must be a number greater than 0") } #Check LDprofile_bins is a numeric vector if (is.numeric(LDprofile_bins) ==FALSE || is.vector(LDprofile_bins)==FALSE){ stop("LDprofile_bins must be a numeric vector") } #Get bin size from LDprofile_bins bin_size<-LDprofile_bins[2]-LDprofile_bins[1] #Check LDprofile_bins are of equal size if (isTRUE(all.equal(diff(LDprofile_bins),rep(bin_size,length(LDprofile_bins)-1)))==FALSE){ stop("LDprofile_bins must be of equal size") } #Check LDprofile_rsq is a numeric vector if (is.numeric(LDprofile_rsq) ==FALSE || is.vector(LDprofile_rsq)==FALSE){ stop("LDprofile_rsq must be a numeric vector") } #Check values of LDprofile_rsq are between 0 and 1 if (sum(LDprofile_rsq<0 | LDprofile_rsq>1)>0){ stop("Values stored in LDprofile_rsq must be between 0 and 1") } #Check that the LDprofile vectors are the same length if (length(LDprofile_bins) != length(LDprofile_rsq)){ stop("LDprofile_rsq must contain the same number of values as there are bins given in LDprofile_bins") } #Check minRandL is 0 or greater if(is.numeric(minRandL) ==FALSE || minRandL < 0){ stop("minRandL must be a number greater than or equal to 0") } #Check minRL is 0 or greater if(is.numeric(minRL) ==FALSE || minRL < 0){ stop("minRL must be a number greater than or equal to 0") } #If X is specified, check it is in the correct format if (is.null(X)==FALSE){ if(is.numeric(X)==FALSE || is.vector(X)==FALSE){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { if (length(X) != 2){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { # X is in the correct format # Check that X will actually return a result (i.e. 
that the region specified by X overlaps with pos) if ((length(pos[pos>=X[1] & pos <= X[2]])>0) == FALSE){ stop("The region specified by X is outside the region contained in the pos vector") } } } } else { # Set X equal to the extremes of pos X<-c(pos[1],pos[length(pos)]) } # Force the R code to print decimals in full rather than in scientific format oldOptions<-options(scipen=999) on.exit(options(oldOptions)) #Change matrix x to numeric if it isn't already if (is.numeric(x)==FALSE){ x<-matrix(as.numeric(factor(x)),nrow=dim(x)[1]) } # Set up output list outputLength<-length(pos[pos>=X[1] & pos <= X[2]]) outputList<-list(position=pos[pos>=X[1] & pos <= X[2]],Zalpha_rsq_over_expected=rep(NA,outputLength)) # Loop over each position in the output list and calculate the expected Zalpha for (i in 1:outputLength){ # Current physical position in chromosome currentPos<-outputList$position[i] ## check L, R and LR noL <- length(pos[pos>=currentPos-ws/2 & pos < currentPos]) ## Number of SNPs to the left of the current SNP noR <- length(pos[pos<=currentPos+ws/2 & pos > currentPos]) ## Number of SNPs to the right of the current SNP if (noL < minRandL || noR < minRandL || noL*noR < minRL){ #NA outputList$Zalpha_rsq_over_expected[i]<-NA } else { ##Left # Find distances between each SNP in L and round to bin size bins<-sapply(lower_triangle(outer(dist[pos>=currentPos-ws/2 & pos < currentPos],dist[pos>=currentPos-ws/2 & pos < currentPos],"-")),assign_bins,bin_size=bin_size) bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins) Lrsq<- lower_triangle(cor(t(x[pos>=currentPos-ws/2 & pos < currentPos,]),use="pairwise.complete.obs")^2) LrsqExp<-merge(data.frame(bins=as.character(bins),Lrsq),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_rsq),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE) LrsqSum<-sum(LrsqExp$Lrsq/LrsqExp$LDprofile_rsq) ##Right bins<-sapply(lower_triangle(outer(dist[pos<=currentPos+ws/2 & pos > currentPos],dist[pos<=currentPos+ws/2 & pos > currentPos],"-")),assign_bins,bin_size=bin_size) bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins) Rrsq<-lower_triangle(cor(t(x[pos<=currentPos+ws/2 & pos > currentPos,]),use="pairwise.complete.obs")^2) RrsqExp<-merge(data.frame(bins=as.character(bins),Rrsq),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_rsq),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE) RrsqSum<-sum(RrsqExp$Rrsq/RrsqExp$LDprofile_rsq) outputList$Zalpha_rsq_over_expected[i]<-(LrsqSum/choose(noL,2)+RrsqSum/choose(noR,2))/2 } } if (sum(is.na(outputList$Zalpha_rsq_over_expected))==outputLength){ warning("No Zalpha_rsq_over_expected values were calculated, try reducing minRandL and minRL or increasing the window size") } return(outputList) }
/scratch/gouwar.j/cran-all/cranData/zalpha/R/Zalpha_rsq_over_expected.R
#' Runs the Zbeta function #' #' Returns a \eqn{Z_{\beta}}{Zbeta} value for each SNP location supplied to the function. #' For more information about the \eqn{Z_{\beta}}{Zbeta} statistic, please see Jacobs (2016). #' The \eqn{Z_{\beta}}{Zbeta} statistic is defined as: #' \deqn{Z_{\beta}=\frac{\sum_{i \in L,j \in R}r^2_{i,j}}{|L||R|}} #' where \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws}, and \eqn{r^2}{r^2} is equal to the squared correlation between a pair of SNPs #' #' @importFrom stats cor na.omit #' #' @param pos A numeric vector of SNP locations #' @param ws The window size which the \eqn{Z_{\beta}}{Zbeta} statistic will be calculated over. This should be on the same scale as the \code{pos} vector. #' @param x A matrix of SNP values. Columns represent chromosomes; rows are SNP locations. Hence, the number of rows should equal the length of the \code{pos} vector. SNPs should all be biallelic. #' @param minRandL Minimum number of SNPs in each set R and L for the statistic to be calculated. Default is 4. #' @param minRL Minimum value for the product of the set sizes for R and L. Default is 25. #' @param X Optional. Specify a region of the chromosome to calculate \eqn{Z_{\beta}}{Zbeta} for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate \eqn{Z_{\beta}}{Zbeta} for every SNP in the \code{pos} vector. #' #' @return A list containing the SNP positions and the \eqn{Z_{\beta}}{Zbeta} values for those SNPs #' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 1807 #' @examples #' ## load the snps example dataset #' data(snps) #' ## run Zbeta over all the SNPs with a window size of 3000 bp #' Zbeta(snps$bp_positions,3000,as.matrix(snps[,3:12])) #' ## only return results for SNPs between locations 600 and 1500 bp #' Zbeta(snps$bp_positions,3000,as.matrix(snps[,3:12]),X=c(600,1500)) #' #' @export Zbeta <- function(pos, ws, x, minRandL = 4, minRL = 25, X = NULL) { #Check things are in the correct format #Check pos is a numeric vector if (is.numeric(pos) ==FALSE || is.vector(pos)==FALSE){ stop("pos must be a numeric vector") } #Check x is a matrix if (is.matrix(x)==FALSE){ stop("x must be a matrix") } #Check x has rows equal to the length of pos if (length(pos) != nrow(x)){ stop("The number of rows in x must equal the number of SNP locations given in pos") } #Check SNPs are all biallelic if(sum(apply(x,1,function(x){length(na.omit(unique(x)))}) != 2)>0){ stop("SNPs must all be biallelic") } #Check windowsize is a number greater than 0 if(is.numeric(ws) ==FALSE || ws <= 0){ stop("ws must be a number greater than 0") } #Check minRandL is 0 or greater if(is.numeric(minRandL) ==FALSE || minRandL < 0){ stop("minRandL must be a number greater than or equal to 0") } #Check minRL is 0 or greater if(is.numeric(minRL) ==FALSE || minRL < 0){ stop("minRL must be a number greater than or equal to 0") } #If X is specified, check it is in the correct format if (is.null(X)==FALSE){ if(is.numeric(X)==FALSE || is.vector(X)==FALSE){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { if (length(X) != 2){ stop("X should be a numeric vector of length 2 e.g. 
c(100,200)") } else { # X is in the correct format # Check that X will actually return a result (i.e. that the region specified by X overlaps with pos) if ((length(pos[pos>=X[1] & pos <= X[2]])>0) == FALSE){ stop("The region specified by X is outside the region contained in the pos vector") } } } } else { # Set X equal to the extremes of pos X<-c(pos[1],pos[length(pos)]) } #Change matrix x to numeric if it isn't already if (is.numeric(x)==FALSE){ x<-matrix(as.numeric(factor(x)),nrow=dim(x)[1]) } # Set up output list outputLength<-length(pos[pos>=X[1] & pos <= X[2]]) outputList<-list(position=pos[pos>=X[1] & pos <= X[2]],Zbeta=rep(NA,outputLength)) # Loop over each position in the output data frame and calculate Zbeta for (i in 1:outputLength){ # Current physical position in chromosome currentPos<-outputList$position[i] ## check L, R and LR noL <- length(pos[pos>=currentPos-ws/2 & pos < currentPos]) ## Number of SNPs to the left of the current SNP noR <- length(pos[pos<=currentPos+ws/2 & pos > currentPos]) ## Number of SNPs to the right of the current SNP if (noL < minRandL || noR < minRandL || noL*noR < minRL){ #NA outputList$Zbeta[i]<-NA } else { rsqSum<-sum((cor(t(x[pos>=currentPos-ws/2 & pos<=currentPos+ws/2,]),use="pairwise.complete.obs")^2)[1:noL,(noL+2):(noL+noR+1)]) outputList$Zbeta[i]<-rsqSum/(noL*noR) } } if (sum(is.na(outputList$Zbeta))==outputLength){ warning("No Zbeta values were calculated, try reducing minRandL and minRL or increasing the window size") } return(outputList) }
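# Illustrative sketch (not part of the package source): how the rsqSum line above picks
# out only the left-by-right pairs. Within the window the correlation matrix is ordered
# as the noL left SNPs, the focal SNP, then the noR right SNPs, so rows 1:noL and
# columns (noL+2):(noL+noR+1) give every L x R pair while skipping the focal SNP's own
# column. Toy dimensions below (noL = 2, noR = 3) are invented for illustration.
noL <- 2; noR <- 3
m <- matrix(seq_len((noL + 1 + noR)^2), nrow = noL + 1 + noR)  # stands in for cor(...)^2
m[1:noL, (noL + 2):(noL + noR + 1)]  # a 2 x 3 block: left SNPs against right SNPs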
/scratch/gouwar.j/cran-all/cranData/zalpha/R/Zbeta.R
#' Runs the Zbeta function using a cumulative beta distribution function on the r-squared values for the region #' #' Returns a \eqn{Z_{\beta}^{BetaCDF}}{Zbeta} value for each SNP location supplied to the function, based on #' the expected \eqn{r^2} values given an LD profile and genetic distances. #' For more information about the \eqn{Z_{\beta}^{BetaCDF}}{Zbeta} statistic, please see Jacobs (2016). #' The \eqn{Z_{\beta}^{BetaCDF}}{Zbeta} statistic is defined as: #' \deqn{Z_{\beta}^{BetaCDF}=\frac{\sum_{i \in L,j \in R}\frac{B(r^2_{i,j};a,b)}{B(a,b)}}{|L||R|}} #' where \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws}, \eqn{r^2}{r^2} is equal to #' the squared correlation between a pair of SNPs, and \eqn{\frac{B(r^2_{i,j};a,b)}{B(a,b)}} is the cumulative distribution function for the Beta distribution given #' the estimated a and b parameters from the LD profile. #' #' The LD profile describes the expected correlation between SNPs at a given genetic distance, generated using simulations or #' real data. Care should be taken to utilise an LD profile that is representative of the population in question. The LD #' profile should consist of evenly sized bins of distances (for example 0.0001 cM per bin), where the value given is the (inclusive) lower #' bound of the bin. Ideally, an LD profile would be generated using data from a null population with no selection, however one can be generated #' using this data. See the \code{\link{create_LDprofile}} function for more information on how to create an LD profile. #' #' @importFrom stats cor pbeta na.omit #' #' @param pos A numeric vector of SNP locations #' @param ws The window size which the \eqn{Z_{\beta}^{BetaCDF}}{Zbeta} statistic will be calculated over. This should be on the same scale as the \code{pos} vector. #' @param x A matrix of SNP values. Columns represent chromosomes; rows are SNP locations. Hence, the number of rows should equal the length of the \code{pos} vector. SNPs should all be biallelic. #' @param dist A numeric vector of genetic distances (e.g. cM, LDU). This should be the same length as \code{pos}. #' @param LDprofile_bins A numeric vector containing the lower bound of the bins used in the LD profile. These should be of equal size. #' @param LDprofile_Beta_a A numeric vector containing the first estimated Beta parameter for the corresponding bin in the LD profile. #' @param LDprofile_Beta_b A numeric vector containing the second estimated Beta parameter for the corresponding bin in the LD profile. #' @param minRandL Minimum number of SNPs in each set R and L for the statistic to be calculated. Default is 4. #' @param minRL Minimum value for the product of the set sizes for R and L. Default is 25. #' @param X Optional. Specify a region of the chromosome to calculate \eqn{Z_{\beta}^{BetaCDF}}{Zbeta} for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate \eqn{Z_{\beta}^{BetaCDF}}{Zbeta} for every SNP in the \code{pos} vector. #' #' @return A list containing the SNP positions and the \eqn{Z_{\beta}^{BetaCDF}}{Zbeta} values for those SNPs #' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 
1807 #' @examples #' ## load the snps and LDprofile example datasets #' data(snps) #' data(LDprofile) #' ## run Zbeta_BetaCDF over all the SNPs with a window size of 3000 bp #' Zbeta_BetaCDF(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances, #' LDprofile$bin,LDprofile$Beta_a,LDprofile$Beta_b) #' ## only return results for SNPs between locations 600 and 1500 bp #' Zbeta_BetaCDF(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances, #' LDprofile$bin,LDprofile$Beta_a,LDprofile$Beta_b,X=c(600,1500)) #' #' @export #' @seealso \code{\link{create_LDprofile}} Zbeta_BetaCDF<-function(pos, ws, x, dist, LDprofile_bins, LDprofile_Beta_a, LDprofile_Beta_b, minRandL = 4, minRL = 25, X = NULL){ #Check things are in the correct format #Check pos is a numeric vector if (is.numeric(pos) ==FALSE || is.vector(pos)==FALSE){ stop("pos must be a numeric vector") } #Check x is a matrix if (is.matrix(x)==FALSE){ stop("x must be a matrix") } #Check x has rows equal to the length of pos if (length(pos) != nrow(x)){ stop("The number of rows in x must equal the number of SNP locations given in pos") } #Check SNPs are all biallelic if(sum(apply(x,1,function(x){length(na.omit(unique(x)))}) != 2)>0){ stop("SNPs must all be biallelic") } #Check dist is a numeric vector if (is.numeric(dist) ==FALSE || is.vector(dist)==FALSE){ stop("dist must be a numeric vector") } #Check dist is the same length as pos if (length(pos) != length(dist)){ stop("The number of values in dist must equal the number of SNP locations given in pos") } #Check windowsize is a number greater than 0 if(is.numeric(ws) ==FALSE || ws <= 0){ stop("ws must be a number greater than 0") } #Check LDprofile_bins is a numeric vector if (is.numeric(LDprofile_bins) ==FALSE || is.vector(LDprofile_bins)==FALSE){ stop("LDprofile_bins must be a numeric vector") } #Get bin size from LDprofile_bins bin_size<-LDprofile_bins[2]-LDprofile_bins[1] #Check LDprofile_bins are of equal size if (isTRUE(all.equal(diff(LDprofile_bins),rep(bin_size,length(LDprofile_bins)-1)))==FALSE){ stop("LDprofile_bins must be of equal size") } #Check LDprofile_Beta_a is a numeric vector if (is.numeric(LDprofile_Beta_a) ==FALSE || is.vector(LDprofile_Beta_a)==FALSE){ stop("LDprofile_Beta_a must be a numeric vector") } #Check LDprofile_Beta_b is a numeric vector if (is.numeric(LDprofile_Beta_b) ==FALSE || is.vector(LDprofile_Beta_b)==FALSE){ stop("LDprofile_Beta_b must be a numeric vector") } #Check that the LDprofile vectors are the same length if (length(LDprofile_bins) != length(LDprofile_Beta_a)){ stop("LDprofile_Beta_a must contain the same number of values as there are bins given in LDprofile_bins") } #Check that the LDprofile vectors are the same length if (length(LDprofile_bins) != length(LDprofile_Beta_b)){ stop("LDprofile_Beta_b must contain the same number of values as there are bins given in LDprofile_bins") } #Check minRandL is 0 or greater if(is.numeric(minRandL) ==FALSE || minRandL < 0){ stop("minRandL must be a number greater than or equal to 0") } #Check minRL is 0 or greater if(is.numeric(minRL) ==FALSE || minRL < 0){ stop("minRL must be a number greater than or equal to 0") } #If X is specified, check it is in the correct format if (is.null(X)==FALSE){ if(is.numeric(X)==FALSE || is.vector(X)==FALSE){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { if (length(X) != 2){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { # X is in the correct format # Check that X will actually return a result (i.e. 
that the region specified by X overlaps with pos) if ((length(pos[pos>=X[1] & pos <= X[2]])>0) == FALSE){ stop("The region specified by X is outside the region contained in the pos vector") } } } } else { # Set X equal to the extremes of pos X<-c(pos[1],pos[length(pos)]) } # Force the R code to print decimals in full rather than in scientific format oldOptions<-options(scipen=999) on.exit(options(oldOptions)) #Change matrix x to numeric if it isn't already if (is.numeric(x)==FALSE){ x<-matrix(as.numeric(factor(x)),nrow=dim(x)[1]) } # Set up output list outputLength<-length(pos[pos>=X[1] & pos <= X[2]]) outputList<-list(position=pos[pos>=X[1] & pos <= X[2]],Zbeta_BetaCDF=rep(NA,outputLength)) # Loop over each position in the output list and calculate the expected Zbeta for (i in 1:outputLength){ # Current physical position in chromosome currentPos<-outputList$position[i] ## check L, R and LR noL <- length(pos[pos>=currentPos-ws/2 & pos < currentPos]) ## Number of SNPs to the left of the current SNP noR <- length(pos[pos<=currentPos+ws/2 & pos > currentPos]) ## Number of SNPs to the right of the current SNP if (noL < minRandL || noR < minRandL || noL*noR < minRL){ #NA outputList$Zbeta_BetaCDF[i]<-NA } else { # Find distances between each SNP in L and round to bin size bins<-sapply(outer(dist[pos<=currentPos+ws/2 & pos > currentPos],dist[pos>=currentPos-ws/2 & pos < currentPos],"-"),assign_bins,bin_size=bin_size) bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins) rsq<-as.vector(t((cor(t(x[pos>=currentPos-ws/2 & pos<=currentPos+ws/2,]),use="pairwise.complete.obs")^2)[1:noL,(noL+2):(noL+noR+1)])) rsqExp<-merge(data.frame(bins=as.character(bins),rsq),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_Beta_a,LDprofile_Beta_b),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE) rsqSum<-sum(pbeta(rsqExp$rsq,rsqExp$LDprofile_Beta_a,rsqExp$LDprofile_Beta_b)) outputList$Zbeta_BetaCDF[i]<-rsqSum/(noL*noR) } } if (sum(is.na(outputList$Zbeta_BetaCDF))==outputLength){ warning("No Zbeta_BetaCDF values were calculated, try reducing minRandL and minRL or increasing the window size") } return(outputList) }
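# Illustrative sketch (not part of the package source): pbeta() above is the cumulative
# distribution function of the Beta distribution, so each observed r^2 is mapped to the
# probability that a Beta(a, b) draw is no larger than it, using the a and b fitted for
# that pair's distance bin in the LD profile. The shape parameters below are invented.
pbeta(c(0.1, 0.5, 0.9), shape1 = 2, shape2 = 5)  # values in [0, 1], increasing with r^2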
/scratch/gouwar.j/cran-all/cranData/zalpha/R/Zbeta_BetaCDF.R
#' Runs the Zbeta function using the Z score of the r-squared values for the region #' #' Returns a \eqn{Z_{\beta}^{Zscore}}{Zbeta} value for each SNP location supplied to the function, based on #' the expected \eqn{r^2} values given an LD profile and genetic distances. #' For more information about the \eqn{Z_{\beta}^{Zscore}}{Zbeta} statistic, please see Jacobs (2016). #' The \eqn{Z_{\beta}^{Zscore}}{Zbeta} statistic is defined as: #' \deqn{Z_{\beta}^{Zscore}=\frac{\sum_{i \in L,j \in R}\frac{r^2_{i,j}-E[r^2_{i,j}]}{\sigma[r^2_{i,j}]}}{|L||R|}} #' where \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws}, \eqn{r^2}{r^2} is equal to #' the squared correlation between a pair of SNPs, \eqn{E[r^2]}{E[r^2]} is equal to the expected squared correlation between a pair of SNPs, given an LD profile, and \eqn{\sigma[r^2]}{\sigma[r^2]} is the standard deviation. #' #' The LD profile describes the expected correlation between SNPs at a given genetic distance, generated using simulations or #' real data. Care should be taken to utilise an LD profile that is representative of the population in question. The LD #' profile should consist of evenly sized bins of distances (for example 0.0001 cM per bin), where the value given is the (inclusive) lower #' bound of the bin. Ideally, an LD profile would be generated using data from a null population with no selection, however one can be generated #' using this data. See the \code{\link{create_LDprofile}} function for more information on how to create an LD profile. #' #' @importFrom stats cor na.omit #' #' @param pos A numeric vector of SNP locations #' @param ws The window size which the \eqn{Z_{\beta}^{Zscore}}{Zbeta} statistic will be calculated over. This should be on the same scale as the \code{pos} vector. #' @param x A matrix of SNP values. Columns represent chromosomes; rows are SNP locations. Hence, the number of rows should equal the length of the \code{pos} vector. SNPs should all be biallelic. #' @param dist A numeric vector of genetic distances (e.g. cM, LDU). This should be the same length as \code{pos}. #' @param LDprofile_bins A numeric vector containing the lower bound of the bins used in the LD profile. These should be of equal size. #' @param LDprofile_rsq A numeric vector containing the expected \eqn{r^2}{r^2} values for the corresponding bin in the LD profile. Must be between 0 and 1. #' @param LDprofile_sd A numeric vector containing the standard deviation of the \eqn{r^2}{r^2} values for the corresponding bin in the LD profile. #' @param minRandL Minimum number of SNPs in each set R and L for the statistic to be calculated. Default is 4. #' @param minRL Minimum value for the product of the set sizes for R and L. Default is 25. #' @param X Optional. Specify a region of the chromosome to calculate \eqn{Z_{\beta}^{Zscore}}{Zbeta} for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate \eqn{Z_{\beta}^{Zscore}}{Zbeta} for every SNP in the \code{pos} vector. #' #' @return A list containing the SNP positions and the \eqn{Z_{\beta}^{Zscore}}{Zbeta} values for those SNPs #' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 
1807 #' @examples #' ## load the snps and LDprofile example datasets #' data(snps) #' data(LDprofile) #' ## run Zbeta_Zscore over all the SNPs with a window size of 3000 bp #' Zbeta_Zscore(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances, #' LDprofile$bin,LDprofile$rsq,LDprofile$sd) #' ## only return results for SNPs between locations 600 and 1500 bp #' Zbeta_Zscore(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances, #' LDprofile$bin,LDprofile$rsq,LDprofile$sd,X=c(600,1500)) #' #' @export #' @seealso \code{\link{create_LDprofile}} Zbeta_Zscore<-function(pos, ws, x, dist, LDprofile_bins, LDprofile_rsq, LDprofile_sd, minRandL = 4, minRL = 25, X = NULL){ #Check things are in the correct format #Check pos is a numeric vector if (is.numeric(pos) ==FALSE || is.vector(pos)==FALSE){ stop("pos must be a numeric vector") } #Check x is a matrix if (is.matrix(x)==FALSE){ stop("x must be a matrix") } #Check x has rows equal to the length of pos if (length(pos) != nrow(x)){ stop("The number of rows in x must equal the number of SNP locations given in pos") } #Check SNPs are all biallelic if(sum(apply(x,1,function(x){length(na.omit(unique(x)))}) != 2)>0){ stop("SNPs must all be biallelic") } #Check dist is a numeric vector if (is.numeric(dist) ==FALSE || is.vector(dist)==FALSE){ stop("dist must be a numeric vector") } #Check dist is the same length as pos if (length(pos) != length(dist)){ stop("The number of values in dist must equal the number of SNP locations given in pos") } #Check windowsize is a number greater than 0 if(is.numeric(ws) ==FALSE || ws <= 0){ stop("ws must be a number greater than 0") } #Check LDprofile_bins is a numeric vector if (is.numeric(LDprofile_bins) ==FALSE || is.vector(LDprofile_bins)==FALSE){ stop("LDprofile_bins must be a numeric vector") } #Get bin size from LDprofile_bins bin_size<-LDprofile_bins[2]-LDprofile_bins[1] #Check LDprofile_bins are of equal size if (isTRUE(all.equal(diff(LDprofile_bins),rep(bin_size,length(LDprofile_bins)-1)))==FALSE){ stop("LDprofile_bins must be of equal size") } #Check LDprofile_rsq is a numeric vector if (is.numeric(LDprofile_rsq) ==FALSE || is.vector(LDprofile_rsq)==FALSE){ stop("LDprofile_rsq must be a numeric vector") } #Check LDprofile_sd is a numeric vector if (is.numeric(LDprofile_sd) ==FALSE || is.vector(LDprofile_sd)==FALSE){ stop("LDprofile_sd must be a numeric vector") } #Check values of LDprofile_rsq are between 0 and 1 if (sum(LDprofile_rsq<0 | LDprofile_rsq>1)>0){ stop("Values stored in LDprofile_rsq must be between 0 and 1") } #Check that the LDprofile vectors are the same length if (length(LDprofile_bins) != length(LDprofile_rsq)){ stop("LDprofile_rsq must contain the same number of values as there are bins given in LDprofile_bins") } #Check that the LDprofile vectors are the same length if (length(LDprofile_bins) != length(LDprofile_sd)){ stop("LDprofile_sd must contain the same number of values as there are bins given in LDprofile_bins") } #Check minRandL is 0 or greater if(is.numeric(minRandL) ==FALSE || minRandL < 0){ stop("minRandL must be a number greater than or equal to 0") } #Check minRL is 0 or greater if(is.numeric(minRL) ==FALSE || minRL < 0){ stop("minRL must be a number greater than or equal to 0") } #If X is specified, check it is in the correct format if (is.null(X)==FALSE){ if(is.numeric(X)==FALSE || is.vector(X)==FALSE){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { if (length(X) != 2){ stop("X should be a numeric vector of length 2 e.g. 
c(100,200)") } else { # X is in the correct format # Check that X will actually return a result (i.e. that the region specified by X overlaps with pos) if ((length(pos[pos>=X[1] & pos <= X[2]])>0) == FALSE){ stop("The region specified by X is outside the region contained in the pos vector") } } } } else { # Set X equal to the extremes of pos X<-c(pos[1],pos[length(pos)]) } # Force the R code to print decimals in full rather than in scientific format oldOptions<-options(scipen=999) on.exit(options(oldOptions)) #Change matrix x to numeric if it isn't already if (is.numeric(x)==FALSE){ x<-matrix(as.numeric(factor(x)),nrow=dim(x)[1]) } # Set up output list outputLength<-length(pos[pos>=X[1] & pos <= X[2]]) outputList<-list(position=pos[pos>=X[1] & pos <= X[2]],Zbeta_Zscore=rep(NA,outputLength)) # Loop over each position in the output list and calculate the expected Zbeta for (i in 1:outputLength){ # Current physical position in chromosome currentPos<-outputList$position[i] ## check L, R and LR noL <- length(pos[pos>=currentPos-ws/2 & pos < currentPos]) ## Number of SNPs to the left of the current SNP noR <- length(pos[pos<=currentPos+ws/2 & pos > currentPos]) ## Number of SNPs to the right of the current SNP if (noL < minRandL || noR < minRandL || noL*noR < minRL){ #NA outputList$Zbeta_Zscore[i]<-NA } else { # Find distances between each SNP in L and round to bin size bins<-sapply(outer(dist[pos<=currentPos+ws/2 & pos > currentPos],dist[pos>=currentPos-ws/2 & pos < currentPos],"-"),assign_bins,bin_size=bin_size) bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins) rsq<-as.vector(t((cor(t(x[pos>=currentPos-ws/2 & pos<=currentPos+ws/2,]),use="pairwise.complete.obs")^2)[1:noL,(noL+2):(noL+noR+1)])) rsqExp<-merge(data.frame(bins=as.character(bins),rsq),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_rsq,LDprofile_sd),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE) rsqSum<-sum((rsqExp$rsq-rsqExp$LDprofile_rsq)/rsqExp$LDprofile_sd) outputList$Zbeta_Zscore[i]<-rsqSum/(noL*noR) } } if (sum(is.na(outputList$Zbeta_Zscore))==outputLength){ warning("No Zbeta_Zscore values were calculated, try reducing minRandL and minRL or increasing the window size") } return(outputList) }
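# Illustrative sketch (not part of the package source): the Z-score transform applied to
# each pair in the rsqSum line above. With an invented bin giving an expected r^2 of 0.4
# and a standard deviation of 0.15, an observed r^2 of 0.7 contributes (0.7 - 0.4) / 0.15 = 2
# to the sum, while values below the expectation contribute negatively.
(0.7 - 0.4) / 0.15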
/scratch/gouwar.j/cran-all/cranData/zalpha/R/Zbeta_Zscore.R
#' Runs the Zbeta function on the expected r-squared values for the region #' #' Returns a \eqn{Z_{\beta}^{E[r^2]}}{Zbeta} value for each SNP location supplied to the function, based on #' the expected \eqn{r^2} values given an LD profile and genetic distances. #' For more information about the \eqn{Z_{\beta}^{E[r^2]}}{Zbeta} statistic, please see Jacobs (2016). #' The \eqn{Z_{\beta}^{E[r^2]}}{Zbeta} statistic is defined as: #' \deqn{Z_{\beta}^{E[r^2]}=\frac{\sum_{i \in L,j \in R}E[r^2_{i,j}]}{|L||R|}} #' where \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws}, #' and \eqn{E[r^2]}{E[r^2]} is equal to the expected squared correlation between a pair of SNPs, given an LD profile. #' #' The LD profile describes the expected correlation between SNPs at a given genetic distance, generated using simulations or #' real data. Care should be taken to utilise an LD profile that is representative of the population in question. The LD #' profile should consist of evenly sized bins of distances (for example 0.0001 cM per bin), where the value given is the (inclusive) lower #' bound of the bin. Ideally, an LD profile would be generated using data from a null population with no selection, however one can be generated #' using this data. See the \code{\link{create_LDprofile}} function for more information on how to create an LD profile. #' #' @param pos A numeric vector of SNP locations #' @param ws The window size which the \eqn{Z_{\beta}^{E[r^2]}}{Zbeta} statistic will be calculated over. This should be on the same scale as the \code{pos} vector. #' @param dist A numeric vector of genetic distances (e.g. cM, LDU). This should be the same length as \code{pos}. #' @param LDprofile_bins A numeric vector containing the lower bound of the bins used in the LD profile. These should be of equal size. #' @param LDprofile_rsq A numeric vector containing the expected \eqn{r^2}{r^2} values for the corresponding bin in the LD profile. Must be between 0 and 1. #' @param minRandL Minimum number of SNPs in each set R and L for the statistic to be calculated. Default is 4. #' @param minRL Minimum value for the product of the set sizes for R and L. Default is 25. #' @param X Optional. Specify a region of the chromosome to calculate \eqn{Z_{\beta}^{E[r^2]}}{Zbeta} for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate \eqn{Z_{\beta}^{E[r^2]}}{Zbeta} for every SNP in the \code{pos} vector. #' #' @return A list containing the SNP positions and the \eqn{Z_{\beta}^{E[r^2]}}{Zbeta} values for those SNPs #' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 
1807 #' @examples #' ## load the snps and LDprofile example datasets #' data(snps) #' data(LDprofile) #' ## run Zbeta_expected over all the SNPs with a window size of 3000 bp #' Zbeta_expected(snps$bp_positions,3000,snps$cM_distances,LDprofile$bin,LDprofile$rsq) #' ## only return results for SNPs between locations 600 and 1500 bp #' Zbeta_expected(snps$bp_positions,3000,snps$cM_distances,LDprofile$bin,LDprofile$rsq,X=c(600,1500)) #' #' @export #' @seealso \code{\link{create_LDprofile}} Zbeta_expected<-function(pos, ws, dist, LDprofile_bins, LDprofile_rsq, minRandL = 4, minRL = 25, X = NULL) { #Check things are in the correct format #Check pos is a numeric vector if (is.numeric(pos) ==FALSE || is.vector(pos)==FALSE){ stop("pos must be a numeric vector") } #Check dist is a numeric vector if (is.numeric(dist) ==FALSE || is.vector(dist)==FALSE){ stop("dist must be a numeric vector") } #Check dist is the same length as pos if (length(pos) != length(dist)){ stop("The number of values in dist must equal the number of SNP locations given in pos") } #Check windowsize is a number greater than 0 if(is.numeric(ws) ==FALSE || ws <= 0){ stop("ws must be a number greater than 0") } #Check LDprofile_bins is a numeric vector if (is.numeric(LDprofile_bins) ==FALSE || is.vector(LDprofile_bins)==FALSE){ stop("LDprofile_bins must be a numeric vector") } #Get bin size from LDprofile_bins bin_size<-LDprofile_bins[2]-LDprofile_bins[1] #Check LDprofile_bins are of equal size if (isTRUE(all.equal(diff(LDprofile_bins),rep(bin_size,length(LDprofile_bins)-1)))==FALSE){ stop("LDprofile_bins must be of equal size") } #Check LDprofile_rsq is a numeric vector if (is.numeric(LDprofile_rsq) ==FALSE || is.vector(LDprofile_rsq)==FALSE){ stop("LDprofile_rsq must be a numeric vector") } #Check values of LDprofile_rsq are between 0 and 1 if (sum(LDprofile_rsq<0 | LDprofile_rsq>1)>0){ stop("Values stored in LDprofile_rsq must be between 0 and 1") } #Check that the LDprofile vectors are the same length if (length(LDprofile_bins) != length(LDprofile_rsq)){ stop("LDprofile_rsq must contain the same number of values as there are bins given in LDprofile_bins") } #Check minRandL is 0 or greater if(is.numeric(minRandL) ==FALSE || minRandL < 0){ stop("minRandL must be a number greater than or equal to 0") } #Check minRL is 0 or greater if(is.numeric(minRL) ==FALSE || minRL < 0){ stop("minRL must be a number greater than or equal to 0") } #If X is specified, check it is in the correct format if (is.null(X)==FALSE){ if(is.numeric(X)==FALSE || is.vector(X)==FALSE){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { if (length(X) != 2){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { # X is in the correct format # Check that X will actually return a result (i.e. 
that the region specified by X overlaps with pos) if ((length(pos[pos>=X[1] & pos <= X[2]])>0) == FALSE){ stop("The region specified by X is outside the region contained in the pos vector") } } } } else { # Set X equal to the extremes of pos X<-c(pos[1],pos[length(pos)]) } # Force the R code to print decimals in full rather than in scientific format oldOptions<-options(scipen=999) on.exit(options(oldOptions)) # Set up output list outputLength<-length(pos[pos>=X[1] & pos <= X[2]]) outputList<-list(position=pos[pos>=X[1] & pos <= X[2]],Zbeta_expected=rep(NA,outputLength)) # Loop over each position in the output list and calculate the expected Zbeta for (i in 1:outputLength){ # Current physical position in chromosome currentPos<-outputList$position[i] ## check L, R and LR noL <- length(pos[pos>=currentPos-ws/2 & pos < currentPos]) ## Number of SNPs to the left of the current SNP noR <- length(pos[pos<=currentPos+ws/2 & pos > currentPos]) ## Number of SNPs to the right of the current SNP if (noL < minRandL || noR < minRandL || noL*noR < minRL){ #NA outputList$Zbeta_expected[i]<-NA } else { # Find the distances between each SNP in the over region and round to bin size bins<-sapply(outer(dist[pos<=currentPos+ws/2 & pos > currentPos],dist[pos>=currentPos-ws/2 & pos < currentPos],"-"),assign_bins,bin_size=bin_size) bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins) rsqSum<-sum(merge(data.frame(bins=as.character(bins)),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_rsq),by.x="bins",by.y="LDprofile_bins",all.x=TRUE)[,2]) outputList$Zbeta_expected[i]<-rsqSum/(noL*noR) } } if (sum(is.na(outputList$Zbeta_expected))==outputLength){ warning("No Zbeta_expected values were calculated, try reducing minRandL and minRL or increasing the window size") } return(outputList) }
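# Illustrative sketch (not part of the package source): how the merge() above looks up the
# expected r^2 for each binned distance. Both keys are converted to character so that bins
# join on their character representation rather than by floating-point comparison; any bin
# beyond the profile has already been capped at max(LDprofile_bins). The toy profile below
# is invented purely for illustration.
toy_bins <- c(0.0002, 0, 0.0001, 0.0002)  # binned distances for four L x R pairs
toy_profile <- data.frame(bin = c(0, 0.0001, 0.0002), rsq = c(0.90, 0.65, 0.40))
merge(data.frame(bins = as.character(toy_bins)),
      data.frame(bin = as.character(toy_profile$bin), rsq = toy_profile$rsq),
      by.x = "bins", by.y = "bin", all.x = TRUE)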
/scratch/gouwar.j/cran-all/cranData/zalpha/R/Zbeta_expected.R
#' Runs the Zbeta function on the log of the r-squared values over the expected r-squared values for the region #' #' Returns a \eqn{Z_{\beta}^{log_{10}(r^2/E[r^2])}}{Zbeta} value for each SNP location supplied to the function, based on #' the expected \eqn{r^2} values given an LD profile and genetic distances. #' For more information about the \eqn{Z_{\beta}^{log_{10}(r^2/E[r^2])}}{Zbeta} statistic, please see Jacobs (2016). #' The \eqn{Z_{\beta}^{log_{10}(r^2/E[r^2])}}{Zbeta} statistic is defined as: #' \deqn{Z_{\beta}^{log_{10}(r^2/E[r^2])}=\frac{\sum_{i \in L,j \in R}log_{10}(r^2_{i,j}/E[r^2_{i,j}])}{|L||R|}} #' where \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws}, \eqn{r^2}{r^2} is equal to #' the squared correlation between a pair of SNPs, and \eqn{E[r^2]}{E[r^2]} is equal to the expected squared correlation between a pair of SNPs, given an LD profile. #' #' The LD profile describes the expected correlation between SNPs at a given genetic distance, generated using simulations or #' real data. Care should be taken to utilise an LD profile that is representative of the population in question. The LD #' profile should consist of evenly sized bins of distances (for example 0.0001 cM per bin), where the value given is the (inclusive) lower #' bound of the bin. Ideally, an LD profile would be generated using data from a null population with no selection, however one can be generated #' using this data. See the \code{\link{create_LDprofile}} function for more information on how to create an LD profile. #' #' @importFrom stats cor na.omit #' #' @param pos A numeric vector of SNP locations #' @param ws The window size which the \eqn{Z_{\beta}^{log_{10}(r^2/E[r^2])}}{Zbeta} statistic will be calculated over. This should be on the same scale as the \code{pos} vector. #' @param x A matrix of SNP values. Columns represent chromosomes; rows are SNP locations. Hence, the number of rows should equal the length of the \code{pos} vector. SNPs should all be biallelic. #' @param dist A numeric vector of genetic distances (e.g. cM, LDU). This should be the same length as \code{pos}. #' @param LDprofile_bins A numeric vector containing the lower bound of the bins used in the LD profile. These should be of equal size. #' @param LDprofile_rsq A numeric vector containing the expected \eqn{r^2}{r^2} values for the corresponding bin in the LD profile. Must be between 0 and 1. #' @param minRandL Minimum number of SNPs in each set R and L for the statistic to be calculated. Default is 4. #' @param minRL Minimum value for the product of the set sizes for R and L. Default is 25. #' @param X Optional. Specify a region of the chromosome to calculate \eqn{Z_{\beta}^{log_{10}(r^2/E[r^2])}}{Zbeta} for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate \eqn{Z_{\beta}^{log_{10}(r^2/E[r^2])}}{Zbeta} for every SNP in the \code{pos} vector. #' #' @return A list containing the SNP positions and the \eqn{Z_{\beta}^{log_{10}(r^2/E[r^2])}}{Zbeta} values for those SNPs #' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 
1807 #' @examples #' ## load the snps and LDprofile example datasets #' data(snps) #' data(LDprofile) #' ## run Zbeta_log_rsq_over_expected over all the SNPs with a window size of 3000 bp #' Zbeta_log_rsq_over_expected(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances, #' LDprofile$bin,LDprofile$rsq) #' ## only return results for SNPs between locations 600 and 1500 bp #' Zbeta_log_rsq_over_expected(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances, #' LDprofile$bin,LDprofile$rsq,X=c(600,1500)) #' #' @export #' @seealso \code{\link{create_LDprofile}} Zbeta_log_rsq_over_expected<-function(pos, ws, x, dist, LDprofile_bins, LDprofile_rsq, minRandL = 4, minRL = 25, X = NULL){ #Check things are in the correct format #Check pos is a numeric vector if (is.numeric(pos) ==FALSE || is.vector(pos)==FALSE){ stop("pos must be a numeric vector") } #Check x is a matrix if (is.matrix(x)==FALSE){ stop("x must be a matrix") } #Check x has rows equal to the length of pos if (length(pos) != nrow(x)){ stop("The number of rows in x must equal the number of SNP locations given in pos") } #Check SNPs are all biallelic if(sum(apply(x,1,function(x){length(na.omit(unique(x)))}) != 2)>0){ stop("SNPs must all be biallelic") } #Check dist is a numeric vector if (is.numeric(dist) ==FALSE || is.vector(dist)==FALSE){ stop("dist must be a numeric vector") } #Check dist is the same length as pos if (length(pos) != length(dist)){ stop("The number of values in dist must equal the number of SNP locations given in pos") } #Check windowsize is a number greater than 0 if(is.numeric(ws) ==FALSE || ws <= 0){ stop("ws must be a number greater than 0") } #Check LDprofile_bins is a numeric vector if (is.numeric(LDprofile_bins) ==FALSE || is.vector(LDprofile_bins)==FALSE){ stop("LDprofile_bins must be a numeric vector") } #Get bin size from LDprofile_bins bin_size<-LDprofile_bins[2]-LDprofile_bins[1] #Check LDprofile_bins are of equal size if (isTRUE(all.equal(diff(LDprofile_bins),rep(bin_size,length(LDprofile_bins)-1)))==FALSE){ stop("LDprofile_bins must be of equal size") } #Check LDprofile_rsq is a numeric vector if (is.numeric(LDprofile_rsq) ==FALSE || is.vector(LDprofile_rsq)==FALSE){ stop("LDprofile_rsq must be a numeric vector") } #Check values of LDprofile_rsq are between 0 and 1 if (sum(LDprofile_rsq<0 | LDprofile_rsq>1)>0){ stop("Values stored in LDprofile_rsq must be between 0 and 1") } #Check that the LDprofile vectors are the same length if (length(LDprofile_bins) != length(LDprofile_rsq)){ stop("LDprofile_rsq must contain the same number of values as there are bins given in LDprofile_bins") } #Check minRandL is 0 or greater if(is.numeric(minRandL) ==FALSE || minRandL < 0){ stop("minRandL must be a number greater than or equal to 0") } #Check minRL is 0 or greater if(is.numeric(minRL) ==FALSE || minRL < 0){ stop("minRL must be a number greater than or equal to 0") } #If X is specified, check it is in the correct format if (is.null(X)==FALSE){ if(is.numeric(X)==FALSE || is.vector(X)==FALSE){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { if (length(X) != 2){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { # X is in the correct format # Check that X will actually return a result (i.e. 
that the region specified by X overlaps with pos) if ((length(pos[pos>=X[1] & pos <= X[2]])>0) == FALSE){ stop("The region specified by X is outside the region contained in the pos vector") } } } } else { # Set X equal to the extremes of pos X<-c(pos[1],pos[length(pos)]) } # Force the R code to print decimals in full rather than in scientific format oldOptions<-options(scipen=999) on.exit(options(oldOptions)) #Change matrix x to numeric if it isn't already if (is.numeric(x)==FALSE){ x<-matrix(as.numeric(factor(x)),nrow=dim(x)[1]) } # Set up output list outputLength<-length(pos[pos>=X[1] & pos <= X[2]]) outputList<-list(position=pos[pos>=X[1] & pos <= X[2]],Zbeta_log_rsq_over_expected=rep(NA,outputLength)) # Loop over each position in the output list and calculate the expected Zbeta for (i in 1:outputLength){ # Current physical position in chromosome currentPos<-outputList$position[i] ## check L, R and LR noL <- length(pos[pos>=currentPos-ws/2 & pos < currentPos]) ## Number of SNPs to the left of the current SNP noR <- length(pos[pos<=currentPos+ws/2 & pos > currentPos]) ## Number of SNPs to the right of the current SNP if (noL < minRandL || noR < minRandL || noL*noR < minRL){ #NA outputList$Zbeta_log_rsq_over_expected[i]<-NA } else { # Find distances between each SNP in L and round to bin size bins<-sapply(outer(dist[pos<=currentPos+ws/2 & pos > currentPos],dist[pos>=currentPos-ws/2 & pos < currentPos],"-"),assign_bins,bin_size=bin_size) bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins) rsq<-as.vector(t((cor(t(x[pos>=currentPos-ws/2 & pos<=currentPos+ws/2,]),use="pairwise.complete.obs")^2)[1:noL,(noL+2):(noL+noR+1)])) rsq[rsq==0]<-min(rsq[rsq>0]) #removes zeros by replacing with lowest correlation greater than zero rsqExp<-merge(data.frame(bins=as.character(bins),rsq),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_rsq),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE) rsqSum<-sum(log10(rsqExp$rsq/rsqExp$LDprofile_rsq)) outputList$Zbeta_log_rsq_over_expected[i]<-rsqSum/(noL*noR) } } if (sum(is.na(outputList$Zbeta_log_rsq_over_expected))==outputLength){ warning("No Zbeta_log_rsq_over_expected values were calculated, try reducing minRandL and minRL or increasing the window size") } return(outputList) }
/scratch/gouwar.j/cran-all/cranData/zalpha/R/Zbeta_log_rsq_over_expected.R
#' Runs the Zbeta function on the r-squared values over the expected r-squared values for the region #' #' Returns a \eqn{Z_{\beta}^{r^2/E[r^2]}}{Zbeta} value for each SNP location supplied to the function, based on #' the expected \eqn{r^2} values given an LD profile and genetic distances. #' For more information about the \eqn{Z_{\beta}^{r^2/E[r^2]}}{Zbeta} statistic, please see Jacobs (2016). #' The \eqn{Z_{\beta}^{r^2/E[r^2]}}{Zbeta} statistic is defined as: #' \deqn{Z_{\beta}^{r^2/E[r^2]}=\frac{\sum_{i \in L,j \in R}r^2_{i,j}/E[r^2_{i,j}]}{|L||R|}} #' where \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws}, \eqn{r^2}{r^2} is equal to #' the squared correlation between a pair of SNPs, and \eqn{E[r^2]}{E[r^2]} is equal to the expected squared correlation between a pair of SNPs, given an LD profile. #' #' The LD profile describes the expected correlation between SNPs at a given genetic distance, generated using simulations or #' real data. Care should be taken to utilise an LD profile that is representative of the population in question. The LD #' profile should consist of evenly sized bins of distances (for example 0.0001 cM per bin), where the value given is the (inclusive) lower #' bound of the bin. Ideally, an LD profile would be generated using data from a null population with no selection, however one can be generated #' using this data. See the \code{\link{create_LDprofile}} function for more information on how to create an LD profile. #' #' @importFrom stats cor na.omit #' #' @param pos A numeric vector of SNP locations #' @param ws The window size which the \eqn{Z_{\beta}^{r^2/E[r^2]}}{Zbeta} statistic will be calculated over. This should be on the same scale as the \code{pos} vector. #' @param x A matrix of SNP values. Columns represent chromosomes; rows are SNP locations. Hence, the number of rows should equal the length of the \code{pos} vector. SNPs should all be biallelic. #' @param dist A numeric vector of genetic distances (e.g. cM, LDU). This should be the same length as \code{pos}. #' @param LDprofile_bins A numeric vector containing the lower bound of the bins used in the LD profile. These should be of equal size. #' @param LDprofile_rsq A numeric vector containing the expected \eqn{r^2}{r^2} values for the corresponding bin in the LD profile. Must be between 0 and 1. #' @param minRandL Minimum number of SNPs in each set R and L for the statistic to be calculated. Default is 4. #' @param minRL Minimum value for the product of the set sizes for R and L. Default is 25. #' @param X Optional. Specify a region of the chromosome to calculate \eqn{Z_{\beta}^{r^2/E[r^2]}}{Zbeta} for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate \eqn{Z_{\beta}^{r^2/E[r^2]}}{Zbeta} for every SNP in the \code{pos} vector. #' #' @return A list containing the SNP positions and the \eqn{Z_{\beta}^{r^2/E[r^2]}}{Zbeta} values for those SNPs #' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 
1807 #' @examples #' ## load the snps and LDprofile example datasets #' data(snps) #' data(LDprofile) #' ## run Zbeta_rsq_over_expected over all the SNPs with a window size of 3000 bp #' Zbeta_rsq_over_expected(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances, #' LDprofile$bin,LDprofile$rsq) #' ## only return results for SNPs between locations 600 and 1500 bp #' Zbeta_rsq_over_expected(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances, #' LDprofile$bin,LDprofile$rsq,X=c(600,1500)) #' #' @export #' @seealso \code{\link{create_LDprofile}} Zbeta_rsq_over_expected<-function(pos, ws, x, dist, LDprofile_bins, LDprofile_rsq, minRandL = 4, minRL = 25, X = NULL){ #Check things are in the correct format #Check pos is a numeric vector if (is.numeric(pos) ==FALSE || is.vector(pos)==FALSE){ stop("pos must be a numeric vector") } #Check x is a matrix if (is.matrix(x)==FALSE){ stop("x must be a matrix") } #Check x has rows equal to the length of pos if (length(pos) != nrow(x)){ stop("The number of rows in x must equal the number of SNP locations given in pos") } #Check SNPs are all biallelic if(sum(apply(x,1,function(x){length(na.omit(unique(x)))}) != 2)>0){ stop("SNPs must all be biallelic") } #Check dist is a numeric vector if (is.numeric(dist) ==FALSE || is.vector(dist)==FALSE){ stop("dist must be a numeric vector") } #Check dist is the same length as pos if (length(pos) != length(dist)){ stop("The number of values in dist must equal the number of SNP locations given in pos") } #Check windowsize is a number greater than 0 if(is.numeric(ws) ==FALSE || ws <= 0){ stop("ws must be a number greater than 0") } #Check LDprofile_bins is a numeric vector if (is.numeric(LDprofile_bins) ==FALSE || is.vector(LDprofile_bins)==FALSE){ stop("LDprofile_bins must be a numeric vector") } #Get bin size from LDprofile_bins bin_size<-LDprofile_bins[2]-LDprofile_bins[1] #Check LDprofile_bins are of equal size if (isTRUE(all.equal(diff(LDprofile_bins),rep(bin_size,length(LDprofile_bins)-1)))==FALSE){ stop("LDprofile_bins must be of equal size") } #Check LDprofile_rsq is a numeric vector if (is.numeric(LDprofile_rsq) ==FALSE || is.vector(LDprofile_rsq)==FALSE){ stop("LDprofile_rsq must be a numeric vector") } #Check values of LDprofile_rsq are between 0 and 1 if (sum(LDprofile_rsq<0 | LDprofile_rsq>1)>0){ stop("Values stored in LDprofile_rsq must be between 0 and 1") } #Check that the LDprofile vectors are the same length if (length(LDprofile_bins) != length(LDprofile_rsq)){ stop("LDprofile_rsq must contain the same number of values as there are bins given in LDprofile_bins") } #Check minRandL is 0 or greater if(is.numeric(minRandL) ==FALSE || minRandL < 0){ stop("minRandL must be a number greater than or equal to 0") } #Check minRL is 0 or greater if(is.numeric(minRL) ==FALSE || minRL < 0){ stop("minRL must be a number greater than or equal to 0") } #If X is specified, check it is in the correct format if (is.null(X)==FALSE){ if(is.numeric(X)==FALSE || is.vector(X)==FALSE){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { if (length(X) != 2){ stop("X should be a numeric vector of length 2 e.g. c(100,200)") } else { # X is in the correct format # Check that X will actually return a result (i.e. 
that the region specified by X overlaps with pos)
        if ((length(pos[pos>=X[1] & pos <= X[2]])>0) == FALSE){
          stop("The region specified by X is outside the region contained in the pos vector")
        }
      }
    }
  } else {
    # Set X equal to the extremes of pos
    X<-c(pos[1],pos[length(pos)])
  }

  # Force the R code to print decimals in full rather than in scientific format
  oldOptions<-options(scipen=999)
  on.exit(options(oldOptions))

  #Change matrix x to numeric if it isn't already
  if (is.numeric(x)==FALSE){
    x<-matrix(as.numeric(factor(x)),nrow=dim(x)[1])
  }

  # Set up output list
  outputLength<-length(pos[pos>=X[1] & pos <= X[2]])
  outputList<-list(position=pos[pos>=X[1] & pos <= X[2]],Zbeta_rsq_over_expected=rep(NA,outputLength))

  # Loop over each position in the output list and calculate Zbeta_rsq_over_expected
  for (i in 1:outputLength){

    # Current physical position in chromosome
    currentPos<-outputList$position[i]

    ## check L, R and LR
    noL <- length(pos[pos>=currentPos-ws/2 & pos < currentPos]) ## Number of SNPs to the left of the current SNP
    noR <- length(pos[pos<=currentPos+ws/2 & pos > currentPos]) ## Number of SNPs to the right of the current SNP

    if (noL < minRandL || noR < minRandL || noL*noR < minRL){
      #NA
      outputList$Zbeta_rsq_over_expected[i]<-NA
    } else {
      # Find the genetic distances between each SNP in R and each SNP in L, and round to bin size
      bins<-sapply(outer(dist[pos<=currentPos+ws/2 & pos > currentPos],dist[pos>=currentPos-ws/2 & pos < currentPos],"-"),assign_bins,bin_size=bin_size)
      bins[bins>max(LDprofile_bins)]<-max(LDprofile_bins)
      rsq<-as.vector(t((cor(t(x[pos>=currentPos-ws/2 & pos<=currentPos+ws/2,]),use="pairwise.complete.obs")^2)[1:noL,(noL+2):(noL+noR+1)]))
      rsqExp<-merge(data.frame(bins=as.character(bins),rsq),data.frame(LDprofile_bins=as.character(LDprofile_bins),LDprofile_rsq),by.x="bins",by.y="LDprofile_bins",all.x=TRUE,sort=FALSE)
      rsqSum<-sum(rsqExp$rsq/rsqExp$LDprofile_rsq)
      outputList$Zbeta_rsq_over_expected[i]<-rsqSum/(noL*noR)
    }
  }

  if (sum(is.na(outputList$Zbeta_rsq_over_expected))==outputLength){
    warning("No Zbeta_rsq_over_expected values were calculated, try reducing minRandL and minRL or increasing the window size")
  }

  return(outputList)
}
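## Illustrative sketch (kept as comments; not part of the package's exported API):
## the per-pair quantity summed by the loop above, shown for one pair of SNPs from
## the bundled `snps` and `LDprofile` example datasets. The chosen rows are
## arbitrary, and the sketch relies on the internal helpers assign_bins() and
## equal_vector() defined elsewhere in this package.
# data(snps); data(LDprofile)
# bin_size <- LDprofile$bin[2] - LDprofile$bin[1]
# gen_dist <- snps$cM_distances[12] - snps$cM_distances[9]          # genetic distance between the pair
# bin <- min(assign_bins(bin_size, gen_dist), max(LDprofile$bin))   # cap at the largest bin, as in the loop above
# expected_rsq <- LDprofile$rsq[equal_vector(LDprofile$bin, bin)]   # expected r^2 for SNPs this far apart
# observed_rsq <- cor(as.numeric(as.matrix(snps[9, 3:12])), as.numeric(as.matrix(snps[12, 3:12])))^2
# observed_rsq / expected_rsq   # one term of the numerator of Zbeta^(r^2/E[r^2])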
/scratch/gouwar.j/cran-all/cranData/zalpha/R/Zbeta_rsq_over_expected.R
## Calculates the bin a distance falls into
##
## Due to floating point errors, the floor function cannot be relied upon to
## round down a distance between two SNPs to the nearest bin. Thus it is
## necessary to first test the ceiling to see if it is in fact equal, before
## then using the floor function.
##
## For example, the SNP positions of SNPs 1 and 2 are 0.00235 and 0.00345
## respectively. The difference is 0.0011, and with a bin size of 0.0001, one
## would expect the difference to be assigned to the "0.0011" bin. However,
## the floor function in R instead sets the bin to 0.001, as it stores the
## result of 0.00345-0.00235 as a number a tiny bit smaller than 0.0011 due
## to floating point errors.
##
## @param bin_size a number representing the size of the bins in the LD profile
## @param number the number to be assigned to a bin
##
## @return a number representing the bin the number has been assigned to
##
assign_bins<-function(bin_size,number){
  ceilingTemp<-ceiling(number/bin_size)
  if(isTRUE(all.equal(ceilingTemp,number/bin_size))){
    return(ceilingTemp*bin_size)
  } else {
    return(floor(number/bin_size)*bin_size)
  }
}
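## Example (illustrative, kept as comments), using the same numbers as the
## description above: with a bin size of 0.0001, the difference 0.00345 - 0.00235
## is stored as a value fractionally below 0.0011, so flooring it directly
## misbins the pair, whereas assign_bins() tests the ceiling first:
## floor((0.00345 - 0.00235) / 0.0001) * 0.0001   # 0.001  (wrong bin)
## assign_bins(0.0001, 0.00345 - 0.00235)         # 0.0011 (expected bin)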
/scratch/gouwar.j/cran-all/cranData/zalpha/R/assign_bins.R
#' Creates an LD profile #' #' An LD (linkage disequilibrium) profile is a look-up table containing the expected correlation between SNPs given the genetic distance between them. The use of an LD profile can increase the accuracy of results by taking into account the expected correlation between SNPs. This function aids the user in creating their own LD profile. #' #' The input for \code{dist} and \code{x} can be lists. This allows multiple datasets to be used in the creation of the LD profile. For example, using all 22 autosomes from the human genome would involve 22 different distance vectors and SNP matrices. #' Both lists should be the same length and should correspond exactly to each other (i.e. the distances in each element of \code{dist} should go with the SNPs in the same element of x) #' #' In the output, bins represent lower bounds. The first bin contains pairs where the genetic distance is greater than or equal to 0 and less than \code{bin_size}. The final bin contains pairs where the genetic distance is greater than or equal to \code{max_dist}-\code{bin_size} and less than \code{max_dist}. #' If the \code{max_dist} is not an increment of \code{bin_size}, it will be adjusted to the next highest increment. The final bin will be the bin that \code{max_dist} falls into. For example, if the \code{max_dist} is given as 4.5 and the \code{bin_size} is 1, the final bin will be 4. #' \code{max_dist} should be big enough to cover the genetic distances between pairs of SNPs within the window size given when the \eqn{Z_{\alpha}}{Zalpha} statistics are run. Any pairs with genetic distances bigger than \code{max_dist} will be assigned the values in the maximum bin of the LD profile.\cr #' #' By default, Beta parameters are not calculated. To fit a Beta distribution to the expected correlations, needed for the \code{\link{Zalpha_BetaCDF}} and \code{\link{Zbeta_BetaCDF}} statistics, \code{beta_params} should be set to TRUE and the package 'fitdistrplus' must be installed. #' #' Ideally, an LD profile would be generated using data from a null population with no selection, For example by using a simulation if the other population parameters are known. However, often these are unknown or complex, so generating an LD profile using the same data as is being analysed is acceptable, as long as the bins are large enough. #' #' @importFrom stats cor sd #' #' @param dist A numeric vector, or a list of numeric vectors, containing the genetic distance for each SNP. #' @param x A matrix of SNP values, or a list of matrices. Columns represent chromosomes; rows are SNP locations. Hence, the number of rows should equal the length of the \code{dist} vector. SNPs should all be biallelic. #' @param bin_size The size of each bin, in the same units as \code{dist}. #' @param max_dist Optional. The maximum genetic distance to be considered. If this is not supplied, it will default to the maximum distance in the \code{dist} vector. #' @param beta_params Optional. Beta parameters are calculated if this is set to TRUE. Default is FALSE. #' #' @return A data frame containing an LD profile that can be used by other statistics in this package. #' @references Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 
1807 #' @examples #' ## load the snps example dataset #' data(snps) #' ## Create an LD profile using this data #' create_LDprofile(snps$cM_distances,as.matrix(snps[,3:12]),0.001) #' ## To get the Beta distribution parameter estimates, the fitdistrplus package is required #' if (requireNamespace("fitdistrplus", quietly = TRUE)==TRUE) { #' create_LDprofile(snps$cM_distances,as.matrix(snps[,3:12]),0.001,beta_params=TRUE) #' } #' #' #' @export #' @seealso \code{\link{Zalpha_expected}}, \code{\link{Zalpha_rsq_over_expected}}, \code{\link{Zalpha_log_rsq_over_expected}}, \code{\link{Zalpha_Zscore}}, \code{\link{Zalpha_BetaCDF}}, \code{\link{Zbeta_expected}}, \code{\link{Zbeta_rsq_over_expected}}, \code{\link{Zbeta_log_rsq_over_expected}}, \code{\link{Zbeta_Zscore}}, \code{\link{Zbeta_BetaCDF}}, \code{\link{Zalpha_all}}. #' create_LDprofile<-function(dist,x,bin_size,max_dist=NULL,beta_params=FALSE){ #Changes dist into a list if it is not one already if (is.list(dist)==FALSE){ dist<-list(dist) } #Changes x into a list if it is not one already if (is.list(x)==FALSE){ x<-list(x) } #Checks #Check the dist list and the x list have the same length if (length(dist)!=length(x)){ stop("dist and x should contain the same number of elements") } #Check for each element in dist and x for (el in 1:length(dist)){ #Check dist is vector if (is.numeric(dist[[el]]) ==FALSE || is.vector(dist[[el]])==FALSE){ stop("dist must be a numeric vector or list of numeric vectors") } #Check x is a matrix if (is.matrix(x[[el]])==FALSE){ stop("x must be a matrix or list of matrices") } #Check x has rows equal to the length of dist if (length(dist[[el]]) != nrow(x[[el]])){ stop("The number of rows in x must equal the number of SNP genetic distances given in the corresponding dist") } #Check SNPs are all biallelic if (sum(apply(x[[el]],1,function(x){length(na.omit(unique(x)))}) != 2)>0){ stop("SNPs must all be biallelic") } #Change matrix x to numeric if it isn't already if (is.numeric(x[[el]])==FALSE){ x[[el]]<-matrix(as.numeric(factor(x[[el]])),nrow=dim(x[[el]])[1]) } } #Check bin_size is a number if (is.numeric(bin_size) ==FALSE || bin_size <= 0){ stop("bin_size must be a number greater than 0") } #Check max_dist is a number or NULL if (is.null(max_dist)==FALSE){ if (is.numeric(max_dist) ==FALSE || max_dist <= 0){ stop("max_dist must be a number greater than 0") } } else { #Set max_dist to the maximum distance in the data if it was not supplied max_dist<-max(sapply(dist,function(x){x[length(x)]-x[1]}),na.rm = TRUE) #If max_dist is now zero then set it equal to bin_size if(isTRUE(all.equal(max_dist,0))){ max_dist<-bin_size } } #Adjusts the max_dist value so it is equal to an increment of bin_size if it isn't already if(!isTRUE(all.equal(max_dist,assign_bins(bin_size,max_dist)))){ max_dist<-assign_bins(bin_size,max_dist)+bin_size } #Check beta_params is logical if (is.logical(beta_params)==FALSE){ stop("beta_params must be TRUE or FALSE") } #If beta_params is TRUE, check for fitdistrplus package if (beta_params==TRUE){ if (requireNamespace("fitdistrplus", quietly = TRUE)==FALSE) { stop("Package \"fitdistrplus\" needed for Beta parameters to be calculated. 
Please install it.")
    }
  }

  diffs<-NULL
  rsq<-NULL
  #For each element in dist and x, get the differences and rsquared values
  for (el in 1:length(dist)){
    #Find the differences in genetic distances between pairs of SNPs
    tempdiffs<-lower_triangle(outer(dist[[el]],dist[[el]],"-"))
    #Find the rsquared value between pairs of SNPs
    temprsq<-lower_triangle(cor(t(x[[el]]),use="pairwise.complete.obs")^2)
    #Filter for just those less than the max genetic distance and filter out missing distances
    temprsq<-temprsq[tempdiffs<max_dist & is.na(tempdiffs)==FALSE]
    tempdiffs<-tempdiffs[tempdiffs<max_dist & is.na(tempdiffs)==FALSE]
    #Add to final vector
    diffs<-c(diffs,tempdiffs)
    rsq<-c(rsq,temprsq)
  }
  rm(tempdiffs,temprsq)

  #Assign diffs to bins
  bins<-assign_bins(bin_size,diffs)

  #Create LDprofile data frame
  LDprofile<-data.frame(bin=seq(0,max_dist-bin_size,bin_size),rsq=NA,sd=NA,Beta_a=NA,Beta_b=NA,n=NA)

  #Loop for each bin (i)
  for (i in 1:nrow(LDprofile)){
    LDprofile$n[i]<-sum(equal_vector(bins,LDprofile$bin[i]))
    #If there is at least one pair whose genetic distance falls within the bin, calculate stats
    if (LDprofile$n[i]>0){
      #Get the rsquared values for all pairs in this bin
      temprsq<-rsq[equal_vector(bins,LDprofile$bin[i])]
      #Calculate the mean
      LDprofile$rsq[i]<-mean(temprsq)
      #Calculate the standard deviation
      LDprofile$sd[i]<-sd(temprsq)
      #Calculate Beta distribution parameters if required
      #Do not calculate for bins containing fewer than two pairs, or where the standard deviation is zero
      if (beta_params==TRUE & LDprofile$n[i]>1 & LDprofile$sd[i]>0){
        if (sum(equal_vector(temprsq,1) | equal_vector(temprsq,0))>0){
          #If there are any 0s or 1s adjust the data
          temprsq<-(temprsq*(length(temprsq)-1)+0.5)/length(temprsq)
        }
        #Try to fit the data to a Beta distribution
        betafit<-try(fitdistrplus::fitdist(temprsq,"beta"),silent=TRUE)
        if (class(betafit) != "try-error"){
          LDprofile$Beta_a[i]<-betafit$estimate[1]
          LDprofile$Beta_b[i]<-betafit$estimate[2]
        } else {
          #If failed to fit, try again using estimated beta parameters to initialise
          startBetaParams<-est_Beta_Params(LDprofile$rsq[i], LDprofile$sd[i]^2)
          betafit<-try(fitdistrplus::fitdist(temprsq,"beta",start=list(shape1=startBetaParams$alpha, shape2=startBetaParams$beta)),silent=TRUE)
          if (class(betafit) != "try-error"){
            LDprofile$Beta_a[i]<-betafit$estimate[1]
            LDprofile$Beta_b[i]<-betafit$estimate[2]
          } else {
            #If Beta parameters cannot be fitted, return NA
            LDprofile$Beta_a[i]<-NA
            LDprofile$Beta_b[i]<-NA
          }
        }
      }
    }
  }

  return(LDprofile)
}
/scratch/gouwar.j/cran-all/cranData/zalpha/R/create_LDprofile.R
## Compares a vector to a value and returns a vector of logical values
##
## @param vector The vector to be compared to the value
## @param value The value each item in the vector should be compared to
##
## @return A vector of TRUE and FALSE values
##
equal_vector <- function(vector, value){
  return(abs(vector-value)<.Machine$double.eps^0.5)
}
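## Example (illustrative, kept as comments): a plain `==` comparison can fail for
## values that are mathematically equal but differ by floating point error, which
## is exactly what this helper guards against:
## 0.1 + 0.2 == 0.3               # FALSE, due to floating point representation
## equal_vector(0.1 + 0.2, 0.3)   # TRUE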
/scratch/gouwar.j/cran-all/cranData/zalpha/R/equal_vector.R
## Estimates starting parameters for the Beta distribution calculation
##
## @param mu The mean of the data.
## @param var The variance of the data.
##
## @return A list containing the estimated beta parameters alpha and beta.
##
est_Beta_Params <- function(mu, var){
  alpha <- ((1-mu)/var - 1/mu) * mu^2
  beta <- alpha * (1/mu - 1)
  return(params = list(alpha = alpha, beta = beta))
}
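## Example (illustrative, kept as comments): these are method-of-moments style
## starting values, so for a mean of 0.1 and a variance of 0.04:
## est_Beta_Params(0.1, 0.04)
## # alpha = ((1 - 0.1)/0.04 - 1/0.1) * 0.1^2 = 0.125
## # beta  = 0.125 * (1/0.1 - 1)              = 1.125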
/scratch/gouwar.j/cran-all/cranData/zalpha/R/est_Beta_Params.R
## Helper function to get the lower triangle of a matrix
##
## @param x a matrix
##
## @return a vector of values from the lower triangle of the matrix
##
##
lower_triangle<-function(x){
  x[lower.tri(x)]
}
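## Example (illustrative, kept as comments): create_LDprofile() uses this so that
## each pair of SNPs is counted exactly once (the diagonal is excluded):
## lower_triangle(matrix(1:9, nrow = 3))   # returns c(2, 3, 6), the strictly
##                                         # lower-triangular entries in
##                                         # column-major order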
/scratch/gouwar.j/cran-all/cranData/zalpha/R/lower_triangle.R
#' Dataset containing details on simulated SNPs #' #' A dataset containing the positions, genetic distances and #' alleles for 20 SNPs, across 10 simulated chromosomes. #' #' @format A data frame with 20 rows and 12 variables: #' \describe{ #' \item{bp_positions}{location of the SNP on the chromosome e.g. in base pairs} #' \item{cM_distances}{genetic distance of the SNP from the start of the chromosome e.g. in centimorgans} #' \item{chrom_1}{allele of the SNP on the first example chromosome} #' \item{chrom_2}{allele of the SNP on the second example chromosome} #' \item{chrom_3}{allele of the SNP on the third example chromosome} #' \item{chrom_4}{allele of the SNP on the fourth example chromosome} #' \item{chrom_5}{allele of the SNP on the fifth example chromosome} #' \item{chrom_6}{allele of the SNP on the sixth example chromosome} #' \item{chrom_7}{allele of the SNP on the seventh example chromosome} #' \item{chrom_8}{allele of the SNP on the eighth example chromosome} #' \item{chrom_9}{allele of the SNP on the ninth example chromosome} #' \item{chrom_10}{allele of the SNP on the tenth example chromosome} #' } #' @examples #' snps "snps"
/scratch/gouwar.j/cran-all/cranData/zalpha/R/snps-data.R
## ---- echo = FALSE, message=FALSE--------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) library(zalpha) ## ----------------------------------------------------------------------------- library(zalpha) data(snps) ## This is what the dataset looks like: snps ## ----------------------------------------------------------------------------- results<-Zalpha(snps$bp_positions,3000,as.matrix(snps[,3:12])) results plot(results$position,results$Zalpha) ## ----------------------------------------------------------------------------- Zalpha(snps$bp_positions,3000,as.matrix(snps[,3:12]),X=c(500,1000)) ## ----------------------------------------------------------------------------- snps$cM_distances ## ----------------------------------------------------------------------------- data(LDprofile) LDprofile ## ----------------------------------------------------------------------------- Zalpha_expected(snps$bp_positions, 3000, snps$cM_distances, LDprofile$bin, LDprofile$rsq) ## ----------------------------------------------------------------------------- Zalpha_rsq_over_expected(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq) Zalpha_log_rsq_over_expected(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq) Zalpha_Zscore(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq, LDprofile$sd) Zalpha_BetaCDF(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$Beta_a, LDprofile$Beta_b) ## ----------------------------------------------------------------------------- results<-Zbeta(snps$bp_positions,3000,as.matrix(snps[,3:12])) results plot(results$position,results$Zbeta) ## ----------------------------------------------------------------------------- Zbeta_expected(snps$bp_positions, 3000, snps$cM_distances, LDprofile$bin, LDprofile$rsq) Zbeta_rsq_over_expected(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq) Zbeta_log_rsq_over_expected(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq) Zbeta_Zscore(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq, LDprofile$sd) Zbeta_BetaCDF(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$Beta_a, LDprofile$Beta_b) ## ----------------------------------------------------------------------------- Zalpha_all(snps$bp_positions,3000,as.matrix(snps[,3:12])) ## ----------------------------------------------------------------------------- create_LDprofile(snps$cM_distances,as.matrix(snps[,3:12]),bin_size = 0.001,beta_params = TRUE) ## ----------------------------------------------------------------------------- ## Generate three chromosomes of data - cM distances and SNP values chrom1_cM_distances<-snps$cM_distances chrom1_snp_values<-as.matrix(snps[,3:12]) chrom2_cM_distances<-snps$cM_distances chrom2_snp_values<-as.matrix(snps[,3:12]) chrom3_cM_distances<-snps$cM_distances chrom3_snp_values<-as.matrix(snps[,3:12]) ## create a list of the cM distances cM_distances_list<-list(chrom1_cM_distances,chrom2_cM_distances,chrom3_cM_distances) ## create a list of SNP value matrices snp_values_list<-list(chrom1_snp_values,chrom2_snp_values,chrom3_snp_values) ## create the LD profile using the lists as the dist and x parameters create_LDprofile(cM_distances_list,snp_values_list,bin_size = 
0.001,beta_params = TRUE)
/scratch/gouwar.j/cran-all/cranData/zalpha/inst/doc/zalpha.R
--- title: "zalpha" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{zalpha} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r, echo = FALSE, message=FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) library(zalpha) ``` The zalpha package contains statistics for identifying areas of the genome that have undergone a selective sweep. The idea behind these statistics is to find areas of the genome that are highly correlated, as this can be a sign that a sweep has occurred recently in the vicinity. For more information on the statistics, please see the paper by Jacobs et al. (2016)[1] referenced below. ## A simple example Here we have a dataset containing five humans with a single pair of chromosomes each. The chromosome has 20 SNPs, at base pair 100, 200, 300, ..., 2000. -------- ------------ -------------------- Person 1 Chromosome A AGGAAGGGATACGGTTATAC Chromosome B CGGAACTGATCAGCTCAGGG Person 2 Chromosome A CGGTACTGTCACGGGCATGG Chromosome B ATGTAGGGTCCAGCTCTGAC Person 3 Chromosome A ATGTCGGCATCCAGGCAGAC Chromosome B ATGAACGCATCAACTTTGAG Person 4 Chromosome A CGGTCGGCTCCAGCTTTTGG Chromosome B CTGTCCGCTCCCGGGTTTGC Person 5 Chromosome A CGCAACGGACACGCGCATGC Chromosome B CTGACGTCACCCAGTTTGAG -------- ------------ -------------------- For this simple example all that is needed is: * The vector of SNP locations * The matrix of SNP values. This could be in ACGT format as above, or in 0 and 1 notation, or any other notation as long as SNPs are biallelic. Data extracted from a PLINK .tped file is in the ideal format for this analysis. Note the genetic data is phased. ### The snps dataset The snps dataset is a data frame that comes with the zalpha package. It is identical to the simple example above, but with 0s and 1s instead of ACGTs. Realistically, the dataset would be much bigger. It is highly recommended to only use SNPs with a minor allele frequency of over 5%, as it is hard to find correlations between rare alleles. Any missing values should be coded as NA. The snps dataset can be loaded using the code: ```{r} library(zalpha) data(snps) ## This is what the dataset looks like: snps ``` This data set contains information about each of the SNPs. The first column gives the physical location of the SNP along the chromosome, in whatever units is useful to the user (usually bp or Kb). In this example, the positions are in base pairs (bp). The next column is the genetic distance of the SNP from the start of the chromosome. Ignore this column for now. The final columns are the SNP alleles for each of the chromosomes in the population. Each SNP must be biallelic, but can contain any value, for example 0s and 1s, or ACGTs. The data can contain missing values, however it is recommended that the cut off is 10% missing at most. Missing values should be coded as NA. It is also recommended to use a minor allele frequency of 5% or higher. __Note:__ There is no requirement to put data into a data frame - all that is required is a vector of SNP positions and a matrix of SNP values. ## Zalpha To test for selection, the user can use the Zalpha function. This function assigns the first SNP in the dataset as the "target locus", calculates the $Z_{\alpha}$ value, then moves on to the next SNP making that the target locus, until every SNP in the dataset has been considered. It works by calculating correlations between alleles on each side of the target locus and averaging them. 
To do this, the function needs three inputs: * __pos__ A vector of the physical locations of each of the SNPs. For this example, we will use the first column from the snps dataset: snps$bp_positions. * __ws__ The window size. This is set to 3000 bp for this small example but for human analysis realistically a window size of around 200 Kb is appropriate. The window is centred on the target locus and considers SNPs that are within ws/2 to the left and ws/2 to the right of the target SNP. ws should always use the same units as pos i.e. if pos is in bp, ws should be in bp. * __x__ A matrix of the SNP alleles across each chromosome in the sample. The number of rows should be equal to the number of SNPs, and the columns are each of the chromosomes. For this example we extract the SNP values from the snps dataset found in columns 3 to 12, and convert into a matrix: as.matrix(snps[,3:12]). ```{r} results<-Zalpha(snps$bp_positions,3000,as.matrix(snps[,3:12])) results plot(results$position,results$Zalpha) ``` The output is in the form of a list and shows the positions of each of the SNPs and the $Z_{\alpha}$ value calculated for each SNP. The NAs are because there were not enough SNPs on one side of the target locus for an accurate $Z_{\alpha}$ value to be calculated. This is controlled by the parameters minRandL and minRL, which have defaults 4 and 25 respectively. minRandL specifies the minimum number of SNPs that must be to the left and right of the target SNP within the window for $Z_{\alpha}$ to be calculated. minRL is the product of these numbers. The graph shows a sharp increase in $Z_{\alpha}$ values in the centre of this region, which could indicate the presence of a sweep. The user should compare the values across the whole genome to find outliers. Say the user is only interested in the output of Zalpha for a particular region of the chromosome; this is achieved by setting the "X" parameter to the lower and upper bounds of the region. ```{r} Zalpha(snps$bp_positions,3000,as.matrix(snps[,3:12]),X=c(500,1000)) ``` That concludes the simple example of the Zalpha function! It is recommended that the user uses the Zalpha_all function, as this function will calculate all the statistics in the zalpha package in one go, rather than running all of the statistics separately. More information on the Zalpha_all function can be found further down this vignette. Read on for information on the other statistics in the package and what they require. ## Adjusting for expected correlations between SNPs There are many reasons apart from selection that pairs of SNPs could be more correlated than the rest of the genome, including regions of low recombination and genetic drift. This package allows the user to correct for expected correlations between SNPs. There are multiple functions included in this package that adjust for expected correlations, all of which have an example below. First however, the new inputs will be described. The extra inputs required are: * __dist__ A vector containing the genetic distances between SNPs * An LD profile Returning to the snps example dataset, we can now consider the second column of the dataset "cM_distances". ```{r} snps$cM_distances ``` Each value is the genetic distance of the SNP from the start of the chromosome. This could be in centimorgans (cM), linkage disequilibrium units (LDU) or any other way of measuring genetic distance, as long as it is additive (i.e. the distance between SNP A and SNP C is equal to the distance between SNP A and SNP B plus SNP B and SNP C). 
There are many ways of calculating the genetic distances between SNPs. Some software that could be used include LDhat[2], pyrho[3], FastEPRR[4], and LDJump[5]. ### LD Profile Using an LD (linkage disequilibrium) profile allows the user to adjust for variable recombination rates along the chromosome. An LD profile is a basic look-up table. It tells the user what the expected correlation between two SNPs is, given the genetic distance between them. Here is the example LD profile provided with the zalpha R package: ```{r} data(LDprofile) LDprofile ``` The LD profile contains data about the expected correlation between SNPs given the genetic distance between them. The columns in the example are: * __bin__ This is the lower bound of the bin. In this example, row 1 would include any SNPs greater than or equal to 0 but less than 0.0001 centimorgans apart. * __rsq__ The expected r^2^ value for pairs of SNPs whose genetic distance between them falls within the bin. * __sd__ The standard deviation of r^2^ for the bin. * __Beta_a__ The first shape of the Beta distribution fitted to this bin. * __Beta_b__ The second shape of the Beta distribution. If we know two SNPs are 0.00017 cM apart, this LD profile tells us that we expect the r^2^ value to be 0.093, with a standard deviation of 0.22, and that the expected distribution of r^2^ values for SNPs this far apart is Beta(0.27,2.03). The package contains a function for creating an LD profile. This is explained lower down this vignette. The vignette continues by using the example LDprofile dataset supplied. ## Zalpha_expected The expected $Z_{\alpha}$ value (denoted $Z_{\alpha}^{E[r^2]}$) can be calculated for a chromosome given an LD profile and the genetic distances between each SNP in the chromosome. Instead of calculating the r^2^ values between SNPs, the function uses the expected correlations. It does this by working out the genetic distance between each pair of SNPs and uses the r^2^ values given in the LD profile for SNPs that far apart. ```{r} Zalpha_expected(snps$bp_positions, 3000, snps$cM_distances, LDprofile$bin, LDprofile$rsq) ``` Note that this statistic does not use the SNP value data. Once $Z_{\alpha}^{E[r^2]}$ has been calculated, it can be combined with the $Z_{\alpha}$ results to adjust for recombination, for example by computing $Z_{\alpha}$/$Z_{\alpha}^{E[r^2]}$. Outliers in this new combined statistic could be potential selection candidates. Other functions that take into account variable recombination rates are Zalpha_rsq_over_expected, Zalpha_log_rsq_over_expected, Zalpha_Zscore, and Zalpha_BetaCDF. These statistics all use the actual r^2^ values from the data combined with the expected r^2^ values from the LD profile in various ways. Examples of these statistics are here: ```{r} Zalpha_rsq_over_expected(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq) Zalpha_log_rsq_over_expected(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq) Zalpha_Zscore(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq, LDprofile$sd) Zalpha_BetaCDF(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$Beta_a, LDprofile$Beta_b) ``` Note that not all the statistics need all the columns from the LD profile. 
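As a concrete illustration of the $Z_{\alpha}$/$Z_{\alpha}^{E[r^2]}$ combination mentioned above, the two sets of results can be divided element-wise (a minimal sketch, assuming the returned list element is named `Zalpha_expected`, mirroring the naming used by `Zalpha` and `Zbeta`):

```{r}
## Adjust Zalpha for recombination by dividing by its expected value
zalpha_values <- Zalpha(snps$bp_positions, 3000, as.matrix(snps[,3:12]))
zalpha_expected_values <- Zalpha_expected(snps$bp_positions, 3000, snps$cM_distances,
                                          LDprofile$bin, LDprofile$rsq)
zalpha_values$Zalpha / zalpha_expected_values$Zalpha_expected
```

Unusually high values of this ratio, relative to the rest of the chromosome, would be the candidates to investigate further.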
## Zbeta The Zbeta function works in the same way as the Zalpha function but evaluates correlations between pairs of SNPs where one is to the left of the target locus and the other is to the right. It is useful to use the $Z_{\beta}$ statistic in conjunction with the $Z_{\alpha}$ statistic, as they behave differently depending on how close to fixation the sweep is. For example, while a sweep is in progress both $Z_{\alpha}$ and $Z_{\beta}$ would be higher than other areas of the chromosome without a sweep present. However, when a sweep reaches near-fixation, $Z_{\beta}$ would decrease whereas $Z_{\alpha}$ would remain high. Combining $Z_{\alpha}$ and $Z_{\beta}$ into new statistics such as $Z_{\alpha}$/$Z_{\beta}$ is one way of analysing this. The Zbeta function requires the exact same inputs as the Zalpha function. Here is an example: ```{r} results<-Zbeta(snps$bp_positions,3000,as.matrix(snps[,3:12])) results plot(results$position,results$Zbeta) ``` Comparing this to the $Z_{\alpha}$ graph in the earlier example, we can see that the value of $Z_{\beta}$ decreases where $Z_{\alpha}$ increases. This could indicate that, if there is a sweep at this locus, it is near-fixation. There is an equivalent Zbeta function for each of the Zalpha variations. Here is an example for each of them: ```{r} Zbeta_expected(snps$bp_positions, 3000, snps$cM_distances, LDprofile$bin, LDprofile$rsq) Zbeta_rsq_over_expected(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq) Zbeta_log_rsq_over_expected(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq) Zbeta_Zscore(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq, LDprofile$sd) Zbeta_BetaCDF(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$Beta_a, LDprofile$Beta_b) ``` ## Diversity statistics LR and L_plus_R These statistics show the diversity around the target locus. LR calculates the number of SNPs to the left of the target locus multiplied by the number of SNPs to the right. L_plus_R is the total number of pairs of SNPs on the left and the right of the target locus. The idea behind these statistics is that if the diversity is low, there might be a sweep in this region. Care should be taken when interpreting these statistics if diversity has been altered by filtering and, when using the Zalpha_all function below, the use of minRL and minRandL parameters. ## Zalpha_all __Zalpha_all is the recommended function for using this package.__ It will run all the statistics included in the package ($Z_{\alpha}$ and $Z_{\beta}$ variations), so the user does not have to run multiple functions to calculate all the statistics they want. The function will only calculate the statistics it has been given the appropriate inputs for, so it is flexible. For example, this code will only run Zalpha, Zbeta and the two diversity statistics LR and L_plus_R, as an LD profile was not supplied: ```{r} Zalpha_all(snps$bp_positions,3000,as.matrix(snps[,3:12])) ``` Supplying an LD profile and genetic distances for each SNP will result in more of the statistics being calculated. There are many ways that the resulting statistics can be combined to give new insights into the data, see Jacobs et al. (2016)[1]. ## Identifying regions under selection To find candidate regions for selection, first calculate the statistics across the chromosome, including any combined statistics that may be of interest. 
It is then suggested to find the maximum value for windows of around 200 Kb for each statistic (minimum values for the diversity statistics). Any regions that are outliers compared to the rest of the chromosome could be considered candidates and can be investigated further. ## create_LDprofile An LD profile is required to adjust for expected correlations. It is a basic look-up table that tells the user what the expected correlation is between a pair of SNPs, given the genetic distance between them. To create a simple LD profile, the user just needs two things: * __dist__ a vector of genetic distances * __x__ a matrix of SNP values The user also needs to tell the function what bin_size to use, and optionally if they want to calculate Beta distribution parameters (this requires the fitdistrplus package to be installed). The function considers all pairs of SNPs. It separates the pairs of SNPs into bins based on the genetic distance between them. The correlation between each pair of SNPs is calculated. In each bin, the average and the standard deviation of these correlations is calculated. If beta_params=TRUE, then a beta distribution will be fitted to the correlations in each bin too. We can use the snps dataset as an example of this: ```{r} create_LDprofile(snps$cM_distances,as.matrix(snps[,3:12]),bin_size = 0.001,beta_params = TRUE) ``` This code has created an LD profile with 6 columns. These are: * __bin__ This is the lower bound of the bin, e.g. row one shows information for genetic distances that are between 0 and 0.001 cM. * __rsq__ This is the expected r^2^ for genetic distances who fall in the given bin. For example, in row one the expected r^2^ value for SNPs which are 0-0.001 cM apart is 0.1023. * __sd__ This is the standard deviation for the r^2^ values. * __Beta_a__ This is the first shape parameter for the Beta distribution fitted to this bin * __Beta_b__ This is the second shape parameter for the Beta distribution fitted to this bin * __n__ This is the number of pairs of SNPs with a genetic distance falling within this bin, whose correlations were used to calculate the statistics. There is one more optional input parameter - max_dist - which sets the maximum distance SNPs can be apart for calculating for the LD profile. For real world data, Jacobs et al. (2016)[1] recommend using distances up to 2 cM assigned to bins of size 0.0001 cM. Without this parameter, the code will generate bins up to the maximum distance between pairs of SNPs, which is likely to be inefficient as most distances will not be used. max_dist should be big enough to cover the genetic distances between pairs of SNPs within the window size given when the $Z_{\alpha}$ statistics are run. Any pairs with genetic distances bigger than max_dist will be assigned the values in the maximum bin of the LD profile. Ideally, we would want to generate an LD profile based on genetic data without selection but exactly matching the other population parameters for our data. This could be done using simulated data (using software such as MSMS[6] or SLiM[7]). We could use another genetic dataset containing a similar population. Alternatively, we could generate an LD profile using the same dataset that we are analysing for selection. Care should be taken that bins are big enough to have a lot of data in so expected r^2^ values are not overly affected by outliers. Realistically, the user will not have just one chromosome of data for creating the LD profile, but will likely have a whole genome. 
So far, we have used a vector of genetic distances and a SNP value matrix in our example. However, with multiple chromosomes there will be a vector of genetic distances and a SNP value matrix for each chromosome, and it would be good to use all that information to create the LD profile. Therefore, the function has been written to accept multiple vectors of genetic distances and multiple SNP value matrices via lists. The __dist__ parameter will accept a vector or a list of vectors. The __x__ parameter will accept a matrix or a list of matrices. For example, if we use the snps dataset but this time pretend it is three different chromosomes. ```{r} ## Generate three chromosomes of data - cM distances and SNP values chrom1_cM_distances<-snps$cM_distances chrom1_snp_values<-as.matrix(snps[,3:12]) chrom2_cM_distances<-snps$cM_distances chrom2_snp_values<-as.matrix(snps[,3:12]) chrom3_cM_distances<-snps$cM_distances chrom3_snp_values<-as.matrix(snps[,3:12]) ## create a list of the cM distances cM_distances_list<-list(chrom1_cM_distances,chrom2_cM_distances,chrom3_cM_distances) ## create a list of SNP value matrices snp_values_list<-list(chrom1_snp_values,chrom2_snp_values,chrom3_snp_values) ## create the LD profile using the lists as the dist and x parameters create_LDprofile(cM_distances_list,snp_values_list,bin_size = 0.001,beta_params = TRUE) ``` Care should be taken that the chromosomes stay in the same order in each list. Congratulations! You should now be able to create your own LD profile and use the zalpha package. ## References [1] Jacobs, G.S., Sluckin, T.J., and Kivisild, T. *Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.* Genetics, 2016. **203**(4): p. 1807 [2] McVean, G. A. T., Myers, S. R., Hunt, S., Deloukas, P., Bentley, D. R., and Donnelly, P. *The Fine-Scale Structure of Recombination Rate Variation in the Human Genome.* Science, 2004. **304**(5670): 581-584. [3] Spence, J.P. and Song, Y.S. *Inference and analysis of population-specific fine-scale recombination maps across 26 diverse human populations.* Science Advances, 2019. **5**(10): eaaw9206. [4] Gao, F., Ming, C., Hu, W. J., and Li, H. P. *New Software for the Fast Estimation of Population Recombination Rates (FastEPRR) in the Genomic Era.* G3-Genes Genomes Genetics, 2016. **6**(6): 1563-1571. [5] Hermann, P., Heissl, A., Tiemann-Boege, I., and Futschik, A. *LDJump: Estimating variable recombination rates from population genetic data.* Molecular Ecology Resources, 2019. **19**(3): 623-638. [6] Ewing, G. and Hermisson, J. *MSMS: a coalescent simulation program including recombination, demographic structure and selection at a single locus.* Bioinformatics, 2010. **26**(16):2064-2065. [7] Haller, B.C. and Messer, P.W. *SLiM 3: Forward Genetic Simulations Beyond the Wright–Fisher Model.* Molecular Biology and Evolution, 2019. **36**(3):632-637.
/scratch/gouwar.j/cran-all/cranData/zalpha/inst/doc/zalpha.Rmd
## Code for creating the zalpha package sticker # Use the hexSticker package #install.packages("hexSticker") library(hexSticker) # Create the DNA graph png("man/figures/zalpha_image.png",res=100,bg="transparent",width=15,height=10,units = "cm") par(mar=c(1,1,1,1)) plot_x<-seq(0,15,0.01) plot_norm<-seq(-4,4,length.out=length(plot_x)) plot_line_1<-sin(plot_x*pi)+10*dnorm(plot_norm) plot_line_2<--sin(plot_x*pi)+10*dnorm(plot_norm) line_1_color<-"black" line_2_color<-"blue" line_vert_color<-"black" axis_color<-"black" plot(plot_x,plot_line_1,type="n",ylim=c(-1,6),lwd=6,xaxt="n",yaxt="n",ylab="",xlab="",frame.plot = FALSE) # Plot the vertical lines for(i in 0:14){ segments(plot_x[34+i*100], plot_line_1[34+i*100], y1 = plot_line_2[34+i*100], lwd=3, col=line_vert_color) segments(plot_x[66+i*100], plot_line_1[66+i*100], y1 = plot_line_2[66+i*100], lwd=3, col=line_vert_color) } # Plot each segment at a time so that the lines crossover properly lines(plot_x[1:51],plot_line_1[1:51],col=line_1_color,lwd=6) lines(plot_x[1:51],plot_line_2[1:51],col=line_2_color,lwd=6) for(i in 0:6){ lines(plot_x[c(52:151)+i*200],plot_line_2[c(52:151)+i*200],col=line_2_color,lwd=6) lines(plot_x[c(52:151)+i*200],plot_line_1[c(52:151)+i*200],col=line_1_color,lwd=6) } for(i in 0:6){ lines(plot_x[c(152:251)+i*200],plot_line_1[c(152:251)+i*200],col=line_1_color,lwd=6) lines(plot_x[c(152:251)+i*200],plot_line_2[c(152:251)+i*200],col=line_2_color,lwd=6) } lines(plot_x[1451:1501],plot_line_1[1451:1501],col=line_1_color,lwd=6) lines(plot_x[1451:1501],plot_line_2[1451:1501],col=line_2_color,lwd=6) # Add in the axes axis(side=1,lwd=2,col=axis_color) axis(side=2,at=seq(-1,3,1),lwd=2,col=axis_color) dev.off() # Create the sticker imgurl <- "man/figures/zalpha_image.png" sticker(imgurl, package="zalpha", p_size=10, p_color = "white", s_x=1.01, s_y=1.4, s_width=.9, p_y=0.6, h_fill="red",h_color = "blue",spotlight=TRUE,l_x=1,l_y=1, filename="man/figures/sticker.png")
/scratch/gouwar.j/cran-all/cranData/zalpha/man/figures/createSticker.R
--- title: "zalpha" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{zalpha} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r, echo = FALSE, message=FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) library(zalpha) ``` The zalpha package contains statistics for identifying areas of the genome that have undergone a selective sweep. The idea behind these statistics is to find areas of the genome that are highly correlated, as this can be a sign that a sweep has occurred recently in the vicinity. For more information on the statistics, please see the paper by Jacobs et al. (2016)[1] referenced below. ## A simple example Here we have a dataset containing five humans with a single pair of chromosomes each. The chromosome has 20 SNPs, at base pair 100, 200, 300, ..., 2000. -------- ------------ -------------------- Person 1 Chromosome A AGGAAGGGATACGGTTATAC Chromosome B CGGAACTGATCAGCTCAGGG Person 2 Chromosome A CGGTACTGTCACGGGCATGG Chromosome B ATGTAGGGTCCAGCTCTGAC Person 3 Chromosome A ATGTCGGCATCCAGGCAGAC Chromosome B ATGAACGCATCAACTTTGAG Person 4 Chromosome A CGGTCGGCTCCAGCTTTTGG Chromosome B CTGTCCGCTCCCGGGTTTGC Person 5 Chromosome A CGCAACGGACACGCGCATGC Chromosome B CTGACGTCACCCAGTTTGAG -------- ------------ -------------------- For this simple example all that is needed is: * The vector of SNP locations * The matrix of SNP values. This could be in ACGT format as above, or in 0 and 1 notation, or any other notation as long as SNPs are biallelic. Data extracted from a PLINK .tped file is in the ideal format for this analysis. Note the genetic data is phased. ### The snps dataset The snps dataset is a data frame that comes with the zalpha package. It is identical to the simple example above, but with 0s and 1s instead of ACGTs. Realistically, the dataset would be much bigger. It is highly recommended to only use SNPs with a minor allele frequency of over 5%, as it is hard to find correlations between rare alleles. Any missing values should be coded as NA. The snps dataset can be loaded using the code: ```{r} library(zalpha) data(snps) ## This is what the dataset looks like: snps ``` This data set contains information about each of the SNPs. The first column gives the physical location of the SNP along the chromosome, in whatever units is useful to the user (usually bp or Kb). In this example, the positions are in base pairs (bp). The next column is the genetic distance of the SNP from the start of the chromosome. Ignore this column for now. The final columns are the SNP alleles for each of the chromosomes in the population. Each SNP must be biallelic, but can contain any value, for example 0s and 1s, or ACGTs. The data can contain missing values, however it is recommended that the cut off is 10% missing at most. Missing values should be coded as NA. It is also recommended to use a minor allele frequency of 5% or higher. __Note:__ There is no requirement to put data into a data frame - all that is required is a vector of SNP positions and a matrix of SNP values. ## Zalpha To test for selection, the user can use the Zalpha function. This function assigns the first SNP in the dataset as the "target locus", calculates the $Z_{\alpha}$ value, then moves on to the next SNP making that the target locus, until every SNP in the dataset has been considered. It works by calculating correlations between alleles on each side of the target locus and averaging them. 
To do this, the function needs three inputs: * __pos__ A vector of the physical locations of each of the SNPs. For this example, we will use the first column from the snps dataset: snps$bp_positions. * __ws__ The window size. This is set to 3000 bp for this small example but for human analysis realistically a window size of around 200 Kb is appropriate. The window is centred on the target locus and considers SNPs that are within ws/2 to the left and ws/2 to the right of the target SNP. ws should always use the same units as pos i.e. if pos is in bp, ws should be in bp. * __x__ A matrix of the SNP alleles across each chromosome in the sample. The number of rows should be equal to the number of SNPs, and the columns are each of the chromosomes. For this example we extract the SNP values from the snps dataset found in columns 3 to 12, and convert into a matrix: as.matrix(snps[,3:12]). ```{r} results<-Zalpha(snps$bp_positions,3000,as.matrix(snps[,3:12])) results plot(results$position,results$Zalpha) ``` The output is in the form of a list and shows the positions of each of the SNPs and the $Z_{\alpha}$ value calculated for each SNP. The NAs are because there were not enough SNPs on one side of the target locus for an accurate $Z_{\alpha}$ value to be calculated. This is controlled by the parameters minRandL and minRL, which have defaults 4 and 25 respectively. minRandL specifies the minimum number of SNPs that must be to the left and right of the target SNP within the window for $Z_{\alpha}$ to be calculated. minRL is the product of these numbers. The graph shows a sharp increase in $Z_{\alpha}$ values in the centre of this region, which could indicate the presence of a sweep. The user should compare the values across the whole genome to find outliers. Say the user is only interested in the output of Zalpha for a particular region of the chromosome; this is achieved by setting the "X" parameter to the lower and upper bounds of the region. ```{r} Zalpha(snps$bp_positions,3000,as.matrix(snps[,3:12]),X=c(500,1000)) ``` That concludes the simple example of the Zalpha function! It is recommended that the user uses the Zalpha_all function, as this function will calculate all the statistics in the zalpha package in one go, rather than running all of the statistics separately. More information on the Zalpha_all function can be found further down this vignette. Read on for information on the other statistics in the package and what they require. ## Adjusting for expected correlations between SNPs There are many reasons apart from selection that pairs of SNPs could be more correlated than the rest of the genome, including regions of low recombination and genetic drift. This package allows the user to correct for expected correlations between SNPs. There are multiple functions included in this package that adjust for expected correlations, all of which have an example below. First however, the new inputs will be described. The extra inputs required are: * __dist__ A vector containing the genetic distances between SNPs * An LD profile Returning to the snps example dataset, we can now consider the second column of the dataset "cM_distances". ```{r} snps$cM_distances ``` Each value is the genetic distance of the SNP from the start of the chromosome. This could be in centimorgans (cM), linkage disequilibrium units (LDU) or any other way of measuring genetic distance, as long as it is additive (i.e. the distance between SNP A and SNP C is equal to the distance between SNP A and SNP B plus SNP B and SNP C). 
There are many ways of calculating the genetic distances between SNPs. Some software that could be used include LDhat[2], pyrho[3], FastEPRR[4], and LDJump[5]. ### LD Profile Using an LD (linkage disequilibrium) profile allows the user to adjust for variable recombination rates along the chromosome. An LD profile is a basic look-up table. It tells the user what the expected correlation between two SNPs is, given the genetic distance between them. Here is the example LD profile provided with the zalpha R package: ```{r} data(LDprofile) LDprofile ``` The LD profile contains data about the expected correlation between SNPs given the genetic distance between them. The columns in the example are: * __bin__ This is the lower bound of the bin. In this example, row 1 would include any SNPs greater than or equal to 0 but less than 0.0001 centimorgans apart. * __rsq__ The expected r^2^ value for pairs of SNPs whose genetic distance between them falls within the bin. * __sd__ The standard deviation of r^2^ for the bin. * __Beta_a__ The first shape of the Beta distribution fitted to this bin. * __Beta_b__ The second shape of the Beta distribution. If we know two SNPs are 0.00017 cM apart, this LD profile tells us that we expect the r^2^ value to be 0.093, with a standard deviation of 0.22, and that the expected distribution of r^2^ values for SNPs this far apart is Beta(0.27,2.03). The package contains a function for creating an LD profile. This is explained lower down this vignette. The vignette continues by using the example LDprofile dataset supplied. ## Zalpha_expected The expected $Z_{\alpha}$ value (denoted $Z_{\alpha}^{E[r^2]}$) can be calculated for a chromosome given an LD profile and the genetic distances between each SNP in the chromosome. Instead of calculating the r^2^ values between SNPs, the function uses the expected correlations. It does this by working out the genetic distance between each pair of SNPs and uses the r^2^ values given in the LD profile for SNPs that far apart. ```{r} Zalpha_expected(snps$bp_positions, 3000, snps$cM_distances, LDprofile$bin, LDprofile$rsq) ``` Note that this statistic does not use the SNP value data. Once $Z_{\alpha}^{E[r^2]}$ has been calculated, it can be combined with the $Z_{\alpha}$ results to adjust for recombination, for example by computing $Z_{\alpha}$/$Z_{\alpha}^{E[r^2]}$. Outliers in this new combined statistic could be potential selection candidates. Other functions that take into account variable recombination rates are Zalpha_rsq_over_expected, Zalpha_log_rsq_over_expected, Zalpha_Zscore, and Zalpha_BetaCDF. These statistics all use the actual r^2^ values from the data combined with the expected r^2^ values from the LD profile in various ways. Examples of these statistics are here: ```{r} Zalpha_rsq_over_expected(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq) Zalpha_log_rsq_over_expected(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq) Zalpha_Zscore(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq, LDprofile$sd) Zalpha_BetaCDF(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$Beta_a, LDprofile$Beta_b) ``` Note that not all the statistics need all the columns from the LD profile. 
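As a concrete illustration of the $Z_{\alpha}$/$Z_{\alpha}^{E[r^2]}$ combination mentioned above, the two sets of results can be divided element-wise (a minimal sketch, assuming the returned list element is named `Zalpha_expected`, mirroring the naming used by `Zalpha` and `Zbeta`):

```{r}
## Adjust Zalpha for recombination by dividing by its expected value
zalpha_values <- Zalpha(snps$bp_positions, 3000, as.matrix(snps[,3:12]))
zalpha_expected_values <- Zalpha_expected(snps$bp_positions, 3000, snps$cM_distances,
                                          LDprofile$bin, LDprofile$rsq)
zalpha_values$Zalpha / zalpha_expected_values$Zalpha_expected
```

Unusually high values of this ratio, relative to the rest of the chromosome, would be the candidates to investigate further.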
## Zbeta The Zbeta function works in the same way as the Zalpha function but evaluates correlations between pairs of SNPs where one is to the left of the target locus and the other is to the right. It is useful to use the $Z_{\beta}$ statistic in conjunction with the $Z_{\alpha}$ statistic, as they behave differently depending on how close to fixation the sweep is. For example, while a sweep is in progress both $Z_{\alpha}$ and $Z_{\beta}$ would be higher than other areas of the chromosome without a sweep present. However, when a sweep reaches near-fixation, $Z_{\beta}$ would decrease whereas $Z_{\alpha}$ would remain high. Combining $Z_{\alpha}$ and $Z_{\beta}$ into new statistics such as $Z_{\alpha}$/$Z_{\beta}$ is one way of analysing this. The Zbeta function requires the exact same inputs as the Zalpha function. Here is an example: ```{r} results<-Zbeta(snps$bp_positions,3000,as.matrix(snps[,3:12])) results plot(results$position,results$Zbeta) ``` Comparing this to the $Z_{\alpha}$ graph in the earlier example, we can see that the value of $Z_{\beta}$ decreases where $Z_{\alpha}$ increases. This could indicate that, if there is a sweep at this locus, it is near-fixation. There is an equivalent Zbeta function for each of the Zalpha variations. Here is an example for each of them: ```{r} Zbeta_expected(snps$bp_positions, 3000, snps$cM_distances, LDprofile$bin, LDprofile$rsq) Zbeta_rsq_over_expected(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq) Zbeta_log_rsq_over_expected(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq) Zbeta_Zscore(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$rsq, LDprofile$sd) Zbeta_BetaCDF(snps$bp_positions, 3000, as.matrix(snps[,3:12]), snps$cM_distances, LDprofile$bin, LDprofile$Beta_a, LDprofile$Beta_b) ``` ## Diversity statistics LR and L_plus_R These statistics show the diversity around the target locus. LR calculates the number of SNPs to the left of the target locus multiplied by the number of SNPs to the right. L_plus_R is the total number of pairs of SNPs on the left and the right of the target locus. The idea behind these statistics is that if the diversity is low, there might be a sweep in this region. Care should be taken when interpreting these statistics if diversity has been altered by filtering and, when using the Zalpha_all function below, the use of minRL and minRandL parameters. ## Zalpha_all __Zalpha_all is the recommended function for using this package.__ It will run all the statistics included in the package ($Z_{\alpha}$ and $Z_{\beta}$ variations), so the user does not have to run multiple functions to calculate all the statistics they want. The function will only calculate the statistics it has been given the appropriate inputs for, so it is flexible. For example, this code will only run Zalpha, Zbeta and the two diversity statistics LR and L_plus_R, as an LD profile was not supplied: ```{r} Zalpha_all(snps$bp_positions,3000,as.matrix(snps[,3:12])) ``` Supplying an LD profile and genetic distances for each SNP will result in more of the statistics being calculated. There are many ways that the resulting statistics can be combined to give new insights into the data, see Jacobs et al. (2016)[1]. ## Identifying regions under selection To find candidate regions for selection, first calculate the statistics across the chromosome, including any combined statistics that may be of interest. 
It is then suggested to find the maximum value for windows of around 200 Kb for each statistic (minimum values for the diversity statistics). Any regions that are outliers compared to the rest of the chromosome could be considered candidates and can be investigated further. ## create_LDprofile An LD profile is required to adjust for expected correlations. It is a basic look-up table that tells the user what the expected correlation is between a pair of SNPs, given the genetic distance between them. To create a simple LD profile, the user just needs two things: * __dist__ a vector of genetic distances * __x__ a matrix of SNP values The user also needs to tell the function what bin_size to use, and optionally if they want to calculate Beta distribution parameters (this requires the fitdistrplus package to be installed). The function considers all pairs of SNPs. It separates the pairs of SNPs into bins based on the genetic distance between them. The correlation between each pair of SNPs is calculated. In each bin, the average and the standard deviation of these correlations is calculated. If beta_params=TRUE, then a beta distribution will be fitted to the correlations in each bin too. We can use the snps dataset as an example of this: ```{r} create_LDprofile(snps$cM_distances,as.matrix(snps[,3:12]),bin_size = 0.001,beta_params = TRUE) ``` This code has created an LD profile with 6 columns. These are: * __bin__ This is the lower bound of the bin, e.g. row one shows information for genetic distances that are between 0 and 0.001 cM. * __rsq__ This is the expected r^2^ for genetic distances who fall in the given bin. For example, in row one the expected r^2^ value for SNPs which are 0-0.001 cM apart is 0.1023. * __sd__ This is the standard deviation for the r^2^ values. * __Beta_a__ This is the first shape parameter for the Beta distribution fitted to this bin * __Beta_b__ This is the second shape parameter for the Beta distribution fitted to this bin * __n__ This is the number of pairs of SNPs with a genetic distance falling within this bin, whose correlations were used to calculate the statistics. There is one more optional input parameter - max_dist - which sets the maximum distance SNPs can be apart for calculating for the LD profile. For real world data, Jacobs et al. (2016)[1] recommend using distances up to 2 cM assigned to bins of size 0.0001 cM. Without this parameter, the code will generate bins up to the maximum distance between pairs of SNPs, which is likely to be inefficient as most distances will not be used. max_dist should be big enough to cover the genetic distances between pairs of SNPs within the window size given when the $Z_{\alpha}$ statistics are run. Any pairs with genetic distances bigger than max_dist will be assigned the values in the maximum bin of the LD profile. Ideally, we would want to generate an LD profile based on genetic data without selection but exactly matching the other population parameters for our data. This could be done using simulated data (using software such as MSMS[6] or SLiM[7]). We could use another genetic dataset containing a similar population. Alternatively, we could generate an LD profile using the same dataset that we are analysing for selection. Care should be taken that bins are big enough to have a lot of data in so expected r^2^ values are not overly affected by outliers. Realistically, the user will not have just one chromosome of data for creating the LD profile, but will likely have a whole genome. 
So far, we have used a vector of genetic distances and a SNP value matrix in our example. However, with multiple chromosomes there will be a vector of genetic distances and a SNP value matrix for each chromosome, and it would be good to use all that information to create the LD profile. Therefore, the function has been written to accept multiple vectors of genetic distances and multiple SNP value matrices via lists. The __dist__ parameter will accept a vector or a list of vectors. The __x__ parameter will accept a matrix or a list of matrices. For example, if we use the snps dataset but this time pretend it is three different chromosomes. ```{r} ## Generate three chromosomes of data - cM distances and SNP values chrom1_cM_distances<-snps$cM_distances chrom1_snp_values<-as.matrix(snps[,3:12]) chrom2_cM_distances<-snps$cM_distances chrom2_snp_values<-as.matrix(snps[,3:12]) chrom3_cM_distances<-snps$cM_distances chrom3_snp_values<-as.matrix(snps[,3:12]) ## create a list of the cM distances cM_distances_list<-list(chrom1_cM_distances,chrom2_cM_distances,chrom3_cM_distances) ## create a list of SNP value matrices snp_values_list<-list(chrom1_snp_values,chrom2_snp_values,chrom3_snp_values) ## create the LD profile using the lists as the dist and x parameters create_LDprofile(cM_distances_list,snp_values_list,bin_size = 0.001,beta_params = TRUE) ``` Care should be taken that the chromosomes stay in the same order in each list. Congratulations! You should now be able to create your own LD profile and use the zalpha package. ## References [1] Jacobs, G.S., Sluckin, T.J., and Kivisild, T. *Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.* Genetics, 2016. **203**(4): p. 1807 [2] McVean, G. A. T., Myers, S. R., Hunt, S., Deloukas, P., Bentley, D. R., and Donnelly, P. *The Fine-Scale Structure of Recombination Rate Variation in the Human Genome.* Science, 2004. **304**(5670): 581-584. [3] Spence, J.P. and Song, Y.S. *Inference and analysis of population-specific fine-scale recombination maps across 26 diverse human populations.* Science Advances, 2019. **5**(10): eaaw9206. [4] Gao, F., Ming, C., Hu, W. J., and Li, H. P. *New Software for the Fast Estimation of Population Recombination Rates (FastEPRR) in the Genomic Era.* G3-Genes Genomes Genetics, 2016. **6**(6): 1563-1571. [5] Hermann, P., Heissl, A., Tiemann-Boege, I., and Futschik, A. *LDJump: Estimating variable recombination rates from population genetic data.* Molecular Ecology Resources, 2019. **19**(3): 623-638. [6] Ewing, G. and Hermisson, J. *MSMS: a coalescent simulation program including recombination, demographic structure and selection at a single locus.* Bioinformatics, 2010. **26**(16):2064-2065. [7] Haller, B.C. and Messer, P.W. *SLiM 3: Forward Genetic Simulations Beyond the Wright–Fisher Model.* Molecular Biology and Evolution, 2019. **36**(3):632-637.
/scratch/gouwar.j/cran-all/cranData/zalpha/vignettes/zalpha.Rmd
#' 2020 Crosswalk of ZIP Code Tabulation Areas (ZCTAs) #' #' The primary data was obtained via the function get_zcta_crosswalk. There are 3 types of columns: ZCTA, #' state and county. Where data in practice sometimes appears as both character and numeric, columns for both are #' provided. #' #' @docType data #' @name zcta_crosswalk #' @usage data(zcta_crosswalk) NULL #' Metadata for Each "State" in zcta_crosswalk #' #' The complete dataset in ?zcta_crosswalk contains information on 56 state and #' state-equivalents. This dataframe contains the full name of each "state", plus #' its USPS abbreviation and FIPS code. #' #' @docType data #' @name state_names #' @usage data(state_names) NULL
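# Informal usage sketch (added commentary, not generated documentation):
# the packaged datasets documented above can be inspected directly. Based on
# how they are used in R/zcta.R, `zcta_crosswalk` is expected to carry zcta,
# county (FIPS/name) and state (FIPS/name/USPS) columns in both character and
# numeric form, and `state_names` one row per state-level entity.
#
# data(zcta_crosswalk); str(zcta_crosswalk)
# data(state_names); str(state_names)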
/scratch/gouwar.j/cran-all/cranData/zctaCrosswalk/R/data.R
# What a horrible world we live in, where code like this is necessary. # See https://community.rstudio.com/t/how-to-solve-no-visible-binding-for-global-variable-note/28887 globalVariables("zcta_crosswalk") #' Returns a ZCTA crosswalk as a tibble #' #' Returns the Census Bureau's 2020 ZCTA County Relationship file #' as a tibble. This function is included so that users can see how the crosswalk #' was generated. It is not intended for use by end users. #' @seealso All 2020 ZIP Code Tabulation Area 5-Digit (ZCTA5) Relationship Files: https://rb.gy/h0l5cs #' @importFrom readr read_delim #' @importFrom dplyr rename select mutate filter #' @importFrom rlang .data #' @importFrom stringr str_sub #' @export #' @returns A tibble, where each row is a (zcta, county, state) combination. get_zcta_crosswalk = function() { url = "https://www2.census.gov/geo/docs/maps-data/data/rel2020/zcta520/tab20_zcta520_county20_natl.txt" zcta_crosswalk = read_delim(file = url, delim = "|") # Select and rename columns zcta_crosswalk = zcta_crosswalk |> rename(zcta = .data$GEOID_ZCTA5_20, county_fips = .data$GEOID_COUNTY_20, county_name = .data$NAMELSAD_COUNTY_20) |> select(.data$zcta, .data$county_fips, .data$county_name) # 1. The county FIPS is always 5 characters. And the first 2 characters always # indicate the state. See https://en.wikipedia.org/wiki/FIPS_county_code. # Breaking out the state allows for easier state selection later. # 2. This file has all counties, some of which do not have a ZCTA. Remove # those counties. zcta_crosswalk |> mutate(state_fips = str_sub(.data$county_fips, 1, 2)) |> filter(!is.na(.data$zcta)) } #' Return the ZCTAs in a vector of counties #' #' Given a vector of counties, return the ZIP Code Tabulation Areas (ZCTAs) #' in those counties. Note counties must be identified by FIPS code (character #' or numeric) in this function because county names are not unique between states. #' For example, 30 states have a county named "Washington". #' #' @param counties A vector of Counties. Must be by FIPS code (numeric or #' character). #' @examples #' # "06075" is San Francisco County, California #' get_zctas_by_county("06075") #' #' # 6075 (== as.numeric("06075")) works too #' get_zctas_by_county(6075) #' #' # Multiple counties at the same time are also OK #' get_zctas_by_county(c("06075", "36059")) #' #' @importFrom utils data #' @importFrom dplyr pull filter #' @export #' @returns A vector, where each element is a ZCTA in the requested county. get_zctas_by_county = function(counties) { data("zcta_crosswalk", package = "zctaCrosswalk", envir = environment()) if (all(counties %in% zcta_crosswalk$county_fips)) { col = "county_fips" } else if (all(counties %in% zcta_crosswalk$county_fips_numeric)) { col = "county_fips_numeric" } else { stop("User supplied bad data! Type 'get_zctas_by_county' to understand how this function works.") } message(paste("Using column", col)) zcta_crosswalk |> filter(!!sym(col) %in% counties) |> pull(.data$zcta) |> unique() } #' Return the ZCTAs in a vector of states #' #' Given a vector of states, return the ZIP Code Tabulation Areas (ZCTAs) #' in those states. #' #' @param states A vector of States. Can be FIPS Codes (either character or numeric), names or USPS abbreviations. 
#' #' @examples #' # Not case sensitive when using state names #' ca_zctas = get_zctas_by_state("CaLiFoRNia") #' length(ca_zctas) #' head(ca_zctas) #' #' # "06" is the FIPS code for California #' ca_zctas = get_zctas_by_state("06") #' length(ca_zctas) #' head(ca_zctas) #' #' # 6 is OK too - sometimes people use numbers for FIPS codes #' ca_zctas = get_zctas_by_state(6) #' length(ca_zctas) #' head(ca_zctas) #' #' # USPS state abbreviations are also OK #' ca_zctas = get_zctas_by_state("CA") #' length(ca_zctas) #' head(ca_zctas) #' #' # Multiple states at the same time are also OK #' ca_ny_zctas = get_zctas_by_state(c("CA", "NY")) #' length(ca_ny_zctas) #' head(ca_ny_zctas) #' #' @export #' @importFrom dplyr filter pull sym #' @returns A vector, where each element is a ZCTA in the requested state. get_zctas_by_state = function(states) { data("zcta_crosswalk", package = "zctaCrosswalk", envir = environment()) if (all(tolower(states) %in% zcta_crosswalk$state_name)) { col = "state_name" states = tolower(states) } else if (all(states %in% zcta_crosswalk$state_usps)) { col = "state_usps" } else if (all(states %in% zcta_crosswalk$state_fips)) { col = "state_fips" } else if (all(states %in% zcta_crosswalk$state_fips_numeric)) { col = "state_fips_numeric" } else { stop("User supplied bad data! Type 'get_zctas_by_state' to understand how this function works.") } message(paste("Using column", col)) zcta_crosswalk |> filter(!!sym(col) %in% states) |> pull(.data$zcta) |> unique() } #' Return metadata on a ZCTA #' #' Given a vector of ZIP Code Tabulation Areas (ZCTAs), return what #' state and county they are in. NOTE: A single ZCTA can span multiple #' states and counties. #' #' @param zctas A vector of ZCTAs (character or numeric) #' @examples #' get_zcta_metadata("90210") #' #' # Some ZCTAs span multiple counties #' get_zcta_metadata(39573) #' @export #' @importFrom dplyr filter #' @returns A tibble, where each row is a (zcta, county, state) combination. get_zcta_metadata = function(zctas) { data("zcta_crosswalk", package = "zctaCrosswalk", envir = environment()) stopifnot(all(zctas %in% zcta_crosswalk$zcta)) zcta_crosswalk |> filter(.data$zcta %in% zctas) }
/scratch/gouwar.j/cran-all/cranData/zctaCrosswalk/R/zcta.R
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(zctaCrosswalk) ## ----------------------------------------------------------------------------- # Not case sensitive when using state names head( get_zctas_by_state("California") ) # USPS state abbreviations are also OK - but these *are* case sensitive head( get_zctas_by_state("CA") ) # Multiple states at the same time are also OK head( get_zctas_by_state(c("CA", "NY")) ) # Throws an error - you can't mix types in a single request # get_zctas_by_state(c("California", "NY")) ## ----------------------------------------------------------------------------- ca1 = get_zctas_by_state("CA") ca2 = get_zctas_by_state("06") ca3 = get_zctas_by_state(6) all(ca1 == ca2) all(ca2 == ca3) ## ----------------------------------------------------------------------------- # "06075" is San Francisco County, California head( get_zctas_by_county("06075") ) # 6075 (== as.numeric("06075")) works too head( get_zctas_by_county(6075) ) # Multiple counties at the same time are also OK head( get_zctas_by_county(c("06075", "36059")) ) ## ----------------------------------------------------------------------------- get_zcta_metadata("90210") # Some ZCTAs span multiple counties get_zcta_metadata(39573)
/scratch/gouwar.j/cran-all/cranData/zctaCrosswalk/inst/doc/a01_introduction.R
--- title: "Introduction" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{1) Introduction} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(zctaCrosswalk) ``` This package is designed to help answer common analytical questions that arise when working with US ZIP Codes. Note: the entity which maintains US ZIP Codes (the US Postal Service) does not release a map or crosswalk of that dataset. As a result, most analysts instead use [ZIP Code Tabulation Areas (ZCTAs)](https://www.census.gov/programs-surveys/geography/guidance/geo-areas/zctas.html) which are maintained by the US Census Bureau. Census also provides [Relationship Files](https://www.census.gov/geographies/reference-files/time-series/geo/relationship-files.2020.html#zcta) that maps ZCTAs to other geographies. This package provides the Census Bureau's "2020 ZCTA to County Relationship File" as a tibble, combines it with useful publicly available metadata (such as State names) and provides convenience functions for querying it. The main functions in this package are: * `?get_zctas_by_state` * `?get_zctas_by_county` * `?get_zcta_metadata` ## ?get_zctas_by_state `?get_zctas_by_state` takes a vector of states and returns the vector of ZCTAs in those states. Here are some examples: ```{r} # Not case sensitive when using state names head( get_zctas_by_state("California") ) # USPS state abbreviations are also OK - but these *are* case sensitive head( get_zctas_by_state("CA") ) # Multiple states at the same time are also OK head( get_zctas_by_state(c("CA", "NY")) ) # Throws an error - you can't mix types in a single request # get_zctas_by_state(c("California", "NY")) ``` A common problem when doing analytics with states is ambiguity around names. For example, most people write "Washington, DC". But this dataset uses "District of Columbia". The most common solution to this problem is to use [FIPS Codes](https://en.wikipedia.org/wiki/Federal_Information_Processing_Standard_state_code) when doing analytics with states. And so `?get_zctas_by_state` also supports FIPS codes. Note that technically FIPS codes are characters and have a leading zero (e.g. California is "06"). But in practice people often use numbers (e.g. 6 for California) as well. As a result, `?get_zctas_by_state` supports both: ```{r} ca1 = get_zctas_by_state("CA") ca2 = get_zctas_by_state("06") ca3 = get_zctas_by_state(6) all(ca1 == ca2) all(ca2 == ca3) ``` ## ?get_zctas_by_county `?get_zctas_by_county` works analogously to `?get_zctas_by_state`. The primary difference is that it only accepts FIPS codes. This is because [FIPS county codes](https://en.wikipedia.org/wiki/FIPS_county_code) are unique, but their names are not. (For example, 30 counties in this dataset are named "Washington County"!) If you need to find the FIPS code for a particular county, I recommend simply googling it (e.g. "FIPS code for San Francisco County California") or consulting [this](https://en.wikipedia.org/wiki/List_of_United_States_FIPS_codes_by_county) page. Note that the FIPS codes can be either character or numeric. 
```{r} # "06075" is San Francisco County, California head( get_zctas_by_county("06075") ) # 6075 (== as.numeric("06075")) works too head( get_zctas_by_county(6075) ) # Multiple counties at the same time are also OK head( get_zctas_by_county(c("06075", "36059")) ) ``` ## ?get_zcta_metadata `?get_zcta_metadata` takes a vector of ZCTAs and returns all available metadata on them. The ZCTAs can be either character or numeric. ```{r} get_zcta_metadata("90210") # Some ZCTAs span multiple counties get_zcta_metadata(39573) ```
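These helpers can also be chained together. As a brief sketch (the state here is chosen purely for illustration), the metadata for every ZCTA in a state can be pulled in one step:

```{r}
# look up all ZCTAs in a state, then pull their county-level metadata
ri_zctas = get_zctas_by_state("Rhode Island")
ri_metadata = get_zcta_metadata(ri_zctas)
head(ri_metadata)
```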
/scratch/gouwar.j/cran-all/cranData/zctaCrosswalk/inst/doc/a01_introduction.Rmd
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup, warning = FALSE, message = FALSE---------------------------------- library(zctaCrosswalk) library(tidycensus) library(dplyr) ## ----------------------------------------------------------------------------- zcta_income = get_acs( geography = "zcta", variables = "B19013_001", year = 2021) head(zcta_income) ## ----------------------------------------------------------------------------- nrow(zcta_income) sf_zcta_income = zcta_income |> dplyr::filter(GEOID %in% get_zctas_by_county("06075")) nrow(sf_zcta_income) head(sf_zcta_income) ## ----eval = FALSE------------------------------------------------------------- # library(zctaCrosswalk) # library(tidycensus) # library(dplyr) # library(mapview) # # all_zctas = get_acs( # geography = "zcta", # variables = "B19013_001", # year = 2021, # geometry = TRUE) # # filtered_zctas = filter(all_zctas, GEOID %in% get_zctas_by_county(6075)) # # mapview(filtered_zctas, zcol = "estimate")
/scratch/gouwar.j/cran-all/cranData/zctaCrosswalk/inst/doc/a02_workflow-tidycensus.R
--- title: "Workflow with tidycensus" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{2) Workflow with tidycensus} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup, warning = FALSE, message = FALSE} library(zctaCrosswalk) library(tidycensus) library(dplyr) ``` `zctaCrosswalk` was designed to work well with the `tidycensus` package. `tidycensus` is currently the most popular way to access Census data in R. Here is an example of using it to get Median Household Income on all ZCTAs in the US: ```{r} zcta_income = get_acs( geography = "zcta", variables = "B19013_001", year = 2021) head(zcta_income) ``` Note that `?get_acs` returns data for all ZCTAs in the US. It does not provide an option to get data on ZCTAs by State or County. And the dataframe it returns does not provide enough metadata to allow you to do this subselection yourself. A primary motivation for creating the `zctaCrosswalk` package was to support this type of analysis. Note that `?get_acs` returns the ZCTA in a column called `GEOID`. We can combine this fact with `?dplyr::filter`, `?get_zctas_by_county` and `?get_zctas_by_state` to subset to any states or counties we choose. Here we filter `zcta_income` to ZCTAs in San Francisco County, California: ```{r} nrow(zcta_income) sf_zcta_income = zcta_income |> dplyr::filter(GEOID %in% get_zctas_by_county("06075")) nrow(sf_zcta_income) head(sf_zcta_income) ``` ## Mapping the Result A primary motivation in creating this workflow (and indeed, this package) was to create demographic maps at the ZCTA level for selected states and counties. If this interests you as well, I encourage you to copy the below code into R and view the output yourself. (Unfortunately, R package vignettes do not seem to handle map output from the `mapview` package well). This is a powerful and elegant pattern for visualizing ZCTA demographics in R: ```{r eval = FALSE} library(zctaCrosswalk) library(tidycensus) library(dplyr) library(mapview) all_zctas = get_acs( geography = "zcta", variables = "B19013_001", year = 2021, geometry = TRUE) filtered_zctas = filter(all_zctas, GEOID %in% get_zctas_by_county(6075)) mapview(filtered_zctas, zcol = "estimate") ```
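If an interactive widget is not needed (for example, inside a vignette or a static report), the same filtered object can be drawn with base `plot()` instead. This is a minimal sketch that assumes, as in the chunk above, that `get_acs()` was called with `geometry = TRUE` so that `filtered_zctas` is an `sf` object:

```{r eval = FALSE}
# static choropleth of median household income for the selected ZCTAs
plot(filtered_zctas["estimate"])
```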
/scratch/gouwar.j/cran-all/cranData/zctaCrosswalk/inst/doc/a02_workflow-tidycensus.Rmd
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup, warning = FALSE, message = FALSE---------------------------------- library(zctaCrosswalk) library(dplyr) ## ----------------------------------------------------------------------------- data(zcta_crosswalk) print(zcta_crosswalk, n = 5) ## ----------------------------------------------------------------------------- data(state_names) print(state_names, n = 5)
/scratch/gouwar.j/cran-all/cranData/zctaCrosswalk/inst/doc/a03_developer-notes.R
--- title: "Developer Notes" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{3) Developer Notes} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup, warning = FALSE, message = FALSE} library(zctaCrosswalk) library(dplyr) ``` While creating this package I was acutely aware that ZCTAs change frequently. For example, back in 2016 I created a similar package called [choroplethrZip](https://github.com/arilamstein/choroplethrZip). That package is now out of date, because the underlying data it stores became out of date. I expect that something similar will eventually happen with this package. This vignette is written as a "note to my future self" in case I wind up needing to write a similar package again in the future. It is also intended to increase the number of people who understand how to create packages like this. ## ?zcta_crosswalk The core data structure in this package is `?zcta_crosswalk`: ```{r} data(zcta_crosswalk) print(zcta_crosswalk, n = 5) ``` Most of the effort in creating this package was spent creating this data structure. Let's see how it was created. ### ?get_zcta_crosswalk Start by looking at the contents of the function `?get_zcta_crosswalk`: ``` get_zcta_crosswalk = function() { url = "https://www2.census.gov/geo/docs/maps-data/data/rel2020/zcta520/tab20_zcta520_county20_natl.txt" zcta_crosswalk = read_delim(file = url, delim = "|") # Select and rename columns zcta_crosswalk = zcta_crosswalk |> rename(zcta = .data$GEOID_ZCTA5_20, county_fips = .data$GEOID_COUNTY_20, county_name = .data$NAMELSAD_COUNTY_20) |> select(.data$zcta, .data$county_fips, .data$county_name) # 1. The county FIPS is always 5 characters. And the first 2 characters always # indicate the state. See https://en.wikipedia.org/wiki/FIPS_county_code. # Breaking out the state allows for easier state selection later. # 2. This file has all counties, some of which do not have a ZCTA. Remove # those counties. zcta_crosswalk |> mutate(state_fips = str_sub(.data$county_fips, 1, 2)) |> filter(!is.na(.data$zcta)) } ``` The function reads and transforms the contents of a URL. At the time of this writing that is the URL for the Census Bureau's "2020 ZCTA to County Relationship File", a file which I mentioned earlier. This means that if Census publishes an updated dataset in the same format tomorrow you could just change the URL, rerun the code and get the updated data in R. (Note that I do not know when Census plans to update this dataset or whether they plan to publish it in the same format.) If you open the URL referenced in `?get_zcta_crosswalk` in a browser you will see rows like this: ``` 221704258470394|90210|ZCTA5 90210|27823432|153478|G6350|B5|S|275901063468976|06037|Los Angeles County|10513491099|1787501506|G4020|H1|A|27823432|153478 ``` This tells us that ZCTA 90210 is in Los Angeles County. It also tells us that Los Angeles County has FIPS Code 06037. ### Adding in State Information Unfortunately, the file does not directly contain any state information. And since I wanted to run queries like "Get all ZCTAs in a given state", I needed to add that in. I started by splitting out the first two characters of each County FIPS Code into a new column called `state_fips`. This allows a user to search for ZCTAs in a state if they know the state's FIPS code. However, this does not help us if users want to select a state by its name or Postal Code Abbreviation. 
To address this limitation I created a new dataframe called `state_names`, and used it to join against the results of `?get_zcta_crosswalk`: ```{r} data(state_names) print(state_names, n = 5) ``` One thing to keep in mind is that while there are technically only 50 states, "state" in this dataset really means "any top level administrative region". This dataset contains 56 states (the extra ones are: the District of Columbia, Puerto Rico, US Virgin Islands, American Samoa, Guam and the Northern Mariana Islands). I believe that it would be useful for R to have a standalone package that contains a data frame like this for all FIPS codes. I did not break `state_names` out into a separate package because even though it has 56 state-level entities, the full list is much [larger](https://www.census.gov/library/reference/code-lists/ansi.html). The code I used to generate `state_names` is in `inst/gen_state_names.R`. Note that while R has two built-in vectors that deal with state names (`state.abb` and `state.name`), they cannot help us here because: (1) they do not contain FIPS codes and (2) they only contain 50 states. ## Learning About ZCTAs If you would like to learn more about ZCTAs (including how they differ from ZIP Codes), I recommend two references: 1. My free course [Mapmaking in R with Choroplethr](https://ari-lamsteins-courses.thinkific.com/courses/mapmaking-in-r-with-choroplethr) has three sections dedicated to "ZIP Code Choropleths". 2. In 2017 I had the pleasure of meeting Jon Sperling, who is one of the creators of the ZCTA, at the Association of Public Data Users (APDU) conference. You can learn about that meeting, including a reference to one of his papers on the topic, [here](https://arilamstein.com/blog/2017/10/24/meeting-titans-open-data/). One of my recollections from that meeting is Jon explaining that ZIP Codes are designed to follow roads. This means that different sides of a single block can have different ZIP codes. Census geography, however, treats blocks as atomic. This means that all homes on a single block must have the same ZCTA. This difference in construction means that ZIPs and ZCTAs are unlikely to ever truly be identical. ## Closing Thoughts My primary concern with this dataset is that people will assume that it is a crosswalk for present-day ZIP Codes. As stated above, ZCTAs rarely (if ever) line up perfectly with ZIP Codes. Additionally, this dataset was published in 2020, and it is not clear how many changes have occurred to ZIP Codes in the interim. ## Funding I would like to thank my employer, [MarketBridge](https://market-bridge.com/), for supporting the development of this package. This package would not have been developed without their support.
/scratch/gouwar.j/cran-all/cranData/zctaCrosswalk/inst/doc/a03_developer-notes.Rmd
library(zctaCrosswalk) library(tibble) library(dplyr) library(choroplethrMaps) # choroplethrMaps package has a state.regions df that has most, but not all, of what we need data(state.regions) state.regions = as_tibble(state.regions) nrow(state.regions) # 51 state.regions # Generate extra rows we need extra_states = rbind( data.frame(region = "puerto rico", abb = "PR", fips.numeric = 72L, fips.character="72"), data.frame(region = "u.s. virigin islands", abb = "VI", fips.numeric = 78L, fips.character="78"), data.frame(region = "american samoa", abb = "AS", fips.numeric = 60L, fips.character="60"), data.frame(region = "guam", abb = "GU", fips.numeric = 66L, fips.character="66"), data.frame(region = "northern mariana islands", abb = "MP", fips.numeric = 69L, fips.character="69") ) # Now merge and rename columns state_names = rbind(state.regions, extra_states) colnames(state_names) = c("full", "usps", "fips_numeric", "fips_character") save(state_names, file="data/state_names.rda")
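# Illustrative sketch (added commentary, not part of the original script):
# the developer notes describe joining `state_names` onto the output of
# get_zcta_crosswalk() by state FIPS code when building the packaged
# `zcta_crosswalk` data. A minimal version of that join could look like the
# commented lines below; the renamed column names are assumptions based on
# how R/zcta.R uses the crosswalk.
#
# crosswalk = zctaCrosswalk::get_zcta_crosswalk() |>
#   dplyr::left_join(state_names, by = c("state_fips" = "fips_character")) |>
#   dplyr::rename(state_name = full, state_usps = usps,
#                 state_fips_numeric = fips_numeric)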
/scratch/gouwar.j/cran-all/cranData/zctaCrosswalk/inst/gen_state_names.R
--- title: "Introduction" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{1) Introduction} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(zctaCrosswalk) ``` This package is designed to help answer common analytical questions that arise when working with US ZIP Codes. Note: the entity which maintains US ZIP Codes (the US Postal Service) does not release a map or crosswalk of that dataset. As a result, most analysts instead use [ZIP Code Tabulation Areas (ZCTAs)](https://www.census.gov/programs-surveys/geography/guidance/geo-areas/zctas.html) which are maintained by the US Census Bureau. Census also provides [Relationship Files](https://www.census.gov/geographies/reference-files/time-series/geo/relationship-files.2020.html#zcta) that maps ZCTAs to other geographies. This package provides the Census Bureau's "2020 ZCTA to County Relationship File" as a tibble, combines it with useful publicly available metadata (such as State names) and provides convenience functions for querying it. The main functions in this package are: * `?get_zctas_by_state` * `?get_zctas_by_county` * `?get_zcta_metadata` ## ?get_zctas_by_state `?get_zctas_by_state` takes a vector of states and returns the vector of ZCTAs in those states. Here are some examples: ```{r} # Not case sensitive when using state names head( get_zctas_by_state("California") ) # USPS state abbreviations are also OK - but these *are* case sensitive head( get_zctas_by_state("CA") ) # Multiple states at the same time are also OK head( get_zctas_by_state(c("CA", "NY")) ) # Throws an error - you can't mix types in a single request # get_zctas_by_state(c("California", "NY")) ``` A common problem when doing analytics with states is ambiguity around names. For example, most people write "Washington, DC". But this dataset uses "District of Columbia". The most common solution to this problem is to use [FIPS Codes](https://en.wikipedia.org/wiki/Federal_Information_Processing_Standard_state_code) when doing analytics with states. And so `?get_zctas_by_state` also supports FIPS codes. Note that technically FIPS codes are characters and have a leading zero (e.g. California is "06"). But in practice people often use numbers (e.g. 6 for California) as well. As a result, `?get_zctas_by_state` supports both: ```{r} ca1 = get_zctas_by_state("CA") ca2 = get_zctas_by_state("06") ca3 = get_zctas_by_state(6) all(ca1 == ca2) all(ca2 == ca3) ``` ## ?get_zctas_by_county `?get_zctas_by_county` works analogously to `?get_zctas_by_state`. The primary difference is that it only accepts FIPS codes. This is because [FIPS county codes](https://en.wikipedia.org/wiki/FIPS_county_code) are unique, but their names are not. (For example, 30 counties in this dataset are named "Washington County"!) If you need to find the FIPS code for a particular county, I recommend simply googling it (e.g. "FIPS code for San Francisco County California") or consulting [this](https://en.wikipedia.org/wiki/List_of_United_States_FIPS_codes_by_county) page. Note that the FIPS codes can be either character or numeric. 
```{r} # "06075" is San Francisco County, California head( get_zctas_by_county("06075") ) # 6075 (== as.numeric("06075")) works too head( get_zctas_by_county(6075) ) # Multiple counties at the same time are also OK head( get_zctas_by_county(c("06075", "36059")) ) ``` ## ?get_zcta_metadata `?get_zcta_metadata` takes a vector of ZCTAs and returns all available metadata on them. The ZCTAs can be either character or numeric. ```{r} get_zcta_metadata("90210") # Some ZCTAs span multiple counties get_zcta_metadata(39573) ```
/scratch/gouwar.j/cran-all/cranData/zctaCrosswalk/vignettes/a01_introduction.Rmd
--- title: "Workflow with tidycensus" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{2) Workflow with tidycensus} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup, warning = FALSE, message = FALSE} library(zctaCrosswalk) library(tidycensus) library(dplyr) ``` `zctaCrosswalk` was designed to work well with the `tidycensus` package. `tidycensus` is currently the most popular way to access Census data in R. Here is an example of using it to get Median Household Income on all ZCTAs in the US: ```{r} zcta_income = get_acs( geography = "zcta", variables = "B19013_001", year = 2021) head(zcta_income) ``` Note that `?get_acs` returns data for all ZCTAs in the US. It does not provide an option to get data on ZCTAs by State or County. And the dataframe it returns does not provide enough metadata to allow you to do this subselection yourself. A primary motivation for creating the `zctaCrosswalk` package was to support this type of analysis. Note that `?get_acs` returns the ZCTA in a column called `GEOID`. We can combine this fact with `?dplyr::filter`, `?get_zctas_by_county` and `?get_zctas_by_state` to subset to any states or counties we choose. Here we filter `zcta_income` to ZCTAs in San Francisco County, California: ```{r} nrow(zcta_income) sf_zcta_income = zcta_income |> dplyr::filter(GEOID %in% get_zctas_by_county("06075")) nrow(sf_zcta_income) head(sf_zcta_income) ``` ## Mapping the Result A primary motivation in creating this workflow (and indeed, this package) was to create demographic maps at the ZCTA level for selected states and counties. If this interests you as well, I encourage you to copy the below code into R and view the output yourself. (Unfortunately, R package vignettes do not seem to handle map output from the `mapview` package well). This is a powerful and elegant pattern for visualizing ZCTA demographics in R: ```{r eval = FALSE} library(zctaCrosswalk) library(tidycensus) library(dplyr) library(mapview) all_zctas = get_acs( geography = "zcta", variables = "B19013_001", year = 2021, geometry = TRUE) filtered_zctas = filter(all_zctas, GEOID %in% get_zctas_by_county(6075)) mapview(filtered_zctas, zcol = "estimate") ```
/scratch/gouwar.j/cran-all/cranData/zctaCrosswalk/vignettes/a02_workflow-tidycensus.Rmd
--- title: "Developer Notes" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{3) Developer Notes} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup, warning = FALSE, message = FALSE} library(zctaCrosswalk) library(dplyr) ``` While creating this package I was acutely aware that ZCTAs change frequently. For example, back in 2016 I created a similar package called [choroplethrZip](https://github.com/arilamstein/choroplethrZip). That package is now out of date, because the underlying data it stores became out of date. I expect that something similar will eventually happen with this package. This vignette is written as a "note to my future self" in case I wind up needing to write a similar package again in the future. It is also intended to increase the number of people who understand how to create packages like this. ## ?zcta_crosswalk The core data structure in this package is `?zcta_crosswalk`: ```{r} data(zcta_crosswalk) print(zcta_crosswalk, n = 5) ``` Most of the effort in creating this package was spent creating this data structure. Let's see how it was created. ### ?get_zcta_crosswalk Start by looking at the contents of the function `?get_zcta_crosswalk`: ``` get_zcta_crosswalk = function() { url = "https://www2.census.gov/geo/docs/maps-data/data/rel2020/zcta520/tab20_zcta520_county20_natl.txt" zcta_crosswalk = read_delim(file = url, delim = "|") # Select and rename columns zcta_crosswalk = zcta_crosswalk |> rename(zcta = .data$GEOID_ZCTA5_20, county_fips = .data$GEOID_COUNTY_20, county_name = .data$NAMELSAD_COUNTY_20) |> select(.data$zcta, .data$county_fips, .data$county_name) # 1. The county FIPS is always 5 characters. And the first 2 characters always # indicate the state. See https://en.wikipedia.org/wiki/FIPS_county_code. # Breaking out the state allows for easier state selection later. # 2. This file has all counties, some of which do not have a ZCTA. Remove # those counties. zcta_crosswalk |> mutate(state_fips = str_sub(.data$county_fips, 1, 2)) |> filter(!is.na(.data$zcta)) } ``` The function reads and transforms the contents of a URL. At the time of this writing that is the URL for the Census Bureau's "2020 ZCTA to County Relationship File", a file which I mentioned earlier. This means that if Census publishes an updated dataset in the same format tomorrow you could just change the URL, rerun the code and get the updated data in R. (Note that I do not know when Census plans to update this dataset or whether they plan to publish it in the same format.) If you open the URL referenced in `?get_zcta_crosswalk` in a browser you will see rows like this: ``` 221704258470394|90210|ZCTA5 90210|27823432|153478|G6350|B5|S|275901063468976|06037|Los Angeles County|10513491099|1787501506|G4020|H1|A|27823432|153478 ``` This tells us that ZCTA 90210 is in Los Angeles County. It also tells us that Los Angeles County has FIPS Code 06037. ### Adding in State Information Unfortunately, the file does not directly contain any state information. And since I wanted to run queries like "Get all ZCTAs in a given state", I needed to add that in. I started by splitting out the first two characters of each County FIPS Code into a new column called `state_fips`. This allows a user to search for ZCTAs in a state if they know the state's FIPS code. However, this does not help us if users want to select a state by its name or Postal Code Abbreviation. 
To address this limitation I created a new dataframe called `state_names`, and used it to join against the results of `?get_zcta_crosswalk`: ```{r} data(state_names) print(state_names, n = 5) ``` One thing to keep in mind is that while there are technically only 50 states, "state" in this dataset really means "any top level administrative region". This dataset contains 56 states (the extra ones are: the District of Columbia, Puerto Rico, US Virgin Islands, American Samoa, Guam and the Northern Mariana Islands). I believe that it would be useful for R to have a standalone package that contains a data frame like this for all FIPS codes. I did not break `state_names` out into a separate package because even though it has 56 state-level entities, the full list is much [larger](https://www.census.gov/library/reference/code-lists/ansi.html). The code I used to generate `state_names` is in `inst/gen_state_names.R`. Note that while R has two built-in vectors that deal with state names (`state.abb` and `state.name`), they cannot help us here because: (1) they do not contain FIPS codes and (2) they only contain 50 states. ## Learning About ZCTAs If you would like to learn more about ZCTAs (including how they differ from ZIP Codes), I recommend two references: 1. My free course [Mapmaking in R with Choroplethr](https://ari-lamsteins-courses.thinkific.com/courses/mapmaking-in-r-with-choroplethr) has three sections dedicated to "ZIP Code Choropleths". 2. In 2017 I had the pleasure of meeting Jon Sperling, who is one of the creators of the ZCTA, at the Association of Public Data Users (APDU) conference. You can learn about that meeting, including a reference to one of his papers on the topic, [here](https://arilamstein.com/blog/2017/10/24/meeting-titans-open-data/). One of my recollections from that meeting is Jon explaining that ZIP Codes are designed to follow roads. This means that different sides of a single block can have different ZIP codes. Census geography, however, treats blocks as atomic. This means that all homes on a single block must have the same ZCTA. This difference in construction means that ZIPs and ZCTAs are unlikely to ever truly be identical. ## Closing Thoughts My primary concern with this dataset is that people will assume that it is a crosswalk for present-day ZIP Codes. As stated above, ZCTAs rarely (if ever) line up perfectly with ZIP Codes. Additionally, this dataset was published in 2020, and it is not clear how many changes have occurred to ZIP Codes in the interim. ## Funding I would like to thank my employer, [MarketBridge](https://market-bridge.com/), for supporting the development of this package. This package would not have been developed without their support.
/scratch/gouwar.j/cran-all/cranData/zctaCrosswalk/vignettes/a03_developer-notes.Rmd
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 .zdist_lpdf <- function(x, mu, sigma, a, b) { .Call(`_zcurve_zdist_lpdf`, x, mu, sigma, a, b) } .tdist_lpdf <- function(x, mu, df, a, b) { .Call(`_zcurve_tdist_lpdf`, x, mu, df, a, b) } .zdist_pdf <- function(x, mu, sigma, a, b) { .Call(`_zcurve_zdist_pdf`, x, mu, sigma, a, b) } .zdist_cens_lpdf <- function(lb, ub, mu, sigma, a, b) { .Call(`_zcurve_zdist_cens_lpdf`, lb, ub, mu, sigma, a, b) } .tdist_pdf <- function(x, mu, df, a, b) { .Call(`_zcurve_tdist_pdf`, x, mu, df, a, b) } .dirichlet_rng <- function(alpha) { .Call(`_zcurve_dirichlet_rng`, alpha) } .zcurve_EM_fit_RCpp <- function(x, type, mu, sigma, theta, a, b, sig_level, max_iter, criterion) { .Call(`_zcurve_zcurve_EM_fit_RCpp`, x, type, mu, sigma, theta, a, b, sig_level, max_iter, criterion) } .zcurve_EM_fit_fast_RCpp <- function(x, mu, sigma, theta, a, b, sig_level, max_iter, criterion) { .Call(`_zcurve_zcurve_EM_fit_fast_RCpp`, x, mu, sigma, theta, a, b, sig_level, max_iter, criterion) } .zcurve_EMc_fit_fast_RCpp <- function(x, lb, ub, mu, sigma, theta, a, b, sig_level, max_iter, criterion) { .Call(`_zcurve_zcurve_EMc_fit_fast_RCpp`, x, lb, ub, mu, sigma, theta, a, b, sig_level, max_iter, criterion) } .zcurve_EMc_fit_fast_w_RCpp <- function(x, x_w, lb, ub, b_w, mu, sigma, theta, a, b, sig_level, max_iter, criterion) { .Call(`_zcurve_zcurve_EMc_fit_fast_w_RCpp`, x, x_w, lb, ub, b_w, mu, sigma, theta, a, b, sig_level, max_iter, criterion) } .zcurve_EM_start_RCpp <- function(x, type, K, mu, sigma, mu_alpha, mu_max, theta_alpha, a, b, sig_level, fit_reps, max_iter, criterion) { .Call(`_zcurve_zcurve_EM_start_RCpp`, x, type, K, mu, sigma, mu_alpha, mu_max, theta_alpha, a, b, sig_level, fit_reps, max_iter, criterion) } .zcurve_EM_boot_RCpp <- function(x, type, mu, sigma, theta, a, b, sig_level, bootstrap, max_iter, criterion) { .Call(`_zcurve_zcurve_EM_boot_RCpp`, x, type, mu, sigma, theta, a, b, sig_level, bootstrap, max_iter, criterion) } .zcurve_EM_start_fast_RCpp <- function(x, K, mu, sigma, mu_alpha, mu_max, theta_alpha, a, b, sig_level, fit_reps, max_iter, criterion) { .Call(`_zcurve_zcurve_EM_start_fast_RCpp`, x, K, mu, sigma, mu_alpha, mu_max, theta_alpha, a, b, sig_level, fit_reps, max_iter, criterion) } .zcurve_EM_boot_fast_RCpp <- function(x, mu, sigma, theta, a, b, sig_level, bootstrap, max_iter, criterion) { .Call(`_zcurve_zcurve_EM_boot_fast_RCpp`, x, mu, sigma, theta, a, b, sig_level, bootstrap, max_iter, criterion) } .zcurve_EMc_start_fast_RCpp <- function(x, lb, ub, K, mu, sigma, mu_alpha, mu_max, theta_alpha, a, b, sig_level, fit_reps, max_iter, criterion) { .Call(`_zcurve_zcurve_EMc_start_fast_RCpp`, x, lb, ub, K, mu, sigma, mu_alpha, mu_max, theta_alpha, a, b, sig_level, fit_reps, max_iter, criterion) } .zcurve_EMc_boot_fast_RCpp <- function(x, lb, ub, indx, mu, sigma, theta, a, b, sig_level, bootstrap, max_iter, criterion) { .Call(`_zcurve_zcurve_EMc_boot_fast_RCpp`, x, lb, ub, indx, mu, sigma, theta, a, b, sig_level, bootstrap, max_iter, criterion) } .zcurve_EMc_boot_fast_w_RCpp <- function(x, x_w, lb, ub, b_w, indx, mu, sigma, theta, a, b, sig_level, bootstrap, max_iter, criterion) { .Call(`_zcurve_zcurve_EMc_boot_fast_w_RCpp`, x, x_w, lb, ub, b_w, indx, mu, sigma, theta, a, b, sig_level, bootstrap, max_iter, criterion) }
/scratch/gouwar.j/cran-all/cranData/zcurve/R/RcppExports.R
#' @title Prepare data for z-curve #' #' @description \code{zcurve_data} is used to prepare data for the #' [zcurve()] function. The function transforms strings containing #' reported test statistics \code{"z", "t", "f", "chi", "p"} into two-sided #' p-values. Test statistics reported as inequalities are considered #' to be censored, as are test statistics reported with low accuracy #' (i.e., rounded to too few decimals). See details for more information. #' #' @param data a vector of strings containing the test statistics. #' @param id a vector identifying observations from the same cluster. #' @param rounded an optional argument specifying whether de-rounding should be applied. #' Defaults to \code{TRUE}, which automatically extracts the number of decimals from the input #' and treats the input as censored if it does not surpass the \code{stat_precise} and #' the \code{p_precise} thresholds. Set to \code{FALSE} to treat all input as exact values, #' or supply a numeric vector with values specifying the precision of the input. #' @param stat_precise an integer specifying the numerical precision of #' \code{"z", "t", "f"} statistics treated as exact values. #' @param p_precise an integer specifying the numerical precision of #' p-values treated as exact values. #' #' @details By default, the function extracts the type of test statistic: #' \enumerate{ #' \item \code{"F(df1, df2)=x"} for F-statistic with df1 and df2 degrees of freedom, #' \item \code{"chi(df)=x"} for chi-square statistic with df degrees of freedom, #' \item \code{"t(df)=x"} for t-statistic with df degrees of freedom, #' \item \code{"z=x"} for z-statistic, #' \item \code{"p=x"} for p-value. #' } #' The input is not case sensitive and automatically removes empty spaces. Furthermore, #' inequalities (\code{"<"} and \code{">"}) can be used to denote censoring. I.e., that #' the p-value is lower than \code{"x"} or that the test statistic is larger than \code{"x"} #' respectively. The automatic de-rounding procedure (if \code{rounded = TRUE}) treats #' p-values with fewer decimal places than specified in \code{p_precise} or test statistics #' with fewer decimal places than specified in \code{stat_precise} as censored on an interval #' that could result in a given rounded value. For example, a \code{"p = 0.03"} input would be #' de-rounded as a p-value lower than 0.035 but larger than 0.025. #' #' #' @return An object of type \code{"zcurve_data"}. 
#' @export zcurve_data #' #' @examples #' # Specify a character vector containing the test statistics #' data <- c("z = 2.1", "t(34) = 2.21", "p < 0.03", "F(2,23) > 10", "p = 0.003") #' #' # Obtain the z-curve data object #' data <- zcurve_data(data) #' #' # inspect the resulting object #' data #' @seealso [zcurve()], [print.zcurve_data()], [head.zcurve_data()] zcurve_data <- function(data, id = NULL, rounded = TRUE, stat_precise = 2, p_precise = 3){ if(!is.character(data)){ stop("'data' must be a character vector") } if(is.null(id)){ id <- 1:length(data) }else if(is.vector(id) && length(data) == length(id)){ id <- as.numeric(as.factor(as.character(id))) }else{ stop("'id' must be a vector of the same length as the data") } data <- tolower(data) data <- gsub(" ", "", data) # deal with chi^2 data <- gsub("chi2", "c", data) data <- gsub("chi", "c", data) # extract the values stat_type <- substr(data, 1, 1) stat_val <- substr(data, regexpr("[=]|[<]|[>]", data) + 1, nchar(data)) stat_df1 <- ifelse(stat_type %in% c("t", "f", "c"), substr(data, regexpr("\\(", data) + 1, regexpr("[,]|[\\)]", data) - 1), NA) stat_df2 <- ifelse(stat_type == "f", substr(data, regexpr(",", data) + 1, regexpr("[\\)]", data) - 1), NA) censored <- grepl("<", data) | grepl(">", data) digits <- ifelse(regexpr("\\.", data) == -1, 0, nchar(data) - regexpr("\\.", data)) # check the input if(any(!stat_type %in% c("t", "z", "p", "f", "c"))) stop(paste0("Unknown test statistic: ", paste0("'", unique(stat_type[!stat_type %in% c("t", "z", "p", "f", "c")]),"'", collapse = ", "), ".")) # check that all matches are numeric stat_val <- tryCatch( as.numeric(stat_val), warning = function(w) stop(paste0("The following input could not be decoded: ", paste0("'", data[which(is.na(suppressWarnings(as.numeric(stat_val))))], "'", collapse = ", "), "."), call. = FALSE) ) stat_df1 <- tryCatch( as.numeric(stat_df1), warning = function(w) stop(paste0("The following input could not be decoded: ", paste0("'", data[which(is.na(suppressWarnings(as.numeric(stat_df1))))], "'", collapse = ", "), "."), call. = FALSE) ) stat_df2 <- tryCatch( as.numeric(stat_df2), warning = function(w) stop(paste0("The following input could not be decoded: ", paste0("'", data[which(is.na(suppressWarnings(as.numeric(stat_df2))))], "'", collapse = ", "), "."), call. 
= FALSE) ) # set rounding (0 = un-rounded due to automatic conversion) if(length(rounded) == 1 && !rounded){ # deal with the values as precise values rounded <- rep(-1, length(data)) }else if(length(rounded) == 1 && rounded){ # specify automatic rounding rounded <- rep(-1, length(data)) rounded[stat_type == "p" & digits < p_precise] <- digits[stat_type == "p" & digits < p_precise] rounded[stat_type != "p" & digits < stat_precise] <- digits[stat_type != "p" & digits < stat_precise] }else{ # use user specify rounding if(length(rounded) != length(data)) stop("The rounding indicator does not match the lenght of data input.") if(!is.numeric(rounded)) stop("The rounding indicator is not numeric.") if(any(rounded < 0)) stop("The rounding indicator must be non-negative.") } # prepare empty containers p_vals <- rep(NA, length(data)) p_vals.lb <- rep(NA, length(data)) p_vals.ub <- rep(NA, length(data)) # compute and allocate the p-values accordingly for(i in seq_along(data)){ if(rounded[i] == -1 && !censored[i]){ # precise non-censored values p_vals[i] <- tryCatch( switch( stat_type[i], "f" = stats::pf(stat_val[i], df1 = stat_df1[i], df2 = stat_df2[i], lower.tail = FALSE), "c" = stats::pchisq(stat_val[i], df = stat_df1[i], lower.tail = FALSE), "t" = stats::pt(abs(stat_val[i]), df = stat_df1[i], lower.tail = FALSE) * 2, "z" = stats::pnorm(abs(stat_val[i]), lower.tail = FALSE) * 2, "p" = stat_val[i] ), warning = function(w) stop(paste0("The following input could not be decoded: '", data[i], "'.")) ) }else if(rounded[i] == -1 && censored[i]){ # precise censored values p_vals.ub[i] <- tryCatch( switch( stat_type[i], "f" = stats::pf(stat_val[i], df1 = stat_df1[i], df2 = stat_df2[i], lower.tail = FALSE), "c" = stats::pchisq(stat_val[i], df = stat_df1[i], lower.tail = FALSE), "t" = stats::pt(abs(stat_val[i]), df = stat_df1[i], lower.tail = FALSE) * 2, "z" = stats::pnorm(abs(stat_val[i]), lower.tail = FALSE) * 2, "p" = stat_val[i] ), warning = function(w) stop(paste0("The following input could not be decoded: '", data[i], "'.")) ) p_vals.lb[i] <- 0 }else if(rounded[i] != -1 && !censored[i]){ # rounded non-censored values temp_stat_val.lb <- abs(stat_val[i]) - 0.5 * 10^-digits[i] temp_stat_val.ub <- abs(stat_val[i]) + 0.5 * 10^-digits[i] temp_stat_val.lb <- ifelse(temp_stat_val.lb < 0, 0, temp_stat_val.lb) p_vals.ub[i] <- tryCatch( switch( stat_type[i], "f" = stats::pf(temp_stat_val.lb , df1 = stat_df1[i], df2 = stat_df2[i], lower.tail = FALSE), "c" = stats::pchisq(temp_stat_val.lb, df = stat_df1[i], lower.tail = FALSE), "t" = stats::pt(temp_stat_val.lb, df = stat_df1[i], lower.tail = FALSE) * 2, "z" = stats::pnorm(temp_stat_val.lb, lower.tail = FALSE) * 2, "p" = stat_val[i] + 0.5 * 10^-digits[i] ), warning = function(w) stop(paste0("The following input could not be decoded: '", data[i], "'.")) ) p_vals.lb[i] <- tryCatch( switch( stat_type[i], "f" = stats::pf(temp_stat_val.ub, df1 = stat_df1[i], df2 = stat_df2[i], lower.tail = FALSE), "c" = stats::pchisq(temp_stat_val.ub, df = stat_df1[i], lower.tail = FALSE), "t" = stats::pt(temp_stat_val.ub, df = stat_df1[i], lower.tail = FALSE) * 2, "z" = stats::pnorm(temp_stat_val.ub, lower.tail = FALSE) * 2, "p" = stat_val[i] - 0.5 * 10^-digits[i] ), warning = function(w) stop(paste0("The following input could not be decoded: '", data[i], "'.")) ) }else if(rounded[i] != -1 && censored[i]){ # rounded censored values temp_stat_val.ub <- abs(stat_val[i]) + 0.5 * 10^-digits[i] p_vals.ub[i] <- tryCatch( switch( stat_type[i], "f" = stats::pf(temp_stat_val.lb , df1 = 
stat_df1[i], df2 = stat_df2[i], lower.tail = FALSE), "c" = stats::pchisq(temp_stat_val.lb, df = stat_df1[i], lower.tail = FALSE), "t" = stats::pt(temp_stat_val.lb, df = stat_df1[i], lower.tail = FALSE) * 2, "z" = stats::pnorm(temp_stat_val.lb, lower.tail = FALSE) * 2, "p" = stat_val[i] + 0.5 * 10^-digits[i] ), warning = function(w) stop(paste0("The following input could not be decoded: '", data[i], "'.")) ) p_vals.lb[i] <- 0 } } output <- list( precise = data.frame( "input" = data[!is.na(p_vals)], "p" = p_vals[!is.na(p_vals)], "id" = id[!is.na(p_vals)] ), censored = data.frame( "input" = data[!is.na(p_vals.lb)], "p.lb" = p_vals.lb[!is.na(p_vals.lb)], "p.ub" = p_vals.ub[!is.na(p_vals.ub)], "id" = id[!is.na(p_vals.lb)] ) ) class(output) <- "zcurve_data" return(output) } ### methods #' Prints a z-curve data object #' @param x z-curve data object #' @param ... Additional arguments #' @export print.zcurve_data #' @rawNamespace S3method(print, zcurve_data) #' @seealso [zcurve_data()] print.zcurve_data <- function(x, ...){ cat(paste0("Object of class z-curve data with ", nrow(x$precise), " precise and ", nrow(x$censored), " censored p-values.\n\n")) cat("Precise p-values:\n") print(x$precise, ...) cat("\n") cat("Censored p-values:\n") print(x$censored, ...) } #' Prints first few rows of a z-curve data object #' @param x z-curve data object #' @param ... Additional arguments #' @export head.zcurve_data #' @rawNamespace S3method(head, zcurve_data) #' @seealso [zcurve_data()] #' @importFrom utils head head.zcurve_data <- function(x, ...){ cat(paste0("Object of class z-curve data with ", nrow(x$precise), " precise and ", nrow(x$censored), " censored p-values.\n\n")) cat("Precise p-values:\n") print(head(x$precise, ...)) cat("\n") cat("Censored p-values:\n") print(head(x$censored, ...)) }
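# Illustrative note (added commentary, not part of the package code): the
# automatic de-rounding documented above can be checked interactively. With
# the default `rounded = TRUE`, a two-decimal p-value is treated as censored
# on its rounding interval, while `rounded = FALSE` keeps it as an exact value:
#
# zcurve_data("p = 0.03")$censored                  # interval (0.025, 0.035)
# zcurve_data("p = 0.03", rounded = FALSE)$precise  # exact p = 0.03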
/scratch/gouwar.j/cran-all/cranData/zcurve/R/data-preparation.R
#' @title Fit a z-curve #' #' @description \code{zcurve} is used to fit z-curve models. The function #' takes input of z-statistics or two-sided p-values and returns an object of #' class \code{"zcurve"} that can be further interrogated by the summary and plot #' functions. It defaults to the EM model, but different versions of z-curve can #' be specified using the \code{method} and \code{control} arguments. See #' 'Examples' and 'Details' for more information. #' #' @param z a vector of z-scores. #' @param p a vector of two-sided p-values, internally transformed to #' z-scores. #' @param data an object created with the [zcurve_data()] function. #' @param z.lb a vector with start of censoring intervals of censored z-scores. #' @param z.ub a vector with end of censoring intervals of censored z-scores. #' @param p.lb a vector with start of censoring intervals of censored two-sided p-values. #' @param p.ub a vector with end of censoring intervals of censored two-sided p-values. #' @param method the method to be used for fitting. Possible options are #' Expectation Maximization \code{"EM"} and density \code{"density"}, #' defaults to \code{"EM"}. #' @param bootstrap the number of bootstraps for estimating CI. To skip #' bootstrap specify \code{FALSE}. #' @param parallel whether the bootstrap should be performed in parallel. #' Defaults to \code{FALSE}. The implementation is not completely stable #' and might cause a connection error. #' @param control additional options for the fitting algorithm; more details in #' \link[=control_EM]{control EM} or \link[=control_density]{control density}. #' #' @details The function fits the EM version by default and changing #' \code{method = "density"} gives the KD2 version of z-curve as outlined in #' \insertCite{zcurve2;textual}{zcurve}. For the original z-curve #' \insertCite{zcurve1}{zcurve}, referred to as KD1, specify #' \code{'method = "density", control = list(model = "KD1")'}. #' #' @references #' \insertAllCited{} #' #' @return The fitted z-curve object #' @export zcurve #' #' @examples #' # load data from OSC 2015 reproducibility project #' OSC.z #' #' # fit an EM z-curve (with disabled bootstrap due to example run time limits) #' m.EM <- zcurve(OSC.z, method = "EM", bootstrap = FALSE) #' # a version with 1000 bootstrapped samples would look like: #' \donttest{m.EM <- zcurve(OSC.z, method = "EM", bootstrap = 1000)} #' #' # or KD2 z-curve (use larger bootstrap for real inference) #' m.D <- zcurve(OSC.z, method = "density", bootstrap = FALSE) #' #' # inspect the results #' summary(m.EM) #' summary(m.D) #' # see '?summary.zcurve' for more output options #' #' # plot the results #' plot(m.EM) #' plot(m.D) #' # see '?plot.zcurve' for more plotting options #' #' # to specify more options, set the control arguments #' # e.g., 
increase the maximum number of iterations and change alpha level #' ctr1 <- list( #' "max_iter" = 9999, #' "alpha" = .10 #' ) #' \dontrun{m1.EM <- zcurve(OSC.z, method = "EM", bootstrap = FALSE, control = ctr1)} #' # see '?control_EM' and '?control_density' for more information about different #' # z-curves specifications #' @seealso [summary.zcurve()], [plot.zcurve()], [control_EM], [control_density] zcurve <- function(z, z.lb, z.ub, p, p.lb, p.ub, data, method = "EM", bootstrap = 1000, parallel = FALSE, control = NULL){ if(!method %in% c("EM", "density")) stop("Wrong method, select a supported option") # set bootstrap if(!is.numeric(bootstrap)){ bootstrap <- FALSE }else if(bootstrap <= 0){ bootstrap <- FALSE } if(missing(data)){ # check input input_type <- NULL if((!missing(z.lb) & missing(z.ub)) | (!missing(z.ub) & missing(z.lb))) stop("Both lower and upper bound for z-scores needs to be supplied.") if((!missing(p.lb) & missing(p.ub)) | (!missing(p.ub) & missing(p.lb))) stop("Both lower and upper bound for p-values needs to be supplied.") if(missing(z) & missing(p) & missing(z.lb) & missing(p.lb)) stop("No data input") if(!missing(z)){ if(!is.numeric(z)) stop("Wrong z-scores input: Data are not nummeric.") if(!is.vector(z)) stop("Wrong z-scores input: Data are not a vector") if(all(z <= 1 & z >= 0)) stop("It looks like you are entering p-values rather than z-scores. To use p-values, explicitly name your argument 'zcurve(p = [vector of p-values])'") input_type <- c(input_type, "z") } if(!missing(p)){ if(!is.numeric(p)) stop("Wrong p-values input: Data are not nummeric.") if(!is.vector(p)) stop("Wrong p-values input: Data are not a vector") input_type <- c(input_type, "p") } }else if(inherits(data, "zcurve_data")){ input_type <- "zcurve-data" }else{ stop("The 'data' input must be created by the `zcurve_data()` function. 
See `?zcurve_data()` for more information.") } # create results object object <- NULL object$call <- match.call() object$method <- method object$input_type <- input_type # create control if(method == "EM"){ control <- .zcurve_EM.control(control) }else if(method == "density"){ control <- .zcurve_density.control(control) } ### prepare data if(missing(data)){ # get point estimates on the same scale if(!missing(z)){ z <- abs(z) }else{ z <- numeric() } if(!missing(p)){ z <- c(z, .p_to_z(p)) } # get censoring on the same scale if(!missing(z.lb)){ lb <- abs(z.lb) ub <- abs(z.ub) }else{ lb <- NULL ub <- NULL } if(!missing(p.lb)){ lb <- c(lb, .p_to_z(p.ub)) ub <- c(ub, .p_to_z(p.lb)) } # restrict censoring to the fitting range & treat extremely censored values as extremely significant values if(!is.null(lb)){ if(any(lb < control$a)) stop("All censored observations must be higher than the fitting range.") z <- c(z, lb[lb >= control$b]) ub <- ub[lb < control$b] lb <- lb[lb < control$b] } if(length(lb) > 0){ # restrict the upper censoring to the fitting range ub <- ifelse(ub > control$b, control$b, ub) # update control if(method == "EM"){ control$type <- 3 }else if(method == "density"){ stop("Censoring is not available for the density algorithm.") } } object$data <- z object$data_censoring <- data.frame(lb = lb, ub = ub) }else{ if(nrow(data$precise) != 0){ object$data <- .p_to_z(data$precise$p) }else{ object$data <- numeric() } if(nrow(data$censored) != 0){ lb <- .p_to_z(data$censored$p.ub) ub <- .p_to_z(data$censored$p.lb) # remove non-significant censored p-values if(any(lb < control$a)){ warning(paste0(sum(lb < control$a), " censored p-values removed due to the upper bound being larger that the fitting range."), immediate. = TRUE, call. = FALSE) ub <- ub[lb >= control$a] lb <- lb[lb >= control$a] } # move too significant censored p-values among precise p-values if(length(lb) > 0 && any(lb >= control$b)){ object$data <- c(object$data, lb[lb >= control$b]) ub <- ub[lb < control$b] lb <- lb[lb < control$b] } if(length(lb) > 0){ # restrict the upper censoring to the fitting range ub <- ifelse(ub > control$b, control$b, ub) object$data_censoring <- data.frame(lb = lb, ub = ub) # update control if(method == "EM"){ control$type <- 3 }else if(method == "density"){ stop("Censoring is not available for the density algorithm.") } }else{ object$data_censoring <- data.frame(lb = NULL, ub = NULL) } }else{ object$data_censoring <- data.frame(lb = NULL, ub = NULL) } } object$control <- control # only run the algorithm with some significant results if(sum(object$data > control$a & object$data < control$b) + nrow(object$data_censoring) < 10) stop("There must be at least 10 z-scores in the fitting range but a much larger number is recommended.") # use appropriate algorithm if(method == "EM"){ fit <- .zcurve_EM(z = object$data, lb = object$data_censoring$lb, ub = object$data_censoring$ub, control = control) }else if(method == "density"){ fit <- .zcurve_density(z = object$data, control = control) } object$fit <- fit # check convergence if(method == "EM"){ object$converged <- ifelse(fit$iter < control$max_iter, TRUE, FALSE) }else if(method == "density"){ object$converged <- fit$converged if(fit$message == "singular convergence (7)") object$converged <- TRUE if(fit$message == "both X-convergence and relative convergence (5)") object$converged <- TRUE } if(object$converged == FALSE) warning("Model did not converge.") # do bootstrap if(bootstrap != FALSE){ # use apropriate algorithm if(method == "EM"){ if(parallel){ 
fit_boot <- .zcurve_EM_boot.par(z = object$data, lb = object$data_censoring$lb, ub = object$data_censoring$ub, control = control, fit = fit, bootstrap = bootstrap) }else{ fit_boot <- .zcurve_EM_boot(z = object$data, lb = object$data_censoring$lb, ub = object$data_censoring$ub, control = control, fit = fit, bootstrap = bootstrap) } }else if(method == "density"){ if(parallel){ fit_boot <- .zcurve_density_boot.par(z = object$data, control = control, bootstrap = bootstrap) }else{ fit_boot <- .zcurve_density_boot(z = object$data, control = control, bootstrap = bootstrap) } } object$boot <- fit_boot } # estimates object$coefficients <- .get_estimates(mu = fit$mu, weights = fit$weights, prop_high = fit$prop_high, sig_level = control$sig_level, a = control$a) # boot estimates if(bootstrap != FALSE){ object$coefficients_boot <- data.frame(t(sapply(1:bootstrap, function(i){ .get_estimates(mu = fit_boot$mu[i,], weights = fit_boot$weights[i,], prop_high = fit_boot$prop_high[i], sig_level = control$sig_level, a = control$a) }))) } class(object) <- "zcurve" return(object) } ### methods #' Prints a fitted z-curve object #' @param x Fitted z-curve object #' @param ... Additional arguments #' @export print.zcurve #' @rawNamespace S3method(print, zcurve) #' @seealso [zcurve()] print.zcurve <- function(x, ...){ cat("Call:\n") print(x$call) cat("\nEstimates:\n") print(x$coefficients[1:2]) } #' Summarize fitted z-curve object #' #' @param object A fitted z-curve object. #' @param type Whether the results \code{"results"} or the #' mixture mode parameters \code{"parameters"} should be #' returned. Defaults to \code{"results"}. #' @param all Whether additional results, such as file drawer #' ration, expected and missing number of studies, and Soric FDR #' be returned. Defaults to \code{FALSE} #' @param ERR.adj Confidence intervals adjustment for ERR. Defaults #' to \code{.03} as proposed by Bartos & Schimmack (in preparation). #' @param EDR.adj Confidence intervals adjustment for EDR. Defaults #' to \code{.05} as proposed by Bartos & Schimmack (in preparation). #' @param round.coef To how many decimals should the coefficient #' be rounded. Defaults to \code{3}. #' @param ... 
Additional arguments #' #' @return Summary of a z-curve object #' #' @method summary zcurve #' @export summary.zcurve #' @rawNamespace S3method(summary, zcurve) #' @seealso [zcurve()] summary.zcurve <- function(object, type = "results", all = FALSE, ERR.adj = .03, EDR.adj = .05, round.coef = 3, ...){ if(substr(object$method, 1, 2) == "EM"){ if(!is.null(object$boot)){ fit_index <- c(object$fit$Q, unname(stats::quantile(object$boot$Q, c(.025, .975)))) }else{ fit_index <- c(object$fit$Q) } method_text <- object$method iter_text <- paste(c(object$fit$iter_start," + ",object$fit$iter), collapse = "") fit_stat <- "Q" }else if(object$method == "density"){ if(!is.null(object$boot)){ fit_index <- c(object$fit$objective, unname(stats::quantile(object$boot$objective, c(.025, .975)))) }else{ fit_index <- c(object$fit$objective) } if(object$control$version == 1){ method_text <- paste(c(object$method, " (version 1)"), collapse = "") fit_stat <- "MAE (*1e3)" fit_index <- fit_index*1e3 }else{ method_text <- object$method fit_stat <- "RMSE" } iter_text <- object$fit$iter } temp_N_sig <- sum(object$data > stats::qnorm(object$control$sig_level/2, lower.tail = FALSE)) + nrow(object$data_censoring[object$data_censoring$lb > object$control$a,]) temp_N_obs <- length(object$data) + nrow(object$data_censoring) temp_N_used <- sum(object$data > object$control$a & object$data < object$control$b) + nrow(object$data_censoring[object$data_censoring$lb > object$control$a & object$data_censoring$lb < object$control$b ,]) model <- list( "method" = method_text, "model" = ifelse(is.null(object$control$model), "custom", object$control$model), "fit_index" = fit_index, "fit_stat" = fit_stat, "iter" = iter_text, "input_type"= object$input_type, "N_all" = temp_N_obs, "N_sig" = temp_N_sig, "N_used" = temp_N_used ) if(type == "results" | substr(type,1,3) == "res"){ if(!is.null(object$boot)){ l.CI <- c(stats::quantile(object$coefficients_boot$ERR, .025), stats::quantile(object$coefficients_boot$EDR, .025)) u.CI <- c(stats::quantile(object$coefficients_boot$ERR, .975), stats::quantile(object$coefficients_boot$EDR, .975)) }else{ l.CI <- NULL u.CI <- NULL } TAB <- cbind(Estimate = stats::coef(object)[1:2], l.CI = l.CI, u.CI = u.CI) # adjust CIs if(!is.null(object$boot)){ TAB["ERR", "l.CI"] <- ifelse(TAB["ERR", "l.CI"] - ERR.adj < object$control$sig_level/2, object$control$sig_level/2, TAB["ERR", "l.CI"] - ERR.adj) TAB["ERR", "u.CI"] <- ifelse(TAB["ERR", "u.CI"] + ERR.adj > 1, 1, TAB["ERR", "u.CI"] + ERR.adj) TAB["EDR", "l.CI"] <- ifelse(TAB["EDR", "l.CI"] - EDR.adj < object$control$sig_level, object$control$sig_level, TAB["EDR", "l.CI"] - EDR.adj) TAB["EDR", "u.CI"] <- ifelse(TAB["EDR", "u.CI"] + EDR.adj > 1, 1, TAB["EDR", "u.CI"] + EDR.adj) } # additional stats if(all){ temp_sig_level <- object$control$sig_level if(!is.null(object$boot)){ TAB <- rbind( TAB, "Soric FDR" = c( "Estimate" = .get_Soric_FDR(TAB["EDR","Estimate"], temp_sig_level), "l.CI" = .get_Soric_FDR(TAB["EDR","u.CI"], temp_sig_level), "u.CI" = .get_Soric_FDR(TAB["EDR","l.CI"], temp_sig_level)), "File Drawer R" = c( "Estimate" = .get_file_drawer_R(TAB["EDR","Estimate"]), "l.CI" = .get_file_drawer_R(TAB["EDR","u.CI"]), "u.CI" = .get_file_drawer_R(TAB["EDR","l.CI"])), "Expected N" = c( "Estimate" = .get_expected_N(TAB["EDR","Estimate"], temp_N_sig), "l.CI" = .get_expected_N(TAB["EDR","u.CI"], temp_N_sig), "u.CI" = .get_expected_N(TAB["EDR","l.CI"], temp_N_sig)), "Missing N" = c( "Estimate" = .get_missing_N(TAB["EDR","Estimate"], temp_N_sig, temp_N_obs), "l.CI" = 
.get_missing_N(TAB["EDR","u.CI"], temp_N_sig, temp_N_obs), "u.CI" = .get_missing_N(TAB["EDR","l.CI"], temp_N_sig, temp_N_obs)) ) }else{ TAB <- rbind( TAB, "Soric FDR" = c("Estimate" = .get_Soric_FDR(TAB["EDR","Estimate"], temp_sig_level)), "File Drawer R" = c("Estimate" = .get_file_drawer_R(TAB["EDR","Estimate"])), "Expected N" = c("Estimate" = .get_expected_N(TAB["EDR","Estimate"], temp_N_sig)), "Missing N" = c("Estimate" = .get_missing_N(TAB["EDR","Estimate"], temp_N_sig, temp_N_obs))) } } if(object$method == "density"){ if(object$control$version == 1){ TAB <- as.data.frame(TAB[1,,drop=FALSE]) } } }else if(type == "parameters" | substr(type,1,3) == "par"){ if(!is.null(object$boot)){ if(object$method == "density"){ if(object$control$version == 1){ M_l.CI <- apply(object$boot$mu,2,stats::quantile, prob = .025) M_u.CI <- apply(object$boot$mu,2,stats::quantile, prob = .975) }else{ M_l.CI <- NULL M_u.CI <- NULL } }else{ M_l.CI <- NULL M_u.CI <- NULL } W_l.CI <- apply(object$boot$weights,2,stats::quantile, prob = .025) W_u.CI <- apply(object$boot$weights,2,stats::quantile, prob = .975) }else{ M_l.CI <- NULL M_u.CI <- NULL W_l.CI <- NULL W_u.CI <- NULL } TAB <- cbind('Mean ' = object$fit$mu, 'l.CI' = M_l.CI, 'u.CI ' = M_u.CI, 'Weight' = object$fit$weights, 'l.CI' = W_l.CI, 'u.CI' = W_u.CI) rownames(TAB) <- as.character(1:length(object$fit$mu)) } res <- list(call = object$call, coefficients = TAB, model = model, converged = object$converged, round.coef = round.coef) class(res) <- "summary.zcurve" return(res) } #' Prints summary object for z-curve method #' @param x Summary of a z-curve object #' @param ... Additional arguments #' @method print.summary zcurve #' @export print.summary.zcurve #' @rawNamespace S3method(print, summary.zcurve) #' @seealso [zcurve()] print.summary.zcurve <- function(x, ...){ cat("Call:\n") print(x$call) cat("\n") cat(paste(c("model: ",x$model$model, " via ", x$model$method, "\n"), collapse = "")) cat("\n") #stats::printCoefmat(x$coefficients, digits = 2, # cs.ind = c(1:ncol(x$coefficients)), tst.ind = integer(), zap.ind = integer()) temp_to_int <- !rownames(x$coefficients) %in% c("Expected N", "Missing N") temp_coef <- x$coefficients if(length(temp_to_int) != 0){ temp_coef[temp_to_int,] <- apply(as.data.frame(x$coefficients[temp_to_int,]), 2, function(p).rXd(p, X = x$round.coef)) temp_coef[!temp_to_int,] <- round(x$coefficients[!temp_to_int,]) } print(temp_coef,quote = FALSE, right = T) if(length(x$model$fit_index) > 1){ fit_index_CI <- paste(c(", 95% CI[", .r2d(x$model$fit_index[2]), ", ", .r2d(x$model$fit_index[3]),"]"), collapse = "") }else{ fit_index_CI <- NULL } cat("\n") if(x$converged){ cat(paste(c("Model converged in ", x$model$iter, " iterations", "\n"), collapse = "")) }else{ cat(paste(c("\033[0;31m", "Model did not converge in ", x$model$iter, " iterations", "\033[0m", "\n"), collapse = "")) } obs_proportion <- stats::prop.test(x$model$N_sig, x$model$N_all) cat(paste0("Fitted using ", x$model$N_used, " ", paste(x$model$input_type, collapse = " and "), "-values. ", x$model$N_all, " supplied, ", x$model$N_sig, " significant (ODR = ", .r2d(obs_proportion$estimate), ", 95% CI [", .r2d(obs_proportion$conf.int[1]), ", ", .r2d(obs_proportion$conf.int[2]), "]).\n")) cat(paste(c(x$model$fit_stat," = " , .r2d(x$model$fit_index[1]), fit_index_CI, "\n"), collapse = "")) } #' Plot fitted z-curve object #' #' @param x Fitted z-curve object #' @param annotation Add annotation to the plot. Defaults #' to \code{FALSE}. 
#' @param CI Plot confidence intervals for the estimated z-curve. Defaults
#' to \code{FALSE}.
#' @param extrapolate Scale the chart to the extrapolated area. Defaults
#' to \code{FALSE}.
#' @param plot_type Type of plot to be produced. Defaults to \code{"base"}
#' for the base plotting function. An alternative is \code{"ggplot"} for a
#' ggplot2 plot.
#' @param y.anno A vector of length 8 specifying the y-positions
#' of the individual annotation lines relative to the figure's height.
#' Defaults to \code{c(.95, .88, .78, .71, .61, .53, .43, .35)}
#' @param x.anno A number specifying the x-position of the block
#' of annotations relative to the figure's width.
#' @param cex.anno A number specifying the size of the annotation text.
#' @param ... Additional arguments including \code{main}, \code{xlab},
#' \code{ylab}, \code{xlim}, \code{ylim}, \code{cex.axis}, \code{cex.lab}
#'
#' @method plot zcurve
#' @export plot.zcurve
#' @rawNamespace S3method(plot, zcurve)
#'
#' @examples \dontrun{
#' # simulate some z-statistics and fit a z-curve
#' z <- abs(rnorm(300,3))
#' m.EM <- zcurve(z, method = "EM", bootstrap = 100)
#'
#' # plot the z-curve
#' plot(m.EM)
#'
#' # add annotation text and model fit CI
#' plot(m.EM, annotation = TRUE, CI = TRUE)
#'
#' # change the location of the annotation to the left
#' plot(m.EM, annotation = TRUE, CI = TRUE, x.anno = 0)
#' }
#' @seealso [zcurve()]
plot.zcurve <- function(x, annotation = FALSE, CI = FALSE, extrapolate = FALSE, plot_type = "base",
                        y.anno = c(.95, .88, .78, .71, .61, .53, .43, .35), x.anno = .6, cex.anno = 1, ...){

  if(is.null(x$boot))
    CI <- FALSE

  if(substr(tolower(plot_type), 1, 1) == "b"){
    plot_type <- "base"
  }else if(substr(tolower(plot_type), 1, 1) == "g"){
    plot_type <- "ggplot"
  }else{
    stop("Unrecognized `plot_type` argument. The possible options are `base` and `ggplot`.")
  }

  additional <- list(...)
if(is.null(additional$main)){ main <- paste(c("z-curve (", ifelse(is.null(x$control$model), "custom", x$control$model), " via ", x$method, ")"), collapse = "") }else{ main <- additional$main } if(is.null(additional$xlab)){ xlab <- "z-scores" }else{ xlab <- additional$xlab } if(is.null(additional$ylab)){ ylab <- "Density" }else{ ylab <- additional$ylab } if(is.null(additional$xlim)){ xlim <- NULL }else{ xlim <- additional$xlim } if(is.null(additional$ylim)){ ylim <- NULL }else{ ylim <- additional$ylim } if(is.null(additional$cex.axis)){ cex.axis <- 1 }else{ cex.axis <- additional$cex.axis } if(is.null(additional$cex.lab)){ cex.lab <- 1 }else{ cex.lab <- additional$cex.lab } # set breaks for the histogram br1 <- seq(x$control$a, x$control$b, .20) br2 <- seq(0, x$control$a, .20) # change the last break accordingly to the cut points br1[length(br1)] <- x$control$b br2[length(br2)] <- x$control$a # use histograms to get counts in each bin h1 <- graphics::hist(x$data[x$data > x$control$a & x$data < x$control$b], breaks = br1, plot = F) # add censored observations to the histogram if(nrow(x$data_censoring) != 0){ # spread the censored observation across the z-values cen_counts <- do.call(rbind, lapply(1:nrow(x$data_censoring), function(i){ temp_counts <- (x$data_censoring$lb[i] < h1$breaks[-length(h1$breaks)] & x$data_censoring$ub[i] > h1$breaks[-length(h1$breaks)]) temp_counts[temp_counts] <- 1/sum(temp_counts) return(temp_counts) })) cen_counts <- apply(cen_counts, 2, sum) # add the counts and standardize the density h1$counts <- h1$counts + cen_counts h1$density <- (h1$counts / sum(h1$counts)) * (length(h1$counts)/(x$control$b - x$control$a)) } # add histogram for non-sig results if(length(x$data[x$data < x$control$a])){ h2 <- graphics::hist(x$data[x$data < x$control$a], breaks = br2, plot = F) # scale the density of non-significant z-scores appropriately to the first one h2$density <- h2$density * (x$control$a/(x$control$b - x$control$a)) h2$density <- h2$density/( (length(x$data[x$data > x$control$a & x$data < x$control$b])/(x$control$b - x$control$a)) / (length(x$data[x$data < x$control$a])/(x$control$a)) ) }else{ h2 <- NULL } # compute fitted z-curve density x_seq <- seq(0, x$control$b, .01) y_den <- sapply(1:length(x$fit$mu), function(i){ x$fit$weights[i]*exp(.zdist_lpdf(x_seq, x$fit$mu[i], 1, x$control$a, x$control$b)) }) y_den <- apply(y_den, 1, sum) # and the piecewise confidence intervals if(CI & !is.null(x$boot)){ y_den_boot <- sapply(1:nrow(x$boot$mu),function(b){ y_den <- sapply(1:length(x$boot$mu[b,]), function(i){ x$boot$weights[b,i]*exp(.zdist_lpdf(x_seq, x$boot$mu[b,i], 1, x$control$a, x$control$b)) }) y_den <- apply(y_den, 1, sum) }) y_den_l.CI <- apply(y_den_boot, 1, stats::quantile, prob = .025) y_den_u.CI <- apply(y_den_boot, 1, stats::quantile, prob = .975) } ### setting of the axis and values for text allignment x_max <- x$control$b x.anno <- x_max*x.anno if(extrapolate){ y_max <- max(c(y_den, h1$density, h2$density)) }else{ y_max <- max(c(h1$density, h2$density)) } # adjusting the height of the chart so the text is higher than the highest ploted thing in the x-range of the text if(annotation & CI){ y_max <- ifelse(max(c(y_den_u.CI[x.anno < x_seq], h1$density[x.anno < h1$breaks[-length(h1$density)]])) > y_max*(y.anno[length(y.anno)] - .025), max(c(y_den_u.CI[x.anno < x_seq], h1$density[x.anno < h1$breaks[-length(h1$density)]]))/(y.anno[length(y.anno)] - .025), y_max) }else if(annotation & !CI){ y_max <- ifelse(max(c(y_den[x.anno < x_seq], h1$density[x.anno < 
h1$breaks[-length(h1$density)]])) > y_max*(y.anno[length(y.anno)] - .025), max(c(y_den[x.anno < x_seq], h1$density[x.anno < h1$breaks[-length(h1$density)]]))/(y.anno[length(y.anno)] - .025), y_max) } # overwrite xmin, xmax, ymin, ymax if xlim and ylim are specified if(!is.null(ylim)){ y_min <- ylim[1] y_max <- ylim[2] }else{ y_min <- 0 } if(!is.null(xlim)){ x_min <- xlim[1] x_max <- xlim[2] }else{ x_min <- 0 } if(plot_type == "base"){ # plot z-scores used for fitting graphics::plot(h1, freq = FALSE, density = 0, angle = 0, border = "blue", xlim = c(x_min, x_max), ylim = c(y_min, y_max), ylab = ylab, xlab = xlab, main = main, cex.lab = cex.lab, cex.axis = cex.axis, lwd = 1, las = 1) # and un-used z-scores if(!is.null(h2)){ graphics::par(new=TRUE) graphics::plot(h2, freq = FALSE, density = 0, angle = 0, border ="grey30", xlim = c(x_min, x_max), ylim = c(y_min, y_max), axes = FALSE, ann = FALSE, lwd = 1, las = 1) } # add the density estimate if the model was estimated by density if(x$method == "density"){ graphics::lines(x$fit$density$x, x$fit$density$y, lty = 1, col = "grey60", lwd = 4) } # significance line if(x.anno*x_max < x$control$a){ graphics::lines(rep(x$control$a,2), c(0, (min(y.anno) - .025)*y_max), col = "blue", lty = 2, lwd = 1) graphics::lines(rep(stats::qnorm(x$control$sig_level/2, lower.tail = FALSE),2), c(0, (min(y.anno) - .025)*y_max), col = "red", lty = 1, lwd = 2) }else{ graphics::abline(v = x$control$a, col = "blue", lty = 2, lwd = 1) graphics::abline(v = stats::qnorm(x$control$sig_level/2, lower.tail = FALSE), col = "red", lty = 1, lwd = 2) } # predicted densities graphics::lines(x_seq, y_den, lty = 1, col = "blue", lwd = 5) if(CI & !is.null(x$boot)){ graphics::lines(x_seq, y_den_l.CI, lty = 3, col = "blue", lwd = 3) graphics::lines(x_seq, y_den_u.CI, lty = 3, col = "blue", lwd = 3) } # add annotation if(annotation){ x_summary <- summary(x) graphics::text(x.anno, y_max*y.anno[1] , paste0("Range: ",.r2d(min(x$data))," to ",.r2d(max(x$data))), adj = c(0, 0), cex = cex.anno) graphics::text(x.anno, y_max*y.anno[2] , paste0(x_summary$model$N_all, " tests, ", x_summary$model$N_sig, " significant"), adj = c(0, 0), cex = cex.anno) obs_proportion <- stats::prop.test(x_summary$model$N_sig, x_summary$model$N_all) graphics::text(x.anno, y_max*y.anno[3] , paste0("Observed discovery rate:"), adj = c(0, 0), cex = cex.anno) graphics::text(x.anno, y_max*y.anno[4] , paste0(.r2d(obs_proportion$estimate), " 95% CI [", .r2d(obs_proportion$conf.int[1]), " ,", .r2d(obs_proportion$conf.int[2]), "]"), adj = c(0, 0), cex = cex.anno) if(!is.null(x$boot)){ graphics::text(x.anno, y_max*y.anno[5] , paste0("Expected discovery rate:"), adj = c(0, 0), cex = cex.anno) graphics::text(x.anno, y_max*y.anno[6] , paste0(.r2d(x_summary$coefficients["EDR","Estimate"]), " 95% CI [", .r2d(x_summary$coefficients["EDR","l.CI"]), " ,", .r2d(x_summary$coefficients["EDR","u.CI"]), "]"), adj = c(0, 0), cex = cex.anno) graphics::text(x.anno, y_max*y.anno[7] , paste0("Expected replicability rate:"), adj = c(0, 0), cex = cex.anno) graphics::text(x.anno, y_max*y.anno[8] , paste0(.r2d(x_summary$coefficients["ERR","Estimate"]), " 95% CI [", .r2d(x_summary$coefficients["ERR","l.CI"]), " ,", .r2d(x_summary$coefficients["ERR","u.CI"]), "]"), adj = c(0, 0), cex = cex.anno) }else{ graphics::text(x.anno, y_max*y.anno[5] , paste0("Expected discovery rate:"), adj = c(0, 0), cex = cex.anno) graphics::text(x.anno, y_max*y.anno[6] , paste0(.r2d(x_summary$coefficients["EDR","Estimate"])), adj = c(0, 0), cex = cex.anno) 
graphics::text(x.anno, y_max*y.anno[7] , paste0("Expected replicability rate:"), adj = c(0, 0), cex = cex.anno) graphics::text(x.anno, y_max*y.anno[8] , paste0(.r2d(x_summary$coefficients["ERR","Estimate"])), adj = c(0, 0), cex = cex.anno) } } return(invisible()) }else if(plot_type == "ggplot"){ out <- ggplot2::ggplot() + ggplot2::scale_x_continuous( name = xlab, breaks = pretty(c(x_min, x_max)), limits = c(x_min, x_max)) + ggplot2::scale_y_continuous( name = ylab, breaks = pretty(c(y_min, y_max)), limits = c(y_min, y_max)) + ggplot2::ggtitle(main) + ggplot2::theme_classic() # add significant z-scores out <- out + ggplot2::geom_rect( data = data.frame( xmin = h1$breaks[-length(h1$breaks)], xmax = h1$breaks[-1], ymin = 0, ymax = h1$density), mapping = ggplot2::aes( xmin = .data[["xmin"]], xmax = .data[["xmax"]], ymin = .data[["ymin"]], ymax = .data[["ymax"]]), fill = "white", col = "blue") # add non-significant z-scores (if any) if(!is.null(h2)){ out <- out + ggplot2::geom_rect( data = data.frame( xmin = h2$breaks[-length(h2$breaks)], xmax = h2$breaks[-1], ymin = 0, ymax = h2$density), mapping = ggplot2::aes( xmin = .data[["xmin"]], xmax = .data[["xmax"]], ymin = .data[["ymin"]], ymax = .data[["ymax"]]), fill = "white", col = "grey") } # add the density estimate if the model was estimated by density if(x$method == "density"){ out <- out + ggplot2::geom_line( data = data.frame( x = x$fit$density$x, y = x$fit$density$y ), mapping = ggplot2::aes( x = .data[["x"]], y = .data[["y"]] ), linewidth = 1, col = "grey60", linetype = 4) } # significance line if(x.anno*x_max < x$control$a){ #do not overdraw the annotation in case it is in the way out <- out + ggplot2::geom_line( data = data.frame( x = rep(x$control$a,2), y = c(0, (min(y.anno) - .025)*y_max) ), mapping = ggplot2::aes( x = .data[["x"]], y = .data[["y"]] ), linewidth = 1, col = "blue", linetype = 1) out <- out + ggplot2::geom_line( data = data.frame( x = rep(stats::qnorm(x$control$sig_level/2, lower.tail = FALSE),2), y = c(0, (min(y.anno) - .025)*y_max) ), mapping = ggplot2::aes( x = .data[["x"]], y = .data[["y"]] ), linewidth = 1.5, col = "red", linetype = 2) }else{ out <- out + ggplot2::geom_vline( xintercept = x$control$a, linewidth = 1, col = "blue", linetype = 1) out <- out + ggplot2::geom_vline( xintercept = stats::qnorm(x$control$sig_level/2, lower.tail = FALSE), linewidth = 1.5, col = "red", linetype = 2) } # predicted densities out <- out + ggplot2::geom_line( data = data.frame( x = x_seq, y = y_den ), mapping = ggplot2::aes( x = .data[["x"]], y = .data[["y"]] ), linewidth = 2, col = "blue", linetype = 1) if(CI & !is.null(x$boot)){ out <- out + ggplot2::geom_line( data = data.frame( x = x_seq, y = y_den_l.CI ), mapping = ggplot2::aes( x = .data[["x"]], y = .data[["y"]] ), linewidth = 1.75, col = "blue", linetype = 3) out <- out + ggplot2::geom_line( data = data.frame( x = x_seq, y = y_den_u.CI ), mapping = ggplot2::aes( x = .data[["x"]], y = .data[["y"]] ), linewidth = 1.75, col = "blue", linetype = 3) } # add annotation if(annotation){ x_summary <- summary(x) obs_proportion <- stats::prop.test(x_summary$model$N_sig, x_summary$model$N_all) ggplot2_base_size <- 5 out <- out + ggplot2::geom_text( data = data.frame( x = x.anno, y = y_max*y.anno[1:4], label = c( paste0("Range: ",.r2d(min(x$data))," to ",.r2d(max(x$data))), paste0(x_summary$model$N_all, " tests, ", x_summary$model$N_sig, " significant"), paste0("Observed discovery rate:"), paste0(.r2d(obs_proportion$estimate), " 95% CI [", .r2d(obs_proportion$conf.int[1]), " ,", 
.r2d(obs_proportion$conf.int[2]), "]") ) ), mapping = ggplot2::aes( x = .data[["x"]], y = .data[["y"]], label = .data[["label"]]), hjust = 0, vjust = 0, size = ggplot2_base_size*cex.anno) if(!is.null(x$boot)){ out <- out + ggplot2::geom_text( data = data.frame( x = x.anno, y = y_max*y.anno[5:8], label = c( paste0("Expected discovery rate:"), paste0(.r2d(x_summary$coefficients["EDR","Estimate"]), " 95% CI [", .r2d(x_summary$coefficients["EDR","l.CI"]), " ,", .r2d(x_summary$coefficients["EDR","u.CI"]), "]"), paste0("Expected replicability rate:"), paste0(.r2d(x_summary$coefficients["ERR","Estimate"]), " 95% CI [", .r2d(x_summary$coefficients["ERR","l.CI"]), " ,", .r2d(x_summary$coefficients["ERR","u.CI"]), "]") ) ), mapping = ggplot2::aes( x = .data[["x"]], y = .data[["y"]], label = .data[["label"]]), hjust = 0, vjust = 0, size = ggplot2_base_size*cex.anno) }else{ out <- out + ggplot2::geom_text( data = data.frame( x = x.anno, y = y_max*y.anno[5:8], label = c( paste0("Expected discovery rate:"), paste0(.r2d(x_summary$coefficients["EDR","Estimate"])), paste0("Expected replicability rate:"), paste0(.r2d(x_summary$coefficients["ERR","Estimate"])) ) ), mapping = ggplot2::aes( x = .data[["x"]], y = .data[["y"]], label = .data[["label"]]), hjust = 0, vjust = 0, size = ggplot2_base_size*cex.anno) } } return(out) } } #' @title Z-scores from subset of original studies featured in OSC 2015 #' reproducibility project #' #' @description The dataset contains z-scores from subset of original #' studies featured in psychology reproducibility project #' \insertCite{osc}{zcurve}. Only z-scores from studies with unambiguous #' original outcomes are supplied (eliminating 7 studies with marginally #' significant results). The real replication rate for those studies is #' 35/90 (the whole project reports 36/97). #' #' @format A vector with 90 observations #' #' @references #' \insertAllCited{} "OSC.z" # cleaning .onUnload <- function (libpath) { library.dynam.unload("zcurve", libpath) }
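## Illustrative end-to-end sketch of the user-facing workflow defined in this file
## (fit with `zcurve()`, then `summary()` and `plot()`); wrapped in `if (FALSE)` so
## nothing runs when the file is sourced. The simulated data, seed, and number of
## bootstrap samples are arbitrary demonstration values.
if (FALSE) {
  set.seed(1)
  z <- abs(rnorm(300, 3))                    # simulated, mostly significant z-scores
  m <- zcurve(z, method = "EM", bootstrap = 100)

  summary(m)                                 # ERR / EDR with bootstrap CIs
  summary(m, all = TRUE)                     # adds Soric FDR, file drawer ratio, expected / missing N
  summary(m, type = "parameters")            # mixture component means and weights
  plot(m, annotation = TRUE, CI = TRUE)      # base-graphics plot with annotation
  plot(m, plot_type = "ggplot")              # ggplot2 version of the same plot
}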
/scratch/gouwar.j/cran-all/cranData/zcurve/R/main.R
#### helper tools for export #' @title Compute power corresponding to z-scores #' @description A function for computing power of two-sided tests #' corresponding to z-scores for a given significance level. #' \code{alpha} (or corresponding cut-off z-score \code{a}) #' #' @param z A vector of z-scores #' @param alpha Level of significance alpha #' @param a Or, alternatively a z-score corresponding to \code{alpha} #' @param two.sided Whether directionality of the effect size should be taken into account. #' #' @export z_to_power #' #' @examples # mean powers corresponding to the mean components of KD2 #' z_to_power(0:6, alpha = .05) z_to_power <- function(z, alpha = .05, a = stats::qnorm(alpha/2,lower.tail = FALSE), two.sided = TRUE){ if(!all(sapply(z, function(x)x >= 0)))stop("z must be >= 0") if(a < 0)stop("a must be >= 0") if(is.null(a) & is.null(alpha))stop("Either 'alpha' or 'a' must be provided") if(is.null(alpha) & !is.null(a))alpha <- stats::pnorm(a, lower.tail = FALSE)*2 if(alpha < 0 | alpha > 1)stop("alpha must be >= 0 & <= 1") if(two.sided){ return(1 - stats::pnorm(a, z, 1) + stats::pnorm(-a, z, 1)) }else{ return(1 - stats::pnorm(a, z, 1)) } } #' @title Compute z-score corresponding to a power #' @description A function for computing z-scores of two-sided tests #' corresponding to power \code{power} for a given significance level #' alpha \code{alpha} (or corresponding cut-off z-statistic \code{a}). #' #' @param power A vector of powers #' @param alpha Level of significance alpha #' @param a Or, alternatively a z-score corresponding to \code{alpha} #' @param two.sided Whether directionality of the effect size should be taken into account. #' @param nleqslv_control A named list of control parameters passed to the #' \link[nleqslv]{nleqslv} function used for solving the inverse of #' \link[=z_to_power]{z_to_power} function. 
#' #' @export power_to_z #' #' @examples # z-scores corresponding to the (aproximate) power of components of EM2 #' power_to_z(c(0.05, 0.20, 0.40, 0.60, 0.80, 0.974, 0.999), alpha = .05) power_to_z <- function(power, alpha = .05, a = stats::qnorm(alpha/2,lower.tail = FALSE), two.sided = TRUE, nleqslv_control = list(xtol = 1e-15, maxit = 300, stepmax = .5)){ if(a < 0)stop("a must be >= 0") if(is.null(a) & is.null(alpha))stop("Either 'alpha' or 'a' must be provided") if(is.null(alpha) & !is.null(a))alpha <- stats::pnorm(a, lower.tail = FALSE)*2 if(alpha < 0 | alpha > 1)stop("alpha must be >= 0 & <= 1") if(!all(sapply(power, function(x)x >= alpha & x <= 1)))stop("power must be >= alpha & <= 1") sapply(power, function(pow)nleqslv::nleqslv(.5, .solve_power_to_z, power = pow, a = a, two.sided = two.sided, control = nleqslv_control)$x) } .solve_power_to_z <- function(x, power, a, two.sided){ y = numeric(1) y = z_to_power(z = x, a = a, two.sided = two.sided) - power y } ### internal tools for the results computation .p_to_z <- function(p){ if(!all(sapply(p, function(x)x >= 0 & x <= 1)))stop("p-values must be >= 0 & <= 1") stats::qnorm(p/2, lower.tail = F) } .get_pop_weights <- function(weights, mu, a){ scaling_power <- c(z_to_power(mu, a = a), 1) pop_weights <- (weights + weights*(1-scaling_power)/(scaling_power)) / sum(weights + weights*(1-scaling_power)/(scaling_power)) return(pop_weights) } .get_EDR <- function(power, pop_weights){ sum(pop_weights * power) # 1/sum(weights / power) # old formula based on estimated weights } .get_ERR <- function(power2, power1, pop_weights){ sum(pop_weights * power2 * power1) / sum(pop_weights * power2) # sum(weights * power) # old formula based on estimated weights } .get_estimates <- function(mu, weights, prop_high, sig_level, a){ power2 <- c(z_to_power(z = mu, alpha = sig_level), 1) # power - two-sided power1 <- c(z_to_power(z = mu, alpha = sig_level, two.sided = FALSE), 1) # power - one-sided weights <- c(weights*(1-prop_high), prop_high) # estimated weights pop_weights <- .get_pop_weights(weights, mu, a) # transformed into the overall weights EDR <- .get_EDR(power2, pop_weights) ERR <- .get_ERR(power2, power1, pop_weights) Z0 <- weights[1] estimates <- c( "ERR" = ERR, "EDR" = EDR, "Z0" = Z0 ) return(estimates) } # additional functions for summary computation .get_Soric_FDR <- function(EDR, sig_level){ ((1/EDR) - 1)*(sig_level/(1-sig_level)) } .get_file_drawer_R <- function(EDR){ (1-EDR)/EDR } .get_expected_N <- function(EDR, N_sig){ .get_file_drawer_R(EDR)*N_sig + N_sig } .get_missing_N <- function(EDR, N_sig, N_obs){ .get_expected_N(EDR, N_sig) - N_obs } # rounding for plot .r2d <- function(x)format(round(x, 2), nsmall = 2) .rXd <- function(x,X)format(round(x, X), nsmall = X) .rXdn <- function(x,X)as.numeric(.rXd(x,X)) ### the exported function #' @title Reports whether x is a zcurve object #' #' @param x an object to test #' @export is.zcurve is.zcurve <- function(x){ inherits(x, "zcurve") } #' Prints estimates from z-curve object #' @param x Estimate of a z-curve object #' @param ... Additional arguments #' @method print.estimates zcurve #' @export print.summary.zcurve #' @rawNamespace S3method(print, estimates.zcurve) #' @seealso [zcurve()] print.estimates.zcurve <- function(x, ...){ est_names <- names(x[1:(length(x)-1)]) est_values <- .rXdn(unlist(x[1:(length(x)-1)]), x$round.coef) names(est_values) <- est_names print(est_values) } #' @title z-curve estimates #' #' @description The following functions extract estimates #' from the z-curve object. 
#' #' @param object the z-curve object #' @param round.coef rounding for the printed values #' #' @export ERR #' @export EDR #' @export ODR #' @export Soric #' @export file_drawer_ration #' @export expected_n #' @export missing_n #' @export significant_n #' @export included_n #' @name zcurve.estimates #' #' @details Technically, ODR, significant n, and included n #' are not z-curve estimates but they are grouped in this #' category for convenience. #' @seealso [zcurve()] NULL #' @rdname zcurve.estimates ERR <- function(object, round.coef = 3){ if(!is.zcurve(object))stop("The functions requires an 'zcurve' object.") sum <- summary(object)$coefficients val <- list() val[["Estimate"]] <- sum["ERR",1] if(!is.null(object[["boot"]])){ val[["l.CI"]] <- sum["ERR", "l.CI"] val[["u.CI"]] <- sum["ERR", "u.CI"] } val[["round.coef"]] <- round.coef class(val) <- "estimates.zcurve" return(val) } #' @rdname zcurve.estimates EDR <- function(object, round.coef = 3){ if(!is.zcurve(object))stop("The functions requires an 'zcurve' object.") sum <- summary(object)$coefficients val <- list() val[["Estimate"]] <- sum["EDR",1] if(!is.null(object[["boot"]])){ val[["l.CI"]] <- sum["EDR", "l.CI"] val[["u.CI"]] <- sum["EDR", "u.CI"] } val[["round.coef"]] <- round.coef class(val) <- "estimates.zcurve" return(val) } #' @rdname zcurve.estimates ODR <- function(object, round.coef = 3){ if(!is.zcurve(object))stop("The functions requires an 'zcurve' object.") sum <- summary(object)$model prt <- stats::prop.test(sum$N_sig, sum$N_all) val <- list() val[["Estimate"]] <- unname(prt$estimate) val[["l.CI"]] <- unname(prt$conf.int[1]) val[["u.CI"]] <- unname(prt$conf.int[2]) val[["round.coef"]] <- round.coef class(val) <- "estimates.zcurve" return(val) } #' @rdname zcurve.estimates Soric <- function(object, round.coef = 3){ if(!is.zcurve(object))stop("The functions requires an 'zcurve' object.") sum <- summary(object, all = TRUE)$coefficients val <- list() val[["Estimate"]] <- sum["Soric FDR",1] if(!is.null(object[["boot"]])){ val[["l.CI"]] <- sum["Soric FDR", "l.CI"] val[["u.CI"]] <- sum["Soric FDR", "u.CI"] } val[["round.coef"]] <- round.coef class(val) <- "estimates.zcurve" return(val) } #' @rdname zcurve.estimates file_drawer_ration <- function(object, round.coef = 3){ if(!is.zcurve(object))stop("The functions requires an 'zcurve' object.") sum <- summary(object, all = TRUE)$coefficients val <- list() val[["Estimate"]] <- sum["File Drawer R",1] if(!is.null(object[["boot"]])){ val[["l.CI"]] <- sum["File Drawer R", "l.CI"] val[["u.CI"]] <- sum["File Drawer R", "u.CI"] } val[["round.coef"]] <- round.coef class(val) <- "estimates.zcurve" return(val) } #' @rdname zcurve.estimates expected_n <- function(object, round.coef = 0){ if(!is.zcurve(object))stop("The functions requires an 'zcurve' object.") sum <- summary(object, all = TRUE)$coefficients val <- list() val[["Estimate"]] <- sum["Expected N",1] if(!is.null(object[["boot"]])){ val[["l.CI"]] <- sum["Expected N", "l.CI"] val[["u.CI"]] <- sum["Expected N", "u.CI"] } val[["round.coef"]] <- round.coef class(val) <- "estimates.zcurve" return(val) } #' @rdname zcurve.estimates missing_n <- function(object, round.coef = 0){ if(!is.zcurve(object))stop("The functions requires an 'zcurve' object.") sum <- summary(object, all = TRUE)$coefficients val <- list() val[["Estimate"]] <- sum["Missing N",1] if(!is.null(object[["boot"]])){ val[["l.CI"]] <- sum["Missing N", "l.CI"] val[["u.CI"]] <- sum["Missing N", "u.CI"] } val[["round.coef"]] <- round.coef class(val) <- "estimates.zcurve" 
  return(val)
}
#' @rdname zcurve.estimates
significant_n <- function(object){
  if(!is.zcurve(object))
    stop("The function requires a 'zcurve' object.")
  sum <- summary(object)$model
  val <- list()
  val[["N"]]          <- sum$N_sig
  val[["round.coef"]] <- 0
  class(val) <- "estimates.zcurve"
  return(val)
}
#' @rdname zcurve.estimates
included_n <- function(object){
  if(!is.zcurve(object))
    stop("The function requires a 'zcurve' object.")
  sum <- summary(object)$model
  val <- list()
  val[["N"]]          <- sum$N_used
  val[["round.coef"]] <- 0
  class(val) <- "estimates.zcurve"
  return(val)
}
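## Usage sketch for the helpers above, wrapped in `if (FALSE)` so it is not executed
## when the file is sourced. `power_to_z()` numerically inverts `z_to_power()`, so a
## round trip should approximately recover the input power; the extractors (ERR, EDR,
## ODR, significant_n, ...) expect an object fitted by `zcurve()`. The fitted object
## below is only a hypothetical example.
if (FALSE) {
  z_to_power(power_to_z(0.80, alpha = .05), alpha = .05)   # ~ 0.80

  fit <- zcurve(abs(rnorm(300, 3)), bootstrap = 100)
  ERR(fit)             # expected replicability rate (with bootstrap CI)
  EDR(fit)             # expected discovery rate (with bootstrap CI)
  ODR(fit)             # observed discovery rate (prop.test CI)
  significant_n(fit)   # number of significant results supplied
  included_n(fit)      # number of results inside the fitting range
}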
/scratch/gouwar.j/cran-all/cranData/zcurve/R/tools.R
#' @title Options for the zcurve package #' #' @description A placeholder object and functions for the zcurve package. #' (adapted from the runjags R package). #' #' @param name the name of the option to get the current value of - for a list of #' available options, see details below. #' @param ... named option(s) to change - for a list of available options, see #' details below. #' #' @return The current value of all available zcurve options (after applying any #' changes specified) is returned invisibly as a named list. #' #' @export zcurve.options #' @export zcurve.get_option #' @name zcurve_options #' @aliases zcurve_options zcurve.options zcurve.get_option NULL #' @rdname zcurve_options zcurve.options <- function(...){ opts <- list(...) for(i in seq_along(opts)){ if(!names(opts)[i] %in% names(zcurve.private)) stop(paste("Unmatched or ambiguous option '", names(opts)[i], "'", sep="")) assign(names(opts)[i], opts[[i]] , envir = zcurve.private) } return(invisible(zcurve.private$options)) } #' @rdname zcurve_options zcurve.get_option <- function(name){ if(length(name)!=1) stop("Only 1 option can be retrieved at a time") if(!name %in% names(zcurve.private)) stop(paste("Unmatched or ambiguous option '", name, "'", sep="")) # Use eval as some defaults are put in using 'expression' to avoid evaluating at load time: return(eval(zcurve.private[[name]])) } zcurve.private <- new.env() # Use 'expression' for functions to avoid having to evaluate before the package is fully loaded: assign("defaultoptions", list(envir = zcurve.private)) assign("options", zcurve.private$defaultoptions, envir = zcurve.private) assign("max_cores", parallel::detectCores(logical = TRUE) - 1, envir = zcurve.private)
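## Illustrative sketch of the options mechanism above (not run at source time):
## option values live in the `zcurve.private` environment, `zcurve.options()`
## overwrites named entries, and `zcurve.get_option()` reads one entry back.
if (FALSE) {
  zcurve.get_option("max_cores")   # default: parallel::detectCores(logical = TRUE) - 1
  zcurve.options(max_cores = 2)    # e.g., cap the cores used by the parallel bootstrap
  zcurve.get_option("max_cores")   # now returns 2
}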
/scratch/gouwar.j/cran-all/cranData/zcurve/R/utilities.R
#' @keywords internal #' @aliases zcurve-package "_PACKAGE" # The following block is used by usethis to automatically manage # roxygen namespace tags. Modify with care! ## usethis namespace: start #' @useDynLib zcurve, .registration = TRUE #' @importFrom Rcpp sourceCpp #' @importFrom Rdpack reprompt #' @importFrom rlang .data ## usethis namespace: end NULL
/scratch/gouwar.j/cran-all/cranData/zcurve/R/zcurve-package.R
.zcurve_EM <- function(z, lb, ub, control){ # get starting value z-curves if(control$type == 1){ fit_start <- .zcurve_EM_start_fast_RCpp(x = z, K = control$K, mu = control$mu, sigma = control$sigma, mu_alpha = control$mu_alpha, mu_max = control$mu_max, theta_alpha = control$theta_alpha, a = control$a, b = control$b, sig_level = control$sig_level, fit_reps = control$fit_reps, max_iter = control$max_iter_start, criterion = control$criterion_start) }else if(control$type == 2){ fit_start <- .zcurve_EM_start_RCpp(x = z, type = control$type, K = control$K, mu = control$mu, sigma = control$sigma, mu_alpha = control$mu_alpha, mu_max = control$mu_max, theta_alpha = control$theta_alpha, a = control$a, b = control$b, sig_level = control$sig_level, fit_reps = control$fit_reps, max_iter = control$max_iter_start, criterion = control$criterion_start) }else if(control$type == 3){ fit_start <- .zcurve_EMc_start_fast_RCpp(x = z, lb = lb, ub = ub, K = control$K, mu = control$mu, sigma = control$sigma, mu_alpha = control$mu_alpha, mu_max = control$mu_max, theta_alpha = control$theta_alpha, a = control$a, b = control$b, sig_level = control$sig_level, fit_reps = control$fit_reps, max_iter = control$max_iter_start, criterion = control$criterion_start) } # fit final z-curve if(control$type == 1){ fit <- .zcurve_EM_fit_fast_RCpp(x = z, mu = fit_start$mu[which.max(fit_start$Q),], sigma = control$sigma, theta = fit_start$weights[which.max(fit_start$Q),], a = control$a, b = control$b, sig_level = control$sig_level, max_iter = control$max_iter, criterion = control$criterion) }else if(control$type == 2){ fit <- .zcurve_EM_fit_RCpp(x = z, type = control$type, mu = fit_start$mu[which.max(fit_start$Q),], sigma = control$sigma, theta = fit_start$weights[which.max(fit_start$Q),], a = control$a, b = control$b, sig_level = control$sig_level, max_iter = control$max_iter, criterion = control$criterion) }else if(control$type == 3){ fit <- .zcurve_EMc_fit_fast_RCpp(x = z, lb = lb, ub = ub, mu = fit_start$mu[which.max(fit_start$Q),], sigma = control$sigma, theta = fit_start$weights[which.max(fit_start$Q),], a = control$a, b = control$b, sig_level = control$sig_level, max_iter = control$max_iter, criterion = control$criterion) } return( list( "mu" = fit$mu, "weights" = fit$weights, "prop_high" = fit$prop_high, "Q" = fit$Q, "iter" = fit$iter, "iter_start" = fit_start$iter[which.max(fit_start$Q)] ) ) } .zcurve_EM_boot <- function(z, lb, ub, control, fit, bootstrap){ if(control$type == 1){ fit_boot <- .zcurve_EM_boot_fast_RCpp(x = z, mu = fit$mu, sigma = control$sigma, theta = fit$weights, a = control$a, b = control$b, sig_level = control$sig_level, bootstrap = bootstrap, max_iter = control$max_iter_boot, criterion = control$criterion_boot ) }else if(control$type == 2){ fit_boot <- .zcurve_EM_boot_RCpp(x = z, type = control$type, mu = fit$mu, sigma = control$sigma, theta = fit$weights, a = control$a, b = control$b, sig_level = control$sig_level, bootstrap = bootstrap, criterion = control$criterion_boot, max_iter = control$max_iter_boot) }else if(control$type == 3){ indx <- c( if(length(z) > 0) 1:length(z), if(length(lb) > 0) (-length(lb)):-1 ) fit_boot <- .zcurve_EMc_boot_fast_RCpp(x = z, lb = lb, ub = ub, indx = indx, mu = fit$mu, sigma = control$sigma, theta = fit$weights, a = control$a, b = control$b, sig_level = control$sig_level, bootstrap = bootstrap, criterion = control$criterion_boot, max_iter = control$max_iter_boot) } return( list( "mu" = fit_boot$mu, "weights" = fit_boot$weights, "Q" = fit_boot$Q, "prop_high" = 
fit_boot$prop_high, "iter" = fit_boot$iter ) ) } .zcurve_EM_boot.par <- function(z, lb, ub, control, fit, bootstrap){ cores <- zcurve.get_option("max_cores") core_load <- split(1:bootstrap, rep(1:cores, length.out = bootstrap)) core_load <- sapply(core_load, length) initial_seed <- sample(.Machine$integer.max, 1) cl <- parallel::makePSOCKcluster(cores) parallel::clusterEvalQ(cl, {library("zcurve")}) parallel::clusterExport(cl, c("z", "lb", "ub", "control", "fit", "bootstrap", "core_load", "initial_seed"), envir = environment()) fit_boot <- parallel::parLapplyLB(cl, 1:cores, function(i){ set.seed(initial_seed + i) return(.zcurve_EM_boot(z, lb, ub, control, fit, core_load[i])) }) parallel::stopCluster(cl) return( list( "mu" = do.call(rbind, lapply(fit_boot, function(x) x$mu)), "weights" = do.call(rbind, lapply(fit_boot, function(x) x$weights)), "Q" = do.call(c, lapply(fit_boot, function(x) x$Q)), "prop_high" = do.call(c, lapply(fit_boot, function(x) x$prop_high)), "iter" = do.call(c, lapply(fit_boot, function(x) x$iter)) ) ) } #' @name control_EM #' @title Control settings for the zcurve EM algorithm #' @description All these settings are passed to the Expectation Maximization #' fitting algorithm. All unspecified settings are set to the default value. #' Setting \code{model = "EM"} sets all settings to the default #' value irrespective of any other setting and fits z-curve as described in #' \insertCite{zcurve2;textual}{zcurve} #' #' @param model A type of model to be fitted, defaults to \code{"EM"} #' for a z-curve with 7 z-scores centered components. #' @param sig_level An alpha level of the test statistics, defaults to #' \code{.05} #' @param a A beginning of fitting interval, defaults to #' \code{qnorm(sig_level/2,lower.tail = F)} #' @param b An end of fitting interval, defaults to \code{5} #' @param mu Means of the components, defaults to #' \code{0:6} #' @param sigma A standard deviation of the components, defaults to #' \code{rep(1, length(mu))} #' @param theta_alpha A vector of alpha parameters of a Dirichlet distribution #' for generating random starting values for the weights, defaults to #' \code{rep(.5, length(mu))} #' @param theta_max Upper limits for weights, defaults to #' \code{rep(1,length(mu))} #' @param criterion A criterion to terminate the EM algorithm, #' defaults to \code{1e-6} #' @param criterion_start A criterion to terminate the starting phase #' of the EM algorithm, defaults to \code{1e-3} #' @param criterion_boot A criterion to terminate the bootstrapping phase #' of the EM algorithm, defaults to \code{1e-5} #' @param max_iter A maximum number of iterations of the EM algorithm #' (not including the starting iterations) defaults to \code{10000} #' @param max_iter_start A maximum number of iterations for the #' starting phase of EM algorithm, defaults to \code{100} #' @param max_iter_boot A maximum number of iterations for the #' booting phase of EM algorithm, defaults to \code{100} #' @param fit_reps A number of starting fits to get the initial #' position for the EM algorithm, defaults to \code{100} #' #' @references #' \insertAllCited{} #' #' @examples # to increase the number of starting fits #' # and change the means of the mixture components #' #' ctrl <- list( #' fit_reps = 50, #' mu = c(0, 1.5, 3, 4.5, 6) #' ) #' \dontrun{zcurve(OSC.z, method = "EM", control = ctrl)} #' #' @seealso [zcurve()], [control_density] NULL .zcurve_EM.control <- function(control){ #if(is.null(control)){ # control$sig_level <- .05 # control$a <- 
stats::qnorm(control$sig_level/2,lower.tail = F) # control$b <- 5 # control$type <- 1 # legacy from z-curve with estimated means # control$mu <- c(0, 1.11, 1.71, 2.21, 2.80, 3.9, 5) # control$sigma <- rep(1, length(control$mu)) # control$K <- length(control$mu) # legacy from z-curve with estimated means # control$theta_alpha <- rep(.5, length(control$mu)) # control$mu_alpha <- 2:(control$K+1) # legacy from z-curve with estimated means # control$mu_max <- control$b + 2 # legacy from z-curve with estimated means # control$criterion <- 1e-5 # control$max_iter <- 1000 # control$criterion_start <- 1e-3 # control$max_iter_start <- 100 # control$fit_reps <- 20 # control$model <- "EM7p" # return(control) #} if(is.null(control)){ control$sig_level <- .05 control$a <- stats::qnorm(control$sig_level/2,lower.tail = F) control$b <- 6 control$type <- 1 control$mu <- 0:6 control$sigma <- rep(1, length(control$mu)) control$K <- length(control$mu) control$theta_alpha <- rep(.5, length(control$mu)) control$mu_alpha <- 2:(control$K+1) control$mu_max <- control$b + 2 control$criterion <- 1e-6 control$max_iter <- 10000 control$criterion_boot <- 1e-5 control$max_iter_boot <- 1000 control$criterion_start <- 1e-3 control$max_iter_start <- 100 control$fit_reps <- 100 control$model <- "EM" return(control) } if(!is.null(control[["model"]])){ if(control$model == "EM"){ control$sig_level <- .05 control$a <- stats::qnorm(control$sig_level/2,lower.tail = F) control$b <- 6 control$type <- 1 control$mu <- 0:6 control$sigma <- rep(1, length(control$mu)) control$K <- length(control$mu) control$theta_alpha <- rep(.5, length(control$mu)) control$mu_alpha <- 2:(control$K+1) control$mu_max <- control$b + 2 control$criterion <- 1e-6 control$max_iter <- 10000 control$criterion_boot <- 1e-5 control$max_iter_boot <- 1000 control$criterion_start <- 1e-3 control$max_iter_start <- 100 control$fit_reps <- 100 control$model <- "EM" return(control) } } if(is.null(control[["sig_level"]])){ control$sig_level <- .05 } if(is.null(control[["a"]])){ control$a <- stats::qnorm(control$sig_level/2,lower.tail = F) } if(is.null(control[["b"]])){ control$b <- 6 } if(is.null(control[["type"]])){ control$type <- 1 } if(is.null(control[["mu"]])){ control$mu <- 0:6 } if(is.null(control[["sigma"]])){ control$sigma <- rep(1, length(control$mu)) } if(is.null(control[["K"]])){ control$K <- ifelse(control$type == 1, length(control$mu), 4) } if(is.null(control[["theta_alpha"]])){ control$theta_alpha <- rep(.5, length(control$mu)) } if(is.null(control[["mu_alpha"]])){ control$mu_alpha <- 2:(control$K+1) } if(is.null(control[["mu_max"]])){ control$mu_max <- control$b + 2 } if(is.null(control[["criterion"]])){ control$criterion <- 1e-6 } if(is.null(control[["max_iter"]])){ control$max_iter <- 10000 } if(is.null(control[["criterion_boot"]])){ control$criterion_boot <- 1e-5 } if(is.null(control[["max_iter_boot"]])){ control$max_iter_boot <- 1000 } if(is.null(control[["criterion_start"]])){ control$criterion_start <- 1e-3 } if(is.null(control[["max_iter_start"]])){ control$max_iter_start <- 100 } if(is.null(control[["fit_reps"]])){ control$fit_reps <- 100 } if(is.null(control[["model"]])){ control$model <- NULL } return(control) }
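## Illustrative sketch of how a partial control list is completed (not run at source
## time): `.zcurve_EM.control()` fills every unspecified setting with its default,
## and several defaults are derived from `mu` (sigma, K, theta_alpha) or from
## `sig_level` and `b` (a, mu_max). The specific values below are arbitrary.
if (FALSE) {
  ctrl <- list(
    mu       = c(0, 1.5, 3, 4.5, 6),   # custom component means
    fit_reps = 50                      # fewer starting fits
  )
  full <- .zcurve_EM.control(ctrl)
  full$sigma         # rep(1, 5), derived from length(mu)
  full$theta_alpha   # rep(.5, 5)
  full$a             # qnorm(.05 / 2, lower.tail = FALSE), from the default sig_level

  # the same partial list can be passed directly to zcurve()
  fit <- zcurve(abs(rnorm(300, 3)), method = "EM", control = ctrl)
}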
/scratch/gouwar.j/cran-all/cranData/zcurve/R/zcurve_EM.R
#' @title Fit a z-curve to clustered data #' #' @description \code{zcurve_clustered} is used to fit z-curve models to #' clustered data. The function requires a data object created with the #' [zcurve_data()] function as the input (where id denotes clusters). #' Two different methods that account for clustering ar implemented via #' the EM model: \code{"w"} for down weighting the likelihood of the test #' statistics proportionately to the number of repetitions in the clusters, #' and \code{"b"} for a nested bootstrap where only a single study from each #' bootstrap is selected for model fitting. #' @param data an object created with [zcurve_data()] function. #' @param method the method to be used for fitting. Possible options are #' down weighting \code{"w"} and nested bootstrap \code{"b"}. #' Defaults to \code{"w"}. #' @param bootstrap the number of bootstraps for estimating CI. To skip #' bootstrap specify \code{FALSE}. #' @param parallel whether the bootstrap should be performed in parallel. #' Defaults to \code{FALSE}. The implementation is not completely stable #' and might cause a connection error. #' @param control additional options for the fitting algorithm more details in #' \link[=control_EM]{control EM}. #' #' #' @references #' \insertAllCited{} #' #' @return The fitted z-curve object #' #' @seealso [zcurve()], [summary.zcurve()], [plot.zcurve()], [control_EM], [control_density] #' @export zcurve_clustered <- function(data, method = "b", bootstrap = 1000, parallel = FALSE, control = NULL){ warning("Please note that the clustering adjustment is an experimental feature.", immediate. = TRUE) if(!method %in% c("w", "b")) stop("Wrong method, select a supported option.") if(method == "b" && is.logical(bootstrap) && !bootstrap) stop("The nested boostrap method requires bootstrap.") # set bootstrap if(!is.numeric(bootstrap)){ bootstrap <- FALSE }else if(bootstrap <= 0){ bootstrap <- FALSE } if(!inherits(data, "zcurve_data")){ stop("The 'data' input must be created by the `zcurve_data()` function. See `?zcurve_data()` for more information.") } # create results object object <- NULL object$call <- match.call() object$method <- switch(method, "w" = "EM (weighted)", "b" = "EM (bootstrapped)") object$input_type <- "zcurve-data" # create control control <- .zcurve_EM.control(control) ### prepare data if(nrow(data$precise) != 0){ z <- .p_to_z(data$precise$p) z_id <- data$precise$id }else{ z <- numeric() z_id <- numeric() } if(nrow(data$censored) != 0){ lb <- .p_to_z(data$censored$p.ub) ub <- .p_to_z(data$censored$p.lb) b_id <- data$censored$id # remove non-significant censored p-values if(any(lb < control$a)){ warning(paste0(sum(lb < control$a), " censored p-values removed due to the upper bound being larger that the fitting range."), immediate. = TRUE, call. 
= FALSE) b_id <- b_id[lb >= control$a] ub <- ub[lb >= control$a] lb <- lb[lb >= control$a] } # move too significant censored p-values among precise p-values if(length(lb) > 0 && any(lb >= control$b)){ object$data <- c(object$data, lb[lb >= control$b]) b_id <- b_id[lb < control$b] ub <- ub[lb < control$b] lb <- lb[lb < control$b] } if(length(lb) > 0){ # restrict the upper censoring to the fitting range ub <- ifelse(ub > control$b, control$b, ub) # update control control$type <- 3 }else{ lb <- numeric() ub <- numeric() b_id <- numeric() } }else{ lb <- numeric() ub <- numeric() b_id <- numeric() } object$data <- z object$data_id <- z_id object$data_censoring <- data.frame(lb = lb, ub = ub, id = b_id) object$control <- control # only run the algorithm with some significant results if(sum(z > control$a & z < control$b) + length(lb) < 10) stop("There must be at least 10 z-scores in the fitting range but a much larger number is recommended.") # use appropriate algorithm if(method == "b"){ fit_b <- .zcurve_EM_b(z = z, z_id = z_id, lb = lb, ub = ub, b_id = b_id, control = control, bootstrap = bootstrap, parallel = parallel) fit <- fit_b$fit }else if(method == "w"){ fit <- .zcurve_EM_w(z = z, z_id = z_id, lb = lb, ub = ub, b_id = b_id, control = control) } object$fit <- fit # check convergence object$converged <- ifelse(fit$iter < control$max_iter, TRUE, FALSE) # do bootstrap if(bootstrap != FALSE){ if(method == "b"){ fit_boot <- fit_b$fit_boot }else if(method == "w"){ if(parallel){ fit_boot <- .zcurve_EM_w_boot.par(z = z, z_id = z_id, lb = lb, ub = ub, b_id = b_id, control = control, fit = fit, bootstrap = bootstrap) }else{ fit_boot <- .zcurve_EM_w_boot(z = z, z_id = z_id, lb = lb, ub = ub, b_id = b_id, control = control, fit = fit, bootstrap = bootstrap) } } object$boot <- fit_boot } # estimates object$coefficients <- .get_estimates(mu = fit$mu, weights = fit$weights, prop_high = fit$prop_high, sig_level = control$sig_level, a = control$a) # boot estimates if(bootstrap != FALSE){ object$coefficients_boot <- data.frame(t(sapply(1:bootstrap, function(i){ .get_estimates(mu = fit_boot$mu[i,], weights = fit_boot$weights[i,], prop_high = fit_boot$prop_high[i], sig_level = control$sig_level, a = control$a) }))) } class(object) <- c("zcurve", "zcurve.clustered") return(object) } .zcurve_EM_b <- function(z, z_id, lb, ub, b_id, control, bootstrap, parallel){ # get starting value z-curves fit_start <- .zcurve_EMc_start_fast_RCpp(x = z, lb = lb, ub = ub, K = control$K, mu = control$mu, sigma = control$sigma, mu_alpha = control$mu_alpha, mu_max = control$mu_max, theta_alpha = control$theta_alpha, a = control$a, b = control$b, sig_level = control$sig_level, fit_reps = control$fit_reps, max_iter = control$max_iter_start, criterion = control$criterion_start) # fit final z-curve data_index <- rbind( data.frame( z = z, lb = rep(NA, length(z)), ub = rep(NA, length(z)), id = z_id, type = rep(1, length(z)) ), data.frame( z = rep(NA, length(lb)), lb = lb, ub = ub, id = b_id, type = rep(2, length(lb)) ) ) if(parallel){ cores <- zcurve.get_option("max_cores") core_load <- split(1:bootstrap, rep(1:cores, length.out = bootstrap)) core_load <- sapply(core_load, length) initial_seed <- sample(.Machine$integer.max, 1) cl <- parallel::makePSOCKcluster(cores) parallel::clusterEvalQ(cl, {library("zcurve")}) parallel::clusterExport(cl, c("fit_start", "control", "data_index", "core_load", "initial_seed"), envir = environment()) fit <- parallel::parLapplyLB(cl, 1:cores, function(i){ set.seed(initial_seed + i) fit <- list() for(i in 
1:core_load[i]){ boot_data <- .boot_id(data_index) fit[[i]] <- .zcurve_EMc_fit_fast_RCpp(x = boot_data$z[boot_data$type == 1], lb = boot_data$lb[boot_data$type == 2], ub = boot_data$ub[boot_data$type == 2], mu = fit_start$mu[which.max(fit_start$Q),], sigma = control$sigma, theta = fit_start$weights[which.max(fit_start$Q),], a = control$a, b = control$b, sig_level = control$sig_level, max_iter = control$max_iter, criterion = control$criterion) } return(fit) }) parallel::stopCluster(cl) fit <- do.call(c, fit) }else{ fit <- list() for(i in 1:bootstrap){ boot_data <- .boot_id(data_index) fit[[i]] <- .zcurve_EMc_fit_fast_RCpp(x = boot_data$z[boot_data$type == 1], lb = boot_data$lb[boot_data$type == 2], ub = boot_data$ub[boot_data$type == 2], mu = fit_start$mu[which.max(fit_start$Q),], sigma = control$sigma, theta = fit_start$weights[which.max(fit_start$Q),], a = control$a, b = control$b, sig_level = control$sig_level, max_iter = control$max_iter, criterion = control$criterion) } } fit_boot = list( "mu" = do.call(rbind, lapply(fit, function(f) f[["mu"]])), "weights" = do.call(rbind, lapply(fit, function(f) f[["weights"]])), "prop_high" = do.call(c, lapply(fit, function(f) f[["prop_high"]])), "Q" = do.call(c, lapply(fit, function(f) f[["Q"]])), "iter" = do.call(c, lapply(fit, function(f) f[["iter"]])) ) return( list( fit = list( "mu" = apply(fit_boot[["mu"]], 2, mean), "weights" = apply(fit_boot[["weights"]], 2, mean), "prop_high" = mean(fit_boot[["prop_high"]]), "Q" = mean(fit_boot[["Q"]]), "iter" = mean(fit_boot[["iter"]]), "iter_start" = fit_start$iter[which.max(fit_start$Q)] ), fit_boot = fit_boot ) ) } .zcurve_EM_w <- function(z, z_id, lb, ub, b_id, control){ # get starting value z-curves fit_start <- .zcurve_EMc_start_fast_RCpp(x = z, lb = lb, ub = ub, K = control$K, mu = control$mu, sigma = control$sigma, mu_alpha = control$mu_alpha, mu_max = control$mu_max, theta_alpha = control$theta_alpha, a = control$a, b = control$b, sig_level = control$sig_level, fit_reps = control$fit_reps, max_iter = control$max_iter_start, criterion = control$criterion_start) # compute weights id_freq <- table(c(z_id, b_id)) id_weights <- data.frame( id = names(id_freq), w = 1/as.vector(id_freq) ) # fit final z-curve fit <- .zcurve_EMc_fit_fast_w_RCpp(x = z, x_w = id_weights$w[match(z_id, id_weights$id)], lb = lb, ub = ub, b_w = id_weights$w[match(b_id, id_weights$id)], mu = fit_start$mu[which.max(fit_start$Q),], sigma = control$sigma, theta = fit_start$weights[which.max(fit_start$Q),], a = control$a, b = control$b, sig_level = control$sig_level, max_iter = control$max_iter, criterion = control$criterion) return( list( "mu" = fit$mu, "weights" = fit$weights, "prop_high" = fit$prop_high, "Q" = fit$Q, "iter" = fit$iter, "iter_start" = fit_start$iter[which.max(fit_start$Q)] ) ) } .zcurve_EM_w_boot <- function(z, z_id, lb, ub, b_id, control, fit, bootstrap){ # compute weights id_freq <- table(c(z_id, b_id)) id_weights <- data.frame( id = names(id_freq), w = 1/as.vector(id_freq) ) x_w <- id_weights$w[match(z_id, id_weights$id)] b_w <- id_weights$w[match(b_id, id_weights$id)] indx <- c( if(length(z) > 0) 1:length(z), if(length(lb) > 0) (-length(lb)):-1 ) fit_boot <- .zcurve_EMc_boot_fast_w_RCpp(x = z, x_w = x_w, lb = lb, ub = ub, b_w = b_w, indx = indx, mu = fit$mu, sigma = control$sigma, theta = fit$weights, a = control$a, b = control$b, sig_level = control$sig_level, bootstrap = bootstrap, criterion = control$criterion_boot, max_iter = control$max_iter_boot) return( list( "mu" = fit_boot$mu, "weights" = 
fit_boot$weights, "Q" = fit_boot$Q, "prop_high" = fit_boot$prop_high, "iter" = fit_boot$iter ) ) } .zcurve_EM_w_boot.par <- function(z, z_id, lb, ub, b_id, control, fit, bootstrap){ cores <- zcurve.get_option("max_cores") core_load <- split(1:bootstrap, rep(1:cores, length.out = bootstrap)) core_load <- sapply(core_load, length) initial_seed <- sample(.Machine$integer.max, 1) cl <- parallel::makePSOCKcluster(cores) parallel::clusterEvalQ(cl, {library("zcurve")}) parallel::clusterExport(cl, c("z", "z_id", "lb", "ub", "b_id", "control", "fit", "bootstrap", "core_load", "initial_seed"), envir = environment()) fit_boot <- parallel::parLapplyLB(cl, 1:cores, function(i){ set.seed(initial_seed + i) return(.zcurve_EM_w_boot(z, z_id, lb, ub, b_id, control, fit, core_load[i])) }) parallel::stopCluster(cl) return( list( "mu" = do.call(rbind, lapply(fit_boot, function(x) x$mu)), "weights" = do.call(rbind, lapply(fit_boot, function(x) x$weights)), "Q" = do.call(c, lapply(fit_boot, function(x) x$Q)), "prop_high" = do.call(c, lapply(fit_boot, function(x) x$prop_high)), "iter" = do.call(c, lapply(fit_boot, function(x) x$iter)) ) ) } .boot_id <- function(data){ unique_id <- unique(data$id) if(length(unique_id) == nrow(data)){ return(data) } boot_out <- list() for(id in unique_id){ temp_data <- data[data$id == id,] if(nrow(temp_data) == 1){ boot_out[[id]] <- temp_data }else{ boot_out[[id]] <- temp_data[sample(nrow(temp_data), 1),] } } boot_out <- do.call(rbind, boot_out) return(boot_out) }
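## Minimal sketch of what `.boot_id()` does for the nested-bootstrap ("b") method
## above (not run at source time): within every cluster id only one row is kept, so
## each bootstrap iteration uses at most one result per cluster. The toy data frame
## is purely hypothetical.
if (FALSE) {
  toy <- data.frame(
    z    = c(2.1, 2.5, 3.0, 4.2),
    lb   = NA, ub = NA,
    id   = c("study1", "study1", "study2", "study3"),
    type = 1
  )
  .boot_id(toy)   # one randomly chosen row for "study1", the single rows for the others
}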
/scratch/gouwar.j/cran-all/cranData/zcurve/R/zcurve_clustered.R
# wrapper .zcurve_density <- function(z, control) { if(control$version == 2){ temp_fit <- .zcurve_density_ver2(z, control) }else if(control$version == 1){ temp_fit <- .zcurve_density_ver1(z, control) } return(temp_fit) } .zcurve_density_boot <- function(z, control, bootstrap){ temp_ncol <- ifelse(control$version == 2, length(control$mu), control$K) results <- list( "mu" = matrix(NA, ncol = temp_ncol, nrow = bootstrap), "weights" = matrix(NA, ncol = temp_ncol, nrow = bootstrap), "N_fit" = rep(NA, times = bootstrap), "objective" = rep(NA, times = bootstrap), "iter" = rep(NA, times = bootstrap), "FDR_max" = rep(NA, times = bootstrap) # probably not useful to save individual densities from the bootstrap #"density" = list( # "x" = NULL, # "y" = NULL #) ) for(i in 1:bootstrap){ z_boot <- sample(z, replace = TRUE) if(control$version == 2){ temp_fit <- .zcurve_density_ver2(z_boot, control) results$FDR_max[i] <- temp_fit$FDR_max }else if(control$version == 1){ temp_fit <- .zcurve_density_ver1(z_boot, control) } results$mu[i,] <- temp_fit$mu results$weights[i,] <- temp_fit$weights results$prop_high[i] <- temp_fit$prop_high results$objective[i] <- temp_fit$objective results$iter[i] <- temp_fit$iter } return(results) } .zcurve_density_boot.par <- function(z, control, bootstrap){ cores <- zcurve.get_option("max_cores") core_load <- split(1:bootstrap, rep(1:cores, length.out = bootstrap)) core_load <- sapply(core_load, length) initial_seed <- sample(.Machine$integer.max, 1) cl <- parallel::makePSOCKcluster(cores) parallel::clusterEvalQ(cl, {library("zcurve")}) parallel::clusterExport(cl, c("z", "control", "bootstrap", "core_load", "initial_seed"), envir = environment()) fit_boot <- parallel::parLapplyLB(cl, 1:cores, function(i){ set.seed(initial_seed + i) return(.zcurve_density_boot(z, control, core_load[i])) }) parallel::stopCluster(cl) results <- list( "mu" = do.call(rbind, lapply(fit_boot, function(x) x$mu)), "weights" = do.call(rbind, lapply(fit_boot, function(x) x$weights)), "prop_high" = do.call(c, lapply(fit_boot, function(x) x$prop_high)), "objective" = do.call(c, lapply(fit_boot, function(x) x$objective)), "iter" = do.call(c, lapply(fit_boot, function(x) x$iter)) ) if(control$version == 2){ results$FDR_max = do.call(c, lapply(fit_boot, function(x) x$FDR_max)) } return(results) } # original z-curve1.0 .zcurve_density_ver1 <- function(z, control) { prop_high <- sum(z > control$b) / sum(z > stats::qnorm(control$sig_level/2, lower.tail = FALSE)) ncomp <- control$K bw <- control$bw cv <- control$a Z <- z[z > control$a & z < control$b] augZ = c(subset(Z,Z<Inf),2*cv-subset(Z,Z<Inf)) #5. Augmented Z for density estimation to avoid asymtote to zero at cv. DensityEstimate = stats::density(augZ,n=100,bw=bw,from=1.96,to=6) #6. Get Densities using Kernel.Density.Function dens.Z = DensityEstimate$x; #7. x-axis values of Density Slices (z-scores) dens.obs = DensityEstimate$y; #8. Observed Densities for the Density Slices dens.obs = dens.obs/(sum(dens.obs)) #9. Sum of Densities equals 1 slices = length(dens.Z) #10. Number of "Slices" of the Density Distribution slices.width = dens.Z[2] - dens.Z[1] #11. 
Width of a "Slice" of the Density Distribution ### This is the actual estimation function that fits estimated density to observed density ### Prepare start values and limits for nlminb package that fits z-curve to the data startval = c(1,2,3,1/3,1/3,1/3) #25 Starting Values for Means (1,2,3) Starting Values for weights (1/3) lowlim = rep(0,6) #26 lower limit for Means and Weights, all 0 highlim = c(6,6,6,1,1,1) #27 upper limit for Means = 6, upper limit for weights = 1 ### Execute the fit.zcurve function to get estimates auto = suppressWarnings(stats::nlminb(startval,.zcurve_density_ver1_fit,lower=lowlim,upper=highlim, ncomp = ncomp, dens.Z = dens.Z, dens.obs = dens.obs, control=list(eval.max=control$max_eval, iter.max = control$max_iter, rel.tol = control$criterion) ) ) #28 nlminb searches for Means and Weights that minimize the fit criterion ### Get the Estimated Means and Weights Z.Means = auto$par[1:ncomp] #29 get the final Means Z.w = auto$par[(ncomp+1):(2*ncomp)] #30 Get the final Weights Z.w = Z.w/sum(Z.w) #31 Weights are positive and sum to one. mean = Z.Means weights = Z.w # scaling for plotting bar.width = DensityEstimate$x[2] - DensityEstimate$x[1] Z.Density.Y = DensityEstimate$y/(sum(DensityEstimate$y*bar.width)) return( list( "mu" = Z.Means, "weights" = Z.w, "prop_high" = prop_high, "objective" = auto$objective, "converged" = auto$convergence, "message" = auto$message, "iter" = auto$iterations, "density" = list( "x" = DensityEstimate$x, "y" = Z.Density.Y ) ) ) } .zcurve_density_ver1_fit <- function(parameter, ncomp, dens.Z, dens.obs) { #12 repeated until best fit is reached Z.Means = parameter[1:ncomp] #13 These are the estimated means while approximating observed density Z.weights = abs(parameter[(ncomp+1):(2*ncomp)]) #14 These are the estimated weights Z.weights = Z.weights/sum(Z.weights) #15 Weights are scaled to add to 1 dens = c() #16 variable that stores the estimated densities for each component for(j in 1:ncomp) { #17 Do for each component dcomp = stats::dnorm(dens.Z-Z.Means[j]) #18 get the densities for the z-scores of the observed density distribution dcomp = dcomp/sum(dcomp) #19 scale them to add up to 1 dcomp = dcomp*Z.weights[j] #20 weight them according to the estimated weight dens = rbind(dens,dcomp) ##21 store results in a matrix } # End of For loop dens.est = colSums(dens) #22 get the estimated density as the sum of the weighted densities of the 3 components MeanAbsError = mean(abs(dens.est - dens.obs)) #23 Mean absolute difference is the fit criterion (smaller values = better fit) return(MeanAbsError) #24 return fit value to the fitting function } #' @name control_density_v1 #' @title Control settings for the original z-curve density algorithm #' @description All settings are passed to the density fitting #' algorithm. All unspecified settings are set to the default value. #' Setting \code{model = "KD1"} sets all settings to the default #' value irrespective of any other setting and fits z-curve as described #' in \insertCite{zcurve1;textual}{zcurve}. #' #' @param version Set to \code{1} to fit the original version of z-curve. #' Defaults to \code{2} = the updated version of z-curve. For its settings #' page go to [control_density]. 
#' @param model A type of model to be fitted, defaults to \code{"KD1"} #' (the only possibility) #' @param sig_level An alpha level of the test statistics, defaults to #' \code{.05} #' @param a A beginning of fitting interval, defaults to #' \code{qnorm(sig_level/2,lower.tail = F)} #' @param b An end of fitting interval, defaults to \code{6} #' @param K Number of mixture components, defaults to \code{3} #' @param max_iter A maximum number of iterations for the \link[stats]{nlminb} #' optimization for fitting mixture model, defaults to \code{150} #' @param max_eval A maximum number of evaluation for the \link[stats]{nlminb} #' optimization for fitting mixture model, defaults to \code{300} #' @param criterion A criterion to terminate \link[stats]{nlminb} optimization, #' defaults to \code{1e-10} #' @param bw A bandwidth of the kernel density estimation, defaults to \code{"nrd0"} #' #' @references #' \insertAllCited{} #' #' @examples # to increase the number of iterations #' ctrl <- list( #' version = 1, #' max_iter = 300 #' ) #' \dontrun{zcurve(OSC.z, method = "density", control = ctrl)} #' #' @seealso [zcurve()], [control_density], [control_EM] NULL # z-curve 2.0 (KD2) .zcurve_density_ver2 <- function(z, control) { prop_high <- sum(z > control$b) / sum(z > stats::qnorm(control$sig_level/2, lower.tail = FALSE)) z.val.input <- z Z.INT = z.val.input[z.val.input >= control$a & z.val.input <= control$b + 1] # depriciated, probablyt part of max.FDR estimation trial z.extreme = sum(z.val.input > control$b)/sum(z.val.input > control$a) densy = .zcurve_density_get_densities(Z.INT, z.val.input, control) Z.Density.X <- densy[,1] Z.Density.Y <- densy[,2] control$SLOPE = stats::coef(stats::lm(Z.Density.Y[Z.Density.X < control$a+1] ~ Z.Density.X[Z.Density.X < control$a+1]))[1] n.bars = length(Z.Density.X) bar.width = Z.Density.X[2] - Z.Density.X[1] ### get the densities for each interval and each non-centrality parameter Dens = c() for(i in 1:n.bars) { for (j in 1:length(control$mu)) { Dens = c(Dens,.zdist_pdf(Z.Density.X[i],control$mu[j],control$sigma,control$a,control$b)) } } Dens = matrix(Dens,length(control$mu),byrow=FALSE) Dens = Dens/(rowSums(Dens) * bar.width) para.val = .zcurve_density_get_weights_free(control, Dens = Dens, n.bars = n.bars, Z.Density.Y = Z.Density.Y, Z.Density.X = Z.Density.X) # control$SLOPE = para.val[1] probably redundant WZ0 = para.val$weights[1] fit.free = para.val$objective FDR.RES = c(WZ0,NA) FDR.RES = FDR.RES*(1-z.extreme) #para.est = Compute.Power(c(para.val$mu, para.val$weights),z.extreme) #para.est W = para.val$weights #W # precision = .2 if (control$compute_FDR) FDR.RES[2] = .zcurve_density_get_weights_fixed(z.val.input = z.val.input, W = W, fit.free = fit.free, precision = .2, n.bars = n.bars, Z.Density.X = Z.Density.X, Z.Density.Y = Z.Density.Y, Dens = Dens, control = control) #FDR.RES[2] # precision = control$precision_FDR W[1] = max(FDR.RES) if (control$compute_FDR) FDR.RES[2] = .zcurve_density_get_weights_fixed(z.val.input = z.val.input, W = W, fit.free = fit.free, precision = control$precision_FDR, n.bars = n.bars, Z.Density.X = Z.Density.X, Z.Density.Y = Z.Density.Y, Dens = Dens, control = control) #FDR.RES #res = c(para.est,FDR.RES) #names(res) = c("ERR","EDR","Weight0","Max.FDR") #res return( list( "mu" = para.val$mu, "weights" = para.val$weights, "prop_high" = prop_high, "objective" = para.val$objective, "converged" = para.val$converged, "message" = para.val$message, "iter" = para.val$iter, "FDR_max" = FDR.RES[2], "density" = list( "x" = Z.Density.X, "y" = 
Z.Density.Y ) ) ) } #' @name control_density #' @title Control settings for the z-curve 2.0 density algorithm #' @description All settings are passed to the density fitting #' algorithm. All unspecified settings are set to the default value. #' Setting \code{model = "KD2"} sets all settings to the default #' value irrespective of any other setting and fits z-curve as #' describe in \insertCite{zcurve2;textual}{zcurve}. In order to fit the #' z-curve 1.0 density algorithm, set \code{model = "KD1"} and go to #' [control_density_v1] #' #' @param version Which version of z-curve should be fitted. Defaults to #' \code{2} = z-curve 2.0. Set to \code{1} in order to fit the original #' version of z-curve. For its settings page go to [control_density_v1]. #' @param model A type of model to be fitted, defaults to \code{"KD2"} #' (another possibility is \code{"KD1"} for the original z-curve 1.0, see #' [control_density_v1] for its settings) #' @param sig_level An alpha level of the test statistics, defaults to #' \code{.05} #' @param a A beginning of fitting interval, defaults to #' \code{qnorm(sig_level/2,lower.tail = F)} #' @param b An end of fitting interval, defaults to \code{6} #' @param mu Means of the components, defaults to \code{seq(0,6,1)} #' @param sigma A standard deviation of the components, "Don't touch this" #' \- Ulrich Schimmack, defaults to \code{1} #' @param theta_min Lower limits for weights, defaults to #' \code{rep(0,length(mu))} #' @param theta_max Upper limits for weights, defaults to #' \code{rep(1,length(mu))} #' @param max_iter A maximum number of iterations for the \link[stats]{nlminb} #' optimization for fitting mixture model, defaults to \code{150} #' @param max_eval A maximum number of evaluation for the \link[stats]{nlminb} #' optimization for fitting mixture model, defaults to \code{1000} #' @param criterion A criterion to terminate \link[stats]{nlminb} optimization, #' defaults to \code{1e-03} #' @param bw A bandwidth of the kernel density estimation, defaults to \code{.10} #' @param aug Augment truncated kernel density, defaults to \code{TRUE} #' @param aug.bw A bandwidth of the augmentation, defaults to \code{.20} #' @param n.bars A resolution of density function, defaults to \code{512} #' @param density_dbc Use \link[evmix]{bckden} to estimate a truncated kernel density, #' defaults to \code{FALSE}, in which case \link[stats]{density} is used #' @param compute_FDR Whether to compute FDR, leads to noticeable increase in #' computation, defaults to \code{FALSE} #' @param criterion_FDR A criterion for estimating the maximum FDR, defaults #' to \code{.02} #' @param criterion_FDR_dbc A criterion for estimating the maximum FDR using #' the \link[evmix]{bckden} function, defaults to \code{.01} #' @param precision_FDR A maximum FDR precision, defaults to \code{.05} #' #' @references #' \insertAllCited{} #' #' @examples # to decrease the criterion and increase the number of iterations #' ctrl <- list( #' max_iter = 300, #' criterion = 1e-4 #' ) #' \dontrun{zcurve(OSC.z, method = "density", control = ctrl)} #' #' @seealso [zcurve()], [control_density_v1], [control_EM] NULL .zcurve_density.control <- function(control){ if(is.null(control)){ control$version <- 2 control$sig_level <- .05 control$sig_level_Z <- stats::qnorm(control$sig_level/2,lower.tail = F) control$a <- stats::qnorm(control$sig_level/2,lower.tail = F) # Beginning of fitting interval control$b <- 6 # End of fitting interval control$mu <- seq(0,6,1) # means of the components control$sigma <- 1 # Don't touch 
this!!! # Change Standard Deviation of Normals control$theta_min <- rep(0,length(control$mu)) # set lower limit for weights, default = 0 control$theta_max <- rep(1,length(control$mu)) # set upper limits for weights, default = 1 control$max_iter <- 150 # settings for the nlminb agortihm for fitting mixture model control$max_eval <- 1000 # settings for the nlminb agortihm for fitting mixture model control$criterion <- 1e-03 # Criterion to terminate nlminb control$bw <- .10 # Bandwidth of Kernal Density control$aug <- TRUE # Augment truncated Kernal Density control$aug.bw <- .20 # Augment Bandwidth control$n.bars <- 512 # resolution of density function (doesn't seem to matter much) control$density_dbc <- FALSE # USE dbckden function to truncate Kernal Density control$criterion_FDR <- .02 # criterion for maximum FDR control$criterion_FDR_dbc <- .01 # criterion for maximum FDR using density_dbc function control$precision_FDR <- .05 # Maximum FDR precision (low precision slows things down) control$compute_FDR <- FALSE # Compute Maximum FDR, Slows Things Down Considerably control$model <- "KD2" ### probably to be removed control$PLOT = F control$FDR.PLOT = F #"FDR.PLOT" = FALSE # Make a screen plot of the FDR #"PLOT" = FALSE # Show Fitting of Density Distribution ### unused in the code #"SLOPE.crit" = 1 # Slope Criterion to set EDR estimate to NA #"USE.SLOPE" = FALSE # Use Slope Criterion to Exclude EDR estimates ### unused and should be elsewhere #"BOOT.FDR" = TRUE # Do a Bootstrap for the Max. FDR, really slow #"BOOT" = FALSE # Bootstrap or No.Bootstrap Computation of Power #"boot.iter" = 0 # How many bootstraps for CI; 0 = no CI return(control) } if(!is.null(control[["model"]])){ if(control$model == "KD2"){ control$version <- 2 control$sig_level <- .05 control$sig_level_Z <- stats::qnorm(control$sig_level/2,lower.tail = F) control$a <- stats::qnorm(control$sig_level/2,lower.tail = F) # Beginning of fitting interval control$b <- 6 # End of fitting interval control$mu <- seq(0,6,1) # means of the components control$sigma <- 1 # Don't touch this!!! 
# Change Standard Deviation of Normals control$theta_min <- rep(0,length(control$mu)) # set lower limit for weights, default = 0 control$theta_max <- rep(1,length(control$mu)) # set upper limits for weights, default = 1 control$max_iter <- 150 # settings for the nlminb agortihm for fitting mixture model control$max_eval <- 1000 # settings for the nlminb agortihm for fitting mixture model control$criterion <- 1e-03 # Criterion to terminate nlminb control$bw <- .10 # Bandwidth of Kernal Density control$aug <- TRUE # Augment truncated Kernal Density control$aug.bw <- .20 # Augment Bandwidth control$n.bars <- 512 # resolution of density function (doesn't seem to matter much) control$density_dbc <- FALSE # USE dbckden function to truncate Kernal Density control$criterion_FDR <- .02 # criterion for maximum FDR control$criterion_FDR_dbc <- .01 # criterion for maximum FDR using density_dbc function control$precision_FDR <- .05 # Maximum FDR precision (low precision slows things down) control$compute_FDR <- FALSE # Compute Maximum FDR, Slows Things Down Considerably control$model <- "KD2" ### probably to be removed control$PLOT = F control$FDR.PLOT = F return(control) } if(control[["model"]] == "KD1"){ control$version <- 1 control$sig_level <- .05 control$sig_level_Z <- stats::qnorm(control$sig_level/2,lower.tail = F) control$a <- stats::qnorm(control$sig_level/2,lower.tail = F) # Beginning of fitting interval control$b <- 6 # End of fitting interval control$K <- 3 control$max_iter <- 150 # settings for the nlminb agortihm for fitting mixture model control$max_eval <- 300 # settings for the nlminb agortihm for fitting mixture model control$criterion <- 1e-10 # Criterion to terminate nlminb control$bw <- "nrd0" # Bandwidth of Kernal Density control$model <- "KD1" return(control) } } if(!is.null(control[["version"]])){ if(control[["version"]] == 1){ if(is.null(control[["sig_level"]])){ control$sig_level <- .05 } if(is.null(control[["sig_level_Z"]])){ control$sig_level_Z <- stats::qnorm(control$sig_level/2,lower.tail = F) } if(is.null(control[["a"]])){ control$a <- stats::qnorm(control$sig_level/2,lower.tail = F) # Beginning of fitting interval } if(is.null(control[["b"]])){ control$b <- 6 # End of fitting interval } if(is.null(control[["K"]])){ control$K <- 3 } if(is.null(control[["max_iter"]])){ control$max_iter <- 150 # settings for the nlminb agortihm for fitting mixture model } if(is.null(control[["max_eval"]])){ control$max_eval <- 300 # settings for the nlminb agortihm for fitting mixture model } if(is.null(control[["criterion"]])){ control$criterion <- 1e-10 # Criterion to terminate nlminb } if(is.null(control[["bw"]])){ control$bw <- "nrd0" # Bandwidth of Kernal Density } return(control) } } # individual parameter settings if(is.null(control[["version"]])){ control$version <- 2 } if(is.null(control[["sig_level"]])){ control$sig_level <- .05 } if(is.null(control[["sig_level_Z"]])){ control$sig_level_Z <- stats::qnorm(control$sig_level/2,lower.tail = F) } if(is.null(control[["a"]])){ control$a <- stats::qnorm(control$sig_level/2,lower.tail = F) # Beginning of fitting interval } if(is.null(control[["b"]])){ control$b <- 6 # End of fitting interval } if(is.null(control[["mu"]])){ control$mu <- seq(0,6,1) # means of the components } if(is.null(control[["sigma"]])){ control$sigma <- 1 # Don't touch this!!! 
# Change Standard Deviation of Normals } if(is.null(control[["theta_min"]])){ control$theta_min <- rep(0,length(control$mu)) # set lower limit for weights, default = 0 } if(is.null(control[["theta_max"]])){ control$theta_max <- rep(1,length(control$mu)) # set upper limits for weights, default = 1 } if(is.null(control[["max_iter"]])){ control$max_iter <- 150 # settings for the nlminb agortihm for fitting mixture model } if(is.null(control[["max_eval"]])){ control$max_eval <- 1000 # settings for the nlminb agortihm for fitting mixture model } if(is.null(control[["criterion"]])){ control$criterion <- 1e-03 # Criterion to terminate nlminb } if(is.null(control[["bw"]])){ control$bw <- .10 # Bandwidth of Kernal Density } if(is.null(control[["aug"]])){ control$aug <- TRUE # Augment truncated Kernal Density } if(is.null(control[["aug.bw"]])){ control$aug.bw <- .20 # augation Bandwidth } if(is.null(control[["n.bars"]])){ control$n.bars <- 512 # resolution of density function (doesn't seem to matter much) } if(is.null(control[["density_dbc"]])){ control$density_dbc <- FALSE # USE dbckden function to truncate Kernal Density } if(is.null(control[["criterion_FDR"]])){ control$criterion_FDR <- .02 # criterion for maximum FDR } if(is.null(control[["criterion_FDR_dbc"]])){ control$criterion_FDR_dbc <- .01 # criterion for maximum FDR using density_dbc function } if(is.null(control[["precision_FDR"]])){ control$precision_FDR <- .05 # Maximum FDR precision (low precision slows things down) } if(is.null(control[["compute_FDR"]])){ control$compute_FDR <- FALSE # Compute Maximum FDR, Slows Things Down Considerably } if(is.null(control[["model"]])){ control$model <- NULL } ### probably to be removed if(is.null(control[["PLOT"]])){ control$PLOT = F } if(is.null(control[["FDR.PLOT"]])){ control$FDR.PLOT = F } return(control) } #### additional functions #### # not used anymore ######################################################################### ### This Function Computes Power from Weights and Non-Centrality Parameters ######################################################################### #Compute.Power = function(para.val,z.extreme) { # # ### the input weights based on z.curve method # ### these are the weights based on the a value # ### a could be 1.96 (all significant) # ### but it can also be other values # ### Therefore the weights cannot be directly used # ### to estimate power/replicability # w.inp = para.val[(length(control$mu)+1):(length(control$mu)*2)]# # # pow = pnorm(control$mu,control$sig_level_Z) + pnorm(-control$mu,control$sig_level_Z)# # # ### this gives the power with the a z-score as the criterion value # ### this power is used as a weight to get the weights for the full distribution # ### using Jerry's insight that weight before selection is weight after selection divided by power # w = pnorm(control$mu,control$a) + pnorm(-control$mu,control$a) # round(w,3)# # # ### now we compute the weights before selection (w.all) # ### once we have the weights, we devided by sum of all weights # ### so that they add up to 1 # w.all = w.inp / w # w.all = w.all / sum(w.all) # # ### now we are ready to compute the weights after selection for significance # ### using Jerry's fomrula in reverse going from before selection to after selection # ### by multiplying by power (w) # ### again all the weights are standardized by dividing by the sum of all weights # # w.sig = w.all * pow # w.sig = w.sig / sum(w.sig) # w.sig # # ### compute ERR # ### this is easy, replicabilty is simply the weighted sum of power # 
### using the weights after selection for significance, w.sig # ERR = sum(pow*w.sig) # # ### than the maximum value used (default z > 6) # ERR = ERR*(1 - z.extreme) + z.extreme # # ### compute average power for all results, including estimated file drawer # ### this is also easy, here average power before selection is computed # ### as the weighted average of power using the weights before selection, w.all # # w.ext = c(w.sig*(1-z.extreme),z.extreme) # pow.ext = c(pow,1) # EDR = 1/sum(w.ext/pow.ext) # # ### res stores the results to be past back from the function # res = c(ERR,EDR) # # return(res) # #} ####################################################### ### Fitting ZCurve for FDR ESTIMATE ####################################################### .zcurve_density_get_weights_fixed = function(z.val.input, W, fit.free, precision, n.bars, Z.Density.X, Z.Density.Y, Dens, control){ if (control$FDR.PLOT) { fit = c() W.set = seq(0,1,control$precision_FDR) for (WZ0 in W.set) { theta_min = rep(0,length(control$mu)) theta_max = rep(1,length(control$mu)) startval = (control$theta_min+control$theta_max)/2 startval = startval/sum(startval) startval[1] = 1 theta_min[1] = 0 theta_max[1] = 0 ### start the estimation process auto = stats::nlminb(startval,.zcurve_density_fitting_fixed,lower=theta_min,upper=theta_max, WZ0 = WZ0, n.bars = n.bars, Z.Density.X = Z.Density.X, Z.Density.Y = Z.Density.Y, Dens = Dens, PLOT = control$PLOT) fit = c(fit,auto$objective) } graphics::plot((W.set-1)/10,fit,ylim=c(0,max(fit)+.05),xlab="Percentage of False Positives",ylab="Root Mean Square Discrepancy of Densities", col="red", pch=16, cex=2) graphics::abline(h=fit.free,col="blue",lwd=2,lty=2) graphics::abline(h=fit.free+crit,lty=2,col="red") # windows() crit = fit.free + control$criterion_FDR crit if (control$density_dbc) crit = fit.free + control$criterion_FDR_dbc MAX.FDR = max(W.set[fit < crit]) MAX.FDR } else { WZ0 = trunc(W[1]/precision)*precision crit = control$criterion_FDR + fit.free if (control$density_dbc) crit = control$criterion_FDR_dbc + fit.free fit.z0 = 0 while (fit.z0 < crit & WZ0 <= 1) { theta_min = rep(0,length(control$mu)) theta_max = rep(1,length(control$mu)) startval = W+.10 startval = startval/sum(startval) theta_min[1] = 0 theta_max[1] = 0 auto = stats::nlminb(startval,.zcurve_density_fitting_fixed, control=list(rel.tol = control$criterion), lower=theta_min,upper=theta_max, WZ0 = WZ0, n.bars = n.bars, Z.Density.Y = Z.Density.Y, PLOT = control$PLOT) auto$par W = (auto$par/sum(auto$par))*(1-WZ0) W[1] = WZ0 W fit.z0 = auto$objective if (fit.z0 < crit) WZ0 = WZ0 + precision } MAX.FDR = WZ0 - precision } return(MAX.FDR) } .zcurve_density_fitting_fixed = function(theta, WZ0, n.bars, Z.Density.X, Z.Density.Y, Dens, PLOT){ ### get the weights and rescale theta = theta/sum(theta)*(1-WZ0) weight = c(WZ0,theta) ### compute the new estimated density distribution z.est = c() for (i in 1:n.bars) z.est[i] = sum(Dens[,i]*weight) ### compare to observed density distribution rmse = sqrt(mean((z.est-Z.Density.Y)^2)) ### showing the fitting of the function in a plot if(PLOT) { rval = stats::runif(1) if (rval > .9) { graphics::lines(Z.Density.X,z.est,lty=1,col="red1",ylim=c(0,1),) graphics::points(Z.Density.X,z.est,pch=20,col="red1",ylim=c(0,1),) } } ### return value to optimization function return(rmse) } ####################################################### ### Get Weights of Non-Central Z-scores ####################################################### ### This function fits an observed distribution of z-scores to a 
multi-model mixture model .zcurve_density_get_weights_free = function(control, Dens, n.bars, Z.Density.Y, Z.Density.X){ startval = rep(1/length(control$mu),length(control$mu)) startval[1] = 1 startval = startval/sum(startval) auto = stats::nlminb(startval,.zcurve_density_fitting_free, Dens = Dens, n.bars = n.bars, Z.Density.Y = Z.Density.Y, Z.Density.X = Z.Density.X, lower = control$theta_min, upper = control$theta_max, PLOT = control$PLOT, control = list(eval.max=control$max_eval, iter.max = control$max_iter)) fit.free = auto$objective WT = auto$par WT = WT/sum(WT) # res = c(control$SLOPE,control$mu,WT,fit.free) # names(res) = c("SLOPE",rep("mu",length(control$mu)),rep("Weight",length(WT)),"Fit.Free") res <- list( "slope" = control$SLOPE, "mu" = control$mu, "weights" = WT, "objective" = auto$objective, "iter" = auto$iterations, "converged" = auto$convergence == 0, "message" = auto$message ) return(res) } ### THIS IS THE FUNCTION THAT COMPARES OBSERVED TO PREDICTED Z-VALUE DISTRIBUTIONS .zcurve_density_fitting_free = function(theta, Dens, n.bars, Z.Density.Y, Z.Density.X, PLOT){ ### get the weights and rescale weight = theta weight = weight/sum(weight) ### compute the new estimated density distribution z.est = c() for (i in 1:n.bars) z.est[i] = sum(Dens[,i]*weight) ### compare to observed density distribution rmse = sqrt(mean((z.est-Z.Density.Y)^2)) ### showing the fitting of the function in a plot if(PLOT==TRUE) { if (stats::runif(1) < .1) { graphics::plot(Z.Density.X,Z.Density.Y,type='l',ylim=c(0,1),xlab='Z') graphics::lines(Z.Density.X,z.est,lty=1,col="red1",ylim=c(0,1),) graphics::points(Z.Density.X,z.est,pch=20,col="red1",ylim=c(0,1),) Sys.sleep(1) } } ### return value to optimization function return(rmse) } ############################################## ### Get Densities ############################################## .zcurve_density_get_densities = function(Z.INT, z.val.input, control){ ### find the maximum z-score. This is only needed if the maximum z-score is below b max.z = control$b if (max(Z.INT) < max.z) max.z = max(Z.INT) if (control$density_dbc) { Z.Density.X = seq(control$a,control$b,.01)-control$a xx = Z.INT-control$a Z.Density.Y = evmix::dbckden(Z.Density.X,xx,bw=control$bw,bcmethod="reflect") Z.Density.X = Z.Density.X + control$a } else { if (control$aug) { if (control$a >= control$sig_level_Z + 2*control$bw) { AUG = z.val.input[z.val.input > control$a - 2*control$bw & z.val.input < control$a] } else { AUG = c() n.AUG = round(length(Z.INT[Z.INT > control$a & Z.INT < control$a+control$aug.bw])) if (n.AUG > 0) AUG = seq(control$a-control$aug.bw,control$a-.01,control$aug.bw/n.AUG) } Z.INT.USE = c(Z.INT,AUG) } else { Z.INT.USE = Z.INT[Z.INT > control$a & Z.INT <= max.z + 1] } Z.Density = stats::density(Z.INT.USE,n=control$n.bars,bw=control$bw,from=control$a,to=max.z) Z.Density.X = cbind(Z.Density$x,Z.Density$y)[Z.Density$x > control$a & Z.Density$x < max.z,1] Z.Density.Y = cbind(Z.Density$x,Z.Density$y)[Z.Density$x > control$a & Z.Density$x < max.z,2] } # End of density_dbc bar.width = Z.Density.X[2] - Z.Density.X[1] Z.Density.Y = Z.Density.Y/(sum(Z.Density.Y*bar.width)) densy = cbind(Z.Density.X,Z.Density.Y) return(densy) } ### End of Get Densities
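### A hedged, self-contained sketch (not used by the package) of the idea
### behind the density fitting functions above: weights of a mixture are
### chosen so that the weighted sum of component densities matches an
### observed density curve, with the discrepancy minimised by nlminb().
### For simplicity this uses plain normal components instead of the package's
### truncated .zdist_pdf(), and a simulated stand-in for the observed kernel
### density; it is an illustration, not the package's estimator.
if (FALSE) {
  x_grid   <- seq(2, 6, length.out = 100)        # density slices (z-scores)
  obs_dens <- stats::dnorm(x_grid, mean = 2.8)   # toy "observed" density
  obs_dens <- obs_dens / sum(obs_dens)
  mu       <- c(0, 2, 4)                         # component means

  mixture_rmse <- function(weights, mu, x_grid, obs_dens) {
    weights <- weights / sum(weights)            # weights sum to one
    comp <- sapply(mu, function(m) {
      d <- stats::dnorm(x_grid, mean = m)
      d / sum(d)                                 # each component sums to one
    })
    est <- as.vector(comp %*% weights)           # weighted mixture density
    sqrt(mean((est - obs_dens)^2))               # RMSE discrepancy
  }

  stats::nlminb(
    start = rep(1 / 3, 3), objective = mixture_rmse,
    lower = rep(0, 3), upper = rep(1, 3),
    mu = mu, x_grid = x_grid, obs_dens = obs_dens
  )
}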
/scratch/gouwar.j/cran-all/cranData/zcurve/R/zcurve_density.R
.onAttach <- function(libname, pkgname){
  # packageStartupMessage(
  #   "Please, note the following changes in version 1.0.9 (see NEWS for more details):\n- The ERR estimate now takes the directionality of the expected replications into account, which might lead to slight changes in the estimates."
  # )
}
/scratch/gouwar.j/cran-all/cranData/zcurve/R/zzz.R
#' Get Metrics on All Zendesk Tickets
#'
#' This function takes your Email Id, authentication token,
#' and sub-domain and parses all the tickets and their corresponding
#' metrics into a list. Since each request only returns 100
#' tickets at a time, the function keeps paging until the
#' "has_more" parameter is equal to FALSE.
#'
#' It's not a good practice to write down these authentication
#' parameters in your code. There are various methods and
#' packages available that are more secure; this package
#' doesn't require you to use any one in particular.
#'
#' @references
#' \url{https://developer.zendesk.com/rest_api/docs/support/ticket_metrics}
#'
#' @param email_id Zendesk Email Id (username).
#' @param token Zendesk API token.
#' @param subdomain Your organization's Zendesk sub-domain.
#'
#' @return Data frame with metrics for all tickets.
#'
#' @import dplyr
#' @importFrom jsonlite "fromJSON"
#' @importFrom httr "content"
#' @importFrom httr "authenticate"
#' @importFrom purrr "map_dfr"
#'
#' @export
#'
#' @examples \dontrun{
#' ticket_metrics <- get_all_ticket_metrics(email_id, token, subdomain)
#' }
get_all_ticket_metrics <- function(email_id, token, subdomain) {
  user <- paste0(email_id, "/token")
  pwd <- token
  subdomain <- subdomain
  after_cursor <- ""

  # Stop pagination when the "has_more" parameter is FALSE.
  req_metrics <- list()
  stop_paging <- FALSE
  i <- 1

  while (stop_paging == FALSE) {
    url_metrics <- paste0(
      "https://", subdomain,
      ".zendesk.com/api/v2/ticket_metrics.json?page[after]=",
      after_cursor, "&page[size]=100"
    )

    req_metrics[[i]] <- httr::RETRY("GET",
      url = url_metrics,
      httr::authenticate(user, pwd),
      times = 4,
      pause_min = 10,
      terminate_on = NULL,
      terminate_on_success = TRUE,
      pause_cap = 5
    )

    if (jsonlite::fromJSON(
      httr::content(req_metrics[[i]], "text"),
      flatten = TRUE
    )$meta$has_more == FALSE) {
      stop_paging <- TRUE
    }

    after_cursor <- jsonlite::fromJSON(
      httr::content(req_metrics[[i]], "text"),
      flatten = TRUE
    )$meta$after_cursor

    i <- i + 1
  }

  build_data_frame <- function(c) {
    metrics <- as.data.frame((jsonlite::fromJSON(httr::content(
      req_metrics[[c]], "text"
    ), flatten = TRUE))$ticket_metrics)
  }

  ticket_metrics_df <- purrr::map_dfr(
    seq_len(length(req_metrics)),
    build_data_frame
  )

  return(ticket_metrics_df)
}
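# A hedged usage sketch (not run): ticket metrics reference their parent
# ticket through a ticket_id column (per the Zendesk API docs; stated here as
# an assumption), so they can be joined onto the output of get_tickets().
if (FALSE) {
  tickets <- get_tickets(email_id, token, subdomain, start_time = "2021-01-01")
  metrics <- get_all_ticket_metrics(email_id, token, subdomain)
  tickets_with_metrics <- dplyr::left_join(
    tickets, metrics,
    by = c("id" = "ticket_id")
  )
}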
/scratch/gouwar.j/cran-all/cranData/zdeskR/R/get_all_ticket_metrics.R
#' Returns the system and all the custom fields defined by
#' your organization's Zendesk administrator
#'
#' It takes your Email Id, authentication token, and
#' sub-domain as parameters and gets the system and all
#' the custom fields available for a Zendesk ticket.
#'
#' It's not a good practice to write down these authentication
#' parameters in your code. There are various methods and
#' packages available that are more secure; this package
#' doesn't require you to use any one in particular.
#'
#' @references
#' \url{https://developer.zendesk.com/rest_api/docs/support/ticket_fields}
#'
#' @param email_id Zendesk Email Id (username).
#' @param token Zendesk API token.
#' @param subdomain Your organization's Zendesk sub-domain.
#'
#' @return A data frame containing all ticket fields.
#'
#' @import dplyr
#' @importFrom magrittr "%>%"
#' @importFrom jsonlite "fromJSON"
#' @importFrom httr "content"
#'
#' @export
#'
#' @examples \dontrun{
#' fields <- get_custom_fields(email_id, token, subdomain)
#' }
get_custom_fields <- function(email_id, token, subdomain) {
  user <- paste0(email_id, "/token")
  pwd <- token
  subdomain <- subdomain

  url_fields <- paste0(
    "https://", subdomain,
    ".zendesk.com/api/v2/ticket_fields.json"
  )

  field_req <- httr::RETRY("GET",
    url = url_fields,
    httr::authenticate(user, pwd),
    times = 4,
    pause_min = 10,
    terminate_on = NULL,
    terminate_on_success = TRUE,
    pause_cap = 5
  )

  field_content <- httr::content(field_req, "text")
  field_json <- jsonlite::fromJSON(field_content, flatten = TRUE)
  field_df <- as.data.frame(field_json$ticket_fields)

  return(field_df)
}
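# A hedged usage sketch (not run): get_tickets() names its pivoted
# custom-field columns by numeric field id, so the id/title pairs returned by
# get_custom_fields() can be used to give those columns readable names. This
# assumes the ticket_fields response includes `id` and `title` columns.
if (FALSE) {
  fields  <- get_custom_fields(email_id, token, subdomain)
  tickets <- get_tickets(email_id, token, subdomain, start_time = "2021-01-01")

  id_to_title <- stats::setNames(fields$title, as.character(fields$id))
  matched <- names(tickets) %in% names(id_to_title)
  names(tickets)[matched] <- id_to_title[names(tickets)[matched]]
}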
/scratch/gouwar.j/cran-all/cranData/zdeskR/R/get_custom_fields.R
#' Get Zendesk Tickets #' #' This function takes your Email Id, authentication token, #' sub-domain and start time as parameters and gets all the #' tickets which have been updated on or after the start #' time parameter. By default each page returns 1000 unique #' tickets and an "after_cursor" value which stores a #' pointer to the next page. After getting the first page #' it uses the pointer to fetch the subsequent pages. #' #' The start time parameter should be in 'UTC' format as #' Zendesk uses the 'UTC' time zone when retrieving tickets #' after the start time. For example, the US Eastern Time Zone #' is currently four hours being UTC. If one wanted to get tickets #' starting on August 1 at 12 am, you would need to enter #' "2020-08-01 04:00:00". The user must do proper adjustment #' to accommodate the time zone difference, if desired. A #' date can be provided, it will retrieve results as of 12 am #' in the UTC time zone. #' #' Start and end times can be entered with or without the time #' component. End time cannot be in the future, but should work #' for values up to one minute prior to the current time. #' #' It's not a good practice to write down these authentication #' parameters in your code. There are various methods and #' packages available that are more secure; this package #' doesn't require you to use any one in particular. #' #' The remove_cols parameter allows the removal of custom fields causing errors. #' Errors occurred when a field was sometimes blank and assigned a logical type #' and then appended to non-blank, non-logical inside of purrr::map_dfr. #' See issue #1 on GH. #' #' @references \url{https://developer.zendesk.com/rest_api #' /docs/support/incremental_export#start_time} #' #' @param email_id Zendesk Email Id (username). #' @param token Zendesk API token. #' @param subdomain Your organization's Zendesk sub-domain. #' @param start_time String with a date or datetime to get all #' tickets modified after that date. #' @param remove_cols Vector of column names to remove from the results. #' #' @return a Data Frame containing all tickets after the #' start time. 
#' #' @import dplyr #' @importFrom magrittr "%>%" #' @importFrom jsonlite "fromJSON" #' @importFrom httr "content" #' @importFrom tidyr "pivot_wider" #' @importFrom purrr "map_dfr" #' @importFrom plyr "rbind.fill" #' #' @export #' #' @examples \dontrun{ #' all_tickets <- get_tickets(email_id, token, subdomain, #' start_time = "2021-01-31 00:00:00", end_time = "2021-01-31 23:59:59" #' ) #' } get_tickets <- function(email_id, token, subdomain, start_time, remove_cols = NULL) { user <- paste0(email_id, "/token") pwd <- token unix_start <- to_unixtime(as.POSIXct(start_time)) request_ticket <- list() stop_paging <- FALSE i <- 1 while (stop_paging == FALSE) { url <- paste0( "https://", subdomain, ".zendesk.com/api/v2/incremental/tickets.json?start_time=", unix_start ) request_ticket[[i]] <- httr::RETRY("GET", url = url, httr::authenticate(user, pwd), times = 4, pause_min = 10, terminate_on = NULL, terminate_on_success = TRUE, pause_cap = 5 ) unix_start <- (jsonlite::fromJSON(httr::content( request_ticket[[i]], "text" ), flatten = TRUE))$end_time if ((jsonlite::fromJSON(httr::content(request_ticket[[i]], "text"), flatten = TRUE ))$end_of_stream == TRUE) { stop_paging <- TRUE } i <- i + 1 } build_data_frame <- function(c) { tickets <- as.data.frame((jsonlite::fromJSON(httr::content( request_ticket[[c]], "text" ), flatten = TRUE))$tickets) } tickets <- purrr::map_dfr(seq_len(length(request_ticket)), build_data_frame) pivot_data_frame <- function(c) { pivot_df <- as.data.frame(tickets$custom_fields[c]) %>% mutate(across(.cols = .data$value, as.character)) %>% tidyr::pivot_wider(names_from = .data$id, values_from = .data$value) %>% select(-remove_cols) } ticket_final <- purrr::map_dfr(seq_len(nrow(tickets)), pivot_data_frame) ticket_final2 <- bind_cols(tickets, ticket_final) return(ticket_final2) }
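# A hedged usage sketch (not run): values passed to remove_cols are the names
# of the pivoted custom-field columns, i.e. the numeric field ids also
# returned by get_custom_fields(). The field id below is purely illustrative.
if (FALSE) {
  tickets <- get_tickets(email_id, token, subdomain,
                         start_time = "2021-01-31 00:00:00",
                         remove_cols = c("360012345678"))
}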
/scratch/gouwar.j/cran-all/cranData/zdeskR/R/get_tickets.R
#' Get ticket comments/replies
#'
#' This function takes your email ID, authentication token, sub-domain,
#' and a specific ticket ID to fetch all comments/replies for that ticket.
#'
#' By default only these columns are returned: "id", "type", "author_id",
#' "body", "created_at", "have_attachments". You can add other variables using
#' the `add_cols` parameter. The variables that can be requested are described
#' in the Zendesk API documentation:
#' https://developer.zendesk.com/api-reference/ticketing/tickets/ticket_comments/.
#'
#' The meanings of the default columns are described at the link above, except
#' "have_attachments", which is a boolean field that will be "Yes"
#' if the comment has an attachment or "No" if it does not. The attachment
#' itself cannot be returned.
#'
#' If you request the `metadata`, sensitive data (location, lat, long,
#' IP address, etc.) will be included. This data should be handled with care and
#' only stored and used per your organization's policies and applicable
#' privacy regulations.
#'
#' @references
#' \url{https://developer.zendesk.com/api-reference/ticketing/tickets/ticket_comments/}
#'
#' @param email_id Zendesk Email ID (username).
#' @param token Zendesk API token.
#' @param subdomain Your organization's Zendesk sub-domain.
#' @param ticket_id The ticket ID number. A numeric value.
#' @param add_cols Vector of column names to select in addition to the default.
#' @param metadata Logical value (TRUE or FALSE). If TRUE, metadata columns
#' will be included. This is set to FALSE by default.
#'
#' @return A data frame containing all comments/replies for a single ticket.
#'
#' @import dplyr
#' @import jsonlite
#' @import httr
#' @importFrom magrittr "%>%"
#' @importFrom jsonlite "fromJSON"
#' @importFrom httr "content"
#' @importFrom tidyselect "all_of"
#'
#' @export
#'
#' @examples \dontrun{
#' ## Extracting comments with default columns and without sensitive data
#' comments_ticket_id <- get_tickets_comments(email_id, token, subdomain,
#'   ticket_id, add_cols = NULL, metadata = FALSE)
#'
#' ## Extracting comments with additional columns and sensitive data
#' comments_ticket_id <- get_tickets_comments(email_id, token, subdomain,
#'   ticket_id, add_cols = c("html_body", "attachments"), metadata = TRUE)
#' }

# function to extract ticket comments/replies
get_tickets_comments <- function(email_id, token, subdomain, ticket_id,
                                 add_cols = NULL, metadata = FALSE) {
  user <- paste0(email_id, "/token")
  pwd <- token
  ticket_id <- ticket_id

  url <- paste0(
    "https://", subdomain, ".zendesk.com/api/v2/tickets/",
    ticket_id, "/comments?"
  )

  request_ticket <- httr::RETRY("GET",
    url = url,
    httr::authenticate(user, pwd)
  )

  response_text <- httr::content(request_ticket, as = "text")
  parsed_response <- jsonlite::fromJSON(response_text)

  default_columns <- c(
    "id", "type", "author_id", "body", "created_at", "have_attachments"
  )

  if (length(add_cols) != 0) {
    columns_final <- c(default_columns, add_cols)
  } else {
    columns_final <- default_columns
  }

  sensitive_data_columns <- c("metadata")

  # default is FALSE: sensitive metadata columns are only added on request
  if (metadata == TRUE) {
    columns_final <- c(columns_final, sensitive_data_columns)
  }

  comments_extracted2 <- as.data.frame(parsed_response$comments) %>%
    # flag comments whose attachments list is empty; iterate over the
    # attachments list-column itself (not the literal string "attachments")
    mutate(have_attachments = ifelse(
      sapply(
        parsed_response$comments$attachments,
        function(list_element) length(list_element) == 0
      ),
      "No", "Yes"
    )) %>%
    select(all_of(columns_final))

  return(comments_extracted2)
}
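# A hedged usage sketch (not run): the function returns comments for a single
# ticket, so comments for several tickets can be stacked with purrr::map_dfr().
# The ticket ids below are illustrative.
if (FALSE) {
  ids <- c(1001, 1002, 1003)
  all_comments <- purrr::map_dfr(ids, function(id) {
    dplyr::mutate(
      get_tickets_comments(email_id, token, subdomain, ticket_id = id),
      ticket_id = id
    )
  })
}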
/scratch/gouwar.j/cran-all/cranData/zdeskR/R/get_tickets_comments.R
#' Returns All Available Zendesk Users. #' #' It takes your Email Id, authentication token, #' sub-domain and parse all the users in a list. #' It iterates through all the pages returning only 100 users per #' page until the "next_page" parameter becomes null indicating #' there are no more pages to fetch. #' #' It's not a good practice to write down these authentication #' parameters in your code. There are various methods and #' packages available that are more secure; this package #' doesn't require you to use any one in particular. #' #' The start_page parameter is useful if you have many users. Each #' page contains 100 users. Zendesk does not have an incremental #' method for pulling users by date but after you retrieve all of #' your users once, you can then increment your start page to #' something that will limit the number of users you are #' re-pulling each time. #' #' If you are pulling partial lists of users be aware that you #' will not get updates on older users. You will only get recently #' created users, not modified/deleted users and their modified #' data nor updated last login dates. #' #' #' @references \url{https://developer.zendesk.com/rest_api #' /docs/support/users} #' #' @param email_id Zendesk Email Id (username). #' @param token Zendesk API token. #' @param subdomain Your organization's Zendesk sub-domain. #' @param start_time String with a date or datetime to get all #' tickets modified after that date. #' @param user_role User role, one of "all", "end-user", "agent", or "admin". #' #' @return Data Frame with user details #' #' @import dplyr #' @importFrom jsonlite "fromJSON" #' @importFrom httr "content" #' @importFrom httr "authenticate" #' @importFrom purrr "map_dfr" #' #' @export #' #' @examples \dontrun{ #' users <- get_users(email_id, token, subdomain) #' } get_users <- function(email_id, token, subdomain, start_time, user_role = "all") { user <- paste0(email_id, "/token") pwd <- token unix_start <- to_unixtime(as.POSIXct(start_time)) req_users <- list() stop_paging <- FALSE i <- 1 while (stop_paging == FALSE) { url_users <- paste0( "https://", subdomain, ".zendesk.com/api/v2/incremental/users.json?start_time=", unix_start ) req_users[[i]] <- httr::RETRY("GET", url = url_users, httr::authenticate(user, pwd), times = 4, pause_min = 10, terminate_on = NULL, terminate_on_success = TRUE, pause_cap = 5 ) unix_start <- (jsonlite::fromJSON(httr::content( req_users[[i]], "text" ), flatten = TRUE))$end_time if ((jsonlite::fromJSON(httr::content(req_users[[i]], "text"), flatten = TRUE ))$end_of_stream == TRUE) { stop_paging <- TRUE } i <- i + 1 } build_data_frame <- function(c) { users <- as.data.frame((jsonlite::fromJSON(httr::content( req_users[[c]], "text" ), flatten = TRUE))$users) } if(user_role == "all") { users_df <- purrr::map_dfr(seq_len(length(req_users)), build_data_frame) } else { users_df <- purrr::map_dfr(seq_len(length(req_users)), build_data_frame) %>% filter(.data$role == user_role) } return(users_df) }
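# A hedged usage sketch (not run): start_time is required, and user_role can
# restrict the result to a single role ("end-user", "agent", or "admin").
if (FALSE) {
  agents <- get_users(email_id, token, subdomain,
                      start_time = "2021-01-01", user_role = "agent")
}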
/scratch/gouwar.j/cran-all/cranData/zdeskR/R/get_users.R
#' Convert a Date or Datetime Object to Unix Datetime
#'
#' This is a general purpose helper function for this
#' package useful in converting a time stamp to unix time.
#'
#' This function is inspired/borrowed from a github page
#' by Neal Richardson who did similar work to convert the
#' general date time format to unix time.
#'
#' The unix time stamp is a way to track time as a running
#' total of seconds. This count starts at the Unix Epoch on
#' January 1st, 1970 at UTC. Therefore, the unix time stamp
#' is merely the number of seconds between a particular date
#' and the Unix Epoch.
#'
#' @param x Date or datetime string or object.
#'
#' @return Integer vector with unix time stamp.
#'
#' @keywords internal
to_unixtime <- function(x) {
  if (is.character(x)) {
    x <- from_8601(x)
  }

  # Check whether x is a Date/POSIXt object; if so, convert it to POSIXct
  # before taking its integer (unix) value.
  if (inherits(x, c("POSIXt", "Date"))) {
    x <- as.POSIXct(x)
  }

  x <- as.integer(x)

  if (is.na(x)) {
    message(paste0(
      "The start time is not in the right format. ",
      "Fix and retry the request."
    ))
    stop()
  }

  return(x)
}

#' Clean Date Time Inputs and Convert to UTC
#'
#' This function is called from the to_unixtime() function.
#' It takes a date or datetime string as a parameter and parses
#' it into a datetime value in the UTC time zone, stripping
#' unnecessary characters (such as the colon in a timezone
#' offset) from the timestamp along the way.
#'
#' @param x Start time value provided by user.
#'
#' @importFrom stats "na.omit"
#'
#' @return A datetime vector with the user's start time
#' converted to UTC time zone.
#'
#' @keywords internal
from_8601 <- function(x) {
  # Parse ISO-8601-formatted date strings and return POSIXlt
  if (all(grepl("^[0-9]{4}-[0-9]{2}-[0-9]{2}$", na.omit(x)))) {
    pattern <- "%Y-%m-%d"
  } else if (any(grepl("+", x, fixed = TRUE))) {
    # Strip out a : from the timezone offset, if present
    x <- sub("^(.*[+-][0-9]{2}):([0-9]{2})$", "\\1\\2", x)
    pattern <- "%Y-%m-%dT%H:%M:%OS%z"
  } else {
    pattern <- "%Y-%m-%dT%H:%M:%OS"
  }
  return(strptime(x, pattern, tz = "UTC"))
}
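# A hedged reference sketch (not run): the helper maps an ISO-8601 date or
# datetime string to seconds since the Unix epoch, interpreting character
# input in UTC.
if (FALSE) {
  to_unixtime("2020-08-01")            # 1596240000 (midnight UTC)
  to_unixtime("2020-08-01T04:00:00")   # 1596254400
}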
/scratch/gouwar.j/cran-all/cranData/zdeskR/R/utils.R