#' Generate Random Data Sets #' #' Generates random data sets including: data.frames, lists, and vectors. #' @docType package #' @name wakefield #' @aliases wakefield package-wakefield NULL #' Augmented List of Grady Ward's English Words and Mark Kantrowitz's Names List #' #' A dataset containing a vector of Grady Ward's English words augmented with #' \pkg{qdapDictionaries}'s \code{DICTIONARY}, Mark Kantrowitz's names list, #' other proper nouns, and contractions. #' #' @details A dataset containing a vector of Grady Ward's English words #' augmented with proper nouns (U.S. States, Countries, Mark Kantrowitz's Names #' List, and months) and contractions. The dataset is augmented to increase its #' size. #' #' @docType data #' @keywords datasets #' @name grady_augmented #' @usage data(grady_augmented) #' @format A character vector with 122806 elements #' @references Moby Thesaurus List by Grady Ward https://www.gutenberg.org \cr \cr #' List of names from Mark Kantrowitz http://www.cs.cmu.edu/afs/cs/project/ai-repository/ai/areas/nlp/corpora/names/. #' A copy of the http://www.cs.cmu.edu/afs/cs/project/ai-repository/ai/areas/nlp/corpora/names/readme.txt #' is included per the author's request. NULL #' Gender Neutral Names #' #' A dataset containing a character vector of gender neutral names according to the #' U.S. Census. #' #' @docType data #' @keywords datasets #' @name name_neutral #' @usage data(name_neutral) #' @format A character vector with 662 elements #' @references http://www.census.gov NULL #' 2012 U.S. Presidential Debate Dialogue #' #' A dataset containing 2911 ordered sentences used by speakers during the three #' 2012 presidential debates. #' #' @docType data #' @keywords datasets #' @name presidential_debates_2012 #' @usage data(presidential_debates_2012) #' @format A character vector with 2911 elements NULL #' Animal List #' #' A dataset containing a character vector of animals. #' #' @docType data #' @keywords datasets #' @name animal_list #' @usage data(animal_list) #' @format A character vector with 591 elements #' @references https://a-z-animals.com/animals NULL #' State Populations (2010) #' #' A dataset containing U.S. state populations. #' #' @details #' \itemize{ #' \item State. The 50 U.S. states. #' \item Population. Population of the state. #' \item Proportion. Proportion of total U.S. population. #' } #' #' @docType data #' @keywords datasets #' @name state_populations #' @usage data(state_populations) #' @format A data frame with 50 rows and 3 variables #' @references https://en.wikipedia.org/wiki/List_of_U.S._states_and_territories_by_population NULL #' Languages of the World #' #' A dataset containing native language use statistics taken from: #' https://en.wikipedia.org/wiki/List_of_languages_by_number_of_native_speakers. #' #' @details #' \itemize{ #' \item Language. The language spoken. #' \item N. The number of speakers world-wide. #' \item Proportion. The proportion of speakers. #' \item Percent. The percentage of speakers. #' } #' #' @docType data #' @keywords datasets #' @name languages #' @usage data(languages) #' @format A data frame with 99 rows and 4 variables #' @references https://en.wikipedia.org/wiki/List_of_languages_by_number_of_native_speakers NULL
/scratch/gouwar.j/cran-all/cranData/wakefield/R/wakefield-package.R
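# Usage sketch (not part of the package sources above): the documented datasets
# ship with wakefield and can be inspected directly. Assumes the wakefield
# package is installed; the counts below come from the @format fields.
library(wakefield)
data(grady_augmented)
length(grady_augmented)        # 122806 elements, per the documentation
data(state_populations)
head(state_populations)        # columns: State, Population, Proportion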
#' Generate Random Vector of Years #' #' Generate a random vector of years. #' #' @inheritParams r_sample_factor #' @return Returns a random vector of year elements. #' @keywords year #' @export #' @include utils.R r_sample.R #' @family variable functions #' @examples #' year(10) #' pr <- probs(length(1996:2016)) #' pie(table(year(10000, x = 1996:2016, prob = pr))) year <- function (n, x = 1996:as.numeric(format(Sys.Date(), "%Y")), prob = NULL, name = "Year") { if (missing(n)) stop("`n` is missing") out <- sample(x = x, size = n, replace = TRUE, prob = prob) varname(out, name) }
/scratch/gouwar.j/cran-all/cranData/wakefield/R/year.R
#' Generate Random Vector of Zip Codes #' #' Generate a random vector of zip codes. #' #' @inheritParams color #' @return Returns a random vector of zip code elements. #' @keywords zip_code #' @export #' @include utils.R r_sample.R #' @family variable functions #' @examples #' zip_code(10) #' pie(table(zip_code(10000, prob = probs(10)))) zip_code <- function (n, k = 10, x = 10000:99999, prob = NULL, name = "Zip") { stopifnot(k < length(x) && k > 0) if (!is.null(prob) && length(prob) != k) stop("length of `prob` must equal `k`") out <- sample(x = lvls <- gsub("(\\w)(\\w*)", "\\U\\1\\L\\2", sample(x, k), perl=TRUE), size = n, replace = TRUE, prob = prob) varname(out, name) }
/scratch/gouwar.j/cran-all/cranData/wakefield/R/zip_code.R
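# Usage sketch (not part of the package sources above): zip_code() first draws
# `k` distinct codes from `x`, then samples `n` values from those codes, so
# `prob` must have length `k`, not length(x). Equivalent two-step base-R form:
set.seed(1)
lvls <- sample(10000:99999, 5)                    # the k = 5 distinct zip codes
prob <- c(0.5, 0.2, 0.1, 0.1, 0.1)                # one weight per drawn code
out <- sample(lvls, size = 20, replace = TRUE, prob = prob)
table(out)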
#' @title Export wal instance to JPEG format image file. #' #' @param wal a wal instance, as returned by \code{read.wal} #' #' @param filepath character string, path to the JPEG file to write, including the file extension. #' #' @param ... extra parameters passed to \code{jpeg::writeJPEG}. Can be used to set JPEG quality. #' #' @inheritParams read.wal #' #' @examples #' \dontrun{ #' walf = '~/data/q2_pak0_extracted/textures/e1u2/basic1_7.wal'; #' wal = read.wal(walf); #' wal.export.to.jpeg(wal, "~/basic1_7.jpg"); #' } #' #' @importFrom jpeg writeJPEG #' @importFrom freesurferformats rotate3D flip3D #' @export wal.export.to.jpeg <- function(wal, filepath, apply_palette = wal::pal_q2(), ...) { if(! is.character(filepath)) { stop("Parameter 'filepath' must be a character string."); } if(is.null(apply_palette)) { stop("Parameter 'apply_palette' must not be NULL."); } check.palette(apply_palette); channel_red = apply_palette[wal$raw_data, 1]; channel_green = apply_palette[wal$raw_data, 2]; channel_blue = apply_palette[wal$raw_data, 3]; jpeg_img = array( (c( channel_red , channel_green, channel_blue )) , dim = c( wal$header$width, wal$header$height, 3)) / 255.; jpeg_img = freesurferformats::rotate3D(jpeg_img, 3, 90) jpeg_img = freesurferformats::flip3D(jpeg_img, 1, "horizontally") jpeg::writeJPEG(jpeg_img, target = filepath, ...); } #' @title Export wal instance to PNG format image file. #' #' @param wal a wal instance, as returned by \code{read.wal} #' #' @param filepath character string, path to the PNG file to write, including the file extension. #' #' @param ... extra parameters passed to \code{png::writePNG}. #' #' @inheritParams read.wal #' #' @examples #' \dontrun{ #' walf = '~/data/q2_pak0_extracted/textures/e1u2/basic1_7.wal'; #' wal = read.wal(walf); #' wal.export.to.png(wal, "~/basic1_7.png"); #' } #' #' @importFrom png writePNG #' @importFrom freesurferformats rotate3D flip3D #' @export wal.export.to.png <- function(wal, filepath, apply_palette = wal::pal_q2(), ...) { if(! is.character(filepath)) { stop("Parameter 'filepath' must be a character string."); } if(is.null(apply_palette)) { stop("Parameter 'apply_palette' must not be NULL."); } check.palette(apply_palette); channel_red = apply_palette[wal$raw_data, 1]; channel_green = apply_palette[wal$raw_data, 2]; channel_blue = apply_palette[wal$raw_data, 3]; png_img = array( (c( channel_red , channel_green, channel_blue )) , dim = c( wal$header$width, wal$header$height, 3)) / 255.; png_img = freesurferformats::rotate3D(png_img, 3, 90) png_img = freesurferformats::flip3D(png_img, 1, "horizontally") png::writePNG(png_img, target = filepath, ...); }
/scratch/gouwar.j/cran-all/cranData/wal/R/export.R
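# Standalone sketch of the core step shared by both exporters above: each
# 1-based pixel index is looked up in an n x 3 palette to build an RGB array
# with values in 0..1. The 4x4 image and 2-color palette here are made up.
pal <- matrix(c(0L, 0L, 0L,      # color 1: black
                255L, 0L, 0L),   # color 2: red
              ncol = 3, byrow = TRUE)
idx <- matrix(sample(1:2, 16, replace = TRUE), nrow = 4)   # indexed 4x4 image
img <- array(c(pal[idx, 1], pal[idx, 2], pal[idx, 3]), dim = c(4, 4, 3)) / 255
dim(img)   # 4 4 3, the layout expected by jpeg::writeJPEG / png::writePNG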
#' @title Convert image to WAL instance. #' #' @description Convert an input RGB image to a WAL instance, re-mapping its colors to the WAL palette in the process and generating the mipmaps. #' #' @param in_image numeric array with 3 dimensions: width, height, channels. Values must be in range 0..1. This is the image format returned by \code{jpeg::readJPEG} and \code{png::readPNG}. The image can have arbitrary colors, but the colors in the final WAL image will be limited to the palette. Both the width and height must be multiples of 8. Typical idtech1/2 textures use 32, 64, ..., 512. The reason for this restriction is the mipmaps. #' #' @param apply_palette n x 3 integer matrix, the palette for the WAL image. This is not saved to the wal image, but still required because the colors from the \code{in_image} will be adapted to the palette colors (replaced with the most similar ones). If the palette does not cover the colors in the source image well, the resulting WAL image will look bad (dissimilar to the source image). #' #' @inheritParams writeWAL #' #' @return wal instance #' #' @examples #' \dontrun{ #' wal = img.to.wal(jpeg::readJPEG("~/mytex.jpg")); #' } #' #' @export img.to.wal <- function(in_image, apply_palette = wal::pal_q2(), wal = wal.template()) { if(length(dim(in_image)) != 3L) { stop("Parameter 'in_image' must have 3 dimensions: image width, height, and channels (R, G, B)."); } check.palette(apply_palette); wal$header$width = dim(in_image)[1]; wal$header$height = dim(in_image)[2]; num_channels = dim(in_image)[3]; if(num_channels != 3L) { stop("Parameter 'in_image': third dimension must have length 3 (channels R, G, B)."); } if(wal$header$width %% 8 != 0L) { stop(sprintf("Input image has invalid width %d, must be a multiple of 8.", wal$header$width)); } if(wal$header$height %% 8 != 0L) { stop(sprintf("Input image has invalid height %d, must be a multiple of 8.", wal$header$height)); } in_image = in_image * 255L; # convert data from range 0..1 to 0..255. in_image_matrix = matrix(in_image, nrow = (wal$header$width * wal$header$height), ncol = 3L); palette_col_indices = closest.color.from.palette(in_image_matrix, apply_palette); wal$file_data_all_mipmaps = expand.rawdata.to.mipmaps(palette_col_indices, wal$header$width, wal$header$height); wal$raw_data = wal$file_data_all_mipmaps[1:(wal$header$width * wal$header$height)]; wal$header$mip_level_offsets = get.mipmap.data.offsets(wal$header$width, wal$header$height); return(wal); } #' @title Compute length of mipmaps in bytes from width and height of largest image (mipmap0). #' #' @param mm0_width integer, width of mipmap 0 #' #' @param mm0_height integer, height of mipmap 0 #' #' @return integer vector of length 4, the lengths. #' #' @keywords internal get.mipmap.data.lengths <- function(mm0_width, mm0_height) { m0_l = mm0_width * mm0_height; m1_l = (mm0_width/2) * (mm0_height/2); m2_l = (mm0_width/4) * (mm0_height/4); m3_l = (mm0_width/8) * (mm0_height/8); return(c(m0_l, m1_l, m2_l, m3_l)); } #' @title Get mipmap offsets for WAL header, based on mipmap sizes and start offset. #' #' @inheritParams get.mipmap.data.lengths #' #' @param start_at integer, the offset at which the data starts in the file. Must be 100L for WAL format. #' #' @return integer vector of length 4, the offsets.
#' #' @keywords internal get.mipmap.data.offsets <- function(mm0_width, mm0_height, start_at = 100L) { mipmaps_lengths = get.mipmap.data.lengths(mm0_width, mm0_height); m0_o = start_at; m1_o = start_at + mipmaps_lengths[1]; m2_o = start_at + mipmaps_lengths[1] + mipmaps_lengths[2]; m3_o = start_at + mipmaps_lengths[1] + mipmaps_lengths[2] + mipmaps_lengths[3]; return(c(m0_o, m1_o, m2_o, m3_o)); } #' @title Generate a WAL structure template. #' #' @description Generates a WAL instance that can be modified and filled with new data. The template represents a black 32x32 image (if the palette used to display it adheres to the convention that the first color is black). The indices used are 1-based (black is at index 1, not 0). #' #' @keywords internal wal.template <- function() { wal = list('header' = list()); wal$header$tex_name = "e1u1/black"; wal$header$width = 32L; wal$header$height = 32L; # the last (4th) offset is the beginning of the data for the last mipmap, the end is the EOF. wal$header$mip_level_offsets = c(100L, (100L + 32*32), (100L + 32*32 + 16*16), (100L + 32*32 + 16*16 + 8*8)); wal$header$anim_name = ""; wal$header$flags = 0L; wal$header$contents = 0L; wal$header$value = 0L; num_values = 32*32 + 16*16 + 8*8 + 4*4; # for all mipmaps wal$file_data_all_mipmaps = rep(1L, num_values); # the first value in the palette is black. wal$raw_data = wal$file_data_all_mipmaps[1:(32*32)]; class(wal) = c('wal', class(wal)); return(wal); } #' @title Given the pixel data for the largest image, generate the full data for all mipmaps. #' #' @param raw_data_mip_level0 integer vector or matrix, the image data for the largest mipmap. #' #' @param width integer, width of image for mip level 0 #' #' @param height integer, height of image for mip level 0 #' #' @param byrow logical, whether the mipmap matrices are filled by row; passed on to \code{matrix}. #' #' @keywords internal expand.rawdata.to.mipmaps <- function(raw_data_mip_level0, width, height, byrow = TRUE) { if(! is.matrix(raw_data_mip_level0)) { raw_data_mip_level0 = matrix(raw_data_mip_level0, ncol = width, byrow = byrow); } mip_level1 = half.image(raw_data_mip_level0, byrow = byrow); mip_level2 = half.image(mip_level1, byrow = byrow); mip_level3 = half.image(mip_level2, byrow = byrow); return(c(as.integer(raw_data_mip_level0), as.integer(mip_level1), as.integer(mip_level2), as.integer(mip_level3))); } #' @title Reduce image size by 2 along both axes by dropping pixels. #' #' @param image_data integer matrix, a 1-channel image. #' #' @param byrow logical, whether the output matrix is filled by row. #' #' @keywords internal half.image <- function(image_data, byrow = TRUE) { if(! is.matrix(image_data)) { stop("Parameter 'image_data' must be a matrix."); } mip_data = rep(NA, ((ncol(image_data) / 2L) * (nrow(image_data) / 2L))); current_index = 1L; for(row_idx in 1:nrow(image_data)) { for(col_idx in 1:ncol(image_data)) { if(row_idx %% 2 == 0L) { if(col_idx %% 2 == 0L) { mip_data[current_index] = image_data[row_idx, col_idx]; current_index = current_index + 1L; } } } } mip_data = matrix(mip_data, ncol = (ncol(image_data) / 2L), byrow = byrow); return(mip_data); } #' @title Find closest color from palette for each RGB color. #' #' @description Find closest color from a palette for given colors. The similarity method used to define 'closest' is deltaE, and the input RGB colors are transformed to LAB space for the computation, assuming they are given in sRGB space. #' #' @param colors_rgb n x 3 integer matrix, the truecolor (arbitrary) input RGB colors for which you want to find the most similar colors included in the fixed palette. Range 0..255.
#' #' @param fixed_palette_rgb the fixed palette, an m x 3 matrix of integers, representing the fixed palette colors in RGB values in range 0..255. #' #' @return vector of n integers, the index of the closest color into the palette for each of the \code{colors_rgb}. #' #' @examples #' colors_rgb = matrix(c(255, 0, 0, 100, 100, 100, 10, 10, 10, 5, 5, 5), #' ncol = 3, byrow = TRUE); #' fixed_palette_rgb = matrix(c(255, 0, 0, 255, 5, 0, 11, 11, 11, 0, 0, 0, #' 255, 255, 255), ncol = 3, byrow = TRUE); #' pal_similar_colors = closest.color.from.palette(colors_rgb, #' fixed_palette_rgb); #' #' @importFrom spacesXYZ DeltaE #' @importFrom grDevices convertColor #' @export closest.color.from.palette <- function(colors_rgb, fixed_palette_rgb) { if(max(colors_rgb) <= 1.0) { warning("Parameter 'colors_rgb': max data value <= 1.0, is the data in range 0-255?"); } if(max(fixed_palette_rgb) <= 1.0) { warning("Parameter 'fixed_palette_rgb': max data value <= 1.0, is the data in range 0-255?"); } colors_lab = grDevices::convertColor(colors_rgb, from="sRGB", to="Lab"); fixed_palette_lab = grDevices::convertColor(fixed_palette_rgb, from="sRGB", to="Lab"); result_indices = rep(NA, nrow(colors_rgb)); for(i in 1:nrow(colors_rgb)) { result_indices[i] = which.min(spacesXYZ::DeltaE(colors_lab[i,], fixed_palette_lab)); } return(result_indices); }
/scratch/gouwar.j/cran-all/cranData/wal/R/image_to_wal.R
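# Sketch (not a drop-in replacement): half.image() above keeps every pixel
# whose row and column index are both even, which for the default byrow = TRUE
# case is equivalent to this vectorized subsetting.
half_image_vec <- function(image_data) {
  if (!is.matrix(image_data)) stop("Parameter 'image_data' must be a matrix.")
  image_data[seq(2L, nrow(image_data), by = 2L),
             seq(2L, ncol(image_data), by = 2L), drop = FALSE]
}
m <- matrix(1:16, nrow = 4, byrow = TRUE)
half_image_vec(m)   # 2 x 2 matrix of the even-row, even-column entries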
#' @title Get Q2 palette. #' #' @return 256 x 3 integer matrix, representing the RGB color values for an index into the palette. #' #' @examples #' pal = pal_q2(); #' dim(pal); #' #' @export pal_q2 <- function() { q2pal = structure(c(0L, 15L, 31L, 47L, 63L, 75L, 91L, 107L, 123L, 139L, 155L, 171L, 187L, 203L, 219L, 235L, 99L, 91L, 83L, 79L, 71L, 63L, 59L, 51L, 47L, 43L, 39L, 35L, 27L, 23L, 19L, 15L, 95L, 91L, 91L, 87L, 83L, 79L, 71L, 63L, 59L, 51L, 47L, 39L, 35L, 27L, 23L, 19L, 143L, 123L, 115L, 103L, 207L, 167L, 139L, 111L, 235L, 203L, 175L, 147L, 119L, 91L, 63L, 35L, 167L, 159L, 151L, 139L, 127L, 115L, 103L, 87L, 75L, 67L, 59L, 51L, 43L, 35L, 27L, 19L, 123L, 115L, 107L, 103L, 95L, 87L, 83L, 75L, 67L, 63L, 55L, 47L, 39L, 31L, 23L, 15L, 111L, 95L, 83L, 67L, 55L, 39L, 27L, 15L, 179L, 191L, 203L, 215L, 203L, 179L, 159L, 135L, 115L, 91L, 71L, 47L, 23L, 19L, 15L, 11L, 7L, 7L, 7L, 0L, 0L, 0L, 0L, 0L, 139L, 131L, 123L, 115L, 107L, 99L, 91L, 87L, 75L, 63L, 51L, 43L, 31L, 19L, 11L, 0L, 151L, 143L, 135L, 127L, 119L, 115L, 107L, 99L, 91L, 79L, 67L, 55L, 47L, 35L, 23L, 15L, 159L, 147L, 139L, 127L, 119L, 107L, 99L, 87L, 79L, 67L, 55L, 43L, 31L, 23L, 11L, 0L, 119L, 111L, 103L, 99L, 91L, 83L, 75L, 71L, 63L, 55L, 47L, 39L, 35L, 27L, 19L, 11L, 155L, 143L, 135L, 123L, 115L, 103L, 95L, 87L, 75L, 63L, 55L, 47L, 35L, 27L, 19L, 11L, 0L, 35L, 63L, 83L, 95L, 95L, 95L, 255L, 255L, 255L, 255L, 255L, 255L, 255L, 255L, 255L, 255L, 255L, 239L, 227L, 211L, 199L, 183L, 171L, 155L, 143L, 127L, 115L, 95L, 71L, 47L, 27L, 239L, 55L, 255L, 0L, 43L, 27L, 19L, 235L, 195L, 159L, 123L, 235L, 199L, 167L, 135L, 159L, 0L, 15L, 31L, 47L, 63L, 75L, 91L, 107L, 123L, 139L, 155L, 171L, 187L, 203L, 219L, 235L, 75L, 67L, 63L, 59L, 55L, 47L, 43L, 39L, 35L, 31L, 27L, 23L, 19L, 15L, 15L, 11L, 95L, 91L, 83L, 79L, 75L, 71L, 63L, 59L, 55L, 47L, 43L, 39L, 35L, 27L, 23L, 19L, 119L, 99L, 91L, 79L, 151L, 123L, 103L, 83L, 159L, 139L, 119L, 99L, 79L, 59L, 39L, 23L, 59L, 47L, 43L, 39L, 31L, 23L, 23L, 19L, 15L, 15L, 15L, 11L, 11L, 11L, 7L, 7L, 95L, 87L, 83L, 79L, 71L, 67L, 63L, 55L, 51L, 47L, 39L, 35L, 27L, 23L, 15L, 11L, 59L, 55L, 47L, 43L, 35L, 27L, 19L, 11L, 91L, 123L, 155L, 187L, 215L, 199L, 183L, 167L, 151L, 135L, 119L, 103L, 83L, 75L, 67L, 63L, 55L, 47L, 39L, 31L, 23L, 15L, 7L, 0L, 87L, 79L, 71L, 67L, 59L, 51L, 47L, 43L, 35L, 31L, 27L, 19L, 15L, 11L, 7L, 0L, 159L, 151L, 139L, 131L, 123L, 115L, 107L, 99L, 91L, 79L, 67L, 55L, 47L, 35L, 23L, 15L, 75L, 67L, 59L, 55L, 47L, 43L, 35L, 31L, 27L, 23L, 19L, 15L, 11L, 7L, 0L, 0L, 123L, 115L, 107L, 99L, 91L, 87L, 79L, 71L, 63L, 55L, 47L, 39L, 31L, 23L, 15L, 7L, 171L, 159L, 151L, 139L, 131L, 119L, 111L, 103L, 91L, 79L, 67L, 59L, 47L, 35L, 23L, 15L, 255L, 231L, 211L, 187L, 167L, 143L, 123L, 255L, 255L, 255L, 255L, 255L, 255L, 235L, 215L, 191L, 171L, 147L, 127L, 107L, 87L, 71L, 59L, 43L, 31L, 23L, 15L, 7L, 0L, 0L, 0L, 0L, 0L, 55L, 0L, 0L, 43L, 27L, 19L, 151L, 115L, 87L, 63L, 211L, 171L, 139L, 107L, 91L, 0L, 15L, 31L, 47L, 63L, 75L, 91L, 107L, 123L, 139L, 155L, 171L, 187L, 203L, 219L, 235L, 35L, 31L, 31L, 27L, 27L, 23L, 23L, 19L, 19L, 19L, 15L, 15L, 11L, 11L, 7L, 7L, 111L, 103L, 95L, 91L, 83L, 75L, 67L, 59L, 55L, 47L, 43L, 39L, 35L, 27L, 23L, 19L, 83L, 67L, 59L, 47L, 75L, 59L, 47L, 39L, 39L, 35L, 31L, 27L, 23L, 15L, 11L, 7L, 43L, 35L, 27L, 19L, 15L, 11L, 7L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 75L, 67L, 63L, 59L, 55L, 51L, 47L, 43L, 39L, 35L, 27L, 23L, 19L, 15L, 11L, 7L, 23L, 23L, 23L, 23L, 19L, 15L, 11L, 7L, 79L, 111L, 147L, 183L, 223L, 211L, 195L, 183L, 167L, 155L, 139L, 127L, 111L, 103L, 91L, 83L, 75L, 63L, 
51L, 43L, 31L, 19L, 11L, 0L, 87L, 79L, 71L, 67L, 59L, 51L, 47L, 43L, 35L, 31L, 27L, 19L, 15L, 11L, 7L, 0L, 123L, 115L, 107L, 99L, 95L, 87L, 79L, 71L, 67L, 59L, 51L, 43L, 35L, 27L, 19L, 11L, 63L, 55L, 47L, 39L, 35L, 27L, 23L, 19L, 15L, 11L, 11L, 7L, 7L, 0L, 0L, 0L, 207L, 195L, 183L, 167L, 155L, 143L, 127L, 115L, 103L, 87L, 75L, 63L, 47L, 35L, 23L, 7L, 123L, 111L, 99L, 87L, 75L, 67L, 59L, 51L, 39L, 27L, 19L, 11L, 7L, 0L, 0L, 0L, 0L, 15L, 27L, 39L, 47L, 51L, 51L, 255L, 211L, 167L, 127L, 83L, 39L, 31L, 23L, 15L, 7L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 255L, 0L, 255L, 35L, 23L, 15L, 127L, 83L, 51L, 27L, 199L, 155L, 119L, 87L, 83L), .Dim = c(256L, 3L)); return(q2pal); } #' @title Get Q1 palette. #' #' @return 256 x 3 integer matrix, representing the RGB color values for an index into the palette. #' #' @examples #' pal = pal_q1(); #' dim(pal); #' #' @export pal_q1 <- function() { palette_q1_file = system.file("extdata", "palette_q1.lmp", package = "wal", mustWork = TRUE); lmp = read.lmp(palette_q1_file); return(matrix(lmp, ncol = 3L, byrow = TRUE)); } #' @title Read binary lump, or 'lmp' files. #' #' @param filepath character string, path to the input file. #' #' @param dlength the expected data length, in bytes. #' #' @return vector of dlength unsigned integers in range 0..255. #' #' @keywords internal read.lmp <- function(filepath, dlength = 768L) { fh = file(filepath, "rb"); on.exit({ close(fh) }); endian = 'little'; raw_data = readBin(fh, integer(), n = dlength, size = 1L, signed = FALSE, endian = endian); return(raw_data); }
/scratch/gouwar.j/cran-all/cranData/wal/R/palette.R
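# Usage sketch: eyeball a palette returned by pal_q2() or pal_q1() as a
# 16 x 16 swatch grid, using only base graphics.
pal <- pal_q2()
cols <- grDevices::rgb(pal[, 1], pal[, 2], pal[, 3], maxColorValue = 255)
image(matrix(1:256, nrow = 16), col = cols, axes = FALSE)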
#' @title Check whether object is a Quake 1 or 2 alias model. #' #' @param x any R object #' #' @export is.quakemodel <- function(x) { return(inherits(x, 'quakemodel_mdl') | inherits(x, 'quakemodel_md2')); } #' @title Convert Quake Model to 'fs.surface' instance. #' #' @param quakemodel an instance of \code{quakemodel_mdl} or \code{quakemodel_md2}. #' #' @param frame_idx integer, the frame to export. Quake models may contain animations made up of several frames. The mesh connectivity is unaltered between frames, but the vertex positions differ. #' #' @return \code{fs.surface} mesh instance, as used by the \code{freesurferformats} package. #' #' @export quakemodel.to.fs.surface <- function(quakemodel, frame_idx = 1L) { sf = list('faces'=(quakemodel$triangles$vertex + 1L), 'vertices'=quakemodel$frames[[frame_idx]]$vertex_coords); class(sf) = c(class(sf), 'fs.surface'); return(sf); } #' #' @title Convert Quake Model to rgl 'tmesh3d' instance. #' #' #' #' @param quakemodel an instance of \code{quakemodel_mdl} or \code{quakemodel_md2}. #' #' #' #' @param frame_idx integer, the frame to export. Quake models may contain animations made up of several frames. The mesh connectivity is unaltered between frames, but the vertex positions differ. #' #' #' #' @return \code{tmesh3d} mesh instance, as used by the \code{rgl} package. You can use \code{rgl::shade3d(your_tmesh3d_instance)} to visualize the model. #' #' #' #' @export #' quakemodel.to.tmesh3d <- function(quakemodel, frame_idx = 1L) { #' sf = quakemodel.to.fs.surface(quakemodel, frame_idx = frame_idx); #' tm = rgl::tmesh3d(t(sf$vertices), t(sf$faces), homogeneous = FALSE); #' return(tm); #' } #'
/scratch/gouwar.j/cran-all/cranData/wal/R/read_model_common.R
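# Usage sketch: the tmesh3d converter above ships commented out (presumably to
# avoid a hard rgl dependency), but the same conversion works via the
# fs.surface form if rgl is installed. The model path is an assumption.
mdl <- read.quake.mdl("~/data/q1_pak/progs/quaddama.mdl")
sf  <- quakemodel.to.fs.surface(mdl, frame_idx = 1L)
tm  <- rgl::tmesh3d(t(sf$vertices), t(sf$faces), homogeneous = FALSE)
rgl::shade3d(tm, color = "grey70")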
# Functions for reading Quake II models ('.md2' files). #' @title Read Quake II model in MD2 format. #' #' @param filepath character string, the path to the MD2 file #' #' @param anim logical, whether to load the whole animation (if present). Returns a list of models, the animation frames. If FALSE, only the first frame is returned. #' #' @note Ignore this function, it will be moved to a different package. #' #' @export read.quake.md2 <- function(filepath, anim = FALSE) { fh = file(filepath, "rb"); on.exit({ close(fh) }); endian = 'little'; md2 = list(); header = list(); header$ident = readBin(fh, integer(), n = 1, size = 4, endian = endian); header$version = readBin(fh, integer(), n = 1, size = 4, endian = endian); if(header$ident != 844121161 | header$version != 8L) { stop(sprintf("File '%s' not in MD2 format.\n", filepath)); } hdr_data = readBin(fh, integer(), n = 15, size = 4, endian = endian); header$skinwidth = hdr_data[1]; header$skinheight = hdr_data[2]; header$framesize = hdr_data[3]; header$num_skins = hdr_data[4]; header$num_vertices = hdr_data[5]; header$num_st = hdr_data[6]; header$num_tris = hdr_data[7]; header$num_glcmds = hdr_data[8]; header$num_frames = hdr_data[9]; header$offset_skins = hdr_data[10]; header$offset_st = hdr_data[11]; header$offset_tris = hdr_data[12]; header$offset_frames = hdr_data[13]; header$offset_glcmds = hdr_data[14]; header$offset_end = hdr_data[15]; # read model data: skins (a.k.a. textures) seek(fh, where = header$offset_skins, origin = "start"); md2$skins = list(); if(header$num_skins > 0L) { for(i in 1:header$num_skins) { #md2$skins[[i]] = readBin(fh, character()); md2$skins[[i]] = readChar(fh, 64L, useBytes = TRUE); } } # read model data: texture coords seek(fh, where = header$offset_st, origin = "start"); md2$texcoords = list(); md2$texcoords_unscaled = list(); if(header$num_st > 0L) { md2$texcoords$s = rep(NA, (header$num_st)); md2$texcoords$t = rep(NA, (header$num_st)); md2$texcoords_unscaled$s = rep(NA, (header$num_st)); md2$texcoords_unscaled$t = rep(NA, (header$num_st)); for(i in 1:header$num_st) { md2$texcoords_unscaled$s[[i]] = readBin(fh, integer(), n = 1L, size = 2L); md2$texcoords_unscaled$t[[i]] = readBin(fh, integer(), n = 1L, size = 2L); } md2$texcoords$s = md2$texcoords_unscaled$s / header$skinwidth; md2$texcoords$t = md2$texcoords_unscaled$t / header$skinheight; } # read model data: triangles (vertex and texture indices) seek(fh, where = header$offset_tris, origin = "start"); md2$triangles = list(); if(header$num_tris > 0L) { md2$triangles$vertex = matrix(rep(NA, (header$num_tris * 3L)), ncol = 3); # vertex indices md2$triangles$st = matrix(rep(NA, (header$num_tris * 3L)), ncol = 3); # texture coord indices for(i in 1:header$num_tris) { md2$triangles$vertex[i,] = readBin(fh, integer(), n = 3L, size = 2L, signed = FALSE); md2$triangles$st[i,] = readBin(fh, integer(), n = 3L, size = 2L, signed = FALSE); } } if(max(md2$triangles$vertex) >= header$num_vertices){ warning(sprintf("Found triangle referencing 0-based vertex index %d, but there are only %d vertices.\n", max(md2$triangles$vertex), header$num_vertices)); } # read model data: openGL commands seek(fh, where = header$offset_glcmds, origin = "start"); if(header$num_glcmds > 0L) { md2$glcmds = readBin(fh, integer(), n = header$num_glcmds, size = 4L); } ## inside frame loop: seek(fh, where = header$offset_frames, origin = "start"); md2$frames = list(); if(header$num_frames > 0L) { pdn = predefined.md2.normals(); for(i in 1:header$num_frames) { this_frame = list(); # read model 
data: vertex coords (and normal vector indices into pre-defined normal vector) this_frame$scale = readBin(fh, numeric(), n = 3L, size = 4L); this_frame$translate = readBin(fh, numeric(), n = 3L, size = 4L); this_frame$name = readChar(fh, 16L, useBytes = TRUE); if(header$num_vertices > 0L) { this_frame$vertex_coords = matrix(rep(NA, (3 * header$num_vertices)), ncol = 3L); this_frame$vertex_normals = matrix(rep(NA, (3 * header$num_vertices)), ncol = 3L); for(j in 1:header$num_vertices) { this_vert_coords_raw = readBin(fh, integer(), n = 3L, size = 1L, signed = FALSE); this_vert_normal_index = readBin(fh, integer(), n = 1L, size = 1L, signed = FALSE); # compute real vertex coords using frame scale and translation this_frame$vertex_coords[j,] = (this_frame$scale * this_vert_coords_raw) + this_frame$translate; this_frame$vertex_normals[j,] = pdn[this_vert_normal_index + 1L, ]; # normal index in the file is 0-based. } } md2$frames[[i]] = this_frame; } } expected_framesize = 12L + 12L + 16L + (4L * header$num_vertices); if(header$framesize != expected_framesize) { warning(sprintf("Framesize from header is %d, expected %d.\n", header$framesize, expected_framesize)); } md2$header = header; class(md2) = c(class(md2), 'quakemodel_md2'); return(md2); } #' @title Predefined MD2 normals from Quake 2. #' #' @return n x 3 matrix of normals. #' #' @keywords internal predefined.md2.normals <- function() { normals_raw = c( -0.525731, 0.000000, 0.850651 , -0.442863, 0.238856, 0.864188 , -0.295242, 0.000000, 0.955423 , -0.309017, 0.500000, 0.809017 , -0.162460, 0.262866, 0.951056 , 0.000000, 0.000000, 1.000000 , 0.000000, 0.850651, 0.525731 , -0.147621, 0.716567, 0.681718 , 0.147621, 0.716567, 0.681718 , 0.000000, 0.525731, 0.850651 , 0.309017, 0.500000, 0.809017 , 0.525731, 0.000000, 0.850651 , 0.295242, 0.000000, 0.955423 , 0.442863, 0.238856, 0.864188 , 0.162460, 0.262866, 0.951056 , -0.681718, 0.147621, 0.716567 , -0.809017, 0.309017, 0.500000 , -0.587785, 0.425325, 0.688191 , -0.850651, 0.525731, 0.000000 , -0.864188, 0.442863, 0.238856 , -0.716567, 0.681718, 0.147621 , -0.688191, 0.587785, 0.425325 , -0.500000, 0.809017, 0.309017 , -0.238856, 0.864188, 0.442863 , -0.425325, 0.688191, 0.587785 , -0.716567, 0.681718, -0.147621 , -0.500000, 0.809017, -0.309017 , -0.525731, 0.850651, 0.000000 , 0.000000, 0.850651, -0.525731 , -0.238856, 0.864188, -0.442863 , 0.000000, 0.955423, -0.295242 , -0.262866, 0.951056, -0.162460 , 0.000000, 1.000000, 0.000000 , 0.000000, 0.955423, 0.295242 , -0.262866, 0.951056, 0.162460 , 0.238856, 0.864188, 0.442863 , 0.262866, 0.951056, 0.162460 , 0.500000, 0.809017, 0.309017 , 0.238856, 0.864188, -0.442863 , 0.262866, 0.951056, -0.162460 , 0.500000, 0.809017, -0.309017 , 0.850651, 0.525731, 0.000000 , 0.716567, 0.681718, 0.147621 , 0.716567, 0.681718, -0.147621 , 0.525731, 0.850651, 0.000000 , 0.425325, 0.688191, 0.587785 , 0.864188, 0.442863, 0.238856 , 0.688191, 0.587785, 0.425325 , 0.809017, 0.309017, 0.500000 , 0.681718, 0.147621, 0.716567 , 0.587785, 0.425325, 0.688191 , 0.955423, 0.295242, 0.000000 , 1.000000, 0.000000, 0.000000 , 0.951056, 0.162460, 0.262866 , 0.850651, -0.525731, 0.000000 , 0.955423, -0.295242, 0.000000 , 0.864188, -0.442863, 0.238856 , 0.951056, -0.162460, 0.262866 , 0.809017, -0.309017, 0.500000 , 0.681718, -0.147621, 0.716567 , 0.850651, 0.000000, 0.525731 , 0.864188, 0.442863, -0.238856 , 0.809017, 0.309017, -0.500000 , 0.951056, 0.162460, -0.262866 , 0.525731, 0.000000, -0.850651 , 0.681718, 0.147621, -0.716567 , 0.681718, -0.147621, -0.716567 , 0.850651, 0.000000, -0.525731 , 0.809017, 
-0.309017, -0.500000 , 0.864188, -0.442863, -0.238856 , 0.951056, -0.162460, -0.262866 , 0.147621, 0.716567, -0.681718 , 0.309017, 0.500000, -0.809017 , 0.425325, 0.688191, -0.587785 , 0.442863, 0.238856, -0.864188 , 0.587785, 0.425325, -0.688191 , 0.688191, 0.587785, -0.425325 , -0.147621, 0.716567, -0.681718 , -0.309017, 0.500000, -0.809017 , 0.000000, 0.525731, -0.850651 , -0.525731, 0.000000, -0.850651 , -0.442863, 0.238856, -0.864188 , -0.295242, 0.000000, -0.955423 , -0.162460, 0.262866, -0.951056 , 0.000000, 0.000000, -1.000000 , 0.295242, 0.000000, -0.955423 , 0.162460, 0.262866, -0.951056 , -0.442863, -0.238856, -0.864188 , -0.309017, -0.500000, -0.809017 , -0.162460, -0.262866, -0.951056 , 0.000000, -0.850651, -0.525731 , -0.147621, -0.716567, -0.681718 , 0.147621, -0.716567, -0.681718 , 0.000000, -0.525731, -0.850651 , 0.309017, -0.500000, -0.809017 , 0.442863, -0.238856, -0.864188 , 0.162460, -0.262866, -0.951056 , 0.238856, -0.864188, -0.442863 , 0.500000, -0.809017, -0.309017 , 0.425325, -0.688191, -0.587785 , 0.716567, -0.681718, -0.147621 , 0.688191, -0.587785, -0.425325 , 0.587785, -0.425325, -0.688191 , 0.000000, -0.955423, -0.295242 , 0.000000, -1.000000, 0.000000 , 0.262866, -0.951056, -0.162460 , 0.000000, -0.850651, 0.525731 , 0.000000, -0.955423, 0.295242 , 0.238856, -0.864188, 0.442863 , 0.262866, -0.951056, 0.162460 , 0.500000, -0.809017, 0.309017 , 0.716567, -0.681718, 0.147621 , 0.525731, -0.850651, 0.000000 , -0.238856, -0.864188, -0.442863 , -0.500000, -0.809017, -0.309017 , -0.262866, -0.951056, -0.162460 , -0.850651, -0.525731, 0.000000 , -0.716567, -0.681718, -0.147621 , -0.716567, -0.681718, 0.147621 , -0.525731, -0.850651, 0.000000 , -0.500000, -0.809017, 0.309017 , -0.238856, -0.864188, 0.442863 , -0.262866, -0.951056, 0.162460 , -0.864188, -0.442863, 0.238856 , -0.809017, -0.309017, 0.500000 , -0.688191, -0.587785, 0.425325 , -0.681718, -0.147621, 0.716567 , -0.442863, -0.238856, 0.864188 , -0.587785, -0.425325, 0.688191 , -0.309017, -0.500000, 0.809017 , -0.147621, -0.716567, 0.681718 , -0.425325, -0.688191, 0.587785 , -0.162460, -0.262866, 0.951056 , 0.442863, -0.238856, 0.864188 , 0.162460, -0.262866, 0.951056 , 0.309017, -0.500000, 0.809017 , 0.147621, -0.716567, 0.681718 , 0.000000, -0.525731, 0.850651 , 0.425325, -0.688191, 0.587785 , 0.587785, -0.425325, 0.688191 , 0.688191, -0.587785, 0.425325 , -0.955423, 0.295242, 0.000000 , -0.951056, 0.162460, 0.262866 , -1.000000, 0.000000, 0.000000 , -0.850651, 0.000000, 0.525731 , -0.955423, -0.295242, 0.000000 , -0.951056, -0.162460, 0.262866 , -0.864188, 0.442863, -0.238856 , -0.951056, 0.162460, -0.262866 , -0.809017, 0.309017, -0.500000 , -0.864188, -0.442863, -0.238856 , -0.951056, -0.162460, -0.262866 , -0.809017, -0.309017, -0.500000 , -0.681718, 0.147621, -0.716567 , -0.681718, -0.147621, -0.716567 , -0.850651, 0.000000, -0.525731 , -0.688191, 0.587785, -0.425325 , -0.587785, 0.425325, -0.688191 , -0.425325, 0.688191, -0.587785 , -0.425325, -0.688191, -0.587785 , -0.587785, -0.425325, -0.688191 , -0.688191, -0.587785, -0.425325 ); return(matrix(normals_raw, ncol = 3L, byrow = TRUE)); } #quadf = '~/data/q2_pak/models/items/quaddama/tris.md2' #md2q = read.quake.md2(quadf); #' @title Check whether object is Quake 2 MD2 model #' #' @param x any R object #' #' @export is.quakemodel_md2 <- function(x) inherits(x, 'quakemodel_md2')
/scratch/gouwar.j/cran-all/cranData/wal/R/read_model_md2.R
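# Usage sketch tying read.quake.md2() to the mesh conversion from
# read_model_common.R. The file path is an assumption.
md2 <- read.quake.md2("~/data/q2_pak/models/items/quaddama/tris.md2")
md2$header$num_frames                       # number of animation frames
sf <- quakemodel.to.fs.surface(md2, frame_idx = 1L)
nrow(sf$vertices)                           # vertex count of the first frame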
# Functions for reading Quake alias models (aka procs). # This includes entity models (weapons, pickups) and enemy models. The file extension is '.mdl'. #' @title Read Quake model in MDL format. #' #' @param filepath character string, the path to the MDL file #' #' @param do_checks logical, whether to perform some sanity checks on the data and warn on suspicious results. #' #' @note Ignore this function, it will be moved to a different package. #' #' @examples #' \dontrun{ #' mdlf = "~/data/q1_pak/progs/quaddama.mdl" #' mdl = read.quake.mdl(mdlf); #' } #' #' @export read.quake.mdl <- function(filepath, do_checks = FALSE) { fh = file(filepath, "rb"); on.exit({ close(fh) }); endian = 'little'; mdl = list('header' = list()); int_size = 4L; mdl$header$id = readBin(fh, integer(), n = 1, size = int_size, endian = endian); mdl$header$version = readBin(fh, integer(), n = 1, size = int_size, endian = endian); if(mdl$header$id != 1330660425L | mdl$header$version != 6L) { stop(sprintf("File '%s' not in MDL format.\n", filepath)); } mdl$header$scale = readBin(fh, numeric(), n = 3, size = 4, endian = endian); mdl$header$origin = readBin(fh, numeric(), n = 3, size = 4, endian = endian); mdl$header$radius = readBin(fh, numeric(), n = 1, size = 4, endian = endian); # bbox radius mdl$header$offsets = readBin(fh, numeric(), n = 3, size = 4, endian = endian); # eye pos mdl$header$num_skins = readBin(fh, integer(), n = 1, size = int_size, endian = endian); # number of skin textures mdl$header$skin_width = readBin(fh, integer(), n = 1, size = int_size, endian = endian); mdl$header$skin_height = readBin(fh, integer(), n = 1, size = int_size, endian = endian); mdl$header$num_verts = readBin(fh, integer(), n = 1, size = int_size, endian = endian); mdl$header$num_tris = readBin(fh, integer(), n = 1, size = int_size, endian = endian); mdl$header$num_frames = readBin(fh, integer(), n = 1, size = int_size, endian = endian); mdl$header$sync_type = readBin(fh, integer(), n = 1, size = int_size, endian = endian); # 0=synchron, 1=random mdl$header$flags = readBin(fh, integer(), n = 1, size = int_size, endian = endian); # 0 mdl$header$size = readBin(fh, numeric(), n = 1, size = 4, endian = endian); # average tris size if(do_checks) { # some sanity checks if((mdl$header$skin_width %% 4) != 0L) { warning(sprintf("Invalid skin texture width %d, must be multiple of 4.\n", mdl$header$skin_width)); } if((mdl$header$skin_height %% 4) != 0L) { warning(sprintf("Invalid skin texture height %d, must be multiple of 4.\n", mdl$header$skin_height)); } if(! mdl$header$sync_type %in% c(0L, 1L)) { warning("Invalid sync type, must be 0 or 1."); } if(mdl$header$flags != 0L) { warning(sprintf("Invalid flags %d, must be 0.\n", mdl$header$flags)); } } # next follow model skins. Could be one or a group. 
mdl$skins = list(); mdl$skins$skin_type = readBin(fh, integer(), n = 1, size = 4, endian = endian); if(mdl$skins$skin_type == 0L) { # single picture mdl$skins$skin_pic = readBin(fh, integer(), n = (mdl$header$skin_width * mdl$header$skin_height) , size = 1, signed = FALSE, endian = endian); } else { mdl$skins$num_skins_in_group = readBin(fh, integer(), n = 1, size = 4, endian = endian); mdl$skins$time_per_skin = readBin(fh, numeric(), n = mdl$skins$num_skins_in_group, size = 4, endian = endian); mdl$skins$skin_pics = list(); if(mdl$skins$num_skins_in_group > 0L) { for(skin_idx in 1:mdl$skins$num_skins_in_group) { mdl$skins$skin_pics[[skin_idx]] = readBin(fh, integer(), n = (mdl$header$skin_width * mdl$header$skin_height) , size = 1, signed = FALSE, endian = endian); } } } # skin texture coords if(mdl$header$num_verts > 0L) { mdl$skins$skinverts = matrix(rep(NA, (mdl$header$num_verts * 3L)), ncol = 3L); for(skin_vert_idx in 1:mdl$header$num_verts) { # The 3 values per vertex are: onseam (whether vertex is on seam between model front and back), s (horizontal texture coord in range [0, skinwidth[), t (vertical texture coord in range [0, skinheight[). # The first value (onseam) must be 0 or 32L. mdl$skins$skinverts[skin_vert_idx,] = readBin(fh, integer(), n = 3L, size = 4, endian = endian); } } # triangles (as indices into vertex list) mdl$triangles = list(); if(mdl$header$num_tris > 0L) { mdl$triangles$raw = matrix(rep(NA, (mdl$header$num_tris * 4L)), ncol = 4L); # the 4 values are: flag face_is_front (0=FALSE, 1=TRUE), and the 3 vertex indices of the triangle. for(triangle_idx in 1:mdl$header$num_tris) { mdl$triangles$raw[triangle_idx,] = readBin(fh, integer(), n = 4L, size = 4, endian = endian); } mdl$triangles$triangle_is_front = mdl$triangles$raw[, 1L]; mdl$triangles$vertex = mdl$triangles$raw[, 2:4]; mdl$triangles$raw = NULL; } if(any(!(mdl$triangles$triangle_is_front %in% c(0L, 1L)))) { warning("Found triangles with invalid 'triangle_is_front' value (expected 0 or 1)."); } if(max(mdl$triangles$vertex) >= mdl$header$num_verts){ warning(sprintf("Found triangle referencing 0-based vertex index %d, but there are only %d vertices.\n", max(mdl$triangles$vertex), mdl$header$num_verts)); } # next follow model frames. Each frame contains vertex positions (a model in a certain orientation). mdl$frames = list(); if(mdl$header$num_frames > 0L) { for(frame_idx in 1:mdl$header$num_frames) { this_frame = list(); this_frame$frame_type = readBin(fh, integer(), n = 1, size = 4, endian = endian); # 0 = simple frame, everything else = full frame. if(this_frame$frame_type == 0L) { # single simple frame # min vertex position this_frame$min_vertex = readBin(fh, integer(), n = 4, size = 1, signed = FALSE, endian = endian); # same for max vertex position. this_frame$max_vertex = readBin(fh, integer(), n = 4, size = 1, signed = FALSE, endian = endian); this_frame$name = readChar(fh, 16L, useBytes = TRUE); # frame name. # the 4 values are: 1-3=packed position in range 0..255 (x,y,z), 4=index into normal list. this_frame$vertex_coords_raw = matrix(readBin(fh, integer(), n = (mdl$header$num_verts * 4L), size = 1, signed = FALSE, endian = endian), ncol = 4L, byrow = TRUE); this_frame$vertex_coords = unpack.vertex.coords(this_frame$vertex_coords_raw[,1:3], mdl$header); this_frame$vertex_normals = lookup.q1.normals(this_frame$vertex_coords_raw[,4]); #this_frame$vertex_coords_raw = NULL; } else { # full frame: group of simple frames and extra data. # min vertex position over all following frames. 
The 4 values are: 1..3=packed position in range 0..255. 4=normal index (into list of pre-defined normals, approximate value for Gouraud Shading). this_frame$min_vertex = readBin(fh, integer(), n = 4, size = 1, signed = FALSE, endian = endian); # same for max vertex position. this_frame$max_vertex = readBin(fh, integer(), n = 4, size = 1, signed = FALSE, endian = endian); this_frame$num_simple_frames = this_frame$frame_type; # TODO: where to get this? the current value this_frame$frame_type is a guess. this_frame$frame_timings = readBin(fh, numeric(), n = this_frame$num_simple_frames, size = 4, endian = endian); this_frame$simple_frames = list(); for(simple_frame_idx in 1:this_frame$num_simple_frames) { this_simple_frame = list(); this_simple_frame$min_vertex = readBin(fh, integer(), n = 4, size = 1, signed = FALSE, endian = endian); # same for max vertex position. this_simple_frame$max_vertex = readBin(fh, integer(), n = 4, size = 1, signed = FALSE, endian = endian); this_simple_frame$name = readChar(fh, 16L, useBytes = TRUE); # frame name. this_simple_frame$vertex_coords_raw = matrix(readBin(fh, integer(), n = (mdl$header$num_verts * 4L), size = 1, signed = FALSE, endian = endian), ncol = 4L, byrow = TRUE); this_simple_frame$vertex_coords = unpack.vertex.coords(this_simple_frame$vertex_coords_raw[,1:3], mdl$header); this_simple_frame$vertex_normals = lookup.q1.normals(this_simple_frame$vertex_coords_raw[,4]); this_simple_frame$vertex_coords_raw = NULL; this_frame$simple_frames[[simple_frame_idx]] = this_simple_frame; } } mdl$frames[[frame_idx]] = this_frame; } } class(mdl) = c(class(mdl), 'quakemodel_mdl'); return(mdl); } #' @title Unpack vertex coords from Q1 0-255 representation. #' #' @param coords_packed matrix of n x 3 integers in range 0..255, the packed coords from an MDL file. #' #' @param mdl_header MDL header or named list, only the fields 'scale' and 'origin' are used. #' #' @keywords internal unpack.vertex.coords <- function(coords_packed, mdl_header) { if(ncol(coords_packed) != 3L) { stop("Parameter 'coords_packed' must be a matrix with 3 columns."); } if(is.null(mdl_header$origin) | is.null(mdl_header$scale)) { stop("Parameter 'mdl_header' must have 'origin' and 'scale' entries.") } nc = ncol(coords_packed); coords_unpacked = matrix(rep(NA, (nc * nrow(coords_packed))), ncol = nc); for(row_idx in 1:nrow(coords_packed)) { coords_unpacked[row_idx,] = (coords_packed[row_idx,] * mdl_header$scale) + mdl_header$origin; } return(coords_unpacked); } #' @title Return list of pre-defined Quake I normals. #' #' @return n x 3 matrix of doubles, the normals. Hardcoded. 
#' #' @keywords internal predefined.mdl.normals <- function() { q1_norms = c( -0.525731, 0.000000, 0.850651 , -0.442863, 0.238856, 0.864188 , -0.295242, 0.000000, 0.955423 , -0.309017, 0.500000, 0.809017 , -0.162460, 0.262866, 0.951056 , 0.000000, 0.000000, 1.000000 , 0.000000, 0.850651, 0.525731 , -0.147621, 0.716567, 0.681718 , 0.147621, 0.716567, 0.681718 , 0.000000, 0.525731, 0.850651 , 0.309017, 0.500000, 0.809017 , 0.525731, 0.000000, 0.850651 , 0.295242, 0.000000, 0.955423 , 0.442863, 0.238856, 0.864188 , 0.162460, 0.262866, 0.951056 , -0.681718, 0.147621, 0.716567 , -0.809017, 0.309017, 0.500000 , -0.587785, 0.425325, 0.688191 , -0.850651, 0.525731, 0.000000 , -0.864188, 0.442863, 0.238856 , -0.716567, 0.681718, 0.147621 , -0.688191, 0.587785, 0.425325 , -0.500000, 0.809017, 0.309017 , -0.238856, 0.864188, 0.442863 , -0.425325, 0.688191, 0.587785 , -0.716567, 0.681718, -0.147621 , -0.500000, 0.809017, -0.309017 , -0.525731, 0.850651, 0.000000 , 0.000000, 0.850651, -0.525731 , -0.238856, 0.864188, -0.442863 , 0.000000, 0.955423, -0.295242 , -0.262866, 0.951056, -0.162460 , 0.000000, 1.000000, 0.000000 , 0.000000, 0.955423, 0.295242 , -0.262866, 0.951056, 0.162460 , 0.238856, 0.864188, 0.442863 , 0.262866, 0.951056, 0.162460 , 0.500000, 0.809017, 0.309017 , 0.238856, 0.864188, -0.442863 , 0.262866, 0.951056, -0.162460 , 0.500000, 0.809017, -0.309017 , 0.850651, 0.525731, 0.000000 , 0.716567, 0.681718, 0.147621 , 0.716567, 0.681718, -0.147621 , 0.525731, 0.850651, 0.000000 , 0.425325, 0.688191, 0.587785 , 0.864188, 0.442863, 0.238856 , 0.688191, 0.587785, 0.425325 , 0.809017, 0.309017, 0.500000 , 0.681718, 0.147621, 0.716567 , 0.587785, 0.425325, 0.688191 , 0.955423, 0.295242, 0.000000 , 1.000000, 0.000000, 0.000000 , 0.951056, 0.162460, 0.262866 , 0.850651, -0.525731, 0.000000 , 0.955423, -0.295242, 0.000000 , 0.864188, -0.442863, 0.238856 , 0.951056, -0.162460, 0.262866 , 0.809017, -0.309017, 0.500000 , 0.681718, -0.147621, 0.716567 , 0.850651, 0.000000, 0.525731 , 0.864188, 0.442863, -0.238856 , 0.809017, 0.309017, -0.500000 , 0.951056, 0.162460, -0.262866 , 0.525731, 0.000000, -0.850651 , 0.681718, 0.147621, -0.716567 , 0.681718, -0.147621, -0.716567 , 0.850651, 0.000000, -0.525731 , 0.809017, -0.309017, -0.500000 , 0.864188, -0.442863, -0.238856 , 0.951056, -0.162460, -0.262866 , 0.147621, 0.716567, -0.681718 , 0.309017, 0.500000, -0.809017 , 0.425325, 0.688191, -0.587785 , 0.442863, 0.238856, -0.864188 , 0.587785, 0.425325, -0.688191 , 0.688191, 0.587785, -0.425325 , -0.147621, 0.716567, -0.681718 , -0.309017, 0.500000, -0.809017 , 0.000000, 0.525731, -0.850651 , -0.525731, 0.000000, -0.850651 , -0.442863, 0.238856, -0.864188 , -0.295242, 0.000000, -0.955423 , -0.162460, 0.262866, -0.951056 , 0.000000, 0.000000, -1.000000 , 0.295242, 0.000000, -0.955423 , 0.162460, 0.262866, -0.951056 , -0.442863, -0.238856, -0.864188 , -0.309017, -0.500000, -0.809017 , -0.162460, -0.262866, -0.951056 , 0.000000, -0.850651, -0.525731 , -0.147621, -0.716567, -0.681718 , 0.147621, -0.716567, -0.681718 , 0.000000, -0.525731, -0.850651 , 0.309017, -0.500000, -0.809017 , 0.442863, -0.238856, -0.864188 , 0.162460, -0.262866, -0.951056 , 0.238856, -0.864188, -0.442863 , 0.500000, -0.809017, -0.309017 , 0.425325, -0.688191, -0.587785 , 0.716567, -0.681718, -0.147621 , 0.688191, -0.587785, -0.425325 , 0.587785, -0.425325, -0.688191 , 0.000000, -0.955423, -0.295242 , 0.000000, -1.000000, 0.000000 , 0.262866, -0.951056, -0.162460 , 0.000000, -0.850651, 0.525731 , 0.000000, -0.955423, 0.295242 , 0.238856, 
-0.864188, 0.442863 , 0.262866, -0.951056, 0.162460 , 0.500000, -0.809017, 0.309017 , 0.716567, -0.681718, 0.147621 , 0.525731, -0.850651, 0.000000 , -0.238856, -0.864188, -0.442863 , -0.500000, -0.809017, -0.309017 , -0.262866, -0.951056, -0.162460 , -0.850651, -0.525731, 0.000000 , -0.716567, -0.681718, -0.147621 , -0.716567, -0.681718, 0.147621 , -0.525731, -0.850651, 0.000000 , -0.500000, -0.809017, 0.309017 , -0.238856, -0.864188, 0.442863 , -0.262866, -0.951056, 0.162460 , -0.864188, -0.442863, 0.238856 , -0.809017, -0.309017, 0.500000 , -0.688191, -0.587785, 0.425325 , -0.681718, -0.147621, 0.716567 , -0.442863, -0.238856, 0.864188 , -0.587785, -0.425325, 0.688191 , -0.309017, -0.500000, 0.809017 , -0.147621, -0.716567, 0.681718 , -0.425325, -0.688191, 0.587785 , -0.162460, -0.262866, 0.951056 , 0.442863, -0.238856, 0.864188 , 0.162460, -0.262866, 0.951056 , 0.309017, -0.500000, 0.809017 , 0.147621, -0.716567, 0.681718 , 0.000000, -0.525731, 0.850651 , 0.425325, -0.688191, 0.587785 , 0.587785, -0.425325, 0.688191 , 0.688191, -0.587785, 0.425325 , -0.955423, 0.295242, 0.000000 , -0.951056, 0.162460, 0.262866 , -1.000000, 0.000000, 0.000000 , -0.850651, 0.000000, 0.525731 , -0.955423, -0.295242, 0.000000 , -0.951056, -0.162460, 0.262866 , -0.864188, 0.442863, -0.238856 , -0.951056, 0.162460, -0.262866 , -0.809017, 0.309017, -0.500000 , -0.864188, -0.442863, -0.238856 , -0.951056, -0.162460, -0.262866 , -0.809017, -0.309017, -0.500000 , -0.681718, 0.147621, -0.716567 , -0.681718, -0.147621, -0.716567 , -0.850651, 0.000000, -0.525731 , -0.688191, 0.587785, -0.425325 , -0.587785, 0.425325, -0.688191 , -0.425325, 0.688191, -0.587785 , -0.425325, -0.688191, -0.587785 , -0.587785, -0.425325, -0.688191 , -0.688191, -0.587785, -0.425325 ); return(matrix(q1_norms, ncol = 3L, byrow = TRUE)); } #' @title Lookup Quake I normals by index. #' #' @param normal_indices integer vector of length n, the normal indices (0-based). #' #' @return n x 3 matrix of doubles, the normals #' #' @keywords internal lookup.q1.normals <- function(normal_indices) { if( ! is.vector(normal_indices)) { stop("Parameter 'normal_indices' must be an integer vector."); } return(predefined.mdl.normals()[normal_indices + 1L, , drop = FALSE]); # index rows: each normal is a 3-vector. } #' @title Check whether object is Quake 1 MDL model #' #' @param x any R object #' #' @export is.quakemodel_mdl <- function(x) inherits(x, 'quakemodel_mdl')
/scratch/gouwar.j/cran-all/cranData/wal/R/read_model_mdl.R
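# Worked example of the decoding done by unpack.vertex.coords(): each packed
# byte triple becomes coord = packed * scale + origin, per axis. The scale and
# origin values below are made up.
packed <- c(128L, 0L, 255L)     # packed x, y, z in range 0..255
scale  <- c(0.1, 0.1, 0.2)      # made-up header$scale
origin <- c(-16, -16, -24)      # made-up header$origin
packed * scale + origin         # -3.2 -16.0  27.0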
# Functions to read PAK archives. #' @title Read Quake PAK archive. #' #' @param filepath character string, path to the file including extension. #' #' @return a 'pak' instance. #' #' @examples #' \dontrun{ #' pakf = '~/.steam/steam/steamapps/common/Quake/Id1/PAK0.PAK'; #' pak = read.pak(pakf); #' } #' #' @export read.pak <- function(filepath) { fh = file(filepath, "rb"); on.exit({ close(fh) }); endian = 'little'; pak = list('header' = list('derived' = list())); pak$header$id = readChar(fh, 4L, useBytes = TRUE); if(pak$header$id != "PACK") { stop(sprintf("File '%s' not in Quake PACK format.\n", filepath)); } pak$header$ft_offset = readBin(fh, integer(), n = 1L, size = 4L, endian = endian); # file table offset. pak$header$ft_size = readBin(fh, integer(), n = 1, size = 4, endian = endian); # size of file table. pak$header$derived$num_files = pak$header$ft_size / 64L; # read file data. seek(fh, where = pak$header$ft_offset, origin = "start"); entry_file_names = rep(NA, pak$header$derived$num_files); entry_offsets = rep(NA, pak$header$derived$num_files); entry_sizes = rep(NA, pak$header$derived$num_files); for(entry_idx in 1:pak$header$derived$num_files) { entry_file_names[[entry_idx]] = readChar(fh, 56, useBytes = TRUE); entry_offsets[[entry_idx]] = readBin(fh, integer(), n = 1L, size = 4L, endian = endian); # file data offset. entry_sizes[[entry_idx]] = readBin(fh, integer(), n = 1L, size = 4L, endian = endian); # file data size. } pak$contents = data.frame('name' = entry_file_names, 'offset' = entry_offsets, 'size' = entry_sizes, stringsAsFactors = FALSE); class(pak) = c(class(pak), 'pak'); return(pak); } #' @title Extract PAK contents into existing directory. #' #' @param pak_filepath character string, path to input PAK file. #' #' @param outdir character string, the output directory in which the files should be created. Must be writeable. The sub directories and filenames are derived from the data in the PAK. #' #' @note PAK files can contain a directory structure, and new subdirectories will be created under \code{outdir} as needed to preserve it. #' #' @export pak.extract <- function(pak_filepath, outdir = getwd()) { if(! dir.exists(outdir)) { stop(sprintf("Base output directory '%s' does not exist.\n", outdir)); } pak = read.pak(pak_filepath); if(nrow(pak$contents) > 0L) { for(row_idx in 1:nrow(pak$contents)) { out_filename_with_dir_part = pak$contents$name[row_idx]; # something like 'e1u1/metal2_2'. out_subdirs = file.path(outdir, dirname(out_filename_with_dir_part)); if(! dir.exists(out_subdirs)) { dir.create(out_subdirs, recursive = TRUE); } out_filename = basename(out_filename_with_dir_part); out_filepath = file.path(out_subdirs, out_filename); save.filepart(pak_filepath, pak$contents$offset[row_idx], pak$contents$size[row_idx], out_filepath); } } else { warning("Empty PAK file."); } }
/scratch/gouwar.j/cran-all/cranData/wal/R/read_pak.R
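# Usage sketch: inspect a PAK's file table before extracting. The path mirrors
# the \dontrun example above and is an assumption.
pak <- read.pak('~/.steam/steam/steamapps/common/Quake/Id1/PAK0.PAK')
nrow(pak$contents)                                # number of archived files
sum(pak$contents$size)                            # total payload in bytes
head(pak$contents[order(-pak$contents$size), ])   # largest entries first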
#' @title Extract any of the supported Quake archives. #' #' @param filepath character string, path to existing and readable file in PAK or WAD2 format. #' #' @param outdir character string, path to an existing and writeable output directory into which to extract the archive. #' #' @param format character string, one of 'auto' to detect from the filename, 'QARCHIVE_TYPE_WAD' for WAD2, or 'QARCHIVE_TYPE_PAK' for PACK. #' #' @param do_pre_checks logical, whether to perform extra sanity checks on the other parameters. #' #' @export qarchive.extract <- function(filepath, outdir, format = 'auto', do_pre_checks = TRUE) { if(do_pre_checks) { if(! file.exists(filepath)) { stop(sprintf("File '%s' does not exist or cannot be read. Please check or fix permissions.\n", filepath)); } if(! dir.exists(outdir)) { stop(sprintf("Output directory '%s' does not exist or cannot be read. Please create it or fix permissions.\n", outdir)); } } qarchive_type = format; if(format == 'auto') { qarchive_type = qarchive.type.from.filename(filepath); } if(qarchive_type == 'QARCHIVE_TYPE_WAD') { wad.extract(filepath, outdir); } else if(qarchive_type == 'QARCHIVE_TYPE_PAK') { pak.extract(filepath, outdir); } else { stop("Invalid or unsupported Quake archive type."); } } #' @title Determine archive type from file name extension. #' #' @inheritParams qarchive.extract #' #' @return character string, one of 'QARCHIVE_TYPE_WAD' or 'QARCHIVE_TYPE_PAK'. #' #' @keywords internal qarchive.type.from.filename <- function(filepath) { if(any(endsWith(filepath, c('.wad', '.WAD')))) { return('QARCHIVE_TYPE_WAD'); } else if(any(endsWith(filepath, c('.pak', '.PAK')))) { return('QARCHIVE_TYPE_PAK'); } else { warning(sprintf("Cannot guess archive type from file extension, trying 'PAK'.")); return('QARCHIVE_TYPE_PAK'); } }
/scratch/gouwar.j/cran-all/cranData/wal/R/read_quake_archive.R
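# Usage sketch: the dispatcher picks wad.extract() or pak.extract() from the
# file extension when format = 'auto'. The paths are assumptions.
dir.create("~/q_extracted", showWarnings = FALSE)
qarchive.extract("~/knave.wad", outdir = "~/q_extracted", format = 'auto')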
# functions to read WAD files, which are archives holding several other files (think tar). #' @title Read Quake WAD file. #' #' @param filepath character string, path to the file. #' #' @return a wad instance, can be used to extract data or list contents. #' #' @examples #' \dontrun{ #' wadf = '~/knave.wad'; #' wad = read.wad(wadf); #' wad.contents(wad); #' } #' #' @export read.wad <- function(filepath) { fh = file(filepath, "rb"); on.exit({ close(fh) }); endian = 'little'; wad = list('header' = list()); wad$header$magic = readChar(fh, 4L); if(wad$header$magic != "WAD2") { stop("Not a supported WAD file: file magic mismatch."); } wad$header$num_entries = readBin(fh, integer(), n = 1, size = 4, endian = endian); wad$header$dir_offset = readBin(fh, integer(), n = 1, size = 4, endian = endian); seek(fh, where = wad$header$dir_offset, origin = "start"); wadentry_offset = c(); wadentry_dsize = c(); wadentry_size = c(); wadentry_type = c(); wadentry_compression = c(); wadentry_dummy = c(); wadentry_dir_name = c(); for(entry_idx in 1L:wad$header$num_entries) { wadentry_offset = c(wadentry_offset, readBin(fh, integer(), n = 1, size = 4, endian = endian)); wadentry_dsize = c(wadentry_dsize, readBin(fh, integer(), n = 1, size = 4, endian = endian)); wadentry_size = c(wadentry_size, readBin(fh, integer(), n = 1, size = 4, endian = endian)); wadentry_type = c(wadentry_type, readBin(fh, integer(), n = 1, size = 1, signed = FALSE, endian = endian)); wadentry_compression = c(wadentry_compression, readBin(fh, integer(), n = 1, size = 1, signed = FALSE, endian = endian)); wadentry_dummy = c(wadentry_dummy, readBin(fh, integer(), n = 1, size = 2, signed = FALSE, endian = endian)); wadentry_dir_name = c(wadentry_dir_name, readChar(fh, 16L)); } wadentry_type_string = get.wadentry.type.strings(wadentry_type); wad$contents = data.frame("offset" = wadentry_offset, "dsize" = wadentry_dsize, "size" = wadentry_size, "type" = wadentry_type, "type_string" = wadentry_type_string, "compression" = wadentry_compression, "dummy" = wadentry_dummy, "dir_name" = wadentry_dir_name, stringsAsFactors = FALSE); class(wad) = c(class(wad), "wad"); return(wad); } #' @title Translate wad directory entry types from the integer to the string representation. #' #' @param wadentry_type_int integer, WAD entry type code #' #' @return type string #' #' @keywords internal get.wadentry.type.strings <- function(wadentry_type_int) { wadentry_string = rep("unknown", length(wadentry_type_int)); cr = 1L; for(wti in wad_dir.types.int()) { wadentry_string[which(wadentry_type_int == wti)] = wad_dir.types.string()[cr]; cr = cr + 1L; } return(wadentry_string); } #' @title Get integers representing WAD dir entry types. #' #' @keywords internal #' #' @seealso wad_dir.types.string wad_dir.types.int <- function() { return(c(64L, 66L, 68L, 69L)); } #' @title Read part of binary file and save as new file. #' #' @param infile for input file, part of it gets read. #' #' @param read_from integer, index at which to start reading, from start of file. Used to \code{seek} to the position. #' #' @param read_len integer, the number of bytes to read. #' #' @param outfile character string, the output filename. 
#' #' @keywords internal save.filepart <- function(infile, read_from, read_len, outfile) { fh = file(infile, "rb"); on.exit({ close(fh) }); endian = "little"; read_from = as.integer(read_from); if(read_from < 0L) { stop("Invalid read_from parameter."); } read_len = as.integer(read_len); if(read_len < 0L) { stop("Invalid read_len parameter."); } #cat(sprintf("Writing %d bytes starting at %d to new file %s.\n", read_len, read_from, outfile)) seek(fh, where = read_from, origin = "start"); raw_data = readBin(fh, raw(), n = read_len, size = 1L, endian = endian); close(fh); if(length(raw_data) != read_len) { warning(sprintf("Extracted filepart length mismatch: expected %d, read %d.\n", read_len, length(raw_data))); } fh_out = file(outfile, "wb"); on.exit({ close(fh_out) }); writeBin(raw_data, fh_out); flush(fh_out); } #' @title S3 print function for WAD #' #' @param x wad instance #' #' @param ... extra arguments, ignored #' #' @export print.wad <- function(x, ...) { num_palettes = length(which(x$contents$type == 64L)); num_pic_statbar = length(which(x$contents$type == 66L)); num_tex = length(which(x$contents$type == 68L)); num_pic_console = length(which(x$contents$type == 69L)); cat(sprintf("WAD file holding %d palettes, %d statbar pics, %d textures and %d console pics.\n", num_palettes, num_pic_statbar, num_tex, num_pic_console)); } #' @title Get strings describing WAD dir entry types. #' #' @keywords internal #' #' @seealso wad_dir.types.int wad_dir.types.string <- function() { return(c("color_palette", "pic_status_bar", "texture", "pic_console")); } #' @title Get file extensions for WAD dir entry type strings. #' #' @keywords internal #' #' @return named list, which maps \code{wad_dir.types.string}s to file extensions. Afaik, there are not standard file extensions for these file types, and I made the ones used here up. #' #' @seealso wad_dir.types.string wad_dir.fileext.mapping <- function() { ext_mapping = list("color_palette" = '.qpl', "pic_status_bar" = '.psb', "texture" = '.q1t', "pic_console" = '.pco', "default" = '.qrs'); return(ext_mapping); } #' @title List WAD file contents. #' #' @param wad a wad instance, see \code{read.wad}. Alternatively a character string, which will be interpreted as a filepath to a WAD file that should be loaded. #' #' @return data.frame, info on the files inside the wad. #' @export wad.contents <- function(wad) { if(is.character(wad)) { wad = read.wad(wad); } contents = data.frame("type" = wad$contents$type_string, "name" = wad$contents$dir_name, "size" = wad$contents$size, stringsAsFactors = FALSE); return(contents); } #' @title Extract WAD contents into existing directory. #' #' @param wad_filepath character string, path to input WAD file. #' #' @param outdir character string, the output directory in which the files should be created. The filenames are derived from the data in the WAD. #' #' @param file_ext_mapping named list, with keys corresponding to the type names and values are file extensions, including the dot, to use for them. #' #' @note One can read extracted textures with \code{read.quake1miptex()}. 
#'
#' @export
wad.extract <- function(wad_filepath, outdir = getwd(), file_ext_mapping = wad_dir.fileext.mapping()) {
  wad = read.wad(wad_filepath);
  if(nrow(wad$contents) > 0L) {
    for(row_idx in 1:nrow(wad$contents)) {
      out_filename_cleaned = wad.texname.clean(wad$contents$dir_name[row_idx]);
      file_ext = file_ext_mapping$default;
      # Match against the names of the mapping (the type strings), not its values (the extensions).
      if(wad$contents$type_string[row_idx] %in% names(file_ext_mapping)) {
        file_ext = file_ext_mapping[[wad$contents$type_string[row_idx]]];
      }
      out_filepath = file.path(outdir, paste(out_filename_cleaned, file_ext, sep=""));
      save.filepart(wad_filepath, wad$contents$offset[row_idx], wad$contents$dsize[row_idx], out_filepath);
    }
  } else {
    warning("Empty WAD file.");
  }
}

#' @title Replace special chars in texture names to turn them into valid filenames.
#'
#' @param texnames character string, texture names from a WAD file. The texture names may contain the special characters '*' and '+', which are used to indicate sequences (textures that change on an event, like a pressed button turning from red to green) and other things.
#'
#' @return character strings usable as filenames.
#'
#' @keywords internal
wad.texname.clean <- function(texnames) {
  texnames = gsub("\\*", "s__", texnames);
  texnames = gsub("\\+", "p__", texnames);
  return(texnames);
}

#' @title Read a Quake mipmap texture from a WAD2 file.
#'
#' @param filepath character string, path to WAD file.
#'
#' @param at_offset integer, the index in the WAD file where the texture starts.
#'
#' @return a 'qmiptex' instance, it is like a wal instance with a shorter name field (16 bytes instead of 32) and some fields (anim_name, flags, contents, value) missing.
#'
#' @examples
#' \dontrun{
#' qm = read.quake1miptex("~/knave.wad", at_offset = 1317632);
#' plotwal.mipmap(qm, apply_palette = pal_q1());
#' }
#'
#' @export
read.quake1miptex <- function(filepath, at_offset = 0L) {
  fh = file(filepath, "rb");
  on.exit({ close(fh) });
  endian = "little";

  seek(fh, where = at_offset, origin = "start");

  qtex = list('header' = list());
  class(qtex) = c(class(qtex), 'qmiptex', 'wal');
  num_mip_levels = 4L;
  qtex$header$name = readChar(fh, 16L);
  qtex$header$width = readBin(fh, integer(), n = 1, size = 4, endian = endian);
  qtex$header$height = readBin(fh, integer(), n = 1, size = 4, endian = endian);
  qtex$header$mip_level_offsets = readBin(fh, integer(), n = num_mip_levels, size = 4, endian = endian);
  if(qtex$header$width <= 0L | qtex$header$height <= 0L) {
    stop("Invalid mipmap texture image dimensions");
  }

  data_length_all_mipmaps = sum(get.mipmap.data.lengths(qtex$header$width, qtex$header$height));
  pixel_data = readBin(fh, integer(), n = data_length_all_mipmaps, size = 1L, signed = FALSE, endian = endian);
  qtex$file_data_all_mipmaps = pixel_data + 1L;
  qtex$raw_data = get.wal.mipmap.data(qtex, mip_level = 0L);
  return(qtex);
}
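# A minimal usage sketch (the file path is hypothetical; wrapped in
# `if (FALSE)` so nothing runs when this file is sourced): list a WAD's
# contents, then extract everything into a temporary directory.
if (FALSE) {
  wadf = "~/knave.wad";               # assumed local WAD2 file
  print(wad.contents(wadf));          # data.frame with type, name, size
  outdir = tempdir();
  wad.extract(wadf, outdir = outdir);
  list.files(outdir);                 # one file per WAD directory entry
}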
/scratch/gouwar.j/cran-all/cranData/wal/R/read_wad.R
#' @title Read bitmap file in WAL format.
#'
#' @param filepath character string, path to the file including extension
#'
#' @param hdr logical, whether to return the full list including the header
#'
#' @param hdr_only logical, whether to read only the header
#'
#' @param apply_palette optional 256 x 3 integer matrix, the palette. Must contain values in range 0..255. Pass NULL if you do not want to apply any palette. The resulting \code{wal} object will not have an 'image' entry then.
#'
#' @return by default a wal instance, a named list including the header and pixel data. The pixel values are palette indices in range 0..255; the palette is NOT included in the file, so you will need to define one or get it from elsewhere to see the final image. If \code{hdr} is FALSE, only the final RGB image is returned.
#'
#' @examples
#' \dontrun{
#' walf = '~/data/q2_pak0_extracted/textures/e1u2/basic1_7.wal';
#' wal = read.wal(walf);
#' plot(wal);
#' }
#'
#' @export
read.wal <- function(filepath, hdr = TRUE, hdr_only = FALSE, apply_palette = wal::pal_q2()) {
  fh = file(filepath, "rb");
  on.exit({ close(fh) });
  endian = 'little';
  num_mip_maps = 4L;
  read_mip_maps = TRUE;

  wal = list();
  header = list();
  header$tex_name = readChar(fh, 32L);
  header$width = readBin(fh, integer(), n = 1, size = 4, endian = endian);
  header$height = readBin(fh, integer(), n = 1, size = 4, endian = endian);
  header$mip_level_offsets = readBin(fh, integer(), n = num_mip_maps, size = 4, endian = endian);
  header$anim_name = readChar(fh, 32L); # Next frame name in animation, if any. Empty string if none, which is the most common case.
  header$flags = readBin(fh, integer(), n = 1, size = 4, endian = endian);
  header$contents = readBin(fh, integer(), n = 1, size = 4, endian = endian);
  header$value = readBin(fh, integer(), n = 1, size = 4, endian = endian);

  if(hdr_only) {
    return(header);
  }

  if(header$width < 1L | header$height < 1L) {
    warning("File not in WAL format (or invalid zero-length image dimension).");
  }

  # Read data for all mipmaps.
  mip_level0_data_size = header$width * header$height;

  # sanity check: does the data length of the first mipmap match width x height.
  if(header$mip_level_offsets[2L] - header$mip_level_offsets[1L] != mip_level0_data_size) {
    warning(sprintf("Expected %d pixel values in image based on width %d and height %d, but first mipmap size is %d.\n", mip_level0_data_size, header$width, header$height, (header$mip_level_offsets[2L] - header$mip_level_offsets[1L])));
  }

  header$mipmaps = list();
  header$mipmaps$mip_level0_data_size = mip_level0_data_size;
  header$mipmaps$mip_level1_data_size = header$mip_level_offsets[3L] - header$mip_level_offsets[2L];
  header$mipmaps$mip_level2_data_size = header$mip_level_offsets[4L] - header$mip_level_offsets[3L];
  seek(fh, where = 0L, origin = "end");
  end_pos = seek(fh, where = NA); # Find file size (last position).
  header$mipmaps$mip_level3_data_size = end_pos - header$mip_level_offsets[4L];

  seek(fh, where = header$mip_level_offsets[1L], origin = "start");

  raw_data = readBin(fh, integer(), n = mip_level0_data_size, size = 1, signed = FALSE, endian = endian); # vector
  raw_data = raw_data + 1L; # R uses 1-based indices. Note that raw_data is for first mipmap only.

  if(length(raw_data) != mip_level0_data_size) {
    warning(sprintf("Expected %d pixel values, but %d read.\n", mip_level0_data_size, length(raw_data)));
  }

  pixel_cmap_indices = matrix(data = raw_data, nrow = header$height, ncol = header$width, byrow = TRUE); # reshaped to image matrix

  wal = list('header' = header, 'raw_data' = raw_data, 'pixel_cmap_indices' = pixel_cmap_indices);
  class(wal) = c(class(wal), 'wal');

  if(!is.null(apply_palette)) {
    check.palette(apply_palette);
  }
  wal$header$mipmaps$mip_level0_dim = c(wal$header$width, wal$header$height);
  if(read_mip_maps) {
    seek(fh, where = header$mip_level_offsets[2L], origin = "start");
    wal$raw_data_mip_level1 = readBin(fh, integer(), n = header$mipmaps$mip_level1_data_size, size = 1, signed = FALSE, endian = endian) + 1L;
    wal$image_mip_level1 = apply.palette.to.rawdata(wal$raw_data_mip_level1, apply_palette, header$width / 2L, header$height / 2L);
    wal$header$mipmaps$mip_level1_dim = c(wal$header$width / 2L, wal$header$height / 2L);

    seek(fh, where = header$mip_level_offsets[3L], origin = "start");
    wal$raw_data_mip_level2 = readBin(fh, integer(), n = header$mipmaps$mip_level2_data_size, size = 1, signed = FALSE, endian = endian) + 1L;
    wal$image_mip_level2 = apply.palette.to.rawdata(wal$raw_data_mip_level2, apply_palette, header$width / 4L, header$height / 4L);
    wal$header$mipmaps$mip_level2_dim = c(wal$header$width / 4L, wal$header$height / 4L);

    seek(fh, where = header$mip_level_offsets[4L], origin = "start");
    wal$raw_data_mip_level3 = readBin(fh, integer(), n = header$mipmaps$mip_level3_data_size, size = 1, signed = FALSE, endian = endian) + 1L;
    wal$image_mip_level3 = apply.palette.to.rawdata(wal$raw_data_mip_level3, apply_palette, header$width / 8L, header$height / 8L);
    wal$header$mipmaps$mip_level3_dim = c(wal$header$width / 8L, wal$header$height / 8L);
  }

  wal$image = apply.palette.to.rawdata(raw_data, apply_palette, header$width, header$height);
  wal$file_data_all_mipmaps = c(wal$raw_data, wal$raw_data_mip_level1, wal$raw_data_mip_level2, wal$raw_data_mip_level3);

  if(hdr) {
    return(wal);
  } else {
    if(is.null(apply_palette)) {
      stop("Cannot return image without palette. Set hdr to TRUE or pass non-NULL apply_palette parameter.");
    }
    return(wal$image);
  }
}

#' @title Read bitmap image in WAL format, returning image data only.
#'
#' @description Read a bitmap image in WAL format, and return data in the same format as \code{png::readPNG} and \code{jpeg::readJPEG} do.
#'
#' @inheritParams read.wal
#'
#' @seealso \code{read.wal} if you want to read the header and have more control.
#'
#' @return numeric array with dimension width x height x channels, with all color values in range 0..1.
#'
#' @examples
#' \dontrun{
#' walf = '~/data/q2_pak0_extracted/textures/e1u2/basic1_7.wal';
#' wal_image = readWAL(walf);
#' dim(wal_image);
#' }
#'
#' @export
readWAL <- function(filepath, apply_palette = wal::pal_q2()) {
  return(read.wal(filepath, hdr = FALSE, hdr_only = FALSE, apply_palette = apply_palette) / 255.);
}

#' @title Apply a palette to index data to create a 2D image.
#'
#' @param raw_data integer vector of pixel data, each entry represents an index into the palette.
#'
#' @param apply_palette integer matrix, the palette.
#'
#' @param img_width integer, the width of the image to create.
#'
#' @param img_height integer, the height of the image to create.
#'
#' @keywords internal
apply.palette.to.rawdata <- function(raw_data, apply_palette, img_width, img_height) {
  if(!is.null(apply_palette)) {
    channel_red = apply_palette[raw_data, 1];
    channel_green = apply_palette[raw_data, 2];
    channel_blue = apply_palette[raw_data, 3];
    return(array(c(channel_red, channel_green, channel_blue), dim = c(img_width, img_height, 3)));
  } else {
    return(NULL);
  }
}

#' @title Check palette, stop on invalid data.
#'
#' @param pal a palette, i.e., a 256 x 3 integer matrix, with values in range 0..255L.
#'
#' @keywords internal
check.palette <- function(pal) {
  if(!is.matrix(pal)) {
    stop("Palette must be a matrix.");
  }
  if(!isTRUE(all.equal(dim(pal), c(256, 3)))) { # all.equal() returns a string (not FALSE) on mismatch, so wrap it in isTRUE().
    stop("Palette must be a 256 x 3 matrix.");
  }
  if(min(pal) < 0L | max(pal) > 255L) {
    stop("All palette values must be in range 0..255.")
  }
  if(max(pal) <= 1L) {
    warning(sprintf("Palette max value is %d, this palette looks normalized to range 0..1, but the values must be in range 0..255.\n", max(pal)));
  }
  return(invisible(NULL));
}

#' @title S3 plot function for wal image.
#'
#' @param x a wal instance.
#'
#' @param ... extra args, not used.
#'
#' @export
#' @importFrom graphics plot
plot.wal <- function(x, ...) {
  if(requireNamespace('imager', quietly = TRUE)) {
    if(!is.null(x$image)) {
      graphics::plot(imager::as.cimg(array(x$image, dim=c(x$header$width, x$header$height, 1, 3))));
    } else {
      warning("The wal instance contains no final image, using grayscale preview palette. Use 'plotwal.mipmap()' to set palette for viewing.");
      apply_palette = cbind(0L:255L, 0L:255L, 0L:255L);
      check.palette(apply_palette);
      img = apply.palette.to.rawdata(x$raw_data, apply_palette, x$header$width, x$header$height);
      graphics::plot(imager::as.cimg(array(img, dim=c(x$header$width, x$header$height, 1, 3))));
    }
  } else {
    stop("The 'imager' package must be installed to plot WAL images.");
  }
}

#' @title Plot a mipmap level from a WAL image.
#'
#' @param wal a WAL image instance, as returned by \code{read.wal}.
#'
#' @param mip_level integer in range 0..3, the mipmap to plot. Level 0 is the original full-size image, the other ones get smaller and smaller (by factor 2 on each dimension, so 1/4th the size of their predecessor).
#'
#' @inheritParams read.wal
#'
#' @examples
#' \dontrun{
#' walf = '~/data/q2_pak0_extracted/textures/e1u2/basic1_7.wal';
#' wal = read.wal(walf);
#' plotwal.mipmap(wal, mip_level = 3);
#' }
#'
#' @export
plotwal.mipmap <- function(wal, mip_level = 0L, apply_palette = wal::pal_q2()) {
  if(mip_level < 0L | mip_level > 3L) {
    stop("Parameter 'mip_level' must be an integer in range 0..3.");
  }
  raw_data = get.wal.mipmap.data(wal, mip_level);
  img_width = get.wal.mipmap.widths(wal$header$width)[mip_level+1L];
  img_height = get.wal.mipmap.heights(wal$header$height)[mip_level+1L];

  if(is.null(apply_palette)) {
    warning("The wal instance contains no final image and none supplied in parameter 'apply_palette'. Using grayscale preview palette.");
    apply_palette = cbind(0L:255L, 0L:255L, 0L:255L);
  }
  check.palette(apply_palette);
  img = apply.palette.to.rawdata(raw_data, apply_palette, img_width, img_height);
  graphics::plot(imager::as.cimg(array(img, dim=c(img_width, img_height, 1, 3))));
}

#' @title Plot raw pixel index data as image.
#'
#' @param raw_data integer vector containing width * height values in range 0..255, and optionally additional mipmap data at the end (which will be ignored). The raw image data. Can be Q2 WAL data, Q1 miptex data, or anything else.
#'
#' @param width positive integer, the image width.
#'
#' @param height positive integer, the image height.
#'
#' @examples
#' \dontrun{
#' # Plot the Q1 shambler skin:
#' mdl = read.quake.mdl("~/data/q1_pak/progs/shambler.mdl");
#' plotwal.rawdata(mdl$skins$skin_pic, mdl$header$skin_width,
#'   mdl$header$skin_height, apply_palette = pal_q1());
#' }
#'
#' @inheritParams read.wal
#' @export
plotwal.rawdata <- function(raw_data, width, height, apply_palette = wal::pal_q2()) {
  img_size = width * height;
  if(length(raw_data) > img_size) {
    raw_data = raw_data[1L:img_size]; # cut off mipmap data, if any.
  }
  if(length(raw_data) == img_size) {
    if(is.null(apply_palette)) {
      warning("The wal instance contains no final image and none supplied in parameter 'apply_palette'. Using grayscale preview palette.");
      apply_palette = cbind(0L:255L, 0L:255L, 0L:255L);
    }
    check.palette(apply_palette);
    img = apply.palette.to.rawdata(raw_data, apply_palette, width, height);
    graphics::plot(imager::as.cimg(array(img, dim=c(width, height, 1, 3))));
  } else {
    stop(sprintf("Image raw_data too small (%d) for given width %d and height %d, expected at least %d.\n", length(raw_data), width, height, img_size));
  }
}

#' @title Retrieve raw data for given mipmap level from WAL instance.
#'
#' @inheritParams plotwal.mipmap
#'
#' @keywords internal
get.wal.mipmap.data <- function(wal, mip_level) {
  mm_offset = get.mipmap.data.offsets(wal$header$width, wal$header$height, start_at = 0L);
  mm_len = get.mipmap.data.lengths(wal$header$width, wal$header$height);

  mm0 = wal$file_data_all_mipmaps[mm_offset[1]:mm_offset[2]];
  mm1 = wal$file_data_all_mipmaps[mm_offset[2]:mm_offset[3]];
  mm2 = wal$file_data_all_mipmaps[mm_offset[3]:mm_offset[4]];
  mm3 = wal$file_data_all_mipmaps[mm_offset[4]:(mm_offset[4]+mm_len[4])];

  if(mip_level == 0L) {
    return(mm0);
  } else if(mip_level == 1L) {
    return(mm1);
  } else if(mip_level == 2L) {
    return(mm2);
  } else if(mip_level == 3L) {
    return(mm3);
  } else {
    stop("Invalid mip_level, must be 0..3.");
  }
}

#' @title Compute widths of the 4 mipmap levels from base width.
#'
#' @param width_mm integer, the base mipmap width.
#'
#' @return integer vector of length 4, the mipmap widths.
#' @keywords internal
get.wal.mipmap.widths <- function(width_mm) {
  return(c(width_mm, width_mm/2, width_mm/4, width_mm/8));
}

#' @title Compute heights of the 4 mipmap levels from base height.
#'
#' @param height_mm integer, the base mipmap height.
#'
#' @return integer vector of length 4, the mipmap heights.
#'
#' @keywords internal
get.wal.mipmap.heights <- function(height_mm) {
  return(get.wal.mipmap.widths(height_mm));
}
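# Illustrative sketch (wrapped in `if (FALSE)` so nothing runs on source):
# plotwal.rawdata() accepts any vector of 1-based palette indices, so a
# synthetic gradient can be previewed without reading a file. The dimensions
# and index range below are assumptions for the demo, not package constants.
if (FALSE) {
  w = 64L; h = 64L;
  raw = rep(1L:256L, length.out = w * h); # 1-based indices into a 256-color palette
  plotwal.rawdata(raw, width = w, height = h, apply_palette = wal::pal_q2());
}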
/scratch/gouwar.j/cran-all/cranData/wal/R/read_wal.R
## 24 bit JPEG/PNG colors to indexed WAL colors:
# convert RGB colors to LAB space (colorscience::rgb2x or see grDevices::convertColor, https://cran.r-project.org/web/packages/colordistance/vignettes/lab-analyses.html)
# use deltaE metric to compute distances (colorscience::deltaE2000() or spacesXYZ::DeltaE())
# pick the palette color with the smallest distance

## mipmap issue:
# find out storage order
# find out width and height in pixels of the 3 smaller mipmap levels (depends on 1st, I guess)

#' @title Write WAL instance to bitmap file in WAL format.
#'
#' @param filepath character string, path to the file including extension
#'
#' @param wal a wal instance. Note that 1 will be subtracted from the data when it is written, as indices are stored 0-based in the file.
#'
#' @examples
#' \dontrun{
#' walf = '~/data/q2_pak0_extracted/textures/e1u2/basic1_7.wal';
#' wal = read.wal(walf);
#' writeWAL(tempfile(fileext = ".wal"), wal);
#' }
#'
#' @export
writeWAL <- function(filepath, wal) {
  fh = file(filepath, "wb", blocking = TRUE);
  on.exit({ close(fh) }, add = TRUE);
  endian = "little";

  if(nchar(wal$header$tex_name) > 0L) {
    if(nchar(wal$header$tex_name) > 32L) {
      stop("Max length for tex_name is 32.");
    }
    writeChar(wal$header$tex_name, fh, eos = NULL);
  }
  writeBin(as.raw(rep(0L, (32L - nchar(wal$header$tex_name)))), fh, endian = endian); # fill remaining space up to max 32 bytes with zeroes.

  writeBin(as.integer(wal$header$width), fh, size = 4, endian = endian);
  writeBin(as.integer(wal$header$height), fh, size = 4, endian = endian);
  writeBin(as.integer(wal$header$mip_level_offsets), fh, size = 4, endian = endian); # these are 4 integers.

  if(nchar(wal$header$anim_name) > 0L) {
    if(nchar(wal$header$anim_name) > 32L) {
      stop("Max length for anim_name is 32.");
    }
    writeChar(wal$header$anim_name, fh, eos = NULL);
  }
  writeBin(as.raw(rep(0L, (32L - nchar(wal$header$anim_name)))), fh, endian = endian); # fill remaining space up to max 32 bytes with zeroes.

  writeBin(as.integer(wal$header$flags), fh, size = 4, endian = endian);
  writeBin(as.integer(wal$header$contents), fh, size = 4, endian = endian);
  writeBin(as.integer(wal$header$value), fh, size = 4, endian = endian);

  # write data
  writeBin(as.integer(wal$file_data_all_mipmaps - 1L), fh, size = 1, endian = endian);
}
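# Round-trip sketch (wrapped in `if (FALSE)`; the input path is the
# hypothetical one from the examples above): a wal instance read with
# read.wal() can be written back out and re-read, and the pixel indices
# should survive intact.
if (FALSE) {
  wal = read.wal("~/data/q2_pak0_extracted/textures/e1u2/basic1_7.wal");
  tmp = tempfile(fileext = ".wal");
  writeWAL(tmp, wal);
  wal2 = read.wal(tmp);
  identical(wal$raw_data, wal2$raw_data); # expected TRUE
}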
/scratch/gouwar.j/cran-all/cranData/wal/R/write_wal.R
## ----------------------------------------------------------------------------- library("wal"); wal_file = system.file("extdata", "bricks.wal", package = "wal", mustWork = TRUE); wal_image = wal::readWAL(wal_file); ## ----------------------------------------------------------------------------- dim(wal_image); ## ----------------------------------------------------------------------------- wal = wal::read.wal(wal_file); ## ----------------------------------------------------------------------------- wal$header$width; ## ----------------------------------------------------------------------------- plot(wal); ## ----------------------------------------------------------------------------- plotwal.mipmap(wal, apply_palette = wal::pal_q2(), mip_level = 1) ## ----------------------------------------------------------------------------- plotwal.mipmap(wal, apply_palette = wal::pal_q1(), mip_level = 3) ## ----------------------------------------------------------------------------- plotwal.mipmap(wal, apply_palette = wal::pal_q2(), mip_level = 3) ## ---- eval = FALSE------------------------------------------------------------ # writeWAL("~/mytexture.wal", wal); ## ---- eval = FALSE------------------------------------------------------------ # wal.export.to.jpeg(wal, "~/mytexture.jpg"); # wal.export.to.png(wal, "~/mytexture.png"); ## ---- eval = FALSE------------------------------------------------------------ # wal_imported = img.to.wal(png::readPNG("~/mytexture.png")); # writeWAL("~/mytexture.wal", wal_imported); # # wal_imported = img.to.wal(jpeg::readJPEG("~/mytexture.jpg")); # writeWAL("~/mytexture.wal", wal_imported);
/scratch/gouwar.j/cran-all/cranData/wal/inst/doc/wal.R
--- title: "Reading and writing Quake WAL textures with wal" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Reading and writing Quake WAL textures with wal} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- In this document, we show how to read, write, import and export WAL textures, which are used by idtech1 and idtech2 games. ## Reading WAL files If you only need the pixel data of a WAL file, try this: ```{r} library("wal"); wal_file = system.file("extdata", "bricks.wal", package = "wal", mustWork = TRUE); wal_image = wal::readWAL(wal_file); ``` The return value is an array with 3 dimensions, representing image width, height, and channels. It contains RGB color values in range 0..1: ```{r} dim(wal_image); ``` To read the WAL file and get more detailed data, including the header and all mipmaps, read it into a wal instance instead: ```{r} wal = wal::read.wal(wal_file); ``` This allows you to do more things, like converting to other formats and re-writing to other files. The wal instance is a named list, feel free to explore it. E.g., to see the header information, do this: ```{r} wal$header$width; ``` ### Preview a WAL texture in R If you loaded a wal instance, you can plot it: ```{r} plot(wal); ``` This plots the largest mip level with the Quake 2 palette. If you need more control, e.g., you want to plot a different mip level or use a certain palette, use plotwal.mipmap instead: ```{r} plotwal.mipmap(wal, apply_palette = wal::pal_q2(), mip_level = 1) ``` The mipmaps are 0..3, where 0 is the largest (highest quality) version. Let's look at the lowest quality version with the Q1 palette: ```{r} plotwal.mipmap(wal, apply_palette = wal::pal_q1(), mip_level = 3) ``` As you can see, the Q1 palette fits this particular image worse than the Q2 palette: ```{r} plotwal.mipmap(wal, apply_palette = wal::pal_q2(), mip_level = 3) ``` ### Writing WAL image files You can write a WAL instance to a file like this: ```{r, eval = FALSE} writeWAL("~/mytexture.wal", wal); ``` ### Exporting to PNG and JPEG Exporting a WAL instance to JPEG or PNG format is straightforward: ```{r, eval = FALSE} wal.export.to.jpeg(wal, "~/mytexture.jpg"); wal.export.to.png(wal, "~/mytexture.png"); ``` ### Converting JPG or PNG images to WAL format This way is tricky for several reasons: WAL files must have certain dimensions and they use a fixed palette with 256 colors. This means that if you convert a JPG or PNG image, which can have 16 million different colors, to a file to WAL format, it will look different (unless, by coincidence, the image only consists of colors which occur in the palette). During the conversion, each color in the source image is replaced with the most similar color from the palette. Of course, different input colors may be mapped to the same palette color, so the quality will be worse. How much worse depends on how well the palette fits the source image. That all being said, if you have input files in PNG or JPEG format, you can covert them to WAL like this: ```{r, eval = FALSE} wal_imported = img.to.wal(png::readPNG("~/mytexture.png")); writeWAL("~/mytexture.wal", wal_imported); wal_imported = img.to.wal(jpeg::readJPEG("~/mytexture.jpg")); writeWAL("~/mytexture.wal", wal_imported); ``` The widths and heights of the input files must be a power of 2. Typical values used in the games are 16, 32, 64, 128, 256, and 512. The width and height for a single file be different. So 32x256 and 64x64 are fine, but 50x50 is not. 
When importing PNGs, also keep in mind that WAL does not support the alpha channel.
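If you are unsure whether your dimensions qualify, a quick power-of-two check (a plain base-R helper, not part of the wal package API) looks like this:

```{r, eval = FALSE}
is_pow2 = function(x) { x > 0L & bitwAnd(x, x - 1L) == 0L };
is_pow2(c(64L, 50L, 256L)); # TRUE FALSE TRUE
```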
/scratch/gouwar.j/cran-all/cranData/wal/inst/doc/wal.Rmd
--- title: "Reading and writing Quake WAL textures with wal" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Reading and writing Quake WAL textures with wal} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- In this document, we show how to read, write, import and export WAL textures, which are used by idtech1 and idtech2 games. ## Reading WAL files If you only need the pixel data of a WAL file, try this: ```{r} library("wal"); wal_file = system.file("extdata", "bricks.wal", package = "wal", mustWork = TRUE); wal_image = wal::readWAL(wal_file); ``` The return value is an array with 3 dimensions, representing image width, height, and channels. It contains RGB color values in range 0..1: ```{r} dim(wal_image); ``` To read the WAL file and get more detailed data, including the header and all mipmaps, read it into a wal instance instead: ```{r} wal = wal::read.wal(wal_file); ``` This allows you to do more things, like converting to other formats and re-writing to other files. The wal instance is a named list, feel free to explore it. E.g., to see the header information, do this: ```{r} wal$header$width; ``` ### Preview a WAL texture in R If you loaded a wal instance, you can plot it: ```{r} plot(wal); ``` This plots the largest mip level with the Quake 2 palette. If you need more control, e.g., you want to plot a different mip level or use a certain palette, use plotwal.mipmap instead: ```{r} plotwal.mipmap(wal, apply_palette = wal::pal_q2(), mip_level = 1) ``` The mipmaps are 0..3, where 0 is the largest (highest quality) version. Let's look at the lowest quality version with the Q1 palette: ```{r} plotwal.mipmap(wal, apply_palette = wal::pal_q1(), mip_level = 3) ``` As you can see, the Q1 palette fits this particular image worse than the Q2 palette: ```{r} plotwal.mipmap(wal, apply_palette = wal::pal_q2(), mip_level = 3) ``` ### Writing WAL image files You can write a WAL instance to a file like this: ```{r, eval = FALSE} writeWAL("~/mytexture.wal", wal); ``` ### Exporting to PNG and JPEG Exporting a WAL instance to JPEG or PNG format is straightforward: ```{r, eval = FALSE} wal.export.to.jpeg(wal, "~/mytexture.jpg"); wal.export.to.png(wal, "~/mytexture.png"); ``` ### Converting JPG or PNG images to WAL format This way is tricky for several reasons: WAL files must have certain dimensions and they use a fixed palette with 256 colors. This means that if you convert a JPG or PNG image, which can have 16 million different colors, to a file to WAL format, it will look different (unless, by coincidence, the image only consists of colors which occur in the palette). During the conversion, each color in the source image is replaced with the most similar color from the palette. Of course, different input colors may be mapped to the same palette color, so the quality will be worse. How much worse depends on how well the palette fits the source image. That all being said, if you have input files in PNG or JPEG format, you can covert them to WAL like this: ```{r, eval = FALSE} wal_imported = img.to.wal(png::readPNG("~/mytexture.png")); writeWAL("~/mytexture.wal", wal_imported); wal_imported = img.to.wal(jpeg::readJPEG("~/mytexture.jpg")); writeWAL("~/mytexture.wal", wal_imported); ``` The widths and heights of the input files must be a power of 2. Typical values used in the games are 16, 32, 64, 128, 256, and 512. The width and height for a single file be different. So 32x256 and 64x64 are fine, but 50x50 is not. 
When importing PNGs, also keep in mind that WAL does not support the alpha channel.
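If you are unsure whether your dimensions qualify, a quick power-of-two check (a plain base-R helper, not part of the wal package API) looks like this:

```{r, eval = FALSE}
is_pow2 = function(x) { x > 0L & bitwAnd(x, x - 1L) == 0L };
is_pow2(c(64L, 50L, 256L)); # TRUE FALSE TRUE
```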
/scratch/gouwar.j/cran-all/cranData/wal/vignettes/wal.Rmd
new_compare <- function(x = character(), max_diffs = if (in_ci()) Inf else 10) { stopifnot(is.character(x)) structure(x, max_diffs = max_diffs, class = "waldo_compare") } #' @export print.waldo_compare <- function(x, n = attr(x, "max_diffs"), ...) { stopifnot(is.numeric(n) && length(n) == 1 && n >= 1) if (length(x) == 0) { cli::cat_bullet("No differences", bullet = "tick", bullet_col = "green") } else { if (length(x) > n) { x <- c(x[seq_len(n)], glue("And {length(x) - floor(n)} more differences ...")) } cat(paste0(x, collapse = "\n\n"), "\n", sep = "") } invisible(x) }
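# Sketch (not run): `n` caps how many differences print() shows; the rest is
# summarised as "And {k} more differences ...".
if (FALSE) {
  cmp <- waldo::compare(as.list(letters), as.list(rev(letters)))
  print(cmp, n = 3)
}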
/scratch/gouwar.j/cran-all/cranData/waldo/R/compare-class.R
compare_data_frame <- function(x, y, paths = c("x", "y"), opts = compare_opts()) {
  # Only show row diffs if columns are atomic, have same names and types, and there are rows
  if (!all_atomic(x) || !all_atomic(y)) {
    return()
  }
  if (!same_cols(x, y)) {
    return()
  }
  if (nrow(x) == 0 || nrow(y) == 0) {
    return()
  }

  rows <- df_rows(x, y, paths = paths, tolerance = opts$tolerance)
  if (is.null(rows)) {
    return()
  }

  diff_rows(rows, paths = paths, max_diffs = opts$max_diffs)
}

diff_rows <- function(rows, paths = c("x", "y"), max_diffs = 10) {
  diffs <- ses_shortest(rows$x, rows$y)
  if (length(diffs) == 0) {
    return(new_compare())
  }

  # Align with diffs
  header <- paste0(" ", names(rows$header), cli::style_bold(rows$header))

  format <- lapply(diffs, function(diff) {
    path_label <- paste0(paths[[1]], " vs ", paths[[2]])
    lines <- line_by_line(rows$x, rows$y, diff, max_diffs = max_diffs)
    paste0(c(path_label, header, lines), collapse = "\n")
  })

  new_compare(unlist(format, recursive = FALSE))
}

# Make a character matrix of formatted cell values
df_rows <- function(x, y, paths = c("x", "y"), tolerance = NULL) {
  # If same length, drop identical columns
  if (nrow(x) == nrow(y)) {
    is_equal <- function(x, y) {
      if (is_numeric(x)) {
        num_equal(x, y, tolerance = tolerance)
      } else {
        identical(x, y)
      }
    }
    same <- vapply(seq_along(x), function(j) is_equal(x[[j]], y[[j]]), logical(1))
    x <- x[!same]
    y <- y[!same]
  }
  if (ncol(x) == 0) {
    return()
  }

  printed_rows(x, y, paths = paths)
}

printed_rows <- function(x, y, paths = c("x", "y")) {
  joint <- rbind(x, y)
  if (!is.data.frame(joint)) {
    # i.e. the inputs were matrices, so rbind() returned a matrix
    joint <- as.data.frame(joint)
    names(joint) <- paste0("[,", format(seq_along(joint)), "]")
  }

  # A speedier implementation of print.data.frame
  cols <- lapply(joint, format)
  for (i in seq_along(cols)) {
    cols[[i]] <- format(c(names(joint)[[i]], cols[[i]]), justify = "right")
  }
  lines <- do.call(paste, cols)

  row_idx <- c(seq_len(nrow(x)), seq_len(nrow(y)))
  row_idx <- paste0(rep(paths, c(nrow(x), nrow(y))), "[", row_idx, ", ] ")
  names(lines) <- format(c("", row_idx), align = "right")

  list(
    header = lines[1],
    x = lines[2:(nrow(x) + 1)],
    y = lines[(nrow(x) + 2):length(lines)]
  )
}

same_cols <- function(x, y) {
  if (!identical(names(x), names(y))) {
    return(FALSE)
  }

  for (j in seq_along(x)) {
    if (!is.numeric(x[[j]]) || !is.numeric(y[[j]])) {
      if (!identical(typeof(x[[j]]), typeof(y[[j]]))) {
        return(FALSE)
      }
    }
    if (!identical(attributes(x[[j]]), attributes(y[[j]]))) {
      return(FALSE)
    }
  }

  TRUE
}

unrowname <- function(x) {
  row.names(x) <- NULL
  x
}

all_atomic <- function(x) {
  all(vapply(x, is_atomic, logical(1)))
}
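# Sketch (not run): the row-oriented diff above is what powers data frame
# comparisons like the following, where only the differing rows are printed.
if (FALSE) {
  df1 <- data.frame(x = 1:3, y = c("a", "b", "c"))
  df2 <- data.frame(x = c(1L, 2L, 4L), y = c("a", "b", "c"))
  waldo::compare(df1, df2)
}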
/scratch/gouwar.j/cran-all/cranData/waldo/R/compare-data-frame.R
compare_opts <- function(..., tolerance = NULL, max_diffs = if (in_ci()) Inf else 10, ignore_srcref = TRUE, ignore_attr = FALSE, ignore_encoding = TRUE, ignore_function_env = FALSE, ignore_formula_env = FALSE, list_as_map = FALSE, quote_strings = TRUE ) { base <- old_opts(...) seen <- new.env(parent = emptyenv()) seen$envs <- list() waldo <- list( tolerance = tolerance, max_diffs = max_diffs, ignore_srcref = ignore_srcref, ignore_attr = ignore_attr, ignore_encoding = ignore_encoding, ignore_function_env = ignore_function_env, ignore_formula_env = ignore_formula_env, list_as_map = list_as_map, quote_strings = quote_strings, seen = seen ) utils::modifyList(waldo, base) } old_opts <- function(..., tol, check.attributes, checkNames) { out <- list() if (!missing(tol)) { warn("`tol` is deprecated; please use `tolerance` instead") out$tolerance <- tol } if (!missing(check.attributes)) { warn("`check.attributes` is deprecated; please use `ignore_attr` instead") out$ignore_attr <- !check.attributes } if (!missing(checkNames)) { warn("`checkNames` no longer supported; please use `ignore_attr` instead") out$ignore_attr <- !checkNames } if (!missing(...)) { args <- substitute(...()) exprs <- vapply(args, expr_deparse, character(1)) names <- names2(args) exprs <- ifelse(names == "", exprs, paste0(names, " = ", exprs)) warn(paste0("Unused arguments (", paste0(exprs, collapse = ', '), ")")) } out }
/scratch/gouwar.j/cran-all/cranData/waldo/R/compare-opts.R
compare_vector <- function(x, y, paths = c("x", "y"), opts = compare_opts()) {
  # Early exit for numerics that compare equal (this also skips the
  # format()-based comparison below)
  if (typeof(x) %in% c("integer", "double") && num_equal(x, y, opts$tolerance)) {
    return()
  }

  if (!isTRUE(opts$ignore_attr) && is.object(x) && has_format_method(x)) {
    x_str <- format(x)
    y_str <- format(y)
    out <- compare_character(x_str, y_str, paths, max_diffs = opts$max_diffs)
    paths <- paste0("unclass(", paths, ")")
  } else {
    out <- character()
  }

  if (length(out) == 0) {
    out <- c(out, switch(typeof(x),
      integer = ,
      double = compare_numeric(x, y, paths,
        tolerance = opts$tolerance,
        max_diffs = opts$max_diffs
      ),
      complex = compare_complex(x, y, paths,
        tolerance = opts$tolerance,
        max_diffs = opts$max_diffs
      ),
      logical = compare_logical(x, y, paths, max_diffs = opts$max_diffs),
      raw = ,
      character = compare_character(x, y, paths, quote = if (opts$quote_strings) '"' else NULL, max_diffs = opts$max_diffs)
    ))
  }
  out
}

has_format_method <- function(x) {
  for (class in class(x)) {
    if (!is.null(utils::getS3method("format", class, optional = TRUE))) {
      return(TRUE)
    }
  }
  FALSE
}

compare_logical <- function(x, y, paths = c("x", "y"), max_diffs = Inf) {
  diff_element(
    encodeString(x),
    encodeString(y),
    paths,
    quote = NULL,
    max_diffs = max_diffs
  )
}

compare_character <- function(x, y, paths = c("x", "y"), quote = "\"", max_diffs = Inf) {
  if (multiline(x) || multiline(y)) {
    x <- split_by_line(x)
    y <- split_by_line(y)
    opts <- compare_opts(max_diffs = max_diffs)

    if (length(x) == 1 && length(y) == 1) {
      new_compare(compare_by_line1(x, y, paths, opts))
    } else {
      new_compare(compare_by_line(x, y, paths, opts))
    }
  } else {
    diff_element(
      x, y, paths,
      quote = quote,
      max_diffs = max_diffs,
      is_string = TRUE
    )
  }
}

compare_numeric <- function(x, y, paths = c("x", "y"), tolerance = default_tol(), max_diffs = Inf) {
  if (num_equal(x, y, tolerance)) {
    return(new_compare())
  }

  if (length(dim(x)) == 2 && identical(dim(x), dim(y))) {
    rows <- printed_rows(x, y, paths = paths)
    out <- diff_rows(rows, paths = paths, max_diffs = max_diffs)
    if (length(out) > 0) {
      return(out)
    }
  }

  if (length(x) == length(y)) {
    digits <- min_digits(x, y, tolerance)
    x_fmt <- num_exact(x, digits = digits)
    y_fmt <- num_exact(y, digits = digits)
  } else {
    # Different lengths, so the values can't be aligned digit-by-digit;
    # fall back to the default string representation.
    x_fmt <- as.character(x)
    y_fmt <- as.character(y)
  }

  out <- diff_element(
    x_fmt, y_fmt, paths,
    quote = NULL,
    justify = "right",
    max_diffs = max_diffs
  )
  if (length(out) > 0) {
    out
  } else {
    glue::glue("{paths[[1]]} != {paths[[2]]} but don't know how to show the difference")
  }
}

compare_complex <- function(x, y, paths = c("x", "y"), tolerance = default_tol(), max_diffs = Inf) {
  if (length(x) == length(y)) {
    c(
      compare_numeric(
        Re(x), Re(y),
        paths = paste0("Re(", paths, ")"),
        tolerance = tolerance,
        max_diffs = max_diffs
      ),
      compare_numeric(
        Im(x), Im(y),
        paths = paste0("Im(", paths, ")"),
        tolerance = tolerance,
        max_diffs = max_diffs
      )
    )
  } else {
    x_fmt <- format(x)
    y_fmt <- format(y)
    diff_element(
      x_fmt, y_fmt, paths,
      quote = NULL,
      justify = "right",
      max_diffs = max_diffs
    )
  }
}

# Helpers -----------------------------------------------------------------

num_exact <- function(x, digits = 6) {
  sprintf(paste0("%0.", digits, "f"), x)
}

# Minimal number of digits needed to show differences
min_digits <- function(x, y, tolerance = default_tol()) {
  attributes(x) <- NULL
  attributes(y) <- NULL

  n <- digits(abs(x - y))
  if (!is.null(tolerance)) {
    n <- min(n, digits(tolerance))
  }
  n
}

# This looks ok:
# grid <- 10 ^ seq(0, -6, length.out = 1e3)
# plot(grid, 
sapply(grid, digits), log = "x") digits <- function(x) { x <- x[!is.na(x) & x != 0] if (length(x) == 0) { return(0) } scale <- -log10(min(x)) if (scale <= 0) { # Don't add digits if x > 1 0L } else { # Need to first round roughly to avoid tiny FP differences ceiling(round(scale, digits = 2)) } }
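# Sketch (not run): digits() picks the smallest number of decimal places
# that still reveals a difference, and min_digits() applies it to a pair.
if (FALSE) {
  digits(0.001)        # 3
  min_digits(1, 1.001) # 3, so both values are formatted with 3 decimals
}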
/scratch/gouwar.j/cran-all/cranData/waldo/R/compare-value.R
#' Compare two objects
#'
#' @description
#' This compares two R objects, identifying the key differences. It:
#'
#' * Orders the differences from most important to least important.
#' * Displays the values of atomic vectors that are actually different.
#' * Carefully uses colour to emphasise changes (while still being readable
#'   when colour isn't available).
#' * Uses R code (not a text description) to show where differences arise.
#' * Where possible, it compares elements by name, rather than by position.
#' * Errs on the side of producing too much output, rather than too little.
#'
#' `compare()` is an alternative to [all.equal()].
#'
#' @section Controlling comparisons:
#'
#' There are two ways for an object (rather than the person calling `compare()`
#' or `expect_equal()`) to control how it is compared to other objects.
#' First, if the object has an S3 class, you can supply a [compare_proxy()]
#' method that provides an alternative representation of the object; this is
#' particularly useful if important data is stored outside of R, e.g. in
#' an external pointer.
#'
#' Alternatively, you can attach an attribute called `"waldo_opts"` to your
#' object. This should be a list of compare options, using the same names
#' and possible values as the arguments to this function. This option
#' is ignored by default (`ignore_attr`) so that you can set the options in
#' the object that you control. (If you don't want to see the attributes
#' interactively, you could attach them in a [compare_proxy()] method.)
#'
#' Options supplied in this way also affect all the children. This means
#' options are applied in the following order, from lowest to highest
#' precedence:
#'
#' 1. Defaults from `compare()`.
#' 1. The `waldo_opts` for the parents of `x`.
#' 1. The `waldo_opts` for the parents of `y`.
#' 1. The `waldo_opts` for `x`.
#' 1. The `waldo_opts` for `y`.
#' 1. User-specified arguments to `compare()`.
#'
#' Use these techniques with care. If you accidentally cover up an important
#' difference you can create a confusing situation where `x` and `y` behave
#' differently but `compare()` reports no differences in the underlying objects.
#'
#' @param x,y Objects to compare. `x` is treated as the reference object
#'   so messages describe how `y` is different to `x`.
#' @param x_arg,y_arg Name of `x` and `y` arguments, used when generating paths
#'   to internal components. These default to "old" and "new" since it's
#'   most natural to supply the previous value then the new value.
#' @param ... A handful of other arguments are supported with a warning for
#'   backward compatibility. These include:
#'
#'   * `all.equal()` arguments `checkNames` and `check.attributes`
#'   * `testthat::compare()` argument `tol`
#'
#'   All other arguments are ignored with a warning.
#' @param tolerance If non-`NULL`, used as threshold for ignoring small
#'   floating point difference when comparing numeric vectors. Using any
#'   non-`NULL` value will cause integer and double vectors to be compared
#'   based on their values, not their types, and will ignore the difference
#'   between `NaN` and `NA_real_`.
#'
#'   It uses the same algorithm as [all.equal()], i.e., first we generate
#'   `x_diff` and `y_diff` by subsetting `x` and `y` to look only at locations
#'   with differences. Then we check that
#'   `mean(abs(x_diff - y_diff)) / mean(abs(y_diff))` (or just
#'   `mean(abs(x_diff - y_diff))` if `y_diff` is small) is less than
#'   `tolerance`.
#' @param max_diffs Control the maximum number of differences shown. The
#'   default shows 10 differences when run interactively and all differences
#'   when run in CI. Set `max_diffs = Inf` to see all differences.
#' @param ignore_srcref Ignore differences in function `srcref`s? `TRUE` by
#'   default since the `srcref` does not change the behaviour of a function,
#'   only its printed representation.
#' @param ignore_attr Ignore differences in specified attributes?
#'   Supply a character vector to ignore differences in named attributes.
#'   By default the `"waldo_opts"` attribute is listed in `ignore_attr` so
#'   that changes to it are not reported; if you customize `ignore_attr`, you
#'   will probably want to do this yourself.
#'
#'   For backward compatibility with `all.equal()`, you can also use `TRUE`,
#'   to ignore differences in all attributes. This is not generally
#'   recommended as it is a blunt tool that will ignore many important
#'   functional differences.
#' @param ignore_function_env,ignore_formula_env Ignore the environments of
#'   functions and formulas, respectively? These are provided primarily for
#'   backward compatibility with `all.equal()` which always ignores these
#'   environments.
#' @param ignore_encoding Ignore string encoding? `TRUE` by default, because
#'   this is R's default behaviour. Use `FALSE` when specifically concerned
#'   with the encoding, not just the value of the string.
#' @param list_as_map Compare lists as if they are mappings between names and
#'   values. Concretely, this drops `NULL`s in both objects and sorts named
#'   components.
#' @param quote_strings Should strings be surrounded by quotes? If `FALSE`,
#'   only side-by-side and line-by-line comparisons will be used, and there's
#'   no way to distinguish between `NA` and `"NA"`.
#' @returns A character vector with class "waldo_compare". If there are no
#'   differences it will have length 0; otherwise each element contains the
#'   description of a single difference.
#' @export #' @examples #' # Thanks to diffobj package comparison of atomic vectors shows differences #' # with a little context #' compare(letters, c("z", letters[-26])) #' compare(c(1, 2, 3), c(1, 3)) #' compare(c(1, 2, 3), c(1, 3, 4, 5)) #' compare(c(1, 2, 3), c(1, 2, 5)) #' #' # More complex objects are traversed, stopping only when the types are #' # different #' compare( #' list(x = list(y = list(structure(1, z = 2)))), #' list(x = list(y = list(structure(1, z = "a")))) #' ) #' #' # Where possible, recursive structures are compared by name #' compare(iris, rev(iris)) #' #' compare(list(x = "x", y = "y"), list(y = "y", x = "x")) #' # Otherwise they're compared by position #' compare(list("x", "y"), list("x", "z")) #' compare(list(x = "x", x = "y"), list(x = "x", y = "z")) #' compare <- function(x, y, ..., x_arg = "old", y_arg = "new", tolerance = NULL, max_diffs = if (in_ci()) Inf else 10, ignore_srcref = TRUE, ignore_attr = "waldo_opts", ignore_encoding = TRUE, ignore_function_env = FALSE, ignore_formula_env = FALSE, list_as_map = FALSE, quote_strings = TRUE ) { opts <- compare_opts( ..., tolerance = tolerance, max_diffs = max_diffs, ignore_srcref = ignore_srcref, ignore_attr = ignore_attr, ignore_encoding = ignore_encoding, ignore_formula_env = ignore_formula_env, ignore_function_env = ignore_function_env, list_as_map = list_as_map, quote_strings = quote_strings ) # Record options overridden by user opts$user_specified <- intersect(names(opts), names(match.call())) out <- compare_structure(x, y, paths = c(x_arg, y_arg), opts = opts) new_compare(out, max_diffs) } compare_structure <- function(x, y, paths = c("x", "y"), opts = compare_opts()) { if (!is_missing(x)) { proxy <- compare_proxy(x, paths[[1]]) x <- proxy$object paths[[1]] <- proxy$path } if (!is_missing(y)) { proxy <- compare_proxy(y, paths[[2]]) y <- proxy$object paths[[2]] <- proxy$path } opts <- merge_lists(opts, attr(x, "waldo_opts"), attr(y, "waldo_opts"), opts[opts$user_specified] ) if (is_identical(x, y, opts)) { return(character()) } # Compare type term <- compare_terminate(x, y, paths, tolerance = opts$tolerance, ignore_attr = opts$ignore_attr ) if (length(term) > 0) { return(term) } if (is_list(x) && opts$list_as_map) { x <- as_map(x) y <- as_map(y) } out <- character() # Then length if ((is_list(x) || is_pairlist(x)) && length(x) != length(y)) { out <- c(out, should_be("length {length(x)}", "length {length(y)}")) } # Then attributes/slots if (isS4(x)) { out <- c(out, compare_character(is(x), is(y), glue("is({paths})"))) out <- c(out, compare_by_slot(x, y, paths, opts)) # S4 objects can have attributes that are not slots out <- c(out, compare_by_attr( attrs(x, c(slotNames(x), "class")), attrs(y, c(slotNames(y), "class")), paths, opts) ) } else if (!isTRUE(opts$ignore_attr)) { if (is_call(x) && opts$ignore_formula_env) { attr(x, ".Environment") <- NULL attr(y, ".Environment") <- NULL } if ((is_closure(x) || is_call(x)) && opts$ignore_srcref) { x <- zap_srcref(x) y <- zap_srcref(y) } out <- c(out, compare_by_attr(attrs(x, opts$ignore_attr), attrs(y, opts$ignore_attr), paths, opts)) } # Then contents if (is_list(x) || is_pairlist(x) || is.expression(x)) { if (is.data.frame(x) && is.data.frame(y)) { out <- c(out, compare_data_frame(x, y, paths, opts = opts)) } x <- unclass(x) y <- unclass(y) ignore_names <- isTRUE(opts$ignore_attr) || "names" %in% opts$ignore_attr if (!ignore_names && is_dictionaryish(x) && is_dictionaryish(y)) { out <- c(out, compare_by_name(x, y, paths, opts)) } else { out <- c(out, compare_by_pos(x, 
y, paths, opts)) } } else if (is_environment(x)) { if (is_seen(list(x, y), opts$seen$envs)) { # Only report difference between pairs of environments once return(out) } else if (is_named_env(x) || is_named_env(y)) { # Compare by reference out <- c(out, should_be("<env:{env_label(x)}>", "<env:{env_label(y)}>")) } else { # Compare by value x_fields <- as.list.environment(x, all.names = TRUE) y_fields <- as.list.environment(y, all.names = TRUE) # Can't use as.list(sorted = TRUE), https://github.com/r-lib/waldo/issues/84 if (length(x_fields) > 0) x_fields <- x_fields[order(names(x_fields))] if (length(y_fields) > 0) y_fields <- y_fields[order(names(y_fields))] if (env_has(x, ".__enclos_env__")) { # enclosing env of R6 methods is object env opts$ignore_function_env <- TRUE x_fields$.__enclos_env__ <- NULL y_fields$.__enclos_env__ <- NULL } opts$seen$envs <- c(opts$seen$envs, list(list(x, y))) out <- c(out, compare_structure(x_fields, y_fields, paths, opts = opts)) out <- c(out, compare_structure( parent.env(x), parent.env(y), paste0("parent.env(", paths, ")"), opts = opts) ) } } else if (is_closure(x)) { if (opts$ignore_function_env) { environment(x) <- emptyenv() environment(y) <- emptyenv() } out <- c(out, compare_by_fun(x, y, paths, opts)) } else if (is_primitive(x)) { out <- c(out, should_be("`{deparse(x)}`", "`{deparse(y)}`")) } else if (is_symbol(x)) { out <- c(out, should_be("`{deparse(x)}`", "`{deparse(y)}`")) } else if (is_call(x)) { attributes(x) <- NULL attributes(y) <- NULL if (!identical(x, y)) { diff <- compare_character( deparse(x), deparse(y), paths, quote = "`", max_diffs = opts$max_diffs ) if (length(diff) == 0) { # Fallback if deparse equal but AST different diff <- compare_structure(as.list(x), as.list(y), paths, opts = opts) } out <- c(out, diff) } } else if (is_atomic(x)) { if (is_character(x) && !opts$ignore_encoding) { out <- c(out, compare_character( Encoding(x), Encoding(y), glue("Encoding({paths})"), max_diffs = opts$max_diffs )) } out <- c(out, compare_vector(x, y, paths = paths, opts = opts)) } else if (typeof(x) == "externalptr") { x <- utils::capture.output(print(x)) y <- utils::capture.output(print(y)) out <- c(out, should_be("{x}", "{y}")) } else if (typeof(x) == "char") { x <- paste0("CHARSXP: ", deparse(x)) y <- paste0("CHARSXP: ", deparse(y)) out <- c(out, should_be("{x}", "{y}")) } else if (typeof(x) == "...") { # Unevaluated dots are unlikely to lead to any significant differences # in behaviour (they're usually captured incidentally) so we just # ignore } else if (!typeof(x) %in% c("S4", "object")) { abort(glue("{paths[[1]]}: unsupported type '{typeof(x)}'"), call = NULL) } out } is_named_env <- function(x) { environmentName(x) != "" } is_seen <- function(x, envs) { for (env in envs) { if (identical(x, env)) { return(TRUE) } } FALSE } # Fast path for "identical" elements - in the long run we'd eliminate this # by re-writing all of waldo in C, but this gives us a nice performance boost # with for a relatively low cost in the meantime. is_identical <- function(x, y, opts) { # These comparisons aren't 100% correct because they don't affect comparison # of character vectors/functions further down the tree. But I think that's # unlikely to have an impact in practice since they're opt-in. 
if (is_character(x) && is_character(y) && !opts$ignore_encoding) { identical(x, y) && identical(Encoding(x), Encoding(y)) } else if (is_function(x) && is_function(y) && !opts$ignore_srcref) { identical(x, y) && identical(attr(x, "srcref"), attr(y, "srcref")) } else { identical(x, y) } } compare_terminate <- function(x, y, paths, tolerance = NULL, ignore_attr = FALSE) { type_x <- friendly_type_of(x) type_y <- friendly_type_of(y) if (is_missing(x) && !is_missing(y)) { type_y <- col_d(type_y) } else if (!is_missing(x) && is_missing(y)) { type_x <- col_a(type_x) } else { type_x <- col_c(type_x) type_y <- col_c(type_y) } type_mismatch_msg <- should_be("{type_x}{short_val(x)}", "{type_y}{short_val(y)}") # missing needs to be treated here because `typeof(missing_arg())` is symbol if (is_missing(x) != is_missing(y)) { return(type_mismatch_msg) } if (typeof(x) == typeof(y) && oo_type(x) == oo_type(y)) { return(character()) } ignore_class <- isTRUE(ignore_attr) || "class" %in% ignore_attr if (ignore_class && (typeof(x) == typeof(y))) { return(character()) } if (!is.null(tolerance) && is_numeric(x) && is_numeric(y)) { return(character()) } # don't care about difference between builtin and special if (is_primitive(x) && is_primitive(y)) { return(should_be("`{deparse(x)}`", "`{deparse(y)}`")) } type_mismatch_msg } should_be <- function(x, y) { string <- paste0( "`{paths[[1]]}` is ", x, "\n", "`{paths[[2]]}` is ", y ) glue(string, .envir = caller_env(), .trim = FALSE) } # compare_each ------------------------------------------------------------ compare_by <- function(index_fun, extract_fun, path_fun) { function(x, y, paths, opts) { idx <- index_fun(x, y) if (length(idx) == 0) return(character()) x_paths <- path_fun(paths[[1]], idx) y_paths <- path_fun(paths[[2]], idx) out <- character() for (i in seq_along(idx)) { out <- c(out, compare_structure( x = extract_fun(x, idx[[i]]), y = extract_fun(y, idx[[i]]), paths = c(x_paths[[i]], y_paths[[i]]), opts = opts) ) } out } } index_name <- function(x, y) union(names(x), names(y)) extract_name <- function(x, i) if (has_name(x, i)) .subset2(x, i) else missing_arg() path_name <- function(path, i) glue("{path}${i}") compare_by_name <- compare_by(index_name, extract_name, path_name) index_pos <- function(x, y) seq_len(max(length(x), length(y))) extract_pos <- function(x, i) if (i <= length(x)) .subset2(x, i) else missing_arg() path_pos <- function(path, i) glue("{path}[[{i}]]") compare_by_pos <- compare_by(index_pos, extract_pos, path_pos) path_line <- function(path, i) glue("lines({path}[[{i}]])") compare_by_line <- compare_by(index_pos, extract_pos, path_line) path_line1 <- function(path, i) glue("lines({path})") compare_by_line1 <- compare_by(index_pos, extract_pos, path_line1) path_attr <- function(path, i) { # from ?attributes, excluding row.names() because it's not a simple accessor funs <- c("comment", "class", "dim", "dimnames", "levels", "names", "tsp") ifelse(i %in% funs, glue("{i}({path})"), glue("attr({path}, '{i}')")) } compare_by_attr <- compare_by(index_name, extract_name, path_attr) #' @importFrom methods slotNames .hasSlot slot is index_slot <- function(x, y) union(slotNames(x), slotNames(y)) extract_slot <- function(x, i) if (.hasSlot(x, i)) slot(x, i) else missing_arg() path_slot <- function(path, i) glue("{path}@{i}") compare_by_slot <- compare_by(index_slot, extract_slot, path_slot) extract_fun <- function(x, i) switch(i, fn_body(x), fn_fmls(x), fn_env(x)) path_fun <- function(path, i) { fun <- unname(c("body", "formals", "environment")[i]) 
glue("{fun}({path})") } compare_by_fun <- compare_by(function(x, y) 1:3, extract_fun, path_fun)
/scratch/gouwar.j/cran-all/cranData/waldo/R/compare.R
diff_align <- function(diff, x, y) { n <- nrow(diff) x_out <- character() y_out <- character() x_idx <- integer() y_idx <- integer() for (i in seq_len(n)) { row <- diff[i, , drop = FALSE] x_i <- seq2(row$x1, row$x2) y_i <- seq2(row$y1, row$y2) # Sometimes (last row?) a change is really one change + many additions if (row$t == "c" && length(x_i) != length(y_i)) { m <- max(length(x_i), length(y_i)) length(x_i) <- m length(y_i) <- m } x_out <- c(x_out, switch(row$t, a = col_x(extract(x, c(x_i, NA[y_i]))), c = col_c(extract(x, x_i)), d = col_d(extract(x, x_i)), x = col_x(extract(x, x_i)) )) y_out <- c(y_out, switch(row$t, a = col_a(extract(y, y_i)), c = col_c(extract(y, y_i)), d = col_x(extract(y, c(y_i, NA[x_i]))), x = col_x(extract(y, y_i)) )) x_idx <- c(x_idx, x_i[x_i != 0], if (row$t == "a") NA[y_i]) y_idx <- c(y_idx, y_i[y_i != 0], if (row$t == "d") NA[x_i]) } # Ensure both contexts are same length if (length(x_out) != length(y_out)) { # TODO: need to figure out when to truncate from left vs right len <- min(length(x_out), length(y_out)) x_out <- x_out[seq(length(x_out) - len + 1, length(x_out))] y_out <- y_out[seq(length(y_out) - len + 1, length(y_out))] x_idx <- x_idx[seq(length(x_idx) - len + 1, length(x_idx))] y_idx <- y_idx[seq(length(y_idx) - len + 1, length(y_idx))] } x_slice <- make_slice(x, x_idx) y_slice <- make_slice(y, y_idx) list( x = x_out, y = y_out, x_slice = x_slice, y_slice = y_slice, x_idx = x_idx, y_idx = y_idx ) } extract <- function(x, idx) { out <- x[idx] out[is.na(idx)] <- "" out } # Only want to show slice if it's partial make_slice <- function(x, idx) { if (all(is.na(idx))) { return(NULL) } idx <- range(idx, na.rm = TRUE) if (idx[[1]] <= 1 && idx[[2]] >= length(x)) { NULL } else { idx } } col_a <- function(x) cli::col_blue(x) col_d <- function(x) cli::col_yellow(x) col_c <- function(x) cli::col_green(x) col_x <- function(x) cli::col_grey(x) # values ------------------------------------------------------------------ diff_element <- function(x, y, paths = c("x", "y"), quote = "\"", justify = "left", max_diffs = 10, width = getOption("width"), is_string = FALSE) { # Must quote before comparison to ensure that "NA" and NA_character # have different representation if (!is.null(quote)) { x <- encodeString(unclass(x), quote = quote) y <- encodeString(unclass(y), quote = quote) } diff <- ses_shortest(x, y) if (length(diff) == 0) { return(new_compare()) } format <- lapply(diff, format_diff_matrix, x = x, y = y, paths = paths, justify = justify, width = width, max_diffs = max_diffs, # Paired comparisons are confusing for unquoted strings use_paired = !is_string || !is.null(quote) ) new_compare(unlist(format, recursive = FALSE)) } format_diff_matrix <- function(diff, x, y, paths, justify = "left", width = getOption("width"), max_diffs = 10, use_paired = TRUE) { alignment <- diff_align(diff, x, y) mat <- rbind(alignment$x, alignment$y) n <- min(ncol(mat), max_diffs) n_trunc <- ncol(mat) - n # Label slices, if needed x_path_label <- label_path(paths[[1]], alignment$x_slice) y_path_label <- label_path(paths[[2]], alignment$y_slice) # Paired lines --------------------------------------------------------------- if (use_paired) { mat_out <- cbind(paste0("`", c(x_path_label, y_path_label), "`:"), mat) if (n_trunc > 0) { mat_out <- mat_out[, seq_len(n + 1)] mat_out <- cbind(mat_out, c(paste0("and ", n_trunc, " more..."), "...")) } out <- apply(mat_out, 2, fansi_align, justify = justify) rows <- apply(out, 1, paste, collapse = " ") if (fansi::nchar_ctl(rows[[1]]) <= width) { 
return(paste0(rows, collapse = "\n")) } } # Side-by-side --------------------------------------------------------------- x_idx_out <- label_idx(alignment$x_idx) y_idx_out <- label_idx(alignment$y_idx) idx_width <- max(nchar(x_idx_out), nchar(y_idx_out)) divider <- ifelse(mat[1,] == mat[2, ], "|", "-") mat_out <- cbind(c(paths[[1]], "|", paths[[2]]), rbind(mat[1, ], divider, mat[2, ])) if (n_trunc > 0) { mat_out <- mat_out[, seq_len(n + 1)] mat_out <- cbind(mat_out, c("...", "", "...")) x_idx_out <- c(x_idx_out[seq_len(n)], "...") y_idx_out <- c(y_idx_out[seq_len(n)], paste0("and ", n_trunc, " more ...")) } mat_out <- rbind( format(c("", x_idx_out), justify = "right"), mat_out, format(c("", y_idx_out), justify = "left") ) out <- apply(mat_out, 1, fansi_align, justify = "left") rows <- apply(out, 1, paste, collapse = " ") if (fansi::nchar_ctl(rows[[1]]) <= width) { return(paste0(rows, collapse = "\n")) } # Line-by-line --------------------------------------------------------------- lines <- line_by_line(x, y, diff, max_diffs = max_diffs) paste0( paste0(x_path_label, " vs ", y_path_label), "\n", paste0(lines, collapse = "\n") ) } line_by_line <- function(x, y, diff, max_diffs = 10) { lines <- character() if (nrow(diff) == 0) { return(lines) } line_a <- function(x) if (length(x) > 0) col_a(paste0("+ ", names(x), x)) line_d <- function(x) if (length(x) > 0) col_d(paste0("- ", names(x), x)) line_x <- function(x) if (length(x) > 0) col_x(paste0(" ", names(x), x)) diff_lengths <- cumsum(pmax(diff$x2 - diff$x1, diff$y2 - diff$y1) + 1) all_diff_lengths <- last(diff_lengths) if (all_diff_lengths > max_diffs) { diffs_ok <- which(stats::lag(diff_lengths, 0) <= max_diffs) if (length(diffs_ok) == 0) { diff_ok <- 0 diff_length_partial <- max_diffs } else { diff_ok <- last(diffs_ok) diff_length_partial <- max_diffs - diff_lengths[[diff_ok]] } if (diff_length_partial > 0) { partial_diff <- diff[diff_ok + 1, ] partial_diff$x2 <- min(partial_diff$x2, partial_diff$x1 + diff_length_partial - 1) partial_diff$y2 <- min(partial_diff$y2, partial_diff$y1 + diff_length_partial - 1) } else { partial_diff <- NULL } diff <- rbind(diff[seq_len(diff_ok), ], partial_diff) n_trunc <- all_diff_lengths - max_diffs } else { n_trunc <- 0 } for (i in seq_len(nrow(diff))) { row <- diff[i, , drop = FALSE] x_i <- seq2(row$x1, row$x2) y_i <- seq2(row$y1, row$y2) lines <- c(lines, switch(row$t, x = line_x(x[x_i]), a = c(line_x(x[x_i]), line_a(y[y_i])), c = interleave(line_d(x[x_i]), line_a(y[y_i])), d = line_d(x[x_i]) )) } if (n_trunc > 0) { lines <- c(lines, paste0("and ", n_trunc, " more ...")) } lines } interleave <- function(x, y) { # Only interleave if same number of lines if (length(x) == length(y)) { ord <- c(seq_along(x), seq_along(y)) c(x, y)[order(ord)] } else { c(x, y) } } label_path <- function(path, slice) { if (is.null(slice)) { path } else { paste0(path, "[", slice[[1]], ":", slice[[2]], "]") } } label_idx <- function(idx) { ifelse(is.na(idx), "", paste0("[", idx, "]")) } last <- function(x) { x[[length(x)]] }
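# Sketch (not run): diff_element() is the shared engine behind the paired,
# side-by-side and line-by-line layouts above; it falls through to the next
# layout whenever the current one exceeds the available width.
if (FALSE) {
  diff_element(c("a", "b", "c"), c("a", "B", "c"), paths = c("x", "y"))
}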
/scratch/gouwar.j/cran-all/cranData/waldo/R/diff.R
num_equal <- function(x, y, tolerance = default_tol()) { if (length(x) != length(y)) { return(FALSE) } if (any(is.na(x) != is.na(y))) { return(FALSE) } if (is.null(tolerance) && any(is.nan(x) != is.nan(y))) { return(FALSE) } attributes(x) <- NULL attributes(y) <- NULL same <- is.na(x) | x == y if (is.null(tolerance)) { return(all(same)) } else if (all(same)) { return(TRUE) } x_diff <- x[!same] y_diff <- y[!same] avg_diff <- mean(abs(x_diff - y_diff)) avg_y <- mean(abs(y_diff)) # compute relative difference when y is "large" but finite if (is.finite(avg_y) && avg_y > tolerance) { avg_diff <- avg_diff / avg_y } avg_diff < tolerance }
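# Illustrative only (assumes num_equal() above is in scope): with the default
# tolerance of sqrt(.Machine$double.eps), small absolute differences compare
# equal, and differences are rescaled relative to y when y is large.
# num_equal(1, 1 + 1e-9)         # TRUE:  absolute difference below tolerance
# num_equal(1e10, 1e10 + 1)      # TRUE:  relative difference ~1e-10
# num_equal(1, 1.001)            # FALSE: difference exceeds tolerance
# num_equal(NA_real_, NA_real_)  # TRUE:  matching NAs are ignored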
/scratch/gouwar.j/cran-all/cranData/waldo/R/num_equal.R
#' Proxy for waldo comparison
#'
#' @description
#' Use this generic to override waldo's default comparison when the default
#' is unsuitable (typically because your object stores data in an external
#' pointer).
#'
#' waldo comes with methods for a few common cases:
#'
#' * data.table: the `.internal.selfref` and `index` attributes
#'   are set to `NULL`. Both attributes are used for performance optimisation,
#'   and don't affect the data.
#'
#' * `xml2::xml_node`: the underlying XML data is stored in memory in C,
#'   behind an external pointer, so the best we can do is convert the
#'   object to a string.
#'
#' * Classes from the `RProtoBuf` package: like XML objects, these store
#'   data in memory in C++ and only expose string names to R. Fortunately,
#'   these have well-understood string representations that we can use for
#'   comparisons. See
#'   <https://protobuf.dev/reference/cpp/api-docs/google.protobuf.text_format/>
#'
#' @param x An object.
#' @param path Path
#' @return A list with two components:
#' * `object`: the modified object
#' * `path`: an updated path showing what modification was applied
#' @export
compare_proxy <- function(x, path = "x") {
  if (typeof(x) == "char") {
    return(list(object = x, path = path))
  }
  UseMethod("compare_proxy")
}

#' @export
compare_proxy.default <- function(x, path) {
  list(object = x, path = path)
}

#' @export
compare_proxy.data.table <- function(x, path) {
  attr(x, ".internal.selfref") <- NULL
  attr(x, "index") <- NULL
  list(object = x, path = path)
}

#' @export
compare_proxy.xml_node <- function(x, path) {
  list(object = as.character(x), path = paste0("as.character(", path, ")"))
}

#' @export
compare_proxy.POSIXlt <- function(x, path) {
  # From R 4.3: More experimentally, a "POSIXlt" object may have an attribute
  # "balanced" indicating if it is known to be filled or fully balanced.
  # This is a performance optimisation that waldo can ignore.
  attr(x, "balanced") <- NULL
  list(object = x, path = path)
}

# RProtoBuf objects -------------------------------------------------------

compare_protobuf <- function(x, path) {
  list(object = x$toString(), path = paste0(path, "$toString()"))
}

#' @export
compare_proxy.Message <- compare_protobuf
#' @export
compare_proxy.Descriptor <- compare_protobuf
#' @export
compare_proxy.EnumDescriptor <- compare_protobuf
#' @export
compare_proxy.FieldDescriptor <- compare_protobuf
#' @export
compare_proxy.ServiceDescriptor <- compare_protobuf
#' @export
compare_proxy.FileDescriptor <- compare_protobuf
#' @export
compare_proxy.EnumValueDescriptor <- compare_protobuf
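# Hedged sketch of a user-supplied method (the class name `my_ptr` is
# hypothetical, not part of waldo): a package whose objects wrap an external
# pointer could register a proxy that compares a stable string representation
# instead of the pointer address.
# compare_proxy.my_ptr <- function(x, path) {
#   list(object = format(x), path = paste0("format(", path, ")"))
# }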
/scratch/gouwar.j/cran-all/cranData/waldo/R/proxy.R
# <https://www.gnu.org/software/diffutils/manual/diffutils.html#Detailed-Normal> # # * `lar`: Add the lines in range `r` of the second file # after line `l` of the first file. # * `fct`: Replace the lines in range `f` of the first file # with lines in range `t` of the second file. # * `rdl`: Delete the lines in range `r` from the first file; line `l` is # where they would have appeared in the second file had they not been deleted. ses <- function(x, y) { attributes(x) <- NULL attributes(y) <- NULL if (is.character(x)) { x <- enc2utf8(x) y <- enc2utf8(y) } out <- diffobj::ses(x, y, warn = FALSE, max.diffs = 100) out <- rematch2::re_match(out, paste0( "(?:(?<x1>\\d+),)?(?<x2>\\d+)", "(?<t>[acd])", "(?:(?<y1>\\d+),)?(?<y2>\\d+)" ))[1:5] out$x1 <- ifelse(out$x1 == "", out$x2, out$x1) out$y1 <- ifelse(out$y1 == "", out$y2, out$y1) out$x1 <- as.integer(out$x1) out$x2 <- as.integer(out$x2) out$y1 <- as.integer(out$y1) out$y2 <- as.integer(out$y2) out } ses_elementwise <- function(x, y) { n_x <- length(x) n_y <- length(y) n <- min(n_x, n_y) id <- seq_len(n) same <- (is.na(x[id]) & is.na(y[id])) | x[id] == y[id] same[is.na(same)] <- FALSE neq <- id[!same] if (length(neq) == 0) { n_x <- length(x) n_y <- length(y) if (length(x) > length(y)) { return(ses_df(n_y + 1, n_x, "d", n_y, n_y)) } else if (length(x) < length(y)) { return(ses_df(n_x, n_x, "a", n_x + 1, n_y)) } else { return(ses_df(integer(), integer(), character(), integer(), integer())) } } new_group <- c(TRUE, neq[-1] - 1 != neq[-length(neq)]) group_id <- cumsum(new_group) diffs <- unname(split(neq, group_id)) x1 <- y1 <- vapply(diffs, function(x) x[[1]], integer(1)) x2 <- y2 <- vapply(diffs, function(x) x[[length(x)]], integer(1)) t <- rep("c", length(diffs)) if (length(y) > length(x)) { y2[[length(diffs)]] <- n_y } else if (length(x) > length(y)) { x2[[length(diffs)]] <- n_x } ses_df(x1, x2, t, y1, y2) } ses_shortest <- function(x, y, size = 3) { ses1 <- ses(x, y) if (nrow(ses1) == 0) { return(list()) } ses2 <- ses_elementwise(x, y) context1 <- ses_chunks(ses1, length(x), length(y), size = size) context2 <- ses_chunks(ses2, length(x), length(y), size = size) diff_length <- function(ses) ses$x2[nrow(ses)] - ses$x1[[1]] + 1 diff1 <- sum(vapply(context1, diff_length, double(1)), na.rm = TRUE) diff2 <- sum(vapply(context2, diff_length, double(1))) if (diff1 == diff2) { # If contextual diffs are same length, break tie using total # number of changes if (diff_length(ses1) < diff_length(ses2)) { context1 } else { context2 } } else if (diff1 < diff2) { context1 } else { context2 } } ses_chunks <- function(diff, n_x, n_y, size = 3) { # Compute context around each individual diff diff$x_start <- pmax(diff$x1 - size, 1) diff$x_end <- pmin(diff$x2 + size, n_x) diff$y_start <- pmax(diff$y1 - size, 1) diff$y_end <- pmin(diff$y2 + size, n_y) # Split up into non-contiguous chunks new_group <- c(TRUE, diff$x_start[-1] > diff$x_end[-nrow(diff)]) group_id <- cumsum(new_group) diffs <- unname(split(diff, group_id)) # Fill in rows that are the same in x and y lapply(diffs, diff_complete) } diff_complete <- function(diff) { n <- nrow(diff) diff$pos <- 1:n ctxt <- data.frame( pos = 1:(n + 1) - 0.5, x1 = c(diff$x_start[[1]], diff$x2 + 1), x2 = c(diff$x1 - 1, diff$x_end[[n]]), t = "x", y1 = c(diff$y_start[[1]], diff$y2 + 1), y2 = c(diff$y1 - 1, diff$y_end[[n]]) ) out <- rbind(diff[names(ctxt)], ctxt) # Interleave in correct order out <- out[order(out$pos), , drop = FALSE] out$pos <- NULL # Drop rows with no data needed <- (out$x2 - out$x1) >= 0 | (out$y2 - out$y1) 
>= 0 out[needed, , drop = FALSE] } ses_df <- function(x1, x2, t, y1, y2) { tibble::tibble(x1 = x1, x2 = x2, t = t, y1 = y1, y2 = y2) }
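# Illustrative only: ses() returns one row per diff hunk with columns
# x1, x2, t, y1, y2, where t is "a" (add), "c" (change), or "d" (delete).
# Comparing c("a", "b", "c") with c("a", "B", "c") yields a single change
# hunk covering element 2 on both sides:
# ses(c("a", "b", "c"), c("a", "B", "c"))
# #> roughly:
# #>      x1    x2 t        y1    y2
# #> 1     2     2 c         2     2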
/scratch/gouwar.j/cran-all/cranData/waldo/R/ses.R
oo_type <- function(x) { if (is.object(x)) { if (isS4(x)) { "S4" } else { if (inherits(x, "R6")) { "R6" } else { "S3" } } } else { "base" } } friendly_type_of <- function(x) { if (is_missing(x)) { return("absent") } if (!is.object(x)) { return(friendly_type(typeof(x))) } if (!isS4(x)) { if (inherits(x, "R6")) { klass <- paste(setdiff(class(x), "R6"), collapse = "/") paste0("an R6 object of class <", klass, ">") } else { paste0( "an S3 object of class <", paste(class(x), collapse = "/"), ">, ", friendly_type(typeof(x)) ) } } else { paste0("an S4 object of class <", class(x), ">") } } friendly_type <- function(type) { switch(type, logical = "a logical vector", integer = "an integer vector", numeric = , double = "a double vector", complex = "a complex vector", character = "a character vector", raw = "a raw vector", string = "a string", list = "a list", NULL = "NULL", environment = "an environment", externalptr = "a pointer", weakref = "a weak reference", S4 = "an S4 object", name = , symbol = "a symbol", language = "a call", pairlist = "a pairlist node", expression = "an expression vector", quosure = "a quosure", formula = "a formula", char = "an internal string", promise = "an internal promise", ... = "an internal dots object", any = "an internal `any` object", bytecode = "an internal bytecode object", primitive = , builtin = , special = "a primitive function", closure = "a function", type ) } short_val <- function(x) { if (is.object(x) || !is_atomic(x)) { return("") } if (is.character(x)) { x <- encodeString(x, quote = "'") } if (length(x) > 5) { x <- c(x[1:5], "...") } paste0(" (", paste0(x, collapse = ", "), ")") } attrs <- function(x, ignore) { out <- attributes(x) names <- setdiff(names2(out), ignore) first <- intersect(c("class", "names", "dim"), names) rest <- sort(setdiff(names, first)) out[c(first, rest)] } is_numeric <- function(x) is_integer(x) || is_double(x) in_ci <- function() { isTRUE(as.logical(Sys.getenv("CI", "FALSE"))) } if (getRversion() < "3.3.0") { strrep <- function(x, times) { vapply( times, function(n) paste(rep(x, n), collapse = ""), FUN.VALUE = character(1) ) } } fansi_align <- function(x, width = NULL, justify = c("left", "right")) { justify <- arg_match(justify) nchar <- fansi::nchar_ctl(x) width <- width %||% max(nchar) padding <- strrep(" ", pmax(0, width - nchar)) switch(justify, left = paste0(x, padding), right = paste0(padding, x) ) } split_by_line <- function(x) { trailing_nl <- grepl("\n$", x) x <- strsplit(x, "\n") x[trailing_nl] <- lapply(x[trailing_nl], c, "") x } multiline <- function(x) any(grepl("\n", x)) default_tol <- function() .Machine$double.eps^0.5 merge_lists <- function(...) { all <- compact(list(...)) Reduce(utils::modifyList, all, init = list()) } compact <- function(x) { is_null <- vapply(x, is.null, logical(1)) x[!is_null] } as_map <- function(x) { # Remove nulls is_null <- vapply(x, is.null, logical(1)) x <- x[!is_null] # Sort named components, preserving positions of unnamed nx <- names2(x) is_named <- nx != "" if (any(is_named)) { idx <- seq_along(x) idx[is_named] <- idx[is_named][order(nx[is_named])] x <- x[idx] } x } scrub_environment <- function(x) { gsub("<env:0x[0-9a-f]+>", "<env: 0x********>", x) }
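# Illustrative only: as_map() drops NULL components and sorts the named
# elements while leaving unnamed ones in place, so that comparisons treat
# named lists as order-insensitive maps.
# as_map(list(b = 1, 2, a = 3, c = NULL))
# #> list(a = 3, 2, b = 1)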
/scratch/gouwar.j/cran-all/cranData/waldo/R/utils.R
#' @keywords internal #' @import rlang #' @importFrom glue glue "_PACKAGE" # The following block is used by usethis to automatically manage # roxygen namespace tags. Modify with care! ## usethis namespace: start ## usethis namespace: end NULL release_extra_revdeps <- function() "testthat"
/scratch/gouwar.j/cran-all/cranData/waldo/R/waldo-package.R
#' Identify walking bouts in GPS and accelerometry data
#'
#' This function identifies walking bouts in GPS and accelerometry data.
#' It processes the GPS data and accelerometry counts to create walk bouts.
#'
#' @param gps_data A data frame containing GPS data
#' @param accelerometry_counts A data frame containing accelerometry counts
#' @param ... Additional arguments to be passed to the function
#' @param collated_arguments A list of collated arguments
#'
#' @returns A data frame containing identified walk bouts
#'
#' @export
identify_walk_bouts_in_gps_and_accelerometry_data <- function(gps_data, accelerometry_counts, ..., collated_arguments = NULL){
  collated_arguments <- collate_arguments(..., collated_arguments=collated_arguments)
  # TODO: take the complete-days processing out of bout identification and
  # merge the complete_days info on separately, e.g.:
  # complete_days <- generate_c_d()
  bouts <- process_accelerometry_counts_into_bouts(accelerometry_counts, collated_arguments=collated_arguments)
  gps_epochs <- process_gps_data_into_gps_epochs(gps_data, collated_arguments=collated_arguments)
  walk_bouts <- process_bouts_and_gps_epochs_into_walkbouts(bouts, gps_epochs, collated_arguments=collated_arguments)
  return(walk_bouts)
}

#' Summarize walking bouts
#'
#' This function summarizes walking bouts, calculating the median speed,
#' complete-day flag, bout start, duration, and category of each bout.
#'
#' @param walk_bouts A data frame containing identified walk bouts
#' @param ... Additional arguments to be passed to the function
#' @param collated_arguments A list of collated arguments
#'
#' @returns A data frame summarizing identified walk bouts
#'
#' @export
summarize_walk_bouts <- function(walk_bouts, ..., collated_arguments = NULL){
  bout <- median <- speed <- complete_day <- time <- bout_category <- NULL
  collated_arguments <- collate_arguments(..., collated_arguments=collated_arguments)
  summary_walk_bouts <- walk_bouts %>%
    dplyr::group_by(bout) %>%
    dplyr::filter(!is.na(bout)) %>%
    dplyr::summarise(
      median_speed = median(speed, na.rm=TRUE),
      complete_day = any(complete_day),
      bout_start = lubridate::as_datetime(min(as.numeric(time)), tz = "UTC"),
      duration = (max(as.numeric(time) + collated_arguments$epoch_length) - min(as.numeric(time)))/60,
      bout_category = (bout_category[1])
    )
  return(summary_walk_bouts)
}
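# Illustrative end-to-end usage, assuming the sample-data generators defined
# elsewhere in this package; the accelerometry generator name
# make_full_day_bout_without_metadata() is assumed from the package's
# sample-data helpers:
# gps_data <- generate_walking_in_seattle_gps_data()
# accelerometry_counts <- make_full_day_bout_without_metadata()
# walk_bouts <- identify_walk_bouts_in_gps_and_accelerometry_data(
#   gps_data, accelerometry_counts)
# summarize_walk_bouts(walk_bouts)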
/scratch/gouwar.j/cran-all/cranData/walkboutr/R/identify_walk_bouts_in_gps_and_accelerometry_data.R
# library(hexSticker)
#
# # Define the data to plot in the sticker
# times <- as.POSIXct(c("2012-04-07 00:00:30", "2012-04-07 00:01:00", "2012-04-07 00:01:30",
#                       "2012-04-07 00:02:00", "2012-04-07 00:02:30", "2012-04-07 00:03:00",
#                       "2012-04-07 00:03:30", "2012-04-07 00:04:00", "2012-04-07 00:04:30",
#                       "2012-04-07 00:05:00", "2012-04-07 00:05:30", "2012-04-07 00:06:00",
#                       "2012-04-07 00:06:30", "2012-04-07 00:07:00", "2012-04-07 00:07:30",
#                       "2012-04-07 00:08:00", "2012-04-07 00:08:30", "2012-04-07 00:09:00"),
#                     tz = "UTC")
# counts <- c(0, 25, 100, 500, 700, 800, 700, 700, 600, 650, 600, 800, 500, 600, 650, 500, 25, 25)
# df <- data.frame(times, counts)
# primary_color <- "#4e79a7"
#
# # Use ggplot to create the plot of time versus count data
# p <- ggplot2::ggplot(df, ggplot2::aes(x = times, y = counts)) +
#   ggplot2::geom_line(color = primary_color) +
#   ggplot2::theme_bw() +
#   ggplot2::theme(panel.background = ggplot2::element_rect(fill = 'transparent')) +
#   ggplot2::theme(axis.text.x = ggplot2::element_blank(),  # remove x axis labels
#                  axis.ticks.x = ggplot2::element_blank(), # remove x axis ticks
#                  axis.text.y = ggplot2::element_blank(),  # remove y axis labels
#                  axis.ticks.y = ggplot2::element_blank()  # remove y axis ticks
#   ) +
#   ggplot2::ylim(0, max(counts) + 50) +
#   ggplot2::labs(x = "time", y = "activity count")
#
# # Use the `hexSticker` function to create the logo
# logo <- hexSticker::sticker(p,
#   package = " walkboutr ",
#   p_size = 16, s_x = 1, s_y = .75, s_width = 1.1, s_height = .8
# )
/scratch/gouwar.j/cran-all/cranData/walkboutr/R/logo.R
#' Global parameters and constants
#'
#' List of Parameters
#' `epoch_length` The duration of an epoch in seconds.
#' `active_counts_per_epoch_min` Minimum accelerometer counts for an epoch to be considered active (vs. inactive).
#' `minimum_bout_length` Minimum number of epochs for a period of activity to be considered as a potential bout.
#' `local_time_zone` Local time zone of the data - data come in and are returned in UTC, but local time zone is used to compute complete_days.
#' `maximum_number_consec_inactive_epochs_in_bout` Number of consecutive epochs that can be labeled as inactive during a bout without ending the bout.
#'
parameters <- list(
  epoch_length = 30,
  active_counts_per_epoch_min = 500,
  minimum_bout_length = 10,
  local_time_zone = "America/Los_Angeles",
  maximum_number_consec_inactive_epochs_in_bout = 3
)

#' List of Constants
#' `non_wearing_min_threshold_epochs` Number of consecutive epochs with activity counts of 0 that constitute a non_wearing period.
#' `min_wearing_hours_per_day` Minimum number of hours in a day an individual must wear an accelerometer for the day to be considered complete.
#' `min_gps_obs_within_bout` Minimum number of GPS observations within a bout for that bout to be considered to have complete GPS data.
#' `min_gps_coverage_ratio` Minimum ratio of data points with versus without GPS data for the bout to be considered to have complete GPS data.
#' `dwellbout_radii_quantile` Threshold for outliering GPS data points - any data points above the 95th percentile are outliered.
#' `max_dwellbout_radii_ft` Maximum radius, in feet, of a bounding circle that would be considered a dwell bout (rather than a potential walk bout).
#' `min_dwellbout_obs` Minimum number of observations to consider something a potential dwell bout.
#' `max_walking_cpe` Maximum CPE value before the accelerometer is considered to be picking up on an activity other than walking.
#' `min_walking_speed_km_h` Minimum speed considered walking.
#' `max_walking_speed_km_h` Maximum speed considered walking.
#'
constants <- list(
  non_wearing_min_threshold_epochs = 40,
  min_wearing_hours_per_day = 8,
  min_gps_obs_within_bout = 5,
  min_gps_coverage_ratio = 0.2,
  dwellbout_radii_quantile = 0.95,
  max_dwellbout_radii_ft = 66,
  min_dwellbout_obs = 10,
  max_walking_cpe = 2863,
  min_walking_speed_km_h = 2,
  max_walking_speed_km_h = 6
)

#' Collate Arguments
#'
#' This function collates user-provided arguments with pre-defined parameters and constants.
#'
#' @param ... named arguments passed by the user
#' @param collated_arguments NULL or previously collated arguments
#'
#' @returns A list of all arguments, including both pre-defined parameters and constants and any user-provided arguments.
collate_arguments <- function(..., collated_arguments = NULL){
  user_arguments <- list(...)
  if (!is.null(collated_arguments) && length(user_arguments) > 0){
    stop("Error: supply either `collated_arguments` or individual parameter overrides, not both.")
  }
  if (!is.null(collated_arguments)){
    return(collated_arguments)
  }
  arguments <- c(parameters, constants)
  for(n in names(user_arguments)){
    if(!(n %in% names(parameters))){
      stop(paste0("Error: unknown parameter `", n, "`. Accepted parameter names are: ",
                  toString(names(parameters))))
    }
    arguments[n] <- user_arguments[n]
  }
  return(arguments)
}
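# Illustrative: overriding a single tunable parameter while keeping all
# other defaults; unknown names are rejected.
# args <- collate_arguments(epoch_length = 15)
# args$epoch_length                   # 15
# args$active_counts_per_epoch_min    # 500 (default)
# collate_arguments(not_a_param = 1)  # errors: unknown parameter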
/scratch/gouwar.j/cran-all/cranData/walkboutr/R/parameters.R
#' Generate Bout Plot #' #' This function generates a plot of accelerometry counts and GPS radius for a specific bout. #' #' @param accelerometry_counts A data frame or tibble containing accelerometry counts. #' @param gps_data A data frame or tibble containing GPS data. #' @param bout_number The number of the bout to be plotted. #' @param leading_minutes number of minutes before a bout starts that we want to plot #' @param trailing_minutes number of minutes after a bout ends that we want to plot #' @param gps_target_size proportional size of circle plot #' @param ... Additional arguments to be passed to the function #' @param collated_arguments A list of collated arguments #' #' @return A ggplot object representing the bout plot. #' #' @export generate_bout_plot <- function(accelerometry_counts,gps_data,bout_number, leading_minutes = 8, trailing_minutes = 12, gps_target_size = 0.25, ..., collated_arguments = NULL){ time <- bout <- activity_counts <- NULL b <- bout_number collated_arguments <- collate_arguments(..., collated_arguments=collated_arguments) bouts <- process_accelerometry_counts_into_bouts(accelerometry_counts) gps_epochs <- process_gps_data_into_gps_epochs(gps_data) walk_bouts <- bouts %>% dplyr::left_join(gps_epochs, by = "time") %>% dplyr::arrange(time) %>% dplyr::mutate(bout = ifelse(bout==0,NA,bout)) # if there are no bouts, just return the data if(sum(is.na(walk_bouts$bout)) == nrow(walk_bouts)){ return(walk_bouts) } else{ bout_radii <- generate_bout_radius(walk_bouts,collated_arguments$dwellbout_radii_quantile) # returns df: bout, bout_radius (numer) gps_completeness <- evaluate_gps_completeness(walk_bouts,collated_arguments$min_gps_obs_within_bout,collated_arguments$min_gps_coverage_ratio) # returns df: bout, complete_gps (T/F), median_speed all_bouts <- walk_bouts %>% dplyr::left_join(gps_completeness, by = c("bout")) %>% dplyr::left_join(bout_radii, by = c("bout")) %>% dplyr::filter(!is.na(bout)) %>% dplyr::select(c("latitude", "longitude", "bout_radius", "complete_gps", "bout","time", "activity_counts")) df <- all_bouts %>% dplyr::filter(bout == b) %>% dplyr::select(-c("bout")) bout_radius <- max(df$bout_radius) # proportionally scale down the values to make plotting faster bout_thresh_ratio <- collated_arguments$max_dwellbout_radii_ft/bout_radius plot_bout_radius <- .1*bout_radius plot_max_dwellbout_radii_ft <- bout_thresh_ratio*plot_bout_radius if(collated_arguments$max_dwellbout_radii_ft > bout_radius){ plot_dat_b <- data.frame(x0 = 1:(plot_max_dwellbout_radii_ft*2.2), y0 = 1:(plot_max_dwellbout_radii_ft*2.2)) %>% dplyr::mutate(alpha = 0.2)} else{ plot_dat_b <- data.frame(x0 = 1:(plot_bout_radius*2.2), y0 = 1:(plot_bout_radius*2.2)) %>% dplyr::mutate(alpha = 0.2) } colors <- list(threshold = "skyblue4", data_radius = "palegreen4") # circle plotting code p <- ggplot2::ggplot() + ggplot2::coord_fixed() + ggplot2::theme_void() thresh_circle <- ggforce::geom_circle(data = plot_dat_b, ggplot2::aes(x0=0, y0=0, r = plot_max_dwellbout_radii_ft), color = colors$threshold, fill = colors$threshold, show.legend = TRUE) bout_circle <- ggforce::geom_circle(data = plot_dat_b, ggplot2::aes(x0=0, y0=0, r = plot_bout_radius), color = colors$data_radius, fill = colors$data_radius, show.legend = TRUE) if(!(any(is.na(df$complete_gps)))){ if(plot_max_dwellbout_radii_ft>plot_bout_radius) { circles <- p + thresh_circle + bout_circle title <- paste0("Dwell Bout (radius = ", plot_bout_radius %>% round(2), " feet)") title_color <- colors$threshold } else{ circles <- p + bout_circle + 
thresh_circle title <- paste0("Non-Dwell Bout (radius = ", plot_bout_radius %>% round(2), " feet)") title_color <- colors$data_radius } } else{ circles <- p + thresh_circle title <- paste0("Incomplete GPS Coverage") title_color <- colors$threshold } ## ACCELEROMETRY PLOT min_n <- min(as.numeric(df$time)) start <- lubridate::with_tz(min_n, tz = collated_arguments$local_time_zone)- lubridate::minutes(leading_minutes) max_n <- max(as.numeric(df$time)) end <- lubridate::with_tz(max_n, tz = collated_arguments$local_time_zone) + lubridate::minutes(trailing_minutes) df <- df %>% dplyr::mutate(active = ifelse(activity_counts > collated_arguments$active_counts_per_epoch_min, "high", "low")) %>% dplyr::mutate(time = lubridate::ymd_hms(time)) %>% dplyr::filter(time > start) %>% dplyr::filter(time < end) xmax <- end xmin <- start + (1 - gps_target_size)*(end - start) y_low <- 0 y_high <- max(walk_bouts$activity_counts)*1.2 ymax <- y_high ymin <- (1 - gps_target_size) * y_high plot <- ggplot2::ggplot(walk_bouts, ggplot2::aes(x = time, y = activity_counts)) + ggplot2::geom_point() + ggplot2::geom_hline(yintercept=collated_arguments$active_counts_per_epoch_min, linetype="dashed", color = "darksalmon") + ggplot2::xlim(as.POSIXct(start), as.POSIXct(end)) + ggplot2::ylim(y_low, y_high) + ggplot2::geom_text(ggplot2::aes(end, collated_arguments$active_counts_per_epoch_min, label = "Active", size = 12)) + ggplot2::geom_line() + ggplot2::ggtitle(paste(title)) + ggplot2::labs(x = "Time", y = "Accelerometer Counts") + ggplot2::theme_bw() + ggplot2::theme(legend.position = "none") + ggplot2::annotation_custom(ggplot2::ggplotGrob(circles), xmin = xmin, xmax = end, ymin = ymin, ymax = ymax) } return(plot) }
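# Illustrative usage, assuming the package's sample-data generators
# (the accelerometry generator name make_full_day_bout_without_metadata()
# is assumed):
# gps_data <- generate_walking_in_seattle_gps_data()
# accelerometry_counts <- make_full_day_bout_without_metadata()
# generate_bout_plot(accelerometry_counts, gps_data, bout_number = 1)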
/scratch/gouwar.j/cran-all/cranData/walkboutr/R/plot.R
#' Process Accelerometry Counts into Bouts
#'
#' This function processes accelerometry counts into bouts of activity and
#' returns those bouts as well as flags for whether the individual was wearing
#' their device and if the wearing day can be considered complete.
#'
#' The input schema for the accelerometry data is `time` and `activity_counts`.
#' - `time` should be a column in date-time format, in the UTC time zone, with no null values.
#' - `activity_counts` should be a positive numeric column with no null values.
#'
#' @param accelerometry_counts A data frame with two columns: time and activity counts (CPE, counts per epoch)
#' @param ... Additional arguments to be passed to the function.
#' @param collated_arguments An optional list of previously collated arguments.
#'
#' @return A data frame of epochs labeled with identified walk bouts,
#' non-wearing periods, and complete days, based on the provided accelerometry
#' counts and processing parameters.
#'
#' @details This function processes accelerometry counts into bouts of activity.
#' First, the function validates the input data.
#' Second, it identifies bouts of activity based on a specified minimum number
#' of active counts per epoch, a maximum number of consecutive inactive epochs
#' allowed within a bout, and a minimum bout length.
#' Third, it identifies non-wearing periods based on a specified threshold of
#' consecutive epochs with 0 activity counts.
#' Fourth, it identifies complete days of wearing the accelerometer based on a
#' specified minimum number of wearing hours and the epoch length.
#'
#' @export
process_accelerometry_counts_into_bouts <- function(accelerometry_counts, ..., collated_arguments = NULL) {
  collated_arguments <- collate_arguments(..., collated_arguments = collated_arguments)
  # Step 1: validate data
  validate_accelerometry_data(accelerometry_counts)
  # Step 2: Identify bouts
  accelerometry_counts <- identify_bouts(accelerometry_counts,
                                         collated_arguments$maximum_number_consec_inactive_epochs_in_bout,
                                         collated_arguments$active_counts_per_epoch_min,
                                         collated_arguments$minimum_bout_length)
  # Step 3: Identify non-wearing periods
  accelerometry_counts <- identify_non_wearing_periods(accelerometry_counts,
                                                       collated_arguments$non_wearing_min_threshold_epochs)
  # Step 4: Identify complete days
  bouts <- identify_complete_days(accelerometry_counts,
                                  collated_arguments$min_wearing_hours_per_day,
                                  collated_arguments$epoch_length,
                                  collated_arguments$local_time_zone)
  return(bouts)
}

#' Run Length Encoding
#'
#' A function that runs a normal run length encoding and adds some extra variables for use in calculations.
#' #' @param x a vector to run the function on #' #' @returns a data.frame with columns for lengths, values, end, and begin run_length_encode <- function(x){ # running a normal run length encoding and adding some extra variables for use in calculations rle_df <- with(rle(as.numeric(x)), data.frame(dplyr::tibble( "lengths" = lengths, "values" = values, "end" = cumsum(lengths), "begin" = (end-lengths)+1))) return(rle_df) } #' Identify Bouts: #' #' @param accelerometry_counts A data frame containing accelerometry counts and times #' @param maximum_number_consec_inactive_epochs_in_bout Maximum number of consecutive inactive epochs in a bout without ending the bout #' @param active_counts_per_epoch_min Minimum accelerometer counts for an epoch to be considered active (vs. inactive) #' @param minimum_bout_length Minimum number of epochs for a period of activity to be considered as a potential bout #' #' @returns A data frame with the same columns as the input data frame \code{accelerometry_counts}, #' but with a new column named \code{bout} that indicates whether each epoch is part of a bout #' (in which case it gets a bout number assigned) or not (0) #' #' @details This function partitions the accelerometry data into bouts of activity and non-bouts by #' first identifying all epochs that are definitely not part of bouts. Then, it uses run length encoding to #' partition the data into potential bouts and non-bouts, and labels each potential bout as a bout or non-bout #' based on whether it meets the criteria for bout length and the number of consecutive inactive epochs allowed. #' Finally, the function adds a new column to the input data frame \code{accelerometry_counts} named \code{bout} #' that indicates whether each epoch is part of a bout (1) or not (0). identify_bouts <- function(accelerometry_counts, maximum_number_consec_inactive_epochs_in_bout, active_counts_per_epoch_min, minimum_bout_length){ activity_counts <- inactive <- values <- maybe_bout <- bout <- time <- . <- NULL n_epochs_date <- non_wearing <- total_wearing_epochs_whole_day <- NULL # Identify all epochs that are definitely not part of bouts # if we have 4 or more epochs where the activity level is below our activity threshold # then the epoch at the left most edge of that window is definitely not part of a bout # we can identify these periods by making a boolean col that identifies all low activity periods # and then doing a rolling sum of that activity col with a window size of 4 to find a rolling sum # and labeling all epochs where that rolling sum is 4 as non-bout. non_bout_window <- maximum_number_consec_inactive_epochs_in_bout + 1 accelerometry_counts <- accelerometry_counts %>% dplyr::mutate(bout = 0, inactive = activity_counts < active_counts_per_epoch_min, non_bout = data.table::frollsum(inactive, non_bout_window, fill = non_bout_window) == non_bout_window) # Use that identification to partition dataset into non bouts and maybe bouts using a run length encoding non_bout_rle <- run_length_encode(accelerometry_counts$non_bout) %>% dplyr::mutate(maybe_bout = 1-values) # Every sequence of epochs labeled maybe_bout will have a number of inactive periods at the end of the series equal to the # maximum number of consecutive inactive epochs in a bout. # So, we find all potential bouts by filtering to anything labeled maybe_bout with enough epochs to # meet our minimum bout length and account for trailing inactive periods. 
  potential_bout_length <- minimum_bout_length + maximum_number_consec_inactive_epochs_in_bout
  potential_bouts <- non_bout_rle %>%
    dplyr::filter((lengths >= potential_bout_length) & (maybe_bout == 1))

  # Remove inactive and non_bout cols
  accelerometry_counts <- accelerometry_counts %>%
    dplyr::select(-c("inactive", "non_bout"))

  # If there are no potential bouts, return accelerometry_counts with all
  # bout labels left at 0
  if(nrow(potential_bouts) == 0){
    return(accelerometry_counts)
  }

  # Otherwise, label bouts
  num_bouts <- 0
  for (i in 1:nrow(potential_bouts)){
    row <- dplyr::slice(potential_bouts, i)
    start_ind <- row$begin
    end_ind <- row$end - maximum_number_consec_inactive_epochs_in_bout
    active_epochs <- accelerometry_counts %>%
      dplyr::slice(start_ind:end_ind) %>%
      dplyr::filter(activity_counts >= active_counts_per_epoch_min) %>%
      nrow()
    is_bout <- active_epochs >= minimum_bout_length
    if (is_bout){
      num_bouts <- num_bouts + 1
      accelerometry_counts <- accelerometry_counts %>%
        dplyr::mutate(bout = ifelse(dplyr::row_number() %in% (start_ind:end_ind), num_bouts, bout))
    }
  }
  return(accelerometry_counts)
}

#' Identify non-wearing periods
#'
#' This function identifies non-wearing periods in accelerometry data based on a
#' threshold of consecutive epochs with activity counts of 0.
#'
#' @param accelerometry_counts a data frame containing columns for time
#' (in POSIXct format) and activity_counts
#' @param non_wearing_min_threshold_epochs an integer value indicating the
#' minimum number of consecutive epochs with 0 activity counts that constitute a non-wearing period
#'
#' @returns a data frame with the same columns as the input data frame \code{accelerometry_counts},
#' but with a new column named \code{non_wearing} that indicates whether the
#' individual was wearing their accelerometer during a given period.
#'
#' @details
#' Identify periods where the accelerometer is not being worn based on the activity counts and a minimum threshold value.
identify_non_wearing_periods <- function(accelerometry_counts, non_wearing_min_threshold_epochs){
  activity_counts <- values <- NULL
  accelerometry_counts <- accelerometry_counts %>%
    dplyr::mutate(inactive = (activity_counts == 0), non_wearing = FALSE)
  inactive_rle <- run_length_encode(accelerometry_counts$inactive)
  non_wearing <- inactive_rle %>%
    dplyr::filter(values == 1 & lengths >= non_wearing_min_threshold_epochs)
  if(nrow(non_wearing) == 0){
    return(accelerometry_counts)
  }
  for(i in 1:nrow(non_wearing)){
    row <- dplyr::slice(non_wearing, i)
    start_ind <- row$begin
    end_ind <- row$end
    # accumulate with `|` so earlier non-wearing periods are not overwritten
    # on later loop iterations
    accelerometry_counts <- accelerometry_counts %>%
      dplyr::mutate(non_wearing = non_wearing | dplyr::row_number() %in% (start_ind:end_ind))
  }
  return(accelerometry_counts)
}

#' Identify complete wearing days
#'
#' This function identifies complete days based on accelerometry data by
#' calculating the total number of epochs worn per day and comparing it to the
#' minimum number of wearing epochs per day required to consider a day complete.
#'
#' @param accelerometry_counts A data frame containing accelerometry counts and non-wearing epochs.
#' @param min_wearing_hours_per_day Minimum number of hours of wearing time required for a day to be considered complete.
#' @param epoch_length The duration of an epoch in seconds.
#' @param local_time_zone The local time zone of the data. The data come in and are returned in UTC, but the local time zone is used to compute complete_days.
#'
#' @returns A data frame containing accelerometer counts, non-wearing epochs, and a binary variable indicating if the day is complete or not.
identify_complete_days <- function(accelerometry_counts, min_wearing_hours_per_day, epoch_length, local_time_zone){
  time <- n_epochs_date <- non_wearing <- total_wearing_epochs_whole_day <- NULL
  min_wearing_epochs_per_day <- (min_wearing_hours_per_day*60*60)/epoch_length
  complete_days_df <- accelerometry_counts %>%
    dplyr::mutate(date = lubridate::as_date(time, tz = local_time_zone)) %>%
    dplyr::group_by(date) %>%
    # use dplyr::n() so the epoch count is per date, not the whole data frame
    dplyr::summarise(n_epochs_date = dplyr::n(),
                     total_wearing_epochs_whole_day = n_epochs_date - sum(non_wearing)) %>%
    dplyr::mutate(complete_day = total_wearing_epochs_whole_day >= min_wearing_epochs_per_day) %>%
    dplyr::select(-c(total_wearing_epochs_whole_day))
  accelerometry_counts <- accelerometry_counts %>%
    dplyr::mutate(date = lubridate::as_date(time, tz = local_time_zone)) %>%
    dplyr::left_join(complete_days_df, by = c("date")) %>%
    dplyr::select(-c("date"))
  return(accelerometry_counts)
}
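# Illustrative only: run_length_encode() (defined above) extends base rle()
# with begin/end indices, which the bout and non-wearing scans slice into.
# run_length_encode(c(0, 0, 1, 1, 1, 0))
# #>   lengths values end begin
# #> 1       2      0   2     1
# #> 2       3      1   5     3
# #> 3       1      0   6     6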
/scratch/gouwar.j/cran-all/cranData/walkboutr/R/process_accelerometry_counts_into_bouts.R
#' Process bouts and GPS epochs into walk bouts #' #' This function processes bouts and GPS epochs into walk bouts. It uses a set of parameters and constants to determine whether an epoch is active or inactive, the minimum number of epochs for a period of activity to be considered as a potential bout, the local time zone of the data, and other relevant information. It takes in two data frames, "bouts" and "gps_epochs", and returns a processed data frame, "walk_bouts", with added columns "bout", "bout_radius", "bout_category", "complete_days", "non_wearing", and "speed".#' #' @param bouts a data frame containing bout information #' @param gps_epochs a data frame containing GPS information #' @param ... additional arguments to be passed on to other functions #' @param collated_arguments a list of arguments collated from other functions #' #' @returns a processed data frame, "walk_bouts", with added columns "bout", "bout_radius", "bout_category", "complete_days", "non_wearing", and "speed"#' #' #' @details The function first collates the arguments passed to it with the collate_arguments() function. It then merges "gps_epochs" and "bouts" data frames by "time" column, and orders the resulting data frame by "time". Then, it generates the "bout_radius" using the generate_bout_radius() function, which calculates the radius of a bounding circle that would be considered a dwell bout. Next, the function evaluates the completeness of GPS data using the evaluate_gps_completeness() function, which determines the number of GPS observations within a bout and the ratio of data points with versus without GPS data. Finally, the function generates the "bout_category" using the generate_bout_category() function, which determines whether a bout is a walk bout or a dwell bout, and calculates the complete days, non-wearing periods, and speed. #' The function categorizes bouts into the following categories: #' - dwell bout #' - non-walk too vigorous #' - non-walk too slow #' - non-walk too fast #' - unknown lack of gps #' #' NOTE: If there are multiple GPS points associated with a given epoch interval, #' we use the latest possible GPS data point within that epoch. As such, #' median walking speed is calculated for only the latest available GPS data point in each epoch. #' #' NOTE: The median speed is calculated using only the GPS data points that remain after #' GPS data processing. All GPS data points that are outliered for the calculation of a bout #' radius, are, however, included in the assessment of GPS completeness as they are outliers #' but are still present GPS data points. #' #' NOTE: Outliered data points are excluded from the radius calculation but are included in #' subsequent functions that assess GPS completeness. They are also returned from #' these functions with the original data and all new variables. 
#' #' @export process_bouts_and_gps_epochs_into_walkbouts <- function(bouts, gps_epochs, ..., collated_arguments = NULL){ time <- bout <- NULL collated_arguments <- collate_arguments(..., collated_arguments = collated_arguments) walk_bouts <- bouts %>% dplyr::left_join(gps_epochs, by = "time") %>% dplyr::arrange(time) %>% dplyr::mutate(bout = ifelse(bout==0,NA,bout)) # if there are no bouts, just return the data if(sum(is.na(walk_bouts$bout)) == nrow(walk_bouts)){ return(walk_bouts) } else{ bout_radii <- generate_bout_radius(walk_bouts, collated_arguments$dwellbout_radii_quantile) # returns df: bout, bout_radius (numer) gps_completeness <- evaluate_gps_completeness(walk_bouts, collated_arguments$min_gps_obs_within_bout, collated_arguments$min_gps_coverage_ratio) # returns df: bout, complete_gps (T/F), median_speed walk_bouts <- generate_bout_category(walk_bouts, bout_radii, gps_completeness, collated_arguments$max_dwellbout_radii_ft, collated_arguments$max_walking_cpe, collated_arguments$min_walking_speed_km_h, collated_arguments$max_walking_speed_km_h) # returns df: bout, bout_category, complete_days, non_wearing, speed return(walk_bouts) } } #' Outlier GPS data points #' This function identifies outlier GPS points for the bout radius calculation from a given set of latitude and longitude coordinates. #' #' @param lat_long A data frame containing the latitude and longitude coordinates for the GPS points. #' @param dwellbout_radii_quantile The threshold for outliering GPS data points - any data points above the specified percentile are outliered. #' #' @returns A data frame containing the latitude and longitude coordinates for the non-outlier GPS points. outlier_gps_points <- function(lat_long, dwellbout_radii_quantile){ quantile <- . <- NULL # outlier gps points that are above the 95% percentile of summed distances distance_sum <- sp::SpatialPoints(coords = cbind(long = lat_long$longitude, lat = lat_long$latitude)) %>% sp::spDists(., longlat = TRUE) %>% colSums() points_to_keep <- distance_sum < quantile(distance_sum, dwellbout_radii_quantile)[[1]][1] lat_long <- cbind(lat_long, points_to_keep) %>% dplyr::filter(points_to_keep==TRUE) return(lat_long) } #' Generate Bounding Circle Radius for Walking Bouts #' #' This function generates a bounding circle radius for each walking bout identified in the input data. The bounding circle is defined as the smallest circle that fully contains all GPS locations observed during a walking bout. #' #' @param walk_bouts A data frame containing GPS locations for each walking bout, with columns "longitude", "latitude", and "bout" (a unique identifier for each bout) #' @param dwellbout_radii_quantile A quantile (between 0 and 1) used to filter outlying GPS data points before generating the bounding circle. GPS points with a distance from the center greater than the radius of the circle that contains (1 - dwellbout_radii_quantile) of the GPS points are considered outliers and are excluded. #' #' @returns A data frame containing the bout identifier and the radius of the bounding circle for each walking bout. generate_bout_radius <- function(walk_bouts, dwellbout_radii_quantile){ bout <- longitude <- latitude <- . 
<- NULL
  bout_radii <- data.frame(bout = integer(), bout_radius = numeric())
  # drop rows with an NA bout label, then iterate over the unique bout labels
  bout_labels <- walk_bouts %>%
    tidyr::drop_na(bout) %>%
    dplyr::pull(bout) %>%
    unique()
  for(bout_label in bout_labels){
    # pull long/lat and remove outliers
    lat_long <- walk_bouts %>%
      dplyr::filter(bout == bout_label) %>%
      tidyr::drop_na()
    lat_long <- outlier_gps_points(lat_long, dwellbout_radii_quantile)
    lat_long <- lat_long %>%
      dplyr::distinct(longitude, latitude, .keep_all = TRUE)
    if(nrow(lat_long) > 1){
      # derive radius of bounding circle
      circle <- lat_long %>%
        dplyr::select(longitude, latitude) %>%
        as.matrix() %>% # convert x and y columns to a two-column matrix with n rows
        sf::st_multipoint() %>% # generate (x, y) coordinates
        lwgeom::st_minimum_bounding_circle()
      circle_area <- geosphere::areaPolygon(x = circle[[1]])
      circle_radius <- sqrt(circle_area/pi) %>%
        measurements::conv_unit(., from = 'm', to = 'ft')
    } else {
      circle_radius <- NA
    }
    bout_radii <- rbind(bout_radii,
                        data.frame(bout = bout_label, bout_radius = circle_radius))
  }
  return(bout_radii)
}

#' Evaluate GPS completeness for each walking bout
#'
#' This function evaluates the completeness of GPS data for each walking bout. For each bout, it checks if the number of valid GPS records (with speed, latitude, and longitude data) is greater than a specified threshold, and if the ratio of valid GPS records to total records is greater than a specified minimum. If either of these conditions is met, the function considers the GPS data for the bout to be complete. The function also calculates the median speed for each bout.
#'
#' @param walk_bouts A data frame containing information about walking bouts, including GPS data.
#' @param min_gps_obs_within_bout The minimum number of GPS observations required for a bout to be considered to have complete GPS data.
#' @param min_gps_coverage_ratio The minimum ratio of GPS observations with valid data to total GPS observations for a bout to be considered to have complete GPS data.
#'
#' @returns A data frame containing information about the GPS completeness and median speed for each bout.
evaluate_gps_completeness <- function(walk_bouts, min_gps_obs_within_bout, min_gps_coverage_ratio){
  bout <- speed <- latitude <- longitude <- n_valid_gps_records <- gps_coverage_ratio <- NULL
  sufficient_gps_coverage <- sufficient_gps_records <- NULL
  # determine if we have sufficient gps coverage for each bout
  gps_completeness <- walk_bouts %>%
    dplyr::group_by(bout) %>%
    dplyr::summarise(
      n_valid_gps_records = sum(!is.na(speed) & !is.na(latitude) & !is.na(longitude)), # speed and GPS units
      gps_coverage_ratio = ifelse(sum(!is.na(bout)) != 0,
                                  n_valid_gps_records/sum(!is.na(bout)), NA),
      sufficient_gps_records = n_valid_gps_records > min_gps_obs_within_bout,
      sufficient_gps_coverage = gps_coverage_ratio > min_gps_coverage_ratio,
      median_speed = stats::median(speed, na.rm = TRUE)) %>%
    dplyr::mutate(complete_gps = ifelse((sufficient_gps_coverage == FALSE & sufficient_gps_records == FALSE),
                                        FALSE, TRUE)) %>%
    dplyr::select(c("bout", "complete_gps", "median_speed"))
  return(gps_completeness)
}

#' Generate bout categories
#'
#' Given accelerometer bout data, this function generates bout categories, which includes dwell bouts, non-walk bouts that are either too slow, too fast, or too vigorous, and bouts with an unknown lack of GPS data.
#'
#' @param walk_bouts a data frame that contains bout information for walking bouts.
#' @param bout_radii a data frame that contains bout radii information.
#' @param gps_completeness a data frame that contains GPS data completeness information. #' @param max_dwellbout_radii_ft a numeric scalar that specifies the maximum radius, in feet, of a bounding circle that would be considered a dwell bout. #' @param max_walking_cpe a numeric scalar that specifies the maximum activity counts per epoch value before the accelerometer is considered to be picking up on an activity other than walking. #' @param min_walking_speed_km_h a numeric scalar that specifies the minimum speed considered walking. #' @param max_walking_speed_km_h a numeric scalar that specifies the maximum speed considered walking. #' #' @returns a data frame with the following columns: bout, dwell_bout (T/F), non_walk_too_vigorous (T/F), non_walk_slow (T/F), non_walk_fast (T/F), non_walk_incomplete_gps (T/F) #' #' @details The function uses the bout information for walking bouts, bout radii information, and GPS data completeness information to generate the bout categories. #' #' The function first generates dwell bouts by joining the bout radii information and GPS data completeness information on the bout column, and then filters out the rows that have bout values that are missing using the filter function. Then, it calculates the dwell bout values as TRUE if the complete_gps column is TRUE and the bout_radius column is less than max_dwellbout_radii_ft. The resulting data frame only contains the bout and dwell_bout columns. #' The function then joins the resulting data frame with the walking bout data frame using the bout column. Then, for the non-walk bouts, the function calculates whether they are too vigorous, too slow, or too fast. For the non-walk bouts that are too vigorous, the function calculates the mean activity_counts for each bout, and then sets the non_walk_too_vigorous value as TRUE if the mean activity_counts value is greater than max_walking_cpe. For the non-walk bouts that are too slow or too fast, the function calculates the median speed for each bout, and then sets the non_walk_slow or non_walk_fast value as TRUE if the median speed value is less than min_walking_speed_km_h or greater than max_walking_speed_km_h, respectively. Finally, the function generates a non_walk_incomplete_gps value as TRUE if the complete_gps value is FALSE for the bout. #' The resulting data frame contains the following columns: bout, dwell_bout (T/F), non_walk_too_vigorous (T/F), non_walk_slow (T/F), non_walk_fast (T/F), non_walk_incomplete_gps (T/F). 
generate_bout_category <- function(walk_bouts, bout_radii, gps_completeness, max_dwellbout_radii_ft, max_walking_cpe, min_walking_speed_km_h, max_walking_speed_km_h){ bout <- complete_gps <- bout_radius <- activity_counts <- speed <- median_speed <- NULL bout_category <- median_speed <- mean_cpe <- dwell_bout <- non_walk_incomplete_gps <- NULL # bout categories: # walk bout # dwell bout # nonwalk too vigorous, # nonwalk too slow, # nonwalk too fast, # unknown lack of gps dwell_bouts <- bout_radii %>% dplyr::filter(!(is.na(bout))) %>% dplyr::left_join(gps_completeness, by = "bout") %>% dplyr::mutate(dwell_bout = complete_gps & (bout_radius < max_dwellbout_radii_ft)) %>% dplyr::select(c("bout", "dwell_bout")) # cols: bout, dwell_bout (T/F) nonwalk_incomplete_gps <- gps_completeness %>% dplyr::mutate(non_walk_incomplete_gps = !complete_gps) %>% dplyr::filter(!is.na(bout)) %>% dplyr::select(c("bout", "non_walk_incomplete_gps")) ### These are the lines that define precedence of categorization ### bout_categories <- walk_bouts %>% dplyr::filter(!is.na(bout)) %>% dplyr::left_join(dwell_bouts, by = c("bout")) %>% dplyr::left_join(nonwalk_incomplete_gps, by = c("bout")) %>% dplyr::mutate(bout_category = "walk_bout") %>% dplyr::group_by(bout) %>% dplyr::summarise(mean_cpe = mean(activity_counts), median_speed = stats::median(speed, na.rm=TRUE), bout_category = ifelse((median_speed > max_walking_speed_km_h),"non_walk_fast",bout_category), bout_category = ifelse((median_speed < min_walking_speed_km_h),"non_walk_slow",bout_category), bout_category = ifelse((mean_cpe > max_walking_cpe),"non_walk_too_vigorous",bout_category), bout_category = ifelse(any(dwell_bout),"dwell_bout",bout_category), bout_category = ifelse(any(non_walk_incomplete_gps),"non_walk_incomplete_gps",bout_category)) %>% dplyr::select(c(bout,bout_category)) categorized_bouts <- bout_categories %>% merge(walk_bouts, by = c("bout")) %>% dplyr::select(-c("inactive","n_epochs_date")) return(categorized_bouts) }
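# Minimal sketch of the precedence encoded above: each successive ifelse()
# overwrites bout_category, so later checks win. A bout that is both too
# fast and within the dwell radius ends up "dwell_bout", and incomplete
# GPS overrides every other label.
# cat <- "walk_bout"
# cat <- ifelse(TRUE,  "non_walk_fast", cat)            # speed check
# cat <- ifelse(TRUE,  "dwell_bout", cat)               # radius check wins
# cat <- ifelse(FALSE, "non_walk_incomplete_gps", cat)  # GPS check (not triggered)
# cat  # "dwell_bout"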
/scratch/gouwar.j/cran-all/cranData/walkboutr/R/process_bouts_and_gps_epochs_into_walkbouts.R
#' Convert GPS data into GPS epochs
#'
#' This function processes GPS data into GPS epochs, with each epoch having a duration specified by \code{epoch_length}.
#'
#' The input schema for the GPS data is `time`, `latitude`, `longitude`, and `speed`.
#' - `time` should be a column in date-time format, in the UTC time zone, with no null values.
#' - `latitude` should be a numeric, non-null latitude coordinate between -90 and 90
#' - `longitude` should be a numeric, non-null longitude coordinate between -180 and 180
#' - `speed` should be a numeric, non-null value in kilometers per hour
#'
#' @param gps_data A data frame containing GPS data. Must have columns "time", "latitude", "longitude", and "speed"
#' @param ... Additional arguments to be passed to the function.
#' @param collated_arguments A named list of arguments, used to avoid naming conflicts when calling this function as part of a pipeline. Optional.
#'
#' @returns A data frame with columns latitude, longitude, time, and speed, where time is now the nearest epoch start time
#'
#' @export
process_gps_data_into_gps_epochs <- function(gps_data, ..., collated_arguments = NULL) {
  collated_arguments <- collate_arguments(..., collated_arguments = collated_arguments)
  validate_gps_data(gps_data)
  gps_epochs <- assign_epoch_start_time(gps_data, collated_arguments$epoch_length)
  return(gps_epochs)
}

#' Validate GPS data
#'
#' This function validates GPS data for required variables, correct variable class, and correct data range.
#'
#' @param gps_data A data frame containing GPS data with the following variables: time, latitude, longitude, and speed.
#'
#' @returns This function does not return anything. It throws an error if the GPS data fails any of the validation checks.
#'
#' @export
validate_gps_data <- function(gps_data){
  # Validation schema
  diff <- setdiff(names(gps_data), c("time", "latitude", "longitude", "speed"))
  missing <- setdiff(c("time", "latitude", "longitude", "speed"), names(gps_data))
  if(length(missing) > 0){
    stop(paste0("Error: data provided are missing the following columns: ", toString(missing)))
  }
  if(length(diff) > 0){
    diff <- paste0(diff, collapse = ', ')
    stop(paste0("Error: data provided have the following extra columns: ", diff))
  }
  # Validate time variable
  if(!lubridate::is.timepoint(gps_data$time)){
    stop(paste0("Error: time is not provided in date-time format. ",
                "Class of time variable should be: `POSIXct` `POSIXt`"))
  }
  if(any(is.na(gps_data$time))){
    stop("Error: time data contain NAs")
  }
  if(!(lubridate::tz(gps_data$time) == "UTC")){
    stop("Error: time zone provided is not UTC.")
  }
  # Validate latitude/longitude variables
  if(!(class(gps_data$latitude) %in% c("integer", "numeric"))){
    stop("Error: latitude column is not class integer or numeric.")
  }
  if(any(is.na(gps_data$latitude))){
    stop("Error: latitude column contains NAs")
  }
  if(any(gps_data$latitude < -90 | gps_data$latitude > 90)){
    stop("Error: latitude column contains invalid latitude coordinates")
  }
  if(!(class(gps_data$longitude) %in% c("integer", "numeric"))){
    stop("Error: longitude column is not class integer or numeric.")
  }
  if(any(is.na(gps_data$longitude))){
    stop("Error: longitude column contains NAs")
  }
  if(any(gps_data$longitude < -180 | gps_data$longitude > 180)){
    stop("Error: longitude column contains invalid longitude coordinates")
  }
  # Validate speed variable
  if(!(class(gps_data$speed) %in% c("integer", "numeric"))){
    stop("Error: speed column is not class integer or numeric.")
  }
  if(any(is.na(gps_data$speed))){
    stop("Error: speed column contains NAs")
  }
  if(any(gps_data$speed < 0)){
    stop("Error: speed column contains negative values")
  }
  if(any(gps_data$speed > 2000)){
    message("Warning: speed column contains implausibly large values")
  }
}

#' Assign Epoch Start Time
#'
#' @param gps_data A data frame with GPS data including a column of timestamps and columns for latitude and longitude
#' @param epoch_length The duration of an epoch in seconds
#' @details Selects the closest 30 second increment to assign epoch start time and takes the GPS coordinates associated with the latest time if there are multiple GPS data points in a given 30 second increment. This function returns a data frame of GPS data with a column of epoch times.
#'
#' @returns A data frame of GPS data with an additional column indicating epoch start time
#'
#' @export
assign_epoch_start_time <- function(gps_data, epoch_length){
  time <- epoch_time <- dx_p <- NULL
  # floor each timestamp to the start of its epoch; if there are multiple
  # gps data points in a given epoch, keep the coordinates associated with
  # the latest time
  gps_epochs <- gps_data %>%
    dplyr::mutate(epoch_time = as.numeric(time)) %>%
    dplyr::mutate(dx_p = epoch_time %% epoch_length) %>%
    dplyr::mutate(epoch_time = epoch_time - dx_p) %>%
    dplyr::group_by(epoch_time) %>%
    dplyr::filter(as.numeric(time) == max(as.numeric(time))) %>%
    dplyr::mutate(time = lubridate::as_datetime(epoch_time, tz = "UTC")) %>%
    dplyr::ungroup() %>%
    dplyr::select(-c(dx_p, epoch_time))
  return(gps_epochs)
}
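# Illustrative only: two GPS fixes inside the same 30-second epoch collapse
# to the later fix, timestamped at the epoch boundary.
# gps <- data.frame(
#   time = as.POSIXct(c("2012-04-07 00:00:05", "2012-04-07 00:00:20"),
#                     tz = "UTC"),
#   latitude = c(47.60, 47.61),
#   longitude = c(-122.33, -122.34),
#   speed = c(3, 4)
# )
# assign_epoch_start_time(gps, epoch_length = 30)
# #> one row: time = 2012-04-07 00:00:00 UTC, latitude = 47.61, speed = 4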
/scratch/gouwar.j/cran-all/cranData/walkboutr/R/process_gps_data_into_gps_epochs.R
#' Generate a dataset with date-time, speed, and latitude and longitude of someone moving through space on a walk in Seattle #' #' @param start_lat The starting latitude of the walk. #' @param start_long The starting longitude of the walk. #' @param start_time The start time of a series of data #' @param time_interval The time interval between points in seconds. #' @param n_epochs The number of epochs in the series #' @param seed random seed #' #' @returns A data frame with four columns: "timestamp", "lat", "lon", and "speed". #' #' @export generate_gps_data <- function(start_lat, start_long, start_time, n_epochs = 110, time_interval = 30.0, seed = 1234) { # set the initial location and speed current_lat <- start_lat current_long <- start_long current_speed <- stats::runif(1.7, 0.5, 5) # km/h # set random number generator seed for reproducibility set.seed(seed) # generate a series of locations and speeds directions <- stats::runif(n_epochs, 0, 2 * pi) dts <- stats::runif(n_epochs, 25, 35) # create a time vector times <- seq.POSIXt(as.POSIXct(start_time), length.out = n_epochs + 1, by = time_interval) # create a data frame with columns [time, latitude, longitude, speed] df <- data.frame(time = lubridate::ymd_hms(times,tz="UTC"), latitude = numeric(n_epochs + 1), longitude = numeric(n_epochs + 1), speed = numeric(n_epochs + 1)) # generate latitudes, longitudes, and speeds using a loop df$latitude[1] <- start_lat df$longitude[1] <- start_long df$speed[1] <- current_speed for (i in seq_along(directions)) { df[i+1, c("latitude", "longitude")] <- next_lat_long(df[i, "latitude"], df[i, "longitude"], df[i, "speed"], directions[i], dts[i]) df$speed[i+1] <- stats::runif(1.7,.5,5) } return(df) } #' Calculate next latitude and longitude based on current location, speed, direction, and time elapsed. #' #' Given a current location (latitude and longitude), speed, direction (in radians), and time elapsed (in seconds), #' this function calculates the next latitude and longitude. The calculations are based on the assumption of a constant #' speed and direction during the elapsed time. #' #' @param latitude The current latitude in decimal degrees. #' @param longitude The current longitude in decimal degrees. #' @param speed The speed in kilometers per hour. #' @param direction The direction of movement in radians from due north (0 radians). #' @param dt The elapsed time in seconds. #' #' @return A numeric vector of length 2 containing the next latitude and longitude in decimal degrees. next_lat_long <- function(latitude, longitude, speed, direction, dt) { # convert the direction from radians to degrees direction_degrees <- direction * 180 / pi # convert the speed from km/h to m/s speed_mps <- speed / 3.6 # calculate the distance traveled in meters distance_m <- speed_mps * dt # calculate the bearing in degrees from due north bearing_degrees <- (90 - direction_degrees) %% 360 # convert the current latitude and longitude to radians lat1 <- latitude * pi / 180 lon1 <- longitude * pi / 180 # calculate the next latitude and longitude in radians lat2 <- lat1 + (distance_m / 6378137) * (180 / pi) lon2 <- lon1 + (distance_m / 6378137) * (180 / pi) / cos(lat1 * pi/180) # convert the next latitude and longitude to decimal degrees lat2_degrees <- lat2 * 180 / pi lon2_degrees <- lon2 * 180 / pi return(c(lat2_degrees, lon2_degrees)) } #' Generate GPS data for a walking activity in Seattle, WA #' #' This function generates a data frame containing GPS data for a walking activity in Seattle, WA on April 7th, 2012. 
#' It calls the function \code{generate_gps_data} to create a series of GPS locations and speeds. The resulting data frame has columns for time, latitude, longitude, and speed.
#'
#' @param start_lat The starting latitude of the walk.
#' @param start_long The starting longitude of the walk.
#' @param start_time The start time of a series of data
#'
#' @return A data frame with columns `time`, `latitude`, `longitude`, `speed`
#' @export
generate_walking_in_seattle_gps_data <- function(start_lat = 47.6062,
                                                 start_long = -122.3321,
                                                 start_time = '2012-04-07 00:00:30') {
  # Generates a sample dataset of walking in Seattle, WA, USA on April 7th, 2012.
  # The defaults are downtown Seattle coordinates (note the negative,
  # i.e. western, longitude).
  gps_data <- generate_gps_data(start_lat = start_lat,
                                start_long = start_long,
                                start_time = start_time)
  return(gps_data)
}

#' Generate accelerometry datasets
#'
#' This function generates a data frame of activity epochs with specified minimum active counts per epoch, minimum bout length,
#' maximum number of consecutive inactive epochs in a bout, minimum non-wearing length, and minimum complete day length.
#'
#' @param length Length of the active period
#' @param is_bout Logical indicating if the active period is a bout
#' @param non_wearing Logical indicating if the active period is a non-wearing period
#' @param complete_day Logical indicating if the active period is a complete day
#'
#' @return A data frame of activity epochs
make_active_period <- function(length = 1, is_bout = TRUE, non_wearing = FALSE, complete_day = FALSE) {
  # reference thresholds (30-second epochs assumed); only
  # active_counts_per_epoch_min is used directly below
  active_counts_per_epoch_min <- 500
  minimum_bout_length <- 10
  maximum_number_consec_inactive_epochs_in_bout <- 3
  min_non_wearing_length <- 20 * 2 # Assuming 30 second epochs
  min_complete_day <- 8602 # 8hrs per 24 hrs
  # general purpose activity sequence builder: every epoch in the period gets
  # the minimum "active" count, plus the supplied metadata flags
  active_period <- data.frame(activity_counts = rep(active_counts_per_epoch_min, length),
                              bout = as.integer(is_bout),
                              non_wearing = as.logical(non_wearing),
                              complete_day = as.logical(complete_day))
  return(active_period)
}

#' Create an inactive period
#'
#' This function creates an inactive period with a given length.
#'
#' @param length The length of the inactive period.
#' @param is_bout Logical value indicating whether this period is part of a bout of inactivity.
#' @param non_wearing Logical value indicating whether this period is due to non-wearing of the accelerometer.
#' @param complete_day Logical value indicating whether this period occurs during a complete day of wearing the accelerometer.
#'
#' @return A data frame with columns activity_counts, bout, non_wearing, and complete_day, where activity_counts is set to 0 for the entire length, and bout, non_wearing, and complete_day are set according to the input values.
#'
make_inactive_period <- function(length = 1, is_bout = FALSE, non_wearing = FALSE, complete_day = FALSE) {
  inactive_period <- data.frame(activity_counts = rep(0, length),
                                bout = as.integer(is_bout),
                                non_wearing = as.logical(non_wearing),
                                complete_day = as.logical(complete_day))
  return(inactive_period)
}

#' Add date and format to activity counts
#'
#' This function takes a data frame of activity counts and adds a column of time stamps in POSIXct format.
#' The time stamps start at "2012-04-07 00:00:30" and increase by 30 seconds for each row of the data frame.
#'
#' @param counts a data frame containing activity counts
#' @return a data frame with time stamps added in POSIXct format
add_date_and_format <- function(counts) {
  time <- seq(lubridate::ymd_hms("2012-04-07 00:00:30"), length.out = nrow(counts), by = "30 sec")
  df <- cbind(counts, time)
  return(df)
}

#' Create the smallest bout window
#'
#' This function creates an active period of minimum length defined by the parameter \code{minimum_bout_length}.
#'
#' @param minimum_bout_length is the minimum number of epochs for something to be considered a bout
#' @param is_bout Logical indicating if the active period is a bout
#' @param non_wearing Logical indicating if the active period is a non-wearing period
#' @param complete_day Logical indicating if the active period is a complete day
#' @return A data.frame with columns `activity_counts`, `bout`, `non_wearing`, and `complete_day` representing the smallest bout window.
make_smallest_bout_window <- function(minimum_bout_length = 10, is_bout = TRUE, non_wearing = FALSE, complete_day = FALSE) {
  # pass the documented parameters through to make_active_period
  return(make_active_period(length = minimum_bout_length, is_bout = is_bout,
                            non_wearing = non_wearing, complete_day = complete_day))
}

#' Create a non-bout window
#'
#' This function creates a non-bout window, which is a period of inactivity that is not long enough to be considered as an inactive bout.
#'
#' @param maximum_number_consec_inactive_epochs_in_bout maximum number of consecutive inactive epochs in a bout before it is terminated
#'
#' @return a data frame with columns "activity_counts", "bout", "non_wearing", "complete_day"
#'
#' @examples
#' make_non_bout_window()
#'
#' @export
make_non_bout_window <- function(maximum_number_consec_inactive_epochs_in_bout = 3) {
  return(make_inactive_period(maximum_number_consec_inactive_epochs_in_bout + 1))
}

#' Create smallest non-wearing window
#'
#' Create an inactive period that represents the smallest non-wearing window.
#' This function uses the \code{make_inactive_period()} function to create the non-wearing window.
#'
#' @param min_non_wearing_length minimum non_wearing time before a bout is terminated
#'
#' @return An inactive period data frame that represents the smallest non-wearing window.
#'
#' @examples
#' make_smallest_nonwearing_window()
#' @export
make_smallest_nonwearing_window <- function(min_non_wearing_length = 20*2) {
  return(make_inactive_period(min_non_wearing_length, non_wearing = TRUE))
}

#' Generate an activity sequence for a complete day with minimal activity
#'
#' This function generates an activity sequence for a complete day with a minimal activity count.
#'
#' @param min_complete_day minimum number of epochs for something to be a complete day
#'
#' @return An activity sequence data frame with minimum activity counts for a complete day.
#'
#' @examples
#' make_smallest_complete_day_activity()
#'
#' @export
make_smallest_complete_day_activity <- function(min_complete_day = 8602) {
  return(make_active_period(min_complete_day, non_wearing = FALSE, complete_day = TRUE))
}

#' Make the smallest bout dataset
#'
#' Generates a dataset representing the smallest bout: the smallest active period surrounded by non-bout (inactive) windows.
#'
#' @return A data frame containing the activity counts and bout information for the smallest bout.
#' #' @examples #' make_smallest_bout() #' @export make_smallest_bout <- function() { counts <- dplyr::bind_rows( make_non_bout_window(), make_smallest_bout_window(), make_non_bout_window() ) return(add_date_and_format(counts)) } #' Create the smallest bout window without metadata #' #' This function creates the smallest bout window without the metadata columns. It calls the \code{\link{make_smallest_bout}} function and then removes the columns "non_wearing", "complete_day", and "bout" using \code{dplyr::select}. #' #' @return A data frame containing the smallest bout window without metadata. #' #' @examples #' make_smallest_bout_without_metadata() #' @export make_smallest_bout_without_metadata <- function() { return(make_smallest_bout() %>% dplyr::select(-c("non_wearing", "complete_day", "bout"))) } #' Generate a sequence of accelerometer counts representing the smallest bout with the largest inactive period #' #' This function generates a sequence of accelerometer counts representing the smallest bout with the largest inactive period. #' The length of the inactive period is determined by the value of `maximum_number_consec_inactive_epochs_in_bout` variable. #' #' @param maximum_number_consec_inactive_epochs_in_bout maximum number of consecutive inactive epochs in a bout before it is terminated #' #' @return A data frame with columns `activity_counts` and `time`, representing the accelerometer counts and the corresponding time stamps. #' #' @examples #' make_smallest_bout_with_largest_inactive_period() #' @export make_smallest_bout_with_largest_inactive_period <- function(maximum_number_consec_inactive_epochs_in_bout = 3) { nbw <- make_non_bout_window() inactive_period <- make_inactive_period(maximum_number_consec_inactive_epochs_in_bout, is_bout = TRUE) sbw <- make_smallest_bout_window() halfway <- nrow(sbw)/2 counts <- dplyr::bind_rows( nbw, sbw[1:halfway, ], inactive_period, sbw[(halfway+1):nrow(sbw), ], nbw ) return(add_date_and_format(counts)) } #' Generate the smallest bout with the smallest non-wearing period dataset #' #' This function creates a dataset consisting of the smallest bout and the smallest non-wearing period. The bout length, non-wearing period length, and epoch length are defined in the global variables: minimum_bout_length, maximum_number_consec_inactive_epochs_in_bout, and min_non_wearing_length, respectively. #' #' @return A data frame with columns for activity counts and date-time stamps. #' #' @examples #' make_smallest_bout_with_smallest_non_wearing_period() #' @export make_smallest_bout_with_smallest_non_wearing_period <- function() { counts <- dplyr::bind_rows( make_non_bout_window(), make_smallest_bout_window(), make_smallest_nonwearing_window() ) return(add_date_and_format(counts)) } #' Create activity counts for a full day bout #' #' This function creates a data frame with activity counts for a full day bout. A full day bout is defined as an uninterrupted period of activity with a length of at least \code{min_complete_day}. The function calls the \code{make_non_bout_window()}, \code{make_smallest_bout_window()}, and \code{make_smallest_complete_day_activity()} functions to generate the activity counts for the non-bout window, smallest bout window, and smallest complete day activity, respectively. 
#'
#' @return A data frame with activity counts for a full day bout
#' @export
make_full_day_bout <- function() {
  counts <- dplyr::bind_rows(
    make_non_bout_window(),
    make_smallest_bout_window(),
    make_smallest_complete_day_activity()
  )
  counts <- add_date_and_format(counts)
  counts <- counts %>% dplyr::mutate(complete_day = TRUE)
  return(counts)
}

#' Create activity counts for a full day bout without metadata
#'
#' This function creates a data frame with activity counts for a full day bout. A full day bout is defined as an uninterrupted period of activity with a length of at least \code{min_complete_day}. The function calls the \code{make_non_bout_window()}, \code{make_smallest_bout_window()}, and \code{make_smallest_complete_day_activity()} functions to generate the activity counts for the non-bout window, smallest bout window, and smallest complete day activity, respectively.
#'
#' @return A data frame with activity counts for a full day bout without metadata
#' @export
make_full_day_bout_without_metadata <- function() {
  counts <- dplyr::bind_rows(
    make_non_bout_window(),
    make_smallest_bout_window(),
    make_non_bout_window(),
    make_smallest_bout_window(),
    make_smallest_complete_day_activity()
  )
  counts <- add_date_and_format(counts)
  counts <- counts %>%
    dplyr::mutate(complete_day = TRUE) %>%
    dplyr::select(-c("complete_day", "non_wearing", "bout"))
  return(counts)
}

#' Create a data frame of walking bouts with GPS data
#'
#' This function combines accelerometer and GPS data to create a data frame of walking bouts.
#' It generates a full day of activity made up of non-bout windows and minimum-length bouts, together with GPS data for walking in Seattle.
#' The accelerometer data is processed into bouts using the \code{\link{process_accelerometry_counts_into_bouts}} function.
#' The GPS data is processed into epochs using the \code{\link{process_gps_data_into_gps_epochs}} function.
#'
#' @return A data frame of walking bouts with GPS data
#' @examples
#' make_full_walk_bout_df()
#' @export
make_full_walk_bout_df <- function() {
  time <- bout <- NULL
  accelerometry_counts <- make_full_day_bout() %>%
    dplyr::select(-c("bout", "non_wearing", "complete_day"))
  gps_data <- generate_walking_in_seattle_gps_data()
  bouts <- process_accelerometry_counts_into_bouts(accelerometry_counts)
  gps_epochs <- process_gps_data_into_gps_epochs(gps_data)
  walk_bouts <- gps_epochs %>%
    merge(bouts, by = "time", all = TRUE) %>%
    dplyr::arrange(time) %>%
    dplyr::mutate(bout = ifelse(bout == 0, NA, bout)) # replace 0s with NAs since they aren't bouts
  return(walk_bouts)
}
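
# A minimal usage sketch (added for illustration, not part of the package API):
# composing the generators defined above. Wrapped in `if (FALSE)` so that
# sourcing this file has no side effects; run the body interactively instead.
if (FALSE) {
  acc <- make_smallest_bout()                    # smallest valid bout, with metadata
  gps <- generate_walking_in_seattle_gps_data()  # matching sample GPS trace
  head(acc)
  head(gps)
}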
/scratch/gouwar.j/cran-all/cranData/walkboutr/R/sample_data.R
#' Pipe operator #' #' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details. #' #' @name %>% #' @rdname pipe #' @keywords internal #' @export #' @importFrom magrittr %>% #' @usage lhs \%>\% rhs #' @param lhs A value or the magrittr placeholder. #' @param rhs A function call using the magrittr semantics. #' @returns The result of calling `rhs(lhs)`. NULL
/scratch/gouwar.j/cran-all/cranData/walkboutr/R/utils-pipe.R
#' Validate accelerometry input data
#'
#' The input schema for the accelerometry data is `time` and `activity_counts`.
#' - `time` should be a column in date-time format, in the UTC time zone, with no null values.
#' - `activity_counts` should be a positive numeric column with no null values.
#'
#' This function checks the schema of the accelerometry input data
#' and raises an error if any schema constraints are violated.
#'
#' @param accelerometry_counts Raw accelerometry data with the expected schema.
#'
#' @returns This function does not return anything. It throws an error if the accelerometry data fails any of the validation checks.
#'
#' @details The following schema validations are performed on the input data:
#' - The input data must contain two columns, named `time` and `activity_counts`.
#' - The `time` column must be in date-time format, in the UTC time zone, with no null values.
#' - The `activity_counts` column must be a positive numeric column with no null values.
#'
#' @examples
#' # Example usage:
#' data <- data.frame(
#'   time = seq(
#'     as.POSIXct("2021-01-01 00:00:00", tz = "UTC"),
#'     as.POSIXct("2021-01-01 23:59:59", tz = "UTC"),
#'     by = "5 mins"
#'   )) %>%
#'   dplyr::mutate(activity_counts = sample(0:100, length(time), replace = TRUE))
#' validate_accelerometry_data(data)
#'
#' @export
validate_accelerometry_data <- function(accelerometry_counts){
  # Validate schema
  extra_cols <- setdiff(names(accelerometry_counts), c("time", "activity_counts"))
  missing_cols <- setdiff(c("time", "activity_counts"), names(accelerometry_counts))
  if (length(missing_cols) > 0) {
    missing_cols <- paste0(missing_cols, collapse = ', ')
    stop(paste0("Error: data provided are missing `", missing_cols, "` columns."))
  }
  if (length(extra_cols) > 0) {
    extra_cols <- paste0(extra_cols, collapse = ', ')
    stop(paste0("Error: data provided have the following extra columns: ", extra_cols))
  }
  # Validate time variable
  if (!lubridate::is.timepoint(accelerometry_counts$time)) {
    stop("Error: time is not provided in date-time format. class of time variable should be: `POSIXct` `POSIXt`")
  }
  if (any(is.na(accelerometry_counts$time))) {
    stop("Error: time contains NAs.")
  }
  if (!(lubridate::tz(accelerometry_counts$time) == "UTC")) {
    stop("Error: time zone provided is not UTC.")
  }
  if ((length(unique(accelerometry_counts$time))) != nrow(accelerometry_counts)) {
    stop("Error: times are not unique.")
  }
  if (length(unique(diff(accelerometry_counts$time))) != 1) {
    stop("Error: unequal time intervals.")
  }
  # Validate activity_counts variable
  if (!is.numeric(accelerometry_counts$activity_counts)) {
    stop("Error: activity counts are not class integer or numeric.")
  }
  if (any(is.na(accelerometry_counts$activity_counts))) {
    stop("Error: activity counts contain NAs.")
  }
  if (!all(accelerometry_counts$activity_counts >= 0)) {
    stop("Error: negative activity counts in data.")
  }
}
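
# A minimal sketch (added for illustration, not part of the package API):
# exercising the validator above with deliberately invalid input. Wrapped in
# `if (FALSE)` so that sourcing this file has no side effects; run the body
# interactively instead.
if (FALSE) {
  bad <- data.frame(
    time = seq(as.POSIXct("2021-01-01", tz = "CET"), by = "30 sec", length.out = 10),
    activity_counts = rep(1L, 10)
  )
  # fails the time zone check ("time zone provided is not UTC")
  try(validate_accelerometry_data(bad))
}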
/scratch/gouwar.j/cran-all/cranData/walkboutr/R/validate_accelerometry_data.R
#' @keywords internal "_PACKAGE" ## usethis namespace: start ## usethis namespace: end NULL
/scratch/gouwar.j/cran-all/cranData/walkboutr/R/walkboutr-package.R
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(walkboutr) ## ----generate sample data----------------------------------------------------- gps_data <- generate_walking_in_seattle_gps_data() accelerometry_counts <- make_full_day_bout_without_metadata() ## ----run walkbout functions, message = FALSE, warning = FALSE, results = 'hide'---- walk_bouts <- identify_walk_bouts_in_gps_and_accelerometry_data(gps_data, accelerometry_counts, epoch_length = 15) summary_walk_bouts <- summarize_walk_bouts(walk_bouts)
/scratch/gouwar.j/cran-all/cranData/walkboutr/inst/doc/changing_default_parameters.R
--- title: "Changing Default Parameters" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Changing Default Parameters} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(walkboutr) ``` `walkboutr` comes with several preset parameters. The parameters that we are using are: <p> - **Epoch length** `epoch_length = 30` Epoch length is the length of an epoch in seconds. - **CPE associated with being active** `active_counts_per_epoch_min = 500` The number of CPE associated with being active are the accelerometry counts, in CPE, that are considered to reflect someone who is active (compared to inactive or at a very low activity level) - **Minimum bout length** `minimum_bout_length = 10` The minimum bout length is in epochs and is the minimum amount of time that must elapse for something to be considered a bout. A minimum bout length is equal to a minimum bout length of 5 minutes. - **Local time zone** `local_time_zone = "America/Los_Angeles"` For the purposes of evaluating whether we have a complete day of data (a flag that is passed as an output of this package), we use the local time zone of your data. - **Maximum number of consecutive inactive epochs in a bout** `maximum_number_consec_inactive_epochs_in_bout = 3` A bout of walking, or activity, is allowed to have up to a certain number of consecutive epochs of inactivity (or low activity) before the bout would terminate. The standard amount of inactive time that can interrupt a bout is 2 minutes, and thus the maximum number of consecutive inactive epochs in a bout is 3, or 1.5 minutes. After that, we would hit the 2 minute threshold and the bout would end rather than be interrupted. If you want to set any of these parameters yourself, you can simply pass them into the functions. If you pass nothing, the above default parameter values will be used. For example, if we generate some sample data, we can then pass in a different parameter value. ```{r generate sample data} gps_data <- generate_walking_in_seattle_gps_data() accelerometry_counts <- make_full_day_bout_without_metadata() ``` Now that we have sample data, we can look at how `walkboutr` generates bouts: ```{r run walkbout functions, message = FALSE, warning = FALSE, results = 'hide'} walk_bouts <- identify_walk_bouts_in_gps_and_accelerometry_data(gps_data, accelerometry_counts, epoch_length = 15) summary_walk_bouts <- summarize_walk_bouts(walk_bouts) ``` This will change the way we treat epochs to regard them as 15 second intervals rather than 30 second intervals. The same can be done with any parameters listed above.
/scratch/gouwar.j/cran-all/cranData/walkboutr/inst/doc/changing_default_parameters.Rmd
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(walkboutr) ## ----sample GPS data---------------------------------------------------------- gps_data <- generate_gps_data(start_lat = 40.7128, start_long = 74.0060, start_time = lubridate::ymd_hms('2012-04-07 00:00:30')) ## ----sample GPS data head, echo = FALSE--------------------------------------- knitr::kable(head(gps_data)) ## ----seattle walking data----------------------------------------------------- seattle <- generate_walking_in_seattle_gps_data() ## ----seattle walking data head, echo = FALSE---------------------------------- knitr::kable(head(seattle)) ## ----generate smallest walkbout----------------------------------------------- accelerometry_counts <- make_smallest_bout_without_metadata() ## ----smallest walkbout head, echo=FALSE--------------------------------------- knitr::kable(head(accelerometry_counts)) ## ----make full walk bout------------------------------------------------------ walk_bouts <- make_full_walk_bout_df() ## ----full walk bout head, echo=FALSE------------------------------------------ knitr::kable(head(walk_bouts))
/scratch/gouwar.j/cran-all/cranData/walkboutr/inst/doc/generate_data.R
--- title: "Generate Data" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Generate Data} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(walkboutr) ``` The `walkboutr` package has several functions to generate sample data so that you can see how the package works in practice. We generate GPS data and accelerometry data separately, as that is how you will provide your data to `walkboutr`. ### GPS Data Generation GPS data provided should contain the following columns and have the relevant characteristics: <p> - **time** values should be in date-time format and be all non-missing - **latitude** values should be between -90 and 90 and be all non-missing - **longitude** values should be between -180 and 180 and be all non-missing - **speed** values should all be positive, non-missing values in km/h These data will be processed and later combined with accelerometry data to generate walkbouts. #### Generating sample GPS data ```{r sample GPS data} gps_data <- generate_gps_data(start_lat = 40.7128, start_long = 74.0060, start_time = lubridate::ymd_hms('2012-04-07 00:00:30')) ``` These sample GPS data meet all of the characteristics outlined above: ```{r sample GPS data head, echo = FALSE} knitr::kable(head(gps_data)) ``` `walkboutr` also has a function to generate a realistic walking route in Seattle, which is simply meant to provide another example for generating data and becoming familiar with the package: ```{r seattle walking data} seattle <- generate_walking_in_seattle_gps_data() ``` These data look exactly the same as the randomly generated sample GPS data: ```{r seattle walking data head, echo = FALSE} knitr::kable(head(seattle)) ``` <p> <p> #### Generating sample Accelerometry data Accelerometry data provided should contain the following columns and have the relevant characteristics: <p> - **time** values should be in date-time format and be all non-missing - **activity_count** values should be the output of accelerometers and represent activity counts in CPE. These values should be non-missing and non-negative. These data will be processed and later combined with the GPS data to generate walkbouts. There are more functions to generate accelerometry data so that you can see the differences based on the size of the dataset. The following functions are included for you to generate sample data: For the purposes of this example, we will create generate the smallest walk bout. ```{r generate smallest walkbout} accelerometry_counts <- make_smallest_bout_without_metadata() ``` These sample accelerometry data meet all of the characteristics outlined above: ```{r smallest walkbout head, echo=FALSE} knitr::kable(head(accelerometry_counts)) ``` #### Generating a full data frame of walk bouts This function generates a data frame of walk bouts with accelerometry and GPS data so that you can get an idea of how some of the top level functions work. These data won't be used directly by the package, but are here to give you an idea of what a full dataset looks like as it goes into the final steps of the package. In order to generate these data, you can use the `make_full_walk_bout_df` function: ```{r make full walk bout} walk_bouts <- make_full_walk_bout_df() ``` This dataset looks like this: ```{r full walk bout head, echo=FALSE} knitr::kable(head(walk_bouts)) ``` Please see docs for each of these functions for generating data. 
The full list of available functions is: <p> - `generate_gps_data()` - `generate_walking_in_seattle_gps_data()` - `make_smallest_bout_window()` - `make_smallest_nonwearing_window()` - `make_smallest_complete_day_activity()` - `make_smallest_bout()` - `make_smallest_bout_without_metadata()` - `make_smallest_bout_with_largest_inactive_period()` - `make_smallest_bout_with_smallest_non_wearing_period()` - `make_full_day_bout()` - `make_full_day_bout_without_metadata()` - `make_full_walk_bout_df()`
/scratch/gouwar.j/cran-all/cranData/walkboutr/inst/doc/generate_data.Rmd
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(walkboutr) ## ----generate sample data----------------------------------------------------- gps_data <- generate_walking_in_seattle_gps_data() accelerometry_counts <- make_full_day_bout_without_metadata() ## ----run walkbout functions, message=FALSE,warning=FALSE---------------------- walk_bouts <- identify_walk_bouts_in_gps_and_accelerometry_data(gps_data, accelerometry_counts) summary_walk_bouts <- summarize_walk_bouts(walk_bouts) ## ----head walkbouts, echo = FALSE--------------------------------------------- knitr::kable(head(walk_bouts)) ## ----head summary, echo = FALSE----------------------------------------------- knitr::kable(head(summary_walk_bouts)) ## ----message=FALSE, warning=FALSE--------------------------------------------- accelerometry_counts <- make_smallest_bout_without_metadata() gps_data <- generate_walking_in_seattle_gps_data() generate_bout_plot(accelerometry_counts, gps_data, 1)
/scratch/gouwar.j/cran-all/cranData/walkboutr/inst/doc/process_bouts.R
--- title: "Generate Walk Bouts" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Generate Walk Bouts} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(walkboutr) ``` The `walkboutr` package will process GPS and accelerometry data and create two different outputs: <p>1. **Full dataset**: This dataframe contains all of the original data (latitude, longitude, activity counts) as well as the epoch start time. This time will match the times associated with the accelerometry data, and the GPS data have been matched up to the closest accelerometry epochs. The time variable returned, thus, reflects that of the accelerometry data. _Note: GPS data are assigned to an epoch start time by rounding down the time associated with the GPS datapoint to the nearest epoch start time. For example, if epochs in the accelerometry data are 30 seconds, the time associated with a GPS data point will be rounded down to the nearest 30-second increment._ <p> 2. **Summarized dataset**: This dataframe does not contain any of the original GPS/accelerometry data, and is thus completely de-identified and shareable. The output contains one row for each bout (walking or otherwise) as well as information on the median speed for that bout, whether there was a complete day worth of data for the bout, the start time of the bout, the duration in minutes, and the bout category. More details on bout category can be found below. First we will generate some sample data: ```{r generate sample data} gps_data <- generate_walking_in_seattle_gps_data() accelerometry_counts <- make_full_day_bout_without_metadata() ``` Now that we have sample data, we can look at how `walkboutr` generates bouts: ```{r run walkbout functions, message=FALSE,warning=FALSE} walk_bouts <- identify_walk_bouts_in_gps_and_accelerometry_data(gps_data, accelerometry_counts) summary_walk_bouts <- summarize_walk_bouts(walk_bouts) ``` The bouts identified look like this: ```{r head walkbouts, echo = FALSE} knitr::kable(head(walk_bouts)) ``` We can now use the second function to generate our summarized dataset, which is de-identified and shareable: ```{r head summary, echo = FALSE} knitr::kable(head(summary_walk_bouts)) ``` The bout categories reflected in these outputs are defined as follows: <p> * **Walk bout** a `walk_bout` is defined based on the scientific literature as: Assuming a greedy algorithm and consideration of inactive time as consecutive, a walk bout is any contiguous period of time where the active epochs have accelerometry counts above the minimum threshold of 500 CPE (to allow for capture of light physical activity such as slow walking) and the time period: + Begins with an active epoch preceded by a non-walkbout + Ends with an active epoch followed by at least 4 consecutive 30-second epochs of inactivity + Contains at least 10 cumulative 30-second epochs of activity + Is not a dwell bout + Bout median speed based on GPS data falls between 2 and 6 kilometers per hour (our reference walking speeds) <p> Accordingly, the following non-walk-bouts are defined as: * **Non-walk bout due to slow pace** a `non_walk_slow` bout is a bout where the median speed is too slow to be considered walking. * **Non-walk bout due to fast pace** a `non_walk_fast` bout is a bout where the median speed is too fast to be considered walking. 
* **Non-walk bout due to high CPE** a `non_walk_too_vigorous` bout is a bout where the average CPE is too high to be considered walking (e.g., running or biking).
* **Dwell bout** a `dwell_bout` is a bout where the radius of the GPS points is below the threshold used to decide that someone has stayed in one place.
* **Non-walk bout due to incomplete GPS coverage** a `non_walk_incomplete_gps` bout is a bout where the GPS coverage is too low to be considered complete.

In order to better visualize our bouts, we can also plot the accelerometry counts and GPS radius.

```{r, message=FALSE, warning=FALSE}
accelerometry_counts <- make_smallest_bout_without_metadata()
gps_data <- generate_walking_in_seattle_gps_data()
generate_bout_plot(accelerometry_counts, gps_data, 1)
```
/scratch/gouwar.j/cran-all/cranData/walkboutr/inst/doc/process_bouts.Rmd
--- title: "Changing Default Parameters" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Changing Default Parameters} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(walkboutr) ``` `walkboutr` comes with several preset parameters. The parameters that we are using are: <p> - **Epoch length** `epoch_length = 30` Epoch length is the length of an epoch in seconds. - **CPE associated with being active** `active_counts_per_epoch_min = 500` The number of CPE associated with being active are the accelerometry counts, in CPE, that are considered to reflect someone who is active (compared to inactive or at a very low activity level) - **Minimum bout length** `minimum_bout_length = 10` The minimum bout length is in epochs and is the minimum amount of time that must elapse for something to be considered a bout. A minimum bout length is equal to a minimum bout length of 5 minutes. - **Local time zone** `local_time_zone = "America/Los_Angeles"` For the purposes of evaluating whether we have a complete day of data (a flag that is passed as an output of this package), we use the local time zone of your data. - **Maximum number of consecutive inactive epochs in a bout** `maximum_number_consec_inactive_epochs_in_bout = 3` A bout of walking, or activity, is allowed to have up to a certain number of consecutive epochs of inactivity (or low activity) before the bout would terminate. The standard amount of inactive time that can interrupt a bout is 2 minutes, and thus the maximum number of consecutive inactive epochs in a bout is 3, or 1.5 minutes. After that, we would hit the 2 minute threshold and the bout would end rather than be interrupted. If you want to set any of these parameters yourself, you can simply pass them into the functions. If you pass nothing, the above default parameter values will be used. For example, if we generate some sample data, we can then pass in a different parameter value. ```{r generate sample data} gps_data <- generate_walking_in_seattle_gps_data() accelerometry_counts <- make_full_day_bout_without_metadata() ``` Now that we have sample data, we can look at how `walkboutr` generates bouts: ```{r run walkbout functions, message = FALSE, warning = FALSE, results = 'hide'} walk_bouts <- identify_walk_bouts_in_gps_and_accelerometry_data(gps_data, accelerometry_counts, epoch_length = 15) summary_walk_bouts <- summarize_walk_bouts(walk_bouts) ``` This will change the way we treat epochs to regard them as 15 second intervals rather than 30 second intervals. The same can be done with any parameters listed above.
/scratch/gouwar.j/cran-all/cranData/walkboutr/vignettes/changing_default_parameters.Rmd
--- title: "Generate Data" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Generate Data} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(walkboutr) ``` The `walkboutr` package has several functions to generate sample data so that you can see how the package works in practice. We generate GPS data and accelerometry data separately, as that is how you will provide your data to `walkboutr`. ### GPS Data Generation GPS data provided should contain the following columns and have the relevant characteristics: <p> - **time** values should be in date-time format and be all non-missing - **latitude** values should be between -90 and 90 and be all non-missing - **longitude** values should be between -180 and 180 and be all non-missing - **speed** values should all be positive, non-missing values in km/h These data will be processed and later combined with accelerometry data to generate walkbouts. #### Generating sample GPS data ```{r sample GPS data} gps_data <- generate_gps_data(start_lat = 40.7128, start_long = 74.0060, start_time = lubridate::ymd_hms('2012-04-07 00:00:30')) ``` These sample GPS data meet all of the characteristics outlined above: ```{r sample GPS data head, echo = FALSE} knitr::kable(head(gps_data)) ``` `walkboutr` also has a function to generate a realistic walking route in Seattle, which is simply meant to provide another example for generating data and becoming familiar with the package: ```{r seattle walking data} seattle <- generate_walking_in_seattle_gps_data() ``` These data look exactly the same as the randomly generated sample GPS data: ```{r seattle walking data head, echo = FALSE} knitr::kable(head(seattle)) ``` <p> <p> #### Generating sample Accelerometry data Accelerometry data provided should contain the following columns and have the relevant characteristics: <p> - **time** values should be in date-time format and be all non-missing - **activity_count** values should be the output of accelerometers and represent activity counts in CPE. These values should be non-missing and non-negative. These data will be processed and later combined with the GPS data to generate walkbouts. There are more functions to generate accelerometry data so that you can see the differences based on the size of the dataset. The following functions are included for you to generate sample data: For the purposes of this example, we will create generate the smallest walk bout. ```{r generate smallest walkbout} accelerometry_counts <- make_smallest_bout_without_metadata() ``` These sample accelerometry data meet all of the characteristics outlined above: ```{r smallest walkbout head, echo=FALSE} knitr::kable(head(accelerometry_counts)) ``` #### Generating a full data frame of walk bouts This function generates a data frame of walk bouts with accelerometry and GPS data so that you can get an idea of how some of the top level functions work. These data won't be used directly by the package, but are here to give you an idea of what a full dataset looks like as it goes into the final steps of the package. In order to generate these data, you can use the `make_full_walk_bout_df` function: ```{r make full walk bout} walk_bouts <- make_full_walk_bout_df() ``` This dataset looks like this: ```{r full walk bout head, echo=FALSE} knitr::kable(head(walk_bouts)) ``` Please see docs for each of these functions for generating data. 
The full list of available functions is: <p> - `generate_gps_data()` - `generate_walking_in_seattle_gps_data()` - `make_smallest_bout_window()` - `make_smallest_nonwearing_window()` - `make_smallest_complete_day_activity()` - `make_smallest_bout()` - `make_smallest_bout_without_metadata()` - `make_smallest_bout_with_largest_inactive_period()` - `make_smallest_bout_with_smallest_non_wearing_period()` - `make_full_day_bout()` - `make_full_day_bout_without_metadata()` - `make_full_walk_bout_df()`
/scratch/gouwar.j/cran-all/cranData/walkboutr/vignettes/generate_data.Rmd
--- title: "Generate Walk Bouts" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Generate Walk Bouts} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(walkboutr) ``` The `walkboutr` package will process GPS and accelerometry data and create two different outputs: <p>1. **Full dataset**: This dataframe contains all of the original data (latitude, longitude, activity counts) as well as the epoch start time. This time will match the times associated with the accelerometry data, and the GPS data have been matched up to the closest accelerometry epochs. The time variable returned, thus, reflects that of the accelerometry data. _Note: GPS data are assigned to an epoch start time by rounding down the time associated with the GPS datapoint to the nearest epoch start time. For example, if epochs in the accelerometry data are 30 seconds, the time associated with a GPS data point will be rounded down to the nearest 30-second increment._ <p> 2. **Summarized dataset**: This dataframe does not contain any of the original GPS/accelerometry data, and is thus completely de-identified and shareable. The output contains one row for each bout (walking or otherwise) as well as information on the median speed for that bout, whether there was a complete day worth of data for the bout, the start time of the bout, the duration in minutes, and the bout category. More details on bout category can be found below. First we will generate some sample data: ```{r generate sample data} gps_data <- generate_walking_in_seattle_gps_data() accelerometry_counts <- make_full_day_bout_without_metadata() ``` Now that we have sample data, we can look at how `walkboutr` generates bouts: ```{r run walkbout functions, message=FALSE,warning=FALSE} walk_bouts <- identify_walk_bouts_in_gps_and_accelerometry_data(gps_data, accelerometry_counts) summary_walk_bouts <- summarize_walk_bouts(walk_bouts) ``` The bouts identified look like this: ```{r head walkbouts, echo = FALSE} knitr::kable(head(walk_bouts)) ``` We can now use the second function to generate our summarized dataset, which is de-identified and shareable: ```{r head summary, echo = FALSE} knitr::kable(head(summary_walk_bouts)) ``` The bout categories reflected in these outputs are defined as follows: <p> * **Walk bout** a `walk_bout` is defined based on the scientific literature as: Assuming a greedy algorithm and consideration of inactive time as consecutive, a walk bout is any contiguous period of time where the active epochs have accelerometry counts above the minimum threshold of 500 CPE (to allow for capture of light physical activity such as slow walking) and the time period: + Begins with an active epoch preceded by a non-walkbout + Ends with an active epoch followed by at least 4 consecutive 30-second epochs of inactivity + Contains at least 10 cumulative 30-second epochs of activity + Is not a dwell bout + Bout median speed based on GPS data falls between 2 and 6 kilometers per hour (our reference walking speeds) <p> Accordingly, the following non-walk-bouts are defined as: * **Non-walk bout due to slow pace** a `non_walk_slow` bout is a bout where the median speed is too slow to be considered walking. * **Non-walk bout due to fast pace** a `non_walk_fast` bout is a bout where the median speed is too fast to be considered walking. 
* **Non-walk bout due to high CPE** a `non_walk_too_vigorous` bout is a bout where the average CPE is too high to be considered walking (e.g., running or biking).
* **Dwell bout** a `dwell_bout` is a bout where the radius of the GPS points is below the threshold used to decide that someone has stayed in one place.
* **Non-walk bout due to incomplete GPS coverage** a `non_walk_incomplete_gps` bout is a bout where the GPS coverage is too low to be considered complete.

In order to better visualize our bouts, we can also plot the accelerometry counts and GPS radius.

```{r, message=FALSE, warning=FALSE}
accelerometry_counts <- make_smallest_bout_without_metadata()
gps_data <- generate_walking_in_seattle_gps_data()
generate_bout_plot(accelerometry_counts, gps_data, 1)
```
/scratch/gouwar.j/cran-all/cranData/walkboutr/vignettes/process_bouts.Rmd
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 predict_walker <- function(sigma_rw1, sigma_rw2, sigma_y, beta_fixed, beta_rw, slope, xreg_fixed, xreg_rw, n, k, k_rw1, k_rw2, response) { .Call(`_walker_predict_walker`, sigma_rw1, sigma_rw2, sigma_y, beta_fixed, beta_rw, slope, xreg_fixed, xreg_rw, n, k, k_rw1, k_rw2, response) } predict_walker_glm <- function(sigma_rw1, sigma_rw2, beta_fixed, beta_rw, slope, xreg_fixed, xreg_rw, u, distribution, weights, n, k, k_rw1, k_rw2, type) { .Call(`_walker_predict_walker_glm`, sigma_rw1, sigma_rw2, beta_fixed, beta_rw, slope, xreg_fixed, xreg_rw, u, distribution, weights, n, k, k_rw1, k_rw2, type) }
/scratch/gouwar.j/cran-all/cranData/walker/R/RcppExports.R
#' Extract Fitted Values of Walker Fit
#'
#' Returns the fitted values (posterior summaries or samples of the model means)
#' from the output of \code{walker} or \code{walker_glm}.
#'
#' @export
#' @importFrom stats fitted sd
#' @name fitted.walker_fit
#' @param object Output of \code{walker} or \code{walker_glm}.
#' @param summary If \code{TRUE} (default), return summary statistics. Otherwise returns samples.
#' @param ... Ignored.
#' @return If \code{summary=TRUE}, matrix containing summary statistics of fitted values.
#' Otherwise a matrix of samples.
fitted.walker_fit <- function(object, summary = TRUE, ...) {
  y_fit <- extract(object$stanfit, pars = "y_fit", permuted = TRUE)$y_fit
  if (object$distribution != "gaussian") {
    # resample draws with importance weights for the non-Gaussian case
    y_fit <- y_fit[sample(1:nrow(y_fit), size = nrow(y_fit), replace = TRUE,
                          prob = extract(object$stanfit, pars = "weights", permuted = TRUE)$weights), , drop = FALSE]
  }
  if (summary) {
    y_fit <- t(apply(y_fit, 2, function(x) {
      q <- quantile(x, c(0.025, 0.5, 0.975))
      c(mean = mean(x), sd = sd(x), q)
    }))
    rownames(y_fit) <- time(object$y)
    y_fit
  } else {
    y_fit
  }
}

#' Extract Coefficients of Walker Fit
#'
#' Returns the time-varying regression coefficients from the output of \code{walker} or \code{walker_glm}.
#'
#' @export
#' @importFrom stats coef
#' @name coef.walker_fit
#' @param object Output of \code{walker} or \code{walker_glm}.
#' @param summary If \code{TRUE} (default), return summary statistics. Otherwise returns samples.
#' @param transform Optional vectorized function for transforming the coefficients (for example exp).
#' @param ... Ignored.
#' @return A data frame of posterior summaries per coefficient and time point
#' (if \code{summary = TRUE}), or an array of posterior samples of the coefficients.
coef.walker_fit <- function(object, summary = TRUE, transform = identity, ...) {
  # N x k x n array of posterior samples of the time-varying coefficients
  coef_data <- transform(extract(object$stanfit, pars = "beta_rw", permuted = TRUE)$beta_rw)
  if (object$distribution != "gaussian") {
    coef_data <- coef_data[sample(1:nrow(coef_data), size = nrow(coef_data), replace = TRUE,
                                  prob = extract(object$stanfit, pars = "weights", permuted = TRUE)$weights), , , drop = FALSE]
  }
  dimnames(coef_data) <- list(iter = 1:nrow(coef_data), beta = colnames(object$xreg_rw),
                              time = as.numeric(time(object$y)))
  if (summary) {
    coef_data <- as.data.frame(as.table(coef_data))
    names(coef_data)[4] <- "value"
    coef_data$time <- as.numeric(levels(coef_data$time))[coef_data$time]
    summarise(group_by(coef_data, time, beta),
              mean = mean(.data$value),
              sd = sd(.data$value),
              "2.5%" = quantile(.data$value, prob = 0.025),
              "50%" = quantile(.data$value, prob = 0.5),
              "97.5%" = quantile(.data$value, prob = 0.975))
  } else {
    coef_data
  }
}
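
# A minimal usage sketch (added for illustration, not part of the package API):
# extracting fitted values and coefficients from an existing fit. Assumes `fit`
# is the output of walker() or walker_glm(); wrapped in `if (FALSE)` so that
# sourcing this file has no side effects.
if (FALSE) {
  fitted(fit)                  # summary statistics (mean, sd, quantiles) per time point
  fitted(fit, summary = FALSE) # raw posterior samples instead
  coef(fit, transform = exp)   # coefficient summaries on, e.g., the exp scale
}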
/scratch/gouwar.j/cran-all/cranData/walker/R/fitted.R
#' Leave-Future-Out Cross-Validation #' #' Estimates the leave-future-out (LFO) information criterion for \code{walker} and \code{walker_glm} models. #' #' The LFO for non-Gaussian models is (currently) based on the corresponding Gaussian approximation and #' not the importance sampling corrected true posterior. #' #' @export #' @importFrom loo psis pareto_k_values weights.importance_sampling #' @param object Output of \code{walker} or \code{walker_glm}. #' @param L Positive integer defining how many observations should be used for the initial fit. #' @param exact If \code{TRUE}, computes exact 1-step predictions by re-estimating the model repeatedly. #' If \code{FALSE} (default), uses approximate method based on Bürkner, Gabry and Vehtari (2020). #' @param verbose If \code{TRUE} (default), print the progress of the LFO computations to the console. #' @param k_thres Threshold for the pareto k estimate triggering refit. Default is 0.7. #' @references Paul-Christian Bürkner, Jonah Gabry & Aki Vehtari (2020). #' Approximate leave-future-out cross-validation for Bayesian time series models, #' Journal of Statistical Computation and Simulation, 90:14, 2499-2523, DOI: 10.1080/00949655.2020.1783262. #' @return List with components \code{ELPD} (Expected log predictive density), \code{ELPDs} (observation-specific ELPDs), #' \code{ks} (Pareto k values in case of approximation was used), and \code{refits} (time points where model was re-estimated) #' @examples #' \dontrun{ #' fit <- walker(Nile ~ -1 + #' rw1(~ 1, #' beta = c(1000, 100), #' sigma = c(2, 0.001)), #' sigma_y_prior = c(2, 0.005), #' iter = 2000, chains = 1) #' #' fit_lfo <- lfo(fit, L = 20, exact = FALSE) #' fit_lfo$ELPD #' } lfo <- function(object, L, exact = FALSE, verbose = TRUE, k_thres = 0.7) { log_sum_exp <- function(x) { max_x <- max(x) max_x + log(sum(exp(x - max_x))) } log_mean_exp <- function(x) { log_sum_exp(x) - log(length(x)) } sum_log_ratios <- function(loglik, ids = NULL) { if (!is.null(ids)) loglik <- loglik[, ids, drop = FALSE] rowSums(loglik) } if (is.null(object$data)) { stop("Data used to fit the model is missing. 
Please rerun the model with argument `return_data = TRUE`.") } d <- object$data n_samples <- sum(object$stanfit@sim$n_save - object$stanfit@sim$warmup2) # use posterior means as initial values to make refitting more robust samples <- extract(object$stanfit) if (object$distribution == "gaussian") { # initial values init <- replicate(object$stanfit@sim$chains, list( beta_fixed = if(!is.null(samples$beta_fixed)) array(colMeans(samples$beta_fixed), dim = ncol(samples$beta_fixed)), sigma_rw1 = if(!is.null(samples$sigma_rw1)) array(colMeans(samples$sigma_rw1), dim = ncol(samples$sigma_rw1)), sigma_rw2 = if(!is.null(samples$sigma_rw2)) array(colMeans(samples$sigma_rw2), dim = ncol(samples$sigma_rw2)), sigma_y = mean(samples$sigma_y)), simplify = FALSE) if (exact) { ks <- NULL refits <- L:(d$n - 1) ll <- matrix(NA, n_samples, d$n - L) for(i in L:(d$n - 1)) { if (d$y_miss[i + 1] == 0) { if (verbose) print(paste0("Estimating model with ", i, " observations.")) # increase number of observations used for estimating the parameters d$n_lfo <- i f <- stan(fit = object$stanfit, data = d, init = init, chains = object$stanfit@sim$chains, iter = object$stanfit@sim$iter, warmup = object$stanfit@sim$warmup, thin = object$stanfit@sim$thin, pars = "log_lik", refresh = 0 ) ll[, i - L + 1] <- extract(f, "log_lik")$log_lik[, i + 1] } } elpds <- apply(ll, 2, log_mean_exp) elpd <- sum(elpds[which(d$y_miss[(L + 1):d$n] == 0)]) } else { # Based on the Bürkner et al.: https://mc-stan.org/loo/articles/loo2-lfo.html elpds <- rep(NA, d$n - L) d$n_lfo <- L if (verbose) print(paste0("Estimating model with ", L, " observations.")) f <- stan(fit = object$stanfit, data = d, init = init, chains = object$stanfit@sim$chains, iter = object$stanfit@sim$iter, warmup = object$stanfit@sim$warmup, thin = object$stanfit@sim$thin, pars = "log_lik", refresh = 0 ) elpds[1] <- log_mean_exp(extract(f, "log_lik")$log_lik[, L + 1]) i_refit <- L refits <- L ks <- rep(NA, d$n - L - 1) for (i in (L + 1):(d$n - 1)) { if (d$y_miss[i + 1] == 0) { log_lik <- extract(f, "log_lik")$log_lik not_na <- ((i_refit + 1):i)[which(d$y_miss[(i_refit + 1):i] == 0)] logratio <- sum_log_ratios(log_lik, not_na) psis_obj <- suppressWarnings(loo::psis(logratio)) k <- loo::pareto_k_values(psis_obj) ks[i] <- k if (k > k_thres) { if (verbose) print(paste0("Estimating model with ", i, " observations.")) # refit the model based on the first i observations i_refit <- i refits <- c(refits, i) d$n_lfo <- i f <- stan(fit = object$stanfit, data = d, init = init, chains = object$stanfit@sim$chains, iter = object$stanfit@sim$iter, warmup = object$stanfit@sim$warmup, thin = object$stanfit@sim$thin, pars = "log_lik", refresh = 0 ) log_lik <- extract(f, "log_lik")$log_lik elpds[i - L + 1] <- log_mean_exp(log_lik[, i + 1]) } else { lw <- loo::weights.importance_sampling(psis_obj, normalize = TRUE)[, 1] elpds[i - L + 1] <- log_sum_exp(lw + log_lik[, i + 1]) } } } elpd <- sum(elpds[which(d$y_miss[(L + 1):d$n] == 0)]) } } else { warning("LFO for non-Gaussian models is based on the approximating Gaussian model.") init <- replicate(object$stanfit@sim$chains, list( beta_fixed = if(!is.null(samples$beta_fixed)) array(colMeans(samples$beta_fixed), dim = ncol(samples$beta_fixed)), sigma_rw1 = if(!is.null(samples$sigma_rw1)) array(colMeans(samples$sigma_rw1), dim = ncol(samples$sigma_rw1)), sigma_rw2 = if(!is.null(samples$sigma_rw2)) array(colMeans(samples$sigma_rw2), dim = ncol(samples$sigma_rw2))), simplify = FALSE) if (exact) { ks <- NULL refits <- L:(d$n - 1) ll <- matrix(NA, n_samples, 
d$n - L) for(i in L:(d$n - 1)) { if (d$y_miss[i + 1] == 0) { if (verbose) print(paste0("Estimating model with ", i, " observations.")) d$n_lfo <- i f <- stan(fit = object$stanfit, data = d, init = init, chains = object$stanfit@sim$chains, iter = object$stanfit@sim$iter, warmup = object$stanfit@sim$warmup, thin = object$stanfit@sim$thin, pars = "log_lik", refresh = 0 ) ll[, i - L + 1] <- extract(f, "log_lik")$log_lik[, i + 1] } } elpds <- apply(ll, 2, log_mean_exp) elpd <- sum(elpds[which(d$y_miss[(L + 1):d$n] == 0)]) } else { # Based on the Bürkner et al.: https://mc-stan.org/loo/articles/loo2-lfo.html elpds <- rep(NA, d$n - L) d$n_lfo <- L f <- stan(fit = object$stanfit, data = d, init = init, chains = object$stanfit@sim$chains, iter = object$stanfit@sim$iter, warmup = object$stanfit@sim$warmup, thin = object$stanfit@sim$thin, pars = "log_lik", refresh = 0 ) elpds[1] <- log_mean_exp(extract(f, "log_lik")$log_lik[, L + 1]) i_refit <- L refits <- L ks <- rep(NA, d$n - L - 1) for (i in (L + 1):(d$n - 1)) { if (d$y_miss[i + 1] == 0) { log_lik <- extract(f, "log_lik")$log_lik not_na <- ((i_refit + 1):i)[which(d$y_miss[(i_refit + 1):i] == 0)] logratio <- sum_log_ratios(log_lik, not_na) psis_obj <- suppressWarnings(loo::psis(logratio)) k <- loo::pareto_k_values(psis_obj) ks[i] <- k if (k > k_thres) { if (verbose) print(paste0("Estimating model with ", i, " observations.")) # refit the model based on the first i observations i_refit <- i refits <- c(refits, i) d$n_lfo <- i f <- stan(fit = object$stanfit, data = d, init = init, chains = object$stanfit@sim$chains, iter = object$stanfit@sim$iter, warmup = object$stanfit@sim$warmup, thin = object$stanfit@sim$thin, pars = "log_lik", refresh = 0 ) log_lik <- extract(f, "log_lik")$log_lik elpds[i - L + 1] <- log_mean_exp(log_lik[, i + 1]) } else { lw <- loo::weights.importance_sampling(psis_obj, normalize = TRUE)[, 1] elpds[i - L + 1] <- log_sum_exp(lw + log_lik[, i + 1]) } } } elpd <- sum(elpds[which(d$y_miss[(L + 1):d$n] == 0)]) } } list(ELPD = elpd, ELPDs = elpds, ks = ks, refits = refits) }
/scratch/gouwar.j/cran-all/cranData/walker/R/lfo.R
#' Plot the Time-Varying Regression Coefficients of a Walker Fit
#'
#' Plots the posterior medians and quantiles of the time-varying regression
#' coefficients from the output of \code{\link{walker}} or \code{\link{walker_glm}}.
#'
#' @importFrom dplyr group_by summarise
#' @importFrom rlang .data
#' @importFrom stats quantile time update.formula drop.terms
#' @import ggplot2
#' @import bayesplot
#' @param object An output from \code{\link{walker}}.
#' @param level Level for intervals. Default is 0.05, leading to 90\% intervals.
#' @param alpha Transparency level for \code{geom_ribbon}.
#' @param transform Optional vectorized function for transforming the coefficients (for example \code{exp}).
#' @param scales Should y-axis of the panels be \code{"fixed"} (default) or \code{"free"}?
#' @param add_zero Logical, should a dashed line indicating a zero be included?
#' @export
plot_coefs <- function(object, level = 0.05, alpha = 0.33, transform = identity,
                       scales = "fixed", add_zero = TRUE){
  # N x k x n array of posterior samples of the time-varying coefficients
  coef_data <- transform(extract(object$stanfit, pars = "beta_rw", permuted = TRUE)$beta_rw)
  if (object$distribution != "gaussian") {
    coef_data <- coef_data[sample(1:nrow(coef_data), size = nrow(coef_data), replace = TRUE,
                                  prob = extract(object$stanfit, pars = "weights", permuted = TRUE)$weights), , , drop = FALSE]
  }
  dimnames(coef_data) <- list(iter = 1:nrow(coef_data), beta = colnames(object$xreg_rw),
                              time = as.numeric(time(object$y)))
  coef_data <- as.data.frame(as.table(coef_data))
  names(coef_data)[4] <- "value"
  coef_data$time <- as.numeric(levels(coef_data$time))[coef_data$time]
  quantiles <- summarise(group_by(coef_data, time, beta),
                         lwr = quantile(.data$value, prob = level),
                         median = quantile(.data$value, prob = 0.5),
                         upr = quantile(.data$value, prob = 1 - level))
  p <- ggplot(
    data = quantiles,
    mapping = aes(
      x = .data$time,
      y = .data$median,
      ymin = .data$lwr,
      ymax = .data$upr
    )
  ) +
    facet_wrap(~beta, scales = scales) +
    geom_ribbon(aes_(color = "beta", fill = "beta"),
                alpha = alpha, linetype = 0) +
    geom_line(aes_(color = "beta")) +
    labs(y = NULL) +
    theme(legend.position = "none") +
    scale_color_manual(
      name = "",
      values = c(beta = color_scheme_get()[[2]])
    ) +
    scale_fill_manual(
      name = "",
      values = c(beta = color_scheme_get()[[1]])
    )
  if (add_zero) p <- p + geom_hline(yintercept = 0, linetype = "dashed")
  p
}
/scratch/gouwar.j/cran-all/cranData/walker/R/plot_coefs.R
#' Plot the fitted values and sample quantiles for a walker object #' #' @param object An output from \code{\link{walker}} or \code{\link{walker_glm}}. #' @param level Level for intervals. Default is 0.05, leading to 90\% intervals. #' @param alpha Transparency level for \code{geom_ribbon}. #' @param ... Further arguments to \code{\link{ppc_ribbon}}. #' @export plot_fit <- function(object, level = 0.05, alpha = 0.33, ...){ y_fit <- extract(object$stanfit, pars = "y_fit", permuted = TRUE)$y_fit if (object$distribution != "gaussian") { y_fit <- y_fit[sample(1:nrow(y_fit), size = nrow(y_fit), replace = TRUE, prob = extract(object$stanfit, pars = "weights", permuted = TRUE)$weights), , drop = FALSE] } noNA <- which(!is.na(object$y)) ppc_ribbon(y = as.numeric(object$y[noNA]), yrep = y_fit[,noNA], x = as.numeric(time(object$y))[noNA], ...) + theme(legend.position = "none") + scale_x_continuous(name = "time") }
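
# A minimal usage sketch (added for illustration, not part of the package API):
# `fit` is assumed to be an existing walker fit; further arguments are passed
# on to bayesplot::ppc_ribbon().
if (FALSE) {
  plot_fit(fit, level = 0.1, alpha = 0.5)
}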
/scratch/gouwar.j/cran-all/cranData/walker/R/plot_fit.R
#' Prediction intervals for walker object #' #' Plots sample quantiles and posterior means of the predictions #' of the \code{predict.walker_fit} output. #' #' @importFrom ggplot2 ggplot facet_wrap geom_ribbon geom_line #' @importFrom bayesplot color_scheme_get theme_default #' @param object An output from \code{\link{predict.walker_fit}}. #' @param draw_obs Either \code{"response"}, \code{"mean"}, or \code{"none"}, #' where \code{"mean"} is response variable divided by number of trials or exposures #' in case of binomial/poisson models. #' @param level Level for intervals. Default is 0.05, leading to 90\% intervals. #' @param alpha Transparency level for \code{\link{geom_ribbon}}. #' @export #' @examples #' set.seed(1) #' n <- 60 #' slope <- 0.0001 + cumsum(rnorm(n, 0, sd = 0.01)) #' beta <- numeric(n) #' beta[1] <- 1 #' for(i in 2:n) beta[i] <- beta[i-1] + slope[i-1] #' ts.plot(beta) #' x <- rnorm(n, 1, 0.5) #' alpha <- 2 #' ts.plot(beta * x) #' #' signal <- alpha + beta * x #' y <- rnorm(n, signal, 0.25) #' ts.plot(cbind(signal, y), col = 1:2) #' data_old <- data.frame(y = y[1:(n-10)], x = x[1:(n-10)]) #' #' # note very small number of iterations for the CRAN checks! #' rw2_fit <- walker(y ~ 1 + #' rw2(~ -1 + x, #' beta = c(0, 10), #' nu = c(0, 10)), #' beta = c(0, 10), data = data_old, #' iter = 300, chains = 1, init = 0, refresh = 0) #' #' pred <- predict(rw2_fit, newdata = data.frame(x=x[(n-9):n])) #' data_new <- data.frame(t = (n-9):n, y = y[(n-9):n]) #' plot_predict(pred) + #' ggplot2::geom_line(data = data_new, ggplot2:: aes(t, y), #' linetype = "dashed", colour = "red", inherit.aes = FALSE) #' plot_predict <- function(object, draw_obs = NULL, level = 0.05, alpha = 0.33){ if (missing(draw_obs)) { if(attr(object, "type") == "link") { draw_obs <- "none" } else { if(attr(object, "type") == "mean") { draw_obs <- "mean" } else draw_obs <- "response" } } else { draw_obs <- match.arg(draw_obs, c("mean", "response", "none")) } pred_data <- as.data.frame(as.table(object$y_new)) pred_data$time <- as.numeric(levels(pred_data$time))[pred_data$time] names(pred_data)[3] <- "value" quantiles <- summarise(group_by(pred_data, time), lwr = quantile(.data$value, prob = level), median = quantile(.data$value, prob = 0.5), upr = quantile(.data$value, prob = 1 - level)) if (draw_obs != "none") { if(draw_obs == "mean" && !is.null(object$u)) { obs <- data.frame(y = object$y / object$u, x = time(object$y)) } else { obs <- data.frame(y = object$y, x = time(object$y)) } } p <- ggplot( data = quantiles, mapping = aes( x = .data$time, y = .data$median, ymin = .data$lwr, ymax = .data$upr ) ) + geom_ribbon(aes_(color = "y_new", fill = "y_new"), alpha = alpha, linetype = 0) + geom_line(aes_(color = "y_new")) + labs(y = NULL) + theme_default() + theme(legend.position = "none") + scale_fill_manual( name = "", values = c(y_new = color_scheme_get()[[1]])) if(attr(object, "type") != "link") { p <- p + geom_line(data = data.frame(y = object$mean, x = time(object$mean)), aes_(~x, ~y, alpha = 1, color = "mean"), inherit.aes = FALSE) + scale_color_manual( name = "", values = c(y_new = color_scheme_get()[[2]], mean = color_scheme_get()[[4]])) } else { p <- p + scale_color_manual( name = "", values = c(y_new = color_scheme_get()[[2]])) } if(draw_obs != "none") { p <- p + geom_line(data = obs, aes_(~x, ~y, alpha = 1), inherit.aes = FALSE) } p }
/scratch/gouwar.j/cran-all/cranData/walker/R/plot_predict.R
#' Posterior predictive check for walker object
#'
#' Plots sample quantiles from the posterior predictive sample.
#'
#' @details
#' For other types of posterior predictive checks, for example with \code{bayesplot},
#' you can extract the variable \code{y_rep} from the output, see the examples.
#'
#' @importFrom bayesplot pp_check
#' @param object An output from \code{\link{walker}}.
#' @param ... Further parameters to \code{\link{ppc_ribbon}}.
#' @export
#' @examples
#' \dontrun{
#' # Extracting the y_rep variable for general use:
#' y_rep <- extract(object$stanfit, pars = "y_rep", permuted = TRUE)$y_rep
#'
#' # For a non-Gaussian model, resample the rows using the importance weights:
#' weights <- extract(object$stanfit, pars = "weights", permuted = TRUE)$weights
#' y_rep <- y_rep[sample(1:nrow(y_rep),
#'   size = nrow(y_rep), replace = TRUE, prob = weights), , drop = FALSE]
#'}
#'
pp_check.walker_fit <- function(object, ...){
  y_rep <- extract(object$stanfit, pars = "y_rep", permuted = TRUE)$y_rep
  if (object$distribution != "gaussian") {
    # importance-weighted resampling converts the approximate-model draws into
    # draws from the exact posterior predictive distribution
    y_rep <- y_rep[sample(1:nrow(y_rep),
      size = nrow(y_rep), replace = TRUE,
      prob = extract(object$stanfit, pars = "weights", permuted = TRUE)$weights), ,
      drop = FALSE]
  }
  ppc_ribbon(y = as.numeric(object$y),
    yrep = y_rep,
    x = as.numeric(time(object$y)),
    ...)
}
/scratch/gouwar.j/cran-all/cranData/walker/R/pp_check.R
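# --- Standalone sketch of the importance-weighted resampling used in
# pp_check.walker_fit() above for non-Gaussian models: replicate draws from
# the approximate model are resampled with probabilities proportional to the
# importance weights, giving an unweighted sample from the exact posterior
# predictive distribution. Toy numbers only. ---
set.seed(1)
n_iter <- 1000
y_rep <- matrix(rnorm(n_iter * 10), nrow = n_iter) # stand-in for replicate draws
weights <- rexp(n_iter)                            # stand-in for importance weights
idx <- sample(seq_len(n_iter), size = n_iter, replace = TRUE, prob = weights)
y_rep_resampled <- y_rep[idx, , drop = FALSE]
# the weighted mean and the mean of the resampled draws should roughly agree:
c(weighted.mean(y_rep[, 1], weights), mean(y_rep_resampled[, 1]))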
#' Predictions for walker object #' #' Given the new covariate data and output from \code{walker}, #' obtain samples from posterior predictive distribution for future time points. #' #' @importFrom stats deltat tsp #' @param object An output from \code{\link{walker}} or \code{\link{walker_glm}}. #' @param newdata A \code{data.frame} containing covariates used for prediction. #' @param u For Poisson model, a vector of future exposures i.e. E(y) = u*exp(x*beta). #' For binomial, a vector containing the number of trials for future time points. Defaults 1. #' @param type If \code{"response"} (default for Gaussian model), predictions are on the response level #' (e.g., number of successes for Binomial case, and for Gaussian case the observational #' level noise is added to the mean predictions). #' If \code{"mean"} (default for non-Gaussian case), predict means (e.g., success probabilities in Binomial case). #' If \code{"link"}, predictions for non-Gaussian models are returned before applying the inverse of the link-function. #' @param ... Ignored. #' @return A list containing samples from posterior predictive distribution. #' @method predict walker_fit #' @seealso \code{\link{plot_predict}} for example. #' @export predict.walker_fit <- function(object, newdata, u, type = ifelse(object$distribution == "gaussian", "response", "mean"), ...){ type <- match.arg(type, c("response", "mean", "link")) y_name <- as.character(object$call$formula[[2]]) if (!(y_name%in% names(newdata))) { newdata[[y_name]] <- rep(NA, nrow(newdata)) } object$call$data <- newdata object$call$return_x_reg <- TRUE xregs <- eval(object$call) if (any(is.na(xregs$xreg_fixed)) || any(is.na(xregs$xreg_rw))) { stop("Missing values in covariates are not allowed.") } n <- length(object$y) beta_rw <- extract(object$stanfit, pars = "beta_rw")$beta_rw[, , n, drop = FALSE] dim(beta_rw) <- dim(beta_rw)[1:2] n_iter <- nrow(beta_rw) nu <- extract(object$stanfit, pars = "nu")$nu[, , n, drop = FALSE] if (is.null(nu)) { nu <- matrix(0, n_iter, 0) } else dim(nu) <- dim(nu)[1:2] beta_fixed <- extract(object$stanfit, pars = "beta_fixed")$beta_fixed if (is.null(beta_fixed)) beta_fixed <- matrix(0, n_iter, 0) sigma_rw1 <- extract(object$stanfit, pars = "sigma_rw1")$sigma_rw1 if (is.null(sigma_rw1)) sigma_rw1 <- matrix(0, n_iter, 0) sigma_rw2 <- extract(object$stanfit, pars = "sigma_rw2")$sigma_rw2 if (is.null(sigma_rw2)) sigma_rw2 <- matrix(0, n_iter, 0) if (object$distribution != "gaussian") { if (missing(u)) { u <- rep(1, nrow(newdata)) } else { if (length(u) != nrow(newdata)) { if(length(u) > 1) stop("Length of 'u' should be 1 or equal to the number of predicted time points. 
") u <- rep(u, nrow(newdata)) } } type_int <- pmatch(type, c("link", "response", "mean")) - 1L pred <- predict_walker_glm(t(sigma_rw1), t(sigma_rw2), t(beta_fixed), t(beta_rw), t(nu), xregs$xreg_fixed, t(xregs$xreg_rw), u, pmatch(object$distribution, c("poisson", "binomial")), extract(object$stanfit, pars = "weights")$weights, nrow(newdata), ncol(beta_fixed), ncol(sigma_rw1), ncol(sigma_rw2), type_int) pred$mean <- colMeans(fitted(object, summary = FALSE)) pred$u <- object$u } else { if (type == "link") type <- "mean" pred <- predict_walker(t(sigma_rw1), t(sigma_rw2), extract(object$stanfit, pars = "sigma_y")$sigma_y, t(beta_fixed), t(beta_rw), t(nu), xregs$xreg_fixed, t(xregs$xreg_rw), nrow(newdata), ncol(beta_fixed), ncol(sigma_rw1), ncol(sigma_rw2), type == "response") pred$mean <- colMeans(fitted(object, summary = FALSE)) } st <- tsp(object$y)[2L] if (is.null(st)) st <- length(object$y) s1 <- tsp(object$y)[1L] if (is.null(s1)) s1 <- 1 d <- deltat(object$y) pred$y <- object$y pred$mean <- ts(pred$mean, start = s1, end = st, deltat = d) attr(pred, "type") <- type dimnames(pred$y_new) <- list(time = seq(st + deltat(object$y), by = deltat(object$y), length = nrow(pred$y_new)), iter = 1:ncol(pred$y_new)) pred }
/scratch/gouwar.j/cran-all/cranData/walker/R/predict.R
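# --- Usage sketch for predict.walker_fit() above (assumed toy data; the
# interface follows the documentation in this file) ---
library(walker)
set.seed(2)
n <- 40
x <- rnorm(n)
y <- rnorm(n, 1 + cumsum(rnorm(n, 0, 0.1)) * x, 0.5)
d <- data.frame(y = y[1:30], x = x[1:30])
fit <- walker(y ~ -1 + rw1(~ x, beta = c(0, 2), sigma = c(2, 10)),
  data = d, sigma_y_prior = c(2, 1), chains = 1, iter = 500, refresh = 0)
# posterior predictive draws for ten future time points
pred <- predict(fit, newdata = data.frame(x = x[31:40]))
str(pred$y_new) # time x iteration matrix of draws
plot_predict(pred)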
#' Predictions for walker object #' #' Given the new covariate data and output from \code{walker}, #' obtain samples from posterior predictive distribution for counterfactual case, #' i.e. for past time points with different covariate values. #' #' @importFrom stats deltat tsp rpois plogis rbinom #' @param object An output from \code{\link{walker}} or \code{\link{walker_glm}}. #' @param newdata A \code{data.frame} containing covariates used for prediction. #' Should have equal number of rows as the original data #' @param u For Poisson model, a vector of exposures i.e. E(y) = u*exp(x*beta). #' For binomial, a vector containing the number of trials. Defaults 1. #' @param summary If \code{TRUE} (default), return summary statistics. Otherwise returns samples. #' @param type If \code{"response"} (default for Gaussian model), predictions are on the response level #' (e.g., number of successes for Binomial case, and for Gaussian case the observational #' level noise is added to the mean predictions). #' If \code{"mean"} (default for non-Gaussian case), predict means (e.g., success probabilities in Binomial case). #' If \code{"link"}, predictions for non-Gaussian models are returned before applying the inverse of the link-function. #' @return If \code{summary=TRUE}, time series containing summary statistics of predicted values. #' Otherwise a matrix of samples from predictive distribution. #' @export #' @examples #' \dontrun{ #' set.seed(1) #' n <- 50 #' x1 <- rnorm(n, 0, 1) #' x2 <- rnorm(n, 1, 0.5) #' x3 <- rnorm(n) #' beta1 <- cumsum(c(1, rnorm(n - 1, sd = 0.1))) #' beta2 <- cumsum(c(0, rnorm(n - 1, sd = 0.1))) #' beta3 <- -1 #' u <- sample(1:10, size = n, replace = TRUE) #' y <- rbinom(n, u, plogis(beta3 * x3 + beta1 * x1 + beta2 * x2)) #' #' d <- data.frame(y, x1, x2, x3) #' out <- walker_glm(y ~ x3 + rw1(~ -1 + x1 + x2, beta = c(0, 2), #' sigma = c(2, 10)), distribution = "binomial", beta = c(0, 2), #' u = u, data = d, #' iter = 2000, chains = 1, refresh = 0) #' #' # what if our covariates were constant? #' newdata <- data.frame(x1 = rep(0.4, n), x2 = 1, x3 = -0.1) #' #' fitted <- fitted(out) #' pred <- predict_counterfactual(out, newdata, type = "mean") #' #' ts.plot(cbind(fitted[, c(1, 3, 5)], pred[, c(1, 3, 5)]), #' col = rep(1:2, each = 3), lty = c(1, 2, 2)) #'} predict_counterfactual <- function(object, newdata, u, summary = TRUE, type = ifelse(object$distribution == "gaussian", "response", "mean")){ type <- match.arg(type, c("response", "mean", "link")) y_name <- as.character(object$call$formula[[2]]) if (!(y_name%in% names(newdata))) { newdata[[y_name]] <- rep(NA, nrow(newdata)) } object$call$data <- newdata object$call$return_x_reg <- TRUE xregs <- eval(object$call) if (any(is.na(xregs$xreg_fixed)) || any(is.na(xregs$xreg_rw))) { stop("Missing values in covariates are not allowed.") } n <- length(object$y) if (n != nrow(newdata)) stop("Number of rows in 'newdata' should match with the original data. 
") beta_fixed <- extract(object$stanfit, pars = "beta_fixed")$beta_fixed beta_rw <- extract(object$stanfit, pars = "beta_rw")$beta_rw n_iter <- nrow(beta_rw) y_new <- matrix(0, n, n_iter) if (!is.null(beta_fixed)) { beta_fixed <- t(beta_fixed) xregs$xreg_fixed <- t(xregs$xreg_fixed) for (i in 1:n_iter) { for(t in 1:n) { y_new[t, i] <- beta_fixed[, i] %*% xregs$xreg_fixed[, t] } } } for (i in 1:n_iter) { for (t in 1:n) { y_new[t, i] <- y_new[t, i] + beta_rw[i, , t] %*% xregs$xreg_rw[t, ] } } if (object$distribution != "gaussian") { if (missing(u)) { u <- rep(1, nrow(newdata)) } else { if (length(u) != nrow(newdata)) { if(length(u) > 1) stop("Length of 'u' should be 1 or equal to the number of predicted time points. ") u <- rep(u, nrow(newdata)) } } if (type != "link") { if (object$distribution == "poisson") { for (i in 1:n_iter) { y_new[, i] <- u * exp(y_new[, i]) } } else { y_new <- plogis(y_new) } if (type == "response") { if (object$distribution == "poisson") { for (i in 1:n_iter) { y_new[, i] <- rpois(n, y_new[,i]) } } else { for (i in 1:n_iter) { y_new[, i] <- rbinom(n, u, y_new[, i]) } } } } } if (summary) { y_new <- t(apply(y_new, 1, function(x) { q <- quantile(x, c(0.025, 0.5, 0.975)) c(mean = mean(x), sd = sd(x), q) })) rownames(y_new) <- time(object$y) } y_new }
/scratch/gouwar.j/cran-all/cranData/walker/R/predict_counterfactual.R
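# --- Sketch of the summary reduction performed by predict_counterfactual()
# when summary = TRUE: an n x n_iter matrix of draws is collapsed to per-time
# mean, sd, and quantiles, mirroring the apply() call above. Toy draws only. ---
set.seed(3)
draws <- matrix(rnorm(20 * 1000), nrow = 20) # 20 time points, 1000 posterior draws
summ <- t(apply(draws, 1, function(x) {
  q <- quantile(x, c(0.025, 0.5, 0.975))
  c(mean = mean(x), sd = sd(x), q)
}))
head(summ) # columns: mean, sd, 2.5%, 50%, 97.5%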
#' Print Summary of walker_fit Object #' #' Prints the summary information of time-invariant model parameters. In case of non-Gaussian models, #' results based on approximate model are returned with a warning. #' #' @param x An output from \code{\link{walker}} or \code{\link{walker_glm}}. #' @param ... Additional arguments to \code{\link{print.stanfit}}. #' @method print walker_fit #' @export print.walker_fit <- function(x, ...) { pars <- setdiff(x$stanfit@sim$pars_oi, c("beta_rw", "nu", "y_fit", "y_rep", "log_lik")) if(x$distribution != "gaussian") warning("Results are based on approximate model, use summary method for exact results.") print(x$stanfit, pars = pars, ...) } #' Coerce Posterior Samples of walker Fit to a Data Frame #' #' Creates a data.frame object from the output of walker fit. #' #' @param x An output from \code{\link{walker}} or \code{\link{walker_glm}}. #' @param row.names \code{NULL} (default) or a character vector giving the row names #' for the data frame. #' @param optional Ignored (part of generic \code{as.data.frame} signature). #' @param type Either \code{tiv} (time-invariant parameters) or \code{tv} (time-varying coefficients). #' @param ... Ignored. #' @method as.data.frame walker_fit #' @export #' @examples #' \dontrun{ #' as.data.frame(fit, "tiv") %>% #' group_by(variable) %>% #' summarise(mean = mean(value), #' lwr = quantile(value, 0.05), #' upr = quantile(value, 0.95)) #' } #' as.data.frame.walker_fit <- function(x, row.names = NULL, optional = FALSE, type, ...) { type <- match.arg(type, c("tiv", "tv")) if (type == "tiv") { pars <- setdiff(x$stanfit@sim$pars_oi, c("beta_rw", "nu", "y_fit", "y_rep", "lp__", "weights", "log_lik")) samples <- extract(x$stanfit, pars = pars, permuted = FALSE) n <- nrow(samples) k <- ncol(samples) d <- data.frame(iter = 1:n, chain = rep(1:k, each = n), value = c(samples), variable = rep(dimnames(samples)[[3]], each = n * k), row.names = row.names) if (x$distribution != "gaussian") { d$weight <- c(extract(x$stanfit, pars = "weights", permuted = FALSE)) } } else { pars <- intersect(x$stanfit@sim$pars_oi, c("beta_rw", "nu")) samples <- extract(x$stanfit, pars = pars, permuted = FALSE) n <- nrow(samples) k <- ncol(samples) d <- data.frame(iter = 1:n, chain = rep(1:k, each = n), time = rep(as.numeric(time(x$y)), each = n * k * ncol(x$xreg_rw)), value = c(samples), variable = rep(paste0("beta_", colnames(x$xreg_rw)), each = n * k), row.names = row.names) if (x$distribution != "gaussian") { d$weight <- c(extract(x$stanfit, pars = "weights", permuted = FALSE)) } } d } #' Summary of walker_fit Object #' #' Return summary information of time-invariant model parameters. #' #' @param object An output from \code{\link{walker}} or \code{\link{walker_glm}}. #' @param type Either \code{tiv} (time-invariant parameters, the default) or \code{tv} (time-varying coefficients). #' @param ... Ignored. #' @importFrom Hmisc wtd.mean wtd.var wtd.quantile #' @importFrom coda spectrum0.ar #' @method summary walker_fit #' @export summary.walker_fit <- function(object, type = "tiv", ...) 
{
  type <- match.arg(type, c("tiv", "tv"))
  if (type == "tiv") {
    pars <- setdiff(object$stanfit@sim$pars_oi,
      c("beta_rw", "nu", "y_fit", "y_rep", "lp__", "weights"))
  } else {
    pars <- intersect(object$stanfit@sim$pars_oi, c("beta_rw", "nu"))
  }
  if (object$distribution == "gaussian") {
    d <- as.data.frame(summary(object$stanfit,
      pars = pars)$summary[, c("mean", "se_mean", "sd", "2.5%", "97.5%", "n_eff")])
    d$n_eff <- round(d$n_eff)
  } else {
    samples <- extract(object$stanfit, pars = pars, permuted = FALSE)
    w <- extract(object$stanfit, pars = "weights", permuted = FALSE)
    means <- apply(samples, 3, function(x) wtd.mean(x, c(w), normwt = TRUE))
    sds <- sqrt(apply(samples, 3, function(x) wtd.var(x, c(w), normwt = TRUE)))
    lwrs <- apply(samples, 3, function(x) wtd.quantile(x, c(w), 0.025, normwt = TRUE))
    uprs <- apply(samples, 3, function(x) wtd.quantile(x, c(w), 0.975, normwt = TRUE))
    ess <- numeric(dim(samples)[3])
    for (i in seq_len(ncol(samples))) { # loop over chains (was 1:seq_along(ncol(samples)))
      c2_est <- mean(w[, i, 1])^2 * length(w[, i, 1])
      for (j in seq_along(ess)) {
        v <- spectrum0.ar(samples[, i, j] * w[, i, 1])$spec / c2_est
        ess[j] <- ess[j] + sds[j]^2 / v
      }
    }
    d <- data.frame(mean = means, se_mean = sds / sqrt(ess), sd = sds,
      "2.5%" = lwrs, "97.5%" = uprs, n_eff = round(ess),
      row.names = dimnames(samples)[[3]], check.names = FALSE)
  }
  d
}
/scratch/gouwar.j/cran-all/cranData/walker/R/print_fit.R
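# --- Standalone sketch of the weighted summary statistics computed in
# summary.walker_fit() above for non-Gaussian models, using the same Hmisc
# estimators with importance weights. Toy data only. ---
library(Hmisc)
set.seed(4)
draws <- rnorm(1000, mean = 2) # stand-in for one parameter's posterior draws
w <- rexp(1000)                # stand-in for importance weights
c(mean = wtd.mean(draws, w, normwt = TRUE),
  sd = sqrt(wtd.var(draws, w, normwt = TRUE)),
  wtd.quantile(draws, w, probs = c(0.025, 0.975), normwt = TRUE))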
#' Construct a first-order random walk component
#'
#' Auxiliary function used inside of the formula of \code{walker}.
#'
#' @export
#' @param formula Formula for the RW1 part of the model. Only the right-hand side is used.
#' @param data Optional data.frame.
#' @param beta A vector of length two which defines the
#' prior mean and standard deviation of the Gaussian prior for the coefficients at time 1.
#' @param sigma A vector of length two, defining the Gamma prior for
#' the coefficient level standard deviation. The first element corresponds to the shape parameter and
#' the second to the rate parameter. Default is Gamma(2, 0.0001).
#' @param gamma An optional k times n matrix defining known non-negative weights of the
#' random walk noises, where k is the number of coefficients and n is the
#' number of time points. Then, the standard deviation of the random walk noise
#' for each coefficient is of the form gamma_t * sigma (instead of just sigma).
rw1 <- function(formula, data, beta, sigma = c(2, 0.0001), gamma = NULL) {
  mf <- match.call(expand.dots = FALSE)
  mf <- mf[c(1L, match(c("formula", "data"), names(mf), 0L))]
  mf$drop.unused.levels <- TRUE
  mf$na.action <- as.name("na.pass")
  mf[[1L]] <- quote(stats::model.frame)
  mf <- eval(mf, parent.frame())
  xreg <- model.matrix(attr(mf, "terms"), mf)
  check_normal(beta, "beta")
  check_gamma(sigma, "sigma")
  n <- nrow(xreg)
  if (is.null(gamma)) {
    gamma <- matrix(1, ncol(xreg), n)
  } else {
    if (ncol(gamma) != n)
      stop("The number of columns of the gamma matrix for 'rw1' should equal the number of observations. ")
    if (!is.numeric(gamma) | any(gamma < 0 | is.na(gamma)))
      stop("Argument 'gamma' should be a numeric matrix of non-negative values. ")
  }
  list(xreg = xreg, beta = beta, sigma = sigma, gamma = gamma)
}
#' Construct a second-order random walk component
#'
#' Auxiliary function used inside of the formula of \code{walker}.
#'
#' @export
#' @param formula Formula for the RW2 part of the model. Only the right-hand side is used.
#' @param data Optional data.frame.
#' @param beta A vector of length two which defines the
#' prior mean and standard deviation of the Gaussian prior for the coefficients at time 1.
#' @param sigma A vector of length two, defining the Gamma prior for
#' the slope level standard deviation. The first element corresponds to the shape parameter and
#' the second to the rate parameter. Default is Gamma(2, 0.0001).
#' @param nu A vector of length two which defines the
#' prior mean and standard deviation of the Gaussian prior for the slopes nu at time 1.
#' @param gamma An optional k times n matrix defining known non-negative
#' weights of the slope noises, where k is the number of coefficients
#' and n is the number of time points. Then, the standard deviation of the
#' noise term for each coefficient's slope is of the form gamma_t * sigma
#' (instead of just sigma).
rw2 <- function(formula, data, beta, sigma = c(2, 0.0001), nu, gamma = NULL) {
  mf <- match.call(expand.dots = FALSE)
  mf <- mf[c(1L, match(c("formula", "data"), names(mf), 0L))]
  mf$drop.unused.levels <- TRUE
  mf$na.action <- as.name("na.pass")
  mf[[1L]] <- quote(stats::model.frame)
  mf <- eval(mf, parent.frame())
  xreg <- model.matrix(attr(mf, "terms"), mf)
  check_normal(beta, "beta")
  check_gamma(sigma, "sigma")
  check_normal(nu, "nu") # was check_normal(beta, "nu"): validate the prior for nu, not beta
  n <- nrow(xreg)
  if (is.null(gamma)) {
    gamma <- matrix(1, ncol(xreg), n)
  } else {
    if (ncol(gamma) != n)
      stop("The number of columns of the gamma matrix for 'rw2' should equal the number of observations. 
") if (!is.numeric(gamma) | any(gamma < 0 | is.na(gamma))) stop("Argument 'gamma' should be numeric matrix of non-negative values. ") } list(xreg = xreg, beta = beta, sigma = sigma, nu = nu, gamma = gamma) }
/scratch/gouwar.j/cran-all/cranData/walker/R/rw.R
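# --- Sketch of the 'gamma' weights accepted by rw1()/rw2() above: a k x n
# matrix scaling the random walk noise sd per coefficient and time point,
# e.g. to keep a coefficient fixed before a known change point. Assumed toy
# data; the formula interface follows the documentation in this file. ---
library(walker)
set.seed(5)
n <- 60
x <- rnorm(n)
y <- rnorm(n, c(rep(1, 30), seq(1, 3, length.out = 30)) * x, 0.3)
gamma <- matrix(1, 1, n)
gamma[1, 1:30] <- 0 # no random walk noise for the first 30 time points
fit <- walker(y ~ -1 +
  rw1(~ -1 + x, beta = c(0, 2), sigma = c(2, 10), gamma = gamma),
  sigma_y_prior = c(2, 1), chains = 1, iter = 500, refresh = 0)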
# Generated by rstantools. Do not edit by hand. # names of stan models stanmodels <- c("rw1_model", "rw1_model_naive", "walker_glm", "walker_lm") # load each stan module Rcpp::loadModule("stan_fit4rw1_model_mod", what = TRUE) Rcpp::loadModule("stan_fit4rw1_model_naive_mod", what = TRUE) Rcpp::loadModule("stan_fit4walker_glm_mod", what = TRUE) Rcpp::loadModule("stan_fit4walker_lm_mod", what = TRUE) # instantiate each stanmodel object stanmodels <- sapply(stanmodels, function(model_name) { # create C++ code for stan model stan_file <- if(dir.exists("stan")) "stan" else file.path("inst", "stan") stan_file <- file.path(stan_file, paste0(model_name, ".stan")) stanfit <- rstan::stanc_builder(stan_file, allow_undefined = TRUE, obfuscate_model_name = FALSE) stanfit$model_cpp <- list(model_cppname = stanfit$model_name, model_cppcode = stanfit$cppcode) # create stanmodel object methods::new(Class = "stanmodel", model_name = stanfit$model_name, model_code = stanfit$model_code, model_cpp = stanfit$model_cpp, mk_cppmodule = function(x) get(paste0("rstantools_model_", model_name))) })
/scratch/gouwar.j/cran-all/cranData/walker/R/stanmodels.R
check_normal <- function(x, name) {
  if (length(x) != 2)
    stop(paste0("Argument ", name, " should be a vector of length two, ",
      "defining the mean and standard deviation for the Gaussian prior. "))
  if (!(x[2] > 0))
    stop(paste0("Prior standard deviation for ", name, " should be positive. "))
}
check_gamma <- function(x, name) {
  if (length(x) != 2)
    stop(paste0("Argument ", name, " should be a vector of length two, ",
      "defining the shape and rate for the Gamma prior for the parameter ", name, ". "))
  if (!all(x > 0))
    stop(paste0("Both parameters of the Gamma prior for the parameter ", name,
      " should be positive. "))
}
/scratch/gouwar.j/cran-all/cranData/walker/R/test_args.R
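# --- Quick demonstration of the validators above (the last two calls error
# on purpose) ---
check_normal(c(0, 1), "beta")       # valid: prior mean 0, prior sd 1
check_gamma(c(2, 0.0001), "sigma")  # valid: shape 2, rate 0.0001
try(check_normal(c(0, -1), "beta")) # fails: prior sd must be positive
try(check_gamma(2, "sigma"))        # fails: needs a vector of length two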
#' Bayesian regression with random walk coefficients
#'
#' Function \code{walker} performs Bayesian inference of a linear
#' regression model with time-varying, random walk regression coefficients,
#' i.e. an ordinary regression model where instead of constant coefficients the
#' coefficients follow first or second order random walks.
#' All Markov chain Monte Carlo computations are done using Hamiltonian
#' Monte Carlo provided by Stan, using a state space representation of the model
#' in order to marginalise over the coefficients for efficient sampling.
#'
#' The \code{rw1} and \code{rw2} functions used in the formula define new formulas
#' for the first and second order random walks. In addition, these functions
#' need to be supplied with priors for the initial coefficients and the
#' standard deviations. For the second order random walk model, these sigma priors
#' correspond to the standard deviation of the slope disturbances. For \code{rw2},
#' a prior for the initial slope nu also needs to be defined. See the examples.
#'
#' @note Beware of overfitting and identifiability issues. In particular,
#' be careful not to define multiple intercept terms
#' (only one should be present).
#' By default \code{rw1} and \code{rw2} calls add their own time-varying
#' intercepts, so you should use \code{0} or \code{-1} to remove some of them
#' (or the time-invariant intercept in the fixed part of the formula).
#'
#' @import rstan Rcpp methods
#' @importFrom rstan sampling
#' @importFrom Rcpp loadModule evalCpp
#' @importFrom stats model.matrix model.response rnorm delete.response terms window ts end glm poisson rgamma
#' @importFrom rstantools rstan_config
#' @importFrom RcppParallel RcppParallelLibs CxxFlags
#' @rdname walker
#' @useDynLib walker, .registration = TRUE
#' @param formula An object of class \code{formula} with additional terms
#' \code{rw1} and/or \code{rw2}, e.g. \code{y ~ x1 + rw1(~ -1 + x2)}. See details.
#' @param data An optional data.frame or object coercible to such, as in \code{lm}.
#' @param beta A vector of length two which defines the
#' prior mean and standard deviation of the Gaussian prior for the time-invariant coefficients.
#' @param sigma_y_prior A vector of length two, defining the Gamma prior for
#' the observation level standard deviation, with the first element corresponding to the shape parameter and
#' the second to the rate parameter. Default is Gamma(2, 0.01). Not used in \code{walker_glm}.
#' @param chains Number of Markov chains. Default is 4.
#' @param init Initial value specification, see \code{\link{sampling}}.
#' Note that compared to the default in \code{rstan}, here the default is to sample from the priors.
#' @param return_x_reg If \code{TRUE}, does not perform sampling, but instead returns the matrix of
#' predictors after processing the \code{formula}.
#' @param gamma_y An optional vector defining known non-negative weights for the standard
#' deviation of the observational level noise at each time point.
#' More specifically, the observational level standard deviation sigma_t is
#' defined as \eqn{\sigma_t = \gamma_t \sigma_y} (in the default case
#' \eqn{\sigma_t = \sigma_y}).
#' @param return_data If \code{TRUE}, returns the data input to \code{sampling}. This is needed for
#' \code{lfo}.
#' @param ... Further arguments to \code{\link{sampling}}.
#' @return A list containing the \code{stanfit} object, observations \code{y},
#' and covariates \code{xreg_fixed} and \code{xreg_rw}.
#' @seealso \code{\link{walker_glm}} for non-Gaussian models. 
#' @export #' @examples #' #' \dontrun{ #' set.seed(1) #' x <- rnorm(10) #' y <- x + rnorm(10) #' #' # different intercept definitions: #' #' # both fixed intercept and time-varying level, #' # can be unidentifiable without strong priors: #' fit1 <- walker(y ~ rw1(~ x, beta = c(0, 1)), #' beta = c(0, 1), chains = 1, iter = 1000, init = 0) #' #' # only time-varying level, using 0 or -1 removes intercept: #' fit2 <- walker(y ~ 0 + rw1(~ x, beta = c(0, 1)), chains = 1, iter = 1000, #' init = 0) #' #' # time-varying level, no covariates: #' fit3 <- walker(y ~ 0 + rw1(~ 1, beta = c(0, 1)), chains = 1, iter = 1000) #' #' # fixed intercept no time-varying level: #' fit4 <- walker(y ~ rw1(~ 0 + x, beta = c(0, 1)), #' beta = c(0, 1), chains = 1, iter = 1000) #' #' # only time-varying effect of x: #' fit5 <- walker(y ~ 0 + rw1(~ 0 + x, beta = c(0, 1)), chains = 1, iter = 1000) #' } #' #' \dontrun{ #' #' rw1_fit <- walker(Nile ~ -1 + #' rw1(~ 1, #' beta = c(1000, 100), #' sigma = c(2, 0.001)), #' sigma_y_prior = c(2, 0.005), #' iter = 2000, chains = 1) #' #' rw2_fit <- walker(Nile ~ -1 + #' rw2(~ 1, #' beta = c(1000, 100), #' sigma = c(2, 0.001), #' nu = c(0, 100)), #' sigma_y_prior = c(2, 0.005), #' iter = 2000, chains = 1) #' #' g_y <- geom_point(data = data.frame(y = Nile, x = time(Nile)), #' aes(x, y, alpha = 0.5), inherit.aes = FALSE) #' g_rw1 <- plot_coefs(rw1_fit) + g_y #' g_rw2 <- plot_coefs(rw2_fit) + g_y #' if(require("gridExtra")) { #' grid.arrange(g_rw1, g_rw2, ncol=2, top = "RW1 (left) versus RW2 (right)") #' } else { #' g_rw1 #' g_rw2 #' } #' #' y <- window(log10(UKgas), end = time(UKgas)[100]) #' n <- 100 #' cos_t <- cos(2 * pi * 1:n / 4) #' sin_t <- sin(2 * pi * 1:n / 4) #' dat <- data.frame(y, cos_t, sin_t) #' fit <- walker(y ~ -1 + #' rw1(~ cos_t + sin_t, beta = c(0, 10), sigma = c(2, 1)), #' sigma_y_prior = c(2, 10), data = dat, chains = 1, iter = 2000) #' print(fit$stanfit, pars = c("sigma_y", "sigma_rw1")) #' #' plot_coefs(fit) #' # posterior predictive check: #' pp_check(fit) #' #' newdata <- data.frame( #' cos_t = cos(2 * pi * 101:108 / 4), #' sin_t = sin(2 * pi * 101:108 / 4)) #' pred <- predict(fit, newdata) #' plot_predict(pred) #' #' # example on scalability #' set.seed(1) #' n <- 2^12 #' beta1 <- cumsum(c(0.5, rnorm(n - 1, 0, sd = 0.05))) #' beta2 <- cumsum(c(-1, rnorm(n - 1, 0, sd = 0.15))) #' x1 <- rnorm(n, mean = 2) #' x2 <- cos(1:n) #' rw <- cumsum(rnorm(n, 0, 0.5)) #' signal <- rw + beta1 * x1 + beta2 * x2 #' y <- rnorm(n, signal, 0.5) #' #' d <- data.frame(y, x1, x2) #' #' n <- 2^(6:12) #' times <- numeric(length(n)) #' for(i in seq_along(n)) { #' times[i] <- sum(get_elapsed_time( #' walker(y ~ 0 + rw1(~ x1 + x2, #' beta = c(0, 10)), #' data = d[1:n[i],], #' chains = 1, seed = 1, refresh = 0)$stanfit)) #' } #' plot(log2(n), log2(times)) #'} walker <- function(formula, data, sigma_y_prior = c(2, 0.01), beta, init, chains, return_x_reg = FALSE, gamma_y = NULL, return_data = TRUE, ...) 
{ if (missing(data)) { data <- environment(formula) } else { data <- as.data.frame(data) } # Modifying formula object, catching special functions mc <- match.call() mf <- match.call(expand.dots = FALSE) mf <- mf[c(1L, match(c("formula", "data"), names(mf), 0L))] mf[[1L]] <- quote(stats::model.frame) mf$na.action <- as.name("na.pass") mf$drop.unused.levels <- TRUE specials <- c("rw1", "rw2") all_terms <- terms(formula, specials = specials, data = data) rws <- unlist(attr(all_terms, "specials")) if (length(rws) > 0) { if (length(attr(all_terms, "term.labels")) == length(rws)){ all_terms <- terms(update.formula(all_terms, . ~ . + .emptyx.), specials = specials) } drops <- which(attr(all_terms, "term.labels") %in% rownames(attr(all_terms, "factors"))[rws]) mf$formula <- formula(drop.terms(all_terms, drops, keep.response = TRUE)) mf$formula <- update.formula(mf$formula, . ~ . - .emptyx., simplify = TRUE) } else { stop("Model does not contain time-varying part, use ordinary regression modelling instead.") } # build y and xreg mf <- eval(mf, parent.frame()) y <- model.response(mf, "numeric") n <- length(y) xreg_fixed <- model.matrix(attr(mf, "terms"), mf) ## RWs vars <- attr(all_terms, "variables") if (!is.null(attr(all_terms, "specials")$rw1)) { comp <- vars[[1 + attr(all_terms, "specials")$rw1[1]]] rw1_out <- eval(comp, envir = data, enclos = parent.frame()) # only intercept if (nrow(rw1_out$xreg) == 0) { rw1_out$xreg <- matrix(1, n, 1) rw1_out$gamma <- matrix(1, 1, n) } if (nrow(rw1_out$xreg) != n) stop("length of the series and covariates do not match.") } else { rw1_out <- list(xreg = matrix(0, n, 0), beta = rep(1, 2), sigma = rep(1, 2), gamma = matrix(0, 0, n)) } if (!is.null(attr(all_terms, "specials")$rw2)) { comp <- vars[[1 + attr(all_terms, "specials")$rw2[1]]] rw2_out <- eval(comp, envir = data, enclos = parent.frame()) # only intercept if (nrow(rw2_out$xreg) == 0) { rw2_out$xreg <- matrix(1, n, 1) rw2_out$gamma <- matrix(1, 1, n) } if (nrow(rw2_out$xreg) != n) stop("length of the series and covariates do not match.") } else { rw2_out <- list(xreg = matrix(0, n, 0), beta = rep(1, 2), sigma = rep(1, 2), nu = rep(1, 2), gamma = matrix(0, 0, n)) } xreg_rw <- cbind(rw1_out$xreg, rw2_out$xreg) k_fixed <- max(0, ncol(xreg_fixed)) k_rw1 <- max(0, ncol(rw1_out$xreg)) k_rw2 <- max(0, ncol(rw2_out$xreg)) if (return_x_reg) return(list(xreg_fixed = xreg_fixed, xreg_rw = xreg_rw)) if (any(is.na(xreg_fixed)) || any(is.na(xreg_rw))) stop("Missing values in covariates are not allowed.") if(k_fixed > 0) check_normal(beta) check_gamma(sigma_y_prior, "sigma_y_prior") if (is.null(gamma_y)) { gamma_y <- rep(1, n) } else { if (length(gamma_y) != n) stop("The length of gamma vector should equal to the number of observations. ") if (!is.numeric(gamma_y) | any(gamma_y < 0 | is.na(gamma_y))) stop("Argument 'gamma_y' should be numeric vector of nonnegative values. 
") } stan_data <- list( k_fixed = k_fixed, k_rw1 = k_rw1, k_rw2 = k_rw2, m = k_rw1 + 2 * k_rw2, k = k_rw1 + k_rw2, n = n, n_lfo = n, xreg_fixed = xreg_fixed, xreg_rw = t(xreg_rw), y = y, y_miss = as.integer(is.na(y)), sigma_y_shape = sigma_y_prior[1], sigma_y_rate = sigma_y_prior[2], beta_fixed_mean = if (k_fixed > 0) beta[1] else 1, beta_fixed_sd = if (k_fixed > 0) beta[2] else 1, beta_rw1_mean = rw1_out$beta[1], beta_rw1_sd = rw1_out$beta[2], beta_rw2_mean = rw2_out$beta[1], beta_rw2_sd = rw2_out$beta[2], sigma_rw1_shape = rw1_out$sigma[1], sigma_rw1_rate = rw1_out$sigma[2], sigma_rw2_shape = rw2_out$sigma[1], sigma_rw2_rate = rw2_out$sigma[2], nu_mean = rw2_out$nu[1], nu_sd = rw2_out$nu[2], gamma_y = gamma_y, gamma_rw1 = rw1_out$gamma, gamma_rw2 = rw2_out$gamma ) stan_data$y[is.na(y)] <- 0 ## Stan does not accept NA's if (missing(chains)) chains <- 4 if (missing(init)) { init <- replicate(chains, list(beta_fixed = if (k_fixed > 0) { structure(rnorm(k_fixed, beta[1], beta[2] / 10), dim = k_fixed) } else { structure(numeric(0), dim = 0) }, sigma_y = rgamma(1, sigma_y_prior[1], sigma_y_prior[2]), sigma_rw1 = if (k_rw1 > 0) { structure(rgamma(k_rw1, rw1_out$sigma[1], rw1_out$sigma[2]), dim = k_rw1) } else { structure(numeric(0), dim = 0) }, sigma_rw2 = if (k_rw2 > 0) { structure(rgamma(k_rw2, rw2_out$sigma[1], rw2_out$sigma[2]), dim = k_rw2) } else { structure(numeric(0), dim = 0) }), simplify = FALSE) } stanfit <- sampling(stanmodels$walker_lm, data = stan_data, chains = chains, init = init, pars = c("sigma_y", "sigma_rw1", "sigma_rw2", "beta_fixed", "beta_rw", "nu", "y_fit", "y_rep", "log_lik"), ...) structure(list(stanfit = stanfit, y = y, xreg_fixed = xreg_fixed, xreg_rw = xreg_rw, distribution = "gaussian", data = if(return_data) stan_data else NULL, call = mc), class = "walker_fit") } #' Bayesian generalized linear model with time-varying coefficients #' #' Function \code{walker_glm} is a generalization of \code{walker} for non-Gaussian #' models. Compared to \code{walker}, the returned samples are based on Gaussian approximation, #' which can then be used for exact-approximate analysis by weighting the sample properly. These weights #' are also returned as a part of the \code{stanfit} (they are generated in the #' generated quantities block of Stan model). Note that plotting functions \code{pp_check}, #' \code{plot_coefs}, and \code{plot_predict} resample the posterior based on weights #' before plotting, leading to "exact" analysis. #' #' The underlying idea of \code{walker_glm} is based on paper #' "Importance sampling type estimators based on approximate marginal MCMC" by #' Vihola M, Helske J and Franks J which is available at ArXiv. #' #' \code{walker_glm} uses the global approximation (i.e. start of the MCMC) instead of more accurate #' but slower local approximation (where model is approximated at each iteration). #' However for these restricted models global approximation should be sufficient, #' assuming the the initial estimate of the conditional mode of p(xbeta | y, sigma) not too #' far away from the true posterior. Therefore by default \code{walker_glm} first finds the #' maximum likelihood estimates of the standard deviation parameters #' (using \code{\link[KFAS]{KFAS}}) package, and #' constructs the approximation at that point, before running the Bayesian #' analysis. #' #' @inheritParams walker #' @importFrom KFAS SSModel SSMcustom fitSSM approxSSM #' @param distribution Either \code{"poisson"} or \code{"binomial"}. 
#' @param initial_mode The initial guess of the fitted values on the log scale.
#' Defines the Gaussian approximation used in the MCMC.
#' Either \code{"obs"} (corresponds to log(y+0.1) in the Poisson case),
#' \code{"glm"} (the mode is obtained from a time-invariant GLM), \code{"kfas"}
#' (the default; the mode is obtained from the maximum likelihood estimate of the model),
#' or a numeric vector (custom guess).
#' @param u For the Poisson model, a vector of exposures i.e. \eqn{E(y) = u*exp(x*beta)}.
#' For binomial, a vector containing the number of trials. Defaults to 1.
#' @param mc_sim Number of samples used in importance sampling. Default is 50.
#' @param return_data If \code{TRUE}, returns the data input to \code{sampling}. This is needed for
#' \code{lfo}.
#' @return A list containing the \code{stanfit} object, observations \code{y},
#' covariates \code{xreg_fixed}, and \code{xreg_rw}.
#' @seealso Package \code{diagis} on CRAN, which provides functions for computing weighted
#' summary statistics.
#' @export
#' @examples
#'
#' set.seed(1)
#' n <- 25
#' x <- rnorm(n, 1, 1)
#' beta <- cumsum(c(1, rnorm(n - 1, sd = 0.1)))
#'
#' level <- -1
#' u <- sample(1:10, size = n, replace = TRUE)
#' y <- rpois(n, u * exp(level + beta * x))
#' ts.plot(y)
#'
#' # note very small number of iterations for the CRAN checks!
#' out <- walker_glm(y ~ -1 + rw1(~ x, beta = c(0, 10),
#'   sigma = c(2, 10)), distribution = "poisson",
#'   iter = 200, chains = 1, refresh = 0)
#' print(out$stanfit, pars = "sigma_rw1") ## approximate results
#' if (require("diagis")) {
#'   weighted_mean(extract(out$stanfit, pars = "sigma_rw1")$sigma_rw1,
#'     extract(out$stanfit, pars = "weights")$weights)
#' }
#' plot_coefs(out)
#' pp_check(out)
#'
#' \dontrun{
#' data("discoveries", package = "datasets")
#' out <- walker_glm(discoveries ~ -1 +
#'   rw2(~ 1, beta = c(0, 10), sigma = c(2, 10), nu = c(0, 2)),
#'   distribution = "poisson", iter = 2000, chains = 1, refresh = 0)
#'
#' plot_fit(out)
#'
#' # Dummy covariate example
#'
#' fit <- walker_glm(VanKilled ~ -1 +
#'   rw1(~ law, beta = c(0, 1), sigma = c(2, 10)), distribution = "poisson",
#'   data = as.data.frame(Seatbelts), chains = 1, refresh = 0)
#'
#' # compute effect * law
#' d <- coef(fit, transform = function(x) {
#'   x[, 2, 1:170] <- 0
#'   x
#' })
#'
#' require("ggplot2")
#' d %>% ggplot(aes(time, mean)) +
#'   geom_ribbon(aes(ymin = `2.5%`, ymax = `97.5%`), fill = "grey90") +
#'   geom_line() + facet_wrap(~ beta, scales = "free") + theme_bw()
#' }
#'
walker_glm <- function(formula, data, beta, init, chains,
  return_x_reg = FALSE, distribution, initial_mode = "kfas", u, mc_sim = 50,
  return_data = TRUE, ...) {

  distribution <- match.arg(distribution, choices = c("poisson", "binomial"))
  if (missing(data)) {
    data <- environment(formula)
  } else {
    data <- as.data.frame(data)
  }
  # Modifying formula object, catching special functions
  mc <- match.call()
  mf <- match.call(expand.dots = FALSE)
  mf <- mf[c(1L, match(c("formula", "data"), names(mf), 0L))]
  mf[[1L]] <- quote(stats::model.frame)
  mf$na.action <- as.name("na.pass")
  mf$drop.unused.levels <- TRUE

  specials <- c("rw1", "rw2")
  all_terms <- terms(formula, specials = specials, data = data)
  rws <- unlist(attr(all_terms, "specials"))
  if (length(rws) > 0) {
    if (length(attr(all_terms, "term.labels")) == length(rws)){
      all_terms <- terms(update.formula(all_terms, . ~ . 
+ .emptyx.), specials = specials) } drops <- which(attr(all_terms, "term.labels") %in% rownames(attr(all_terms, "factors"))[rws]) mf$formula <- formula(drop.terms(all_terms, drops, keep.response = TRUE)) mf$formula <- update.formula(mf$formula, . ~ . - .emptyx., simplify = TRUE) } else { stop("Model does not contain time-varying part, use ordinary GLM instead.") } # build y and xreg mf <- eval(mf, parent.frame()) y <- model.response(mf, "numeric") n <- length(y) xreg_fixed <- model.matrix(attr(mf, "terms"), mf) ## RWs vars <- attr(all_terms, "variables") if (!is.null(attr(all_terms, "specials")$rw1)) { comp <- vars[[1 + attr(all_terms, "specials")$rw1[1]]] rw1_out <- eval(comp, envir = data, enclos = parent.frame()) # only intercept if (nrow(rw1_out$xreg) == 0) { rw1_out$xreg <- matrix(1, n, 1) rw1_out$gamma <- matrix(1, 1, n) } if (nrow(rw1_out$xreg) != n) stop("length of the series and covariates do not match.") } else { rw1_out <- list(xreg = matrix(0, n, 0), beta = rep(1, 2), sigma = rep(1, 2), gamma = matrix(0, 0, n)) } if (!is.null(attr(all_terms, "specials")$rw2)) { comp <- vars[[1 + attr(all_terms, "specials")$rw2[1]]] rw2_out <- eval(comp, envir = data, enclos = parent.frame()) # only intercept if (nrow(rw2_out$xreg) == 0) { rw2_out$xreg <- matrix(1, n, 1) rw2_out$gamma <- matrix(1, 1, n) } if (nrow(rw2_out$xreg) != n) stop("length of the series and covariates do not match.") } else { rw2_out <- list(xreg = matrix(0, n, 0), beta = rep(1, 2), sigma = rep(1, 2), nu = rep(1, 2), gamma = matrix(0, 0, n)) } xreg_rw <- cbind(rw1_out$xreg, rw2_out$xreg) k_fixed <- max(0, ncol(xreg_fixed)) k_rw1 <- max(0, ncol(rw1_out$xreg)) k_rw2 <- max(0, ncol(rw2_out$xreg)) if (return_x_reg) return(list(xreg_fixed = xreg_fixed, xreg_rw = xreg_rw)) if (any(is.na(xreg_fixed)) || any(is.na(xreg_rw))) stop("Missing values in covariates are not allowed.") if(k_fixed > 0) check_normal(beta, "beta") if (missing(u)) { u <- rep(1, n) } if (length(u) == 1) u <- rep(u, n) if (length(u) != n) stop("Length of 'u' should be equal to the number of observations. ") if(any(u <= 0)) stop("All values of 'u' must be positive. ") if(distribution == "binomial" && any(u < y)) stop("Number of trials 'u' must be larger or equal to number of successes y. 
") beta_fixed_mean = if (k_fixed > 0) beta[1] else 1 beta_fixed_sd = if (k_fixed > 0) beta[2] else 1 beta_rw1_mean = rw1_out$beta[1] beta_rw1_sd = rw1_out$beta[2] beta_rw2_mean = rw2_out$beta[1] beta_rw2_sd = rw2_out$beta[2] nu_mean = rw2_out$nu[1] nu_sd = rw2_out$nu[2] if (is.numeric(initial_mode)) { pseudo_H <- 1 / (u * exp(initial_mode)) pseudo_y <- y * pseudo_H + initial_mode - 1 } else { switch(initial_mode, obs = { expmode <- y / u + 0.1 pseudo_H <- 1 / (u * expmode) pseudo_y <- y * pseudo_H + log(expmode) - 1 }, glm = { fit <- glm(y ~ ., data = data.frame(cbind(xreg_fixed, xreg_rw)), offset = log(u), family = poisson) pseudo_H <- 1 / fit$fitted.values pseudo_y <- y * pseudo_H + fit$linear.predictors - log(u) - 1 }, kfas = { m <- k_fixed + k_rw1 + 2 * k_rw2 Zt <- array(0, dim = c(1, m, n)) if (k_fixed > 0) { Zt[1, 1:k_fixed, ] <- t(xreg_fixed) } Zt[1, (k_fixed + 1):(k_fixed + k_rw1 + k_rw2),] <- t(xreg_rw) Tt <- Rt <- diag(m) if(k_rw2 > 0) { Tt[(k_fixed + k_rw1 + 1):(k_fixed + k_rw1 + k_rw2), (k_fixed + k_rw1 + k_rw2 + 1):m] <- diag(k_rw2) } Qt <- diag(rep(c(0, NA, 0, NA), times = c(k_fixed, k_rw1, k_rw2, k_rw2)), m) a1 <- rep(c(beta_fixed_mean, beta_rw1_mean, beta_rw2_mean, nu_mean), times = c(k_fixed, k_rw1, k_rw2, k_rw2)) P1 <- diag(rep(c(beta_fixed_sd, beta_rw1_sd, beta_rw2_sd, nu_sd), times = c(k_fixed, k_rw1, k_rw2, k_rw2)), m) P1inf <- diag(0, m) model <- SSModel(y ~ -1 + SSMcustom(Zt, Tt, Rt, Qt, a1, P1, P1inf), distribution = distribution, u = u) fit <- fitSSM(model, inits = rep(-1, k_rw1 + k_rw2), method = "BFGS") app <- approxSSM(fit$model) pseudo_H <- as.numeric(app$H) pseudo_y <- as.numeric(app$y) }, stop("Argument 'initial_mode' should be either 'obs', 'glm', 'kfas', or a numeric vector.") ) } stan_data <- list( k_fixed = k_fixed, k_rw1 = k_rw1, k_rw2 = k_rw2, m = k_rw1 + 2 * k_rw2, k = k_rw1 + k_rw2, n = n, n_lfo = n, xreg_fixed = xreg_fixed, xreg_rw = t(xreg_rw), beta_fixed_mean = beta_fixed_mean, beta_fixed_sd = beta_fixed_sd, beta_rw1_mean = beta_rw1_mean, beta_rw1_sd = beta_rw1_sd, beta_rw2_mean = beta_rw2_mean, beta_rw2_sd = beta_rw2_sd, sigma_rw1_shape = rw1_out$sigma[1], sigma_rw1_rate = rw1_out$sigma[2], sigma_rw2_shape = rw2_out$sigma[1], sigma_rw2_rate = rw2_out$sigma[2], nu_mean = nu_mean, nu_sd = nu_sd, y = pseudo_y, y_miss = as.integer(is.na(y)), Ht = pseudo_H, y_original = y, u = as.integer(u), distribution = pmatch(distribution, c("poisson", "binomial")), N = mc_sim, gamma_rw1 = rw1_out$gamma, gamma_rw2 = rw2_out$gamma ) stan_data$y[is.na(y)] <- 0 ## Stan does not accept NA's if (missing(chains)) chains <- 4 if (missing(init)) { init <- replicate(chains, list(beta_fixed = if (k_fixed > 0) { structure(rnorm(k_fixed, beta[1], beta[2] / 10), dim = k_fixed) } else { structure(numeric(0), dim = 0) }, sigma_rw1 = if (k_rw1 > 0) { structure(rgamma(k_rw1, rw1_out$sigma[1], rw1_out$sigma[2]), dim = k_rw1) } else { structure(numeric(0), dim = 0) }, sigma_rw2 = if (k_rw2 > 0) { structure(rgamma(k_rw2, rw2_out$sigma[1], rw2_out$sigma[2]), dim = k_rw2) } else { structure(numeric(0), dim = 0) }), simplify = FALSE) } stanfit <- sampling(stanmodels$walker_glm, data = stan_data, chains = chains, init = init, pars = c("sigma_rw1", "sigma_rw2", "beta_fixed", "beta_rw", "nu", "y_fit", "y_rep", "weights", "log_lik"), ...) structure(list(stanfit = stanfit, y = y, xreg_fixed = xreg_fixed, xreg_rw = xreg_rw, u = u, distribution = distribution, data = if(return_data) stan_data else NULL, call = mc), class = "walker_fit") }
/scratch/gouwar.j/cran-all/cranData/walker/R/walker.R
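# --- Sketch of the 'gamma_y' argument of walker() documented above: a
# length-n vector of known weights so that the observation noise sd at time t
# is gamma_y[t] * sigma_y, e.g. for a measurement period known to be noisier.
# Toy data assumed. ---
library(walker)
set.seed(6)
n <- 80
x <- rnorm(n)
gam <- rep(c(1, 3), each = n / 2) # second half measured with 3 times the noise sd
y <- rnorm(n, cumsum(rnorm(n, 0, 0.1)) + x, 0.4 * gam)
fit <- walker(y ~ -1 + rw1(~ x, beta = c(0, 2), sigma = c(2, 10)),
  sigma_y_prior = c(2, 1), gamma_y = gam, chains = 1, iter = 500, refresh = 0)
print(fit$stanfit, pars = "sigma_y")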
#' Comparison of naive and state space implementation of RW1 regression model
#'
#' This function is the first iteration of the function \code{walker},
#' which supports only a time-varying model where all coefficients follow
#' first order random walks.
#' This is kept as part of the package in order to compare the "naive" and
#' state space versions of the model in the vignette,
#' but there is little reason to use it for other purposes.
#'
#' @export
#' @param formula An object of class \code{\link[stats:formula]{formula}}. See \code{\link{lm}} for details.
#' @param data An optional data.frame or object coercible to such, as in \code{\link{lm}}.
#' @param beta A matrix with \eqn{k} rows and 2 columns, where the first column defines the
#' prior means of the Gaussian priors of the corresponding \eqn{k} regression coefficients,
#' and the second column defines the standard deviations of those prior distributions.
#' @param sigma A matrix with \eqn{k + 1} rows and two columns with a similar structure to
#' \code{beta}, with the first row corresponding to the prior of the standard deviation of the
#' observation level noise, and the rest of the rows defining the priors for the standard deviations of
#' the random walk noise terms. The prior distributions for all sigmas are
#' Gaussians truncated to the positive real axis. For non-Gaussian models, this should contain only k rows.
#' For the second order random walk model, these priors correspond to the slope level standard deviations.
#' @param naive Only used for the \code{walker} function.
#' If \code{TRUE}, use the "standard" approach which samples the joint posterior
#' \eqn{p(beta, sigma | y)}. If \code{FALSE} (the default), use the marginalisation approach
#' where we sample the marginal posterior \eqn{p(sigma | y)} and generate the samples of
#' \eqn{p(beta | sigma, y)} using state space modelling techniques
#' (namely the simulation smoother by Durbin and Koopman (2002)). Both methods give asymptotically
#' identical results, but the latter approach is computationally much more efficient.
#' @param return_x_reg If \code{TRUE}, does not perform sampling, but instead returns the matrix of
#' predictors after processing the \code{formula}.
#' @param chains Number of Markov chains. Default is 4.
#' @param init Initial value specification, see \code{\link{sampling}}.
#' @param ... Additional arguments to \code{\link{sampling}}.
#' @examples
#' \dontrun{
#' ## Comparing the approaches, note that with such small data
#' ## the differences aren't huge, but try the same with n = 500 and/or more terms... 
#' set.seed(123)
#' n <- 100
#' beta1 <- cumsum(c(0.5, rnorm(n - 1, 0, sd = 0.05)))
#' beta2 <- cumsum(c(-1, rnorm(n - 1, 0, sd = 0.15)))
#' x1 <- rnorm(n, 1)
#' x2 <- 0.25 * cos(1:n)
#' ts.plot(cbind(beta1 * x1, beta2 * x2), col = 1:2)
#' u <- cumsum(rnorm(n))
#' y <- rnorm(n, u + beta1 * x1 + beta2 * x2)
#' ts.plot(y)
#' lines(u + beta1 * x1 + beta2 * x2, col = 2)
#' # note: walker_rw1 takes matrix-valued priors and a plain formula,
#' # unlike walker (the original example mistakenly used the walker interface)
#' kalman_walker <- walker_rw1(y ~ x1 + x2, iter = 2000, chains = 1,
#'   beta = cbind(0, rep(2, 3)), sigma = cbind(0, rep(2, 4)))
#' print(kalman_walker$stanfit, pars = c("sigma_y", "sigma_b"))
#' betas <- extract(kalman_walker$stanfit, "beta")[[1]]
#' ts.plot(cbind(u, beta1, beta2, apply(betas, 2, colMeans)),
#'   col = 1:3, lty = rep(2:1, each = 3))
#' sum(get_elapsed_time(kalman_walker$stanfit))
#' naive_walker <- walker_rw1(y ~ x1 + x2, iter = 2000, chains = 1,
#'   beta = cbind(0, rep(2, 3)), sigma = cbind(0, rep(2, 4)),
#'   naive = TRUE)
#' print(naive_walker$stanfit, pars = c("sigma_y", "sigma_b"))
#' sum(get_elapsed_time(naive_walker$stanfit))
#'
#' ## Larger problem, this takes some time with the naive approach
#'
#' set.seed(123)
#' n <- 500
#' beta1 <- cumsum(c(1.5, rnorm(n - 1, 0, sd = 0.05)))
#' beta2 <- cumsum(c(-1, rnorm(n - 1, 0, sd = 0.5)))
#' beta3 <- cumsum(c(-1.5, rnorm(n - 1, 0, sd = 0.15)))
#' beta4 <- 2
#' x1 <- rnorm(n, 1)
#' x2 <- 0.25 * cos(1:n)
#' x3 <- runif(n, 1, 3)
#' ts.plot(cbind(beta1 * x1, beta2 * x2, beta3 * x3), col = 1:3)
#' a <- cumsum(rnorm(n))
#' signal <- a + beta1 * x1 + beta2 * x2 + beta3 * x3
#' y <- rnorm(n, signal)
#' ts.plot(y)
#' lines(signal, col = 2)
#' kalman_walker <- walker_rw1(y ~ x1 + x2 + x3, iter = 2000, chains = 1,
#'   beta = cbind(0, rep(2, 4)), sigma = cbind(0, rep(2, 5)))
#' print(kalman_walker$stanfit, pars = c("sigma_y", "sigma_b"))
#' betas <- extract(kalman_walker$stanfit, "beta")[[1]]
#' ts.plot(cbind(a, beta1, beta2, beta3, apply(betas, 2, colMeans)),
#'   col = 1:4, lty = rep(2:1, each = 4))
#' sum(get_elapsed_time(kalman_walker$stanfit))
#' # need to increase adapt_delta in order to get rid of divergences
#' # and max_treedepth to get rid of related warnings
#' # and still we end up with a low BFMI warning after hours of computation
#' naive_walker <- walker_rw1(y ~ x1 + x2 + x3, iter = 2000, chains = 1,
#'   beta = cbind(0, rep(2, 4)), sigma = cbind(0, rep(2, 5)),
#'   naive = TRUE, control = list(adapt_delta = 0.9, max_treedepth = 15))
#' print(naive_walker$stanfit, pars = c("sigma_y", "sigma_b"))
#' sum(get_elapsed_time(naive_walker$stanfit))
#' }
#'
walker_rw1 <- function(
  formula, data, beta, sigma, init, chains,
  naive = FALSE, return_x_reg = FALSE, ...) {

  # build y and xreg
  mf <- match.call(expand.dots = FALSE)
  mf <- mf[c(1L, match(c("formula", "data"), names(mf), 0L))]
  mf$drop.unused.levels <- TRUE
  mf$na.action <- "na.pass"
  mf[[1L]] <- quote(stats::model.frame)
  mf <- eval(mf, parent.frame())
  y <- model.response(mf, "numeric")
  n <- length(y)
  xreg <- model.matrix(attr(mf, "terms"), mf)
  if (return_x_reg) return(xreg)
  k <- ncol(xreg)
  xreg_new <- matrix(0, 0, k)
  n_new <- 0L
  if (any(is.na(xreg))) stop("Missing values in covariates are not allowed.")
  if (any(is.na(y))) stop("Missing values in response are not (yet) allowed.")
  if (!identical(dim(beta), c(k, 2L))) {
    stop("beta should be a k x 2 matrix containing columns of prior means and sds for each of the k coefficients. ")
  }
  if (!identical(dim(sigma), c(k + 1L, 2L))) {
    stop("sigma should be a (k + 1) x 2 matrix containing columns of prior means and sds for each of the k + 1 standard deviations. 
") } stan_data <- list(k = k, n = n, y = y, xreg = t(xreg), n_new = n_new, xreg_new = t(xreg_new), beta_mean = structure(beta[, 1], dim = k), beta_sd = structure(beta[, 2], dim = k), sigma_mean = sigma[, 1], sigma_sd = sigma[, 2]) if (missing(chains)) chains <- 4 if (missing(init)) { init <- replicate(chains, list(sigma_y = abs(rnorm(1, sigma[1, 1], sigma[1, 2])), sigma_b = structure(abs(rnorm(k, sigma[-1, 1], sigma[-1, 2])), dim = k), beta = structure(rnorm(k, beta[, 1], beta[, 2]), dim = k)), simplify = FALSE) } args <- list(...) stanfit <- if (naive) { if (is.null(args$pars)) { args$pars <- c("sigma_y", "sigma_b", "beta") } do.call(sampling, c(list(object = stanmodels$rw1_model_naive, data = stan_data, chains = chains, init = init), args)) } else { if (is.null(args$pars)) { args$pars <-c("sigma_y", "sigma_b", "beta", "y_rep", if (n_new > 0) c("y_new", "beta_new")) } do.call(sampling, c(list(object = stanmodels$rw1_model, data = stan_data, chains = chains, init = init), args)) } structure(list(stanfit = stanfit, y = y, xreg = xreg, xreg_new = xreg_new), class = "walker_fit_old") }
/scratch/gouwar.j/cran-all/cranData/walker/R/walker_rw1.R
.onAttach <- function(libname, pkgname) { note <- "Note: Since walker version 1.0.0, the prior distribution for the standard deviation parameters is Gamma(shape, rate)." packageStartupMessage(paste(strwrap(note), collapse = "\n")) }
/scratch/gouwar.j/cran-all/cranData/walker/R/zzz.R
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(echo = TRUE) library(walker) ## ----example------------------------------------------------------------------ set.seed(1) n <- 100 beta1 <- cumsum(c(0.5, rnorm(n - 1, 0, sd = 0.05))) beta2 <- cumsum(c(-1, rnorm(n - 1, 0, sd = 0.15))) x1 <- rnorm(n, mean = 2) x2 <- cos(1:n) rw <- cumsum(rnorm(n, 0, 0.5)) ts.plot(cbind(rw, beta1 * x1, beta2 * x2), col = 1:3) ## ----observations------------------------------------------------------------- signal <- rw + beta1 * x1 + beta2 * x2 y <- rnorm(n, signal, 0.5) ts.plot(cbind(signal, y), col = 1:2) ## ----walker------------------------------------------------------------------- set.seed(1) fit <- walker(y ~ -1 + rw1(~ x1 + x2, beta = c(0, 10), sigma = c(2, 10)), refresh = 0, chains = 1, sigma_y = c(2, 1)) ## ----pars--------------------------------------------------------------------- print(fit$stanfit, pars = c("sigma_y", "sigma_rw1")) library(bayesplot) mcmc_areas(as.matrix(fit$stanfit), regex_pars = c("sigma_y", "sigma_rw1")) ## ----plot_with_true_betas----------------------------------------------------- betas <- summary(fit$stanfit, "beta_rw")$summary[, "mean"] ts.plot(cbind(rw, beta1, beta2, matrix(betas, ncol = 3)), col = rep(1:3, 2), lty = rep(1:2, each = 3)) ## ----plot_pretty_betas-------------------------------------------------------- plot_coefs(fit, scales = "free") + ggplot2::theme_bw() ## ----ppc---------------------------------------------------------------------- pp_check(fit) ## ----------------------------------------------------------------------------- library(ggplot2) library(dplyr) fitted <- fitted(fit) # estimates given our actual observed data newdata <- data.frame(x1 = c(x1[1:59], rep(0, 41)), x2 = x2) pred_x1_1 <- predict_counterfactual(fit, newdata, type = "mean") cbind(as.data.frame(rbind(fitted, pred_x1_1)), type = rep(c("observed", "counterfactual"), each = n), time = 1:n) %>% ggplot(aes(x = time, y = mean)) + geom_ribbon(aes(ymin = `2.5%`, ymax = `97.5%`, fill = type), alpha = 0.2) + geom_line(aes(colour = type)) + theme_bw() ## ----prediction--------------------------------------------------------------- new_data <- data.frame(x1 = rnorm(10, mean = 2), x2 = cos((n + 1):(n + 10))) pred <- predict(fit, new_data) plot_predict(pred) ## ----walker_rw2--------------------------------------------------------------- fit_rw2 <-walker(y ~ -1 + rw2(~ x1 + x2, beta = c(0, 10), sigma = c(2, 0.001), nu = c(0, 10)), refresh = 0, init = 0, chains = 1, sigma_y = c(2, 0.001)) plot_coefs(fit_rw2, scales = "free") + ggplot2::theme_bw() ## ----naive, eval = FALSE------------------------------------------------------ # set.seed(1) # set seed to simulate same initial values for both methods # naive_fit <- walker_rw1(y ~ x1 + x2, refresh = 0, # chains = 2, cores = 2, iter = 1e4, # beta = cbind(0, rep(5, 3)), sigma = cbind(0, rep(2, 4)), # naive = TRUE, # control = list(adapt_delta = 0.999, max_treedepth = 12)) # # set.seed(1) # kalman_fit <- walker_rw1(y ~ x1 + x2, refresh = 0, # chains = 2, cores = 2, iter = 1e4, # beta = cbind(0, rep(5, 3)), sigma = cbind(0, rep(2, 4)), # naive = FALSE) ## ----naive-run, eval = FALSE, echo = FALSE------------------------------------ # # actual code run, remove betas in order to reduce size of the package # set.seed(1) # naive_fit <- walker_rw1(y ~ x1 + x2, refresh = 0, # chains = 2, cores = 2, iter = 1e4, # beta = cbind(0, rep(5, 3)), sigma = cbind(0, rep(2, 4)), # naive = TRUE, save_warmup = FALSE, # pars 
= c("sigma_y", "sigma_b"), # control = list(adapt_delta = 0.999, max_treedepth = 12)) # # set.seed(1) # kalman_fit <- walker_rw1(y ~ x1 + x2, refresh = 0, # chains = 2, cores = 2, iter = 1e4, # beta = cbind(0, rep(5, 3)), sigma = cbind(0, rep(2, 4)), # naive = FALSE, save_warmup = FALSE, # pars = c("sigma_y", "sigma_b")) # # save(naive_fit, kalman_fit, file = "vignette_results.rds") ## ---- echo = FALSE------------------------------------------------------------ load("vignette_results.rds") ## ----warnings-and-time-------------------------------------------------------- check_hmc_diagnostics(naive_fit$stanfit) check_hmc_diagnostics(kalman_fit$stanfit) get_elapsed_time(naive_fit$stanfit) get_elapsed_time(kalman_fit$stanfit) ## ----main-results------------------------------------------------------------- print(naive_fit$stanfit, pars = c("sigma_y", "sigma_b")) print(kalman_fit$stanfit, pars = c("sigma_y", "sigma_b"))
/scratch/gouwar.j/cran-all/cranData/walker/inst/doc/walker.R
--- title: "Efficient Bayesian generalized linear models with time-varying coefficients" author: "Jouni Helske" date: "13 October 2022" output: html_document: default bibliography: walker.bib link-citations: true vignette: | %\VignetteIndexEntry{Efficient Bayesian generalized linear models with time-varying coefficients} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) library(walker) ``` ## Introduction This is complementary vignette to the paper @walkerpaper with more detailed examples and a short comparison with naive `Stan` implementation for regression model with time-varying coefficients. The varying coefficient regression models are extension to basic linear regression models where instead of constant but unknown regression coefficients, the underlying coefficients are assumed to vary over time according to random walk. In their simplest form these models can be used to model regression models with additional time series component, and they also allow robust modelling of phenomenas where the effect size of the predictor variables can vary during the period of the study, for example due to interactions with unobserved counfounders. The `R` [@R] package `walker` provides an efficient method for fully Bayesian inference of such models, where the main computations are performed using the Markov chain Monte Carlo (MCMC) algorithms provided by `Stan` [@stan, @rstan]. This also allows the use of many general diagnostic and graphical tools provided by several `Stan` related `R` packages such as `ShinyStan` [@shinystan]. the linear model with time-varying coefficients is defined as A linear model with time-varying coefficients defined as $$ \begin{aligned} y_t &= x'_t \beta_t + \epsilon_t, \quad t = 1,\ldots, n\\ \beta_{t+1} &= \beta_t + \eta_t, \end{aligned} $$ where $y_t$ is the observation at time $t$, $x_t$ contains the corresponding predictor variables, $\beta_t$ is a $k$ dimensional vector of regression coefficients at time $t$, $\epsilon_t \sim N(0, \sigma^2_{\epsilon})$, and $\eta_t \sim N(0, D)$, with $D$ being $k \times k$ diagonal matrix with diagonal elements $\sigma^2_{i,\eta}$, $i=1,\ldots,k$. Denote the unknown parameters of the model by $\beta = (\beta'_1, \ldots, \beta'_n)'$ and $\sigma = (\sigma_{\epsilon}, \sigma_{1, \eta}, \ldots, \sigma_{k, \eta})$. We define priors for first $\beta_1$ as $N(\mu_{\beta_1}, \sigma_{\beta_1})$, and for $\sigma_i \sim N(\mu_{\sigma_i}, \sigma_{\sigma_i})$, $i=1,\ldots,k+1$, truncated to positive values. Although in principle writing this model as above in `Stan` is straightforward, standard implementation can lead to some computational problems (see the illustration in next Section). An alternative solution used by `walker` is based on the marginalization of the regression coefficients $\beta$ during the MCMC sampling by using the Kalman filter. This provides fast and accurate inference of marginal posterior $p(\sigma | y)$. Then, the corresponding joint posterior $p(\sigma, \beta | y) = p(\beta | \sigma, y)p(\sigma | y)$ can be obtained by simulating the regression coefficients given marginal posterior of standard deviations. This sampling can be performed for example by simulation smoothing algorithm by @durbin. 
Note that we have opted to sample the $\beta$ parameters given the $\sigma$'s, but it is also possible to obtain somewhat more accurate summary statistics, such as the mean and variance of these parameters, by using the standard Kalman smoother for the computation of $\textrm{E}(\beta| \sigma, y)$ and $\textrm{Var}(\beta| \sigma, y)$, and then using the law of total expectation.

In this vignette we first introduce the basic use with a simple linear regression model with first-order random walk coefficients, and then discuss extensions to second-order random walks, as well as how to deal with non-Gaussian data.

## Illustration

Let us consider observations $y$ of length $n=100$, generated by a random walk (i.e. a time-varying intercept) and two predictors. This is a rather small problem, but it was chosen to make comparisons with the "naive" `Stan` implementation possible. For larger problems (in terms of the number of observations and especially the number of predictors) it is very difficult to get the naive implementation to work properly, as even after tweaking several parameters of the underlying MCMC sampler, one typically ends up with divergent transitions or a low BFMI index, meaning that the results are not to be trusted.

First we simulate the coefficients and the predictors:

```{r example}
set.seed(1)
n <- 100
beta1 <- cumsum(c(0.5, rnorm(n - 1, 0, sd = 0.05)))
beta2 <- cumsum(c(-1, rnorm(n - 1, 0, sd = 0.15)))
x1 <- rnorm(n, mean = 2)
x2 <- cos(1:n)
rw <- cumsum(rnorm(n, 0, 0.5))
ts.plot(cbind(rw, beta1 * x1, beta2 * x2), col = 1:3)
```

```{r observations}
signal <- rw + beta1 * x1 + beta2 * x2
y <- rnorm(n, signal, 0.5)
ts.plot(cbind(signal, y), col = 1:2)
```

Then we can call the function `walker`. The model is defined as a formula as in `lm`, and we can give several arguments which are passed to the `sampling` method of `rstan`, such as the number of iterations `iter` and the number of chains `chains` (the default values for these are 2000 and 4). In addition to these, we use the arguments `beta` and `sigma`, which define the Gaussian and Gamma prior distributions for $\beta_1$ and $\sigma$, respectively. These arguments should be vectors of length two defining the parameters of the prior distributions: mean and sd for the coefficients, and shape and rate for the standard deviation parameters.

```{r walker}
set.seed(1)
fit <- walker(y ~ -1 +
    rw1(~ x1 + x2, beta = c(0, 10), sigma = c(2, 10)),
  refresh = 0, chains = 1, sigma_y = c(2, 1))
```

We sometimes get a few (typically one) warning messages about numerical problems as the sampling algorithm warms up, but this is nothing to be concerned about (if more errors occur, then a GitHub issue for the `walker` package is more than welcome). The output of `walker` is a `walker_fit` object, which is essentially a list containing the `stanfit` from `Stan`'s `sampling` function, the original observations `y`, and the covariate matrix `xreg`. 
Thus we can use all the available tools for postprocessing `stanfit` objects:

```{r pars}
print(fit$stanfit, pars = c("sigma_y", "sigma_rw1"))
library(bayesplot)
mcmc_areas(as.matrix(fit$stanfit), regex_pars = c("sigma_y", "sigma_rw1"))
```

Let's check how well our estimates of $\beta$ coincide with the true values (the solid lines correspond to the true values):

```{r plot_with_true_betas}
betas <- summary(fit$stanfit, "beta_rw")$summary[, "mean"]
ts.plot(cbind(rw, beta1, beta2, matrix(betas, ncol = 3)),
  col = rep(1:3, 2), lty = rep(1:2, each = 3))
```

There is also a simpler (and prettier) `ggplot2` based plotting function for the coefficients:

```{r plot_pretty_betas}
plot_coefs(fit, scales = "free") + ggplot2::theme_bw()
```

### Posterior predictive checks

The `walker` function also returns samples from the posterior predictive distribution $p(y^{\textrm{rep}} | y) = \int p(y^{\textrm{rep}} | \beta, \sigma, y) p(\beta, \sigma | y) \textrm{d}\beta\textrm{d}\sigma$. This can be used, for example, in the assessment of model fit to the data. By comparing the replicated series (mean and 95% quantiles in black) and the original observations (in red) we see a very good overlap, which is not that surprising given that we know the correct model:

```{r ppc}
pp_check(fit)
```

It is also possible to perform leave-future-out cross-validation (LFO-CV) for `walker` models. While exact LFO-CV can be computationally demanding, `walker` also supports approximate PSIS-LFO-CV [@lfo] via the function `lfo`.

### Counterfactual predictions

We can also produce "counterfactual" predictions using our estimated model. For example, we can ask what would have happened if the variable `x1` had been constant 0 from time point 60 onward:

```{r}
library(ggplot2)
library(dplyr)
fitted <- fitted(fit) # estimates given our actual observed data
newdata <- data.frame(x1 = c(x1[1:59], rep(0, 41)), x2 = x2)
pred_x1_1 <- predict_counterfactual(fit, newdata, type = "mean")

cbind(as.data.frame(rbind(fitted, pred_x1_1)),
  type = rep(c("observed", "counterfactual"), each = n),
  time = 1:n) %>%
  ggplot(aes(x = time, y = mean)) +
  geom_ribbon(aes(ymin = `2.5%`, ymax = `97.5%`, fill = type), alpha = 0.2) +
  geom_line(aes(colour = type)) +
  theme_bw()
```

### Out-of-sample prediction

It is also possible to obtain actual forecasts given new covariates $x^{new}$:

```{r prediction}
new_data <- data.frame(x1 = rnorm(10, mean = 2), x2 = cos((n + 1):(n + 10)))
pred <- predict(fit, new_data)
plot_predict(pred)
```

## Extensions: Smoother effects and non-Gaussian models

When modelling regression coefficients as a simple random walk, the posterior estimates of these coefficients can have large short-term variation which might not be realistic in practice. One way of imposing more smoothness on the estimates is to switch from random walk coefficients to integrated second-order random walk coefficients:

$$
\beta_{t+1} = \beta_t + \nu_t,\\
\nu_{t+1} = \nu_t + \eta_t.
$$

This is essentially a local linear trend model [@harvey] with the restriction that there is no noise on the $\beta$ level. This model can be estimated by switching the `rw1` function inside the walker formula to `rw2`, with an almost identical interface, but now the $\sigma$ parameters correspond to the standard deviations of the slope terms $\nu$. The Gaussian prior for $\nu_1$ must also be defined. 
Using the RW2 model, the coefficient estimates of our example model are clearly smoother:

```{r walker_rw2}
fit_rw2 <- walker(y ~ -1 +
    rw2(~ x1 + x2, beta = c(0, 10), sigma = c(2, 0.001), nu = c(0, 10)),
  refresh = 0, init = 0, chains = 1, sigma_y = c(2, 0.001))

plot_coefs(fit_rw2, scales = "free") + ggplot2::theme_bw()
```

So far we have focused on simple Gaussian linear regression. The above treatment cannot be directly extended to non-Gaussian models such as Poisson regression, as the marginal log-likelihood is intractable. However, it is possible to use relatively accurate Gaussian approximations, and the resulting approximate posterior can then be weighted using an importance sampling type correction [@vihola], leading to asymptotically exact inference (if necessary). The function `walker_glm` extends the package to handle Poisson and binomial observations using the aforementioned methodology. A further extension to the negative binomial distribution is planned for the future.

## Comparison with naive approach

We now compare the efficiency of the "naive" implementation and the state space approach. For this, we use the function `walker_rw1`, which supports only basic random walk models (here the priors for the standard deviations are defined as truncated normal distributions). We can perform the same analysis with the naive implementation by setting the argument `naive` to `TRUE`. Note that in this case we need to adjust the default sampling parameters (argument `control`) to avoid (most of the) problems in sampling.

```{r naive, eval = FALSE}
set.seed(1) # set seed to simulate same initial values for both methods
naive_fit <- walker_rw1(y ~ x1 + x2, refresh = 0,
  chains = 2, cores = 2, iter = 1e4,
  beta = cbind(0, rep(5, 3)), sigma = cbind(0, rep(2, 4)),
  naive = TRUE,
  control = list(adapt_delta = 0.999, max_treedepth = 12))

set.seed(1)
kalman_fit <- walker_rw1(y ~ x1 + x2, refresh = 0,
  chains = 2, cores = 2, iter = 1e4,
  beta = cbind(0, rep(5, 3)), sigma = cbind(0, rep(2, 4)),
  naive = FALSE)
```

```{r naive-run, eval = FALSE, echo = FALSE}
# actual code run, remove betas in order to reduce size of the package
set.seed(1)
naive_fit <- walker_rw1(y ~ x1 + x2, refresh = 0,
  chains = 2, cores = 2, iter = 1e4,
  beta = cbind(0, rep(5, 3)), sigma = cbind(0, rep(2, 4)),
  naive = TRUE, save_warmup = FALSE,
  pars = c("sigma_y", "sigma_b"),
  control = list(adapt_delta = 0.999, max_treedepth = 12))

set.seed(1)
kalman_fit <- walker_rw1(y ~ x1 + x2, refresh = 0,
  chains = 2, cores = 2, iter = 1e4,
  beta = cbind(0, rep(5, 3)), sigma = cbind(0, rep(2, 4)),
  naive = FALSE, save_warmup = FALSE,
  pars = c("sigma_y", "sigma_b"))

save(naive_fit, kalman_fit, file = "vignette_results.rds")
```

(In order to keep CRAN checks fast, the results here are precomputed.)

```{r, echo = FALSE}
load("vignette_results.rds")
```

With the naive implementation we get some warnings and a much higher computation time:

```{r warnings-and-time}
check_hmc_diagnostics(naive_fit$stanfit)
check_hmc_diagnostics(kalman_fit$stanfit)
get_elapsed_time(naive_fit$stanfit)
get_elapsed_time(kalman_fit$stanfit)
```

The naive implementation also produces much smaller effective sample sizes:

```{r main-results}
print(naive_fit$stanfit, pars = c("sigma_y", "sigma_b"))
print(kalman_fit$stanfit, pars = c("sigma_y", "sigma_b"))
```

## Discussion

In this vignette we illustrated the benefits of marginalisation in the context of time-varying regression models. 
The underlying idea is not new; this approach is typical in classic Metropolis-type algorithms for linear-Gaussian state space models, where the marginal likelihood $p(y | \theta)$ (where $\theta$ denotes the hyperparameters, i.e. not the latent states such as the $\beta$'s in the current context) is used in the computation of the acceptance probability. Here we rely on the readily available Hamiltonian Monte Carlo based `Stan` software, thus allowing us to enjoy the benefits of the diverse tools of the `Stan` community.

The original motivation behind `walker` was to test the efficiency of the importance sampling type weighting method of @vihola in a dynamic generalized linear model setting within the HMC context. Although some of our other preliminary results have suggested that a naive combination of HMC with IS weighting can provide rather limited computational gains, in the case of GLMs with time-varying coefficients the so-called global approximation technique [@vihola] provides a fast and robust alternative to the standard HMC approach of `Stan`.

## Acknowledgements

This work has been supported by the Academy of Finland research grants 284513, 312605, 311877, and 331817.

## References
/scratch/gouwar.j/cran-all/cranData/walker/inst/doc/walker.Rmd
/scratch/gouwar.j/cran-all/cranData/walker/vignettes/walker.Rmd
# HTTP Response Status Code Description
# 200 1 Walk Score successfully returned.
# 200 2 Score is being calculated and is not currently available.
# 404 30 Invalid latitude/longitude.
# 500 series 31 Walk Score API internal error.
# 200 40 Your WSAPIKEY is invalid.
# 200 41 Your daily API quota has been exceeded.
# 403 42 Your IP address has been blocked.

#' Get Walk Scores from the Walk Score API
#'
#' This package provides a tidy interface to the Walk Score API, a proprietary
#' API that measures a location's "walkability" using a number between 0 and 100.
#'
#' The Walk Score API has a free tier which allows 5,000 API calls per day, and
#' paid tiers with higher limits.
#'
#' This function makes it easy to spread your API calls out over a few days. When
#' you call the function for the first time, if necessary it creates a new column
#' of walk scores and assigns each row `NA`. Then, each row's walk score is populated
#' as the function gets a good API response. The function breaks automatically
#' upon detecting a rate limit, returning all results collected so far. When your
#' rate limit resets and you call the function again, it picks up from the first
#' `NA` walk score it finds and continues on. So make sure to save your results
#' after each batch, but you don't need to keep track of fine-grained batch issues
#' or worry about losing a whole batch if a response errors out--the function
#' handles that for you.
#'
#' You'll need a valid Walk Score API key to use this package.
#'
#' **Please Note** neither this package nor its author are affiliated with Walk
#' Score in any way, nor are any warranties made about this package or any data
#' available through the Walk Score API. "Walk Score" is copyrighted and a
#' registered trademark of its owner, *again, with whom we are not affiliated*.
#'
#' API documentation is available here: [https://www.walkscore.com/professional/api.php](https://www.walkscore.com/professional/api.php)
#'
#' @param df A `tibble` with columns named `lat` and `lon` containing latitude and longitude respectively.
#' @param apikey Character. A valid Walk Score API key.
#' @param polite_pause Numeric. The number of seconds to pause between API calls. Default is 0.2.
#' @param verbose Boolean. Should we print lots of info to the console?
#'
#' @return The input `tibble` with new columns containing Walk Score API responses.
#' @examples \dontrun{
#' df <- data.frame(lat = 45.378791, lon = -75.662508)
#' df <- walkscore::walkscore(df, apikey = "your api key")
#' }
#' @export
walkscore <- function(df, apikey, polite_pause = 0.2, verbose = FALSE){

  if (apikey == "") stop("API key is required.")

  # if we don't have any walkscores yet, add a column of NA values.
  # we'll use NA values to figure out which rows we need to process
  if (!"walkscore" %in% colnames(df)) df$walkscore <- NA_real_

  # get the indices of the items without walkscores
  df_na_indices <- which(is.na(df$walkscore))

  # loop through each row
  for (i in df_na_indices) {

    if (verbose) message("Row ", i)

    # if this row has a valid walkscore, skip it
    if (!is.na(df[i,]$walkscore)) {
      if (verbose) message(" Skipping! We already have a walkscore for this row.")
      next
    }

    if (verbose) message(" Trying to get walk score...")

    api_result <- try({
      url <- sprintf("https://api.walkscore.com/score?format=json&lat=%s&lon=%s&transit=1&bike=1&wsapikey=%s&address=%s",
                     df[i,]$lat, df[i,]$lon, apikey, "")

      api_response <- httr::GET(url)

      # get http status: did the api call work at all? 
http_status <- api_response$status_code if (http_status == 200){ if (verbose) message(" Success! HTTP 200 response..") api_response_content <- httr::content(api_response) # set up a default API response object, in case we error on the first try result <- api_response # if we got a good walkscore, format the results if (api_response_content$status == 1){ if (verbose) message(" Success again! A valid walk score...") result <- api_response_content |> unlist() |> dplyr::as_tibble(rownames = "name") |> tidyr::pivot_wider(names_from = "name", values_from = "value") |> dplyr::select(-dplyr::any_of(c("more_info_icon", "more_info_link", "help_link", "logo_url"))) } # end if api_response_content$status == 1 if (api_response_content$status != 1){ if (verbose) message(" Failure! Didn't get a valid walk score...") class(result) <- c(class(result), "error") # handle other conditions where we get good HTTP response but another kind of error if (api_response_content$status == "40") class(result) <- c(class(result), "keyinvalid", "break") if (api_response_content$status == "41") class(result) <- c(class(result), "ratelimit", "break") if (api_response_content$status == "42") class(result) <- c(class(result), "ipblocked", "break") } # end if api_response_content$status != 1 } # end if http_status == 200 if (http_status != 200) { if (verbose) message(" Failure! An invalid HTTP response..") # we got some other kind of http error result <- api_response class(result) <- c(class(result), "error", "httperror", "break") } # end if http_status != 200 result }) # if we didn't get an error, add the data if (!"error" %in% class(api_result)){ # add all the new info to the row in question # using base R so that it will create columns if they're not there yet # basic type checking # we set applicable columns to numeric at the end, before returning final results for (colname in colnames(result)) { # try to get the class of this input data column so we can type match # we get an error if the column name is not already in the dataframe-- # i.e. if it's new data class_of_old <- try(class(df[i,colname][[1]]), silent = TRUE) if ("try-error" %in% class(class_of_old)){ df[i,colname] <- result[,colname] } else if ("numeric" %in% class_of_old ) { df[i,colname] <- as.numeric(result[,colname]) } else if ("character" %in% class_of_old) { df[i,colname] <- as.character(result[,colname] ) } else if ("POSIXct" %in% class_of_old) { df[i,colname] <- as.POSIXct( result[,colname][[1]]) } # end if for response type } # end for (colname in colnames(result)) } # end if (!"error" in class(api_result)) # if we did get an error, deal with that here if ("error" %in% class(api_result)){ warning(sprintf("Bad API response processing row %s.", i)) # if we got an error so bad we need to break, then break # e.g. rate limit, invalid API key, ip blocked if ("break" %in% class(api_result)) { # default details gives all of the API key info. warning_details <- { step1 <- unlist(api_result) step2 <- paste0(names(step1),": ", step1) step3 <- paste0(step2, collapse="\n") paste0("**********************\nAPI Reponse Details:\n", step3) } # add helpful info if we got a helpful status code if ("ratelimit" %in% class(api_result)) warning_details <- sprintf("Rate limit reached. No more API calls possible until rate limit resets.\n%s", warning_details) if ("keyinvalid" %in% class(api_result)) warning_details <- sprintf("Invalid API key. 
Have you signed up for a Walkscore API key?\n%s", warning_details) if ("ipblocked" %in% class(api_result)) warning_details <- sprintf("IP address blocked.\n%s", warning_details) warning(warning_details) break } } # do a polite pause Sys.sleep(polite_pause) } # end for (i in 1:nrow(df)) # set any applicable columns to numeric df$walkscore <- as.numeric(df$walkscore) if ("bike.score" %in% colnames(df)) df$bike.score <- as.numeric(df$bike.score) if ("snapped_lon" %in% colnames(df)) df$snapped_lon <- as.numeric(df$snapped_lon) if ("snapped_lat" %in% colnames(df)) df$snapped_lat <- as.numeric(df$snapped_lat) return(df) }
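# A minimal batch workflow sketch (not from the package docs; it assumes the
# key is stored in a WALKSCORE_API_KEY environment variable, and that the
# extra column names come from the API response):
#
#   df <- data.frame(lat = c(45.3788, 43.6532), lon = c(-75.6625, -79.3832))
#   df <- walkscore(df, apikey = Sys.getenv("WALKSCORE_API_KEY"))
#   saveRDS(df, "walkscores_partial.rds")  # save progress between quota windows
#   # after the daily quota resets, resume from the first NA walkscore:
#   df <- walkscore(readRDS("walkscores_partial.rds"),
#                   apikey = Sys.getenv("WALKSCORE_API_KEY"))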
/scratch/gouwar.j/cran-all/cranData/walkscore/R/walkscore.R
checkTSsupport <- function(city,state,key){ ci <- tolower(city) st <- tolower(state) URL <- paste("http://transit.walkscore.com/transit/supported/cities/?wsapikey=",key,sep="") X <- character(0) X <- c(X, scan(file = URL, what = "", sep = "\n", quiet = TRUE)) ciline <- 0 ciline <- c(grep(ci,X),ciline) if (ciline[1] > 0){ stline <- 0 stline <- c(grep(st,X[ciline[1]+1]),stline) if (stline[1] > 0){ return(TRUE) } else { return(FALSE) } } else { return(FALSE) } }
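# Usage sketch (hypothetical city and key): check support before requesting a
# Transit Score, since getTS() fails for unsupported cities.
#
#   if (checkTSsupport("Seattle", "WA", key = "YOUR_WSAPIKEY")) {
#     ts <- getTS(-122.3321, 47.6062, "Seattle", "WA", key = "YOUR_WSAPIKEY")
#   }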
/scratch/gouwar.j/cran-all/cranData/walkscoreAPI/R/checkTSsupport.R
geoloc <- function(address,apikey){
    place <- address
    place <- gsub(" ","+",place)
    # build the request URL in one piece; the original split the string with a
    # stray backslash line continuation, which is not valid inside an R string
    URL <- paste("http://maps.google.com/maps/geo?q=",place,
        "&output=json&oe=utf8&sensor=true_or_false&key=",apikey,sep="")
    X <- character(0)
    X <- c(X, scan(file = URL, what = "", sep = "\n", quiet = TRUE))
    coord <- grep("\"coordinates\":",X)
    coord <- strsplit(X[coord],"\"coordinates\"")
    coord <- coord[[1]][2]
    coord <- gsub(": [ ","",coord,fixed=TRUE)
    coord <- strsplit(coord[[1]],",")
    long <- as.numeric(coord[[1]][1])
    lat <- as.numeric(coord[[1]][2])
    acc <- grep("Accuracy",X)
    acc <- strsplit(X[acc]," : ")
    acc <- gsub(",","",acc[[1]][2])
    acc <- as.numeric(acc)
    loc <- grep("LocalityName", X)
    loc <- strsplit(X[loc], " : ")
    loc <- gsub("\"","",loc[[1]][2],fixed=TRUE)
    loc <- gsub(",","",loc)
    admin <- grep("AdministrativeAreaName",X)
    admin <- strsplit(X[admin], " : ")
    admin <- gsub("\"","",admin[[1]][2],fixed=TRUE)
    admin <- gsub(",","",admin)
    country <- grep("CountryName",X)
    country <- strsplit(X[country], " : ")
    country <- gsub("\"","",country[[1]][2],fixed=TRUE)
    country <- gsub(",","",country)
    object <- list()
    class(object) <- "GoogleGeoloc"
    object$coordinates <- c(long,lat)
    object$accuracy <- acc
    object$city <- loc
    object$state <- admin
    object$country <- country
    return(object)
}
/scratch/gouwar.j/cran-all/cranData/walkscoreAPI/R/geoloc.R
getTS <- function(x,y,city,state,key){
    city <- gsub(" ", "+",city)
    URL <- paste("http://transit.walkscore.com/transit/score/?lat=",y,"&lon=",x,"&city=",city,"&state=",state,"&wsapikey=",key,sep="")
    X <- character(0)
    X <- c(X, scan(file = URL, what = "", sep = "\n", quiet = TRUE))
    string <- X[grep("transit_score",X)]
    string2 <- X[grep("ws_link",X)]
    string3 <- X[grep("description",X)]
    string4 <- X[grep("summary",X)]
    if (length(X) > 0){
        tscore <- strsplit(string,": ")
        tscore <- gsub(",","",tscore[[1]][2])
        tscore <- as.numeric(tscore)
        link <- strsplit(string2,": ")
        link <- gsub("\"","",link[[1]][2],fixed=TRUE)
        link <- gsub(", ","",link)
        desc <- strsplit(string3,": ")
        desc <- gsub("\"","",desc[[1]][2],fixed=TRUE)
        desc <- gsub(", ","",desc)
        sum <- strsplit(string4,": ")
        sum <- gsub("\"","",sum[[1]][2],fixed=TRUE)
        sum <- gsub(", ","",sum)
    } else {
        # stop here: the fields assembled below would be undefined without a
        # response, which previously produced a confusing secondary error
        stop("error, please check supported cities list")
    }
    object <- list()
    class(object) <- "TransitScore"
    object$transitscore <- tscore
    object$url <- link
    object$description <- desc
    object$summary <- sum
    return(object)
}
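# Usage sketch (hypothetical coordinates and key; the city must appear in
# the supported cities list returned by getTScities()):
#
#   ts <- getTS(-122.3321, 47.6062, "Seattle", "WA", key = "YOUR_WSAPIKEY")
#   ts$transitscore
#   ts$description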
/scratch/gouwar.j/cran-all/cranData/walkscoreAPI/R/getTS.R
getTScities <- function(key){ URL <- paste("http://transit.walkscore.com/transit/supported/cities/?wsapikey=",key,sep="") X <- character(0) X <- c(X, scan(file = URL, what = "", sep = "\n", quiet = TRUE)) citylines <- grep("\"city\":",X) statelines <- citylines + 1 citylist <- X[citylines] citylist <- gsub("\"city\":","",citylist) citylist <- gsub(" ","",citylist) citylist <- gsub("\"","",citylist,fixed=TRUE) citylist <- gsub(",","",citylist,fixed=TRUE) statelist <- X[statelines] statelist <- gsub("\"state\":","",statelist) statelist <- gsub(" ","",statelist) statelist <- gsub("\"","",statelist,fixed=TRUE) statelist <- gsub(",","",statelist,fixed=TRUE) print(paste(citylist,statelist)) }
/scratch/gouwar.j/cran-all/cranData/walkscoreAPI/R/getTScities.R
getWS <- function(x,y,key){
    URL <- paste("http://api.walkscore.com/score?lat=",y,"&lon=",x,"&wsapikey=",key,sep="")
    X <- character(0)
    X <- c(X, scan(file = URL, what = "", sep = "\n", quiet = TRUE))
    string <- X[grep("<walkscore>",X)]
    string2 <- X[grep("<description>",X)]
    string3 <- X[grep("<updated>",X)]
    string4 <- X[grep("<snapped_lat>",X)]
    string5 <- X[grep("snapped_lon>",X)]
    string6 <- X[grep("<status>",X)]
    st <- strsplit(string6,"<status>")
    st2 <- gsub("</status>","",st[[1]][2])
    status <- as.numeric(st2)
    if (status == 1){
        walk <- strsplit(string,"<walkscore>")
        walk2 <- gsub("</walkscore>","",walk[[1]][2])
        wscore <- as.numeric(walk2)
        des <- strsplit(string2,"<description>")
        des2 <- gsub("</description>","",des[[1]][2])
        descr <- des2
        up <- strsplit(string3,"<updated>")
        up2 <- gsub("</updated>","",up[[1]][2])
        update <- up2
        snlat <- strsplit(string4,"<snapped_lat>")
        snlat2 <- gsub("</snapped_lat>","",snlat[[1]][2])
        snla <- as.numeric(snlat2)
        snlon <- strsplit(string5,"<snapped_lon>")
        snlon2 <- gsub("</snapped_lon>","",snlon[[1]][2])
        snlo <- as.numeric(snlon2)
    } else {
        # use real NA values rather than the string "NA" so callers can test
        # the result with is.na()
        wscore <- NA
        descr <- NA
        update <- NA
        snlo <- NA
        snla <- NA
    }
    object <- list()
    class(object) <- "WalkScore"
    object$status <- status
    object$walkscore <- wscore
    object$description <- descr
    object$updated <- update
    object$snappedLong <- snlo
    object$snappedLat <- snla
    return(object)
}
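# Usage sketch (hypothetical coordinates and key): a status of 1 means a score
# was returned; otherwise the score fields are NA.
#
#   ws <- getWS(-73.9857, 40.7484, key = "YOUR_WSAPIKEY")
#   if (ws$status == 1) ws$walkscore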
/scratch/gouwar.j/cran-all/cranData/walkscoreAPI/R/getWS.R
networkSearch <- function(x,y,key){ URL <- paste("http://transit.walkscore.com/transit/search/network/?lat=",y,"&lon=",x,"&wsapikey=",key,sep="") X <- character(0) X <- c(X, scan(file = URL, what = "", sep = "\n", quiet = TRUE)) split <- strsplit(X,"}",fixed=TRUE) routesline <- grep("\"routes\"",split[[1]],fixed=TRUE) stopsline <- grep("\"stops\"",split[[1]],fixed=TRUE) rtext <- split[[1]][routesline:(stopsline-1)] stext <- split[[1]][stopsline:length(split[[1]])] rtext <- gsub("{\"routes\":","",rtext,fixed=TRUE) stext <- gsub("\"stops\":","",stext,fixed=TRUE) routelist <- list() stoplist <- list() if (length(X) > 0){ for (i in 1:(length(rtext)-1)){ text <- strsplit(rtext[i],", ",fixed=TRUE) cat <- text[[1]][grep("category",text[[1]])] cat <- strsplit(cat,": ",fixed=TRUE) cat <- gsub("\"","",cat[[1]][3],fixed=TRUE) age <- text[[1]][grep("\"agency\"",text[[1]],fixed=TRUE)] age <- strsplit(age,": ",fixed=TRUE) age <- gsub("\"","",age[[1]][2],fixed=TRUE) name <- text[[1]][grep("\"name\"",text[[1]],fixed=TRUE)] name <- strsplit(name,": ",fixed=TRUE) name <- gsub("\"","",name[[1]][2],fixed=TRUE) url <- text[[1]][grep("\"agency_url\"",text[[1]],fixed=TRUE)] url <- strsplit(url,": ",fixed=TRUE) url <- gsub("\"","",url[[1]][2],fixed=TRUE) id <- text[[1]][grep("\"id\"",text[[1]],fixed=TRUE)] id <- strsplit(id,": ",fixed=TRUE) id <- gsub("\"","",id[[1]][2],fixed=TRUE) sstart <- grep("\"stop_ids\"",text[[1]]) send <- grep("\"id\"",text[[1]])-1 slist <- c() for (j in sstart:send){ stop <- gsub("\"stop_ids\": [","",text[[1]][j],fixed=TRUE) stop <- gsub("\"","",stop,fixed=TRUE) stop <- gsub("]","",stop,fixed=TRUE) slist <- c(slist,stop) } object <- list() class(object) <- "Route" object$routeID <- id object$routeName <- name object$routeCatagory <- cat object$agency <- age object$agencyURL <- url object$stopList <- slist routelist[[i]] <- object } for (i in 1:(length(stext)-2)){ text <- strsplit(stext[i],", ",fixed=TRUE) lat <- text[[1]][grep("\"lat\"",text[[1]],fixed=TRUE)] lat <- strsplit(lat,": ",fixed=TRUE) lat <- as.numeric(lat[[1]][3]) lon <- text[[1]][grep("\"lon\"",text[[1]],fixed=TRUE)] lon <- strsplit(lon,": ",fixed=TRUE) lon <- as.numeric(lon[[1]][2]) name <- text[[1]][grep("\"name\"",text[[1]],fixed=TRUE)] name <- strsplit(name,": ",fixed=TRUE) name <- gsub("\"","",name[[1]][2],fixed=TRUE) id <- text[[1]][grep("\"id\"",text[[1]],fixed=TRUE)] id <- strsplit(id,": ",fixed=TRUE) id <- gsub("\"","",id[[1]][2],fixed=TRUE) rstart <- grep("\"route_ids\"",text[[1]]) rend <- grep("\"lon\"",text[[1]])-1 rlist <- c() for (j in rstart:rend){ route <- gsub("\"route_ids\": [","",text[[1]][j],fixed=TRUE) route <- gsub("\"","",route,fixed=TRUE) route <- gsub("]","",route,fixed=TRUE) rlist <- c(rlist,route) } object <- list() class(object) <- "Stop" object$stopID <- id object$stopName <- name object$stopLong <- lon object$stopLat <- lat object$routeList <- rlist stoplist[[i]] <- object } } object <- list() class(object) <- "NetworkSearch" object$routelist <- routelist object$stoplist <- stoplist return(object) }
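# Usage sketch (hypothetical coordinates and key): returns all routes and
# stops near a point as lists of "Route" and "Stop" objects.
#
#   net <- networkSearch(-122.3321, 47.6062, key = "YOUR_WSAPIKEY")
#   length(net$routelist)        # number of nearby routes
#   net$stoplist[[1]]$stopName   # name of the first nearby stop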
/scratch/gouwar.j/cran-all/cranData/walkscoreAPI/R/networkSearch.R
routeDetails <- function(routeid,key){
    URL <- paste("http://transit.walkscore.com/transit/route/",routeid,"/?wsapikey=",key,sep="")
    X <- character(0)
    X <- c(X, scan(file = URL, what = "", sep = "\n", quiet = TRUE))
    string <- X[grep("\"name\":",X,fixed=TRUE)]
    string2 <- X[grep("\"category\":",X,fixed=TRUE)]
    string3 <- X[grep("\"agency\":",X,fixed=TRUE)]
    string4 <- X[grep("\"agency_url\":",X,fixed=TRUE)]
    string5 <- X[grep("\"geometry_wkt\":",X,fixed=TRUE)]
    sbegin <- grep("\"stop_ids\":",X,fixed=TRUE)
    send <- grep("],",X,fixed=TRUE)
    if (length(X) > 0){
        name <- strsplit(string,": ")
        name <- gsub("\"","",name[[1]][2])
        name <- gsub(", ","",name)
        cat <- strsplit(string2,": ")
        cat <- gsub("\"","",cat[[1]][2], fixed=TRUE)
        cat <- gsub(", ","",cat)
        age <- strsplit(string3,": ")
        age <- gsub("\"","",age[[1]][2],fixed=TRUE)
        age <- gsub(", ","",age)
        url <- strsplit(string4,": ")
        url <- gsub("\"","",url[[1]][2],fixed=TRUE)
        url <- gsub(", ","",url)
        geom <- strsplit(string5,": ")
        geom <- gsub("\"","",geom[[1]][2],fixed=TRUE)
        geom <- gsub(", ","",geom)
        geom <- gsub("LINESTRING","",geom)
        geom <- gsub("(","",geom,fixed=TRUE)
        geom <- gsub(")","",geom,fixed=TRUE)
        geom <- strsplit(geom,",")
        if (length(geom)>1){
            geolist <- data.frame()
            for (i in 1:length(geom[[1]])){
                coords <- strsplit(geom[[1]][i]," ")
                geolist[i,1] <- as.numeric(coords[[1]][1])
                geolist[i,2] <- as.numeric(coords[[1]][2])
            }
            names(geolist) <- c("X","Y")
        } else {
            geolist <- "Unavailable"
        }
        slist <- c()
        for (i in (sbegin+1):(send-1)){
            str <- X[i]
            str <- gsub(" ","",str)
            str <- gsub("\"","",str,fixed=TRUE)
            str <- gsub(",","",str)
            slist <- c(slist,str)
        }
    } else {
        # stop here: the fields assembled below would be undefined without a
        # response
        stop("Error, invalid route ID")
    }
    object <- list()
    class(object) <- "RouteDetails"
    object$routeID <- routeid
    object$routeName <- name
    object$routeCatagory <- cat
    object$agency <- age # was misspelled "agengy", inconsistent with networkSearch()
    object$agencyURL <- url
    object$routeGeometry <- geolist
    object$stopList <- slist
    return(object)
}
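# Usage sketch (hypothetical route id and key): routeGeometry is a data frame
# of X/Y coordinates whenever the API returns a LINESTRING.
#
#   route <- routeDetails("some-route-id", key = "YOUR_WSAPIKEY")
#   if (is.data.frame(route$routeGeometry)) {
#     plot(route$routeGeometry, type = "l")  # draw the route polyline
#   }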
/scratch/gouwar.j/cran-all/cranData/walkscoreAPI/R/routeDetails.R
stopDetails <- function(stopid,key){
    URL <- paste("http://transit.walkscore.com/transit/stop/",stopid,"/?wsapikey=",key,sep="")
    X <- character(0)
    X <- c(X, scan(file = URL, what = "", sep = "\n", quiet = TRUE))
    string <- X[grep("\"name\":",X,fixed=TRUE)]
    string2 <- X[grep("\"lon\":",X,fixed=TRUE)]
    string3 <- X[grep("\"lat\":",X,fixed=TRUE)]
    rbegin <- grep("\"route_ids\":",X,fixed=TRUE)
    rend <- grep("],",X,fixed=TRUE)
    if (length(X) > 0){
        name <- strsplit(string,": ")
        name <- gsub("\"","",name[[1]][2])
        lon <- strsplit(string2,": ")
        lon <- gsub(",","",lon[[1]][2])
        lon <- as.numeric(lon)
        lat <- strsplit(string3,": ")
        lat <- gsub(",","",lat[[1]][2])
        lat <- as.numeric(lat)
        rlist <- c()
        for (i in (rbegin+1):(rend-1)){
            str <- X[i]
            str <- gsub(" ","",str)
            str <- gsub("\"","",str,fixed=TRUE)
            str <- gsub(",","",str)
            rlist <- c(rlist,str)
        }
    } else {
        # stop here: the fields assembled below would be undefined without a
        # response
        stop("Error, invalid stop ID")
    }
    object <- list()
    class(object) <- "StopDetails"
    object$stopID <- stopid
    object$stopName <- name
    object$stopLong <- lon
    object$stopLat <- lat
    object$routelist <- rlist
    return(object)
}
/scratch/gouwar.j/cran-all/cranData/walkscoreAPI/R/stopDetails.R
stopSearch <- function(x,y,key){
    URL <- paste("http://transit.walkscore.com/transit/search/stops/?lat=",y,"&lon=",x,"&wsapikey=",key,sep="")
    X <- character(0)
    X <- c(X, scan(file = URL, what = "", sep = "\n", quiet = TRUE))
    split <- strsplit(X,"{",fixed=TRUE)
    stoplines <- grep("distance",split[[1]])
    stoplines <- c(stoplines,length(split[[1]])+1)
    slist <- list() # initialise here so an empty response returns an empty list
    if (length(X) > 0){
        for (i in 1:(length(stoplines)-1)){
            text <- split[[1]][stoplines[i]:(stoplines[(i+1)]-1)]
            info <- text[grep("distance",text)]
            info <- strsplit(info, ",", fixed = TRUE)
            dist <- info[[1]][grep("distance",info[[1]])]
            dist <- strsplit(dist,": ")
            dist <- as.numeric(dist[[1]][2])
            sname <- info[[1]][grep("name",info[[1]])]
            sname <- strsplit(sname,": ",fixed=TRUE)
            sname <- gsub("\"","",sname[[1]][2],fixed=TRUE)
            lastline <- text[length(text)]
            lastline <- strsplit(lastline,", ", fixed = TRUE)
            lon <- lastline[[1]][grep("\"lon",lastline[[1]],fixed=TRUE)]
            lon <- strsplit(lon,": ",fixed=TRUE)
            lon <- as.numeric(lon[[1]][2])
            lat <- lastline[[1]][grep("\"lat",lastline[[1]],fixed=TRUE)]
            lat <- strsplit(lat,": ",fixed=TRUE)
            lat <- as.numeric(lat[[1]][2])
            sid <- lastline[[1]][length(lastline[[1]])]
            sid <- strsplit(sid,": ",fixed=TRUE)
            sid <- gsub("\"","",sid[[1]][2],fixed=TRUE)
            sid <- gsub("}","",sid,fixed=TRUE)
            sid <- gsub("]","",sid,fixed=TRUE)
            rlist <- list()
            r <- grep("category",text)
            for (j in 1:length(r)){
                rtext <- strsplit(text[r[j]],",")
                cat <- rtext[[1]][grep("category",rtext[[1]])]
                cat <- strsplit(cat,": ",fixed=TRUE)
                cat <- gsub("\"","",cat[[1]][2],fixed=TRUE)
                id <- rtext[[1]][grep("\"id\"",rtext[[1]],fixed=TRUE)]
                id <- strsplit(id,": ",fixed=TRUE)
                id <- gsub("\"","",id[[1]][2],fixed=TRUE)
                rname <- rtext[[1]][grep("\"name\"",rtext[[1]],fixed=TRUE)]
                rname <- strsplit(rname,": ",fixed=TRUE)
                rname <- gsub("\"","",rname[[1]][2],fixed=TRUE)
                rname <- gsub("}","",rname,fixed=TRUE)
                rname <- gsub("]","",rname,fixed=TRUE)
                age <- rtext[[1]][grep("\"agency\"",rtext[[1]],fixed=TRUE)]
                age <- strsplit(age,": ",fixed=TRUE)
                age <- gsub("\"","",age[[1]][2],fixed=TRUE)
                url <- rtext[[1]][grep("\"agency_url\"",rtext[[1]],fixed=TRUE)]
                url <- strsplit(url,": ",fixed=TRUE)
                url <- gsub("\"","",url[[1]][2],fixed=TRUE)
                # build one route object per matching route and keep them all;
                # previously rlist was overwritten on every iteration, so only
                # the last route survived
                route <- list()
                class(route) <- "RouteDetails"
                route$id <- id
                route$routeName <- rname
                route$routeCatagory <- cat
                route$routeAgency <- age
                route$routeURL <- url
                rlist[[j]] <- route
            }
            object <- list()
            class(object) <- "Stop2"
            object$stopName <- sname
            object$stopID <- sid # the stop id, not the id of the last route
            object$stopDistance <- dist
            object$stopLong <- lon
            object$stopLat <- lat
            object$routeDetails <- rlist
            slist[[i]] <- object
        }
    }
    return(slist)
}
/scratch/gouwar.j/cran-all/cranData/walkscoreAPI/R/stopSearch.R
walkshed <- function(x,y,key){ URL <- paste("http://api.walkscore.com/walk_shed?lat=",y,"&lon=",x,"&wsapikey=",key,sep="") X <- character(0) X <- c(X, scan(file = URL, what = "", sep = " ", quiet = TRUE)) status <- X[grep("{status:",X,fixed=TRUE) + 1] status <- as.numeric(gsub(",","",status,fixed=TRUE)) lat <- X[grep("{lat",X,fixed=TRUE) + 1] lat <- as.numeric(gsub(",","",lat)) lon <- X[grep("{lat",X,fixed=TRUE) + 3] lon <- as.numeric(gsub("},","",lon)) geo <- X[grep("{type:",X,fixed=TRUE) + 1] geo <- gsub(",","",geo) cstart <- grep("coordinates:",X,fixed=TRUE) + 1 cend <- grep("radius:",X,fixed=TRUE) - 1 lonlist <- c() latlist <- c() count <- 0 for (i in cstart:cend){ count <- count + 1 if (count %% 2 == 1){ n <- X[i] n <- gsub("[","",n,fixed=TRUE) n <- gsub(",","",n,fixed=TRUE) n <- as.numeric(n) lonlist <- c(lonlist,n) } if (count %% 2 == 0){ n <- X[i] n <- gsub("]","",n,fixed=TRUE) n <- gsub(",","",n,fixed=TRUE) n <- gsub("}","",n,fixed=TRUE) n <- as.numeric(n) latlist <- c(latlist,n) } } coords <- data.frame(lonlist,latlist) rad <- X[grep("radius:",X,fixed=TRUE) + 1] rad <- as.numeric(gsub("},","",rad,fixed=TRUE)) slat <- X[grep("snapped_lat:",X,fixed=TRUE) + 1] slat <- as.numeric(gsub(",","",slat,fixed=TRUE)) slon <- X[grep("snapped_lon:",X,fixed=TRUE) + 1] slon <- as.numeric(gsub("}","",slon,fixed=TRUE)) object <- list() class(object) <- "Walkshed" object$status <- status object$origin <- c(lon,lat) object$geometry <- geo object$coordinates <- coords object$radius <- rad object$snappedLong <- slon object$snappedLat <- slat return(object) }
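# Usage sketch (hypothetical coordinates and key): the walkshed polygon can be
# drawn directly from the returned coordinate data frame.
#
#   shed <- walkshed(-73.9857, 40.7484, key = "YOUR_WSAPIKEY")
#   plot(shed$coordinates, type = "l")
#   points(shed$origin[1], shed$origin[2], pch = 19)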
/scratch/gouwar.j/cran-all/cranData/walkscoreAPI/R/walkshed.R
# Wallace EcoMod: a flexible platform for reproducible modeling of # species niches and distributions. # # custom_modules.R # File author: Wallace EcoMod Dev Team. 2023. # -------------------------------------------------------------------------- # This file is part of the Wallace EcoMod application # (hereafter “Wallace”). # # Wallace is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # Wallace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Wallace. If not, see <http://www.gnu.org/licenses/>. # -------------------------------------------------------------------------- # #' Register a Wallace module #' #' Before running the Wallace application with \code{run_wallace()}, you can #' register your own modules to be used in Wallace. #' #' @param config_file The path to a YAML file that contains the information about #' one or more modules. #' @seealso \code{\link[wallace]{create_module}} #' @export register_module <- function(config_file) { full_path <- NULL tryCatch({ full_path <- normalizePath(path = config_file, mustWork = TRUE) }, error = function(e) {}) if (is.null(full_path)) { stop("Cannot find the given file: ", config_file, call. = FALSE) } if (tools::file_ext(full_path) != "yml") { stop("The provided file is not a YAML file: ", config_file, call. = FALSE) } new_paths <- unique(c(getOption("wallace_module_configs"), full_path)) options("wallace_module_configs" = new_paths) } #' Create a Wallace module #' #' Create the template of a new Wallace module. #' #' @param id The id of the module. #' @param dir A directory where the new module should be created. #' @param map Whether or not the module should support modifying the map. #' @param result Whether or not the module should support showing information in #' the Result tab. #' @param rmd Whether or not the module should add Rmd code to the Session Code #' download. #' @param save Whether or not the module has some custom data to save when the #' user saves the current session. #' @seealso \code{\link[wallace]{register_module}} #' @export create_module <- function(id, dir, map = FALSE, result = FALSE, rmd = FALSE, save = FALSE) { if (!grepl("^[A-Za-z0-9_]+$", id)) { stop("The id can only contain English characters, digits, and underscores", call. 
= FALSE) } # Copy the simple skeleton files to the new module directory dir.create(dir, showWarnings = FALSE, recursive = TRUE) file.copy(system.file("module_skeleton", "skeleton.yml", package = "wallace"), file.path(dir, glue::glue("{id}.yml")), overwrite = TRUE) file.copy(system.file("module_skeleton", "skeleton.md", package = "wallace"), file.path(dir, glue::glue("{id}.md")), overwrite = TRUE) if (rmd) { file.copy(system.file("module_skeleton", "skeleton.Rmd", package = "wallace"), file.path(dir, glue::glue("{id}.Rmd")), overwrite = TRUE) } # Copy the R code file, use the correct ID in all functions, and remove any # functions that the user doesn't want to use in this module r_file <- readLines(system.file("module_skeleton", "skeleton.R", package = "wallace")) r_file <- paste(r_file, collapse = "\n") if (!map) { r_file <- gsub("\n\\{\\{id}}_module_map <- function.*?}\n", "", r_file) } if (!result) { r_file <- gsub("\n\\{\\{id}}_module_result <- function.*?}\n", "", r_file) r_file <- gsub("\n *output\\$.*?})\n", "", r_file) } if (!rmd) { r_file <- gsub("\n\\{\\{id}}_module_rmd <- function.*?)\n}", "", r_file) } if (!save) { r_file <- gsub("\n *return\\(list\\(.*?))\n", "", r_file) } r_file <- gsub("\\{\\{id}}", id, r_file) writeLines(r_file, file.path(dir, glue::glue("{id}.R"))) message(glue::glue("Template for module `{id}` successfully created at ", "`{normalizePath(dir)}`.\nDon't forget to call ", "`wallace::register_module(\"{dir}/{id}.yml\")` before running ", "the app to add your module to Wallace.")) invisible() }
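# Example workflow sketch (hypothetical module id and directory):
#
#   create_module(id = "my_module", dir = "modules/my_module",
#                 map = TRUE, result = TRUE, rmd = TRUE)
#   # edit the generated files, then, before launching the app:
#   register_module("modules/my_module/my_module.yml")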
/scratch/gouwar.j/cran-all/cranData/wallace/R/custom_modules.R
# Wallace EcoMod: a flexible platform for reproducible modeling of
# species niches and distributions.
#
# envs_ecoClimate.R
# File author: Wallace EcoMod Dev Team. 2023.
# --------------------------------------------------------------------------
# This file is part of the Wallace EcoMod application
# (hereafter “Wallace”).
#
# Wallace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Wallace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Wallace. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------
#
#' @title envs_ecoClimate Obtain ecoClimate variables
#' @description Download ecoClimate variables. See www.ecoclimate.org.
#'
#' @details This function is called by the module envs to download ecoClimate
#' variables from www.ecoclimate.org. The variables to be downloaded are
#' selected by the user with ecoClimSel and the resolution is fixed to 0.5
#' degrees. This function currently gets variables from Dropbox and the
#' process takes significantly more time than for other datasets. It returns
#' a rasterStack of selected variables.
#'
#' @param bcAOGCM Name of the Atmospheric and Oceanic Global Circulation Model.
#' Options are: "CCSM", "CNRM", "MIROC", "FGOALS", "GISS", "IPSL", "MRI", "MPI"
#' @param bcScenario Select the temporal scenario that you want to download.
#' Options are: "LGM" (21,000 years ago), "Holo" (6,000 years ago),
#' "Present", "Future 2.6" (rcp 2.6), "Future 4.5" (rcp 4.5),
#' "Future 6" (rcp 6), "Future 8.5" (rcp 8.5)
#' @param ecoClimSel Numeric vector with list of variables to select.
#' @param logger Stores all notification messages to be displayed in the Log
#' Window of Wallace GUI. Insert the logger reactive list here for running in
#' shiny, otherwise leave the default NULL.
#'
#' @examples
#' bcAOGCM <- "CCSM"
#' bcScenario <- "LGM"
#' ecoClimSel <- c(1, 2, 3)
#' \dontrun{
#' varsEcoClimate <- envs_ecoClimate(bcAOGCM, bcScenario, ecoClimSel)
#' }
#'
#' @return A rasterStack of selected variables
#'
#' @author Sara Varela <sara_varela@@yahoo.com>
#' @author Jamie M. Kass <jamie.m.kass@@gmail.com>
#' @author Gonzalo E. Pinilla-Buitrago <gepinillab@@gmail.com>
#'
#' @export
#'
envs_ecoClimate <- function(bcAOGCM, bcScenario, ecoClimSel, logger = NULL) {
  smartProgress(logger, message = "Retrieving ecoClimate data...", {
    ecoClimatelayers <- ecoClimate_getdata(AOGCM = bcAOGCM,
                                           Baseline = "Modern",
                                           Scenario = bcScenario,
                                           logger)
  })
  if (is.null(ecoClimatelayers)) return()
  ecoClimatelayers <- ecoClimate_select(ecoClimatelayers, Sels = ecoClimSel)
  # Standardize raster names: drop dots, then zero-pad single-digit layers
  # (e.g., "bio1" becomes "bio01")
  names(ecoClimatelayers) <- gsub("\\.", "", names(ecoClimatelayers))
  i <- grep('bio[0-9]$', names(ecoClimatelayers))
  editNames <- paste('bio',
                     sapply(strsplit(names(ecoClimatelayers)[i], 'bio'),
                            function(x) x[2]),
                     sep = '0')
  names(ecoClimatelayers)[i] <- editNames
  # Define WGS84
  raster::crs(ecoClimatelayers) <- raster::crs(
    "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
  logger %>% writeLog(
    "Environmental predictors: ecoClimate bioclimatic variables ",
    paste(names(ecoClimatelayers), collapse = ", "),
    " at 0.5 degree resolution. Global Circulation Model = ", bcAOGCM,
    ", Scenario = ", bcScenario, ".")
  return(ecoClimatelayers)
}

### Auxiliary functions

#' @title ecoClimate_getdata
#'
#' @description Download ecoClimate layers. More info at www.ecoclimate.org
#'
#' @usage ecoClimate_getdata(AOGCM, Baseline, Scenario, logger)
#'
#' @param AOGCM Select the AOGCM.
#' Options are: "CCSM", "CNRM", "MIROC", "COSMOS", "FGOALS", "GISS", "IPSL",
#' "MRI", "MPI"
#' @param Baseline Select a baseline for the climatic layers.
#' Options are: "Pre-industrial" (piControl-1760), "Historical" (1900-1949),
#' "Modern" (1950-1999)
#' @param Scenario Select a temporal scenario.
#' Options are: "LGM" (21,000 years ago), "Holo" (6,000 years ago),
#' "Present", "Future 2.6" (rcp 2.6), "Future 4.5" (rcp 4.5), "Future 6" (rcp 6),
#' "Future 8.5" (rcp 8.5)
#' @param logger Stores all notification messages to be displayed in the Log
#' Window of Wallace GUI. Insert the logger reactive list here for running in shiny,
#' otherwise leave the default NULL
#' @export
#'
#' @examples \dontrun{
#' CCSM_mod_present <- ecoClimate_getdata("CCSM", "Modern", "Present")
#' dev.new()
#' plot(CCSM_mod_present)
#' }
#'
ecoClimate_getdata <- function(AOGCM, Baseline, Scenario, logger = NULL) {
  if (!(AOGCM %in% c("CCSM", "CNRM", "MIROC", "COSMOS", "FGOALS", "GISS",
                     "IPSL", "MRI", "MPI"))) {
    stop(paste0("ecoClimate has no data for AOGCM=", AOGCM,
                ". Check the spelling."))
  }
  if (!(Baseline %in% c("Pre-industrial", "Historical", "Modern"))) {
    stop(paste0("ecoClimate has no data for Baseline=", Baseline,
                ". Check the spelling."))
  }
  if (!(Scenario %in% c("LGM", "Holo", "Present", "Future 2.6", "Future 4.5",
                        "Future 6", "Future 8.5"))) {
    stop(paste0("ecoClimate has no data for Scenario=", Scenario,
                ". Check the spelling."))
  }
  ## Present
  if (AOGCM == "CCSM" && Baseline == "Modern" && Scenario == "Present") {
    FinURL <- paste0("https://www.dropbox.com/sh/ntl1ieo3fb5q2g9/AAByugxGAwEvuNxCMsj7gfI2a/bio%20%23%20CCSM_Modern%281950-1999%29.txt?dl=1")
  }
  if (AOGCM == "CNRM" && Baseline == "Modern" && Scenario == "Present") {
    FinURL <- paste0("https://www.dropbox.com/sh/ntl1ieo3fb5q2g9/AAB7ltZRxzYkjv6gZ4QNVWBka/bio%20%23%20CNRM_Modern%281950-1999%29.txt?dl=1")
  }
  if (AOGCM == "MIROC" && Baseline == "Modern" && Scenario == "Present") {
    FinURL <- paste0("https://www.dropbox.com/sh/ntl1ieo3fb5q2g9/AAA4zPbhp-TMN8ohv6plWoFha/bio%20%23%20MIROC_Modern%281950-1999%29.txt?dl=1")
  }
  if (AOGCM == "COSMOS" && Baseline == "Modern" && Scenario == "Present") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  if (AOGCM == "FGOALS" && Baseline == "Modern" && Scenario == "Present") {
    FinURL <- paste0("https://www.dropbox.com/sh/ntl1ieo3fb5q2g9/AACThTqhfBRHpECBKmTJNA4Za/bio%20%23%20FGOALS_Modern%281950-1999%29.txt?dl=1")
  }
  if (AOGCM == "GISS" && Baseline == "Modern" && Scenario == "Present") {
    FinURL <- paste0("https://www.dropbox.com/sh/ntl1ieo3fb5q2g9/AADt6BQzXu1Uk_B9oW6_XnWUa/bio%20%23%20GISS_Modern%281950-1999%29.txt?dl=1")
  }
  if (AOGCM == "IPSL" && Baseline == "Modern" && Scenario == "Present") {
    FinURL <- paste0("https://www.dropbox.com/sh/ntl1ieo3fb5q2g9/AAC0Sv9Ga5BmU5EhrYUxguA6a/bio%20%23%20IPSL_Modern%281950-1999%29.txt?dl=1")
  }
  if (AOGCM == "MRI" && Baseline == "Modern" && Scenario == "Present") {
    FinURL <- paste0("https://www.dropbox.com/sh/ntl1ieo3fb5q2g9/AAAwZpUpIYfD69d4sgThrWB2a/bio%20%23%20MRI_Modern%281950-1999%29.txt?dl=1")
  }
  if (AOGCM == "MPI" && Baseline == "Modern" && Scenario == "Present") {
    FinURL <- paste0("https://www.dropbox.com/sh/ntl1ieo3fb5q2g9/AADlJ8v41rP0Nd65PMCeCIxFa/bio%20%23%20MPI_Modern%281950-1999%29.txt?dl=1")
  }
  ## LGM
  if (AOGCM == "CCSM" && Baseline == "Modern" && Scenario == "LGM") {
    FinURL <- paste0("https://www.dropbox.com/sh/kijh17ehg8v3uv8/AADWJOs0_8zQmhc0XJxJE9a2a/bio%20%23baseline_Modern%281950-1999%29%23%20CCSM_LGM%2821ka%29.txt?dl=1")
  }
  if (AOGCM == "CNRM" && Baseline == "Modern" && Scenario == "LGM") {
    FinURL <- paste0("https://www.dropbox.com/sh/kijh17ehg8v3uv8/AABWiVQptLSgkPkVC4aJCM2Ta/bio%20%23baseline_Modern%281950-1999%29%23%20CNRM_LGM%2821ka%29.txt?dl=1")
  }
  if (AOGCM == "MIROC" && Baseline == "Modern" && Scenario == "LGM") {
    FinURL <- paste0("https://www.dropbox.com/sh/kijh17ehg8v3uv8/AAB5NqekMCHtD4_6eJWPJs1ja/bio%20%23baseline_Modern%281950-1999%29%23%20MIROC_LGM%2821ka%29.txt?dl=1")
  }
  if (AOGCM == "COSMOS" && Baseline == "Modern" && Scenario == "LGM") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  if (AOGCM == "FGOALS" && Baseline == "Modern" && Scenario == "LGM") {
    FinURL <- paste0("https://www.dropbox.com/sh/kijh17ehg8v3uv8/AABSZKM3FJKJnatGPW164WK5a/bio%20%23baseline_Modern%281950-1999%29%23%20FGOALS_LGM%2821ka%29.txt?dl=1")
  }
  if (AOGCM == "GISS" && Baseline == "Modern" && Scenario == "LGM") {
    FinURL <- paste0("https://www.dropbox.com/sh/kijh17ehg8v3uv8/AADiQFLKVFunSXNC-qaNL_voa/bio%20%23baseline_Modern%281950-1999%29%23%20GISS_LGM%2821ka%29.txt?dl=1")
  }
  if (AOGCM == "IPSL" && Baseline == "Modern" && Scenario == "LGM") {
    FinURL <- paste0("https://www.dropbox.com/sh/kijh17ehg8v3uv8/AABmebhEnLInaR1-d6R6Giara/bio%20%23baseline_Modern%281950-1999%29%23%20IPSL_LGM%2821ka%29.txt?dl=1")
  }
  if (AOGCM == "MRI" && Baseline == "Modern" && Scenario == "LGM") {
    FinURL <- paste0("https://www.dropbox.com/sh/kijh17ehg8v3uv8/AADUevi4Go4dkm4smZCei6Mqa/bio%20%23baseline_Modern%281950-1999%29%23%20MRI_LGM%2821ka%29.txt?dl=1")
  }
  if (AOGCM == "MPI" && Baseline == "Modern" && Scenario == "LGM") {
    FinURL <- paste0("https://www.dropbox.com/sh/kijh17ehg8v3uv8/AAB51J8w7P_awhDlif4mmte0a/bio%20%23baseline_Modern%281950-1999%29%23%20MPI_LGM%2821ka%29.txt?dl=1")
  }
  ## Holocene
  if (AOGCM == "CCSM" && Baseline == "Modern" && Scenario == "Holo") {
    FinURL <- paste0("https://www.dropbox.com/sh/kijh17ehg8v3uv8/AADh9RT99OC7J5wfJojxiJGQa/bio%20%23baseline_Modern%281950-1999%29%23%20CCSM_mHol%286ka%29.txt?dl=1")
  }
  if (AOGCM == "CNRM" && Baseline == "Modern" && Scenario == "Holo") {
    FinURL <- paste0("https://www.dropbox.com/sh/kijh17ehg8v3uv8/AABZo3wRHaMImb8xde2yc50xa/bio%20%23baseline_Modern%281950-1999%29%23%20CNRM_mHol%286ka%29.txt?dl=1")
  }
  if (AOGCM == "MIROC" && Baseline == "Modern" && Scenario == "Holo") {
    FinURL <- paste0("https://www.dropbox.com/sh/kijh17ehg8v3uv8/AACP_KZWd4cceIUXTkEkfMfQa/bio%20%23baseline_Modern%281950-1999%29%23%20MIROC_mHol%286ka%29.txt?dl=1")
  }
  if (AOGCM == "COSMOS" && Baseline == "Modern" && Scenario == "Holo") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  if (AOGCM == "FGOALS" && Baseline == "Modern" && Scenario == "Holo") {
    FinURL <- paste0("https://www.dropbox.com/sh/kijh17ehg8v3uv8/AAAvdu0QfwI7BF1xNtgUe6y8a/bio%20%23baseline_Modern%281950-1999%29%23%20FGOALS_mHol%286ka%29.txt?dl=1")
  }
  if (AOGCM == "GISS" && Baseline == "Modern" && Scenario == "Holo") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  if (AOGCM == "IPSL" && Baseline == "Modern" && Scenario == "Holo") {
    FinURL <- paste0("https://www.dropbox.com/sh/kijh17ehg8v3uv8/AAB30ZvQWFycw9h0yuqPolqua/bio%20%23baseline_Modern%281950-1999%29%23%20IPSL_mHol%286ka%29.txt?dl=1")
  }
  if (AOGCM == "MRI" && Baseline == "Modern" && Scenario == "Holo") {
    FinURL <- paste0("https://www.dropbox.com/sh/kijh17ehg8v3uv8/AACdpC0rERiDFuucY-6zHkW6a/bio%20%23baseline_Modern%281950-1999%29%23%20MRI_mHol%286ka%29.txt?dl=1")
  }
  if (AOGCM == "MPI" && Baseline == "Modern" && Scenario == "Holo") {
    FinURL <- paste0("https://www.dropbox.com/sh/kijh17ehg8v3uv8/AABkgb52UgXyq-4TvVeaRgCMa/bio%20%23baseline_Modern%281950-1999%29%23%20MPI_mHol%286ka%29.txt?dl=1")
  }
  ## FUTURE 8.5
  if (AOGCM == "CCSM" && Baseline == "Modern" && Scenario == "Future 8.5") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AABMSJRupXipBfwa33rFAOWIa/bio%20%23baseline_Modern%281950-1999%29%23%20CCSM_rcp85%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "CNRM" && Baseline == "Modern" && Scenario == "Future 8.5") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AAC5kjvYo-tlD1rplQzR5Yvga/bio%20%23baseline_Modern%281950-1999%29%23%20CNRM_rcp85%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "MIROC" && Baseline == "Modern" && Scenario == "Future 8.5") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AAAXxCEacxrks78dEY_ZVHpha/bio%20%23baseline_Modern%281950-1999%29%23%20MIROC_rcp85%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "COSMOS" && Baseline == "Modern" && Scenario == "Future 8.5") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  if (AOGCM == "FGOALS" && Baseline == "Modern" && Scenario == "Future 8.5") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AABEhuQiHinPOg2xhdduGsCOa/bio%20%23baseline_Modern%281950-1999%29%23%20FGOALS_rcp85%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "GISS" && Baseline == "Modern" && Scenario == "Future 8.5") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AAASGrldKVv6zV_GCDf-T78ka/bio%20%23baseline_Modern%281950-1999%29%23%20GISS_rcp85%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "IPSL" && Baseline == "Modern" && Scenario == "Future 8.5") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AAA-7SOARlsIHE5WalMwFluPa/bio%20%23baseline_Modern%281950-1999%29%23%20IPSL_rcp85%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "MRI" && Baseline == "Modern" && Scenario == "Future 8.5") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AADQ-geA4e9nzXQSH4SOdq3la/bio%20%23baseline_Modern%281950-1999%29%23%20MRI_rcp85%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "MPI" && Baseline == "Modern" && Scenario == "Future 8.5") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  ## FUTURE 2.6
  if (AOGCM == "CCSM" && Baseline == "Modern" && Scenario == "Future 2.6") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AAD2rXFucDHfwmOW7LUAhF5ia/bio%20%23baseline_Modern%281950-1999%29%23%20CCSM_rcp26%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "CNRM" && Baseline == "Modern" && Scenario == "Future 2.6") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  if (AOGCM == "MIROC" && Baseline == "Modern" && Scenario == "Future 2.6") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AAA49-6_FwVvRxsQLBborOkha/bio%20%23baseline_Modern%281950-1999%29%23%20MIROC_rcp26%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "COSMOS" && Baseline == "Modern" && Scenario == "Future 2.6") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  if (AOGCM == "FGOALS" && Baseline == "Modern" && Scenario == "Future 2.6") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AACGdnVRhJfh1x6lhE2Yq9iaa/bio%20%23baseline_Modern%281950-1999%29%23%20FGOALS_rcp26%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "GISS" && Baseline == "Modern" && Scenario == "Future 2.6") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AAAC5MgcCaL4E_i9TiffD8Iga/bio%20%23baseline_Modern%281950-1999%29%23%20GISS_rcp26%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "IPSL" && Baseline == "Modern" && Scenario == "Future 2.6") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AADQrUtdiTkOm4WVMjMyaWhfa/bio%20%23baseline_Modern%281950-1999%29%23%20IPSL_rcp26%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "MRI" && Baseline == "Modern" && Scenario == "Future 2.6") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AADxV4qNkInBdqpNMSASycTCa/bio%20%23baseline_Modern%281950-1999%29%23%20MRI_rcp26%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "MPI" && Baseline == "Modern" && Scenario == "Future 2.6") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  ## FUTURE 4.5
  if (AOGCM == "CCSM" && Baseline == "Modern" && Scenario == "Future 4.5") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AADV5Qf8D7wgWSMuL30I-XQBa/bio%20%23baseline_Modern%281950-1999%29%23%20CCSM_rcp45%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "CNRM" && Baseline == "Modern" && Scenario == "Future 4.5") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AAAIcA1wbD-YXtMxhHMSx07Sa/bio%20%23baseline_Modern%281950-1999%29%23%20CNRM_rcp45%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "MIROC" && Baseline == "Modern" && Scenario == "Future 4.5") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AADCaiv9XlR32llK2tISaT92a/bio%20%23baseline_Modern%281950-1999%29%23%20MIROC_rcp45%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "COSMOS" && Baseline == "Modern" && Scenario == "Future 4.5") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  if (AOGCM == "FGOALS" && Baseline == "Modern" && Scenario == "Future 4.5") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  if (AOGCM == "GISS" && Baseline == "Modern" && Scenario == "Future 4.5") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AAAiV_w1REiX61Si9FjBCMLxa/bio%20%23baseline_Modern%281950-1999%29%23%20GISS_rcp45%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "IPSL" && Baseline == "Modern" && Scenario == "Future 4.5") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AAAXW0R4mAH2LmZJa8TUiIcVa/bio%20%23baseline_Modern%281950-1999%29%23%20IPSL_rcp45%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "MRI" && Baseline == "Modern" && Scenario == "Future 4.5") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AABET5mP2c9qPladhp6nkcHBa/bio%20%23baseline_Modern%281950-1999%29%23%20MRI_rcp45%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "MPI" && Baseline == "Modern" && Scenario == "Future 4.5") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  ## FUTURE 6
  if (AOGCM == "CCSM" && Baseline == "Modern" && Scenario == "Future 6") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AAAu9NfGSwBSqvQ_sbDrUjtpa/bio%20%23baseline_Modern%281950-1999%29%23%20CCSM_rcp60%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "CNRM" && Baseline == "Modern" && Scenario == "Future 6") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  if (AOGCM == "MIROC" && Baseline == "Modern" && Scenario == "Future 6") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AADuGCrBF0brWWtrBjomSQfOa/bio%20%23baseline_Modern%281950-1999%29%23%20MIROC_rcp60%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "COSMOS" && Baseline == "Modern" && Scenario == "Future 6") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  if (AOGCM == "FGOALS" && Baseline == "Modern" && Scenario == "Future 6") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  if (AOGCM == "GISS" && Baseline == "Modern" && Scenario == "Future 6") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AADo_ux8CBb6fBGxxSrdRrJRa/bio%20%23baseline_Modern%281950-1999%29%23%20GISS_rcp60%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "IPSL" && Baseline == "Modern" && Scenario == "Future 6") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AACBY6nVnv4oeCie5G6vHDm7a/bio%20%23baseline_Modern%281950-1999%29%23%20IPSL_rcp60%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "MRI" && Baseline == "Modern" && Scenario == "Future 6") {
    FinURL <- paste0("https://www.dropbox.com/sh/ei6m84sctoinhi9/AAAfNyo79Z3RpJ-7wqzMjdRZa/bio%20%23baseline_Modern%281950-1999%29%23%20MRI_rcp60%282080-2100%29.txt?dl=1")
  }
  if (AOGCM == "MPI" && Baseline == "Modern" && Scenario == "Future 6") {
    logger %>% writeLog(type = 'error', "ecoClimate has no data for AOGCM = ",
                        AOGCM, ", Baseline = ", Baseline,
                        ", Scenario = ", Scenario)
    return()
  }
  # Download data
  fn <- paste(tempfile(), '.txt', sep = '')
  fnDw <- utils::download.file(url = FinURL, destfile = fn, method = "auto",
                               quiet = FALSE, mode = "wb", cacheOK = TRUE)
  if (file.exists(fn) & fnDw == 0) {
    climate_data <- utils::read.table(fn, TRUE)
    sp::gridded(climate_data) <- ~ long + lat
    # drop the first layer of the stack (not a climate variable)
    map_climate <- raster::stack(climate_data)[[-1]]
  } else {
    stop('Could not download the ecoClimate file')
  }
  return(map_climate)
}

#' ecoClimate_select
#'
#' Select bioclimatic variables and set the extent you want (crop the raster
#' stack to your study extent).
#'
#' @usage ecoClimate_select(map_climate, Sels = c(1:19), extent = c(-180, 180, -90, 90))
#'
#' @param map_climate raster stack with all the variables
#' @param Sels vector of integer numbers. 1 for bio1, 2 for bio2, etc.
#' e.g. Sels = c(1, 12, 6) for selecting bio1, bio12 and bio6
#' @param extent vector. xmin, xmax, ymin, ymax. e.g. extent = c(-20, 80, 20, 80)
#' @export
#' @examples \dontrun{
#' CCSM_mod_present <- ecoClimate_getdata("CCSM", "Modern", "Present")
#' Europe_CCSM_m_p_bio1_12 <- ecoClimate_select(CCSM_mod_present, c(1, 12),
#'                                              extent = c(-20, 80, 20, 80))
#' dev.new()
#' plot(Europe_CCSM_m_p_bio1_12)
#' }
#'
ecoClimate_select <- function(map_climate, Sels = c(1:19),
                              extent = c(-180, 180, -90, 90)) {
  select_var <- map_climate[[Sels]]
  crop_stack <- raster::crop(select_var, extent)
  return(crop_stack)
}
/scratch/gouwar.j/cran-all/cranData/wallace/R/envs_ecoClimate.R
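The Dropbox files above are plain-text tables, so the conversion step inside ecoClimate_getdata() can be tried offline. A minimal sketch, assuming only the sp and raster packages and using made-up values in place of a downloaded table:

# Toy long-format table mimicking an ecoClimate download (values are fake)
library(sp)
library(raster)
climate_data <- expand.grid(long = seq(-1, 1, 0.5), lat = seq(-1, 1, 0.5))
climate_data$bio1 <- rnorm(nrow(climate_data))   # hypothetical variable
climate_data$bio12 <- rnorm(nrow(climate_data))  # hypothetical variable
# Same conversion as in ecoClimate_getdata(): grid the table, then stack it
sp::gridded(climate_data) <- ~ long + lat
map_climate <- raster::stack(climate_data)
# ecoClimate_select() then subsets layers and crops to a study extent
raster::crop(map_climate[["bio1"]], c(-1, 0, -1, 0))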
# Wallace EcoMod: a flexible platform for reproducible modeling of
# species niches and distributions.
#
# envs_userEnvs.R
# File author: Wallace EcoMod Dev Team. 2023.
# --------------------------------------------------------------------------
# This file is part of the Wallace EcoMod application
# (hereafter “Wallace”).
#
# Wallace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Wallace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Wallace. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------
#
#' @title envs_userEnvs
#' @description Load user provided rasters
#'
#' @details This function is called by the module envs to load user provided
#' raster variables for use in further analyses. It returns either a
#' rasterStack or rasterBrick of loaded variables with appropriate names for
#' further analyses.
#'
#' @param rasPath character. Path to rasters, must be the full path including
#' file name and extension
#' @param rasName character. Vector of raster names to be assigned to
#' loaded rasters
#' @param doBrick logical. Converts downloaded rasters to brick for faster
#' processing
#' @param logger Stores all notification messages to be displayed in the Log
#' Window of Wallace GUI. Insert the logger reactive list here for running in
#' shiny, otherwise leave the default NULL
# @keywords
#'
#' @examples
#' \dontrun{
#' pathRast <- list.files(system.file("extdata/wc", package = "wallace"),
#'                        pattern = ".tif$", full.names = TRUE)
#' nameRast <- list.files(system.file("extdata/wc", package = "wallace"),
#'                        pattern = ".tif$", full.names = FALSE)
#' userEnvs <- envs_userEnvs(rasPath = pathRast, rasName = nameRast)
#' }
#'
#' @return A rasterStack or a rasterBrick (if doBrick = TRUE) of user
#' provided rasters
#'
#' @author Jamie Kass <jamie.m.kass@@gmail.com>
#' @author Gonzalo E. Pinilla-Buitrago <gepinillab@@gmail.com>
#' @export
#'
envs_userEnvs <- function(rasPath, rasName, doBrick = FALSE, logger = NULL) {
  smartProgress(logger, message = "Reading in rasters...", {
    rasStack <- raster::stack(rasPath)
  })
  # assign names
  names(rasStack) <- tools::file_path_sans_ext(rasName)
  if (is.na(raster::crs(rasStack))) {
    logger %>% writeLog(
      type = "warning",
      paste0('Input rasters have undefined coordinate reference system (CRS). ',
             'Mapping functionality in components Visualize Model Results and ',
             'Transfer Model will not work. If you wish to map rasters in these ',
             'components, please define their projections and upload again. ',
             'See guidance text in this module for more details.'))
  }
  # convert to brick for faster processing
  if (doBrick == TRUE) {
    smartProgress(logger,
                  message = "Converting to RasterBrick for faster processing...", {
      rasStack <- raster::brick(rasStack)
    })
  }
  return(rasStack)
}
/scratch/gouwar.j/cran-all/cranData/wallace/R/envs_userEnvs.R
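If envs_userEnvs() issues the undefined-CRS warning above, the remedy is to define a projection on the rasters before re-uploading. A sketch, assuming the layers really are unprojected WGS84:

library(raster)
r <- raster::raster(nrows = 10, ncols = 10, xmn = -80, xmx = -70,
                    ymn = 0, ymx = 10, vals = runif(100))
raster::crs(r) <- NA       # simulate an upload with no defined CRS
is.na(raster::crs(r))      # TRUE: this is the condition that triggers the warning
raster::crs(r) <- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"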
# Wallace EcoMod: a flexible platform for reproducible modeling of
# species niches and distributions.
#
# envs_worldclim.R
# File author: Wallace EcoMod Dev Team. 2023.
# --------------------------------------------------------------------------
# This file is part of the Wallace EcoMod application
# (hereafter “Wallace”).
#
# Wallace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Wallace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Wallace. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------
#
#' @title envs_worldclim Obtain WorldClim variables
#' @description Download WorldClim variables. See www.worldclim.org.
#'
#' @details This function is called by the module envs to download
#' WorldClim variables from www.worldclim.org. The variables to be downloaded
#' are selected by the user with bcSel and the resolution with bcRes. It
#' returns either a rasterStack or rasterBrick of selected variables with
#' appropriate names for further analyses.
#'
#' @param bcRes numeric. Resolution of the climatic layers. Currently
#' available resolutions are 0.5, 2.5 and 10 (arcmin).
#' @param bcSel character. Vector with bionames to be selected.
#' @param mapCntr numeric. Vector with longitude and latitude for a tile.
#' Required for bcRes 0.5; for other resolutions world data will be downloaded.
#' @param doBrick logical. Converts downloaded rasters to brick for faster
#' processing.
#' @param logger Stores all notification messages to be displayed in the
#' Log Window of Wallace GUI. Insert the logger reactive list here for
#' running in shiny, otherwise leave the default NULL.
#'
#' @examples
#' \dontrun{
#' bcRes <- 10 # (10 arcmin)
#' envar <- c('bio05', 'bio06', 'bio13', 'bio14')
#' arcmin10 <- envs_worldclim(bcRes, bcSel = envar)
#' }
#'
#' @return A rasterStack or a rasterBrick (if doBrick = TRUE) of downloaded
#' worldclim rasters at the requested resolution.
#'
#' @author Jamie Kass <jamie.m.kass@@gmail.com>
#' @author Gonzalo E. Pinilla-Buitrago <gepinillab@@gmail.com>
#'
#' @seealso \code{\link[raster]{getData}}
#'
#' @export
envs_worldclim <- function(bcRes, bcSel, mapCntr, doBrick = FALSE,
                           logger = NULL) {
  if (bcRes == '') {
    logger %>% writeLog(type = 'error', 'Select a raster resolution.')
    return()
  }
  smartProgress(logger, message = "Retrieving WorldClim data...", {
    wcbc <- raster::getData(name = "worldclim", var = "bio", res = bcRes,
                            lon = mapCntr[1], lat = mapCntr[2])
    # for 0.5 arcmin tiles, strip the tile suffix from layer names
    if (bcRes == 0.5) {
      names(wcbc) <- gsub("_.*", "", names(wcbc))
    }
    # zero-pad single-digit names so "bio1" becomes "bio01", and so forth
    i <- grep('bio[0-9]$', names(wcbc))
    editNames <- paste('bio', sapply(strsplit(names(wcbc)[i], 'bio'),
                                     function(x) x[2]), sep = '0')
    names(wcbc)[i] <- editNames
    wcbc <- wcbc[[bcSel]]
  })
  # convert to brick for faster processing
  if (doBrick == TRUE) {
    smartProgress(logger,
                  message = "Converting to RasterBrick for faster processing...", {
      wcbc <- raster::brick(wcbc)
    })
  }
  logger %>% writeLog("WorldClim bioclimatic variables ",
                      paste(names(wcbc), collapse = ", "), " at ", bcRes,
                      " arcmin resolution.")
  return(wcbc)
}
/scratch/gouwar.j/cran-all/cranData/wallace/R/envs_worldclim.R
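The renaming step above can be seen in isolation; this standalone sketch mirrors the grep/strsplit logic in envs_worldclim() on a toy name vector:

nms <- c("bio1", "bio2", "bio10", "bio19")
i <- grep('bio[0-9]$', nms)  # matches only single-digit names (bio1, bio2)
editNames <- paste('bio', sapply(strsplit(nms[i], 'bio'), function(x) x[2]),
                   sep = '0')
nms[i] <- editNames
nms  # "bio01" "bio02" "bio10" "bio19"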
# Wallace EcoMod: a flexible platform for reproducible modeling of
# species niches and distributions.
#
# espace_nicheOv.R
# File author: Wallace EcoMod Dev Team. 2023.
# --------------------------------------------------------------------------
# This file is part of the Wallace EcoMod application
# (hereafter “Wallace”).
#
# Wallace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Wallace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Wallace. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------
#
#' @title espace_nicheOv Niche Overlap
#' @description Function evaluates niche overlap between the two species for
#' which the occurrence density grid was computed.
#'
#' @details
#' The niche overlap quantification is based on the occurrence densities and
#' the densities of environmental conditions available in the background extent
#' that are estimated in the module Occurrence Density Grid. The function
#' computes Schoener's D; the unfilling, stability, and expansion indices
#' (Guisan et al. 2014 TREE); and, optionally, tests of niche equivalency and
#' niche similarity.
#'
#' @param z1 ecospat niche object for species 1 from espace_occDens.
#' @param z2 ecospat niche object for species 2 from espace_occDens.
#' @param iter numeric. Number of iterations for the equivalency and
#' similarity tests. Default is 100.
#' @param equivalency logical. Whether to run the equivalency test. Default is
#' FALSE.
#' @param similarity logical. Whether to run the similarity test. Default is
#' TRUE.
#' @param logger Stores all notification messages to be displayed in the Log
#' Window of Wallace GUI. Insert the logger reactive list here for running in
#' shiny, otherwise leave the default NULL.
#' @examples
#' \dontrun{
#' sp.name1 <- "Bassaricyon_alleni"
#' sp.name2 <- "Bassaricyon_neblina"
#' envs <- envs_userEnvs(rasPath = list.files(system.file("extdata/wc",
#'                                            package = "wallace"),
#'                       pattern = ".tif$", full.names = TRUE),
#'                       rasName = list.files(system.file("extdata/wc",
#'                                            package = "wallace"),
#'                       pattern = ".tif$", full.names = FALSE))
#'
#' occs.z1 <- read.csv(system.file("extdata/Bassaricyon_alleni.csv",
#'                     package = "wallace"))
#' occs.z2 <- read.csv(system.file("extdata/Bassaricyon_neblina.csv",
#'                     package = "wallace"))
#'
#' bgPts.z1 <- read.csv(system.file("extdata/Bassaricyon_alleni_bgPoints.csv",
#'                      package = "wallace"))
#' bgPts.z2 <- read.csv(system.file("extdata/Bassaricyon_neblina_bgPoints.csv",
#'                      package = "wallace"))
#'
#' occsExt.z1 <- raster::extract(envs, occs.z1[, c("longitude", "latitude")])
#' occsExt.z2 <- raster::extract(envs, occs.z2[, c("longitude", "latitude")])
#' bgExt.z1 <- raster::extract(envs, bgPts.z1[, c("longitude", "latitude")])
#' bgExt.z2 <- raster::extract(envs, bgPts.z2[, c("longitude", "latitude")])
#' pcaZ <- espace_pca(sp.name1, sp.name2,
#'                    occsExt.z1, occsExt.z2,
#'                    bgExt.z1, bgExt.z2)
#' occDens <- espace_occDens(sp.name1, sp.name2, pcaZ)
#' nicheOv <- espace_nicheOv(z1 = occDens[[sp.name1]],
#'                           z2 = occDens[[sp.name2]],
#'                           iter = 100, equivalency = TRUE,
#'                           similarity = TRUE)
#' }
#'
#' @return A list of up to 4 elements: overlap (Schoener's D), USE
#' (ecospat.niche.dyn.index), and, if the corresponding tests were run,
#' equiv and simil.
#' @author Jamie Kass <jamie.m.kass@@gmail.com>
#' @author Olivier Broennimann <olivier.broennimann@@unil.ch>
#' @seealso \code{\link{espace_pca}} \code{\link{espace_occDens}}
#' \code{\link[ecospat]{ecospat.niche.overlap}}
#' \code{\link[ecospat]{ecospat.niche.dyn.index}}
#' \code{\link[ecospat]{ecospat.niche.equivalency.test}}
#' \code{\link[ecospat]{ecospat.niche.similarity.test}}
#' @export
espace_nicheOv <- function(z1, z2, iter = 100, equivalency = FALSE,
                           similarity = TRUE, logger = NULL) {
  nicheOv <- list()

  # Schoener's D
  nicheOv$overlap <- ecospat::ecospat.niche.overlap(z1, z2, cor = TRUE)

  # Unfilling, stability, expansion indices (Guisan et al. 2014 TREE)
  nicheOv$USE <- ecospat::ecospat.niche.dyn.index(
    z1, z2, intersection = 0)$dynamic.index.w

  # niche tests
  if (equivalency == TRUE) {
    smartProgress(logger, message = "Calculating niche equivalency...", {
      nicheOv$equiv <- ecospat::ecospat.niche.equivalency.test(
        z1, z2, rep = iter, overlap.alternative = "higher"
      )
    })
  }
  if (similarity == TRUE) {
    smartProgress(logger, message = "Calculating niche similarity...", {
      nicheOv$simil <- ecospat::ecospat.niche.similarity.test(
        z1, z2, rep = iter, overlap.alternative = "higher", rand.type = 1
      )
    })
  }
  return(nicheOv)
}
/scratch/gouwar.j/cran-all/cranData/wallace/R/espace_nicheOv.R
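Continuing the \dontrun example above, the returned list can be unpacked as follows. A sketch in comments; the slot names inside the ecospat test objects are assumed from ecospat 3.x and may differ across versions:

# nicheOv$overlap$D   # Schoener's D in [0, 1]; 1 = identical niches
# nicheOv$USE         # named unfilling/stability/expansion indices
# nicheOv$equiv$p.D   # p-value of the equivalency test based on D (if run)
# nicheOv$simil$p.D   # p-value of the similarity test based on D (if run)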
# Wallace EcoMod: a flexible platform for reproducible modeling of
# species niches and distributions.
#
# espace_occDens.R
# File author: Wallace EcoMod Dev Team. 2023.
# --------------------------------------------------------------------------
# This file is part of the Wallace EcoMod application
# (hereafter “Wallace”).
#
# Wallace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Wallace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Wallace. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------
#
#' @title Occurrence density grid
#' @description Calculates the part of environmental space more densely
#' populated by each species and the availability of environmental conditions
#' in the background.
#'
#' @details
#' This function estimates an occurrence density for each region of the
#' environmental space (gridded at 100 x 100 pixels) using a kernel density
#' approach. The density of environmental conditions in the background is
#' calculated in the same way.
#
#' @param sp.name1 character name of species 1 to be analyzed.
#' @param sp.name2 character name of species 2 to be analyzed.
#' @param pca pca output of pca component (in list format)
#' @param logger stores all notification messages to be displayed in the Log
#' Window of Wallace GUI. Insert the logger reactive list here for running in
#' shiny, otherwise leave the default NULL.
#' @examples
#' \dontrun{
#' sp.name1 <- "Bassaricyon_alleni"
#' sp.name2 <- "Bassaricyon_neblina"
#' envs <- envs_userEnvs(rasPath = list.files(system.file("extdata/wc",
#'                                            package = "wallace"),
#'                       pattern = ".tif$", full.names = TRUE),
#'                       rasName = list.files(system.file("extdata/wc",
#'                                            package = "wallace"),
#'                       pattern = ".tif$", full.names = FALSE))
#'
#' occs.z1 <- read.csv(system.file("extdata/Bassaricyon_alleni.csv",
#'                     package = "wallace"))
#' occs.z2 <- read.csv(system.file("extdata/Bassaricyon_neblina.csv",
#'                     package = "wallace"))
#'
#' bgPts.z1 <- read.csv(system.file("extdata/Bassaricyon_alleni_bgPoints.csv",
#'                      package = "wallace"))
#' bgPts.z2 <- read.csv(system.file("extdata/Bassaricyon_neblina_bgPoints.csv",
#'                      package = "wallace"))
#'
#' occsExt.z1 <- raster::extract(envs, occs.z1[, c("longitude", "latitude")])
#' occsExt.z2 <- raster::extract(envs, occs.z2[, c("longitude", "latitude")])
#' bgExt.z1 <- raster::extract(envs, bgPts.z1[, c("longitude", "latitude")])
#' bgExt.z2 <- raster::extract(envs, bgPts.z2[, c("longitude", "latitude")])
#' pcaZ <- espace_pca(sp.name1, sp.name2,
#'                    occsExt.z1, occsExt.z2,
#'                    bgExt.z1, bgExt.z2)
#' occDens <- espace_occDens(sp.name1, sp.name2, pcaZ)
#' }
#'
#' @return Returns a list of 2 lists (one for each species). Each list is an
#' ecospat niche object that contains 10 species-specific slots with
#' information output by ecospat::grid.clim.dyn. z.uncor is the density of
#' occurrence of the species and z.cor the occupancy of the environment by
#' the species. It has the input parameters as individual slots.
#' @author Jamie Kass <jamie.m.kass@@gmail.com>
#' @author Olivier Broennimann <olivier.broennimann@@unil.ch>
#' @seealso \code{\link{espace_pca}} \code{\link{espace_nicheOv}}
#' \code{\link[ecospat]{ecospat.grid.clim.dyn}}
#' @export
espace_occDens <- function(sp.name1, sp.name2, pca, logger = NULL) {
  bg <- pca$scores$bg
  sp <- pca$scores$sp
  scores.bg12 <- pca$scores[bg != 'sp', 1:2]
  scores.bg1 <- pca$scores[bg == sp.name1, 1:2]
  scores.occs1 <- pca$scores[sp == sp.name1, 1:2]
  scores.bg2 <- pca$scores[bg == sp.name2, 1:2]
  scores.occs2 <- pca$scores[sp == sp.name2, 1:2]

  # Check whether occurrence points fall outside the PCA background extent
  axis.min <- apply(scores.bg12, 2, min, na.rm = TRUE)
  axis.max <- apply(scores.bg12, 2, max, na.rm = TRUE)

  occs.check1 <- data.frame(cbind(
    (scores.occs1[, 1] - axis.min[1]) / abs(axis.max[1] - axis.min[1]),
    (scores.occs1[, 2] - axis.min[2]) / abs(axis.max[2] - axis.min[2])
  ))
  occs.check2 <- data.frame(cbind(
    (scores.occs2[, 1] - axis.min[1]) / abs(axis.max[1] - axis.min[1]),
    (scores.occs2[, 2] - axis.min[2]) / abs(axis.max[2] - axis.min[2])
  ))
  out.occ1 <- which(occs.check1 < 0, arr.ind = TRUE)
  out.occ2 <- which(occs.check2 < 0, arr.ind = TRUE)
  if (nrow(out.occ1) > 0) {
    scores.occs1 <- scores.occs1[-(out.occ1[1:(length(out.occ1)/2)]), ]
    logger %>% writeLog(
      type = "warning", hlSpp(sp.name1),
      "Some occurrences fall outside the extent of the background points in ",
      "the PCA space. These occurrences (n = ", nrow(out.occ1), ") were ",
      "removed before continuing with the occurrence density grid analysis."
    )
  }
  if (nrow(out.occ2) > 0) {
    scores.occs2 <- scores.occs2[-(out.occ2[1:(length(out.occ2)/2)]), ]
    logger %>% writeLog(
      type = "warning", hlSpp(sp.name2),
      "Some occurrences fall outside the extent of the background points in ",
      "the PCA space. These occurrences (n = ", nrow(out.occ2), ") were ",
      "removed before continuing with the occurrence density grid analysis."
    )
  }

  smartProgress(logger, message = "Running occurrence density grids...", {
    occDens1 <- ecospat::ecospat.grid.clim.dyn(scores.bg12, scores.bg1,
                                               scores.occs1, 100,
                                               kernel.method = "ks")
    # incProgress(1/2)
    occDens2 <- ecospat::ecospat.grid.clim.dyn(scores.bg12, scores.bg2,
                                               scores.occs2, 100,
                                               kernel.method = "ks")
    # incProgress(1/2)
  })
  occDens <- list()
  occDens[[sp.name1]] <- occDens1
  occDens[[sp.name2]] <- occDens2

  logger %>% writeLog(hlSpp(paste0(sp.name1, " and ", sp.name2)),
                      "Occurrence density grid.")
  return(occDens)
}
/scratch/gouwar.j/cran-all/cranData/wallace/R/espace_occDens.R
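The extent check in espace_occDens() rescales occurrence scores by the background range, so a rescaled value below 0 means the point lies outside the background envelope on that axis. A toy sketch with made-up numbers:

axis.min <- c(-2, -3); axis.max <- c(2, 3)  # hypothetical background range
scores.occs <- rbind(c(-2.5, 0), c(1, 1))   # first point is outside on axis 1
occs.check <- cbind(
  (scores.occs[, 1] - axis.min[1]) / abs(axis.max[1] - axis.min[1]),
  (scores.occs[, 2] - axis.min[2]) / abs(axis.max[2] - axis.min[2]))
which(occs.check < 0, arr.ind = TRUE)       # flags row 1, axis 1 for removal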
# Wallace EcoMod: a flexible platform for reproducible modeling of
# species niches and distributions.
#
# espace_pca.R
# File author: Wallace EcoMod Dev Team. 2023.
# --------------------------------------------------------------------------
# This file is part of the Wallace EcoMod application
# (hereafter “Wallace”).
#
# Wallace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Wallace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Wallace. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------
#
#' @title espace_pca Principal component analysis
#' @description Principal component analysis to reduce dimensionality of
#' environmental space
#' @details
#' This function is called by the component espace to calibrate a PCA for
#' 2 species in environmental space. When used within Wallace, GUI parameters
#' are obtained from the model object, in particular the table of occurrences
#' with environmental values and the table of background points with
#' environmental values. Users must take care that these tables contain only
#' environmental variables, not the point coordinates output by model objects.
#' The PCA is calibrated over the whole set of background points.
#' The provided species name(s) are only used for logger messages, not for
#' querying or selecting occurrences.
#' @param sp.name1 character. Name of species 1 to be analyzed.
#' @param sp.name2 character. Name of species 2 to be analyzed. Default is NULL.
#' @param occs.z1 table of occurrences with environmental values only for sp1.
#' @param occs.z2 table of occurrences with environmental values only for sp2.
#' Default is NULL.
#' @param bgPts.z1 table of background points with environmental values only
#' for sp1.
#' @param bgPts.z2 table of background points with environmental values only
#' for sp2. Default is NULL.
#' @param logger Stores all notification messages to be displayed in the Log
#' Window of Wallace GUI. Insert the logger reactive list here for running in
#' shiny, otherwise leave the default NULL
#' @examples
#' \dontrun{
#' sp.name1 <- "Bassaricyon_alleni"
#' sp.name2 <- "Bassaricyon_neblina"
#' envs <- envs_userEnvs(rasPath = list.files(system.file("extdata/wc",
#'                                            package = "wallace"),
#'                       pattern = ".tif$", full.names = TRUE),
#'                       rasName = list.files(system.file("extdata/wc",
#'                                            package = "wallace"),
#'                       pattern = ".tif$", full.names = FALSE))
#'
#' occs.z1 <- read.csv(system.file("extdata/Bassaricyon_alleni.csv",
#'                     package = "wallace"))
#' occs.z2 <- read.csv(system.file("extdata/Bassaricyon_neblina.csv",
#'                     package = "wallace"))
#'
#' bgPts.z1 <- read.csv(system.file("extdata/Bassaricyon_alleni_bgPoints.csv",
#'                      package = "wallace"))
#' bgPts.z2 <- read.csv(system.file("extdata/Bassaricyon_neblina_bgPoints.csv",
#'                      package = "wallace"))
#'
#' occsExt.z1 <- raster::extract(envs, occs.z1[, c("longitude", "latitude")])
#' occsExt.z2 <- raster::extract(envs, occs.z2[, c("longitude", "latitude")])
#' bgExt.z1 <- raster::extract(envs, bgPts.z1[, c("longitude", "latitude")])
#' bgExt.z2 <- raster::extract(envs, bgPts.z2[, c("longitude", "latitude")])
#' pcaZ <- espace_pca(sp.name1, sp.name2,
#'                    occsExt.z1, occsExt.z2,
#'                    bgExt.z1, bgExt.z2)
#' }
#' @return A list of 14 elements of classes dudi and pca as in dudi.pca
#' @seealso \code{\link[ade4]{dudi.pca}}
#'
#' @author Jamie Kass <jamie.m.kass@@gmail.com>
#' @author Olivier Broennimann <olivier.broennimann@@unil.ch>
#' @export
espace_pca <- function(sp.name1, sp.name2 = NULL, occs.z1, occs.z2 = NULL,
                       bgPts.z1, bgPts.z2 = NULL, logger = NULL) {
  if (!is.null(bgPts.z2)) {
    data <- rbind(occs.z1, occs.z2, bgPts.z1, bgPts.z2)
    sp <- c(rep(sp.name1, nrow(occs.z1)), rep(sp.name2, nrow(occs.z2)),
            rep('bg', nrow(bgPts.z1)), rep('bg', nrow(bgPts.z2)))
    bg <- c(rep('sp', nrow(occs.z1)), rep('sp', nrow(occs.z2)),
            rep(sp.name1, nrow(bgPts.z1)), rep(sp.name2, nrow(bgPts.z2)))
  } else {
    data <- rbind(occs.z1, bgPts.z1)
    sp <- c(rep(sp.name1, nrow(occs.z1)), rep('bg', nrow(bgPts.z1)))
    bg <- c(rep('sp', nrow(occs.z1)), rep(sp.name1, nrow(bgPts.z1)))
  }
  # pca calibration and prediction of scores
  pca <- ade4::dudi.pca(data, row.w = bg > 0, center = TRUE, scale = TRUE,
                        scannf = FALSE, nf = ncol(data))
  pca$scores <- cbind(pca$li, sp, bg)
  if (is.null(sp.name2)) {
    spNames <- sp.name1
  } else {
    spNames <- paste0(sp.name1, " and ", sp.name2)
  }
  logger %>% writeLog(hlSpp(spNames), "Principal component analysis.")
  return(pca)
}
/scratch/gouwar.j/cran-all/cranData/wallace/R/espace_pca.R
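espace_pca() also handles the single-species case; a sketch in comments, continuing the example above with the *.z2 arguments left at their NULL defaults:

# pcaZ1 <- espace_pca(sp.name1 = "Bassaricyon_alleni",
#                     occs.z1 = occsExt.z1, bgPts.z1 = bgExt.z1)
# head(pcaZ1$scores)  # PC scores plus the 'sp' and 'bg' label columns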
# Wallace EcoMod: a flexible platform for reproducible modeling of
# species niches and distributions.
#
# helper_functions.R
# File author: Wallace EcoMod Dev Team. 2023.
# --------------------------------------------------------------------------
# This file is part of the Wallace EcoMod application
# (hereafter “Wallace”).
#
# Wallace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Wallace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Wallace. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------
#
####################### #
# MISC #
####################### #

#' @title printVecAsis
#' @description For internal use. Print vector as character string
#' @param x vector
#' @param asChar exclude c notation at the beginning of string
#' @keywords internal
#' @export
printVecAsis <- function(x, asChar = FALSE) {
  if (is.character(x)) {
    if (length(x) == 1) {
      return(paste0("\'", x, "\'"))
    } else {
      if (asChar == FALSE) {
        return(paste0("c(", paste(sapply(x, function(a) paste0("\'", a, "\'")),
                                  collapse = ", "), ")"))
      } else {
        return(paste0("(", paste(sapply(x, function(a) paste0("\'", a, "\'")),
                                 collapse = ", "), ")"))
      }
    }
  } else {
    if (length(x) == 1) {
      return(x)
    } else {
      if (asChar == FALSE) {
        return(paste0("c(", paste(x, collapse = ", "), ")"))
      } else {
        return(paste0("(", paste(x, collapse = ", "), ")"))
      }
    }
  }
}

#' @title Spurious package call to avoid note of functions outside R folder
#' @description For internal use.
#' @param x x
#' @keywords internal
#' @export
spurious <- function(x) {
  DT::renderDataTable(x)
  RColorBrewer::brewer.pal(x)
  leafem::addMouseCoordinates(x)
  leaflet.extras::removeDrawToolbar(x)
  markdown::html_format()
  rmarkdown::github_document(x)
  shinyWidgets::pickerInput(x)
  shinyjs::disable(x)
  zip::zipr(x)
  return()
}

####################### #
# SHINY LOG #
####################### #

#' @title fmtSpN
#' @description For internal use. Format species name with underscore
#' @param spN Species name
#' @keywords internal
#' @export
fmtSpN <- function(spN) {
  spN <- as.character(spN)
  # separate by space
  spN.fmt <- sapply(spN, function(x) strsplit(x, split = ' '))
  # put underscores in
  spN.fmt <- sapply(spN.fmt, function(x) paste(x, collapse = '_'))
  return(spN.fmt)
}

#' @title hlSpp
#' @description For internal use. Green and bold species name in the Log Window
#' @param spN Species name
#' @keywords internal
#' @export
hlSpp <- function(spN) {
  if (is.null(spN)) {
    return("")
  }
  # replace underscores with spaces before highlighting
  if (grepl("_", spN)) {
    spN <- gsub("_", " ", spN)
  }
  boldSpp <- paste0('<font color="#003300"><b><i>', spN, '</i> | </b></font>')
  return(boldSpp)
}

#' @title smartProgress
#' @description For internal use. Either prints a message to the console or
#' makes a progress bar in the shiny app; a non-NULL first param "logs" turns
#' on the shiny functionality
#' @param logs Wallace logger
#' @param message A single-element character vector; the message to be displayed
#' to the user.
#' @param expr The work to be done.
#' @keywords internal
#' @export
smartProgress <- function(logs, message, expr) {
  if (!is.null(logs)) {
    withProgress(message = message, expr)
  } else {
    message(message)
    expr
  }
}

#' @title spName
#' @description For internal use. Retrieves the species name for use internally
#' in non-shiny functions
#' @param spN Species name
#' @keywords internal
#' @export
spName <- function(spN) {
  if (is.null(spN)) {
    return("species")
  } else {
    return(paste(strsplit(as.character(spN), "_")[[1]], collapse = " "))
  }
}

#' @title writeLog
#' @description For internal use. Add text to a logger
#' @param logger The logger to write the text to. Can be NULL or a function
#' @param ... Messages to write to the logger
#' @param type One of "default", "error", "warning"
#' @keywords internal
#' @export
writeLog <- function(logger, ..., type = 'default') {
  if (is.null(logger)) {
    if (type == 'error') {
      stop(paste0(..., collapse = ""), call. = FALSE)
    } else if (type == 'warning') {
      warning(paste0(..., collapse = ""), call. = FALSE)
    } else {
      message(paste0(..., collapse = ""))
    }
  } else if (is.function(logger)) {
    if (type == "default") {
      pre <- "> "
    } else if (type == 'error') {
      shinyalert::shinyalert("Please, check Log window for more information ",
                             type = "error")
      pre <- '> <font color="red"><b>! ERROR</b></font> : '
    } else if (type == 'warning') {
      shinyalert::shinyalert("Please, check Log window for more information ",
                             type = "warning")
      pre <- '> <font color="orange"><b>! WARNING</b></font> : '
    }
    newEntries <- paste0('<br>', pre, ..., collapse = "")
    logger(paste0(logger(), newEntries))
  } else {
    warning("Invalid logger type")
  }
  invisible()
}

####################### #
# MAPPING #
####################### #

#' @title clearAll
#' @description For internal use. Clean everything in leaflet map.
#' @param map leaflet map
#' @keywords internal
#' @export
clearAll <- function(map) {
  map %>% clearMarkers() %>% clearShapes() %>% clearImages() %>%
    clearControls() %>% removeLayersControl()
}

#' @title polyZoom
#' @description For internal use. Zooms appropriately for any polygon
#' @param xmin Minimum longitude
#' @param xmax Maximum longitude
#' @param ymin Minimum latitude
#' @param ymax Maximum latitude
#' @param fraction Expand zoom fraction
#' @keywords internal
#' @export
polyZoom <- function(xmin, ymin, xmax, ymax, fraction) {
  x <- (xmax - xmin) * fraction
  y <- (ymax - ymin) * fraction
  x1 <- xmin - x
  x2 <- xmax + x
  y1 <- ymin - y
  y2 <- ymax + y
  return(c(x1, y1, x2, y2))
}

#' @title zoom2Occs
#' @description For internal use. Zoom to occurrence points.
#' @param map leaflet map
#' @param occs occurrences table
#' @keywords internal
#' @export
zoom2Occs <- function(map, occs) {
  lat <- occs["latitude"]
  lon <- occs["longitude"]
  lg.diff <- abs(max(lon) - min(lon))
  lt.diff <- abs(max(lat) - min(lat))
  if (lg.diff > 1) lg.diff <- 1
  if (lt.diff > 1) lt.diff <- 1
  z <- c(min(lon - lg.diff), min(lat - lt.diff),
         max(lon + lg.diff), max(lat + lt.diff))
  map %>% fitBounds(z[1], z[2], z[3], z[4])

  ## this section makes letter icons for occs based on basisOfRecord
  # makeOccIcons <- function(width = 10, height = 10, ...) {
  #   occIcons <- c('H', 'O', 'P', 'U', 'F', 'M', 'I', 'L', 'A', 'X')
  #   files <- character(9)
  #   # create a sequence of png images
  #   for (i in 1:9) {
  #     f <- tempfile(fileext = '.png')
  #     png(f, width = width, height = height, bg = 'transparent')
  #     graphics::par(mar = c(0, 0, 0, 0))
  #     plot.new()
  #     graphics::points(.5, .5, pch = occIcons[i],
  #                      cex = min(width, height) / 8, col = 'red', ...)
  #     dev.off()
  #     files[i] <- f
  #   }
  #   files
  # }
  # occIcons <- makeOccIcons()
  # iconList <- list(HUMAN_OBSERVATION=1, OBSERVATION=2, PRESERVED_SPECIMEN=3,
  #                  UNKNOWN_EVIDENCE=4, FOSSIL_SPECIMEN=5, MACHINE_OBSERVATION=6,
  #                  LIVING_SPECIMEN=7, LITERATURE_OCCURRENCE=8, MATERIAL_SAMPLE=9)
  # values$origOccs$basisNum <- unlist(iconList[values$origOccs$basisOfRecord])
  # proxy %>% addMarkers(data = values$origOccs, lat = ~latitude, lng = ~longitude,
  #                      layerId = as.numeric(rownames(values$origOccs)),
  #                      icon = ~icons(occIcons[basisNum]))
}

####################### #
# OBTAIN OCCS #
####################### #

#' @title popUpContent
#' @description For internal use. Make new column for leaflet marker popup content
#' @param occs occurrence table
#' @keywords internal
#' @export
popUpContent <- function(occs) {
  lat <- round(as.numeric(occs['latitude']), digits = 2)
  lon <- round(as.numeric(occs['longitude']), digits = 2)
  as.character(tagList(
    tags$strong(paste("occID:", occs['occID'])),
    tags$br(),
    tags$strong(paste("Latitude:", lat)),
    tags$br(),
    tags$strong(paste("Longitude:", lon)),
    tags$br(),
    tags$strong(paste("Year:", occs['year'])),
    tags$br(),
    tags$strong(paste("Inst. Code:", occs['institutionCode'])),
    tags$br(),
    tags$strong(paste("Country:", occs['country'])),
    tags$br(),
    tags$strong(paste("State/Prov.:", occs['stateProvince'])),
    tags$br(),
    tags$strong(paste("Locality:", occs['locality'])),
    tags$br(),
    tags$strong(paste("Elevation:", occs['elevation'])),
    tags$br(),
    tags$strong(paste("Basis of Record:", occs['basisOfRecord']))
  ))
}

####################### #
# ENV DATA #
####################### #

#' @title remEnvsValsNA
#' @description For internal use. Remove occurrences with NA values
#' @param occs occurrence table
#' @param occsEnvsVals Occurrence table with environmental values
#' @param spN Species name
#' @param logger Wallace logger
#' @keywords internal
#' @export
remEnvsValsNA <- function(occs, occsEnvsVals, spN, logger) {
  withProgress(message = "Checking for points with NA values and in same cells...", {
    na.rowNums <- which(rowSums(is.na(occsEnvsVals[, -1])) >= 1)
    if (length(na.rowNums) == nrow(occsEnvsVals)) {
      logger %>% writeLog(
        type = 'error', hlSpp(spN),
        paste0('No localities overlay with environmental ',
               'predictors. For example, all localities may be marine -- ',
               'please redo with terrestrial occurrences.')
      )
      return()
    }
    if (length(na.rowNums) > 0) {
      logger %>% writeLog(
        type = 'warning', hlSpp(spN),
        'Removed records without environmental values with occIDs: ',
        paste(sort(occs[na.rowNums, "occID"]), collapse = ', '), ".")
      occs <- occs[-na.rowNums, ]
      occsEnvsVals <- occsEnvsVals[-na.rowNums, ]
    }
    # Remove same cell duplicates
    occs.dups <- duplicated(occsEnvsVals[, 1])
    if (sum(occs.dups) > 0) {
      logger %>% writeLog(
        type = 'warning', hlSpp(spN),
        "Removed ", sum(occs.dups), " localities that ",
        "shared the same grid cell. occIDs: ",
        paste(sort(occs[occs.dups, "occID"]), collapse = ', '), ".")
      occs <- occs[!occs.dups, ]
      occsEnvsVals <- occsEnvsVals[!occs.dups, ]
    }
    return(list(occs = occs, occsEnvsVals = occsEnvsVals))
  })
}

####################### #
# ESPACE #
####################### #

#' @title ecospat.plot.nicheDEV
#' @description For internal use. Plot occurrence density
#' @param z A gridclim object for the species distribution created by
#' ecospat.grid.clim.dyn()/espace_occDens().
#' @param title A title for the plot.
#' @param name.axis1 A label for the first axis.
#' @param name.axis2 A label for the second axis.
#' @param cor Correct the occurrence densities of the species by the prevalence
#' of the environments in its range (TRUE = yes, FALSE = no).
#' @keywords internal
#' @export
ecospat.plot.nicheDEV <- function(z, title = "", name.axis1 = "Axis 1",
                                  name.axis2 = "Axis 2", cor = FALSE) {
  if (is.null(z$y)) {
    R <- length(z$x)
    x <- z$x
    xx <- sort(rep(1:length(x), 2))
    if (cor == FALSE) y1 <- z$z.uncor / max(z$z.uncor)
    if (cor == TRUE) y1 <- z$z.cor / max(z$z.cor)
    Y1 <- z$Z / max(z$Z)
    yy1 <- sort(rep(1:length(y1), 2))[-c(1:2, length(y1) * 2)]
    YY1 <- sort(rep(1:length(Y1), 2))[-c(1:2, length(Y1) * 2)]
    plot(x, y1, type = "n", xlab = name.axis1,
         ylab = "density of occurrence")
    graphics::polygon(x[xx], c(0, y1[yy1], 0, 0), col = "grey")
    graphics::lines(x[xx], c(0, Y1[YY1], 0, 0))
  }
  if (!is.null(z$y)) {
    if (cor == FALSE)
      terra::plot(z$z.uncor, col = grDevices::gray(100:0 / 100),
                  legend = FALSE, xlab = name.axis1, ylab = name.axis2,
                  mar = c(3.1, 3.1, 2.1, 3.1))
    if (cor == TRUE)
      terra::plot(z$z.cor, col = grDevices::gray(100:0 / 100),
                  legend = FALSE, xlab = name.axis1, ylab = name.axis2,
                  mar = c(3.1, 3.1, 2.1, 3.1))
    terra::contour(
      z$Z, add = TRUE,
      levels = stats::quantile(z$Z[z$Z > 0], c(0, 0.5)),
      drawlabels = FALSE, lty = c(1, 2)
    )
  }
  title(title)
}
# end of espace. BAJ added 10/31/2023 after ecospat.plot.niche() from
# ecospat 4.0.0 wasn't working

####################### #
# VISUALIZE & TRANSFER #
####################### #

#' @title getRasterVals
#' @description Retrieve the value range for a prediction raster for plotting
#' @param r raster
#' @param type Maxent prediction type. It can be "raw", "logistic" or "cloglog"
#' @keywords internal
#' @export
getRasterVals <- function(r, type = 'raw') {
  v <- raster::values(r)
  # remove NAs
  v <- v[!is.na(v)]
  # set to 0-1 scale for logistic and cloglog predictions
  if (type == 'logistic' | type == 'cloglog') v <- c(v, 0, 1)
  return(v)
}

#' @title mxNonzeroCoefs
#' @description For internal use. Pulls out all non-zero, non-redundant
#' (removes hinge/product/threshold) predictor names
#' @param mx Model object
#' @param alg Maxent version used. It can be "maxent.jar" or "maxnet"
#' @keywords internal
#' @export
mxNonzeroCoefs <- function(mx, alg) {
  if (alg == "maxent.jar") {
    lambdas <- mx@lambdas[1:(length(mx@lambdas) - 4)]
    x <- data.frame(
      var = sapply(lambdas, FUN = function(x) strsplit(x, ',')[[1]][1]),
      coef = sapply(lambdas,
                    FUN = function(x) as.numeric(strsplit(x, ',')[[1]][2])),
      row.names = 1:length(lambdas))
    # remove any rows that have a zero lambdas value (second column)
    x <- x[(x[, 2] != 0), ]
    # remove any rows that have duplicate "var"s (hinges, quadratics, product)
    x <- unique(sub("\\^\\S*", "", x[, 1]))
    x <- unique(sub("\\`", "", x))
    x <- unique(sub("\\'", "", x))
    x <- unique(sub("\\=\\S*", "", x))
    x <- unique(sub("\\(", "", x))
    x <- unique(unlist(strsplit(x, split = "\\*")))
    x <- sort(x)
  } else if (alg == "maxnet") {
    lambdas <- mx$betas
    x <- data.frame(var = names(lambdas), coef = lambdas,
                    row.names = 1:length(lambdas))
    # remove any rows that have a zero lambdas value (second column)
    x <- x[(x[, 2] != 0), ]
    # remove any rows that have duplicate "var"s (hinges, quadratics, product)
    x <- unique(sub("\\^\\S*", "", x[, 1]))
    x <- unique(sub("\\I", "", x))
    x <- unique(sub("\\hinge", "", x))
    x <- unique(sub("\\categorical", "", x))
    x <- unique(sub("\\)\\:\\S*", "", x))
    x <- unique(sub("\\(", "", x))
    x <- unique(unlist(strsplit(x, split = "\\:")))
    x <- sort(x)
  }
}

#' @title predictMaxnet
#' @description Create a raster prediction for a maxnet model
#' @param mod Model object
#' @param envs Environmental rasters
#' @param clamp logical. Whether to use clamping
#' @param type Maxent prediction type. It can be "raw", "logistic" or "cloglog"
#' @keywords internal
#' @export
predictMaxnet <- function(mod, envs, clamp, type) {
  requireNamespace("maxnet", quietly = TRUE)
  envs.n <- raster::nlayers(envs)
  envs.pts <- raster::getValues(envs) %>% as.data.frame()
  mxnet.p <- stats::predict(mod, envs.pts, type = type, clamp = clamp)
  envs.pts[as.numeric(row.names(mxnet.p)), "pred"] <- mxnet.p
  pred <- raster::rasterFromXYZ(cbind(raster::coordinates(envs),
                                      envs.pts$pred),
                                res = raster::res(envs),
                                crs = raster::crs(envs))
  return(pred)
}

#' @title reverseLabel
#' @description For internal use. Reverse label in leaflet legends
#' @param ... labelFormat parameters
#' @param reverse_order Reverse order of legends
#' @keywords internal
#' @export
reverseLabel <- function(..., reverse_order = FALSE) {
  if (reverse_order) {
    function(type = "numeric", cuts) {
      cuts <- sort(cuts, decreasing = TRUE)
    }
  } else {
    labelFormat(...)
  }
}

##################### #
# DOWNLOAD #
##################### #

#' @title write_csv_robust
#' @description For internal use. Write robust CSV: list columns are converted
#' to JSON before writing
#' @param x Table
#' @param ... Additional arguments passed to \code{utils::write.csv}
#' @keywords internal
#' @export
write_csv_robust <- function(x, ...) {
  a <- dplyr::mutate_if(.tbl = x,
                        .predicate = function(col) inherits(col, "list"),
                        .funs = function(col) {
                          vapply(col, jsonlite::toJSON, character(1L))
                        })
  utils::write.csv(a, ...)
}
/scratch/gouwar.j/cran-all/cranData/wallace/R/helper_functions.R
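Two of the helpers above are easy to exercise from the console without a shiny logger; expected results follow directly from the function bodies:

printVecAsis(c("bio01", "bio12"))  # "c('bio01', 'bio12')"
printVecAsis(10)                   # 10 (non-character vectors pass through)
writeLog(NULL, "Hello from Wallace")         # NULL logger falls back to message()
# writeLog(NULL, "Oops", type = "error")     # would stop() with this text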
# Wallace EcoMod: a flexible platform for reproducible modeling of # species niches and distributions. # # model_bioclim.R # File author: Wallace EcoMod Dev Team. 2023. # -------------------------------------------------------------------------- # This file is part of the Wallace EcoMod application # (hereafter “Wallace”). # # Wallace is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # Wallace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Wallace. If not, see <http://www.gnu.org/licenses/>. # -------------------------------------------------------------------------- # #' @title model_bioclim Generate Bioclim model #' @description The function generates a BIOCLIM model using #' ENMeval 2.0 #' #' @details #' The function generates a model in ENMeval using a user provided partition of #' occurrences from previous components in the GUI. #' #' @param occs data frame of cleaned or processed occurrences obtained from #' components occs: Obtain occurrence data or, poccs: Process occurrence data. #' @param bg coordinates of background points to be used for modeling. #' @param user.grp a list of two vectors containing group assignments for #' occurrences (occs.grp) and background points (bg.grp). #' @param bgMsk a RasterStack or a RasterBrick of environmental layers cropped #' and masked to match the provided background extent. #' @param logger Stores all notification messages to be displayed in the Log #' Window of Wallace GUI. Insert the logger reactive list here for running #' in shiny, otherwise leave the default NULL. #' @param spN character. Species name to be used for all logger messages. #' #' @examples #' \dontrun{ #' envs <- envs_userEnvs(rasPath = list.files(system.file("extdata/wc", #' package = "wallace"), #' pattern = ".tif$", full.names = TRUE), #' rasName = list.files(system.file("extdata/wc", #' package = "wallace"), #' pattern = ".tif$", full.names = FALSE)) #' occs <- read.csv(system.file("extdata/Bassaricyon_alleni.csv", #' package = "wallace")) #' bg <- read.csv(system.file("extdata/Bassaricyon_alleni_bgPoints.csv", #' package = "wallace")) #' partblock <- part_partitionOccs(occs, bg, method = 'block') #' m <- model_bioclim(occs, bg, partblock, envs) #' } #' #' @return Function returns an ENMevaluate object with all the evaluated models #' and a selection of appropriate fields. #' @author Jamie M. Kass <jkass@@gradcenter.cuny.edu> #' @author Gonzalo E. 
Pinilla-Buitrago <gepinillab@@gmail.com> # @note #' @seealso \code{\link[ENMeval]{ENMevaluate}} #' @importFrom rlang .data #' @export model_bioclim <- function(occs, bg, user.grp, bgMsk, logger = NULL, spN = NULL) { # get just coordinates occs.xy <- occs %>% dplyr::select(.data$longitude, .data$latitude) bg.xy <- bg %>% dplyr::select(.data$longitude, .data$latitude) smartProgress(logger, message = paste0("Building/Evaluating BIOCLIM model for ", spName(spN), "..."), { e <- ENMeval::ENMevaluate(occs = occs.xy, envs = bgMsk, bg = bg.xy, algorithm = "bioclim", partitions = "user", user.grp = user.grp) }) logger %>% writeLog(hlSpp(spN), "BIOCLIM ran successfully and output evaluation results.") return(e) }
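# A brief sketch of inspecting the ENMevaluate object returned above (uses the
# `m` object from the @examples block; slot/accessor names per ENMeval 2.0):
if (FALSE) {
  m@results                  # evaluation statistics for the BIOCLIM model
  m@predictions              # prediction raster(s) for the fitted model
  ENMeval::eval.results(m)   # accessor equivalent of the results slot
}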
/scratch/gouwar.j/cran-all/cranData/wallace/R/model_bioclim.R
# Wallace EcoMod: a flexible platform for reproducible modeling of # species niches and distributions. # # model_maxent.R # File author: Wallace EcoMod Dev Team. 2023. # -------------------------------------------------------------------------- # This file is part of the Wallace EcoMod application # (hereafter “Wallace”). # # Wallace is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # Wallace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Wallace. If not, see <http://www.gnu.org/licenses/>. # -------------------------------------------------------------------------- # #' @title model_maxent Generate maxent.jar or maxnet model #' @description This function generates maxent.jar or maxnet models using #' ENMeval 2.0 and user-provided tuning parameters. #' #' @details #' The function generates a model in ENMeval using a user-provided partition of #' occurrences from previous components in the GUI. The user can activate #' clamping and input tuning arguments to be used for model building. #' #' @param occs data frame of cleaned or processed occurrences obtained from #' components occs: Obtain occurrence data or, poccs: Process occurrence data. #' @param bg coordinates of background points to be used for modeling. #' @param user.grp a list of two vectors containing group assignments for #' occurrences (occs.grp) and background points (bg.grp). #' @param bgMsk a RasterStack or a RasterBrick of environmental layers cropped #' and masked to match the provided background extent. #' @param rms vector of range of regularization multipliers to be used in the #' ENMeval run. #' @param rmsStep step to be used when defining regularization multipliers to #' be used from the provided range. #' @param fcs feature classes to be tested in the ENMeval run. #' @param clampSel Boolean. Whether to use clamping in the model. #' @param algMaxent character. Algorithm to be used in modeling. A selection #' of "maxnet" or "maxent.jar". #' @param catEnvs if categorical predictor variables are included, provide #' their names. #' @param parallel logical. Whether to run model generation in parallel. #' Default is FALSE. #' @param numCores numeric. If running in parallel, how many cores to use. #' Default is NULL. #' @param logger Stores all notification messages to be displayed in the Log #' Window of Wallace GUI. Insert the logger reactive list here for running in #' shiny, otherwise leave the default NULL. #' @param spN character. Species name to be used for all logger messages. 
# @keywords #' #' @examples #' \dontrun{ #' envs <- envs_userEnvs(rasPath = list.files(system.file("extdata/wc", #' package = "wallace"), #' pattern = ".tif$", full.names = TRUE), #' rasName = list.files(system.file("extdata/wc", #' package = "wallace"), #' pattern = ".tif$", full.names = FALSE)) #' occs <- read.csv(system.file("extdata/Bassaricyon_alleni.csv", #' package = "wallace")) #' bg <- read.csv(system.file("extdata/Bassaricyon_alleni_bgPoints.csv", #' package = "wallace")) #' partblock <- part_partitionOccs(occs, bg, method = 'block') #' rms <- c(1:2) #' rmsStep <- 1 #' fcs <- c('L', 'LQ') #' m <- model_maxent(occs = occs, bg = bg, user.grp = partblock, #' bgMsk = envs, rms = rms, rmsStep, fcs, #' clampSel = TRUE, algMaxent = "maxnet", #' parallel = FALSE) #' } #' #' @return Function returns an ENMevaluate object with all the evaluated models #' and a selection of appropriate fields. #' @author Jamie M. Kass <jkass@@gradcenter.cuny.edu> #' @author Gonzalo E. Pinilla-Buitrago <gepinillab@@gmail.com> # @note #' @seealso \code{\link[ENMeval]{ENMevaluate}} # @references # @aliases - a list of additional topic names that will be mapped to # this documentation when the user looks them up from the command # line. # @family - a family name. All functions that have the same family tag will be # linked in the documentation. #' @importFrom rlang .data #' @export model_maxent <- function(occs, bg, user.grp, bgMsk, rms, rmsStep, fcs, clampSel, algMaxent, catEnvs = NULL, parallel = FALSE, numCores = NULL, logger = NULL, spN = NULL) { if (is.null(user.grp)) { logger %>% writeLog( type = 'error', "Before building a model, please partition occurrences for cross-validation." ) return() } # if maxent.jar selected check for jar file and whether rJava can be loaded if (algMaxent == "maxent.jar") { # error for no maxent.jar in dismo directory jar <- paste(system.file(package = "dismo"), "/java/maxent.jar", sep = '') if (!file.exists(jar)) { logger %>% writeLog( type = 'error', "To use Maxent, make sure you download ", strong("maxent.jar"), " from the ", a("AMNH Maxent webpage", href = "http://biodiversityinformatics.amnh.org/open_source/maxent/", target = "_blank"), " and place it in this directory:", br(), em(jar)) return() } if (!requireNamespace('rJava')) { logger %>% writeLog( type = "error", paste0('Package rJava cannot load. Please download the latest version of ', 'Java, and make sure it is the correct version (e.g. 64-bit for a ', '64-bit system). After installing, try "library(rJava)". If it ', 'loads properly, restart Wallace and try again. If it does not, ', 'please consult www.github.com/wallaceecomod/wallace for more ', 'tips on getting rJava to work.')) return() } # Check maxent version if (is.null(getOption('dismo_rJavaLoaded'))) { # to avoid trouble on macs Sys.setenv(NOAWT=TRUE) if ( requireNamespace('rJava') ) { rJava::.jpackage('dismo') options(dismo_rJavaLoaded=TRUE) } else { stop('rJava cannot be loaded') } } mxe <- rJava::.jnew("meversion") maxentJARversion <- try(rJava::.jcall(mxe, "S", "meversion")) if (maxentJARversion < "3.4.3") { logger %>% writeLog( type = "error", "Please use the latest version of Maxent (v3.4.4). Currently, you are ", "using an older version (", maxentJARversion, ")." ) return() } if (maxentJARversion == "3.4.3") { logger %>% writeLog( "Please consider updating to the latest version of Maxent (v3.4.4). ", "Currently, you are using an older version (", maxentJARversion, ")."
) } } # define the vector of RMs to input rms.interval <- seq(rms[1], rms[2], rmsStep) ##set up tuning parameter argument tune.args=list(fc = fcs, rm = rms.interval) # create the Progress Bar object for ENMeval if (!is.null(logger)) { progress <- shiny::Progress$new() progress$set(message = paste0("Building/Evaluating ENMs for ", spName(spN), "..."), value = 0) on.exit(progress$close()) n <- length(rms.interval) * length(fcs) updateProgress <- function(value = NULL, detail = NULL) { progress$inc(amount = 1/n, detail = detail) } } else { n <- length(rms.interval) * length(fcs) updateProgress <- FALSE } # get just coordinates occs.xy <- occs %>% dplyr::select(.data$longitude, .data$latitude) bg.xy <- bg %>% dplyr::select(.data$longitude, .data$latitude) # run ENMeval e <- ENMeval::ENMevaluate(occs = as.data.frame(occs.xy), bg = as.data.frame(bg.xy), partitions = 'user', user.grp = user.grp, envs = bgMsk, tune.args = tune.args, doClamp = clampSel, algorithm = algMaxent, categoricals = catEnvs, parallel = parallel, numCores = numCores, parallelType = "doSNOW", # taxon.name = NULL, # user.enm = NULL, # occs.ind = NULL, # kfolds = NA, # aggregation.factor = c(2,2), # orientation = "lat_lon", # overlap = FALSE, # overlapStat = c("D","I"), # pred.type = "cloglog", # abs.auc.diff = FALSE, # user.test.grps = NULL, updateProgress = updateProgress, quiet = FALSE) occPredVals <- raster::extract(e@predictions, occs.xy) endTxt <- paste("]), using", algMaxent, "with clamping", ifelse(clampSel, "on.", "off.")) logger %>% writeLog(hlSpp(spN), "Maxent ran successfully and output evaluation ", "results for ", nrow(e@results), " models (Regularization multiplier values: [", paste(rms.interval, collapse = ", "),"]; Feature classes: [", paste(fcs, collapse = ", "), endTxt, "") return(e) }
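# A short, untested sketch of selecting an optimal model from the evaluation
# results by AICc (column names AICc/delta.AICc assumed from ENMeval 2.0
# output; `e` is the object returned by model_maxent() above):
if (FALSE) {
  res <- ENMeval::eval.results(e)
  opt <- res[which.min(res$AICc), ]  # settings of the lowest-AICc model
  opt[, c("fc", "rm", "AICc", "delta.AICc")]
}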
/scratch/gouwar.j/cran-all/cranData/wallace/R/model_maxent.R
# Wallace EcoMod: a flexible platform for reproducible modeling of # species niches and distributions. # # occs_paleoDb.R # File author: Wallace EcoMod Dev Team. 2023. # -------------------------------------------------------------------------- # This file is part of the Wallace EcoMod application # (hereafter “Wallace”). # # Wallace is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # Wallace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Wallace. If not, see <http://www.gnu.org/licenses/>. # -------------------------------------------------------------------------- # #' #' @title `occs_paleoDb` query paleobioDB database #' #' @description #' #' query paleobioDB database and returns the complete list of data, data with #' #' coordinates, and data with no duplicates #' #' #' #' @details #' #' This function is called by the module occs_queryDb to query the paleobioDB #' #' database for species occurrence records in the Holocene. It removes #' #' records with duplicate coordinates, and selects some columns with fields #' #' appropriate to studies in biogeography. #' #' #' #' @param spName character. Species name. For paleobioDb it returns records #' #' associated with the specified taxonomic name, including any synonyms. #' #' @param occNum integer maximum number of records. #' #' @param timeInterval character currently a single timeInterval is allowed: #' #' "Holocene" (Holocene). #' #' @param logger Stores all notification messages to be displayed in the #' #' Log Window of Wallace GUI. Insert the logger reactive list here for #' #' running in shiny, otherwise leave the default NULL #' #' @return A list of 2 dataframes. First dataframe is the original downloaded #' #' dataset, second dataframe without duplicates and with appropriate fields #' #' for analyses. #' #' @author Jamie Kass <jamie.m.kass@@gmail.com> #' #' @author Gonzalo E. Pinilla-Buitrago <gepinillab@@gmail.com> #' #' @author Sara Varela <sara_varela@@yahoo.com> #' #' @examples #' #' \dontrun{ #' #' spName <- "Didelphis virginiana" #' #' occNum <- 100 #' #' timeInterval <- "Holocene" #' #' occsPaleo <- occs_paleoDb(spName, occNum, timeInterval) #' #' } #' #' @export #' #' occs_paleoDb <- function(spName, occNum, timeInterval, logger = NULL) { #' if (!requireNamespace("paleobioDB", quietly = TRUE)) { #' logger %>% #' writeLog( #' type = "warning", #' "This module is available if you install the 'paleobioDB' package ", #' "(which is a suggested package for Wallace, not a required dependency). 
If you ", #' "want to install it, close Wallace and run the following line in the ", #' "R Console: ", em("install.packages('paleobioDB')") #' ) #' return() #' } #' spName <- trimws(spName) #' # figure out how many separate names (components of scientific name) were entered #' nameSplit <- length(unlist(strsplit(spName, " "))) #' # if two names not entered, throw error and return #' if (nameSplit != 2) { #' logger %>% writeLog(type = 'error', #' 'Please input both genus and species names of ONE species.') #' return() #' } #' spName <- paste0(toupper(substring(spName, 1, 1)), #' substring(spName, 2, nchar(spName))) #' smartProgress(logger, message = paste0("Querying paleobioDB ..."), { #' occsOrig <- try(paleobioDB::pbdb_occurrences(taxon_name = spName, #' limit = occNum, #' interval = timeInterval, #' vocab = 'pbdb', #' show = c("coords", "bin", "loc")), #' silent = TRUE) #' }) #' #' if (inherits(occsOrig, "try-error")) { #' logger %>% writeLog( #' type = 'error', #' hlSpp(hlSpp(fmtSpN(spName))), #' "No records found, please check the spelling.") #' return() #' } #' #' occsOrig <- dplyr::as_tibble(occsOrig) #' occsOrig$lng <- as.numeric(occsOrig$lng) #' occsOrig$lat <- as.numeric(occsOrig$lat) #' # get total number of records found in database #' totRows <- nrow(occsOrig) #' # extract occurrence tibble #' names(occsOrig)[names(occsOrig) == "lng"] <- "longitude" #' names(occsOrig)[names(occsOrig) == "lat"] <- "latitude" #' names(occsOrig)[names(occsOrig) == "cc"] <- "country" #' occsOrig$taxon_name <- as.character(occsOrig$taxon_name) #' names(occsOrig)[names(occsOrig) == "taxon_name"] <- "scientific_name" #' #' # make new column for original ID #' occsOrig$occID <- 1:nrow(occsOrig) #' #' # subset to just records with latitude and longitude #' # all plaeobioDB recors have coords, so this warning is commented until future database #' # occsXY <- occsOrig[!is.na(occsOrig$longitude) & !is.na(occsOrig$latitude),] #' # if (nrow(occsXY) == 0) { #' # logger %>% writeLog( #' # type = 'warning', #' # hlSpp(spName), "No records with coordinates found in paleobioDB.") #' # } #' occsXY <- occsOrig #' #' dups <- duplicated(occsXY[,c('longitude','latitude')]) #' occs <- occsXY[!dups, ] #' #' # subset by key columns and make id and popup columns #' cols <- c("occID", "scientific_name", "longitude", "latitude", #' # "early_interval", "late_interval", #' "country", "collection_no", "record_type", #' "early_age", "late_age") #' occs <- occs %>% dplyr::select(dplyr::one_of(cols)) %>% #' # make new column for leaflet marker popup content #' dplyr::mutate(pop = unlist(apply(occs, 1, popUpContent))) %>% #' dplyr::arrange(dplyr::across(cols)) #' occs$early_age <- as.numeric(occs$early_age) #' occs$late_age <- as.numeric(occs$late_age) #' noCoordsRem <- nrow(occsOrig) - nrow(occsXY) #' #' dupsRem <- nrow(occsXY) - nrow(occs) #' logger %>% writeLog( #' hlSpp(fmtSpN(spName)), #' 'Total paleobioDb records returned [', nrow(occsOrig), '] (limit ', occNum, #' '). Records without coordinates removed [', #' noCoordsRem, ']. Duplicated records removed [', dupsRem, #' ']. Remaining records [', nrow(occs), '].') #' return(list(orig = occsOrig, cleaned = as.data.frame(occs))) #' }
/scratch/gouwar.j/cran-all/cranData/wallace/R/occs_paleoDb.R
# Wallace EcoMod: a flexible platform for reproducible modeling of # species niches and distributions. # # occs_queryDb.R # File author: Wallace EcoMod Dev Team. 2023. # -------------------------------------------------------------------------- # This file is part of the Wallace EcoMod application # (hereafter “Wallace”). # # Wallace is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # Wallace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Wallace. If not, see <http://www.gnu.org/licenses/>. # -------------------------------------------------------------------------- # #' occs_queryDb Query online database for species occurrence records. #' #' @description Queries a given database for occurrence data on the provided species #' #' @details #' This function is called by the module occs_queryDb to query a database for #' species occurrence records, subset to only those records with coordinates, #' remove records with duplicate coordinates, and select some columns with fields #' appropriate to studies in biogeography. #' #' @param spNames character. Species Latin name, with format "Genus species". #' @param occDb character. Biodiversity database to query; current choices are #' "gbif", "vertnet", and "BIEN" #' @param occNum numeric. Maximum number of occurrence records to return #' @param logger Stores all notification messages to be displayed in the Log #' Window of Wallace GUI. Insert the logger reactive list here for running in shiny, #' otherwise leave the default NULL #' @param doCitations logical. Set TRUE to use `occCite` to get a complete list #' of original data sources in a citable format #' @param gbifUser specify only if using `occCite` with GBIF to get a complete list #' of original data sources in a citable format. This, as well as `gbifEmail` #' and `gbifPW` are constraints imposed by GBIF to obtain the complete set of #' metadata associated with occurrence records and is not stored or used by #' `wallace` for any other purposes. #' @param gbifEmail specify only if using `occCite` with GBIF to get a #' complete list of original data sources in a citable format. #' @param gbifPW specify only if using `occCite` with GBIF to get a complete #' list of original data sources in a citable format. #' @param RmUncertain specify if occurrences without uncertainty information #' should be removed (default is FALSE) #' @return list of lists one list per species with occurrence records. Each #' individual species list with appropriate fields for analysis #' #' @author Jamie Kass <jamie.m.kass@@gmail.com> #' @author Gonzalo E. 
Pinilla-Buitrago <gepinillab@@gmail.com> #' @author Hannah Owens #' @author Andrea Paz <paz.andreita@@gmail.com> #' @examples #' \dontrun{ #' occs_queryDb(spNames = "Bassaricyon alleni", occDb = "gbif", occNum = 10) #' } #' @importFrom rlang .data #' @export occs_queryDb <- function(spNames, occDb, occNum = NULL, doCitations = FALSE, gbifUser = NULL, gbifEmail = NULL, gbifPW = NULL, RmUncertain = FALSE, logger = NULL) { if (occDb == "bien" & !requireNamespace("BIEN", quietly = TRUE)) { logger %>% writeLog( type = "warning", "This option is available if you install the 'BIEN' package ", "(which is a suggested package for Wallace, not a required dependency). If you ", "want to install it, close Wallace and run the following line in the ", "R Console: ", em("install.packages('BIEN')") ) return() } if (occDb == "gbif" & doCitations == TRUE & !requireNamespace("occCite", quietly = TRUE)) { logger %>% writeLog( type = "warning", "This option is available if you install the 'occCite' package ", "(which is a suggested package for Wallace, not a required dependency). If you ", "want to install it, close Wallace and run the following line in the ", "R Console: ", em("install.packages('occCite')") ) return() } # Get all species names for textInput Shiny if (length(spNames) == 1) { if (grepl(x = spNames, pattern = ",")) { spNames <- trimws(strsplit(spNames, ",")[[1]]) } } # function for capitalizing genus names spCap <- function(x) { paste0(toupper(substring(x, 1, 1)), substring(x, 2, nchar(x))) } # capitalize genus names spNames <- sapply(spNames, spCap) # figure out how many separate names (components of scientific name) were entered namesSplit <- sapply(spNames, function(x) strsplit(x, " ")) namesSplitCheck <- sapply(namesSplit, function(x) length(x) == 2) # if two names not entered, throw error and return if (!all(namesSplitCheck)) { logger %>% writeLog(type = 'error', 'Please input both genus and species names.') return() } occList <- list() for (sp in spNames) { # query database smartProgress(logger, message = paste0("Querying ", occDb, " for ", sp, "..."), { if (occDb == 'vertnet') { q <- spocc::occ(sp, occDb, limit = occNum) myOccCitations <- NULL } else if (occDb == 'gbif') { if (doCitations == FALSE) { q <- spocc::occ(sp, occDb, limit = occNum) myOccCitations <- NULL } else if (doCitations == TRUE) { if(any(unlist(lapply(list(gbifUser, gbifEmail, gbifPW), is.null)))) { logger %>% writeLog( type = 'error', paste0('Please specify your GBIF username, email, and password. ', 'This is needed to get citations for occurrence records. Wallace ', 'does not store your information or use it for anything else.') ) return() } login <- occCite::GBIFLoginManager(user = gbifUser, email = gbifEmail, pwd = gbifPW) if (is.null(login)) { logger %>% writeLog( type = 'error', "There is an error in your GBIF credentials. Please check them." ) return() } nameGBIF <- occCite::studyTaxonList(x = sp) bestMatch <- as.character(nameGBIF@cleanedTaxonomy$`Best Match`) inputMatch <- as.character(nameGBIF@cleanedTaxonomy$`Input Name`) if (bestMatch == "No match") { logger %>% writeLog( type = "error", hlSpp(fmtSpN(sp)), "There is no match in the GBIF database. Please check the spelling." ) return() } if (bestMatch != inputMatch) { logger %>% writeLog( type = 'warning', hlSpp(inputMatch), "There is not a strict match in the GBIF search. Data ", "downloaded corresponds to ", em(bestMatch), ". 
") } myBTO <- occCite::occQuery(x = sp, datasource = "gbif", GBIFLogin = login, checkPreviousGBIFDownload = FALSE) # make something with the same slots as spocc that we use q <- list(gbif = list(meta = list(found = NULL), data = list(fmtSpN(sp)))) gbif_raw <- utils::read.table(unz( as.character(myBTO@occResults[[bestMatch]][['GBIF']][['RawOccurrences']]), "occurrence.txt"), sep = "\t", header = TRUE, quote = "", encoding = "UTF-8") gbif_occCite_df <- gbif_raw %>% dplyr::as_tibble() %>% dplyr::select(.data$scientificName, .data$decimalLongitude, .data$decimalLatitude, .data$countryCode, .data$stateProvince, .data$locality, .data$year, .data$basisOfRecord, .data$catalogNumber, .data$institutionCode, .data$elevation, .data$coordinateUncertaintyInMeters) %>% dplyr::rename(name = .data$scientificName, longitude = .data$decimalLongitude, latitude = .data$decimalLatitude, country = .data$countryCode) q[[occDb]]$meta$found <- nrow(myBTO@occResults[[bestMatch]][['GBIF']][['OccurrenceTable']]) q[[occDb]]$data[[fmtSpN(sp)]] <- gbif_occCite_df doiGBIF <- myBTO@occResults[[bestMatch]][['GBIF']]$Metadata$doi dateDOI <- format(as.Date(myBTO@occResults[[bestMatch]][['GBIF']]$Metadata$created), "%d %B %Y") citeGBIF <- list(doi = doiGBIF, date = dateDOI) logger %>% writeLog( hlSpp(fmtSpN(sp)), " #CiteTheDOI: Gbif.org (", dateDOI, ") GBIF Ocurrence Download https://doi.org/", doiGBIF ) } } else if (occDb == 'bien') { qBien <- BIEN::BIEN_occurrence_species(species = sp) # make something with the same slots as spocc that we use q <- list(bien = list(meta = list(found = NULL), data = list(fmtSpN(sp)))) q[[occDb]]$meta$found <- nrow(qBien) q[[occDb]]$data[[fmtSpN(sp)]] <- qBien } }) # if species not found, print message to log box and return if (q[[occDb]]$meta$found == 0) { logger %>% writeLog(type = 'error', hlSpp(fmtSpN(sp)), 'No records found. Please check the spelling.') return() } # extract occurrence tibbles occsOrig <- q[[occDb]]$data[[fmtSpN(sp)]] # make sure latitude and longitude are numeric (sometimes they aren't) occsOrig$latitude <- as.numeric(occsOrig$latitude) occsOrig$longitude <- as.numeric(occsOrig$longitude) # make new column for original ID occsOrig$occID <- as.numeric(row.names(occsOrig)) # delete colums with list to avoid conflict occsOrig["networkKeys"] <- NULL # subset to just records with latitude and longitude occsXY <- occsOrig[!is.na(occsOrig$latitude) & !is.na(occsOrig$longitude),] # if no records with coordinates, throw warning if (nrow(occsXY) == 0) { logger %>% writeLog( type = 'warning', hlSpp(fmtSpN(sp)), 'No records with coordinates found in ', occDb, ". 
") return() } noCoordsRem <- nrow(occsOrig) - nrow(occsXY) # round longitude and latitude with 5 digits occsXY['longitude'] <- round(occsXY['longitude'], 5) occsXY['latitude'] <- round(occsXY['latitude'], 5) occs<-occsXY if (occDb == 'gbif') { fields <- c("name", "longitude", "latitude", "country", "stateProvince", "locality", "year", "basisOfRecord", "catalogNumber", "institutionCode", "elevation", "coordinateUncertaintyInMeters") for (i in fields) if (!(i %in% names(occs))) occs[i] <- NA occs <- occs %>% dplyr::rename(scientific_name = .data$name, state_province = .data$stateProvince, record_type = .data$basisOfRecord, institution_code = .data$institutionCode, catalog_number = .data$catalogNumber, uncertainty = .data$coordinateUncertaintyInMeters) } else if (occDb == 'vertnet') { # standardize VertNet column names fields <- c("name", "longitude", "latitude", "country", "stateprovince", "locality", "year", "basisofrecord", "catalognumber", "institutioncode", "maximumelevationinmeters", "coordinateuncertaintyinmeters") for (i in fields) if (!(i %in% names(occs))) occs[i] <- NA occs <- occs %>% dplyr::rename(scientific_name = .data$name, state_province = .data$stateprovince, record_type = .data$basisofrecord, institution_code = .data$institutioncode, catalog_number = .data$catalognumber, elevation = .data$maximumelevationinmeters, uncertainty = .data$coordinateuncertaintyinmeters) # } else if (occDb == 'bison') { # standardize BISON column names # fields <- c("providedScientificName", "longitude", "latitude", "countryCode", # "stateProvince", "verbatimLocality", "year", "basisOfRecord", # "catalogNumber", "ownerInstitutionCollectionCode", # "elevation", "uncertainty") # # BISON field requirements (no downloaded by spocc) "elevation" # for (i in fields) if (!(i %in% names(occs))) occs[i] <- NA # occs <- occs %>% dplyr::rename(scientific_name = .data$providedScientificName, # country = .data$countryCode, # state_province = .data$stateProvince, # locality = .data$verbatimLocality, # record_type = .data$basisOfRecord, # institution_code = # .data$ownerInstitutionCollectionCode, # catalog_number = .data$catalogNumber) } else if (occDb == 'bien') { fields <- c("scrubbed_species_binomial", "longitude", "latitude", "collection_code", "country", "state_province", "locality", "year", "record_type", "catalog_number", "elevation", "uncertainty") # BIEN field requirements (no downloaded by BIEN) "country", # "state_province", "locality", "year", "record_type", "institution_code", # "elevation", "uncertainty" for (i in fields) if (!(i %in% names(occs))) occs[i] <- NA occs <- occs %>% dplyr::as_tibble() %>% dplyr::rename(scientific_name = .data$scrubbed_species_binomial, institution_code = .data$collection_code) } noUncertainRem <- 0 if (RmUncertain == TRUE) { occs <- occs[!is.na(occs$uncertainty), ] noUncertainRem<- nrow(occsOrig) - (nrow(occs)+noCoordsRem) if(nrow(occs)==0){ logger %>% writeLog( type = 'warning', hlSpp(fmtSpN(sp)), 'No records with coordinate uncertainty information found in ', occDb, ".") return() } } dups <- duplicated(occs[,c('longitude','latitude')]) occs <- occs[!dups,] # subset by key columns and make id and popup columns cols <- c("occID", "scientific_name", "longitude", "latitude", "country", "state_province", "locality", "year", "record_type", "catalog_number", "institution_code", "elevation", "uncertainty") occs <- occs %>% dplyr::select(dplyr::one_of(cols)) %>% dplyr::mutate(year = as.integer(.data$year), uncertainty = as.numeric(.data$uncertainty)) %>% # # make new column 
for leaflet marker popup content dplyr::mutate(pop = unlist(apply(occs, 1, popUpContent))) %>% dplyr::arrange(dplyr::across(cols)) # subset by key columns and make id and popup columns noCoordsRem <- nrow(occsOrig) - nrow(occsXY) dupsRem <- nrow(occsXY) - nrow(occs) # get total number of records found in database totRows <- q[[occDb]]$meta$found if (RmUncertain == TRUE) { logger %>% writeLog(hlSpp(fmtSpN(sp)), 'Total ', occDb, ' records returned [', nrow(occsOrig), '] out of [', totRows, '] total', if (!(doCitations | occDb == 'bien')) {paste0(' (limit ', occNum,')')}, '. Records without coordinates removed [', noCoordsRem, ']. Records without uncertainty information removed [', noUncertainRem, ']. Duplicated records removed [', dupsRem, ']. Remaining records [', nrow(occs), '].') } else {logger %>% writeLog(hlSpp(fmtSpN(sp)), 'Total ', occDb, ' records returned [', nrow(occsOrig), '] out of [', totRows, '] total', if (!(doCitations | occDb == 'bien')) {paste0(' (limit ', occNum,')')}, '. Records without coordinates removed [', noCoordsRem, ']. Duplicated records removed [', dupsRem, ']. Remaining records [', nrow(occs), '].') } # put into list if (doCitations & occDb == "gbif") { occList[[fmtSpN(sp)]] <- list(orig = occsOrig, cleaned = as.data.frame(occs), citation = citeGBIF) } else { occList[[fmtSpN(sp)]] <- list(orig = occsOrig, cleaned = as.data.frame(occs)) } } return(occList) }
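# A minimal multi-species usage sketch. List keys are produced by fmtSpN(),
# which (by assumption here) formats "Genus species" as "Genus_species":
if (FALSE) {
  occList <- occs_queryDb(spNames = "Bassaricyon alleni, Bassaricyon neblina",
                          occDb = "gbif", occNum = 50)
  occs <- occList[["Bassaricyon_alleni"]]$cleaned  # cleaned records, one species
  head(occs)
}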
/scratch/gouwar.j/cran-all/cranData/wallace/R/occs_queryDb.R
# Wallace EcoMod: a flexible platform for reproducible modeling of # species niches and distributions. # # occs_userOccs.R # File author: Wallace EcoMod Dev Team. 2023. # -------------------------------------------------------------------------- # This file is part of the Wallace EcoMod application # (hereafter “Wallace”). # # Wallace is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # Wallace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Wallace. If not, see <http://www.gnu.org/licenses/>. # -------------------------------------------------------------------------- # #' @title occs_userOccs Loads user provided occurrence records #' @description #' Load user database with species occurrence records. Returns a list of lists, #' one per species provided in database in each species list with a set of #' appropriate fields #' @details #' This function is called by the module occs_queryDb to load a user provided #' database for species occurrence records, subset to only those records with #' coordinates, remove records with duplicate coordinates, and select some #' columns with fields appropriate to studies in biogeography. #' #' @param txtPath path to database including database name and extension #' @param txtName name of database without the extension. Database must have #' at least three columns named 'scientific_name', 'longitude', 'latitude' #' @param txtSep field separator used in database (as in read.delim) #' @param txtDec decimal separator used for coordinates in database #' @param logger Stores all notification messages to be displayed in the Log #' Window of Wallace GUI. Insert the logger reactive list here for running #' in shiny, otherwise leave the default NULL #' @examples #' txtPath <- system.file("extdata/Bassaricyon_alleni.csv", package = "wallace") #' txtName <- 'Bassaricyon_alleni' #' user.occs <- occs_userOccs(txtPath, txtName) #' #' #' @return List of lists. One list per species with occurrence records. Each #' individual species list with appropriate fields for analysis #' @author Jamie Kass <jamie.m.kass@@gmail.com> #' @author Gonzalo E. Pinilla-Buitrago <gepinillab@@gmail.com> #' @importFrom rlang .data #' @export occs_userOccs <- function(txtPath, txtName, txtSep = ",", txtDec = ".", logger = NULL) { # read in txt txt <- tryCatch(expr = utils::read.delim(file = txtPath, header = TRUE, sep = txtSep, dec = txtDec), error = function(e) "error") if (inherits(txt, "character")) { logger %>% writeLog( type = "error", paste0("There is something wrong in your file. 
Check file format or ", "delimiter and decimal separators.")) return() } # check to make sure all column names are correct if (sum(c('scientific_name', 'longitude', 'latitude') %in% names(txt)) != 3) { logger %>% writeLog( type = "error", paste0('Please input a file with columns "scientific_name", ', '"longitude", "latitude".')) return() } # subset to just records with non-NA latitude and longitude txt.xy <- txt %>% dplyr::filter(!is.na(.data$latitude) & !is.na(.data$longitude)) txt.xy$scientific_name <- trimws(txt.xy$scientific_name) # get all species names occs <- txt.xy %>% dplyr::filter(!grepl("bg_", .data$scientific_name)) spNames <- trimws(as.character(occs$scientific_name)) spCap <- function(x) { paste0(toupper(substring(x, 1, 1)), substring(x, 2, nchar(x))) } # capitalize genus names spNames <- sapply(spNames, spCap) # figure out how many separate names (components of scientific name) were entered namesSplit <- sapply(spNames, function(x) strsplit(x, " ")) namesSplitCheck <- sapply(namesSplit, function(x) length(x) == 2) # if two names not entered, throw error and return if (!all(namesSplitCheck)) { logger %>% writeLog(type = 'error', paste0('Please input just genus and species epithet in scientific', ' name field in your file (e.g., "Canis lupus").')) return() } if (nrow(occs) == 0) { logger %>% writeLog(type = 'warning', 'No records with coordinates found in ', txtName, ".") return() } # Check that longitude and latitude are numeric else if (!is.numeric(txt$longitude) | !is.numeric(txt$latitude)) { logger %>% writeLog( type = "error", 'Please input txt file. Not all values in longitude or latitude are numeric.') return() } # Transform scientific_name field txt.xy$scientific_name <- spNames # put species into a list in the same form as spp occsList <- list() for (i in unique(spNames)) { sp.occs <- txt.xy %>% dplyr::filter(.data$scientific_name == i) # add occID field if it doesn't exist if(!("occID" %in% names(sp.occs))) sp.occs$occID <- row.names(sp.occs) # add all cols to match dbOccs if not already there for (col in c("country", "state_province", "locality", "year", "record_type", "catalog_number", "institution_code", "elevation", "uncertainty")) { if (!(col %in% names(sp.occs))) sp.occs[,col] <- NA } # add popup field and arrange cols <- c("occID", "scientific_name", "longitude", "latitude", "country", "state_province", "locality", "year", "record_type", "catalog_number", "institution_code", "elevation", "uncertainty") sp.occs <- sp.occs %>% dplyr::select(dplyr::one_of(cols)) %>% dplyr::mutate(year = as.integer(.data$year), uncertainty = as.numeric(.data$uncertainty)) %>% # # make new column for leaflet marker popup content dplyr::mutate(pop = unlist(apply(sp.occs, 1, popUpContent))) %>% dplyr::arrange(dplyr::across(cols)) n <- fmtSpN(i) # subset to just records with latitude and longitude occsXY <- sp.occs[!is.na(sp.occs$latitude) & !is.na(sp.occs$longitude),] # round longitude and latitude with 5 digits occsXY['longitude'] <- round(occsXY['longitude'], 5) occsXY['latitude'] <- round(occsXY['latitude'], 5) dups <- duplicated(occsXY[,c('longitude','latitude')]) occs <- occsXY[!dups,] occsList[[n]] <- list(orig = sp.occs, cleaned = as.data.frame(occs)) # subset by key columns and make id and popup columns dupsRem <- nrow(sp.occs) - nrow(occs) logger %>% writeLog( hlSpp(n), "Data uploaded from <i>'", txtName, "'</i>: Duplicated records removed [", dupsRem, "]. 
Remaining records [", nrow(occs), "].") # look for background records sp.bg <- txt.xy %>% dplyr::filter(.data$scientific_name == paste0("bg_", i)) # if they exist, load them into occsList for the current species if (nrow(sp.bg) > 0) { occsList[[n]]$bg <- sp.bg logger %>% writeLog( hlSpp(n), "Background data uploaded from <i>'", txtName, "'</i>: ", nrow(sp.bg), " background records.") } } return(occsList) }
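# Sketch of the returned structure (using txtPath/txtName from the @examples
# block above; the species key selected is illustrative):
if (FALSE) {
  user.occs <- occs_userOccs(txtPath, txtName)
  sp <- names(user.occs)[1]
  user.occs[[sp]]$orig     # all uploaded records for that species
  user.occs[[sp]]$cleaned  # deduplicated records with standardized fields
}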
/scratch/gouwar.j/cran-all/cranData/wallace/R/occs_userOccs.R
# Wallace EcoMod: a flexible platform for reproducible modeling of # species niches and distributions. # # part_partitionOccs.R # File author: Wallace EcoMod Dev Team. 2023. # -------------------------------------------------------------------------- # This file is part of the Wallace EcoMod application # (hereafter “Wallace”). # # Wallace is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # Wallace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Wallace. If not, see <http://www.gnu.org/licenses/>. # -------------------------------------------------------------------------- # #' @title part_partitionOccs Partition occurrence data #' @description This function partitions occurrence data and background points #' according to a user-selected method. #' #' @details #' This function is used in the partition occurrence data component. #' A user-selected method is used to partition occurrence and background points #' into different groups for model testing. #' A list of group assignments for both occurrences and background points is #' returned. #' #' @param occs data frame of cleaned or processed occurrences obtained from #' components occs: Obtain occurrence data or, poccs: Process occurrence data. #' @param bg coordinates of background points to be used for modeling. #' @param method character. Partitioning method to be used, one of 5 options: \cr #' (1) 'jack' Non-spatial Partition - jackknife \cr #' (2) 'rand' Non-spatial Partition - random k-fold \cr #' (3) 'block' spatial Partition - block \cr #' (4) 'cb1' spatial Partition - checkerboard 1 (K=2) \cr #' (5) 'cb2' spatial Partition - checkerboard 2 (K=4) \cr #' @param kfolds numeric. Number of partitions to create if selected method is #' random k-fold (must be >=2). If other method then keep default of NULL. #' @param bgMask a RasterStack or a RasterBrick of environmental layers cropped #' and masked. #' @param aggFact numeric. Aggregation factor to be used when using checkerboard #' partition (must be >= 1). #' @param logger Stores all notification messages to be displayed in the Log #' Window of Wallace GUI. Insert the logger reactive list here for running #' in shiny, otherwise leave the default NULL. #' @param spN data frame of cleaned occurrences obtained from component #' occs: Obtain occurrence data. Used to obtain species name for logger #' messages. #' #' @examples #' \dontrun{ #' envs <- envs_userEnvs(rasPath = list.files(system.file("extdata/wc", #' package = "wallace"), #' pattern = ".tif$", full.names = TRUE), #' rasName = list.files(system.file("extdata/wc", #' package = "wallace"), #' pattern = ".tif$", full.names = FALSE)) #' occs <- read.csv(system.file("extdata/Bassaricyon_alleni.csv", #' package = "wallace")) #' bg <- read.csv(system.file("extdata/Bassaricyon_alleni_bgPoints.csv", #' package = "wallace")) #' partblock <- part_partitionOccs(occs, bg, method = 'rand', kfold = 4) #' } #' #' @return A list of two vectors containing group assignments for occurrences #' (occs.grp) and background points (bg.grp). #' @author Jamie Kass <jamie.m.kass@@gmail.com> #' @author Gonzalo E. 
Pinilla-Buitrago <gepinillab@@gmail.com> #' @author Andrea Paz <paz.andreita@@gmail.com> # @note #' @seealso \code{\link[ENMeval]{partitions}} # @references # @aliases - a list of additional topic names that will be mapped to # this documentation when the user looks them up from the command # line. # @family - a family name. All functions that have the same family tag will be # linked in the documentation. #' @importFrom rlang .data #' @export part_partitionOccs <- function(occs, bg, method, kfolds = NULL, bgMask = NULL, aggFact = NULL, logger = NULL, spN = NULL) { if (method == '') { logger %>% writeLog( type = 'error', "Please select a partitioning option.") return() } occs.xy <- occs %>% dplyr::select(.data$longitude, .data$latitude) bg.xy <- bg %>% dplyr::select(.data$longitude, .data$latitude) if (method == 'jack') { group.data <- ENMeval::get.jackknife(occs.xy, bg.xy) logger %>% writeLog( hlSpp(spN), "Occurrences partitioned by jackknife method.") } if (method == 'rand') { if(is.null(kfolds)) { logger %>% writeLog( type = 'error', hlSpp(spN), "Please specify a kfold value to use the random partition function.") return() } if (kfolds < 2) { logger %>% writeLog( type = 'error', hlSpp(spN), "Please specify a kfold value greater than 1.") return() } group.data <- ENMeval::get.randomkfold(occs.xy, bg.xy, kfolds) logger %>% writeLog( hlSpp(spN), "Occurrences partitioned by random k-fold (k = ", kfolds, ").") } if (method == 'block') { group.data <- ENMeval::get.block(occs.xy, bg.xy) logger %>% writeLog( hlSpp(spN), "Occurrences partitioned by block method.") } if (method == 'cb1' | method == 'cb2') { if(is.null(aggFact)) { logger %>% writeLog( type = 'error', hlSpp(spN), paste0("Please specify an aggregation factor to use checkerboard ", "partition functions.")) return() } if(is.na(aggFact) | aggFact <= 1) { logger %>% writeLog( type = 'error', hlSpp(spN), "Please specify a positive aggregation factor greater than 1.") return() } if(is.null(bgMask)) { logger %>% writeLog( type = 'error', hlSpp(spN), paste0("Please specify a background mask to use checkerboard ", "partition functions.")) return() } } if(method == 'cb1') { smartProgress(logger, message = "Aggregating rasters...", { group.data <- ENMeval::get.checkerboard1(occs.xy, bgMask, bg.xy, aggFact) }) logger %>% writeLog(hlSpp(spN), "Occurrences partitioned by checkerboard 1 method with ", "aggregation factor ", aggFact, ".") } if(method == 'cb2') { smartProgress(logger, message = "Aggregating rasters...", { group.data <- ENMeval::get.checkerboard2(occs.xy, bgMask, bg.xy, aggFact) }) logger %>% writeLog(hlSpp(spN), "Occurrences partitioned by checkerboard 2 method with ", "aggregation factor ", aggFact, ".") } return(group.data) }
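# A hedged sketch of visualizing a partition with ENMeval 2.0's evalplot.grps()
# (argument names assumed from that package; `bgMask` is a hypothetical
# RasterStack such as one returned by penvs_bgMask()):
if (FALSE) {
  grps <- part_partitionOccs(occs, bg, method = "block")
  ENMeval::evalplot.grps(pts = occs[, c("longitude", "latitude")],
                         pts.grp = grps$occs.grp, envs = bgMask)
}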
/scratch/gouwar.j/cran-all/cranData/wallace/R/part_partitionOccs.R
# Wallace EcoMod: a flexible platform for reproducible modeling of # species niches and distributions. # # penvs_bgExtent.R # File author: Wallace EcoMod Dev Team. 2023. # -------------------------------------------------------------------------- # This file is part of the Wallace EcoMod application # (hereafter “Wallace”). # # Wallace is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # Wallace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Wallace. If not, see <http://www.gnu.org/licenses/>. # -------------------------------------------------------------------------- # #' @title penvs_bgExtent Generate background extent #' @description This function generates a background area according to a user- #' provided method. #' #' @details This function is used in the select study region component. Here, #' the user can select between three methods ('bounding box', 'point buffers' #' or ' minimum convex polygon') to determine the background extent based on the #' observed occurrences. The function returns a SpatialPolygonsDataFrame #' object of the desired extent. #' #' @param occs data frame of cleaned or processed occurrences obtained from #' components occs: Obtain occurrence data or, poccs: Process occurrence data. #' @param bgSel character. Method of background building. Must be one of three #' options: 'bounding box' , 'point buffers' or ' minimum convex polygon'. #' @param bgBuf numeric. Buffer distance in degrees to be used in the building #' of the background area. #' @param logger Stores all notification messages to be displayed in the Log #' Window of Wallace GUI. Insert the logger reactive list here for running in #' shiny, otherwise leave the default NULL. #' @param spN data frame of cleaned occurrences obtained from component occs: #' Obtain occurrence data. Used to obtain species name for logger messages. #' @examples #' occs <- read.csv(system.file("extdata/Bassaricyon_alleni.csv", #' package = "wallace"))[, 2:3] #' occs$occID <- 1:nrow(occs) #' bgExt <- penvs_bgExtent(occs, bgSel = 'bounding box', bgBuf = 0.5) #' #' @return A SpatialPolygons object that contains all occurrences from occs #' @author Jamie Kass <jamie.m.kass@@gmail.com> #' @author Gonzalo E. Pinilla-Buitrago <gepinillab@@gmail.com> #' @author Bethany A. 
Johnson <bjohnso005@@citymail.cuny.edu> # @note #' @seealso \code{\link{penvs_userBgExtent}}, \code{\link{penvs_drawBgExtent}}, #' \code{\link{penvs_bgMask}}, \code{\link{penvs_bgSample}} #' @export #' penvs_bgExtent <- function(occs, bgSel, bgBuf, logger = NULL, spN = NULL) { if (nrow(occs) <= 2) { logger %>% writeLog(type = 'error', 'Too few localities (< 3) to create a background polygon.') return() } # extract just coordinates occs.xy <- occs[c('longitude', 'latitude')] # make spatial pts object of original occs and preserve origID occs.sp <- sp::SpatialPointsDataFrame(occs.xy, data = occs['occID']) # make an sf obj occs.sf <- sf::st_as_sf(occs.xy, coords = c("longitude", "latitude")) occs.sf <- sf::st_union(occs.sf, by_feature = FALSE) # generate background extent - one grid cell is added to perimeter of each shape # to ensure cells of points on border are included if (bgSel == 'bounding box') { xmin <- occs.sp@bbox[1] xmax <- occs.sp@bbox[3] ymin <- occs.sp@bbox[2] ymax <- occs.sp@bbox[4] bb <- matrix(c(xmin, xmin, xmax, xmax, xmin, ymin, ymax, ymax, ymin, ymin), ncol = 2) bgExt <- sp::SpatialPolygons(list(sp::Polygons(list(sp::Polygon(bb)), 1))) msg <- "Study extent: bounding box." } else if (bgSel == "minimum convex polygon") { mcp.xy <- as.data.frame(sp::coordinates(occs.xy)) coords.t <- grDevices::chull(mcp.xy[, 1], mcp.xy[, 2]) xy.bord <- mcp.xy[coords.t, ] xy.bord <- rbind(xy.bord[nrow(xy.bord), ], xy.bord) bgExt <- sp::SpatialPolygons(list(sp::Polygons(list(sp::Polygon(as.matrix(xy.bord))), 1))) msg <- "Study extent: minimum convex polygon." } else if (bgSel == 'point buffers') { if (bgBuf <= 0) { logger %>% writeLog(type = 'error', 'Change buffer distance to a positive value.') return() } bgExt <- sf::st_buffer(occs.sf, dist = bgBuf) msg <- paste0("Study extent: buffered points. Buffered by ", bgBuf, " degrees.") } if (bgBuf >= 0 & bgSel != 'point buffers') { bgExt <- sf::st_as_sf(bgExt) bgExt <- sf::st_buffer(bgExt, dist = bgBuf) logger %>% writeLog(hlSpp(spN), msg, ' Buffered by ', bgBuf, ' degrees.') } else if (bgBuf < 0 & bgSel != 'point buffers') { logger %>% writeLog(type = 'error', 'All localities must be included within the extent. Change buffer distance to a positive value.') return() } else { logger %>% writeLog(hlSpp(spN), msg) } bgExt <- sf::as_Spatial(bgExt) return(bgExt) }
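# Quick comparison sketch of the three background-building methods (same
# `occs` as in the @examples block; buffer value illustrative):
if (FALSE) {
  bb  <- penvs_bgExtent(occs, bgSel = "bounding box", bgBuf = 0.5)
  mcp <- penvs_bgExtent(occs, bgSel = "minimum convex polygon", bgBuf = 0.5)
  buf <- penvs_bgExtent(occs, bgSel = "point buffers", bgBuf = 0.5)
  sp::plot(bb)
  sp::plot(mcp, add = TRUE, border = "red")
}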
/scratch/gouwar.j/cran-all/cranData/wallace/R/penvs_bgExtent.R
# Wallace EcoMod: a flexible platform for reproducible modeling of # species niches and distributions. # # penvs_bgMask.R # File author: Wallace EcoMod Dev Team. 2023. # -------------------------------------------------------------------------- # This file is part of the Wallace EcoMod application # (hereafter “Wallace”). # # Wallace is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # Wallace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Wallace. If not, see <http://www.gnu.org/licenses/>. # -------------------------------------------------------------------------- # #' @title penvs_bgMask Mask environmental data #' @description This functions crops and masks the environmental data to the #' provided background area. #' #' @details #' This function is used in the select study region component. Here, the #' environmental layers to be used in the modeling are cropped and masked #' to the provided background area. The background area is determined in #' the function penvs_bgExtent from the same component. The function returns #' the provided environmental layers cropped and masked in the provided #' format (either a rasterBrick or a rasterStack). #' #' @param occs data frame of cleaned or processed occurrences obtained from #' components occs: Obtain occurrence data or, poccs: Process occurrence data. #' @param envs a RasterStack or RasterBrick of environmental layers to be #' processed. This determines the output type. #' @param bgExt a SpatialPolygonsDataFrame with the background area to be used #' for processing. #' @param logger Stores all notification messages to be displayed in the Log #' Window of Wallace GUI. Insert the logger reactive list here for running #' in shiny, otherwise leave the default NULL. #' @param spN species name to be used for all logger messages #' @examples #' \dontrun{ #' occs <- read.csv(system.file("extdata/Bassaricyon_alleni.csv", #' package = "wallace"))[, 2:3] #' occs$occID <- 1:nrow(occs) #' envs <- envs_userEnvs(rasPath = list.files(system.file("extdata/wc", #' package = "wallace"), #' pattern = ".tif$", full.names = TRUE), #' rasName = list.files(system.file("extdata/wc", #' package = "wallace"), #' pattern = ".tif$", full.names = FALSE)) #' bgExt <- penvs_bgExtent(occs, bgSel = 'bounding box', bgBuf = 0.5) #' bgMask <- penvs_bgMask(occs, envs, bgExt) #' } #' #' @return A RasterStack or a RasterBrick of environmental layers cropped and #' masked to match the provided background extent. #' @author Jamie Kass <jamie.m.kass@@gmail.com> #' @author Gonzalo E. 
Pinilla-Buitrago <gepinillab@@gmail.com> #' @seealso \code{\link{penvs_userBgExtent}}, #' \code{\link{penvs_drawBgExtent}}, \code{\link{penvs_bgExtent}}, #' \code{\link{penvs_bgSample}} #' @export penvs_bgMask <- function(occs, envs, bgExt, logger = NULL, spN = NULL) { if (is.null(bgExt)) { logger %>% writeLog( type = 'error', hlSpp(spN), "Before sampling background points, define the background extent.") return() } # mask envs by background extent smartProgress(logger, message = paste0("Masking rasters for ", spName(spN), "..."), { bgCrop <- raster::crop(envs, bgExt) bgMask <- raster::mask(bgCrop, bgExt) # GEPB: Workaround for when raster alignment changes after crop, which makes # new duplicated occs appear in the same grid cells. occsEnvsVals <- as.data.frame(raster::extract(bgMask, occs[, c('longitude', 'latitude')], cellnumbers = TRUE)) occs.dups <- duplicated(occsEnvsVals[, 1]) if (sum(occs.dups) > 0) { bgMask <- terra::project(terra::rast(bgMask), terra::rast(envs), method = 'near') bgMask <- methods::as(bgMask, "Raster") } }) logger %>% writeLog(hlSpp(spN), 'Environmental data masked.') return(bgMask) }
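# Sketch of checking the masked layers (assumes `bgMask` from the @examples
# block above):
if (FALSE) {
  raster::nlayers(bgMask)    # same layer count as the input envs
  raster::plot(bgMask[[1]])  # cells outside the background extent are NA
}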
/scratch/gouwar.j/cran-all/cranData/wallace/R/penvs_bgMask.R
# Wallace EcoMod: a flexible platform for reproducible modeling of # species niches and distributions. # # penvs_bgSample.R # File author: Wallace EcoMod Dev Team. 2023. # -------------------------------------------------------------------------- # This file is part of the Wallace EcoMod application # (hereafter “Wallace”). # # Wallace is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # Wallace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Wallace. If not, see <http://www.gnu.org/licenses/>. # -------------------------------------------------------------------------- # #' @title penvs_bgSample Sample background points #' @description This function samples background points from an area determined #' by a rasterBrick or RasterStack of environmental layers previously cropped #' and masked to user determined extent. #' #' @details #' This function is used in the select study region component. Here, a user #' provided amount of points is randomly sampled from the RasterBrick or #' RasterStack of environmental variables cropped and masked to a given #' background extent. The maximum number of points to be sampled is the number #' of non NA cells in each layer of the reference RasterBrick or RasterStack #' If the requested number of points is larger than the number of cells in #' the reference RasterBrick or RasterStack then only a proportion of the #' requested will be returned. #' #' @param occs data frame of cleaned or processed occurrences obtained from #' components occs: Obtain occurrence data or, poccs: Process occurrence data. #' @param bgMask a RasterStack or a RasterBrick of environmental layers cropped #' and masked. #' @param bgPtsNum numeric. Number of points to be sampled from the area, they #' will be sampled as long as <= non NA cells in any reference layer. #' @param logger Stores all notification messages to be displayed in the Log #' Window of Wallace GUI. Insert the logger reactive list here for running in #' shiny, otherwise leave the default NULL. #' @param spN data frame of cleaned occurrences obtained from component occs: #' Obtain occurrence data. Used to obtain species name for logger messages. #' @examples #' \dontrun{ #' occs <- occs_queryDb(spName = "Panthera onca", occDb = "gbif", #' occNum = 100) #' occs <- as.data.frame(occs[[1]]$cleaned) #' envs <- envs_worldclim(bcRes = 10, #' bcSel = c("bio03", "bio04", "bio13", "bio14"), #' doBrick = TRUE) #' bgExt <- penvs_bgExtent(occs, bgSel = 'bounding box', bgBuf = 0.5) #' bgMask <- penvs_bgMask(occs, envs, bgExt) #' bgsample <- penvs_bgSample(occs, bgMask, bgPtsNum = 1000) #' } #' #' @return a dataframe containing point coordinates (longitude and latitude). #' All points are within the area provided in the RasterBrick or RasterStack (bgMask). #' Maximum number of points is equal to non NA cells in each layer of the #' reference brick or stack. #' @author Jamie Kass <jamie.m.kass@@gmail.com> #' @author Gonzalo E. 
Pinilla-Buitrago <gepinillab@@gmail.com> #' @seealso \code{\link{penvs_bgMask}}, \code{\link{penvs_bgExtent}} #' \code{\link{penvs_userBgExtent}}, \code{\link{penvs_drawBgExtent}}, #' \code{\link[dismo]{randomPoints}} #' @importFrom rlang .data #' @export penvs_bgSample <- function(occs, bgMask, bgPtsNum, logger = NULL, spN = NULL) { # sample random background points smartProgress(logger, message = "Generating background points...", { bgXY <- dismo::randomPoints(bgMask, bgPtsNum) bgXY <- bgXY %>% as.data.frame() %>% dplyr::select(longitude = .data$x, latitude = .data$y) bgNonNA <- raster::ncell(bgMask) - raster::freq(bgMask, value = NA)[[1]] }) bg.prop <- round(nrow(bgXY)/bgPtsNum, digits = 2) if (bg.prop == 1) { logger %>% writeLog( hlSpp(spN), bgPtsNum, " random background points sampled out of ", bgNonNA, " total points. ") } else { logger %>% writeLog(type = "warning", hlSpp(spN), bgPtsNum, " random background points requested, but only ", 100 * bg.prop, "% of points (n = ", nrow(bgXY), ") were able to be sampled. ", "The maximum number of background points available to be sampled in the polygon extent is ", bgNonNA, ".") } return(bgXY) }
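# End-to-end sketch chaining extent -> mask -> sample (objects as in the
# @examples block above; point count illustrative):
if (FALSE) {
  bgExt  <- penvs_bgExtent(occs, bgSel = "bounding box", bgBuf = 0.5)
  bgMask <- penvs_bgMask(occs, envs, bgExt)
  bgXY   <- penvs_bgSample(occs, bgMask, bgPtsNum = 5000)
  nrow(bgXY)  # at most the number of non-NA cells in bgMask
}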
/scratch/gouwar.j/cran-all/cranData/wallace/R/penvs_bgSample.R
# Wallace EcoMod: a flexible platform for reproducible modeling of # species niches and distributions. # # penvs_drawBgExtent.R # File author: Wallace EcoMod Dev Team. 2023. # -------------------------------------------------------------------------- # This file is part of the Wallace EcoMod application # (hereafter “Wallace”). # # Wallace is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # Wallace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Wallace. If not, see <http://www.gnu.org/licenses/>. # -------------------------------------------------------------------------- # #' @title penvs_drawBgExtent: Draw background extent #' @description This function generates a background area according to a #' user-drawn polygon and a provided buffer. #' #' @details #' This function is used in the select study region component. Here, in the GUI, #' the user draws a polygon to be used as the background extent and may #' include a buffer to the given polygon. The buffered polygon must include #' all occurrences (occs) or the function will return an error. The function #' returns a SpatialPolygonsDataFrame object of the desired extent (+ buffer). #' #' @param polyExtXY coordinates of polygon endpoints obtained from user drawn #' polygon in GUI. #' @param polyExtID numeric. ID to be used in the generation of the polygon. #' @param drawBgBuf the buffer to be used in generating the #' SpatialPolygonsDataFrame, may be 0 or >0. A number must be specified. #' @param occs data frame of cleaned or processed occurrences obtained from #' components occs: Obtain occurrence data or, poccs: Process occurrence data. #' @param logger Stores all notification messages to be displayed in the #' Log Window of Wallace GUI. Insert the logger reactive list here for #' running in shiny, otherwise leave the default NULL. #' @param spN data frame of cleaned occurrences obtained from component #' occs: Obtain occurrence data. Used to obtain species name for logger #' messages. #' #' @examples #' occs <- read.csv(system.file("extdata/Bassaricyon_alleni.csv", #' package = "wallace"))[, 2:3] #' occs$occID <- 1:nrow(occs) #' longitude <- c(-27.78641, -74.09170, -84.01930, -129.74867, #' -142.19085, -45.55045, -28.56050) #' latitude <- c(-40.40539, -37.02010, 2.28455, 40.75350, #' 56.35954, 54.55045, -7.11861) #' expertDrawPoly <- matrix(c(longitude, latitude), byrow = FALSE, #' ncol = 2) #' drawBgBf <- penvs_drawBgExtent(polyExtXY = expertDrawPoly, polyExtID = 1, #' drawBgBuf = 0.5, occs) #' @return This function returns a SpatialPolygons object based on the #' user-specified coordinates (drawn on map). This SpatialPolygons object may be #' larger than specified if drawBgBuf > 0. The SpatialPolygons object will #' include all occurrences. #' @author Jamie Kass <jamie.m.kass@@gmail.com> #' @author Gonzalo E. Pinilla-Buitrago <gepinillab@@gmail.com> #' @author Bethany A. 
Johnson <bjohnso005@@citymail.cuny.edu> # @note #' @seealso \code{\link{penvs_userBgExtent}}, \code{\link{penvs_bgExtent}}, #' \code{\link{penvs_bgMask}} , \code{\link{penvs_bgSample}} #' @export penvs_drawBgExtent <- function(polyExtXY, polyExtID, drawBgBuf, occs, logger = NULL, spN = NULL) { ptRem <- NULL occs.xy <- occs[c('longitude', 'latitude')] # make spatial pts object of original occs and preserve origID pts <- sp::SpatialPointsDataFrame(occs.xy, data = occs['occID']) newPoly <- sp::SpatialPolygons(list(sp::Polygons(list(sp::Polygon(polyExtXY)), ID = polyExtID))) intersect <- sp::over(pts, newPoly) newPoly.sf <- sf::st_as_sf(newPoly) ptRem <- ifelse(all(!is.na(intersect)), 0, as.numeric(which(is.na(intersect)))) if (ptRem == 0) { bgExt <- sf::st_buffer(newPoly.sf, dist = drawBgBuf) bgExt <- sf::as_Spatial(bgExt) if (drawBgBuf == 0) { logger %>% writeLog(hlSpp(spN), 'Draw polygon without buffer.') } else { logger %>% writeLog(hlSpp(spN), 'Draw polygon with buffer of ', drawBgBuf, ' degrees.') } return(bgExt) } else if (ptRem > 0) { logger %>% writeLog(type = 'error', hlSpp(spN), "The drawn polygon did not include all localities. ", "Remove the polygon before drawing a new one.") return() } }
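# Standalone sketch of the point-in-polygon test used above (`sq` and `pt`
# are toy objects made up for illustration):
# sq <- sp::SpatialPolygons(list(sp::Polygons(list(sp::Polygon(
#   cbind(c(0, 0, 1, 1), c(0, 1, 1, 0)))), ID = 1)))
# pt <- sp::SpatialPoints(cbind(0.5, 0.5))
# sp::over(pt, sq)  # 1 = inside the polygon; NA would mean outside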
/scratch/gouwar.j/cran-all/cranData/wallace/R/penvs_drawBgExtent.R
# Wallace EcoMod: a flexible platform for reproducible modeling of
# species niches and distributions.
#
# penvs_userBgExtent.R
# File author: Wallace EcoMod Dev Team. 2023.
# --------------------------------------------------------------------------
# This file is part of the Wallace EcoMod application
# (hereafter “Wallace”).
#
# Wallace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Wallace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Wallace. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------
#
#' @title penvs_userBgExtent: user provided background extent
#' @description This function generates a background area according to a user
#' provided polygon and buffer.
#'
#' @details
#' This function is used in the select study region component. Here, the user
#' provides either a shapefile or a csv with vertex coordinates with the
#' desired shape for the background extent; the user may include a buffer to
#' the given polygon. The buffered polygon must include all occurrences
#' (occs) or the function will return an error. The function returns a
#' SpatialPolygons object of the desired extent (+ buffer).
#'
#' @param bgShp_path path to the user provided shapefile or csv with vertex
#' coordinates.
#' @param bgShp_name name of the user provided shapefile or csv with vertex
#' coordinates.
#' @param userBgBuf buffer to be used in creating the background extent, must
#' be >= 0.
#' @param occs data frame of cleaned or processed occurrences obtained from
#' components occs: Obtain occurrence data or, poccs: Process occurrence data.
#' @param logger Stores all notification messages to be displayed in the Log
#' Window of Wallace GUI. Insert the logger reactive list here for running in
#' shiny, otherwise leave the default NULL.
#' @param spN Species name.
#' @examples
#' occs <- read.csv(system.file("extdata/Bassaricyon_neblina.csv",
#'                  package = "wallace"))[, 2:3]
#' occs$occID <- 1:nrow(occs)
#' pathShp <- list.files(system.file("extdata/shp", package = "wallace"),
#'                       full.names = TRUE)
#' nameShp <- list.files(system.file("extdata/shp", package = "wallace"),
#'                       full.names = FALSE)
#' userBgbf <- penvs_userBgExtent(bgShp_path = pathShp, bgShp_name = nameShp,
#'                                userBgBuf = 0.2, occs = occs)
#'
#' @return This function returns a SpatialPolygons object with the user
#' provided shape (+ a buffer if userBgBuf > 0). The polygon will be at least
#' large enough to contain all occurrences.
#' @author Jamie Kass <jamie.m.kass@@gmail.com>
#' @author Gonzalo E. Pinilla-Buitrago <gepinillab@@gmail.com>
#' @author Andrea Paz <paz.andreita@@gmail.com>
#' @author Bethany A. Johnson <bjohnso005@@citymail.cuny.edu>
# @note
#' @seealso \code{\link{penvs_drawBgExtent}}, \code{\link{penvs_bgExtent}},
#' \code{\link{penvs_bgMask}}, \code{\link{penvs_bgSample}}
#' @export
penvs_userBgExtent <- function(bgShp_path, bgShp_name, userBgBuf, occs,
                               logger = NULL, spN = NULL) {
  pathdir <- dirname(bgShp_path)
  pathfile <- basename(bgShp_path)
  # get extensions of all input files (the text after the first dot)
  exts <- sapply(strsplit(bgShp_name, '\\.'), FUN = function(x) x[2])
  if (length(exts) == 1 && exts[1] == 'csv') {
    f <- utils::read.csv(bgShp_path, header = TRUE)
    bgExt <- sp::SpatialPolygons(list(sp::Polygons(list(sp::Polygon(f)), 1)))
  } else if ('shp' %in% exts) {
    if (length(exts) < 3) {
      logger %>% writeLog(
        type = 'error',
        paste0('If entering a shapefile, please select all the ',
               'following files: .shp, .shx, .dbf.'))
      return()
    }
    # get index of .shp
    i <- which(exts == 'shp')
    if (!file.exists(file.path(pathdir, bgShp_name)[i])) {
      file.rename(bgShp_path, file.path(pathdir, bgShp_name))
    }
    # read in shapefile and extract coords
    bgExt <- sf::st_read(file.path(pathdir, bgShp_name)[i])
    bgExt <- sf::as_Spatial(bgExt)
  } else {
    logger %>% writeLog(
      type = 'error',
      paste0('Please enter either a CSV file of vertex coordinates ',
             'or shapefile (.shp, .shx, .dbf).'))
    return()
  }
  # buffer the polygon; a buffer of 0 leaves it unchanged, and userBgBuf is
  # documented to be >= 0, so a single unconditional step suffices
  bgExt <- sf::st_as_sf(bgExt)
  bgExt <- sf::st_buffer(bgExt, dist = userBgBuf)
  bgExt <- sf::as_Spatial(bgExt)
  ### Points outside polygon
  occs.xy <- occs[c('longitude', 'latitude')]
  # make spatial pts object of original occs and preserve origID
  pts <- sp::SpatialPointsDataFrame(occs.xy, data = occs['occID'])
  intersecto <- sp::over(pts, bgExt)
  ptRem <- ifelse(all(!is.na(intersecto)), 0,
                  as.numeric(which(is.na(intersecto))))
  if (ptRem == 0) {
    if (userBgBuf > 0) {
      logger %>% writeLog(
        hlSpp(spN), 'Study extent: user-defined polygon buffered by ',
        userBgBuf, ' degrees.')
    } else {
      logger %>% writeLog(
        hlSpp(spN), "Study extent: user-defined polygon.")
    }
    return(bgExt)
  } else if (ptRem > 0) {
    logger %>% writeLog(
      type = 'error', hlSpp(spN),
      "The polygon did not include all localities. ",
      "You can remove localities in the Process Occs component.")
    return()
  }
}
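# Sketch of the CSV input path (commented out; "bgVertices.csv" is a
# hypothetical file holding two columns of longitude/latitude polygon
# vertices):
# bgExt <- penvs_userBgExtent(bgShp_path = "bgVertices.csv",
#                             bgShp_name = "bgVertices.csv",
#                             userBgBuf = 0.5, occs = occs)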
/scratch/gouwar.j/cran-all/cranData/wallace/R/penvs_userBgExtent.R
# Wallace EcoMod: a flexible platform for reproducible modeling of
# species niches and distributions.
#
# poccs_removeByID.R
# File author: Wallace EcoMod Dev Team. 2023.
# --------------------------------------------------------------------------
# This file is part of the Wallace EcoMod application
# (hereafter “Wallace”).
#
# Wallace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Wallace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Wallace. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------
#
#' @title poccs_removeByID: Remove occurrence by ID
#' @description This function removes a user-selected occurrence by its ID.
#'
#' @details
#' This function is called by the remove occurrences by ID module. It allows
#' for removal of a single occurrence flagged by the user on the map. The
#' function will return a data frame of occurrences with all relevant columns
#' for further analyses and without the occurrence selected by the user.
#'
#' @param occs data frame of cleaned occurrences obtained from component occs:
#' Obtain occurrence data
#' @param removeID the ID of the occurrence to be removed from the occurrences
#' dataframe.
#' @param logger Stores all notification messages to be displayed in the Log
#' Window of Wallace GUI. Insert the logger reactive list here for running in
#' shiny, otherwise leave the default NULL
#' @param spN data frame of cleaned occurrences obtained from component occs:
#' Obtain occurrence data. Used to obtain species name for logger messages.
#' @examples
#' occs <- read.csv(system.file("extdata/Bassaricyon_neblina.csv",
#'                  package = "wallace"))[, 2:3]
#' occs$occID <- 1:nrow(occs)
#' out.ID <- poccs_removeByID(occs, 11)
#'
#' @return A new occurrence data frame without the user-selected occurrence,
#' maintaining all columns from the original data frame for further analyses.
#' @author Jamie Kass <jamie.m.kass@@gmail.com>
#' @author Gonzalo E. Pinilla-Buitrago <gepinillab@@gmail.com>
# @note
# @seealso
# @references
#' @export
poccs_removeByID <- function(occs, removeID, logger = NULL, spN = NULL) {
  if (is.null(occs)) {
    logger %>% writeLog(
      type = 'error',
      "Before processing occurrences, obtain the data in component 1.")
    return()
  }
  if (!(removeID %in% occs$occID)) {
    logger %>% writeLog(type = 'error', 'Entered occID not found.')
    return()
  }
  # find which occID corresponds to row for removal
  i <- which(removeID == occs$occID)
  # remove the row
  occs.remID <- occs[-i, ]
  logger %>% writeLog(
    hlSpp(spN), "Removed occurrence with occID = ", removeID,
    ". Updated data has n = ", nrow(occs.remID), " records.")
  if (nrow(occs.remID) < 4) {
    logger %>% writeLog(
      type = 'error', hlSpp(spN),
      "After removing occurrences, there are three or fewer points. ",
      "You need more occurrences to continue the analysis.")
    return()
  }
  return(occs.remID)
}
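# The function removes one ID per call, so several flagged records are
# removed by chaining calls (commented sketch; the IDs are illustrative):
# occs2 <- poccs_removeByID(occs, 11)
# occs3 <- poccs_removeByID(occs2, 24)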
/scratch/gouwar.j/cran-all/cranData/wallace/R/poccs_removeByID.R
# Wallace EcoMod: a flexible platform for reproducible modeling of
# species niches and distributions.
#
# poccs_selectOccs.R
# File author: Wallace EcoMod Dev Team. 2023.
# --------------------------------------------------------------------------
# This file is part of the Wallace EcoMod application
# (hereafter “Wallace”).
#
# Wallace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Wallace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Wallace. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------
#
#' @title poccs_selectOccs: Remove occurrences outside of polygon
#' @description This function removes occurrences outside of a user created
#' polygon.
#'
#' @details
#' This function is called by the select occurrences on map module. It
#' allows for removal of occurrences outside the user drawn polygon in
#' the map. The function will return a data frame of occurrences with all
#' relevant columns for further analyses and without the occurrences outside
#' of the polygon.
#'
#' @param occs data frame of cleaned occurrences obtained from component
#' occs: Obtain occurrence data.
#' @param polySelXY matrix of longitude and latitude describing the expert
#' drawn polygon.
#' @param polySelID numeric. Polygon ID to be used in SpatialPolygons creation,
#' defaults to 1.
#' @param logger Stores all notification messages to be displayed in the Log
#' Window of Wallace GUI. Insert the logger reactive list here for running in
#' shiny, otherwise leave the default NULL.
#' @param spN data frame of cleaned occurrences obtained from component
#' occs: Obtain occurrence data. Used to obtain species name for logger
#' messages.
#' @examples
#' occs <- read.csv(system.file("extdata/Bassaricyon_neblina.csv",
#'                  package = "wallace"))[, 2:3]
#' occs$occID <- 1:nrow(occs)
#' longitude <- c(-71.58400, -78.81300, -79.34034, -69.83331,
#'                -66.47149, -66.71319, -71.11931)
#' latitude <- c(13.18379, 7.52315, 0.93105, -1.70167,
#'               0.98391, 6.09208, 12.74980)
#' expertAddedPoly <- matrix(c(longitude, latitude), byrow = FALSE, ncol = 2)
#' out.occs <- poccs_selectOccs(occs, polySelXY = expertAddedPoly,
#'                              polySelID = 1)
#' @return A new occurrence data frame including only occurrences inside the
#' provided polygon, maintaining all columns from the original data frame for
#' further analyses.
#' @author Jamie Kass <jamie.m.kass@@gmail.com>
#' @author Gonzalo E. Pinilla-Buitrago <gepinillab@@gmail.com>
#' @export
poccs_selectOccs <- function(occs, polySelXY, polySelID = 1, logger = NULL,
                             spN = NULL) {
  if (is.null(occs)) {
    logger %>% writeLog(
      type = 'error',
      "Before processing occurrences, obtain the data in component 1.")
    return()
  }
  if (is.null(polySelXY)) {
    logger %>% writeLog(
      type = 'error',
      'The polygon has not been finished. Please press "Finish" on the map ',
      'toolbar, then the "Select Occurrences" button.')
    return()
  }

  occs.xy <- occs[c('longitude', 'latitude')]
  # make spatial pts object of original occs and preserve origID
  pts <- sp::SpatialPointsDataFrame(occs.xy, data = occs['occID'])
  # create new polygon from coords
  newPoly <- sp::SpatialPolygons(
    list(sp::Polygons(list(sp::Polygon(polySelXY)), ID = polySelID)))
  intersect <- sp::over(pts, newPoly)
  ptRemIndex <- as.numeric(which(is.na(intersect)))
  remIDs <- printVecAsis(pts[ptRemIndex, ]$occID)
  # need code here to format the string better

  if (is.na(ptRemIndex[1])) {
    logger %>% writeLog(
      type = 'warning', hlSpp(spN),
      "Your polygon is selecting all occurrences. None will be removed.")
    occs.sel <- occs
    # return the unchanged data; a bare return() here would return NULL
    return(occs.sel)
  }

  occs.sel <- occs[-ptRemIndex, ]

  logger %>% writeLog(
    hlSpp(spN), "Removed occurrence(s) with occID = ", remIDs,
    ". Updated data has n = ", nrow(occs.sel), " records.")

  if (nrow(occs.sel) < 4) {
    logger %>% writeLog(
      type = 'error', hlSpp(spN),
      "After removing occurrences, there are three or fewer points. ",
      "You need more occurrences to continue the analysis.")
    return()
  }
  return(occs.sel)
}
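# Core of the selection logic as a standalone commented sketch: sp::over()
# returns NA for points outside the polygon, so dropping NA rows keeps only
# the selected records:
# inside <- sp::over(pts, newPoly)
# occs[!is.na(inside), ]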
/scratch/gouwar.j/cran-all/cranData/wallace/R/poccs_selectOccs.R
# Wallace EcoMod: a flexible platform for reproducible modeling of
# species niches and distributions.
#
# poccs_thinOccs.R
# File author: Wallace EcoMod Dev Team. 2023.
# --------------------------------------------------------------------------
# This file is part of the Wallace EcoMod application
# (hereafter “Wallace”).
#
# Wallace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Wallace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Wallace. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------
#
#' @title poccs_thinOccs: Thin occurrences
#' @description The function thins the observed occurrences by a user provided
#' distance.
#'
#' @details
#' This function is called by the component poccs: process occurrence data to
#' thin the occurrence data to a user-specified distance. It provides an
#' output with preserved columns appropriate for further analyses and a
#' maximized number of occurrences that are separated by at least the
#' provided distance.
#'
#' @param occs data frame of cleaned occurrences obtained from component occs:
#' Obtain occurrence data
#' @param thinDist distance in kilometers to be used for thinning. Number must
#' be positive.
#' @param logger Stores all notification messages to be displayed in the Log
#' Window of Wallace GUI. Insert the logger reactive list here for running
#' in shiny, otherwise leave the default NULL.
#' @param spN data frame of cleaned occurrences obtained from component occs:
#' Obtain occurrence data. Used to obtain species name for logger messages.
#'
#' @examples
#' occs <- read.csv(system.file("extdata/Bassaricyon_neblina.csv",
#'                  package = "wallace"))
#' occs$occID <- 1:nrow(occs)
#' out.thin <- poccs_thinOccs(occs = occs, thinDist = 30)
#'
#' @return Output is a data frame of thinned occurrences (all occurrences at a
#' distance > thinDist) with the same columns as occs.
#' @author Jamie Kass <jamie.m.kass@@gmail.com>
#' @author Gonzalo E. Pinilla-Buitrago <gepinillab@@gmail.com>
#' @seealso \code{\link[spThin]{thin}}
#' @export
poccs_thinOccs <- function(occs, thinDist, logger = NULL, spN = NULL) {
  if (is.null(occs)) {
    logger %>% writeLog(
      type = 'error',
      "Before processing occurrences, obtain the data in component 1.")
    return()
  }
  if (thinDist <= 0) {
    logger %>% writeLog(
      type = "error",
      'Assign a positive distance to the thinning parameter.')
    return()
  }
  # spatially thin the occurrences (100 replicates)
  smartProgress(
    logger,
    message = paste0("Spatially thinning for ", spName(spN), "..."), {
    output <- spThin::thin(loc.data = occs,
                           lat.col = 'latitude',
                           long.col = 'longitude',
                           spec.col = 'scientific_name',
                           thin.par = thinDist,
                           reps = 100,
                           locs.thinned.list.return = TRUE,
                           write.files = FALSE,
                           write.log.file = FALSE,
                           verbose = FALSE)
    # pull thinned dataset with max records, not just the first in the list
    maxThin <- which(sapply(output, nrow) == max(sapply(output, nrow)))
    # if more than one max, pick first
    maxThin <- output[[ifelse(length(maxThin) > 1, maxThin[1], maxThin)]]
    occs.thin <- occs[as.numeric(rownames(maxThin)), ]
    # if (!is.null(values$inFile)) {
    #   thinned.inFile <- values$inFile[as.numeric(rownames(output[[1]])),]
    # }
  })

  logger %>% writeLog(
    hlSpp(spN), 'Total records thinned (', thinDist, ' km) to ',
    nrow(occs.thin), ' localities.')

  if (nrow(occs.thin) < 4) {
    logger %>% writeLog(
      type = 'error', hlSpp(spN),
      "After removing occurrences, there are three or fewer points. ",
      "You need more occurrences to continue the analysis.")
    return()
  }
  return(occs.thin)
}
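# How the thinned dataset with the most records is chosen, as a standalone
# commented sketch (`output` is the list of data frames returned by
# spThin::thin() with locs.thinned.list.return = TRUE):
# nRecs <- sapply(output, nrow)
# best <- which(nRecs == max(nRecs))[1]  # first of any ties
# occs.thin <- occs[as.numeric(rownames(output[[best]])), ]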
/scratch/gouwar.j/cran-all/cranData/wallace/R/poccs_thinOccs.R