content
stringlengths
0
14.9M
filename
stringlengths
44
136
#' Explain Multivariate Adaptive Regression Splines Using SHAP Values
#'
#' Explains the predictions of a MARS (Multivariate Adaptive Regression
#' Splines) model using SHAP (Shapley Additive Explanations) values. It
#' utilizes the DALEXtra and DALEX packages to provide SHAP-based explanations
#' for the specified model.
#'
#' @import DALEX
#' @import DALEXtra
#' @import earth
#' @import Formula
#' @import parsnip
#' @import plotmo
#' @import plotrix
#' @import recipes
#' @import rsample
#' @import TeachingDemos
#' @import workflows
#' @importFrom dplyr mutate_if
#' @importFrom dplyr select
#' @importFrom stats as.formula
#'
#' @param vip_featured A character value naming the response column in
#'   \code{hiv_data} to be explained.
#' @param hiv_data A data frame used to fit the MARS workflow.
#' @param nt A numeric value; number of retained model terms
#'   (\code{num_terms}).
#' @param pd A numeric value; maximum interaction degree
#'   (\code{prod_degree}).
#' @param pru A character value; pruning method (\code{prune_method}).
#' @param vip_train A data frame of predictors passed to the explainer.
#' @param vip_new A single-row data frame (new observation) to explain.
#' @param orderings A numeric value; number of SHAP orderings (\code{B}).
#'
#' @return A data frame of SHAP attributions as produced by
#'   \code{\link[DALEX]{predict_parts}}.
#' @export
#'
#' @examples
#' \donttest{
#' library(dplyr)
#' library(rsample)
#' set.seed(123)
#' hiv <- data.frame(
#'   cd_2019 = rpois(35, 500), vl_2019 = rpois(35, 1500),
#'   cd_2021 = rpois(35, 500), vl_2021 = rpois(35, 1500),
#'   cd_2022 = rpois(35, 500), vl_2022 = rpois(35, 1500)
#' )
#' hiv_data <- rsample::training(rsample::initial_split(hiv))
#' vip_features <- c("cd_2019", "vl_2019", "cd_2021", "vl_2021", "vl_2022")
#' vip_train <- dplyr::select(hiv_data, dplyr::all_of(vip_features))
#' vip_new <- vip_train[1, ]
#' viralx_mars_shap("cd_2022", hiv_data, nt = 3, pd = 1, pru = "none",
#'                  vip_train, vip_new, orderings = 20)
#' }
viralx_mars_shap <- function(vip_featured, hiv_data, nt, pd, pru,
                             vip_train, vip_new, orderings) {
  # Build a tidymodels workflow (recipe + MARS spec), fit it, wrap it in a
  # DALEX explainer, then compute SHAP attributions for the new observation.
  DALEXtra::explain_tidymodels(
    workflows::workflow() |>
      workflows::add_recipe(
        recipes::recipe(stats::as.formula(paste(vip_featured, "~.")),
                        data = hiv_data)
      ) |>
      workflows::add_model(
        parsnip::mars(num_terms = nt,
                      prod_degree = pd,
                      prune_method = pru) |>
          parsnip::set_engine("earth") |>
          parsnip::set_mode("regression")
      ) |>
      parsnip::fit(data = hiv_data),
    data = vip_train,
    # NOTE(review): `y` receives the *name* of the response column, not the
    # response values; DALEX normally expects a numeric vector here.
    # Kept as-is for backward compatibility -- confirm intent.
    y = vip_featured,
    label = "mars",
    verbose = FALSE
  ) |>
    DALEX::predict_parts(vip_new, type = "shap", B = orderings)
}
/scratch/gouwar.j/cran-all/cranData/viralx/R/viralx_mars_shap.R
#' Visualize SHAP Values for Multivariate Adaptive Regression Splines Model
#'
#' Visualizes SHAP (Shapley Additive Explanations) values for a MARS
#' (Multivariate Adaptive Regression Splines) model by employing the DALEXtra
#' and DALEX packages to provide visual insights into the impact of a
#' specified variable on the model's predictions.
#'
#' @import DALEX
#' @import DALEXtra
#' @import Formula
#' @import parsnip
#' @import plotmo
#' @import plotrix
#' @import recipes
#' @import rsample
#' @import TeachingDemos
#' @import vdiffr
#' @import workflows
#' @importFrom stats as.formula
#'
#' @param vip_featured A character value naming the response column in
#'   \code{hiv_data} to be explained.
#' @param hiv_data A data frame used to fit the MARS workflow.
#' @param nt A numeric value; number of retained model terms
#'   (\code{num_terms}).
#' @param pd A numeric value; maximum interaction degree
#'   (\code{prod_degree}).
#' @param pru A character value; pruning method (\code{prune_method}).
#' @param vip_train A data frame of predictors passed to the explainer.
#' @param vip_new A single-row data frame (new observation) to explain.
#' @param orderings A numeric value; number of SHAP orderings (\code{B}).
#'
#' @return A ggplot object plotting the SHAP attributions.
#' @export
#'
#' @examples
#' \donttest{
#' library(dplyr)
#' library(rsample)
#' set.seed(123)
#' hiv <- data.frame(
#'   cd_2019 = rpois(35, 500), vl_2019 = rpois(35, 1500),
#'   cd_2021 = rpois(35, 500), vl_2021 = rpois(35, 1500),
#'   cd_2022 = rpois(35, 500), vl_2022 = rpois(35, 1500)
#' )
#' hiv_data <- rsample::training(rsample::initial_split(hiv))
#' vip_features <- c("cd_2019", "vl_2019", "cd_2021", "vl_2021", "vl_2022")
#' vip_train <- dplyr::select(hiv_data, dplyr::all_of(vip_features))
#' vip_new <- vip_train[1, ]
#' viralx_mars_vis("cd_2022", hiv_data, nt = 3, pd = 1, pru = "none",
#'                 vip_train, vip_new, orderings = 20)
#' }
viralx_mars_vis <- function(vip_featured, hiv_data, nt, pd, pru,
                            vip_train, vip_new, orderings) {
  # Same pipeline as viralx_mars_shap, but the SHAP result is plotted
  # (DALEX provides a plot() method for predict_parts objects).
  DALEXtra::explain_tidymodels(
    workflows::workflow() |>
      workflows::add_recipe(
        recipes::recipe(stats::as.formula(paste(vip_featured, "~.")),
                        data = hiv_data)
      ) |>
      workflows::add_model(
        parsnip::mars(num_terms = nt,
                      prod_degree = pd,
                      prune_method = pru) |>
          parsnip::set_engine("earth") |>
          parsnip::set_mode("regression")
      ) |>
      parsnip::fit(data = hiv_data),
    data = vip_train,
    # NOTE(review): `y` receives the response column *name*, not its values;
    # DALEX normally expects a numeric vector here -- confirm intent.
    y = vip_featured,
    label = "mars",
    verbose = FALSE
  ) |>
    DALEX::predict_parts(vip_new, type = "shap", B = orderings) |>
    plot()
}
/scratch/gouwar.j/cran-all/cranData/viralx/R/viralx_mars_vis.R
#' Explain Neural Network Regression Model
#'
#' Explains the predictions of a neural network regression model for viral
#' load or CD4 counts using the DALEX and DALEXtra tools.
#'
#' @import DALEX
#' @import DALEXtra
#' @import earth
#' @import parsnip
#' @import recipes
#' @import rsample
#' @import vdiffr
#' @import workflows
#' @importFrom dplyr mutate_if
#' @importFrom dplyr select
#' @importFrom stats as.formula
#'
#' @param vip_featured A character value naming the response column in
#'   \code{hiv_data} to be explained.
#' @param hiv_data A data frame used to fit the neural network workflow.
#' @param hu A numeric value; number of hidden units (\code{hidden_units}).
#' @param plty A numeric value; weight-decay penalty (\code{penalty}).
#' @param epo A numeric value; number of training epochs (\code{epochs}).
#' @param vip_train A data frame of predictors passed to the explainer.
#' @param vip_new A single-row data frame (new observation) to explain.
#'
#' @return A data frame with the first two columns of the break-down
#'   explanation, numeric values rounded to two decimals.
#' @export
#'
#' @examples
#' \donttest{
#' library(dplyr)
#' library(rsample)
#' set.seed(123)
#' hiv <- data.frame(
#'   cd_2019 = rpois(35, 500), vl_2019 = rpois(35, 1500),
#'   cd_2021 = rpois(35, 500), vl_2021 = rpois(35, 1500),
#'   cd_2022 = rpois(35, 500), vl_2022 = rpois(35, 1500)
#' )
#' hiv_data <- rsample::training(rsample::initial_split(hiv))
#' vip_features <- c("cd_2019", "vl_2019", "cd_2021", "vl_2021", "vl_2022")
#' vip_train <- dplyr::select(hiv_data, dplyr::all_of(vip_features))
#' vip_new <- vip_train[1, ]
#' viralx_nn("cd_2022", hiv_data, hu = 5, plty = 1.131656e-09, epo = 176,
#'           vip_train, vip_new)
#' }
viralx_nn <- function(vip_featured, hiv_data, hu, plty, epo,
                      vip_train, vip_new) {
  # Fit a normalized-input single-layer perceptron (nnet engine), wrap it in
  # a DALEX explainer, and return a tidy two-column break-down table.
  DALEXtra::explain_tidymodels(
    workflows::workflow() |>
      workflows::add_recipe(
        recipes::recipe(stats::as.formula(paste(vip_featured, "~.")),
                        data = hiv_data) |>
          recipes::step_normalize(recipes::all_predictors())
      ) |>
      workflows::add_model(
        parsnip::mlp(hidden_units = hu, penalty = plty, epochs = epo) |>
          # MaxNWts raised so nnet does not refuse wide input layers.
          parsnip::set_engine("nnet", MaxNWts = 2600) |>
          parsnip::set_mode("regression")
      ) |>
      parsnip::fit(data = hiv_data),
    data = vip_train,
    # NOTE(review): `y` receives the response column *name*, not its values;
    # DALEX normally expects a numeric vector here -- confirm intent.
    y = vip_featured,
    label = "nn + normalized",
    verbose = FALSE
  ) |>
    DALEX::predict_parts(vip_new) |>
    as.data.frame() |>
    dplyr::select(1, 2) |>
    dplyr::mutate_if(is.numeric, round, digits = 2)
}
/scratch/gouwar.j/cran-all/cranData/viralx/R/viralx_nn.R
#' Global Explainers for Neural Network Models
#'
#' The viralx_nn_glob function is designed to provide global explanations for
#' the specified neural network model.
#'
#' @param vip_featured A character value specifying the variable of interest
#'   for which you want to explain predictions.
#' @param hiv_data A data frame containing the dataset used for training the
#'   neural network model.
#' @param hu A numeric value representing the number of hidden units in the
#'   neural network.
#' @param plty A numeric value representing the penalty term for the neural
#'   network model.
#' @param epo A numeric value specifying the number of epochs for training
#'   the neural network.
#' @param vip_train A data frame containing the training data used for
#'   generating global explanations.
#' @param v_train A numeric vector representing the target variable for the
#'   global explanations.
#'
#' @return A list containing global explanations (permutation variable
#'   importance from \code{\link[DALEX]{model_parts}}) for the specified
#'   neural network model.
#' @export
#'
#' @examples
#' \donttest{
#' library(dplyr)
#' library(rsample)
#' set.seed(123)
#' hiv <- data.frame(
#'   cd_2019 = rpois(35, 500), vl_2019 = rpois(35, 1500),
#'   cd_2021 = rpois(35, 500), vl_2021 = rpois(35, 1500),
#'   cd_2022 = rpois(35, 500), vl_2022 = rpois(35, 1500)
#' )
#' hiv_data <- rsample::training(rsample::initial_split(hiv))
#' vip_features <- c("cd_2019", "vl_2019", "cd_2021", "vl_2021", "vl_2022")
#' vip_train <- dplyr::select(hiv_data, dplyr::all_of(vip_features))
#' v_train <- dplyr::select(hiv_data, dplyr::all_of("cd_2022"))
#' viralx_nn_glob("cd_2022", hiv_data, hu = 5, plty = 1.131656e-09,
#'                epo = 176, vip_train, v_train)
#' }
viralx_nn_glob <- function(vip_featured, hiv_data, hu, plty, epo,
                           vip_train, v_train) {
  # Fit the same normalized-input mlp workflow used elsewhere in the package,
  # then compute permutation-based global variable importance.
  DALEXtra::explain_tidymodels(
    workflows::workflow() |>
      workflows::add_recipe(
        recipes::recipe(stats::as.formula(paste(vip_featured, "~.")),
                        data = hiv_data) |>
          recipes::step_normalize(recipes::all_predictors())
      ) |>
      workflows::add_model(
        parsnip::mlp(hidden_units = hu, penalty = plty, epochs = epo) |>
          # MaxNWts raised so nnet does not refuse wide input layers.
          parsnip::set_engine("nnet", MaxNWts = 2600) |>
          parsnip::set_mode("regression")
      ) |>
      parsnip::fit(data = hiv_data),
    data = vip_train,
    # NOTE(review): the example passes a one-column data frame as `y`;
    # DALEX expects a numeric vector of observed responses -- confirm
    # callers pass a vector (e.g. hiv_data[[vip_featured]]).
    y = v_train,
    label = "nn + normalized",
    verbose = FALSE
  ) |>
    DALEX::model_parts()
}
/scratch/gouwar.j/cran-all/cranData/viralx/R/viralx_nn_glob.R
#' Explain Neural Network Model Using SHAP Values
#'
#' Explains the predictions of a neural network model using SHAP (Shapley
#' Additive Explanations) values. It utilizes the DALEXtra and DALEX packages
#' to provide SHAP-based explanations for the specified model.
#'
#' @import DALEX
#' @import DALEXtra
#' @import parsnip
#' @import recipes
#' @import rsample
#' @import workflows
#' @importFrom dplyr mutate_if
#' @importFrom dplyr select
#' @importFrom stats as.formula
#'
#' @param vip_featured A character value naming the response column in
#'   \code{hiv_data} to be explained.
#' @param hiv_data A data frame used to fit the neural network workflow.
#' @param hu A numeric value; number of hidden units (\code{hidden_units}).
#' @param plty A numeric value; weight-decay penalty (\code{penalty}).
#' @param epo A numeric value; number of training epochs (\code{epochs}).
#' @param vip_train A data frame of predictors passed to the explainer.
#' @param vip_new A single-row data frame (new observation) to explain.
#' @param orderings A numeric value; number of SHAP orderings (\code{B}).
#'
#' @return A data frame of SHAP attributions as produced by
#'   \code{\link[DALEX]{predict_parts}}.
#' @export
#'
#' @examples
#' \donttest{
#' library(dplyr)
#' library(rsample)
#' set.seed(123)
#' hiv <- data.frame(
#'   cd_2019 = rpois(35, 500), vl_2019 = rpois(35, 1500),
#'   cd_2021 = rpois(35, 500), vl_2021 = rpois(35, 1500),
#'   cd_2022 = rpois(35, 500), vl_2022 = rpois(35, 1500)
#' )
#' hiv_data <- rsample::training(rsample::initial_split(hiv))
#' vip_features <- c("cd_2019", "vl_2019", "cd_2021", "vl_2021", "vl_2022")
#' vip_train <- dplyr::select(hiv_data, dplyr::all_of(vip_features))
#' vip_new <- vip_train[1, ]
#' viralx_nn_shap("cd_2022", hiv_data, hu = 5, plty = 1.131656e-09,
#'                epo = 176, vip_train, vip_new, orderings = 20)
#' }
viralx_nn_shap <- function(vip_featured, hiv_data, hu, plty, epo,
                           vip_train, vip_new, orderings) {
  # Fit the normalized-input mlp workflow, wrap it in a DALEX explainer,
  # then compute SHAP attributions for the new observation.
  DALEXtra::explain_tidymodels(
    workflows::workflow() |>
      workflows::add_recipe(
        recipes::recipe(stats::as.formula(paste(vip_featured, "~.")),
                        data = hiv_data) |>
          recipes::step_normalize(recipes::all_predictors())
      ) |>
      workflows::add_model(
        parsnip::mlp(hidden_units = hu, penalty = plty, epochs = epo) |>
          # MaxNWts raised so nnet does not refuse wide input layers.
          parsnip::set_engine("nnet", MaxNWts = 2600) |>
          parsnip::set_mode("regression")
      ) |>
      parsnip::fit(data = hiv_data),
    data = vip_train,
    # NOTE(review): `y` receives the response column *name*, not its values;
    # DALEX normally expects a numeric vector here -- confirm intent.
    y = vip_featured,
    label = "nn + normalized",
    verbose = FALSE
  ) |>
    DALEX::predict_parts(vip_new, type = "shap", B = orderings)
}
/scratch/gouwar.j/cran-all/cranData/viralx/R/viralx_nn_shap.R
#' Visualize SHAP Values for Neural Network Model
#'
#' Visualizes SHAP (Shapley Additive Explanations) values for a neural
#' network model by employing the DALEXtra and DALEX packages to provide
#' visual insights into the impact of a specified variable on the model's
#' predictions.
#'
#' @import DALEX
#' @import DALEXtra
#' @import parsnip
#' @import recipes
#' @import rsample
#' @import vdiffr
#' @import workflows
#' @importFrom stats as.formula
#'
#' @param vip_featured A character value naming the response column in
#'   \code{hiv_data} to be explained.
#' @param hiv_data A data frame used to fit the neural network workflow.
#' @param hu A numeric value; number of hidden units (\code{hidden_units}).
#' @param plty A numeric value; weight-decay penalty (\code{penalty}).
#' @param epo A numeric value; number of training epochs (\code{epochs}).
#' @param vip_train A data frame of predictors passed to the explainer.
#' @param vip_new A single-row data frame (new observation) to explain.
#' @param orderings A numeric value; number of SHAP orderings (\code{B}).
#'
#' @return A ggplot object plotting the SHAP attributions.
#' @export
#'
#' @examples
#' \donttest{
#' library(dplyr)
#' library(rsample)
#' set.seed(123)
#' hiv <- data.frame(
#'   cd_2019 = rpois(35, 500), vl_2019 = rpois(35, 1500),
#'   cd_2021 = rpois(35, 500), vl_2021 = rpois(35, 1500),
#'   cd_2022 = rpois(35, 500), vl_2022 = rpois(35, 1500)
#' )
#' hiv_data <- rsample::training(rsample::initial_split(hiv))
#' vip_features <- c("cd_2019", "vl_2019", "cd_2021", "vl_2021", "vl_2022")
#' vip_train <- dplyr::select(hiv_data, dplyr::all_of(vip_features))
#' vip_new <- vip_train[1, ]
#' viralx_nn_vis("cd_2022", hiv_data, hu = 5, plty = 1.131656e-09,
#'               epo = 176, vip_train, vip_new, orderings = 20)
#' }
viralx_nn_vis <- function(vip_featured, hiv_data, hu, plty, epo,
                          vip_train, vip_new, orderings) {
  # Same pipeline as viralx_nn_shap, but the SHAP result is plotted
  # (DALEX provides a plot() method for predict_parts objects).
  DALEXtra::explain_tidymodels(
    workflows::workflow() |>
      workflows::add_recipe(
        recipes::recipe(stats::as.formula(paste(vip_featured, "~.")),
                        data = hiv_data) |>
          recipes::step_normalize(recipes::all_predictors())
      ) |>
      workflows::add_model(
        parsnip::mlp(hidden_units = hu, penalty = plty, epochs = epo) |>
          # MaxNWts raised so nnet does not refuse wide input layers.
          parsnip::set_engine("nnet", MaxNWts = 2600) |>
          parsnip::set_mode("regression")
      ) |>
      parsnip::fit(data = hiv_data),
    data = vip_train,
    # NOTE(review): `y` receives the response column *name*, not its values;
    # DALEX normally expects a numeric vector here -- confirm intent.
    y = vip_featured,
    label = "nn + normalized",
    verbose = FALSE
  ) |>
    DALEX::predict_parts(vip_new, type = "shap", B = orderings) |>
    plot()
}
/scratch/gouwar.j/cran-all/cranData/viralx/R/viralx_nn_vis.R
#' @title Viridis Color Palettes
#'
#' @description A wrapper function around \code{\link[viridisLite]{viridis}}
#' to turn it into a palette function compatible with
#' \code{\link[ggplot2]{discrete_scale}}.
#'
#' @details See \code{\link[viridisLite]{viridis}} and
#' \code{\link[viridisLite]{viridis.map}} for more information on the color
#' palettes.
#'
#' @param alpha The alpha transparency, a number in [0,1], see argument alpha
#' in \code{\link[grDevices]{hsv}}.
#'
#' @param begin The (corrected) hue in [0,1] at which the color map begins.
#'
#' @param end The (corrected) hue in [0,1] at which the color map ends.
#'
#' @param direction Sets the order of colors in the scale. If 1, the default,
#' colors are ordered from darkest to lightest. If -1, the order of colors is
#' reversed.
#'
#' @param option A character string indicating the color map option to use.
#' Eight options are available: "magma" (or "A"), "inferno" (or "B"),
#' "plasma" (or "C"), "viridis" (or "D"), "cividis" (or "E"),
#' "rocket" (or "F"), "mako" (or "G"), "turbo" (or "H").
#'
#' @author Bob Rudis: \email{bob@@rud.is}
#' @author Simon Garnier: \email{garnier@@njit.edu}
#'
#' @examples
#' library(scales)
#' show_col(viridis_pal()(12))
#'
#' @importFrom viridisLite viridis
#'
#' @export
viridis_pal <- function(alpha = 1, begin = 0, end = 1, direction = 1,
                        option = "D") {
  # Return a palette closure: given a color count `n`, it yields `n` colors
  # from the requested viridisLite map with the captured settings.
  function(n) {
    viridisLite::viridis(n, alpha, begin, end, direction, option)
  }
}

#' @title Viridis Color Scales for ggplot2
#'
#' @description Scale functions (fill and colour/color) for
#' \code{\link[ggplot2]{ggplot2}}.
#'
#' For \code{discrete == FALSE} (the default) all other arguments are as to
#' \code{\link[ggplot2]{scale_fill_gradientn}} or
#' \code{\link[ggplot2]{scale_color_gradientn}}. Otherwise the function will
#' return a \code{\link[ggplot2]{discrete_scale}} with the plot-computed
#' number of colors.
#'
#' See \code{\link[viridisLite]{viridis}} and
#' \code{\link[viridisLite]{viridis.map}} for more information on the color
#' palettes.
#'
#' @param ... Parameters to \code{\link[ggplot2]{discrete_scale}} if
#' \code{discrete == TRUE}, or \code{\link[ggplot2]{scale_fill_gradientn}}/
#' \code{\link[ggplot2]{scale_color_gradientn}} if \code{discrete == FALSE}.
#'
#' @param alpha The alpha transparency, a number in [0,1], see argument alpha
#' in \code{\link[grDevices]{hsv}}.
#'
#' @param begin The (corrected) hue in [0,1] at which the color map begins.
#'
#' @param end The (corrected) hue in [0,1] at which the color map ends.
#'
#' @param direction Sets the order of colors in the scale. If 1, the default,
#' colors are as output by \code{\link[viridis]{viridis_pal}}. If -1, the
#' order of colors is reversed.
#'
#' @param discrete Generate a discrete palette? (default: \code{FALSE} -
#' generate continuous palette).
#'
#' @param option A character string indicating the color map option to use.
#' Eight options are available: "magma" (or "A"), "inferno" (or "B"),
#' "plasma" (or "C"), "viridis" (or "D"), "cividis" (or "E"),
#' "rocket" (or "F"), "mako" (or "G"), "turbo" (or "H").
#'
#' @param aesthetics Character string or vector of character strings listing
#' the name(s) of the aesthetic(s) that this scale works with. This can be
#' useful, for example, to apply colour settings to the colour and fill
#' aesthetics at the same time, via aesthetics = c("colour", "fill").
#'
#' @rdname scale_viridis
#'
#' @author Noam Ross \email{noam.ross@@gmail.com}
#' @author Bob Rudis \email{bob@@rud.is}
#' @author Simon Garnier: \email{garnier@@njit.edu}
#'
#' @importFrom ggplot2 scale_fill_gradientn scale_color_gradientn discrete_scale
#'
#' @importFrom gridExtra grid.arrange
#'
#' @examples
#' library(ggplot2)
#'
#' # Discrete color scale
#' ggplot(mtcars, aes(wt, mpg)) +
#'   geom_point(size = 4, aes(colour = factor(cyl))) +
#'   scale_color_viridis(discrete = TRUE) +
#'   theme_bw()
#'
#' # Continuous fill scale
#' dat <- data.frame(x = rnorm(10000), y = rnorm(10000))
#' ggplot(dat, aes(x = x, y = y)) +
#'   geom_hex() + coord_fixed() +
#'   scale_fill_viridis() + theme_bw()
#'
#' @export
scale_fill_viridis <- function(..., alpha = 1, begin = 0, end = 1,
                               direction = 1, discrete = FALSE, option = "D",
                               aesthetics = "fill") {
  # Continuous scales interpolate over a fixed 256-color ramp; discrete
  # scales defer color generation to a palette closure so ggplot2 can
  # request exactly as many colors as the plot needs.
  # NOTE(review): the positional `scale_name` argument ("viridis") to
  # discrete_scale() is deprecated in ggplot2 >= 3.5.0; kept for
  # compatibility with older ggplot2 versions.
  if (!discrete) {
    scale_fill_gradientn(
      colours = viridisLite::viridis(256, alpha, begin, end, direction,
                                     option),
      aesthetics = aesthetics, ...)
  } else {
    discrete_scale(aesthetics, "viridis",
                   viridis_pal(alpha, begin, end, direction, option), ...)
  }
}

#' @rdname scale_viridis
#' @importFrom ggplot2 scale_fill_gradientn scale_color_gradientn discrete_scale
#' @export
scale_color_viridis <- function(..., alpha = 1, begin = 0, end = 1,
                                direction = 1, discrete = FALSE, option = "D",
                                aesthetics = "color") {
  # Mirrors scale_fill_viridis() for the colour aesthetic.
  if (!discrete) {
    scale_color_gradientn(
      colours = viridisLite::viridis(256, alpha, begin, end, direction,
                                     option),
      aesthetics = aesthetics, ...)
  } else {
    discrete_scale(aesthetics, "viridis",
                   viridis_pal(alpha, begin, end, direction, option), ...)
  }
}

#' @rdname scale_viridis
#' @aliases scale_color_viridis
#' @export
scale_colour_viridis <- scale_color_viridis

# Re-export the viridisLite palette generators and data so users of this
# package get them without attaching viridisLite explicitly.

#' @importFrom viridisLite viridis
#' @export
viridisLite::viridis

#' @importFrom viridisLite inferno
#' @export
viridisLite::inferno

#' @importFrom viridisLite magma
#' @export
viridisLite::magma

#' @importFrom viridisLite plasma
#' @export
viridisLite::plasma

#' @importFrom viridisLite cividis
#' @export
viridisLite::cividis

#' @importFrom viridisLite mako
#' @export
viridisLite::mako

#' @importFrom viridisLite rocket
#' @export
viridisLite::rocket

#' @importFrom viridisLite turbo
#' @export
viridisLite::turbo

#' @importFrom viridisLite viridis.map
#' @export
viridisLite::viridis.map

#' @title USA Unemployment in 2009
#'
#' @description A data set containing the 2009 unemployment data in the USA
#' by county.
#'
#' @format A data frame with 3218 rows and 8 variables:
#' \describe{
#'   \item{id}{the county ID number}
#'   \item{state_fips}{the state FIPS number}
#'   \item{county_fips}{the county FIPS number}
#'   \item{name}{the county name}
#'   \item{year}{the year}
#'   \item{rate}{the unemployment rate}
#'   \item{county}{the county abbreviated name}
#'   \item{state}{the state acronym}
#' }
#' @source \url{http://datasets.flowingdata.com/unemployment09.csv}
"unemp"
/scratch/gouwar.j/cran-all/cranData/viridis/R/scales.R
## Purled R code from the "Introduction to the viridis color maps" vignette
## (intro-to-viridis.Rmd). Each "## ----label----" marker delimits one knitr
## chunk; the chunks are intended to be run top to bottom in one session.

## ----setup, include=FALSE-----------------------------------------------------
library(viridis)
knitr::opts_chunk$set(echo = TRUE, fig.retina=2, fig.width=7, fig.height=5)

## ----tldr_base, message=FALSE-------------------------------------------------
# Base-graphics demo: a radially symmetric surface rendered with the viridis
# palette passed directly as a color.palette function.
x <- y <- seq(-8*pi, 8*pi, len = 40)
r <- sqrt(outer(x^2, y^2, "+"))
filled.contour(cos(r^2)*exp(-r/(2*pi)), axes=FALSE, color.palette=viridis, asp=1)

## ----tldr_ggplot, message=FALSE-----------------------------------------------
# ggplot2 demo: hex-binned bivariate normal sample filled with scale_fill_viridis().
library(ggplot2)
ggplot(data.frame(x = rnorm(10000), y = rnorm(10000)), aes(x = x, y = y)) +
  geom_hex() + coord_fixed() + scale_fill_viridis() + theme_bw()

## ----for_repeat, include=FALSE------------------------------------------------
# Helper used by every comparison figure below: draw palette `obj` as a single
# horizontal color strip titled `nam`.
n_col <- 128
img <- function(obj, nam) {
  image(1:length(obj), 1, as.matrix(1:length(obj)), col=obj,
        main = nam, ylab = "", xaxt = "n", yaxt = "n", bty = "n")
}

## ----begin, message=FALSE, include=FALSE--------------------------------------
library(viridis)
library(scales)
library(colorspace)
library(dichromat)

## ----show_scales, echo=FALSE, fig.height=3.575--------------------------------
# All eight color maps shipped with the package, one strip per map.
par(mfrow=c(8, 1), mar=rep(1, 4))
img(rev(viridis(n_col)), "viridis")
img(rev(magma(n_col)), "magma")
img(rev(plasma(n_col)), "plasma")
img(rev(inferno(n_col)), "inferno")
img(rev(cividis(n_col)), "cividis")
img(rev(mako(n_col)), "mako")
img(rev(rocket(n_col)), "rocket")
img(rev(turbo(n_col)), "turbo")

## ----01_normal, echo=FALSE----------------------------------------------------
# Comparison of common sequential palettes under normal color vision.
par(mfrow=c(7, 1), mar=rep(1, 4))
img(rev(rainbow(n_col)), "rainbow")
img(rev(heat.colors(n_col)), "heat")
img(rev(seq_gradient_pal(low = "#132B43", high = "#56B1F7", space = "Lab")(seq(0, 1, length=n_col))), "ggplot default")
img(gradient_n_pal(brewer_pal(type="seq")(9))(seq(0, 1, length=n_col)), "brewer blues")
img(gradient_n_pal(brewer_pal(type="seq", palette = "YlGnBu")(9))(seq(0, 1, length=n_col)), "brewer yellow-green-blue")
img(rev(viridis(n_col)), "viridis")
img(rev(magma(n_col)), "magma")

## ----02_deutan, echo=FALSE----------------------------------------------------
# Same palettes as simulated for deuteranopia (green-blindness) via dichromat().
par(mfrow=c(7, 1), mar=rep(1, 4))
img(dichromat(rev(rainbow(n_col)), "deutan"), "rainbow")
img(dichromat(rev(heat.colors(n_col)), "deutan"), "heat")
img(dichromat(rev(seq_gradient_pal(low = "#132B43", high = "#56B1F7", space = "Lab")(seq(0, 1, length=n_col))), "deutan"), "ggplot default")
img(dichromat(gradient_n_pal(brewer_pal(type="seq")(9))(seq(0, 1, length=n_col)), "deutan"), "brewer blues")
img(dichromat(gradient_n_pal(brewer_pal(type="seq", palette = "YlGnBu")(9))(seq(0, 1, length=n_col)), "deutan"), "brewer yellow-green-blue")
img(dichromat(rev(viridis(n_col)), "deutan"), "viridis")
img(dichromat(rev(magma(n_col)), "deutan"), "magma")

## ----03_protan, echo=FALSE----------------------------------------------------
# Same palettes as simulated for protanopia (red-blindness).
par(mfrow=c(7, 1), mar=rep(1, 4))
img(dichromat(rev(rainbow(n_col)), "protan"), "rainbow")
img(dichromat(rev(heat.colors(n_col)), "protan"), "heat")
img(dichromat(rev(seq_gradient_pal(low = "#132B43", high = "#56B1F7", space = "Lab")(seq(0, 1, length=n_col))), "protan"), "ggplot default")
img(dichromat(gradient_n_pal(brewer_pal(type="seq")(9))(seq(0, 1, length=n_col)), "protan"), "brewer blues")
img(dichromat(gradient_n_pal(brewer_pal(type="seq", palette = "YlGnBu")(9))(seq(0, 1, length=n_col)), "protan"), "brewer yellow-green-blue")
img(dichromat(rev(viridis(n_col)), "protan"), "viridis")
img(dichromat(rev(magma(n_col)), "protan"), "magma")

## ----04_tritan, echo=FALSE----------------------------------------------------
# Same palettes as simulated for tritanopia (blue-blindness).
par(mfrow=c(7, 1), mar=rep(1, 4))
img(dichromat(rev(rainbow(n_col)), "tritan"), "rainbow")
img(dichromat(rev(heat.colors(n_col)), "tritan"), "heat")
img(dichromat(rev(seq_gradient_pal(low = "#132B43", high = "#56B1F7", space = "Lab")(seq(0, 1, length=n_col))), "tritan"), "ggplot default")
img(dichromat(gradient_n_pal(brewer_pal(type="seq")(9))(seq(0, 1, length=n_col)), "tritan"), "brewer blues")
img(dichromat(gradient_n_pal(brewer_pal(type="seq", palette = "YlGnBu")(9))(seq(0, 1, length=n_col)), "tritan"), "brewer yellow-green-blue")
img(dichromat(rev(viridis(n_col)), "tritan"), "viridis")
img(dichromat(rev(magma(n_col)), "tritan"), "magma")

## ----05_desatureated, echo=FALSE----------------------------------------------
# Same palettes fully desaturated, approximating gray-scale printing.
par(mfrow=c(7, 1), mar=rep(1, 4))
img(desaturate(rev(rainbow(n_col))), "rainbow")
img(desaturate(rev(heat.colors(n_col))), "heat")
img(desaturate(rev(seq_gradient_pal(low = "#132B43", high = "#56B1F7", space = "Lab")(seq(0, 1, length=n_col)))), "ggplot default")
img(desaturate(gradient_n_pal(brewer_pal(type="seq")(9))(seq(0, 1, length=n_col))), "brewer blues")
img(desaturate(gradient_n_pal(brewer_pal(type="seq", palette = "YlGnBu")(9))(seq(0, 1, length=n_col))), "brewer yellow-green-blue")
img(desaturate(rev(viridis(n_col))), "viridis")
img(desaturate(rev(magma(n_col))), "magma")

## ----tempmap, message=FALSE---------------------------------------------------
# Raster example: downloads a NOAA GeoTIFF of U.S. max temperature (network
# access required; failures are swallowed by try()), reprojects it to an
# Albers equal-area CRS, and renders it with the inferno palette.
library(terra)
library(httr)
par(mfrow=c(1,1), mar=rep(0.5, 4))
temp_raster <- "http://ftp.cpc.ncep.noaa.gov/GIS/GRADS_GIS/GeoTIFF/TEMP/us_tmax/us.tmax_nohads_ll_20150219_float.tif"
try(GET(temp_raster, write_disk("us.tmax_nohads_ll_20150219_float.tif")), silent=TRUE)
us <- rast("us.tmax_nohads_ll_20150219_float.tif")
us <- project(us, y="+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs")
image(us, col=inferno(256), asp=1, axes=FALSE, xaxs="i", xaxt='n', yaxt='n', ann=FALSE)

## ----ggplot2------------------------------------------------------------------
# Choropleth example: county-level U.S. unemployment (bundled `unemp` data)
# joined to map polygons and filled with the "magma" option.
library(maps)
library(mapproj)
data(unemp, package = "viridis")
county_df <- map_data("county", projection = "albers", parameters = c(39, 45))
names(county_df) <- c("long", "lat", "group", "order", "state_name", "county")
county_df$state <- state.abb[match(county_df$state_name, tolower(state.name))]
county_df$state_name <- NULL
state_df <- map_data("state", projection = "albers", parameters = c(39, 45))
choropleth <- merge(county_df, unemp, by = c("state", "county"))
choropleth <- choropleth[order(choropleth$order), ]
ggplot(choropleth, aes(long, lat, group = group)) +
  geom_polygon(aes(fill = rate), colour = alpha("white", 1 / 2), linewidth = 0.2) +
  geom_polygon(data = state_df, colour = "white", fill = NA) +
  coord_fixed() +
  theme_minimal() +
  ggtitle("US unemployment rate by county") +
  theme(axis.line = element_blank(), axis.text = element_blank(),
        axis.ticks = element_blank(), axis.title = element_blank()) +
  scale_fill_viridis(option="magma")

## ----discrete-----------------------------------------------------------------
# Discrete-scale example: color points by cylinder count as a factor.
p <- ggplot(mtcars, aes(wt, mpg))
p + geom_point(size=4, aes(colour = factor(cyl))) +
  scale_color_viridis(discrete=TRUE) + theme_bw()
/scratch/gouwar.j/cran-all/cranData/viridis/inst/doc/intro-to-viridis.R
--- title: "Introduction to the viridis color maps" author: - "Bob Rudis, Noam Ross and Simon Garnier" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true toc_depth: 1 vignette: > %\VignetteIndexEntry{Introduction to the viridis color maps} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <style> img { max-width: 100%; max-height: 100%; } </style> # tl;dr Use the color scales in this package to make plots that are pretty, better represent your data, easier to read by those with colorblindness, and print well in gray scale. Install **viridis** like any R package: ``` install.packages("viridis") library(viridis) ``` For base plots, use the `viridis()` function to generate a palette: ```{r setup, include=FALSE} library(viridis) knitr::opts_chunk$set(echo = TRUE, fig.retina=2, fig.width=7, fig.height=5) ``` ```{r tldr_base, message=FALSE} x <- y <- seq(-8*pi, 8*pi, len = 40) r <- sqrt(outer(x^2, y^2, "+")) filled.contour(cos(r^2)*exp(-r/(2*pi)), axes=FALSE, color.palette=viridis, asp=1) ``` For ggplot, use `scale_color_viridis()` and `scale_fill_viridis()`: ```{r, tldr_ggplot, message=FALSE} library(ggplot2) ggplot(data.frame(x = rnorm(10000), y = rnorm(10000)), aes(x = x, y = y)) + geom_hex() + coord_fixed() + scale_fill_viridis() + theme_bw() ``` --- # Introduction [`viridis`](https://cran.r-project.org/package=viridis), and its companion package [`viridisLite`](https://cran.r-project.org/package=viridisLite) provide a series of color maps that are designed to improve graph readability for readers with common forms of color blindness and/or color vision deficiency. The color maps are also perceptually-uniform, both in regular form and also when converted to black-and-white for printing. 
These color maps are designed to be: - **Colorful**, spanning as wide a palette as possible so as to make differences easy to see, - **Perceptually uniform**, meaning that values close to each other have similar-appearing colors and values far away from each other have more different-appearing colors, consistently across the range of values, - **Robust to colorblindness**, so that the above properties hold true for people with common forms of colorblindness, as well as in grey scale printing, and - **Pretty**, oh so pretty `viridisLite` provides the base functions for generating the color maps in base `R`. The package is meant to be as lightweight and dependency-free as possible for maximum compatibility with all the `R` ecosystem. [`viridis`](https://cran.r-project.org/package=viridis) provides additional functionalities, in particular bindings for `ggplot2`. --- # The Color Scales The package contains eight color scales: "viridis", the primary choice, and five alternatives with similar properties - "magma", "plasma", "inferno", "civids", "mako", and "rocket" -, and a rainbow color map - "turbo". The color maps `viridis`, `magma`, `inferno`, and `plasma` were created by Stéfan van der Walt ([@stefanv](https://github.com/stefanv)) and Nathaniel Smith ([@njsmith](https://github.com/njsmith)). If you want to know more about the science behind the creation of these color maps, you can watch this [presentation of `viridis`](https://youtu.be/xAoljeRJ3lU) by their authors at SciPy 2015. The color map `cividis` is a corrected version of 'viridis', developed by Jamie R. Nuñez, Christopher R. Anderton, and Ryan S. Renslow, and originally ported to `R` by Marco Sciaini ([@msciain](https://github.com/marcosci)). More info about `cividis` can be found in [this paper](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0199239). The color maps `mako` and `rocket` were originally created for the `Seaborn` statistical data visualization package for Python. 
More info about `mako` and `rocket` can be found on the [`Seaborn` website](https://seaborn.pydata.org/tutorial/color_palettes.html). The color map `turbo` was developed by Anton Mikhailov to address the shortcomings of the Jet rainbow color map such as false detail, banding and color blindness ambiguity. More infor about `turbo` can be found [here](https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html). ```{r for_repeat, include=FALSE} n_col <- 128 img <- function(obj, nam) { image(1:length(obj), 1, as.matrix(1:length(obj)), col=obj, main = nam, ylab = "", xaxt = "n", yaxt = "n", bty = "n") } ``` ```{r begin, message=FALSE, include=FALSE} library(viridis) library(scales) library(colorspace) library(dichromat) ``` ```{r show_scales, echo=FALSE, fig.height=3.575} par(mfrow=c(8, 1), mar=rep(1, 4)) img(rev(viridis(n_col)), "viridis") img(rev(magma(n_col)), "magma") img(rev(plasma(n_col)), "plasma") img(rev(inferno(n_col)), "inferno") img(rev(cividis(n_col)), "cividis") img(rev(mako(n_col)), "mako") img(rev(rocket(n_col)), "rocket") img(rev(turbo(n_col)), "turbo") ``` --- # Comparison Let's compare the viridis and magma scales against these other commonly used sequential color palettes in R: - Base R palettes: `rainbow.colors`, `heat.colors`, `cm.colors` - The default **ggplot2** palette - Sequential [colorbrewer](https://colorbrewer2.org/) palettes, both default blues and the more viridis-like yellow-green-blue ```{r 01_normal, echo=FALSE} par(mfrow=c(7, 1), mar=rep(1, 4)) img(rev(rainbow(n_col)), "rainbow") img(rev(heat.colors(n_col)), "heat") img(rev(seq_gradient_pal(low = "#132B43", high = "#56B1F7", space = "Lab")(seq(0, 1, length=n_col))), "ggplot default") img(gradient_n_pal(brewer_pal(type="seq")(9))(seq(0, 1, length=n_col)), "brewer blues") img(gradient_n_pal(brewer_pal(type="seq", palette = "YlGnBu")(9))(seq(0, 1, length=n_col)), "brewer yellow-green-blue") img(rev(viridis(n_col)), "viridis") img(rev(magma(n_col)), "magma") ``` It is 
immediately clear that the "rainbow" palette is not perceptually uniform; there are several "kinks" where the apparent color changes quickly over a short range of values. This is also true, though less so, for the "heat" colors. The other scales are more perceptually uniform, but "viridis" stands out for its large *perceptual range*. It makes as much use of the available color space as possible while maintaining uniformity. Now, let's compare these as they might appear under various forms of colorblindness, which can be simulated using the **[dichromat](https://cran.r-project.org/package=dichromat)** package: ### Green-Blind (Deuteranopia) ```{r 02_deutan, echo=FALSE} par(mfrow=c(7, 1), mar=rep(1, 4)) img(dichromat(rev(rainbow(n_col)), "deutan"), "rainbow") img(dichromat(rev(heat.colors(n_col)), "deutan"), "heat") img(dichromat(rev(seq_gradient_pal(low = "#132B43", high = "#56B1F7", space = "Lab")(seq(0, 1, length=n_col))), "deutan"), "ggplot default") img(dichromat(gradient_n_pal(brewer_pal(type="seq")(9))(seq(0, 1, length=n_col)), "deutan"), "brewer blues") img(dichromat(gradient_n_pal(brewer_pal(type="seq", palette = "YlGnBu")(9))(seq(0, 1, length=n_col)), "deutan"), "brewer yellow-green-blue") img(dichromat(rev(viridis(n_col)), "deutan"), "viridis") img(dichromat(rev(magma(n_col)), "deutan"), "magma") ``` ### Red-Blind (Protanopia) ```{r 03_protan, echo=FALSE} par(mfrow=c(7, 1), mar=rep(1, 4)) img(dichromat(rev(rainbow(n_col)), "protan"), "rainbow") img(dichromat(rev(heat.colors(n_col)), "protan"), "heat") img(dichromat(rev(seq_gradient_pal(low = "#132B43", high = "#56B1F7", space = "Lab")(seq(0, 1, length=n_col))), "protan"), "ggplot default") img(dichromat(gradient_n_pal(brewer_pal(type="seq")(9))(seq(0, 1, length=n_col)), "protan"), "brewer blues") img(dichromat(gradient_n_pal(brewer_pal(type="seq", palette = "YlGnBu")(9))(seq(0, 1, length=n_col)), "protan"), "brewer yellow-green-blue") img(dichromat(rev(viridis(n_col)), "protan"), "viridis") 
img(dichromat(rev(magma(n_col)), "protan"), "magma") ``` ### Blue-Blind (Tritanopia) ```{r 04_tritan, echo=FALSE} par(mfrow=c(7, 1), mar=rep(1, 4)) img(dichromat(rev(rainbow(n_col)), "tritan"), "rainbow") img(dichromat(rev(heat.colors(n_col)), "tritan"), "heat") img(dichromat(rev(seq_gradient_pal(low = "#132B43", high = "#56B1F7", space = "Lab")(seq(0, 1, length=n_col))), "tritan"), "ggplot default") img(dichromat(gradient_n_pal(brewer_pal(type="seq")(9))(seq(0, 1, length=n_col)), "tritan"), "brewer blues") img(dichromat(gradient_n_pal(brewer_pal(type="seq", palette = "YlGnBu")(9))(seq(0, 1, length=n_col)), "tritan"), "brewer yellow-green-blue") img(dichromat(rev(viridis(n_col)), "tritan"), "viridis") img(dichromat(rev(magma(n_col)), "tritan"), "magma") ``` ### Desaturated ```{r 05_desatureated, echo=FALSE} par(mfrow=c(7, 1), mar=rep(1, 4)) img(desaturate(rev(rainbow(n_col))), "rainbow") img(desaturate(rev(heat.colors(n_col))), "heat") img(desaturate(rev(seq_gradient_pal(low = "#132B43", high = "#56B1F7", space = "Lab")(seq(0, 1, length=n_col)))), "ggplot default") img(desaturate(gradient_n_pal(brewer_pal(type="seq")(9))(seq(0, 1, length=n_col))), "brewer blues") img(desaturate(gradient_n_pal(brewer_pal(type="seq", palette = "YlGnBu")(9))(seq(0, 1, length=n_col))), "brewer yellow-green-blue") img(desaturate(rev(viridis(n_col))), "viridis") img(desaturate(rev(magma(n_col))), "magma") ``` We can see that in these cases, "rainbow" is quite problematic - it is not perceptually consistent across its range. "Heat" washes out at bright colors, as do the brewer scales to a lesser extent. The ggplot scale does not wash out, but it has a low perceptual range - there's not much contrast between low and high values. The "viridis" and "magma" scales do better - they cover a wide perceptual range in brightness in brightness and blue-yellow, and do not rely as much on red-green contrast. 
They do less well under tritanopia (blue-blindness), but this is an extrememly rare form of colorblindness. --- # Usage The `viridis()` function produces the `viridis` color scale. You can choose the other color scale options using the `option` parameter or the convenience functions `magma()`, `plasma()`, `inferno()`, `cividis()`, `mako()`, `rocket`()`, and `turbo()`. Here the `inferno()` scale is used for a raster of U.S. max temperature: ```{r tempmap, message=FALSE} library(terra) library(httr) par(mfrow=c(1,1), mar=rep(0.5, 4)) temp_raster <- "http://ftp.cpc.ncep.noaa.gov/GIS/GRADS_GIS/GeoTIFF/TEMP/us_tmax/us.tmax_nohads_ll_20150219_float.tif" try(GET(temp_raster, write_disk("us.tmax_nohads_ll_20150219_float.tif")), silent=TRUE) us <- rast("us.tmax_nohads_ll_20150219_float.tif") us <- project(us, y="+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs") image(us, col=inferno(256), asp=1, axes=FALSE, xaxs="i", xaxt='n', yaxt='n', ann=FALSE) ``` The package also contains color scale functions for **ggplot** plots: `scale_color_viridis()` and `scale_fill_viridis()`. As with `viridis()`, you can use the other scales with the `option` argument in the `ggplot` scales. Here the "magma" scale is used for a cloropleth map of U.S. 
unemployment: ```{r, ggplot2} library(maps) library(mapproj) data(unemp, package = "viridis") county_df <- map_data("county", projection = "albers", parameters = c(39, 45)) names(county_df) <- c("long", "lat", "group", "order", "state_name", "county") county_df$state <- state.abb[match(county_df$state_name, tolower(state.name))] county_df$state_name <- NULL state_df <- map_data("state", projection = "albers", parameters = c(39, 45)) choropleth <- merge(county_df, unemp, by = c("state", "county")) choropleth <- choropleth[order(choropleth$order), ] ggplot(choropleth, aes(long, lat, group = group)) + geom_polygon(aes(fill = rate), colour = alpha("white", 1 / 2), linewidth = 0.2) + geom_polygon(data = state_df, colour = "white", fill = NA) + coord_fixed() + theme_minimal() + ggtitle("US unemployment rate by county") + theme(axis.line = element_blank(), axis.text = element_blank(), axis.ticks = element_blank(), axis.title = element_blank()) + scale_fill_viridis(option="magma") ``` The ggplot functions also can be used for discrete scales with the argument `discrete=TRUE`. ```{r discrete} p <- ggplot(mtcars, aes(wt, mpg)) p + geom_point(size=4, aes(colour = factor(cyl))) + scale_color_viridis(discrete=TRUE) + theme_bw() ``` # Gallery Here are some examples of viridis being used in the wild: James Curley uses **viridis** for matrix plots ([Code](https://gist.github.com/jalapic/9a1c069aa8cee4089c1e)): [![](http://pbs.twimg.com/media/CQWw9EgWsAAoUi0.png)](http://pbs.twimg.com/media/CQWw9EgWsAAoUi0.png) Christopher Moore created these contour plots of potential in a dynamic plankton-consumer model: [![](http://pbs.twimg.com/media/CQWTy7wWcAAa-gu.jpg)](http://pbs.twimg.com/media/CQWTy7wWcAAa-gu.jpg)
/scratch/gouwar.j/cran-all/cranData/viridis/inst/doc/intro-to-viridis.Rmd
--- title: "Introduction to the viridis color maps" author: - "Bob Rudis, Noam Ross and Simon Garnier" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true toc_depth: 1 vignette: > %\VignetteIndexEntry{Introduction to the viridis color maps} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <style> img { max-width: 100%; max-height: 100%; } </style> # tl;dr Use the color scales in this package to make plots that are pretty, better represent your data, easier to read by those with colorblindness, and print well in gray scale. Install **viridis** like any R package: ``` install.packages("viridis") library(viridis) ``` For base plots, use the `viridis()` function to generate a palette: ```{r setup, include=FALSE} library(viridis) knitr::opts_chunk$set(echo = TRUE, fig.retina=2, fig.width=7, fig.height=5) ``` ```{r tldr_base, message=FALSE} x <- y <- seq(-8*pi, 8*pi, len = 40) r <- sqrt(outer(x^2, y^2, "+")) filled.contour(cos(r^2)*exp(-r/(2*pi)), axes=FALSE, color.palette=viridis, asp=1) ``` For ggplot, use `scale_color_viridis()` and `scale_fill_viridis()`: ```{r, tldr_ggplot, message=FALSE} library(ggplot2) ggplot(data.frame(x = rnorm(10000), y = rnorm(10000)), aes(x = x, y = y)) + geom_hex() + coord_fixed() + scale_fill_viridis() + theme_bw() ``` --- # Introduction [`viridis`](https://cran.r-project.org/package=viridis), and its companion package [`viridisLite`](https://cran.r-project.org/package=viridisLite) provide a series of color maps that are designed to improve graph readability for readers with common forms of color blindness and/or color vision deficiency. The color maps are also perceptually-uniform, both in regular form and also when converted to black-and-white for printing. 
These color maps are designed to be:

- **Colorful**, spanning as wide a palette as possible so as to make
differences easy to see,
- **Perceptually uniform**, meaning that values close to each other have
similar-appearing colors and values far away from each other have more
different-appearing colors, consistently across the range of values,
- **Robust to colorblindness**, so that the above properties hold true for
people with common forms of colorblindness, as well as in grey scale printing,
and
- **Pretty**, oh so pretty

`viridisLite` provides the base functions for generating the color maps in
base `R`. The package is meant to be as lightweight and dependency-free as
possible for maximum compatibility with all the `R` ecosystem.
[`viridis`](https://cran.r-project.org/package=viridis) provides additional
functionalities, in particular bindings for `ggplot2`.

---

# The Color Scales

The package contains eight color scales: "viridis", the primary choice, and
six alternatives with similar properties - "magma", "plasma", "inferno",
"cividis", "mako", and "rocket" -, and a rainbow color map - "turbo".

The color maps `viridis`, `magma`, `inferno`, and `plasma` were created by
Stéfan van der Walt ([@stefanv](https://github.com/stefanv)) and Nathaniel
Smith ([@njsmith](https://github.com/njsmith)). If you want to know more about
the science behind the creation of these color maps, you can watch this
[presentation of `viridis`](https://youtu.be/xAoljeRJ3lU) by their authors at
SciPy 2015.

The color map `cividis` is a corrected version of 'viridis', developed by
Jamie R. Nuñez, Christopher R. Anderton, and Ryan S. Renslow, and originally
ported to `R` by Marco Sciaini ([@marcosci](https://github.com/marcosci)).
More info about `cividis` can be found in
[this paper](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0199239).

The color maps `mako` and `rocket` were originally created for the `Seaborn`
statistical data visualization package for Python.
More info about `mako` and `rocket` can be found on the
[`Seaborn` website](https://seaborn.pydata.org/tutorial/color_palettes.html).

The color map `turbo` was developed by Anton Mikhailov to address the
shortcomings of the Jet rainbow color map such as false detail, banding and
color blindness ambiguity. More info about `turbo` can be found
[here](https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html).

```{r for_repeat, include=FALSE}
n_col <- 128
img <- function(obj, nam) {
  image(1:length(obj), 1, as.matrix(1:length(obj)), col=obj,
        main = nam, ylab = "", xaxt = "n", yaxt = "n", bty = "n")
}
```

```{r begin, message=FALSE, include=FALSE}
library(viridis)
library(scales)
library(colorspace)
library(dichromat)
```

```{r show_scales, echo=FALSE, fig.height=3.575}
par(mfrow=c(8, 1), mar=rep(1, 4))
img(rev(viridis(n_col)), "viridis")
img(rev(magma(n_col)), "magma")
img(rev(plasma(n_col)), "plasma")
img(rev(inferno(n_col)), "inferno")
img(rev(cividis(n_col)), "cividis")
img(rev(mako(n_col)), "mako")
img(rev(rocket(n_col)), "rocket")
img(rev(turbo(n_col)), "turbo")
```

---

# Comparison

Let's compare the viridis and magma scales against these other commonly used
sequential color palettes in R:

- Base R palettes: `rainbow.colors`, `heat.colors`, `cm.colors`
- The default **ggplot2** palette
- Sequential [colorbrewer](https://colorbrewer2.org/) palettes, both default
blues and the more viridis-like yellow-green-blue

```{r 01_normal, echo=FALSE}
par(mfrow=c(7, 1), mar=rep(1, 4))
img(rev(rainbow(n_col)), "rainbow")
img(rev(heat.colors(n_col)), "heat")
img(rev(seq_gradient_pal(low = "#132B43", high = "#56B1F7", space = "Lab")(seq(0, 1, length=n_col))), "ggplot default")
img(gradient_n_pal(brewer_pal(type="seq")(9))(seq(0, 1, length=n_col)), "brewer blues")
img(gradient_n_pal(brewer_pal(type="seq", palette = "YlGnBu")(9))(seq(0, 1, length=n_col)), "brewer yellow-green-blue")
img(rev(viridis(n_col)), "viridis")
img(rev(magma(n_col)), "magma")
```

It is
immediately clear that the "rainbow" palette is not perceptually uniform; there are several "kinks" where the apparent color changes quickly over a short range of values. This is also true, though less so, for the "heat" colors. The other scales are more perceptually uniform, but "viridis" stands out for its large *perceptual range*. It makes as much use of the available color space as possible while maintaining uniformity. Now, let's compare these as they might appear under various forms of colorblindness, which can be simulated using the **[dichromat](https://cran.r-project.org/package=dichromat)** package: ### Green-Blind (Deuteranopia) ```{r 02_deutan, echo=FALSE} par(mfrow=c(7, 1), mar=rep(1, 4)) img(dichromat(rev(rainbow(n_col)), "deutan"), "rainbow") img(dichromat(rev(heat.colors(n_col)), "deutan"), "heat") img(dichromat(rev(seq_gradient_pal(low = "#132B43", high = "#56B1F7", space = "Lab")(seq(0, 1, length=n_col))), "deutan"), "ggplot default") img(dichromat(gradient_n_pal(brewer_pal(type="seq")(9))(seq(0, 1, length=n_col)), "deutan"), "brewer blues") img(dichromat(gradient_n_pal(brewer_pal(type="seq", palette = "YlGnBu")(9))(seq(0, 1, length=n_col)), "deutan"), "brewer yellow-green-blue") img(dichromat(rev(viridis(n_col)), "deutan"), "viridis") img(dichromat(rev(magma(n_col)), "deutan"), "magma") ``` ### Red-Blind (Protanopia) ```{r 03_protan, echo=FALSE} par(mfrow=c(7, 1), mar=rep(1, 4)) img(dichromat(rev(rainbow(n_col)), "protan"), "rainbow") img(dichromat(rev(heat.colors(n_col)), "protan"), "heat") img(dichromat(rev(seq_gradient_pal(low = "#132B43", high = "#56B1F7", space = "Lab")(seq(0, 1, length=n_col))), "protan"), "ggplot default") img(dichromat(gradient_n_pal(brewer_pal(type="seq")(9))(seq(0, 1, length=n_col)), "protan"), "brewer blues") img(dichromat(gradient_n_pal(brewer_pal(type="seq", palette = "YlGnBu")(9))(seq(0, 1, length=n_col)), "protan"), "brewer yellow-green-blue") img(dichromat(rev(viridis(n_col)), "protan"), "viridis") 
img(dichromat(rev(magma(n_col)), "protan"), "magma")
```

### Blue-Blind (Tritanopia)

```{r 04_tritan, echo=FALSE}
par(mfrow=c(7, 1), mar=rep(1, 4))
img(dichromat(rev(rainbow(n_col)), "tritan"), "rainbow")
img(dichromat(rev(heat.colors(n_col)), "tritan"), "heat")
img(dichromat(rev(seq_gradient_pal(low = "#132B43", high = "#56B1F7", space = "Lab")(seq(0, 1, length=n_col))), "tritan"), "ggplot default")
img(dichromat(gradient_n_pal(brewer_pal(type="seq")(9))(seq(0, 1, length=n_col)), "tritan"), "brewer blues")
img(dichromat(gradient_n_pal(brewer_pal(type="seq", palette = "YlGnBu")(9))(seq(0, 1, length=n_col)), "tritan"), "brewer yellow-green-blue")
img(dichromat(rev(viridis(n_col)), "tritan"), "viridis")
img(dichromat(rev(magma(n_col)), "tritan"), "magma")
```

### Desaturated

```{r 05_desatureated, echo=FALSE}
par(mfrow=c(7, 1), mar=rep(1, 4))
img(desaturate(rev(rainbow(n_col))), "rainbow")
img(desaturate(rev(heat.colors(n_col))), "heat")
img(desaturate(rev(seq_gradient_pal(low = "#132B43", high = "#56B1F7", space = "Lab")(seq(0, 1, length=n_col)))), "ggplot default")
img(desaturate(gradient_n_pal(brewer_pal(type="seq")(9))(seq(0, 1, length=n_col))), "brewer blues")
img(desaturate(gradient_n_pal(brewer_pal(type="seq", palette = "YlGnBu")(9))(seq(0, 1, length=n_col))), "brewer yellow-green-blue")
img(desaturate(rev(viridis(n_col))), "viridis")
img(desaturate(rev(magma(n_col))), "magma")
```

We can see that in these cases, "rainbow" is quite problematic - it is not
perceptually consistent across its range. "Heat" washes out at bright colors,
as do the brewer scales to a lesser extent. The ggplot scale does not wash
out, but it has a low perceptual range - there's not much contrast between
low and high values. The "viridis" and "magma" scales do better - they cover
a wide perceptual range in brightness and blue-yellow, and do not rely as
much on red-green contrast.
They do less well under tritanopia (blue-blindness), but this is an extremely
rare form of colorblindness.

---

# Usage

The `viridis()` function produces the `viridis` color scale. You can choose
the other color scale options using the `option` parameter or the convenience
functions `magma()`, `plasma()`, `inferno()`, `cividis()`, `mako()`,
`rocket()`, and `turbo()`.

Here the `inferno()` scale is used for a raster of U.S. max temperature:

```{r tempmap, message=FALSE}
library(terra)
library(httr)
par(mfrow=c(1,1), mar=rep(0.5, 4))
temp_raster <- "http://ftp.cpc.ncep.noaa.gov/GIS/GRADS_GIS/GeoTIFF/TEMP/us_tmax/us.tmax_nohads_ll_20150219_float.tif"
try(GET(temp_raster, write_disk("us.tmax_nohads_ll_20150219_float.tif")), silent=TRUE)
us <- rast("us.tmax_nohads_ll_20150219_float.tif")
us <- project(us, y="+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs")
image(us, col=inferno(256), asp=1, axes=FALSE, xaxs="i", xaxt='n', yaxt='n', ann=FALSE)
```

The package also contains color scale functions for **ggplot** plots:
`scale_color_viridis()` and `scale_fill_viridis()`. As with `viridis()`, you
can use the other scales with the `option` argument in the `ggplot` scales.
Here the "magma" scale is used for a choropleth map of U.S.
unemployment: ```{r, ggplot2} library(maps) library(mapproj) data(unemp, package = "viridis") county_df <- map_data("county", projection = "albers", parameters = c(39, 45)) names(county_df) <- c("long", "lat", "group", "order", "state_name", "county") county_df$state <- state.abb[match(county_df$state_name, tolower(state.name))] county_df$state_name <- NULL state_df <- map_data("state", projection = "albers", parameters = c(39, 45)) choropleth <- merge(county_df, unemp, by = c("state", "county")) choropleth <- choropleth[order(choropleth$order), ] ggplot(choropleth, aes(long, lat, group = group)) + geom_polygon(aes(fill = rate), colour = alpha("white", 1 / 2), linewidth = 0.2) + geom_polygon(data = state_df, colour = "white", fill = NA) + coord_fixed() + theme_minimal() + ggtitle("US unemployment rate by county") + theme(axis.line = element_blank(), axis.text = element_blank(), axis.ticks = element_blank(), axis.title = element_blank()) + scale_fill_viridis(option="magma") ``` The ggplot functions also can be used for discrete scales with the argument `discrete=TRUE`. ```{r discrete} p <- ggplot(mtcars, aes(wt, mpg)) p + geom_point(size=4, aes(colour = factor(cyl))) + scale_color_viridis(discrete=TRUE) + theme_bw() ``` # Gallery Here are some examples of viridis being used in the wild: James Curley uses **viridis** for matrix plots ([Code](https://gist.github.com/jalapic/9a1c069aa8cee4089c1e)): [![](http://pbs.twimg.com/media/CQWw9EgWsAAoUi0.png)](http://pbs.twimg.com/media/CQWw9EgWsAAoUi0.png) Christopher Moore created these contour plots of potential in a dynamic plankton-consumer model: [![](http://pbs.twimg.com/media/CQWTy7wWcAAa-gu.jpg)](http://pbs.twimg.com/media/CQWTy7wWcAAa-gu.jpg)
/scratch/gouwar.j/cran-all/cranData/viridis/vignettes/intro-to-viridis.Rmd
#' @title Color Map Data
#'
#' @description A data set containing the RGB values of the color maps included
#'  in the package. These are:
#' \itemize{
#'  \item{}{'magma', 'inferno', 'plasma', and 'viridis' as defined in Matplotlib
#'   for Python. These color maps are designed in such a way that they will
#'   analytically be perfectly perceptually-uniform, both in regular form and
#'   also when converted to black-and-white. They are also designed to be
#'   perceived by readers with the most common form of color blindness. They
#'   were created by \href{https://github.com/stefanv}{Stéfan van der Walt}
#'   and \href{https://github.com/njsmith}{Nathaniel Smith};}
#'  \item{}{'cividis', a corrected version of 'viridis', developed by
#'   Jamie R. Nuñez, Christopher R. Anderton, and Ryan S. Renslow, and
#'   originally ported to R by Marco Sciaini. It is designed to be perceived by
#'   readers with all forms of color blindness;}
#'  \item{}{'rocket' and 'mako' as defined in Seaborn for Python;}
#'  \item{}{'turbo', an improved Jet rainbow color map for reducing false detail,
#'   banding and color blindness ambiguity.}
#' }
#'
#' @references
#' \itemize{
#'  \item{}{'magma', 'inferno', 'plasma', and 'viridis': https://bids.github.io/colormap/}
#'  \item{}{'cividis': https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0199239}
#'  \item{}{'rocket' and 'mako': https://seaborn.pydata.org/index.html}
#'  \item{}{'turbo': https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html}
#' }
#'
#' @format A data frame with 2048 rows and 4 variables:
#' \itemize{
#'  \item{R: }{Red value;}
#'  \item{G: }{Green value;}
#'  \item{B: }{Blue value;}
#'  \item{opt: }{The colormap "option" (A: magma; B: inferno; C: plasma;
#'   D: viridis; E: cividis; F: rocket; G: mako; H: turbo).}
#' }
#'
#' @author Simon Garnier: \email{garnier@@njit.edu} / \href{https://twitter.com/sjmgarnier}{@@sjmgarnier}
"viridis.map"

#' @title Viridis Color Palettes
#'
#' @description This function
creates a vector of \code{n} equally spaced colors #' along the selected color map. #' #' @param n The number of colors (\eqn{\ge 1}) to be in the palette. #' #' @param alpha The alpha transparency, a number in [0,1], see argument alpha in #' \code{\link[grDevices]{hsv}}. #' #' @param begin The (corrected) hue in [0,1] at which the color map begins. #' #' @param end The (corrected) hue in [0,1] at which the color map ends. #' #' @param direction Sets the order of colors in the scale. If 1, the default, #' colors are ordered from darkest to lightest. If -1, the order of colors is #' reversed. #' #' @param option A character string indicating the color map option to use. #' Eight options are available: #' \itemize{ #' \item{}{"magma" (or "A")} #' \item{}{"inferno" (or "B")} #' \item{}{"plasma" (or "C")} #' \item{}{"viridis" (or "D")} #' \item{}{"cividis" (or "E")} #' \item{}{"rocket" (or "F")} #' \item{}{"mako" (or "G")} #' \item{}{"turbo" (or "H")} #' } #' #' @return \code{viridis} returns a character vector, \code{cv}, of color hex #' codes. This can be used either to create a user-defined color palette for #' subsequent graphics by \code{palette(cv)}, a \code{col =} specification in #' graphics functions or in \code{par}. #' #' @author Simon Garnier: \email{garnier@@njit.edu} / \href{https://twitter.com/sjmgarnier}{@@sjmgarnier} #' #' @details #' \if{html}{Here are the color scales: #' \figure{viridis-scales.png}{options: style="display:block;margin-left:auto;margin-right:auto;width:750px;max-width:75\%;"} #' } #' \if{latex}{Here are the color scales: #' \out{\begin{center}}\figure{viridis-scales.png}\out{\end{center}} #' } #' #' \code{magma()}, \code{plasma()}, \code{inferno()}, \code{cividis()}, #' \code{rocket()}, \code{mako()}, and \code{turbo()} are convenience functions #' for the other color map options, which are useful when the scale must be #' passed as a function name. 
#'
#' Semi-transparent colors (\eqn{0 < alpha < 1}) are supported only on some
#' devices: see \code{\link[grDevices]{rgb}}.
#'
#' @examples
#' library(ggplot2)
#' library(hexbin)
#'
#' dat <- data.frame(x = rnorm(10000), y = rnorm(10000))
#'
#' ggplot(dat, aes(x = x, y = y)) +
#'   geom_hex() + coord_fixed() +
#'   scale_fill_gradientn(colours = viridis(256, option = "D"))
#'
#' # using code from RColorBrewer to demo the palette
#' n = 200
#' image(
#'   1:n, 1, as.matrix(1:n),
#'   col = viridis(n, option = "D"),
#'   xlab = "viridis n", ylab = "", xaxt = "n", yaxt = "n", bty = "n"
#' )
#' @export
viridis <- function(n, alpha = 1, begin = 0, end = 1, direction = 1,
                    option = "D") {
  # Validate scalar arguments early. `||` (scalar, short-circuiting) is the
  # idiomatic operator for `if` conditions; the original used vectorized `|`.
  if (begin < 0 || begin > 1 || end < 0 || end > 1) {
    stop("begin and end must be in [0,1]")
  }
  if (abs(direction) != 1) {
    stop("direction must be 1 or -1")
  }
  # Empty palette: return a zero-length character vector, not an error.
  if (n == 0) {
    return(character(0))
  }
  # direction == -1 reverses the palette by swapping the two endpoints.
  if (direction == -1) {
    tmp <- begin
    begin <- end
    end <- tmp
  }
  # Normalize 'option' (letter code or full name) to the single-letter codes
  # stored in the 'opt' column of viridis.map. Unknown options fall through
  # to the unnamed default block: warn and use 'viridis' ("D").
  option <- switch(
    EXPR = option,
    A = "A", magma = "A",
    B = "B", inferno = "B",
    C = "C", plasma = "C",
    D = "D", viridis = "D",
    E = "E", cividis = "E",
    F = "F", rocket = "F",
    G = "G", mako = "G",
    H = "H", turbo = "H",
    {
      warning(paste0("Option '", option, "' does not exist. Defaulting to 'viridis'."))
      "D"
    }
  )
  # Interpolate n colors between the stored RGB anchor points of the selected
  # map. Interpolation happens in Lab space (spline) to preserve the maps'
  # perceptual uniformity; colorRamp returns values in [0, 255].
  map <- viridisLite::viridis.map[viridisLite::viridis.map$opt == option, ]
  map_cols <- grDevices::rgb(map$R, map$G, map$B)
  fn_cols <- grDevices::colorRamp(map_cols, space = "Lab",
                                  interpolate = "spline")
  cols <- fn_cols(seq(begin, end, length.out = n)) / 255
  grDevices::rgb(cols[, 1], cols[, 2], cols[, 3], alpha = alpha)
}

#' @rdname viridis
#'
#' @return \code{viridisMap} returns a \code{n} lines data frame containing the
#'  red (\code{R}), green (\code{G}), blue (\code{B}) and alpha (\code{alpha})
#'  channels of \code{n} equally spaced colors along the selected color map.
#'  \code{n = 256} by default.
#'
#' @export
viridisMap <- function(n = 256, alpha = 1, begin = 0, end = 1, direction = 1,
                       option = "D") { # nocov start
  # Same validation as viridis(); `||` is the scalar operator for `if`.
  if (begin < 0 || begin > 1 || end < 0 || end > 1) {
    stop("begin and end must be in [0,1]")
  }
  if (abs(direction) != 1) {
    stop("direction must be 1 or -1")
  }
  # Empty request: return a zero-row data frame with the documented columns.
  if (n == 0) {
    return(data.frame(R = double(0), G = double(0), B = double(0),
                      alpha = double(0)))
  }
  if (direction == -1) {
    tmp <- begin
    begin <- end
    end <- tmp
  }
  # Normalize 'option' to the single-letter codes used in the 'opt' column
  # of viridis.map, mirroring the switch in viridis().
  # BUG FIX: the letter codes for 'rocket' and 'mako' were misspelled as
  # `E = "F"` and `E = "G"` (duplicate `E` names), so viridisMap(option = "F")
  # and viridisMap(option = "G") fell through to the default branch and
  # silently returned the 'viridis' map with a spurious warning.
  option <- switch(
    EXPR = option,
    A = "A", magma = "A",
    B = "B", inferno = "B",
    C = "C", plasma = "C",
    D = "D", viridis = "D",
    E = "E", cividis = "E",
    F = "F", rocket = "F",
    G = "G", mako = "G",
    H = "H", turbo = "H",
    {
      warning(paste0("Option '", option, "' does not exist. Defaulting to 'viridis'."))
      "D"
    }
  )
  # Interpolate n colors in Lab space between the stored anchor points and
  # return the raw channels (in [0, 1]) rather than hex strings.
  map <- viridisLite::viridis.map[viridisLite::viridis.map$opt == option, ]
  map_cols <- grDevices::rgb(map$R, map$G, map$B)
  fn_cols <- grDevices::colorRamp(map_cols, space = "Lab",
                                  interpolate = "spline")
  cols <- fn_cols(seq(begin, end, length.out = n)) / 255
  data.frame(R = cols[, 1], G = cols[, 2], B = cols[, 3], alpha = alpha)
} # nocov end

#' @rdname viridis
#' @export
magma <- function(n, alpha = 1, begin = 0, end = 1, direction = 1) {
  viridis(n, alpha, begin, end, direction, option = "magma")
}

#' @rdname viridis
#' @export
inferno <- function(n, alpha = 1, begin = 0, end = 1, direction = 1) {
  viridis(n, alpha, begin, end, direction, option = "inferno")
}

#' @rdname viridis
#' @export
plasma <- function(n, alpha = 1, begin = 0, end = 1, direction = 1) {
  viridis(n, alpha, begin, end, direction, option = "plasma")
}

#' @rdname viridis
#' @export
cividis <- function(n, alpha = 1, begin = 0, end = 1, direction = 1) {
  viridis(n, alpha, begin, end, direction, option = "cividis")
}

#' @rdname viridis
#' @export
rocket <- function(n, alpha = 1, begin = 0, end = 1, direction = 1) {
  viridis(n, alpha, begin, end, direction, option = "rocket")
}

#' @rdname viridis
#' @export
mako <- function(n, alpha = 1, begin = 0, end = 1, direction = 1) {
  viridis(n, alpha, begin, end, direction, option = "mako")
}

#' @rdname viridis
#' @export
turbo <- function(n, alpha = 1, begin = 0, end = 1, direction = 1) {
  viridis(n, alpha, begin, end, direction, option = "turbo")
}
/scratch/gouwar.j/cran-all/cranData/viridisLite/R/viridis.R
#' @export viridis.map <- data.frame( R = c(0.00146159096, 0.00225764007, 0.00327943222, 0.00451230222, 0.00594976987, 0.0075879855, 0.0094260439, 0.0114654337, 0.0137075706, 0.0161557566, 0.018815367, 0.021691934, 0.0247917814, 0.0281228154, 0.0316955304, 0.0355204468, 0.0396084872, 0.043829535, 0.0480616391, 0.0523204388, 0.0566148978, 0.060949393, 0.0653301801, 0.0697637296, 0.0742565152, 0.0788150034, 0.0834456313, 0.088154773, 0.0929486914, 0.097833477, 0.102814972, 0.107898679, 0.113094451, 0.118405035, 0.123832651, 0.129380192, 0.135053322, 0.140857952, 0.146785234, 0.152839217, 0.159017511, 0.165308131, 0.171713033, 0.17821173, 0.184800877, 0.191459745, 0.198176877, 0.204934882, 0.211718061, 0.21851159, 0.225302032, 0.232076515, 0.238825991, 0.245543175, 0.252220252, 0.258857304, 0.265446744, 0.271994089, 0.2784933, 0.284951097, 0.291365817, 0.297740413, 0.304080941, 0.310382027, 0.316654235, 0.322899126, 0.329114038, 0.335307503, 0.341481725, 0.347635742, 0.353773161, 0.359897941, 0.366011928, 0.372116205, 0.378210547, 0.384299445, 0.390384361, 0.39646667, 0.402547663, 0.408628505, 0.414708664, 0.420791157, 0.426876965, 0.432967001, 0.439062114, 0.445163096, 0.451270678, 0.457385535, 0.463508291, 0.469639514, 0.475779723, 0.481928997, 0.488088169, 0.494257673, 0.500437834, 0.506628929, 0.512831195, 0.519044825, 0.525269968, 0.531506735, 0.537755194, 0.544015371, 0.550287252, 0.556570783, 0.562865867, 0.569172368, 0.575490107, 0.581818864, 0.588158375, 0.594508337, 0.600868399, 0.607238169, 0.613617209, 0.620005032, 0.626401108, 0.632804854, 0.639215638, 0.645632778, 0.652055535, 0.658483116, 0.664914668, 0.671349279, 0.677785975, 0.684223712, 0.69066138, 0.697097796, 0.7035317, 0.709961888, 0.716387038, 0.722805451, 0.729215521, 0.735615545, 0.742003713, 0.748378107, 0.754736692, 0.761077312, 0.767397681, 0.77369538, 0.779967847, 0.786212372, 0.792426972, 0.79860776, 0.804751511, 0.810854841, 0.816914186, 0.822925797, 0.82888574, 0.834790818, 0.84063568, 
0.846415804, 0.85212649, 0.85776287, 0.863320397, 0.868793368, 0.874176342, 0.879463944, 0.884650824, 0.889731418, 0.894700194, 0.899551884, 0.904281297, 0.908883524, 0.913354091, 0.917688852, 0.921884187, 0.925937102, 0.92984509, 0.933606454, 0.937220874, 0.940687443, 0.944006448, 0.947179528, 0.95021015, 0.953099077, 0.955849237, 0.958464079, 0.960949221, 0.963310281, 0.965549351, 0.967671128, 0.969680441, 0.971582181, 0.973381238, 0.975082439, 0.976690494, 0.978209957, 0.979645181, 0.981000291, 0.982279159, 0.983485387, 0.984622298, 0.985692925, 0.986700017, 0.987646038, 0.988533173, 0.989363341, 0.990138201, 0.990871208, 0.991558165, 0.992195728, 0.992784669, 0.993325561, 0.993834412, 0.994308514, 0.994737698, 0.995121854, 0.995480469, 0.995809924, 0.996095703, 0.996341406, 0.996579803, 0.996774784, 0.996925427, 0.997077185, 0.997186253, 0.997253982, 0.99732518, 0.997350983, 0.997350583, 0.997341259, 0.997284689, 0.997228367, 0.99713848, 0.997019342, 0.996898254, 0.996726862, 0.996570645, 0.996369065, 0.996162309, 0.995932448, 0.995680107, 0.995423973, 0.995131288, 0.994851089, 0.994523666, 0.9942219, 0.993865767, 0.993545285, 0.993169558, 0.992830963, 0.992439881, 0.992089454, 0.991687744, 0.991331929, 0.990929685, 0.990569914, 0.990174637, 0.989814839, 0.989433736, 0.989077438, 0.988717064, 0.988367028, 0.988032885, 0.987690702, 0.987386827, 0.987052509, 0.00146159096, 0.00226726368, 0.00329899092, 0.00454690615, 0.00600552565, 0.00767578856, 0.00956051094, 0.0116634769, 0.0139950388, 0.0165605595, 0.0193732295, 0.0224468865, 0.0257927373, 0.0294324251, 0.0333852235, 0.0376684211, 0.0422525554, 0.0469146287, 0.0516437624, 0.0564491009, 0.06133972, 0.066331262, 0.0714289181, 0.076636756, 0.0819620773, 0.0874113897, 0.0929901526, 0.0987024972, 0.104550936, 0.110536084, 0.116656423, 0.122908126, 0.129284984, 0.13577845, 0.142377819, 0.149072957, 0.155849711, 0.162688939, 0.169575148, 0.176493202, 0.183428775, 0.190367453, 0.197297425, 0.204209298, 0.211095463, 
0.217948648, 0.224762908, 0.231538148, 0.238272961, 0.244966911, 0.251620354, 0.258234265, 0.264809649, 0.271346664, 0.277849829, 0.284321318, 0.290763373, 0.297178251, 0.303568182, 0.309935342, 0.316281835, 0.322609671, 0.328920763, 0.335216916, 0.341499828, 0.347771086, 0.354032169, 0.360284449, 0.366529195, 0.372767575, 0.379000659, 0.385228383, 0.391452659, 0.397674379, 0.403894278, 0.410113015, 0.416331169, 0.422549249, 0.428767696, 0.434986885, 0.441207124, 0.447428382, 0.453650614, 0.459874623, 0.466100494, 0.472328255, 0.478557889, 0.484789325, 0.491022448, 0.497257069, 0.503492698, 0.509729541, 0.515967304, 0.522205646, 0.528444192, 0.534682523, 0.540920186, 0.547156706, 0.553391649, 0.559624442, 0.565854477, 0.572081108, 0.578303656, 0.584521407, 0.590733615, 0.596939751, 0.60313893, 0.609330184, 0.615512627, 0.62168534, 0.627847374, 0.633997746, 0.640135447, 0.646259648, 0.652369348, 0.658463166, 0.664539964, 0.670598572, 0.676637795, 0.682656407, 0.688653158, 0.694626769, 0.700575937, 0.706499709, 0.712396345, 0.718264447, 0.724102613, 0.729909422, 0.735683432, 0.741423185, 0.747127207, 0.752794009, 0.75842209, 0.76400994, 0.769556038, 0.775058888, 0.780517023, 0.785928794, 0.791292674, 0.796607144, 0.801870689, 0.807081807, 0.812239008, 0.817340818, 0.822385784, 0.827372474, 0.832299481, 0.837165425, 0.841968959, 0.846708768, 0.851383572, 0.85599213, 0.860533241, 0.865005747, 0.869408534, 0.87374053, 0.878000715, 0.882188112, 0.886301795, 0.890340885, 0.894304553, 0.898192017, 0.902002544, 0.905735448, 0.90939009, 0.912965874, 0.916462251, 0.91987871, 0.923214783, 0.926470039, 0.929644083, 0.932736555, 0.935747126, 0.938675494, 0.941521384, 0.944284543, 0.946964741, 0.949561766, 0.952075421, 0.954505523, 0.956851903, 0.959114397, 0.96129285, 0.96338711, 0.965397031, 0.967322465, 0.969163264, 0.970919277, 0.972590351, 0.974176327, 0.975677038, 0.977092313, 0.978421971, 0.979665824, 0.980823673, 0.981895311, 0.982880522, 0.983779081, 0.984590755, 
0.985315301, 0.985952471, 0.986502013, 0.98696367, 0.987337182, 0.987622296, 0.987818759, 0.98792633, 0.987944783, 0.98787391, 0.987713535, 0.987463516, 0.987123759, 0.986694229, 0.98617497, 0.985565739, 0.984865203, 0.984075129, 0.983195992, 0.982228463, 0.981173457, 0.980032178, 0.978806183, 0.977497453, 0.976108474, 0.974637842, 0.973087939, 0.971467822, 0.969783146, 0.968040817, 0.966242589, 0.964393924, 0.962516656, 0.960625545, 0.958720088, 0.956834075, 0.954997177, 0.953215092, 0.951546225, 0.950018481, 0.948683391, 0.947594362, 0.946809163, 0.946391536, 0.946402951, 0.946902568, 0.947936825, 0.94954483, 0.951740304, 0.954529281, 0.957896053, 0.96181202, 0.966248822, 0.971161622, 0.976510983, 0.982257307, 0.988362068, 0.0503832136, 0.0635363639, 0.0753531234, 0.0862217979, 0.0963786097, 0.105979704, 0.115123641, 0.123902903, 0.13238072, 0.140603076, 0.148606527, 0.156420649, 0.164069722, 0.171573925, 0.178950212, 0.186212958, 0.193374449, 0.20044526, 0.207434551, 0.214350298, 0.22119675, 0.227982971, 0.234714537, 0.241396253, 0.248032377, 0.25462669, 0.261182562, 0.267702993, 0.274190665, 0.280647969, 0.287076059, 0.293477695, 0.299855122, 0.306209825, 0.312543124, 0.318856183, 0.325150025, 0.331425547, 0.337683446, 0.343924591, 0.350149699, 0.356359209, 0.362553473, 0.368732762, 0.37489727, 0.381047116, 0.387182639, 0.39330401, 0.399410821, 0.405502914, 0.411580082, 0.417642063, 0.423688549, 0.429719186, 0.435733575, 0.441732123, 0.4477136, 0.453677394, 0.459622938, 0.465549631, 0.471456847, 0.477343929, 0.483210198, 0.489054951, 0.494877466, 0.500677687, 0.506454143, 0.512206035, 0.51793258, 0.52363299, 0.529306474, 0.534952244, 0.54056951, 0.546157494, 0.551715423, 0.557242538, 0.562738096, 0.568201372, 0.573631859, 0.579028682, 0.584391137, 0.589718606, 0.595010505, 0.600266283, 0.605485428, 0.610667469, 0.615811974, 0.620918555, 0.625986869, 0.631016615, 0.636007543, 0.640959444, 0.645872158, 0.650745571, 0.655579615, 0.660374266, 0.665129493, 
0.669845385, 0.67452206, 0.679159664, 0.683758384, 0.68831844, 0.692840088, 0.697323615, 0.701769334, 0.70617759, 0.710548747, 0.714883195, 0.719181339, 0.723443604, 0.727670428, 0.731862231, 0.736019424, 0.740142557, 0.744232102, 0.748288533, 0.752312321, 0.756303937, 0.760263849, 0.764192516, 0.768090391, 0.771957916, 0.775795522, 0.779603614, 0.783382636, 0.787132978, 0.790855015, 0.794549101, 0.798215577, 0.801854758, 0.805466945, 0.809052419, 0.812611506, 0.816144382, 0.819651255, 0.823132309, 0.826587706, 0.830017584, 0.833422053, 0.836801237, 0.840155276, 0.843484103, 0.846787726, 0.850066132, 0.853319279, 0.856547103, 0.85974952, 0.862926559, 0.86607792, 0.869203436, 0.872302917, 0.875376149, 0.878422895, 0.881442916, 0.884435982, 0.887401682, 0.890339687, 0.893249647, 0.896131191, 0.898983931, 0.901807455, 0.904601295, 0.907364995, 0.910098088, 0.912800095, 0.915470518, 0.918108848, 0.920714383, 0.92328666, 0.925825146, 0.928329275, 0.930798469, 0.93323214, 0.935629684, 0.937990034, 0.940312939, 0.942597771, 0.944843893, 0.947050662, 0.949217427, 0.95134353, 0.953427725, 0.95546964, 0.95746877, 0.95942443, 0.96133593, 0.963202573, 0.965023656, 0.96679847, 0.968525639, 0.970204593, 0.971835007, 0.973416145, 0.974947262, 0.976427606, 0.977856416, 0.979232922, 0.980556344, 0.98182589, 0.983040742, 0.984198924, 0.98530076, 0.986345421, 0.987332067, 0.988259846, 0.989127893, 0.989935328, 0.990681261, 0.991364787, 0.99198499, 0.992540939, 0.993031693, 0.993456302, 0.993813802, 0.994103226, 0.994323596, 0.994473934, 0.99455326, 0.994560594, 0.994494964, 0.994355411, 0.994140989, 0.993850778, 0.99348219, 0.993033251, 0.992505214, 0.99189727, 0.99120868, 0.990438793, 0.989587065, 0.988647741, 0.987620557, 0.986509366, 0.985314198, 0.984031139, 0.98265282, 0.981190389, 0.979643637, 0.977994918, 0.976264977, 0.974443038, 0.972530009, 0.970532932, 0.968443477, 0.966271225, 0.964021057, 0.961681481, 0.959275646, 0.956808068, 0.954286813, 0.951726083, 0.949150533, 
0.94660227, 0.944151742, 0.94189612, 0.940015097, 0.26700401, 0.26851048, 0.26994384, 0.27130489, 0.27259384, 0.27380934, 0.27495242, 0.27602238, 0.2770184, 0.27794143, 0.27879067, 0.2795655, 0.28026658, 0.28089358, 0.28144581, 0.28192358, 0.28232739, 0.28265633, 0.28291049, 0.28309095, 0.28319704, 0.28322882, 0.28318684, 0.283072, 0.28288389, 0.28262297, 0.28229037, 0.28188676, 0.28141228, 0.28086773, 0.28025468, 0.27957399, 0.27882618, 0.27801236, 0.27713437, 0.27619376, 0.27519116, 0.27412802, 0.27300596, 0.27182812, 0.27059473, 0.26930756, 0.26796846, 0.26657984, 0.2651445, 0.2636632, 0.26213801, 0.26057103, 0.25896451, 0.25732244, 0.25564519, 0.25393498, 0.25219404, 0.25042462, 0.24862899, 0.2468114, 0.24497208, 0.24311324, 0.24123708, 0.23934575, 0.23744138, 0.23552606, 0.23360277, 0.2316735, 0.22973926, 0.22780192, 0.2258633, 0.22392515, 0.22198915, 0.22005691, 0.21812995, 0.21620971, 0.21429757, 0.21239477, 0.2105031, 0.20862342, 0.20675628, 0.20490257, 0.20306309, 0.20123854, 0.1994295, 0.1976365, 0.19585993, 0.19410009, 0.19235719, 0.19063135, 0.18892259, 0.18723083, 0.18555593, 0.18389763, 0.18225561, 0.18062949, 0.17901879, 0.17742298, 0.17584148, 0.17427363, 0.17271876, 0.17117615, 0.16964573, 0.16812641, 0.1666171, 0.16511703, 0.16362543, 0.16214155, 0.16066467, 0.15919413, 0.15772933, 0.15626973, 0.15481488, 0.15336445, 0.1519182, 0.15047605, 0.14903918, 0.14760731, 0.14618026, 0.14475863, 0.14334327, 0.14193527, 0.14053599, 0.13914708, 0.13777048, 0.1364085, 0.13506561, 0.13374299, 0.13244401, 0.13117249, 0.1299327, 0.12872938, 0.12756771, 0.12645338, 0.12539383, 0.12439474, 0.12346281, 0.12260562, 0.12183122, 0.12114807, 0.12056501, 0.12009154, 0.11973756, 0.11951163, 0.11942341, 0.11948255, 0.11969858, 0.12008079, 0.12063824, 0.12137972, 0.12231244, 0.12344358, 0.12477953, 0.12632581, 0.12808703, 0.13006688, 0.13226797, 0.13469183, 0.13733921, 0.14020991, 0.14330291, 0.1466164, 0.15014782, 0.15389405, 0.15785146, 0.16201598, 0.1663832, 0.1709484, 
0.17570671, 0.18065314, 0.18578266, 0.19109018, 0.19657063, 0.20221902, 0.20803045, 0.21400015, 0.22012381, 0.2263969, 0.23281498, 0.2393739, 0.24606968, 0.25289851, 0.25985676, 0.26694127, 0.27414922, 0.28147681, 0.28892102, 0.29647899, 0.30414796, 0.31192534, 0.3198086, 0.3277958, 0.33588539, 0.34407411, 0.35235985, 0.36074053, 0.3692142, 0.37777892, 0.38643282, 0.39517408, 0.40400101, 0.4129135, 0.42190813, 0.43098317, 0.44013691, 0.44936763, 0.45867362, 0.46805314, 0.47750446, 0.4870258, 0.49661536, 0.5062713, 0.51599182, 0.52577622, 0.5356211, 0.5455244, 0.55548397, 0.5654976, 0.57556297, 0.58567772, 0.59583934, 0.60604528, 0.61629283, 0.62657923, 0.63690157, 0.64725685, 0.65764197, 0.66805369, 0.67848868, 0.68894351, 0.69941463, 0.70989842, 0.72039115, 0.73088902, 0.74138803, 0.75188414, 0.76237342, 0.77285183, 0.78331535, 0.79375994, 0.80418159, 0.81457634, 0.82494028, 0.83526959, 0.84556056, 0.8558096, 0.86601325, 0.87616824, 0.88627146, 0.89632002, 0.90631121, 0.91624212, 0.92610579, 0.93590444, 0.94563626, 0.95529972, 0.96489353, 0.97441665, 0.98386829, 0.99324789, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0055, 0.0236, 0.0416, 0.0576, 0.071, 0.0827, 0.0932, 0.103, 0.112, 0.1204, 0.1283, 0.1359, 0.1431, 0.15, 0.1566, 0.163, 0.1692, 0.1752, 0.1811, 0.1868, 0.1923, 0.1977, 0.203, 0.2082, 0.2133, 0.2183, 0.2232, 0.2281, 0.2328, 0.2375, 0.2421, 0.2466, 0.2511, 0.2556, 0.2599, 0.2643, 0.2686, 0.2728, 0.277, 0.2811, 0.2853, 0.2894, 0.2934, 0.2974, 0.3014, 0.3054, 0.3093, 0.3132, 0.317, 0.3209, 0.3247, 0.3285, 0.3323, 0.3361, 0.3398, 0.3435, 0.3472, 0.3509, 0.3546, 0.3582, 0.3619, 0.3655, 0.3691, 0.3727, 0.3763, 0.3798, 0.3834, 0.3869, 0.3905, 0.394, 0.3975, 0.401, 0.4045, 0.408, 0.4114, 0.4149, 0.4183, 0.4218, 0.4252, 0.4286, 0.432, 0.4354, 0.4388, 0.4422, 0.4456, 0.4489, 0.4523, 0.4556, 0.4589, 0.4622, 0.4656, 0.4689, 0.4722, 0.4756, 0.479, 0.4825, 0.4861, 0.4897, 0.4934, 0.4971, 0.5008, 0.5045, 0.5083, 
0.5121, 0.5158, 0.5196, 0.5234, 0.5272, 0.531, 0.5349, 0.5387, 0.5425, 0.5464, 0.5502, 0.5541, 0.5579, 0.5618, 0.5657, 0.5696, 0.5735, 0.5774, 0.5813, 0.5852, 0.5892, 0.5931, 0.597, 0.601, 0.605, 0.6089, 0.6129, 0.6168, 0.6208, 0.6248, 0.6288, 0.6328, 0.6368, 0.6408, 0.6449, 0.6489, 0.6529, 0.657, 0.661, 0.6651, 0.6691, 0.6732, 0.6773, 0.6813, 0.6854, 0.6895, 0.6936, 0.6977, 0.7018, 0.706, 0.7101, 0.7142, 0.7184, 0.7225, 0.7267, 0.7308, 0.735, 0.7392, 0.7434, 0.7476, 0.7518, 0.756, 0.7602, 0.7644, 0.7686, 0.7729, 0.7771, 0.7814, 0.7856, 0.7899, 0.7942, 0.7985, 0.8027, 0.807, 0.8114, 0.8157, 0.82, 0.8243, 0.8287, 0.833, 0.8374, 0.8417, 0.8461, 0.8505, 0.8548, 0.8592, 0.8636, 0.8681, 0.8725, 0.8769, 0.8813, 0.8858, 0.8902, 0.8947, 0.8992, 0.9037, 0.9082, 0.9127, 0.9172, 0.9217, 0.9262, 0.9308, 0.9353, 0.9399, 0.9444, 0.949, 0.9536, 0.9582, 0.9628, 0.9674, 0.9721, 0.9767, 0.9814, 0.986, 0.9907, 0.9954, 1, 1, 1, 1, 1, 1, 0.01060815, 0.01428972, 0.01831941, 0.02275049, 0.02759119, 0.03285175, 0.03853466, 0.04447016, 0.05032105, 0.05611171, 0.0618531, 0.06755457, 0.0732236, 0.0788708, 0.08450105, 0.09011319, 0.09572396, 0.10132312, 0.10692823, 0.1125315, 0.11813947, 0.12375803, 0.12938228, 0.13501631, 0.14066867, 0.14633406, 0.15201338, 0.15770877, 0.16342174, 0.16915387, 0.17489524, 0.18065495, 0.18643324, 0.19223028, 0.19804623, 0.20388117, 0.20973515, 0.21560818, 0.22150014, 0.22741085, 0.23334047, 0.23928891, 0.24525608, 0.25124182, 0.25724602, 0.26326851, 0.26930915, 0.27536766, 0.28144375, 0.2875374, 0.29364846, 0.29977678, 0.30592213, 0.31208435, 0.31826327, 0.32445869, 0.33067031, 0.33689808, 0.34314168, 0.34940101, 0.355676, 0.36196644, 0.36827206, 0.37459292, 0.38092887, 0.38727975, 0.39364518, 0.40002537, 0.40642019, 0.41282936, 0.41925278, 0.42569057, 0.43214263, 0.43860848, 0.44508855, 0.45158266, 0.45809049, 0.46461238, 0.47114798, 0.47769736, 0.48426077, 0.49083761, 0.49742847, 0.50403286, 0.51065109, 0.51728314, 0.52392883, 0.53058853, 0.53726173, 
0.54394898, 0.5506426, 0.55734473, 0.56405586, 0.57077365, 0.5774991, 0.58422945, 0.59096382, 0.59770215, 0.60444226, 0.61118304, 0.61792306, 0.62466162, 0.63139686, 0.63812122, 0.64483795, 0.65154562, 0.65824241, 0.66492652, 0.67159578, 0.67824099, 0.684863, 0.69146268, 0.69803757, 0.70457834, 0.71108138, 0.7175507, 0.72398193, 0.73035829, 0.73669146, 0.74297501, 0.74919318, 0.75535825, 0.76145589, 0.76748424, 0.77344838, 0.77932641, 0.78513609, 0.79085854, 0.7965014, 0.80205987, 0.80752799, 0.81291606, 0.81820481, 0.82341472, 0.82852822, 0.83355779, 0.83850183, 0.84335441, 0.84813096, 0.85281737, 0.85742602, 0.86196552, 0.86641628, 0.87079129, 0.87507281, 0.87925878, 0.8833417, 0.88731387, 0.89116859, 0.89490337, 0.8985026, 0.90197527, 0.90530097, 0.90848638, 0.911533, 0.9144225, 0.91717106, 0.91978131, 0.92223947, 0.92456587, 0.92676657, 0.92882964, 0.93078135, 0.93262051, 0.93435067, 0.93599076, 0.93752831, 0.93899289, 0.94036561, 0.94167588, 0.94291042, 0.94408513, 0.94520419, 0.94625977, 0.94727016, 0.94823505, 0.94914549, 0.95001704, 0.95085121, 0.95165009, 0.9524044, 0.95312556, 0.95381595, 0.95447591, 0.95510255, 0.95569679, 0.95626788, 0.95681685, 0.9573439, 0.95784842, 0.95833051, 0.95879054, 0.95922872, 0.95964538, 0.96004345, 0.96042097, 0.96077819, 0.9611152, 0.96143273, 0.96173392, 0.96201757, 0.96228344, 0.96253168, 0.96276513, 0.96298491, 0.96318967, 0.96337949, 0.96355923, 0.96372785, 0.96388426, 0.96403203, 0.96417332, 0.9643063, 0.96443322, 0.96455845, 0.96467922, 0.96479861, 0.96492035, 0.96504223, 0.96516917, 0.96530224, 0.96544032, 0.96559206, 0.96575293, 0.96592829, 0.96612013, 0.96632832, 0.96656022, 0.96681185, 0.96709183, 0.96739773, 0.96773482, 0.96810471, 0.96850919, 0.96893132, 0.96935926, 0.9698028, 0.97025511, 0.97071849, 0.97120159, 0.97169389, 0.97220061, 0.97272597, 0.97327085, 0.97383206, 0.97441222, 0.97501782, 0.97564391, 0.97628674, 0.97696114, 0.97765722, 0.97837585, 0.97912374, 0.979891, 0.98067764, 0.98137749, 0.04503935, 
0.04933018, 0.05356262, 0.05774337, 0.06188095, 0.06598247, 0.07005374, 0.07409947, 0.07812339, 0.08212852, 0.08611731, 0.09009161, 0.09405308, 0.09800301, 0.10194255, 0.10587261, 0.1097942, 0.11370826, 0.11761516, 0.12151575, 0.12541095, 0.12930083, 0.13317849, 0.13701138, 0.14079223, 0.14452486, 0.14820351, 0.15183185, 0.15540398, 0.15892417, 0.16238588, 0.16579435, 0.16914226, 0.17243586, 0.17566717, 0.17884322, 0.18195582, 0.18501213, 0.18800459, 0.19093944, 0.19381092, 0.19662307, 0.19937337, 0.20206187, 0.20469116, 0.20725547, 0.20976258, 0.21220409, 0.21458611, 0.21690827, 0.21916481, 0.2213631, 0.22349947, 0.2255714, 0.22758415, 0.22953569, 0.23142077, 0.2332454, 0.2350092, 0.23670785, 0.23834119, 0.23991189, 0.24141903, 0.24286214, 0.24423453, 0.24554109, 0.2467815, 0.24795393, 0.24905614, 0.25007383, 0.25098926, 0.25179696, 0.25249346, 0.25307401, 0.25353152, 0.25386167, 0.25406082, 0.25412435, 0.25404842, 0.25383134, 0.2534705, 0.25296722, 0.2523226, 0.25153974, 0.25062402, 0.24958205, 0.24842386, 0.24715928, 0.24580099, 0.24436202, 0.24285591, 0.24129828, 0.23970131, 0.23807973, 0.23644557, 0.2348113, 0.23318874, 0.2315855, 0.23001121, 0.2284748, 0.22698081, 0.22553305, 0.22413977, 0.22280246, 0.22152555, 0.22030752, 0.2191538, 0.21806257, 0.21703799, 0.21607792, 0.21518463, 0.21435467, 0.21358663, 0.21288172, 0.21223835, 0.21165312, 0.21112526, 0.21065161, 0.21023306, 0.20985996, 0.20951045, 0.20916709, 0.20882976, 0.20849798, 0.20817199, 0.20785149, 0.20753716, 0.20722876, 0.20692679, 0.20663156, 0.20634336, 0.20606303, 0.20578999, 0.20552612, 0.20527189, 0.20502868, 0.20479718, 0.20457804, 0.20437304, 0.20418396, 0.20401238, 0.20385896, 0.20372653, 0.20361709, 0.20353258, 0.20347472, 0.20344718, 0.20345161, 0.20349089, 0.20356842, 0.20368663, 0.20384884, 0.20405904, 0.20431921, 0.20463464, 0.20500731, 0.20544449, 0.20596097, 0.20654832, 0.20721003, 0.20795035, 0.20877302, 0.20968223, 0.21068163, 0.21177544, 0.21298582, 0.21430361, 0.21572716, 
0.21726052, 0.21890636, 0.220668, 0.22255447, 0.22458372, 0.22673713, 0.22901625, 0.23142316, 0.23395924, 0.23663857, 0.23946645, 0.24242624, 0.24549008, 0.24863372, 0.25187832, 0.25524083, 0.25870947, 0.26229238, 0.26604085, 0.26993099, 0.27397488, 0.27822463, 0.28264201, 0.2873016, 0.29215894, 0.29729823, 0.30268199, 0.30835665, 0.31435139, 0.3206671, 0.32733158, 0.33437168, 0.34182112, 0.34968889, 0.35799244, 0.36675371, 0.3759816, 0.38566792, 0.39579804, 0.40634556, 0.41730243, 0.4285828, 0.44012728, 0.45189421, 0.46378379, 0.47573199, 0.48769865, 0.49962354, 0.5114027, 0.52301693, 0.53447549, 0.54578602, 0.55695565, 0.56798832, 0.57888639, 0.5896541, 0.60028928, 0.61079441, 0.62116633, 0.63140509, 0.64150735, 0.65147219, 0.66129632, 0.67097934, 0.68051833, 0.68991419, 0.69916533, 0.70827373, 0.71723995, 0.72606665, 0.73475675, 0.74331358, 0.75174143, 0.76004473, 0.76827704, 0.77647029, 0.78462009, 0.79273542, 0.8008109, 0.80885107, 0.81685878, 0.82483206, 0.83277661, 0.84069127, 0.84857662, 0.8564431, 0.86429066, 0.87218969, 0.18995, 0.19483, 0.19956, 0.20415, 0.2086, 0.21291, 0.21708, 0.22111, 0.225, 0.22875, 0.23236, 0.23582, 0.23915, 0.24234, 0.24539, 0.2483, 0.25107, 0.25369, 0.25618, 0.25853, 0.26074, 0.2628, 0.26473, 0.26652, 0.26816, 0.26967, 0.27103, 0.27226, 0.27334, 0.27429, 0.27509, 0.27576, 0.27628, 0.27667, 0.27691, 0.27701, 0.27698, 0.2768, 0.27648, 0.27603, 0.27543, 0.27469, 0.27381, 0.27273, 0.27106, 0.26878, 0.26592, 0.26252, 0.25862, 0.25425, 0.24946, 0.24427, 0.23874, 0.23288, 0.22676, 0.22039, 0.21382, 0.20708, 0.20021, 0.19326, 0.18625, 0.17923, 0.17223, 0.16529, 0.15844, 0.15173, 0.14519, 0.13886, 0.13278, 0.12698, 0.12151, 0.11639, 0.11167, 0.10738, 0.10357, 0.10026, 0.0975, 0.09532, 0.09377, 0.09287, 0.09267, 0.0932, 0.09451, 0.09662, 0.09958, 0.10342, 0.10815, 0.11374, 0.12014, 0.12733, 0.13526, 0.14391, 0.15323, 0.16319, 0.17377, 0.18491, 0.19659, 0.20877, 0.22142, 0.23449, 0.24797, 0.2618, 0.27597, 0.29042, 0.30513, 0.32006, 0.33517, 
0.35043, 0.36581, 0.38127, 0.39678, 0.41229, 0.42778, 0.44321, 0.45854, 0.47375, 0.48879, 0.50362, 0.51822, 0.53255, 0.54658, 0.56026, 0.57357, 0.58646, 0.59891, 0.61088, 0.62233, 0.63323, 0.64362, 0.65394, 0.66428, 0.67462, 0.68494, 0.69525, 0.70553, 0.71577, 0.72596, 0.7361, 0.74617, 0.75617, 0.76608, 0.77591, 0.78563, 0.79524, 0.80473, 0.8141, 0.82333, 0.83241, 0.84133, 0.8501, 0.85868, 0.86709, 0.8753, 0.88331, 0.89112, 0.8987, 0.90605, 0.91317, 0.92004, 0.92666, 0.93301, 0.93909, 0.94489, 0.95039, 0.9556, 0.96049, 0.96507, 0.96931, 0.97323, 0.97679, 0.98, 0.98289, 0.98549, 0.98781, 0.98986, 0.99163, 0.99314, 0.99438, 0.99535, 0.99607, 0.99654, 0.99675, 0.99672, 0.99644, 0.99593, 0.99517, 0.99419, 0.99297, 0.99153, 0.98987, 0.98799, 0.9859, 0.9836, 0.98108, 0.97837, 0.97545, 0.97234, 0.96904, 0.96555, 0.96187, 0.95801, 0.95398, 0.94977, 0.94538, 0.94084, 0.93612, 0.93125, 0.92623, 0.92105, 0.91572, 0.91024, 0.90463, 0.89888, 0.89298, 0.88691, 0.88066, 0.87422, 0.8676, 0.86079, 0.8538, 0.84662, 0.83926, 0.83172, 0.82399, 0.81608, 0.80799, 0.79971, 0.79125, 0.7826, 0.77377, 0.76476, 0.75556, 0.74617, 0.73661, 0.72686, 0.71692, 0.7068, 0.6965, 0.68602, 0.67535, 0.66449, 0.65345, 0.64223, 0.63082, 0.61923, 0.60746, 0.5955, 0.58336, 0.57103, 0.55852, 0.54583, 0.53295, 0.51989, 0.50664, 0.49321, 0.4796), G = c(0.000466127766, 0.00129495431, 0.00230452991, 0.00349037666, 0.00484285, 0.00635613622, 0.00802185006, 0.00982831486, 0.0117705913, 0.0138404966, 0.0160262753, 0.0183201254, 0.0207147875, 0.0232009284, 0.0257651161, 0.028397457, 0.0310895652, 0.0338299885, 0.0366066101, 0.039406602, 0.0421598925, 0.0447944924, 0.0473177796, 0.0497264666, 0.0520167766, 0.0541844801, 0.0562249365, 0.0581331465, 0.0599038167, 0.0615314414, 0.0630104053, 0.0643351102, 0.0654920358, 0.0664791593, 0.0672946449, 0.0679349264, 0.0683912798, 0.068654071, 0.0687382323, 0.0686368599, 0.0683540225, 0.0679108689, 0.067305326, 0.0665758073, 0.0657324381, 0.0648183312, 0.0638624166, 
0.0629066192, 0.0619917876, 0.0611584918, 0.0604451843, 0.0598886855, 0.0595170384, 0.0593524384, 0.0594147119, 0.0597055998, 0.0602368754, 0.0609935552, 0.0619778136, 0.0631676261, 0.0645534486, 0.0661170432, 0.0678353452, 0.0697024767, 0.0716895272, 0.0737819504, 0.0759715081, 0.0782361045, 0.0805635079, 0.0829463512, 0.0853726329, 0.0878311772, 0.0903143031, 0.0928159917, 0.0953322947, 0.0978549106, 0.100379466, 0.102902194, 0.105419865, 0.107929771, 0.110431177, 0.11292021, 0.115395258, 0.117854987, 0.120298314, 0.122724371, 0.125132484, 0.127522145, 0.129892998, 0.132244819, 0.1345775, 0.13689139, 0.139186217, 0.141462106, 0.143719323, 0.145958202, 0.148179144, 0.150382611, 0.152569121, 0.154739247, 0.156893613, 0.159032895, 0.161157816, 0.163269149, 0.165367714, 0.167454379, 0.169530062, 0.171595728, 0.173652392, 0.175701122, 0.177743036, 0.179779309, 0.18181117, 0.183839907, 0.185866869, 0.187893468, 0.189921182, 0.191951556, 0.19398621, 0.196026835, 0.198075202, 0.200133166, 0.202202663, 0.204285721, 0.206384461, 0.2085011, 0.210637956, 0.212797337, 0.214981693, 0.217193831, 0.219436516, 0.221712634, 0.224025196, 0.226377345, 0.228772352, 0.231213625, 0.233704708, 0.236249283, 0.23885117, 0.241514325, 0.24424225, 0.247039798, 0.24991135, 0.252861399, 0.25589455, 0.259015505, 0.262229049, 0.265539703, 0.268952874, 0.272473491, 0.276106469, 0.279856666, 0.283729003, 0.287728205, 0.291858679, 0.296124596, 0.30053009, 0.305078817, 0.309773445, 0.314616425, 0.319609981, 0.324755126, 0.330051947, 0.335500068, 0.341098112, 0.346843685, 0.352733817, 0.358764377, 0.364929312, 0.371224168, 0.377642889, 0.384177874, 0.390819546, 0.397562894, 0.404400213, 0.411323666, 0.418323245, 0.425389724, 0.432518707, 0.439702976, 0.446935635, 0.45421017, 0.461520484, 0.468860936, 0.47622635, 0.483612031, 0.491013764, 0.4984278, 0.505850848, 0.513280054, 0.520712972, 0.528147545, 0.53558207, 0.543015173, 0.550445778, 0.557873075, 0.565296495, 0.572706259, 0.580106828, 0.587501706, 
0.594891088, 0.602275297, 0.60964354, 0.616998953, 0.624349657, 0.631696376, 0.639026596, 0.646343897, 0.653658756, 0.660969379, 0.668255621, 0.675541484, 0.682827953, 0.690087897, 0.697348991, 0.704610791, 0.711847714, 0.719089119, 0.726324415, 0.733544671, 0.740771893, 0.747980563, 0.755189852, 0.762397883, 0.769590975, 0.77679486, 0.783976508, 0.791167346, 0.798347709, 0.805527126, 0.812705773, 0.819875302, 0.827051773, 0.834212826, 0.841386618, 0.848540474, 0.855711038, 0.862858846, 0.870024467, 0.877168404, 0.884329694, 0.891469549, 0.89862705, 0.905762748, 0.91291501, 0.920048699, 0.927195612, 0.93432854, 0.941470354, 0.948604077, 0.95574152, 0.962878026, 0.970012413, 0.977154231, 0.984287561, 0.991437853, 0.000466127766, 0.00126992553, 0.00224934863, 0.00339180156, 0.00469194561, 0.00613611626, 0.00771344131, 0.00941675403, 0.0112247138, 0.0131362262, 0.0151325789, 0.0171991484, 0.0193306298, 0.0215030771, 0.0237024271, 0.0259207864, 0.0281385015, 0.0303236129, 0.0324736172, 0.0345691867, 0.0365900213, 0.0385036268, 0.0402939095, 0.0419053329, 0.0433278666, 0.0445561662, 0.0455829503, 0.0464018731, 0.0470080541, 0.0473986708, 0.047573592, 0.0475360183, 0.0472930838, 0.0468563678, 0.0462422566, 0.0454676444, 0.0445588056, 0.0435542881, 0.0424893149, 0.0414017089, 0.0403288858, 0.0393088888, 0.0384001825, 0.0376322609, 0.0370296488, 0.0366146049, 0.0364049901, 0.0364052511, 0.0366209949, 0.0370545017, 0.0377052832, 0.0385706153, 0.0396468666, 0.0409215821, 0.0423528741, 0.0439325787, 0.0456437598, 0.0474700293, 0.0493958927, 0.0514069729, 0.0534901321, 0.0556335178, 0.0578265505, 0.0600598734, 0.0623252772, 0.06461561, 0.0669246832, 0.0692471753, 0.0715785403, 0.0739149211, 0.0762530701, 0.0785914864, 0.0809267058, 0.0832568129, 0.0855803445, 0.0878961593, 0.0902033992, 0.0925014543, 0.0947899342, 0.0970686417, 0.099337551, 0.101597079, 0.103847716, 0.106089165, 0.108321923, 0.110546584, 0.112763831, 0.11497443, 0.117179219, 0.119379132, 0.121575414, 
0.123768654, 0.125959947, 0.128150439, 0.130341324, 0.132533845, 0.134729286, 0.136928959, 0.139134147, 0.141346265, 0.143566769, 0.14579715, 0.148038934, 0.150293679, 0.152562977, 0.154848232, 0.157151161, 0.159473549, 0.161817111, 0.164183582, 0.166574724, 0.168992314, 0.17143815, 0.173913876, 0.176421271, 0.178962399, 0.181539111, 0.184153268, 0.186806728, 0.189501352, 0.192238994, 0.1950215, 0.197850703, 0.200728196, 0.203656029, 0.206635993, 0.209669834, 0.21275927, 0.215905976, 0.219111589, 0.222377697, 0.225705837, 0.229097492, 0.232554083, 0.236076967, 0.239667435, 0.24332672, 0.247055968, 0.250856232, 0.254728485, 0.25867361, 0.262692401, 0.266785558, 0.270953688, 0.2751973, 0.279516805, 0.283912516, 0.288384647, 0.292933312, 0.297558528, 0.302260213, 0.307038188, 0.311892183, 0.316821833, 0.321826685, 0.326906201, 0.33205976, 0.337286663, 0.342586137, 0.34795734, 0.353399363, 0.35891124, 0.364491949, 0.370140419, 0.375855533, 0.381636138, 0.387481044, 0.393389034, 0.399358867, 0.405389282, 0.411479007, 0.417626756, 0.423831237, 0.430091162, 0.436405243, 0.442772199, 0.449190757, 0.455659658, 0.462177656, 0.468743522, 0.475356048, 0.482014044, 0.488716345, 0.495461806, 0.502249309, 0.509077761, 0.515946092, 0.522853259, 0.529798246, 0.536780059, 0.543797733, 0.550850323, 0.557936911, 0.5650566, 0.572208516, 0.579391803, 0.586605627, 0.593849168, 0.601121626, 0.608422211, 0.615750147, 0.623104667, 0.630485011, 0.637890424, 0.645320152, 0.652773439, 0.660249526, 0.667747641, 0.675267, 0.682806802, 0.690366218, 0.697944391, 0.705540424, 0.713153375, 0.72078246, 0.728427497, 0.736086521, 0.743758326, 0.751441596, 0.759134892, 0.766836624, 0.774545028, 0.782258138, 0.789973753, 0.797691563, 0.805409333, 0.813121725, 0.820825143, 0.828515491, 0.836190976, 0.843848069, 0.85147634, 0.859068716, 0.866624355, 0.874128569, 0.881568926, 0.888942277, 0.896225909, 0.903409063, 0.910472964, 0.917399053, 0.924168246, 0.930760752, 0.937158971, 0.943347775, 0.949317522, 
0.9550629, 0.960586693, 0.965895868, 0.97100333, 0.975924241, 0.980678193, 0.985282161, 0.989753437, 0.994108844, 0.998364143, 0.0298028976, 0.0284259729, 0.0272063728, 0.0261253206, 0.0251650976, 0.0243092436, 0.02355625, 0.0228781011, 0.0222583774, 0.0216866674, 0.0211535876, 0.0206507174, 0.0201705326, 0.0197063415, 0.0192522243, 0.0188029767, 0.0183540593, 0.0179015512, 0.0174421086, 0.0169729276, 0.0164970484, 0.0160071509, 0.0155015065, 0.0149791041, 0.0144393586, 0.0138820918, 0.0133075156, 0.0127162163, 0.0121091423, 0.0114875915, 0.0108554862, 0.0102128849, 0.00956079551, 0.00890185346, 0.00823900704, 0.00757551051, 0.00691491734, 0.00626107379, 0.00561830889, 0.0049905308, 0.00438202557, 0.00379781761, 0.00324319591, 0.00272370721, 0.00224514897, 0.00181356205, 0.00143446923, 0.00111388259, 0.000859420809, 0.000678091517, 0.000577101735, 0.000563847476, 0.00064590278, 0.000831008207, 0.00112705875, 0.00153984779, 0.00207954744, 0.00275470302, 0.00357374415, 0.00454518084, 0.00567758762, 0.00697958743, 0.00845983494, 0.0101269996, 0.0119897486, 0.014055064, 0.0163333443, 0.0188332232, 0.0215631918, 0.0245316468, 0.0277468735, 0.03121703, 0.034950131, 0.0389540334, 0.0431364795, 0.0473307585, 0.0515448092, 0.0557776706, 0.0600281369, 0.0642955547, 0.0685790261, 0.0728775875, 0.0771902878, 0.0815161895, 0.0858543713, 0.0902039303, 0.0945639838, 0.0989336721, 0.10331216, 0.107698641, 0.112092335, 0.116492495, 0.120898405, 0.125309384, 0.129724785, 0.134143997, 0.138566428, 0.14299154, 0.147418835, 0.151847851, 0.156278163, 0.160709387, 0.165141174, 0.169573215, 0.174005236, 0.178437, 0.182868306, 0.187298986, 0.191728906, 0.196157962, 0.200586086, 0.205013174, 0.209439071, 0.213863965, 0.218287899, 0.222710942, 0.227133187, 0.231554749, 0.235975765, 0.240396394, 0.244816813, 0.24923722, 0.253657797, 0.258078397, 0.262499662, 0.266921859, 0.271345267, 0.275770179, 0.280196901, 0.28462575, 0.289057057, 0.293491117, 0.297927865, 0.30236813, 0.306812282, 
0.311260703, 0.315713782, 0.320171913, 0.324635499, 0.329104836, 0.333580106, 0.338062109, 0.342551272, 0.347048028, 0.351552815, 0.356066072, 0.360588229, 0.365119408, 0.369660446, 0.374211795, 0.37877391, 0.383347243, 0.387932249, 0.392529339, 0.397138877, 0.401761511, 0.406397694, 0.411047871, 0.415712489, 0.420391986, 0.425086807, 0.429797442, 0.434524335, 0.439267908, 0.444028574, 0.448806744, 0.453602818, 0.45841742, 0.463250828, 0.468103387, 0.472975465, 0.47786742, 0.482779603, 0.487712357, 0.492666544, 0.497642038, 0.502639147, 0.507658169, 0.51269939, 0.517763087, 0.522849522, 0.52795955, 0.533093083, 0.538250172, 0.543431038, 0.54863589, 0.553864931, 0.559118349, 0.564396327, 0.569699633, 0.57502827, 0.580382015, 0.585761012, 0.591165394, 0.596595287, 0.602050811, 0.607532077, 0.61303919, 0.61857225, 0.624131362, 0.629717516, 0.635329876, 0.640968508, 0.646633475, 0.652324832, 0.65804263, 0.663786914, 0.66955772, 0.675355082, 0.681179025, 0.687029567, 0.692906719, 0.698810484, 0.704740854, 0.710697814, 0.716681336, 0.722691379, 0.72872789, 0.734790799, 0.74088002, 0.746995448, 0.753136955, 0.75930439, 0.765498551, 0.771719833, 0.777966775, 0.78423912, 0.790536569, 0.796858775, 0.803205337, 0.809578605, 0.815977942, 0.82240062, 0.82884598, 0.83531536, 0.84181173, 0.848328902, 0.854866468, 0.861432314, 0.868015998, 0.874622194, 0.881250063, 0.887896125, 0.894563989, 0.901249365, 0.907950379, 0.914672479, 0.921406537, 0.928152065, 0.93490773, 0.941670605, 0.9484349, 0.95518986, 0.961916487, 0.968589814, 0.975158357, 0.00487433, 0.00960483, 0.01462494, 0.01994186, 0.02556309, 0.03149748, 0.03775181, 0.04416723, 0.05034437, 0.05632444, 0.06214536, 0.06783587, 0.07341724, 0.07890703, 0.0843197, 0.08966622, 0.09495545, 0.10019576, 0.10539345, 0.11055307, 0.11567966, 0.12077701, 0.12584799, 0.13089477, 0.13592005, 0.14092556, 0.14591233, 0.15088147, 0.15583425, 0.16077132, 0.16569272, 0.17059884, 0.1754902, 0.18036684, 0.18522836, 0.19007447, 0.1949054, 
0.19972086, 0.20452049, 0.20930306, 0.21406899, 0.21881782, 0.22354911, 0.2282621, 0.23295593, 0.23763078, 0.24228619, 0.2469217, 0.25153685, 0.2561304, 0.26070284, 0.26525384, 0.26978306, 0.27429024, 0.27877509, 0.28323662, 0.28767547, 0.29209154, 0.29648471, 0.30085494, 0.30520222, 0.30952657, 0.31382773, 0.3181058, 0.32236127, 0.32659432, 0.33080515, 0.334994, 0.33916114, 0.34330688, 0.34743154, 0.35153548, 0.35561907, 0.35968273, 0.36372671, 0.36775151, 0.37175775, 0.37574589, 0.37971644, 0.38366989, 0.38760678, 0.39152762, 0.39543297, 0.39932336, 0.40319934, 0.40706148, 0.41091033, 0.41474645, 0.4185704, 0.42238275, 0.42618405, 0.42997486, 0.43375572, 0.4375272, 0.44128981, 0.4450441, 0.4487906, 0.4525298, 0.45626209, 0.45998802, 0.46370813, 0.4674229, 0.47113278, 0.47483821, 0.47853961, 0.4822374, 0.48593197, 0.4896237, 0.49331293, 0.49700003, 0.50068529, 0.50436904, 0.50805136, 0.51173263, 0.51541316, 0.51909319, 0.52277292, 0.52645254, 0.53013219, 0.53381201, 0.53749213, 0.54117264, 0.54485335, 0.54853458, 0.55221637, 0.55589872, 0.55958162, 0.56326503, 0.56694891, 0.57063316, 0.57431754, 0.57800205, 0.58168661, 0.58537105, 0.58905521, 0.59273889, 0.59642187, 0.60010387, 0.60378459, 0.60746388, 0.61114146, 0.61481702, 0.61849025, 0.62216081, 0.62582833, 0.62949242, 0.63315277, 0.63680899, 0.64046069, 0.64410744, 0.64774881, 0.65138436, 0.65501363, 0.65863619, 0.66225157, 0.66585927, 0.66945881, 0.67304968, 0.67663139, 0.68020343, 0.68376525, 0.68731632, 0.69085611, 0.69438405, 0.6978996, 0.70140222, 0.70489133, 0.70836635, 0.71182668, 0.71527175, 0.71870095, 0.72211371, 0.72550945, 0.72888753, 0.73224735, 0.73558828, 0.73890972, 0.74221104, 0.74549162, 0.74875084, 0.75198807, 0.75520266, 0.75839399, 0.76156142, 0.76470433, 0.76782207, 0.77091403, 0.77397953, 0.7770179, 0.78002855, 0.78301086, 0.78596419, 0.78888793, 0.79178146, 0.79464415, 0.79747541, 0.80027461, 0.80304099, 0.80577412, 0.80847343, 0.81113836, 0.81376835, 0.81636288, 0.81892143, 0.82144351, 
0.82392862, 0.82637633, 0.82878621, 0.83115784, 0.83349064, 0.83578452, 0.83803918, 0.84025437, 0.8424299, 0.84456561, 0.84666139, 0.84871722, 0.8507331, 0.85270912, 0.85464543, 0.85654226, 0.85839991, 0.86021878, 0.86199932, 0.86374211, 0.86544779, 0.86711711, 0.86875092, 0.87035015, 0.87191584, 0.87344918, 0.87495143, 0.87642392, 0.87786808, 0.87928545, 0.88067763, 0.88204632, 0.88339329, 0.88472036, 0.88602943, 0.88732243, 0.88860134, 0.88986815, 0.89112487, 0.89237353, 0.89361614, 0.89485467, 0.89609127, 0.89732977, 0.8985704, 0.899815, 0.90106534, 0.90232311, 0.90358991, 0.90486726, 0.90615657, 0.1262, 0.1292, 0.1321, 0.135, 0.1379, 0.1408, 0.1437, 0.1465, 0.1492, 0.1519, 0.1546, 0.1574, 0.1601, 0.1629, 0.1657, 0.1685, 0.1714, 0.1743, 0.1773, 0.1798, 0.1817, 0.1834, 0.1852, 0.1872, 0.1901, 0.193, 0.1958, 0.1987, 0.2015, 0.2044, 0.2073, 0.2101, 0.213, 0.2158, 0.2187, 0.2215, 0.2244, 0.2272, 0.23, 0.2329, 0.2357, 0.2385, 0.2414, 0.2442, 0.247, 0.2498, 0.2526, 0.2555, 0.2583, 0.2611, 0.2639, 0.2667, 0.2695, 0.2723, 0.2751, 0.278, 0.2808, 0.2836, 0.2864, 0.2892, 0.292, 0.2948, 0.2976, 0.3004, 0.3032, 0.306, 0.3088, 0.3116, 0.3144, 0.3172, 0.32, 0.3228, 0.3256, 0.3284, 0.3312, 0.334, 0.3368, 0.3396, 0.3424, 0.3453, 0.3481, 0.3509, 0.3537, 0.3565, 0.3593, 0.3622, 0.365, 0.3678, 0.3706, 0.3734, 0.3763, 0.3791, 0.3819, 0.3848, 0.3876, 0.3904, 0.3933, 0.3961, 0.399, 0.4018, 0.4047, 0.4075, 0.4104, 0.4132, 0.4161, 0.4189, 0.4218, 0.4247, 0.4275, 0.4304, 0.4333, 0.4362, 0.439, 0.4419, 0.4448, 0.4477, 0.4506, 0.4535, 0.4564, 0.4593, 0.4622, 0.4651, 0.468, 0.4709, 0.4738, 0.4767, 0.4797, 0.4826, 0.4856, 0.4886, 0.4915, 0.4945, 0.4975, 0.5005, 0.5035, 0.5065, 0.5095, 0.5125, 0.5155, 0.5186, 0.5216, 0.5246, 0.5277, 0.5307, 0.5338, 0.5368, 0.5399, 0.543, 0.5461, 0.5491, 0.5522, 0.5553, 0.5584, 0.5615, 0.5646, 0.5678, 0.5709, 0.574, 0.5772, 0.5803, 0.5835, 0.5866, 0.5898, 0.5929, 0.5961, 0.5993, 0.6025, 0.6057, 0.6089, 0.6121, 0.6153, 0.6185, 0.6217, 0.625, 0.6282, 0.6315, 
0.6347, 0.638, 0.6412, 0.6445, 0.6478, 0.6511, 0.6544, 0.6577, 0.661, 0.6643, 0.6676, 0.671, 0.6743, 0.6776, 0.681, 0.6844, 0.6877, 0.6911, 0.6945, 0.6979, 0.7013, 0.7047, 0.7081, 0.7115, 0.715, 0.7184, 0.7218, 0.7253, 0.7288, 0.7322, 0.7357, 0.7392, 0.7427, 0.7462, 0.7497, 0.7532, 0.7568, 0.7603, 0.7639, 0.7674, 0.771, 0.7745, 0.7781, 0.7817, 0.7853, 0.7889, 0.7926, 0.7962, 0.7998, 0.8035, 0.8071, 0.8108, 0.8145, 0.8182, 0.8219, 0.8256, 0.8293, 0.833, 0.8367, 0.8405, 0.8442, 0.848, 0.8518, 0.8556, 0.8593, 0.8632, 0.867, 0.8708, 0.8746, 0.8785, 0.8823, 0.8862, 0.8901, 0.894, 0.8979, 0.9018, 0.9057, 0.9094, 0.9131, 0.9169, 0.01808215, 0.02048237, 0.0229766, 0.02554464, 0.02818316, 0.03088792, 0.03365771, 0.03648425, 0.03936808, 0.04224835, 0.04504866, 0.04778179, 0.05045047, 0.05305461, 0.05559631, 0.05808059, 0.06050127, 0.06286782, 0.06517224, 0.06742194, 0.06961499, 0.07174938, 0.07383015, 0.07585609, 0.0778224, 0.07973393, 0.08159108, 0.08339312, 0.0851396, 0.08682996, 0.08848235, 0.09009031, 0.09165431, 0.09317479, 0.09465217, 0.09608689, 0.09747934, 0.09882993, 0.10013944, 0.10140876, 0.10263737, 0.10382562, 0.10497384, 0.10608236, 0.10715148, 0.1081815, 0.1091727, 0.11012568, 0.11104133, 0.11191896, 0.11275876, 0.11356089, 0.11432553, 0.11505284, 0.1157429, 0.11639585, 0.11701189, 0.11759095, 0.11813362, 0.11863987, 0.11910909, 0.1195413, 0.11993653, 0.12029443, 0.12061482, 0.12089756, 0.12114272, 0.12134964, 0.12151801, 0.12164769, 0.12173833, 0.12178916, 0.12179973, 0.12177004, 0.12169883, 0.12158557, 0.12142996, 0.12123063, 0.12098721, 0.12069864, 0.12036349, 0.11998161, 0.11955087, 0.11907081, 0.11853959, 0.1179558, 0.11731817, 0.11662445, 0.11587369, 0.11506307, 0.11420757, 0.11330456, 0.11235265, 0.11135597, 0.11031233, 0.10922707, 0.10810205, 0.10693774, 0.10573912, 0.10450943, 0.10325288, 0.10197244, 0.10067417, 0.09938212, 0.0980891, 0.09680192, 0.09552918, 0.09428017, 0.09306598, 0.09192342, 0.09085633, 0.0898675, 0.08897226, 0.0882129, 0.08761223, 
0.08716212, 0.08688725, 0.0868623, 0.08704683, 0.08747196, 0.08820542, 0.08919792, 0.09050716, 0.09213602, 0.09405684, 0.09634794, 0.09892473, 0.10184672, 0.10506637, 0.10858333, 0.11239964, 0.11645784, 0.12080606, 0.12535343, 0.13014118, 0.13511035, 0.14025098, 0.14556683, 0.15099892, 0.15657772, 0.1622583, 0.16801239, 0.17387796, 0.17982114, 0.18587368, 0.19203259, 0.19830556, 0.20469941, 0.21121788, 0.21785614, 0.22463251, 0.23152063, 0.23854541, 0.24568473, 0.25292623, 0.26028902, 0.26773821, 0.27526191, 0.28287251, 0.29053388, 0.29823282, 0.30598085, 0.31373977, 0.3215093, 0.32928362, 0.33703942, 0.34479177, 0.35250734, 0.36020899, 0.36786594, 0.37549479, 0.3830811, 0.39062329, 0.39813168, 0.4055909, 0.41300424, 0.42038251, 0.42771398, 0.43500005, 0.44224144, 0.44944853, 0.45661389, 0.46373781, 0.47082238, 0.47787236, 0.48489115, 0.49187351, 0.49882008, 0.50573243, 0.51261283, 0.51946267, 0.52628305, 0.53307513, 0.53983991, 0.54657593, 0.55328624, 0.55997184, 0.5666337, 0.57327231, 0.57988594, 0.58647675, 0.59304598, 0.5995944, 0.60612062, 0.6126247, 0.61910879, 0.6255736, 0.63201624, 0.63843852, 0.64484214, 0.65122535, 0.65758729, 0.66393045, 0.67025402, 0.67655564, 0.68283846, 0.68910113, 0.69534192, 0.7015636, 0.70776351, 0.71394212, 0.72010124, 0.72623592, 0.73235058, 0.73844258, 0.74451182, 0.75055966, 0.75658231, 0.76258381, 0.76855969, 0.77451297, 0.78044149, 0.78634563, 0.79222565, 0.79809112, 0.80395415, 0.80981139, 0.81566605, 0.82151775, 0.82736371, 0.83320847, 0.83905052, 0.84488881, 0.85072354, 0.85655639, 0.86238689, 0.86821321, 0.87403763, 0.87986189, 0.88568129, 0.89149971, 0.89731727, 0.90313207, 0.90894778, 0.91476465, 0.92061729, 0.01482344, 0.01709292, 0.01950702, 0.02205989, 0.02474764, 0.0275665, 0.03051278, 0.03358324, 0.03677446, 0.0400833, 0.04339148, 0.04664706, 0.04985685, 0.05302279, 0.05614641, 0.05922941, 0.06227277, 0.06527747, 0.06824548, 0.07117741, 0.07407363, 0.07693611, 0.07976988, 0.08259683, 0.08542126, 0.08824175, 
0.09106304, 0.09388372, 0.09670855, 0.09953561, 0.10236998, 0.10520905, 0.10805832, 0.11091443, 0.11378321, 0.11666074, 0.11955283, 0.12245547, 0.12537395, 0.1283047, 0.13125179, 0.13421303, 0.13719028, 0.14018372, 0.14319196, 0.14621882, 0.14925954, 0.15231929, 0.15539445, 0.15848519, 0.16159489, 0.16471913, 0.1678599, 0.1710185, 0.17419169, 0.17738041, 0.18058733, 0.18380872, 0.18704459, 0.190297, 0.19356547, 0.19684817, 0.20014508, 0.20345642, 0.20678459, 0.21012669, 0.21348266, 0.21685249, 0.22023618, 0.22365053, 0.22710664, 0.23060342, 0.23414353, 0.23772973, 0.24136961, 0.24506548, 0.2488164, 0.25262843, 0.25650743, 0.26044852, 0.26446165, 0.2685428, 0.27269346, 0.27691629, 0.28120467, 0.28556371, 0.28998148, 0.29446327, 0.29899398, 0.30357852, 0.30819938, 0.31286235, 0.3175495, 0.32226344, 0.32699241, 0.33173196, 0.33648036, 0.34122763, 0.34597357, 0.35071512, 0.35544612, 0.36016515, 0.36487341, 0.36956728, 0.37424409, 0.37890437, 0.38354668, 0.38817169, 0.39277882, 0.39736958, 0.40194196, 0.40649717, 0.41103579, 0.41555771, 0.42006355, 0.42455441, 0.42903064, 0.43349321, 0.43794288, 0.44238227, 0.44680966, 0.45122981, 0.45564335, 0.46005094, 0.46445309, 0.46885041, 0.47324327, 0.47763224, 0.48201774, 0.48640018, 0.49078002, 0.49515755, 0.49953341, 0.50390766, 0.50828072, 0.51265277, 0.51702417, 0.52139527, 0.52576622, 0.53013715, 0.53450825, 0.53887991, 0.54325208, 0.5476249, 0.55199854, 0.55637318, 0.56074869, 0.56512531, 0.56950304, 0.57388184, 0.57826181, 0.58264293, 0.58702506, 0.59140842, 0.59579264, 0.60017798, 0.60456387, 0.60894927, 0.61333521, 0.61772167, 0.62210845, 0.62649546, 0.63088252, 0.63526951, 0.63965621, 0.64404072, 0.64842404, 0.65280655, 0.65718791, 0.66156803, 0.66594665, 0.67032297, 0.67469531, 0.67906542, 0.6834332, 0.68779836, 0.69216072, 0.69651881, 0.70087194, 0.70522162, 0.70957083, 0.71392166, 0.71827158, 0.72261873, 0.72696469, 0.73130855, 0.73564353, 0.73997282, 0.74429484, 0.74860229, 0.75290034, 0.75717817, 0.76144162, 
0.76567816, 0.76989232, 0.77407636, 0.77822478, 0.78233575, 0.78640315, 0.79042043, 0.79437948, 0.79827511, 0.80210037, 0.80584651, 0.80950627, 0.81307432, 0.81654592, 0.81991799, 0.82318339, 0.82635051, 0.82942353, 0.83240398, 0.83530763, 0.83814472, 0.84092197, 0.84365379, 0.8463718, 0.84908401, 0.85179048, 0.8544913, 0.85718723, 0.85987893, 0.86256715, 0.8652532, 0.86793835, 0.87062438, 0.87331311, 0.87600675, 0.87870746, 0.8814179, 0.8841403, 0.88687758, 0.88963189, 0.89240612, 0.89520211, 0.89802257, 0.90086891, 0.90374337, 0.90664718, 0.90958151, 0.91254787, 0.91554656, 0.91856549, 0.921603, 0.92466151, 0.92773848, 0.93083672, 0.93395528, 0.9370938, 0.94025378, 0.94343371, 0.94663473, 0.9498573, 0.95309792, 0.95635719, 0.95960708, 0.07176, 0.08339, 0.09498, 0.10652, 0.11802, 0.12947, 0.14087, 0.15223, 0.16354, 0.17481, 0.18603, 0.1972, 0.20833, 0.21941, 0.23044, 0.24143, 0.25237, 0.26327, 0.27412, 0.28492, 0.29568, 0.30639, 0.31706, 0.32768, 0.33825, 0.34878, 0.35926, 0.3697, 0.38008, 0.39043, 0.40072, 0.41097, 0.42118, 0.43134, 0.44145, 0.45152, 0.46153, 0.47151, 0.48144, 0.49132, 0.50115, 0.51094, 0.52069, 0.5304, 0.54015, 0.54995, 0.55979, 0.56967, 0.57958, 0.5895, 0.59943, 0.60937, 0.61931, 0.62923, 0.63913, 0.64901, 0.65886, 0.66866, 0.67842, 0.68812, 0.69775, 0.70732, 0.7168, 0.7262, 0.73551, 0.74472, 0.75381, 0.76279, 0.77165, 0.78037, 0.78896, 0.7974, 0.80569, 0.81381, 0.82177, 0.82955, 0.83714, 0.84455, 0.85175, 0.85875, 0.86554, 0.87211, 0.87844, 0.88454, 0.8904, 0.896, 0.90142, 0.90673, 0.91193, 0.91701, 0.92197, 0.9268, 0.93151, 0.93609, 0.94053, 0.94484, 0.94901, 0.95304, 0.95692, 0.96065, 0.96423, 0.96765, 0.97092, 0.97403, 0.97697, 0.97974, 0.98234, 0.98477, 0.98702, 0.98909, 0.99098, 0.99268, 0.99419, 0.99551, 0.99663, 0.99755, 0.99828, 0.99879, 0.9991, 0.99919, 0.99907, 0.99873, 0.99817, 0.99739, 0.99638, 0.99514, 0.99366, 0.99195, 0.98999, 0.98775, 0.98524, 0.98246, 0.97941, 0.9761, 0.97255, 0.96875, 0.9647, 0.96043, 0.95593, 0.95121, 
0.94627, 0.94113, 0.93579, 0.93025, 0.92452, 0.91861, 0.91253, 0.90627, 0.89986, 0.89328, 0.88655, 0.87968, 0.87267, 0.86553, 0.85826, 0.85087, 0.84337, 0.83576, 0.82806, 0.82025, 0.81236, 0.80439, 0.79634, 0.78823, 0.78005, 0.77181, 0.76352, 0.75519, 0.74682, 0.73842, 0.73, 0.7214, 0.7125, 0.7033, 0.69382, 0.68408, 0.67408, 0.66386, 0.65341, 0.64277, 0.63193, 0.62093, 0.60977, 0.59846, 0.58703, 0.57549, 0.56386, 0.55214, 0.54036, 0.52854, 0.51667, 0.50479, 0.49291, 0.48104, 0.4692, 0.4574, 0.44565, 0.43399, 0.42241, 0.41093, 0.39958, 0.38836, 0.37729, 0.36638, 0.35566, 0.34513, 0.33482, 0.32473, 0.31489, 0.3053, 0.29599, 0.28696, 0.27824, 0.26981, 0.26152, 0.25334, 0.24526, 0.2373, 0.22945, 0.2217, 0.21407, 0.20654, 0.19912, 0.19182, 0.18462, 0.17753, 0.17055, 0.16368, 0.15693, 0.15028, 0.14374, 0.13731, 0.13098, 0.12477, 0.11867, 0.11268, 0.1068, 0.10102, 0.09536, 0.0898, 0.08436, 0.07902, 0.0738, 0.06868, 0.06367, 0.05878, 0.05399, 0.04931, 0.04474, 0.04028, 0.03593, 0.03169, 0.02756, 0.02354, 0.01963, 0.01583), B = c(0.01386552, 0.0183311461, 0.0237083291, 0.0299647059, 0.0371296695, 0.0449730774, 0.0528443561, 0.060749638, 0.0686665843, 0.076602666, 0.0845844897, 0.092610105, 0.100675555, 0.108786954, 0.116964722, 0.125209396, 0.133515085, 0.141886249, 0.150326989, 0.158841025, 0.167445592, 0.176128834, 0.184891506, 0.193735088, 0.202660374, 0.211667355, 0.220755099, 0.229921611, 0.239163669, 0.248476662, 0.2578544, 0.267288933, 0.276783978, 0.286320656, 0.295879431, 0.305442931, 0.31499989, 0.32453764, 0.334011109, 0.34340445, 0.352688028, 0.361816426, 0.370770827, 0.379497161, 0.387972507, 0.396151969, 0.404008953, 0.411514273, 0.418646741, 0.425391816, 0.431741767, 0.437694665, 0.443255999, 0.448435938, 0.453247729, 0.457709924, 0.461840297, 0.465660375, 0.469190328, 0.472450879, 0.475462193, 0.478243482, 0.480811572, 0.48318634, 0.485380429, 0.487408399, 0.489286796, 0.491024144, 0.492631321, 0.494120923, 0.495501096, 0.496778331, 0.497959963, 0.499053326, 
0.500066568, 0.501001964, 0.501864236, 0.50265759, 0.503385761, 0.504052118, 0.504661843, 0.505214935, 0.505713602, 0.506159754, 0.506555026, 0.506900806, 0.507198258, 0.507448336, 0.507651812, 0.507809282, 0.507921193, 0.507988509, 0.508010737, 0.507987836, 0.507919772, 0.50780642, 0.50764757, 0.507442938, 0.507192172, 0.50689486, 0.506550538, 0.506158696, 0.505718782, 0.50523021, 0.504692365, 0.504104606, 0.503466273, 0.50277669, 0.502035167, 0.501241011, 0.500393522, 0.499491999, 0.498535746, 0.497524075, 0.496456304, 0.495331769, 0.494149821, 0.492909832, 0.491611196, 0.490253338, 0.488835712, 0.487357807, 0.485819154, 0.484219325, 0.482557941, 0.480834678, 0.47904927, 0.477201121, 0.47528978, 0.473315708, 0.471278924, 0.469179541, 0.467017774, 0.464793954, 0.462508534, 0.460162106, 0.457755411, 0.455289354, 0.452765022, 0.450183695, 0.447543155, 0.444848441, 0.442101615, 0.439304963, 0.436461074, 0.433572874, 0.430643647, 0.427671352, 0.42466562, 0.421631064, 0.418572767, 0.415496319, 0.412402889, 0.409303002, 0.406205397, 0.403118034, 0.40004706, 0.397001559, 0.393994634, 0.391036674, 0.388136889, 0.385308008, 0.382563414, 0.379915138, 0.377375977, 0.374959077, 0.372676513, 0.370540883, 0.368566525, 0.366761699, 0.365136328, 0.36370113, 0.362467694, 0.361438431, 0.360619076, 0.360014232, 0.359629789, 0.35946902, 0.359529151, 0.359810172, 0.36031112, 0.361030156, 0.361964652, 0.363111292, 0.364466162, 0.366024854, 0.367782559, 0.369734157, 0.371874301, 0.374197501, 0.376698186, 0.379370774, 0.382209724, 0.385209578, 0.388365009, 0.391670846, 0.395122099, 0.398713971, 0.402441058, 0.406298792, 0.410282976, 0.414389658, 0.418613221, 0.422949672, 0.427396771, 0.431951492, 0.436607159, 0.441360951, 0.446213021, 0.451160201, 0.456191814, 0.461314158, 0.466525689, 0.471811461, 0.477181727, 0.482634651, 0.488154375, 0.493754665, 0.499427972, 0.505166839, 0.510983331, 0.516859378, 0.522805996, 0.528820775, 0.534892341, 0.541038571, 0.547232992, 0.553498939, 
0.559819643, 0.566201824, 0.572644795, 0.57914013, 0.585701463, 0.592307093, 0.598982818, 0.605695903, 0.612481798, 0.6192993, 0.626189463, 0.633109148, 0.640099465, 0.647116021, 0.654201544, 0.661308839, 0.668481201, 0.675674592, 0.682925602, 0.690198194, 0.697518628, 0.704862519, 0.712242232, 0.719648627, 0.727076773, 0.734536205, 0.742001547, 0.749504188, 0.01386552, 0.018570352, 0.0242390508, 0.0309092475, 0.038557898, 0.0468360336, 0.0551430756, 0.063459808, 0.071861689, 0.0802817951, 0.0887668094, 0.0973274383, 0.105929835, 0.114621328, 0.123397286, 0.132232108, 0.141140519, 0.150163867, 0.159254277, 0.168413539, 0.177642172, 0.186961588, 0.196353558, 0.205798788, 0.215289113, 0.224813479, 0.234357604, 0.2439037, 0.2534303, 0.262912235, 0.272320803, 0.28162417, 0.290788012, 0.299776404, 0.30855291, 0.317085139, 0.325338414, 0.333276678, 0.340874188, 0.348110606, 0.354971391, 0.361446945, 0.367534629, 0.373237557, 0.378563264, 0.383522415, 0.388128944, 0.39240015, 0.396353388, 0.400006615, 0.403377897, 0.406485031, 0.409345373, 0.411976086, 0.414392106, 0.416607861, 0.418636756, 0.420491164, 0.422182449, 0.423720999, 0.425116277, 0.426376869, 0.427510546, 0.42852432, 0.429424503, 0.430216765, 0.430906186, 0.431497309, 0.431994185, 0.432400419, 0.432719214, 0.432954973, 0.433108763, 0.433182647, 0.433178526, 0.433098056, 0.432942678, 0.432713635, 0.432411996, 0.432038673, 0.431594438, 0.431080497, 0.430497898, 0.429845789, 0.429124507, 0.42833432, 0.427475431, 0.426547991, 0.425552106, 0.424487908, 0.42335611, 0.422155676, 0.420886594, 0.419548848, 0.418142411, 0.416667258, 0.415123366, 0.413510662, 0.411828882, 0.410078028, 0.408258132, 0.406369246, 0.404411444, 0.402384829, 0.400289528, 0.398124897, 0.395891308, 0.393589349, 0.391219295, 0.388781456, 0.38627618, 0.383703854, 0.381064906, 0.378358969, 0.375586209, 0.372748214, 0.369845599, 0.366879025, 0.363849195, 0.360756856, 0.357602797, 0.354387853, 0.3511129, 0.347776863, 0.344382594, 0.340931208, 
0.337423766, 0.333861367, 0.330245147, 0.326576275, 0.322855952, 0.31908541, 0.31526591, 0.311398734, 0.307485188, 0.303526312, 0.299522665, 0.295476756, 0.291389943, 0.287263585, 0.283099033, 0.278897629, 0.274660698, 0.270389545, 0.266085445, 0.261749643, 0.257383341, 0.2529877, 0.248563825, 0.244112767, 0.239635512, 0.235132978, 0.230606009, 0.226055368, 0.221481734, 0.216885699, 0.212267762, 0.207628326, 0.202967696, 0.19828608, 0.193583583, 0.188860212, 0.184115876, 0.179350388, 0.174563472, 0.169754764, 0.164923826, 0.160070152, 0.155193185, 0.150292329, 0.145366973, 0.140416519, 0.135440416, 0.130438175, 0.12540944, 0.120354038, 0.115272059, 0.110163947, 0.105030614, 0.0998735931, 0.0946952268, 0.0894989073, 0.0842893891, 0.0790731907, 0.0738591143, 0.0686589199, 0.0634881971, 0.058367489, 0.0533237243, 0.048392009, 0.0436177922, 0.0390500131, 0.0349306227, 0.0314091591, 0.0285075931, 0.0262497353, 0.0246613416, 0.0237702263, 0.0236063833, 0.0242021174, 0.0255921853, 0.0278139496, 0.0309075459, 0.0349160639, 0.0398857472, 0.0455808037, 0.0517503867, 0.0583286889, 0.0652570167, 0.072489233, 0.0799897176, 0.0877314215, 0.0956941797, 0.103863324, 0.112228756, 0.120784651, 0.129526579, 0.138453063, 0.147564573, 0.156863224, 0.166352544, 0.176037298, 0.185923357, 0.196017589, 0.206331925, 0.216876839, 0.227658046, 0.238685942, 0.249971582, 0.261533898, 0.273391112, 0.285545675, 0.298010219, 0.310820466, 0.323973947, 0.337475479, 0.351368713, 0.365627005, 0.380271225, 0.395289169, 0.410665194, 0.426373236, 0.442367495, 0.458591507, 0.474969778, 0.491426053, 0.507859649, 0.524203026, 0.540360752, 0.55627509, 0.571925382, 0.587205773, 0.60215433, 0.616760413, 0.631017009, 0.644924005, 0.527974883, 0.533123681, 0.538007001, 0.542657691, 0.547103487, 0.551367851, 0.555467728, 0.55942348, 0.563250116, 0.566959485, 0.570561711, 0.574065446, 0.577478074, 0.58080589, 0.584054243, 0.587227661, 0.590329954, 0.593364304, 0.596333341, 0.599239207, 0.602083323, 0.604867403, 
0.607592438, 0.610259089, 0.612867743, 0.615418537, 0.617911385, 0.620345997, 0.622721903, 0.625038468, 0.627294975, 0.62949049, 0.631623923, 0.633694102, 0.635699759, 0.637639537, 0.639512001, 0.641315649, 0.643048936, 0.644710195, 0.646297711, 0.647809772, 0.649244641, 0.650600561, 0.651875762, 0.653068467, 0.654176761, 0.655198755, 0.656132835, 0.656977276, 0.65773038, 0.658390492, 0.658956004, 0.659425363, 0.659797077, 0.660069009, 0.660240367, 0.660309966, 0.660276655, 0.660139383, 0.65989721, 0.659549311, 0.659094989, 0.658533677, 0.657864946, 0.657087561, 0.656202294, 0.655209222, 0.654108545, 0.652900629, 0.65158601, 0.650165396, 0.648639668, 0.647009884, 0.645277275, 0.64344325, 0.641509389, 0.63947744, 0.637348841, 0.635126108, 0.632811608, 0.630407727, 0.627916992, 0.625342058, 0.622685703, 0.619950811, 0.617140367, 0.61425744, 0.611305174, 0.608286774, 0.605205491, 0.602064611, 0.598867442, 0.5956173, 0.592317494, 0.588971318, 0.585582301, 0.582153572, 0.578688247, 0.575189431, 0.571660158, 0.56810338, 0.564521958, 0.560918659, 0.557296144, 0.55365697, 0.550003579, 0.546338299, 0.542663338, 0.538980786, 0.535292612, 0.531600995, 0.527908434, 0.524215533, 0.520523766, 0.516834495, 0.513148963, 0.509468305, 0.505793543, 0.502125599, 0.49846529, 0.494813338, 0.491170517, 0.487539124, 0.483917732, 0.480306702, 0.476706319, 0.473116798, 0.469538286, 0.465970871, 0.46241458, 0.458869577, 0.455337565, 0.451816385, 0.448305861, 0.444805781, 0.441315901, 0.437835947, 0.434365616, 0.430905052, 0.427454836, 0.424013059, 0.420579333, 0.417153264, 0.413734445, 0.410322469, 0.406916975, 0.403518809, 0.400126027, 0.396738211, 0.393354947, 0.389975832, 0.386600468, 0.383228622, 0.379860246, 0.376494232, 0.373130228, 0.369767893, 0.366406907, 0.363046965, 0.359687758, 0.356328796, 0.352969777, 0.349610469, 0.346250656, 0.342890148, 0.339528771, 0.336165582, 0.332800827, 0.329434512, 0.32606655, 0.322696876, 0.319325444, 0.315952211, 0.31257544, 0.309196628, 0.305815824, 
0.302433101, 0.299048555, 0.295662308, 0.292274506, 0.288883445, 0.285490391, 0.282096149, 0.27870099, 0.275305214, 0.271909159, 0.2685132, 0.265117752, 0.261721488, 0.258325424, 0.254931256, 0.251539615, 0.2481512, 0.244766775, 0.241387186, 0.238013359, 0.234646316, 0.231287178, 0.227937141, 0.224595006, 0.221264889, 0.217948456, 0.214647532, 0.211364122, 0.208100426, 0.204858855, 0.201642049, 0.1984529, 0.195294567, 0.1921705, 0.189084459, 0.186040537, 0.18304318, 0.180097207, 0.177207826, 0.174380656, 0.171621733, 0.168937522, 0.166334918, 0.163821243, 0.161404226, 0.159091984, 0.156890625, 0.154807583, 0.152854862, 0.151041581, 0.149376885, 0.14786981, 0.146529128, 0.145357284, 0.144362644, 0.143556679, 0.142945116, 0.142528388, 0.142302653, 0.142278607, 0.142453425, 0.142808191, 0.143350944, 0.144061156, 0.144922913, 0.145918663, 0.147014438, 0.148179639, 0.149370428, 0.150520343, 0.151566019, 0.152409489, 0.152921158, 0.152925363, 0.152177604, 0.150327944, 0.146860789, 0.140955606, 0.131325517, 0.32941519, 0.33542652, 0.34137895, 0.34726862, 0.35309303, 0.35885256, 0.36454323, 0.37016418, 0.37571452, 0.38119074, 0.38659204, 0.39191723, 0.39716349, 0.40232944, 0.40741404, 0.41241521, 0.41733086, 0.42216032, 0.42690202, 0.43155375, 0.43611482, 0.44058404, 0.44496, 0.44924127, 0.45342734, 0.45751726, 0.46150995, 0.46540474, 0.46920128, 0.47289909, 0.47649762, 0.47999675, 0.48339654, 0.48669702, 0.48989831, 0.49300074, 0.49600488, 0.49891131, 0.50172076, 0.50443413, 0.50705243, 0.50957678, 0.5120084, 0.5143487, 0.5165993, 0.51876163, 0.52083736, 0.52282822, 0.52473609, 0.52656332, 0.52831152, 0.52998273, 0.53157905, 0.53310261, 0.53455561, 0.53594093, 0.53726018, 0.53851561, 0.53970946, 0.54084398, 0.5419214, 0.54294396, 0.54391424, 0.54483444, 0.54570633, 0.546532, 0.54731353, 0.54805291, 0.54875211, 0.54941304, 0.55003755, 0.55062743, 0.5511844, 0.55171011, 0.55220646, 0.55267486, 0.55311653, 0.55353282, 0.55392505, 0.55429441, 0.55464205, 0.55496905, 
0.55527637, 0.55556494, 0.55583559, 0.55608907, 0.55632606, 0.55654717, 0.55675292, 0.55694377, 0.5571201, 0.55728221, 0.55743035, 0.55756466, 0.55768526, 0.55779216, 0.55788532, 0.55796464, 0.55803034, 0.55808199, 0.55811913, 0.55814141, 0.55814842, 0.55813967, 0.55811466, 0.5580728, 0.55801347, 0.557936, 0.55783967, 0.55772371, 0.55758733, 0.55742968, 0.5572505, 0.55704861, 0.55682271, 0.55657181, 0.55629491, 0.55599097, 0.55565893, 0.55529773, 0.55490625, 0.55448339, 0.55402906, 0.55354108, 0.55301828, 0.55245948, 0.55186354, 0.55122927, 0.55055551, 0.5498411, 0.54908564, 0.5482874, 0.54744498, 0.54655722, 0.54562298, 0.54464114, 0.54361058, 0.54253043, 0.54139999, 0.54021751, 0.53898192, 0.53769219, 0.53634733, 0.53494633, 0.53348834, 0.53197275, 0.53039808, 0.52876343, 0.52706792, 0.52531069, 0.52349092, 0.52160791, 0.51966086, 0.5176488, 0.51557101, 0.5134268, 0.51121549, 0.50893644, 0.5065889, 0.50417217, 0.50168574, 0.49912906, 0.49650163, 0.49380294, 0.49103252, 0.48818938, 0.48527326, 0.48228395, 0.47922108, 0.47608431, 0.4728733, 0.46958774, 0.46622638, 0.46278934, 0.45927675, 0.45568838, 0.45202405, 0.44828355, 0.44446673, 0.44057284, 0.4366009, 0.43255207, 0.42842626, 0.42422341, 0.41994346, 0.41558638, 0.41115215, 0.40664011, 0.40204917, 0.39738103, 0.39263579, 0.38781353, 0.38291438, 0.3779385, 0.37288606, 0.36775726, 0.36255223, 0.35726893, 0.35191009, 0.34647607, 0.3409673, 0.33538426, 0.32972749, 0.32399761, 0.31819529, 0.31232133, 0.30637661, 0.30036211, 0.29427888, 0.2881265, 0.28190832, 0.27562602, 0.26928147, 0.26287683, 0.25641457, 0.24989748, 0.24332878, 0.23671214, 0.23005179, 0.22335258, 0.21662012, 0.20986086, 0.20308229, 0.19629307, 0.18950326, 0.18272455, 0.17597055, 0.16925712, 0.16260273, 0.15602894, 0.14956101, 0.14322828, 0.13706449, 0.13110864, 0.12540538, 0.12000532, 0.11496505, 0.11034678, 0.10621724, 0.1026459, 0.09970219, 0.09745186, 0.09595277, 0.09525046, 0.09537439, 0.09633538, 0.09812496, 0.1007168, 0.10407067, 0.10813094, 
0.11283773, 0.11812832, 0.12394051, 0.13021494, 0.13689671, 0.1439362, 0.3015, 0.3077, 0.3142, 0.3205, 0.3269, 0.3334, 0.34, 0.3467, 0.3537, 0.3606, 0.3676, 0.3746, 0.3817, 0.3888, 0.396, 0.4031, 0.4102, 0.4172, 0.4241, 0.4307, 0.4347, 0.4363, 0.4368, 0.4368, 0.4365, 0.4361, 0.4356, 0.4349, 0.4343, 0.4336, 0.4329, 0.4322, 0.4314, 0.4308, 0.4301, 0.4293, 0.4287, 0.428, 0.4274, 0.4268, 0.4262, 0.4256, 0.4251, 0.4245, 0.4241, 0.4236, 0.4232, 0.4228, 0.4224, 0.422, 0.4217, 0.4214, 0.4212, 0.4209, 0.4207, 0.4205, 0.4204, 0.4203, 0.4202, 0.4201, 0.42, 0.42, 0.42, 0.4201, 0.4201, 0.4202, 0.4203, 0.4205, 0.4206, 0.4208, 0.421, 0.4212, 0.4215, 0.4218, 0.4221, 0.4224, 0.4227, 0.4231, 0.4236, 0.424, 0.4244, 0.4249, 0.4254, 0.4259, 0.4264, 0.427, 0.4276, 0.4282, 0.4288, 0.4294, 0.4302, 0.4308, 0.4316, 0.4322, 0.4331, 0.4338, 0.4346, 0.4355, 0.4364, 0.4372, 0.4381, 0.439, 0.44, 0.4409, 0.4419, 0.443, 0.444, 0.445, 0.4462, 0.4473, 0.4485, 0.4496, 0.4508, 0.4521, 0.4534, 0.4547, 0.4561, 0.4575, 0.4589, 0.4604, 0.462, 0.4635, 0.465, 0.4665, 0.4679, 0.4691, 0.4701, 0.4707, 0.4714, 0.4719, 0.4723, 0.4727, 0.473, 0.4732, 0.4734, 0.4736, 0.4737, 0.4738, 0.4739, 0.4739, 0.4738, 0.4739, 0.4738, 0.4736, 0.4735, 0.4733, 0.4732, 0.4729, 0.4727, 0.4723, 0.472, 0.4717, 0.4714, 0.4709, 0.4705, 0.4701, 0.4696, 0.4691, 0.4685, 0.468, 0.4673, 0.4668, 0.4662, 0.4655, 0.4649, 0.4641, 0.4632, 0.4625, 0.4617, 0.4609, 0.46, 0.4591, 0.4583, 0.4573, 0.4562, 0.4553, 0.4543, 0.4532, 0.4521, 0.4511, 0.4499, 0.4487, 0.4475, 0.4463, 0.445, 0.4437, 0.4424, 0.4409, 0.4396, 0.4382, 0.4368, 0.4352, 0.4338, 0.4322, 0.4307, 0.429, 0.4273, 0.4258, 0.4241, 0.4223, 0.4205, 0.4188, 0.4168, 0.415, 0.4129, 0.4111, 0.409, 0.407, 0.4049, 0.4028, 0.4007, 0.3984, 0.3961, 0.3938, 0.3915, 0.3892, 0.3869, 0.3843, 0.3818, 0.3793, 0.3766, 0.3739, 0.3712, 0.3684, 0.3657, 0.3627, 0.3599, 0.3569, 0.3538, 0.3507, 0.3474, 0.3442, 0.3409, 0.3374, 0.334, 0.3306, 0.3268, 0.3232, 0.3195, 0.3155, 0.3116, 0.3076, 0.3034, 0.299, 0.2947, 
0.2901, 0.2856, 0.2807, 0.2759, 0.2708, 0.2655, 0.26, 0.2593, 0.2634, 0.268, 0.2731, 0.10018654, 0.10374486, 0.10738511, 0.11108639, 0.11483751, 0.11863035, 0.12245873, 0.12631831, 0.13020508, 0.13411624, 0.13804929, 0.14200206, 0.14597263, 0.14995981, 0.15396203, 0.15797687, 0.16200507, 0.16604287, 0.17009175, 0.17414848, 0.17821272, 0.18228425, 0.18636053, 0.19044109, 0.19452676, 0.1986151, 0.20270523, 0.20679668, 0.21088893, 0.21498104, 0.2190294, 0.22303512, 0.22699705, 0.23091409, 0.23478512, 0.23860907, 0.24238489, 0.24611154, 0.2497868, 0.25340813, 0.25697736, 0.2604936, 0.26395596, 0.26736359, 0.27071569, 0.27401148, 0.2772502, 0.28043021, 0.2835489, 0.28660853, 0.2896085, 0.29254823, 0.29542718, 0.29824485, 0.30100076, 0.30369448, 0.30632563, 0.3088938, 0.31139721, 0.3138355, 0.31620996, 0.31852037, 0.32076656, 0.32294825, 0.32506528, 0.3271175, 0.32910494, 0.33102734, 0.33288464, 0.33467689, 0.33640407, 0.33806605, 0.33966284, 0.34119475, 0.34266151, 0.34406324, 0.34540024, 0.34667231, 0.34787978, 0.34902273, 0.35010104, 0.35111537, 0.35206533, 0.35295152, 0.35377385, 0.35453252, 0.35522789, 0.35585982, 0.35642903, 0.35693521, 0.35737863, 0.35775059, 0.35804813, 0.35827146, 0.35841679, 0.35848469, 0.35847347, 0.35838029, 0.35820487, 0.35794557, 0.35760108, 0.35716891, 0.35664819, 0.35603757, 0.35533555, 0.35454107, 0.3536529, 0.3526697, 0.35159077, 0.3504148, 0.34914061, 0.34776864, 0.3462986, 0.34473046, 0.3430635, 0.34129974, 0.33943958, 0.33748452, 0.33543669, 0.33329799, 0.33107204, 0.32876184, 0.32637117, 0.32390525, 0.32136808, 0.31876642, 0.31610488, 0.313391, 0.31063031, 0.30783, 0.30499738, 0.30213802, 0.29926105, 0.2963705, 0.29347474, 0.29057852, 0.2876878, 0.28480819, 0.281943, 0.27909826, 0.27627462, 0.27346473, 0.27070818, 0.26797378, 0.26529697, 0.26268136, 0.26014181, 0.25769539, 0.2553592, 0.25314362, 0.25108745, 0.24918223, 0.24748098, 0.24598324, 0.24470258, 0.24369359, 0.24294137, 0.24245973, 0.24229568, 0.24242622, 0.24285536, 
0.24362274, 0.24468803, 0.24606461, 0.24775328, 0.24972157, 0.25199928, 0.25452808, 0.25734661, 0.2603949, 0.26369821, 0.26722004, 0.27094924, 0.27489742, 0.27902322, 0.28332283, 0.28780969, 0.29244728, 0.29722817, 0.30214494, 0.3072105, 0.31239776, 0.31769923, 0.32310953, 0.32862553, 0.33421404, 0.33985601, 0.34555431, 0.35130912, 0.35711942, 0.36298589, 0.36890904, 0.3748895, 0.38092784, 0.3870292, 0.39319057, 0.39941173, 0.40569343, 0.41203603, 0.41844491, 0.42491751, 0.43145271, 0.43805131, 0.44471698, 0.45145074, 0.45824902, 0.46511271, 0.47204746, 0.47905028, 0.4861196, 0.4932578, 0.50046894, 0.5077467, 0.51509334, 0.52251447, 0.53000231, 0.53756026, 0.5451917, 0.5528892, 0.5606593, 0.56849894, 0.57640375, 0.58438387, 0.59242739, 0.60053991, 0.60871954, 0.61696136, 0.62527295, 0.63364277, 0.64207921, 0.65057302, 0.65912731, 0.66773889, 0.6764046, 0.68512266, 0.69383201, 0.70252255, 0.71120296, 0.71987163, 0.72851999, 0.73716071, 0.74578903, 0.75440141, 0.76299805, 0.77158353, 0.78015619, 0.78871034, 0.79725261, 0.8057883, 0.81430324, 0.82280948, 0.83130786, 0.83979337, 0.84827858, 0.85676611, 0.86536915, 0.02092227, 0.02535719, 0.03018802, 0.03545515, 0.04115287, 0.04691409, 0.05264306, 0.05834631, 0.06403249, 0.06970862, 0.07538208, 0.08105568, 0.08673591, 0.09242646, 0.09813162, 0.103854, 0.10959847, 0.11536893, 0.12116393, 0.12698763, 0.1328442, 0.13873064, 0.14465095, 0.15060265, 0.15659379, 0.16262484, 0.16869476, 0.17480366, 0.18094993, 0.18713384, 0.19335329, 0.19960847, 0.20589698, 0.21221911, 0.21857219, 0.2249565, 0.23136943, 0.23781116, 0.24427914, 0.25077369, 0.25729255, 0.26383543, 0.27040111, 0.27698891, 0.28359861, 0.29022775, 0.29687795, 0.30354703, 0.31023563, 0.31694355, 0.32366939, 0.33041431, 0.33717781, 0.34395925, 0.35075983, 0.35757941, 0.3644173, 0.37127514, 0.3781528, 0.38504973, 0.39196711, 0.39890581, 0.4058667, 0.4128484, 0.41985299, 0.42688124, 0.43393244, 0.4410088, 0.448113, 0.45519562, 0.46223892, 0.46925447, 0.47623196, 
0.48316271, 0.49001976, 0.49679407, 0.50348932, 0.51007843, 0.51653282, 0.52286845, 0.52903422, 0.53503572, 0.54085315, 0.54645752, 0.55185939, 0.55701246, 0.56194601, 0.56660884, 0.57104399, 0.57519929, 0.57913247, 0.58278615, 0.5862272, 0.58941872, 0.59240198, 0.59518282, 0.59775543, 0.60016456, 0.60240251, 0.6044784, 0.60642528, 0.60825252, 0.60994938, 0.61154118, 0.61304472, 0.61446646, 0.61581561, 0.61709794, 0.61831922, 0.61948028, 0.62059763, 0.62167507, 0.62271724, 0.62373011, 0.62471794, 0.62568371, 0.6266318, 0.62756504, 0.62848279, 0.62938329, 0.63030696, 0.63124483, 0.63219599, 0.63315928, 0.63413391, 0.63511876, 0.63611321, 0.63711608, 0.63812656, 0.63914367, 0.64016638, 0.6411939, 0.64222457, 0.64325811, 0.64429331, 0.64532947, 0.64636539, 0.64739979, 0.64843198, 0.64946117, 0.65048638, 0.65150606, 0.65251978, 0.6535266, 0.65452542, 0.655515, 0.65649508, 0.65746419, 0.65842151, 0.65936642, 0.66029768, 0.6612145, 0.66211645, 0.66300179, 0.66387079, 0.66472159, 0.66555409, 0.66636568, 0.66715744, 0.66792838, 0.66867802, 0.66940555, 0.6701105, 0.67079211, 0.67145005, 0.67208182, 0.67268861, 0.67326978, 0.6738255, 0.67435491, 0.67485792, 0.67533374, 0.67578061, 0.67620044, 0.67659251, 0.67695703, 0.67729378, 0.67760151, 0.67788018, 0.67813088, 0.67835215, 0.67854868, 0.67872193, 0.67887024, 0.67898912, 0.67907645, 0.67914062, 0.67917264, 0.67917096, 0.67914468, 0.67907959, 0.67899164, 0.67886578, 0.67871894, 0.67853896, 0.67833512, 0.67811118, 0.67786729, 0.67761027, 0.67734882, 0.67709394, 0.67685638, 0.67664969, 0.67649539, 0.67641393, 0.67642947, 0.67656899, 0.67686215, 0.67735255, 0.6780564, 0.67900049, 0.68021733, 0.6817062, 0.68347352, 0.68552698, 0.68783929, 0.69029789, 0.69288545, 0.69561066, 0.69848331, 0.70150427, 0.70468261, 0.70802931, 0.71154204, 0.71523675, 0.71910895, 0.72317003, 0.72741689, 0.73185717, 0.73648495, 0.74130658, 0.74631123, 0.75150483, 0.75687187, 0.76241714, 0.76812286, 0.77399039, 0.7800041, 0.78615802, 0.79244474, 
0.79884925, 0.80536823, 0.81196513, 0.81855729, 0.82514119, 0.83172131, 0.83829355, 0.84485982, 0.85142101, 0.8579751, 0.86452477, 0.87106853, 0.8776059, 0.88414253, 0.89067759, 0.89725384, 0.23217, 0.26149, 0.29024, 0.31844, 0.34607, 0.37314, 0.39964, 0.42558, 0.45096, 0.47578, 0.50004, 0.52373, 0.54686, 0.56942, 0.59142, 0.61286, 0.63374, 0.65406, 0.67381, 0.693, 0.71162, 0.72968, 0.74718, 0.76412, 0.7805, 0.79631, 0.81156, 0.82624, 0.84037, 0.85393, 0.86692, 0.87936, 0.89123, 0.90254, 0.91328, 0.92347, 0.93309, 0.94214, 0.95064, 0.95857, 0.96594, 0.97275, 0.97899, 0.98461, 0.9893, 0.99303, 0.99583, 0.99773, 0.99876, 0.99896, 0.99835, 0.99697, 0.99485, 0.99202, 0.98851, 0.98436, 0.97959, 0.97423, 0.96833, 0.9619, 0.95498, 0.94761, 0.93981, 0.93161, 0.92305, 0.91416, 0.90496, 0.8955, 0.8858, 0.8759, 0.86581, 0.85559, 0.84525, 0.83484, 0.82437, 0.81389, 0.80342, 0.79299, 0.78264, 0.7724, 0.7623, 0.75237, 0.74265, 0.73316, 0.72393, 0.715, 0.70599, 0.69651, 0.6866, 0.67627, 0.66556, 0.65448, 0.64308, 0.63137, 0.61938, 0.60713, 0.59466, 0.58199, 0.56914, 0.55614, 0.54303, 0.52981, 0.51653, 0.50321, 0.48987, 0.47654, 0.46325, 0.45002, 0.43688, 0.42386, 0.41098, 0.39826, 0.38575, 0.37345, 0.3614, 0.34963, 0.33816, 0.32701, 0.31622, 0.30581, 0.29581, 0.28623, 0.27712, 0.26849, 0.26038, 0.2528, 0.24579, 0.23937, 0.23356, 0.22835, 0.2237, 0.2196, 0.21602, 0.21294, 0.21032, 0.20815, 0.2064, 0.20504, 0.20406, 0.20343, 0.20311, 0.2031, 0.20336, 0.20386, 0.20459, 0.20552, 0.20663, 0.20788, 0.20926, 0.21074, 0.2123, 0.21391, 0.21555, 0.21719, 0.2188, 0.22038, 0.22188, 0.22328, 0.22456, 0.2257, 0.22667, 0.22744, 0.228, 0.22831, 0.22836, 0.22811, 0.22754, 0.22663, 0.22536, 0.22369, 0.22161, 0.21918, 0.2165, 0.21358, 0.21043, 0.20706, 0.20348, 0.19971, 0.19577, 0.19165, 0.18738, 0.18297, 0.17842, 0.17376, 0.16899, 0.16412, 0.15918, 0.15417, 0.1491, 0.14398, 0.13883, 0.13367, 0.12849, 0.12332, 0.11817, 0.11305, 0.10797, 0.10294, 0.09798, 0.0931, 0.08831, 0.08362, 0.07905, 0.07461, 
0.07031, 0.06616, 0.06218, 0.05837, 0.05475, 0.05134, 0.04814, 0.04516, 0.04243, 0.03993, 0.03753, 0.03521, 0.03297, 0.03082, 0.02875, 0.02677, 0.02487, 0.02305, 0.02131, 0.01966, 0.01809, 0.0166, 0.0152, 0.01387, 0.01264, 0.01148, 0.01041, 0.00942, 0.00851, 0.00769, 0.00695, 0.00629, 0.00571, 0.00522, 0.00481, 0.00449, 0.00424, 0.00408, 0.00401, 0.00401, 0.0041, 0.00427, 0.00453, 0.00486, 0.00529, 0.00579, 0.00638, 0.00705, 0.0078, 0.00863, 0.00955, 0.01055), opt = c('A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 
'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 
'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 
'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 
'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 
'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H') )
/scratch/gouwar.j/cran-all/cranData/viridisLite/R/zzz.R
#' Computes temporal autocorrelation in a vector, and returns a dataframe for easy plotting.
#'
#' @description It reads a vector representing a time series, applies \code{\link{acf}} for a given number of lags
#'
#' @usage acfToDf(
#'   x = NULL,
#'   lag.max = 100,
#'   length.out = 10
#' )
#'
#' @param x numeric vector. Must represent a variable sampled at regular times.
#' @param lag.max integer, number of lags over which to compute temporal autocorrelation.
#' @param length.out integer, total number of lags to consider for plotting. Should be a subset of \code{lag.max}.
#'
#' @details This function computes temporal autocorrelation of a given vector using \code{\link{acf}}, and returns a dataframe ready for easy plotting with \code{\link{plotAcf}}.
#'
#' @author Blas M. Benito <[email protected]>
#'
#' @return A dataframe with the columns:
#'
#' \itemize{
#'   \item \emph{lag}: numeric, lag in the time units of \code{x} with a maximum determined by \code{lag.max}, and a number of unique values determined by \code{length.out}
#'   \item \emph{acf}: Pearson correlation index returned by \code{\link{acf}} for the given lag.
#'   \item \emph{ci.max}: Maximum value of the confidence interval of \code{acf}.
#'   \item \emph{ci.min}: Minimum value of the confidence interval of \code{acf}.
#' }
#'
#' @seealso \code{\link{acf}}, \code{\link{plotAcf}}
#'
#' @examples
#'
#'#getting a driver
#'data(driverA)
#'
#'#computing temporal autocorrelations
#'x.df <- acfToDf(
#'  x = driverA,
#'  lag.max = 1000,
#'  length.out = 100
#')
#'str(x.df)
#'
#'#plotting output
#'plotAcf(x.df)
#'
#' @export
acfToDf <- function(x = NULL, lag.max = 100, length.out = 10){

  #CHECKING INPUT VECTOR
  #----------------------
  if(is.null(x)){
    stop("acfToDf: the argument x is empty.")
  }

  #tries to coerce non-vector input (e.g. ts objects or single-column matrices)
  if(!is.vector(x)){
    x <- as.vector(x)
    if(!is.vector(x)){
      stop("acfToDf: the argument x is not a vector.")
    }
  }

  #an empty vector would make acf() fail downstream with a cryptic error
  if(length(x) == 0){
    stop("acfToDf: the argument x has length 0.")
  }

  #CHECKING lag.max
  #----------------------
  #acf() cannot compute more lags than available cases
  if(lag.max > length(x)){
    lag.max <- length(x) - 1 #default behavior of acf()
  }

  #CHECKING length.out
  #----------------------
  #cannot return more rows than computed lags
  if(length.out > lag.max){
    length.out <- lag.max
  }

  #COMPUTING AUTOCORRELATION AND CONFIDENCE INTERVALS
  #----------------------
  #computing autocorrelation
  acf.output <- acf(x, lag.max = lag.max, plot = FALSE)

  #computes confidence interval (same equation used by plot.acf())
  acf.ci <- qnorm((1 + 0.95) / 2) / sqrt(acf.output$n.used)

  #PREPARING OUTPUT DATAFRAME
  #----------------------
  #generating dataframe with all lags
  acf.df <- data.frame(
    lag = acf.output$lag,
    acf = acf.output$acf,
    ci.max = acf.ci,
    ci.min = -acf.ci
  )

  #resampling to reduce the number of lines to be plotted
  acf.df <- acf.df[floor(seq(1, nrow(acf.df), length.out = length.out)), ]

  return(acf.df)
}
/scratch/gouwar.j/cran-all/cranData/virtualPollen/R/acfToDf.R
#' Aggregates the output of \code{\link{simulatePopulation}}.
#'
#' @description Takes the output of \code{\link{simulatePopulation}} and aggregates it into centimetres by following a sediment accumulation rate produced by \code{\link{simulateAccumulationRate}}. It further samples it at given depth intervals. It intends to simulate a pseudo-realistic sedimentation of the pollen produced by the simulation, and to apply a pollen-sampling pattern to a virtual pollen core.
#'
#' @usage aggregateSimulation(
#'  simulation.output=NULL,
#'  accumulation.rate=NULL,
#'  sampling.intervals=1
#'  )
#'
#' @param simulation.output list, output of \code{\link{simulatePopulation}}.
#' @param accumulation.rate dataframe, output of \code{\link{simulateAccumulationRate}}.
#' @param sampling.intervals integer, numeric vector, depth interval or intervals between consecutive samples in centimetres. If 1, all samples are returned, if 2, returned samples are separated by 1 cm.
#'
#' @details The function uses the values in the \code{grouping} column of the \code{\link{simulateAccumulationRate}} output to aggregate together (by computing the \code{mean}) as many samples as cases in \code{grouping} have the same identifier. Output samples are identified by the average age of the samples within the given centimetre.
#'
#' @author Blas M. Benito <[email protected]>
#'
#' @return A list of dataframes with as many rows as virtual taxa were produced by \code{\link{simulatePopulation}}, and the following columns: column 1 is the original data, column 2 is the original data aggregated by the accumulation rate, columns 3 to n are the different sampling intervals defined by the user.
#'
#' @seealso \code{\link{simulateAccumulationRate}}, \code{\link{simulatePopulation}}
#'
#' @examples
#'
#'#getting example data
#'data(simulation)
#'data(accumulationRate)
#'
#'#aggregating first simulation outcome
#'sim.output.aggregated <- aggregateSimulation(
#'  simulation.output = simulation[1],
#'  accumulation.rate = accumulationRate,
#'  sampling.intervals = c(2,6))
#'
#'#comparing simulations
#'par(mfrow = c(3,1))
#'#notice the subsetting of the given column of the input list
#'plot(sim.output.aggregated[[1,1]]$Time,
#'  sim.output.aggregated[[1,1]]$Pollen,
#'  type = "l",
#'  xlim = c(500, 1000),
#'  main = "Annual"
#'  )
#'plot(sim.output.aggregated[[1,2]]$Time,
#'  sim.output.aggregated[[1,2]]$Pollen,
#'  type = "l",
#'  xlim = c(500, 1000),
#'  main = "2cm"
#'  )
#'plot(sim.output.aggregated[[1,3]]$Time,
#'  sim.output.aggregated[[1,3]]$Pollen,
#'  type = "l",
#'  xlim = c(500, 1000),
#'  main = "6cm"
#'  )
#'
#'#check differences in nrow
#'nrow(sim.output.aggregated[[1,1]]) #original data
#'nrow(sim.output.aggregated[[1,2]]) #2cm
#'nrow(sim.output.aggregated[[1,3]]) #6cm intervals
#'
#' @export
aggregateSimulation <- function(simulation.output = NULL, accumulation.rate = NULL, sampling.intervals = 1){

  #both main arguments are mandatory
  if(is.null(accumulation.rate)){stop("The argument accumulation.rate is empty.")}
  if(is.null(simulation.output)){stop("The argument simulation.output is empty.")}

  #function to aggregate a dataframe: keeps the "Simulation" period only,
  #then averages all rows sharing the same value in accumulation.rate$grouping
  #(i.e. all years deposited within the same centimetre of sediment).
  #NOTE: accumulation.rate is read from the enclosing function's environment.
  applyAccumulationRate <- function(x){
    temp = x
    temp = temp[temp$Period=="Simulation", ]
    temp$Period = NULL
    temp.aggregated = aggregate(x = temp, by = list(group = accumulation.rate$grouping), FUN = mean)
    temp.aggregated$group = NULL
    temp.aggregated$Period = "Simulation"
    return(temp.aggregated)
  }

  #function to apply sampling intervals: keeps one row every
  #sampling.interval centimetres.
  #NOTE: sampling.interval (singular) is not a parameter here; it is the
  #loop variable of the for() below, captured lexically at call time.
  applySamplingIntervals <- function(x){
    nrow.x = nrow(x)
    sampling.sequence = seq(1, nrow.x, by = sampling.interval)
    output.df = x[sampling.sequence, ]
    return(output.df)
  }

  #apply accumulation rate to every taxon in the simulation output
  accrate.list <- lapply(X = simulation.output, FUN = applyAccumulationRate)

  #apply sampling intervals; each pass of the loop adds one column of
  #resampled dataframes. cbind() on lists builds a list-matrix, which is
  #what enables the [[row, column]] indexing shown in the examples.
  intervals.list <- list()
  if(!is.null(sampling.intervals)){
    for(sampling.interval in sampling.intervals){
      intervals.list <- cbind(intervals.list, lapply(X = accrate.list, FUN = applySamplingIntervals))
    }
    #adding lists together: column 1 = original, column 2 = aggregated,
    #columns 3..n = one per sampling interval
    output.list <- cbind(simulation.output, accrate.list, intervals.list)
  } else {
    #adding lists together (no extra sampling intervals requested)
    output.list <- cbind(simulation.output, accrate.list)
  }

  names(output.list) <- NULL
  return(output.list)
}
/scratch/gouwar.j/cran-all/cranData/virtualPollen/R/aggregateSimulation.R
#' Compares different simulations produced by \code{\link{simulatePopulation}}.
#'
#' @description Plots together the results of different virtual taxa produced by a single run of \code{\link{simulatePopulation}}.
#'
#' @usage compareSimulations(
#'  simulation.output = NULL,
#'  species = "all",
#'  filename = NULL,
#'  columns = "Pollen",
#'  time.zoom = NULL,
#'  width = 12,
#'  text.size = 20,
#'  title.size = 25,
#'  plot.title = ""
#'  )
#'
#' @param simulation.output list, output of \code{\link{simulatePopulation}}.
#' @param species a number or vector of numbers representing rows in the parameters dataframe, or a string or vector of strings referencing the "label" column of the parameters dataframe.
#' @param filename character string, name of output pdf file. If NULL or empty, no pdf is produced.
#' @param time.zoom vector of two numbers, indicating the beginning and end of the time interval to be plotted (i.e. "c(5000, 10000)")
#' @param columns character string or vector of character strings with these possible values: "Pollen", "Population.mature", "Population.immature", "Population.viable.seeds", "Suitability", "Biomass.total", "Biomass.mature", "Biomass.immature", "Mortality.mature", "Mortality.immature", "Driver.A", "Driver.B".
#' @param plot.title character string to use as plot title.
#' @param width plot width in inches.
#' @param text.size text size of the plot.
#' @param title.size plot title size.
#'
#' @details The user can decide what virtual taxa to plot through the \code{species} argument, and what information to show through the \code{columns} argument. Output is plotted on screen by default, and printed to pdf if the \code{filename} argument is filled.
#'
#' @author Blas M. Benito <[email protected]>
#'
#'
#' @seealso \code{\link{simulatePopulation}}, \code{\link{plotSimulation}}
#'
#' @examples
#'
#'#getting example data
#'data(simulation)
#'
#'#compare taxa 1, 2, and 3.
#'compareSimulations(simulation.output = simulation,
#'  species = c(1, 2, 3),
#'  columns = c("Pollen", "Suitability"),
#'  time.zoom = c(1000, 2000)
#')
#'
#' @export
compareSimulations <- function(
  simulation.output = NULL,
  species = "all",
  filename = NULL,
  columns = "Pollen",
  time.zoom = NULL,
  width = 12,
  text.size = 20,
  title.size = 25,
  plot.title = ""){

  #SELECTING SPECIES
  #----------------
  #creating dictionary of species names and indexes
  if(inherits(simulation.output, "list")){
    if(is.null(names(simulation.output)) && length(simulation.output) > 1){
      list.names <- 1:length(simulation.output)
    } else {
      list.names <- names(simulation.output)
    }
    names.dictionary <- data.frame(name = list.names, index = 1:length(simulation.output))
  }

  #species given as labels ("all"/"ALL"/"All" selects every taxon);
  #the length check avoids a length > 1 condition in if() when a vector
  #of labels is provided
  if(is.character(species)){
    if(length(species) == 1 && species %in% c("all", "ALL", "All")){
      selected.species <- names.dictionary$index
    } else {
      if(sum(species %in% names.dictionary$name) != length(species)){
        stop("You have selected species that are not available in the parameters table.")
      } else {
        selected.species <- names.dictionary[names.dictionary$name %in% species, "index"]
      }
    }
  }

  #species given as numeric indexes; all of them must exist in the
  #dictionary, otherwise subsetting simulation.output would fail later
  if(is.numeric(species)){
    if(sum(species %in% names.dictionary$index) == length(species)){
      selected.species <- species
    } else {
      stop("You have selected species indexes that are not available in simulation.output.")
    }
  }

  #checking time.zoom
  if(!is.null(time.zoom) && length(time.zoom) != 2){stop("Argument time.zoom must be a vector of length two, as in: time.zoom = c(1000, 2000)")}

  #CHECKING COLUMN NAMES
  column.names <- c("Time", "Pollen", "Population.mature", "Population.immature", "Population.viable.seeds", "Suitability", "Driver.A", "Driver.B", "Biomass.total", "Biomass.mature", "Biomass.immature", "Mortality.mature", "Mortality.immature", "Period")
  if(sum(columns %in% column.names) != length(columns)){
    cat("Warning, the argument columns seem to be wrong. The available column names are:", column.names, ". Returning results based on the column 'Pollen'\n")
    #actually perform the fallback announced by the message above
    columns <- "Pollen"
  }

  #GETTING THE DATA FROM THE INPUT LIST
  output.df <- simulation.output[[selected.species[1]]]
  output.df$Species <- names(simulation.output)[selected.species[1]]

  #age limits of the plot
  if(is.null(time.zoom)){
    age.min <- 0
    age.max <- max(output.df$Time)
  } else {
    age.min <- time.zoom[1]
    age.max <- time.zoom[2]
  }

  #ITERATING THROUGH LIST ELEMENTS
  for(i in selected.species[-1]){
    temp <- simulation.output[[i]]
    temp$Species <- names(simulation.output)[i]
    output.df <- plyr::rbind.fill(output.df, temp)
  }

  #REMOVING BURNIN
  output.df <- output.df[output.df$Period == "Simulation", ]
  output.df$Period <- NULL

  #TO LONG FORMAT
  output.df.long <- tidyr::gather(data = output.df, Variable, Value, 2:(ncol(output.df)-1))

  #ORDER OF PANELS TO PLOT
  #(fixed: level was misspelled "Mortality.immagure", which silently turned
  #Mortality.immature values into NA and dropped them from the plot)
  output.df.long$Variable <- factor(output.df.long$Variable, levels = c("Driver.A", "Driver.B", "Suitability", "Population.mature", "Population.immature", "Population.viable.seeds", "Biomass.total", "Biomass.mature", "Biomass.immature", "Mortality.mature", "Mortality.immature", "Pollen"))

  #GETTING VARIABLES SELECTED BY THE USER
  output.df.long <- output.df.long[output.df.long$Variable %in% columns, ]

  #plot
  p1 <- ggplot(data = output.df.long, aes(x = Time, y = Value, color = Species, group = Species)) +
    geom_line(size = 0.5, alpha = 0.7) +
    scale_color_viridis(discrete = TRUE, direction = -1, begin = 0, end = 0.8) +
    facet_wrap(facets = "Variable", scales = "free_y", ncol = 1, drop = TRUE) +
    ggtitle(plot.title) +
    xlab("Time") +
    ylab("") +
    geom_vline(xintercept = seq(age.min, age.max, by = 200), color = "gray") +
    scale_x_continuous(breaks = seq(age.min, age.max, by = (age.max - age.min)/10)) +
    labs(color = 'Legend') +
    guides(color = guide_legend(override.aes = list(size = 2))) +
    coord_cartesian(xlim = c(age.min, age.max)) +
    cowplot::theme_cowplot() +
    theme(text = element_text(size = text.size),
          plot.title = element_text(size = title.size),
          legend.position = "bottom",
          panel.background = element_blank())

  invisible(print(p1))

  if(!is.null(filename)){ggsave(filename, width = width, height = 2*length(columns))}
}
/scratch/gouwar.j/cran-all/cranData/virtualPollen/R/compareSimulations.R
#' Fix data types in a parameters dataframe.
#'
#' @description It converts all columns (but the \code{label} one) of a parameters dataframe created by \code{\link{parametersDataframe}} and filled by the user into type numeric, and checks the coherence of the parameters for each taxon. It provides feedback on the check results on screen for each taxon.
#'
#' @usage fixParametersTypes(x)
#'
#' @param x dataframe resulting from \code{\link{parametersDataframe}}.
#'
#' @author Blas M. Benito <[email protected]>
#'
#' @return Same dataframe provided in argument \code{x} but with fixed data types.
#'
#' @seealso \code{\link{parametersDataframe}}
#'
#' @examples
#'
#' parameters <- parametersDataframe(rows=1)
#' parameters[1,] <- c("Species 1", 50, 20, 2, 0.2, 0, 100, 1000, 1, 0, 50, 10, 0, 0, 600, 600)
#' parameters <- fixParametersTypes(x=parameters)
#'
#' @export
fixParametersTypes <- function(x){

  #sets all columns but the first one (label) into numeric
  x[, 2:ncol(x)] <- sapply(x[, 2:ncol(x)], as.numeric)

  #makes sure label is character
  if(!is.character(x$label)){x$label <- as.character(x$label)}

  #checking taxa one by one
  for (i in seq_len(nrow(x))){

    message(paste("Checking taxon ", x[i, "label"], sep=""))

    #checking for NA values
    if(sum(is.na(x[i, ])) > 0){
      message("There are NA values in the parameters of this taxon, please, fix this and come back here!")
      #the comparisons below would fail with "missing value where
      #TRUE/FALSE needed" on NA, so skip the remaining checks for this taxon
      next
    }

    #checking reproductive and maximum age
    if(x[i, "reproductive.age"] >= x[i, "maximum.age"]){
      message("reproductive.age is >= than maximum.age, setting reproductive.age to half of maximum.age.")
      x[i, "reproductive.age"] <- floor(x[i, "maximum.age"] / 2)
    }

    #checking maximum biomass and carrying capacity
    #(carrying.capacity must be at least 20 times maximum.biomass)
    if((x[i, "carrying.capacity"] / x[i, "maximum.biomass"]) < 20){
      message(paste("carrying.capacity for the taxon is too low, increasing it to", x[i, "maximum.biomass"] * 20))
      x[i, "carrying.capacity"] <- x[i, "maximum.biomass"] * 20
    }

    #checking pollen control
    if(x[i, "pollen.control"] > 1 | x[i, "pollen.control"] < 0){
      message("pollen.control must be between 0 and 1, setting it to 0")
      x[i, "pollen.control"] <- 0
    }

    #checking driver weights (only warns; the user must fix these by hand)
    if(x[i, "driver.A.weight"] + x[i, "driver.B.weight"] != 1){
      message("The sum of driver.A.weight and driver.B.weight must be 1.")
    }

    #checking standard deviation of niche functions
    #(a zero sd would produce a degenerate niche function)
    if(x[i, "niche.A.sd"] == 0){
      message("niche.A.sd was 0, changing it to 1.")
      x[i, "niche.A.sd"] <- 1
    }

    if(x[i, "niche.B.sd"] == 0){
      message("niche.B.sd was 0, changing it to 1.")
      x[i, "niche.B.sd"] <- 1
    }

  }

  return(x)
}
/scratch/gouwar.j/cran-all/cranData/virtualPollen/R/fixParametersTypes.R
#' Plots main simulation parameters.
#'
#' @description Plots the environmental niche, fecundity, growth curve, and maturity age, of each virtual taxa in a parameters dataframe for \code{\link{simulatePopulation}}, to help the user in making choices while adjusting them.
#'
#' @usage parametersCheck(
#'   parameters,
#'   species = "all",
#'   driver.A = NULL,
#'   driver.B = NULL,
#'   drivers = NULL,
#'   filename = NULL
#'   )
#'
#' @param parameters the parameters dataframe.
#' @param species if "all" or "ALL", all species in "parameters" are plotted. It also accepts a vector of numbers representing the rows of the selected species, or a vector of names of the selected species.
#' @param driver.A numeric vector with driver values.
#' @param driver.B numeric vector with driver values.
#' @param drivers dataframe with drivers.
#' @param filename character string, filename of the output pdf.
#'
#' @details The function prints the plot, can save it to a pdf file if \code{filename} is provided, and returns a ggplot object (invisibly). Priority is given to drivers introduced through the \code{drivers} argument.
#'
#' @author Blas M. Benito <[email protected]>
#'
#' @return A ggplot object (invisibly).
#'
#' @seealso \code{\link{parametersDataframe}}, \code{\link{fixParametersTypes}}
#'
#' @examples
#'\donttest{
#'#getting data
#'data(parameters)
#'data(drivers)
#'
#'#plotting parameters
#'parametersCheck(
#'  parameters = parameters,
#'  drivers = drivers
#'  )
#'}
#'
#' @export
parametersCheck <- function(parameters,
                            species = "all",
                            driver.A = NULL,
                            driver.B = NULL,
                            drivers = NULL,
                            filename = NULL){

  #CHECKING parameters
  #-------------------
  parameters.columns <- c("label", "maximum.age", "reproductive.age", "fecundity",
                          "growth.rate", "pollen.control", "maximum.biomass",
                          "carrying.capacity", "driver.A.weight", "driver.B.weight",
                          "niche.A.mean", "niche.A.sd", "niche.B.mean", "niche.B.sd",
                          "autocorrelation.length.A", "autocorrelation.length.B")

  if(is.null(parameters) || !is.data.frame(parameters)){
    stop("The argument 'parameters' empty.")
  }

  #checks for MISSING expected columns (the message promises exactly this)
  missing.columns <- parameters.columns[!(parameters.columns %in% colnames(parameters))]
  if(length(missing.columns) > 0){
    stop(paste("The following column/s of 'parameters' seem to be missing: ", paste(missing.columns, collapse = ", "), sep=""))
  }

  #CHECKING drivers
  #----------------
  #&& (not |) so the column check only runs when drivers is an actual dataframe
  if(!is.null(drivers) && is.data.frame(drivers)){

    drivers.columns <- c("time", "driver", "autocorrelation.length", "value")
    missing.driver.columns <- drivers.columns[!(drivers.columns %in% colnames(drivers))]
    if(length(missing.driver.columns) > 0){
      stop(paste("The following column/s of 'drivers' seem to be missing: ", paste(missing.driver.columns, collapse = ", "), sep=""))
    }

  } else {

    #no drivers dataframe: driver.A is mandatory, driver.B optional
    #(original condition was inverted: it rejected valid vectors)
    if(is.null(driver.A) || !is.vector(driver.A) || !is.numeric(driver.A)){
      stop("driver.A should be a numeric vector")
    }
    if(is.null(driver.B) || !is.vector(driver.B) || !is.numeric(driver.B)){
      message("driver.B not provided")
    } else {
      if(length(driver.A) != length(driver.B)){
        stop("driver.A and driver.B should have the same length.")
      }
    }

  }

  #CHECKING AND SELECTING species
  #------------------------------
  #dictionary of species names and row indexes
  names.dictionary <- data.frame(name = parameters$label, index = seq_len(nrow(parameters)))

  if(is.character(species)){
    #length check first: `species == "all"` on a vector is not a valid if() condition
    if(length(species) == 1 && species %in% c("all", "ALL", "All")){
      selected.species <- names.dictionary$index
    } else {
      if(sum(species %in% names.dictionary$name) != length(species)){
        stop("You have selected species that are not available in the parameters table.")
      } else {
        selected.species <- names.dictionary[names.dictionary$name %in% species, "index"]
      }
    }
  }

  if(is.numeric(species)){
    if(sum(species %in% names.dictionary$index) == length(species)){
      selected.species <- species
    } else {
      stop("You have selected species that are not available in the parameters table.")
    }
  }

  #dataframe to store plotting data
  plot.df <- data.frame(Species = character(),
                        Driver = character(),
                        Driver.density.x = numeric(),
                        Driver.density.y = numeric(),
                        Driver.weights = numeric(),
                        Value = numeric(),
                        Suitability = numeric(),
                        Age = numeric(),
                        Biomass = numeric(),
                        Reproductive.age = numeric(),
                        Fecundity = numeric())

  #ITERATING THROUGH SPECIES
  for(i in selected.species){

    #GETTING DRIVER VALUES
    #drivers provided as dataframe
    if(is.data.frame(drivers)){

      #local copies so the fallback below is actually used when subsetting
      autocorrelation.length.A <- parameters[i, "autocorrelation.length.A"]
      autocorrelation.length.B <- parameters[i, "autocorrelation.length.B"]

      #if neither autocorrelation length in parameters is available in drivers,
      #fall back to the first one available in drivers
      if(!(autocorrelation.length.A %in% unique(drivers$autocorrelation.length)) & !(autocorrelation.length.B %in% unique(drivers$autocorrelation.length))){
        message(paste("Autocorrelation lengths in parameters do not match autocorrelation lengths in drivers, I am getting the first value of autocorrelation.length available in drivers: ", unique(drivers$autocorrelation.length)[1], sep=""))
        autocorrelation.length.A <- autocorrelation.length.B <- unique(drivers$autocorrelation.length)[1]
      }

      #getting driver values
      driver.A.ready <- drivers[drivers$driver == "A" & drivers$autocorrelation.length == autocorrelation.length.A, "value"]
      driver.B.ready <- drivers[drivers$driver == "B" & drivers$autocorrelation.length == autocorrelation.length.B, "value"]

    } else {

      #getting values from vectors
      driver.A.ready <- driver.A
      driver.B.ready <- driver.B

    }

    #checking if drivers are NA
    if(sum(is.na(driver.A.ready)) == length(driver.A.ready)){
      stop("Driver A is made of NA, something is wrong with the drivers argument.")
    }
    if(sum(is.na(driver.B.ready)) == length(driver.B.ready)){
      driver.B.ready <- NULL
      driver.B.weight <- 0
    }

    #checking if drivers have the same length
    if(!is.null(driver.B.ready) && length(driver.A.ready) != length(driver.B.ready)){
      stop("driver.A and driver.B have different lengths.")
    }

    #preparing driver.A: empirical density (rescaled to [0, 1]) and niche curve
    density.driver.A <- density(x = driver.A.ready, from = min(driver.A.ready), to = max(driver.A.ready), n = 100, bw = max(driver.A.ready)/100)
    density.driver.A.y <- (density.driver.A$y - min(density.driver.A$y)) / (max(density.driver.A$y) - min(density.driver.A$y))
    driver.A.range <- seq(min(driver.A.ready), max(driver.A.ready), length.out = 100)
    niche.A <- dnorm(x = driver.A.range, mean = parameters[i, "niche.A.mean"], sd = parameters[i, "niche.A.sd"])
    niche.A <- niche.A / max(niche.A)
    driver.A.weight <- parameters[i, "driver.A.weight"]

    #preparing driver.B (only when available)
    if(!is.null(driver.B.ready)){
      density.driver.B <- density(x = driver.B.ready, from = min(driver.B.ready), to = max(driver.B.ready), n = 100, bw = max(driver.B.ready)/100)
      density.driver.B.y <- (density.driver.B$y - min(density.driver.B$y)) / (max(density.driver.B$y) - min(density.driver.B$y))
      driver.B.range <- seq(min(driver.B.ready), max(driver.B.ready), length.out = 100)
      niche.B <- dnorm(x = driver.B.range, mean = parameters[i, "niche.B.mean"], sd = parameters[i, "niche.B.sd"])
      niche.B <- niche.B / max(niche.B)
      driver.B.weight <- parameters[i, "driver.B.weight"]
    }

    #computing biomass: logistic growth curve over age
    age <- seq(0, parameters[i, "maximum.age"], length.out = 100)
    biomass <- parameters[i, "maximum.biomass"] / (1 + parameters[i, "maximum.biomass"] * exp(- parameters[i, "growth.rate"] * age))

    #preparing data for plotting
    if(is.null(driver.B.ready) == FALSE){
      plot.df.temp <- data.frame(Species = rep(paste(parameters[i, "label"], sep = ""), 100),
                                 Driver = c(rep("Driver A", 100), rep("Driver B", 100)),
                                 Driver.density.x = c(density.driver.A$x, density.driver.B$x),
                                 Driver.density.y = c(density.driver.A.y, density.driver.B.y),
                                 Driver.weights = c(rep(driver.A.weight, 100), rep(driver.B.weight, 100)),
                                 Value = c(driver.A.range, driver.B.range),
                                 Suitability = c(niche.A, niche.B),
                                 Age = age,
                                 Biomass = biomass,
                                 Reproductive.age = rep(parameters[i, "reproductive.age"], 100),
                                 Fecundity = rep(parameters[i, "fecundity"], 100))
    } else {
      plot.df.temp <- data.frame(Species = rep(paste(parameters[i, "label"], sep = ""), 100),
                                 Driver = c(rep("Driver A", 100)),
                                 Driver.density.x = c(density.driver.A$x),
                                 Driver.density.y = c(density.driver.A.y),
                                 Driver.weights = c(rep(driver.A.weight, 100)),
                                 Value = driver.A.range,
                                 Suitability = niche.A,
                                 Age = age,
                                 Biomass = biomass,
                                 Reproductive.age = rep(parameters[i, "reproductive.age"], 100),
                                 Fecundity = rep(parameters[i, "fecundity"], 100))
    }

    #putting together with main dataframe
    plot.df <- rbind(plot.df, plot.df.temp)

  } #end of iterations

  #rounding suitability and hiding zero-suitability segments
  plot.df$Suitability <- round(plot.df$Suitability, 2)
  plot.df <- na.omit(plot.df)
  plot.df[plot.df$Suitability == 0, "Suitability"] <- NA

  color.palette <- viridis(10)

  #niche vs. driver density panel
  niche.plot <- ggplot(data = plot.df, aes(x = Value, y = Suitability, group = Species)) +
    geom_ribbon(data = plot.df, aes(ymin = 0, ymax = Driver.density.y), color = "gray80", fill = "gray80", alpha = 0.5) +
    geom_ribbon(data = plot.df, aes(ymin = 0, ymax = Suitability, alpha = Driver.weights), colour = NA, fill = color.palette[1]) +
    geom_line(data = plot.df, aes(x = Value, y = Driver.density.y), color = "gray80", alpha = 0.5) +
    facet_grid(Species~Driver) +
    scale_alpha_continuous(range = c(0, 1)) +
    xlab("Driver values") +
    ylab("Environmental suitability") +
    theme(strip.background.y = element_blank(),
          strip.text.y = element_blank(),
          text = element_text(size = 12),
          strip.background = element_rect(fill = NA),
          panel.spacing = unit(1, "lines"),
          legend.position = "none",
          panel.background = element_blank()) +
    cowplot::background_grid(major = "none", minor = "none")

  #fecundity panel
  fecundity.plot <- ggplot(data = plot.df, aes(x = Species, y = Fecundity, group = Species)) +
    geom_hline(aes(yintercept = Fecundity), size = 10, color = "gray80", alpha = 0.5) +
    geom_hline(aes(yintercept = Fecundity), size = 2, color = color.palette[1]) +
    facet_wrap(facets = "Species", ncol = 1, strip.position = "right") +
    theme(strip.background.y = element_blank(),
          strip.text.y = element_blank(),
          text = element_text(size = 12),
          panel.spacing = unit(1, "lines")) +
    scale_y_continuous(limits = c(0, max(plot.df$Fecundity))) +
    xlab("") +
    theme(legend.position = "none", panel.background = element_blank()) +
    cowplot::background_grid(major = "none", minor = "none")

  #growth curve panel
  growth.plot <- ggplot(data = plot.df, aes(x = Age, y = Biomass, group = Species)) +
    geom_ribbon(aes(ymin = 0, ymax = max(Biomass)), color = "gray80", fill = "gray80", alpha = 0.5) +
    geom_line(aes(x = Reproductive.age, y = Biomass), color = color.palette[1], size = 2, alpha = 0.8) +
    facet_wrap(facets = "Species", ncol = 1, strip.position = "right", scales = "free_x") +
    xlab("Age (years)") +
    ylab("Biomass (relative)") +
    theme(text = element_text(size = 12), panel.spacing = unit(1, "lines")) +
    theme(legend.position = "bottom", panel.background = element_blank()) +
    cowplot::background_grid(major = "none", minor = "none")

  #composing panels and title
  joint.plot <- cowplot::plot_grid(niche.plot, fecundity.plot, growth.plot, ncol = 3, rel_widths = c(1, 0.2, 1), align = "h", axis = "tb")
  title <- cowplot::ggdraw() + cowplot::draw_label("Main parameters of virtual taxa", fontface = 'bold')
  final.plot <- cowplot::plot_grid(title, joint.plot, ncol = 1, rel_heights = c(0.1, 1))
  print(final.plot)

  #saving to file
  if(!is.null(filename) && is.character(filename)){
    ggsave(filename = paste(filename, ".pdf", sep = ""), width = 12, height = 2*nrow(parameters))
  }

  #returning the composed plot (invisibly), as documented
  return(invisible(final.plot))
}
/scratch/gouwar.j/cran-all/cranData/virtualPollen/R/parametersCheck.R
#' Generates a template dataframe to contain simulation parameters.
#'
#' @description Generates the dataframe structure needed to contain the parameters used as input for the \code{\link{simulatePopulation}} function.
#'
#' @usage parametersDataframe(rows=1)
#'
#' @param rows integer, number of rows in the output dataframe. Non-integer values are rounded up, and values lower than 1 are raised to 1.
#'
#' @details The resulting dataframe can either be filled manually through vectors, as shown in the example (but this requires to use the function \code{\link{fixParametersTypes}} once the dataframe is completed), or can be edited manually in Rstudio by installing the \href{https://CRAN.R-project.org/package=editData}{editData} package.
#'
#' @author Blas M. Benito <[email protected]>
#'
#' @return A dataframe filled with \code{NA} values and the columns:
#' \itemize{
#'   \item \emph{label}: to store names (character string) of the virtual taxa.
#'   \item \emph{maximum.age}: integer, maximum possible age of the individuals in years.
#'   \item \emph{reproductive.age}: integer, age of sexual maturity in years.
#'   \item \emph{fecundity}: integer, number of maximum viable seeds produced by a mature individual under fully suitable conditions.
#'   \item \emph{growth.rate}: numeric, parameter of the logistic growth function.
#'   \item \emph{pollen.control}: numeric in the interval [0, 1]. If 0, pollen productivity depends on environmental suitability only. The larger the number, biomass takes over environmental suitability in determining pollen productivity.
#'   \item \emph{maximum.biomass}: integer, maximum biomass of the individuals.
#'   \item \emph{carrying.capacity}: integer, maximum sum of biomass of the individuals. Very large carrying capacities plus a low maximum.biomass generates too many individuals for the simulation to remain efficient.
#'   \item \emph{driver.A.weight}: numeric in the interval [0, 1], represents the relative influence of the driver on environmental suitability.
#'   \item \emph{driver.B.weight}: numeric in the interval [0, 1], represents the relative influence of the driver on environmental suitability. The sum of weights of drivers A and B should be 1.
#'   \item \emph{niche.A.mean}: numeric, in the same units as driver A. It is the mean of the normal function defining the response of the virtual taxa to driver A.
#'   \item \emph{niche.A.sd}: numeric, in the same units as driver A. It is the standard deviation of the normal function defining the response of the virtual taxa to driver A.
#'   \item \emph{niche.B.mean}: as above, but for driver B.
#'   \item \emph{niche.B.sd}: as above, but for driver B.
#'   \item \emph{autocorrelation.length.A}: numeric, only useful if several drivers generated with different autocorrelation lengths are available (and identified by the column \code{autocorrelation.length}) in the \code{drivers} argument provided to the \code{\link{simulatePopulation}} function.
#'   \item \emph{autocorrelation.length.B}: same as above.
#' }
#'
#' @seealso \code{\link{simulatePopulation}}, \code{\link{fixParametersTypes}}
#'
#' @examples
#'
#'#generating the template
#'parameters <- parametersDataframe(rows=1)
#'
#'#filling it with a vector
#'parameters[1,] <- c("Species 1", 50, 20, 2, 0.2, 0, 100, 1000, 1, 0, 50, 10, 0, 0, 600, 600)
#'
#' @export
parametersDataframe <- function(rows = 1){

  #sanitizing rows: it must be a single number; fractional values are
  #rounded up, and values below 1 (which would break rep(NA, rows))
  #are raised to 1
  if(!is.numeric(rows) || length(rows) != 1 || is.na(rows)){
    stop("Argument 'rows' must be a single number.")
  }
  rows <- max(1, ceiling(rows))

  #template dataframe filled with NA
  output.df <- data.frame(label = rep(NA, rows),
                          maximum.age = rep(NA, rows),
                          reproductive.age = rep(NA, rows),
                          fecundity = rep(NA, rows),
                          growth.rate = rep(NA, rows),
                          pollen.control = rep(NA, rows),
                          maximum.biomass = rep(NA, rows),
                          carrying.capacity = rep(NA, rows),
                          driver.A.weight = rep(NA, rows),
                          driver.B.weight = rep(NA, rows),
                          niche.A.mean = rep(NA, rows),
                          niche.A.sd = rep(NA, rows),
                          niche.B.mean = rep(NA, rows),
                          niche.B.sd = rep(NA, rows),
                          autocorrelation.length.A = rep(NA, rows),
                          autocorrelation.length.B = rep(NA, rows),
                          stringsAsFactors = FALSE)

  return(output.df)
}
/scratch/gouwar.j/cran-all/cranData/virtualPollen/R/parametersDataframe.R
#' Plots results of \code{\link{acfToDf}}. #' #' @description Plots a dataframe resulting from \code{\link{acfToDf}} by using \code{\link[ggplot2]{ggplot2}}. #' #' @usage plotAcf( #' x = NULL, #' plot.title = "" #' ) #' #' @param x dataframe, output of \code{\link{acfToDf}} #' @param plot.title string, title of the output plot. #' #' @author Blas M. Benito <[email protected]> #' #' @return A ggplot object #' #' @seealso \code{\link{acfToDf}} #' #' @examples #' #'#getting a driver #'data(driverA) #' #'#computing temporal autocorrelations #'x.df <- acfToDf( #' x = driverA, #' lag.max = 1000, #' length.out = 100 #') #'str(x.df) #' #'#plotting output #'plotAcf(x.df) #' #' @export plotAcf <- function(x = NULL, plot.title = ""){ acf.plot = ggplot(data = x, aes(x = lag, y = acf)) + geom_hline(aes(yintercept = 0)) + geom_hline(aes(yintercept = ci.max), color="red", linetype="dashed") + geom_hline(aes(yintercept = ci.min), color="red", linetype="dashed") + geom_segment(mapping = aes(xend = lag, yend = 0)) + ggtitle(plot.title) + theme(plot.margin = unit(c(0,0,0,0), "cm")) + cowplot::theme_cowplot() + theme(legend.position = "none") return(acf.plot) }
/scratch/gouwar.j/cran-all/cranData/virtualPollen/R/plotAcf.R
#' Plots results of \code{\link{simulatePopulation}}.
#'
#' @description This function takes as input the output of \code{\link{simulatePopulation}}, and plots the pollen abundance, number of individuals, biomass, driver, and environmental suitability of each simulation outcome.
#'
#' @usage plotSimulation(
#'   simulation.output = NULL,
#'   species = "all",
#'   burnin = FALSE,
#'   filename = NULL,
#'   time.zoom = NULL,
#'   panels = c("Driver A",
#'   "Driver B",
#'   "Suitability",
#'   "Population",
#'   "Mortality",
#'   "Biomass",
#'   "Pollen"
#'   ),
#'   plot.title = NULL,
#'   width = 12,
#'   text.size = 20,
#'   title.size = 25,
#'   line.size = 1
#'   )
#'
#' @param simulation.output output of \code{\link{simulatePopulation}}.
#' @param species a number or vector of numbers representing rows in the parameters dataframe, or a string or vector of strings referencing to the "label" column of the parameters dataframe.
#' @param burnin if \code{FALSE}, burn-in period is not considered in the model.
#' @param filename character string, name of output pdf file. If NULL or empty, no pdf is produced. It shouldn't include the extension of the output file.
#' @param time.zoom vector of two numbers indicating the beginning and end of the time interval to be plotted (i.e. "c(5000, 10000)").
#' @param panels character string or vector of character strings with these possible values: "Driver A", "Driver B","Suitability", "Population", "Mortality", "Biomass", "Pollen".
#' @param plot.title character string to use as plot title.
#' @param width plot width in inches.
#' @param text.size text size of the plot.
#' @param title.size plot title size.
#' @param line.size size of lines in plots.
#'
#' @details The user can decide what virtual taxa to plot (argument \code{species}), and what information to show through the \code{panels} argument. Output is plotted on screen by default, and printed to pdf if the \code{filename} argument is filled.
#'
#' @author Blas M. Benito <[email protected]>
#'
#' @seealso \code{\link{simulatePopulation}}, \code{\link{compareSimulations}}
#'
#' @examples
#'
#'#getting example data
#'data(simulation)
#'
#'#plot first simulation
#'plotSimulation(simulation.output = simulation[[1]])
#'
#' @export
plotSimulation <- function(simulation.output = NULL,
                           species = "all",
                           burnin = FALSE,
                           filename = NULL,
                           time.zoom = NULL,
                           panels = c("Driver A",
                                      "Driver B",
                                      "Suitability",
                                      "Population",
                                      "Mortality",
                                      "Biomass",
                                      "Pollen"),
                           plot.title = NULL,
                           width = 12,
                           text.size = 20,
                           title.size = 25,
                           line.size = 1){

  available.panels <- c("Driver A", "Driver B", "Suitability", "Population", "Mortality", "Biomass", "Pollen")

  #checking and setting panels
  if(length(panels) == 1){
    #a single non-character value or any flavor of "all" selects every panel
    if(!is.character(panels) || panels %in% c("all", "ALL", "All")){
      panels <- available.panels
    }
  } else {
    if(sum(!(panels %in% available.panels)) >= 1){
      #collapse the vector of panel names into a single string so warning()
      #emits one readable message instead of a mangled vector
      warning(paste("There is something wrong with your 'panels' argument. Available panels are: ", paste(available.panels, collapse = ", "), ". All panels will be plotted instead.", sep = ""))
      panels <- available.panels
    }
  }

  #checking time.zoom
  if(!is.null(time.zoom) && length(time.zoom) != 2){
    stop("Argument time.zoom must be a vector of length two, as in: time.zoom=c(1000, 2000)")
  }

  #list to store plots
  plots.list <- list()

  #SELECTING SPECIES
  #-----------------
  #creating dictionary of species names and indexes
  if(inherits(simulation.output, "list")){
    names.dictionary <- data.frame(name = names(simulation.output), index = seq_along(simulation.output))
  } else {
    #fake names.dictionary to be used downstream when input is a data.frame
    names.dictionary <- data.frame(name = 1, index = 1)
  }

  #"all" (scalar only: vectorized comparisons are invalid if() conditions)
  if(length(species) == 1 && is.character(species) && species %in% c("all", "ALL", "All")){
    selected.species <- names.dictionary$index
  } else {
    #wrong names or indexes
    if(!any(species %in% names.dictionary$name) && !any(species %in% names.dictionary$index)){
      stop("You have selected species that are not available in the parameters table.")
    }
    #correct species names or indexes
    if(any(species %in% names.dictionary$name)){
      selected.species <- names.dictionary[names.dictionary$name %in% species, "index"]
    }
    if(any(species %in% names.dictionary$index)){
      selected.species <- species
    }
  }

  if(inherits(simulation.output, "data.frame")){
    selected.species <- 1
  }

  #ITERATING THROUGH SPECIES
  for(i in selected.species){

    #getting the data
    if(inherits(simulation.output, "list")){
      output <- simulation.output[[i]]
    }
    if(inherits(simulation.output, "data.frame")){
      output <- simulation.output
    }

    #to long format (gather must be namespaced: tidyr may not be attached)
    if("Period" %in% colnames(output)){
      output.long <- tidyr::gather(data = output, Variable, Value, 2:(ncol(output)-1))
      #removing burn-in period if burnin == FALSE
      if(burnin == FALSE){output.long <- output.long[output.long$Period == "Simulation", ]}
    } else {
      output.long <- tidyr::gather(data = output, Variable, Value, 2:ncol(output))
    }

    #age limits of the plot
    if(is.null(time.zoom)){
      age.min <- 1
      age.max <- max(output.long$Time)
    } else {
      age.min <- time.zoom[1]
      age.max <- time.zoom[2]
      #burnin to FALSE to avoid plotting it
      burnin <- FALSE
    }

    #preparing groups for facets
    output.long$Facets <- "Population"
    output.long[output.long$Variable == "Pollen", "Facets"] <- "Pollen"
    output.long[grep("Biomass", output.long$Variable), "Facets"] <- "Biomass"
    output.long[grep("Mortality", output.long$Variable), "Facets"] <- "Mortality"
    output.long[output.long$Variable == "Suitability", "Facets"] <- "Suitability"
    output.long[output.long$Variable == "Driver.A", "Facets"] <- "Driver A"

    #checking if driver B is empty
    if(sum(is.na(output$Driver.B)) != nrow(output)){
      output.long[output.long$Variable == "Driver.B", "Facets"] <- "Driver B"
      #facets order
      output.long$Facets <- factor(output.long$Facets, levels = c("Driver A", "Driver B", "Suitability", "Population", "Mortality", "Biomass", "Pollen"))
    } else {
      output.long$Facets <- factor(output.long$Facets, levels = c("Driver A", "Suitability", "Population", "Mortality", "Biomass", "Pollen"))
    }

    #preparing subgroups for color
    output.long$Color <- "Adults"
    output.long[grep("immature", output.long$Variable), "Color"] <- "Saplings"
    output.long[grep("total", output.long$Variable), "Color"] <- "Total biomass"
    output.long[output.long$Variable == "Pollen", "Color"] <- "Pollen"
    output.long[output.long$Variable == "Population.viable.seeds", "Color"] <- "Seedlings"
    output.long[output.long$Variable == "Suitability", "Color"] <- "Suitability"
    output.long[output.long$Variable == "Driver.A", "Color"] <- "Driver A"

    #checking if driver B is empty
    if(sum(is.na(output$Driver.B)) != nrow(output)){
      output.long[output.long$Variable == "Driver.B", "Color"] <- "Driver B"
      #color order
      output.long$Color <- factor(output.long$Color, levels = c("Driver A", "Driver B", "Suitability", "Total biomass", "Adults", "Saplings", "Seedlings", "Pollen"))
      #palette
      color.palette <- c("#2F642A", "#57AD4F", "#000000", "#C45055", "#75E46A", "#4572A9", "gray40", "gray40")
      names(color.palette) <- c("Adults", "Saplings", "Total biomass", "Pollen", "Seedlings", "Suitability", "Driver A", "Driver B")
    } else {
      output.long$Color <- factor(output.long$Color, levels = c("Driver A", "Suitability", "Total biomass", "Adults", "Saplings", "Seedlings", "Pollen"))
      #palette
      color.palette <- c("#2F642A", "#57AD4F", "#000000", "#C45055", "#75E46A", "#4572A9", "gray40")
      names(color.palette) <- c("Adults", "Saplings", "Total biomass", "Pollen", "Seedlings", "Suitability", "Driver A")
    }

    #removing unwanted facets/panels
    output.long <- output.long[output.long$Facets %in% panels, ]

    #setting up plot title: a per-iteration variable, so every taxon gets
    #its own title instead of all plots reusing the first taxon's name
    if(is.null(plot.title)){
      plot.title.i <- paste("Taxon: ", names(simulation.output)[i], sep = "")
    } else {
      plot.title.i <- plot.title
    }

    #plot: theme_cowplot() (a complete theme) goes before theme() so the
    #text-size and margin customizations below are not wiped out
    p1 <- ggplot(data = output.long, aes(x = Time, y = Value, color = Color)) +
      geom_rect(data = output.long, aes(xmin = min(min(Time), 0), xmax = 0, ymin = 0, ymax = Inf), inherit.aes = FALSE, fill = "gray90") +
      geom_line(size = line.size) +
      scale_colour_manual(values = color.palette) +
      facet_wrap(facets = "Facets", scales = "free_y", ncol = 1, drop = TRUE) +
      ggtitle(plot.title.i) +
      xlab("Time (years)") +
      ylab("") +
      geom_vline(xintercept = seq(0, max(output.long$Time), by = 200), color = "gray") +
      scale_x_continuous(breaks = seq(age.min, age.max, by = age.max/10)) +
      cowplot::theme_cowplot() +
      theme(text = element_text(size = text.size),
            axis.text = element_text(size = text.size),
            axis.title = element_text(size = text.size),
            plot.title = element_text(size = title.size),
            plot.margin = unit(c(0.5, 1, 0.5, -0.5), "cm"),
            panel.spacing = unit(0, "lines")) +
      labs(color = 'Legend') +
      guides(color = guide_legend(override.aes = list(size = 2))) +
      coord_cartesian(xlim = c(age.min, age.max)) +
      theme(legend.position = "bottom")

    plots.list[[i]] <- p1

  } #end of iteration through species

  #plots to screen
  invisible(lapply(plots.list, print))

  #plots to pdf (width honors the 'width' argument instead of a hard-coded 12)
  if(!is.null(filename) && is.character(filename)){
    pdf(paste(filename, ".pdf", sep = ""), width = width, height = length(unique(output.long$Facets))*2)
    invisible(lapply(plots.list, print))
    dev.off()
  }

} #end of plotting function
/scratch/gouwar.j/cran-all/cranData/virtualPollen/R/plotSimulation.R
#' Rescales a vector within given bounds. #' #' @description Takes a numeric vector \code{x} and rescales it within the values given by \code{new.min} and \code{new.max}. #' #' @usage rescaleVector( #' x = rnorm(100), #' new.min = 0, #' new.max = 100, #' integer = FALSE #' ) #' #' @param x numeric vector to be rescaled. #' @param new.min numeric, new minimum value for \code{x}. Default is 0. #' @param new.max numeric, new maximum value for \code{x}. Default is 100. #' @param integer boolean, if TRUE, output vector is returned as vector of integers. Default is FALSE. #' #' #' @author Blas M. Benito <[email protected]> #' #' @return A vector of the same length as \code{x} rescaled between \code{output.min} and \code{output.max}. #' #' #' @examples #' #generating example data #' x = rnorm(100) #' #' #as float #' x.float <- rescaleVector( #' x = x, #' new.min = 0, #' new.max = 100, #' integer = FALSE #' ) #' #' #as integer #' x.integer <- rescaleVector( #' x = x, #' new.min = 0, #' new.max = 100, #' integer = TRUE #' ) #' #' @export rescaleVector <- function(x = rnorm(100), new.min = 0, new.max = 100, integer = FALSE){ #CHECKS INPUT VECTOR #---------------------- if(!is.vector(x) | length(x) == 0 | is.null(x)){ stop("rescaleVector: the argument x is not a vector.") } #OUTPUT MIN AND MAX #---------------------- #checks that min and max are actually min and max and swaps them if not if(new.max < new.min){ temp.min = new.max #new.max to temporary variable new.max = new.min #swaps values new.min = temp.min #sets new.min to value of temporary variable } #COMPUTING DATA EXTREMES #---------------------- #data extremes old.min = min(x) old.max = max(x) #SCALING VECTOR #---------------------- x = ((x - old.min) / (old.max - old.min)) * (new.max - new.min) + new.min #FORCES VECTOR INTO INTEGER #---------------------- if(integer == TRUE){ x = floor(x) } return(x) }
/scratch/gouwar.j/cran-all/cranData/virtualPollen/R/rescaleVector.R
#' Simulates a virtual sediment accumulation rate.
#'
#' @description Generates a virtual sediment accumulation rate to be applied to the results of \code{\link{simulatePopulation}}.
#'
#' @usage simulateAccumulationRate(
#'   seed=50,
#'   time=1:1000,
#'   output.min=10,
#'   output.max=40,
#'   direction=1,
#'   plot=TRUE
#'   )
#'
#' @param seed integer, seed to be used by \code{\link{set.seed}} to configure the state of the pseudo-random number generator. It defines the shape of the curve.
#' @param time vector of time values (ideally the same used to generate the simulations). \strong{Important}: the \code{time} column goes from "left to right", meaning that oldest samples have the lowest values of age/time, and viceversa.
#' @param output.min numeric, in years per centimetre, minimum sediment accumulation rate (10 by default).
#' @param output.max numeric, in years per centimetre, maximum sediment accumulation rate (40 by default).
#' @param direction integer, values 1 or -1, to invert the resulting accumulation rate.
#' @param plot boolean, plots output accumulation rate if \code{TRUE}.
#'
#' @details The accumulation rate curve is generated through a random walk smoothed by a GAM model. The value of the \code{seed} argument changes the shape of the curve, but the user has no more control than trying different values to achieve a curve closer to the desired one. If \code{plot} is set to \code{TRUE}, the accumulation rate curve is printed on screen, but not exported to pdf.
#'
#' @author Blas M. Benito <[email protected]>
#'
#' @return A dataframe like \code{\link{accumulationRate}}, with the following columns:
#' \itemize{
#'   \item \emph{time}: numeric, time or age of the given case.
#'   \item \emph{accumulation.rate}: numeric, in years per centimetre, simulated accumulation rate.
#'   \item \emph{grouping}: integer, grouping variable to aggregate together (with \code{\link{aggregateSimulation}}) samples deposited in the same centimetre according \emph{accumulation.rate}.
#' }
#'
#' @seealso \code{\link{simulatePopulation}}, \code{\link{aggregateSimulation}}
#'
#' @examples
#'
#'acc.rate <- simulateAccumulationRate(
#'  seed = 50,
#'  time = 1:1000,
#'  output.min = 10,
#'  output.max = 40,
#'  direction = 1,
#'  plot = TRUE
#'  )
#'
#'str(acc.rate)
#'
#' @export
simulateAccumulationRate <- function(seed = 50,
                                     time = 1:1000,
                                     output.min = 10,
                                     output.max = 40,
                                     direction = 1,
                                     plot = TRUE
                                     ){

  #setting random seed for repeatibility
  #NOTE(review): this mutates the global RNG state without restoring it on
  #exit, so calls to this function affect subsequent random draws in the
  #caller's session.
  set.seed(seed)

  #generating a random walk: cumulative sum of random steps of -0.1, 0, or
  #0.1, with one step per time unit
  accumulation.rate <- cumsum(sample(c(-0.1, 0, 0.1), max(time), TRUE))

  #direction == -1 mirrors the curve along the time axis
  if(direction == -1){
    accumulation.rate = rev(accumulation.rate)
  }

  #fitting a gam to the data and predicting a smoothed accumulation rate curve
  #(gam/s come from mgcv; k = 10 caps the smoother's flexibility)
  temp.data <- data.frame(accumulation.rate, time)
  temp.gam <- gam(accumulation.rate ~ s(time, k = 10), data = temp.data)
  accumulation.rate <- predict(temp.gam, type = "response")

  #scaling it between given bounds; integer = TRUE floors the values, so
  #accumulation.rate holds whole years-per-centimetre
  accumulation.rate <- rescaleVector(x = as.vector(accumulation.rate),
                                     new.min = output.min,
                                     new.max = output.max,
                                     integer = TRUE)
  accumulation.rate <- as.vector(accumulation.rate)

  #plotting data (printed on screen only, never exported to pdf)
  temp.df <- data.frame(time, accumulation.rate)
  if(plot == TRUE){
    temp.plot <- ggplot(data = temp.df, aes(x = time, y = accumulation.rate)) +
      geom_line(color = viridis(10)[3], size = 0.5) +
      geom_ribbon(aes(ymin = 0, ymax = accumulation.rate), fill = viridis(10)[1], alpha = 0.3) +
      xlab("Time") +
      ylab("Acc. rate") +
      scale_y_continuous(breaks = seq(0, output.max, by = 10)) +
      scale_x_continuous(breaks = seq(0, max(time), by = max(time)/5)) +
      cowplot::theme_cowplot() +
      theme(legend.position = "none", panel.background = element_blank())
    print(temp.plot)
  }

  #generating a grouping variable (consecutive numbers with same value are put in separated groups)
  #applying rle to identify runs of consecutive equal accumulation rates
  accumulation.rate.rle <- rle(accumulation.rate)
  accumulation.rate.rle <- data.frame(value = accumulation.rate.rle$values, length = accumulation.rate.rle$lengths)

  #using rle as guide to build the groups: within a run of rate `value`
  #spanning `length` years, each group of `value` consecutive years is one
  #simulated centimetre, so the run is split into ~length/value groups
  #NOTE(review): `length` and `value` shadow the base functions of the same
  #name inside this loop; presumably intentional but worth confirming.
  #NOTE(review): when a run's length is not a multiple of its value,
  #round(length/value) and the trailing truncation below decide how the
  #remainder is assigned — behavior at those boundaries is approximate.
  accumulation.rate.groups <- vector()
  start.group <- 0
  for(i in 1:nrow(accumulation.rate.rle)){
    value <- accumulation.rate.rle[i, "value"]
    length <- accumulation.rate.rle[i, "length"]
    times <- start.group + (1:round(length/value, 0))
    accumulation.rate.groups <- c(accumulation.rate.groups, rep(times, each = value))
    start.group <- max(times)
  }

  #truncating the group vector to the length of the time series
  accumulation.rate.groups <- accumulation.rate.groups[1:max(time)]

  #assembling the output dataframe: time, accumulation.rate, grouping
  output <- data.frame(time = time, accumulation.rate = accumulation.rate, grouping = accumulation.rate.groups)

  return(output)

} #end of function
/scratch/gouwar.j/cran-all/cranData/virtualPollen/R/simulateAccumulationRate.R
#' Generates a random time series with temporal autocorrelation.
#'
#' @description Generates a vector of the same length as the \code{time} argument, with a temporal autocorrelation length close to the one defined by \code{autocorrelation.length}, and a range within \code{output.min} and \code{output.max}. The output of this function is intended to be used as an input to the function \code{\link{simulatePopulation}}. \strong{Important}: note that the variable \code{time} runs from left to right in \code{\link{simulatePopulation}}, with lower values representing older samples.
#'
#' @usage simulateDriver(
#'   random.seed = 30,
#'   time = 1:10000,
#'   autocorrelation.length = 100,
#'   output.min = 0,
#'   output.max = 100,
#'   rescale = TRUE
#'   )
#'
#' @param random.seed integer, seed to be used by \code{set.seed()}. Default is 30. Non-integer values are truncated with \code{floor()}.
#' @param time integer, or numeric vector of integers with constant intervals. If a single integer is provided, a time sequence is generated from 1 to the given integer as \emph{seq(1, time, by = 1)}. Default is 1:10000.
#' @param autocorrelation.length integer, represents the length of the convolution filter to be used to impose a particular temporal structure on the time series. Default is 100, equivalent to a filter composed by a hundred of ones. Values larger than \code{length(time)/2} are clamped to that limit.
#' @param output.min numeric, minimum value of the output time series. Used as input for \code{\link{rescaleVector}}. Default is 0.
#' @param output.max numeric, maximum value of the output time series. Used as input for \code{\link{rescaleVector}}. Default is 100.
#' @param rescale boolean. If FALSE, \code{output.min} and \code{output.max} are ignored, and the original data range provided by \code{rnorm} is preserved. Default is TRUE.
#'
#' @details It is recommended to use \code{time} vectors with a time step of 1 between consecutive values when the output is to be used as input for \code{\link{simulatePopulation}}, which considers annual time-steps while simulating virtual pollen curves. The initial random sequence of numbers is generated by \code{rnorm}. Desired temporal autocorrelation is approximate, and deviation becomes higher if \code{autocorrelation.length} is larger than half the length of \code{time}. Consequently, the function limits \code{autocorrelation.length} to \code{length(time)/2}.
#'
#' @author Blas M. Benito <[email protected]>
#'
#' @return A vector of the same length as \code{time}. Datasets \code{\link{driverA}} and \code{\link{driverB}} are outputs of this function.
#'
#' @seealso \code{\link{rescaleVector}}, \code{\link{driverA}}, \code{\link{driverB}}, \code{\link{set.seed}}
#'
#' @examples
#'
#' x <- simulateDriver(
#'   random.seed = 30,
#'   time = 1:10000,
#'   autocorrelation.length = 100,
#'   output.min = -10,
#'   output.max = 20,
#'   rescale = TRUE
#'   )
#'
#' #plots output
#' plot(x, type = "l")
#'
#' #checks temporal autocorrelation
#' acf(x, lag.max = 300)
#'
#' @export
simulateDriver <- function(random.seed = 30,
                           time = 1:10000,
                           autocorrelation.length = 100,
                           output.min = 0,
                           output.max = 100,
                           rescale = TRUE){

  #RANDOM SEED
  #----------------------
  #fractional seeds are truncated so that, e.g., 30.7 and 30
  #produce the same series
  if(!is.integer(random.seed)){
    random.seed <- floor(random.seed)
  }

  #setting random seed to the specified one
  set.seed(random.seed)

  #TIME
  #----------------------
  #coerces matrix/ts-like input into a plain vector
  if(!is.vector(time)){
    time <- as.vector(time)
  }

  #a single number is expanded into the sequence 1:time, as documented.
  #FIX: the previous check '!is.vector(time)' never triggered for a scalar,
  #because a scalar is a length-1 vector in R; the expansion therefore never
  #happened and autocorrelation.length was clamped to 0 further below.
  if(length(time) == 1){
    time <- seq(1, floor(time), by = 1)
  }

  #FIX: the original called the nonexistent function 'error()';
  #'stop()' is the correct way to signal this failure
  if(length(time) == 0){
    stop("Time needs to be an integer higher than 1, or a vector of integers.")
  }

  #AUTOCORRELATION LENGTH
  #----------------------
  #limits autocorrelation length to half the length of time if higher
  #(beyond that limit the achieved autocorrelation deviates strongly
  #from the requested one)
  if (autocorrelation.length > length(time)/2){
    autocorrelation.length <- floor(length(time)/2)
  }

  #GENERATES DRIVER
  #----------------------
  #white noise smoothed with a circular moving-sum filter of ones, which
  #imposes the requested autocorrelation structure (returns a time series).
  #stats::filter is called explicitly to avoid masking by dplyr::filter.
  driver <- stats::filter(rnorm(max(time)),
                          filter = rep(1, autocorrelation.length),
                          circular = TRUE)

  #converts from time series to vector
  driver <- as.vector(driver)

  #rescales time series to [output.min, output.max]
  if(rescale == TRUE){
    driver <- rescaleVector(x = driver,
                            new.min = output.min,
                            new.max = output.max)
  }

  return(driver)

}
/scratch/gouwar.j/cran-all/cranData/virtualPollen/R/simulateDriver.R
#' Generates drivers for \code{\link{simulatePopulation}}.
#'
#' @description Wrapper of \code{simulateDriver} to generate several drivers with different autocorrelation lengths, and return a long format dataframe to be used as input for \code{\link{simulatePopulation}}. It also produces a plot of the generated drivers. \strong{Important}: note that the variable \code{time} runs from left to right, with lower values representing older samples.
#'
#'@usage simulateDriverS(
#'  random.seeds=c(60, 120),
#'  time=1:10000,
#'  autocorrelation.lengths=c(200, 600),
#'  output.min=c(0,0),
#'  output.max=c(100, 100),
#'  driver.names=c("A", "B"),
#'  filename=NULL)
#'
#' @param random.seeds vector of integers, seeds to be used by \code{set.seed}. If shorter than \code{driver.names}, it is extended with consecutive integers; if longer, it is truncated.
#' @param time integer, or numeric vector of integers with constant intervals. If a single integer is provided, a time sequence is generated from 1 to the given integer. Default is 1:10000.
#' @param autocorrelation.lengths vector of integers, represents the lengths of the convolution filters to be used to impose a particular temporal structure on each driver. Default is 100, equivalent to a filter composed by a hundred of ones.
#' @param output.min numeric vector, minimum values of the output time series. Used as input for \strong{rescaleVector}. Default is 0. Recycled or truncated to the length of \code{driver.names}.
#' @param output.max numeric vector, maximum values of the output time series. Used as input for \strong{rescaleVector}. Default is 100. Recycled or truncated to the length of \code{driver.names}.
#' @param driver.names character vector, with labels to be used to identify the drivers.
#' @param filename character string, name of output pdf file. If NULL or empty, no pdf is produced.
#'
#' @details It is recommended to use \code{time} vectors with a time step of 1 between consecutive values when the output is to be used as input for \code{\link{simulatePopulation}}, which considers annual time-steps while simulating virtual pollen curves. Initial random sequence is generated by \code{rnorm}. Desired temporal autocorrelation is approximate, and deviation becomes higher if \code{autocorrelation.length} is larger than half the length of \code{time}. Consequently, the function limits \code{autocorrelation.length} to \code{length(time)/2}.
#'
#' @author Blas M. Benito <[email protected]>
#'
#' @return A long format dataframe (see dataset \code{\link{drivers}}) with the following columns:
#'
#'  \itemize{
#'  \item \emph{time}: integer.
#'  \item \emph{driver}: character, values are \code{A} and \code{B}
#'  \item \emph{autocorrelation.length}: numeric, default values are 200, 600, and 1800.
#'  \item \emph{value}: numeric, value of the driver for the given \emph{time}.
#'  }
#'
#' @seealso \code{\link{drivers}}, \code{\link{simulateDriver}}, \code{\link{simulatePopulation}}, \code{\link{drivers}}
#'
#' @examples
#'
#'drivers <- simulateDriverS(
#'  random.seeds=c(60, 120),
#'  time=1:10000,
#'  autocorrelation.lengths=c(200, 600),
#'  output.min=c(0,0),
#'  output.max=c(100, 100),
#'  driver.names=c("A", "B"),
#'  filename=NULL
#')
#'
#'str(drivers)
#'
#' @export
simulateDriverS <- function(random.seeds=c(60, 120),
                            time=1:10000,
                            autocorrelation.lengths=c(200, 600),
                            output.min=c(0,0),
                            output.max=c(100, 100),
                            driver.names=c("A", "B"),
                            filename=NULL){

  #TESTING INPUT DATA
  #-------------------

  #number of driver names; all per-driver arguments are
  #normalized to this length below
  n.names <- length(driver.names)

  #driver.names
  if(!is.character(driver.names)){stop("The argument driver.names should be a character vector.")}

  #random.seeds: non-numeric input falls back to 1:n.names
  if(!is.numeric(random.seeds)){
    random.seeds <- 1:n.names
  } else {
    random.seeds <- as.integer(random.seeds)
  }

  #FIX: the previous extension produced n.names + 1 seeds
  #(random.seeds[1]:(random.seeds[1]+n.names)), which made the
  #data.frame() call below fail with differing row counts
  if(length(random.seeds) < n.names){
    random.seeds <- random.seeds[1]:(random.seeds[1] + n.names - 1)
  }
  if(length(random.seeds) > n.names){
    random.seeds <- random.seeds[1:n.names]
  }

  #output.min and output.max: recycle first value or truncate
  if(length(output.min) < n.names){
    output.min <- rep(output.min[1], n.names)
  }
  if(length(output.min) > n.names){
    output.min <- output.min[1:n.names]
  }
  #FIX: this branch recycled output.min[1] instead of output.max[1],
  #silently collapsing the output range of every driver
  if(length(output.max) < n.names){
    output.max <- rep(output.max[1], n.names)
  }
  if(length(output.max) > n.names){
    output.max <- output.max[1:n.names]
  }

  #swaps min/max pairs given in the wrong order
  for(i in 1:n.names){
    if(output.max[i] < output.min[i]){
      temp.min <- output.max[i]
      output.max[i] <- output.min[i]
      output.min[i] <- temp.min
    }
  }

  #time
  if(!is.numeric(time)){stop("time should be a numeric vector, try 1:1000.")}
  if(length(time)==1){time <- 1:floor(time)}

  #autocorrelation.length
  if(!is.numeric(autocorrelation.lengths)){stop("autocorrelation.length should be a numeric vector.")}

  #data ranges and random seed for each driver
  data.ranges <- data.frame(driver=driver.names,
                            output.min,
                            output.max,
                            random.seeds)

  #dataframes to store drivers (long format accumulator and
  #per-combination scratch frame)
  drivers <- drivers.temp <- data.frame(time=numeric(),
                                        driver=character(),
                                        autocorrelation.length=numeric(),
                                        value=numeric())

  autocorrelation <- data.frame(lag=numeric(),
                                acf=numeric(),
                                ci.max=numeric(),
                                ci.min=numeric(),
                                driver=character(),
                                autocorrelation.length=numeric())

  #looping through drivers and autocorrelation lengths: one simulated
  #series per (driver, autocorrelation.length) combination
  for(driver in driver.names){
    for(autocorrelation.length in autocorrelation.lengths){

      #FILLING drivers.temp
      #---------------------------------------
      #expand the scratch frame to max(time) rows and fill metadata
      drivers.temp[max(time), ] <- NA
      drivers.temp$time <- time
      drivers.temp$driver <- rep(driver, nrow(drivers.temp))
      drivers.temp$autocorrelation.length <- rep(autocorrelation.length, nrow(drivers.temp))

      #simulate the driver with its per-driver seed and range
      simulated.driver <- simulateDriver(
        random.seed = data.ranges[data.ranges$driver==driver, "random.seeds"],
        time = time,
        autocorrelation.length = autocorrelation.length,
        output.min = data.ranges[data.ranges$driver==driver, "output.min"],
        output.max = data.ranges[data.ranges$driver==driver, "output.max"]
        )

      #getting the driver values (rescaled to [0, 100] for the output table)
      drivers.temp$value <- rescaleVector(x = simulated.driver,
                                          new.max = 100,
                                          new.min = 0)

      #FILLING autocorrelation.temp
      #---------------------------------------
      #computing acf for the diagnostic plot
      autocorrelation.temp <- acfToDf(x = simulated.driver,
                                      lag.max = max(max(autocorrelation.lengths)),
                                      length.out = 100)
      autocorrelation.temp$driver <- rep(driver, nrow(autocorrelation.temp))
      autocorrelation.temp$autocorrelation.length <- rep(autocorrelation.length, nrow(autocorrelation.temp))

      #merging with main dataframes
      drivers <- rbind(drivers, drivers.temp)
      autocorrelation <- rbind(autocorrelation, autocorrelation.temp)

    }
  }

  #PLOTTING OUTPUT
  #-------------------
  #driver time series panel
  p.drivers <- ggplot(data = drivers, aes(x = time, y = value, color = driver)) +
    geom_line() +
    scale_color_viridis(discrete = TRUE, begin = 0.2, end = 0.6) +
    facet_wrap(driver ~ autocorrelation.length, ncol = 1, scales = "free_y") +
    xlab("Time (years)") +
    ylab("") +
    ggtitle("Virtual drivers") +
    theme(plot.margin = unit(c(0.5, -1, 0.5, 0.5), "cm"),
          legend.position = "none",
          panel.background = element_blank()) +
    cowplot::background_grid(major = "none", minor = "none")

  #marginal density panel
  p.density <- ggplot(data = drivers, aes(value, fill = driver, colour = driver)) +
    geom_density(aes(y = ..scaled..), alpha = 0.5) +
    scale_color_viridis(discrete = TRUE, begin = 0.2, end = 0.6) +
    scale_fill_viridis(discrete = TRUE, begin = 0.2, end = 0.6) +
    facet_wrap(driver ~ autocorrelation.length, ncol = 1, scales = "free") +
    xlab("Years") +
    ylab("") +
    xlab("") +
    coord_flip() +
    theme(plot.margin = unit(c(0.5, 0.5, 0.5, 0), "cm"),
          axis.line = element_blank(),
          axis.text = element_blank(),
          axis.ticks = element_blank(),
          strip.background = element_blank(),
          strip.text.x = element_blank(),
          legend.position = "none",
          panel.background = element_blank()) +
    cowplot::background_grid(major = "none", minor = "none")

  #temporal autocorrelation panel
  p.acfs <- ggplot(data = autocorrelation, aes(x = lag, y = acf, color = driver)) +
    scale_color_viridis(discrete = TRUE, begin = 0.2, end = 0.6) +
    geom_hline(aes(yintercept = 0)) +
    geom_hline(aes(yintercept = ci.max), color = "red", linetype = "dashed") +
    geom_hline(aes(yintercept = ci.min), color = "red", linetype = "dashed") +
    geom_segment(mapping = aes(xend = lag, yend = 0)) +
    ylab("") +
    xlab("Lag (years)") +
    facet_wrap(driver ~ autocorrelation.length, ncol = 1, scales = "free_y") +
    ggtitle("Temporal autocorrelation") +
    theme(plot.margin = unit(c(0.5, 0.5, 0.5, -1), "cm"),
          legend.position = "none",
          panel.background = element_blank()) +
    cowplot::background_grid(major = "none", minor = "none")

  print(cowplot::plot_grid(p.drivers, NULL, p.density, NULL, p.acfs,
                           align = "h",
                           ncol = 5,
                           rel_widths = c(1, 0, 0.3, 0, 1)))

  #exports the composed figure to pdf if a filename is provided
  #(scalar && replaces elementwise &: both operands are scalars here)
  if(!is.null(filename) && is.character(filename)){
    ggsave(width = 12,
           height = (1.5*(length(driver.names) * length(autocorrelation.lengths))),
           filename = paste(filename, ".pdf", sep = ""))
  }

  return(drivers)

}
/scratch/gouwar.j/cran-all/cranData/virtualPollen/R/simulateDriverS.R
#' Simulates population dynamics for virtual species with different traits.
#'
#' @description This function takes as input a dataframe of parameters defining virtual taxa produced by \code{\link{parametersDataframe}} and \code{\link{fixParametersTypes}}, a driver or drivers generated with \code{\link{simulateDriver}} or \code{\link{simulateDriverS}}, and simulates population dynamics for the given virtual taxa at yearly resolution for the time-length defined by the driver or drivers. \strong{Important}: note that the variable \code{time} runs from left to right, with lower values representing older samples. The model relies on the following set of assumptions:
#'
#'  \itemize{
#'  \item The spatial structure of the population is not important to explain its pollen productivity. This is an operative assumption, to speed-up model execution.
#'  \item The environmental niche of the species follows a Gaussian distribution, characterized by a mean (niche optimum, also niche position) and a standard deviation (niche breadth or tolerance).
#'  \item Different drivers can have a different influence on the species dynamics, and that influence can be defined by the user by tuning the weights of each driver.
#'  \item Environmental suitability, expressed in the range [0, 1], is the result of an additive function of the species niches (normal function defined by the species' mean and standard deviation for each driver), the drivers' values, and the relative influence of each driver (driver weights).
#'  \item Pollen productivity is a function of the individual's biomass and environmental suitability, so under a hypothetical constant individual's biomass, its pollen production depends linearly on environmental suitability values.
#'  \item Effective fecundity is limited by environmental suitability. Low environmental suitability values limit recruitment, acting as an environmental filter. Therefore, even though the fecundity of the individuals is fixed by the fecundity parameter, the overall population fecundity is limited by environmental suitability.
#'  }
#'
#'
#'
#' @usage simulatePopulation(
#'   parameters=NULL,
#'   species="all",
#'   driver.A=NULL,
#'   driver.B=NULL,
#'   drivers=NULL,
#'   burnin=TRUE
#'   )
#'
#' @param parameters dataframe with parameters.
#' @param species if "all" or "ALL", all species in "parameters" are simulated. It also accepts a vector of numbers representing the rows of the selected species, or a vector of names of the selected species.
#' @param driver.A numeric vector with driver values. Typically produced by \code{\link{simulateDriver}}.
#' @param driver.B numeric vector with driver values. Typically produced by \code{\link{simulateDriver}}. Must have same length as \code{driver.A}.
#' @param drivers dataframe with drivers produced by \code{\link{simulateDriverS}}. It should have the columns:
#'  \itemize{
#'  \item \emph{time} integer.
#'  \item \emph{driver} character, values are \code{A} and \code{B}
#'  \item \emph{autocorrelation.length} numeric, values are 200, 600, and 1800.
#'  \item \emph{value} numeric, value of the driver for the given \emph{time}.
#'  }
#' @param burnin boolean, generates a warming-up period for the population model of a length of five times the maximum age of the virtual taxa.
#'
#' @details The model starts with a population of 100 individuals with random ages, in the range [1, maximum age], taken from a uniform distribution (all ages are equiprobable). For each environmental suitability value, including the burn-in period, the model performs the following operations:
#'
#'  \itemize{
#'  \item \strong{Aging}: adds one year to the age of the individuals.
#'  \item \strong{Mortality due to senescence}: individuals reaching the maximum age are removed from the simulation.
#'  \item \strong{Local extinction and immigration}: If the number of individuals drops to zero, the population is replaced by a "seed bank" of 100 individuals with age zero, and the simulation jumps to step 7. This is intended to simulate the arrival of seeds from nearby regions, and will only lead to population growth if environmental suitability is higher than zero.
#'  \item \strong{Plant growth}: Applies a plant growth equation to compute the biomass of every individual.
#'  \item \strong{Carrying capacity}: If maximum population biomass is reached, individuals are iteratively selected for removal according to a mortality risk curve computed by the equation \eqn{P_{m} = 1 - sqrt(a/A)}, were \emph{Pm} is the probability of mortality, \emph{a} is the age of the given individual, and \emph{A} is the maximum age reached by the virtual taxa. This curve gives removal preference to younger individuals, matching observed patterns in natural populations.
#'  \item \strong{Pollen productivity}: In each time step the model computes the pollen productivity (in relative values) of the population using the equation \eqn{P_{t} = \sum x_{it} \times max(S_{t}, B)}, where \emph{t} is time (a given simulation time step), \emph{P} is the pollen productivity of the population at a given time, \emph{x_{i}} represents the biomass of every adult individual, \emph{S} is the environmental suitability at the given time, \emph{B} is the contribution of biomass to pollen productivity regardless of environmental suitability (\emph{pollen.control} parameter in the simulation, 0 by default). If \emph{B} equals 1, \emph{P} is equal to the total biomass sum of the adult population, regardless of the environmental suitability. If \emph{B} equals 0, pollen productivity depends entirely on environmental suitability values.
#'  \item \strong{Reproduction}: Generates as many seeds as reproductive individuals are available multiplied by the maximum fecundity and the environmental suitability of the given time.
#'  }
#'The model returns a table with climatic suitability, pollen production, and population size (reproductive individuals only) per simulation year. Figure 10 shows the results of the population model when applied to the example virtual species.
#'
#'
#' @author Blas M. Benito <[email protected]>
#'
#' @return A list of dataframes, each one of them with the results of one simulation. The dataset \code{\link{simulation}} exemplifies the output of this function. Each dataframe in the output list has the columns:
#'  \itemize{
#'  \item \emph{Time} integer, ages in years. Negative ages indicate the burn-in period.
#'  \item \emph{Pollen} numeric, pollen counts
#'  \item \emph{Population.mature} numeric, number of mature individuals.
#'  \item \emph{Population.immature} numeric, number of immature individuals.
#'  \item \emph{Population.viable.seeds} numeric, number of viable seeds generated each year.
#'  \item \emph{Suitability} numeric, environmental suitability computed from the driver by the normal function/s defining the taxon niche.
#'  \item \emph{Biomass.total} numeric, overall biomass of the population.
#'  \item \emph{Biomass.mature} numeric, sum of biomass of mature individuals.
#'  \item \emph{Biomass.immature} numeric, sum of biomass of immature individuals.
#'  \item \emph{Mortality.mature} numeric, number of mature individuals dead each year.
#'  \item \emph{Mortality.immature} numeric, same as above for immature individuals.
#'  \item \emph{Driver.A} numeric, values of driver A.
#'  \item \emph{Driver.B} numeric, values of driver B, if available, and NA otherwise.
#'  \item \emph{Period} qualitative, with value "Burn-in" for burn-in period, and "Simulation" otherwise.
#'  }
#'
#' @seealso \code{\link{parametersDataframe}}, \code{\link{fixParametersTypes}}, \code{\link{plotSimulation}}
#'
#' @examples
#'
#'#getting data
#'data(parameters)
#'data(driverA)
#'
#'#simulating population dynamics
#'# of first taxon in parameters
#'# for first 500 values of driverA
#'sim.output <- simulatePopulation(
#'  parameters=parameters[1,],
#'  driver.A=driverA[1:500]
#'  )
#'
#'#checking output
#'str(sim.output)
#'
#' @export
simulatePopulation <- function(parameters=NULL,
                               species="all",
                               driver.A=NULL,
                               driver.B=NULL,
                               drivers=NULL,
                               burnin=TRUE){

  #CHECKING INPUT DATA
  #-------------------

  #CHECKING parameters: must be a dataframe with the full set of
  #trait/niche columns produced by parametersDataframe()
  if(is.null(parameters) == TRUE | is.data.frame(parameters) == FALSE){
    stop("The argument 'parameters' empty.")
  } else {
    if(sum(!(colnames(parameters) %in% c("label", "maximum.age", "reproductive.age", "fecundity", "growth.rate", "pollen.control", "maximum.biomass", "carrying.capacity", "driver.A.weight", "driver.B.weight", "niche.A.mean", "niche.A.sd", "niche.B.mean", "niche.B.sd", "autocorrelation.length.A", "autocorrelation.length.B"))) != 0){
      stop(paste("The following column/s of 'parameters' seem to be missing: ", colnames(parameters)[!(colnames(parameters) %in% c("label", "maximum.age", "reproductive.age", "fecundity", "growth.rate", "pollen.control", "maximum.biomass", "carrying.capacity", "driver.A.weight", "driver.B.weight", "niche.A.mean", "niche.A.sd", "niche.B.mean", "niche.B.sd", "autocorrelation.length.A", "autocorrelation.length.B"))], sep=""))
    }
  }

  #helper: TRUE when driver.B is a usable vector
  is.driver.B.available <- function(driver.B){
    if(is.null(driver.B) == TRUE | is.vector(driver.B) == FALSE){
      return(FALSE)
    } else {
      return(TRUE)
    }
  }

  #CHECKING drivers, driver.A, driver.B
  #the 'drivers' dataframe takes precedence over driver.A/driver.B vectors
  if(is.null(drivers) == TRUE | is.data.frame(drivers) == FALSE){

    #checking driver B
    driver.B.available <- is.driver.B.available(driver.B)

    #checking driver.A
    #NOTE(review): if driver.A is invalid but driver.B is available,
    #'drivers.input' is never assigned and the function fails later
    #with "object not found" — confirm whether that path is reachable
    if(is.null(driver.A) == TRUE | is.vector(driver.A) == FALSE){
      if(driver.B.available == FALSE){
        stop("No drivers have been provided.")
      }
    } else {

      drivers.input <- "vector"

      #create fake driver.B (constant 1) if absent
      if(driver.B.available == FALSE){
        driver.B <- rep(1, length(driver.A))
      }
    }

  } else {

    #CHECKING drivers dataframe
    #checking columns
    #NOTE(review): the error message below indexes colnames(parameters),
    #not colnames(drivers) — looks like a copy-paste slip; verify
    if(sum(!(colnames(drivers) %in% c("time", "driver", "autocorrelation.length", "value"))) != 0){
      stop(paste("The following column/s of 'drivers' seem to be missing: ", colnames(parameters)[!(colnames(parameters) %in% c("time", "driver", "autocorrelation.length", "value"))], sep=""))
    } else {

      #switch to dataframe input
      drivers.input <- "data.frame"

      #giving preference to dataframe format
      driver.A <- NULL
      driver.B <- NULL
      driver.B.available <- TRUE
    }
  }

  #CHECKING AND SELECTING species
  #----------------
  #creating dictionary of species names and row indexes
  names.dictionary <- data.frame(name=parameters$label, index=1:nrow(parameters))

  #NOTE(review): 'species == "all"' inside if() errors on character
  #vectors of length > 1 in R >= 4.2; a length check (or any()) would
  #be needed to support vectors of species names here — confirm
  if(is.character(species)){
    if(species == "all" | species == "ALL" | species == "All"){
      selected.species <- names.dictionary$index
    } else {
      if(sum(species %in% names.dictionary$name) != length(species)){
        stop("You have selected species that are not available in the parameters table.")
      } else {
        selected.species <- names.dictionary[names.dictionary$name %in% species, "index"]
      }
    }
  }

  #numeric selection: row indexes into 'parameters'
  #NOTE(review): if no index matches, 'selected.species' stays undefined
  if(is.numeric(species)){
    if(sum(species %in% names.dictionary$index) != 0){
      selected.species <- species
    }
  }

  #generating output list (one dataframe per simulated taxon)
  #----------------
  output.list <- list()

  #helper: rescales dnorm() output to [0, 1] by dividing by the
  #maximum observed density (density at the niche optimum)
  #----------------
  rescaleSuitability <- function(predicted.density, max.observed.density){
    new.min <- 0
    new.max <- 1
    old.min <- 0
    old.max <- max.observed.density
    scaled.density <- ((predicted.density - old.min) / (old.max - old.min)) * (new.max - new.min) + new.min
    return(scaled.density)
  }

  #ITERATING THROUGH SPECIES
  #----------------
  for(i in selected.species){

    message(paste("Simulating taxon: ", parameters[i, "label"], sep=""), "\n")

    #dataframe row into a named list, one entry per parameter column
    parameters.list <- list()
    for(j in 1:ncol(parameters)){
      parameters.list[[paste0(colnames(parameters)[j])]] <- parameters[i,j]
    }

    #parameters from list to the function environment: each column name
    #(maximum.age, fecundity, niche.A.mean, ...) becomes a local variable
    list2env(parameters.list, envir=environment())

    #GETTING DRIVER VALUES

    #IF DRIVERS PROVIDED AS DATAFRAME
    if(drivers.input == "data.frame"){

      #if the autocorrelation.length available in parameters for species i
      #is not in drivers, the first autocorrelation length available in
      #drivers is assigned
      if(!(autocorrelation.length.A %in% unique(drivers$autocorrelation.length)) & !(autocorrelation.length.B %in% unique(drivers$autocorrelation.length))){
        message(paste("Autocorrelation lengths in parameters do not match autocorrelation lengths in drivers, I am getting the first value of autocorrelation.length available in drivers: ", unique(drivers$autocorrelation.length)[1], sep=""))
        autocorrelation.length.A <- autocorrelation.length.B <- unique(drivers$autocorrelation.length)[1]
      }

      #getting driver values by driver label and autocorrelation length
      driver.A.ready <- drivers[drivers$driver == "A" & drivers$autocorrelation.length == autocorrelation.length.A, "value"]
      driver.B.ready = drivers[drivers$driver == "B" & drivers$autocorrelation.length == autocorrelation.length.B, "value"]

      #checking if drivers are NA
      if(sum(is.na(driver.A.ready)) == length(driver.A.ready)){
        stop("Driver A is made of NA, something is wrong with the drivers argument.")
      }

      #driver.B is empty: replace with constant 1 and move all the
      #weight to driver A
      if(sum(is.na(driver.B.ready)) == length(driver.B.ready)){
        driver.B.ready <- rep(1, length(driver.A.ready))
        if(driver.B.weight > 0){
          driver.B.weight <- 0
          driver.A.weight <- 1
        }
      }

    }

    #if input drivers are vectors
    if(drivers.input == "vector"){

      driver.A.ready <- driver.A

      #setting driver.B.weight to 0 if driver.B was missing
      if(driver.B.available == FALSE){
        driver.B.ready <- rep(1, length(driver.A.ready))
        if(driver.B.weight > 0){
          driver.B.weight <- 0
          driver.A.weight <- 1
        }
      } else {
        driver.B.ready <- driver.B
      }
    }

    #checking niche parameters: NA or zero sd would break dnorm(),
    #so they are replaced with safe defaults
    if(is.na(niche.A.sd) == TRUE | niche.A.sd == 0){niche.A.sd <- 1}
    if(is.na(niche.B.sd) == TRUE | niche.B.sd == 0){niche.B.sd <- 1}
    if(is.na(niche.A.mean) == TRUE){niche.A.mean <- 0}
    if(is.na(niche.B.mean) == TRUE){niche.B.mean <- 0}

    #COMPUTING MAXIMUM DENSITY (output of normal function) OF EACH DRIVER
    #(density at the mean: used to rescale suitability to [0, 1])
    max.possible.density.driver.A <- dnorm(niche.A.mean, mean=niche.A.mean, sd=niche.A.sd)
    max.possible.density.driver.B <- dnorm(niche.B.mean, mean=niche.B.mean, sd=niche.B.sd)

    #computes suitability over driver.A using dnorm, niche.A.mean, and
    #niche.A.sd, and multiplies it by driver.A.weight
    suitability.A <- rescaleSuitability(dnorm(driver.A.ready, mean=niche.A.mean, sd=niche.A.sd), max.possible.density.driver.A) * driver.A.weight

    #same over driver.B
    suitability.B <- rescaleSuitability(dnorm(driver.B.ready, mean=niche.B.mean, sd=niche.B.sd), max.possible.density.driver.B) * driver.B.weight

    #sums the results of both drivers (additive niche, weights assumed
    #to sum to 1 so the result stays within [0, 1])
    suitability <- suitability.A + suitability.B

    #rounding to three decimal places
    suitability <- round(suitability, 3)

    #BURN-IN PERIOD ADDED TO SUITABILITY: 5*maximum.age years at
    #suitability ~1 followed by a 5*maximum.age ramp down to the first
    #real suitability value, with small jitter, clamped to [0, 1]
    #NOTE(review): when burnin == FALSE, 'length.burnin.suitability' is
    #never assigned, which makes the output.df construction below fail —
    #confirm whether burnin = FALSE is a supported path
    if(burnin == TRUE){
      burnin.suitability <- jitter(c(rep(1, maximum.age*5), seq(1, suitability[1], length.out = maximum.age*5)), amount=0.01)
      burnin.suitability[burnin.suitability < 0] <- 0
      burnin.suitability[burnin.suitability > 1] <- 1
      length.burnin.suitability <- length(burnin.suitability)
      burnin.suitability <- c(burnin.suitability, suitability)
    } else {
      burnin.suitability <- suitability
    }

    #VECTORS TO SAVE RESULTS (one value appended per simulated year)
    pollen.count <- vector()
    population.mature <- vector()
    population.immature <- vector()
    population.seeds <- vector()
    population.biomass <- vector()
    population.biomass.mature <- vector()
    population.biomass.immature <- vector()
    mortality.mature <- vector()
    mortality.immature <- vector()

    #SCALING AGE: ages are handled in [0, 1], where 1 is the maximum age;
    #the original maximum age is kept for the growth equation
    reproductive.age <- reproductive.age / maximum.age
    scaled.year <- 1/maximum.age
    maximum.age.original <- maximum.age
    maximum.age <- 1

    #STARTING POPULATION: 100 individuals with random (scaled) ages
    population <- sample(seq(0, 1, by=scaled.year), 100, replace=TRUE)

    #EXECUTING SIMULATION, one iteration per suitability value
    #----------------
    for(suitability.i in burnin.suitability){

      #aging (one scaled year per iteration)
      population <- population + scaled.year

      #death due to senescence: individuals at/over maximum age removed
      population <- population[population < maximum.age]

      #population drops to 0
      if (length(population) == 0){

        #local extinction, replaces population with a seedbank whose
        #size is scaled by the current suitability
        population <- rep(0, floor(100 * suitability.i))

        #adds 0 to the output vectors for this year
        pollen.count <- c(pollen.count, 0)
        population.mature <- c(population.mature, 0)
        population.immature <- c(population.immature, 0)
        population.seeds <- c(population.seeds, 0)
        population.biomass <- c(population.biomass, 0)
        population.biomass.mature <- c(population.biomass.mature, 0)
        population.biomass.immature <- c(population.biomass.immature, 0)
        mortality.mature <- c(mortality.mature, 0)
        mortality.immature <- c(mortality.immature, 0)

        #jumps to next iteration
        next
      }

      #PLANT GROWTH: logistic-like growth where the rate is modulated
      #by the current suitability, and age is rescaled back to years
      biomass <- maximum.biomass / (1 + maximum.biomass * exp(- (growth.rate * suitability.i) * (population * maximum.age.original)))

      #MORTALITY
      individuals.removed <- vector()

      #carrying capacity is reached: remove individuals one by one,
      #weighting removal by 1 - sqrt(scaled age), i.e. preferring
      #younger individuals (see Pm = 1 - sqrt(a/A) in @details)
      while(sum(biomass) > carrying.capacity){

        #removes random individual (curvilinear risk curve)
        individual.to.remove <- sample(x=length(population), size=1L, replace=TRUE, prob=1 - sqrt(population))

        #adds the removed individual's age to the list
        individuals.removed <- c(individuals.removed, population[individual.to.remove])

        #removing individuals
        population <- population[-individual.to.remove]
        biomass <- biomass[-individual.to.remove]

      }#end of while

      #indexes of adult individuals (age above scaled reproductive age)
      adults <- population > reproductive.age

      #producing seeds: age-0 individuals, count proportional to adult
      #relative biomass, fecundity, and current suitability
      seeds <- rep(0, floor(sum((biomass[adults]/maximum.biomass) * fecundity) * suitability.i))

      #filling output vectors
      #pollen count: adult biomass times max(suitability, pollen.control)
      pollen.count <- c(pollen.count, sum(biomass[adults]) * max(suitability.i, pollen.control))
      population.mature <- c(population.mature, sum(adults))
      population.immature <- c(population.immature, sum(population <= reproductive.age))
      population.seeds <- c(population.seeds, length(seeds))
      population.biomass <- c(population.biomass, sum(biomass))
      population.biomass.mature <- c(population.biomass.mature, sum(biomass[adults]))
      population.biomass.immature <- c(population.biomass.immature, sum(biomass[!adults]))
      mortality.mature <- c(mortality.mature, sum(individuals.removed > reproductive.age))
      mortality.immature <- c(mortality.immature, sum(individuals.removed <= reproductive.age))

      #joining seeds to the population
      population <- c(population, seeds)

    } #end of loop through suitability values

    #removing drivers that were not used (weight 0 or absent): their
    #output columns are filled with NA instead of values
    if(driver.A.weight == 0){
      driver.A.write <- rep(NA, length(driver.A.ready))
    } else {
      driver.A.write <- driver.A.ready
    }
    if(driver.B.weight == 0 | driver.B.available == FALSE){
      driver.B.write <- rep(NA, length(driver.B.ready))
    } else {
      driver.B.write <- driver.B.ready
    }

    #assembling the output dataframe: burn-in years get negative Time
    #values and NA drivers, simulation years get positive Time values
    output.df <- data.frame(Time = c(-length.burnin.suitability:-1, 1:(length(suitability))),
                            Pollen = pollen.count,
                            Population.mature = population.mature,
                            Population.immature = population.immature,
                            Population.viable.seeds = population.seeds,
                            Suitability = burnin.suitability,
                            Biomass.total = population.biomass,
                            Biomass.mature = population.biomass.mature,
                            Biomass.immature = population.biomass.immature,
                            Mortality.mature = mortality.mature,
                            Mortality.immature = mortality.immature,
                            Driver.A = c(rep(NA, length.burnin.suitability), driver.A.write),
                            Driver.B = c(rep(NA, length.burnin.suitability), driver.B.write),
                            Period = c(rep("Burn-in", length.burnin.suitability), rep("Simulation", length(suitability))))

    #merges with output.list, keyed by taxon label
    output.list[[parameters[i, "label"]]] <- output.df

  } #end of iteration through selected species

  return(output.list)

}
/scratch/gouwar.j/cran-all/cranData/virtualPollen/R/simulatePopulation.R
#' Reinterpolates aggregated simulations into regular time.
#'
#' @description Takes the output of \code{\link{aggregateSimulation}}, and interpolates it into a regular time grid.
#'
#'
#' @usage toRegularTime(
#'   x = NULL,
#'   time.column = "Time",
#'   interpolation.interval = NULL,
#'   columns.to.interpolate = c("Suitability",
#'                              "Driver.A",
#'                              "Pollen")
#'   )
#'
#' @param x list of dataframes (generally the output of \code{\link{aggregateSimulation}}) or single dataframe with irregular time series.
#' @param time.column character string, default value is "Time".
#' @param interpolation.interval integer, in years, time length encompassed by each sample.
#' @param columns.to.interpolate character string or character vector, columns of simulation output to be interpolated. Any subset of: "Pollen", "Population.mature", "Population.immature", "Population.viable.seeds", "Suitability", "Biomass.total", "Biomass.mature", "Biomass.immature", "Mortality.mature", "Mortality.immature", "Driver.A", "Driver.B".
#'
#' @details This function fits a \code{\link{loess}} model of the form \code{y ~ x}, where \code{y} is any column given by \code{columns.to.interpolate} and \code{x} is the column given by the \code{time.column} argument. The model is used to interpolate column \code{y} on a regular time series of intervals equal to \code{interpolation.interval}. If \code{x} is a matrix-like list returned by \code{\link{aggregateSimulation}} (on results of \code{\link{simulateAccumulationRate}} and \code{\link{simulatePopulation}}), the first column of the matrix will already have a regular time column, and therefore nothing will be done with this column of the list.
#'
#' @author Blas M. Benito <[email protected]>
#'
#' @return If \code{x} is a list of dataframes, the function returns a list with the same structure as the input list. If \code{x} is a dataframe, the function returns a dataframe. In any case, output dataframes have the columns "Time" (now regular), and any column listed in \code{columns.to.interpolate}. \strong{Important}: as in the input data, the \code{time} column of the output data has lower time for oldest samples and higher time for newest samples.
#'
#' @seealso \code{\link{simulateAccumulationRate}}, \code{\link{aggregateSimulation}}
#'
#' @examples
#'
#'\dontrun{
#'#getting example data
#'data(simulation)
#'data(accumulationRate)
#'
#'#aggregating first simulation outcome
#'sim.output.aggregated <- aggregateSimulation(
#'  simulation.output = simulation[1],
#'  accumulation.rate = accumulationRate,
#'  sampling.intervals = c(2,6))
#'
#'#to regular time
#'sim.output.aggregated <- toRegularTime(
#'  x=sim.output.aggregated,
#'  time.column ="Time",
#'  interpolation.interval = 10,
#'  columns.to.interpolate = c("Suitability", "Pollen")
#')
#'
#'#comparing simulations
#'par(mfrow = c(3,1))
#'#notice the subsetting of the given column of the input list
#'plot(sim.output.aggregated[[1,1]]$Time,
#'    sim.output.aggregated[[1,1]]$Pollen,
#'    type = "l",
#'    xlim = c(500, 1000),
#'    main = "Annual"
#')
#'plot(sim.output.aggregated[[1,2]]$Time,
#'    sim.output.aggregated[[1,2]]$Pollen,
#'    type = "l",
#'    xlim = c(500, 1000),
#'    main = "2cm"
#')
#'plot(sim.output.aggregated[[1,3]]$Time,
#'    sim.output.aggregated[[1,3]]$Pollen,
#'    type = "l",
#'    xlim = c(500, 1000),
#'    main = "6cm"
#')
#'
#'#check differences in nrow
#'nrow(sim.output.aggregated[[1,1]]) #original data
#'nrow(sim.output.aggregated[[1,2]]) #2cm
#'nrow(sim.output.aggregated[[1,3]]) #6cm intervals
#'}
#'
#' @export
toRegularTime <- function(x = NULL,
                          time.column = "Time",
                          interpolation.interval = NULL,
                          columns.to.interpolate = c("Suitability",
                                                     "Driver.A",
                                                     "Pollen")
                          ){

  #helper: interpolates ONE simulation dataframe into a regular time grid
  .toRegularTime.df <- function(temp){

    #only the simulation period is interpolated (burn-in samples are dropped)
    temp <- temp[temp$Period == "Simulation", ]

    #time extremes of the regular grid
    min.time <- 0
    max.time <- max(temp[, time.column])

    #reference time to interpolate into
    reference.time <- round(seq(min.time, max.time, by = interpolation.interval), 0)

    #empty dataframe to store the interpolated columns
    temp.interpolated <- data.frame(time = reference.time)
    names(temp.interpolated) <- time.column

    #iterating through target columns
    for(column.to.interpolate in columns.to.interpolate){

      #non-numeric columns cannot be interpolated
      if(!is.numeric(temp[, column.to.interpolate])){
        next
      }

      #loess model formula: column ~ time
      interpolation.formula <- as.formula(paste(column.to.interpolate, "~", time.column, sep = " "))

      #iteration through decreasing span values until the fitted values reach
      #a correlation of 0.9985 with the data (a correlation equal to 1 may
      #throw errors)
      span.values <- seq(50/nrow(temp), 5/nrow(temp), by = -0.0005)
      for(span in span.values){

        interpolation.function <- loess(
          interpolation.formula,
          data = temp,
          span = span,
          control = loess.control(surface = "direct")
          )

        #stop decreasing the span as soon as the fit is good enough
        if(cor(interpolation.function$fitted, temp[, column.to.interpolate]) >= 0.9985){break}

      }

      #predicting the column over the regular time grid
      interpolation.result <- predict(
        interpolation.function,
        newdata = reference.time,
        se = FALSE
        )

      #constraining the interpolation result to the range of the observed data
      interpolation.range <- range(temp[, column.to.interpolate])
      interpolation.result[interpolation.result < interpolation.range[1]] <- interpolation.range[1]
      interpolation.result[interpolation.result > interpolation.range[2]] <- interpolation.range[2]

      #storing the interpolated column
      temp.interpolated[, column.to.interpolate] <- interpolation.result

    }#end of iteration through columns

    temp.interpolated$Period <- "Simulation"

    temp.interpolated

  }

  #single dataframe: interpolate and return directly
  #NOTE: this check must come BEFORE is.list(), because a data.frame is also
  #a list; with the former check order, x[[1, 1]] extracted a single cell of
  #the dataframe instead of the dataframe itself, breaking this input path
  if(is.data.frame(x) == TRUE){
    return(.toRegularTime.df(x))
  }

  #matrix-like list (output of aggregateSimulation): interpolate every cell
  if(is.list(x) == TRUE){
    for(x.row in seq_len(dim(x)[1])){
      for(x.column in seq_len(dim(x)[2])){
        x[[x.row, x.column]] <- .toRegularTime.df(x[[x.row, x.column]])
      }
    }
  }

  return(x)

}
/scratch/gouwar.j/cran-all/cranData/virtualPollen/R/toRegularTime.R
#' Driver A
#'
#' A vector of 5000 values (years) between 0 and 100 generated by \code{\link{simulateDriver}} with a temporal autocorrelation significant for 600 years. It is meant to be used as input for \code{\link{simulatePopulation}}.
#' @author Blas M. Benito <[email protected]>
#' @docType data
#' @keywords datasets
#' @name driverA
#' @usage data(driverA)
#' @format Numeric vector of length 5000.
#' @seealso \code{\link{simulateDriver}}
"driverA"

#' Driver B
#'
#' A vector of 5000 values (years) between 0 and 100 generated by \code{\link{simulateDriver}} with a temporal autocorrelation significant for 600 years. It is meant to be used as input for \code{\link{simulatePopulation}}.
#' @author Blas M. Benito <[email protected]>
#' @docType data
#' @keywords datasets
#' @name driverB
#' @usage data(driverB)
#' @format Numeric vector of length 5000.
#' @seealso \code{\link{simulateDriver}}
"driverB"

#' Drivers with different temporal autocorrelation.
#'
#' A dataframe with 20000 rows and 4 columns (years) containing two drivers (A and B) generated by \code{\link{simulateDriver}} with different temporal autocorrelations (200 and 600). Each driver represents a period of 5000 years. This dataset is meant to be used as input for \code{\link{simulatePopulation}}.
#'
#' \itemize{
#'   \item \emph{time} integer, represents time from 0 to 10000, \strong{where 0 is the oldest sample, and 10000 is the newest one (opposite to the general interpretation of age in palaeoecology!)}.
#'   \item \emph{driver} character, values are \code{A} and \code{B}
#'   \item \emph{autocorrelation.length} numeric, values are 200, 600, and 1800.
#'   \item \emph{value} numeric, value of the driver for the given \code{time}, \emph{driver}, and \emph{autocorrelation.length}.
#' }
#'
#' @author Blas M. Benito <[email protected]>
#' @docType data
#' @keywords datasets
#' @name drivers
#' @usage data(drivers)
#' @format Dataframe with 4 columns and 20000 rows.
#' @seealso \code{\link{simulateDriver}}
"drivers"

#' Parameters of 4 virtual taxa.
#'
#' A dataframe with the parameters of 4 virtual taxa. This dataset was generated by \code{\link{parametersDataframe}} and \code{\link{fixParametersTypes}}. It is meant to be used as input for \code{\link{simulatePopulation}}. Its columns are:
#'
#' \itemize{
#'   \item \emph{label}: to store names (character string) of the virtual taxa.
#'   \item \emph{maximum.age}: integer, maximum possible age of the individuals in years.
#'   \item \emph{reproductive.age}: integer, age of sexual maturity in years.
#'   \item \emph{fecundity}: integer, number of maximum viable seeds produced by a mature individual under fully suitable conditions.
#'   \item \emph{growth.rate}: numeric, parameter of the logistic growth function.
#'   \item \emph{pollen.control}: numeric in the interval [0, 1]. If 0, pollen productivity depends on environmental suitability only. The larger the number, biomass takes over environmental suitability in determining pollen productivity.
#'   \item \emph{maximum.biomass}: integer, maximum biomass of the individuals.
#'   \item \emph{carrying.capacity}: integer, maximum sum of biomass of the individuals. Very large carrying capacities plus a low maximum.biomass generates too many individuals for the simulation to remain efficient. Try to set carrying.capacity and maximum.biomass so that carrying.capacity divided by maximum.biomass returns a number lower than 1000 (and even better if it is closer to 100).
#'   \item \emph{driver.A.weight}: numeric in the interval [0, 1], represents the relative influence of the driver on environmental suitability.
#'   \item \emph{driver.B.weight}: numeric in the interval [0, 1], represents the relative influence of the driver on environmental suitability. The sum of weights of drivers A and B should be 1.
#'   \item \emph{niche.A.mean}: numeric, in the same units as driver A. It is the mean of the normal function defining the response of the virtual taxa to driver A.
#'   \item \emph{niche.A.sd}: numeric, in the same units as driver A. It is the standard deviation of the normal function defining the response of the virtual taxa to driver A.
#'   \item \emph{niche.B.mean}: as above, but for driver B.
#'   \item \emph{niche.B.sd}: as above, but for driver B.
#'   \item \emph{autocorrelation.length.A}: numeric, only useful if several drivers generated with different autocorrelation lengths are available (and identified by the column \code{autocorrelation.length}) in the \code{drivers} argument provided to the \code{\link{simulatePopulation}} function.
#'   \item \emph{autocorrelation.length.B}: same as above.
#' }
#'
#' @author Blas M. Benito <[email protected]>
#' @docType data
#' @keywords datasets
#' @name parameters
#' @usage data(parameters)
#' @format Dataframe with 16 columns and 4 rows.
#' @seealso \code{\link{parametersCheck}}, \code{\link{parametersDataframe}}, \code{\link{simulatePopulation}}
"parameters"

#' List with simulation outputs for all virtual taxa in \code{\link{parameters}}.
#'
#' A list of dataframes with 4 slots, output of \code{\link{simulatePopulation}}, taking \code{\link{parameters}} and \code{\link{drivers}} as inputs. Each dataframe in the list has the following columns:
#'
#' \itemize{
#'   \item \emph{Time}: integer, ages in years. Negative ages indicate the burn-in period.
#'   \item \emph{Pollen}: numeric, pollen counts
#'   \item \emph{Population.mature}: numeric, number of mature individuals.
#'   \item \emph{Population.immature}: numeric, number of immature individuals.
#'   \item \emph{Population.viable.seeds}: numeric, number of viable seeds generated each year.
#'   \item \emph{Suitability}: numeric, environmental suitability computed from the driver by the normal function/s defining the taxon niche.
#'   \item \emph{Biomass.total}: numeric, overall biomass of the population.
#'   \item \emph{Biomass.mature}: numeric, sum of biomass of mature individuals.
#'   \item \emph{Biomass.immature}: numeric, sum of biomass of immature individuals.
#'   \item \emph{Mortality.mature}: numeric, number of mature individuals dead each year.
#'   \item \emph{Mortality.immature}: numeric, same as above for immature individuals.
#'   \item \emph{Driver.A}: numeric, values of driver A.
#'   \item \emph{Driver.B}: numeric, values of driver B, if available, and NA otherwise.
#'   \item \emph{Period}: qualitative, with value "Burn-in" for burn-in period, and "Simulation" otherwise.
#' }
#'
#' @author Blas M. Benito <[email protected]>
#' @docType data
#' @keywords datasets
#' @name simulation
#' @usage data(simulation)
#' @format List with 4 dataframes with outputs of \code{\link{simulatePopulation}}.
#' @seealso \code{\link{simulatePopulation}}, \code{\link{plotSimulation}}
"simulation"

#' Accumulation rate
#'
#' Dataframe, output of \code{\link{simulateAccumulationRate}}.
#' @author Blas M. Benito <[email protected]>
#' @docType data
#' @keywords datasets
#' @name accumulationRate
#' @usage data(accumulationRate)
#' @format Dataframe with 5000 rows and the following columns:
#'
#' @return A dataframe with the following columns.
#' \itemize{
#'   \item \emph{time}: numeric, time or age of the given case. \strong{Important}: the \code{time} column goes from "left to right", meaning that oldest samples have the lowest values of age/time, and vice versa.
#'   \item \emph{accumulation.rate}: numeric, in years per centimetre, simulated accumulation rate.
#'   \item \emph{grouping}: integer, grouping variable to aggregate together (with \code{\link{aggregateSimulation}}) samples deposited in the same centimetre according \emph{accumulation.rate}.
#' }
#' @seealso \code{\link{simulateAccumulationRate}}, \code{\link{aggregateSimulation}}
"accumulationRate"

#package-level import directives (roxygen2 writes these into NAMESPACE)
#' @import ggplot2 viridis tidyr plyr stats
NULL

#' @importFrom mgcv gam
NULL

#' @importFrom grDevices dev.off pdf
NULL

#declares variables used in non-standard-evaluation contexts (ggplot2
#aesthetics and similar) so R CMD check does not flag them as undefined
#globals; NOTE(review): "Time" appears twice and "growthrate" looks like a
#misspelling of "growth.rate" (also listed) — harmless, but worth confirming
#' @import utils
utils::globalVariables(c("Variable", "Value", "Time", "Species", "Suitability", "Driver.density.y", "Driver.weights", "Fecundity", "Age", "Biomass", "Reproductive.age", "ci.max", "ci.min", "Time", "Color", "error", "value", "..scaled..", "driver.A.weight", "maximum.biomass", "growthrate", "carrying.capacity", "fecundity", "pollen.control", "growth.rate"))
/scratch/gouwar.j/cran-all/cranData/virtualPollen/R/virtualPollen.R
## ----setup, warning=FALSE, message=FALSE, echo=FALSE-------------------------- #checking if required packages are installed, and installing them if not list.of.packages <- c("ggplot2", "cowplot", "knitr", "viridis", "tidyr", "formatR", "grid", "devtools", "magrittr", "kableExtra", "viridis") new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])] if(length(new.packages)) install.packages(new.packages, dep=TRUE) #install virtualPollen if not installed if(!("virtualPollen" %in% installed.packages())){ library(devtools) install_github("blasbenito/virtualPollen") } # source("ecological_memory_functions.R") library(virtualPollen) #to simulate pollen curves library(ggplot2) #plotting library library(cowplot) #plotting library library(knitr) #report generation in R library(viridis) #pretty plotting colors library(grid) library(tidyr) library(formatR) library(kableExtra) #to fit tables to pdf page size library(magrittr) #kableExtra requires pipes options(scipen=999) #trying to line-wrap code in pdf output #from https://github.com/yihui/knitr-examples/blob/master/077-wrap-output.Rmd knitr::opts_chunk$set(echo = TRUE, fig.pos= "h") opts_chunk$set(tidy.opts=list(width.cutoff=80), tidy=FALSE) ## ---- size="small"------------------------------------------------------------ #sets a state for the generator of pseudo-random numbers set.seed(1) #defines the variable "time" time <- 1:10000 #samples (-1, 0, 1) with replacement moves <- sample(x=c(-1, 0, 1), size=length(time), replace=TRUE) #computes the cumulative sum of moves random.walk <- cumsum(moves) ## ---- echo=FALSE , fig.height=2.5, fig.width=9, fig.cap="Random walk (a) and its temporal autocorrelation (b)."---- p1 <- ggplot(data=data.frame(Time=time, Value=random.walk), aes(x=Time, y=Value)) + geom_line(color="gray40") + ggtitle("Random walk") + xlab("Time (years)") p2 <- ggplot(data=acfToDf(random.walk, 5000, 50), aes(x=lag, y=acf)) + geom_hline(aes(yintercept = 0)) + 
geom_hline(aes(yintercept = ci.max), color="red", linetype="dashed") + geom_hline(aes(yintercept = ci.min), color="red", linetype="dashed") + geom_segment(mapping = aes(xend = lag, yend = 0)) + ggtitle("Temporal autocorrelation") + xlab("Lag (years)") + ylab("Pearson correlation") plot_grid(p1, p2, labels = c("a", "b"), align = "h") ## ---- size="small"------------------------------------------------------------ #setting a fixed seed for the generator of pseudo-random numbers set.seed(1) #generating 5 random numbers in the range [0, 1] a <- runif(5) #applying a convolution filter of length 2 and value 1 b <- filter(a, filter=c(1,1), method="convolution", circular=TRUE) ## ---- echo=FALSE , fig.height=2.5, fig.width=9, fig.cap="Original sequence (dashed line) and filtered sequence with filter (solid line)."---- ggplot(data=data.frame(Time=rep(1:length(a), 2), Value=c(a, b), Legend=c(rep("Original (a)", length(a)), rep("Filtered (b)", length(b)))), aes(x=Time, y=Value, group=Legend)) + geom_line(aes(linetype=Legend), color="gray40", size=1) + theme(legend.position="right") + ggtitle("Effect of a convolution filter") ## ---- results="asis", echo=FALSE---------------------------------------------- temp.table <- data.frame(row=1:5, a=round(a, 2), b=round(b, 2)) temp.table$operation <- c("b1 = a1 x f2 + a2 x f1", "b2 = a2 x f2 + a3 x f1", "b3 = a3 x f2 + a4 x f1" , "b4 = a4 x f2 + a5 x f1", "b5 = a5 x f2 + a6 x f1") kable(temp.table, caption = "Original sequence (a), filtered sequence (b), and filtering operations. 
Numbers beside letters a and b represent row numbers, while f1 and f2 represent the values of the convolution filter (both equal to 1 in this case).", booktabs = T) %>% kable_styling(latex_options = c("hold_position", "striped")) ## ---- size="small"------------------------------------------------------------ moves.10 <- filter(moves, filter=rep(1, 10), circular=TRUE) moves.100 <- filter(moves, filter=rep(1, 100), circular=TRUE) ## ---- fig.height=5, fig.width=9, warning=TRUE, fig.cap="Sequences filtered with different filter lengths: a) Sequence with autocorrelation length equal to 10; b) Temporal autocorrelation of a); c) Sequence with autocorrelation length equal to 100: d) Temporal autocorrelation of c).", echo=FALSE---- p4 <- ggplot(data=data.frame(Time=time, Value=as.vector(moves.10)), aes(x=Time, y=Value)) + geom_line(color="gray40", size=0.5) + ggtitle("Time series") + xlab("") + theme(axis.line.x=element_blank(), axis.ticks.x = element_blank(), axis.text.x = element_blank(), plot.margin=unit(c(0,0,0,0), "cm")) p5 <- ggplot(data=acfToDf(moves.10, 200, 50), aes(x=lag, y=acf)) + geom_hline(aes(yintercept = 0)) + geom_hline(aes(yintercept = ci.max), color="red", linetype="dashed") + geom_hline(aes(yintercept = ci.min), color="red", linetype="dashed") + geom_segment(mapping = aes(xend = lag, yend = 0)) + ggtitle("Temporal autocorrelation") + xlab("") + ylab("R-squared") + theme(axis.line.x=element_blank(), axis.ticks.x = element_blank(), axis.text.x = element_blank(), plot.margin=unit(c(0,0,0,0), "cm")) p6 <- ggplot(data=data.frame(Time=time, Value=as.vector(moves.100)), aes(x=Time, y=Value)) + geom_line(color="gray40", size=0.5) + theme(plot.margin=unit(c(0,0,0,0), "cm")) p7 <- ggplot(data=acfToDf(moves.100, 200, 50), aes(x=lag, y=acf)) + geom_hline(aes(yintercept = 0)) + geom_hline(aes(yintercept = ci.max), color="red", linetype="dashed") + geom_hline(aes(yintercept = ci.min), color="red", linetype="dashed") + geom_segment(mapping = aes(xend = lag, yend = 0)) 
+ xlab("Lag (years)") + ylab("R-squared") + theme(plot.margin=unit(c(0,0,0,0), "cm")) plot_grid(p4, p5, p6, p7, labels = c("a", "b", "c", "d"), align = "v", nrow=2) ## ---- size="small"------------------------------------------------------------ moves.5000 <- filter(moves, filter=rep(1, 5000), circular=TRUE) ## ---- fig.height=2.5, fig.width=9, fig.cap="Sequence moves.5000 (a) and its temporal autocorrelation (b). In this case there is a large deviation between the required temporal autocorrelation (5000) and the outcome (2000).", echo=FALSE---- p8 <- ggplot(data=data.frame(Time=time, Value=as.vector(moves.5000)), aes(x=Time, y=Value)) + geom_line(color="gray40", size=0.5) + ggtitle("Time series") + theme(plot.margin=unit(c(0,0,0,0), "cm")) p9 <- ggplot(data=acfToDf(moves.5000, 5000, 50), aes(x=lag, y=acf)) + geom_hline(aes(yintercept = 0)) + geom_hline(aes(yintercept = ci.max), color="red", linetype="dashed") + geom_hline(aes(yintercept = ci.min), color="red", linetype="dashed") + geom_segment(mapping = aes(xend = lag, yend = 0)) + ggtitle("Temporal autocorrelation") + theme(plot.margin=unit(c(0,0,0,0), "cm")) + ylab("R-square") plot_grid(p8, p9, labels = c("a", "b"), align = "h") ## ---- fig.height=4, fig.width=9, fig.cap="Virtual driver and its temporal autocorrelation. 
Note that virtual driver B will not be used hereafter to simplify the explanation on the model dynamics."---- driver <- simulateDriverS( random.seeds=c(60, 120), time=1:10000, autocorrelation.lengths = 600, output.min=c(0, 0), output.max=c(100, 100), driver.names=c("A", "B"), filename=NULL) ## ---- echo=FALSE-------------------------------------------------------------- parameters <- parametersDataframe(rows=1) parameters[1,] <- c("Species 1", 50, 20, 2, 0.2, 0, 100, 10000, 1, 0, 50, 10, 0, 0, 600, 0) parameters[, 2:ncol(parameters)] <- sapply(parameters[, 2:ncol(parameters)], as.numeric) parameters.t <- data.frame(t(parameters)) parameters.t <- data.frame(parameters.t[-1,]) colnames(parameters.t) <- paste(parameters$label, sep="") parameters.t$Parameter <- rownames(parameters.t) rownames(parameters.t) <- NULL parameters.t <- parameters.t[, c(ncol(parameters.t),1:(ncol(parameters.t)-1))] #removing last two lines parameters.t <- parameters.t[c(1:8, 10:11), ] kable(parameters.t, caption="Parameters of a virtual species. Note that driver.B is ommited in order to simplify the explanation of the model.", booktabs = T) %>% kable_styling(latex_options = c("hold_position", "striped")) ## ---- size="small"------------------------------------------------------------ niche.A <- dnorm(x=0:100, mean=50, sd=10) ## ---- fig.height=2.5, fig.width=6, fig.cap="Ecological niche of the virtual species (blue) against the density (relative availability of values over time) of the driver (gray). 
Both densities have been scaled in the range [0, 1].", echo=FALSE---- #scaling suitability between 0 and 1 niche.A <- niche.A / max(niche.A) #getting the density of the driver driver.A = driver[driver$driver=="A", "value"] density.driver.A = density(driver.A, from=min(driver.A), to=max(driver.A), n=101, bw=max(driver.A)/100) density.driver.A.y = (density.driver.A$y - min(density.driver.A$y)) / (max(density.driver.A$y) - min(density.driver.A$y)) driver.A.range = seq(min(driver.A), max(driver.A), length.out = 101) #dataframe for plot plot.df = data.frame(Species=rep(paste(parameters[1, "label"], sep=""), 101), Driver=c(rep("Driver A", 101)), Driver.density.x=c(density.driver.A$x), Driver.density.y=c(density.driver.A.y), Value=driver.A.range, Suitability=niche.A) ggplot(data=plot.df, aes(x=Value, y=Suitability, group=Species)) + geom_ribbon(data=plot.df, aes(ymin=0, ymax=Driver.density.y), color="gray80", fill="gray80", alpha=0.5) + geom_ribbon(data=plot.df, aes(ymin=0, ymax=Suitability), alpha=0.8, colour=NA, fill="#4572A9") + geom_line(data=plot.df, aes(x=Value, y=Driver.density.y), color="gray80", alpha=0.5) + xlab("Driver values") + ylab("Environmental suitability") + theme(text = element_text(size=12), strip.background = element_rect(fill=NA), panel.spacing = unit(1, "lines")) ## ---- fig.height=5, fig.width=9, fig.cap="Driver and environmental suitability of the virtual taxa. 
Burn-in period is highlighted by a gray box in the Environmental suitability panel.", echo=FALSE---- #computing density density.A <- dnorm(driver[driver$driver == "A", "value"], mean=50, sd=10) #scaling to [0, 1] suitability <- density.A / max(density.A) burnin.suitability <- jitter(c(rep(1, parameters$maximum.age*5), seq(1, suitability[1], length.out = parameters$maximum.age*5)), amount=max(suitability)/100) burnin.suitability[burnin.suitability < 0]<-0 burnin.suitability[burnin.suitability > 1]<-1 length.burnin.suitability<-length(burnin.suitability) burnin.suitability <- c(burnin.suitability, suitability) plot.df4 <- data.frame(Time=c(-length.burnin.suitability:-1, 1:(length(suitability))), Suitability=burnin.suitability, Period=c(rep("Burn-in", length.burnin.suitability), rep("Simulation", length(suitability)))) p1 <- ggplot(data=driver[driver$driver == "A", ], aes(x=time, y=value)) + geom_line(size=0.5, color="gray40") + ggtitle("Driver and environmental suitability") + xlab("") + ylab("Driver") + coord_cartesian(xlim=c(-500, 10000)) + theme(plot.margin = unit(c(0,0,-0.5,0), "cm"), axis.line.x=element_blank(), axis.ticks.x = element_blank(), axis.text.x = element_blank()) p2 <- ggplot(data=plot.df4, aes(x=Time, y=Suitability)) + geom_rect(aes(xmin=min(plot.df4$Time), xmax=0, ymin=0, ymax=Inf), inherit.aes=FALSE, fill="gray90") + geom_line(size=0.5, color="#4572A9") + xlab("Time (years)") + ylab("Suitability") + coord_cartesian(xlim=c(-500, 10000))+ theme(plot.margin = unit(c(0,0,0,0), "cm")) plot_grid(p1, p2, ncol = 1, align = "v", rel_heights = c(1.1, 1)) ## ---- fig.height=3, fig.width=6, fig.cap="Biomass vs. 
age curves resulting from different growth rates for a 400 years life-span.", echo=FALSE---- #objects to store results growth.rate.vector <- biomass.vector <- age.vector <- vector() #params age <- 0:400 maximum.biomass <- 100 growth.rates <- c(0.025, 0.05, 0.1, 0.2, 0.4, 0.8) #iterating through growth rates for(growth.rate in growth.rates){ biomass.vector <- c(biomass.vector,maximum.biomass / (1 + maximum.biomass * exp(- growth.rate * age))) growth.rate.vector <- c(growth.rate.vector, rep(growth.rate, length(age))) age.vector <- c(age.vector, age) } plot.df3 <- data.frame(growth.rate=growth.rate.vector, age=age.vector, biomass=biomass.vector) plot.df3$growth.rate <- as.factor(plot.df3$growth.rate) p3 <- ggplot(data=plot.df3, aes(x=age, y=biomass, group=growth.rate, color=growth.rate)) + geom_line(size=1) + scale_color_viridis(option="D", discrete=TRUE, direction=-1) + ggtitle("Biomass gain under different growth rates") + ylab("Biomass (relative)") + xlab("Age (years)") p3 rm(age, age.vector, biomass.vector, growth.rate, growth.rate.vector, growth.rates, p3, plot.df3) ## ---- fig.height=3, fig.width=6, fig.cap="Risk curve defining the probability of removal of a given individual as a function of its fractional age when maximum carrying capacity is reached.", echo=FALSE---- temp.df <- data.frame(Pm=NA, Age=0:100) temp.df$Pm <- 1 - sqrt(temp.df$Age/100) temp.df$Age <- temp.df$Age/100 ggplot(data=temp.df, aes(x=Age, y=Pm, color=Pm)) + geom_line(size=1) + scale_color_viridis(option="D", direction=-1) + ggtitle("Probability of mortality when carrying capacity is reached") + ylab("Removal probability") + xlab("Age of the individual (as proportion to the maximum age)") + theme(legend.position = "none") rm(temp.df) ## ---- eval=FALSE, size="small"------------------------------------------------ # # #parameters (1st line in dataframe "parameters") # maximum.age <- parameters[1, "maximum.age"] # reproductive.age <- parameters[1, "reproductive.age"] # growth.rate <- 
parameters[1, "growth.rate"] # carrying.capacity <- parameters[1, "carrying.capacity"] # fecundity <- parameters[1, "fecundity"] # # #reproductive age to proportion # reproductive.age <- reproductive.age / maximum.age # # #years scaled taking maximum.age as reference # scaled.year <- 1/maximum.age # # #vector to store pollen counts # pollen.count <- vector() # # #starting population # population <- sample(seq(0, 1, by=scaled.year), # 100, # replace=TRUE) # # #iterating through suitability (once per year) # #------------------------------------ # for(suitability.i in suitability){ # # #AGING ----------------------------------------------- # population <- population + scaled.year # # #SENESCENCE ------------------------------------------ # #1 is the maximum age of ages expressed as proportions # population <- population[population < 1] # # #LOCAL EXTINCTION AND RECOLONIZATION ----------------- # if (length(population) == 0){ # # #local extinction, replaces population with a seedbank # population <- rep(0, floor(100 * suitability.i)) # # #adds 0 to pollen.count # pollen.count <- c(pollen.count, 0) # # #jumps to next iteration # next # } # # #PLANT GROWTH --------------------------------------- # #biomass of every individual # biomass <- maximum.biomass / # (1 + maximum.biomass * # exp(- (growth.rate * suitability.i) * # (population * maximum.age) # ) # ) # # #SELF-THINNING -------------------------------------- # #carrying capacity reached # while(sum(biomass) > carrying.capacity){ # # #removes a random individual based on risk curve # individual.to.remove <- sample( # x = length(population), # size = 1, # replace = TRUE, # prob = 1 - sqrt(population) #risk curve # ) # # #removing individuals from population and biomass # population <- population[-individual.to.remove] # biomass <- biomass[-individual.to.remove] # # }#end of while # # #REPRODUCTION -------------------------------------- # #identifyies adult individuals # adults <- population > reproductive.age # # 
#seeds (vector of 0s) # #fractional biomass of adults * fecundity * suitability # seeds <- rep(0, floor(sum((biomass[adults]/maximum.biomass) * # fecundity) * suitability.i)) # # #adding seeds to the population # population <- c(population, seeds) # # #POLLEN OUTPUT ------------------------------------- # #biomass of adults multiplied by suitability # pollen.count <- c(pollen.count, # sum(biomass[adults]) * suitability.i) # # } #end of loop through suitability values # ## ---- size="small", fig.height=6.5, fig.width=9, message=TRUE, warning=TRUE, error=TRUE, results="hide", fig.cap="Simulation outcome. Green shades represent different age groups (seedlings, saplings, and adults).", warning=FALSE, message=FALSE---- #simulate population based on parameters simulation <- simulatePopulation(parameters=parameters[1, ], drivers=driver) #plotting simulation output plotSimulation(simulation.output=simulation, burnin=FALSE, panels=c("Driver A", "Suitability", "Population", "Pollen"), plot.title="", text.size=12, line.size=0.4) ## ---- message=TRUE, warning=TRUE, echo=FALSE---------------------------------- parameters[2,] <- c("Species 2", 50, 20, 4, 0.3, 0, 100, 10000, 1, 0, 50, 15, 0, 0, 600, 600) parameters[3,] <- c("Species 3", 50, 20, 6, 0.4, 0.5, 100, 10000, 1, 0, 50, 20, 0, 0, 600, 600) parameters[, 2:ncol(parameters)] <- sapply(parameters[, 2:ncol(parameters)], as.numeric) parameters.t <- data.frame(t(parameters)) parameters.t <- parameters.t[c(2:9, 11:12),] names(parameters.t) <- paste(parameters$label, sep="") parameters.t$Parameter <- rownames(parameters.t) rownames(parameters.t) <- NULL parameters.t <- parameters.t[, c(ncol(parameters.t),1:(ncol(parameters.t)-1))] kable(parameters.t, caption="Parameters of the three virtual species.", booktabs = T) %>% kable_styling(latex_options = c("hold_position", "striped")) ## ---- fig.height=5, fig.width=9, fig.cap="Comparison of the pollen abundance and environmental suitability (same in all cases) for the three virtual 
species shown in Table 2 within the period 5600-6400. Species 2 has a higher fecundity than Species 1 (1 vs 10)", message=FALSE---- #simulating species 2 and 3 of the dataframe parameters simulation.2 <- simulatePopulation(parameters=parameters, species=c(2,3), drivers=driver) #adding the results to the previous simulation simulation <- c(simulation, simulation.2) rm(simulation.2) #plotting the comparison for the time interval between 4000 and 5000.- compareSimulations(simulation.output=simulation, species = "all", columns = c("Suitability", "Pollen"), time.zoom = c(5600, 6400)) ## ---- echo=FALSE-------------------------------------------------------------- parameters.test <- parametersDataframe(rows=1) parameters.test[1,] <- c("Test 1", 4, 1, 0.55, 2, 0, 1, 30, 0.5, 0.5, 50, 10, 50, 10, 100, 100) parameters.test[2,] <- c("Test 2", 3, 1, 0.5, 2, 0, 1, 30, 0.5, 0.5, 50, 10, 50, 10, 100, 100) parameters.test[, 2:ncol(parameters.test)] <- sapply(parameters.test[, 2:ncol(parameters.test)], as.numeric) parameters.t <- data.frame(t(parameters.test)) parameters.t <- data.frame(parameters.t[-1,]) names(parameters.t) <- c("Test 1", "Test 2") parameters.t$Parameter <- rownames(parameters.t) rownames(parameters.t) <- NULL parameters.t <- parameters.t[, c(ncol(parameters.t),1:(ncol(parameters.t)-1))] parameters.t <- parameters.t[1:(nrow(parameters.t)-2),] kable(parameters.t, caption="Parameters of virtual taxa used to test model limits.") rm(parameters.t) ## ---- message=FALSE----------------------------------------------------------- simulation.test.1 <- simulatePopulation( parameters=parameters.test, driver.A=jitter(rep(50, 500), amount=4) ) ## ---- fig.height=3, fig.width=9, fig.cap="Pollen output of virtual taxa Test 1 and Test 2 for a 200 years time-window."---- compareSimulations(simulation.output=simulation.test.1, columns="Pollen", time.zoom = c(0, 200)) ## ---- echo=FALSE-------------------------------------------------------------- parameters.test[3,] <- c("Test 3", 
1000, 100, 0.5, 0.05, 0, 100, 10000, 0.5, 0.5, 50, 10, 50, 10, 100, 100) parameters.test[4,] <- c("Test 4", 1000, 500, 0.5, 0.05, 0, 100, 10000, 0.5, 0.5, 50, 10, 50, 10, 100, 100) parameters.test[5,] <- c("Test 5", 1000, 900, 0.5, 0.05,0, 100, 10000, 0.5, 0.5, 50, 10, 50, 10, 100, 100) parameters.test[, 2:ncol(parameters.test)] <- sapply(parameters.test[, 2:ncol(parameters.test)], as.numeric) parameters.t <- data.frame(t(parameters.test[3:5,])) parameters.t <- data.frame(parameters.t[-1,]) names(parameters.t) <- c("Test 3", "Test 4", "Test 5") parameters.t$Parameter <- rownames(parameters.t) rownames(parameters.t) <- NULL parameters.t <- parameters.t[, c(ncol(parameters.t),1:(ncol(parameters.t)-1))] parameters.t <- parameters.t[1:(nrow(parameters.t)-2),] kable(parameters.t, caption="Parameters of virtual taxa used to test model limits.") rm(parameters.t) ## ---- message=FALSE----------------------------------------------------------- simulation.test.2 <- simulatePopulation( parameters=parameters.test, species=c(3:5), driver.A=jitter(rep(50, 2000), amount=4) ) ## ---- fig.height=3, fig.width=9, fig.cap="Pollen output of virtual taxa Test 3 to Test 5 for an 800 years time-window."---- compareSimulations(simulation.output=simulation.test.2, columns="Pollen", time.zoom = c(0, 800)) ## ---- fig.height=3, fig.width=9, fig.cap="Virtual sediment accumulation rate."---- accumulation.rate <- simulateAccumulationRate(seed=140, time=1:10000, output.min=1, output.max=50) ## ---- echo=FALSE-------------------------------------------------------------- kable(accumulation.rate[1:20, ], caption="Dataframe resulting from the function to generate virtual accumulation rates. 
Each group in the grouping column has as many elements as the accumulation.rate value of the given group indicates.", booktabs = T) %>% kable_styling(latex_options = c("hold_position", "striped")) ## ---- size="small"------------------------------------------------------------ simulation.aggregated <- aggregateSimulation( simulation.output=simulation, accumulation.rate=accumulation.rate, sampling.intervals=c(2, 6, 10) ) ## ---- fig.height=5, fig.width=9, fig.cap="Effect of applying accumulation rate and different sampling depth intervals to a section of the annual data produced by the simulation (represented in the Legend by the label Annual). Note that the 10 cm resampling completely misses the whole high-suitability event in the Pollen panel, and barely registers it in the Suitability panel. Inter-decadal variability shown by the Annual data is completely lost even at 1 cm, the higher sampling resolution.", echo=FALSE---- #checking results temp.list <- simulation.aggregated[1, 1:5] names(temp.list) <- c("Annual", "1 cm", "2 cm", "6 cm", "10 cm") compareSimulations(simulation.output=temp.list, columns = c("Suitability","Pollen"), time.zoom=c(5800, 6200)) ## ---- fig.height=3, fig.width=6, fig.cap="Histogram of the time differences (in years) between consecutive samples for the outcome of aggregateSimulation when resampled at intervals of 2 centimeters on Species 1. 
It clearly shows how the data are not organized in regular time intervals, and therefore are unsuitable for analyses requiring regular time lags.", echo=FALSE---- #getting example data sampled at 2cm intervals simulated.data = simulation.aggregated[[1, 3]] #checking distribution of differences in age between consecutive samples hist(simulated.data[2:nrow(simulated.data),"Time"] - simulated.data[1:(nrow(simulated.data)-1),"Time"], main="Age differences between consecutive samples", xlab="Age difference between consecutive samples", col=viridis(12, begin = 0, end=1)) ## ---- size="small", eval=FALSE------------------------------------------------ # # #getting example data sampled at 2cm intervals # simulated.data = simulation.aggregated[[1, 3]] # # # #span values to be explored # span.values=seq(20/nrow(simulated.data), # 5/nrow(simulated.data), # by=-0.0005) # # # #plotting the optimization process in real time # x11(height=12, width=24) # # #iteration through candidate span values # for(span in span.values){ # # #function to interpolate the data # interpolation.function = loess( # Pollen ~ Time, # data=simulated.data, # span=span, # control=loess.control(surface="direct")) # # #plot model fit # plot(simulated.data$Pollen, type="l", lwd=2) # lines(interpolation.function$fitted, col="red", lwd=2) # # #if correlation equals 0.9985 loop stops # if(cor(interpolation.function$fitted, # simulated.data$Pollen) >= 0.9985){break} # # } # # #gives time to look at result before closing the plot window # Sys.sleep(5) ## ---- warning=FALSE, size="small"--------------------------------------------- simulation.interpolated <- toRegularTime( x=simulation.aggregated, time.column="Time", interpolation.interval=10, columns.to.interpolate=c("Pollen", "Suitability", "Driver.A") ) ## ---- echo=FALSE , fig.height=5, fig.width=9, message=TRUE, error=TRUE, warning=TRUE, fig.cap="Data aggregated using virtual accumulation rate and reinterpolated into a regular time grid of 10 years 
resolution."---- #getting the results for Species 1 temp.list <- simulation.interpolated[1, 1:5] names(temp.list) <- c("Annual", "1 cm", "2 cm", "6 cm", "10 cm") #plotting comparison compareSimulations(simulation.output=temp.list, columns = c("Suitability","Pollen"), time.zoom=c(5800, 6200))
/scratch/gouwar.j/cran-all/cranData/virtualPollen/inst/doc/using_virtualPollen.R
--- title: 'virtualPollen' subtitle: 'Generation of virtual pollen curves' author: "Blas M. Benito" date: "`r Sys.Date()`" fig_width: 6 output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Using virtualPollen} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r setup, warning=FALSE, message=FALSE, echo=FALSE} #checking if required packages are installed, and installing them if not list.of.packages <- c("ggplot2", "cowplot", "knitr", "viridis", "tidyr", "formatR", "grid", "devtools", "magrittr", "kableExtra", "viridis") new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])] if(length(new.packages)) install.packages(new.packages, dep=TRUE) #install virtualPollen if not installed if(!("virtualPollen" %in% installed.packages())){ library(devtools) install_github("blasbenito/virtualPollen") } # source("ecological_memory_functions.R") library(virtualPollen) #to simulate pollen curves library(ggplot2) #plotting library library(cowplot) #plotting library library(knitr) #report generation in R library(viridis) #pretty plotting colors library(grid) library(tidyr) library(formatR) library(kableExtra) #to fit tables to pdf page size library(magrittr) #kableExtra requires pipes options(scipen=999) #trying to line-wrap code in pdf output #from https://github.com/yihui/knitr-examples/blob/master/077-wrap-output.Rmd knitr::opts_chunk$set(echo = TRUE, fig.pos= "h") opts_chunk$set(tidy.opts=list(width.cutoff=80), tidy=FALSE) ``` **Summary** This document describes in detail the methods used to generate a virtual environmental driver with a given temporal autocorrelation, to be used as an input for a population model simulating synthetic pollen curves generated by virtual taxa with different life-traits (life-span and fecundity) and environmental niche features (niche position and breadth). 
We also describe how we generated a virtual sediment accumulation rate to aggregate the results of the population model to mimic taphonomic processes producing real pollen curves, and how we resampled virtual pollen data at different depth intervals. Finally, we present the code used to generate the 16 virtual taxa used for the analyses described in the paper. **IMPORTANT:** An Rmarkdown version of this document can be found at: https://github.com/BlasBenito/EcologicalMemory. \pagebreak #Generating a virtual environmental driver# ###Rationale### To simulate virtual pollen curves with the population model described in section **2** a virtual driver representing changes in environmental conditions is required. This section explains how to generate such a driver as a time series with a given temporal autocorrelation simulating the temporal structure generally shown by observed environmental drivers. ###Generating a random walk### The following steps are required to generate a **random walk** in R: 1. Generate a vector *time* with time values. 2. Re-sample with replacement the set of numbers (-1, 0, 1) as many times as values are in the *time* vector, to produce the vector *moves* (as in *moves of a random walk*). 3. Compute the cumulative sum of *moves*, which generates the random walk. 
```{r, size="small"} #sets a state for the generator of pseudo-random numbers set.seed(1) #defines the variable "time" time <- 1:10000 #samples (-1, 0, 1) with replacement moves <- sample(x=c(-1, 0, 1), size=length(time), replace=TRUE) #computes the cumulative sum of moves random.walk <- cumsum(moves) ``` ```{r, echo=FALSE , fig.height=2.5, fig.width=9, fig.cap="Random walk (a) and its temporal autocorrelation (b)."} p1 <- ggplot(data=data.frame(Time=time, Value=random.walk), aes(x=Time, y=Value)) + geom_line(color="gray40") + ggtitle("Random walk") + xlab("Time (years)") p2 <- ggplot(data=acfToDf(random.walk, 5000, 50), aes(x=lag, y=acf)) + geom_hline(aes(yintercept = 0)) + geom_hline(aes(yintercept = ci.max), color="red", linetype="dashed") + geom_hline(aes(yintercept = ci.min), color="red", linetype="dashed") + geom_segment(mapping = aes(xend = lag, yend = 0)) + ggtitle("Temporal autocorrelation") + xlab("Lag (years)") + ylab("Pearson correlation") plot_grid(p1, p2, labels = c("a", "b"), align = "h") ``` Every time this code is executed with a different number in *set.seed()*, it generates a different random walk with a different temporal autocorrelation, but still, this simple method is not useful to generate a time series with a given temporal autocorrelation. ###Applying a convolution filter to generate a given temporal autocorrelation### Applying a **convolution** filter to a time series allows to generate dependence among sets of consecutive cases. Below we show an example of how it works on a random sequence *a* composed by five numbers in the range [0, 1]. The operations to compute the filtered sequence are shown in **Table 1**. 
```{r, size="small"} #setting a fixed seed for the generator of pseudo-random numbers set.seed(1) #generating 5 random numbers in the range [0, 1] a <- runif(5) #applying a convolution filter of length 2 and value 1 b <- filter(a, filter=c(1,1), method="convolution", circular=TRUE) ``` ```{r , echo=FALSE , fig.height=2.5, fig.width=9, fig.cap="Original sequence (dashed line) and filtered sequence with filter (solid line)."} ggplot(data=data.frame(Time=rep(1:length(a), 2), Value=c(a, b), Legend=c(rep("Original (a)", length(a)), rep("Filtered (b)", length(b)))), aes(x=Time, y=Value, group=Legend)) + geom_line(aes(linetype=Legend), color="gray40", size=1) + theme(legend.position="right") + ggtitle("Effect of a convolution filter") ``` ```{r, results="asis", echo=FALSE} temp.table <- data.frame(row=1:5, a=round(a, 2), b=round(b, 2)) temp.table$operation <- c("b1 = a1 x f2 + a2 x f1", "b2 = a2 x f2 + a3 x f1", "b3 = a3 x f2 + a4 x f1" , "b4 = a4 x f2 + a5 x f1", "b5 = a5 x f2 + a6 x f1") kable(temp.table, caption = "Original sequence (a), filtered sequence (b), and filtering operations. Numbers beside letters a and b represent row numbers, while f1 and f2 represent the values of the convolution filter (both equal to 1 in this case).", booktabs = T) %>% kable_styling(latex_options = c("hold_position", "striped")) ``` The *operation* column in **Table 1** shows how the convolution filter generates a dependence between values located within the length of the filter. This positional dependence creates a temporal autocorrelation pattern with a significant length equal to the length of the filter. The following piece of code demonstrates this by generating two versions of the same *moves* vector used before, one with a length of the significant temporal autocorrelation equal to 10 (defined in the same units of the *time* vector), and another with a length of 100. Results are shown in **Figure 3**. 
```{r, size="small"} moves.10 <- filter(moves, filter=rep(1, 10), circular=TRUE) moves.100 <- filter(moves, filter=rep(1, 100), circular=TRUE) ``` ```{r, fig.height=5, fig.width=9, warning=TRUE, fig.cap="Sequences filtered with different filter lengths: a) Sequence with autocorrelation length equal to 10; b) Temporal autocorrelation of a); c) Sequence with autocorrelation length equal to 100: d) Temporal autocorrelation of c).", echo=FALSE} p4 <- ggplot(data=data.frame(Time=time, Value=as.vector(moves.10)), aes(x=Time, y=Value)) + geom_line(color="gray40", size=0.5) + ggtitle("Time series") + xlab("") + theme(axis.line.x=element_blank(), axis.ticks.x = element_blank(), axis.text.x = element_blank(), plot.margin=unit(c(0,0,0,0), "cm")) p5 <- ggplot(data=acfToDf(moves.10, 200, 50), aes(x=lag, y=acf)) + geom_hline(aes(yintercept = 0)) + geom_hline(aes(yintercept = ci.max), color="red", linetype="dashed") + geom_hline(aes(yintercept = ci.min), color="red", linetype="dashed") + geom_segment(mapping = aes(xend = lag, yend = 0)) + ggtitle("Temporal autocorrelation") + xlab("") + ylab("R-squared") + theme(axis.line.x=element_blank(), axis.ticks.x = element_blank(), axis.text.x = element_blank(), plot.margin=unit(c(0,0,0,0), "cm")) p6 <- ggplot(data=data.frame(Time=time, Value=as.vector(moves.100)), aes(x=Time, y=Value)) + geom_line(color="gray40", size=0.5) + theme(plot.margin=unit(c(0,0,0,0), "cm")) p7 <- ggplot(data=acfToDf(moves.100, 200, 50), aes(x=lag, y=acf)) + geom_hline(aes(yintercept = 0)) + geom_hline(aes(yintercept = ci.max), color="red", linetype="dashed") + geom_hline(aes(yintercept = ci.min), color="red", linetype="dashed") + geom_segment(mapping = aes(xend = lag, yend = 0)) + xlab("Lag (years)") + ylab("R-squared") + theme(plot.margin=unit(c(0,0,0,0), "cm")) plot_grid(p4, p5, p6, p7, labels = c("a", "b", "c", "d"), align = "v", nrow=2) ``` A major limitation is that this method does not work well when the required length of the temporal autocorrelation is a 
large fraction of the overall length of the time series. The code below and **Figure 4** show an example with a filter of length 5000 (note that the *time* variable has 10000 elements). ```{r, size="small"} moves.5000 <- filter(moves, filter=rep(1, 5000), circular=TRUE) ``` ```{r , fig.height=2.5, fig.width=9, fig.cap="Sequence moves.5000 (a) and its temporal autocorrelation (b). In this case there is a large deviation between the required temporal autocorrelation (5000) and the outcome (2000).", echo=FALSE} p8 <- ggplot(data=data.frame(Time=time, Value=as.vector(moves.5000)), aes(x=Time, y=Value)) + geom_line(color="gray40", size=0.5) + ggtitle("Time series") + theme(plot.margin=unit(c(0,0,0,0), "cm")) p9 <- ggplot(data=acfToDf(moves.5000, 5000, 50), aes(x=lag, y=acf)) + geom_hline(aes(yintercept = 0)) + geom_hline(aes(yintercept = ci.max), color="red", linetype="dashed") + geom_hline(aes(yintercept = ci.min), color="red", linetype="dashed") + geom_segment(mapping = aes(xend = lag, yend = 0)) + ggtitle("Temporal autocorrelation") + theme(plot.margin=unit(c(0,0,0,0), "cm")) + ylab("R-square") plot_grid(p8, p9, labels = c("a", "b"), align = "h") ``` All the ideas presented above are implemented in the functions named **simulateDriver()** and **simulateDriverS**, with these main arguments: * **random.seed**: an integer for *set.seed()*, to use the same random seed in the generation of random numbers and make sure that results are reproducible. * **age**: a vector (i.e. 1:1000), representing the length of the output. * **autocorrelation.length**: length of the desired temporal autocorrelation structure, in the same units as *age*. * **output.min**: minimum value of the driver. * **output.max**: maximum value of the driver. **Figure 5** shows a driver generated with **simulateDriverS** with annual resolution in the range [0, 100], over a period of 10000 years, with a temporal autocorrelation length of 600 years. 
```{r , fig.height=4, fig.width=9, fig.cap="Virtual driver and its temporal autocorrelation. Note that virtual driver B will not be used hereafter to simplify the explanation on the model dynamics."} driver <- simulateDriverS( random.seeds=c(60, 120), time=1:10000, autocorrelation.lengths = 600, output.min=c(0, 0), output.max=c(100, 100), driver.names=c("A", "B"), filename=NULL) ``` #Simulating pollen curves from virtual taxa# ###Rationale### The ability of plant populations to respond more or less quickly to environmental change is mediated by a set of species' traits, such as niche optimum and breadth, growth rate, sexual maturity age, effective fecundity, and life span. Even though we have values for these traits for many plant species, pollen types are often clusters of species of the same genus or family rather than single species, making the assignment of trait values uncertain. For the purpose of this paper it is more practical to work with **virtual pollen curves** generated by **virtual taxa** with known relationships with the environment and traits. These virtual taxa are the result of a population model with different parametrizations. ###Assumptions### The model follows these assumptions: * **The spatial structure of the population is not important to explain its pollen productivity**. This is an operative assumption, to speed-up model executions. The lack of spatial structure is partially compensated by the model parameter **carrying capacity**, which simulates a limited space for population expansion. * **The environmental niche of the species follows a Gaussian distribution**, characterized by a mean (niche optimum, also niche position) and a standard deviation (niche breadth or tolerance). * **Different drivers can have a different influence on the species dynamics**, and that influence can be defined by the user by tuning the weights of each driver. 
* **Environmental suitability**, expressed in the range [0, 1], is the result of an additive function of the species niches (normal function defined by the species' mean and standard deviation for each driver), the drivers' values, and the relative influence of each driver (*driver weights*). * **Pollen productivity is a function of the individual's biomass and environmental suitability**, so under a hypothetical constant individual's biomass, its pollen production depends linearly on environmental suitability values. * **Effective fecundity is limited by environmental suitability**. Low environmental suitability values limit recruitment, acting as an environmental filter. Therefore, even though the fecundity of the individuals is fixed by the **fecundity** parameter, the overall population fecundity is limited by environmental suitability. ###Parameters### We have designed a simple individual-based population model which input parameters are: * **Drivers**: Values of up to two drivers (provided as numeric vectors) for a given time span. * **Niche mean**: The average (in drivers units) of the normal functions describing the species niche for each driver. This parameter defines the niche optimum of the species. * **Niche breadth**: Standard deviations (in driver units) of the normal functions describing the species niche for each driver. Smaller values simulate more specialized species, while larger values simulate generalist species. * **Driver weight**: Importance of each driver (note that in the paper we only used one driver) in defining climatic suitability for the given species. Balanced weights mean each driver is contributing equally to climatic suitability. The sum of both drivers must be 1. * **Maximum age**: age (in years) of senescence of the individuals. Individuals die when reaching this age. * **Reproductive age**: age (in years) at which individuals start to produce pollen and seeds. 
* **Fecundity**: actually, **effective fecundity**, which is the maximum number of viable seeds produced by mature individuals per year under fully suitable climatic conditions. * **Growth rate**: amount of the maximum biomass gained per year by each individual. Used as input in the logistic equation of the growth model. * **Maximum biomass**: maximum biomass (in relative units) individuals can reach. Used as input in the logistic equation of the growth model to set a maximum growth. * **Carrying capacity**: maximum sum of the biomass of all individuals allowed in the model. Ideally, to keep model performance, it should be equal to **maximum biomass** multiplied by 100 or 1000. The model removes individuals at random when this number is reached. In order to explain the model dynamics in the most simplified way, the parameters in **Table 1** are considered. ```{r, echo=FALSE} parameters <- parametersDataframe(rows=1) parameters[1,] <- c("Species 1", 50, 20, 2, 0.2, 0, 100, 10000, 1, 0, 50, 10, 0, 0, 600, 0) parameters[, 2:ncol(parameters)] <- sapply(parameters[, 2:ncol(parameters)], as.numeric) parameters.t <- data.frame(t(parameters)) parameters.t <- data.frame(parameters.t[-1,]) colnames(parameters.t) <- paste(parameters$label, sep="") parameters.t$Parameter <- rownames(parameters.t) rownames(parameters.t) <- NULL parameters.t <- parameters.t[, c(ncol(parameters.t),1:(ncol(parameters.t)-1))] #removing last two lines parameters.t <- parameters.t[c(1:8, 10:11), ] kable(parameters.t, caption="Parameters of a virtual species. Note that driver.B is omitted in order to simplify the explanation of the model.", booktabs = T) %>% kable_styling(latex_options = c("hold_position", "striped")) ``` ###Ecological niche and environmental suitability### The model assumes that normal distributions can be used as simple mathematical representations of ecological niches. 
Considering a virtual species and an environmental predictor, the abundance of the species along the range of the predictor values can be defined by a normal distribution with a mean $\mu$ (niche optimum or niche position), and standard deviation $\sigma$ (niche breadth or tolerance). The equation to compute the density of a normal distribution has the form: **Equation 1:** \Large $$f(x) = \frac{1}{\sigma \sqrt{2 \pi}} e^{-\frac{(x - \mu)^2}{2 \sigma^2}}$$ \normalsize Where: + $x$ is the value of the predictor. + $\mu$ is the mean of the normal distribution. + $\sigma$ is the standard deviation. The following code shows a simple example on how *dnorm()* uses **Equation 1** to compute the density of a normal function over a data range from a mean and a standard deviation: ```{r, size="small"} niche.A <- dnorm(x=0:100, mean=50, sd=10) ``` We use the code above and a computation on the density of the driver to plot the ecological niche of the virtual taxa against the availability of driver values (**Figure 6**). 
Both densities have been scaled in the range [0, 1].", echo=FALSE} #scaling suitability between 0 and 1 niche.A <- niche.A / max(niche.A) #getting the density of the driver driver.A = driver[driver$driver=="A", "value"] density.driver.A = density(driver.A, from=min(driver.A), to=max(driver.A), n=101, bw=max(driver.A)/100) density.driver.A.y = (density.driver.A$y - min(density.driver.A$y)) / (max(density.driver.A$y) - min(density.driver.A$y)) driver.A.range = seq(min(driver.A), max(driver.A), length.out = 101) #dataframe for plot plot.df = data.frame(Species=rep(paste(parameters[1, "label"], sep=""), 101), Driver=c(rep("Driver A", 101)), Driver.density.x=c(density.driver.A$x), Driver.density.y=c(density.driver.A.y), Value=driver.A.range, Suitability=niche.A) ggplot(data=plot.df, aes(x=Value, y=Suitability, group=Species)) + geom_ribbon(data=plot.df, aes(ymin=0, ymax=Driver.density.y), color="gray80", fill="gray80", alpha=0.5) + geom_ribbon(data=plot.df, aes(ymin=0, ymax=Suitability), alpha=0.8, colour=NA, fill="#4572A9") + geom_line(data=plot.df, aes(x=Value, y=Driver.density.y), color="gray80", alpha=0.5) + xlab("Driver values") + ylab("Environmental suitability") + theme(text = element_text(size=12), strip.background = element_rect(fill=NA), panel.spacing = unit(1, "lines")) ``` Environmental suitability for the given species over the study period is computed as follows: 1. *dnorm()* is computed on the mean and standard deviation defined for the species niche for a given driver. 2. The output of *dnorm()* is scaled to the range [0, 1]. 3. The scaled values of the output are multiplied by the driver weights (which equals 1 if only one driver is used). 4. If there are two drivers, suitability values of each individual driver are summed together. A **burn-in period** with a length of ten generations of the virtual taxa is added to the environmental suitability computed from the species niche and the driver/drivers. 
The added segment starts at maximum environmental suitability, stays there for five generations, and decreases linearly for another five generations until meeting the first suitability value of the actual simulation time. The whole burn-in segment has a small amount of white noise added (**Figure 7**). The burn-in period lets the population model to warm-up and go beyond starting conditions before simulation time starts. ```{r , fig.height=5, fig.width=9, fig.cap="Driver and environmental suitability of the virtual taxa. Burn-in period is highlighted by a gray box in the Environmental suitability panel.", echo=FALSE} #computing density density.A <- dnorm(driver[driver$driver == "A", "value"], mean=50, sd=10) #scaling to [0, 1] suitability <- density.A / max(density.A) burnin.suitability <- jitter(c(rep(1, parameters$maximum.age*5), seq(1, suitability[1], length.out = parameters$maximum.age*5)), amount=max(suitability)/100) burnin.suitability[burnin.suitability < 0]<-0 burnin.suitability[burnin.suitability > 1]<-1 length.burnin.suitability<-length(burnin.suitability) burnin.suitability <- c(burnin.suitability, suitability) plot.df4 <- data.frame(Time=c(-length.burnin.suitability:-1, 1:(length(suitability))), Suitability=burnin.suitability, Period=c(rep("Burn-in", length.burnin.suitability), rep("Simulation", length(suitability)))) p1 <- ggplot(data=driver[driver$driver == "A", ], aes(x=time, y=value)) + geom_line(size=0.5, color="gray40") + ggtitle("Driver and environmental suitability") + xlab("") + ylab("Driver") + coord_cartesian(xlim=c(-500, 10000)) + theme(plot.margin = unit(c(0,0,-0.5,0), "cm"), axis.line.x=element_blank(), axis.ticks.x = element_blank(), axis.text.x = element_blank()) p2 <- ggplot(data=plot.df4, aes(x=Time, y=Suitability)) + geom_rect(aes(xmin=min(plot.df4$Time), xmax=0, ymin=0, ymax=Inf), inherit.aes=FALSE, fill="gray90") + geom_line(size=0.5, color="#4572A9") + xlab("Time (years)") + ylab("Suitability") + coord_cartesian(xlim=c(-500, 
10000))+ theme(plot.margin = unit(c(0,0,0,0), "cm")) plot_grid(p1, p2, ncol = 1, align = "v", rel_heights = c(1.1, 1)) ``` ###Biomass growth### Individuals age one year on each simulation step, and their biomass at any given age is defined by the following equation of logistic growth (**Equation 2**). **Figure 8** shows different growth curves for different growth rates for a virtual taxon with a maximum age of 400 years. **Equation 2:** \Large $$f(t) = \frac{B}{1 + B \times e^{- \alpha \times t}}$$ \normalsize Where: + $B$ is the maximum biomass an individual can reach. + $\alpha$ is the growth rate. + $t$ is the age of the individual at a given time. ```{r , fig.height=3, fig.width=6, fig.cap="Biomass vs. age curves resulting from different growth rates for a 400 years life-span.", echo=FALSE} #objects to store results growth.rate.vector <- biomass.vector <- age.vector <- vector() #params age <- 0:400 maximum.biomass <- 100 growth.rates <- c(0.025, 0.05, 0.1, 0.2, 0.4, 0.8) #iterating through growth rates for(growth.rate in growth.rates){ biomass.vector <- c(biomass.vector,maximum.biomass / (1 + maximum.biomass * exp(- growth.rate * age))) growth.rate.vector <- c(growth.rate.vector, rep(growth.rate, length(age))) age.vector <- c(age.vector, age) } plot.df3 <- data.frame(growth.rate=growth.rate.vector, age=age.vector, biomass=biomass.vector) plot.df3$growth.rate <- as.factor(plot.df3$growth.rate) p3 <- ggplot(data=plot.df3, aes(x=age, y=biomass, group=growth.rate, color=growth.rate)) + geom_line(size=1) + scale_color_viridis(option="D", discrete=TRUE, direction=-1) + ggtitle("Biomass gain under different growth rates") + ylab("Biomass (relative)") + xlab("Age (years)") p3 rm(age, age.vector, biomass.vector, growth.rate, growth.rate.vector, growth.rates, p3, plot.df3) ``` ###Population dynamics### The model starts with a population of 100 individuals with random ages, in the range [1, maximum age], taken from a uniform distribution (all ages are equiprobable). 
For each environmental suitability value, including the burn-in period, the model performs the following operations: 1. **Aging:** adds one year to the age of the individuals. 2. **Mortality due to senescence:** individuals reaching the maximum age are removed from the simulation. * **Local extinction and immigration** If the number of individuals drops to zero, the population is replaced by a "seed bank" of 100 individuals with age zero, and the simulation skips to the next year of the simulation. This is intended to simulate the arrival of seeds from nearby regions, and will only lead to population growth if environmental suitability is higher than zero. 3. **Plant growth:** Applies a plant growth equation to compute the biomass of every individual (see **Figure 8**). 4. **Carrying capacity:** If maximum population biomass is reached, individuals are iteratively selected for removal according to a mortality risk curve computed by **Equation 3** (see **Figure 9**). This curve gives removal preference to younger individuals, matching observed patterns in natural populations. 5. **Pollen productivity:** In each time step the model computes the pollen productivity (in relative values) of the population using **Equation 4**. 6. **Reproduction:** Generates as many seeds as reproductive individuals are available multiplied by the maximum fecundity and the environmental suitability of the given time. The model returns a table with climatic suitability, pollen production, and population size (reproductive individuals only) per simulation year. **Figure 10** shows the results of the population model when applied to the example virtual species. **Equation 3:** \Large $$P_{m} = 1 - \sqrt{a/A}$$ \normalsize Where: + $P_{m}$ is the probability of mortality. + $a$ is the age of the given individual. + $A$ is the maximum age reached by the virtual taxa. 
```{r , fig.height=3, fig.width=6, fig.cap="Risk curve defining the probability of removal of a given individual as a function of its fractional age when maximum carrying capacity is reached.", echo=FALSE} temp.df <- data.frame(Pm=NA, Age=0:100) temp.df$Pm <- 1 - sqrt(temp.df$Age/100) temp.df$Age <- temp.df$Age/100 ggplot(data=temp.df, aes(x=Age, y=Pm, color=Pm)) + geom_line(size=1) + scale_color_viridis(option="D", direction=-1) + ggtitle("Probability of mortality when carrying capacity is reached") + ylab("Removal probability") + xlab("Age of the individual (as proportion to the maximum age)") + theme(legend.position = "none") rm(temp.df) ``` **Equation 4:** \Large $$P_{t} = \sum x_{it} \times max(S_{t}, B)$$ \normalsize Where: + $t$ is time (a given simulation time step). + $P$ is the pollen productivity of the population at a given time. + $x_{i}$ represents the biomass of every adult individual. + $S$ is the environmental suitability at the given time. + $B$ is the contribution of biomass to pollen productivity regardless of environmental suitability (*pollen.control* parameter in the simulation, 0 by default). If $B$ equals 1, $P$ is equal to the total biomass sum of the adult population, regardless of the environmental suitability. If $B$ equals 0, pollen productivity depends entirely on environmental suitability values. The code below shows the core of the *simulatePopulation* function. It is slightly simplified to improve readability, and only pollen counts are written as output. Note that age of individuals is represented as a proportion of the maximum age to facilitate operations throughout the code. 
```{r, eval=FALSE, size="small"} #parameters (1st line in dataframe "parameters") maximum.age <- parameters[1, "maximum.age"] reproductive.age <- parameters[1, "reproductive.age"] growth.rate <- parameters[1, "growth.rate"] carrying.capacity <- parameters[1, "carrying.capacity"] fecundity <- parameters[1, "fecundity"] #reproductive age to proportion reproductive.age <- reproductive.age / maximum.age #years scaled taking maximum.age as reference scaled.year <- 1/maximum.age #vector to store pollen counts pollen.count <- vector() #starting population population <- sample(seq(0, 1, by=scaled.year), 100, replace=TRUE) #iterating through suitability (once per year) #------------------------------------ for(suitability.i in suitability){ #AGING ----------------------------------------------- population <- population + scaled.year #SENESCENCE ------------------------------------------ #1 is the maximum age of ages expressed as proportions population <- population[population < 1] #LOCAL EXTINCTION AND RECOLONIZATION ----------------- if (length(population) == 0){ #local extinction, replaces population with a seedbank population <- rep(0, floor(100 * suitability.i)) #adds 0 to pollen.count pollen.count <- c(pollen.count, 0) #jumps to next iteration next } #PLANT GROWTH --------------------------------------- #biomass of every individual biomass <- maximum.biomass / (1 + maximum.biomass * exp(- (growth.rate * suitability.i) * (population * maximum.age) ) ) #SELF-THINNING -------------------------------------- #carrying capacity reached while(sum(biomass) > carrying.capacity){ #removes a random individual based on risk curve individual.to.remove <- sample( x = length(population), size = 1, replace = TRUE, prob = 1 - sqrt(population) #risk curve ) #removing individuals from population and biomass population <- population[-individual.to.remove] biomass <- biomass[-individual.to.remove] }#end of while #REPRODUCTION -------------------------------------- #identifyies adult 
individuals adults <- population > reproductive.age #seeds (vector of 0s) #fractional biomass of adults * fecundity * suitability seeds <- rep(0, floor(sum((biomass[adults]/maximum.biomass) * fecundity) * suitability.i)) #adding seeds to the population population <- c(population, seeds) #POLLEN OUTPUT ------------------------------------- #biomass of adults multiplied by suitability pollen.count <- c(pollen.count, sum(biomass[adults]) * suitability.i) } #end of loop through suitability values ``` The code below executes the simulation, and plots the outcome using the function *plotSimulation*. ```{r , size="small", fig.height=6.5, fig.width=9, message=TRUE, warning=TRUE, error=TRUE, results="hide", fig.cap="Simulation outcome. Green shades represent different age groups (seedlings, saplings, and adults).", warning=FALSE, message=FALSE} #simulate population based on parameters simulation <- simulatePopulation(parameters=parameters[1, ], drivers=driver) #plotting simulation output plotSimulation(simulation.output=simulation, burnin=FALSE, panels=c("Driver A", "Suitability", "Population", "Pollen"), plot.title="", text.size=12, line.size=0.4) ``` The simulation outcomes can vary with the traits of the virtual species. **Table 2** shows the parameters of two new taxa named **Species 2** and **Species 3**. These species have a higher niche breadth than **Species 1**, and **Species 3** has a pollen productivity depending more on biomass than suitability (parameter *pollen.control* higher than zero). The comparison of both simulations (**Figure 11**) along with **Species 1** shows that different traits generate different pollen curves in our simulation. 
```{r, message=TRUE, warning=TRUE, echo=FALSE} parameters[2,] <- c("Species 2", 50, 20, 4, 0.3, 0, 100, 10000, 1, 0, 50, 15, 0, 0, 600, 600) parameters[3,] <- c("Species 3", 50, 20, 6, 0.4, 0.5, 100, 10000, 1, 0, 50, 20, 0, 0, 600, 600) parameters[, 2:ncol(parameters)] <- sapply(parameters[, 2:ncol(parameters)], as.numeric) parameters.t <- data.frame(t(parameters)) parameters.t <- parameters.t[c(2:9, 11:12),] names(parameters.t) <- paste(parameters$label, sep="") parameters.t$Parameter <- rownames(parameters.t) rownames(parameters.t) <- NULL parameters.t <- parameters.t[, c(ncol(parameters.t),1:(ncol(parameters.t)-1))] kable(parameters.t, caption="Parameters of the three virtual species.", booktabs = T) %>% kable_styling(latex_options = c("hold_position", "striped")) ``` ```{r , fig.height=5, fig.width=9, fig.cap="Comparison of the pollen abundance and environmental suitability (same in all cases) for the three virtual species shown in Table 2 within the period 5600-6400. Species 2 has a higher fecundity than Species 1 (1 vs 10)", message=FALSE} #simulating species 2 and 3 of the dataframe parameters simulation.2 <- simulatePopulation(parameters=parameters, species=c(2,3), drivers=driver) #adding the results to the previous simulation simulation <- c(simulation, simulation.2) rm(simulation.2) #plotting the comparison for the time interval between 4000 and 5000.- compareSimulations(simulation.output=simulation, species = "all", columns = c("Suitability", "Pollen"), time.zoom = c(5600, 6400)) ``` ###Testing the model limits### We searched for the minimum values of the parameters required to keep a simulated population viable under fully suitable conditions. 
The taxa *Test 1* and *Test 2* shown below ```{r, echo=FALSE} parameters.test <- parametersDataframe(rows=1) parameters.test[1,] <- c("Test 1", 4, 1, 0.55, 2, 0, 1, 30, 0.5, 0.5, 50, 10, 50, 10, 100, 100) parameters.test[2,] <- c("Test 2", 3, 1, 0.5, 2, 0, 1, 30, 0.5, 0.5, 50, 10, 50, 10, 100, 100) parameters.test[, 2:ncol(parameters.test)] <- sapply(parameters.test[, 2:ncol(parameters.test)], as.numeric) parameters.t <- data.frame(t(parameters.test)) parameters.t <- data.frame(parameters.t[-1,]) names(parameters.t) <- c("Test 1", "Test 2") parameters.t$Parameter <- rownames(parameters.t) rownames(parameters.t) <- NULL parameters.t <- parameters.t[, c(ncol(parameters.t),1:(ncol(parameters.t)-1))] parameters.t <- parameters.t[1:(nrow(parameters.t)-2),] kable(parameters.t, caption="Parameters of virtual taxa used to test model limits.") rm(parameters.t) ``` ```{r, message=FALSE} simulation.test.1 <- simulatePopulation( parameters=parameters.test, driver.A=jitter(rep(50, 500), amount=4) ) ``` The test driver used had an average of 50 (optimum values according the environmental niche of both species) for 500 years, with random noise added through the *jitter* function. The model results (column *Pollen* only) for both virtual taxa are shown below. ```{r, fig.height=3, fig.width=9, fig.cap="Pollen output of virtual taxa Test 1 and Test 2 for a 200 years time-window."} compareSimulations(simulation.output=simulation.test.1, columns="Pollen", time.zoom = c(0, 200)) ``` The outputs of the test taxa show how a minimal change in the parameters can lead to fully unstable results when the considered taxa are short lived. Considering such a result, values for life-traits (*maximum.age*, *reproductive.age*, *fecundity*, *growth.rate*, and *maximum.biomass*) of taxon *Test 1* should be taken as safe lower limits for these traits. A similar situation can happen with long lived species when the age of sexual maturity is close to the maximum age. 
The table below shows three new test species with long life-spans and increasing maturity ages. The driver is again centered on 50, with added white noise, and 2000 years length. ```{r, echo=FALSE} parameters.test[3,] <- c("Test 3", 1000, 100, 0.5, 0.05, 0, 100, 10000, 0.5, 0.5, 50, 10, 50, 10, 100, 100) parameters.test[4,] <- c("Test 4", 1000, 500, 0.5, 0.05, 0, 100, 10000, 0.5, 0.5, 50, 10, 50, 10, 100, 100) parameters.test[5,] <- c("Test 5", 1000, 900, 0.5, 0.05,0, 100, 10000, 0.5, 0.5, 50, 10, 50, 10, 100, 100) parameters.test[, 2:ncol(parameters.test)] <- sapply(parameters.test[, 2:ncol(parameters.test)], as.numeric) parameters.t <- data.frame(t(parameters.test[3:5,])) parameters.t <- data.frame(parameters.t[-1,]) names(parameters.t) <- c("Test 3", "Test 4", "Test 5") parameters.t$Parameter <- rownames(parameters.t) rownames(parameters.t) <- NULL parameters.t <- parameters.t[, c(ncol(parameters.t),1:(ncol(parameters.t)-1))] parameters.t <- parameters.t[1:(nrow(parameters.t)-2),] kable(parameters.t, caption="Parameters of virtual taxa used to test model limits.") rm(parameters.t) ``` ```{r, message=FALSE} simulation.test.2 <- simulatePopulation( parameters=parameters.test, species=c(3:5), driver.A=jitter(rep(50, 2000), amount=4) ) ``` ```{r, fig.height=3, fig.width=9, fig.cap="Pollen output of virtual taxa Test 1 and Test 2 for a 200 years time-window."} compareSimulations(simulation.output=simulation.test.2, columns="Pollen", time.zoom = c(0, 800)) ``` The figure shows how *Test 3* yields a stable pollen productivity across time, while *Test 4* and *Test 5* show, respectively, a very low productivity due to scarcity of adults, and a total inability to sustain stable populations. Considering these results, it is important to keep a careful balance between the parameters *maximum.age* and *reproductive.age* to obtain viable virtual populations. 
#Simulating a virtual accumulation rate# Sediments containing pollen grains accumulate at varying rates, generally measured in *years per centimeter* (y/cm). Accumulation rates found in real datasets are broadly between 10 and 70 y/cm, with a gradual increase towards the present time. To simulate such an effect and aggregate the annual data produced by the simulation in a realistic manner we have written a function named *simulateAccumulationRate* that takes a random seed, a time-span, and a range of possible accumulation rate values, and generates a virtual accumulation rate curve. It does so by generating a random walk first, smoothing it through the application of a GAM model, and scaling it between given minimum and maximum accumulation rates (see **Figure 12**). ```{r , fig.height=3, fig.width=9, fig.cap="Virtual sediment accumulation rate."} accumulation.rate <- simulateAccumulationRate(seed=140, time=1:10000, output.min=1, output.max=50) ``` The output is a dataframe with three columns: *time*, *accumulation.rate*, and *grouping* (see **Table 4**). ```{r, echo=FALSE} kable(accumulation.rate[1:20, ], caption="Dataframe resulting from the function to generate virtual accumulation rates. Each group in the grouping column has as many elements as accumulation.rate the given group has.", booktabs = T) %>% kable_styling(latex_options = c("hold_position", "striped")) ``` Cases of the simulation data belonging to the same group according to the *grouping* column are aggregated together to simulate a *centimeter* of sedimented data. The data are aggregated by the function *aggregateSimulation*, that can additionally sample the resulting data at given depth intervals expressed in centimeters between consecutive samples (2, 6, and 10 cm in the code below). 
```{r, size="small"} simulation.aggregated <- aggregateSimulation( simulation.output=simulation, accumulation.rate=accumulation.rate, sampling.intervals=c(2, 6, 10) ) ``` The function returns a matrix-like list with as many rows as simulations are available in *simulation.output*, a column containing the data of the original simulations, a column with the data aggregated every centimeter, and the sampling intervals requested by the user. The data are accessed individually by list subsetting, as shown below (see **Figure 13**), to allow easy analysis and visualization. ```{r , fig.height=5, fig.width=9, fig.cap="Effect of applying accumulation rate and different sampling depth intervals to a section of the the annual data produced by the simulation (represented in the Legend by the label Annual). Note that the 10 cm resampling completely misses the whole high-suitability event in the Pollen panel, and barely registers it in the Suitability panel. Inter-decadal variability shown by the Annual data is completely lost even at 1 cm, the higher sampling resolution.", echo=FALSE} #checking results temp.list <- simulation.aggregated[1, 1:5] names(temp.list) <- c("Annual", "1 cm", "2 cm", "6 cm", "10 cm") compareSimulations(simulation.output=temp.list, columns = c("Suitability","Pollen"), time.zoom=c(5800, 6200)) ``` #Sampling virtual pollen curves at different depth intervals# Applying a virtual accumulation rate to the data generated by the population model at given depth intervals simulates to a certain extent how pollen deposition and sampling work in reality, and the outcome of that is data-points separated by regular depth intervals, but not regular time intervals. **Figure 14** shows that time intervals between consecutive samples produced by *aggregateSimulation* are not regular. However, analyzing ecological memory requires to organize the input data in regular time lags, and to do that the data need to have regular time intervals between consecutive cases. 
```{r , fig.height=3, fig.width=6, fig.cap="Histogram of the time differences (in years) between consecutive samples for the outcome of aggregateSimulation when resampled at intervals of 6 centimeters on Species 1. It clearly shows how the data are not organized in regular time intervals, and therefore are unsuitable for analyses requiring regular time lags.", echo=FALSE} #getting example data sampled at 2cm intervals simulated.data = simulation.aggregated[[1, 3]] #checking distribution of differences in age between consecutive samples hist(simulated.data[2:nrow(simulated.data),"Time"] - simulated.data[1:(nrow(simulated.data)-1),"Time"], main="Age differences between consecutive samples", xlab="Age difference between consecutive samples", col=viridis(12, begin = 0, end=1)) ``` Irregular time series can be interpolated into regular time series by using the *loess* function. This function fits a polynomial surface representing the relationship between two (or more) variables. The smoothness of this polynomial surface is modulated by the *span* parameter, and finding the right value for this parameter is critical to obtain an interpolation result as close as possible to the real data. The following code is able to find the value of *span* that maximizes the correlation between input and interpolated data for any given time series. 
```{r, size="small", eval=FALSE} #getting example data sampled at 2cm intervals simulated.data = simulation.aggregated[[1, 3]] #span values to be explored span.values=seq(20/nrow(simulated.data), 5/nrow(simulated.data), by=-0.0005) #plotting the optimization process in real time x11(height=12, width=24) #iteration through candidate span values for(span in span.values){ #function to interpolate the data interpolation.function = loess( Pollen ~ Time, data=simulated.data, span=span, control=loess.control(surface="direct")) #plot model fit plot(simulated.data$Pollen, type="l", lwd=2) lines(interpolation.function$fitted, col="red", lwd=2) #if correlation equals 0.9985 loop stops if(cor(interpolation.function$fitted, simulated.data$Pollen) >= 0.9985){break} } #gives time to look at result before closing the plot window Sys.sleep(5) ``` The function *toRegularTime* (usage shown below), uses the code above to interpolate the data produced by *aggregateSimulation* into a given time interval, expressed in years, returning a list of the same dimensions of the input list. ```{r, warning=FALSE, size="small"} simulation.interpolated <- toRegularTime( x=simulation.aggregated, time.column="Time", interpolation.interval=10, columns.to.interpolate=c("Pollen", "Suitability", "Driver.A") ) ``` **Figure 15** shows the same data segment shown in **Figure 13**, but with samples re-interpolated into a regular time grid at 10 years intervals. ```{r, echo=FALSE , fig.height=5, fig.width=9, message=TRUE, error=TRUE, warning=TRUE, fig.cap="Data aggregated using virtual accumulation rate and reinterpolated into a regular time grid of 10 years resolution."} #getting the results for Species 1 temp.list <- simulation.interpolated[1, 1:5] names(temp.list) <- c("Annual", "1 cm", "2 cm", "6 cm", "10 cm") #plotting comparison compareSimulations(simulation.output=temp.list, columns = c("Suitability","Pollen"), time.zoom=c(5800, 6200)) ```
/scratch/gouwar.j/cran-all/cranData/virtualPollen/inst/doc/using_virtualPollen.Rmd
--- title: 'virtualPollen' subtitle: 'Generation of virtual pollen curves' author: "Blas M. Benito" date: "`r Sys.Date()`" fig_width: 6 output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Using virtualPollen} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r setup, warning=FALSE, message=FALSE, echo=FALSE} #checking if required packages are installed, and installing them if not list.of.packages <- c("ggplot2", "cowplot", "knitr", "viridis", "tidyr", "formatR", "grid", "devtools", "magrittr", "kableExtra", "viridis") new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])] if(length(new.packages)) install.packages(new.packages, dep=TRUE) #install virtualPollen if not installed if(!("virtualPollen" %in% installed.packages())){ library(devtools) install_github("blasbenito/virtualPollen") } # source("ecological_memory_functions.R") library(virtualPollen) #to simulate pollen curves library(ggplot2) #plotting library library(cowplot) #plotting library library(knitr) #report generation in R library(viridis) #pretty plotting colors library(grid) library(tidyr) library(formatR) library(kableExtra) #to fit tables to pdf page size library(magrittr) #kableExtra requires pipes options(scipen=999) #trying to line-wrap code in pdf output #from https://github.com/yihui/knitr-examples/blob/master/077-wrap-output.Rmd knitr::opts_chunk$set(echo = TRUE, fig.pos= "h") opts_chunk$set(tidy.opts=list(width.cutoff=80), tidy=FALSE) ``` **Summary** This document describes in detail the methods used to generate a virtual environmental driver with a given temporal autocorrelation, to be used as an input for a population model simulating synthetic pollen curves generated by virtual taxa with different life-traits (life-span and fecundity) and environmental niche features (niche position and breadth). 
We also describe how we generated a virtual sediment accumulation rate to aggregate the results of the population model to mimic taphonomic processes producing real pollen curves, and how we resampled virtual pollen data at different depth intervals. Finally, we present the code used to generate the 16 virtual taxa used for the analyses described in the paper. **IMPORTANT:** An Rmarkdown version of this document can be found at: https://github.com/BlasBenito/EcologicalMemory. \pagebreak #Generating a virtual environmental driver# ###Rationale### To simulate virtual pollen curves with the population model described in section **2** a virtual driver representing changes in environmental conditions is required. This section explains how to generate such a driver as a time series with a given temporal autocorrelation simulating the temporal structure generally shown by observed environmental drivers. ###Generating a random walk### The following steps are required to generate a **random walk** in R: 1. Generate a vector *time* with time values. 2. Re-sample with replacement the set of numbers (-1, 0, 1) as many times as values are in the *time* vector, to produce the vector *moves* (as in *moves of a random walk*). 3. Compute the cumulative sum of *moves*, which generates the random walk. 
```{r, size="small"} #sets a state for the generator of pseudo-random numbers set.seed(1) #defines the variable "time" time <- 1:10000 #samples (-1, 0, 1) with replacement moves <- sample(x=c(-1, 0, 1), size=length(time), replace=TRUE) #computes the cumulative sum of moves random.walk <- cumsum(moves) ``` ```{r, echo=FALSE , fig.height=2.5, fig.width=9, fig.cap="Random walk (a) and its temporal autocorrelation (b)."} p1 <- ggplot(data=data.frame(Time=time, Value=random.walk), aes(x=Time, y=Value)) + geom_line(color="gray40") + ggtitle("Random walk") + xlab("Time (years)") p2 <- ggplot(data=acfToDf(random.walk, 5000, 50), aes(x=lag, y=acf)) + geom_hline(aes(yintercept = 0)) + geom_hline(aes(yintercept = ci.max), color="red", linetype="dashed") + geom_hline(aes(yintercept = ci.min), color="red", linetype="dashed") + geom_segment(mapping = aes(xend = lag, yend = 0)) + ggtitle("Temporal autocorrelation") + xlab("Lag (years)") + ylab("Pearson correlation") plot_grid(p1, p2, labels = c("a", "b"), align = "h") ``` Every time this code is executed with a different number in *set.seed()*, it generates a different random walk with a different temporal autocorrelation, but still, this simple method is not useful to generate a time series with a given temporal autocorrelation. ###Applying a convolution filter to generate a given temporal autocorrelation### Applying a **convolution** filter to a time series allows to generate dependence among sets of consecutive cases. Below we show an example of how it works on a random sequence *a* composed by five numbers in the range [0, 1]. The operations to compute the filtered sequence are shown in **Table 1**. 
```{r, size="small"} #setting a fixed seed for the generator of pseudo-random numbers set.seed(1) #generating 5 random numbers in the range [0, 1] a <- runif(5) #applying a convolution filter of length 2 and value 1 b <- filter(a, filter=c(1,1), method="convolution", circular=TRUE) ``` ```{r , echo=FALSE , fig.height=2.5, fig.width=9, fig.cap="Original sequence (dashed line) and filtered sequence with filter (solid line)."} ggplot(data=data.frame(Time=rep(1:length(a), 2), Value=c(a, b), Legend=c(rep("Original (a)", length(a)), rep("Filtered (b)", length(b)))), aes(x=Time, y=Value, group=Legend)) + geom_line(aes(linetype=Legend), color="gray40", size=1) + theme(legend.position="right") + ggtitle("Effect of a convolution filter") ``` ```{r, results="asis", echo=FALSE} temp.table <- data.frame(row=1:5, a=round(a, 2), b=round(b, 2)) temp.table$operation <- c("b1 = a1 x f2 + a2 x f1", "b2 = a2 x f2 + a3 x f1", "b3 = a3 x f2 + a4 x f1" , "b4 = a4 x f2 + a5 x f1", "b5 = a5 x f2 + a6 x f1") kable(temp.table, caption = "Original sequence (a), filtered sequence (b), and filtering operations. Numbers beside letters a and b represent row numbers, while f1 and f2 represent the values of the convolution filter (both equal to 1 in this case).", booktabs = T) %>% kable_styling(latex_options = c("hold_position", "striped")) ``` The *operation* column in **Table 1** shows how the convolution filter generates a dependence between values located within the length of the filter. This positional dependence creates a temporal autocorrelation pattern with a significant length equal to the length of the filter. The following piece of code demonstrates this by generating two versions of the same *moves* vector used before, one with a length of the significant temporal autocorrelation equal to 10 (defined in the same units of the *time* vector), and another with a length of 100. Results are shown in **Figure 3**. 
```{r, size="small"} moves.10 <- filter(moves, filter=rep(1, 10), circular=TRUE) moves.100 <- filter(moves, filter=rep(1, 100), circular=TRUE) ``` ```{r, fig.height=5, fig.width=9, warning=TRUE, fig.cap="Sequences filtered with different filter lengths: a) Sequence with autocorrelation length equal to 10; b) Temporal autocorrelation of a); c) Sequence with autocorrelation length equal to 100: d) Temporal autocorrelation of c).", echo=FALSE} p4 <- ggplot(data=data.frame(Time=time, Value=as.vector(moves.10)), aes(x=Time, y=Value)) + geom_line(color="gray40", size=0.5) + ggtitle("Time series") + xlab("") + theme(axis.line.x=element_blank(), axis.ticks.x = element_blank(), axis.text.x = element_blank(), plot.margin=unit(c(0,0,0,0), "cm")) p5 <- ggplot(data=acfToDf(moves.10, 200, 50), aes(x=lag, y=acf)) + geom_hline(aes(yintercept = 0)) + geom_hline(aes(yintercept = ci.max), color="red", linetype="dashed") + geom_hline(aes(yintercept = ci.min), color="red", linetype="dashed") + geom_segment(mapping = aes(xend = lag, yend = 0)) + ggtitle("Temporal autocorrelation") + xlab("") + ylab("R-squared") + theme(axis.line.x=element_blank(), axis.ticks.x = element_blank(), axis.text.x = element_blank(), plot.margin=unit(c(0,0,0,0), "cm")) p6 <- ggplot(data=data.frame(Time=time, Value=as.vector(moves.100)), aes(x=Time, y=Value)) + geom_line(color="gray40", size=0.5) + theme(plot.margin=unit(c(0,0,0,0), "cm")) p7 <- ggplot(data=acfToDf(moves.100, 200, 50), aes(x=lag, y=acf)) + geom_hline(aes(yintercept = 0)) + geom_hline(aes(yintercept = ci.max), color="red", linetype="dashed") + geom_hline(aes(yintercept = ci.min), color="red", linetype="dashed") + geom_segment(mapping = aes(xend = lag, yend = 0)) + xlab("Lag (years)") + ylab("R-squared") + theme(plot.margin=unit(c(0,0,0,0), "cm")) plot_grid(p4, p5, p6, p7, labels = c("a", "b", "c", "d"), align = "v", nrow=2) ``` A major limitation is that this method does not work well when the required length of the temporal autocorrelation is a 
large fraction of the overall length of the time series. The code below and **Figure 4** show an example with a filter of length 5000 (note that the *time* variable has 10000 elements). ```{r, size="small"} moves.5000 <- filter(moves, filter=rep(1, 5000), circular=TRUE) ``` ```{r , fig.height=2.5, fig.width=9, fig.cap="Sequence moves.5000 (a) and its temporal autocorrelation (b). In this case there is a large deviation between the required temporal autocorrelation (5000) and the outcome (2000).", echo=FALSE} p8 <- ggplot(data=data.frame(Time=time, Value=as.vector(moves.5000)), aes(x=Time, y=Value)) + geom_line(color="gray40", size=0.5) + ggtitle("Time series") + theme(plot.margin=unit(c(0,0,0,0), "cm")) p9 <- ggplot(data=acfToDf(moves.5000, 5000, 50), aes(x=lag, y=acf)) + geom_hline(aes(yintercept = 0)) + geom_hline(aes(yintercept = ci.max), color="red", linetype="dashed") + geom_hline(aes(yintercept = ci.min), color="red", linetype="dashed") + geom_segment(mapping = aes(xend = lag, yend = 0)) + ggtitle("Temporal autocorrelation") + theme(plot.margin=unit(c(0,0,0,0), "cm")) + ylab("R-square") plot_grid(p8, p9, labels = c("a", "b"), align = "h") ``` All the ideas presented above are implemented in the functions named **simulateDriver()** and **simulateDriverS**, with these main arguments: * **random.seed**: an integer for *set.seed()*, to use the same random seed in the generation of random numbers and make sure that results are reproducible. * **age**: a vector (i.e. 1:1000), representing the length of the output. * **autocorrelation.length**: length of the desired temporal autocorrelation structure, in the same units as *age*. * **output.min**: minimum value of the driver. * **output.max**: maximum value of the driver. **Figure 5** shows a driver generated with **simulateDriverS** with annual resolution in the range [0, 100], over a period of 10000 years, with a temporal autocorrelation length of 600 years. 
```{r , fig.height=4, fig.width=9, fig.cap="Virtual driver and its temporal autocorrelation. Note that virtual driver B will not be used hereafter to simplify the explanation on the model dynamics."} driver <- simulateDriverS( random.seeds=c(60, 120), time=1:10000, autocorrelation.lengths = 600, output.min=c(0, 0), output.max=c(100, 100), driver.names=c("A", "B"), filename=NULL) ``` #Simulating pollen curves from virtual taxa# ###Rationale### The ability of plant populations to respond more or less quickly to environmental change is mediated by a set of species' traits, such as niche optimum and breadth, growth rate, sexual maturity age, effective fecundity, and life span. Even though we have values for these traits for many plant species, pollen types are often clusters of species of the same genus or family rather than single species, making the assignment of trait values uncertain. For the purpose of this paper it is more practical to work with **virtual pollen curves** generated by **virtual taxa** with known relationships with the environment and traits. These virtual taxa are the result of a population model with different parametrizations. ###Assumptions### The model follows these assumptions: * **The spatial structure of the population is not important to explain its pollen productivity**. This is an operative assumption, to speed-up model executions. The lack of spatial structure is partially compensated by the model parameter **carrying capacity**, which simulates a limited space for population expansion. * **The environmental niche of the species follows a Gaussian distribution**, characterized by a mean (niche optimum, also niche position) and a standard deviation (niche breadth or tolerance). * **Different drivers can have a different influence on the species dynamics**, and that influence can be defined by the user by tuning the weights of each driver. 
* **Environmental suitability**, expressed in the range [0, 1], is the result of an additive function of the species niches (normal function defined by the species' mean and standard deviation for each driver), the drivers' values, and the relative influence of each driver (*driver weights*). * **Pollen productivity is a function of the individual's biomass and environmental suitability**, so under a hypothetical constant individual's biomass, its pollen production depends linearly on environmental suitability values. * **Effective fecundity is limited by environmental suitability**. Low environmental suitability values limit recruitment, acting as an environmental filter. Therefore, even though the fecundity of the individuals is fixed by the **fecundity** parameter, the overall population fecundity is limited by environmental suitability. ###Parameters### We have designed a simple individual-based population model whose input parameters are: * **Drivers**: Values of up to two drivers (provided as numeric vectors) for a given time span. * **Niche mean**: The average (in drivers units) of the normal functions describing the species niche for each driver. This parameter defines the niche optimum of the species. * **Niche breadth**: Standard deviations (in driver units) of the normal functions describing the species niche for each driver. Smaller values simulate more specialized species, while larger values simulate generalist species. * **Driver weight**: Importance of each driver (note that in the paper we only used one driver) in defining climatic suitability for the given species. Balanced weights mean each driver is contributing equally to climatic suitability. The sum of both weights must be 1. * **Maximum age**: age (in years) of senescence of the individuals. Individuals die when reaching this age. * **Reproductive age**: age (in years) at which individuals start to produce pollen and seeds. 
* **Fecundity**: actually, **effective fecundity**, which is the maximum number of viable seeds produced by mature individuals per year under fully suitable climatic conditions. * **Growth rate**: amount of the maximum biomass gained per year by each individual. Used as input in the logistic equation of the growth model. * **Maximum biomass**: maximum biomass (in relative units) individuals can reach. Used as input in the logistic equation of the growth model to set a maximum growth. * **Carrying capacity**: maximum sum of the biomass of all individuals allowed in the model. Ideally, to keep model performance, it should be equal to **maximum biomass** multiplied by 100 or 1000. The model removes individuals at random when this number is reached. In order to explain the model dynamics in the most simplified way, the parameters in **Table 1** are considered. ```{r, echo=FALSE} parameters <- parametersDataframe(rows=1) parameters[1,] <- c("Species 1", 50, 20, 2, 0.2, 0, 100, 10000, 1, 0, 50, 10, 0, 0, 600, 0) parameters[, 2:ncol(parameters)] <- sapply(parameters[, 2:ncol(parameters)], as.numeric) parameters.t <- data.frame(t(parameters)) parameters.t <- data.frame(parameters.t[-1,]) colnames(parameters.t) <- paste(parameters$label, sep="") parameters.t$Parameter <- rownames(parameters.t) rownames(parameters.t) <- NULL parameters.t <- parameters.t[, c(ncol(parameters.t),1:(ncol(parameters.t)-1))] #removing last two lines parameters.t <- parameters.t[c(1:8, 10:11), ] kable(parameters.t, caption="Parameters of a virtual species. Note that driver.B is ommited in order to simplify the explanation of the model.", booktabs = T) %>% kable_styling(latex_options = c("hold_position", "striped")) ``` ###Ecological niche and environmental suitability### The model assumes that normal distributions can be used as simple mathematical representations of ecological niches. 
Considering a virtual species and an environmental predictor, the abundance of the species along the range of the predictor values can be defined by a normal distribution with a mean $\mu$ (niche optimum or niche position), and standard deviation $\sigma$ (niche breadth or tolerance). The equation to compute the density of a normal distribution has the form: **Equation 1:** \Large $$f(x) = \frac{1}{\sigma \sqrt{2 \pi}} e^{-\frac{(x - \mu)^2}{2 \sigma^2}}$$ \normalsize Where: + $x$ is the value of the predictor. + $\mu$ is the mean of the normal distribution. + $\sigma$ is the standard deviation. The following code shows a simple example on how *dnorm()* uses **Equation 1** to compute the density of a normal function over a data range from a mean and a standard deviation: ```{r, size="small"} niche.A <- dnorm(x=0:100, mean=50, sd=10) ``` We use the code above and a computation on the density of the driver to plot the ecological niche of the virtual taxa against the availability of driver values (**Figure 6**). ```{r , fig.height=2.5, fig.width=6, fig.cap="Ecological niche of the virtual species (blue) against the density (relative availability of values over time) of the driver (gray). 
Both densities have been scaled in the range [0, 1].", echo=FALSE} #scaling suitability between 0 and 1 niche.A <- niche.A / max(niche.A) #getting the density of the driver driver.A = driver[driver$driver=="A", "value"] density.driver.A = density(driver.A, from=min(driver.A), to=max(driver.A), n=101, bw=max(driver.A)/100) density.driver.A.y = (density.driver.A$y - min(density.driver.A$y)) / (max(density.driver.A$y) - min(density.driver.A$y)) driver.A.range = seq(min(driver.A), max(driver.A), length.out = 101) #dataframe for plot plot.df = data.frame(Species=rep(paste(parameters[1, "label"], sep=""), 101), Driver=c(rep("Driver A", 101)), Driver.density.x=c(density.driver.A$x), Driver.density.y=c(density.driver.A.y), Value=driver.A.range, Suitability=niche.A) ggplot(data=plot.df, aes(x=Value, y=Suitability, group=Species)) + geom_ribbon(data=plot.df, aes(ymin=0, ymax=Driver.density.y), color="gray80", fill="gray80", alpha=0.5) + geom_ribbon(data=plot.df, aes(ymin=0, ymax=Suitability), alpha=0.8, colour=NA, fill="#4572A9") + geom_line(data=plot.df, aes(x=Value, y=Driver.density.y), color="gray80", alpha=0.5) + xlab("Driver values") + ylab("Environmental suitability") + theme(text = element_text(size=12), strip.background = element_rect(fill=NA), panel.spacing = unit(1, "lines")) ``` Environmental suitability for the given species over the study period is computed as follows: 1. *dnorm()* is computed on the mean and standard deviation defined for the species niche for a given driver. 2. The output of *dnorm()* is scaled to the range [0, 1]. 3. The scaled values of the output are multiplied by the driver weights (which equals 1 if only one driver is used). 4. If there are two drivers, suitability values of each individual driver are summed together. A **burn-in period** with a length of ten generations of the virtual taxa is added to the environmental suitability computed from the species niche and the driver/drivers. 
The added segment starts at maximum environmental suitability, stays there for five generations, and decreases linearly for another five generations until meeting the first suitability value of the actual simulation time. The whole burn-in segment has a small amount of white noise added (**Figure 7**). The burn-in period lets the population model to warm-up and go beyond starting conditions before simulation time starts. ```{r , fig.height=5, fig.width=9, fig.cap="Driver and environmental suitability of the virtual taxa. Burn-in period is highlighted by a gray box in the Environmental suitability panel.", echo=FALSE} #computing density density.A <- dnorm(driver[driver$driver == "A", "value"], mean=50, sd=10) #scaling to [0, 1] suitability <- density.A / max(density.A) burnin.suitability <- jitter(c(rep(1, parameters$maximum.age*5), seq(1, suitability[1], length.out = parameters$maximum.age*5)), amount=max(suitability)/100) burnin.suitability[burnin.suitability < 0]<-0 burnin.suitability[burnin.suitability > 1]<-1 length.burnin.suitability<-length(burnin.suitability) burnin.suitability <- c(burnin.suitability, suitability) plot.df4 <- data.frame(Time=c(-length.burnin.suitability:-1, 1:(length(suitability))), Suitability=burnin.suitability, Period=c(rep("Burn-in", length.burnin.suitability), rep("Simulation", length(suitability)))) p1 <- ggplot(data=driver[driver$driver == "A", ], aes(x=time, y=value)) + geom_line(size=0.5, color="gray40") + ggtitle("Driver and environmental suitability") + xlab("") + ylab("Driver") + coord_cartesian(xlim=c(-500, 10000)) + theme(plot.margin = unit(c(0,0,-0.5,0), "cm"), axis.line.x=element_blank(), axis.ticks.x = element_blank(), axis.text.x = element_blank()) p2 <- ggplot(data=plot.df4, aes(x=Time, y=Suitability)) + geom_rect(aes(xmin=min(plot.df4$Time), xmax=0, ymin=0, ymax=Inf), inherit.aes=FALSE, fill="gray90") + geom_line(size=0.5, color="#4572A9") + xlab("Time (years)") + ylab("Suitability") + coord_cartesian(xlim=c(-500, 
10000))+ theme(plot.margin = unit(c(0,0,0,0), "cm")) plot_grid(p1, p2, ncol = 1, align = "v", rel_heights = c(1.1, 1)) ``` ###Biomass growth### Individuals age one year on each simulation step, and their biomass at any given age is defined by the following equation of logistic growth (**Equation 2**). **Figure 8** shows different growth curves for different growth rates for a virtual taxon with a maximum age of 400 years. **Equation 2:** \Large $$f(t) = \frac{B}{1 + B \times e^{- \alpha \times t}}$$ \normalsize Where: + $B$ is the maximum biomass an individual can reach. + $\alpha$ is the growth rate. + $t$ is the age of the individual at a given time. ```{r , fig.height=3, fig.width=6, fig.cap="Biomass vs. age curves resulting from different growth rates for a 400 years life-span.", echo=FALSE} #objects to store results growth.rate.vector <- biomass.vector <- age.vector <- vector() #params age <- 0:400 maximum.biomass <- 100 growth.rates <- c(0.025, 0.05, 0.1, 0.2, 0.4, 0.8) #iterating through growth rates for(growth.rate in growth.rates){ biomass.vector <- c(biomass.vector,maximum.biomass / (1 + maximum.biomass * exp(- growth.rate * age))) growth.rate.vector <- c(growth.rate.vector, rep(growth.rate, length(age))) age.vector <- c(age.vector, age) } plot.df3 <- data.frame(growth.rate=growth.rate.vector, age=age.vector, biomass=biomass.vector) plot.df3$growth.rate <- as.factor(plot.df3$growth.rate) p3 <- ggplot(data=plot.df3, aes(x=age, y=biomass, group=growth.rate, color=growth.rate)) + geom_line(size=1) + scale_color_viridis(option="D", discrete=TRUE, direction=-1) + ggtitle("Biomass gain under different growth rates") + ylab("Biomass (relative)") + xlab("Age (years)") p3 rm(age, age.vector, biomass.vector, growth.rate, growth.rate.vector, growth.rates, p3, plot.df3) ``` ###Population dynamics### The model starts with a population of 100 individuals with random ages, in the range [1, maximum age], taken from a uniform distribution (all ages are equiprobable). 
For each environmental suitability value, including the burn-in period, the model performs the following operations: 1. **Aging:** adds one year to the age of the individuals. 2. **Mortality due to senescence:** individuals reaching the maximum age are removed from the simulation. * **Local extinction and immigration** If the number of individuals drops to zero, the population is replaced by a "seed bank" of 100 individuals with age zero, and the simulation skips the remaining steps and jumps to the next simulation year. This is intended to simulate the arrival of seeds from nearby regions, and will only lead to population growth if environmental suitability is higher than zero. 3. **Plant growth:** Applies a plant growth equation to compute the biomass of every individual (see **Figure 8**). 4. **Carrying capacity:** If maximum population biomass is reached, individuals are iteratively selected for removal according to a mortality risk curve computed by **Equation 3** (see **Figure 9**). This curve gives removal preference to younger individuals, matching observed patterns in natural populations. 5. **Pollen productivity:** In each time step the model computes the pollen productivity (in relative values) of the population using **Equation 4**. 6. **Reproduction:** Generates as many seeds as reproductive individuals are available multiplied by the maximum fecundity and the environmental suitability of the given time. The model returns a table with climatic suitability, pollen production, and population size (reproductive individuals only) per simulation year. **Figure 10** shows the results of the population model when applied to the example virtual species. **Equation 3:** \Large $$P_{m} = 1 - \sqrt{a/A}$$ \normalsize Where: + $P_{m}$ is the probability of mortality. + $a$ is the age of the given individual. + $A$ is the maximum age reached by the virtual taxa. 
```{r , fig.height=3, fig.width=6, fig.cap="Risk curve defining the probability of removal of a given individual as a function of its fractional age when maximum carrying capacity is reached.", echo=FALSE} temp.df <- data.frame(Pm=NA, Age=0:100) temp.df$Pm <- 1 - sqrt(temp.df$Age/100) temp.df$Age <- temp.df$Age/100 ggplot(data=temp.df, aes(x=Age, y=Pm, color=Pm)) + geom_line(size=1) + scale_color_viridis(option="D", direction=-1) + ggtitle("Probability of mortality when carrying capacity is reached") + ylab("Removal probability") + xlab("Age of the individual (as proportion to the maximum age)") + theme(legend.position = "none") rm(temp.df) ``` **Equation 4:** \Large $$P_{t} = \sum x_{it} \times max(S_{t}, B)$$ \normalsize Where: + $t$ is time (a given simulation time step). + $P$ is the pollen productivity of the population at a given time. + $x_{i}$ represents the biomass of every adult individual. + $S$ is the environmental suitability at the given time. + $B$ is the contribution of biomass to pollen productivity regardless of environmental suitability (*pollen.control* parameter in the simulation, 0 by default). If $B$ equals 1, $P$ is equal to the total biomass sum of the adult population, regardless of the environmental suitability. If $B$ equals 0, pollen productivity depends entirely on environmental suitability values. The code below shows the core of the *simulatePopulation* function. It is slightly simplified to improve readability, and only pollen counts are written as output. Note that age of individuals is represented as a proportion of the maximum age to facilitate operations throughout the code. 
```{r, eval=FALSE, size="small"} #parameters (1st line in dataframe "parameters") maximum.age <- parameters[1, "maximum.age"] reproductive.age <- parameters[1, "reproductive.age"] growth.rate <- parameters[1, "growth.rate"] carrying.capacity <- parameters[1, "carrying.capacity"] fecundity <- parameters[1, "fecundity"] #reproductive age to proportion reproductive.age <- reproductive.age / maximum.age #years scaled taking maximum.age as reference scaled.year <- 1/maximum.age #vector to store pollen counts pollen.count <- vector() #starting population population <- sample(seq(0, 1, by=scaled.year), 100, replace=TRUE) #iterating through suitability (once per year) #------------------------------------ for(suitability.i in suitability){ #AGING ----------------------------------------------- population <- population + scaled.year #SENESCENCE ------------------------------------------ #1 is the maximum age of ages expressed as proportions population <- population[population < 1] #LOCAL EXTINCTION AND RECOLONIZATION ----------------- if (length(population) == 0){ #local extinction, replaces population with a seedbank population <- rep(0, floor(100 * suitability.i)) #adds 0 to pollen.count pollen.count <- c(pollen.count, 0) #jumps to next iteration next } #PLANT GROWTH --------------------------------------- #biomass of every individual biomass <- maximum.biomass / (1 + maximum.biomass * exp(- (growth.rate * suitability.i) * (population * maximum.age) ) ) #SELF-THINNING -------------------------------------- #carrying capacity reached while(sum(biomass) > carrying.capacity){ #removes a random individual based on risk curve individual.to.remove <- sample( x = length(population), size = 1, replace = TRUE, prob = 1 - sqrt(population) #risk curve ) #removing individuals from population and biomass population <- population[-individual.to.remove] biomass <- biomass[-individual.to.remove] }#end of while #REPRODUCTION -------------------------------------- #identifyies adult 
individuals adults <- population > reproductive.age #seeds (vector of 0s) #fractional biomass of adults * fecundity * suitability seeds <- rep(0, floor(sum((biomass[adults]/maximum.biomass) * fecundity) * suitability.i)) #adding seeds to the population population <- c(population, seeds) #POLLEN OUTPUT ------------------------------------- #biomass of adults multiplied by suitability pollen.count <- c(pollen.count, sum(biomass[adults]) * suitability.i) } #end of loop through suitability values ``` The code below executes the simulation, and plots the outcome using the function *plotSimulation*. ```{r , size="small", fig.height=6.5, fig.width=9, message=TRUE, warning=TRUE, error=TRUE, results="hide", fig.cap="Simulation outcome. Green shades represent different age groups (seedlings, saplings, and adults).", warning=FALSE, message=FALSE} #simulate population based on parameters simulation <- simulatePopulation(parameters=parameters[1, ], drivers=driver) #plotting simulation output plotSimulation(simulation.output=simulation, burnin=FALSE, panels=c("Driver A", "Suitability", "Population", "Pollen"), plot.title="", text.size=12, line.size=0.4) ``` The simulation outcomes can vary with the traits of the virtual species. **Table 2** shows the parameters of two new taxa named **Species 2** and **Species 3**. These species have a higher niche breadth than **Species 1**, and **Species 3** has a pollen productivity depending more on biomass than suitability (parameter *pollen.control* higher than zero). The comparison of both simulations (**Figure 11**) along with **Species 1** shows that different traits generate different pollen curves in our simulation. 
```{r, message=TRUE, warning=TRUE, echo=FALSE} parameters[2,] <- c("Species 2", 50, 20, 4, 0.3, 0, 100, 10000, 1, 0, 50, 15, 0, 0, 600, 600) parameters[3,] <- c("Species 3", 50, 20, 6, 0.4, 0.5, 100, 10000, 1, 0, 50, 20, 0, 0, 600, 600) parameters[, 2:ncol(parameters)] <- sapply(parameters[, 2:ncol(parameters)], as.numeric) parameters.t <- data.frame(t(parameters)) parameters.t <- parameters.t[c(2:9, 11:12),] names(parameters.t) <- paste(parameters$label, sep="") parameters.t$Parameter <- rownames(parameters.t) rownames(parameters.t) <- NULL parameters.t <- parameters.t[, c(ncol(parameters.t),1:(ncol(parameters.t)-1))] kable(parameters.t, caption="Parameters of the three virtual species.", booktabs = T) %>% kable_styling(latex_options = c("hold_position", "striped")) ``` ```{r , fig.height=5, fig.width=9, fig.cap="Comparison of the pollen abundance and environmental suitability (same in all cases) for the three virtual species shown in Table 2 within the period 5600-6400. Species 2 has a higher fecundity than Species 1 (1 vs 10)", message=FALSE} #simulating species 2 and 3 of the dataframe parameters simulation.2 <- simulatePopulation(parameters=parameters, species=c(2,3), drivers=driver) #adding the results to the previous simulation simulation <- c(simulation, simulation.2) rm(simulation.2) #plotting the comparison for the time interval between 4000 and 5000.- compareSimulations(simulation.output=simulation, species = "all", columns = c("Suitability", "Pollen"), time.zoom = c(5600, 6400)) ``` ###Testing the model limits### We searched for the minimum values of the parameters required to keep a simulated population viable under fully suitable conditions. 
The taxa *Test 1* and *Test 2* shown below ```{r, echo=FALSE} parameters.test <- parametersDataframe(rows=1) parameters.test[1,] <- c("Test 1", 4, 1, 0.55, 2, 0, 1, 30, 0.5, 0.5, 50, 10, 50, 10, 100, 100) parameters.test[2,] <- c("Test 2", 3, 1, 0.5, 2, 0, 1, 30, 0.5, 0.5, 50, 10, 50, 10, 100, 100) parameters.test[, 2:ncol(parameters.test)] <- sapply(parameters.test[, 2:ncol(parameters.test)], as.numeric) parameters.t <- data.frame(t(parameters.test)) parameters.t <- data.frame(parameters.t[-1,]) names(parameters.t) <- c("Test 1", "Test 2") parameters.t$Parameter <- rownames(parameters.t) rownames(parameters.t) <- NULL parameters.t <- parameters.t[, c(ncol(parameters.t),1:(ncol(parameters.t)-1))] parameters.t <- parameters.t[1:(nrow(parameters.t)-2),] kable(parameters.t, caption="Parameters of virtual taxa used to test model limits.") rm(parameters.t) ``` ```{r, message=FALSE} simulation.test.1 <- simulatePopulation( parameters=parameters.test, driver.A=jitter(rep(50, 500), amount=4) ) ``` The test driver used had an average of 50 (optimum values according the environmental niche of both species) for 500 years, with random noise added through the *jitter* function. The model results (column *Pollen* only) for both virtual taxa are shown below. ```{r, fig.height=3, fig.width=9, fig.cap="Pollen output of virtual taxa Test 1 and Test 2 for a 200 years time-window."} compareSimulations(simulation.output=simulation.test.1, columns="Pollen", time.zoom = c(0, 200)) ``` The outputs of the test taxa show how a minimal change in the parameters can lead to fully unstable results when the considered taxa are short lived. Considering such a result, values for life-traits (*maximum.age*, *reproductive.age*, *fecundity*, *growth.rate*, and *maximum.biomass*) of taxon *Test 1* should be taken as safe lower limits for these traits. A similar situation can happen with long lived species when the age of sexual maturity is close to the maximum age. 
The table below shows three new test species with long life-spans and increasing maturity ages. The driver is again centered in 50, with added white noise, and 2000 years length. ```{r, echo=FALSE} parameters.test[3,] <- c("Test 3", 1000, 100, 0.5, 0.05, 0, 100, 10000, 0.5, 0.5, 50, 10, 50, 10, 100, 100) parameters.test[4,] <- c("Test 4", 1000, 500, 0.5, 0.05, 0, 100, 10000, 0.5, 0.5, 50, 10, 50, 10, 100, 100) parameters.test[5,] <- c("Test 5", 1000, 900, 0.5, 0.05,0, 100, 10000, 0.5, 0.5, 50, 10, 50, 10, 100, 100) parameters.test[, 2:ncol(parameters.test)] <- sapply(parameters.test[, 2:ncol(parameters.test)], as.numeric) parameters.t <- data.frame(t(parameters.test[3:5,])) parameters.t <- data.frame(parameters.t[-1,]) names(parameters.t) <- c("Test 3", "Test 4", "Test 5") parameters.t$Parameter <- rownames(parameters.t) rownames(parameters.t) <- NULL parameters.t <- parameters.t[, c(ncol(parameters.t),1:(ncol(parameters.t)-1))] parameters.t <- parameters.t[1:(nrow(parameters.t)-2),] kable(parameters.t, caption="Parameters of virtual taxa used to test model limits.") rm(parameters.t) ``` ```{r, message=FALSE} simulation.test.2 <- simulatePopulation( parameters=parameters.test, species=c(3:5), driver.A=jitter(rep(50, 2000), amount=4) ) ``` ```{r, fig.height=3, fig.width=9, fig.cap="Pollen output of virtual taxa Test 1 and Test 2 for a 200 years time-window."} compareSimulations(simulation.output=simulation.test.2, columns="Pollen", time.zoom = c(0, 800)) ``` The figure shows how *Test 3* yields a stable pollen productivity across time, while *Test 4* and *Test 5* show, respectively, a very low productivity due to scarcity of adults, a total inability to sustain stable populations. Considering these results, it is important to keep a careful balance between the parameters *maximum.age* and *reproductive.age* to obtain viable virtual populations. 
#Simulating a virtual accumulation rate# Sediments containing pollen grains accumulate at varying rates, generally measured in *years per centimeter* (y/cm). Accumulation rates found in real datasets are broadly between 10 and 70 y/cm, with a gradual increase towards the present time. To simulate such an effect and aggregate the annual data produced by the simulation in a realistic manner we have written a function named *simulateAccumulationRate* that takes a random seed, a time-span, and a range of possible accumulation rate values, and generates a virtual accumulation rate curve. It does so by generating a random walk first, smoothing it through the application of a GAM model, and scaling it between given minimum and maximum accumulation rates (see **Figure 12**). ```{r , fig.height=3, fig.width=9, fig.cap="Virtual sediment accumulation rate."} accumulation.rate <- simulateAccumulationRate(seed=140, time=1:10000, output.min=1, output.max=50) ``` The output is a dataframe with three columns: *time*, *accumulation.rate*, and *grouping* (see **Table 4**). ```{r, echo=FALSE} kable(accumulation.rate[1:20, ], caption="Dataframe resulting from the function to generate virtual accumulation rates. Each group in the grouping column has as many elements as accumulation.rate the given group has.", booktabs = T) %>% kable_styling(latex_options = c("hold_position", "striped")) ``` Cases of the simulation data belonging to the same group according to the *grouping* column are aggregated together to simulate a *centimeter* of sedimented data. The data are aggregated by the function *aggregateSimulation*, that can additionally sample the resulting data at given depth intervals expressed in centimeters between consecutive samples (2, 6, and 10 cm in the code below). 
```{r, size="small"} simulation.aggregated <- aggregateSimulation( simulation.output=simulation, accumulation.rate=accumulation.rate, sampling.intervals=c(2, 6, 10) ) ``` The function returns a matrix-like list with as many rows as simulations are available in *simulation.output*, a column containing the data of the original simulations, a column with the data aggregated every centimeter, and the sampling intervals requested by the user. The data are accessed individually by list subsetting, as shown below (see **Figure 13**), to allow easy analysis and visualization. ```{r , fig.height=5, fig.width=9, fig.cap="Effect of applying accumulation rate and different sampling depth intervals to a section of the the annual data produced by the simulation (represented in the Legend by the label Annual). Note that the 10 cm resampling completely misses the whole high-suitability event in the Pollen panel, and barely registers it in the Suitability panel. Inter-decadal variability shown by the Annual data is completely lost even at 1 cm, the higher sampling resolution.", echo=FALSE} #checking results temp.list <- simulation.aggregated[1, 1:5] names(temp.list) <- c("Annual", "1 cm", "2 cm", "6 cm", "10 cm") compareSimulations(simulation.output=temp.list, columns = c("Suitability","Pollen"), time.zoom=c(5800, 6200)) ``` #Sampling virtual pollen curves at different depth intervals# Applying a virtual accumulation rate to the data generated by the population model at given depth intervals simulates to a certain extent how pollen deposition and sampling work in reality, and the outcome of that is data-points separated by regular depth intervals, but not regular time intervals. **Figure 14** shows that time intervals between consecutive samples produced by *aggregateSimulation* are not regular. However, analyzing ecological memory requires to organize the input data in regular time lags, and to do that the data need to have regular time intervals between consecutive cases. 
```{r , fig.height=3, fig.width=6, fig.cap="Histogram of the time differences (in years) between consecutive samples for the outcome of aggregateSimulation when resampled at intervals of 6 centimeters on Species 1. It clearly shows how the data are not organized in regular time intervals, and therefore are unsuitable for analyses requiring regular time lags.", echo=FALSE} #getting example data sampled at 2cm intervals simulated.data = simulation.aggregated[[1, 3]] #checking distribution of differences in age between consecutive samples hist(simulated.data[2:nrow(simulated.data),"Time"] - simulated.data[1:(nrow(simulated.data)-1),"Time"], main="Age differences between consecutive samples", xlab="Age difference between consecutive samples", col=viridis(12, begin = 0, end=1)) ``` Irregular time series can be interpolated into regular time series by using the *loess* function. This function fits a polynomial surface representing the relationship between two (or more) variables. The smoothness of this polynomial surface is modulated by the *span* parameter, and finding the right value for this parameter is critical to obtain an interpolation result as close as possible to the real data. The following code is able to find the value of *span* that maximizes the correlation between input and interpolated data for any given time series. 
```{r, size="small", eval=FALSE} #getting example data sampled at 2cm intervals simulated.data = simulation.aggregated[[1, 3]] #span values to be explored span.values=seq(20/nrow(simulated.data), 5/nrow(simulated.data), by=-0.0005) #plotting the optimization process in real time x11(height=12, width=24) #iteration through candidate span values for(span in span.values){ #function to interpolate the data interpolation.function = loess( Pollen ~ Time, data=simulated.data, span=span, control=loess.control(surface="direct")) #plot model fit plot(simulated.data$Pollen, type="l", lwd=2) lines(interpolation.function$fitted, col="red", lwd=2) #if correlation equals 0.9985 loop stops if(cor(interpolation.function$fitted, simulated.data$Pollen) >= 0.9985){break} } #gives time to look at result before closing the plot window Sys.sleep(5) ``` The function *toRegularTime* (usage shown below), uses the code above to interpolate the data produced by *aggregateSimulation* into a given time interval, expressed in years, returning a list of the same dimensions of the input list. ```{r, warning=FALSE, size="small"} simulation.interpolated <- toRegularTime( x=simulation.aggregated, time.column="Time", interpolation.interval=10, columns.to.interpolate=c("Pollen", "Suitability", "Driver.A") ) ``` **Figure 15** shows the same data segment shown in **Figure 13**, but with samples re-interpolated into a regular time grid at 10 years intervals. ```{r, echo=FALSE , fig.height=5, fig.width=9, message=TRUE, error=TRUE, warning=TRUE, fig.cap="Data aggregated using virtual accumulation rate and reinterpolated into a regular time grid of 10 years resolution."} #getting the results for Species 1 temp.list <- simulation.interpolated[1, 1:5] names(temp.list) <- c("Annual", "1 cm", "2 cm", "6 cm", "10 cm") #plotting comparison compareSimulations(simulation.output=temp.list, columns = c("Suitability","Pollen"), time.zoom=c(5800, 6200)) ```
/scratch/gouwar.j/cran-all/cranData/virtualPollen/vignettes/using_virtualPollen.Rmd
#' Convert a virtual species distribution (or a suitability raster) #' into presence-absence #' #' This function converts the probabilities of presence from the output of #' \code{\link{generateSpFromFun}}, \code{\link{generateSpFromPCA}}, #' \code{\link{generateRandomSp}} #' or a suitability raster into #' a presence-absence raster. The conversion can be threshold-based, or based #' on a probability of conversion (see details). #' #' @param x the output from functions #' \code{\link{generateSpFromFun}}, \code{\link{generateSpFromPCA}} #' or \code{\link{generateRandomSp}}, or a suitability SpatRaster #' @param PA.method \code{"threshold"} or \code{"probability"}. If #' \code{"threshold"}, then occurrence probabilities are simply converted into #' presence-absence according to the threshold \code{beta}. If #' \code{"probability"}, then #' probabilities are converted according to a logistic function of threshold #' \code{beta} and slope \code{alpha}. #' @param prob.method \code{"logistic"} or \code{"linear"}. Defines how #' the initial environmental suitability is translated into probabilities of #' presence/absence. #' @param beta \code{"random"}, a numeric value in the range of your #' probabilities or \code{NULL}. This is the threshold of conversion into #' presence-absence (if \code{PA.method = "probability"} and #' \code{prob.method = "logistic"}, then beta = the inflexion point). #' If \code{"random"}, a numeric value will be randomly generated within the range #' of \code{x}. #' @param alpha \code{NULL} or a negative numeric value. Only useful if #' \code{PA.method = "probability"} and \code{prob.method = "logistic"}. #' The value of \code{alpha} will #' shape the logistic function transforming occurrences into presence-absences. #' See \code{\link{logisticFun}} and examples therein for the choice of #' \code{alpha} #' @param a \code{NULL} or a numeric value. Only useful if #' \code{PA.method = "probability"} and \code{prob.method = "linear"}. 
#' Slope of the linear conversion of environmental suitability. #' @param b \code{NULL} or a numeric value. Only useful if #' \code{PA.method = "probability"} and \code{proba.method = "linear"}. #' Intercept of the linear conversion of environmental suitability. #' @param species.prevalence \code{NULL} or a numeric value between 0 and 1. #' The species prevalence is the proportion of sites actually occupied by the #' species. #' @param plot \code{TRUE} or \code{FALSE}. If \code{TRUE}, maps of #' probabilities #' of occurrence and presence-absence will be plotted. #' #' @export #' @import raster #' @author #' Boris Leroy \email{leroy.boris@@gmail.com} #' #' with help from C. N. Meynard, D.M. Kaplan, C. Bellard & F. Courchamp #' @references #' Meynard C.N. & Kaplan D.M. 2013. Using virtual species to study species #' distributions and model performance. #' \emph{Journal of Biogeography} \bold{40}:1-8 #' #' Meynard C.N. & Kaplan D.M. 2011. The effect of a gradual response to the #' environment on species distribution model performance. #' \emph{Ecography} \bold{35}:499-509 #' #' @details #' \href{http://borisleroy.com/virtualspecies_tutorial/04-presenceabsence.html}{ #' Online tutorial for this function} #' #' #' #' The conversion of environmental suitability into presence - absence used to #' be #' performed by selecting a threshold above which presence always occurs, #' and never below. However, this approach may is unrealistic because #' species may sometime be present in areas with a low probability of #' occurrence, #' or be absent from areas with a high probability of occurrence. In addition, #' when using a threshold you erase the previously generated response shapes: #' it all becomes threshold. Thus, this threshold approach should be avoided. #' #' #' A more #' realistic conversion consists in converting environmental suitability into #' presence - #' absence with a probability function (see references). 
Such a probability #' conversion can be performed with two different methods here: #' \enumerate{ #' \item{Using a logistic transformation of environmental suitability #' (see \code{\link{logisticFun}}). A logistic function on the other hand, will #' ensure that the simulated probability is within the 0-1 range and allow easy #' control of species prevalence. However, the #' logistic function will also flatten out the relationship at the extreme #' suitability values, and narrow or broaden the intermediate probability values #' depending on the slope of the logistic curve } #' \item{Using a linear transformation of environmental suitability. A linear #' transformation will preserve the shape of the originally simulated #' occurrence-environment relationships, uniformly increasing or decreasing the #' probabilities of occurrence across the landscape.}} #' #' --- note --- #' #' If the Virtual Species study aims at comparing simulated and predicted #' probability values, it is important to recover the correct simulated #' probability instead of directly using the initial suitability function. #' #' Therefore, the function stores the probability of occurrence in the #' output list, under the object \code{probability.of.occurrence}. #' The initial suitability function (before logistic or linear conversion) #' will still be stored in the output list as \code{suitab.raster}. #' #' -------------------------------------------------------------------------- #' #' \bold{PROBABILISTIC CONVERSION - LOGISTIC METHOD} #' #' To perform the logistic transformation of environmental suitability #' you have to define two of the #' three following parameters: #' \itemize{ #' \item{\code{beta}: the 'threshold' of the logistic function (i.e. the #' inflexion point. It should normaly be in the range of values of your #' environmental suitability.)} #' \item{\code{alpha}: the slope of the logistic function. 
It should generally #' be in value equal to something like 1/20 or 1/10 of your environmental #' suitability range} #' \item{\code{species.prevalence}: the proportion of sites in which the species #' occur} #' } #' #' If you provide \code{beta} and \code{alpha}, the \code{species.prevalence} #' is calculated immediately calculated after conversion into presence-absence. #' #' On the other hand, if you provide \code{species.prevalence} and either #' \code{beta} or \code{alpha}, the function will try to determine \code{alpha} #' (if you provided \code{beta}) or \code{beta} (if you provided \code{alpha}). #' #' The relationship between species prevalence, alpha and beta is dependent #' on the available range of environmental conditions (see Meynard and Kaplan, #' 2011 and especially the Supporting Information). As a consequence, the #' desired species prevalence may not be available for the defined \code{alpha} #' or \code{beta}. In these conditions, the function will retain the #' \code{alpha} or #' \code{beta} which provides the closest prevalence to your #' \code{species.prevalence}, #' but you may also provide another value of \code{alpha} or \code{beta} to #' obtain #' a closer prevalence. #' #' -------------------------------------------------------------------------- #' #' \bold{PROBABILISTIC CONVERSION - LINEAR METHOD } #' #' To perform the linear transformation of environmental suitability #' you have to define *one* of the following: #' \itemize{ #' \item{nothing - in which case your input environmental suitability will be #' used as the probability of occurrence for the Bernoulli trial (it is #' equivalent to defining a slope \code{a} of 1 and intercept \code{b} of 0.)} #' \item{the coefficients of the linear regression: slope \code{a} and #' intercept \code{b}. 
The transformed environmental suitability will #' be used as the probability of occurrence for the Bernoulli trial.} #' \item{\code{species.prevalence}: the proportion of sites in which the #' species occur. In this case, the function will try to find coefficients #' of a linear regression which results in the requested #' \code{species.prevalence} #' (see below).} #' } #' #' Method used to find coefficients of a linear regression which results in the #' requested \code{species.prevalence}: #' #' \enumerate{ #' \item{The simplest linear transformation of habitat suitability would #' be to just multiply the raw suitability by a constant. For example, if the #' raw average suitability in the area is 0.04, it means an expected prevalence #' of 0.40. To to go from this expected prevalence of 0.04 to an expected #' prevalence of 0.4, we can just multiply the raw suitability by 10. It is the #' default choice, unless it results in probabilities superior to 1 or raw #' suitability have values below 0, in which case the function proceeds to #' method 2.} #' \item{If it does not work, then we look at the line that passes through #' (min suitability, 0) and (mean suitability, desired prevalence). For this #' line, we only need to ensure that the maximum probability of occurence is #' lower than 1. Otherwise, the function proceeds to method 3.} #' \item{If method 2 fails, then we test the line going through (mean #' suitability, desired prevalence) and (max suitability, 1). If the minimum #' probability resulting from this line is greater than 0, then this method is #' correct. #' } #' } #' #' One of these 3 lines should always work. In fact, one of the last two has to #' work, and it does not hurt to try the first one which is simpler. #' #' -------------------------------------------------------------------------- #' #' In all cases, the \code{species.prevalence} indicated in the output is the #' prevalence measured on the output presence-absence map. 
#' #' @note #' The approximation of \code{alpha} or \code{beta} to the chosen #' \code{species.prevalence} may take time if you work on very large rasters. #' @return #' a \code{list} containing 6 elements: #' \itemize{ #' \item{\code{approach}: the approach used to generate the species, #' \emph{i.e.}, \code{"response"}} #' \item{\code{details}: the details and parameters used to generate the #' species} #' \item{\code{suitab.raster}: the environmental suitability of your virtual #' species, as a Raster object } #' \item{\code{probability.of.occurrence}: the probability of occurrence of your #' species, based on the chosen transformation of environmental suitability, #' as a Raster object } #' \item{\code{PA.conversion}: the parameters used to convert the suitability #' into presence-absence} #' \item{\code{pa.raster}: the presence-absence map, as a Raster object #' containing 0 (absence) / 1 (presence) / NA} #' } #' The structure of the virtualspecies object can be seen using \code{str()} #' #' #' @import terra #' @examples #' # Create an example stack with two environmental variables #' a <- matrix(rep(dnorm(1:100, 50, sd = 25)), #' nrow = 100, ncol = 100, byrow = TRUE) #' env <- c(rast(a * dnorm(1:100, 50, sd = 25)), #' rast(a * 1:100)) #' names(env) <- c("variable1", "variable2") #' #' # Creation of the parameter list #' parameters <- formatFunctions(variable1 = c(fun = 'dnorm', mean = 0.00012, #' sd = 0.0001), #' variable2 = c(fun = 'linearFun', a = 1, b = 0)) #' sp1 <- generateSpFromFun(env, parameters, plot = FALSE) #' #' # Conversion into presence-absence with a threshold-based approach #' convertToPA(sp1, PA.method = "threshold", beta = 0.2, plot = TRUE) #' convertToPA(sp1, PA.method = "threshold", beta = "random", plot = TRUE) #' #' # Conversion into presence-absence with a probability approach using logistic #' # method #' convertToPA(sp1, PA.method = "probability", beta = 0.4, #' alpha = -0.05, plot = TRUE) #' convertToPA(sp1, PA.method = 
"probability", beta = "random", #' alpha = -0.1, plot = TRUE) #' #' # Conversion into presence-absence with a probability approach using linear #' # method #' convertToPA(sp1, PA.method = "probability", prob.method = "linear", #' a = 1, b = 0, plot = TRUE) #' #' #' # Conversion into presence-absence by choosing the prevalence #' # Threshold method #' convertToPA(sp1, PA.method = "threshold", #' species.prevalence = 0.3, plot = TRUE) #' # Logistic method, with alpha provided #' convertToPA(sp1, PA.method = "probability", alpha = -0.1, #' species.prevalence = 0.2, plot = TRUE) #' # Logistic method, with beta provided #' convertToPA(sp1, PA.method = "probability", beta = 0.5, #' species.prevalence = 0.2, alpha = NULL, #' plot = TRUE) #' # Linear method #' convertToPA(sp1, PA.method = "probability", prob.method = "linear", #' species.prevalence = 0.2, #' plot = TRUE) #' convertToPA(sp1, PA.method = "probability", prob.method = "linear", #' species.prevalence = 0.5, #' plot = TRUE) #' convertToPA(sp1, PA.method = "probability", prob.method = "linear", #' species.prevalence = 0.8, #' plot = TRUE) #' #' # Plot the output Presence-Absence raster only #' sp1 <- convertToPA(sp1, plot = FALSE) #' plot(sp1$pa.raster) convertToPA <- function(x, PA.method = "probability", prob.method = "logistic", beta = "random", alpha = -.05, a = NULL, b = NULL, species.prevalence = NULL, plot = TRUE) { if(inherits(x, "virtualspecies")) { if(x$approach == "bca") { approach <- "bca" sp.raster <- x$suitab.raster.current } else if(inherits(x$suitab.raster, "SpatRaster")) { approach <- "not bca" sp.raster <- x$suitab.raster } else stop("x must be:\n- a raster layer object from package terra", " \nor\n- the output list", " from functions generateSpFromFun(), generateSpFromPCA(), ", "generateRandomSp() or generateSpFromBCA()") } else if (inherits(x, "RasterLayer")) { approach <- "not bca" sp.raster <- rast(x) } else if (inherits(x, "SpatRaster")) { approach <- "not bca" sp.raster <- x } else stop("x 
must be:\n- a raster layer object from package terra", " \nor\n- the output list", " from functions generateSpFromFun(), generateSpFromPCA(), ", "generateRandomSp() or generateSpFromBCA()") min_p <- global(sp.raster, min, na.rm = TRUE)[1, 1] max_p <- global(sp.raster, max, na.rm = TRUE)[1, 1] if(PA.method == "threshold") { if(is.numeric(beta)) { if(is.numeric(species.prevalence)) { warning("Both beta and species.prevalence were provided. beta will be ignored.") beta <- NULL } else if(beta < min_p) { warning("beta is lower than all values in your suitability raster. The species will most likely be present everywhere") } else if(beta > max_p) { warning("beta is higher than all values in your suitability raster. The species will most likely be absent everywhere") } } else if(beta == "random") { if(is.numeric(species.prevalence)) { beta <- NULL } else { beta <- sample(seq(min_p, max_p, length = 1000), 1) message(" --- Generating a random value of beta for the threshold conversion\n\n") } } else if(is.null(beta)) { if(is.null(species.prevalence)) { stop("Either provide beta or species.prevalence when choosing PA.method = 'threshold'") } } else { stop("beta must either be 'random', a numeric value (preferably within the range of your data or NULL") } } else if(PA.method == "probability") { if(prob.method == "logistic") { if(length(c(alpha, beta, species.prevalence)) <= 1) { if(!is.null(species.prevalence)) { warning("Neither alpha nor beta were provided. 
As a consequence, alpha will be determined to a random value, and beta will be adjusted automatically to the desired species prevalence.") alpha <- -sample(c(seq((max_p - min_p)/1000, (max_p - min_p)/100, length = 10), seq((max_p - min_p)/100, (max_p - min_p)/10, length = 100), seq((max_p - min_p)/10, (max_p - min_p)*10, length = 10)), size = 1) } else { stop("If you choose PA.method = 'probability', you must provide two of the three following parameters: beta, alpha and species.prevalence.") } } else if(length(c(alpha, beta, species.prevalence)) > 2) { if(beta != "random") { stop("You should not provide the three parameters beta, alpha and species.prevalence. Set beta to 'random' if you want to specify species.prevalence.") } beta <- NULL } # Checking the arguments. At this stage only two of them should be not NULL if(!is.null(beta)) { if(is.numeric(beta)) { if(beta < min_p) { warning("beta is lower than all values in your suitability raster. The species will most likely be present everywhere") } else if(beta > max_p) { warning("beta is higher than all values in your suitability raster. The species will most likely be absent everywhere") } } else if(beta == "random") { beta <- sample(seq(min_p, max_p, length = 1000), 1) message(" --- Generating a random value of beta for the logistic conversion\n\n") } else { stop("beta must either be 'random', a numeric value (preferably within the range of your data) or NULL") } } if(!is.null(species.prevalence)) { if(is.numeric(species.prevalence)) { if(!(species.prevalence >= 0 & species.prevalence <= 1)) { stop("species.prevalence must be a numeric value between 0 and 1.") } } else { stop("species.prevalence must either be a numeric value between 0 and 1 or NULL") } } if(!is.null(alpha)) { if(!is.numeric(alpha)) { stop("Please provide a numeric value to alpha") } else if(alpha > 0) { warning("alpha was provided > 0. 
This means that low probabilities will be converted to presences, and high probabilities will be converted to absences. If this is not what was intended, provide a negative alpha.") } } if(!is.null(species.prevalence)) { if(!is.null(beta)) { message(" --- Determing alpha automatically according to beta and species.prevalence\n\n") } else { message(" --- Determing beta automatically according to alpha and species.prevalence\n\n") } } else { message(" --- Determing species.prevalence automatically according to alpha and beta\n\n") } } else if(prob.method == "linear") { if(length(c(a, b, species.prevalence)) <= 1) { if(!is.null(species.prevalence)) { if(is.numeric(species.prevalence)) { if(!(species.prevalence >= 0 & species.prevalence <= 1)) { stop("species.prevalence must be a numeric value between 0 and 1.") } } else { stop("species.prevalence must either be a numeric value between 0 and 1 or NULL") } } else { if(length(c(a, b, species.prevalence)) < 1) { message(" --- No target prevalence provided; setting the linear transformation to slope 1 and intercept 0.") a <- 1 b <- 0 } else if(!is.null(a)) { stop("Only the slope (a) of the linear transformation was provided, please also provide the intercept (b).") } else if(!is.null(b)) { stop("Only the intercept (b) of the linear transformation was provided; please also provide the slope (a).") } } } else if(!is.null(species.prevalence) & (!is.null(a) | !is.null(b))) { stop("You should either provide species.prevalence or both a and b, but not a combination of species.prevalence, a and b.") } if(!is.null(b)) { if(!is.numeric(b)) { stop("b must be a numeric value or NULL") } } if(!is.null(a)) { if(!is.numeric(a)) { stop("a must be a numeric value or NULL") } } if(!is.null(species.prevalence)) { message(" --- Searching for a linear transformation of environmental suitability that fits the chosen species.prevalence.\n") } else { message(paste0(" --- Determing species prevalence automatically based on a linear transformation 
of environmental suitability of slope a = ", a, " and intercept b = ", b, "\n")) } } else { stop("prob.method must be either 'logistic' or 'linear'") } } if (PA.method == "probability") { if(prob.method == "logistic") { if(!is.null(species.prevalence)) { if(!is.null(beta)) { alpha.test <- NULL for (alpha in c((max_p - min_p)/1000, (max_p - min_p) * 10)) { if(alpha > 0) alpha <- -alpha proba.of.occurrence <- app(sp.raster, fun = function(x) { logisticFun(x, beta = beta, alpha = alpha) }) PA.raster <- .quickBernoulliTrial(proba.of.occurrence) alpha.test <- rbind(alpha.test, c(alpha, global(PA.raster, stat = "mean", na.rm = TRUE)[1, 1])) } epsilon <- species.prevalence - alpha.test[, 2] if(all(epsilon > 0)) { warning(paste("Warning, the desired species prevalence cannot be obtained, because of the chosen beta and available environmental conditions (see details). The closest possible estimate of prevalence was", round(alpha.test[2, 2], 2), "\nPerhaps you can try a lower beta value.")) alpha <- alpha.test[2, 1] } else if (all(epsilon < 0)) { warning(paste("Warning, the desired species prevalence cannot be obtained, because of the chosen beta and available environmental conditions (see details). The closest possible estimate of prevalence was", round(alpha.test[1, 2], 2), "\nPerhaps you can try a higher beta value.")) alpha <- alpha.test[1, 1] } else { while (all(abs(epsilon) > 0.01)) { alpha <- (alpha.test[which(epsilon == max(epsilon[epsilon < 0])), 1] + alpha.test[which(epsilon == min(epsilon[epsilon > 0])), 1]) / 2 proba.of.occurrence <- app(sp.raster, fun = function(x) { logisticFun(x, beta = beta, alpha = alpha) }) PA.raster <- .quickBernoulliTrial(proba.of.occurrence) alpha.test <- rbind(alpha.test, c(alpha, global(PA.raster, stat = "mean", na.rm = TRUE)[1, 1])) epsilon <- species.prevalence - alpha.test[, 2] } } } else { beta.test <- NULL # We define the upper and lower boundaries for beta. 
# We choose to be able to define beta values beyond the boundaries of # our probability of occurrence raster, so we can have a larger range # of prevalence for (beta in c(min_p - diff(c(min_p, max_p)) / 2, max_p + diff(c(min_p, max_p)) / 2)) { proba.of.occurrence <- app(sp.raster, fun = function(x) { logisticFun(x, beta = beta, alpha = alpha) }) PA.raster <- .quickBernoulliTrial(proba.of.occurrence) beta.test <- rbind(beta.test, c(beta, global(PA.raster, stat = "mean", na.rm = TRUE)[1, 1])) } epsilon <- data.frame(epsi = species.prevalence - beta.test[, 2], prevalence = beta.test[, 2]) if(all(epsilon$epsi > 0)) { warning(paste("Warning, the desired species prevalence may not", " be obtained, because of the chosen alpha and ", "available environmental conditions (see details). ", "The closest possible estimate of prevalence was", round(beta.test[1, 2], 3), "\nPerhaps you can try an alpha value closer to 0.")) beta <- beta.test[1, 1] } else if (all(epsilon$epsi < 0)) { warning(paste("Warning, the desired species prevalence may be ", "obtained, because of the chosen beta and available environmental ", "conditions (see details). 
", "The closest possible estimate of prevalence was", round(beta.test[2, 2], 3), "Perhaps you can try an alpha value closer to 0.")) beta <- beta.test[2, 1] } else { while (all(apply(epsilon, 1, function(x) ifelse(abs(x[1]) > 0.001, TRUE, ifelse(x[2] == 0, TRUE, FALSE))))) { beta <- (beta.test[which(epsilon$epsi == max(epsilon$epsi[epsilon$epsi < 0])), 1][1] + beta.test[which(epsilon$epsi == min(epsilon$epsi[epsilon$epsi > 0])), 1][1]) / 2 proba.of.occurrence <- app(sp.raster, fun = function(x) { logisticFun(x, beta = beta, alpha = alpha) }) PA.raster <- .quickBernoulliTrial(proba.of.occurrence) beta.test <- rbind(beta.test, c(beta, global(PA.raster, stat = "mean", na.rm = TRUE)[1, 1])) epsilon <- data.frame(epsi = species.prevalence - beta.test[, 2], prevalence = beta.test[, 2]) } } } } proba.of.occurrence <- app(sp.raster, fun = function(x) { logisticFun(x, beta = beta, alpha = alpha) }) PA.raster <- .quickBernoulliTrial(proba.of.occurrence) if(global(PA.raster, stat = "max", na.rm = TRUE)[1, 1] == 0) # Necessary to generate # species with very low prevalence # Without this step, rasters with only zeros can be generated { while(global(PA.raster, stat = "max", na.rm = TRUE)[1, 1] == 0) { proba.of.occurrence <- app(sp.raster, fun = function(x) { logisticFun(x, beta = beta, alpha = alpha) }) PA.raster <- .quickBernoulliTrial(proba.of.occurrence) } } if(approach == "bca") { proba.of.occurrence.future <- app(x$suitab.raster.future, fun = function(x) { logisticFun(x, beta = beta, alpha = alpha) }) PA.raster.future <- .quickBernoulliTrial(proba.of.occurrence.future) } } else if(prob.method == "linear") { if(!is.null(species.prevalence)) { tmp <- .findLinearConversion(sp.raster, target.prevalence = species.prevalence) a <- tmp$a b <- tmp$b proba.of.occurrence <- unwrap(tmp$probability.of.occurrence) PA.raster <- unwrap(tmp$distribution) if(approach == "bca") { proba.of.occurrence.future <- .transf(x$suitab.raster.future, c(a, b)) PA.raster.future <- 
.quickBernoulliTrial(proba.of.occurrence.future) } } else { proba.of.occurrence <- .transf(sp.raster, c(a, b)) PA.raster <- .quickBernoulliTrial(proba.of.occurrence) if(approach == "bca") { proba.of.occurrence.future <- .transf(x$suitab.raster.future, c(a, b)) PA.raster.future <- .quickBernoulliTrial(proba.of.occurrence.future) } } } else if(prob.method == "truncated linear") { # This one is not documented in the package, I have only used it for # self testings. It works if anyone is interested enough to read these # lines ;) tmp <- .findTruncatedLinearConversion( sp.raster, target.prevalence = species.prevalence) a <- tmp$a b <- tmp$b proba.of.occurrence <- unwrap(tmp$probability.of.occurrence) PA.raster <- unwrap(tmp$distribution) if(approach == "bca") { proba.of.occurrence.future <- .transf(x$suitab.raster.future, c(a, b)) PA.raster.future <- .quickBernoulliTrial(proba.of.occurrence.future) } } } else if (PA.method == "threshold") { if(!is.null(species.prevalence)) { beta <- global(sp.raster, fun = quantile, probs = 1 - species.prevalence, na.rm = TRUE)[1, 1] } PA.raster <- proba.of.occurrence <- classify(sp.raster, matrix(data = c(-Inf, beta, 0, beta, +Inf, 1), ncol = 3, nrow = 2, byrow = TRUE)) if(approach == "bca") { PA.raster.future <- proba.of.occurrence.future <- classify(x$suitab.raster.future, matrix(data = c(-Inf, beta, 0, beta, +Inf, 1), ncol = 3, nrow = 2, byrow = TRUE)) } } else { stop("Wrong PA.method entered (either 'probability' or 'threshold')") } species.prevalence <- global(PA.raster, stat = "mean", na.rm = TRUE)[1, 1] if(inherits(x, "virtualspecies")) { if(PA.method == "threshold") { x$PA.conversion = c(conversion.method = PA.method, cutoff = beta, species.prevalence = species.prevalence) message(paste0(" Threshold conversion finished: \n- cutoff = ", beta, "\n- species prevalence =", species.prevalence, "\n\n")) } else if(prob.method == "logistic") { x$PA.conversion = c(conversion.method = PA.method, probabilistic.method = prob.method, alpha 
= alpha, beta = beta, species.prevalence = species.prevalence) message(paste0(" Logistic conversion finished: \n- beta = ", beta, "\n- alpha = ", alpha, "\n- species prevalence =", species.prevalence, "\n\n")) } else if(prob.method == "linear") { names(a) <- NULL names(b) <- NULL x$PA.conversion = c(conversion.method = PA.method, probabilistic.method = prob.method, a = a, b = b, species.prevalence = species.prevalence) message(paste0(" Linear conversion finished: \n- slope (a) = ", a, "\n- intercept (b) = ", b, "\n- species prevalence =", species.prevalence, "\n\n")) } if(approach == "bca") { x$probability.of.occurrence.current <- wrap(proba.of.occurrence) x$pa.raster.current <- wrap(as.numeric(PA.raster)) x$probability.of.occurrence.future <- wrap(proba.of.occurrence.future) x$pa.raster.future <- wrap(as.numeric(PA.raster.future)) } else { x$probability.of.occurrence <- wrap(proba.of.occurrence) x$pa.raster <- wrap(as.numeric(PA.raster)) } results <- x if(plot) { if(approach == "bca") { plot(c(results$suitab.raster.current, results$probability.of.occurrence.current, results$pa.raster.current, results$suitab.raster.future, results$probability.of.occurrence.future, results$pa.raster.future), main = c("Current suitability", "Current prob of occ", "Current pres-abs", "Future suitability", "Future prob of occ", "Future pres-abs"), col = rev(viridis::magma(10))) } else { plot(c(results$suitab.raster, results$probability.of.occurrence, results$pa.raster), main = c("Environmental suitability", "Probability of occurrence", "Presence-absence"), col = rev(viridis::magma(10))) } } } else if (inherits(x, "SpatRaster")) { if(PA.method == "threshold") { PA.conversion = c(cutoff = beta, conversion.method = PA.method, species.prevalence = species.prevalence) message(paste0(" Threshold conversion finished: \n- cutoff = ", beta, "\n- species prevalence =", species.prevalence, "\n\n")) } else if(prob.method == "logistic") { PA.conversion = c(conversion.method = PA.method, 
probabilistic.method = prob.method, alpha = alpha, beta = beta, species.prevalence = species.prevalence) message(paste0(" Logistic conversion finished: \n- beta = ", beta, "\n- alpha = ", alpha, "\n- species prevalence =", species.prevalence, "\n\n")) } else if(prob.method == "linear") { PA.conversion = c(conversion.method = PA.method, probabilistic.method = prob.method, a = a, b = b, species.prevalence = species.prevalence) message(paste0(" Linear conversion finished: \n- slope (a) = ", a, "\n- intercept (b) = ", b, "\n- species prevalence =", species.prevalence, "\n\n")) } results <- list(suitab.raster = wrap(x), probability.of.occurrence = wrap(proba.of.occurrence), PA.conversion = PA.conversion, pa.raster = wrap(as.numeric(PA.raster))) if(plot) plot(c(results$suitab.raster, results$probability.of.occurrence, results$pa.raster), main = c("Suitability", "Probability of ocurrence", "Presence-absence"), col = rev(viridis::magma(10))) } return(results) } .quickBernoulliTrial <- function(prob.raster, ...) 
{ # Raster of same dimentions than the probability raster random.numbers <- rast(x = prob.raster) # Generate random numbers between 0 and 1 from uniform distribution random.numbers <- setValues (random.numbers, stats::runif(ncell(prob.raster), 0, 1)) # Attribute presence or absence on the basis of whether the random number # is above or below the probability of occurrence pa.raster <- prob.raster > random.numbers } .transf <- function(x, coefs) { x <- x * coefs[1] + coefs[2] minval <- global(x, min, na.rm = TRUE)[1, 1] maxval <- global(x, max, na.rm = TRUE)[1, 1] if(minval < 0 | maxval > 1) { if(minval < 0 & maxval > 1) { message("The linear transformation resulted in probability values below 0 and above 1, so these were respectively truncated to 0 and 1\n") } else if(minval < 0 ) { message("The linear transformation resulted in probability values below 0 so these were truncated to 0\n") } else { message("The linear transformation resulted in probability values above 1 so these were truncated to 1\n") } } x[x < 0] <- 0 x[x > 1] <- 1 return(x) } .findLinearConversion = function(suit.raster, target.prevalence) { suit.max <- global(suit.raster, max, na.rm = TRUE)[1, 1] suit.mean <- global(suit.raster, stat = "mean", na.rm = TRUE)[1, 1] suit.min <- global(suit.raster, na.rm = TRUE)[1, 1] xs = c(suit.min, suit.max) ys = c(0, 1) # Only include (0,0) case if suitability >= 0 if (suit.min >= 0) { xs = c(0, xs) ys = c(0, ys) } AB = .abcoefs(suit.mean, target.prevalence, xs, ys) ymn = .lab(suit.min, AB$a, AB$b) ymx = .lab(suit.max, AB$a, AB$b) # Round to avoid very small floating point calculation errors ymn = round(ymn, 6) ymx = round(ymx, 6) I = min(which(ymn >= 0 & ymx <= 1)) # Find first one that works #Calculate the resulting prevalence: new.suit <- AB$a[I] * suit.raster + AB$b[I] distr = .quickBernoulliTrial(new.suit) prev = global(distr, stat = "mean", na.rm = TRUE)[1, 1] return(list(a = AB$a[I], b = AB$b[I], prevalence = prev, probability.of.occurrence = 
# Tail of a function whose definition begins before this chunk: closes a
# returned list of wrapped SpatRaster objects and the enclosing function body.
wrap(new.suit), distribution = wrap(distr)))
}

# Get line coefficients (intercept b, slope a) from two points
# (x1, y1) and (x2, y2).
.abcoefs = function(x1, y1, x2, y2)
{
  list(b = y1 - x1 * (y1 - y2) / (x1 - x2),  # Intercept
       a = (y1 - y2) / (x1 - x2))            # Slope
}

# Function for a line with intercept (b) and slope (a)
.lab = function(x, b, a)
{
  a * x + b
}

# Search for a linear transformation (slope, intercept) of a suitability
# raster such that, after a Bernoulli trial on the transformed values
# (.quickBernoulliTrial, defined elsewhere in this file), the realised
# prevalence (mean of the 0/1 distribution raster) approaches
# target.prevalence within a 0.01 tolerance.
#
# Arguments:
#   suit.raster       SpatRaster of environmental suitability (terra).
#   target.prevalence numeric, desired proportion of presence cells.
#   m                 logical; if TRUE, prints progress messages (verbose).
#   plot.conv         not referenced anywhere in this function body
#                     (NOTE(review): possibly vestigial — confirm).
#
# Returns a list: a (slope), b (intercept), prevalence (achieved),
# probability.of.occurrence and distribution as wrap()ped SpatRasters.
#
# NOTE(review): the intercepts are computed as slope * mean.suit -
# target.prevalence; whether this yields a transform passing through
# (mean suitability, target prevalence) depends on the sign convention of
# .transf (defined elsewhere) — verify against .transf.
.findTruncatedLinearConversion <- function(suit.raster, target.prevalence,
                                           m = FALSE, plot.conv = FALSE)
{
  # Summary statistics of the suitability raster
  max.suit <- global(suit.raster, stat = "max", na.rm = TRUE)[1, 1]
  min.suit <- global(suit.raster, stat = "min", na.rm = TRUE)[1, 1]
  mean.suit <- global(suit.raster, stat = "mean", na.rm = TRUE)[1, 1]

  # Two candidate slopes bracketing the search: one anchored on the
  # max-to-mean range, one on the mean-to-min range
  slopemax <- (1 - target.prevalence) / (max.suit - mean.suit)
  slopemin <- target.prevalence / (mean.suit - min.suit)
  bmax <- slopemax * mean.suit - target.prevalence
  bmin <- slopemin * mean.suit - target.prevalence

  # Realised prevalence for each candidate transformation
  new.suit.max = .transf(suit.raster, coefs = c(slopemax, bmax))
  distr.max <- .quickBernoulliTrial(new.suit.max)
  prev.max <- global(distr.max, stat = "mean", na.rm = TRUE)[1, 1]
  new.suit.min = .transf(suit.raster, coefs = c(slopemin, bmin))
  distr.min <- .quickBernoulliTrial(new.suit.min)
  prev.min <- global(distr.min, stat = "mean", na.rm = TRUE)[1, 1]

  # Choose a starting point (params/prev0/distr0/raster0) depending on
  # where the target prevalence falls relative to the two candidates
  if(target.prevalence > max(c(prev.max, prev.min)))
  {
    # Target above both candidates: start from the larger one
    if(max(c(prev.max, prev.min)) == prev.min)
    {
      params <- c(slope = slopemin, intercept = bmin)
      prev0 <- prev.min
      distr0 <- distr.min
      raster0 <- new.suit.min
    } else
    {
      params <- c(slope = slopemax, intercept = bmax)
      prev0 <- prev.max
      distr0 <- distr.max
      raster0 <- new.suit.max
    }
  } else if (target.prevalence < min(c(prev.max, prev.min)))
  {
    # Target below both candidates: start from the smaller one
    if(min(c(prev.max, prev.min)) == prev.min)
    {
      params <- c(slope = slopemin, intercept = bmin)
      prev0 <- prev.min
      distr0 <- distr.min
      raster0 <- new.suit.min
    } else
    {
      params <- c(slope = slopemax, intercept = bmax)
      prev0 <- prev.max
      distr0 <- distr.max
      raster0 <- new.suit.max
    }
  } else
  {
    # Target between the two candidates: interpolate the slope linearly
    pos <- (target.prevalence - min(c(prev.max, prev.min))) /
      (max(c(prev.max, prev.min)) - min(c(prev.max, prev.min)))
    slopepos <- pos * (max(c(slopemax, slopemin)) -
                         min(c(slopemax, slopemin))) + min(c(slopemax, slopemin))
    bpos <- slopepos * mean.suit - target.prevalence
    params <- c(slope = slopepos, intercept = bpos)
    raster0 <- .transf(suit.raster, coefs = params)
    distr0 <- .quickBernoulliTrial(raster0)
    prev0 <- global(distr0, stat = "mean", na.rm = TRUE)[1, 1]
  }
  if(m) cat("prev0 = ", prev0, "\n")
  slop <- params[1]
  b0 <- params[2]

  # Refine the intercept only if the starting prevalence is not already
  # within the 0.01 tolerance
  if(abs(prev0 - target.prevalence) > 0.01)
  {
    if(m) cat("Determining b1...\n")
    # Pick a second intercept b1 on the other side of the target
    if(prev0 < target.prevalence)
    {
      b1 <- .99 # Just below 1 to avoid a response fully equal to 1
    } else
    {
      b1 <- - global(suit.raster, stat = "max",
                     na.rm = TRUE)[1, 1] * slop + 0.01
      # Just above minimum value to avoid a response fully equal to 0
    }
    raster1 = .transf(suit.raster, coefs = c(slop, b1))
    distr1 = .quickBernoulliTrial(raster1)
    prev1 <- global(distr1, stat = "mean", na.rm = TRUE)[1, 1]
    if(m) cat("prev1 = ", prev1, "\n")
    if(abs(prev1 - target.prevalence) > 0.01)
    {
      if(m) cat("Finding a better b...\n")
      # Bisection between b0 and b1 (median of two values = midpoint)
      b0.5 <- stats::median(c(b0, b1))
      raster0.5 = .transf(suit.raster, coefs = c(slop, b0.5))
      distr0.5 = .quickBernoulliTrial(raster0.5)
      prev0.5 <- global(distr0.5, stat = "mean", na.rm = TRUE)[1, 1]
      while(abs(prev0.5 - target.prevalence) > 0.01)
      {
        # Keep the half-interval that still brackets the target.
        # NOTE(review): prev0 is never updated inside this loop; the
        # bracketing test always compares against the initial prev0 —
        # confirm whether this is intentional.
        if(target.prevalence >= min(c(prev0, prev0.5)) &
           target.prevalence <= max(c(prev0, prev0.5)))
        {
          b1 <- b0.5
        } else
        {
          b0 <- b0.5
        }
        b0.5 <- stats::median(c(b0, b1))
        raster0.5 = .transf(suit.raster, coefs = c(slop, b0.5))
        distr0.5 = .quickBernoulliTrial(raster0.5)
        prev0.5 <- global(distr0.5, stat = "mean", na.rm = TRUE)[1, 1]
        if(m) cat("b0.5 = ", b0.5, " - prevalence = ", prev0.5, "\n")
      }
      rasterfinal <- raster0.5
      bfinal <- b0.5
      prevfinal <- prev0.5
      distrfinal <- distr0.5
      if(m) cat("Best b is equal to b0.5\n")
    } else
    {
      if(m) cat("Best b is equal to b1\n")
      rasterfinal <- raster1
      bfinal <- b1
      prevfinal <- prev1
      distrfinal <- distr1
    }
  } else
  {
    if(m) cat("Best b is equal to b0\n")
    bfinal <- b0
    rasterfinal <- raster0
    prevfinal <- prev0
    distrfinal <- distr0
  }
  # Rasters are wrap()ped so the result can be serialised/passed safely
  return(list(a = slop,
              b = bfinal,
              prevalence = prevfinal,
              probability.of.occurrence = wrap(rasterfinal),
              distribution = wrap(distrfinal)))
}
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/convertToPA.R
#' Format and visualise functions used to generate virtual species with \code{\link{generateSpFromFun}} #' #' @description #' This function is a helper function to simplify the formatting of functions #' for \code{\link{generateSpFromFun}} #' @details #' This function formats the \code{parameters} argument of \code{\link{generateSpFromFun}}. #' For each environmental variable, provide a vector containing the function name, and its arguments. #' #' #' For example, assume we want to generate a species responding to two environmental variables bio1 and bio2. #' \itemize{ #' \item{The response to bio1 is a normal response (\code{\link{dnorm}}), of mean 1 and standard deviation 0.5.} #' \item{The response to bio2 is a linear response (\code{\link{linearFun}}), of slope (a) 2 and intercept (b) 5.} #' } #' The correct writing is: #' #' \code{formatFunctions( #' bio1 = c(fun = "dnorm", mean = 1, sd = 0.5), #' bio2 = c(fun = "linearFun", a = 2, b = 5))} #' #' #' #' @section Warning: #' Do not use 'x' as a name for your environmental variables. #' @param x NULL or a \code{RasterStack}. If you want to visualise the functions, #' provide your \code{RasterStack} here. #' @param rescale \code{TRUE} or \code{FALSE}. If \code{TRUE}, individual response #' plots are rescaled between 0 and 1 with the formula (val - min) / (max - min). #' @param ... the parameters to be formatted. See details. #' @export #' @import raster #' @author #' Boris Leroy \email{leroy.boris@@gmail.com} #' #' with help from C. N. Meynard, C. Bellard & F. 
Courchamp #' @examples #' my.parameters <- formatFunctions(variable1 = c(fun = 'dnorm', #' mean = 0.00012, sd = 0.0001), #' variable2 = c(fun = 'linearFun', a = 1, b = 0)) #' #' #' my.parameters <- formatFunctions(bio1 = c(fun = "logisticFun", #' alpha = -12.7, beta = 68), #' bio2 = c(fun = "linearFun", #' a = -0.03, b = 191.2), #' bio3 = c(fun = "dnorm", #' mean = 86.4, sd = 19.1), #' bio4 = c(fun = "logisticFun", #' alpha = 2198.5, beta = 11381.4)) #' \dontrun{ #' # An example using worldclim data #' bio1.4 <- getData('worldclim', var='bio', res=10)[[1:4]] #' my.parameters <- formatFunctions(x = bio1.4, #' bio1 = c(fun = "logisticFun", #' alpha = -12.7, beta = 68), #' bio2 = c(fun = "linearFun", #' a = -0.03, b = 191.2), #' bio3 = c(fun = "dnorm", #' mean = 86.4, sd = 19.1), #' bio4 = c(fun = "logisticFun", #' alpha = 2198.5, beta = 11381.4)) #' } formatFunctions <- function(x = NULL, rescale = TRUE, ...) { details <- list() args <- list(...) for (i in names(args)) { if(!("fun" %in% names(args[[i]]))) { stop(paste("No function was correctly provided for variable", i)) } details[[i]]$fun <- args[[i]]["fun"] args[[i]] <- args[[i]][-(names(args[[i]]) %in% "fun")] details[[i]]$args <- as.list(args[[i]]) a <- sapply(args[[i]], as.numeric) if(any(is.na(a))) { details[[i]]$args[[which(!is.na(a))]] <- sapply(details[[i]]$args[[which(!is.na(a))]], as.numeric) } else { details[[i]]$args <- a } } if(inherits(x, "Raster")) { x <- rast(x) } if(inherits(x, "SpatRaster")) { plotResponse(x = x, parameters = details, rescale = rescale, approach = "response") } else if (!is.null(x)) { stop("x must either be NULL or a raster stack of environmental variables") } return(details) }
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/formatFunctions.R
#' Generate a random virtual species distribution from environmental variables #' #' @description #' This function generates randomly a virtual species distribution. #' #' @param raster.stack a SpatRaster object, in which each layer represent an #' environmental #' variable. #' @param approach \code{"automatic"}, \code{"random"}, \code{"response"} #' or \code{"pca"}. This parameters defines how species will be generated. #' \code{"automatic"}: If less than 6 variables in \code{raster.stack}, a #' response approach will be used, otherwise a pca approach will be used. #' \code{"random"}: the approach will be randomly picked. Otherwise choose #' \code{"response"} or \code{"pca"}. See details. #' @param rescale \code{TRUE} or \code{FALSE}. If \code{TRUE}, the final #' probability of presence is rescaled between 0 and 1. #' @param convert.to.PA \code{TRUE} or \code{FALSE}. If \code{TRUE}, the #' virtual species distribution will also be converted into Presence-Absence. #' @param relations [response approach] a vector containing the possible types #' of response function. #' The implemented type of relations are \code{"gaussian"}, \code{"linear"}, #' \code{"logistic"} and \code{"quadratic"}. #' @param rescale.each.response \code{TRUE} or \code{FALSE}. If \code{TRUE}, #' the individual responses to #' each environmental variable are rescaled between 0 and 1 #' @param realistic.sp [response approach] \code{TRUE} or \code{FALSE}. If #' \code{TRUE}, the function will try to define responses that can form a viable #' species. If \code{FALSE}, the responses will be randomly generated #' (may result in environmental conditions that do not co-exist). #' @param species.type [response approach] \code{"additive"} or #' \code{"multiplicative"}. Defines #' how the final probability of presence is calculated: if \code{"additive"}, #' responses to each #' variable are summed; if \code{"multiplicative"}, responses are multiplied. 
#' See \code{\link{generateSpFromFun}} #' @param niche.breadth [pca approach] \code{"any"}, \code{"narrow"} or #' \code{"wide"}. This parameter #' defines how tolerant is the species regarding environmental conditions by #' adjusting #' the standard deviations of the gaussian functions. See #' \code{\link{generateSpFromPCA}} #' @param sample.points [pca approach] \code{TRUE} of \code{FALSE}. If you have #' a large #' raster file then use this parameter to sample a number of points equal to #' \code{nb.points}. #' @param nb.points [pca approach] a numeric value. Only useful if #' \code{sample.points = TRUE}. #' The number of sampled points from the raster, to perform the PCA. A too small #' value may not be representative of the environmental conditions in your #' raster. #' @param PA.method \code{"threshold"} or \code{"probability"}. If #' \code{"threshold"}, then occurrence probabilities are simply converted into #' presence-absence according to the threshold \code{beta}. If #' \code{"probability"}, then #' probabilities are converted according to a logistic function of threshold #' \code{beta} and slope \code{alpha}. #' @param beta \code{"random"}, a numeric value in the range of your #' probabilities or \code{NULL}. This is the threshold of conversion into #' presence-absence (= the inflexion point if \code{PA.method = "probability"}). #' If \code{"random"}, a numeric value will be randomly generated within #' the range #' of probabilities of occurrence. See \code{\link{convertToPA}} #' @param alpha \code{NULL} or a negative numeric value. Only useful if #' \code{PA.method = "probability"}. The value of \code{alpha} will #' shape the logistic function transforming occurrences into presence-absences. #' See \code{\link{logisticFun}} and examples therein for the choice of #' \code{alpha} #' @param adjust.alpha \code{TRUE} or \code{FALSE}. Only useful if #' \code{rescale = FALSE}. 
If \code{adjust.alpha = TRUE}, then the value #' of \code{alpha} will #' be adjusted to an appropriate value for the range of suitabilities. #' @param species.prevalence \code{NULL} or a numeric value between 0 and 1. #' The species prevalence is the proportion of sites actually occupied by the #' species. See \code{\link{convertToPA}} #' @param plot \code{TRUE} or \code{FALSE}. If \code{TRUE}, the generated #' virtual species will be plotted. #' @details #' \href{http://borisleroy.com/virtualspecies_tutorial/05-randomspecies.html}{ #' Online tutorial for this function} #' #' #' #' This function generate random virtual species, either using a PCA #' approach, or using #' a response approach. In case of a response approach, only four response #' functions are #' currently used: gaussian, linear, logistic and quadratic functions. #' #' Note that in case of numerous predictor variables, the "response" #' approach will #' not work well because it will often generate contradicting response functions #' (e.g., mean annual temperature optimum at degrees C and temperature #' of the coldest month at #' 10 degrees C). In these case, it is advised to use the PCA approach #' (by default, a PCA approach #' will be used if there are more than 6 predictor variables). #' #' If \code{rescale.each.response = TRUE}, then the probability response to each #' variable will be normalised between 0 and 1 according to the following #' formula: #' P.rescaled = (P - min(P)) / (max(P) - min (P)). Similarly, if #' \code{rescale = TRUE}, #' the final environmental suitability will be rescaled between 0 and 1 #' with the same formula. #' #' By default, the function will perform a probabilistic conversion into #' presence- #' absence, with a randomly chosen beta threshold. If you want to customise the #' conversion parameters, you have to define \bold{two} of the three #' following parameters: #' \itemize{ #' \item{\code{beta}: the 'threshold' of the logistic function (i.e. 
the #' inflexion point)} #' \item{\code{alpha}: the slope of the logistic function} #' \item{\code{species.prevalence}: the proportion of sites in which the species #' occur} #' } #' #' If you provide \code{beta} and \code{alpha}, the \code{species.prevalence} #' is calculated immediately calculated after conversion into presence-absence. #' #' As explained in \code{\link{convertToPA}}, if you choose choose a precise #' \code{species.prevalence}, it may not be possible to reach this particular #' value because of the availability of environmental conditions. Several #' runs may be necessary to reach the desired \code{species.prevalence}. #' @import terra #' @export #' @author #' Boris Leroy \email{leroy.boris@@gmail.com} #' #' with help from C. N. Meynard, C. Bellard & F. Courchamp #' @return a \code{list} with 3 to 5 elements (depending if the conversion #' to presence-absence was performed): #' \itemize{ #' \item{\code{approach}: the approach used to generate the species, #' \emph{i.e.}, \code{"response"}} #' \item{\code{details}: the details and parameters used to generate the #' species} #' \item{\code{suitab.raster}: the virtual species distribution, as a #' SpatRaster object containing the #' environmental suitability)} #' \item{\code{PA.conversion}: the parameters used to convert the suitability #' into presence-absence} #' \item{\code{pa.raster}: the presence-absence map, as a SpatRaster object #' containing 0 (absence) / 1 (presence) / NA} #' } #' The structure of the virtualspecies can object be seen using \code{str()} #' @examples #' # Create an example stack with six environmental variables #' a <- matrix(rep(dnorm(1:100, 50, sd = 25)), #' nrow = 100, ncol = 100, byrow = TRUE) #' env <- c(rast(a * dnorm(1:100, 50, sd = 25)), #' rast(a * 1:100), #' rast(a * logisticFun(1:100, alpha = 10, beta = 70)), #' rast(t(a)), #' rast(exp(a)), #' rast(log(a))) #' names(env) <- paste("Var", 1:6, sep = "") #' #' # More than 6 variables: by default a PCA approach will 
#' be used
#' generateRandomSp(env)
#'
#' # Manually choosing a response approach: this may fail because it is hard
#' # to find a realistic species with six distinct responses to six variables
#' \donttest{
#' generateRandomSp(env, approach = "response")
#' }
#'
#' # Randomly choosing the approach
#' generateRandomSp(env, approach = "random")
#'
generateRandomSp <- function(raster.stack,
                             approach = "automatic",
                             rescale = TRUE,
                             convert.to.PA = TRUE,
                             relations = c("gaussian", "linear",
                                           "logistic", "quadratic"),
                             rescale.each.response = TRUE,
                             realistic.sp = TRUE,
                             species.type = "multiplicative",
                             niche.breadth = "any",
                             sample.points = FALSE,
                             nb.points = 10000,
                             PA.method = "probability",
                             alpha = -.1,
                             adjust.alpha = TRUE,
                             beta = "random",
                             species.prevalence = NULL,
                             plot = TRUE)
{
  # Accept legacy raster::Raster* objects by converting them to terra
  if(inherits(raster.stack, "Raster"))
  {
    raster.stack <- rast(raster.stack)
  }
  if(!(inherits(raster.stack, "SpatRaster")))
  {
    stop("raster.stack must be a SpatRaster object")
  }
  # Resolve the approach: "automatic" picks "response" for <= 5 layers,
  # "pca" otherwise; "random" picks one of the two at random
  if(approach == "automatic")
  {
    if(nlyr(raster.stack) <= 5)
    {
      approach <- "response"
    } else
    {
      approach <- "pca"
    }
  } else if (approach == "random")
  {
    approach <- sample(c("response", "pca"), 1)
  } else if(!(approach %in% c("response", "pca")))
  {
    stop("Argument approach was misspecified. Either choose 'automatic', ",
         "'random', 'response' or 'pca'.")
  }
  var.names <- names(raster.stack)
  if(approach == "pca")
  {
    # PCA approach: delegate entirely to generateSpFromPCA
    results <- generateSpFromPCA(raster.stack,
                                 rescale = rescale,
                                 niche.breadth = niche.breadth,
                                 sample.points = sample.points,
                                 nb.points = nb.points,
                                 plot = FALSE)
  } else if (approach == "response")
  {
    # Response approach: draw a random response function for each variable
    parameters <- list()
    message(" - Determining species' response to predictor variables\n")
    if(any(!(relations %in% c("gaussian", "linear",
                              "logistic", "quadratic"))))
    {
      stop(paste("Wrong relation type specified: pick among '",
                 paste(c("gaussian", "linear", "logistic", "quadratic"),
                       collapse = " "), "'", collapse = " "))
    }
    # valid.cells tracks cells that remain suitable (> 0.05 after each
    # rescaled response); used when realistic.sp = TRUE to keep the
    # successively drawn responses mutually compatible
    valid.cells <- setValues(raster.stack[[1]], 1)
    # Variables are processed in a random order
    var.order <- sample(var.names, length(var.names), replace = F)
    for (i in 1:length(var.order))
    {
      cur.var <- var.order[i]
      cur.rast <- raster.stack[[cur.var]]
      # Restrict cur.rast to currently suitable conditions so that min/max
      # below reflect only the cells still considered viable
      if(realistic.sp) cur.rast <- cur.rast * valid.cells
      # Randomly pick a response type among the allowed relations
      type <- sample(relations, 1)
      min_ <- global(cur.rast, "min", na.rm = TRUE)[1, 1]
      max_ <- global(cur.rast, "max", na.rm = TRUE)[1, 1]
      if (type == "gaussian")
      {
        # Random mean within the variable's range, random sd up to the range
        parameters[[cur.var]] <- list(
          fun = 'dnorm',
          args = c(mean = sample(seq(min_, max_, length = 100000), 1),
                   sd = sample(seq(0, (max_ - min_), length = 100000), 1))
        )
      } else if (type == "linear")
      {
        # At the moment this is not really useful because the rescale will
        # transform the results in either 0:1 or 1:0, regardless of the
        # slope. To be improved later.
        parameters[[cur.var]] <- list(
          fun = 'linearFun',
          args = c(a = sample(seq(-1, 1, length = 100), 1),
                   b = sample(seq(min_, max_, length = 100000), 1))
        )
      } else if (type == "logistic")
      {
        # Random inflexion point (beta.t) and slope (alpha.t); alpha.t is
        # drawn from three scales of the variable's range
        beta.t <- sample(seq(min_, max_, length = 1000000), 1)
        alpha.t <- sample(c(seq((max_ - min_)/1000, (max_ - min_)/100,
                                length = 10),
                            seq((max_ - min_)/100, (max_ - min_)/10,
                                length = 100),
                            seq((max_ - min_)/10, (max_ - min_)*10,
                                length = 10)), size = 1)
        if(realistic.sp == TRUE)
        {
          # Orient the slope depending on where the inflexion point falls
          # relative to the variable's observed range
          # (NOTE(review): sign semantics depend on logisticFun, defined
          # elsewhere — confirm)
          if(beta.t > max_)
          {
            alpha.t <- alpha.t
          } else if (beta.t < min_)
          {
            alpha.t <- -alpha.t
          } else
          {
            alpha.t <- sample(c(alpha.t, -alpha.t), 1)
          }
        }
        parameters[[cur.var]] <- list(fun = 'logisticFun',
                                      args = c(alpha = alpha.t,
                                               beta = beta.t)
        )
      } else if (type == "quadratic")
      {
        # Downward parabola with a random optimum (max.point) inside the
        # variable's range; b is set so the vertex falls at max.point
        max.point <- sample(seq(min_, max_, length = 1000), 1)
        a <- sample(seq(-.01, -20, length = 10000), 1)
        b <- - max.point * 2 * a
        parameters[[cur.var]] <- list(fun = 'quadraticFun',
                                      args = c(a = a, b = b, c = 0)
        )
      }
      # Restricting values to suitable conditions: apply the drawn response,
      # rescale it to [0, 1], and mark cells below 0.05 as unsuitable
      tmp.rast <- app(raster.stack[[cur.var]],
                      fun = function(x)
                      {
                        do.call(match.fun(parameters[[cur.var]]$fun),
                                args = c(list(x), parameters[[cur.var]]$args))
                      }
      )
      tmp.rast <- (tmp.rast - global(tmp.rast, "min", na.rm = TRUE)[1, 1]) /
        (global(tmp.rast, "max", na.rm = TRUE)[1, 1] -
           global(tmp.rast, "min", na.rm = TRUE)[1, 1])
      valid.cells <- valid.cells * (tmp.rast > 0.05)
    }
    message(" - Calculating species suitability\n")
    results <- generateSpFromFun(raster.stack,
                                 parameters,
                                 rescale = rescale,
                                 species.type = species.type,
                                 plot = FALSE,
                                 rescale.each.response = rescale.each.response)
  }
  if(convert.to.PA == TRUE)
  {
    message(" - Converting into Presence - Absence\n")
    # Need to adjust alpha to appropriate scale if rescale = FALSE, because
    # alpha defaults assume suitabilities in [0, 1]
    if(rescale == FALSE)
    {
      if(adjust.alpha)
      {
        # Scale alpha by the actual range of suitability values
        alpha <- diff(c(global(results$suitab.raster, min,
                               na.rm = TRUE)[1, 1],
                        global(results$suitab.raster, max,
                               na.rm = TRUE)[1, 1])) * alpha
      }
      results <- convertToPA(results,
                             PA.method = PA.method,
                             alpha = alpha,
                             beta = beta,
                             species.prevalence = species.prevalence,
                             plot = FALSE)
      if(plot) plot(results)
    } else
    {
      results <- convertToPA(results,
                             PA.method = PA.method,
                             alpha = alpha,
                             beta = beta,
                             species.prevalence = species.prevalence,
                             plot = FALSE)
      if(plot) plot(results)
    }
  } else
  {
    if(plot) plot(results)
  }
  return(results)
}
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/generateRandomSp.R
#' Generate a virtual species distribution from a Between Component Analysis #' of environmental variables #' #' #' A Between Component Analysis is similar to a PCA, except that two sets of #' environmental conditions #' (e.g. current and future) will be used. This function is useful to generate #' species designed to test the extrapolation capacity of models, e.g. #' for climate change extrapolations #' #' #' @param raster.stack.current a SpatRaster object, in which each layer #' represent an environmental #' variable from the "current" time horizon. #' @param raster.stack.future a SpatRaster object, in which each layer #' represent an environmental #' variable from a "future" time horizon. #' @param rescale \code{TRUE} of \code{FALSE}. Should the output suitability #' raster be #' rescaled between 0 and 1? #' @param niche.breadth \code{"any"}, \code{"narrow"} or \code{"wide"}. This #' parameter #' defines how tolerant is the species regarding environmental conditions by #' adjusting #' the standard deviations of the gaussian functions. See details. #' @param means a vector containing two numeric values. Will be used to define #' the means of the gaussian response functions to the axes of the BCA. #' @param sds a vector containing two numeric values. Will be used to define #' the standard deviations of the gaussian response functions to the axes of #' the BCA. #' @param bca a \code{bca} object. You can provide a bca object that you #' already computed yourself with #' \code{\link[virtualspecies]{generateSpFromBCA}} #' @param sample.points \code{TRUE} of \code{FALSE}. If you have large #' raster files then use this parameter to sample a number of points equal to #' \code{nb.points}. However the representation of your environmental variables #' will not be complete. #' @param nb.points a numeric value. Only useful if \code{sample.points = TRUE}. #' The number of sampled points from the raster, to perform the PCA. 
A too small #' value may not be representative of the environmental conditions in your #' rasters. #' @param plot \code{TRUE} or \code{FALSE}. If \code{TRUE}, the generated #' virtual species will be plotted. #' @note #' To perform the BCA, the function has to transform rasters into matrices. #' This may not be feasible if the chosen rasters are too large for the #' computer's memory. #' In this case, you should run the function with \code{sample.points = TRUE} #' and set the number of points to sample with \code{nb.points}. #' @details #' This function generates a virtual species distribution by computing a Between #' Component Analysis based on two different stacks of environmental variables. #' The response of the species is then simulated along the two first axes of #' the BCA with gaussian functions in the #' same way as in \code{\link{generateSpFromPCA}}. #' #' A Between Component Analysis is used to separate two sets of environmental #' conditions. #' This function proceeds in 4 steps: #' \enumerate{ #' \item{A Principal Component Analysis is generated based on both set of #' environmental conditions} #' \item{A BCA of this PCA is generated using the function #' \code{\link[ade4:bca]{bca}} #' from package \code{ade4}. Note that at this step we choose one random point #' from \code{raster.stack.future}, #' and we use this single point as if it was a third set of environmental #' conditions for the BCA. This trick allows us to subtly change the shape of #' the bca in order to #' generate different types of conditions.} #' \item{Gaussian responses to the first two axes are computed} #' \item{These responses are multiplied to obtain the final environmental #' suitability}} #' #' If \code{rescale = TRUE}, the final environmental suitability is rescaled #' between 0 and 1, #' with the formula (val - min) / (max - min). #' #' The shape of gaussian responses can be randomly generated by the function #' or defined manually by choosing #' \code{means} and \code{sds}. 
The random generation is constrained #' by the argument \code{niche.breadth}, which controls the range of possible #' standard deviation values. This range of values is based on #' a fraction of the axis: #' \itemize{ #' \item{\code{"any"}: the standard deviations can have values from 1\% to #' 50\% of axes' ranges. For example if the first axis of the PCA ranges from #' -5 to +5, #' then sd values along this axis can range from 0.1 to 5. #' } #' \item{\code{"narrow"}: the standard deviations are limited between 1\% and #' 10\% of axes' ranges. For example if the first axis of the PCA ranges from #' -5 to +5, #' then sd values along this axis can range from 0.1 to 1. #' } #' \item{\code{"wide"}: the standard deviations are limited between 10\% and #' 50\% of axes' ranges. For example if the first axis of the PCA ranges from #' -5 to +5, #' then sd values along this axis can range from 1 to 5. #' } #' } #' If a \code{bca} object is provided, the output bca object will contain the #' new environments coordinates along the provided bca axes. 
#' #' #' #' #' @return a \code{list} with 4 elements: #' \itemize{ #' \item{\code{approach}: the approach used to generate the species, #' \emph{i.e.}, \code{"bca"}} #' \item{\code{details}: the details and parameters used to generate the #' species} #' \item{\code{suitab.raster.current}: the virtual species distribution, as a #' SpatRaster object containing the #' current environmental suitability} #' \item{\code{suitab.raster.future}: the virtual species distribution, as a #' SpatRaster object containing the #' future environmental suitability} #' } #' The structure of the virtualspecies object can be seen using \code{str()} #' #' #' @import terra #' @export #' #' #' @author #' Robin Delsol, Boris Leroy #' #' Maintainer: Boris Leroy \email{leroy.boris@@gmail.com} #' #' #' @seealso \code{\link{generateSpFromFun}} to generate a virtual species with #' the responses to each environmental variables.\code{\link{generateSpFromPCA}} #' to generate a virtual species with #' the PCA of environmental variables. #' #' #' @examples #' # Create two example stacks with four environmental variables each #' a <- matrix(rep(dnorm(1:100, 50, sd = 25)), #' nrow = 100, ncol = 100, byrow = TRUE) #' #' env1 <- c(rast(a * dnorm(1:100, 50, sd = 25)), #' rast(a * 1:100), #' rast(a), #' rast(t(a))) #' names(env1) <- c("var1", "var2", "var3", "var4") #' plot(env1) # Illustration of the variables #' #' b <- matrix(rep(dnorm(1:100, 25, sd = 50)), #' nrow = 100, ncol = 100, byrow = TRUE) #' #' env2 <- c(rast(b * dnorm(1:100, 50, sd = 25)), #' rast(b * 1:100), #' rast(b), #' rast(t(b))) #' #' names(env2) <- c("var1", "var2", "var3", "var4") #' plot(env2) # Illustration of the variables #' #' # Generating a species with the BCA #' #' generateSpFromBCA(raster.stack.current = env1, raster.stack.future = env2) #' #' # The left part of the plot shows the BCA and the response functions along #' # the two axes. 
#' # The top-right part shows environmental suitability of the virtual
#' # species in the current environment.
#' # The bottom-right part shows environmental suitability of the virtual
#' # species in the future environment.
#'
#'
#' # Defining manually the response to axes
#'
#' generateSpFromBCA(raster.stack.current = env1, raster.stack.future = env2,
#'                   means = c(-2, 0),
#'                   sds = c(0.6, 1.5))
#'
generateSpFromBCA <- function(raster.stack.current, raster.stack.future,
                              rescale = TRUE,
                              niche.breadth = "any",
                              means = NULL,
                              sds = NULL,
                              bca = NULL,
                              sample.points = FALSE,
                              nb.points = 10000,
                              plot = TRUE)
{
  # Accept legacy raster::Raster* stacks by converting them to terra
  if(inherits(raster.stack.current, "Raster"))
  {
    raster.stack.current <- rast(raster.stack.current)
  }
  if(inherits(raster.stack.future, "Raster"))
  {
    raster.stack.future <- rast(raster.stack.future)
  }
  if(!(inherits(raster.stack.current, "SpatRaster")))
  {
    stop("raster.stack.current must be a SpatRaster object")
  }
  if(!(inherits(raster.stack.future, "SpatRaster")))
  {
    stop("raster.stack.future must be a SpatRaster object")
  }
  if(!all((names(raster.stack.future) %in% names(raster.stack.current))))
  {
    stop("The variables names in raster.stack.future must be the same as ",
         "variables names in raster.stack.current")
  }
  # Reject identical stacks: a BCA needs two distinct sets of conditions
  if(global(app(raster.stack.current, sum) == app(raster.stack.future, sum),
            fun = sum, na.rm = TRUE) == ncell(raster.stack.current))
  {
    stop("Please provide two different rasters")
  }
  # Extract environmental values, either by sampling or by reading all cells
  if(sample.points)
  {
    if(!is.numeric(nb.points))
    {
      stop("nb.points must be a numeric value corresponding to the number of",
           " pixels to sample from raster.stack.current ",
           "and from raster.stack.future")
    }
    env.df.current <- spatSample(raster.stack.current, size = nb.points,
                                 na.rm = TRUE)
    env.df.future <- spatSample(raster.stack.future, size = nb.points,
                                na.rm = TRUE)
  } else
  {
    message("Reading raster values. If it fails for very large rasters, use",
            " arguments 'sample.points = TRUE' and define a number of",
            " points to sample with 'nb.point'.")
    env.df.current <- values(raster.stack.current)
    env.df.future <- values(raster.stack.future)
    # Drop any row containing at least one NA value
    if(any(is.na(env.df.current)))
    {
      env.df.current <- env.df.current[
        -unique(which(is.na(env.df.current), arr.ind = TRUE)[, 1]), ]
    }
    if(any(is.na(env.df.future)))
    {
      env.df.future <- env.df.future[
        -unique(which(is.na(env.df.future), arr.ind = TRUE)[, 1]), ]
    }
  }
  # Bug fix: remember whether a user bca was supplied BEFORE the branch
  # below. The previous code called rm(bca) and later evaluated
  # if(!is.null(bca)), which failed ("object 'bca' not found") whenever a
  # bca object had been provided.
  bca.provided <- !is.null(bca)
  if(bca.provided)
  {
    # Validate the user-supplied bca object
    if(!all(class(bca) %in% c("between", "dudi")))
    {
      stop("Please provide an appropriate bca object (output of bca()) to make the bca plot.")
    }
    if(any(!(names(bca$tab) %in% names(raster.stack.current))))
    {
      stop("The variables used to make the bca must be the same as variables names in raster.stack.current")
    }
    # cent/norm are added by generateSpFromBCA; plain ade4 bca objects
    # lack them and cannot be used to project new environments
    if (is.null(bca$cent) || is.null(bca$norm))
    {
      stop("Please provide an appropriate bca object (output of generateSpFromBCA) to make the bca plot.")
    }
    between.object <- bca
    sel.vars <- names(raster.stack.current)
  } else
  {
    sel.vars <- names(raster.stack.current)
    # One random future point is added as a third "condition" to subtly
    # alter the shape of the BCA (see function documentation)
    xpoint <- sample(nrow(env.df.future), 1)
    env.df <- rbind(env.df.current, env.df.future, env.df.future[xpoint, ],
                    deparse.level = 0)
    condition <- as.factor(c(rep('Current', nrow(env.df.current)),
                             rep('Future', nrow(env.df.future)),
                             "X"))
    message(" - Performing the between component analysis\n")
    pca.object <- ade4::dudi.pca(env.df, scannf = FALSE, nf = 2)
    between.object <- ade4::bca(pca.object, condition, scannf = FALSE, nf = 2)
    between.object$xpoint <- xpoint
    # Keep the PCA centring/scaling so new environments can be projected
    between.object$cent <- pca.object$cent
    between.object$norm <- pca.object$norm
    if(ncol(between.object$ls) != 2)
    {
      stop("A two dimension BCA can not be performed with provided rasters stack")
    }
  }
  message(" - Defining the response of the species along the axis\n")
  # Means of the gaussian responses: user-provided or drawn from one random
  # point of the BCA row coordinates
  if(!is.null(means))
  {
    if(!is.numeric(means))
    {
      stop("Please provide numeric means for the gaussian function to compute ",
           "probabilities of presence")
    }
    if(!is.vector(means) || length(means) != 2)
    {
      stop("Please provide a vector with 2 means for the gaussian function ",
           "(one for each of the two between axes)")
    }
  } else
  {
    means <- between.object$ls[sample(seq_len(nrow(between.object$ls)), 1), ]
    means <- c(mean1 = means[1, 1], mean2 = means[1, 2])
  }
  # Standard deviations: user-provided, or random within a fraction of each
  # axis range determined by niche.breadth
  if(!is.null(sds))
  {
    if(!is.numeric(sds))
    {
      stop("Please provide numeric standard deviations for the gaussian function to compute probabilities of presence")
    }
    if(!is.vector(sds) || length(sds) != 2)
    {
      stop("Please provide a vector with 2 standard deviations for the gaussian function (one for each of the two pca axes)")
    }
    if(any(sds < 0))
    {
      stop("The standard deviations must have a positive value!")
    }
    message(" - You have provided standard deviations, so argument niche.breadth will be ignored.\n")
  } else
  {
    # Defining a range of values to determine sds for the gaussian
    # functions; each axis range is clamped to 5 interquartile ranges
    # around the quartiles to limit the influence of extreme coordinates
    axis1 <- c(min = max(min(between.object$ls[, 1]),
                         quantile(between.object$ls[, 1], probs = .25) -
                           5 * (quantile(between.object$ls[, 1], probs = .75) -
                                  quantile(between.object$ls[, 1],
                                           probs = .25))),
               max = min(max(between.object$ls[, 1]),
                         quantile(between.object$ls[, 1], probs = .75) +
                           5 * (quantile(between.object$ls[, 1], probs = .75) -
                                  quantile(between.object$ls[, 1],
                                           probs = .25))))
    axis2 <- c(min = max(min(between.object$ls[, 2]),
                         quantile(between.object$ls[, 2], probs = .25) -
                           5 * (quantile(between.object$ls[, 2], probs = .75) -
                                  quantile(between.object$ls[, 2],
                                           probs = .25))),
               max = min(max(between.object$ls[, 2]),
                         quantile(between.object$ls[, 2], probs = .75) +
                           5 * (quantile(between.object$ls[, 2], probs = .75) -
                                  quantile(between.object$ls[, 2],
                                           probs = .25))))
    # Random sampling of parameters within the niche-breadth bounds
    if(niche.breadth == "any")
    {
      sds <- c(sd1 = sample(seq((axis1[2] - axis1[1])/100,
                                (axis1[2] - axis1[1])/2, length = 1000), 1),
               sd2 = sample(seq((axis2[2] - axis2[1])/100,
                                (axis2[2] - axis2[1])/2, length = 1000), 1))
    } else if (niche.breadth == "narrow")
    {
      sds <- c(sd1 = sample(seq((axis1[2] - axis1[1])/100,
                                (axis1[2] - axis1[1])/10, length = 1000), 1),
               sd2 = sample(seq((axis2[2] - axis2[1])/100,
                                (axis2[2] - axis2[1])/10, length = 1000), 1))
    } else if (niche.breadth == "wide")
    {
      sds <- c(sd1 = sample(seq((axis1[2] - axis1[1])/10,
                                (axis1[2] - axis1[1])/2, length = 1000), 1),
               sd2 = sample(seq((axis2[2] - axis2[1])/10,
                                (axis2[2] - axis2[1])/2, length = 1000), 1))
    } else
    {
      stop("niche.breadth must be one of these: 'any', 'narrow', 'wide'")
    }
  }
  # Project each stack onto the BCA axes, then apply the gaussian responses
  message(" - Calculating current suitability values\n")
  rasters.env.current <- app(raster.stack.current[[sel.vars]],
                             fun = function(x, ...)
                             {
                               .pca.coordinates(x, pca = between.object,
                                                na.rm = TRUE, axes = c(1, 2))
                             })
  suitab.raster.current <- app(rasters.env.current,
                               fun = function(x, ...)
                               {
                                 .prob.gaussian(x, means = means, sds = sds)
                               })
  message(" - Calculating future suitability values\n")
  rasters.env.future <- app(raster.stack.future[[sel.vars]],
                            fun = function(x, ...)
                            {
                              .pca.coordinates(x, pca = between.object,
                                               na.rm = TRUE, axes = c(1, 2))
                            })
  suitab.raster.future <- app(rasters.env.future,
                              fun = function(x, ...)
                              {
                                .prob.gaussian(x, means = means, sds = sds)
                              })
  # When a bca was provided, store the coordinates of the new environments
  # along the provided bca axes in the output object (see documentation)
  if(bca.provided)
  {
    between.env.current <- .pca.coordinates(env.df.current,
                                            pca = between.object,
                                            na.rm = TRUE)
    between.env.future <- .pca.coordinates(env.df.future,
                                           pca = between.object,
                                           na.rm = TRUE)
    between.object$ls <- as.data.frame(rbind(between.env.current,
                                             between.env.future))
  }
  if(rescale)
  {
    # Rescale both rasters with a COMMON min/max so current and future
    # suitabilities remain comparable
    max_ <- max(global(suitab.raster.current, "max", na.rm = TRUE)[1, 1],
                global(suitab.raster.future, "max", na.rm = TRUE)[1, 1])
    min_ <- min(global(suitab.raster.current, "min", na.rm = TRUE)[1, 1],
                global(suitab.raster.future, "min", na.rm = TRUE)[1, 1])
    suitab.raster.current <- (suitab.raster.current - min_) / (max_ - min_)
    suitab.raster.future <- (suitab.raster.future - min_) / (max_ - min_)
    message(" The final environmental suitability was rescaled between 0 and",
            " 1. To disable, set argument rescale = FALSE")
  } else
  {
    max_ <- NA
    min_ <- NA
  }
  stack.lengths <- c(nrow(env.df.current), nrow(env.df.future))
  results <- list(approach = "bca",
                  details = list(variables = sel.vars,
                                 bca = between.object,
                                 rescale = rescale,
                                 axes = c(1, 2),
                                 means = means,
                                 sds = sds,
                                 max_prob_rescale = max_,
                                 min_prob_rescale = min_,
                                 stack.lengths = stack.lengths),
                  # Rasters are wrap()ped so the object can be serialised
                  suitab.raster.current = wrap(suitab.raster.current,
                                               proxy = FALSE),
                  suitab.raster.future = wrap(suitab.raster.future,
                                              proxy = FALSE))
  class(results) <- append("virtualspecies", class(results))
  if(plot)
  {
    message(" - Plotting response and suitability\n")
    # Save and restore graphical parameters around the plot
    op <- graphics::par(no.readonly = TRUE)
    stac <- c(suitab.raster.current, suitab.raster.future)
    names(stac) <- c("Current", "Future")
    plot(stac, col = viridis::viridis(10))
    graphics::par(op)
  }
  return(results)
}
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/generateSpFromBCA.R
#' Generate a virtual species distribution with responses to environmental
#' variables
#'
#' This function generates a virtual species distribution from a stack of
#' environmental variables and a defined set of responses to each environmental
#' parameter.
#'
#' @param raster.stack a SpatRaster object, in which each layer represents an
#' environmental variable.
#' @param parameters a list containing the functions of response of the species
#' to environmental variables with their parameters. See details.
#' @param rescale \code{TRUE} or \code{FALSE}. If \code{TRUE}, the final
#' probability of presence is rescaled between 0 and 1.
#' @param formula a character string or \code{NULL}. The formula used to combine
#' partial responses into the final
#' environmental suitability value (e.g., \code{"layername1 + 2 * layername2 +
#' layername3 * layername4 etc."}). If \code{NULL} then partial responses will
#' be added or multiplied according to
#' \code{species.type}
#' @param species.type \code{"additive"} or \code{"multiplicative"}. Only used
#' if \code{formula = NULL}.
#' Defines how the final environmental suitability is calculated: if
#' \code{"additive"}, responses to each
#' variable are summed; if \code{"multiplicative"}, responses are multiplied.
#' @param rescale.each.response \code{TRUE} or \code{FALSE}. If \code{TRUE},
#' the individual responses to
#' each environmental variable are rescaled between 0 and 1 (see details).
#' @param plot \code{TRUE} or \code{FALSE}. If \code{TRUE}, the generated
#' virtual species will be plotted.
#' @return a \code{list} with 3 elements:
#' \itemize{
#' \item{\code{approach}: the approach used to generate the species,
#' \emph{i.e.}, \code{"response"}}
#' \item{\code{details}: the details and parameters used to generate the
#' species}
#' \item{\code{suitab.raster}: the raster containing the environmental
#' suitability of the virtual species}
#' }
#' The structure of the virtualspecies object can be seen using \code{str()}
#' @seealso \code{\link{generateSpFromPCA}} to generate a virtual species with
#' a PCA approach
#' @details
#' \href{http://borisleroy.com/virtualspecies_tutorial/02-response.html}{Online
#' tutorial for this function}
#'
#' This function proceeds in two steps:
#' \enumerate{
#' \item{The response to each environmental variable is calculated with the
#' functions provided
#' in \code{parameters}. This results in a suitability of each variable.
#'
#' \bold{By default, each response is rescaled between 0 and 1.} Disable with
#' \code{rescale.each.response = FALSE}}
#' \item{The final environmental suitability is calculated according to the
#' chosen \code{species.type}.
#'
#' \bold{By default, the final suitability is rescaled between 0 and 1.}
#' Disable with
#' \code{rescale = FALSE}}
#' }
#' The SpatRaster stack containing environmental variables must have consistent
#' names,
#' because they will be checked with the \code{parameters}. For example, they
#' can be named
#' var1, var2, etc. Names can be checked and set with \code{names(my.stack)}.
#'
#' The \code{parameters} have to be carefully created, otherwise the function
#' will not work:
#' \itemize{
#' \item{Either see \code{\link{formatFunctions}} to easily create your list of
#' parameters}
#' \item{Or create a \code{list} defined according to the following template:\cr
#' \code{list(
#'            var1 = list(fun = 'fun1', args = list(arg1 = ..., arg2 = ...,
#'            etc.)),
#'            var2 = list(fun = 'fun2', args = list(arg1 = ..., arg2 = ...,
#'            etc.)))}\cr
#' It is important to keep the same names in the parameters as in the stack of
#' environmental
#' variables. Similarly, argument names must be identical to argument names in
#' the associated
#' function (e.g., if you use \code{fun = 'dnorm'}, then args should look like
#' \code{list(mean = 0, sd = 1)}).
#'
#' See the example section below for more examples.}}
#'
#'
#' Any response function that can be applied to the environmental variables can
#' be chosen here. Several functions are proposed in this package:
#' \code{\link{linearFun}}, \code{\link{logisticFun}} and
#' \code{\link{quadraticFun}}.
#' Another classical example is the normal distribution:
#' \code{\link[stats:Normal]{stats::dnorm()}}.
#' Users can also create and use their own functions very easily.
#'
#'
#' If \code{rescale.each.response = TRUE}, then the probability response to each
#' variable will be normalised between 0 and 1 according to the following
#' formula:
#' P.rescaled = (P - min(P)) / (max(P) - min (P))
#' This rescaling has a strong impact on response functions, so users may
#' prefer to
#' use \code{rescale.each.response = FALSE} and apply their own rescaling within
#' their response functions.
#'
#'
#' @import terra
#' @export
#' @author
#' Boris Leroy \email{leroy.boris@@gmail.com}
#'
#' with help from C. N. Meynard, C. Bellard & F. Courchamp
#' @examples
#' # Create an example stack with two environmental variables
#' a <- matrix(rep(dnorm(1:100, 50, sd = 25)),
#'             nrow = 100, ncol = 100, byrow = TRUE)
#' env <- c(rast(a * dnorm(1:100, 50, sd = 25)),
#'          rast(a * 1:100))
#' names(env) <- c("variable1", "variable2")
#' plot(env) # Illustration of the variables
#'
#' # Easy creation of the parameter list:
#' # see in real time the shape of the response functions
#' parameters <- formatFunctions(variable1 = c(fun = 'dnorm', mean = 1e-04,
#'                                             sd = 1e-04),
#'                               variable2 = c(fun = 'linearFun', a = 1, b = 0))
#'
#' # If you provide env, then you can see the shape of response functions:
#' parameters <- formatFunctions(x = env,
#'                               variable1 = c(fun = 'dnorm', mean = 1e-04,
#'                                             sd = 1e-04),
#'                               variable2 = c(fun = 'linearFun', a = 1, b = 0))
#'
#' # Generation of the virtual species
#' sp1 <- generateSpFromFun(env, parameters)
#' sp1
#' par(mfrow = c(1, 1))
#' plot(sp1)
#'
#'
#' # Manual creation of the parameter list
#' # Note that the variable names are the same as above
#' parameters <- list(variable1 = list(fun = 'dnorm',
#'                                     args = list(mean = 0.00012,
#'                                                 sd = 0.0001)),
#'                    variable2 = list(fun = 'linearFun',
#'                                     args = list(a = 1, b = 0)))
#' # Generation of the virtual species
#' sp1 <- generateSpFromFun(env, parameters, plot = TRUE)
#' sp1
#' plot(sp1)
generateSpFromFun <- function(raster.stack, parameters,
                              rescale = TRUE,
                              formula = NULL,
                              species.type = "multiplicative",
                              rescale.each.response = TRUE,
                              plot = FALSE)
{
  # Retro-compatibility: accept raster-package objects and convert to terra
  if (inherits(raster.stack, "Raster")) {
    raster.stack <- rast(raster.stack)
  }
  message("Generating virtual species environmental suitability...\n")
  approach <- "response"
  if (!(inherits(raster.stack, "SpatRaster"))) {
    stop("raster.stack must be a SpatRaster object")
  }

  # One response function is required per environmental layer, with matching
  # names
  n.l <- nlyr(raster.stack)
  if (n.l != length(parameters)) {
    stop("Provide as many layers in raster.stack as functions on parameters")
  }
  if (any(!(names(parameters) %in% names(raster.stack)) |
          !(names(raster.stack) %in% names(parameters)))) {
    stop("Layer names and names of parameters must be identical")
  }

  # Checking the structure and consistency of parameters
  for (i in seq_along(parameters))
  {
    if (any(!(c("fun", "args") %in% names(parameters[[i]]))))
    {
      # Fixed: stop() has no 'sep' argument; the previous 'sep = ""' was
      # silently pasted into the error message, and the two fragments ran
      # together without a space. stop() concatenates its arguments directly.
      stop("The structure of parameters does not seem correct. ",
           "Please provide function and arguments for variable '",
           names(parameters)[i],
           "'. See help(generateSpFromFun) for more details.")
    }
    # Verify the response function exists
    test <- tryCatch(match.fun(parameters[[i]]$fun),
                     error = function(c) "error")
    if (!inherits(test, "function"))
    {
      stop(paste("The function ", parameters[[i]]$fun,
                 " does not exist, please verify spelling.", sep = ""))
    }
    # User-supplied arguments must match the formal arguments of the
    # response function
    if (any(!(names(parameters[[i]]$args) %in% names(formals(fun = test)))))
    {
      stop(paste("Arguments of variable '", names(parameters)[i], "' (",
                 paste(names(parameters[[i]]$args), collapse = ", "),
                 ") do not match arguments of the associated function\n",
                 " List of possible arguments for this function: ",
                 paste(names(formals(fun = test)), collapse = ", "),
                 sep = ""))
    }
    rm(test)
  }

  # Adding a message to inform users about the default rescaling of variables
  if (rescale.each.response)
  {
    message(" - The response to each variable was rescaled between 0 and 1. To disable, set argument rescale.each.response = FALSE\n")
  }
  if (rescale)
  {
    message(" - The final environmental suitability was rescaled between 0",
            " and 1. To disable, set argument rescale = FALSE\n")
  }

  # Step 1: compute the partial response of the species to each variable
  suitab.raster <- rast(lapply(names(raster.stack),
                               FUN = function(y)
                               {
                                 app(raster.stack[[y]],
                                     fun = function(x)
                                     {
                                       do.call(match.fun(parameters[[y]]$fun),
                                               args = c(list(x),
                                                        parameters[[y]]$args))
                                     }
                                 )
                               }))
  names(suitab.raster) <- names(parameters)

  # Store the observed range of each variable in the returned parameters
  # (used later, e.g. by print() and plotResponse())
  for (var in names(raster.stack))
  {
    parameters[[var]]$min <- global(raster.stack[[var]],
                                    "min", na.rm = TRUE)[1, 1]
    parameters[[var]]$max <- global(raster.stack[[var]],
                                    "max", na.rm = TRUE)[1, 1]
  }

  # Rescale each partial response between 0 and 1 if requested
  if (rescale.each.response)
  {
    suitab.raster <- rast(lapply(names(suitab.raster),
                                 function(y)
                                 {
                                   (suitab.raster[[y]] -
                                      global(suitab.raster[[y]], "min",
                                             na.rm = TRUE)[1, 1]) /
                                     (global(suitab.raster[[y]], "max",
                                             na.rm = TRUE)[1, 1] -
                                        global(suitab.raster[[y]], "min",
                                               na.rm = TRUE)[1, 1])
                                 }))
  }

  # Step 2: combine partial responses into the final suitability, either with
  # a default additive/multiplicative combination or a user-provided formula
  if (is.null(formula))
  {
    if (species.type == "multiplicative")
    {
      formula <- paste(names(suitab.raster), collapse = " * ")
      suitab.raster <- app(suitab.raster, fun = prod)
    } else if (species.type == "additive")
    {
      formula <- paste(names(suitab.raster), collapse = " + ")
      suitab.raster <- app(suitab.raster, fun = sum)
    } else stop("If you do not provide a formula, please choose either ",
                "species.type = 'additive' or 'multiplicative'")
  } else
  {
    # A user formula must reference all layer names, and only layer names
    if (any(!(all.vars(stats::reformulate(formula)) %in%
              names(suitab.raster))))
    {
      stop("Please verify that the variable names in your formula are ",
           "correctly spelled")
    } else if (any(!(names(suitab.raster) %in%
                     all.vars(stats::reformulate(formula)))))
    {
      stop("Please verify that your formula contains all the variables of ",
           "your input raster stack")
    } else
    {
      custom.fun <- NULL # To remove the note in rcheck
      # Build a function whose arguments are the layer names and whose body
      # is the user formula, then apply it cell-wise
      eval(parse(text = paste("custom.fun <- function(",
                              paste(names(suitab.raster), collapse = ", "),
                              ") {",
                              formula,
                              "}"
      )))
      suitab.raster <- lapp(suitab.raster,
                            fun = custom.fun)
      # Echo the formula that was applied
      print(formula)
    }
  }

  # Final rescaling of the combined suitability between 0 and 1
  if (rescale)
  {
    suitab.raster <- (suitab.raster -
                        global(suitab.raster, "min", na.rm = TRUE)[1, 1]) /
      (global(suitab.raster, "max", na.rm = TRUE)[1, 1] -
         global(suitab.raster, "min", na.rm = TRUE)[1, 1])
  }

  names(suitab.raster) <- "VSP suitability"

  # The suitability raster is wrap()ped so the object can be saved to disk
  results <- list(approach = approach,
                  details = list(variables = names(parameters),
                                 formula = formula,
                                 rescale.each.response = rescale.each.response,
                                 rescale = rescale,
                                 parameters = parameters),
                  suitab.raster = wrap(suitab.raster,
                                       proxy = FALSE)
  )
  class(results) <- append("virtualspecies", class(results))
  if (plot)
  {
    plot(results$suitab.raster,
         main = "Environmental suitability of the virtual species",
         col = viridis::viridis(20))
  }
  return(results)
}
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/generateSpFromFun.R
#' Generate a virtual species distribution with a PCA of environmental variables
#'
#' This function generates a virtual species distribution by computing a
#' PCA among environmental variables, and simulating the response of the species
#' along the two first axes of the PCA. The response to axes of the PCA is
#' determined with gaussian functions.
#' @param raster.stack a SpatRaster object, in which each layer represents an
#' environmental
#' variable.
#' @param rescale \code{TRUE} or \code{FALSE}. Should the output suitability
#' raster be
#' rescaled between 0 and 1?
#' @param niche.breadth \code{"any"}, \code{"narrow"} or \code{"wide"}. This
#' parameter
#' defines how tolerant is the species regarding environmental conditions by
#' adjusting
#' the standard deviations of the gaussian functions. See details.
#' @param axes a vector of values. Which axes would you like to keep in your
#' PCA?
#' At least 2 axes should be included (Only 1 axis currently not supported)
#' @param means a vector containing as many numeric values as axes. Will be
#' used to define
#' the means of the gaussian response functions to the axes of the PCA.
#' @param sds a vector containing as many numeric values as axes. Will be
#' used to define
#' the standard deviations of the gaussian response functions to the axes of
#' the PCA.
#' @param pca a \code{dudi.pca} object. You can provide a pca object that you
#' computed yourself with \code{\link[ade4]{dudi.pca}}
#' @param sample.points \code{TRUE} or \code{FALSE}. If you have a large
#' raster file then use this parameter to sample a number of points equal to
#' \code{nb.points}.
#' @param nb.points a numeric value. Only useful if \code{sample.points = TRUE}.
#' The number of sampled points from the raster, to perform the PCA. A too small
#' value may not be representative of the environmental conditions in your
#' raster.
#' @param plot \code{TRUE} or \code{FALSE}. If \code{TRUE}, the generated
#' virtual species will be plotted.
#' @note
#' To perform the PCA, the function has to transform the raster into a matrix.
#' This may not be feasible if the raster is too large for the
#' computer's memory.
#' In this case, you should perform the PCA on a sample of your raster with
#' set \code{sample.points = TRUE} and choose the number of points to sample
#' with
#' \code{nb.points}.
#' @details
#' \href{http://borisleroy.com/virtualspecies_tutorial/03-PCA.html}{Online
#' tutorial for this function}
#'
#'
#' This function proceeds in 3 steps:
#' \enumerate{
#' \item{A PCA of environmental conditions is generated}
#' \item{Gaussian responses to the first two axes are computed}
#' \item{These responses are multiplied to obtain the final environmental
#' suitability}}
#'
#' If \code{rescale = TRUE}, the final environmental suitability is rescaled
#' between 0 and 1,
#' with the formula (val - min) / (max - min).
#'
#' The shape of gaussian responses can be randomly generated by the function or
#' defined manually by choosing
#' \code{means} and \code{sds}. The random generation is constrained
#' by the argument \code{niche.breadth}, which controls the range of possible
#' standard deviation values. This range of values is based on
#' a fraction of the axis:
#' \itemize{
#' \item{\code{"any"}: the standard deviations can have values from 1\% to
#' 50\% of axes' ranges. For example if the first axis of the PCA ranges from
#' -5 to +5,
#' then sd values along this axis can range from 0.1 to 5.
#' }
#' \item{\code{"narrow"}: the standard deviations are limited between 1\% and
#' 10\% of axes' ranges. For example if the first axis of the PCA ranges from
#' -5 to +5,
#' then sd values along this axis can range from 0.1 to 1.
#' }
#' \item{\code{"wide"}: the standard deviations are limited between 10\% and
#' 50\% of axes' ranges. For example if the first axis of the PCA ranges from
#' -5 to +5,
#' then sd values along this axis can range from 1 to 5.
#' }
#' }
#' @export
#' @import terra
#' @author
#' Boris Leroy \email{leroy.boris@@gmail.com}
#'
#' with help from C. N. Meynard, C. Bellard & F. Courchamp
#' @seealso \code{\link{generateSpFromFun}} to generate a virtual species with
#' the responses to each environmental variables.
#' @return a \code{list} with 3 elements:
#' \itemize{
#' \item{\code{approach}: the approach used to generate the species,
#' \emph{i.e.}, \code{"pca"}}
#' \item{\code{details}: the details and parameters used to generate
#' the species}
#' \item{\code{suitab.raster}: the virtual species distribution, as a
#' SpatRaster object containing the
#' environmental suitability}
#' }
#' The structure of the virtualspecies object can be seen using \code{str()}
#' @examples
#' # Create an example stack with four environmental variables
#' a <- matrix(rep(dnorm(1:100, 50, sd = 25)),
#'             nrow = 100, ncol = 100, byrow = TRUE)
#' env <- c(rast(a * dnorm(1:100, 50, sd = 25)),
#'          rast(a * 1:100),
#'          rast(a * logisticFun(1:100, alpha = 10, beta = 70)),
#'          rast(t(a)))
#' names(env) <- c("var1", "var2", "var3", "var4")
#' plot(env) # Illustration of the variables
#'
#'
#'
#'
#'
#' # Generating a species with the PCA
#'
#' generateSpFromPCA(raster.stack = env)
#'
#' # The top part of the plot shows the PCA and the response functions along
#' # the two axes.
#' # The bottom part shows the probabilities of occurrence of the virtual
#' # species.
#'
#'
#'
#'
#'
#' # Defining manually the response to axes
#'
#' generateSpFromPCA(raster.stack = env,
#'                   means = c(-2, 0),
#'                   sds = c(0.6, 1.5))
#'
#' # This species can be seen as occupying intermediate altitude ranges of a
#' # conic mountain.
#'
#'
#' # Beyond the first two axes
#' generateSpFromPCA(raster.stack = env,
#'                   axes = c(1, 3))
#'
#' sp <- generateSpFromPCA(raster.stack = env,
#'                         axes = 1:3)
#' plotResponse(sp, axes = c(1, 2))
#' plotResponse(sp, axes = c(1, 3))
#' plotResponse(sp, axes = c(2, 3))
#'
generateSpFromPCA <- function(raster.stack,
                              rescale = TRUE,
                              niche.breadth = "any",
                              axes = c(1, 2),
                              means = NULL,
                              sds = NULL,
                              pca = NULL,
                              sample.points = FALSE,
                              nb.points = 10000,
                              plot = TRUE)
{
  # Retro-compatibility: accept raster-package objects and convert to terra
  if (inherits(raster.stack, "Raster")) {
    raster.stack <- rast(raster.stack)
  }
  if (!(inherits(raster.stack, "SpatRaster"))) {
    stop("raster.stack must be a SpatRaster object")
  }

  # Read (or sample) the environmental values once; they are used to compute
  # the PCA when no pca object is provided
  if (sample.points)
  {
    if (!is.numeric(nb.points))
    {
      stop("nb.points must be a numeric value corresponding to the number of",
           " pixels to sample from raster.stack")
    }
    env.df <- spatSample(raster.stack, size = nb.points, na.rm = TRUE)
  } else
  {
    # Fixed: the message previously referred to the non-existent argument
    # 'nb.point' (correct name is 'nb.points')
    message("Reading raster values. If it fails for very large rasters, use",
            " arguments 'sample.points = TRUE' and define a number of",
            " points to sample with 'nb.points'.")
    env.df <- values(raster.stack)
    if (any(is.na(env.df))) # Removing NAs if applicable
    {
      env.df <- env.df[-unique(which(is.na(env.df), arr.ind = TRUE)[, 1]), ]
    }
  }

  if (!is.null(pca))
  {
    # A user-provided PCA must come from ade4::dudi.pca and use the same
    # variables as the raster stack
    if (!all(class(pca) %in% c("pca", "dudi")))
    {
      stop("Please provide an appropriate pca.object (output of dudi.pca()) to",
           " make the pca plot.")
    }
    if (any(!(names(pca$tab) %in% names(raster.stack))))
    {
      stop("The variables used to make the pca must be the same as variables",
           " names in raster.stack")
    }
    pca.object <- pca
    rm(pca)
    sel.vars <- names(raster.stack)
  } else
  {
    sel.vars <- names(raster.stack)
    raster.stack <- raster.stack[[sel.vars]]
    # Fixed: the environmental values were previously read/sampled a second
    # time here, identically to the block above — a redundant full raster read
    # (and, with sample.points = TRUE, a wasteful second random sample).
    # The values read above are reused directly.
    message(" - Performing the pca\n")
    pca.object <- ade4::dudi.pca(env.df, scannf = FALSE, nf = max(axes))
  }

  message(" - Defining the response of the species along PCA axes\n")
  # Means of the gaussian responses: user-provided, or drawn from a random
  # point of the PCA scores
  if (!is.null(means))
  {
    if (!is.numeric(means))
    {
      stop("Please provide numeric means for the gaussian function to compute",
           " probabilities of presence")
    }
    if (!is.vector(means) | length(means) != length(axes))
    {
      stop("Please provide a vector with as many means as chosen axes for the",
           " gaussian function (argument 'means')")
    }
  } else
  {
    means <- pca.object$li[sample(1:nrow(pca.object$li), 1), ]
    means <- unlist(means[1, axes])
    names(means) <- paste0("mean", axes)
  }

  # Standard deviations: user-provided, or sampled within a range constrained
  # by niche.breadth
  if (!is.null(sds))
  {
    if (!is.numeric(sds))
    {
      stop("Please provide numeric standard deviations for the gaussian",
           " function to compute probabilities of presence")
    }
    if (!is.vector(sds) | length(sds) != length(axes))
    {
      stop("Please provide a vector with as many standard deviations as",
           " chosen axes for the gaussian function (argument 'sds')")
    }
    if (any(sds < 0))
    {
      stop("The standard deviations must have a positive value!")
    }
    message(" - You have provided standard deviations, so argument",
            " niche.breadth will be ignored.\n")
  } else
  {
    # Defining a range of values to determine sds for the gaussian functions
    axes.sdrange <- vapply(axes, .range.function, pca.object,
                           FUN.VALUE = numeric(2))
    colnames(axes.sdrange) <- paste0("axis.", axes)
    # The range of values for sds are defined here: sd is sampled between
    # range/floor.sd and range/ceiling.sd of each axis
    if (niche.breadth == "any")
    {
      floor.sd <- 100
      ceiling.sd <- 2
    } else if (niche.breadth == "narrow")
    {
      floor.sd <- 100
      ceiling.sd <- 10
    } else if (niche.breadth == "wide")
    {
      floor.sd <- 10
      ceiling.sd <- 2
    } else
    {
      # Fixed: the error message was missing the closing quote around 'wide'
      stop("niche.breadth must be one of these: 'any', 'narrow', 'wide'")
    }
    sds <- vapply(axes, .sd.sample, sdrange = axes.sdrange,
                  sdfloor = floor.sd, sdceiling = ceiling.sd,
                  FUN.VALUE = numeric(1))
    names(sds) <- paste0("sd", axes)
  }

  message(" - Calculating suitability values\n")
  # Project every cell onto the selected PCA axes, then apply the product of
  # gaussian responses
  pca.env <- app(raster.stack[[sel.vars]],
                 fun = function(x, ...) {
                   .pca.coordinates(x, pca = pca.object, na.rm = TRUE,
                                    axes = axes)
                 })
  suitab.raster <- app(pca.env,
                       fun = function(x, ...) {
                         .prob.gaussian(x, means = means, sds = sds)
                       })

  if (rescale)
  {
    max_ <- global(suitab.raster, "max", na.rm = TRUE)[1, 1]
    min_ <- global(suitab.raster, "min", na.rm = TRUE)[1, 1]
    suitab.raster <- (suitab.raster - min_) / (max_ - min_)
    message("   The final environmental suitability was rescaled between 0 and 1. To disable, set argument rescale = FALSE")
  } else
  {
    max_ <- NA
    min_ <- NA
  }

  if (plot)
  {
    # Save and restore graphical parameters around the two-panel plot
    op <- graphics::par(no.readonly = TRUE)
    graphics::par(mar = c(5.1, 4.1, 4.1, 2.1))
    graphics::layout(matrix(nrow = 2, ncol = 1, c(1, 2)))
    plotResponse(x = raster.stack, approach = "pca",
                 parameters = list(pca = pca.object,
                                   axes = axes,
                                   means = means,
                                   sds = sds,
                                   rescale = rescale,
                                   max_prob_rescale = max_,
                                   min_prob_rescale = min_),
                 no.plot.reset = TRUE)
    plot(suitab.raster, axes = TRUE, ann = FALSE, asp = 1,
         main = "Environmental suitability of the virtual species",
         las = 1, col = viridis::viridis(10),
         bty = "n")
    graphics::par(op)
  }

  # The suitability raster is wrap()ped so the object can be saved to disk
  results <- list(approach = "pca",
                  details = list(variables = sel.vars,
                                 pca = pca.object,
                                 rescale = rescale,
                                 axes = axes,
                                 means = means,
                                 sds = sds,
                                 max_prob_rescale = max_,
                                 min_prob_rescale = min_),
                  suitab.raster = wrap(suitab.raster,
                                       proxy = FALSE))
  class(results) <- append("virtualspecies", class(results))
  return(results)
}

# Functions useful for the PCA approach ---------------------------------------

# Matrix product of a row of scaled environmental values with the loadings of
# one PCA axis
.f <- function(x, co) x %*% co

# Project raw environmental values onto the chosen PCA axes: centre and scale
# with the PCA's parameters, then multiply by the axis loadings
.pca.coordinates <- function(x, pca, na.rm, axes)
{
  x <- sweep(x, 2L, pca$cent, check.margin = FALSE)
  x <- sweep(x, 2L, pca$norm, "/", check.margin = FALSE)
  res <- matrix(sapply(axes, function(ax, x., pca.) {
    apply(x., 1, .f, co = pca.$c1[, ax])
  }, x. = x, pca. = pca), ncol = length(axes))
  colnames(res) <- paste0("x", axes)
  return(res)
}

# Product of gaussian densities along each axis: the species' suitability at a
# point given its PCA coordinates
.prob.gaussian <- function(x, means, sds)
{
  prod(stats::dnorm(x, mean = means, sd = sds))
}

# Range of an axis' scores, clamped to 5 interquartile ranges beyond the
# quartiles to limit the influence of extreme outliers
.range.function <- function(axis, pca)
{
  return(c(min = max(min(pca$li[, axis]),
                     quantile(pca$li[, axis], probs = .25) -
                       5 * (quantile(pca$li[, axis], probs = .75) -
                              quantile(pca$li[, axis], probs = .25))),
           max = min(max(pca$li[, axis]),
                     quantile(pca$li[, axis], probs = .75) +
                       5 * (quantile(pca$li[, axis], probs = .75) -
                              quantile(pca$li[, axis], probs = .25)))))
}

# Randomly draw one standard deviation between range/sdfloor and
# range/sdceiling of the axis
.sd.sample <- function(axis, sdrange, sdfloor, sdceiling)
{
  sample(seq(diff(sdrange[, paste0("axis.", axis)]) / sdfloor,
             diff(sdrange[, paste0("axis.", axis)]) / sdceiling,
             length = 1000), 1)
}
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/generateSpFromPCA.R
# Internal helper shared by the `$` and `[[` extractors below: retrieve
# element `name` from `x` treated as a plain list, unwrapping packed rasters
# (stored with terra::wrap()) back into usable SpatRaster objects.
.extract.and.unwrap <- function(x, name)
{
  value <- as.list(x)[[name]]
  if (inherits(value, "PackedSpatRaster"))
  {
    return(unwrap(value))
  }
  value
}

#' @export
#' @import terra
#' @method `$` VSSampledPoints
`$.VSSampledPoints` <- function(x, name)
{
  .extract.and.unwrap(x, name)
}

#' @export
#' @import terra
#' @method `[[` VSSampledPoints
`[[.VSSampledPoints` <- function(x, name)
{
  .extract.and.unwrap(x, name)
}

#' @export
#' @method `as.list` VSSampledPoints
as.list.VSSampledPoints <- function(x, ...)
{
  class(x) <- "list"
  return(x)
}

#' @export
#' @import terra
#' @method `$` virtualspecies
`$.virtualspecies` <- function(x, name)
{
  .extract.and.unwrap(x, name)
}

#' @export
#' @import terra
#' @method `[[` virtualspecies
`[[.virtualspecies` <- function(x, name)
{
  .extract.and.unwrap(x, name)
}

#' @export
#' @method `as.list` virtualspecies
as.list.virtualspecies <- function(x, ...)
{
  class(x) <- "list"
  return(x)
}

#' @export
#' @method print virtualspecies
print.virtualspecies <- function(x, ...)
{
  # Summary of the generation parameters, tailored to the approach used
  cat(paste("Virtual species generated from",
            length(x$details$variables),
            "variables:\n",
            paste(x$details$variables, collapse = ", ")))
  cat("\n\n- Approach used:")
  if (x$approach == "response")
  {
    cat(" Responses to each variable")
    cat("\n- Response functions:")
    sapply(x$details$variables, FUN = function(y)
    {
      cat("\n .", y,
          " [min=", x$details$parameters[[y]]$min,
          "; max=", x$details$parameters[[y]]$max,
          "] : ", x$details$parameters[[y]]$fun,
          " (", paste(names(x$details$parameters[[y]]$args),
                      x$details$parameters[[y]]$args,
                      sep = '=', collapse = "; "), ")",
          sep = "")
    })
    if (x$details$rescale.each.response)
    {
      cat("\n- Each response function was rescaled between 0 and 1")
    } else
    {
      cat("\n- Response functions were not rescaled between 0 and 1")
    }
    cat("\n- Environmental suitability formula = ", x$details$formula,
        sep = "")
    if (x$details$rescale)
    {
      cat("\n- Environmental suitability was rescaled between 0 and 1\n")
    } else
    {
      cat("\n- Environmental suitability was not rescaled between 0 and 1\n")
    }
  } else if (x$approach == "pca")
  {
    cat(" Response to axes of a PCA")
    cat("\n- Axes: ", paste(x$details$axes, collapse = ", "), "; ",
        round(sum(x$details$pca$eig[x$details$axes]) /
                sum(x$details$pca$eig) * 100, 2),
        "% explained by these axes")
    cat("\n- Responses to axes:")
    sapply(seq_along(x$details$axes), function(y)
    {
      cat("\n .Axis ", x$details$axes[y],
          " [min=", round(min(x$details$pca$li[, x$details$axes[y]]), 2),
          "; max=", round(max(x$details$pca$li[, x$details$axes[y]]), 2),
          "] : dnorm (mean=", x$details$means[y],
          "; sd=", x$details$sds[y], ")", sep = "")
    })
    if (x$details$rescale)
    {
      cat("\n- Environmental suitability was rescaled between 0 and 1\n")
    } else
    {
      cat("\n- Environmental suitability was not rescaled between 0 and 1\n")
    }
  } else if (x$approach == "bca")
  {
    cat(" Response to axes of a BCA")
    cat("\n- Axes: ", paste(x$details$axes, collapse = " & "), "; ",
        round(sum(x$details$bca$eig[x$details$axes]) /
                sum(x$details$bca$eig) * 100, 2),
        "% explained by these axes")
    cat("\n- Responses to axes:")
    sapply(seq_along(x$details$axes), function(y)
    {
      cat("\n .Axis ", x$details$axes[y],
          " [min=", round(min(x$details$bca$ls[, x$details$axes[y]]), 2),
          "; max=", round(max(x$details$bca$ls[, x$details$axes[y]]), 2),
          "] : dnorm (mean=", x$details$means[y],
          "; sd=", x$details$sds[y], ")", sep = "")
    })
    if (x$details$rescale)
    {
      cat("\n- Environmental suitability was rescaled between 0 and 1")
    } else
    {
      cat("\n- Environmental suitability was not rescaled between 0 and 1")
    }
  }
  # Optional sections, present only when the corresponding post-processing
  # steps (presence-absence conversion, distribution limitation) were applied
  if (!is.null(x$PA.conversion))
  {
    cat("\n- Converted into presence-absence:")
    cat("\n .Method =", x$PA.conversion["conversion.method"])
    if (x$PA.conversion["conversion.method"] == "probability")
    {
      if (x$PA.conversion["probabilistic.method"] == "logistic")
      {
        cat("\n .probabilistic method =",
            x$PA.conversion["probabilistic.method"])
        cat("\n .alpha (slope) =", x$PA.conversion["alpha"])
        cat("\n .beta (inflexion point) =", x$PA.conversion["beta"])
        cat("\n .species prevalence =",
            x$PA.conversion["species.prevalence"])
      } else if (x$PA.conversion["probabilistic.method"] == "linear")
      {
        cat("\n .probabilistic method =",
            x$PA.conversion["probabilistic.method"])
        cat("\n .a (slope) =", x$PA.conversion["a"])
        cat("\n .b (intercept) =", x$PA.conversion["b"])
        cat("\n .species prevalence =",
            x$PA.conversion["species.prevalence"])
      }
    } else if (x$PA.conversion["conversion.method"] == "threshold")
    {
      cat("\n .threshold =", x$PA.conversion["cutoff"])
      cat("\n .species prevalence =",
          x$PA.conversion["species.prevalence"], "\n")
    }
  }
  if (!is.null(x$occupied.area))
  {
    if (!is.null(x$geographical.limit))
    {
      cat("\n- Distribution bias introduced:")
      cat("\n .method used :", x$geographical.limit$method)
      if (x$geographical.limit$method %in% c("country", "region",
                                             "continent"))
      {
        cat("\n .area(s) :", x$geographical.limit$area, "\n")
      } else if (x$geographical.limit$method == "extent")
      {
        cat("\n .extent : [Xmin; Xmax] = [",
            ext(x$geographical.limit$extent)[1], "; ",
            ext(x$geographical.limit$extent)[2],
            "] - [Ymin; Ymax] = [",
            ext(x$geographical.limit$extent)[3], "; ",
            ext(x$geographical.limit$extent)[4], "]", "\n", sep = "")
      } else if (x$geographical.limit$method == "polygon")
      {
        cat("\n .polygon : Object of class ",
            class(x$geographical.limit$area), "\n", sep = "")
      }
    }
  }
}

#' @export
#' @method str virtualspecies
str.virtualspecies <- function(object, ...)
{
  # Default to a shallow (2-level) structure display to keep output readable
  args <- list(...)
  if (is.null(args$max.level))
  {
    args$max.level <- 2
  }
  NextMethod("str", object = object, max.level = args$max.level)
}

#' @export
#' @method plot virtualspecies
plot.virtualspecies <- function(x, ...)
{
  # Assemble all available layers of the virtual species into a single stack
  y <- x$suitab.raster
  names(y) <- "Suitability.raster"
  if (!is.null(x$probability.of.occurrence))
  {
    y <- c(y, x$probability.of.occurrence)
    names(y)[[nlyr(y)]] <- "Probability.of.occurrence.raster"
  }
  if (!is.null(x$pa.raster))
  {
    y <- c(y, x$pa.raster)
    names(y)[[nlyr(y)]] <- "Presence.absence.raster"
  }
  if (!is.null(x$occupied.area))
  {
    y <- c(y, x$occupied.area)
    names(y)[[nlyr(y)]] <- "Occupied.area.raster"
  }
  x <- y
  defaults <- list(x = x,
                   col = rev(viridis::magma(10)))
  args <- utils::modifyList(defaults, list(...))
  # Fixed: the merged argument list was computed but 'defaults' was passed to
  # do.call(), silently discarding any user-supplied plotting arguments
  do.call("plot", args)
}

#' @export
#' @method print VSSampledPoints
print.VSSampledPoints <- function(x, ...)
{
  # Next test is to ensure retrocompatibility with earlier versions of
  # virtualspecies where no print function was designed for VSSampledPoints
  if (!is.list(x$detection.probability))
  {
    # Fixed: print(x) re-dispatched to this very method and caused infinite
    # recursion. Print the object as a plain, unclassed list instead.
    print(unclass(x))
  } else
  {
    cat(paste("Occurrence points sampled from a virtual species"))
    cat(paste("\n\n- Type:", x$type))
    cat(paste("\n- Number of points:", nrow(x$sample.points)))
    if (length(x$bias))
    {
      cat("\n- Sampling bias: ")
      cat(paste("\n .Bias type:", x$bias$bias))
      cat(paste("\n .Bias strength:", x$bias$bias.strength))
    } else
    {
      cat("\n- No sampling bias")
    }
    cat(paste0("\n- Detection probability: "))
    cat(paste0("\n .Probability: ",
               x$detection.probability$detection.probability))
    cat(paste0("\n .Corrected by suitability: ",
               x$detection.probability$correct.by.suitability))
    cat(paste0("\n- Probability of identification error (false positive): ",
               x$error.probability))
    if (length(x$sample.prevalence))
    {
      cat(paste0("\n- Sample prevalence: "))
      cat(paste0("\n .True:",
                 x$sample.prevalence["true.sample.prevalence"]))
      cat(paste0("\n .Observed:",
                 x$sample.prevalence["observed.sample.prevalence"]))
    }
    cat(paste0("\n- Multiple samples can occur in a single cell: ",
               ifelse(x$replacement, "Yes", "No")))
    cat("\n\n")
    # Truncate long tables to their first 10 rows
    if (nrow(x$sample.points) > 10)
    {
      cat("First 10 lines: \n")
      print(x$sample.points[1:10, ])
      cat(paste0("... ", nrow(x$sample.points) - 10, " more lines.\n"))
    } else
    {
      print(x$sample.points)
    }
  }
}

#' @export
#' @method str VSSampledPoints
str.VSSampledPoints <- function(object, ...)
{
  # Default to a shallow (2-level) structure display to keep output readable
  args <- list(...)
  if (is.null(args$max.level))
  {
    args$max.level <- 2
  }
  NextMethod("str", object = object, max.level = args$max.level)
}
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/genericfunctions.R
#' Limit a virtual species distribution to a defined area #' #' @description #' This function is designed to limit species distributions to a subsample of #' their total distribution range. It will thus generate a species which is not #' at the equilibrium with its environment (i.e., which did not occupy the full #' range of suitable environmental conditions). #' #' This function basically takes any type of raster and will limit values above #' 0 to areas where the species is allowed to disperse. #' @param x a \code{SpatRaster} object composed of 0, 1 and NA, or the output #' list from #' \code{\link{generateSpFromFun}}, \code{\link{generateSpFromPCA}} #' or \code{\link{generateRandomSp}} #' @param geographical.limit \code{"country"}, \code{"region"}, #' \code{"continent"}, \code{"polygon"}, \code{"raster"} or \code{"extent"}. #' The method used #' to limit the distribution range: see details. #' @param area \code{NULL}, a character string, a \code{polygon}, a #' \code{raster} or an \code{extent} object. #' The area in which the distribution range will be limited: see details. #' If \code{NULL} #' and \code{geographical.limit = "extent"}, then you will be asked to draw an #' extent on the map. #' @param plot \code{TRUE} or \code{FALSE}. If \code{TRUE}, the resulting #' limited #' distribution will be plotted. 
#' @details #' \href{http://borisleroy.com/virtualspecies_tutorial/08-dispersallimitation.html}{ #' Online tutorial for this function} #' #' #' \bold{How the function works:} #' #' The function will remove occurrences of the species outside the chosen area: #' \itemize{ #' \item{NA are kept unchanged} #' \item{0 are kept unchanged} #' \item{values > 0 within the limits of \code{area} are kept unchanged} #' \item{values > 0 outside the limits of \code{area} are set to 0} #' } #' #' #' \bold{How to define the area in which the range is limited:} #' #' You can choose to limit the distribution range of the species to: #' \enumerate{ #' \item{a particular country, region or continent (assuming your raster has #' the WGS84 projection): #' #' Set the argument #' \code{geographical.limit} to \code{"country"}, \code{"region"} or #' \code{"continent"}, and provide the name(s) of the associated countries, #' regions or continents to \code{area} (see examples). #' #' List of possible \code{area} names: #' \itemize{ #' \item{Countries: type #' \code{unique(rnaturalearth::ne_countries(returnclass ='sf')$sovereignt)} #' in the console} #' \item{Regions: "Africa", "Antarctica", "Asia", "Oceania", "Europe", #' "Americas"} #' \item{Continents: "Africa", "Antarctica", "Asia", "Europe", #' "North America", "Oceania", "South America"}} #' } #' \item{a polygon: #' #' Set \code{geographical.limit} to \code{"polygon"}, and provide your #' polygon to \code{area}. #' } #' \item{a raster: #' #' Set \code{geographical.limit} to \code{"raster"}, and provide your #' raster to \code{area}. Your raster values should be 1 (suitable area), #' 0 (unsuitable area) or NA (outside your mask). 
#' } #' \item{an extent object: #' #' Set \code{geographical.limit} to \code{"extent"}, and either provide your #' extent object to \code{area}, or leave it \code{NULL} to draw an extent on #' the map.} #' } #' @return #' a \code{list} containing 7 elements: #' \itemize{ #' \item{\code{approach}: the approach used to generate the species, \emph{i.e.}, \code{"response"}} #' \item{\code{details}: the details and parameters used to generate the species} #' \item{\code{suitab.raster}: the virtual species distribution, as a Raster object containing the #' environmental suitability)} #' \item{\code{PA.conversion}: the parameters used to convert the suitability into presence-absence} #' \item{\code{pa.raster}: the presence-absence map, as a Raster object containing 0 (absence) / 1 (presence) / NA} #' \item{\code{geographical.limit}: the method used to #' limit the distribution and the area in which the distribution is restricted} #' \item{\code{occupied.area}: the area occupied by the virtual species as a #' Raster of presence-absence} #' } #' The structure of the virtualspecies object can be seen using \code{str()} #' @export #' @import terra #' @author #' Boris Leroy \email{leroy.boris@@gmail.com} #' #' with help from C. N. Meynard, C. Bellard & F. 
Courchamp
#' @examples
#' # Create an example stack with six environmental variables
#' a <- matrix(rep(dnorm(1:100, 50, sd = 25)),
#'             nrow = 100, ncol = 100, byrow = TRUE)
#' env <- c(rast(a * dnorm(1:100, 50, sd = 25)),
#'          rast(a * 1:100),
#'          rast(a * logisticFun(1:100, alpha = 10, beta = 70)),
#'          rast(t(a)),
#'          rast(exp(a)),
#'          rast(log(a)))
#' names(env) <- paste("Var", 1:6, sep = "")
#'
#' # More than 6 variables: by default a PCA approach will be used
#' sp <- generateRandomSp(env)
#'
#' # limiting the distribution to a specific extent
#' limit <- ext(1, 50, 1, 50)
#'
#' limitDistribution(sp, area = limit)
#'
#'
#' # Example of a raster of habitat patches
#' habitat.raster <- setValues(sp$pa.raster,
#'                             sample(c(0, 1), size = ncell(sp$pa.raster),
#'                                    replace = TRUE))
#'
#' plot(habitat.raster) # 1 = suitable habitat; 0 = unsuitable habitat
#' sp <- limitDistribution(sp, geographical.limit = "raster",
#'                         area = habitat.raster)
#' par(mfrow = c(2, 1))
#' plot(sp$pa.raster)
#' plot(sp$occupied.area) # Species could not occur in many cells because
#' # habitat patches were unsuitable
limitDistribution <- function(x, geographical.limit = "extent",
                              area = NULL,
                              plot = TRUE)
{
  # --- 1. Extract the presence-absence raster from x and prepare the result ---
  if(inherits(x, "virtualspecies")) {
    if(inherits(x$pa.raster, "SpatRaster")) {
      sp.raster <- x$pa.raster
      # Keep the whole virtualspecies object so existing elements are returned
      results <- x
    } else stop("x must be:\n- a raster layer object\nor\n- the output list",
                " from function convertToPA(), generateSpFromFun(), ",
                "generateSpFromPCA() or generateRandomSp()")
  } else if (inherits(x, "RasterLayer")) {
    # Legacy raster package input: convert to terra
    sp.raster <- rast(x)
    # NOTE(review): the packed raster is stored as an unnamed first element;
    # confirm downstream consumers expect this structure
    results <- list(wrap(sp.raster))
  } else if (inherits(x, "SpatRaster")) {
    sp.raster <- x
    results <- list(wrap(sp.raster))
  } else stop("x must be:\n- a raster layer object\nor\n- the output list",
              " from function convertToPA(), generateSpFromFun(),",
              " generateSpFromPCA() or generateRandomSp()")

  # --- 2. Validate the chosen limitation method -----------------------------
  if(length(geographical.limit) > 1) {
    stop('Only one dispersal limit method can be applied at a time')
  }
  if (!(geographical.limit %in% c("country", "region",
                                  "extent", "polygon", "continent",
                                  "raster"))) {
    stop('Argument geographical.limit must be one of : country, region,',
         ' continent, polygon, extent, raster')
  }

  # --- 3. Validate / acquire the limiting area for each method --------------
  if (geographical.limit %in% c("country", "region", "continent")) {
    # rnaturalearth is an optional dependency, only needed for these methods
    if(!("rnaturalearth" %in% rownames(utils::installed.packages()))) {
      stop('You need to install the package "rnaturalearth".')
    }
    worldmap <- rnaturalearth::ne_countries(returnclass = "sf")
    if(geographical.limit == "country") {
      if (any(!(area %in% worldmap$sovereignt))) {
        stop("area name(s) must be correctly spelled. Type ",
             "unique(rnaturalearth::ne_countries(returnclass =",
             "'sf')$sovereignt) to obtain valid names.")
      }
      results$geographical.limit <- list(method = geographical.limit,
                                         area = area)
    } else if (geographical.limit == "region") {
      if (any(!(area %in% worldmap$region_un))) {
        stop(paste("region name(s) must be correctly spelled, according to",
                   " one of the following : ",
                   paste(unique(worldmap$region_un), collapse = ", "),
                   sep = "\n"))
      }
      results$geographical.limit <- list(method = geographical.limit,
                                         area = area)
    } else if (geographical.limit == "continent") {
      if (any(!(area %in% worldmap$continent))) {
        # Fixed error message: it previously said "region name(s)"
        # (copy-paste from the region branch) although this branch
        # validates continent names.
        stop(paste("continent name(s) must be correctly spelled,",
                   "according to one of the following : ",
                   paste(unique(worldmap$continent), collapse = ", "),
                   sep = "\n"))
      }
      results$geographical.limit <- list(method = geographical.limit,
                                         area = area)
    }
  } else if (geographical.limit == "polygon") {
    if(is.null(area)) {
      # No polygon supplied: let the user draw one interactively
      message("No object of class SpatVector or sf provided to area (or wrong",
              " object class). A window with a map ",
              "will open, click on the map to draw the polygon of the area",
              ".\n Once finished, press ",
              "escape to close the polygon.")
      # RStudio's device does not support interactive drawing reliably,
      # so open a standalone device in that case
      if("RStudioGD" %in% names(grDevices::dev.list())) {
        grDevices::dev.new(noRStudioGD = TRUE)
      }
      plot(sp.raster)
      area <- draw(x = "polygon")
    } else if(!(inherits(area, c("SpatVector", "sf")))) {
      stop("If you choose geographical.limit = 'polygon', please provide a",
           " polygon of class ",
           "sf or SpatVector to argument area. You can also set",
           " area = NULL to draw the polygon manually.")
    }
    results$geographical.limit <- list(method = geographical.limit,
                                       area = area)
  } else if(geographical.limit == "extent") {
    if(!(inherits(area, "SpatExtent"))) {
      # No extent supplied: let the user draw one interactively
      message("No object of class SpatExtent provided (or wrong object class).",
              " A window with a map ",
              "will open, click on the map to draw the extent of the area",
              ".\n Once finished, press ",
              "escape to close the polygon.")
      if("RStudioGD" %in% names(grDevices::dev.list())) {
        grDevices::dev.new(noRStudioGD = TRUE)
      }
      plot(sp.raster)
      area <- vect(draw())
    } else {
      # Convert the extent to a vector so it can be rasterized below
      area <- vect(area)
    }
  } else if(geographical.limit == "raster") {
    if(inherits(area, "RasterLayer")) {
      message("area was a raster object, converting to terra...")
      area <- rast(area)
    }
    if (!(inherits(area, "SpatRaster"))) {
      stop("If you choose to limit the distribution with a raster, please",
           " provide the raster to argument 'area'")
    }
    # The mask raster is expected to contain only 0, 1 and NA; warn (but do
    # not stop) when values fall outside that range
    if(global(area, "max", na.rm = TRUE)[1, 1] > 1) {
      warning("The raster used to limit species distribution has values",
              " greater than 1. This will likely result in something very",
              " different to a distribution raster.")
    }
    if(global(area, "min", na.rm = TRUE)[1, 1] < 0) {
      warning("The raster used to limit species distribution has values lower",
              " than 0. This will likely result in something very different to",
              " a distribution raster.")
    }
    results$geographical.limit <- list(method = geographical.limit,
                                       area = area)
  }

  # --- 4. Apply the limitation: multiply the PA raster by a 0/1 mask --------
  geographical.limit.raster <- sp.raster
  if(geographical.limit == "country") {
    geographical.limit.raster1 <- rasterize(
      worldmap[which(worldmap$sovereignt %in% area), ],
      geographical.limit.raster,
      field = 1,
      background = 0, silent = TRUE)
    geographical.limit.raster <- geographical.limit.raster *
      geographical.limit.raster1
  } else if(geographical.limit == "region") {
    geographical.limit.raster1 <-
      rasterize(worldmap[which(worldmap$region_un %in% area), ],
                geographical.limit.raster,
                field = 1,
                background = 0, silent = TRUE)
    geographical.limit.raster <- geographical.limit.raster *
      geographical.limit.raster1
  } else if(geographical.limit == "continent") {
    geographical.limit.raster1 <- rasterize(
      worldmap[which(worldmap$continent %in% area), ],
      geographical.limit.raster,
      field = 1,
      background = 0, silent = TRUE)
    geographical.limit.raster <- geographical.limit.raster *
      geographical.limit.raster1
  } else if(geographical.limit == "extent") {
    geographical.limit.raster <- geographical.limit.raster *
      rasterize(area, sp.raster, field = 1, background = 0)
    results$geographical.limit <- list(method = geographical.limit,
                                       extent = area)
  } else if(geographical.limit == "polygon") {
    geographical.limit.raster1 <- rasterize(area,
                                            geographical.limit.raster,
                                            field = 1,
                                            background = 0, silent = TRUE)
    geographical.limit.raster <- geographical.limit.raster *
      geographical.limit.raster1
  } else if(geographical.limit == "raster") {
    # The user-supplied mask is used directly as the multiplier
    geographical.limit.raster <- geographical.limit.raster * area
  }
  if(plot) {
    plot(geographical.limit.raster)
  }
  results$occupied.area <- geographical.limit.raster
  return(results)
}
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/limitDistribution.R
#' Visualise the response of the virtual species to environmental variables #' #' This function plots the relationships between the virtual species and the environmental variables. #' It requires either the output from \code{\link{generateSpFromFun}}, \code{\link{generateSpFromPCA}}, #' \code{\link{generateRandomSp}}, #' or a manually defined set of environmental variables and response functions. #' #' @param x the output from \code{\link{generateSpFromFun}}, \code{\link{generateSpFromPCA}}, #' \code{\link{generateRandomSp}}, or #' a raster layer/stack of environmental variables (see details for the latter). #' @param parameters in case of manually defined response functions, a list #' containing the associated parameters. See details. #' @param approach in case of manually defined response functions, the chosen #' approach: either \code{"response"} for a per-variable response approach, or #' \code{"pca"} for a PCA approach. #' @param rescale \code{TRUE} or \code{FALSE}. If \code{TRUE}, individual response #' plots are rescaled between 0 and 1. #' @param axes.to.plot a vector of 2 values listing the two axes of the PCA to plot. #' Only useful for a PCA species. #' @param no.plot.reset \code{TRUE} or \code{FALSE}. If \code{FALSE}, the plot window #' will be reset to its initial state after the response has been plotted. #' @param rescale.each.response \code{TRUE} or \code{FALSE}. If \code{TRUE}, #' the individual responses to #' each environmental variable are rescaled between 0 and 1. #' @param ... further arguments to be passed to \code{plot}. See #' \code{\link[graphics]{plot}} and \code{\link[graphics]{par}}. #' @details #' If you provide the output from \code{\link{generateSpFromFun}}, \code{\link{generateSpFromPCA}} or #' \code{\link{generateRandomSp}} #' then the function will automatically make the appropriate plots. 
#' #' Otherwise, you can provide a raster layer/stack of environmental variables to #' \code{x} and a list of functions to \code{parameters} to perform the plot. #' In that case, you have to specify the \code{approach}: \code{"reponse"} or #' \code{"PCA"}: #' \itemize{ #' \item{if \code{approach = "response"}: Provide to \code{parameters} a #' \code{list} exactly as defined in \code{\link{generateSpFromFun}}:\cr #' \code{list( #' var1 = list(fun = 'fun1', args = list(arg1 = ..., arg2 = ..., etc.)), #' var2 = list(fun = 'fun2', args = list(arg1 = ..., arg2 = ..., etc.)))}\cr #' #' } #' \item{if \code{approach = "PCA"}: Provide to \code{parameters} a #' \code{list} containing the following elements: #' \itemize{ #' \item{\code{pca}: a \code{dudi.pca} object computed with #' \code{\link[ade4]{dudi.pca}}} #' \item{\code{means}: a vector containing two numeric values. Will be used to define #' the means of the gaussian response functions to the axes of the PCA.} #' \item{\code{sds} a vector containing two numeric values. Will be used to define #' the standard deviations of the gaussian response functions to the axes of #' the PCA.}} #' } #' } #' @export #' @import terra #' @author #' Boris Leroy \email{leroy.boris@@gmail.com} #' #' with help from C. N. Meynard, C. Bellard & F. 
Courchamp
#' @examples
#' # Create an example stack with four environmental variables
#' a <- matrix(rep(dnorm(1:100, 50, sd = 25)),
#'             nrow = 100, ncol = 100, byrow = TRUE)
#' env <- c(rast(a * dnorm(1:100, 50, sd = 25)),
#'          rast(a * 1:100),
#'          rast(a * logisticFun(1:100, alpha = 10, beta = 70)),
#'          rast(t(a)))
#' names(env) <- c("var1", "var2", "var3", "var4")
#'
#' # Per-variable response approach:
#' parameters <- formatFunctions(var1 = c(fun = 'dnorm', mean = 0.00012,
#'                                        sd = 0.0001),
#'                               var2 = c(fun = 'linearFun', a = 1, b = 0),
#'                               var3 = c(fun = 'quadraticFun', a = -20, b = 0.2,
#'                                        c = 0),
#'                               var4 = c(fun = 'logisticFun', alpha = -0.001,
#'                                        beta = 0.002))
#' sp1 <- generateSpFromFun(env, parameters, plot = TRUE)
#' plotResponse(sp1)
#'
#' # PCA approach:
#' sp2 <- generateSpFromPCA(env, plot = FALSE)
#' par(mfrow = c(1, 1))
#' plotResponse(sp2)
#'
plotResponse <- function(x, parameters = NULL, approach = NULL, rescale = NULL,
                         axes.to.plot = NULL, no.plot.reset = FALSE,
                         rescale.each.response = NULL, ...)
{
  # Accept legacy raster objects by converting them to terra
  if(inherits(x, "Raster")) {
    x <- rast(x)
  }
  if(inherits(x, "SpatRaster")) {
    # --- Manual mode: x is a raster stack, parameters supplied by the user ---
    if (length(approach) > 1) {
      stop("Only one approach can be plotted at a time")
    }
    # Check NULL before any comparison: `approach == "response"` errors on
    # NULL ("argument is of length zero"), which previously made the intended
    # error message below unreachable.
    if (is.null(approach)) {
      stop("Please choose the approach: 'response' or 'pca'.")
    }
    if (approach == "response") {
      if (!is.list(parameters)) {
        stop("If you choose the response approach please provide the parameters")
      }
      if(nlyr(x) != length(parameters)) {
        stop("Provide as many layers in x as functions on parameters")
      }
      # Names must match in both directions (no extra layers, no extra params)
      if(any(!(names(parameters) %in% names(x)) |
             !(names(x) %in% names(parameters)))) {
        stop("Layer names and names of parameters must be identical")
      }
      # Store each layer's observed range to bound the response curves
      for (var in names(x)) {
        parameters[[var]]$min <- global(x[[var]], "min", na.rm = TRUE)[1, 1]
        parameters[[var]]$max <- global(x[[var]], "max", na.rm = TRUE)[1, 1]
      }
      if(!is.null(rescale.each.response)){
        if(rescale.each.response) {
          rescale <- TRUE
        } else if (!rescale.each.response) {
          rescale <- FALSE
        } else {
          stop("rescale.each.response must be TRUE or FALSE")
        }
      } else {
        message("No default value was defined for rescale.each.response, ",
                "setting rescale.each.response = TRUE")
        rescale <- TRUE
        # We use rescale for the rest of the code
        # but it corresponds to rescale.each.response here
      }
    } else if (approach == "pca") {
      if(any(!(parameters$variables %in% names(x)))) {
        stop("The PCA does not seem to have been computed with the same variables as in x.")
      }
      if(!is.list(parameters)) {
        stop("Please provide an appropriate list of parameters to draw the plots (see the help for details)")
      }
      if(!all(class(parameters$pca) %in% c("pca", "dudi"))) {
        stop ("Please provide an appropriate pca.object (output of dudi.pca()) to make the pca plot.\n If you don't know how to obtain the pca, try to first run generateSpFromPCA() and provide the output to plotResponse()")
      }
      if(!(is.numeric(parameters$means)) | !(is.numeric(parameters$sds))) {
        stop ("Please provide appropriate means & standard deviations to elements 'means' and 'sds' of parameters. If you don't know how to provide these, try to first run generateSpFromPCA() to responsePlot()")
      }
      if(is.null(rescale)) {
        rescale <- TRUE
        message("Argument rescale was not specified, so it was automatically ",
                "defined to TRUE. Verify that this is what you want.")
      }
      details <- parameters
    } else if (approach == "bca") {
      if(any(!(parameters$variables %in% names(x)))) {
        stop("The BCA does not seem to have been computed with the same variables as in x.")
      }
      if (!is.list(parameters)) {
        stop("Please provide an appropriate list of parameters to draw the plots (see the help for details)")
      }
      if (!all(class(parameters$bca) %in% c("between", "dudi"))) {
        stop ("Please provide an appropriate bca.object (output of bca()) to make the between component analysis plot.\n If you don't know how to obtain the between, try to first run generateSpFromPCA() and provide the output to plotResponse()")
      }
      if(!(is.numeric(parameters$means)) | !(is.numeric(parameters$sds))) {
        stop ("Please provide appropriate means & standard deviations to elements 'means' and 'sds' of parameters. If you don't know how to provide these, try to first run generateSpFromPCA() to responsePlot()")
      }
      details <- parameters
    }
    # NOTE(review): this block re-applies the rescale.each.response logic for
    # every approach (including pca/bca), overriding any rescale value set
    # above and emitting a second message — kept as-is to preserve behavior,
    # but worth confirming against the original intent.
    if(!is.null(rescale.each.response)){
      if(rescale.each.response) {
        rescale <- TRUE
      } else if (!rescale.each.response) {
        rescale <- FALSE
      } else {
        stop("rescale.each.response must be TRUE or FALSE")
      }
    } else {
      message("No default value was defined for rescale.each.response, ",
              "setting rescale.each.response = TRUE")
      rescale <- TRUE
      # We use rescale for the rest of the code
      # but it corresponds to rescale.each.response here
    }
  } else if (inherits(x, "virtualspecies")) {
    # --- Automatic mode: everything is read from the virtualspecies object ---
    if (any(!(c("approach", "details", "suitab.raster") %in% names(x)))) {
      if(!length(grep("suitab.raster", names(x)))) {
        stop("x does not seem to be a valid object: Either provide an output from functions generateSpFromFun(),",
             " generateSpFromPCA(), generateSpFromBCA() or generateRandomSp() or",
             " a raster object")
      }
    }
    approach <- x$approach
    details <- x$details
    if (x$approach == "response") {
      rescale <- details$rescale.each.response
    } else if (x$approach == "pca") {
      rescale <- details$rescale
    }
    if("parameters" %in% names(details)) {
      parameters <- x$details$parameters
    } else {
      parameters <- NULL
    }
  } else {
    stop("x does not seem to be a valid object: Either provide an output from functions generateSpFromFun(), generateSpFromPCA(), generateSpFromBCA() or generateRandomSp() or a raster object")
  }

  if (approach == "response") {
    # One panel per variable, in a near-square layout
    mfrow <- c(floor(sqrt(length(parameters))),
               ceiling(sqrt(length(parameters))))
    if (prod(mfrow) < length(parameters)) {mfrow[1] <- mfrow[1] + 1}
    if(!no.plot.reset) {
      op <- graphics::par(no.readonly = TRUE)
    }
    graphics::par(mfrow = mfrow, mar = c(4.1, 4.1, 0.1, 0.1))
    for(i in names(parameters)) {
      # Evaluate the response function over the variable's observed range
      cur.seq <- seq(parameters[[i]]$min, parameters[[i]]$max, length = 1000)
      values <- do.call(match.fun(parameters[[i]]$fun),
                        args = c(list(cur.seq), parameters[[i]]$args))
      if(rescale) {
        values <- (values - min(values)) / (max(values) - min(values))
      }
      # Formatting plotting arguments; user-supplied ... overrides defaults
      defaults <- list(x = cur.seq, y = values, type = "l", bty = "l",
                       cex.axis = .7, ylab = "Suitability", xlab = "",
                       las = 1, cex = .5, cex.lab = 1)
      args <- utils::modifyList(defaults, list( ...))
      do.call("plot", args)
      graphics::mtext(side = 1, text = i, line = 2, cex = args$cex.lab)
    }
    if(!no.plot.reset) {
      graphics::par(op)
    }
  } else if (approach == "pca") {
    pca.object <- details$pca
    axes <- details$axes
    if(is.null(axes.to.plot)) {
      axes.to.plot <- axes[1:2]
    } else if (length(axes.to.plot) != 2) {
      stop("Please provide 2 values in axes.to.plot (plot 3 or more axes at a time is currently unsupported)")
    } else if (any(!(axes.to.plot %in% axes))) {
      stop("Please provide axes.to.plot that are included in your PCA virtualspecies")
    }
    means <- details$means
    sds <- details$sds
    # Suitability of each pixel from its position on the retained PCA axes
    probabilities <- apply(pca.object$li[, axes], 1, .prob.gaussian,
                           means = means, sds = sds)
    if(rescale) {
      probabilities <- (probabilities - details$min_prob_rescale) /
        (details$max_prob_rescale - details$min_prob_rescale)
    }
    # Expand the lower limits by 30% to leave room for the marginal densities
    xmin <- min(pca.object$li[, axes.to.plot[1]]) -
      0.3 * diff(range(pca.object$li[, axes.to.plot[1]]))
    xmax <- max(pca.object$li[, axes.to.plot[1]])
    ymin <- min(pca.object$li[, axes.to.plot[2]]) -
      0.3 * diff(range(pca.object$li[, axes.to.plot[2]]))
    ymax <- max(pca.object$li[, axes.to.plot[2]])
    if(!no.plot.reset) {
      op <- graphics::par(no.readonly = TRUE)
    }
    graphics::par(mar = c(4.1, 4.1, 4.1, 4.6))
    defaults <- list(x = pca.object$li[, axes.to.plot],
                     col = c(grDevices::grey(.8),
                             rev(viridis::inferno(150)))[
                               match(round(probabilities * 100, 0), 0:100)],
                     xlim = c(xmin, xmax), ylim = c(ymin, ymax),
                     main = paste0("PCA of environmental conditions\nAxes ",
                                   paste0(axes.to.plot, collapse = " & "),
                                   " (", length(axes),
                                   " axes included in total)"),
                     bty = "n", las = 1, cex.axis = .7, pch = 16)
    args <- utils::modifyList(defaults, list(...))
    # Fixed: previously do.call("plot", defaults), which silently discarded
    # the user-supplied ... arguments merged into args just above.
    do.call("plot", args)
    # Positions of the plotted axes within the retained axes
    i1 <- which(axes == axes.to.plot[1])
    i2 <- which(axes == axes.to.plot[2])
    # Ellipse showing +/- 1 sd of the species' gaussian response
    graphics::polygon(
      sqrt((sds[i1] * cos(seq(0, 2 * pi, length = 100)))^2 +
             (sds[i2] * sin(seq(0, 2 * pi, length = 100)))^2) *
        cos(atan2(sds[i2] * sin(seq(0, 2 * pi, length = 100)),
                  sds[i1] * cos(seq(0, 2 * pi, length = 100)))) + means[i1],
      sqrt((sds[i1] * cos(seq(0, 2 * pi, length = 100)))^2 +
             (sds[i2] * sin(seq(0, 2 * pi, length = 100)))^2) *
        sin(atan2(sds[i2] * sin(seq(0, 2 * pi, length = 100)),
                  sds[i1] * cos(seq(0, 2 * pi, length = 100)))) + means[i2],
      col = NA, lty = 1, lwd = 1, border = NULL)
    graphics::par(xpd = F)
    # Dotted guides from the ellipse down/left to the marginal densities
    graphics::segments(x0 = means[i1] - sds[i1], x1 = means[i1] - sds[i1],
                       y0 = ymin - 2 * diff(c(ymin, ymax)), y1 = means[i2],
                       lty = 3)
    graphics::segments(x0 = means[i1] + sds[i1], x1 = means[i1] + sds[i1],
                       y0 = ymin - 2 * diff(c(ymin, ymax)), y1 = means[i2],
                       lty = 3)
    graphics::segments(x0 = xmin - 2 * diff(c(xmin, xmax)), x1 = means[i1],
                       y0 = means[i2] - sds[i2], y1 = means[i2] - sds[i2],
                       lty = 3)
    graphics::segments(x0 = xmin - 2 * diff(c(xmin, xmax)), x1 = means[i1],
                       y0 = means[i2] + sds[i2], y1 = means[i2] + sds[i2],
                       lty = 3)
    # Place the variable-arrow inset in the quadrant away from the ellipse
    cutX <- diff(c(xmin, xmax)) * 2/3 + xmin
    cutY <- diff(c(ymin, ymax)) * 2/3 + ymin
    if(means[i1] <= cutX & means[i2] <= cutY) {
      x0 <- xmax - 0.15 * diff(c(xmin, xmax))
      y0 <- ymax - 0.15 * diff(c(ymin, ymax))
      x1 <- pca.object$co[, i1] + x0
      y1 <- pca.object$co[, i2] + y0
    } else if(means[i1] > cutX & means[i2] <= cutY) {
      x0 <- xmin + 0.25 * diff(c(xmin, xmax))
      y0 <- ymax - 0.15 * diff(c(ymin, ymax))
      x1 <- pca.object$co[, i1] + x0
      y1 <- pca.object$co[, i2] + y0
    } else if(means[i1] <= cutX & means[i2] > cutY) {
      x0 <- xmax - 0.15 * diff(c(xmin, xmax))
      y0 <- ymin + 0.25 * diff(c(ymin, ymax))
      x1 <- pca.object$co[, i1] + x0
      y1 <- pca.object$co[, i2] + y0
    } else if(means[i1] > cutX & means[i2] > cutY) {
      x0 <- xmin + 0.25 * diff(c(xmin, xmax))
      y0 <- ymin + 0.25 * diff(c(ymin, ymax))
      x1 <- pca.object$co[, i1] + x0
      y1 <- pca.object$co[, i2] + y0
    }
    graphics::par(xpd = T)
    x1y1 <- cbind(x1, y1)
    apply(x1y1, 1, FUN = function(x, a = x0, b = y0) {
      .arrows(x0 = a, y0 = b, x1 = x[1], y1 = x[2])
    })
    .arrowLabels(x = x1, y = y1, label = rownames(pca.object$co),
                 clabel = 1, origin = c(x0, y0))
    graphics::legend(title = "Pixel\nsuitability", "topright",
                     inset = c(-0.1, 0),
                     legend = c(1, 0.8, 0.6, 0.4, 0.2, 0), pch = 16,
                     col = c(viridis::inferno(150)[c(1, 38, 75, 113, 150)],
                             grDevices::grey(.8)), bty = "n")
    # Marginal density along the first plotted axis
    graphics::par(new = T)
    valY <- stats::dnorm(seq(xmin, xmax, length = 1000),
                         mean = means[i1], sd = sds[i1])
    valY <- 0.15 * (valY - min(valY))/(max(valY) - min(valY))
    valX <- seq(xmin, xmax, length = 1000)
    plot(valY ~ valX, col = grDevices::grey(.7),
         type = "l", bty = "n", ylim = c(0, 1), lty = 1,
         xlab = "", ylab = "", xaxt = "n", yaxt = "n")
    # Marginal density along the second plotted axis.
    # Fixed: previously used hard-coded means[2]/sds[2], which plotted the
    # wrong marginal whenever axes.to.plot selected axes other than the
    # first two (the horizontal marginal above correctly uses i1).
    graphics::par(new = T)
    valY <- seq(ymin, ymax, length = 1000)
    valX <- stats::dnorm(seq(ymin, ymax, length = 1000),
                         mean = means[i2], sd = sds[i2])
    valX <- 0.15 * (valX - min(valX))/(max(valX) - min(valX))
    plot(valX, valY, col = grDevices::grey(.7),
         type = "l", bty = "n", xlim = c(0, 1), lty = 1,
         xlab = "", ylab = "", xaxt = "n", yaxt = "n")
    if(!no.plot.reset) {
      graphics::par(op)
    }
  } else if (approach == "bca") {
    bca.object <- details$bca
    means <- details$means
    sds <- details$sds
    # Number of pixels belonging to each (current/future) raster stack
    lengths <- details$stack.lengths
    if(!length(rescale)) {
      rescale <- details$rescale
    }
    probabilities <- apply(bca.object$ls, 1, .prob.gaussian, means = means,
                          sds = sds)
    if(rescale) {
      probabilities <- (probabilities - details$min_prob_rescale) /
        (details$max_prob_rescale - details$min_prob_rescale)
    }
    xmin <- min(bca.object$ls[, 1]) - 0.3 * diff(range(bca.object$ls[, 1]))
    xmax <- max(bca.object$ls[, 1])
    ymin <- min(bca.object$ls[, 2]) - 0.3 * diff(range(bca.object$ls[, 2]))
    ymax <- max(bca.object$ls[, 2])
    graphics::par(mar = c(4.1, 4.1, 4.1, 6.1))
    defaults <- list(x = bca.object$ls,
                     col = c(grDevices::grey(.8),
                             rev(viridis::inferno(150)))[
                               match(round(probabilities * 100, 0), 0:100)],
                     cex = .35,
                     # Squares for current conditions, triangles for future
                     pch = c(rep(15, lengths[1]), rep(17, lengths[2] + 1)),
                     xlim = c(xmin, xmax), ylim = c(ymin, ymax),
                     main = "BCA of environmental conditions",
                     bty = "n", las = 1, cex.axis = .7)
    args <- utils::modifyList(defaults, list(...))
    # Fixed: previously do.call("plot", defaults), discarding user ... args
    do.call("plot", args)
    # +/- 1 sd ellipse of the gaussian response (BCA always uses 2 axes)
    graphics::polygon(
      sqrt((sds[1] * cos(seq(0, 2 * pi, length = 100)))^2 +
             (sds[2] * sin(seq(0, 2 * pi, length = 100)))^2) *
        cos(atan2(sds[2] * sin(seq(0, 2 * pi, length = 100)),
                  sds[1] * cos(seq(0, 2 * pi, length = 100)))) + means[1],
      sqrt((sds[1] * cos(seq(0, 2 * pi, length = 100)))^2 +
             (sds[2] * sin(seq(0, 2 * pi, length = 100)))^2) *
        sin(atan2(sds[2] * sin(seq(0, 2 * pi, length = 100)),
                  sds[1] * cos(seq(0, 2 * pi, length = 100)))) + means[2],
      col = NA, lty = 1, lwd = 1, border = NULL)
    graphics::par(xpd = F)
    graphics::segments(x0 = means[1] - sds[1], x1 = means[1] - sds[1],
                       y0 = ymin - 2 * diff(c(ymin, ymax)), y1 = means[2],
                       lty = 3)
    graphics::segments(x0 = means[1] + sds[1], x1 = means[1] + sds[1],
                       y0 = ymin - 2 * diff(c(ymin, ymax)), y1 = means[2],
                       lty = 3)
    graphics::segments(x0 = xmin - 2 * diff(c(xmin, xmax)), x1 = means[1],
                       y0 = means[2] - sds[2], y1 = means[2] - sds[2],
                       lty = 3)
    graphics::segments(x0 = xmin - 2 * diff(c(xmin, xmax)), x1 = means[1],
                       y0 = means[2] + sds[2], y1 = means[2] + sds[2],
                       lty = 3)
    # Place the variable-arrow inset away from the ellipse
    cutX <- diff(c(xmin, xmax)) * 2/3 + xmin
    cutY <- diff(c(ymin, ymax)) * 2/3 + ymin
    if(means[1] <= cutX & means[2] <= cutY) {
      x0 <- xmax - 0.15 * diff(c(xmin, xmax))
      y0 <- ymax - 0.15 * diff(c(ymin, ymax))
      x1 <- bca.object$c1[, 1] + x0
      y1 <- bca.object$c1[, 2] + y0
    } else if(means[1] > cutX & means[2] <= cutY) {
      x0 <- xmin + 0.25 * diff(c(xmin, xmax))
      y0 <- ymax - 0.15 * diff(c(ymin, ymax))
      x1 <- bca.object$c1[, 1] + x0
      y1 <- bca.object$c1[, 2] + y0
    } else if(means[1] <= cutX & means[2] > cutY) {
      x0 <- xmax - 0.15 * diff(c(xmin, xmax))
      y0 <- ymin + 0.25 * diff(c(ymin, ymax))
      x1 <- bca.object$c1[, 1] + x0
      y1 <- bca.object$c1[, 2] + y0
    } else if(means[1] > cutX & means[2] > cutY) {
      x0 <- xmin + 0.25 * diff(c(xmin, xmax))
      y0 <- ymin + 0.25 * diff(c(ymin, ymax))
      x1 <- bca.object$c1[, 1] + x0
      y1 <- bca.object$c1[, 2] + y0
    }
    graphics::par(xpd = T)
    x1y1 <- cbind(x1, y1)
    apply(x1y1, 1, FUN = function(x, a = x0, b = y0){
      .arrows(x0 = a, y0 = b, x1 = x[1], y1 = x[2])
    })
    .arrowLabels(x = x1, y = y1, label = rownames(bca.object$c1),
                 clabel = 1, origin = c(x0, y0))
    graphics::legend("topright", inset = c(-.4, 0),
                     legend = c("Current\nconditions\n", "Future\nconditions"),
                     pch = c(15, 17), col = grDevices::grey(.8),
                     bty = "n", cex = .7, xpd = TRUE)
    graphics::legend("topright", inset = c(-.4, .5),
                     legend = c(1, 0.8, 0.6, 0.4, 0.2, 0), pch = 16,
                     col = c(viridis::inferno(150)[c(1, 38, 75, 113, 150)],
                             grDevices::grey(.8)),
                     bty = "n", cex = .7, title = "Pixel\nsuitability",
                     xpd = TRUE)
    # Marginal densities along the two BCA axes
    graphics::par(new = T)
    valY <- stats::dnorm(seq(xmin, xmax, length = 1000),
                         mean = means[1], sd = sds[1])
    valY <- 0.15 * (valY - min(valY))/(max(valY) - min(valY))
    valX <- seq(xmin, xmax, length = 1000)
    plot(valY ~ valX, type = "l", bty = "n", ylim = c(0, 1), lty = 1,
         xlab = "", ylab = "", xaxt = "n", yaxt = "n")
    graphics::par(new = T)
    valY <- seq(ymin, ymax, length = 1000)
    valX <- stats::dnorm(seq(ymin, ymax, length = 1000),
                         mean = means[2], sd = sds[2])
    valX <- 0.15 * (valX - min(valX))/(max(valX) - min(valX))
    plot(valX, valY, type = "l", bty = "n", xlim = c(0, 1), lty = 1,
         xlab = "", ylab = "", xaxt = "n", yaxt = "n")
  } else {
    stop("The argument approach was not valid, please provide either 'response' or 'pca'")
  }
}

# Draw a segment with an arrowhead from (x0, y0) to (x1, y1); the arrowhead
# is only drawn when the segment is long enough (> 2 character heights).
.arrows <- function(x0, y0, x1, y1, len = 0.1, ang = 15, lty = 1,
                    edge = TRUE)
{
  d0 <- sqrt((x0 - x1)^2 + (y0 - y1)^2)
  # Degenerate (zero-length) arrow: draw nothing
  if (d0 < 1e-07)
    return(invisible())
  graphics::segments(x0, y0, x1, y1, lty = lty)
  h <- graphics::strheight("A", cex = graphics::par("cex"))
  if (d0 > 2 * h) {
    # Shorten the arrowhead segment by one character height
    x0 <- x1 - h * (x1 - x0)/d0
    y0 <- y1 - h * (y1 - y0)/d0
    if (edge)
      graphics::arrows(x0, y0, x1, y1, angle = ang, length = len, lty = 1)
  }
}

# Write a label at the tip of each arrow, offset away from the arrow origin
# so labels do not overlap the arrows themselves.
.arrowLabels <- function(x, y, label, clabel, origin = c(0, 0),
                         boxes = FALSE)
{
  xref <- x - origin[1]
  yref <- y - origin[2]
  for (i in 1:(length(x))) {
    cha <- as.character(label[i])
    cha <- paste(" ", cha, " ", sep = "")
    cex0 <- graphics::par("cex") * clabel
    xh <- graphics::strwidth(cha, cex = cex0)
    yh <- graphics::strheight(cha, cex = cex0) * 5/6
    # Choose the offset direction based on the arrow's dominant direction
    if ((xref[i] > yref[i]) & (xref[i] > -yref[i])) {
      x1 <- x[i] + xh/2
      y1 <- y[i]
    } else if ((xref[i] > yref[i]) & (xref[i] <= (-yref[i]))) {
      x1 <- x[i]
      y1 <- y[i] - yh
    } else if ((xref[i] <= yref[i]) & (xref[i] <= (-yref[i]))) {
      x1 <- x[i] - xh/2
      y1 <- y[i]
    } else if ((xref[i] <= yref[i]) & (xref[i] > (-yref[i]))) {
      x1 <- x[i]
      y1 <- y[i] + yh
    }
    if (boxes) {
      graphics::rect(x1 - xh/2, y1 - yh, x1 + xh/2, y1 + yh,
                     col = "white", border = 1)
    }
    text(x1, y1, cha, cex = cex0)
  }
}
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/plotResponse.R
#' Visualise the function that was used to transform environmental suitability into
#' probability of occurrence
#'
#' This function plots the relationships between the environmental suitability
#' and the probability of occurrence, which is used to generate the presence-
#' absence distribution.
#' It requires the output from \code{\link{convertToPA}}.
#'
#' @param sp the output from \code{\link{convertToPA}}.
#' @param add \code{TRUE} or \code{FALSE}. If \code{TRUE}, the relationship
#' will be added to the currently active graph.
#' @param ... further arguments to be passed to \code{plot}. See
#' \code{\link[graphics]{plot}} and \code{\link[graphics]{par}}.
#'
#' @export
#' @import terra
#' @author
#' Boris Leroy \email{leroy.boris@@gmail.com}
#'
#' @examples
#' # Create an example stack with two environmental variables
#' a <- matrix(rep(dnorm(1:100, 50, sd = 25)),
#'             nrow = 100, ncol = 100, byrow = TRUE)
#' env <- c(rast(a * dnorm(1:100, 50, sd = 25)),
#'          rast(a * 1:100))
#' names(env) <- c("variable1", "variable2")
#'
#' parameters <- formatFunctions(variable1 = c(fun = 'dnorm', mean = 1e-04,
#'                                             sd = 1e-04),
#'                               variable2 = c(fun = 'linearFun', a = 1, b = 0))
#' # Generation of the virtual species
#' sp1 <- generateSpFromFun(env, parameters)
#' sp1
#'
#'
#' # Converting to presence-absence, probabilistic method, logistic conversion
#' # A species with a low prevalence:
#'
#' sp1.lowprev <- convertToPA(sp1, species.prevalence = 0.1)
#' plotSuitabilityToProba(sp1.lowprev)
#'
#' # A species with a high prevalence:
#'
#' sp1.highprev <- convertToPA(sp1, species.prevalence = 0.9)
#' plotSuitabilityToProba(sp1.highprev)
#'
#' # Converting to presence-absence, probabilistic method, linear conversion
#' # A species with a low prevalence:
#'
#' sp1.lowprev <- convertToPA(sp1, species.prevalence = 0.1,
#'                            prob.method = "linear")
#' plotSuitabilityToProba(sp1.lowprev)
#'
#' # A species with a high prevalence:
#'
#' sp1.highprev <- convertToPA(sp1, species.prevalence = 0.9,
#' prob.method = "linear")
#' plotSuitabilityToProba(sp1.highprev)
#'
plotSuitabilityToProba <- function(sp, add = FALSE, ...)
{
  # Validate input: sp must be the output of convertToPA(), i.e. a
  # virtualspecies object carrying a PA.conversion element.
  if(inherits(sp, "virtualspecies")) {
    if(!("PA.conversion" %in% names(sp))) {
      stop("sp does not seem to be a valid object: provide the output from",
           " convertToPA()")
    }
  } else {
    stop("sp does not seem to be a valid object: provide the output from",
         " convertToPA()")
  }
  # First element of PA.conversion holds the conversion method name
  method <- sp$PA.conversion[1]

  # Build a 1000-value sequence spanning the full range of suitability
  # values observed in the raster(s)
  if(sp$approach == "bca") {
    # "bca" species carry current & future suitability rasters: span both
    x <- seq(min(global(c(sp$suitab.raster.current,
                          sp$suitab.raster.future),
                        "min", na.rm = TRUE)[, 1]),
             max(global(c(sp$suitab.raster.current,
                          sp$suitab.raster.future),
                        "max", na.rm = TRUE)[, 1]),
             length = 1000)
  } else {
    x <- seq(global(sp$suitab.raster, min, na.rm = TRUE)[1, 1],
             global(sp$suitab.raster, max, na.rm = TRUE)[1, 1],
             length = 1000)
  }

  # Recompute the suitability -> probability relationship from the stored
  # conversion parameters
  if(method == "probability") {
    if(sp$PA.conversion[2] == "logistic") {
      y <- logisticFun(x, alpha = as.numeric(sp$PA.conversion["alpha"]),
                       beta = as.numeric(sp$PA.conversion["beta"]))
    } else {
      # Linear conversion, clamped to the [0, 1] probability range
      y <- as.numeric(sp$PA.conversion["a"]) * x +
        as.numeric(sp$PA.conversion["b"])
      y[y < 0] <- 0
      y[y > 1] <- 1
    }
  } else if (method == "threshold") {
    # Step function: 1 at or above the cutoff, 0 below
    y <- x
    y[y >= as.numeric(sp$PA.conversion["cutoff"])] <- 1
    y[y < as.numeric(sp$PA.conversion["cutoff"])] <- 0
  }
  if(!add) {
    plot(x = x, y = y, type = "l", bty = "l", las = 1, cex.axis = .8,
         ylim = c(0, 1),
         xlab = "Environmental suitability",
         # Fixed typo in the axis label ("ocurrence" -> "occurrence")
         ylab = "Probability of occurrence", ...)
  } else {
    lines(x = x, y = y, ...)
  }
}
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/plotSuitabilityToProba.R
#' Remove collinearity among variables of a raster stack #' #' This functions analyses the correlation among variables of the provided #' stack of environmental variables (using Pearson's R), and can return a #' vector containing names of variables that are not colinear, or a list #' containing grouping variables according to their degree of collinearity. #' #' @param raster.stack a SpatRaster object, in which each layer represent an #' environmental #' variable. #' @param multicollinearity.cutoff a numeric value corresponding to the cutoff #' of correlation above which to group variables. #' @param select.variables \code{TRUE} or \code{FALSE}. If \code{TRUE}, then the #' function will choose one variable among each group to return a vector of #' non correlated variables (see details). If \code{FALSE}, the function will #' return a list #' containing the groups of correlated variables. #' @param sample.points \code{TRUE} or \code{FALSE}. If you have a large #' raster file then use this parameter to sample a number of points equal to #' \code{nb.points}. #' @param nb.points a numeric value. Only useful if \code{sample.points = TRUE}. #' The number of sampled points from the raster, to perform the PCA. A too small #' value may not be representative of the environmental conditions in your #' raster. #' @param plot \code{TRUE} or \code{FALSE}. If \code{TRUE}, the hierarchical #' ascendant classification used to group variables will be plotted. #' @param method \code{"pearson"}, \code{"spearman"} or \code{"kendall"}. #' The correlation method to be used. If your variables are skewed or have #' outliers (e.g. when working with precipitation variables) you should favour #' the Spearman or Kendall methods. #' @return #' a vector of non correlated variables, or a list where each element is a #' group of non correlated variables. #' @details #' This function uses the Pearson's correlation coefficient to analyse #' correlation among variables. 
#' This coefficient is then used to compute a
#' distance matrix, which in turn is used to compute an ascendant hierarchical
#' classification, with the '\emph{complete}' method (see
#' \code{\link[stats]{hclust}}). If at least one correlation above the \code{
#' multicollinearity.cutoff} is detected, then the variables will be grouped
#' according to their degree of correlation.
#'
#' If \code{select.variables = TRUE}, then the function will return a vector
#' containing variables that are not colinear.
#' The variables not correlated to any other variables are automatically
#' included
#' in this vector. For each group of colinear variables, one variable will
#' be randomly chosen and included in this vector.
#'
#'
#' @export
#' @import terra
#' @author
#' Boris Leroy \email{leroy.boris@@gmail.com}
#'
#' with help from C. N. Meynard, C. Bellard & F. Courchamp
#' @examples
#' # Create an example stack with six environmental variables
#' a <- matrix(rep(dnorm(1:100, 50, sd = 25)),
#'             nrow = 100, ncol = 100, byrow = TRUE)
#' env <- c(rast(a * dnorm(1:100, 50, sd = 25)),
#'          rast(a * 1:100),
#'          rast(a * logisticFun(1:100, alpha = 10, beta = 70)),
#'          rast(t(a)),
#'          rast(exp(a)),
#'          rast(log(a)))
#' names(env) <- paste("Var", 1:6, sep = "")
#'
#' # Defaults settings: cutoff at 0.7
#' removeCollinearity(env, plot = TRUE)
#'
#' # Changing cutoff to 0.5
#' removeCollinearity(env, plot = TRUE, multicollinearity.cutoff = 0.5)
#'
#' # Automatic selection of variables not intercorrelated
#' removeCollinearity(env, plot = TRUE, select.variables = TRUE)
#'
#' # Assuming a very large raster file: selecting a subset of points
#' removeCollinearity(env, plot = TRUE, select.variables = TRUE,
#'                    sample.points = TRUE, nb.points = 5000)
#'
#'
removeCollinearity <- function(raster.stack, multicollinearity.cutoff = .7,
                               select.variables = FALSE,
                               sample.points = FALSE,
                               nb.points = 10000,
                               plot = FALSE,
                               method = "pearson")
{
  # Accept legacy raster::RasterStack input by converting it to terra
  if(inherits(raster.stack, "RasterStack")) {
    raster.stack <- rast(raster.stack)
  } else if(!inherits(raster.stack, "SpatRaster")) {
    stop("raster.stack must be an object of class SpatRaster from package",
         " terra. RasterStack objects from package raster are also accepted",
         " (they will be converted to a terra SpatRaster object)")
  }

  # Extract the environmental values: either a random sample of pixels
  # (for large rasters) or every non-NA pixel
  if(sample.points) {
    if(!is.numeric(nb.points)) {
      stop("nb.points must be a numeric value corresponding to the number of",
           " pixels to sample from raster.stack")
    }
    env.df <- spatSample(raster.stack, size = nb.points, na.rm = TRUE)
  } else {
    env.df <- values(raster.stack)
    if(any(is.na(env.df))) {
      # Drop every row that contains at least one NA
      env.df <- env.df[-unique(which(is.na(env.df), arr.ind = TRUE)[, 1]), ]
    }
  }

  if(!is.numeric(multicollinearity.cutoff)) {
    stop("You must provide a numeric cutoff between 0 and 1 in",
         " multicollinearity.cutoff")
  } else if(multicollinearity.cutoff > 1 | multicollinearity.cutoff < 0) {
    stop("You must provide a numeric cutoff between 0 and 1 in ",
         " multicollinearity.cutoff")
  }

  # Distance between variables = 1 - |correlation|, so perfectly
  # (anti)correlated variables have distance 0.
  # (A dead pre-initialization of cor.matrix with zeros was removed: the
  # matrix was immediately overwritten by this assignment.)
  cor.matrix <- 1 - abs(stats::cor(env.df, method = method))

  # Ascendant hierarchical classification on the distance matrix; cutting
  # the tree at height 1 - cutoff groups variables whose absolute
  # correlation exceeds the cutoff
  dist.matrix <- stats::as.dist(cor.matrix)
  ahc <- stats::hclust(dist.matrix, method = "complete")
  groups <- stats::cutree(ahc, h = 1 - multicollinearity.cutoff)
  if(length(groups) == max(groups)) {
    # As many groups as variables: no pair exceeded the cutoff
    message(paste(
      " - No multicollinearity detected in your data at threshold ",
      multicollinearity.cutoff, "\n", sep = ""))
    mc <- FALSE
  } else {
    mc <- TRUE
  }

  if(plot) {
    op <- graphics::par(no.readonly = TRUE)
    graphics::par(mar = c(5.1, 5.1, 4.1, 3.1))
    plot(ahc, hang = -1, xlab = "",
         # Label the distance axis according to the correlation method
         # actually used (the label was previously hard-coded to Pearson)
         ylab = paste0("Distance (1 - ",
                       switch(method,
                              pearson = "Pearson's r",
                              spearman = "Spearman's rho",
                              kendall = "Kendall's tau",
                              method),
                       ")"),
         main = "", las = 1, sub = "", axes = FALSE)
    graphics::axis(2, at = seq(0, 1, length = 6), las = 1)
    if(mc) {
      graphics::title(paste('Groups of intercorrelated variables at cutoff',
                            multicollinearity.cutoff))
      graphics::par(xpd = TRUE)
      stats::rect.hclust(ahc, h = 1 - multicollinearity.cutoff)
    } else {
      graphics::title(paste('No intercorrelation among variables at cutoff',
                            multicollinearity.cutoff))
    }
    graphics::par(op)
  }

  if(select.variables) {
    # Keep one randomly-chosen representative per group (groups of size 1
    # are kept as-is since sample() on a length-1 character vector returns it)
    sel.vars <- NULL
    for (i in seq_len(max(groups))) {
      sel.vars <- c(sel.vars, sample(names(groups[groups == i]), 1))
    }
  } else {
    if(mc) {
      # Return the groups themselves; iterate once per group id
      # (the original iterated once per variable, redundantly re-assigning)
      sel.vars <- list()
      for (i in seq_len(max(groups))) {
        sel.vars[[i]] <- names(groups)[groups == i]
      }
    } else {
      sel.vars <- names(raster.stack)
    }
  }
  return(sel.vars)
}
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/removeCollinearity.R
#' Sample occurrences in a virtual species distribution #' #' @description #' This function samples occurrences/records (presence only or presence-absence) #' within a species distribution, either randomly or with a sampling bias. #' The sampling bias can be defined manually or with a set of predefined #' biases. #' #' @param x a \code{SpatRaster} object or the output list from #' \code{generateSpFromFun}, \code{generateSpFromPCA}, \code{generateRandomSp}, #' \code{convertToPA} #' or \code{limitDistribution} #' The raster must contain values of 0 or 1 (or NA). #' @param n an integer. The number of occurrence points / records to sample. #' @param type \code{"presence only"} or \code{"presence-absence"}. The type of #' occurrence points to sample. #' @param extract.probability \code{TRUE} or \code{FALSE}. If \code{TRUE}, then #' true probability at sampled locations will also be extracted #' @param sampling.area a character string, a \code{polygon} or an \code{extent} #' object. #' The area in which the sampling will take place. See details. #' @param detection.probability a numeric value between 0 and 1, corresponding #' to the probability of detection of the species. See details. #' @param correct.by.suitability \code{TRUE} or \code{FALSE}. If \code{TRUE}, #' then the probability of detection will be weighted by the suitability, such #' that cells with lower suitabilities will further decrease the chance that #' the species is detected when sampled. NOTE: this will NOT increase #' likelihood of samplings in areas of high suitability. In this case look for #' argument weights. #' @param error.probability \code{TRUE} or \code{FALSE}. Probability to #' attribute an erroneous presence (False Positive) in cells where the species #' is actually absent. #' @param bias \code{"no.bias"}, \code{"country"}, \code{"region"}, #' \code{"extent"}, \code{"polygon"} or \code{"manual"}. The method used to #' generate a sampling bias: see details. 
#' @param bias.strength a positive numeric value. The strength of the bias to be
#' applied in \code{area} (as a multiplier). Above 1, \code{area} will be
#' oversampled. Below 1, \code{area} will be undersampled.
#' @param bias.area \code{NULL}, a character string, a \code{polygon} or an
#' \code{extent} object. The area in which the sampling will be biased: see
#' details. If \code{NULL} and \code{bias = "extent"}, then you will be asked to
#' draw an extent on the map.
#' @param weights \code{NULL} or a raster layer. Only used if
#' \code{bias = "manual"}. The raster of bias weights to be applied to the
#' sampling of occurrences. Higher weights mean a higher probability of
#' sampling. For example, species suitability raster can be entered here to
#' increase likelihood of sampling occurrences in areas with high suitability.
#' @param sample.prevalence \code{NULL} or a numeric value between 0 and 1.
#' Only useful if \code{type = "presence-absence"}. Defines the sample
#' prevalence, i.e. the proportion of presences sampled. Note that the
#' probabilities of detection and error are applied AFTER this parameter,
#' so the final sample prevalence may be different if you apply probabilities
#' of detection and/or error
#' @param replacement \code{TRUE} or \code{FALSE}. If \code{TRUE}, multiple
#' samples can occur in the same cell. Can be useful to mimic real datasets
#' where samplings can be duplicated or repeated in time.
#' @param plot \code{TRUE} or \code{FALSE}. If \code{TRUE}, the sampled
#' occurrence points will be plotted.
#' @details
#' \href{http://borisleroy.com/virtualspecies_tutorial/07-sampleoccurrences.html}{
#' Online tutorial for this function}
#'
#'
#'
#'
#' \bold{How the function works:}
#'
#' The function randomly selects \code{n} cells in which samples occur. If a
#' \code{bias} is chosen, then the selection of these cells will be biased
#' according to the type and strength of bias chosen.
#' If the sampling is of
#' \code{type "presence only"}, then only cells where the species is present
#' will be chosen. If the sampling is of \code{type "presence-absence"}, then
#' all non-NA cells can be chosen.
#'
#' The function then samples the species inside the chosen cells. In cells
#' where the species is present the species will always be sampled unless
#' the parameter \code{detection.probability} is lower than 1. In that case the
#' species will be sampled with the associated probability of detection.
#'
#' In cells where the species is absent (in case of a \code{"presence-absence"}
#' sampling), the function will always assign absence unless
#' \code{error.probability} is greater than 0. In that case, the species can be
#' found present with the associated probability of error. Note that this step
#' happens AFTER the detection step. Hence, in cells where the species is
#' present but not detected, it can still be sampled due to a sampling error.
#'
#' \bold{How to restrict the sampling area:}
#'
#' Use the argument \code{sampling.area}:
#' \itemize{
#' \item{Provide the name (s) (or a combination of names) of country(ies),
#' region(s) or continent(s).
#' Examples:
#' \itemize{
#' \item{\code{sampling.area = "Africa"}}
#' \item{\code{sampling.area = c("Africa", "North America", "France")}}
#' }}
#' \item{Provide a polygon (\code{SpatialPolygons} or
#' \code{SpatialPolygonsDataFrame} of package \code{sp})}
#' \item{Provide an \code{extent} object}
#' }
#'
#' \bold{How the sampling bias works:}
#'
#' The argument \code{bias.strength} indicates the strength of the bias.
#' For example, a value of 50 will result in 50 times more samples within the
#' \code{bias.area} than outside.
#' Conversely, a value of 0.5 will result in half as many samples within the
#' \code{bias.area} than outside.
#' #' \bold{How to choose where the sampling is biased:} #' #' You can choose to bias the sampling in: #' \enumerate{ #' \item{a particular country, region or continent (assuming your raster has #' the WGS84 projection): #' #' Set the argument #' \code{bias} to \code{"country"}, \code{"region"} or #' \code{"continent"}, and provide the name(s) of the associated countries, #' regions or continents to \code{bias.area} (see examples). #' #' List of possible \code{bias.area} names: #' \itemize{ #' \item{Countries: type #' \code{unique(rnaturalearth::ne_countries(returnclass ='sf')$sovereignt)} #' in the console} #' \item{Regions: "Africa", "Antarctica", "Asia", "Oceania", "Europe", #' "Americas"} #' \item{Continents: "Africa", "Antarctica", "Asia", "Europe", #' "North America", "Oceania", "South America"}} #' } #' \item{a polygon: #' #' Set \code{bias} to \code{"polygon"}, and provide your #' polygon to \code{area}. #' } #' \item{an extent object: #' #' Set \code{bias} to \code{"extent"}, and either provide your #' extent object to \code{bias.area}, or leave it \code{NULL} to draw an extent #' on the map.} #' } #' #' Otherwise you can enter a raster of sampling probability. It can be useful #' if you want to increase likelihood of samplings in areas of high #' suitability (simply enter the suitability raster in weights; see examples #' below), #' or if you want to define sampling biases manually, \emph{e.g.} to to create #' biases along roads. In that case you have to provide to \code{weights} a #' raster layer in which each cell contains the probability to be sampled. #' #' The \code{\link{.Random.seed}} and \code{\link{RNGkind}} are stored as #' \code{\link{attributes}} when the function is called, and can be used to #' reproduce the results as shown in the examples (though #' it is preferable to set the seed with \code{\link{set.seed}} before calling #' \code{sampleOccurrences()} and to then use the same value in #' \code{\link{set.seed}} to reproduce results later. 
Note that #' reproducing the sampling will only work if the same original distribution map #' is used. #' #' @return a \code{list} with 8 elements: #' \itemize{ #' \item{\code{type}: type of occurrence sampled (presence-absences or #' presence-only)} #' \item{\code{sample.points}: data.frame containing the coordinates of #' samples, true and sampled observations (i.e, 1, 0 or NA), and, if asked, the true #' environmental suitability in sampled locations} #' \item{\code{detection.probability}: the chosen probability of detection of #' the virtual species} #' \item{\code{error.probability}: the chosen probability to assign presence #' in cells where the species is absent} #' \item{\code{bias}: if a bias was chosen, then the type of bias and the #' associated \code{area} will be included.} #' \item{\code{replacement}: indicates whether multiple samples could occur #' in the same cells} #' \item{\code{original.distribution.raster}: the distribution raster from #' which samples were drawn} #' \item{\code{sample.plot}: a recorded plot showing the sampled points #' overlaying the original distribution.} #' } #' @note #' Setting \code{sample.prevalence} may at least partly #' override \code{bias}, e.g. if \code{bias} is specified with \code{extent} to #' an area that contains no presences, but sample prevalence is set to > 0, #' then cells outside of the biased sampling extent will be sampled until #' the number of presences required by \code{sample.prevalence} are obtained, #' after which the sampling of absences will proceed according to the specified #' bias. #' @export #' @import terra #' @author #' Boris Leroy \email{leroy.boris@@gmail.com} #' Willson Gaul \email{wgaul@@hotmail.com} #' #' with help from C. N. Meynard, C. Bellard & F. 
Courchamp #' @examples #' # Create an example stack with six environmental variables #' a <- matrix(rep(dnorm(1:100, 50, sd = 25)), #' nrow = 100, ncol = 100, byrow = TRUE) #' env <- c(rast(a * dnorm(1:100, 50, sd = 25)), #' rast(a * 1:100), #' rast(a * logisticFun(1:100, alpha = 10, beta = 70)), #' rast(t(a)), #' rast(exp(a)), #' rast(log(a))) #' names(env) <- paste("Var", 1:6, sep = "") #' #' # More than 6 variables: by default a PCA approach will be used #' sp <- generateRandomSp(env, niche.breadth = "wide") #' #' # Sampling of 25 presences #' sampleOccurrences(sp, n = 25) #' #' # Sampling of 30 presences and absences #' sampleOccurrences(sp, n = 30, type = "presence-absence") #' #' #' # Reducing of the probability of detection #' sampleOccurrences(sp, n = 30, type = "presence-absence", #' detection.probability = 0.5) #' #' # Further reducing in relation to environmental suitability #' sampleOccurrences(sp, n = 30, type = "presence-absence", #' detection.probability = 0.5, #' correct.by.suitability = TRUE) #' #' #' # Creating sampling errors (far too much) #' sampleOccurrences(sp, n = 30, type = "presence-absence", #' error.probability = 0.5) #' #' # Introducing a sampling bias (oversampling) #' biased.area <- ext(1, 50, 1, 50) #' sampleOccurrences(sp, n = 50, type = "presence-absence", #' bias = "extent", #' bias.area = biased.area) #' # Showing the area in which the sampling is biased #' plot(biased.area, add = TRUE) #' #' # Introducing a sampling bias (no sampling at all in the chosen area) #' biased.area <- ext(1, 50, 1, 50) #' sampleOccurrences(sp, n = 50, type = "presence-absence", #' bias = "extent", #' bias.strength = 0, #' bias.area = biased.area) #' # Showing the area in which the sampling is biased #' plot(biased.area, add = TRUE) #' samps <- sampleOccurrences(sp, n = 50, #' bias = "manual", #' weights = sp$suitab.raster) #' plot(sp$suitab.raster) #' points(samps$sample.points[, c("x", "y")]) #' #' # Create a sampling bias so that more presences are 
#' sampled in areas with
#' # higher suitability
#'
#'
#'
#'
#' # Reproduce sampling based on the saved .Random.seed from a previous result
#' samps <- sampleOccurrences(sp, n = 100,
#'                            type = "presence-absence",
#'                            detection.probability = 0.7,
#'                            bias = "extent",
#'                            bias.strength = 50,
#'                            bias.area = biased.area)
#' # Reset the random seed using the value saved in the attributes
#' .Random.seed <- attr(samps, "seed")
#' reproduced_samps <- sampleOccurrences(sp, n = 100,
#'                                       type = "presence-absence",
#'                                       detection.probability = 0.7,
#'                                       bias = "extent",
#'                                       bias.strength = 50,
#'                                       bias.area = biased.area)
#' identical(samps$sample.points, reproduced_samps$sample.points)
sampleOccurrences <- function(x, n,
                              type = "presence only",
                              extract.probability = FALSE,
                              sampling.area = NULL,
                              detection.probability = 1,
                              correct.by.suitability = FALSE,
                              error.probability = 0,
                              bias = "no.bias",
                              bias.strength = 50,
                              bias.area = NULL,
                              weights = NULL,
                              sample.prevalence = NULL,
                              replacement = FALSE,
                              plot = TRUE)
{
  # Skeleton of the returned object; sample.points, bias details and
  # (optionally) sample.prevalence / sample.plot are filled in below
  results <- list(type = type,
                  detection.probability = list(
                    detection.probability = detection.probability,
                    correct.by.suitability = correct.by.suitability),
                  error.probability = error.probability,
                  bias = NULL,
                  replacement = replacement,
                  original.distribution.raster = NULL,
                  sample.plot = NULL)
  if(is.null(.Random.seed)) {stats::runif(1)} # initialize random seed if there
  # is none
  # Store the RNG state so the sampling can be reproduced later (see examples)
  attr(results, "RNGkind") <- RNGkind()
  attr(results, "seed") <- .Random.seed

  # Resolve the input into a presence/absence SpatRaster:
  # virtualspecies object (occupied.area preferred over pa.raster),
  # legacy RasterLayer (converted), or a bare SpatRaster
  if(inherits(x, "virtualspecies")) {
    if(inherits(x$occupied.area, "SpatRaster")) {
      sp.raster <- x$occupied.area
    } else if(inherits(x$pa.raster, "SpatRaster")) {
      sp.raster <- x$pa.raster
    } else stop("x must be:\n- a SpatRaster object\nor\n- the output list",
                " from functions generateRandomSp(), convertToPA() or ",
                "limitDistribution()")
  } else if (inherits(x, "RasterLayer")) {
    sp.raster <- rast(x)
    if(extract.probability) {
      # True probability lives only in virtualspecies objects
      stop("Cannot extract probability when x is not a virtualspecies object.",
           " Set extract.probability = FALSE")
    }
  } else if (inherits(x, "SpatRaster")) {
    sp.raster <- x
    if(extract.probability) {
      stop("Cannot extract probability when x is not a virtualspecies object.",
           " Set extract.probability = FALSE")
    }
  } else stop("x must be:\n- a SpatRaster object\nor\n- the output list",
              " from functions generateRandomSp(), convertToPA() or ",
              "limitDistribution()")

  # Guard against suitability rasters passed in place of a 0/1 P/A raster
  if(global(sp.raster, max, na.rm = TRUE)[1, 1] > 1 |
     global(sp.raster, min, na.rm = TRUE)[1, 1] < 0) {
    stop("There are values above 1 or below 0 in your presence/absence raster.",
         "Please make sure that the provided raster is a correct P/A raster",
         " and not a suitability raster.")
  }

  # Keep an untouched copy for plotting and for the returned object
  # (wrap() makes the SpatRaster serializable)
  original.raster <- sp.raster
  results$original.distribution.raster <- wrap(original.raster)

  if(!is.null(sample.prevalence)) {
    if(sample.prevalence < 0 | sample.prevalence > 1) {
      stop("Sample prevalence must be a numeric between 0 and 1")
    }
  }

  # Optionally restrict the sampling to a user-defined area: named
  # countries/regions/continents (via rnaturalearth), a polygon, or an extent
  if(!is.null(sampling.area)) {
    if(is.character(sampling.area)) {
      if(!("rnaturalearth" %in% rownames(utils::installed.packages()))) {
        stop('You need to install the package "rnaturalearth".')
      }
      worldmap <- rnaturalearth::ne_countries(returnclass = "sf")
      if (any(!(sampling.area %in% c(unique(worldmap$sovereignt),
                                     unique(worldmap$region_un),
                                     unique(worldmap$continent))))) {
        stop("The choosen sampling.area is incorrectly spelled.\n Type",
             " 'unique(rnaturalearth::ne_countries(returnclass =",
             "'sf')$sovereignt)', ",
             "'unique(rnaturalearth::ne_countries(returnclass =",
             "'sf')$region_un)'",
             " & unique(rnaturalearth::ne_countries(returnclass =",
             "'sf')$continent) to obtain valid names.")
      }
      sampling.area <- worldmap[which(
        worldmap$sovereignt %in% sampling.area |
          worldmap$region_un %in% sampling.area |
          worldmap$continent %in% sampling.area), ]
    } else if(!(inherits(sampling.area, c("SpatVector", "sf",
                                          "SpatExtent")))) {
      stop("Please provide to sampling.area either \n",
           "- the names of countries, region and/or continents in which",
           " to sample\n",
           "- a SpatialPolygons or SpatialPolygonsDataFrame\n",
           "- an extent\n ",
           "in which the sampling will take place")
    }
    # if(inherits(sampling.area, "sf")) {
    #   sampling.area <- vect(sampling.area)
    # }
    if(inherits(sampling.area, "SpatExtent")) {
      sampling.area <- vect(sampling.area)
    }
    # Cells outside the sampling area become NA after the multiplication
    sample.area.raster1 <- terra::rasterize(sampling.area,
                                            sp.raster,
                                            field = 1,
                                            background = NA,
                                            silent = TRUE)
    sp.raster <- sp.raster * sample.area.raster1
  }

  # --- Parameter validation ---
  if(correct.by.suitability) {
    if(!(inherits(x, "virtualspecies")) | !("suitab.raster" %in% names(x))) {
      stop("If you choose to weight the probability of detection by the",
           " suitability of the species (i.e., correct.by.suitability = TRUE),",
           " then you need to provide an appropriate virtual species ",
           "containing a suitability raster to x.")
    }
  }

  if(!is.numeric(detection.probability) | detection.probability > 1 |
     detection.probability < 0) {
    stop("detection.probability must be a numeric value between 0 and 1")
  }

  if(!is.numeric(error.probability) | error.probability > 1 |
     error.probability < 0) {
    stop("error.probability must be a numeric value between 0 and 1")
  }

  if(length(bias) > 1) {
    stop('Only one bias can be applied at a time')
  }
  if (!(bias %in% c("no.bias", "country", "region", "continent", "extent",
                    "polygon", "manual"))) {
    stop('Argument bias must be one of : "no.bias", "country", "region", "continent", "extent", "polygon", "manual"')
  }
  if(!is.numeric(bias.strength) & bias != "no.bias") {
    stop("Please provide a numeric value for bias.strength")
  }

  # --- Bias area resolution ---
  # country/region/continent biases require rnaturalearth polygons
  if (bias %in% c("country", "region", "continent")) {
    if(!("rnaturalearth" %in% rownames(utils::installed.packages()))) {
      stop('You need to install the package "rnaturalearth" in order to use',
           'bias = "region" or bias = "country"')
    }
    worldmap <- rnaturalearth::ne_countries(returnclass = "sf")
    if(bias == "country") {
      if (any(!(bias.area %in% worldmap$sovereignt))) {
        stop("country name(s) must be correctly spelled.",
             "Type unique(rnaturalearth::ne_countries(returnclass =",
             "'sf')$sovereignt) to obtain valid names.")
      }
      results$bias <- list(bias = bias,
                           bias.strength = bias.strength,
                           bias.area = bias.area)
    } else if (bias == "region") {
      if (any(!(bias.area %in% worldmap$region_un))) {
        stop(paste("region name(s) must be correctly spelled, according to",
                   " one of the following : ",
                   paste(unique(worldmap$region_un), collapse = ", "),
                   sep = "\n"))
      }
      results$bias <- list(bias = bias,
                           bias.strength = bias.strength,
                           bias.area = bias.area)
    } else if (bias == "continent") {
      if (any(!(bias.area %in% worldmap$continent))) {
        stop(paste("region name(s) must be correctly spelled,",
                   "according to one of the following : ",
                   paste(unique(worldmap$continent), collapse = ", "),
                   sep = "\n"))
      }
      results$bias <- list(bias = bias,
                           bias.strength = bias.strength,
                           bias.area = bias.area)
    }
  }

  if (bias == "polygon") {
    if(is.null(bias.area)) {
      # Interactive fallback: let the user draw the biased polygon on a map
      message("No object of class SpatVector provided. A window with a map ",
              "will open, click on the map to draw the polygon of the area",
              " sampled with a bias.\n Once finished, press ",
              "escape to close the polygon.")
      if("RStudioGD" %in% names(grDevices::dev.list())) {
        grDevices::dev.new(noRStudioGD = TRUE)
      }
      plot(sp.raster)
      bias.area <- draw(x = "polygon")
    } else if(!(inherits(bias.area, c("sf", "SpatVector")))) {
      stop("If you choose bias = 'polygon', please provide a polygon of class ",
           "sf or SpatVector to argument bias.area. You can also set",
           " bias.area = NULL to draw the polygon manually.")
    }
    # warning("Polygon projection is not checked. Please make sure you have the
    # same projections between your polygon and your presence-absence
    # raster")
    results$bias <- list(bias = bias,
                         bias.strength = bias.strength,
                         bias.area = bias.area)
  }
  if (bias == "extent") {
    if(is.null(bias.area)) {
      # Interactive fallback: let the user draw the biased extent on a map
      message("No object of class SpatExtent provided. A window with a map ",
              "will open, click on the map to draw the extent of the area",
              " sampled with a bias.\n Once finished, press ",
              "escape to close the polygon.")
      if("RStudioGD" %in% names(grDevices::dev.list())) {
        grDevices::dev.new(noRStudioGD = TRUE)
      }
      plot(sp.raster)
      bias.area <- vect(draw())
    } else if(!(inherits(bias.area, c("SpatExtent")))) {
      stop("If you choose bias = 'extent', please provide an extent of class",
           "SpatExtent to argument bias.area. You can also set",
           " bias.area = NULL to draw the extent manually.")
    } else {
      bias.area <- vect(bias.area)
    }
    results$bias <- list(bias = bias,
                         bias.strength = bias.strength,
                         bias.area = bias.area)
  }

  # --- Candidate-cell raster ---
  if(type == "presence-absence") {
    sample.raster <- sp.raster
    # Set all non-NA cells to 1 so that they are included in p/a sampling
    sample.raster[!is.na(sample.raster)] <- 1
  } else if (type == "presence only") {
    # Presence-only: candidate cells keep their 0/1 values (0-weight cells
    # are effectively excluded by the weighted sampling below)
    sample.raster <- sp.raster
  } else stop("type must either be 'presence only' or 'presence-absence'")

  # --- Bias-weight raster ---
  if (bias == "manual") {
    if(!(inherits(weights, "SpatRaster"))) {
      stop("You must provide a raster layer of weights (to argument weights) if you choose bias == 'manual'")
    }
    bias.raster <- weights
    results$bias <- list(bias = bias,
                         bias.strength = "Defined by raster weights",
                         weights = wrap(weights))
  } else {
    # Uniform base weights: every candidate cell weighted 1
    bias.raster <- sample.raster
    bias.raster[bias.raster == 0] <- 1
  }

  # Multiply the weights by bias.strength inside the biased area
  # (cells outside the area keep weight 1 via background = 1)
  if(bias == "country") {
    bias.raster1 <- rasterize(
      worldmap[which(worldmap$sovereignt %in% bias.area), ],
      bias.raster, field = bias.strength, background = 1)
    bias.raster <- bias.raster * bias.raster1
  } else if(bias == "region") {
    bias.raster1 <- rasterize(
      worldmap[which(worldmap$region_un %in% bias.area), ],
      bias.raster, field = bias.strength, background = 1)
    bias.raster <- bias.raster * bias.raster1
  } else if(bias == "continent") {
    bias.raster1 <- rasterize(
      worldmap[which(worldmap$continent %in% bias.area), ],
      bias.raster, field = bias.strength, background = 1)
    bias.raster <- bias.raster * bias.raster1
  } else if(bias == "extent") {
    bias.raster <- bias.raster * rasterize(bias.area, sp.raster,
                                           field = bias.strength,
                                           background = 1)
    results$bias <- list(bias = bias,
                         bias.strength = bias.strength,
                         bias.area = bias.area)
  } else if(bias == "polygon") {
    bias.raster1 <- rasterize(bias.area, bias.raster,
                              field = bias.strength, background = 1,
                              silent = TRUE)
    bias.raster <- bias.raster * bias.raster1
  }

  # --- Cell sampling ---
  if(type == "presence only") {
    # Erroneous (false positive) points are drawn from anywhere, true
    # points only from presence cells; counts drawn from a binomial
    number.errors <- stats::rbinom(n = 1, size = n, prob = error.probability)
    error.raster <- sample.raster
    error.raster[error.raster == 0] <- 1
    if (number.errors > 0) {
      sample.points <- spatSample(error.raster * bias.raster,
                                  size = number.errors,
                                  method = "weights", xy = TRUE,
                                  values = FALSE, replace = replacement)
    } else {
      sample.points <- data.frame()
    }
    sample.points <- rbind(sample.points,
                           spatSample(sample.raster * bias.raster,
                                      size = n - number.errors,
                                      method = "weights", xy = TRUE,
                                      values = FALSE,
                                      replace = replacement))
  } else {
    if(is.null(sample.prevalence)) {
      sample.points <- spatSample(sample.raster * bias.raster,
                                  size = n, method = "weights",
                                  xy = TRUE, values = FALSE,
                                  replace = replacement)
    } else {
      # Fixed sample prevalence: draw presences and absences separately
      tmp1 <- sample.raster
      tmp1[sp.raster != 1] <- NA
      sample.points <- spatSample(tmp1 * bias.raster,
                                  size = sample.prevalence * n,
                                  method = "weights", xy = TRUE,
                                  values = FALSE, replace = replacement)
      tmp1 <- sample.raster
      tmp1[sp.raster != 0] <- NA
      tmp1[tmp1 == 0] <- 1
      sample.points <- rbind(sample.points,
                             spatSample(tmp1 * bias.raster,
                                        size = (1 - sample.prevalence) * n,
                                        method = "weights", xy = TRUE,
                                        values = FALSE,
                                        replace = replacement))
      rm(tmp1)
    }
  }
  # Keep only the x/y coordinate columns
  sample.points <- sample.points[, c(1, 2)]

  # --- Observation process (detection & error) ---
  if(type == "presence only") {
    # Real = true cell value; Observed = 1 if detected, NA otherwise
    sample.points <- data.frame(sample.points,
                                Real = extract(sp.raster, sample.points,
                                               ID = FALSE),
                                Observed = sample(
                                  c(NA, 1), size = nrow(sample.points),
                                  prob = c(1 - detection.probability,
                                           detection.probability),
                                  replace = TRUE))
    colnames(sample.points)[3] <- "Real"
  } else if(type == "presence-absence") {
    sample.points <- data.frame(sample.points,
                                extract(sp.raster, sample.points,
                                        ID = FALSE))
    colnames(sample.points)[3] <- "Real"
    if(correct.by.suitability) {
      suitabs <- extract(x$suitab.raster, sample.points[, c("x", "y")],
                         ID = FALSE)[, 1]
    } else {
      suitabs <- rep(1, nrow(sample.points))
    }
    sample.points$Observed <- NA
    if(correct.by.suitability) {
      # Per-point detection probability weighted by local suitability
      sample.points$Observed[which(sample.points$Real == 1)] <-
        sapply(detection.probability * suitabs[
          which(sample.points$Real == 1)],
          function(y) {
            sample(c(0, 1), size = 1, prob = c(1 - y, y))
          })
    } else {
      sample.points$Observed[which(sample.points$Real == 1)] <-
        sample(c(0, 1), size = length(which(sample.points$Real == 1)),
               prob = c(1 - detection.probability, detection.probability),
               replace = TRUE)
    }
    # Error step applied AFTER detection: true absences and undetected
    # presences (Observed == 0) can still become false presences
    sample.points$Observed[
      which(sample.points$Real == 0 | sample.points$Observed == 0)] <-
      sample(c(0, 1),
             size = length(which(sample.points$Real == 0 |
                                   sample.points$Observed == 0)),
             prob = c(1 - error.probability, error.probability),
             replace = TRUE)
  }

  # --- Optional plot of the samples over the original distribution ---
  if(plot) {
    plot(original.raster, col = rev(viridis::viridis(3)[2:3]))
    # par(new = TRUE)
    if(type == "presence only") {
      graphics::points(sample.points[, c("x", "y")], pch = 16, cex = .5,
                       col = viridis::viridis(3)[1])
    } else {
      # Filled symbols = observed presences, open symbols = observed absences
      graphics::points(sample.points[sample.points$Observed == 1,
                                     c("x", "y")],
                       pch = 16, cex = .8)
      # par(new = TRUE)
      graphics::points(sample.points[sample.points$Observed == 0,
                                     c("x", "y")],
                       pch = 1, cex = .8)
    }
    results$sample.plot <- grDevices::recordPlot()
  }

  # Optionally append the true probability of occurrence at each sample
  if(extract.probability) {
    sample.points <- data.frame(
      sample.points,
      extract(x$probability.of.occurrence,
              sample.points[, c("x", "y")],
              ID = FALSE))
    colnames(sample.points)[ncol(sample.points)] <- "true.probability"
  }

  results$sample.points <- sample.points

  # Summarise true vs observed sample prevalence for p/a samplings
  if(type == "presence-absence") {
    true.prev <- length(sample.points$Real[which(
      sample.points$Real == 1)]) / nrow(sample.points)
    obs.prev <- length(sample.points$Real[which(
      sample.points$Observed == 1)]) / nrow(sample.points)
    results$sample.prevalence <- c(true.sample.prevalence = true.prev,
                                   observed.sample.prevalence = obs.prev)
  }

  class(results) <- append("VSSampledPoints", class(results))
  return(results)
}
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/sampleOccurrences.R
#' Linear function
#' @description A simple linear function of the form
#' \deqn{ax+b}{a*x+b}
#' @param x a numeric value or vector
#' @param a a numeric value or vector (the slope)
#' @param b a numeric value or vector (the intercept)
#' @return a numeric value or vector resulting from the function
#' @export
#' @seealso \code{\link{logisticFun}}, \code{\link{quadraticFun}}
#' @author
#' Boris Leroy \email{leroy.boris@@gmail.com}
#'
#' Maintainer: Boris Leroy \email{leroy.boris@@gmail.com}
#' @examples
#' x <- 1:100
#' y <- linearFun(x, a = 0.5, b = 0)
#' plot(y ~ x, type = "l")
linearFun <- function(x, a, b) {a * x + b}

#' Logistic function
#'
#' @description A simple logistic function of the form
#' \deqn{\frac{1}{{1 + e^{\frac{x - \beta}{\alpha}}}}}{
#' 1 / (1 + exp((x - \beta)/\alpha))}
#' @param x a numeric value or vector
#' @param alpha a numeric value or vector (slope parameter, see details)
#' @param beta a numeric value or vector (inflexion point, see details)
#' @return a numeric value or vector resulting from the function
#' @export
#' @seealso \code{\link{linearFun}}, \code{\link{quadraticFun}}
#' @author
#' Boris Leroy \email{leroy.boris@@gmail.com}
#'
#' Maintainer: Boris Leroy \email{leroy.boris@@gmail.com}
#' @details
#' The value of \code{beta} determines the 'threshold' of the logistic curve
#' (i.e. the inflexion point).
#'
#' The value of \code{alpha} determines the slope of the curve (see examples):
#' \itemize{
#' \item{\code{alpha} very close to 0 will result in a threshold-like
#' response.}
#' \item{Values of \code{alpha} with the same order of magnitude as the range
#' of \code{x} (e.g., the range of \code{x} / 10) will result in a
#' logistic function.}
#' \item{\code{alpha} very far from 0 will result in a linear function.}
#' }
#' @examples
#' x <- 1:100
#' y <- logisticFun(x, alpha = -10, beta = 50)
#' plot(y ~ x, type = "l")
#'
#' # The effect of alpha:
#' y1 <- logisticFun(x, alpha = -0.01, beta = 50)
#' y2 <- logisticFun(x, alpha = -10, beta = 50)
#' y3 <- logisticFun(x, alpha = -1000, beta = 50)
#'
#' par(mfrow = c(1, 3))
#' plot(y1 ~ x, type = "l", main = expression(alpha %->% 0))
#' plot(y2 ~ x, type = "l", main = expression(alpha %~~% range(x)/10))
#' plot(y3 ~ x, type = "l", main = expression(alpha %->% infinity))
logisticFun <- function(x, alpha, beta) {1 / (1 + exp((x - beta)/alpha))}

#' Quadratic function
#'
#' @description A simple quadratic function of the form
#' \deqn{ax^2+bx+c}{
#' a*x^2+b*x+c}
#' @param x a numeric value or vector
#' @param a a numeric value or vector
#' @param b a numeric value or vector
#' @param c a numeric value or vector
#' @return a numeric value or vector resulting from the function
#' @export
#' @seealso \code{\link{linearFun}}, \code{\link{logisticFun}}
#' @author
#' Boris Leroy \email{leroy.boris@@gmail.com}
#'
#' Maintainer: Boris Leroy \email{leroy.boris@@gmail.com}
#' @examples
#' x <- 1:100
#' y <- quadraticFun(x, a = 2, b = 2, c = 3)
#' plot(y ~ x, type = "l")
quadraticFun <- function(x, a, b, c) {a * x^2 + b * x + c}

#' Normal function defined by extremes
#'
#' @description A modified version of the normal function based on three
#' parameters:
#' \itemize{
#' \item{the mean}
#' \item{the absolute difference between the mean and extreme values}
#' \item{the percentage of area under the curve between the specified extreme
#' values}
#' }
#'
#' See the example for an easier understanding.
#' @param x a numeric value or vector. The input environmental variable.
#' @param mean a numeric value or vector. The optimum (mean) of the normal
#' curve
#' @param diff a numeric value or vector. The absolute difference between the
#' mean and extremes.
#' @param prob a numeric value or vector. The percentage of the area under
#' the curve between the chosen extreme values
#' @return a numeric value or vector resulting from the function
#' @export
#' @author
#' Boris Leroy \email{leroy.boris@@gmail.com}, Florian David
#'
#' Maintainer: Boris Leroy \email{leroy.boris@@gmail.com}
#' @examples
#' # Let's define the response of a species to temperature which
#' # - has an optimum at 20 degrees C
#' # - occurs 99% of the time between 13 and 27 degrees C.
#' # In that case, mean = 20, diff = 7, and prob = 0.99
#'
#' # First, we generate an arbitrary temperature variable
#' # between 0 and 30 degrees C
#' temp <- seq(0, 30, length = 1000)
#'
#'
#' # Then, we calculate the response to this variable with the chosen values
#' response <- custnorm(x = temp, mean = 20, diff = 7, prob = .99)
#'
#' plot(response ~ temp, type = "l")
custnorm <- function(x, mean, diff, prob)
{
  # Convert the central (two-tailed) probability into an upper-tail
  # probability, then derive the standard deviation such that
  # P(mean - diff < X < mean + diff) = prob.
  prob <- prob + (1 - prob)/2
  sd <- - diff / stats::qnorm(p = 1 - prob)
  stats::dnorm(x, mean = mean, sd = sd)
}

#' Beta response function
#'
#' @description Generation of a beta response curve (see references) according
#' to the equation:
#' \deqn{k * (x - p1)^{\alpha} * (p2 - x)^{\gamma}}{
#' P = k (x - p1)^\alpha (p2 - x)^\gamma}
#' k is automatically estimated to have a maximum value of P equal to 1.
#' @param x a numeric value or vector. The input environmental variable.
#' @param p1 a numeric value or vector. Lower tolerance bound for the species
#' @param p2 a numeric value or vector. Upper tolerance bound for the species
#' @param alpha a numeric value or vector. Parameter controlling the shape of
#' the curve (see details)
#' @param gamma a numeric value or vector. Parameter controlling the shape of
#' the curve (see details)
#' @return a numeric value or vector resulting from the function
#' @details
#' p1 and p2 can be seen as the upper and lower critical threshold of the
#' curve.
#' \code{alpha} and \code{gamma} control the shape of the curve near p1 and
#' p2, respectively.
#' When \code{alpha} = \code{gamma}, the curve is symmetric. Low values of
#' \code{alpha} and \code{gamma}
#' result in smooth (< 1) to plateau (< 0.01) curves. Higher values result in
#' peak (> 10) curves.
#'
#' When \code{alpha} < \code{gamma}, the curve is skewed to the right.
#' When \code{gamma} < \code{alpha}, the curve is skewed to the left.
#' @export
#' @references
#' Oksanen, J. & Minchin, P.R. (2002). Continuum theory revisited: what shape
#' are species responses along ecological gradients? \emph{Ecological
#' Modelling}
#' \bold{157}:119-129.
#' @seealso \code{\link{linearFun}}, \code{\link{quadraticFun}},
#' \code{\link{custnorm}}
#' @author
#' Boris Leroy \email{leroy.boris@@gmail.com}
#'
#' Maintainer: Boris Leroy \email{leroy.boris@@gmail.com}
#' @examples
#' temp <- seq(-10, 40, length = 100)
#' # A curve similar to a thermal performance curve
#' P <- betaFun(x = temp, p1 = 0, p2 = 35, alpha = 0.9, gamma = 0.08)
#' plot(P ~ temp, type = "l")
betaFun <- function(x, p1, p2, alpha, gamma)
{
  # Normalisation constant: the unscaled curve peaks at
  # x* = (alpha * p2 + gamma * p1) / (alpha + gamma), where
  # (x* - p1) = alpha * (p2 - p1) / (alpha + gamma) and
  # (p2 - x*) = gamma * (p2 - p1) / (alpha + gamma);
  # k is the reciprocal of that peak so that max(P) = 1.
  k <- 1/((alpha * (p2 - p1) / (alpha + gamma))^alpha) /
    ((gamma * (p2 - p1) / (alpha + gamma))^gamma)
  # Response is zero outside the tolerance interval (p1, p2).
  ifelse(x > p1 & x < p2,
         k * ((x - p1)^alpha) * (p2 - x)^gamma,
         0)
}

# Huisman-Olff-Fresco response function
#
# @description A Huisman-Olff-Fresco response function:
# \deqn{P = \frac{1}{{1 + e^{a + b x}}} \frac{1}{1 + e^{c - dx}}}{
# P = (1 / (1 + exp(a + bx))) * (1 / (1 + exp(c -dx)))}
# @param x a numeric value or vector
# @param a a numeric value or vector
# @param b a numeric value or vector
# @param c a numeric value or vector
# @return a numeric value or vector resulting from the function
# @seealso \code{\link{linearFun}}, \code{\link{quadraticFun}}
# @author
# Boris Leroy \email{leroy.boris@@gmail.com}
#
# Maintainer: Boris Leroy \email{leroy.boris@@gmail.com}
# @examples
# temp <- seq(-10, 40, length = 100)
# # A curve similar to a thermal performance curve
# P <- HOFFun(x = temp, a = -200, b = 10, c = 10, d = 0.1)
# plot(P ~ temp, type = "l")
#
#
# .HOFFun <- function(x, a, b, c, d)
# {
#   if (a == 0 & b == 0)
#   {
#     stop("a and b can't both be set to zero")
#   } else if (c == 0 & d == 0)
#   {
#     stop("c and d can't both be set to zero")
#   }
#   M/(1 + exp(a + b * x)) * 1/(1 + exp(c - d * x))
#   (1 / (1 + exp(a + b * x))) * (1 / (1 + exp(c - d * x)))
# }

# Internal thermal performance function: a sharply-skewed unimodal response
# of body temperature Tb around the optimum To, scaled by Pmax.
# rho controls the steepness of the decline above To; sigma controls the
# width of the quadratic decline around To.
.thermalFun <- function(Pmax, Tb, To, rho, sigma)
{
  Pmax * exp(-exp(rho * (Tb - To) - 6) - sigma * (Tb - To)^2)
}
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/sp.env.functions.R
#' Synchronise NA values among layers of a stack
#'
#' @description
#' This function ensures that cells containing NAs are the same among all the
#' layers of a raster stack, i.e. that for any given pixel of the stack, if
#' one layer has a NA, then all layers should be set to NA for that pixel.
#' @details
#' This function can do that in two different ways; if your computer has
#' enough RAM a fast way will be used; otherwise a slower but memory-safe way
#' will be used.
#' @param x a raster stack object (SpatRaster or RasterStack) which needs to
#' be synchronised.
#' @return the input object with NA cells synchronised across all layers.
#' @export
#' @author
#' Boris Leroy \email{leroy.boris@@gmail.com}
#'
#' with help from C. N. Meynard, C. Bellard & F. Courchamp
#' @importFrom terra mem_info mask app
#' @importFrom utils capture.output
#' @examples
#' # Creation of a stack with different NAs across layers
#' m <- matrix(nr = 10, nc = 10, 1:100)
#' r1 <- rast(m)
#' r2 <- rast(m)
#' r1[sample(1:ncell(r1), 20)] <- NA
#' r2[sample(1:ncell(r2), 20)] <- NA
#' s <- c(r1, r2)
#'
#'
#' # Effect of the synchroniseNA() function
#' plot(s) # Not yet synchronised
#' s <- synchroniseNA(s)
#' plot(s) # Synchronised
#'
synchroniseNA <- function(x)
{
  if(inherits(x, "SpatRaster")) {
    # The gain in performance is only about 7% so this code is currently
    # commented.
    # opt <- terra:::spatOptions()
    # opt$ncopies = 2
    # mem <- x@ptr$mem_needs(opt)
    #
    # if(mem[1] < prod(mem[2:3])) {
    #   val <- values(x)
    #   if(any(is.na(val)))
    #   {
    #     NA.pos <- unique(which(is.na(val), arr.ind = T)[, 1])
    #   }
    #   val[NA.pos, ] <- NA
    #   x <- setValues(x, val)
    #   return(x)
    # } else {

    # app(x, sum) is NA wherever any layer is NA (NA propagates through
    # sum), so masking by it sets those cells to NA in every layer.
    x <- terra::mask(x, terra::app(x, fun = sum))
    return(x)
    # }
  } else if(inherits(x, "RasterStack")){
    if(canProcessInMemory(x, n = 2))
    {
      val <- raster::getValues(x)
      if(any(is.na(val)))
      {
        # Rows (= cells) with at least one NA in any layer
        NA.pos <- unique(which(is.na(val), arr.ind = TRUE)[, 1])
        # Bug fix: this assignment previously ran unconditionally, which
        # errored with "object 'NA.pos' not found" when the stack
        # contained no NA at all.
        val[NA.pos, ] <- NA
        x <- raster::setValues(x, val)
      }
      return(x)
    } else
    {
      # Memory-safe path: calc(x, sum) is NA where any layer is NA.
      x <- raster::mask(x, raster::calc(x, fun = sum))
      return(x)
    }
  }
  # Unsupported classes fall through and return NULL invisibly, as in the
  # original implementation.
  invisible(NULL)
}
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/synchroniseNA.R
########## WORK IN PROGRESS ########## # # The function "thermalResponse" is currently being improved so that its # two parameters (rho and sigma) can be automatically adjusted according to # user-defined critical temperature thresholds (CTmin and CTmax). # # # x <- seq(0, 30, length = 1000) # # thermalResponse <- function(x, To, rho, sigma) # { # exp(-exp(rho * (x - To) - 6) - sigma * (x - To)^2) # } # # rescaled.thermalResponse <- function(x, To, rho, sigma) # { # (thermalResponse(x, To, rho, sigma) - min(thermalResponse(x, To, rho, sigma))) / # max(thermalResponse(x, To, rho, sigma) - min(thermalResponse(x, To, rho, sigma))) # } # # par(mfrow = c(3, 3)) # resp <- thermalResponse(x = x, To = 5, rho = exp(100), sigma = 0.1) # # if(rescale) # { # tf <- rescaled.thermalResponse # } else # { # tf <- thermalResponse # } # # CTmax <- 16 # To = 15 # CTmin <- 0 # # precision.CTmax <- 0.01 # precision.CTmin <- 0.01 # # # rho.min <- -10 # rho.max <- 10 # sigma.min <- 0.0001 # sigma.max <- 10000 # # # ############# SECTION SIGMA #################### # x0 <- x # xmin <- min(range(x)) - 2 * diff(range(x)) # xmax <- max(range(x)) + 2 * diff(range(x)) # x <- seq(xmin, xmax, length = 100000) # resp.min <- thermalResponse(x = x, To = To, rho = exp(rho.min), sigma = sigma.min) # while(min(resp.min) > precision.CTmin & sigma.min != sigma.max) # { # sigma.min <- sigma.min * 10 # resp.min <- thermalResponse(x = x, To = To, rho = exp(rho.min), sigma = sigma.min) # } # # resp.max <- thermalResponse(x = x, To = To, rho = exp(rho.min), sigma = sigma.max) # # # # # if(CTmin > max(x[resp.max < 0.01 & x < To])) # { # stop(paste("CTmin is too high, try to decrease it (maximum possible value: ", # round(max(x[resp.max < 0.01 & x < To]), 3), ")", sep = "")) # } # # epsilon.up <- CTmin - max(x[resp.min < 0.01 & x < To]) # epsilon.bot <- CTmin - max(x[resp.max< 0.01 & x < To]) # i <- 1 # while(min(abs(epsilon.up), abs(epsilon.bot)) > precision.CTmin) # { # sigma.mid <- (sigma.min + sigma.max) / 
2 # resp.mid <- thermalResponse(x = x, To = To, rho = exp(rho.min), sigma = sigma.mid) # epsilon.mid <- CTmin - max(x[resp.mid < 0.01 & x < To]) # # if(epsilon.mid < 0) # { # sigma.max <- sigma.mid # resp.max <- thermalResponse(x = x, To = To, rho = exp(rho.min), sigma = sigma.max) # epsilon.bot <- CTmin - max(x[resp.max< 0.01 & x < To]) # } else if (epsilon.mid > 0) # { # sigma.min <- sigma.mid # resp.min <- thermalResponse(x = x, To = To, rho = exp(rho.min), sigma = sigma.min) # epsilon.up <- CTmin - max(x[resp.min < 0.01 & x < To]) # } # cat(paste(i, "\n")) # i <- i+1 # } # # # if(min(abs(epsilon.up), abs(epsilon.bot)) == abs(epsilon.bot)) # { # sigma <- sigma.max # } else if(min(abs(epsilon.up), abs(epsilon.bot)) == abs(epsilon.up)) # { # sigma <- sigma.min # } # # sigma # # ############# SECTION RHO #################### # # resp.min <- thermalResponse(x = x0, To = To, rho = exp(rho.min), sigma = sigma) # resp.max <- thermalResponse(x = x0, To = To, rho = exp(rho.max), sigma = sigma) # plot(resp.min ~ x0, type = "l") # plot(resp.max ~ x0, type = "l") # plot(resp.mid ~ x, type = "l") # # # if(min(resp.min[x0 > To]) > 0.01) # { # #change cutoff ? 
# # Investigate the issue between the used cutoff for the y axis (0.01) # # and the condition testing in the lines below # } # # if(CTmax < min(x[resp.max < 0.01 & x > To])) # { # stop(paste("CTmax is too low, try to increase it (minimum possible value: ", # round(min(x[resp.max < 0.01 & x > To]), 3), ")", sep = "")) # } else if(CTmax > min(x[resp.min < 0.01 & x > To])) # { # stop(paste("CTmax is too high, try to increase sigma for a higher CTmax (maximum possible value: ", # round(min(x[resp.min < 0.01 & x > To]), 3), ")", sep = "")) # } # # epsilon.up <- CTmax - min(x[resp.min < 0.01 & x > To]) # epsilon.bot <- CTmax - min(x[resp.max < 0.01 & x > To]) # i <- 1 # while(min(abs(epsilon.up), abs(epsilon.bot)) > precision.CTmax) # { # rho.mid <- (rho.min + rho.max) / 2 # resp.mid <- thermalResponse(x = x, To = To, rho = exp(rho.mid), sigma = sigma) # epsilon.mid <- CTmax - min(x[resp.mid < 0.01 & x > To]) # # if(epsilon.mid > 0) # { # rho.max <- rho.mid # resp.max <- thermalResponse(x = x, To = To, rho = exp(rho.max), sigma = sigma) # epsilon.bot <- CTmax - min(x[resp.max < 0.01 & x > To]) # } else if (epsilon.mid < 0) # { # rho.min <- rho.mid # resp.min <- thermalResponse(x = x, To = To, rho = exp(rho.min), sigma = sigma) # epsilon.up <- CTmax - min(x[resp.min < 0.01 & x > To]) # } # cat(paste(i, "\n")) # i <- i+1 # } # # if(min(abs(epsilon.up), abs(epsilon.bot)) == abs(epsilon.bot)) # { # rho <- exp(rho.max) # } else if(min(abs(epsilon.up), abs(epsilon.bot)) == abs(epsilon.up)) # { # rho <- exp(rho.min) # } # # rho # # # # # # CTmax <- 7 # # resptot <- thermalResponse(x = x0, To = To, rho = rho, sigma = sigma) # # plot(resptot~x0, type = "l") # abline(v = CTmax) # abline(v = CTmin) # # min(x[resp < 0.01]) # # optimize(f = thermalResponse, interval = c(15, 100), To = 15, rho = 5, sigma = .01, tol = 1)
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/thermalFunctionDraft.R
#' Generation of virtual species #' #' This package allows generating virtual species distributions, for example #' for testing species distribution modelling protocols. #' For a complete tutorial, #' see borisleroy.com/virtualspecies #' #' #' @details #' The process of generating a virtual species distribution is divided into #' four major steps. #' \enumerate{ #' \item{Generate a virtual species distributions from environmental variables. #' This can be done by #' \itemize{ #' \item{defining partial response functions to each environmental #' variable, and then combining them to compute the overall environmental #' suitability, #' with \code{\link{generateSpFromFun}}} #' \item{computing a PCA among #' environmental variables, and simulating the response of the species along #' the two first axes of the PCA with \code{\link{generateSpFromPCA}}}} #' This step can be randomised with \code{\link{generateRandomSp}} #' } #' \item{Convert the virtual species distribution into presence-absence, with #' \code{\link{convertToPA}}} #' \item{Facultatively, introduce a distribution bias with #' \code{\link{limitDistribution}}} #' \item{Sample occurrence points (presence only or presence-absence) inside the #' virtual species distribution, either randomly or with biases, with #' \code{\link{sampleOccurrences}}} #' } #' #' There are other useful functions in the package: #' \itemize{ #' \item{\code{\link{formatFunctions}}: this is a helper function to format and #' illustrate the response functions as a correct input for #' \code{\link{generateSpFromFun}}} #' \item{\code{\link{plotResponse}}: to visualise the species-environment #' relationship #' of the virtual species} #' \item{\code{\link{removeCollinearity}}: this function can be used to remove #' collinearity among variables of a stack by selecting a subset of #' non-colinear variables} #' \item{\code{\link{synchroniseNA}}: this function can be used to synchronise #' NA values among layers of a stack} #' } #' #' #' #' #' 
This package makes use of several other packages: #' \itemize{ #' \item{This package makes extensive use of the package \code{\link{terra}} #' to obtain spatialised #' environmental variables, and produce spatialised virtual #' species distributions.} #' \item{\code{\link{ade4}} is used to generate species with a PCA approach.} #' \item{\code{\link{rnaturalearth}} is used to obtain free world shapefiles, #' in order to create #' dispersal limitations and sampling biases.} #' } #' @references #' Leroy, B. et al. 2016. virtualspecies, an R package to generate virtual #' species distributions. Ecography. 39(6):599-607 #' @author #' Boris Leroy \email{leroy.boris@@gmail.com} #' #' with help from C. N. Meynard, C. Bellard & F. Courchamp #' #' Maintainer: Boris Leroy \email{leroy.boris@@gmail.com} #' @name virtualspecies-package #' @docType package #' @import raster NULL
/scratch/gouwar.j/cran-all/cranData/virtualspecies/R/virtualspecies-package.R
## Internal OS-detection helpers, used to branch on platform-specific
## behavior (installer paths, driver names, etc.).

# TRUE on macOS (Darwin kernel).
is_osx <- function() unname(Sys.info()["sysname"] == "Darwin")

# TRUE on Linux.
is_linux <- function() unname(Sys.info()["sysname"] == "Linux")

# TRUE on Windows (any flavour).
is_windows <- function() .Platform$OS.type == "windows"

# TRUE on Solaris; Virtuoso binaries are not available on this platform.
is_solaris <- function() grepl("SunOS", Sys.info()["sysname"])

# Return a short OS identifier ("osx", "linux", "windows" or "solaris"),
# or NULL (with a warning) when the OS cannot be determined.
which_os <- function() {
  if (is_osx()) return("osx")
  if (is_linux()) return("linux")
  if (is_windows()) return("windows")
  if (is_solaris()) return("solaris")
  warning("OS could not be determined", call. = FALSE)
  NULL
}

# utils::askYesNo is new to R 3.5.0; this local fallback keeps backwards
# compatibility with older R versions.
# Returns TRUE for "yes" (or empty input), FALSE for "no", NA for "cancel".
# Unrecognized answers raise an error.
askYesNo <- function(msg) {
  prompts <- c("Yes", "No", "Cancel")
  choices <- tolower(prompts)
  msg1 <- paste0("(", paste(choices, collapse = "/"), ") ")
  # Keep the whole prompt on one line when it fits; otherwise print the
  # question first and only prompt with the choices.
  if (nchar(paste0(msg, msg1)) > 250) {
    cat(msg, "\n")
    msg <- msg1
  } else {
    msg <- paste0(msg, " ", msg1)
  }
  ans <- readline(msg)
  # pmatch allows unambiguous abbreviations, e.g. "y" matches "yes".
  match <- pmatch(tolower(ans), tolower(choices))
  if (!nchar(ans)) {
    TRUE  # empty input defaults to "yes"
  } else if (is.na(match)) {
    stop("Unrecognized response ", dQuote(ans))
  } else {
    c(TRUE, FALSE, NA)[match]
  }
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/utilities.R
#' virtuoso: An R Interface to Virtuoso Using ODBC #' #' Virtuoso is a high-performance "universal server," which can act #' as both a relational database (supporting standard SQL queries), #' and a Resource Description Framework (RDF) triplestore, supporting #' SPARQL queries and semantic reasoning. The `virtuoso` R package provides #' R users with a DBI-compatible connection to the Virtuoso database. #' The package also provides helper routines to install, launch, and manage #' a Virtuoso server locally on Mac, Windows and Linux platforms using #' the standard interactive installers from the R command-line. By #' automatically handling these setup steps, the package can make Virtuoso #' considerably faster and easier for most users to deploy in a local #' environment. While this can be used as a normal `dplyr` backend, Virtuoso #' excels when used as an RDF triplestore. Managing the bulk import of triples #' from common serializations with a single intuitive command is another key #' feature of the `virtuoso` R package. Bulk import performance can be tens to #' hundreds of times faster than the comparable imports using existing R tools, #' including `rdflib` and `redland` packages. #' #' @keywords internal "_PACKAGE"
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/virtuoso-package.R
## Some additional possible helper routines for common requests are shown here.
## Currently not fully developed or tested, and thus not exported.

#' Clear all triples from a graph
#'
#' @details NOTE: after clearing a graph, re-running the bulk
#' importer may refuse to re-import triples.
#' @inheritParams vos_import
#' @examples
#' vos_status()
#' \donttest{
#' if(has_virtuoso()){
#' vos_start()
#' con <- vos_connect()
#' vos_clear_graph(con)
#' }}
#' @noRd
vos_clear_graph <- function(con, graph = "rdflib") {
  # Virtuoso accepts SPARQL over its SQL/ODBC channel by prefixing the
  # statement with the "SPARQL" keyword.
  DBI::dbGetQuery(con, paste0("SPARQL CLEAR GRAPH <", graph, ">"))
}

#' List graphs
#'
#' @export
#' @inheritParams vos_import
#' @examples
#' status <- vos_status()
#' \donttest{
#' if(has_virtuoso() & is.null(status)){
#' vos_start()
#' con <- vos_connect()
#' vos_list_graphs(con)
#'
#' }}
vos_list_graphs <- function(con) {
  # Lists every distinct graph URI in the store, sorted alphabetically.
  # The query fragments are pasted with the default sep = " " into a single
  # SPARQL statement sent over the SQL channel.
  DBI::dbGetQuery(
    con,
    paste(
      "SPARQL SELECT",
      "DISTINCT ?g",
      "WHERE {",
      "GRAPH ?g {?s ?p ?o}",
      "}",
      "ORDER BY ?g"
    )
  )
}

## Methods not yet implemented, see notes inline.

#' count triples
#'
#' @inheritParams vos_import
#' @noRd
vos_count_triples <- function(con, graph = NULL) {
  # Stub: intentionally does nothing yet. Both candidate implementations
  # below are kept for reference but disabled.

  ## Official query method below. Not sure why these return
  ## large negative integer on debian and fail on mac...
  # DBI::dbGetQuery(con, "SPARQL SELECT COUNT(*) FROM <rdflib>")
  # DBI::dbGetQuery(con, paste("SPARQL SELECT (COUNT(?s) AS ?triples)",
  ## "WHERE { GRAPH ?g { ?s ?p ?o } }"))

  ## this way with dplyr way works but requires in-memory
  ## loading of all triples, probably a terrible idea!
  ## df <- DBI::dbGetQuery(con, paste(
  ## "SPARQL SELECT ?g ?s ?p ?o WHERE { GRAPH ?g {?s ?p ?o} }"))
  ## dplyr::count_(df, "g")
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/virtuoso.R
#' set Virtuoso paths
#'
#' Set the location of Virtuoso database, configure files,
#' cache, and logs to your preferred location. Set home
#' to the location of your Virtuoso installation.
#' @param db_dir Location of data in the Virtuoso (tables, triplestore)
#' @param config_dir Location of configuration files for Virtuoso
#' @param cache_dir Location of cache for bulk importing
#' @param log_dir Location of Virtuoso Server logs
#' @param home Location of the Virtuoso installation
#' @return A logical vector, with elements being true
#' if setting the corresponding variable succeeded
#' (invisibly).
#'
#' @export
#' @examples
#' if(has_virtuoso())
#'   vos_set_paths()
#'
vos_set_paths <- function(db_dir = vos_db(),
                          config_dir = vos_config(),
                          cache_dir = vos_cache(),
                          log_dir = vos_logdir(),
                          home = virtuoso_home()
                          ){
  if(is_solaris()){
    warning("Virtuoso not available for Solaris", call. = FALSE)
    # NOTE(review): returns "" rather than the documented logical vector on
    # Solaris; kept for backward compatibility.
    return("")
  }
  # Sys.setenv returns a logical vector of per-variable success; wrapped in
  # invisible() to match the documented "(invisibly)" return.
  invisible(Sys.setenv(VIRTUOSO_DB = db_dir,
                       VIRTUOSO_CONFIG = config_dir,
                       VIRTUOSO_CACHE = cache_dir,
                       VIRTUOSO_LOG = log_dir,
                       VIRTUOSO_HOME = home))
}

# Point all Virtuoso paths at a fresh tempdir layout; used by the test suite
# so that tests never touch the user's real data/config/cache/log dirs.
vos_test_paths <- function(){
  x <- tempdir()
  db <- file.path(x, "vos", "db")
  config <- file.path(x, "vos", "config")
  cache <- file.path(x, "vos", "cache")
  log <- file.path(x, "vos", "log")
  dir.create(db, FALSE, TRUE)
  dir.create(config, FALSE, TRUE)
  dir.create(cache, FALSE, TRUE)
  dir.create(log, FALSE, TRUE)
  vos_set_paths(db, config, cache, log)
}

# unset all virtuoso paths
vos_unset_paths <- function(){
  Sys.unsetenv(c("VIRTUOSO_DB", "VIRTUOSO_CONFIG", "VIRTUOSO_CACHE",
                 "VIRTUOSO_LOG", "VIRTUOSO_HOME"))
}

#' @importFrom rappdirs app_dir
virtuoso_app <- rappdirs::app_dir("Virtuoso")

## database and virtuoso.ini location.
## Each vos_* accessor below resolves its directory from an environment
## variable (settable via vos_set_paths()) with a rappdirs default, creates
## the directory if needed, and returns the expanded path.
vos_db <- function(db_dir = Sys.getenv(
                     "VIRTUOSO_DB",
                     rappdirs::user_data_dir("Virtuoso")
                   )) {
  dir.create(db_dir, FALSE, TRUE)
  fs::path_expand(db_dir)
}

## odbc config
vos_config <- function(config_dir = Sys.getenv(
                         "VIRTUOSO_CONFIG",
                         rappdirs::user_config_dir("Virtuoso")
                       )) {
  dir.create(config_dir, FALSE, TRUE)
  fs::path_expand(config_dir)
}

## for bulk importer
vos_cache <- function(cache_dir = Sys.getenv(
                        "VIRTUOSO_CACHE",
                        rappdirs::user_cache_dir("Virtuoso")
                      )) {
  dir.create(cache_dir, FALSE, TRUE)
  fs::path_expand(cache_dir)
}

# virtuoso processx log (although virtuoso also logs to vos_db...)
# not to be confused with vos_log() user function to read the logs.
vos_logdir <- function(log_dir = Sys.getenv(
                         "VIRTUOSO_LOG",
                         rappdirs::user_log_dir("Virtuoso")
                       )) {
  dir.create(log_dir, FALSE, TRUE)
  fs::path_expand(log_dir)
}

#' @importFrom fs path_norm
# Location of the package-managed odbcinst.ini inside the config dir.
odbcinst_path <- function() {
  fs::path_norm(file.path(vos_config(), "odbcinst.ini"))
}

## The system home location.
# NOTE(review): on Linux this returns the path of a virtuoso.ini *file*, not
# an installation directory, unlike the osx/windows branches -- confirm
# whether callers rely on this.
virtuoso_home <- function() {
  switch(which_os(),
    osx = virtuoso_home_osx(),
    windows = virtuoso_home_windows(),
    linux = "/etc/virtuoso-opensource-6.1/virtuoso.ini",
    NULL
  )
}

# Ask Homebrew for its virtuoso prefix; "" when Homebrew is absent.
brew_home <- function() {
  if (!has_homebrew()) return("")
  cmd <- processx::run("brew", c("--prefix", "virtuoso"))
  # Bug fix: processx captures stdout verbatim, including the trailing
  # newline; strip it so the result works in file.exists() / file.path().
  # (Previously file.exists(brew_home()) always returned FALSE.)
  sub("[\r\n]+$", "", cmd$stdout)
}

virtuoso_home_osx <- function(app = FALSE, use_brew = FALSE) {
  # Prefer a Homebrew install when present (or explicitly requested);
  # otherwise fall back to the standard .app bundle location.
  brewhome <- brew_home()
  if (file.exists(brewhome) || use_brew) {
    system_home <- brewhome
  } else {
    system_home <- paste0(
      "/Applications/Virtuoso Open Source Edition v7.2.app/",
      "Contents/virtuoso-opensource"
    )
  }
  home <- Sys.getenv("VIRTUOSO_HOME", system_home)
  # With app = TRUE, return the path of the enclosing .app bundle instead.
  if (app) return(normalizePath(file.path(home, "..", ".."), mustWork = FALSE))
  home
}

virtuoso_home_windows <- function() {
  system_home <- "C:/Program\ Files/OpenLink\ Software/Virtuoso OpenSource 7.2"
  Sys.getenv("VIRTUOSO_HOME", system_home)
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/virtuoso_paths.R
## GLOBAL DEFAULT VARS

#' Configure Virtuoso Server ini file
#'
#' Virtuoso Server configuration is determined by a virtuoso.ini file when
#' server starts. This file includes both system-specific information from
#' your install (location of server files, addons, etc) and user-configurable
#' parameters. This helper function provides a way to create and modify an
#' appropriate `virtuoso.ini` file.
#'
#' @param dirs_allowed Paths (relative or absolute) to directories from which
#' Virtuoso should have read and write access (e.g. for bulk uploading). Should
#' be specified as a single comma-separated string.
#' @param gigs_ram Indicate approximately the maximum GB of memory Virtuoso can
#' have access to. (Used to set NumberOfBuffers & MaxDirtyBuffers in config.)
#' @param template Location of an existing virtuoso.ini file which will be used
#' as a template. By default, `vos_configure()` will attempt to locate the
#' appropriate template for your system.
#' @param db_dir location where `virtuoso.ini` file should be written. Other
#' Virtuoso database log files will also be written here.
#' @return Writes the requested `virtuoso.ini` file to the db_dir specified
#' and returns the path to this file.
#' @importFrom ini read.ini write.ini
#' @references <http://docs.openlinksw.com/virtuoso/dbadm/>
#' @export
#' @examples
#' \donttest{
#' # can take > 5s to test
#' ## configure with typical defaults:
#' vos_configure()
#' ## Increase or decrease RAM available to virtuoso:
#' vos_configure(gigs_ram = 1)
#' }
vos_configure <- function(dirs_allowed = getwd(),
                          gigs_ram = 2,
                          template = find_virtuoso_ini(),
                          db_dir = vos_db()) {
  # Without a platform template we cannot produce a valid ini file;
  # warn and bail out rather than erroring.
  if(!file.exists(template)){
    warning("Exiting, virtuoso template not found... is virtuoso installed?")
    return(invisible(NULL))
  }

  dir.create(db_dir, FALSE)
  # Build the comma-separated DirsAllowed entry: user dirs, plus "." (the
  # ini file's own directory) and the package cache dir for bulk imports.
  DirsAllowed <- paste(unique(
    c(
      dirs_allowed, # user-supplied
      ".", # required (refers to ini file, e.g. db_dir())
      vos_cache() # app's cache dir
    )
  ),
  sep = "", collapse = ","
  )

  ## Escape spaces in directory names
  # NOTE(review): the result of this gsub() is discarded (not assigned back
  # to DirsAllowed), so spaces are never actually escaped -- confirm intent
  # before assigning, since changing it would alter the written ini file.
  gsub(" ", "\\ ", DirsAllowed)
  ## Consider normalizePaths with winslash="/"

  # Read the template, then override the user-configurable parameters.
  V <- ini::read.ini(template)
  V$Parameters$DirsAllowed <- DirsAllowed
  # Buffer counts scale linearly with the requested RAM (values per the
  # Virtuoso admin documentation's per-GB recommendations).
  V$Parameters$NumberOfBuffers <- 85000 * gigs_ram
  V$Parameters$MaxDirtyBuffers <- 65000 * gigs_ram

  ## By default on Linux example config, these files are below $HOME and need
  ## root access. Pointing them at user_log_dir instead.
  V$Database$DatabaseFile <- file.path(db_dir, basename(V$Database$DatabaseFile))
  V$Database$ErrorLogFile <- file.path(db_dir, basename(V$Database$ErrorLogFile))
  V$Database$LockFile <- file.path(db_dir, basename(V$Database$LockFile))
  V$Database$TransactionFile <- file.path(db_dir, basename(V$Database$TransactionFile))
  V$Database$xa_persistent_file <- file.path(db_dir, basename(V$Database$xa_persistent_file))
  V$TempDatabase$DatabaseFile <- file.path(db_dir, basename(V$TempDatabase$DatabaseFile))
  V$TempDatabase$TransactionFile <- file.path(db_dir, basename(V$TempDatabase$TransactionFile))

  ## Fix relative paths to absolute ones
  if (is_windows()) {
    base <- dirname(template)
    V$Plugins$LoadPath <- normalizePath(file.path(base, V$Plugins$LoadPath))
    V$HTTPServer$ServerRoot <- normalizePath(file.path(base, V$HTTPServer$ServerRoot))
    V$Parameters$VADInstallDir <- normalizePath(file.path(base, V$Parameters$VADInstallDir))
  }

  # Write the modified configuration next to the database files and return
  # its path (the documented return value).
  output <- file.path(db_dir, "virtuoso.ini")
  dir.create(db_dir, FALSE, recursive = TRUE)
  ini::write.ini(V, output)
  output
}

# Locate the platform-specific template virtuoso.ini; NULL when the OS is
# unknown.
find_virtuoso_ini <- function() {
  switch(which_os(),
    osx = find_virtuoso_ini_osx(),
    windows = find_virtuoso_ini_windows(),
    linux = find_virtuoso_ini_linux(),
    NULL
  )
}

## ick -- hardwire Linux path
find_virtuoso_ini_linux <- function() {
  "/etc/virtuoso-opensource-6.1/virtuoso.ini"
}

## Note: normalizePath fails to simplify /my/path/to/../..
find_virtuoso_ini_windows <- function() {
  normalizePath(file.path(virtuoso_home_windows(), "database", "virtuoso.ini"))
}

# Check the candidate install locations on macOS: the .app bundle's db/ and
# database/ dirs, then the Homebrew prefix (brew output carries a trailing
# newline, hence the gsub).
find_virtuoso_ini_osx <- function() {
  path_lookup(c(
    file.path(virtuoso_home_osx(), "db", "virtuoso.ini"),
    file.path(virtuoso_home_osx(), "database", "virtuoso.ini"),
    paste0(gsub("\\n$", "", brew_home()), "/var/lib/virtuoso/db/virtuoso.ini")
  ))
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_configure.R
#' Connect to a Virtuoso Server over ODBC
#'
#' Open a DBI connection to a running Virtuoso server through the ODBC
#' driver configured by this package.
#'
#' @param driver Name of the Driver line in the ODBC configuration
#' @param uid User id. Defaults to "dba"
#' @param pwd Password. Defaults to "dba"
#' @param host IP address of the Virtuoso Server
#' @param port Port used by Virtuoso. Defaults to
#' the Virtuoso standard port, 1111
#' @inheritParams vos_odbcinst
#' @details Default parameters are appropriate for the automatic installer
#' provided by the package and for the default settings typically used by
#' local Virtuoso installers. Adjust these only if you are connecting to a
#' remote virtuoso server that is not controlled from the R package.
#'
#' @export
#' @importFrom DBI dbConnect
#' @importFrom odbc odbc
#' @return a DBI connection to the Virtuoso database. This can
#' be passed to additional virtuoso functions such as [vos_import()]
#' or [vos_query()], and can also be used as a standard DBI or dplyr
#' database backend.
#' @seealso [vos_install()], [vos_start()]
#' @examples
#' status <- vos_status()
#' \donttest{
#' if(has_virtuoso()){
#' ## start up
#' vos_start()
#' con <- vos_connect()
#' }
#' }
vos_connect <- function(driver = NULL,
                        uid = "dba",
                        pwd = "dba",
                        host = "localhost",
                        port = "1111",
                        system_odbcinst = find_odbcinst(),
                        local_odbcinst = odbcinst_path()) {
  ## Virtuoso has no Solaris build: warn and bail out early.
  if (is_solaris()) {
    warning("Virtuoso not available for Solaris", call. = FALSE)
    return(NULL)
  }

  ## Resolve the platform-specific driver name unless the caller supplied one.
  if (is.null(driver)) {
    driver <- switch(which_os(),
      "linux" = "Local Virtuoso",
      "osx" = "Local Virtuoso",
      "windows" = "Virtuoso (Open Source)"
    )
  }

  ## Point the ODBC layer at the directory holding the odbcinst.ini that
  ## this package manages.
  odbc_sys_dir <-
    dirname(virtuoso::vos_odbcinst(system_odbcinst, local_odbcinst))
  Sys.setenv(ODBCSYSINI = odbc_sys_dir)

  DBI::dbConnect(
    odbc::odbc(),
    driver = driver,
    uid = uid,
    pwd = pwd,
    host = host,
    port = port
  )
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_connect.R
#' Destroy all Virtuoso's directories
#'
#' Provides a clean reset of the system that purges all
#' data files, config files, cache and log files created
#' by virtuoso R package. This does not uninstall Virtuoso software
#' itself, see [vos_uninstall()] to uninstall.
#'
#' @param force should permissions be changed (if possible) to allow deletion?
#' @return [TRUE] if entirely successful in removing all files,
#' [FALSE] otherwise (invisibly).
#' @export
#' @examples
#'
#' \dontshow{
#' virtuoso:::vos_test_paths()
#' }
#' vos_destroy_all()
#'
vos_destroy_all <- function(force = FALSE) {
  # unlink() returns 0 on success and 1 on failure; collect the codes for
  # the four package-managed directories.
  status <- c(
    unlink(vos_db(), recursive = TRUE, force = force),
    unlink(vos_cache(), recursive = TRUE, force = force),
    unlink(vos_config(), recursive = TRUE, force = force),
    unlink(vos_logdir(), recursive = TRUE, force = force)
  )
  # TRUE only when every removal succeeded.
  invisible(all(status == 0))
}

#' Delete Virtuoso Database
#'
#' delete the entire Virtuoso database for a fresh start.
#' @param ask ask before deleting?
#' @param db_dir location of the directory to delete
#' @export
#' @examples
#'
#' \dontshow{
#' virtuoso:::vos_test_paths()
#' }
#' vos_delete_db()
#'
vos_delete_db <- function(ask = is_interactive(), db_dir = vos_db()) {
  continue <- TRUE
  if (ask) {
    continue <- askYesNo("Are you sure?")
  }
  # Bug fix: askYesNo() returns NA when the user cancels, and `if (NA)`
  # raises an error. isTRUE() treats both "no" (FALSE) and "cancel" (NA)
  # as declining to delete.
  if (isTRUE(continue)) {
    unlink(db_dir, recursive = TRUE)
  }
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_destroy_all.R
#' Bulk Import of RDF triples
#'
#' While triples data can be added one by one over SPARQL queries,
#' Virtuoso bulk import is by far the fastest way to import large
#' triplestores in the database.
#'
#' @param con a ODBC connection to Virtuoso, from [vos_connect()]
#' @param files paths to files to be imported
#' @param wd Alternatively, can specify directory and globbing pattern
#' to import. Note that in this case, wd must be in (or a subdir of)
#' the `AllowedDirs` list of `virtuoso.ini` file created by
#' [vos_configure()]. By default, this includes the working directory
#' where you called [vos_start()] or [vos_configure()].
#' @param glob A wildcard aka globbing pattern (e.g. `"*.nq"`).
#' @param graph Name (technically URI) for a graph in the database.
#' Can leave as default. If a graph is already specified by the
#' import file (e.g. in nquads), that will be used instead.
#' @param n_cores specify the number of available cores for parallel loading.
#' Particularly useful when importing large numbers of bulk files.
#' @return (Invisibly) returns the status table of the bulk loader,
#' indicating file loading time or errors.
#' @details the bulk importer imports all files matching a pattern
#' in a given directory. If given a list of files, these are
#' temporarily symlinked (or copied on Windows machines) to
#' the Virtuoso app cache dir in a subdirectory, and the entire
#' subdirectory is loaded (filtered by the globbing pattern).
#' If files are not specified, load is called directly on the specified
#' directory and pattern. This is particularly useful for loading large
#' numbers of files.
#'
#' Note that Virtuoso recommends breaking large files into multiple smaller
#' ones, which can improve loading time (particularly if using multiple
#' cores.)
#'
#' Virtuoso Bulk Importer recognizes the following file formats:
#' - `.grdf`
#' - `.nq`
#' - `.owl`
#' - `.nt`
#' - `.rdf`
#' - `.trig`
#' - `.ttl`
#' - `.xml`
#'
#' Any of these can optionally be gzipped (with a `.gz` extension).
#' @references <http://vos.openlinksw.com/owiki/wiki/VOS/VirtBulkRDFLoader>
#' @importFrom digest digest
#' @importFrom fs path_abs
#' @export
#' @examples
#'
#' vos_status()
#'
#' \donttest{
#' if(has_virtuoso()){
#' vos_start()
#' con <- vos_connect()
#'
#' example <- system.file("extdata", "person.nq", package = "virtuoso")
#' vos_import(con, example)
#' }
#' }
vos_import <- function(con, files = NULL, wd = ".", glob = "*",
                       graph = "rdflib", n_cores = 1L) {
  cache <- vos_cache()

  ## Refuse files whose extension the Virtuoso bulk loader cannot parse.
  ## (vapply over NULL gives logical(0), so all() is TRUE when files is NULL.)
  stopifnot(all(assert_extensions(files))) # could be more helpful error

  ## If given a list of specific files, we have to copy (link) them into a
  ## directory Virtuoso can access (cache dir is always in AllowedDirs).
  if (!is.null(files)) {
    subdir <- digest::digest(files)
    wd <- file.path(cache, subdir)
    dir.create(wd, showWarnings = FALSE, recursive = TRUE)
    ## NOTE we need abs paths of files for this to work (at least with symlinks)
    lapply(files, function(from) {
      target <- file.path(wd, basename(from))
      ## remove target before symlinking
      if (file.exists(target)) file.remove(target)
      ## symlink only on Unix, must copy on Windows:
      switch(which_os(),
        "windows" = file.copy(fs::path_abs(from), target),
        file.symlink(fs::path_abs(from), target)
      )
    })
  }

  ## Even on Windows, ld_dir wants a Unix-style path-slash
  wd <- fs::path_tidy(wd)
  if (is_windows()) wd <- fs::path_abs(wd)

  ## Register the directory's matching files with the bulk loader queue.
  DBI::dbGetQuery(
    con,
    paste0(
      "ld_dir('", wd, "', '", glob, "', '", graph, "')"
    )
  )

  ## Record which files this call queued, before the cache is cleaned up.
  importing_files <- fs::dir_ls(wd, glob = glob)

  ## Can call loader multiple times on multicore to load multiple files...
  replicate(n_cores, DBI::dbGetQuery(con, "rdf_loader_run()"))

  ## Clean up the temporary symlink/copy directory in the cache.
  ## (Previously this unlinked each file via a lambda that ignored its
  ## argument and then called unlink() on the bare hash name, which is a
  ## non-existent relative path -- leaving the cache subdirectory behind.)
  if (!is.null(files)) {
    unlink(wd, recursive = TRUE)
  }

  ## Check status.  This includes all files ever imported;
  ## select only those on the current import list.
  status <- DBI::dbGetQuery(con, paste0("SELECT * FROM DB.DBA.LOAD_LIST"))
  current <- status$ll_file %in% importing_files
  status <- status[current, ]

  import_errors <- any(!is.na(status$ll_error))
  if (import_errors) {
    err <- status[!is.na(status$ll_error), c("ll_file", "ll_error")]
    stop(paste("Error importing:", paste(basename(err$ll_file), err$ll_error)),
      call. = FALSE
    )
  }
  invisible(status)
}

# Returns a named logical vector: TRUE for each file whose extension
# (optionally .gz-compressed) is one the Virtuoso bulk loader accepts.
assert_extensions <- function(files) {
  known_extensions <- c(
    "grdf", "nq", "owl", "nt",
    "rdf", "trig", "ttl", "xml"
  )
  pattern <- paste0("[.]", known_extensions, "(.gz)?$")
  results <- vapply(
    files,
    function(filename) any(
      vapply(pattern, grepl, logical(1L), filename)
    ),
    logical(1L)
  )
  invisible(results)
}

# Turn the first file's name into a glob for its extension,
# e.g. "x.nq" -> "*.nq", "x.nq.gz" -> "*.nq.gz".
guess_ext <- function(files) {
  filename <- basename(files[[1]])
  ext <- sub(".*([.]\\w+)", "*\\1", filename)
  if (ext == "*.gz") {
    # peel off ".gz" and glob the inner extension instead
    ext <- paste0(
      sub(
        ".*([.]\\w+)", "*\\1",
        sub("[.]\\w+$", "", filename)
      ),
      ".gz"
    )
  }
  ext
}

#' @importFrom fs path_tidy
# TRUE/FALSE: is `wd` listed in the DirsAllowed of the active virtuoso.ini?
# Returns NA (with a warning) when the configuration cannot be read,
# e.g. when connected to an external Virtuoso server.
assert_allowedDirs <- function(wd = ".", db_dir = vos_db()) {
  ## In case user connects to external virtuoso
  status <- vos_status()
  if (is.null(status)) {
    warning(paste(
      "Could not access virtuoso.ini configuration.",
      "If you are using an external virtuoso server,",
      "ensure working directory is in allowedDirs"
    ),
    call. = FALSE
    )
    return(as.character(NA))
  }
  V <- ini::read.ini(file.path(db_dir, "virtuoso.ini"))
  allowed <- strsplit(V$Parameters$DirsAllowed, ",")[[1]]
  fs::path_tidy(wd) %in% fs::path_tidy(allowed)
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_import.R
#' Helper method for installing Virtuoso Server
#'
#' Installation helper for Mac and Windows machines. By default,
#' method will download and launch the official `.dmg` or `.exe` installer
#' for your platform, running the standard drag-n-drop installer or
#' interactive dialog. Setting `ask = FALSE` will allow the installer
#' to run entirely unsupervised, which is suitable for use in scripts.
#' Mac users can alternatively opt to install Virtuoso through HomeBrew
#' by setting `use_brew=TRUE`. Linux users should simply install the
#' `virtuoso-opensource` package (e.g. in debian & ubuntu) using the
#' package manager or by contacting your system administrator.
#'
#' @seealso [vos_start()], [vos_uninstall()]
#' @param use_brew Should we use homebrew to install? (MacOS only)
#' @param ask Should we ask user for interactive installation?
#' @export
#' @importFrom processx run process
#' @examples
#' \dontshow{ if(has_virtuoso()) }
#' vos_install()
#'
vos_install <- function(ask = is_interactive(), use_brew = FALSE) {
  ## Windows & DMG installers do not persist path
  ## Need path set so we can check if virtuoso is already installed
  vos_set_path()

  if (has_virtuoso()) {
    return(message("Virtuoso is already installed."))
  }

  # The early return above guarantees Virtuoso is not installed here,
  # so dispatch straight to the platform installer (no need to re-test
  # has_virtuoso() as the original redundant guard did).
  switch(which_os(),
    "osx" = vos_install_osx(use_brew = use_brew, ask = ask),
    "linux" = vos_install_linux(),
    "windows" = vos_install_windows(ask = ask),
    NULL
  )

  ## Configure ODBC, even if Virtuoso installation already detected
  vos_odbcinst()
}

#' check for Virtuoso
#'
#' test if the system has a virtuoso installation on the path
#' @return logical indicating if virtuoso-t binary was found or not.
#' @examples
#' has_virtuoso()
#' @export
has_virtuoso <- function() {
  # Make sure installer locations are appended to PATH before looking.
  vos_set_path()
  file.exists(unname(Sys.which("virtuoso-t")))
}

# Append the platform-specific Virtuoso bin directory to PATH so that
# `virtuoso-t` can be found; no-op when already on PATH, on Linux, on
# Solaris, or when Virtuoso has not been installed yet.
vos_set_path <- function(vos_home = NULL) {
  if (is_solaris()) {
    message("virtuoso R package is not supported on Solaris")
    return(NULL)
  }
  ## Virtuoso already detected in PATH
  if (file.exists(unname(Sys.which("virtuoso-t")))) {
    return(NULL)
  }
  if (is.null(vos_home)) {
    vos_home <- switch(which_os(),
      "linux" = return(NULL),
      "osx" = virtuoso_home_osx(),
      "windows" = virtuoso_home_windows()
    )
  }
  # PATH separator differs by platform.
  sep <- switch(which_os(),
    "linux" = ":",
    "osx" = ":",
    "windows" = ";",
    ":"
  )
  bin_dir <- file.path(vos_home, "bin")
  ## If Virtuoso has not yet been installed, don't modify path yet.
  if (!file.exists(bin_dir)) {
    return(NULL)
  }
  bin_dir <- normalizePath(bin_dir)
  path <- Sys.getenv("PATH")
  Sys.setenv("PATH" = paste(path, bin_dir, sep = sep))
  invisible(path)
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_install.R
# Linux is not handled by the automated installers: print guidance for
# installing the distribution's virtuoso-opensource package instead.
vos_install_linux <- function() {
  advice <- paste(
    "Package does not support direct install of virtuoso",
    "from R on Linux systems. Please install virtuoso-opensource",
    "for your distribution. e.g. on Debian/Ubuntu systems, run",
    "sudo apt-get -y install virtuoso-opensource",
    sep = " "
  )
  message(advice)
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_install_linux.R
# @importFrom utils askYesNo ## Do not import, breaks in R 3.4

# Install Virtuoso on macOS: prefer Homebrew when requested (or when
# running unattended and brew is available), otherwise open the .dmg.
vos_install_osx <- function(use_brew = has_homebrew(), ask = is_interactive()) {
  if (use_brew) {
    vos_install_formulae()
  } else if (!ask && has_homebrew()) {
    # unattended install: use brew if we can, since the .dmg needs a user
    vos_install_formulae()
  } else {
    vos_install_dmg()
  }
}

# Download the official Virtuoso 7.2.5 .dmg to a temp file and return its path.
download_osx_installer <- function() {
  download_url <- paste0(
    "https://sourceforge.net/projects/virtuoso/",
    "files/virtuoso/7.2.5/virtuoso-opensource-",
    "7.2.5-macosx-app.dmg"
  )
  # mirror hosted on the package's GitHub releases, used if sourceforge fails
  fallback_url <- paste0(
    "https://github.com/cboettig/virtuoso/releases/",
    "download/v0.1.1/Virtuoso_OpenSource_7.20.dmg"
  )
  installer <- tempfile("virtuoso", fileext = ".dmg")
  message(paste("downloading Virtuoso dmg", "..."))
  download_fallback(download_url, installer, fallback_url)
  installer
}

# Fetch url to dest; on any non-success HTTP status retry from fallback_url.
download_fallback <- function(url, dest, fallback_url) {
  req <- curl::curl_fetch_disk(url, dest)
  if (req$status_code > 300) curl::curl_download(fallback_url, dest)
}

# Interactive .dmg install: open the disk image and wait for the user
# to drag Virtuoso into /Applications.
vos_install_dmg <- function() {
  dmg <- download_osx_installer()
  processx::run("open", dmg)
  ## User must then launch Virtuoso database from the taskbar.
  ## Installs Virtuoso home at:
  ## need to link $VIRTUOSO_HOME/bin/virtuoso-t
  askYesNo("Drag or copy Virtuoso into your Applications directory, then press return to continue...")
}

# Homebrew-based install of the virtuoso formula.
vos_install_formulae <- function(has_unixodbc = FALSE) {
  install_brew()
  ## Avoid possible brew install error:
  ## Error: Cannot install virtuoso because conflicting formulae are installed.
  ##  unixodbc: because Both install `isql` binaries.
  ## Please `brew unlink unixodbc` before continuing.
  ## Unlinking removes a formula's symlinks from /usr/local. You can
  ## link the formula again after the install finishes. You can --force this
  ## install, but the build may fail or cause obscure side-effects in the
  ## resulting software.

  ## Manually renaming the conflict does not stop brew from complaining :-(
  # if (has_unixodbc | file.exists("/usr/local/bin/isql")){
  #  has_unixodbc <- TRUE
  #  file.rename("/usr/local/bin/isql", "/usr/local/bin/isql-unixodbc")
  # }

  ## BREW is incredibly stupid in that it would rather we unlink unixodbc
  ## entirely, thus breaking ODBC functionality (e.g. odbcinst -j, which
  ## we use elsewhere in this package), than simply swap out the `isql`
  ## binary in `odbc` for that in `virtuoso` (or vice versa)
  ## Here, we force the install to avoid unlinking all of unixodbc
  ## We then link odbc with overwrite over `isql`. Sadly, `link` also
  ## lacks the compliment of `overwrite` to skip already-installed binaries.
  processx::run("brew", c("install", "--force", "virtuoso"),
    error_on_status = FALSE
  )
  processx::run("brew", c("link", "--overwrite", "virtuoso"),
    error_on_status = FALSE
  )
}

# TRUE when a `brew` executable is on the PATH.
has_homebrew <- function() !(Sys.which("brew") == "")

# Bootstrap Homebrew itself via its official ruby installer, if missing.
install_brew <- function() {
  if (!has_homebrew()) {
    processx::run("/usr/bin/ruby", paste(
      '-e "$(curl -fsSL',
      paste0(
        "https://raw.githubusercontent.com/",
        'Homebrew/install/master/install)"'
      )
    ))
  }
}

# Remove the brew formula (if installed via brew) and/or the app-bundle
# install directory; reports what was removed.
vos_uninstall_osx <- function() {
  out <- "no installation path found"
  if (has_homebrew()) {
    has_virt <- processx::run("brew", c("ls", "--versions", "virtuoso"),
      error_on_status = FALSE
    )
    if (has_virt$status == 0) {
      p <- processx::run("brew", c("uninstall", "virtuoso"),
        error_on_status = FALSE
      )
      out <- p$stdout
    }
  }
  if (file.exists(virtuoso_home_osx())) {
    unlink(virtuoso_home_osx(app = TRUE),
      recursive = TRUE
    )
    out <- paste("removed", virtuoso_home_osx(app = TRUE))
  }
  message(out)
  invisible(TRUE)
}

## an override-able interactive check
# The INTERACTIVE env var lets tests force either mode; falls back to
# the real interactive() state when unset.
is_interactive <- function() {
  as.logical(Sys.getenv("INTERACTIVE", interactive()))
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_install_osx.R
# Download the Virtuoso Open Source Windows installer to the session temp
# dir and return its path.
# NOTE(review): `version` is currently unused -- both URLs hard-code 7.2.5
# (the dated sourceforge filename is specific to that release).
download_windows_installer <- function(version = "7.2.5") {
  exe <- "Virtuoso_OpenSource_Server_7.20.x64.exe"
  download_url <- paste0(
    "https://sourceforge.net/projects/virtuoso/",
    "files/virtuoso/7.2.5/",
    "2018_08_28_",
    "Virtuoso_OpenSource_Server_7.2.x64.exe/download"
  )
  # mirror on the package's GitHub releases, used if sourceforge fails
  fallback_url <- paste0(
    "https://github.com/cboettig/virtuoso/releases/",
    "download/v0.1.1/Virtuoso_OpenSource_Server_7.20.x64.exe"
  )
  installer <- normalizePath(file.path(
    tempdir(),
    exe
  ),
  mustWork = FALSE
  )
  message(paste("downloading", exe, "..."))
  download_fallback(download_url, installer, fallback_url)
  installer
}

#' @importFrom curl curl_download
# Run the Windows installer, interactively (dialog) or silently.
vos_install_windows <- function(ask = is_interactive()) {
  installer <- system.file("windows",
    "Virtuoso_OpenSource_Server_7.20.x64.exe",
    package = "virtuoso"
  )
  if (installer == "") { # not packaged
    installer <- download_windows_installer()
  }
  if (ask) {
    proceed <- askYesNo(paste(
      "R will open the Windows Installer in another window.\n",
      "When asked to 'Create DB and start Virtuoso',\n",
      "PLEASE UNCHECK this option!\n",
      "Ready to proceed?\n"
    ))
    # askYesNo() returns NA on cancel; `if (!NA)` would error, so only
    # an explicit "yes" proceeds.
    if (!isTRUE(proceed)) {
      return(message("Install cancelled"))
    }
    processx::run(installer)
  } else {
    message("Attempting unsupervised installation of Virtuoso Open Source")
    processx::run(
      installer,
      c("/SP-", "/VERYSILENT", "/SUPPRESSMSGBOXES", '/TASKS=""')
    )
    ## NOTE: you can use `installer.exe /?` to see list of the above argument
    ## options in a Windows cmd shell
  }
}

# vos_set_path_windows <- function(vos_home = virtuoso_home_windows()){
#  bin_dir <- normalizePath(file.path(vos_home, "bin"), mustWork = FALSE)
#  lib_dir <- normalizePath(file.path(vos_home, "lib"), mustWork = FALSE)
#  path <- Sys.getenv("PATH")
#  if(!has_virtuoso())
#    Sys.setenv("PATH" = paste(path, bin_dir, lib_dir, sep=";"))
# }

# Launch the uninstaller shipped with the Windows install.
# (namespace-qualified for consistency with the rest of the file)
vos_uninstall_windows <- function(vos_home = virtuoso_home_windows()) {
  processx::run(file.path(vos_home, "unins000.exe"))
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_install_windows.R
#' Stop (kill) the Virtuoso server
#'
#' Kill ends the process started by [`vos_start()`]
#' @param p a process object, returned by
#' [`vos_process()`] or [`vos_start()`].
#' (will be restored from cache if not provided)
#' @details vos_kill simply shuts down the local Virtuoso server,
#' it does not remove any data stored in the database system.
#' [vos_kill()] terminates the process, removing the
#' process id from the process table.
#' @export
#' @seealso [vos_start()]
#' @aliases vos_kill
#' @importFrom ps ps_kill
#' @examples
#' \donttest{
#' if(has_virtuoso()){
#'
#' vos_start()
#' vos_kill()
#'
#' }
#' }
vos_kill <- function(p = NA) {
  # Nothing to do unless a live server process can be found.
  if (is.null(vos_status(p))) {
    message("No active virtuoso process detected.")
    return(NULL)
  }
  # Resolve the (possibly cached) process handle and terminate it.
  ps::ps_kill(vos_process(p))
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_kill.R
#' Query the server logs
#'
#' @param collapse an optional character string to separate the
#' lines in a single character string.
#' @param just_errors logical, default [FALSE]. Set to [TRUE] to return
#' just the lines that contain the term "error", which can be useful
#' in debugging or validating bulk imports.
#' @inheritParams vos_kill
#' @export
#' @return Virtuoso logs as a character vector.
#' @seealso [vos_start()]
#' @examples
#' if(has_virtuoso())
#' vos_log()
#'
vos_log <- function(p = NA, collapse = NULL, just_errors = FALSE) {
  # Virtuoso never runs on Solaris, so there is no log to read.
  if(is_solaris()){
    warning("Virtuoso not available for Solaris", call. = FALSE)
    return("")
  }
  # Resolve the (cached or discovered) server process; no process, no log.
  p <- vos_process(p)
  if (!inherits(p, "ps_handle")) return("")
  # vos_start() redirects the server's stderr to this fixed file.
  err_file <- file.path(vos_logdir(), "virtuoso.log")
  if(!file.exists(err_file)) return("")
  log <- readLines(err_file)
  if (just_errors) {
    # NOTE(review): match is case-sensitive, so a line saying "Error"
    # would be missed -- confirm the server logs the term in lowercase.
    return(log[grepl("error", log)])
  }
  # With collapse = NULL, paste() keeps one element per log line.
  paste(log, collapse = collapse)
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_log.R
# Rename? Maybe vos_odbc_configure?
#' Configure the ODBC Driver for Virtuoso
#'
#' ODBC uses an `odbcinst.ini` file to point ODBC at the library required
#' to drive any given database. This function helps us automatically
#' locate the driver library on different operating systems and configure
#' the odbcinst appropriately for each OS.
#'
#' @param system_odbcinst Path to the system `odbcinst.ini` file. (Does not
#' require write access.) Default will attempt to find the file for your
#' system.
#' @param local_odbcinst Path to the local odbcinst we should use.
#' @return the path to the odbcinst file that is created or modified.
#'
#' @details This function is called automatically by [vos_install()] and thus
#' does not usually need to be called by the user.  Users can also manually
#' configure ODBC as outlined in
#' <https://github.com/r-dbi/odbc#dsn-configuration-files>.
#' This is merely a convenience function automating that process on most
#' systems.
#'
#' @examples
#' \donttest{
#' ## Configures ODBC and returns silently on success.
#' vos_odbcinst()
#'
#' ## see where the inst file is located:
#' inst <- vos_odbcinst()
#' inst
#' }
#' @export
vos_odbcinst <- function(system_odbcinst = find_odbcinst(),
                         local_odbcinst = odbcinst_path()) {
  ## NOTE: This applies to and is used only by on MacOS / Linux
  Sys.setenv(ODBCSYSINI = dirname(local_odbcinst))

  ## Use local odbcinst if already configured
  if (already_set(local_odbcinst)) {
    return(invisible(local_odbcinst))
  }

  ## Then use system odbcinst if that is configured
  ## NO -- don't trust system odbcinst to be already set
  # if (already_set(system_odbcinst)){
  #  # Sys.setenv(ODBCSYSINI=system_odbcinst)
  #  return(invisible(system_odbcinst))
  # }

  # Append a [Local Virtuoso] section pointing at the driver library.
  write(c(
    "",
    "[Local Virtuoso]",
    paste("Driver =", find_odbc_driver()),
    ""
  ),
  file = local_odbcinst,
  append = TRUE
  )
  invisible(local_odbcinst)
}

# TRUE when the given odbcinst.ini already declares a [Local Virtuoso]
# section; FALSE for NULL paths or missing files.
already_set <- function(odbcinst) {
  if (is.null(odbcinst)) {
    return(FALSE)
  }
  if (file.exists(odbcinst)) {
    if (any(grepl("\\[Local Virtuoso\\]", readLines(odbcinst)))) {
      # message("Configuration for Virtuoso found")
      return(TRUE)
    }
  }
  FALSE
}

# Candidate locations of the Virtuoso ODBC driver library per OS;
# returns the first that exists (or warns, see path_lookup).
find_odbc_driver <- function(os = which_os()) {
  lookup <- switch(os,
    osx = c(
      "/usr/lib/virtodbc.so",
      "/usr/local/lib/virtodbc.so", # Mac Homebrew symlink
      file.path(virtuoso_home_osx(), "lib", "virtodbc.so")
    ),
    linux = c(
      "/usr/lib/virtodbc.so",
      "/usr/local/lib/virtodbc.so",
      "/usr/lib/odbc/virtodbc.so",
      "/usr/lib/x86_64-linux-gnu/odbc/virtodbc.so"
    ),
    windows = normalizePath(file.path(
      virtuoso_home_windows(),
      "bin", "virtodbc.dll"
    ),
    mustWork = FALSE
    ),
    "OS not recognized or not supported"
  )
  path_lookup(lookup)
}

# First existing path among `paths`; warns and returns `target_name`
# when none exists.
path_lookup <- function(paths, target_name = basename(paths[[1]])) {
  i <- vapply(paths, file.exists, logical(1L))
  if (sum(i) < 1) {
    warning(paste("could not automatically locate", target_name),
      call. = FALSE
    )
    return(target_name)
  }
  names(which(i))[[1]]
}

#' @importFrom utils read.table
# Locate the system odbcinst.ini: via `odbcinst -j` when available,
# otherwise fall back to ~/.odbcinst.ini.
find_odbcinst <- function() {
  if (is_solaris()) {
    warning("Virtuoso not available for Solaris", call. = FALSE)
    return("")
  }
  if (Sys.which("odbcinst") == "") {
    return(normalizePath("~/.odbcinst.ini", mustWork = FALSE))
  }
  ## Otherwise we can use `odbcinst -j` to find odbcinst.ini file
  p <- processx::run("odbcinst", "-j")
  # Close the text connection explicitly rather than leaking it
  # (an unclosed connection triggers "closing unused connection"
  # warnings at garbage collection).
  con <- textConnection(p$stdout)
  on.exit(close(con), add = TRUE)
  trimws(
    read.table(con,
      skip = 1, sep = ":",
      stringsAsFactors = FALSE
    )[1, 2]
  )
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_odbcinst.R
#' Return a handle to an existing Virtuoso Process
#'
#' Generally a user will not need to access this function directly,
#' though it may be useful for debugging purposes.
#' @inheritParams vos_kill
#' @return returns the [processx::process()] object cached by [vos_start()]
#' to control the external Virtuoso sever process from R.
#' @importFrom ps ps_handle ps
#' @export
#' @examples
#' if(has_virtuoso())
#' vos_process()
#'
vos_process <- function(p = NA) {
  if (is_solaris()) {
    warning("Virtuoso not available for Solaris", call. = FALSE)
    return(NULL)
  }
  ## p already is a handle to the process
  if (inherits(p, "ps_handle")) {
    return(p)
  }
  ## Otherwise, discover the pid
  pid <- virtuoso_pid()
  ## No pid means no running process
  if (length(pid) == 0) {
    return(NA)
  }
  ## More than one virtuoso-t process may be running (e.g. a system
  ## service alongside one started from R).  ps_handle() requires a
  ## single pid, so attach to the first match rather than erroring.
  ps::ps_handle(pid[[1]])
}

# pids of all processes whose name contains "virtuoso-t"
# (may be empty, or longer than one).
virtuoso_pid <- function(...) {
  x <- ps::ps(...)
  x$pid[grepl("virtuoso-t", x$name)]
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_process.R
#' Run a SPARQL query
#'
#' @param query a SPARQL query statement
#' @inheritParams vos_import
#' @return a `data.frame` containing the results of the query
#' @details SPARQL is a graph query language similar in syntax SQL,
#' but allows the use of variables to walk through graph nodes.
#' @seealso [vos_start()], [vos_connect()]
#' @references
#' - <https://en.wikipedia.org/wiki/SPARQL>
#' - <https://docs.ropensci.org/rdflib/articles/rdf_intro.html>
#'
#' @examples
#' vos_status()
#' \donttest{
#' if(has_virtuoso()){
#' vos_start()
#' con <- vos_connect()
#'
#' # show first 4 triples in the database
#' DBI::dbGetQuery(con, "SPARQL SELECT * WHERE { ?s ?p ?o } LIMIT 4")
#' }
#' }
#' @export
vos_query <- function(con, query) {
  # Virtuoso executes SPARQL over ODBC when the statement is prefixed
  # with the "SPARQL " keyword.
  statement <- paste0("SPARQL ", query)
  DBI::dbGetQuery(con, statement)
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_query.R
# Package-level environment caching the processx handle of the server
# launched by vos_start() (key "px").
virtuoso_cache <- new.env()

#' Start a Virtuoso Server
#'
#' This function will attempt to start a virtuoso server
#' instance that can be managed completely from R.  This allows
#' the user to easily start, stop, and access server logs and functions
#' from the R command line.  This server will be automatically shut
#' down when R exits or restarts, or can be explicitly controlled using
#' [vos_kill()], [vos_log()], and [vos_status()].
#'
#' @param ini path to a virtuoso.ini configuration file. If not
#' provided, function will attempt to determine the location of the
#' default configuration file.
#' @param wait number of seconds to wait for server to come online
#' @export
#' @return invisibly returns the [processx::process()] object which can be
#' used to control the external process from R.  It is not necessary for a
#' user to store this return object, as [vos_start()] caches the process
#' object so it can be automatically accessed by other functions without
#' needing to store and pass the return object.
#' @details It can take some time for the server to come up before it is
#' ready to accept queries. [vos_start()] will return as soon as the server
#' is active, which typically takes about 10 seconds on tested systems.
#' [vos_start()] monitors the Virtuoso logs every one second for a maximum
#' time of `wait` seconds (default 30 seconds) to see if the server is ready.
#' If `wait` time is exceeded, [vos_start()] will simply return the current
#' server status.  This does not mean that starting has failed, it may simply
#' need longer before the server is active.  Use [vos_status()] to continue
#' to monitor the server status manually.
#'
#' If no `virtuoso.ini` configuration file is provided, [vos_start()] will
#' automatically attempt to configure one.  For more control over this,
#' use [vos_configure()], see examples.
#' @importFrom ps ps_pid
#' @seealso [vos_install()]
#' @examples
#' \donttest{
#'
#' if(has_virtuoso()){
#' vos_start()
#' ## or with custom config:
#' vos_start(vos_configure(gigs_ram = 3))
#'
#' }
#' }
vos_start <- function(ini = NULL, wait = 30) {

  ## Windows & Mac-dmg-based installers do not persist path
  ## Need path set so we can check if virtuoso is already installed
  vos_set_path()
  if (!has_virtuoso()) {
    stop(paste(
      "Virtuoso installation not detected.",
      "Try running: vos_install()"
    ))
  }

  ## Check for cached process -- only one server per session.
  p <- vos_process()
  if (inherits(p, "ps_handle")) {
    message(paste(
      "Virtuoso is already running with pid:",
      ps::ps_pid(p)
    ))
    return(invisible(p))
  }

  ## Prepare a virtuoso.ini configuration file if one is not provided.
  if (is.null(ini)) {
    ini <- vos_configure()
  }

  ## Here we go time to start the process.
  ## "-f" keeps virtuoso-t in the foreground so processx can manage it;
  ## stderr is redirected to the log file that vos_status()/vos_log() read.
  err <- file.path(vos_logdir(), "virtuoso.log")
  px <- processx::process$new("virtuoso-t", c("-f", "-c", ini),
    stderr = err, stdout = "|",
    cleanup = TRUE
  )

  ## Wait for status: poll the log (up to `wait` seconds) for the
  ## "Server online" line before returning.
  message(px$format())
  message("Server is now starting up, this may take a few seconds...\n")
  Sys.sleep(2)
  vos_status(wait = wait)

  # cache the handle so other vos_* functions can find the server
  assign("px", px, envir = virtuoso_cache)
  ##
  invisible(vos_process())
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_start.R
#' Query the server status
#'
#' @inheritParams vos_kill
#' @inheritParams vos_start
#' @details Note: Use [vos_log()] to see the full log
#' @return a character string indicating the state of the server:
#'  - "not detected" if no process can be found
#'  - "dead" process exists but reports that server is not alive. Server may
#'    fail to come online due to errors in configuration file.
#'    see [vos_configure()]
#'  - "running" Server is up and accepting queries.
#'  - "sleeping" Server is up and accepting queries.
#'
#' @importFrom ps ps_status
#' @export
#' @examples
#' if(has_virtuoso())
#' vos_status()
#'
vos_status <- function(p = NA, wait = 10) {
  # Virtuoso never runs on Solaris; there is no status to report.
  if(is_solaris()){
    warning("Virtuoso not available for Solaris", call. = FALSE)
    return(NULL)
  }
  p <- vos_process(p)
  if (!inherits(p, "ps_handle")) {
    message("virtuoso isn't running.")
    return(invisible(NULL))
  }
  status <- ps::ps_status(p)
  # Anything other than running/sleeping (e.g. "dead", "zombie") is
  # returned immediately -- no point polling the log.
  if (!(status %in% c("running", "sleeping"))) {
    return(status)
  }

  # Poll the server log (once per second, up to `wait` tries) until the
  # "Server online at" line appears, i.e. the server accepts queries.
  log <- vos_log(p, collapse = "\n")
  tries <- 0
  up <- grepl("Server online at", log)
  while (!up && (tries < wait)) {
    Sys.sleep(1)
    log <- vos_log(p, collapse = "\n")
    up <- grepl("Server online at", log)
    tries <- tries + 1
  }
  # Show the most recent log line so the user can see how startup went.
  log <- vos_log(p)
  message(paste("latest log entry:", log[length(log)]))

  status
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_status.R
#' Uninstall Virtuoso
#'
#' Automatic uninstaller for Mac OSX and Windows clients.
#' @export
#' @examples
#' \dontrun{
#' vos_uninstall()
#' }
#'
vos_uninstall <- function() {
  # Installer locations are not persisted on PATH; set it so
  # has_virtuoso() can actually find an existing install.
  vos_set_path()

  if (!has_virtuoso()) {
    # NOTE: message() has no `call.` argument (that belongs to
    # stop()/warning()); passing one gets pasted into the text shown
    # to the user ("...not foundFALSE"), so it must not be supplied.
    return(message("Virtuoso installation not found"))
  }

  ## Call the appropriate installer
  switch(which_os(),
    "osx" = vos_uninstall_osx(),
    "linux" = paste(
      "Cannot automatically uninstall",
      "on Linux, use system tools."
    ),
    "windows" = vos_uninstall_windows(),
    NULL
  )
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/R/vos_uninstall.R
## ----setup, include = FALSE---------------------------------------------------
# Chunk defaults for the vignette (purl()-extracted from installation.Rmd):
# collapse source and output, and prefix output lines with "#>".
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
/scratch/gouwar.j/cran-all/cranData/virtuoso/inst/doc/installation.R
---
title: "Introduction: Virtuoso Installation and Configuration"
author: "Carl Boettiger"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Installation}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r setup, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

[Virtuoso](https://en.wikipedia.org/wiki/Virtuoso_Universal_Server) is a
high-performance "universal server" that can act as both a relational
database (supporting standard SQL queries) and an RDF triplestore,
(supporting SPARQL queries). Virtuoso supports communication over the
standard ODBC interface, and so R users can potentially connect to Virtuoso
merely by installing the server and using the `odbc` R package. However,
installation can present a few gotchas to users unfamiliar with Virtuoso.
This package seeks to streamline the process of installing, managing, and
querying a Virtuoso server. While the package can also be used merely to
provide a standard `DBI` connection to an RDBMS, e.g. as a `dplyr` back-end,
Virtuoso's popularity and performance is particularly notable with respect
to RDF data and SPARQL queries, so most examples focus on those use cases.

## Installation

The `virtuoso` package provides installation helpers for both Mac OSX and
Windows users through the function `vos_install()`. At the time of writing,
the Mac OS X installer uses Homebrew to install the Virtuoso Open Source
server (similar to the `hugo` installer in RStudio's `blogdown`). On
Windows, `vos_install()` downloads and executes the Windows self-extracting
archive (`.exe` file), which will open a standard installation dialog in
interactive mode, or be run automatically if not run in an interactive
session. No automated installer is provided for Linux systems; Linux users
are encouraged to simply install the appropriate binaries for their
distribution (e.g. `apt-get install -y virtuoso-opensource` on
Debian/Ubuntu systems).
## Configuration

Virtuoso Open Source configuration is controlled by a `virtuoso.ini` file,
which sets, among other things, which directories can be accessed for tasks
such as bulk import, as well as performance tweaks such as available memory.
Unfortunately, the Virtuoso server process (`virtuoso-t` application) cannot
start without a path to an appropriate config file, and the installers
(e.g. on both Windows and Linux) frequently install an example
`virtuoso.ini` to a location which can be hard to find and for which users
do not have permission to edit directly. Moreover, the file format is not
always intuitive to edit. The `virtuoso` package thus helps locate this
file and provides a helper function, `vos_configure()`, to create and
modify this configuration file. Because reasonable defaults are also
provided by this function, users should usually not need to call this
function manually. `vos_configure()` is called automatically from
`vos_start()` if the path to a `virtuoso.ini` file is not passed to
`vos_start()`.

In addition to configuring Virtuoso's settings through a `virtuoso.ini`
file, the other common barrier is setting up the driver for the ODBC
connection. Some installers (Mac, Linux) do not automatically add the
appropriate driver to an active `odbcinst.ini` file with a predictable
Driver Server Name, which we need to know to initiate the ODBC connection.
An internal helper function handles identifying drivers and establishing
the appropriate `odbcinst.ini` automatically when necessary.

## Server management

Lastly, Virtuoso Open Source is often run as a system service, starting
when the operating system starts. This is often undesirable, as the casual
laptop user does not want the service running all the time, and can be
difficult to control for users unfamiliar with managing such background
services on their operating systems. Instead of this behavior, the
`virtuoso` package provides an explicit interface to control the external
server.
The server only starts when created by `vos_start()`, and ends
automatically when the R process ends, or can be killed, paused, or resumed
at any time from R (e.g. via `vos_kill()`). Helper utilities can also query
the status and logs of the server from R. As with most database servers,
data persists to disk, at an appropriate location for the OS determined by
`rappdirs` package, and a helper utility, `vos_delete_db()` can remove this
persistent storage location. Users can also connect directly to any
existing (local or remote) Virtuoso instance by passing the appropriate
information to `vos_connect()`, which can be convenient for queries. Note
that the Virtuoso back-end provided by the R package `rdflib` can also
connect to any Virtuoso server created by the `virtuoso` R package, though
loading data and running queries through the `redland` libraries used by
`rdflib` will generally be slower than direct calls over ODBC via the
`virtuoso` package functions, often dramatically so for large triplestores.
/scratch/gouwar.j/cran-all/cranData/virtuoso/inst/doc/installation.Rmd
## ROpenSci Registry as NQuads
## (example script: converts the rOpenSci registry codemeta JSON-LD
##  into N-Quads suitable for bulk import into Virtuoso)
library(jsonlite)
library(jsonld)
library(purrr)

download.file("https://raw.githubusercontent.com/ropensci/roregistry/gh-pages/raw_cm.json",
  "raw_cm.json"
)

## Most efficient solution: serialize straight to N-Quads.
## Use a gzfile() connection so the ".gz" name really is gzip-compressed
## (writeLines() on a bare filename would write uncompressed text).
nq_con <- gzfile("ro.nq.gz")
jsonld::jsonld_to_rdf("raw_cm.json") %>% writeLines(nq_con)
close(nq_con)

## Alternate workflow I
## Expand and compact is a good way to remove duplicate contexts
x <- jsonlite::read_json("raw_cm.json")
expanded <- x %>% map(
  function(y) {
    y %>%
      toJSON(auto_unbox = TRUE) %>%
      jsonld_expand("https://raw.githubusercontent.com/codemeta/codemeta/2.0/codemeta.jsonld") %>%
      fromJSON()
  }
)
jsonlite::write_json(list("@graph" = expanded), "expanded.json",
  auto_unbox = TRUE, pretty = TRUE
)

## (path fixed: the registry file was downloaded above as "raw_cm.json",
##  not "json/raw_cm.json")
flat_list <- jsonld::jsonld_flatten("raw_cm.json") %>%
  fromJSON(simplifyDataFrame = FALSE)

## Alternative workflow II
## roundtrip to reverse the rectangling created by fromJSON DataFrame
## simplification
unsimplifyJSON <- function(df) {
  df %>%
    toJSON() %>%
    fromJSON(simplifyDataFrame = FALSE) %>%
    map(flatten)
}

# Collapse a list column to a character vector of URIs, replacing
# NULLs and non-URI entries with NA.
as_uri <- function(x) {
  if (!is.list(x)) return(x)
  x[map_lgl(x, is.null)] <- NA
  y <- flatten(x)
  not_uri <- !grepl("^http", y)
  y[not_uri] <- NA
  flatten_chr(y)
}

flat <- jsonld::jsonld_flatten("raw_cm.json") %>% fromJSON()
flat$`http://schema.org/license` <- as_uri(flat$`http://schema.org/license`)
flat_list <- unsimplifyJSON(flat)
rdftools::write_nquads(flat_list, "roregistry.nq.gz", prefix = "registry:")
/scratch/gouwar.j/cran-all/cranData/virtuoso/inst/examples/codemeta.R
# Launch a Virtuoso server in a Docker container via the stevedore package.
#
# datadir: host directory to be mounted into the container at /var/data
#          (created if it does not yet exist). Returns the container object
#          produced by stevedore's `run()`. Requires a working Docker daemon.
create_virtuoso <- function(datadir = "virtuoso-data") {
  # stevedore is an optional (Suggests-style) dependency; fail clearly if absent.
  if (!requireNamespace("stevedore", quietly = TRUE)) {
    stop("Package stevedore must be installed to use this function")
  }
  # Fix: name the argument instead of relying on positional matching
  # (`dir.create(datadir, FALSE)` silently bound FALSE to showWarnings).
  dir.create(datadir, showWarnings = FALSE)
  stopifnot(stevedore::docker_available())
  docker <- stevedore::docker_client()
  # Run the tenforce Virtuoso image detached, mounting datadir so that bulk
  # imports from /var/data are permitted (VIRT_Parameters_DirsAllowed).
  # Port 1111 is Virtuoso's ISQL/ODBC port.
  docker$container$run("tenforce/virtuoso:1.3.1-virtuoso7.2.2",
    detach = TRUE,
    volumes = paste0(normalizePath(datadir), ":/var/data"),
    env = c(
      "SPARQL_UPDATE" = "true",
      "DEFAULT_GRAPH" = "http://www.example.com/my-graph",
      "DBA_PASSWORD" = "dba",
      "VIRT_Parameters_DirsAllowed" = "/var/data"
    ),
    ports = "1111:1111"
  )
}
/scratch/gouwar.j/cran-all/cranData/virtuoso/inst/examples/docker.R
# Example: load the rOpenSci registry codemeta JSON into Virtuoso as n-quads
# and query it with SPARQL.

library(virtuoso)
library(rdftools) # for write_nquads
library(dplyr)

## Transform JSON (or list data) into nquads
x <- jsonlite::read_json(paste0(
  "https://raw.githubusercontent.com/",
  "ropensci/roregistry/ex/codemeta.json"
))
# NOTE(review): write_nquads is attached from rdftools above but namespaced to
# virtuoso here — confirm which package is meant to provide it.
virtuoso::write_nquads(x, "ropensci.nq", prefix = "http://schema.org/")

## And here we go
# Start a local Virtuoso server, connect over ODBC, and bulk-import the quads.
vos_start()
con <- vos_connect()
vos_import(con, "ropensci.nq")

## Find all packages where Carl Boettiger is an author,
## and return: package name, license, and co-author surnames
query <- "PREFIX schema: <http://schema.org/> SELECT DISTINCT ?package ?license ?coauthor WHERE { ?s schema:identifier ?package ; schema:author ?author ; schema:license ?license ; schema:name ?name ; schema:author ?coauth . ?author schema:givenName 'Carl' . ?author schema:familyName 'Boettiger' . ?coauth schema:familyName ?coauthor }"

vos_query(con, query) %>%
  as_tibble() %>%
  mutate(license = basename(license))

# Same query composed with rdftools' internal SPARQL-builder helpers
# (`:::` reaches unexported functions — fragile across rdftools versions).
query <- rdftools:::sparql_op() %>%
  rdftools:::select("identifier", "license", prefix = "http://schema.org/") %>%
  rdftools:::filter(author.familyName == "Boettiger",
                    author.givenName == "Carl",
                    prefix = "http://schema.org/") %>%
  rdftools:::sparql_build()
vos_query(con, query)
/scratch/gouwar.j/cran-all/cranData/virtuoso/inst/examples/json.R
---
title: "Introduction: Virtuoso Installation and Configuration"
author: "Carl Boettiger"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Installation}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r setup, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

[Virtuoso](https://en.wikipedia.org/wiki/Virtuoso_Universal_Server) is a high-performance "universal server" that can act as both a relational database (supporting standard SQL queries) and an RDF triplestore (supporting SPARQL queries). Virtuoso supports communication over the standard ODBC interface, and so R users can potentially connect to Virtuoso merely by installing the server and using the `odbc` R package. However, installation can present a few gotchas to users unfamiliar with Virtuoso. This package seeks to streamline the process of installing, managing, and querying a Virtuoso server. While the package can also be used merely to provide a standard `DBI` connection to an RDBMS, e.g. as a `dplyr` back-end, Virtuoso's popularity and performance is particularly notable with respect to RDF data and SPARQL queries, so most examples focus on those use cases.

## Installation

The `virtuoso` package provides installation helpers for both Mac OSX and Windows users through the function `vos_install()`. At the time of writing, the Mac OS X installer uses Homebrew to install the Virtuoso Open Source server (similar to the `hugo` installer in RStudio's `blogdown`). On Windows, `vos_install()` downloads and executes the Windows self-extracting archive (`.exe` file), which will open a standard installation dialog in interactive mode, or be run automatically if not run in an interactive session. No automated installer is provided for Linux systems; Linux users are encouraged to simply install the appropriate binaries for their distribution (e.g. `apt-get install -y virtuoso-opensource` on Debian/Ubuntu systems).
## Configuration

Virtuoso Open Source configuration is controlled by a `virtuoso.ini` file, which sets, among other things, which directories can be accessed for tasks such as bulk import, as well as performance tweaks such as available memory. Unfortunately, the Virtuoso server process (`virtuoso-t` application) cannot start without a path to an appropriate config file, and the installers (e.g. on both Windows and Linux) frequently install an example `virtuoso.ini` to a location which can be hard to find and for which users do not have permission to edit directly. Moreover, the file format is not always intuitive to edit. The `virtuoso` package thus helps locate this file and provides a helper function, `vos_configure()`, to create and modify this configuration file. Because reasonable defaults are also provided by this function, users should usually not need to call this function manually. `vos_configure()` is called automatically from `vos_start()` if the path to a `virtuoso.ini` file is not passed to `vos_start()`.

In addition to configuring Virtuoso's settings through a `virtuoso.ini` file, the other common barrier is setting up the driver for the ODBC connection. Some installers (Mac, Linux) do not automatically add the appropriate driver to an active `odbcinst.ini` file with a predictable Driver Server Name, which we need to know to initiate the ODBC connection. An internal helper function handles identifying drivers and establishing the appropriate `odbcinst.ini` automatically when necessary.

## Server management

Lastly, Virtuoso Open Source is often run as a system service, starting when the operating system starts. This is often undesirable, as the casual laptop user does not want the service running all the time, and can be difficult to control for users unfamiliar with managing such background services on their operating systems. Instead of this behavior, the `virtuoso` package provides an explicit interface to control the external server.
The server only starts when created by `vos_start()`, and ends automatically when the R process ends, or can be killed, paused, or resumed at any time from R (e.g. via `vos_kill()`). Helper utilities can also query the status and logs of the server from R. As with most database servers, data persists to disk, at an appropriate location for the OS determined by the `rappdirs` package, and a helper utility, `vos_delete_db()`, can remove this persistent storage location. Users can also connect directly to any existing (local or remote) Virtuoso instance by passing the appropriate information to `vos_connect()`, which can be convenient for queries. Note that the Virtuoso back-end provided by the R package `rdflib` can also connect to any Virtuoso server created by the `virtuoso` R package, though loading and queries through the `redland` libraries used by `rdflib` will generally be slower than direct calls over ODBC via the `virtuoso` package functions, often dramatically so for large triplestores.
/scratch/gouwar.j/cran-all/cranData/virtuoso/vignettes/installation.Rmd
#' CD4 Cell Count or Viral Load Ensemble Learning Through Stacking of Models.
#'
#' Stacking ensemble approach to combine predictions from various models,
#' allowing for grid search of tuning hyperparameters.
#'
#' @param outcome The name of the outcome variable.
#' @param traindata The training dataset.
#' @param viralvars Vector of variable names related to viral data.
#' @param logbase The base for logarithmic transformations.
#' @param seed Seed for reproducibility.
#' @param repetitions Number of repetitions for cross-validation.
#' @param gridsize Size of the grid for hyperparameter tuning.
#'
#' @return A stacked ensemble model.
#' @export
#'
#' @examples
#' \donttest{
#' library(baguette)
#' library(kernlab)
#' library(kknn)
#' library(ranger)
#' library(rules)
#' data("cd_train", package = "viruslearner")
#' outcome <- "cd_2023"
#' traindata <- cd_train
#' viralvars <- c("vl_2019", "vl_2021", "vl_2022", "vl_2023")
#' logbase <- 10
#' seed <- 1501
#' repetitions <- 2
#' gridsize <- 1
#' cd_ens(outcome, traindata, viralvars, logbase, seed, repetitions, gridsize)
#' }
cd_ens <- function(outcome, traindata, viralvars, logbase, seed, repetitions, gridsize) {
  set.seed(seed)
  stacks::stacks() |>
    stacks::add_candidates(
      dplyr::bind_rows(
        # Workflow set 1: raw predictors with tree-based models.
        workflowsets::workflow_set(
          preproc = list(simple = workflows::workflow_variables(
            outcomes = tidyselect::all_of(outcome),
            predictors = tidyselect::everything()
          )),
          # FIX: the random forest entry previously read `rf_spec <- ...`,
          # which assigned into the enclosing environment and left this list
          # element unnamed; `=` names the candidate like its siblings.
          models = list(
            rf_spec = parsnip::rand_forest(mtry = hardhat::tune(),
                                           min_n = hardhat::tune(),
                                           trees = hardhat::tune()) |>
              parsnip::set_engine("ranger") |>
              parsnip::set_mode("regression"),
            CART_bagged_cd = parsnip::bag_tree() |>
              parsnip::set_engine("rpart", times = 50L) |>
              parsnip::set_mode("regression"),
            Cubist_cd = parsnip::cubist_rules(committees = hardhat::tune(),
                                              neighbors = hardhat::tune()) |>
              parsnip::set_engine("Cubist")
          )
        ),
        # Workflow set 2: log-transform viral variables, normalize predictors;
        # kernel, distance and neural-network models.
        workflowsets::workflow_set(
          preproc = list(normalized = recipes::recipe(
            stats::as.formula(paste(outcome, "~ .")), data = traindata) |>
            recipes::step_log(tidyselect::all_of(viralvars), base = logbase) |>
            recipes::step_normalize(recipes::all_predictors())),
          models = list(
            SVM_radial_cd = parsnip::svm_rbf(cost = hardhat::tune(),
                                             rbf_sigma = hardhat::tune()) |>
              parsnip::set_engine("kernlab") |>
              parsnip::set_mode("regression"),
            SVM_poly_cd = parsnip::svm_poly(cost = hardhat::tune(),
                                            degree = hardhat::tune()) |>
              parsnip::set_engine("kernlab") |>
              parsnip::set_mode("regression"),
            KNN_cd = parsnip::nearest_neighbor(neighbors = hardhat::tune(),
                                               dist_power = hardhat::tune(),
                                               weight_func = hardhat::tune()) |>
              parsnip::set_engine("kknn") |>
              parsnip::set_mode("regression"),
            neural_network_cd = parsnip::mlp(hidden_units = hardhat::tune(),
                                             penalty = hardhat::tune(),
                                             epochs = hardhat::tune()) |>
              parsnip::set_engine("nnet", MaxNWts = 2600) |>
              parsnip::set_mode("regression")
          )
        ) |>
          # Widen the tunable range of hidden units for the neural network.
          # NOTE(review): `recipes::update` — the generic is `stats::update`
          # with the method registered by dials; confirm this namespace lookup
          # resolves in the installed package versions.
          workflowsets::option_add(
            param_info = parsnip::mlp(hidden_units = hardhat::tune(),
                                      penalty = hardhat::tune(),
                                      epochs = hardhat::tune()) |>
              parsnip::set_engine("nnet", MaxNWts = 2600) |>
              parsnip::set_mode("regression") |>
              tune::extract_parameter_set_dials() |>
              recipes::update(hidden_units = dials::hidden_units(c(1, 27))),
            id = "normalized_neural_network_cd"
          ),
        # Workflow set 3: full quadratic expansion with pairwise interactions.
        workflowsets::workflow_set(
          preproc = list(full_quad = recipes::recipe(
            stats::as.formula(paste(outcome, "~ .")), data = traindata) |>
            recipes::step_log(tidyselect::all_of(viralvars), base = logbase) |>
            recipes::step_normalize(recipes::all_predictors()) |>
            recipes::step_poly(recipes::all_predictors()) |>
            recipes::step_interact(~ recipes::all_predictors():recipes::all_predictors())
          ),
          models = list(
            linear_reg_cd = parsnip::linear_reg(penalty = hardhat::tune(),
                                                mixture = hardhat::tune()) |>
              parsnip::set_engine("glmnet"),
            KNN_cd = parsnip::nearest_neighbor(neighbors = hardhat::tune(),
                                               dist_power = hardhat::tune(),
                                               weight_func = hardhat::tune()) |>
              parsnip::set_engine("kknn") |>
              parsnip::set_mode("regression")
          )
        )
      ) |>
        # Tune every candidate workflow over repeated V-fold cross-validation.
        workflowsets::workflow_map(
          seed = seed,
          resamples = rsample::vfold_cv(traindata, repeats = repetitions),
          grid = gridsize,
          control = tune::control_grid(
            save_pred = TRUE,
            parallel_over = "everything",
            save_workflow = TRUE
          )
        )
    ) |>
    # Blend candidate predictions with a non-negative lasso over the penalty grid.
    stacks::blend_predictions(penalty = 10^seq(-2, -0.5, length = 20))
}
/scratch/gouwar.j/cran-all/cranData/viruslearner/R/cd_ens.R
#' Fit and Evaluate Stacked Ensemble Model for CD4 Cell Count or Viral Load Outcome
#'
#' This function builds a stacked ensemble model using various preprocessing and modeling workflows.
#'
#' @param outcome The name of the outcome variable.
#' @param traindata The training dataset used for building the ensemble.
#' @param viralvars A vector of variable names representing viral variables.
#' @param logbase The base for the logarithmic transformation in preprocessing.
#' @param seed Seed for reproducibility.
#' @param repetitions Number of repetitions for cross-validation.
#' @param gridsize Number of grid points for hyperparameter tuning.
#' @param testdata The test dataset for evaluating the ensemble.
#' @param predicted Column name of the predicted variable in a regression tidy format.
#'
#' @return A tibble containing the root mean squared error (RMSE) and coefficient of determination (R2) metrics.
#'
#' @export
#'
#' @examples
#' \donttest{
#' library(baguette)
#' library(kernlab)
#' library(kknn)
#' library(ranger)
#' library(rules)
#' data("cd_train", package = "viruslearner")
#' data("cd_test", package = "viruslearner")
#' outcome <- "cd_2023"
#' traindata <- cd_train
#' viralvars <- c("vl_2019", "vl_2021", "vl_2022", "vl_2023")
#' logbase <- 10
#' seed <- 1501
#' repetitions <- 2
#' gridsize <- 1
#' testdata <- cd_test
#' predicted <- ".pred"
#' cd_fit(outcome, traindata, viralvars, logbase, seed, repetitions, gridsize, testdata, predicted)
#' }
cd_fit <- function(outcome, traindata, viralvars, logbase, seed, repetitions, gridsize, testdata, predicted) {
  set.seed(seed)
  stacks::stacks() |>
    stacks::add_candidates(
      dplyr::bind_rows(
        # Workflow set 1: raw predictors with tree-based models.
        workflowsets::workflow_set(
          preproc = list(simple = workflows::workflow_variables(
            outcomes = tidyselect::all_of(outcome),
            predictors = tidyselect::everything()
          )),
          # FIX: the random forest entry previously read `rf_spec <- ...`,
          # which assigned into the enclosing environment and left this list
          # element unnamed; `=` names the candidate like its siblings.
          models = list(
            rf_spec = parsnip::rand_forest(mtry = hardhat::tune(),
                                           min_n = hardhat::tune(),
                                           trees = hardhat::tune()) |>
              parsnip::set_engine("ranger") |>
              parsnip::set_mode("regression"),
            CART_bagged_cd = parsnip::bag_tree() |>
              parsnip::set_engine("rpart", times = 50L) |>
              parsnip::set_mode("regression"),
            Cubist_cd = parsnip::cubist_rules(committees = hardhat::tune(),
                                              neighbors = hardhat::tune()) |>
              parsnip::set_engine("Cubist")
          )
        ),
        # Workflow set 2: log-transform viral variables, normalize predictors;
        # kernel, distance and neural-network models.
        workflowsets::workflow_set(
          preproc = list(normalized = recipes::recipe(
            stats::as.formula(paste(outcome, "~ .")), data = traindata) |>
            recipes::step_log(tidyselect::all_of(viralvars), base = logbase) |>
            recipes::step_normalize(recipes::all_predictors())),
          models = list(
            SVM_radial_cd = parsnip::svm_rbf(cost = hardhat::tune(),
                                             rbf_sigma = hardhat::tune()) |>
              parsnip::set_engine("kernlab") |>
              parsnip::set_mode("regression"),
            SVM_poly_cd = parsnip::svm_poly(cost = hardhat::tune(),
                                            degree = hardhat::tune()) |>
              parsnip::set_engine("kernlab") |>
              parsnip::set_mode("regression"),
            KNN_cd = parsnip::nearest_neighbor(neighbors = hardhat::tune(),
                                               dist_power = hardhat::tune(),
                                               weight_func = hardhat::tune()) |>
              parsnip::set_engine("kknn") |>
              parsnip::set_mode("regression"),
            neural_network_cd = parsnip::mlp(hidden_units = hardhat::tune(),
                                             penalty = hardhat::tune(),
                                             epochs = hardhat::tune()) |>
              parsnip::set_engine("nnet", MaxNWts = 2600) |>
              parsnip::set_mode("regression")
          )
        ) |>
          # Widen the tunable range of hidden units for the neural network.
          # NOTE(review): `recipes::update` — the generic is `stats::update`
          # with the method registered by dials; confirm this namespace lookup
          # resolves in the installed package versions.
          workflowsets::option_add(
            param_info = parsnip::mlp(hidden_units = hardhat::tune(),
                                      penalty = hardhat::tune(),
                                      epochs = hardhat::tune()) |>
              parsnip::set_engine("nnet", MaxNWts = 2600) |>
              parsnip::set_mode("regression") |>
              tune::extract_parameter_set_dials() |>
              recipes::update(hidden_units = dials::hidden_units(c(1, 27))),
            id = "normalized_neural_network_cd"
          ),
        # Workflow set 3: full quadratic expansion with pairwise interactions.
        workflowsets::workflow_set(
          preproc = list(full_quad = recipes::recipe(
            stats::as.formula(paste(outcome, "~ .")), data = traindata) |>
            recipes::step_log(tidyselect::all_of(viralvars), base = logbase) |>
            recipes::step_normalize(recipes::all_predictors()) |>
            recipes::step_poly(recipes::all_predictors()) |>
            recipes::step_interact(~ recipes::all_predictors():recipes::all_predictors())
          ),
          models = list(
            linear_reg_cd = parsnip::linear_reg(penalty = hardhat::tune(),
                                                mixture = hardhat::tune()) |>
              parsnip::set_engine("glmnet"),
            KNN_cd = parsnip::nearest_neighbor(neighbors = hardhat::tune(),
                                               dist_power = hardhat::tune(),
                                               weight_func = hardhat::tune()) |>
              parsnip::set_engine("kknn") |>
              parsnip::set_mode("regression")
          )
        )
      ) |>
        # Tune every candidate workflow over repeated V-fold cross-validation.
        workflowsets::workflow_map(
          seed = seed,
          resamples = rsample::vfold_cv(traindata, repeats = repetitions),
          grid = gridsize,
          control = tune::control_grid(
            save_pred = TRUE,
            parallel_over = "everything",
            save_workflow = TRUE
          )
        )
    ) |>
    # Blend, fit the retained members, predict on the test set, and score.
    stacks::blend_predictions(penalty = 10^seq(-2, -0.5, length = 20)) |>
    stacks::fit_members() |>
    stacks::predict.model_stack(testdata) |>
    dplyr::bind_cols(testdata) |>
    yardstick::metric_set(yardstick::rmse, yardstick::rsq)(tidyselect::all_of(outcome), tidyselect::all_of(predicted))
}
/scratch/gouwar.j/cran-all/cranData/viruslearner/R/cd_fit.R
#' CD4 Cell Count or Viral Load Plot of Blending Coefficients for the Stacking Ensemble
#'
#' A plot of the contribution of each model obtained using ensemble learning
#' through stacking of models and grid search for tuning hyperparameters of CD4
#' cell count or viral load outcomes.
#'
#' @param outcome The outcome variable name (CD4 cell count).
#' @param traindata The training data set.
#' @param viralvars Vector of viral load variables.
#' @param logbase Logarithm base for viral load transformations.
#' @param seed Seed for reproducibility.
#' @param repetitions Number of repetitions for cross-validation.
#' @param gridsize Size of the grid for hyperparameter tuning.
#'
#' @return A plot of a data stack with multiple model definitions and candidate members.
#' @export
#'
#' @examples
#' \donttest{
#' library(baguette)
#' library(kernlab)
#' library(kknn)
#' library(ranger)
#' library(rules)
#' data("cd_train", package = "viruslearner")
#' outcome <- "cd_2023"
#' traindata <- cd_train
#' viralvars <- c("vl_2019", "vl_2021", "vl_2022", "vl_2023")
#' logbase <- 10
#' seed <- 1501
#' repetitions <- 2
#' gridsize <- 1
#' cd_stack(outcome, traindata, viralvars, logbase, seed, repetitions, gridsize)
#' }
cd_stack <- function(outcome, traindata, viralvars, logbase, seed, repetitions, gridsize) {
  set.seed(seed)
  stacks::stacks() |>
    stacks::add_candidates(
      dplyr::bind_rows(
        # Workflow set 1: raw predictors with tree-based models.
        workflowsets::workflow_set(
          preproc = list(simple = workflows::workflow_variables(
            outcomes = tidyselect::all_of(outcome),
            predictors = tidyselect::everything()
          )),
          # FIX: the random forest entry previously read `rf_spec <- ...`,
          # which assigned into the enclosing environment and left this list
          # element unnamed; `=` names the candidate like its siblings.
          models = list(
            rf_spec = parsnip::rand_forest(mtry = hardhat::tune(),
                                           min_n = hardhat::tune(),
                                           trees = hardhat::tune()) |>
              parsnip::set_engine("ranger") |>
              parsnip::set_mode("regression"),
            CART_bagged_cd = parsnip::bag_tree() |>
              parsnip::set_engine("rpart", times = 50L) |>
              parsnip::set_mode("regression"),
            Cubist_cd = parsnip::cubist_rules(committees = hardhat::tune(),
                                              neighbors = hardhat::tune()) |>
              parsnip::set_engine("Cubist")
          )
        ),
        # Workflow set 2: log-transform viral variables, normalize predictors;
        # kernel, distance and neural-network models.
        workflowsets::workflow_set(
          preproc = list(normalized = recipes::recipe(
            stats::as.formula(paste(outcome, "~ .")), data = traindata) |>
            recipes::step_log(tidyselect::all_of(viralvars), base = logbase) |>
            recipes::step_normalize(recipes::all_predictors())),
          models = list(
            SVM_radial_cd = parsnip::svm_rbf(cost = hardhat::tune(),
                                             rbf_sigma = hardhat::tune()) |>
              parsnip::set_engine("kernlab") |>
              parsnip::set_mode("regression"),
            SVM_poly_cd = parsnip::svm_poly(cost = hardhat::tune(),
                                            degree = hardhat::tune()) |>
              parsnip::set_engine("kernlab") |>
              parsnip::set_mode("regression"),
            KNN_cd = parsnip::nearest_neighbor(neighbors = hardhat::tune(),
                                               dist_power = hardhat::tune(),
                                               weight_func = hardhat::tune()) |>
              parsnip::set_engine("kknn") |>
              parsnip::set_mode("regression"),
            neural_network_cd = parsnip::mlp(hidden_units = hardhat::tune(),
                                             penalty = hardhat::tune(),
                                             epochs = hardhat::tune()) |>
              parsnip::set_engine("nnet", MaxNWts = 2600) |>
              parsnip::set_mode("regression")
          )
        ) |>
          # Widen the tunable range of hidden units for the neural network.
          # NOTE(review): `recipes::update` — the generic is `stats::update`
          # with the method registered by dials; confirm this namespace lookup
          # resolves in the installed package versions.
          workflowsets::option_add(
            param_info = parsnip::mlp(hidden_units = hardhat::tune(),
                                      penalty = hardhat::tune(),
                                      epochs = hardhat::tune()) |>
              parsnip::set_engine("nnet", MaxNWts = 2600) |>
              parsnip::set_mode("regression") |>
              tune::extract_parameter_set_dials() |>
              recipes::update(hidden_units = dials::hidden_units(c(1, 27))),
            id = "normalized_neural_network_cd"
          ),
        # Workflow set 3: full quadratic expansion with pairwise interactions.
        workflowsets::workflow_set(
          preproc = list(full_quad = recipes::recipe(
            stats::as.formula(paste(outcome, "~ .")), data = traindata) |>
            recipes::step_log(tidyselect::all_of(viralvars), base = logbase) |>
            recipes::step_normalize(recipes::all_predictors()) |>
            recipes::step_poly(recipes::all_predictors()) |>
            recipes::step_interact(~ recipes::all_predictors():recipes::all_predictors())
          ),
          models = list(
            linear_reg_cd = parsnip::linear_reg(penalty = hardhat::tune(),
                                                mixture = hardhat::tune()) |>
              parsnip::set_engine("glmnet"),
            KNN_cd = parsnip::nearest_neighbor(neighbors = hardhat::tune(),
                                               dist_power = hardhat::tune(),
                                               weight_func = hardhat::tune()) |>
              parsnip::set_engine("kknn") |>
              parsnip::set_mode("regression")
          )
        )
      ) |>
        # Tune every candidate workflow over repeated V-fold cross-validation.
        workflowsets::workflow_map(
          seed = seed,
          resamples = rsample::vfold_cv(traindata, repeats = repetitions),
          grid = gridsize,
          control = tune::control_grid(
            save_pred = TRUE,
            parallel_over = "everything",
            save_workflow = TRUE
          )
        )
    ) |>
    # Blend, then plot the stacking (blending) coefficients of each member.
    stacks::blend_predictions(penalty = 10^seq(-2, -0.5, length = 20)) |>
    stacks::autoplot("weights")
}
/scratch/gouwar.j/cran-all/cranData/viruslearner/R/cd_stack.R
# Roxygen documentation for the datasets shipped with viruslearner.
# Each quoted name at the end of a block is the roxygen2 convention for
# documenting a lazy-loaded data object.
# (Fixed: recovery_rate_2023 previously read "from 2023 to 2022".)

#' Viral Rates Dataset
#'
#' @description The dataset contains information about patients, specifically
#' their CD4 T cell counts (cd_2018, cd_2019, cd_2021, cd_2022, cd_2023) and
#' viral loads (vl_2019, vl_2021, vl_2022, vl_2023). For modeling patient
#' recovery and viral load persistence or suppression, column `cd_2023` is
#' identified as the outcome variable for CD4 cell counts, and column `vl_2023`
#' is identified as the outcome variable for viral load. The dataset also
#' contains information about variables related to adherence to antiretroviral
#' therapy (ART).
#'
#' @format A data frame with 87 rows and 21 variables:
#' \describe{
#'   \item{cd_2018}{CD4 count in 2018.}
#'   \item{cd_2019}{CD4 count in 2019.}
#'   \item{vl_2019}{Viral load in 2019.}
#'   \item{cd_2021}{CD4 count in 2021.}
#'   \item{vl_2021}{Viral load in 2021.}
#'   \item{cd_2022}{CD4 count in 2022.}
#'   \item{vl_2022}{Viral load in 2022.}
#'   \item{cd_2023}{CD4 count in 2023.}
#'   \item{vl_2023}{Viral load in 2023.}
#'   \item{recovery_rate_2019}{CD4 count recovery rate from 2018 to 2019.}
#'   \item{recovery_rate_2021}{CD4 count recovery rate from 2019 to 2021.}
#'   \item{recovery_rate_2022}{CD4 count recovery rate from 2021 to 2022.}
#'   \item{recovery_rate_2023}{CD4 count recovery rate from 2022 to 2023.}
#'   \item{viral_rate_2021}{Viral load rate of change from 2019 to 2021 (log10).}
#'   \item{viral_rate_2022}{Viral load rate of change from 2021 to 2022 (log10).}
#'   \item{viral_rate_2023}{Viral load rate of change from 2022 to 2023 (log10).}
#'   \item{adherence_1}{First principal component analysis scores representing adherence to ART.}
#'   \item{adherence_2}{Second principal component analysis scores representing adherence to ART.}
#'   \item{adherence_3}{Third principal component analysis scores representing adherence to ART.}
#'   \item{adherence_4}{Fourth principal component analysis scores representing adherence to ART.}
#'   \item{adherence_5}{Fifth principal component analysis scores representing adherence to ART.}
#' }
#' @examples
#' \donttest{
#' # Load the dataset
#' data("viralrates", package = "viruslearner")
#' # Explore the dataset
#' library(dplyr)
#' dplyr::glimpse(viralrates)
#' }
"viralrates"

#' Viral Rates Dataset for Training CD4 Counts Outcome
#'
#' @description This training dataset contains information about patients,
#' specifically their CD4 T cell counts (cd_2018, cd_2019, cd_2021, cd_2022,
#' cd_2023) and viral loads (vl_2019, vl_2021, vl_2022, vl_2023). For modeling
#' patient recovery, column `cd_2023` is identified as the outcome variable.
#' The dataset also contains information about variables related to adherence to
#' antiretroviral therapy (ART).
#'
#' @format A data frame with 65 rows and 21 variables:
#' \describe{
#'   \item{cd_2018}{CD4 count in 2018.}
#'   \item{cd_2019}{CD4 count in 2019.}
#'   \item{vl_2019}{Viral load in 2019.}
#'   \item{cd_2021}{CD4 count in 2021.}
#'   \item{vl_2021}{Viral load in 2021.}
#'   \item{cd_2022}{CD4 count in 2022.}
#'   \item{vl_2022}{Viral load in 2022.}
#'   \item{cd_2023}{CD4 count in 2023.}
#'   \item{vl_2023}{Viral load in 2023.}
#'   \item{recovery_rate_2019}{CD4 count recovery rate from 2018 to 2019.}
#'   \item{recovery_rate_2021}{CD4 count recovery rate from 2019 to 2021.}
#'   \item{recovery_rate_2022}{CD4 count recovery rate from 2021 to 2022.}
#'   \item{recovery_rate_2023}{CD4 count recovery rate from 2022 to 2023.}
#'   \item{viral_rate_2021}{Viral load rate of change from 2019 to 2021 (log10).}
#'   \item{viral_rate_2022}{Viral load rate of change from 2021 to 2022 (log10).}
#'   \item{viral_rate_2023}{Viral load rate of change from 2022 to 2023 (log10).}
#'   \item{adherence_1}{First principal component analysis scores representing adherence to ART.}
#'   \item{adherence_2}{Second principal component analysis scores representing adherence to ART.}
#'   \item{adherence_3}{Third principal component analysis scores representing adherence to ART.}
#'   \item{adherence_4}{Fourth principal component analysis scores representing adherence to ART.}
#'   \item{adherence_5}{Fifth principal component analysis scores representing adherence to ART.}
#' }
#' @examples
#' \donttest{
#' # Load the dataset
#' data("cd_train", package = "viruslearner")
#' # Explore the dataset
#' library(dplyr)
#' dplyr::glimpse(cd_train)
#' }
"cd_train"

#' Viral Rates Dataset for Training Viral Load Outcome
#'
#' @description The training dataset contains information about patients,
#' specifically their CD4 T cell counts (cd_2018, cd_2019, cd_2021, cd_2022,
#' cd_2023) and viral loads (vl_2019, vl_2021, vl_2022, vl_2023). For modeling
#' patient viral load persistence or suppression, column `vl_2023` is
#' identified as the outcome variable. The dataset also contains information
#' about variables related to adherence to antiretroviral therapy (ART).
#'
#' @format A data frame with 65 rows and 21 variables:
#' \describe{
#'   \item{cd_2018}{CD4 count in 2018.}
#'   \item{cd_2019}{CD4 count in 2019.}
#'   \item{vl_2019}{Viral load in 2019.}
#'   \item{cd_2021}{CD4 count in 2021.}
#'   \item{vl_2021}{Viral load in 2021.}
#'   \item{cd_2022}{CD4 count in 2022.}
#'   \item{vl_2022}{Viral load in 2022.}
#'   \item{cd_2023}{CD4 count in 2023.}
#'   \item{vl_2023}{Viral load in 2023.}
#'   \item{recovery_rate_2019}{CD4 count recovery rate from 2018 to 2019.}
#'   \item{recovery_rate_2021}{CD4 count recovery rate from 2019 to 2021.}
#'   \item{recovery_rate_2022}{CD4 count recovery rate from 2021 to 2022.}
#'   \item{recovery_rate_2023}{CD4 count recovery rate from 2022 to 2023.}
#'   \item{viral_rate_2021}{Viral load rate of change from 2019 to 2021 (log10).}
#'   \item{viral_rate_2022}{Viral load rate of change from 2021 to 2022 (log10).}
#'   \item{viral_rate_2023}{Viral load rate of change from 2022 to 2023 (log10).}
#'   \item{adherence_1}{First principal component analysis scores representing adherence to ART.}
#'   \item{adherence_2}{Second principal component analysis scores representing adherence to ART.}
#'   \item{adherence_3}{Third principal component analysis scores representing adherence to ART.}
#'   \item{adherence_4}{Fourth principal component analysis scores representing adherence to ART.}
#'   \item{adherence_5}{Fifth principal component analysis scores representing adherence to ART.}
#' }
#' @examples
#' \donttest{
#' # Load the dataset
#' data("vl_train", package = "viruslearner")
#' # Explore the dataset
#' library(dplyr)
#' dplyr::glimpse(vl_train)
#' }
"vl_train"

#' Viral Rates Dataset for Testing CD4 Counts Outcome
#'
#' @description This testing dataset contains information about patients,
#' specifically their CD4 T cell counts (cd_2018, cd_2019, cd_2021, cd_2022,
#' cd_2023) and viral loads (vl_2019, vl_2021, vl_2022, vl_2023). For modeling
#' patient recovery, column `cd_2023` is identified as the outcome variable.
#' The dataset also contains information about variables related to adherence to
#' antiretroviral therapy (ART).
#'
#' @format A data frame with 22 rows and 21 variables:
#' \describe{
#'   \item{cd_2018}{CD4 count in 2018.}
#'   \item{cd_2019}{CD4 count in 2019.}
#'   \item{vl_2019}{Viral load in 2019.}
#'   \item{cd_2021}{CD4 count in 2021.}
#'   \item{vl_2021}{Viral load in 2021.}
#'   \item{cd_2022}{CD4 count in 2022.}
#'   \item{vl_2022}{Viral load in 2022.}
#'   \item{cd_2023}{CD4 count in 2023.}
#'   \item{vl_2023}{Viral load in 2023.}
#'   \item{recovery_rate_2019}{CD4 count recovery rate from 2018 to 2019.}
#'   \item{recovery_rate_2021}{CD4 count recovery rate from 2019 to 2021.}
#'   \item{recovery_rate_2022}{CD4 count recovery rate from 2021 to 2022.}
#'   \item{recovery_rate_2023}{CD4 count recovery rate from 2022 to 2023.}
#'   \item{viral_rate_2021}{Viral load rate of change from 2019 to 2021 (log10).}
#'   \item{viral_rate_2022}{Viral load rate of change from 2021 to 2022 (log10).}
#'   \item{viral_rate_2023}{Viral load rate of change from 2022 to 2023 (log10).}
#'   \item{adherence_1}{First principal component analysis scores representing adherence to ART.}
#'   \item{adherence_2}{Second principal component analysis scores representing adherence to ART.}
#'   \item{adherence_3}{Third principal component analysis scores representing adherence to ART.}
#'   \item{adherence_4}{Fourth principal component analysis scores representing adherence to ART.}
#'   \item{adherence_5}{Fifth principal component analysis scores representing adherence to ART.}
#' }
#' @examples
#' \donttest{
#' # Load the dataset
#' data("cd_test", package = "viruslearner")
#' # Explore the dataset
#' library(dplyr)
#' dplyr::glimpse(cd_test)
#' }
"cd_test"

#' Viral Rates Dataset for Testing Viral Load Outcome
#'
#' @description This testing dataset contains information about patients,
#' specifically their CD4 T cell counts (cd_2018, cd_2019, cd_2021, cd_2022,
#' cd_2023) and viral loads (vl_2019, vl_2021, vl_2022, vl_2023). For modeling
#' patient viral load persistence or suppression, column `vl_2023` is
#' identified as the outcome variable. The dataset also contains information
#' about variables related to adherence to antiretroviral therapy (ART).
#'
#' @format A data frame with 22 rows and 21 variables:
#' \describe{
#'   \item{cd_2018}{CD4 count in 2018.}
#'   \item{cd_2019}{CD4 count in 2019.}
#'   \item{vl_2019}{Viral load in 2019.}
#'   \item{cd_2021}{CD4 count in 2021.}
#'   \item{vl_2021}{Viral load in 2021.}
#'   \item{cd_2022}{CD4 count in 2022.}
#'   \item{vl_2022}{Viral load in 2022.}
#'   \item{cd_2023}{CD4 count in 2023.}
#'   \item{vl_2023}{Viral load in 2023.}
#'   \item{recovery_rate_2019}{CD4 count recovery rate from 2018 to 2019.}
#'   \item{recovery_rate_2021}{CD4 count recovery rate from 2019 to 2021.}
#'   \item{recovery_rate_2022}{CD4 count recovery rate from 2021 to 2022.}
#'   \item{recovery_rate_2023}{CD4 count recovery rate from 2022 to 2023.}
#'   \item{viral_rate_2021}{Viral load rate of change from 2019 to 2021 (log10).}
#'   \item{viral_rate_2022}{Viral load rate of change from 2021 to 2022 (log10).}
#'   \item{viral_rate_2023}{Viral load rate of change from 2022 to 2023 (log10).}
#'   \item{adherence_1}{First principal component analysis scores representing adherence to ART.}
#'   \item{adherence_2}{Second principal component analysis scores representing adherence to ART.}
#'   \item{adherence_3}{Third principal component analysis scores representing adherence to ART.}
#'   \item{adherence_4}{Fourth principal component analysis scores representing adherence to ART.}
#'   \item{adherence_5}{Fifth principal component analysis scores representing adherence to ART.}
#' }
#' @examples
#' \donttest{
#' # Load the dataset
#' data("vl_test", package = "viruslearner")
#' # Explore the dataset
#' library(dplyr)
#' dplyr::glimpse(vl_test)
#' }
"vl_test"
/scratch/gouwar.j/cran-all/cranData/viruslearner/R/data.R
#' viruslearner: package-level documentation stub.
#'
#' Documenting the sentinel string "_PACKAGE" is the roxygen2 convention for
#' generating the package help topic (?viruslearner).
#' @keywords internal
"_PACKAGE"

## usethis namespace: start
# (usethis inserts @importFrom / @useDynLib tags between these markers.)
## usethis namespace: end
NULL
/scratch/gouwar.j/cran-all/cranData/viruslearner/R/viruslearner-package.R
#' Get Domain Report
#'
#' Retrieves report on a given domain, including passive DNS, urls detected by at least one url scanner.
#' Gives category of the domain from bitdefender.
#' Deprecated: use \code{get_domain_info} instead.
#'
#' @param domain domain name. String. Required.
#' @param \dots Additional arguments passed to \code{\link{virustotal2_GET}}.
#'
#' @return named list with the following possible items:
#' \code{`BitDefender category`, undetected_referrer_samples, whois_timestamp,
#' detected_downloaded_samples, detected_referrer_samples, `Webutation domain info`, `Alexa category`, undetected_downloaded_samples,
#' resolutions, detected_communicating_samples, `Opera domain info`, `TrendMicro category`, categories, domain_siblings,
#' `BitDefender domain info`, whois, `Alexa domain info`, response_code, verbose_msg, `Websense ThreatSeeker category`, subdomains,
#' `WOT domain info`, detected_urls, `Alexa rank`, undetected_communicating_samples, `Dr.Web category`, pcaps}
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' domain_report("http://www.google.com")
#' domain_report("http://www.goodsfwrfw.com") # Domain not found
#' }
domain_report <- function(domain = NULL, ...) {
  if (!is.character(domain)) {
    stop("Must specify domain.\n")
  }
  # FIX: also strip https:// (previously only http:// was removed, unlike
  # the sibling get_domain_comments helper) — the API expects a bare domain.
  domain <- gsub("^https?://", "", domain)
  .Deprecated("get_domain_info")
  res <- virustotal2_GET(path = "domain/report",
                         query = list(domain = domain), ...)
  res
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/domain_report.R
#' Get File Scan Report
#'
#' @param hash Hash for the scan. String. Required.
#' @param \dots Additional arguments passed to \code{\link{virustotal_GET}}.
#'
#' @return data.frame with 16 columns:
#' \code{service, detected, version, update, result, scan_id, sha1, resource, response_code,
#' scan_date, permalink, verbose_msg, total, positives, sha256, md5}
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' file_report(hash='99017f6eebbac24f351415dd410d522d')
#' }

file_report <- function(hash = NULL, ...) {

  # The API needs a hash string; fail fast otherwise.
  if (!is.character(hash)) {
    stop("Must specify hash.\n")
  }

  params <- list(resource = hash)

  res <- virustotal_GET(path = "file/report", query = params, ...)

  # response_code of 0 means the hash is not in the VirusTotal database.
  # Return a one-row data.frame with whatever fields the API did send,
  # NA for the rest.
  if (res$response_code == 0) {
    # Fix: "total" and "positives" were previously fused into a single
    # malformed column name ("total, positives"), so the empty template
    # had 15 columns with one bad name instead of the documented 16.
    res_df <- read.table(text = "",
                         col.names = c("service", "detected", "version",
                                       "update", "result", "scan_id",
                                       "sha1", "resource", "response_code",
                                       "scan_date", "permalink",
                                       "verbose_msg", "total", "positives",
                                       "sha256", "md5"))
    res_df[1, match(names(res), names(res_df))] <- res
    return(res_df)
  }

  # One row per anti-virus engine, bound to the scalar summary fields.
  scan_results <- ldply(lapply(res$scans, unlist), rbind, .id = "service")
  res_df <- as.data.frame(cbind(scan_results, res[2:length(res)]))

  res_df
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/file_report.R
#' Retrieve comments for an Internet domain
#'
#' @param domain domain name. String. Required.
#' @param limit Number of entries. Integer. Optional.
#' @param cursor String. Optional.
#' @param \dots Additional arguments passed to \code{\link{virustotal_GET}}.
#'
#' @return named list
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' get_domain_comments("http://www.google.com")
#' get_domain_comments("http://www.goodsfwrfw.com") # Domain not found
#' }

get_domain_comments <- function(domain = NULL, limit = NULL, cursor = NULL, ...) {

  # Fix: the defaults were previously self-referential
  # (limit = limit, cursor = cursor), which triggers a
  # "promise already under evaluation" error whenever a caller supplied a
  # domain but omitted limit/cursor. NULL defaults (as in the sibling
  # get_domain_* functions) let the API apply its own defaults.

  if (!is.character(domain)) {
    stop("Must specify domain.\n")
  }

  # The endpoint expects a bare domain name, so strip the URL scheme.
  domain <- gsub("^http://|^https://", "", domain)

  res <- virustotal_GET(path = paste0("domains/", domain, "/comments"),
                        query = list(limit = limit, cursor = cursor), ...)

  res
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/get_domain_comments.R
#' Retrieve information about an Internet domain
#'
#' Fetches the VirusTotal v3 record for a domain.
#'
#' @param domain domain name. String. Required.
#' @param limit Number of entries. Integer. Optional.
#' @param cursor String. Optional.
#' @param \dots Additional arguments passed to \code{\link{virustotal_GET}}.
#'
#' @return named list
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' get_domain_info("http://www.google.com")
#' get_domain_info("http://www.goodsfwrfw.com") # Domain not found
#' }

get_domain_info <- function(domain = NULL, limit = NULL, cursor = NULL, ...) {

  # A character domain is mandatory.
  if (!is.character(domain)) {
    stop("Must specify domain.\n")
  }

  # The endpoint wants a bare hostname, so drop any URL scheme prefix.
  bare_domain <- gsub("^http://|^https://", "", domain)

  virustotal_GET(path = paste0("domains/", bare_domain),
                 query = list(limit = limit, cursor = cursor),
                 ...)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/get_domain_info.R
#' Retrieve related objects to an Internet domain
#'
#' Fetches objects related to a domain (subdomains by default).
#'
#' @param domain domain name. String. Required.
#' @param limit Number of entries. Integer. Optional.
#' @param cursor String. Optional.
#' @param relationship relationship name. String. Required. Default is \code{subdomains}.
#' For all the options see \url{https://developers.virustotal.com/v3.0/reference#domains-relationships}
#' @param \dots Additional arguments passed to \code{\link{virustotal_GET}}.
#'
#' @return named list
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' get_domain_relationship("https://www.google.com")
#' get_domain_relationship("https://www.goodsfwrfw.com") # Domain not found
#' }

get_domain_relationship <- function(domain = NULL, relationship = "subdomains",
                                    limit = NULL, cursor = NULL, ...) {

  # Require a character domain before touching the network.
  if (!is.character(domain)) {
    stop("Must specify domain.\n")
  }

  # Strip the URL scheme; the endpoint takes a bare hostname.
  host <- gsub("^http://|^https://", "", domain)

  # Build the relationships endpoint for the requested relationship type.
  endpoint <- paste0("domains/", host, "/relationships/", relationship)

  virustotal_GET(path = endpoint,
                 query = list(limit = limit, cursor = cursor),
                 ...)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/get_domain_relationship.R
#' Retrieve votes for an Internet domain
#'
#' @param domain domain name. String. Required.
#' @param limit Number of entries. Integer. Optional.
#' @param cursor String. Optional.
#' @param \dots Additional arguments passed to \code{\link{virustotal_GET}}.
#'
#' @return named list
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' get_domain_votes("http://www.google.com")
#' get_domain_votes("http://www.goodsfwrfw.com") # Domain not found
#' }

get_domain_votes <- function(domain = NULL, limit = NULL, cursor = NULL, ...) {

  # The API requires a domain string; fail fast on anything else.
  if (!is.character(domain)) {
    stop("Must specify domain.\n")
  }

  # The endpoint expects a bare domain name, so strip the URL scheme.
  # Fix: previously only "http://" was removed, so "https://..." inputs
  # kept the scheme, unlike the other get_domain_* functions.
  domain <- gsub("^http://|^https://", "", domain)

  res <- virustotal_GET(path = paste0("domains/", domain, "/votes"),
                        query = list(limit = limit, cursor = cursor), ...)

  res
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/get_domain_votes.R
#' Retrieve comments for an IP address
#'
#' @param ip IP Address. String. Required.
#' @param limit Number of entries. Integer. Optional.
#' @param cursor String. Optional.
#' @param \dots Additional arguments passed to \code{\link{virustotal_GET}}.
#'
#' @return named list
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' get_ip_comments("64.233.160.0")
#' }

get_ip_comments <- function(ip = NULL, limit = NULL, cursor = NULL, ...) {

  # An IP address string is mandatory.
  if (!is.character(ip)) {
    stop("Must specify an IP address.\n")
  }

  # Hit the v3 comments endpoint for this address.
  endpoint <- paste0("ip_addresses/", ip, "/comments")

  virustotal_GET(path = endpoint,
                 query = list(limit = limit, cursor = cursor),
                 ...)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/get_ip_comments.R
#' Retrieve information about an IP address
#'
#' Fetches the VirusTotal v3 record for an IP address.
#'
#' @param ip IP address. String. Required.
#' @param limit Number of entries. Integer. Optional.
#' @param cursor String. Optional.
#' @param \dots Additional arguments passed to \code{\link{virustotal_GET}}.
#'
#' @return named list
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' get_ip_info("64.233.160.0")
#' }

get_ip_info <- function(ip = NULL, limit = NULL, cursor = NULL, ...) {

  # An IP address string is mandatory.
  if (!is.character(ip)) {
    stop("Must specify an IP address.\n")
  }

  virustotal_GET(path = paste0("ip_addresses/", ip),
                 query = list(limit = limit, cursor = cursor),
                 ...)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/get_ip_info.R
#' Retrieve votes for an IP address
#'
#' @param ip IP address. String. Required.
#' @param limit Number of entries. Integer. Optional.
#' @param cursor String. Optional.
#' @param \dots Additional arguments passed to \code{\link{virustotal_GET}}.
#'
#' @return named list
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' get_ip_votes("64.233.160.0")
#' }

get_ip_votes <- function(ip = NULL, limit = NULL, cursor = NULL, ...) {

  # An IP address string is mandatory.
  if (!is.character(ip)) {
    stop("Must specify an IP address.\n")
  }

  # Hit the v3 votes endpoint for this address.
  endpoint <- paste0("ip_addresses/", ip, "/votes")

  virustotal_GET(path = endpoint,
                 query = list(limit = limit, cursor = cursor),
                 ...)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/get_ip_votes.R
#' Get IP Report
#'
#' Get passive DNS data and URLs detected by URL scanners.
#' Deprecated; use \code{\link{get_ip_info}} instead.
#'
#' @param ip a valid IPv4 address in dotted quad notation; String; Required
#' @param \dots Additional arguments passed to \code{\link{virustotal2_GET}}.
#'
#' @return named list with the following potential items:
#' \code{undetected_referrer_samples, detected_downloaded_samples, detected_referrer_samples,
#' undetected_downloaded_samples, detected_urls, undetected_downloaded_samples, response_code, as_owner, verbose_msg, country,
#' undetected_referrer_samples, detected_communicating_samples, resolutions, undetected_communicating_samples, asn}
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' ip_report(ip="8.8.8.8")
#' }

ip_report <- function(ip = NULL, ...) {

  # An IP address string is mandatory.
  if (!is.character(ip)) {
    stop("Must specify a valid IP.\n")
  }

  # Flag this v2 wrapper as deprecated before the request is made.
  .Deprecated("get_ip_info")

  virustotal2_GET(path = "ip-address/report",
                  query = list(ip = ip),
                  ...)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/ip_report.R
#' Add comments on Files and URLs
#'
#' Add comments on files and URLs. For instance, flagging false positives,
#' adding details about malware, instructions for cleaning malware, etc.
#' Deprecated v2 endpoint.
#'
#' @param hash hash for the resource you want to comment on; Required; String
#' @param comment review; Required; String
#' @param \dots Additional arguments passed to \code{\link{virustotal2_POST}}.
#'
#' @return data.frame with 2 columns: \code{response_code}, \code{verbose_msg}
#' \itemize{
#' \item If the hash is incorrect or a duplicate comment is posted, \code{response_code} will be \code{0}
#' \item If the hash is incorrect, \code{verbose_msg} will be \code{'Invalid resource'}
#' \item If a duplicate comment is posted, \code{verbose_msg} will be \code{'Duplicate comment'}
#' \item If a comment is posted successfully, \code{response_code} will be \code{1}
#' and \code{verbose_msg} will be \code{'Your comment was successfully posted'}
#' }
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' add_comments(hash='99017f6eebbac24f351415dd410d522d', comment="This is great.")
#'
#' }

add_comments <- function(hash = NULL, comment = NULL, ...) {

  # Both the hash and the comment must be character strings.
  if (!is.character(hash)) {
    stop("Must specify the hash.\n")
  }
  if (!is.character(comment)) {
    stop("Must provide an actual comment.\n")
  }

  payload <- list(resource = hash, comment = comment)

  .Deprecated("")

  res <- virustotal2_POST(path = "comments/put", query = payload, ...)

  # Flatten the response list into a one-row data.frame.
  as.data.frame(res)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/post_comments.R
#' Add a comment to an Internet domain
#'
#' @param domain domain name. String. Required.
#' @param comment vote. String. Required. Any word starting with # in your comment's text will be considered a tag, and added to the comment's tag attribute.
#' @param \dots Additional arguments passed to \code{\link{virustotal_POST}}.
#'
#' @return named list
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' post_domain_comments(domain = "https://google.com", comment = "Great!")
#' }

post_domain_comments <- function(domain = NULL, comment = NULL, ...) {

  # A character domain is mandatory.
  if (!is.character(domain)) {
    stop("Must specify domain.\n")
  }

  # The endpoint wants a bare hostname, so drop any URL scheme prefix.
  host <- gsub("^http://|^https://", "", domain)

  # Wrap the comment in the JSON envelope the v3 API expects.
  payload <- list("data" = list("type" = "comment",
                                "attributes" = list("text" = comment)))

  virustotal_POST(path = paste0("domains/", host, "/comments"),
                  body = payload, ...)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/post_domain_comments.R
#' Add a vote for a hostname or domain
#'
#' @param domain domain name. String. Required.
#' @param vote vote. String. Required.
#' @param \dots Additional arguments passed to \code{\link{virustotal_POST}}.
#'
#' @return named list
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' post_domain_votes("http://google.com", vote = "malicious")
#' }

post_domain_votes <- function(domain = NULL, vote = NULL, ...) {

  # A character domain is mandatory.
  if (!is.character(domain)) {
    stop("Must specify domain.\n")
  }

  # The endpoint wants a bare hostname, so drop any URL scheme prefix.
  host <- gsub("^http://|^https://", "", domain)

  # Wrap the verdict in the JSON envelope the v3 API expects.
  payload <- list("data" = list("type" = "vote",
                                "attributes" = list("verdict" = vote)))

  virustotal_POST(path = paste0("domains/", host, "/votes"),
                  body = payload, ...)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/post_domain_votes.R
#' Add a comment to an IP address
#'
#' @param ip IP address. String. Required.
#' @param comment Comment. String. Required.
#' @param \dots Additional arguments passed to \code{\link{virustotal_POST}}.
#'
#' @return named list
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' post_ip_comments(ip = "64.233.160.0", comment = "test")
#' }

post_ip_comments <- function(ip = NULL, comment = NULL, ...) {

  # An IP address string is mandatory.
  if (!is.character(ip)) {
    stop("Must specify an IP address.\n")
  }

  # Wrap the comment in the JSON envelope the v3 API expects.
  payload <- list("data" = list("type" = "comment",
                                "attributes" = list("text" = comment)))

  virustotal_POST(path = paste0("ip_addresses/", ip, "/comments"),
                  body = payload, ...)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/post_ip_comments.R
#' Add a vote for a IP address
#'
#' @param ip IP address. String. Required.
#' @param vote vote. String. Required.
#' @param \dots Additional arguments passed to \code{\link{virustotal_POST}}.
#'
#' @return named list
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' post_ip_votes(ip = "64.233.160.0", vote = "malicious")
#' }

post_ip_votes <- function(ip = NULL, vote = NULL, ...) {

  # An IP address string is mandatory.
  if (!is.character(ip)) {
    stop("Must specify an IP address.\n")
  }

  # Wrap the verdict in the JSON envelope the v3 API expects.
  payload <- list("data" = list("type" = "vote",
                                "attributes" = list("verdict" = vote)))

  virustotal_POST(path = paste0("ip_addresses/", ip, "/votes"),
                  body = payload, ...)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/post_ip_votes.R
#' Rescan already submitted files
#'
#' The function returns a data.frame with a \code{scan_id} and \code{sha256}, \code{sha1}, \code{md5} hashes,
#' all of which can be used to retrieve the report using \code{\link{file_report}}
#'
#' @param hash Hash for the scan. String. Required.
#' @param \dots Additional arguments passed to \code{\link{virustotal2_POST}}.
#'
#' @return data.frame with 12 columns:
#' \code{scans, scan_id, sha1, resource, response_code, scan_date, permalink, verbose_msg, total, positives, sha256, md5}
#' \code{response_code} is 0 if the file is not in the database (hash can't be found).
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' rescan_file(hash='99017f6eebbac24f351415dd410d522d')
#' rescan_file(hash='99017f6ee51415dd410d522d') # incorrect hash
#' }

rescan_file <- function(hash = NULL, ...) {

  # A hash string is mandatory.
  if (!is.character(hash)) {
    stop("Must specify the hash.")
  }

  .Deprecated("")

  res <- virustotal2_POST(path = "file/rescan",
                          query = list(resource = hash), ...)

  # response_code of 0 means the hash is unknown: return a one-row
  # template frame with whatever fields the API did send, NA elsewhere.
  if (res$response_code == 0) {
    template_cols <- c("scans", "scan_id", "sha1", "resource",
                       "response_code", "scan_date", "permalink",
                       "verbose_msg", "total", "positives", "sha256", "md5")
    res_df <- read.table(text = "", col.names = template_cols)
    res_df[1, match(names(res), names(res_df))] <- res
    return(res_df)
  }

  # Otherwise, flatten the response list into a one-row data.frame.
  as.data.frame(res)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/rescan_file.R
#' Submit a file for scanning
#'
#' @param file_path Required; Path to the document
#' @param \dots Additional arguments passed to \code{\link{virustotal2_POST}}.
#'
#' @return data.frame with the following columns:
#' \code{scan_id, sha1, resource, response_code, sha256, permalink, md5, verbose_msg}
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' scan_file(file_path='path_to_suspicious_file')
#' }

scan_file <- function(file_path = NULL, ...) {

  # The file must exist locally before we try to upload it.
  if (!file.exists(file_path)) {
    stop("The file doesn't Exist. Please check the path.\n")
  }

  .Deprecated("")

  # Fix: `...` was documented as being passed to virustotal2_POST but was
  # previously dropped; it is now forwarded as advertised.
  res <- virustotal2_POST(path = "file/scan",
                          body = list(file = upload_file(file_path)), ...)

  # Flatten the response list into a one-row data.frame.
  as.data.frame(res)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/scan_file.R
#' Submit URL for scanning
#'
#' Submit a URL for scanning. Returns a data.frame with \code{scan_id} which can be used to
#' fetch the report using \code{\link{url_report}}
#'
#' @param url url; string; required
#' @param \dots Additional arguments passed to \code{\link{virustotal_POST}}.
#'
#' @return data.frame with 7 columns:
#' \code{permalink, resource, url, response_code, scan_date, scan_id, verbose_msg}
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' scan_url("http://www.google.com")
#' }

scan_url <- function(url = NULL, ...) {

  # A URL string is mandatory.
  if (!is.character(url)) {
    stop("Must specify a valid url.\n")
  }

  res <- virustotal_POST(path = "url/scan",
                         query = list(url = url),
                         ...)

  # Flatten the response list into a one-row data.frame.
  as.data.frame(res)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/scan_url.R
#' Set API Key
#'
#' Before anything else, get the API key from \url{https://www.virustotal.com/en/}.
#' Next, use \code{\link{set_key}} to store the API key in an environment variable \code{VirustotalToken}.
#' Once you have set the API key, you can use any of the functions.
#'
#' @param api_key API key. String. Required.
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @examples \dontrun{
#'
#' set_key('api_key_here')
#'
#' }

set_key <- function(api_key = NULL) {

  # The key must be a character string; anything else is rejected.
  if (!is.character(api_key)) {
    stop("Must specify API Key.\n")
  }

  # Persist the key for this session; the request helpers read it back
  # via Sys.getenv("VirustotalToken").
  Sys.setenv(VirustotalToken = api_key)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/set_key.R
#' Get URL Report
#'
#' Retrieve a scan report for a given URL. If no scan report is available, set \code{scan} to \code{1} to get a new report.
#'
#' @param url URL. String. \code{url} or \code{scan_id} must be specified.
#' @param scan_id scan id for a particular url scan. String. \code{url} or \code{scan_id} must be specified.
#' @param scan String. Optional. Can be 0 or 1. Default is \code{1}.
#' When \code{1}, submits \code{url} for scanning if no existing reports are found.
#' When scan is set to \code{1}, the result includes a \code{scan_id} field, which can be used again to retrieve the report.
#' @param \dots Additional arguments passed to \code{\link{virustotal2_POST}}.
#'
#' @return data.frame with 13 columns:
#' \code{scan_id, resource, url, response_code, scan_date, permalink, verbose_msg, positives, total, .id, detected, result, detail}
#'
#' @export
#'
#' @references \url{https://developers.virustotal.com/v2.0/reference}
#'
#' @seealso \code{\link{set_key}} for setting the API key
#'
#' @examples \dontrun{
#'
#' # Before calling the function, set the API key using set_key('api_key_here')
#'
#' url_report("http://www.google.com")
#' url_report(scan_id = "ebdd15c397d2b0c6f50c3f2df531357d1201ff5976802316405e60880d6bf5ec-1478786749")
#' }

url_report <- function(url = NULL, scan_id = NULL, scan = 1, ...) {

    # Need at least one of url / scan_id to identify the report.
    if (!is.character(url) & !is.character(scan_id)) {
        stop("Must specify url or scan_id.\n")
    }

    # The scan flag is compared as a string, so both 0/1 and "0"/"1" pass.
    if (! (scan %in% c("0", "1"))) stop("scan must be either 0 or 1.\n")

    params <- list(resource = url, scan_id = scan_id, scan = scan)

    .Deprecated("")
    # NOTE(review): despite the "report" name, this v2 endpoint is hit with
    # a POST (virustotal2_POST), not a GET.
    res <- virustotal2_POST(path = "url/report", query = params, ...)

    # Initialize empty data.frame
    # (single-row template used by the fallback branches below;
    # unmatched columns stay NA)
    res_df <- read.table(text = "", col.names = c("scan_id", "resource",
                              "url", "response_code", "scan_date",
                              "permalink", "verbose_msg", "positives",
                              "total", "detected", "result", "detail"))

    # Empty response when an explicit scan_id was given: likely a bad id.
    if ( !is.null(scan_id) & length(res) == 0) {
       warning("No results returned. 
Likely cause: incorrect scan_id.\n")
       res_df[1, "scan_id"] <- scan_id
       return(res_df)

    } else if (res$response_code == 0) {
       # Resource unknown to VirusTotal; return what little the API sent.
       warning("No reports for the URL available. Set scan to 1 to submit URL for scanning.\n")
       res_df[1, match(names(res), names(res_df))] <- res
       return(res_df)

    } else if (!is.null(url) & length(res) < 11) {
       # URL was just queued for scanning; the full report is not ready yet.
       warning("No reports for the URL available. The URL has been successfully submitted for scanning. Come back later for results.\n")
       res_df[1, match(names(res), names(res_df))] <- res
       return(res_df)
    }

    # Full report: flatten the 10 scalar summary fields, then bind the
    # per-scanner results (one row per scanning service).
    res_10 <- do.call(cbind, lapply(res[1:10], unlist))
    res_11 <- ldply(res$scans, as.data.frame)

    cbind(res_10, res_11)
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/url_report.R
#' @title virustotal: Access Virustotal API
#'
#' @name virustotal-package
#' @aliases virustotal
#'
#' @description Access virustotal API. See \url{https://www.virustotal.com/}.
#' Details about results of calls to the API can be found at \url{https://developers.virustotal.com/v2.0/reference}.
#'
#' You will need credentials to use this application.
#' If you haven't already, get the API Key at \url{https://www.virustotal.com/}.
#'
#'
#' @importFrom httr GET content POST upload_file add_headers
#' @importFrom plyr rbind.fill ldply
#' @importFrom utils read.table
#' @docType package
#' @author Gaurav Sood
NULL

#'
#' Base POST AND GET functions. Not exported.
#'
#' GET for the v2 API
#'
#' @param query query list
#' @param path path to the specific API service url
#' @param key A character string containing Virustotal API Key. The default is retrieved from \code{Sys.getenv("VirustotalToken")}.
#' @param \dots Additional arguments passed to \code{\link[httr]{GET}}.
#' @return list

virustotal2_GET <- function(query=list(), path = path, key = Sys.getenv("VirustotalToken"), ...) {

  # Refuse to make a request without an API key.
  if (identical(key, "")) {
    stop("Please set application key via set_key(key='key')).\n")
  }

  # The v2 API authenticates via an `apikey` query parameter.
  query$apikey <- key

  # Respect the 4-requests-per-minute public quota before calling out.
  rate_limit()

  res <- GET("https://www.virustotal.com/", path = paste0("vtapi/v2/", path), query = query, ...)

  # Raise an informative error on HTTP failure / rate-limit status (204).
  virustotal_check(res)

  res <- content(res)

  res
}

#'
#' GET for the Current V3 API
#'
#' @param query query list
#' @param path path to the specific API service url
#' @param key A character string containing Virustotal API Key. The default is retrieved from \code{Sys.getenv("VirustotalToken")}.
#' @param \dots Additional arguments passed to \code{\link[httr]{GET}}.
#' @return list

virustotal_GET <- function(query=list(), path = path, key = Sys.getenv("VirustotalToken"), ...) {

  # Refuse to make a request without an API key.
  if (identical(key, "")) {
    stop("Please set application key via set_key(key='key')).\n")
  }

  # NOTE(review): the v3 API is authenticated via the `x-apikey` header
  # below; appending the key to the query string here looks redundant and
  # exposes the key in URLs/logs — confirm whether this is intentional.
  query$apikey <- key

  # Respect the 4-requests-per-minute public quota before calling out.
  rate_limit()

  res <- GET("https://virustotal.com/", path = paste0("api/v3/", path), query = query, add_headers('x-apikey' = key), ...)

  virustotal_check(res)

  # v3 responses are JSON; parse into nested R lists.
  res <- content(res, as = "parsed", type = "application/json")

  res
}

#'
#' POST for the Current V3 API
#'
#' @param query query list
#' @param body file
#' @param path path to the specific API service url
#' @param key A character string containing Virustotal API Key. The default is retrieved from \code{Sys.getenv("VirustotalToken")}.
#' @param \dots Additional arguments passed to \code{\link[httr]{POST}}.
#' @return list

virustotal_POST <- function(query=list(), path = path, body = NULL, key = Sys.getenv("VirustotalToken"), ...) {

  # Refuse to make a request without an API key.
  if (identical(key, "")) {
    stop("Please set application key via set_key(key='key')).\n")
  }

  # Respect the 4-requests-per-minute public quota before calling out.
  rate_limit()

  # NOTE(review): the `query` argument is accepted but not forwarded to
  # POST() here (only `body` is sent) — callers such as scan_url() pass a
  # query list that is silently dropped; confirm whether that is intended.
  # Body is JSON-encoded; v3 authenticates via the `x-apikey` header.
  res <- POST("https://virustotal.com/", path = paste0("api/v3/", path), body = body, encode = "json", add_headers('x-apikey' = key), ...)

  virustotal_check(res)

  res <- content(res)

  res
}

#'
#' POST for V2 API
#'
#' @param query query list
#' @param body file
#' @param path path to the specific API service url
#' @param key A character string containing Virustotal API Key. The default is retrieved from \code{Sys.getenv("VirustotalToken")}.
#' @param \dots Additional arguments passed to \code{\link[httr]{POST}}.
#' @return list

virustotal2_POST <- function(query=list(), path = path, body=NULL, key = Sys.getenv("VirustotalToken"), ...) {

  # Refuse to make a request without an API key.
  if (identical(key, "")) {
    stop("Please set application key via set_key(key='key')).\n")
  }

  # The v2 API authenticates via an `apikey` query parameter.
  query$apikey <- key

  # Respect the 4-requests-per-minute public quota before calling out.
  rate_limit()

  res <- POST("https://www.virustotal.com/", path = paste0("vtapi/v2/", path), query = query, body = body, ...)

  virustotal_check(res)

  res <- content(res)

  res
}

#'
#' Request Response Verification
#'
#' @param req request
#' @return in case of failure, a message

virustotal_check <- function(req) {

  # VirusTotal signals quota exhaustion with HTTP 204.
  if (req$status_code == 204) stop("Rate Limit Exceeded. Only 4 Queries per minute allowed.\n")

  # Anything below 400 is treated as success.
  if (req$status_code < 400) return(invisible())

  stop("HTTP failure: ", req$status_code, "\n", call. = FALSE)
}

#'
#' Rate Limits
#'
#' Virustotal requests throttled at 4 per min. The function creates an env. var.
#' that tracks number of requests per minute, and enforces appropriate waiting.
#'
#' The VT_RATE_LIMIT env. var. holds a comma-separated triple:
#' "request_count,window_start_time,accumulated_seconds".

rate_limit <- function() {

  # First request --- initialize time of first request and request count
  if (Sys.getenv("VT_RATE_LIMIT") == "") {
     return(Sys.setenv(VT_RATE_LIMIT = paste0(0, ",", Sys.time(), ",", 0)))
  }

  rate_lim <- Sys.getenv("VT_RATE_LIMIT")

  # Parse the three state fields back out of the env. var.
  req_count <- as.numeric(gsub(",.*", "", rate_lim)) + 1
  # NOTE(review): as.numeric() has no `units` argument for character input;
  # the extra argument appears to be silently ignored — confirm intent.
  past_duration <- as.numeric(strsplit(rate_lim, ",")[[1]][3], units = "secs")
  current_duration <- difftime(Sys.time(), as.POSIXct(strsplit(rate_lim, ",")[[1]][2]), units = "secs")

  # More than a minute since the window started: reset the window.
  if (current_duration > 60) {
    return(Sys.setenv(VT_RATE_LIMIT = paste0(1, ",", Sys.time(), ",", 0)))
  }

  net_duration <- past_duration + current_duration

  # Fifth request inside the same minute: sleep out the remainder of the
  # window, then start a fresh one.
  if (req_count > 4 & net_duration <= 60) {
      Sys.sleep(60 - net_duration)
      return(Sys.setenv(VT_RATE_LIMIT = paste0(1, ",", Sys.time(), ",", 0)))
  }

  # Otherwise just record the updated count and accumulated duration.
  return(Sys.setenv(VT_RATE_LIMIT = paste0(req_count, ",", Sys.time(), ",", net_duration)))
}
/scratch/gouwar.j/cran-all/cranData/virustotal/R/virustotal.R
## ---- eval=F, install--------------------------------------------------------- # #library(devtools) # install_github("soodoku/virustotal") ## ---- eval=F, load------------------------------------------------------------ # library(virustotal) ## ---- eval=F, api_key--------------------------------------------------------- # set_key("your_key") ## ---- eval=F, domain--------------------------------------------------------- # domain_report("http://www.google.com")$categories ## ---- eval=F, scan_url-------------------------------------------------------- # scan_url("http://www.google.com") ## ---- eval=F, url------------------------------------------------------------- # head(url_report("http://www.google.com")[, 1:2], 10) ## ---- eval=F, ip-------------------------------------------------------------- # ip_report("8.8.8.8")$country ## ---- eval=F, file------------------------------------------------------------ # head(file_report("99017f6eebbac24f351415dd410d522d")[,1:2], 10) ## ---- eval=F, scan_file------------------------------------------------------- # scan_file("using_virustotal.Rmd")[,1:2] ## ---- eval=F, rescan_file----------------------------------------------------- # rescan_file(hash='99017f6eebbac24f351415dd410d522d')[,1:2]
/scratch/gouwar.j/cran-all/cranData/virustotal/inst/doc/using_virustotal.R
--- title: "Using virustotal" author: "Gaurav Sood" date: "`r Sys.Date()`" vignette: > %\VignetteIndexEntry{Using virustotal} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ## Using virustotal ### Installation To get the current development version from GitHub: ```{r, eval=F, install} #library(devtools) install_github("soodoku/virustotal") ``` #### Load up the lib: ```{r, eval=F, load} library(virustotal) ``` #### Authentication Start by getting the API key from [https://www.virustotal.com/](https://www.virustotal.com/). Next, set it: ```{r, eval=F, api_key} set_key("your_key") ``` #### Get domain report Get report on a domain, including passive DNS: ```{r, eval=F, domain} domain_report("http://www.google.com")$categories ``` ``` ## [[1]] ## [1] "searchengines" ``` #### Scan URL ```{r, eval=F, scan_url} scan_url("http://www.google.com") ``` ``` ## permalink resource ## 1 https://www.virustotal.com/url/dd014af5ed6b38d9130e3f466f850e46d21b951199d53a18ef29ee9341614eaf/analysis/1464817664/ http://www.google.com/ ``` #### Get URL report Get report on a domain, including URL: ```{r, eval=F, url} head(url_report("http://www.google.com")[, 1:2], 10) ``` ``` ## scan_id resource ## 1 dd014af5ed6b38d9130e3f466f850e46d21b951199d53a18ef29ee9341614eaf-1464816996 http://www.google.com ## 2 dd014af5ed6b38d9130e3f466f850e46d21b951199d53a18ef29ee9341614eaf-1464816996 http://www.google.com ## 3 dd014af5ed6b38d9130e3f466f850e46d21b951199d53a18ef29ee9341614eaf-1464816996 http://www.google.com ## 4 dd014af5ed6b38d9130e3f466f850e46d21b951199d53a18ef29ee9341614eaf-1464816996 http://www.google.com ## 5 dd014af5ed6b38d9130e3f466f850e46d21b951199d53a18ef29ee9341614eaf-1464816996 http://www.google.com ## 6 dd014af5ed6b38d9130e3f466f850e46d21b951199d53a18ef29ee9341614eaf-1464816996 http://www.google.com ## 7 dd014af5ed6b38d9130e3f466f850e46d21b951199d53a18ef29ee9341614eaf-1464816996 http://www.google.com ## 8 
dd014af5ed6b38d9130e3f466f850e46d21b951199d53a18ef29ee9341614eaf-1464816996 http://www.google.com ## 9 dd014af5ed6b38d9130e3f466f850e46d21b951199d53a18ef29ee9341614eaf-1464816996 http://www.google.com ## 10 dd014af5ed6b38d9130e3f466f850e46d21b951199d53a18ef29ee9341614eaf-1464816996 http://www.google.com ``` #### Get IP report ```{r, eval=F, ip} ip_report("8.8.8.8")$country ``` ``` ## [1] "US" ``` #### Get File Report ```{r, eval=F, file} head(file_report("99017f6eebbac24f351415dd410d522d")[,1:2], 10) ``` ``` scans scan_id ## Bkav FALSE, 1.3.0.8042, 20160601 52d3df0ed60c46f336c131bf2ca454f73bafdc4b04dfa2aea80746f5ba9e6d1c-1464797947 ## TotalDefense TRUE, 37.1.62.1, Win32/ASuspect.HDBBD, 20160601 52d3df0ed60c46f336c131bf2ca454f73bafdc4b04dfa2aea80746f5ba9e6d1c-1464797947 ## MicroWorld-eScan TRUE, 12.0.250.0, Generic.Malware.V!w.7232B058, 20160601 52d3df0ed60c46f336c131bf2ca454f73bafdc4b04dfa2aea80746f5ba9e6d1c-1464797947 ## nProtect TRUE, 2016-06-01.01, Trojan/W32.Small.28672.BJA, 20160601 52d3df0ed60c46f336c131bf2ca454f73bafdc4b04dfa2aea80746f5ba9e6d1c-1464797947 ## CMC TRUE, 1.1.0.977, Trojan.Win32.VB!O, 20160530 52d3df0ed60c46f336c131bf2ca454f73bafdc4b04dfa2aea80746f5ba9e6d1c-1464797947 ## CAT-QuickHeal TRUE, 14.00, Trojan.Comame.r3, 20160601 52d3df0ed60c46f336c131bf2ca454f73bafdc4b04dfa2aea80746f5ba9e6d1c-1464797947 ## ALYac TRUE, 1.0.1.9, Generic.Malware.V!w.7232B058, 20160601 52d3df0ed60c46f336c131bf2ca454f73bafdc4b04dfa2aea80746f5ba9e6d1c-1464797947 ## Malwarebytes TRUE, 2.1.1.1115, Trojan.Qhost, 20160601 52d3df0ed60c46f336c131bf2ca454f73bafdc4b04dfa2aea80746f5ba9e6d1c-1464797947 ## Zillya TRUE, 2.0.0.2901, Trojan.VB.Win32.33493, 20160531 52d3df0ed60c46f336c131bf2ca454f73bafdc4b04dfa2aea80746f5ba9e6d1c-1464797947 ## SUPERAntiSpyware FALSE, 5.6.0.1032, 20160601 52d3df0ed60c46f336c131bf2ca454f73bafdc4b04dfa2aea80746f5ba9e6d1c-1464797947 ``` #### Scan File ```{r, eval=F, scan_file} scan_file("using_virustotal.Rmd")[,1:2] ``` ``` ## scan_id sha1 ## 1 
a9e60cd4d1e3ea00a78f7e92b77f250b26297d79e387e30916de3973a03b28a0-1464822937 303e723fd79416c3a8f3ac8247f82ed2f22e635d ``` #### Rescan File ```{r, eval=F, rescan_file} rescan_file(hash='99017f6eebbac24f351415dd410d522d')[,1:2] ``` ``` ## permalink response_code ## 1 https://www.virustotal.com/file/52d3df0ed60c46f336c131bf2ca454f73bafdc4b04dfa2aea80746f5ba9e6d1c/analysis/1464817836/ 1 ```
/scratch/gouwar.j/cran-all/cranData/virustotal/inst/doc/using_virustotal.Rmd